Merge 4.19-rc7 into tty-next
We want the fixes in here as well.

Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
This commit is contained in:
commit 4e1a606d55
@@ -184,6 +184,11 @@ API for implementing a new FPGA Manager driver
API for programming an FPGA
---------------------------

FPGA Manager flags

.. kernel-doc:: include/linux/fpga/fpga-mgr.h
   :doc: FPGA Manager flags

.. kernel-doc:: include/linux/fpga/fpga-mgr.h
   :functions: fpga_image_info

@@ -15,7 +15,8 @@ than x86. Check the v86d documentation for a list of currently supported
arches.

v86d source code can be downloaded from the following website:
http://dev.gentoo.org/~spock/projects/uvesafb
https://github.com/mjanusz/v86d

Please refer to the v86d documentation for detailed configuration and
installation instructions.

@@ -177,7 +178,7 @@ from the Video BIOS if you set pixclock to 0 in fb_var_screeninfo.

--
Michal Januszewski <spock@gentoo.org>
Last updated: 2009-03-30
Last updated: 2017-10-10

Documentation of the uvesafb options is loosely based on vesafb.txt.

@@ -425,7 +425,7 @@ tcp_mtu_probing - INTEGER
	1 - Disabled by default, enabled when an ICMP black hole detected
	2 - Always enabled, use initial MSS of tcp_base_mss.

tcp_probe_interval - INTEGER
tcp_probe_interval - UNSIGNED INTEGER
	Controls how often to start TCP Packetization-Layer Path MTU
	Discovery reprobe. The default is reprobing every 10 minutes as
	per RFC4821.

MAINTAINERS | 14

@@ -324,7 +324,6 @@ F:	Documentation/ABI/testing/sysfs-bus-acpi
F:	Documentation/ABI/testing/configfs-acpi
F:	drivers/pci/*acpi*
F:	drivers/pci/*/*acpi*
F:	drivers/pci/*/*/*acpi*
F:	tools/power/acpi/

ACPI APEI

@@ -1251,7 +1250,7 @@ N:	meson

ARM/Annapurna Labs ALPINE ARCHITECTURE
M:	Tsahee Zidenberg <tsahee@annapurnalabs.com>
M:	Antoine Tenart <antoine.tenart@free-electrons.com>
M:	Antoine Tenart <antoine.tenart@bootlin.com>
L:	linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
S:	Maintained
F:	arch/arm/mach-alpine/

@@ -2956,7 +2955,6 @@ F:	include/linux/bcm963xx_tag.h

BROADCOM BNX2 GIGABIT ETHERNET DRIVER
M:	Rasesh Mody <rasesh.mody@cavium.com>
M:	Harish Patil <harish.patil@cavium.com>
M:	Dept-GELinuxNICDev@cavium.com
L:	netdev@vger.kernel.org
S:	Supported

@@ -2977,6 +2975,7 @@ F:	drivers/scsi/bnx2i/

BROADCOM BNX2X 10 GIGABIT ETHERNET DRIVER
M:	Ariel Elior <ariel.elior@cavium.com>
M:	Sudarsana Kalluru <sudarsana.kalluru@cavium.com>
M:	everest-linux-l2@cavium.com
L:	netdev@vger.kernel.org
S:	Supported

@@ -5470,7 +5469,8 @@ S:	Odd Fixes
F:	drivers/net/ethernet/agere/

ETHERNET BRIDGE
M:	Stephen Hemminger <stephen@networkplumber.org>
M:	Roopa Prabhu <roopa@cumulusnetworks.com>
M:	Nikolay Aleksandrov <nikolay@cumulusnetworks.com>
L:	bridge@lists.linux-foundation.org (moderated for non-subscribers)
L:	netdev@vger.kernel.org
W:	http://www.linuxfoundation.org/en/Net:Bridge

@@ -8598,7 +8598,6 @@ F:	include/linux/spinlock*.h
F:	arch/*/include/asm/spinlock*.h
F:	include/linux/rwlock*.h
F:	include/linux/mutex*.h
F:	arch/*/include/asm/mutex*.h
F:	include/linux/rwsem*.h
F:	arch/*/include/asm/rwsem.h
F:	include/linux/seqlock.h

@@ -11995,7 +11994,7 @@ F:	Documentation/scsi/LICENSE.qla4xxx
F:	drivers/scsi/qla4xxx/

QLOGIC QLCNIC (1/10)Gb ETHERNET DRIVER
M:	Harish Patil <harish.patil@cavium.com>
M:	Shahed Shaikh <Shahed.Shaikh@cavium.com>
M:	Manish Chopra <manish.chopra@cavium.com>
M:	Dept-GELinuxNICDev@cavium.com
L:	netdev@vger.kernel.org

@@ -12003,7 +12002,6 @@ S:	Supported
F:	drivers/net/ethernet/qlogic/qlcnic/

QLOGIC QLGE 10Gb ETHERNET DRIVER
M:	Harish Patil <harish.patil@cavium.com>
M:	Manish Chopra <manish.chopra@cavium.com>
M:	Dept-GELinuxNICDev@cavium.com
L:	netdev@vger.kernel.org

@@ -15411,7 +15409,7 @@ S:	Maintained
UVESAFB DRIVER
M:	Michal Januszewski <spock@gentoo.org>
L:	linux-fbdev@vger.kernel.org
W:	http://dev.gentoo.org/~spock/projects/uvesafb/
W:	https://github.com/mjanusz/v86d
S:	Maintained
F:	Documentation/fb/uvesafb.txt
F:	drivers/video/fbdev/uvesafb.*

Makefile | 2

@@ -2,7 +2,7 @@
VERSION = 4
PATCHLEVEL = 19
SUBLEVEL = 0
EXTRAVERSION = -rc6
EXTRAVERSION = -rc7
NAME = Merciless Moray

# *DOCUMENTATION*

@@ -11,6 +11,7 @@
#include "sama5d2-pinfunc.h"
#include <dt-bindings/mfd/atmel-flexcom.h>
#include <dt-bindings/gpio/gpio.h>
#include <dt-bindings/pinctrl/at91.h>

/ {
	model = "Atmel SAMA5D2 PTC EK";

@@ -299,6 +300,7 @@ re_we_data {
	<PIN_PA30__NWE_NANDWE>,
	<PIN_PB2__NRD_NANDOE>;
	bias-pull-up;
	atmel,drive-strength = <ATMEL_PIO_DRVSTR_ME>;
};

ale_cle_rdy_cs {

@@ -106,21 +106,23 @@ gic: interrupt-controller@1e100 {
global_timer: timer@1e200 {
	compatible = "arm,cortex-a9-global-timer";
	reg = <0x1e200 0x20>;
	interrupts = <GIC_PPI 11 IRQ_TYPE_LEVEL_HIGH>;
	interrupts = <GIC_PPI 11 IRQ_TYPE_EDGE_RISING>;
	clocks = <&axi_clk>;
};

local_timer: local-timer@1e600 {
	compatible = "arm,cortex-a9-twd-timer";
	reg = <0x1e600 0x20>;
	interrupts = <GIC_PPI 13 IRQ_TYPE_LEVEL_HIGH>;
	interrupts = <GIC_PPI 13 (GIC_CPU_MASK_SIMPLE(2) |
		IRQ_TYPE_EDGE_RISING)>;
	clocks = <&axi_clk>;
};

twd_watchdog: watchdog@1e620 {
	compatible = "arm,cortex-a9-twd-wdt";
	reg = <0x1e620 0x20>;
	interrupts = <GIC_PPI 14 IRQ_TYPE_LEVEL_HIGH>;
	interrupts = <GIC_PPI 14 (GIC_CPU_MASK_SIMPLE(2) |
		IRQ_TYPE_LEVEL_HIGH)>;
};

armpll: armpll {

@@ -158,7 +160,7 @@ timer: timer@80 {
serial0: serial@600 {
	compatible = "brcm,bcm6345-uart";
	reg = <0x600 0x1b>;
	interrupts = <GIC_SPI 32 0>;
	interrupts = <GIC_SPI 32 IRQ_TYPE_LEVEL_HIGH>;
	clocks = <&periph_clk>;
	clock-names = "periph";
	status = "disabled";

@@ -167,7 +169,7 @@ serial0: serial@600 {
serial1: serial@620 {
	compatible = "brcm,bcm6345-uart";
	reg = <0x620 0x1b>;
	interrupts = <GIC_SPI 33 0>;
	interrupts = <GIC_SPI 33 IRQ_TYPE_LEVEL_HIGH>;
	clocks = <&periph_clk>;
	clock-names = "periph";
	status = "disabled";

@@ -180,7 +182,7 @@ nand: nand@2000 {
	reg = <0x2000 0x600>, <0xf0 0x10>;
	reg-names = "nand", "nand-int-base";
	status = "disabled";
	interrupts = <GIC_SPI 38 0>;
	interrupts = <GIC_SPI 38 IRQ_TYPE_LEVEL_HIGH>;
	interrupt-names = "nand";
};

@@ -1078,8 +1078,8 @@ spi6: spi@5c001000 {
	interrupts = <GIC_SPI 86 IRQ_TYPE_LEVEL_HIGH>;
	clocks = <&rcc SPI6_K>;
	resets = <&rcc SPI6_R>;
	dmas = <&mdma1 34 0x0 0x40008 0x0 0x0 0>,
	       <&mdma1 35 0x0 0x40002 0x0 0x0 0>;
	dmas = <&mdma1 34 0x0 0x40008 0x0 0x0>,
	       <&mdma1 35 0x0 0x40002 0x0 0x0>;
	dma-names = "rx", "tx";
	status = "disabled";
};

@@ -800,8 +800,7 @@ hdmi_out: port@1 {
};

hdmi_phy: hdmi-phy@1ef0000 {
	compatible = "allwinner,sun8i-r40-hdmi-phy",
		     "allwinner,sun50i-a64-hdmi-phy";
	compatible = "allwinner,sun8i-r40-hdmi-phy";
	reg = <0x01ef0000 0x10000>;
	clocks = <&ccu CLK_BUS_HDMI1>, <&ccu CLK_HDMI_SLOW>,
		 <&ccu 7>, <&ccu 16>;

@@ -473,7 +473,7 @@ void pci_ioremap_set_mem_type(int mem_type)

int pci_ioremap_io(unsigned int offset, phys_addr_t phys_addr)
{
	BUG_ON(offset + SZ_64K > IO_SPACE_LIMIT);
	BUG_ON(offset + SZ_64K - 1 > IO_SPACE_LIMIT);

	return ioremap_page_range(PCI_IO_VIRT_BASE + offset,
				  PCI_IO_VIRT_BASE + offset + SZ_64K,

@@ -413,3 +413,4 @@
396	common	pkey_free	sys_pkey_free
397	common	statx		sys_statx
398	common	rseq		sys_rseq
399	common	io_pgetevents	sys_io_pgetevents

@@ -57,6 +57,45 @@ static u64 core_reg_offset_from_id(u64 id)
	return id & ~(KVM_REG_ARCH_MASK | KVM_REG_SIZE_MASK | KVM_REG_ARM_CORE);
}

static int validate_core_offset(const struct kvm_one_reg *reg)
{
	u64 off = core_reg_offset_from_id(reg->id);
	int size;

	switch (off) {
	case KVM_REG_ARM_CORE_REG(regs.regs[0]) ...
	     KVM_REG_ARM_CORE_REG(regs.regs[30]):
	case KVM_REG_ARM_CORE_REG(regs.sp):
	case KVM_REG_ARM_CORE_REG(regs.pc):
	case KVM_REG_ARM_CORE_REG(regs.pstate):
	case KVM_REG_ARM_CORE_REG(sp_el1):
	case KVM_REG_ARM_CORE_REG(elr_el1):
	case KVM_REG_ARM_CORE_REG(spsr[0]) ...
	     KVM_REG_ARM_CORE_REG(spsr[KVM_NR_SPSR - 1]):
		size = sizeof(__u64);
		break;

	case KVM_REG_ARM_CORE_REG(fp_regs.vregs[0]) ...
	     KVM_REG_ARM_CORE_REG(fp_regs.vregs[31]):
		size = sizeof(__uint128_t);
		break;

	case KVM_REG_ARM_CORE_REG(fp_regs.fpsr):
	case KVM_REG_ARM_CORE_REG(fp_regs.fpcr):
		size = sizeof(__u32);
		break;

	default:
		return -EINVAL;
	}

	if (KVM_REG_SIZE(reg->id) == size &&
	    IS_ALIGNED(off, size / sizeof(__u32)))
		return 0;

	return -EINVAL;
}

static int get_core_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
	/*

@@ -76,6 +115,9 @@ static int get_core_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
	    (off + (KVM_REG_SIZE(reg->id) / sizeof(__u32))) >= nr_regs)
		return -ENOENT;

	if (validate_core_offset(reg))
		return -EINVAL;

	if (copy_to_user(uaddr, ((u32 *)regs) + off, KVM_REG_SIZE(reg->id)))
		return -EFAULT;

@@ -98,6 +140,9 @@ static int set_core_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
	    (off + (KVM_REG_SIZE(reg->id) / sizeof(__u32))) >= nr_regs)
		return -ENOENT;

	if (validate_core_offset(reg))
		return -EINVAL;

	if (KVM_REG_SIZE(reg->id) > sizeof(tmp))
		return -EINVAL;

@@ -107,17 +152,25 @@ static int set_core_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
	}

	if (off == KVM_REG_ARM_CORE_REG(regs.pstate)) {
		u32 mode = (*(u32 *)valp) & PSR_AA32_MODE_MASK;
		u64 mode = (*(u64 *)valp) & PSR_AA32_MODE_MASK;
		switch (mode) {
		case PSR_AA32_MODE_USR:
			if (!system_supports_32bit_el0())
				return -EINVAL;
			break;
		case PSR_AA32_MODE_FIQ:
		case PSR_AA32_MODE_IRQ:
		case PSR_AA32_MODE_SVC:
		case PSR_AA32_MODE_ABT:
		case PSR_AA32_MODE_UND:
			if (!vcpu_el1_is_32bit(vcpu))
				return -EINVAL;
			break;
		case PSR_MODE_EL0t:
		case PSR_MODE_EL1t:
		case PSR_MODE_EL1h:
			if (vcpu_el1_is_32bit(vcpu))
				return -EINVAL;
			break;
		default:
			err = -EINVAL;

@@ -117,11 +117,14 @@ static pte_t get_clear_flush(struct mm_struct *mm,

		/*
		 * If HW_AFDBM is enabled, then the HW could turn on
		 * the dirty bit for any page in the set, so check
		 * them all. All hugetlb entries are already young.
		 * the dirty or accessed bit for any page in the set,
		 * so check them all.
		 */
		if (pte_dirty(pte))
			orig_pte = pte_mkdirty(orig_pte);

		if (pte_young(pte))
			orig_pte = pte_mkyoung(orig_pte);
	}

	if (valid) {

@@ -320,11 +323,40 @@ pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
	return get_clear_flush(mm, addr, ptep, pgsize, ncontig);
}

/*
 * huge_ptep_set_access_flags will update access flags (dirty, accesssed)
 * and write permission.
 *
 * For a contiguous huge pte range we need to check whether or not write
 * permission has to change only on the first pte in the set. Then for
 * all the contiguous ptes we need to check whether or not there is a
 * discrepancy between dirty or young.
 */
static int __cont_access_flags_changed(pte_t *ptep, pte_t pte, int ncontig)
{
	int i;

	if (pte_write(pte) != pte_write(huge_ptep_get(ptep)))
		return 1;

	for (i = 0; i < ncontig; i++) {
		pte_t orig_pte = huge_ptep_get(ptep + i);

		if (pte_dirty(pte) != pte_dirty(orig_pte))
			return 1;

		if (pte_young(pte) != pte_young(orig_pte))
			return 1;
	}

	return 0;
}

int huge_ptep_set_access_flags(struct vm_area_struct *vma,
			       unsigned long addr, pte_t *ptep,
			       pte_t pte, int dirty)
{
	int ncontig, i, changed = 0;
	int ncontig, i;
	size_t pgsize = 0;
	unsigned long pfn = pte_pfn(pte), dpfn;
	pgprot_t hugeprot;

@@ -336,19 +368,23 @@ int huge_ptep_set_access_flags(struct vm_area_struct *vma,
	ncontig = find_num_contig(vma->vm_mm, addr, ptep, &pgsize);
	dpfn = pgsize >> PAGE_SHIFT;

	orig_pte = get_clear_flush(vma->vm_mm, addr, ptep, pgsize, ncontig);
	if (!pte_same(orig_pte, pte))
		changed = 1;
	if (!__cont_access_flags_changed(ptep, pte, ncontig))
		return 0;

	/* Make sure we don't lose the dirty state */
	orig_pte = get_clear_flush(vma->vm_mm, addr, ptep, pgsize, ncontig);

	/* Make sure we don't lose the dirty or young state */
	if (pte_dirty(orig_pte))
		pte = pte_mkdirty(pte);

	if (pte_young(orig_pte))
		pte = pte_mkyoung(pte);

	hugeprot = pte_pgprot(pte);
	for (i = 0; i < ncontig; i++, ptep++, addr += pgsize, pfn += dpfn)
		set_pte_at(vma->vm_mm, addr, ptep, pfn_pte(pfn, hugeprot));

	return changed;
	return 1;
}

void huge_ptep_set_wrprotect(struct mm_struct *mm,

@@ -1306,6 +1306,16 @@ void show_user_instructions(struct pt_regs *regs)

	pc = regs->nip - (instructions_to_print * 3 / 4 * sizeof(int));

	/*
	 * Make sure the NIP points at userspace, not kernel text/data or
	 * elsewhere.
	 */
	if (!__access_ok(pc, instructions_to_print * sizeof(int), USER_DS)) {
		pr_info("%s[%d]: Bad NIP, not dumping instructions.\n",
			current->comm, current->pid);
		return;
	}

	pr_info("%s[%d]: code: ", current->comm, current->pid);

	for (i = 0; i < instructions_to_print; i++) {

@@ -646,6 +646,16 @@ int kvmppc_book3s_radix_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu,
	 */
	local_irq_disable();
	ptep = __find_linux_pte(vcpu->arch.pgdir, hva, NULL, &shift);
	/*
	 * If the PTE disappeared temporarily due to a THP
	 * collapse, just return and let the guest try again.
	 */
	if (!ptep) {
		local_irq_enable();
		if (page)
			put_page(page);
		return RESUME_GUEST;
	}
	pte = *ptep;
	local_irq_enable();

@@ -28,12 +28,6 @@ static int __patch_instruction(unsigned int *exec_addr, unsigned int instr,
{
	int err;

	/* Make sure we aren't patching a freed init section */
	if (init_mem_is_free && init_section_contains(exec_addr, 4)) {
		pr_debug("Skipping init section patching addr: 0x%px\n", exec_addr);
		return 0;
	}

	__put_user_size(instr, patch_addr, 4, err);
	if (err)
		return err;

@@ -148,7 +142,7 @@ static inline int unmap_patch_area(unsigned long addr)
	return 0;
}

int patch_instruction(unsigned int *addr, unsigned int instr)
static int do_patch_instruction(unsigned int *addr, unsigned int instr)
{
	int err;
	unsigned int *patch_addr = NULL;

@@ -188,12 +182,22 @@ int patch_instruction(unsigned int *addr, unsigned int instr)
}
#else /* !CONFIG_STRICT_KERNEL_RWX */

int patch_instruction(unsigned int *addr, unsigned int instr)
static int do_patch_instruction(unsigned int *addr, unsigned int instr)
{
	return raw_patch_instruction(addr, instr);
}

#endif /* CONFIG_STRICT_KERNEL_RWX */

int patch_instruction(unsigned int *addr, unsigned int instr)
{
	/* Make sure we aren't patching a freed init section */
	if (init_mem_is_free && init_section_contains(addr, 4)) {
		pr_debug("Skipping init section patching addr: 0x%px\n", addr);
		return 0;
	}
	return do_patch_instruction(addr, instr);
}
NOKPROBE_SYMBOL(patch_instruction);

int patch_branch(unsigned int *addr, unsigned long target, int flags)

@@ -1217,9 +1217,10 @@ int find_and_online_cpu_nid(int cpu)
	 * Need to ensure that NODE_DATA is initialized for a node from
	 * available memory (see memblock_alloc_try_nid). If unable to
	 * init the node, then default to nearest node that has memory
	 * installed.
	 * installed. Skip onlining a node if the subsystems are not
	 * yet initialized.
	 */
	if (try_online_node(new_nid))
	if (!topology_inited || try_online_node(new_nid))
		new_nid = first_online_node;
#else
	/*

@@ -186,7 +186,7 @@ static void __init setup_bootmem(void)
	BUG_ON(mem_size == 0);

	set_max_mapnr(PFN_DOWN(mem_size));
	max_low_pfn = pfn_base + PFN_DOWN(mem_size);
	max_low_pfn = memblock_end_of_DRAM();

#ifdef CONFIG_BLK_DEV_INITRD
	setup_initrd();

@@ -68,7 +68,13 @@ $(obj)/vdso-image-%.c: $(obj)/vdso%.so.dbg $(obj)/vdso%.so $(obj)/vdso2c FORCE
CFL := $(PROFILING) -mcmodel=small -fPIC -O2 -fasynchronous-unwind-tables -m64 \
       $(filter -g%,$(KBUILD_CFLAGS)) $(call cc-option, -fno-stack-protector) \
       -fno-omit-frame-pointer -foptimize-sibling-calls \
       -DDISABLE_BRANCH_PROFILING -DBUILD_VDSO $(RETPOLINE_VDSO_CFLAGS)
       -DDISABLE_BRANCH_PROFILING -DBUILD_VDSO

ifdef CONFIG_RETPOLINE
ifneq ($(RETPOLINE_VDSO_CFLAGS),)
  CFL += $(RETPOLINE_VDSO_CFLAGS)
endif
endif

$(vobjs): KBUILD_CFLAGS := $(filter-out $(GCC_PLUGINS_CFLAGS) $(RETPOLINE_CFLAGS),$(KBUILD_CFLAGS)) $(CFL)

@@ -138,7 +144,13 @@ KBUILD_CFLAGS_32 += $(call cc-option, -fno-stack-protector)
KBUILD_CFLAGS_32 += $(call cc-option, -foptimize-sibling-calls)
KBUILD_CFLAGS_32 += -fno-omit-frame-pointer
KBUILD_CFLAGS_32 += -DDISABLE_BRANCH_PROFILING
KBUILD_CFLAGS_32 += $(RETPOLINE_VDSO_CFLAGS)

ifdef CONFIG_RETPOLINE
ifneq ($(RETPOLINE_VDSO_CFLAGS),)
  KBUILD_CFLAGS_32 += $(RETPOLINE_VDSO_CFLAGS)
endif
endif

$(obj)/vdso32.so.dbg: KBUILD_CFLAGS = $(KBUILD_CFLAGS_32)

$(obj)/vdso32.so.dbg: FORCE \

@@ -43,8 +43,9 @@ extern u8 hvclock_page
notrace static long vdso_fallback_gettime(long clock, struct timespec *ts)
{
	long ret;
	asm("syscall" : "=a" (ret) :
	    "0" (__NR_clock_gettime), "D" (clock), "S" (ts) : "memory");
	asm ("syscall" : "=a" (ret), "=m" (*ts) :
	     "0" (__NR_clock_gettime), "D" (clock), "S" (ts) :
	     "memory", "rcx", "r11");
	return ret;
}

@@ -52,8 +53,9 @@ notrace static long vdso_fallback_gtod(struct timeval *tv, struct timezone *tz)
{
	long ret;

	asm("syscall" : "=a" (ret) :
	    "0" (__NR_gettimeofday), "D" (tv), "S" (tz) : "memory");
	asm ("syscall" : "=a" (ret), "=m" (*tv), "=m" (*tz) :
	     "0" (__NR_gettimeofday), "D" (tv), "S" (tz) :
	     "memory", "rcx", "r11");
	return ret;
}

@@ -64,13 +66,13 @@ notrace static long vdso_fallback_gettime(long clock, struct timespec *ts)
{
	long ret;

	asm(
	asm (
		"mov %%ebx, %%edx \n"
		"mov %2, %%ebx \n"
		"mov %[clock], %%ebx \n"
		"call __kernel_vsyscall \n"
		"mov %%edx, %%ebx \n"
		: "=a" (ret)
		: "0" (__NR_clock_gettime), "g" (clock), "c" (ts)
		: "=a" (ret), "=m" (*ts)
		: "0" (__NR_clock_gettime), [clock] "g" (clock), "c" (ts)
		: "memory", "edx");
	return ret;
}

@@ -79,13 +81,13 @@ notrace static long vdso_fallback_gtod(struct timeval *tv, struct timezone *tz)
{
	long ret;

	asm(
	asm (
		"mov %%ebx, %%edx \n"
		"mov %2, %%ebx \n"
		"mov %[tv], %%ebx \n"
		"call __kernel_vsyscall \n"
		"mov %%edx, %%ebx \n"
		: "=a" (ret)
		: "0" (__NR_gettimeofday), "g" (tv), "c" (tz)
		: "=a" (ret), "=m" (*tv), "=m" (*tz)
		: "0" (__NR_gettimeofday), [tv] "g" (tv), "c" (tz)
		: "memory", "edx");
	return ret;
}

@@ -36,6 +36,7 @@

static int num_counters_llc;
static int num_counters_nb;
static bool l3_mask;

static HLIST_HEAD(uncore_unused_list);

@@ -209,6 +210,13 @@ static int amd_uncore_event_init(struct perf_event *event)
	hwc->config = event->attr.config & AMD64_RAW_EVENT_MASK_NB;
	hwc->idx = -1;

	/*
	 * SliceMask and ThreadMask need to be set for certain L3 events in
	 * Family 17h. For other events, the two fields do not affect the count.
	 */
	if (l3_mask)
		hwc->config |= (AMD64_L3_SLICE_MASK | AMD64_L3_THREAD_MASK);

	if (event->cpu < 0)
		return -EINVAL;

@@ -525,6 +533,7 @@ static int __init amd_uncore_init(void)
		amd_llc_pmu.name = "amd_l3";
		format_attr_event_df.show = &event_show_df;
		format_attr_event_l3.show = &event_show_l3;
		l3_mask = true;
	} else {
		num_counters_nb = NUM_COUNTERS_NB;
		num_counters_llc = NUM_COUNTERS_L2;

@@ -532,6 +541,7 @@ static int __init amd_uncore_init(void)
		amd_llc_pmu.name = "amd_l2";
		format_attr_event_df = format_attr_event;
		format_attr_event_l3 = format_attr_event;
		l3_mask = false;
	}

	amd_nb_pmu.attr_groups = amd_uncore_attr_groups_df;

@@ -3061,7 +3061,7 @@ static struct event_constraint bdx_uncore_pcu_constraints[] = {

void bdx_uncore_cpu_init(void)
{
	int pkg = topology_phys_to_logical_pkg(0);
	int pkg = topology_phys_to_logical_pkg(boot_cpu_data.phys_proc_id);

	if (bdx_uncore_cbox.num_boxes > boot_cpu_data.x86_max_cores)
		bdx_uncore_cbox.num_boxes = boot_cpu_data.x86_max_cores;

@@ -3931,16 +3931,16 @@ static const struct pci_device_id skx_uncore_pci_ids[] = {
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(21, 5, SKX_PCI_UNCORE_M2PCIE, 3),
	},
	{ /* M3UPI0 Link 0 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x204C),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(18, 0, SKX_PCI_UNCORE_M3UPI, 0),
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x204D),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(18, 1, SKX_PCI_UNCORE_M3UPI, 0),
	},
	{ /* M3UPI0 Link 1 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x204D),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(18, 1, SKX_PCI_UNCORE_M3UPI, 1),
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x204E),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(18, 2, SKX_PCI_UNCORE_M3UPI, 1),
	},
	{ /* M3UPI1 Link 2 */
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x204C),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(18, 4, SKX_PCI_UNCORE_M3UPI, 2),
		PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x204D),
		.driver_data = UNCORE_PCI_DEV_FULL_DATA(18, 5, SKX_PCI_UNCORE_M3UPI, 2),
	},
	{ /* end: all zeroes */ }
};

@@ -46,6 +46,14 @@
#define INTEL_ARCH_EVENT_MASK	\
	(ARCH_PERFMON_EVENTSEL_UMASK | ARCH_PERFMON_EVENTSEL_EVENT)

#define AMD64_L3_SLICE_SHIFT				48
#define AMD64_L3_SLICE_MASK				\
	((0xFULL) << AMD64_L3_SLICE_SHIFT)

#define AMD64_L3_THREAD_SHIFT				56
#define AMD64_L3_THREAD_MASK				\
	((0xFFULL) << AMD64_L3_THREAD_SHIFT)

#define X86_RAW_EVENT_MASK		\
	(ARCH_PERFMON_EVENTSEL_EVENT |	\
	 ARCH_PERFMON_EVENTSEL_UMASK |	\

@@ -10,8 +10,13 @@ struct cpumask;
struct mm_struct;

#ifdef CONFIG_X86_UV
#include <linux/efi.h>

extern enum uv_system_type get_uv_system_type(void);
static inline bool is_early_uv_system(void)
{
	return !((efi.uv_systab == EFI_INVALID_TABLE_ADDR) || !efi.uv_systab);
}
extern int is_uv_system(void);
extern int is_uv_hubless(void);
extern void uv_cpu_init(void);

@@ -23,6 +28,7 @@ extern const struct cpumask *uv_flush_tlb_others(const struct cpumask *cpumask,
#else	/* X86_UV */

static inline enum uv_system_type get_uv_system_type(void) { return UV_NONE; }
static inline bool is_early_uv_system(void)	{ return 0; }
static inline int is_uv_system(void)	{ return 0; }
static inline int is_uv_hubless(void)	{ return 0; }
static inline void uv_cpu_init(void)	{ }

@@ -922,7 +922,7 @@ static void init_amd(struct cpuinfo_x86 *c)
static unsigned int amd_size_cache(struct cpuinfo_x86 *c, unsigned int size)
{
	/* AMD errata T13 (order #21922) */
	if ((c->x86 == 6)) {
	if (c->x86 == 6) {
		/* Duron Rev A0 */
		if (c->x86_model == 3 && c->x86_stepping == 0)
			size = 64;

@@ -26,6 +26,7 @@
#include <asm/apic.h>
#include <asm/intel-family.h>
#include <asm/i8259.h>
#include <asm/uv/uv.h>

unsigned int __read_mostly cpu_khz;	/* TSC clocks / usec, not used here */
EXPORT_SYMBOL(cpu_khz);

@@ -1433,6 +1434,9 @@ void __init tsc_early_init(void)
{
	if (!boot_cpu_has(X86_FEATURE_TSC))
		return;
	/* Don't change UV TSC multi-chassis synchronization */
	if (is_early_uv_system())
		return;
	if (!determine_cpu_tsc_frequencies(true))
		return;
	loops_per_jiffy = get_loops_per_jiffy();

@@ -249,6 +249,17 @@ static u64 __read_mostly shadow_nonpresent_or_rsvd_mask;
 */
static const u64 shadow_nonpresent_or_rsvd_mask_len = 5;

/*
 * In some cases, we need to preserve the GFN of a non-present or reserved
 * SPTE when we usurp the upper five bits of the physical address space to
 * defend against L1TF, e.g. for MMIO SPTEs.  To preserve the GFN, we'll
 * shift bits of the GFN that overlap with shadow_nonpresent_or_rsvd_mask
 * left into the reserved bits, i.e. the GFN in the SPTE will be split into
 * high and low parts.  This mask covers the lower bits of the GFN.
 */
static u64 __read_mostly shadow_nonpresent_or_rsvd_lower_gfn_mask;

static void mmu_spte_set(u64 *sptep, u64 spte);
static union kvm_mmu_page_role
kvm_mmu_calc_root_page_role(struct kvm_vcpu *vcpu);

@@ -357,9 +368,7 @@ static bool is_mmio_spte(u64 spte)

static gfn_t get_mmio_spte_gfn(u64 spte)
{
	u64 mask = generation_mmio_spte_mask(MMIO_GEN_MASK) | shadow_mmio_mask |
		   shadow_nonpresent_or_rsvd_mask;
	u64 gpa = spte & ~mask;
	u64 gpa = spte & shadow_nonpresent_or_rsvd_lower_gfn_mask;

	gpa |= (spte >> shadow_nonpresent_or_rsvd_mask_len)
	       & shadow_nonpresent_or_rsvd_mask;

@@ -423,6 +432,8 @@ EXPORT_SYMBOL_GPL(kvm_mmu_set_mask_ptes);

static void kvm_mmu_reset_all_pte_masks(void)
{
	u8 low_phys_bits;

	shadow_user_mask = 0;
	shadow_accessed_mask = 0;
	shadow_dirty_mask = 0;

@@ -437,12 +448,17 @@ static void kvm_mmu_reset_all_pte_masks(void)
	 * appropriate mask to guard against L1TF attacks. Otherwise, it is
	 * assumed that the CPU is not vulnerable to L1TF.
	 */
	low_phys_bits = boot_cpu_data.x86_phys_bits;
	if (boot_cpu_data.x86_phys_bits <
	    52 - shadow_nonpresent_or_rsvd_mask_len)
	    52 - shadow_nonpresent_or_rsvd_mask_len) {
		shadow_nonpresent_or_rsvd_mask =
			rsvd_bits(boot_cpu_data.x86_phys_bits -
				  shadow_nonpresent_or_rsvd_mask_len,
				  boot_cpu_data.x86_phys_bits - 1);
		low_phys_bits -= shadow_nonpresent_or_rsvd_mask_len;
	}
	shadow_nonpresent_or_rsvd_lower_gfn_mask =
		GENMASK_ULL(low_phys_bits - 1, PAGE_SHIFT);
}

static int is_cpuid_PSE36(void)

@@ -121,7 +121,6 @@ module_param_named(pml, enable_pml, bool, S_IRUGO);

#define MSR_BITMAP_MODE_X2APIC		1
#define MSR_BITMAP_MODE_X2APIC_APICV	2
#define MSR_BITMAP_MODE_LM		4

#define KVM_VMX_TSC_MULTIPLIER_MAX     0xffffffffffffffffULL

@@ -857,6 +856,7 @@ struct nested_vmx {

	/* to migrate it to L2 if VM_ENTRY_LOAD_DEBUG_CONTROLS is off */
	u64 vmcs01_debugctl;
	u64 vmcs01_guest_bndcfgs;

	u16 vpid02;
	u16 last_vpid;

@@ -2899,8 +2899,7 @@ static void vmx_prepare_switch_to_guest(struct kvm_vcpu *vcpu)
		vmx->msr_host_kernel_gs_base = read_msr(MSR_KERNEL_GS_BASE);
	}

	if (is_long_mode(&vmx->vcpu))
		wrmsrl(MSR_KERNEL_GS_BASE, vmx->msr_guest_kernel_gs_base);
	wrmsrl(MSR_KERNEL_GS_BASE, vmx->msr_guest_kernel_gs_base);
#else
	savesegment(fs, fs_sel);
	savesegment(gs, gs_sel);

@@ -2951,8 +2950,7 @@ static void vmx_prepare_switch_to_host(struct vcpu_vmx *vmx)
	vmx->loaded_cpu_state = NULL;

#ifdef CONFIG_X86_64
	if (is_long_mode(&vmx->vcpu))
		rdmsrl(MSR_KERNEL_GS_BASE, vmx->msr_guest_kernel_gs_base);
	rdmsrl(MSR_KERNEL_GS_BASE, vmx->msr_guest_kernel_gs_base);
#endif
	if (host_state->ldt_sel || (host_state->gs_sel & 7)) {
		kvm_load_ldt(host_state->ldt_sel);

@@ -2980,24 +2978,19 @@ static void vmx_prepare_switch_to_host(struct vcpu_vmx *vmx)
#ifdef CONFIG_X86_64
static u64 vmx_read_guest_kernel_gs_base(struct vcpu_vmx *vmx)
{
	if (is_long_mode(&vmx->vcpu)) {
		preempt_disable();
		if (vmx->loaded_cpu_state)
			rdmsrl(MSR_KERNEL_GS_BASE,
			       vmx->msr_guest_kernel_gs_base);
		preempt_enable();
	}
	preempt_disable();
	if (vmx->loaded_cpu_state)
		rdmsrl(MSR_KERNEL_GS_BASE, vmx->msr_guest_kernel_gs_base);
	preempt_enable();
	return vmx->msr_guest_kernel_gs_base;
}

static void vmx_write_guest_kernel_gs_base(struct vcpu_vmx *vmx, u64 data)
{
	if (is_long_mode(&vmx->vcpu)) {
		preempt_disable();
		if (vmx->loaded_cpu_state)
			wrmsrl(MSR_KERNEL_GS_BASE, data);
		preempt_enable();
	}
	preempt_disable();
	if (vmx->loaded_cpu_state)
		wrmsrl(MSR_KERNEL_GS_BASE, data);
	preempt_enable();
	vmx->msr_guest_kernel_gs_base = data;
}
#endif

@@ -3533,9 +3526,6 @@ static void nested_vmx_setup_ctls_msrs(struct nested_vmx_msrs *msrs, bool apicv)
		VM_EXIT_LOAD_IA32_EFER | VM_EXIT_SAVE_IA32_EFER |
		VM_EXIT_SAVE_VMX_PREEMPTION_TIMER | VM_EXIT_ACK_INTR_ON_EXIT;

	if (kvm_mpx_supported())
		msrs->exit_ctls_high |= VM_EXIT_CLEAR_BNDCFGS;

	/* We support free control of debug control saving. */
	msrs->exit_ctls_low &= ~VM_EXIT_SAVE_DEBUG_CONTROLS;

@@ -3552,8 +3542,6 @@ static void nested_vmx_setup_ctls_msrs(struct nested_vmx_msrs *msrs, bool apicv)
		VM_ENTRY_LOAD_IA32_PAT;
	msrs->entry_ctls_high |=
		(VM_ENTRY_ALWAYSON_WITHOUT_TRUE_MSR | VM_ENTRY_LOAD_IA32_EFER);
	if (kvm_mpx_supported())
		msrs->entry_ctls_high |= VM_ENTRY_LOAD_BNDCFGS;

	/* We support free control of debug control loading. */
	msrs->entry_ctls_low &= ~VM_ENTRY_LOAD_DEBUG_CONTROLS;

@@ -3601,12 +3589,12 @@ static void nested_vmx_setup_ctls_msrs(struct nested_vmx_msrs *msrs, bool apicv)
		msrs->secondary_ctls_high);
	msrs->secondary_ctls_low = 0;
	msrs->secondary_ctls_high &=
		SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES |
		SECONDARY_EXEC_DESC |
		SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE |
		SECONDARY_EXEC_APIC_REGISTER_VIRT |
		SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY |
		SECONDARY_EXEC_WBINVD_EXITING;

	/*
	 * We can emulate "VMCS shadowing," even if the hardware
	 * doesn't support it.

@@ -3663,6 +3651,10 @@ static void nested_vmx_setup_ctls_msrs(struct nested_vmx_msrs *msrs, bool apicv)
		msrs->secondary_ctls_high |=
			SECONDARY_EXEC_UNRESTRICTED_GUEST;

	if (flexpriority_enabled)
		msrs->secondary_ctls_high |=
			SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES;

	/* miscellaneous data */
	rdmsr(MSR_IA32_VMX_MISC,
		msrs->misc_low,

@@ -5073,19 +5065,6 @@ static void vmx_set_efer(struct kvm_vcpu *vcpu, u64 efer)
	if (!msr)
		return;

	/*
	 * MSR_KERNEL_GS_BASE is not intercepted when the guest is in
	 * 64-bit mode as a 64-bit kernel may frequently access the
	 * MSR.  This means we need to manually save/restore the MSR
	 * when switching between guest and host state, but only if
	 * the guest is in 64-bit mode.  Sync our cached value if the
	 * guest is transitioning to 32-bit mode and the CPU contains
	 * guest state, i.e. the cache is stale.
	 */
#ifdef CONFIG_X86_64
	if (!(efer & EFER_LMA))
		(void)vmx_read_guest_kernel_gs_base(vmx);
#endif
	vcpu->arch.efer = efer;
	if (efer & EFER_LMA) {
		vm_entry_controls_setbit(to_vmx(vcpu), VM_ENTRY_IA32E_MODE);

@@ -6078,9 +6057,6 @@ static u8 vmx_msr_bitmap_mode(struct kvm_vcpu *vcpu)
		mode |= MSR_BITMAP_MODE_X2APIC_APICV;
	}

	if (is_long_mode(vcpu))
		mode |= MSR_BITMAP_MODE_LM;

	return mode;
}

@@ -6121,9 +6097,6 @@ static void vmx_update_msr_bitmap(struct kvm_vcpu *vcpu)
	if (!changed)
		return;

	vmx_set_intercept_for_msr(msr_bitmap, MSR_KERNEL_GS_BASE, MSR_TYPE_RW,
				  !(mode & MSR_BITMAP_MODE_LM));

	if (changed & (MSR_BITMAP_MODE_X2APIC | MSR_BITMAP_MODE_X2APIC_APICV))
		vmx_update_msr_bitmap_x2apic(msr_bitmap, mode);

@@ -6189,6 +6162,11 @@ static void vmx_complete_nested_posted_interrupt(struct kvm_vcpu *vcpu)
	nested_mark_vmcs12_pages_dirty(vcpu);
}

static u8 vmx_get_rvi(void)
{
	return vmcs_read16(GUEST_INTR_STATUS) & 0xff;
}

static bool vmx_guest_apic_has_interrupt(struct kvm_vcpu *vcpu)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);

@@ -6201,7 +6179,7 @@ static bool vmx_guest_apic_has_interrupt(struct kvm_vcpu *vcpu)
		WARN_ON_ONCE(!vmx->nested.virtual_apic_page))
		return false;

	rvi = vmcs_read16(GUEST_INTR_STATUS) & 0xff;
	rvi = vmx_get_rvi();

	vapic_page = kmap(vmx->nested.virtual_apic_page);
	vppr = *((u32 *)(vapic_page + APIC_PROCPRI));

@@ -10245,15 +10223,16 @@ static void vmx_set_virtual_apic_mode(struct kvm_vcpu *vcpu)
	if (!lapic_in_kernel(vcpu))
		return;

	if (!flexpriority_enabled &&
	    !cpu_has_vmx_virtualize_x2apic_mode())
		return;

	/* Postpone execution until vmcs01 is the current VMCS. */
	if (is_guest_mode(vcpu)) {
		to_vmx(vcpu)->nested.change_vmcs01_virtual_apic_mode = true;
		return;
	}

	if (!cpu_need_tpr_shadow(vcpu))
		return;

	sec_exec_control = vmcs_read32(SECONDARY_VM_EXEC_CONTROL);
	sec_exec_control &= ~(SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES |
			      SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE);

@@ -10375,6 +10354,14 @@ static int vmx_sync_pir_to_irr(struct kvm_vcpu *vcpu)
	return max_irr;
}

static u8 vmx_has_apicv_interrupt(struct kvm_vcpu *vcpu)
{
	u8 rvi = vmx_get_rvi();
	u8 vppr = kvm_lapic_get_reg(vcpu->arch.apic, APIC_PROCPRI);

	return ((rvi & 0xf0) > (vppr & 0xf0));
}

static void vmx_load_eoi_exitmap(struct kvm_vcpu *vcpu, u64 *eoi_exit_bitmap)
{
	if (!kvm_vcpu_apicv_active(vcpu))

@@ -11264,6 +11251,23 @@ static void nested_vmx_cr_fixed1_bits_update(struct kvm_vcpu *vcpu)
#undef cr4_fixed1_update
}

static void nested_vmx_entry_exit_ctls_update(struct kvm_vcpu *vcpu)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);

	if (kvm_mpx_supported()) {
		bool mpx_enabled = guest_cpuid_has(vcpu, X86_FEATURE_MPX);

		if (mpx_enabled) {
			vmx->nested.msrs.entry_ctls_high |= VM_ENTRY_LOAD_BNDCFGS;
			vmx->nested.msrs.exit_ctls_high |= VM_EXIT_CLEAR_BNDCFGS;
		} else {
			vmx->nested.msrs.entry_ctls_high &= ~VM_ENTRY_LOAD_BNDCFGS;
			vmx->nested.msrs.exit_ctls_high &= ~VM_EXIT_CLEAR_BNDCFGS;
		}
	}
}

static void vmx_cpuid_update(struct kvm_vcpu *vcpu)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);

@@ -11280,8 +11284,10 @@ static void vmx_cpuid_update(struct kvm_vcpu *vcpu)
		to_vmx(vcpu)->msr_ia32_feature_control_valid_bits &=
			~FEATURE_CONTROL_VMXON_ENABLED_OUTSIDE_SMX;

	if (nested_vmx_allowed(vcpu))
	if (nested_vmx_allowed(vcpu)) {
		nested_vmx_cr_fixed1_bits_update(vcpu);
		nested_vmx_entry_exit_ctls_update(vcpu);
	}
}

static void vmx_set_supported_cpuid(u32 func, struct kvm_cpuid_entry2 *entry)

@@ -12049,8 +12055,13 @@ static void prepare_vmcs02_full(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)

	set_cr4_guest_host_mask(vmx);

	if (vmx_mpx_supported())
		vmcs_write64(GUEST_BNDCFGS, vmcs12->guest_bndcfgs);
	if (kvm_mpx_supported()) {
		if (vmx->nested.nested_run_pending &&
		    (vmcs12->vm_entry_controls & VM_ENTRY_LOAD_BNDCFGS))
			vmcs_write64(GUEST_BNDCFGS, vmcs12->guest_bndcfgs);
		else
			vmcs_write64(GUEST_BNDCFGS, vmx->nested.vmcs01_guest_bndcfgs);
	}

	if (enable_vpid) {
		if (nested_cpu_has_vpid(vmcs12) && vmx->nested.vpid02)

@@ -12595,15 +12606,21 @@ static int enter_vmx_non_root_mode(struct kvm_vcpu *vcpu, u32 *exit_qual)
	struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
	bool from_vmentry = !!exit_qual;
	u32 dummy_exit_qual;
	u32 vmcs01_cpu_exec_ctrl;
	bool evaluate_pending_interrupts;
	int r = 0;

	vmcs01_cpu_exec_ctrl = vmcs_read32(CPU_BASED_VM_EXEC_CONTROL);
	evaluate_pending_interrupts = vmcs_read32(CPU_BASED_VM_EXEC_CONTROL) &
		(CPU_BASED_VIRTUAL_INTR_PENDING | CPU_BASED_VIRTUAL_NMI_PENDING);
	if (likely(!evaluate_pending_interrupts) && kvm_vcpu_apicv_active(vcpu))
		evaluate_pending_interrupts |= vmx_has_apicv_interrupt(vcpu);

	enter_guest_mode(vcpu);

	if (!(vmcs12->vm_entry_controls & VM_ENTRY_LOAD_DEBUG_CONTROLS))
		vmx->nested.vmcs01_debugctl = vmcs_read64(GUEST_IA32_DEBUGCTL);
	if (kvm_mpx_supported() &&
	    !(vmcs12->vm_entry_controls & VM_ENTRY_LOAD_BNDCFGS))
		vmx->nested.vmcs01_guest_bndcfgs = vmcs_read64(GUEST_BNDCFGS);

	vmx_switch_vmcs(vcpu, &vmx->nested.vmcs02);
	vmx_segment_cache_clear(vmx);

@@ -12643,16 +12660,14 @@ static int enter_vmx_non_root_mode(struct kvm_vcpu *vcpu, u32 *exit_qual)
	 * to L1 or delivered directly to L2 (e.g. In case L1 don't
	 * intercept EXTERNAL_INTERRUPT).
	 *
	 * Usually this would be handled by L0 requesting a
	 * IRQ/NMI window by setting VMCS accordingly. However,
	 * this setting was done on VMCS01 and now VMCS02 is active
	 * instead. Thus, we force L0 to perform pending event
	 * evaluation by requesting a KVM_REQ_EVENT.
	 * Usually this would be handled by the processor noticing an
	 * IRQ/NMI window request, or checking RVI during evaluation of
	 * pending virtual interrupts.  However, this setting was done
	 * on VMCS01 and now VMCS02 is active instead. Thus, we force L0
	 * to perform pending event evaluation by requesting a KVM_REQ_EVENT.
	 */
	if (vmcs01_cpu_exec_ctrl &
	    (CPU_BASED_VIRTUAL_INTR_PENDING | CPU_BASED_VIRTUAL_NMI_PENDING)) {
	if (unlikely(evaluate_pending_interrupts))
		kvm_make_request(KVM_REQ_EVENT, vcpu);
	}

	/*
	 * Note no nested_vmx_succeed or nested_vmx_fail here. At this point

@@ -4698,7 +4698,7 @@ static void kvm_init_msr_list(void)
		 */
		switch (msrs_to_save[i]) {
		case MSR_IA32_BNDCFGS:
			if (!kvm_x86_ops->mpx_supported())
			if (!kvm_mpx_supported())
				continue;
			break;
		case MSR_TSC_AUX:

@@ -226,8 +226,11 @@ static int alloc_lookup_fw_priv(const char *fw_name,
	}

	tmp = __allocate_fw_priv(fw_name, fwc, dbuf, size);
	if (tmp && !(opt_flags & FW_OPT_NOCACHE))
		list_add(&tmp->list, &fwc->head);
	if (tmp) {
		INIT_LIST_HEAD(&tmp->list);
		if (!(opt_flags & FW_OPT_NOCACHE))
			list_add(&tmp->list, &fwc->head);
	}
	spin_unlock(&fwc->lock);

	*fw_priv = tmp;

@@ -1713,8 +1713,10 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async)

	dpm_wait_for_subordinate(dev, async);

	if (async_error)
	if (async_error) {
		dev->power.direct_complete = false;
		goto Complete;
	}

	/*
	 * If a device configured to wake up the system from sleep states

@@ -1726,6 +1728,7 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async)
		pm_wakeup_event(dev, 0);

	if (pm_wakeup_pending()) {
		dev->power.direct_complete = false;
		async_error = -EBUSY;
		goto Complete;
	}

@@ -1553,8 +1553,8 @@ static struct ablkcipher_edesc *ablkcipher_edesc_alloc(struct ablkcipher_request
	edesc->src_nents = src_nents;
	edesc->dst_nents = dst_nents;
	edesc->sec4_sg_bytes = sec4_sg_bytes;
	edesc->sec4_sg = (void *)edesc + sizeof(struct ablkcipher_edesc) +
			 desc_bytes;
	edesc->sec4_sg = (struct sec4_sg_entry *)((u8 *)edesc->hw_desc +
						  desc_bytes);
	edesc->iv_dir = DMA_TO_DEVICE;

	/* Make sure IV is located in a DMAable area */

@@ -1757,8 +1757,8 @@ static struct ablkcipher_edesc *ablkcipher_giv_edesc_alloc(
	edesc->src_nents = src_nents;
	edesc->dst_nents = dst_nents;
	edesc->sec4_sg_bytes = sec4_sg_bytes;
	edesc->sec4_sg = (void *)edesc + sizeof(struct ablkcipher_edesc) +
			 desc_bytes;
	edesc->sec4_sg = (struct sec4_sg_entry *)((u8 *)edesc->hw_desc +
						  desc_bytes);
	edesc->iv_dir = DMA_FROM_DEVICE;

	/* Make sure IV is located in a DMAable area */

@@ -367,7 +367,8 @@ static inline void dsgl_walk_init(struct dsgl_walk *walk,
	walk->to = (struct phys_sge_pairs *)(dsgl + 1);
}

static inline void dsgl_walk_end(struct dsgl_walk *walk, unsigned short qid)
static inline void dsgl_walk_end(struct dsgl_walk *walk, unsigned short qid,
				 int pci_chan_id)
{
	struct cpl_rx_phys_dsgl *phys_cpl;

@@ -385,6 +386,7 @@ static inline void dsgl_walk_end(struct dsgl_walk *walk, unsigned short qid)
	phys_cpl->rss_hdr_int.opcode = CPL_RX_PHYS_ADDR;
	phys_cpl->rss_hdr_int.qid = htons(qid);
	phys_cpl->rss_hdr_int.hash_val = 0;
	phys_cpl->rss_hdr_int.channel = pci_chan_id;
}

static inline void dsgl_walk_add_page(struct dsgl_walk *walk,

@@ -718,7 +720,7 @@ static inline void create_wreq(struct chcr_context *ctx,
		FILL_WR_RX_Q_ID(ctx->dev->rx_channel_id, qid,
				!!lcb, ctx->tx_qidx);

	chcr_req->ulptx.cmd_dest = FILL_ULPTX_CMD_DEST(ctx->dev->tx_channel_id,
	chcr_req->ulptx.cmd_dest = FILL_ULPTX_CMD_DEST(ctx->tx_chan_id,
						       qid);
	chcr_req->ulptx.len = htonl((DIV_ROUND_UP(len16, 16) -
				     ((sizeof(chcr_req->wreq)) >> 4)));

@@ -1339,16 +1341,23 @@ static int chcr_device_init(struct chcr_context *ctx)
				    adap->vres.ncrypto_fc);
		rxq_perchan = u_ctx->lldi.nrxq / u_ctx->lldi.nchan;
		txq_perchan = ntxq / u_ctx->lldi.nchan;
		rxq_idx = ctx->dev->tx_channel_id * rxq_perchan;
		rxq_idx += id % rxq_perchan;
		txq_idx = ctx->dev->tx_channel_id * txq_perchan;
		txq_idx += id % txq_perchan;
		spin_lock(&ctx->dev->lock_chcr_dev);
		ctx->rx_qidx = rxq_idx;
		ctx->tx_qidx = txq_idx;
		ctx->tx_chan_id = ctx->dev->tx_channel_id;
		ctx->dev->tx_channel_id = !ctx->dev->tx_channel_id;
		ctx->dev->rx_channel_id = 0;
		spin_unlock(&ctx->dev->lock_chcr_dev);
		rxq_idx = ctx->tx_chan_id * rxq_perchan;
		rxq_idx += id % rxq_perchan;
		txq_idx = ctx->tx_chan_id * txq_perchan;
		txq_idx += id % txq_perchan;
		ctx->rx_qidx = rxq_idx;
		ctx->tx_qidx = txq_idx;
		/* Channel Id used by SGE to forward packet to Host.
		 * Same value should be used in cpl_fw6_pld RSS_CH field
		 * by FW. Driver programs PCI channel ID to be used in fw
		 * at the time of queue allocation with value "pi->tx_chan"
		 */
		ctx->pci_chan_id = txq_idx / txq_perchan;
	}
out:
	return err;

@@ -2503,6 +2512,7 @@ void chcr_add_aead_dst_ent(struct aead_request *req,
	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
	struct dsgl_walk dsgl_walk;
	unsigned int authsize = crypto_aead_authsize(tfm);
	struct chcr_context *ctx = a_ctx(tfm);
	u32 temp;

	dsgl_walk_init(&dsgl_walk, phys_cpl);

@@ -2512,7 +2522,7 @@ void chcr_add_aead_dst_ent(struct aead_request *req,
	dsgl_walk_add_page(&dsgl_walk, IV, &reqctx->iv_dma);
	temp = req->cryptlen + (reqctx->op ? -authsize : authsize);
	dsgl_walk_add_sg(&dsgl_walk, req->dst, temp, req->assoclen);
	dsgl_walk_end(&dsgl_walk, qid);
	dsgl_walk_end(&dsgl_walk, qid, ctx->pci_chan_id);
}

void chcr_add_cipher_src_ent(struct ablkcipher_request *req,

@@ -2544,6 +2554,8 @@ void chcr_add_cipher_dst_ent(struct ablkcipher_request *req,
			     unsigned short qid)
{
	struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req);
	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(wrparam->req);
	struct chcr_context *ctx = c_ctx(tfm);
	struct dsgl_walk dsgl_walk;

	dsgl_walk_init(&dsgl_walk, phys_cpl);

@@ -2552,7 +2564,7 @@ void chcr_add_cipher_dst_ent(struct ablkcipher_request *req,
	reqctx->dstsg = dsgl_walk.last_sg;
	reqctx->dst_ofst = dsgl_walk.last_sg_len;

	dsgl_walk_end(&dsgl_walk, qid);
	dsgl_walk_end(&dsgl_walk, qid, ctx->pci_chan_id);
}

void chcr_add_hash_src_ent(struct ahash_request *req,

@@ -255,6 +255,8 @@ struct chcr_context {
	struct chcr_dev *dev;
	unsigned char tx_qidx;
	unsigned char rx_qidx;
	unsigned char tx_chan_id;
	unsigned char pci_chan_id;
	struct __crypto_ctx crypto_ctx[0];
};

@@ -63,7 +63,7 @@ struct dcp {
	struct dcp_coherent_block *coh;

	struct completion completion[DCP_MAX_CHANS];
	struct mutex mutex[DCP_MAX_CHANS];
	spinlock_t lock[DCP_MAX_CHANS];
	struct task_struct *thread[DCP_MAX_CHANS];
	struct crypto_queue queue[DCP_MAX_CHANS];
};

@@ -349,13 +349,20 @@ static int dcp_chan_thread_aes(void *data)

	int ret;

	do {
		__set_current_state(TASK_INTERRUPTIBLE);
	while (!kthread_should_stop()) {
		set_current_state(TASK_INTERRUPTIBLE);

		mutex_lock(&sdcp->mutex[chan]);
		spin_lock(&sdcp->lock[chan]);
		backlog = crypto_get_backlog(&sdcp->queue[chan]);
		arq = crypto_dequeue_request(&sdcp->queue[chan]);
		mutex_unlock(&sdcp->mutex[chan]);
		spin_unlock(&sdcp->lock[chan]);

		if (!backlog && !arq) {
			schedule();
			continue;
		}

		set_current_state(TASK_RUNNING);

		if (backlog)
			backlog->complete(backlog, -EINPROGRESS);

@@ -363,11 +370,8 @@ static int dcp_chan_thread_aes(void *data)
		if (arq) {
			ret = mxs_dcp_aes_block_crypt(arq);
			arq->complete(arq, ret);
			continue;
		}

		schedule();
	} while (!kthread_should_stop());
	}

	return 0;
}

@@ -409,9 +413,9 @@ static int mxs_dcp_aes_enqueue(struct ablkcipher_request *req, int enc, int ecb)
	rctx->ecb = ecb;
	actx->chan = DCP_CHAN_CRYPTO;

	mutex_lock(&sdcp->mutex[actx->chan]);
	spin_lock(&sdcp->lock[actx->chan]);
	ret = crypto_enqueue_request(&sdcp->queue[actx->chan], &req->base);
	mutex_unlock(&sdcp->mutex[actx->chan]);
	spin_unlock(&sdcp->lock[actx->chan]);

	wake_up_process(sdcp->thread[actx->chan]);

@@ -640,13 +644,20 @@ static int dcp_chan_thread_sha(void *data)
	struct ahash_request *req;
	int ret, fini;

	do {
		__set_current_state(TASK_INTERRUPTIBLE);
	while (!kthread_should_stop()) {
		set_current_state(TASK_INTERRUPTIBLE);

		mutex_lock(&sdcp->mutex[chan]);
		spin_lock(&sdcp->lock[chan]);
		backlog = crypto_get_backlog(&sdcp->queue[chan]);
		arq = crypto_dequeue_request(&sdcp->queue[chan]);
		mutex_unlock(&sdcp->mutex[chan]);
		spin_unlock(&sdcp->lock[chan]);

		if (!backlog && !arq) {
			schedule();
			continue;
		}

		set_current_state(TASK_RUNNING);

		if (backlog)
			backlog->complete(backlog, -EINPROGRESS);

@@ -658,12 +669,8 @@ static int dcp_chan_thread_sha(void *data)
			ret = dcp_sha_req_to_buf(arq);
			fini = rctx->fini;
			arq->complete(arq, ret);
			if (!fini)
				continue;
		}

		schedule();
	} while (!kthread_should_stop());
	}

	return 0;
}

@@ -721,9 +728,9 @@ static int dcp_sha_update_fx(struct ahash_request *req, int fini)
		rctx->init = 1;
	}

	mutex_lock(&sdcp->mutex[actx->chan]);
	spin_lock(&sdcp->lock[actx->chan]);
	ret = crypto_enqueue_request(&sdcp->queue[actx->chan], &req->base);
	mutex_unlock(&sdcp->mutex[actx->chan]);
	spin_unlock(&sdcp->lock[actx->chan]);

	wake_up_process(sdcp->thread[actx->chan]);
	mutex_unlock(&actx->mutex);

@@ -997,7 +1004,7 @@ static int mxs_dcp_probe(struct platform_device *pdev)
	platform_set_drvdata(pdev, sdcp);

	for (i = 0; i < DCP_MAX_CHANS; i++) {
		mutex_init(&sdcp->mutex[i]);
		spin_lock_init(&sdcp->lock[i]);
		init_completion(&sdcp->completion[i]);
		crypto_init_queue(&sdcp->queue[i], 50);
	}

@@ -123,7 +123,8 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
	struct adf_hw_device_data *hw_data;
	char name[ADF_DEVICE_NAME_LENGTH];
	unsigned int i, bar_nr;
	int ret, bar_mask;
	unsigned long bar_mask;
	int ret;

	switch (ent->device) {
	case ADF_C3XXX_PCI_DEVICE_ID:

@@ -235,8 +236,7 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
	/* Find and map all the device's BARS */
	i = 0;
	bar_mask = pci_select_bars(pdev, IORESOURCE_MEM);
	for_each_set_bit(bar_nr, (const unsigned long *)&bar_mask,
			 ADF_PCI_MAX_BARS * 2) {
	for_each_set_bit(bar_nr, &bar_mask, ADF_PCI_MAX_BARS * 2) {
		struct adf_bar *bar = &accel_pci_dev->pci_bars[i++];

		bar->base_addr = pci_resource_start(pdev, bar_nr);

@@ -125,7 +125,8 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
	struct adf_hw_device_data *hw_data;
	char name[ADF_DEVICE_NAME_LENGTH];
	unsigned int i, bar_nr;
	int ret, bar_mask;
	unsigned long bar_mask;
	int ret;

	switch (ent->device) {
	case ADF_C3XXXIOV_PCI_DEVICE_ID:

@@ -215,8 +216,7 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
	/* Find and map all the device's BARS */
	i = 0;
	bar_mask = pci_select_bars(pdev, IORESOURCE_MEM);
	for_each_set_bit(bar_nr, (const unsigned long *)&bar_mask,
			 ADF_PCI_MAX_BARS * 2) {
	for_each_set_bit(bar_nr, &bar_mask, ADF_PCI_MAX_BARS * 2) {
		struct adf_bar *bar = &accel_pci_dev->pci_bars[i++];

		bar->base_addr = pci_resource_start(pdev, bar_nr);

@@ -123,7 +123,8 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
	struct adf_hw_device_data *hw_data;
	char name[ADF_DEVICE_NAME_LENGTH];
	unsigned int i, bar_nr;
	int ret, bar_mask;
	unsigned long bar_mask;
	int ret;

	switch (ent->device) {
	case ADF_C62X_PCI_DEVICE_ID:

@@ -235,8 +236,7 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
	/* Find and map all the device's BARS */
	i = (hw_data->fuses & ADF_DEVICE_FUSECTL_MASK) ? 1 : 0;
	bar_mask = pci_select_bars(pdev, IORESOURCE_MEM);
	for_each_set_bit(bar_nr, (const unsigned long *)&bar_mask,
			 ADF_PCI_MAX_BARS * 2) {
	for_each_set_bit(bar_nr, &bar_mask, ADF_PCI_MAX_BARS * 2) {
		struct adf_bar *bar = &accel_pci_dev->pci_bars[i++];

		bar->base_addr = pci_resource_start(pdev, bar_nr);

@@ -125,7 +125,8 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
	struct adf_hw_device_data *hw_data;
	char name[ADF_DEVICE_NAME_LENGTH];
	unsigned int i, bar_nr;
	int ret, bar_mask;
	unsigned long bar_mask;
	int ret;

	switch (ent->device) {
	case ADF_C62XIOV_PCI_DEVICE_ID:

@@ -215,8 +216,7 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
	/* Find and map all the device's BARS */
	i = 0;
	bar_mask = pci_select_bars(pdev, IORESOURCE_MEM);
	for_each_set_bit(bar_nr, (const unsigned long *)&bar_mask,
			 ADF_PCI_MAX_BARS * 2) {
	for_each_set_bit(bar_nr, &bar_mask, ADF_PCI_MAX_BARS * 2) {
		struct adf_bar *bar = &accel_pci_dev->pci_bars[i++];

		bar->base_addr = pci_resource_start(pdev, bar_nr);

@@ -123,7 +123,8 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
	struct adf_hw_device_data *hw_data;
	char name[ADF_DEVICE_NAME_LENGTH];
	unsigned int i, bar_nr;
	int ret, bar_mask;
	unsigned long bar_mask;
	int ret;

	switch (ent->device) {
	case ADF_DH895XCC_PCI_DEVICE_ID:

@@ -237,8 +238,7 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
	/* Find and map all the device's BARS */
	i = 0;
	bar_mask = pci_select_bars(pdev, IORESOURCE_MEM);
	for_each_set_bit(bar_nr, (const unsigned long *)&bar_mask,
			 ADF_PCI_MAX_BARS * 2) {
	for_each_set_bit(bar_nr, &bar_mask, ADF_PCI_MAX_BARS * 2) {
		struct adf_bar *bar = &accel_pci_dev->pci_bars[i++];

		bar->base_addr = pci_resource_start(pdev, bar_nr);

@@ -125,7 +125,8 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
	struct adf_hw_device_data *hw_data;
	char name[ADF_DEVICE_NAME_LENGTH];
	unsigned int i, bar_nr;
	int ret, bar_mask;
	unsigned long bar_mask;
	int ret;

	switch (ent->device) {
	case ADF_DH895XCCIOV_PCI_DEVICE_ID:

@@ -215,8 +216,7 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
	/* Find and map all the device's BARS */
	i = 0;
	bar_mask = pci_select_bars(pdev, IORESOURCE_MEM);
	for_each_set_bit(bar_nr, (const unsigned long *)&bar_mask,
			 ADF_PCI_MAX_BARS * 2) {
	for_each_set_bit(bar_nr, &bar_mask, ADF_PCI_MAX_BARS * 2) {
		struct adf_bar *bar = &accel_pci_dev->pci_bars[i++];

		bar->base_addr = pci_resource_start(pdev, bar_nr);

@@ -14,6 +14,7 @@
  */
 
 #include <linux/module.h>
+#include <linux/fpga/fpga-mgr.h>
 #include <linux/fpga/fpga-region.h>
 
 #include "dfl-fme-pr.h"
@@ -66,9 +67,10 @@ static int fme_region_probe(struct platform_device *pdev)
 static int fme_region_remove(struct platform_device *pdev)
 {
 	struct fpga_region *region = dev_get_drvdata(&pdev->dev);
+	struct fpga_manager *mgr = region->mgr;
 
 	fpga_region_unregister(region);
-	fpga_mgr_put(region->mgr);
+	fpga_mgr_put(mgr);
 
 	return 0;
 }

@@ -125,7 +125,7 @@ static int fpga_bridge_dev_match(struct device *dev, const void *data)
 *
 * Given a device, get an exclusive reference to a fpga bridge.
 *
- * Return: fpga manager struct or IS_ERR() condition containing error code.
+ * Return: fpga bridge struct or IS_ERR() condition containing error code.
 */
 struct fpga_bridge *fpga_bridge_get(struct device *dev,
 				    struct fpga_image_info *info)

@@ -437,9 +437,10 @@ static int of_fpga_region_probe(struct platform_device *pdev)
 static int of_fpga_region_remove(struct platform_device *pdev)
 {
 	struct fpga_region *region = platform_get_drvdata(pdev);
+	struct fpga_manager *mgr = region->mgr;
 
 	fpga_region_unregister(region);
-	fpga_mgr_put(region->mgr);
+	fpga_mgr_put(mgr);
 
 	return 0;
 }
@@ -571,7 +571,7 @@ static int linehandle_create(struct gpio_device *gdev, void __user *ip)
 		if (ret)
 			goto out_free_descs;
 		lh->descs[i] = desc;
-		count = i;
+		count = i + 1;
 
 		if (lflags & GPIOHANDLE_REQUEST_ACTIVE_LOW)
 			set_bit(FLAG_ACTIVE_LOW, &desc->flags);
@@ -358,8 +358,8 @@ static int create_compute_queue_nocpsch(struct device_queue_manager *dqm,
 					struct queue *q,
 					struct qcm_process_device *qpd)
 {
-	int retval;
 	struct mqd_manager *mqd_mgr;
+	int retval;
 
 	mqd_mgr = dqm->ops.get_mqd_manager(dqm, KFD_MQD_TYPE_COMPUTE);
 	if (!mqd_mgr)
@@ -387,8 +387,12 @@ static int create_compute_queue_nocpsch(struct device_queue_manager *dqm,
 	if (!q->properties.is_active)
 		return 0;
 
-	retval = mqd_mgr->load_mqd(mqd_mgr, q->mqd, q->pipe, q->queue,
-				   &q->properties, q->process->mm);
+	if (WARN(q->process->mm != current->mm,
+		 "should only run in user thread"))
+		retval = -EFAULT;
+	else
+		retval = mqd_mgr->load_mqd(mqd_mgr, q->mqd, q->pipe, q->queue,
+					   &q->properties, current->mm);
 	if (retval)
 		goto out_uninit_mqd;
 
@@ -545,9 +549,15 @@ static int update_queue(struct device_queue_manager *dqm, struct queue *q)
 		retval = map_queues_cpsch(dqm);
 	else if (q->properties.is_active &&
 		 (q->properties.type == KFD_QUEUE_TYPE_COMPUTE ||
-		  q->properties.type == KFD_QUEUE_TYPE_SDMA))
-		retval = mqd_mgr->load_mqd(mqd_mgr, q->mqd, q->pipe, q->queue,
-					   &q->properties, q->process->mm);
+		  q->properties.type == KFD_QUEUE_TYPE_SDMA)) {
+		if (WARN(q->process->mm != current->mm,
+			 "should only run in user thread"))
+			retval = -EFAULT;
+		else
+			retval = mqd_mgr->load_mqd(mqd_mgr, q->mqd,
+						   q->pipe, q->queue,
+						   &q->properties, current->mm);
+	}
 
 out_unlock:
 	dqm_unlock(dqm);
@@ -653,6 +663,7 @@ static int evict_process_queues_cpsch(struct device_queue_manager *dqm,
 static int restore_process_queues_nocpsch(struct device_queue_manager *dqm,
 					  struct qcm_process_device *qpd)
 {
+	struct mm_struct *mm = NULL;
 	struct queue *q;
 	struct mqd_manager *mqd_mgr;
 	struct kfd_process_device *pdd;
@@ -686,6 +697,15 @@ static int restore_process_queues_nocpsch(struct device_queue_manager *dqm,
 		kfd_flush_tlb(pdd);
 	}
 
+	/* Take a safe reference to the mm_struct, which may otherwise
+	 * disappear even while the kfd_process is still referenced.
+	 */
+	mm = get_task_mm(pdd->process->lead_thread);
+	if (!mm) {
+		retval = -EFAULT;
+		goto out;
+	}
+
 	/* activate all active queues on the qpd */
 	list_for_each_entry(q, &qpd->queues_list, list) {
 		if (!q->properties.is_evicted)
@@ -700,14 +720,15 @@ static int restore_process_queues_nocpsch(struct device_queue_manager *dqm,
 		q->properties.is_evicted = false;
 		q->properties.is_active = true;
 		retval = mqd_mgr->load_mqd(mqd_mgr, q->mqd, q->pipe,
-				       q->queue, &q->properties,
-				       q->process->mm);
+				       q->queue, &q->properties, mm);
 		if (retval)
 			goto out;
 		dqm->queue_count++;
 	}
 	qpd->evicted = 0;
 out:
+	if (mm)
+		mmput(mm);
 	dqm_unlock(dqm);
 	return retval;
 }
@@ -4633,12 +4633,18 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
 	}
 	spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
 
-	/* Signal HW programming completion */
-	drm_atomic_helper_commit_hw_done(state);
-
 	if (wait_for_vblank)
 		drm_atomic_helper_wait_for_flip_done(dev, state);
 
+	/*
+	 * FIXME:
+	 * Delay hw_done() until flip_done() is signaled. This is to block
+	 * another commit from freeing the CRTC state while we're still
+	 * waiting on flip_done.
+	 */
+	drm_atomic_helper_commit_hw_done(state);
+
 	drm_atomic_helper_cleanup_planes(dev, state);
 
 	/* Finally, drop a runtime PM reference for each newly disabled CRTC,
@@ -63,20 +63,21 @@ static void drm_client_close(struct drm_client_dev *client)
 EXPORT_SYMBOL(drm_client_close);
 
 /**
- * drm_client_new - Create a DRM client
+ * drm_client_init - Initialise a DRM client
  * @dev: DRM device
  * @client: DRM client
  * @name: Client name
  * @funcs: DRM client functions (optional)
  *
+ * This initialises the client and opens a &drm_file. Use drm_client_add() to complete the process.
  * The caller needs to hold a reference on @dev before calling this function.
  * The client is freed when the &drm_device is unregistered. See drm_client_release().
  *
  * Returns:
  * Zero on success or negative error code on failure.
  */
-int drm_client_new(struct drm_device *dev, struct drm_client_dev *client,
-		   const char *name, const struct drm_client_funcs *funcs)
+int drm_client_init(struct drm_device *dev, struct drm_client_dev *client,
+		    const char *name, const struct drm_client_funcs *funcs)
 {
 	int ret;
 
@@ -95,10 +96,6 @@ int drm_client_new(struct drm_device *dev, struct drm_client_dev *client,
 	if (ret)
 		goto err_put_module;
 
-	mutex_lock(&dev->clientlist_mutex);
-	list_add(&client->list, &dev->clientlist);
-	mutex_unlock(&dev->clientlist_mutex);
-
 	drm_dev_get(dev);
 
 	return 0;
@@ -109,13 +106,33 @@ int drm_client_new(struct drm_device *dev, struct drm_client_dev *client,
 
 	return ret;
 }
-EXPORT_SYMBOL(drm_client_new);
+EXPORT_SYMBOL(drm_client_init);
+
+/**
+ * drm_client_add - Add client to the device list
+ * @client: DRM client
+ *
+ * Add the client to the &drm_device client list to activate its callbacks.
+ * @client must be initialized by a call to drm_client_init(). After
+ * drm_client_add() it is no longer permissible to call drm_client_release()
+ * directly (outside the unregister callback), instead cleanup will happen
+ * automatically on driver unload.
+ */
+void drm_client_add(struct drm_client_dev *client)
+{
+	struct drm_device *dev = client->dev;
+
+	mutex_lock(&dev->clientlist_mutex);
+	list_add(&client->list, &dev->clientlist);
+	mutex_unlock(&dev->clientlist_mutex);
+}
+EXPORT_SYMBOL(drm_client_add);
 
 /**
  * drm_client_release - Release DRM client resources
  * @client: DRM client
  *
- * Releases resources by closing the &drm_file that was opened by drm_client_new().
+ * Releases resources by closing the &drm_file that was opened by drm_client_init().
  * It is called automatically if the &drm_client_funcs.unregister callback is _not_ set.
  *
  * This function should only be called from the unregister callback. An exception
@@ -160,7 +160,7 @@ struct drm_fbdev_cma *drm_fbdev_cma_init(struct drm_device *dev,
 
 	fb_helper = &fbdev_cma->fb_helper;
 
-	ret = drm_client_new(dev, &fb_helper->client, "fbdev", NULL);
+	ret = drm_client_init(dev, &fb_helper->client, "fbdev", NULL);
 	if (ret)
 		goto err_free;
 
@@ -169,6 +169,8 @@ struct drm_fbdev_cma *drm_fbdev_cma_init(struct drm_device *dev,
 	if (ret)
 		goto err_client_put;
 
+	drm_client_add(&fb_helper->client);
+
 	return fbdev_cma;
 
 err_client_put:
@@ -3218,12 +3218,14 @@ int drm_fbdev_generic_setup(struct drm_device *dev, unsigned int preferred_bpp)
 	if (!fb_helper)
 		return -ENOMEM;
 
-	ret = drm_client_new(dev, &fb_helper->client, "fbdev", &drm_fbdev_client_funcs);
+	ret = drm_client_init(dev, &fb_helper->client, "fbdev", &drm_fbdev_client_funcs);
 	if (ret) {
 		kfree(fb_helper);
 		return ret;
 	}
 
+	drm_client_add(&fb_helper->client);
+
 	fb_helper->preferred_bpp = preferred_bpp;
 
 	drm_fbdev_client_hotplug(&fb_helper->client);
@@ -566,14 +566,14 @@ int drm_mode_create_lease_ioctl(struct drm_device *dev,
 	lessee_priv->is_master = 1;
 	lessee_priv->authenticated = 1;
 
-	/* Hook up the fd */
-	fd_install(fd, lessee_file);
-
 	/* Pass fd back to userspace */
 	DRM_DEBUG_LEASE("Returning fd %d id %d\n", fd, lessee->lessee_id);
 	cl->fd = fd;
 	cl->lessee_id = lessee->lessee_id;
 
+	/* Hook up the fd */
+	fd_install(fd, lessee_file);
+
 	DRM_DEBUG_LEASE("drm_mode_create_lease_ioctl succeeded\n");
 	return 0;
 
@@ -55,37 +55,12 @@ static inline void __exynos_iommu_detach(struct exynos_drm_private *priv,
 static inline int __exynos_iommu_create_mapping(struct exynos_drm_private *priv,
 					unsigned long start, unsigned long size)
 {
-	struct iommu_domain *domain;
-	int ret;
-
-	domain = iommu_domain_alloc(priv->dma_dev->bus);
-	if (!domain)
-		return -ENOMEM;
-
-	ret = iommu_get_dma_cookie(domain);
-	if (ret)
-		goto free_domain;
-
-	ret = iommu_dma_init_domain(domain, start, size, NULL);
-	if (ret)
-		goto put_cookie;
-
-	priv->mapping = domain;
+	priv->mapping = iommu_get_domain_for_dev(priv->dma_dev);
 	return 0;
-
-put_cookie:
-	iommu_put_dma_cookie(domain);
-free_domain:
-	iommu_domain_free(domain);
-	return ret;
 }
 
 static inline void __exynos_iommu_release_mapping(struct exynos_drm_private *priv)
 {
 	struct iommu_domain *domain = priv->mapping;
 
-	iommu_put_dma_cookie(domain);
-	iommu_domain_free(domain);
 	priv->mapping = NULL;
 }
 
@@ -94,7 +69,9 @@ static inline int __exynos_iommu_attach(struct exynos_drm_private *priv,
 {
 	struct iommu_domain *domain = priv->mapping;
 
-	return iommu_attach_device(domain, dev);
+	if (dev != priv->dma_dev)
+		return iommu_attach_device(domain, dev);
+	return 0;
 }
 
 static inline void __exynos_iommu_detach(struct exynos_drm_private *priv,
@@ -102,7 +79,8 @@ static inline void __exynos_iommu_detach(struct exynos_drm_private *priv,
 {
 	struct iommu_domain *domain = priv->mapping;
 
-	iommu_detach_device(domain, dev);
+	if (dev != priv->dma_dev)
+		iommu_detach_device(domain, dev);
 }
 #else
 #error Unsupported architecture and IOMMU/DMA-mapping glue code
@@ -191,7 +191,8 @@ static irqreturn_t tda9950_irq(int irq, void *data)
 			break;
 		}
 		/* TDA9950 executes all retries for us */
-		tx_status |= CEC_TX_STATUS_MAX_RETRIES;
+		if (tx_status != CEC_TX_STATUS_OK)
+			tx_status |= CEC_TX_STATUS_MAX_RETRIES;
 		cec_transmit_done(priv->adap, tx_status, arb_lost_cnt,
 				  nack_cnt, 0, err_cnt);
 		break;
@@ -310,7 +311,7 @@ static void tda9950_release(struct tda9950_priv *priv)
 	/* Wait up to .5s for it to signal non-busy */
 	do {
 		csr = tda9950_read(client, REG_CSR);
-		if (!(csr & CSR_BUSY) || --timeout)
+		if (!(csr & CSR_BUSY) || !--timeout)
 			break;
 		msleep(10);
 	} while (1);
@@ -232,6 +232,20 @@ static bool compress_init(struct compress *c)
 	return true;
 }
 
+static void *compress_next_page(struct drm_i915_error_object *dst)
+{
+	unsigned long page;
+
+	if (dst->page_count >= dst->num_pages)
+		return ERR_PTR(-ENOSPC);
+
+	page = __get_free_page(GFP_ATOMIC | __GFP_NOWARN);
+	if (!page)
+		return ERR_PTR(-ENOMEM);
+
+	return dst->pages[dst->page_count++] = (void *)page;
+}
+
 static int compress_page(struct compress *c,
 			 void *src,
 			 struct drm_i915_error_object *dst)
@@ -245,19 +259,14 @@ static int compress_page(struct compress *c,
 
 	do {
 		if (zstream->avail_out == 0) {
-			unsigned long page;
+			zstream->next_out = compress_next_page(dst);
+			if (IS_ERR(zstream->next_out))
+				return PTR_ERR(zstream->next_out);
 
-			page = __get_free_page(GFP_ATOMIC | __GFP_NOWARN);
-			if (!page)
-				return -ENOMEM;
-
-			dst->pages[dst->page_count++] = (void *)page;
-
-			zstream->next_out = (void *)page;
 			zstream->avail_out = PAGE_SIZE;
 		}
 
-		if (zlib_deflate(zstream, Z_SYNC_FLUSH) != Z_OK)
+		if (zlib_deflate(zstream, Z_NO_FLUSH) != Z_OK)
 			return -EIO;
 	} while (zstream->avail_in);
 
@@ -268,19 +277,42 @@ static int compress_page(struct compress *c,
 	return 0;
 }
 
+static int compress_flush(struct compress *c,
+			  struct drm_i915_error_object *dst)
+{
+	struct z_stream_s *zstream = &c->zstream;
+
+	do {
+		switch (zlib_deflate(zstream, Z_FINISH)) {
+		case Z_OK: /* more space requested */
+			zstream->next_out = compress_next_page(dst);
+			if (IS_ERR(zstream->next_out))
+				return PTR_ERR(zstream->next_out);
+
+			zstream->avail_out = PAGE_SIZE;
+			break;
+
+		case Z_STREAM_END:
+			goto end;
+
+		default: /* any error */
+			return -EIO;
+		}
+	} while (1);
+
+end:
+	memset(zstream->next_out, 0, zstream->avail_out);
+	dst->unused = zstream->avail_out;
+	return 0;
+}
+
 static void compress_fini(struct compress *c,
 			  struct drm_i915_error_object *dst)
 {
 	struct z_stream_s *zstream = &c->zstream;
 
-	if (dst) {
-		zlib_deflate(zstream, Z_FINISH);
-		dst->unused = zstream->avail_out;
-	}
-
 	zlib_deflateEnd(zstream);
 	kfree(zstream->workspace);
 
 	if (c->tmp)
 		free_page((unsigned long)c->tmp);
 }
@@ -319,6 +351,12 @@ static int compress_page(struct compress *c,
 	return 0;
 }
 
+static int compress_flush(struct compress *c,
+			  struct drm_i915_error_object *dst)
+{
+	return 0;
+}
+
 static void compress_fini(struct compress *c,
 			  struct drm_i915_error_object *dst)
 {
@@ -917,6 +955,7 @@ i915_error_object_create(struct drm_i915_private *i915,
 	unsigned long num_pages;
 	struct sgt_iter iter;
 	dma_addr_t dma;
+	int ret;
 
 	if (!vma)
 		return NULL;
@@ -930,6 +969,7 @@ i915_error_object_create(struct drm_i915_private *i915,
 
 	dst->gtt_offset = vma->node.start;
 	dst->gtt_size = vma->node.size;
+	dst->num_pages = num_pages;
 	dst->page_count = 0;
 	dst->unused = 0;
 
@@ -938,28 +978,26 @@ i915_error_object_create(struct drm_i915_private *i915,
 		return NULL;
 	}
 
+	ret = -EINVAL;
 	for_each_sgt_dma(dma, iter, vma->pages) {
 		void __iomem *s;
-		int ret;
 
 		ggtt->vm.insert_page(&ggtt->vm, dma, slot, I915_CACHE_NONE, 0);
 
 		s = io_mapping_map_atomic_wc(&ggtt->iomap, slot);
 		ret = compress_page(&compress, (void __force *)s, dst);
 		io_mapping_unmap_atomic(s);
 
 		if (ret)
-			goto unwind;
+			break;
 	}
-	goto out;
 
-unwind:
-	while (dst->page_count--)
-		free_page((unsigned long)dst->pages[dst->page_count]);
-	kfree(dst);
-	dst = NULL;
+	if (ret || compress_flush(&compress, dst)) {
+		while (dst->page_count--)
+			free_page((unsigned long)dst->pages[dst->page_count]);
+		kfree(dst);
+		dst = NULL;
+	}
 
-out:
 	compress_fini(&compress, dst);
 	ggtt->vm.clear_range(&ggtt->vm, slot, PAGE_SIZE);
 	return dst;

@@ -135,6 +135,7 @@ struct i915_gpu_state {
 struct drm_i915_error_object {
 	u64 gtt_offset;
 	u64 gtt_size;
+	int num_pages;
 	int page_count;
 	int unused;
 	u32 *pages[0];
@@ -3091,36 +3091,27 @@ gen11_gt_irq_handler(struct drm_i915_private * const i915,
 	spin_unlock(&i915->irq_lock);
 }
 
-static void
-gen11_gu_misc_irq_ack(struct drm_i915_private *dev_priv, const u32 master_ctl,
-		      u32 *iir)
+static u32
+gen11_gu_misc_irq_ack(struct drm_i915_private *dev_priv, const u32 master_ctl)
 {
 	void __iomem * const regs = dev_priv->regs;
+	u32 iir;
 
 	if (!(master_ctl & GEN11_GU_MISC_IRQ))
-		return;
+		return 0;
 
-	*iir = raw_reg_read(regs, GEN11_GU_MISC_IIR);
-	if (likely(*iir))
-		raw_reg_write(regs, GEN11_GU_MISC_IIR, *iir);
+	iir = raw_reg_read(regs, GEN11_GU_MISC_IIR);
+	if (likely(iir))
+		raw_reg_write(regs, GEN11_GU_MISC_IIR, iir);
+
+	return iir;
 }
 
 static void
-gen11_gu_misc_irq_handler(struct drm_i915_private *dev_priv,
-			  const u32 master_ctl, const u32 iir)
+gen11_gu_misc_irq_handler(struct drm_i915_private *dev_priv, const u32 iir)
 {
-	if (!(master_ctl & GEN11_GU_MISC_IRQ))
-		return;
-
-	if (unlikely(!iir)) {
-		DRM_ERROR("GU_MISC iir blank!\n");
-		return;
-	}
-
 	if (iir & GEN11_GU_MISC_GSE)
 		intel_opregion_asle_intr(dev_priv);
-	else
-		DRM_ERROR("Unexpected GU_MISC interrupt 0x%x\n", iir);
 }
 
 static irqreturn_t gen11_irq_handler(int irq, void *arg)
@@ -3157,12 +3148,12 @@ static irqreturn_t gen11_irq_handler(int irq, void *arg)
 		enable_rpm_wakeref_asserts(i915);
 	}
 
-	gen11_gu_misc_irq_ack(i915, master_ctl, &gu_misc_iir);
+	gu_misc_iir = gen11_gu_misc_irq_ack(i915, master_ctl);
 
 	/* Acknowledge and enable interrupts. */
 	raw_reg_write(regs, GEN11_GFX_MSTR_IRQ, GEN11_MASTER_IRQ | master_ctl);
 
-	gen11_gu_misc_irq_handler(i915, master_ctl, gu_misc_iir);
+	gen11_gu_misc_irq_handler(i915, gu_misc_iir);
 
 	return IRQ_HANDLED;
 }
@@ -592,7 +592,6 @@ static const struct intel_device_info intel_cannonlake_info = {
 	GEN10_FEATURES, \
 	GEN(11), \
 	.ddb_size = 2048, \
-	.has_csr = 0, \
 	.has_logical_ring_elsq = 1
 
 static const struct intel_device_info intel_icelake_11_info = {
@@ -976,7 +976,6 @@
 #define USB_DEVICE_ID_SIS817_TOUCH	0x0817
 #define USB_DEVICE_ID_SIS_TS		0x1013
 #define USB_DEVICE_ID_SIS1030_TOUCH	0x1030
-#define USB_DEVICE_ID_SIS10FB_TOUCH	0x10fb
 
 #define USB_VENDOR_ID_SKYCABLE	0x1223
 #define USB_DEVICE_ID_SKYCABLE_WIRELESS_PRESENTER	0x3F07
@@ -47,7 +47,7 @@
 /* quirks to control the device */
 #define I2C_HID_QUIRK_SET_PWR_WAKEUP_DEV	BIT(0)
 #define I2C_HID_QUIRK_NO_IRQ_AFTER_RESET	BIT(1)
-#define I2C_HID_QUIRK_RESEND_REPORT_DESCR	BIT(2)
+#define I2C_HID_QUIRK_NO_RUNTIME_PM	BIT(2)
 
 /* flags */
 #define I2C_HID_STARTED	0
@@ -169,9 +169,8 @@ static const struct i2c_hid_quirks {
 	{ USB_VENDOR_ID_WEIDA, USB_DEVICE_ID_WEIDA_8755,
 		I2C_HID_QUIRK_SET_PWR_WAKEUP_DEV },
 	{ I2C_VENDOR_ID_HANTICK, I2C_PRODUCT_ID_HANTICK_5288,
-		I2C_HID_QUIRK_NO_IRQ_AFTER_RESET },
-	{ USB_VENDOR_ID_SIS_TOUCH, USB_DEVICE_ID_SIS10FB_TOUCH,
-		I2C_HID_QUIRK_RESEND_REPORT_DESCR },
+		I2C_HID_QUIRK_NO_IRQ_AFTER_RESET |
+		I2C_HID_QUIRK_NO_RUNTIME_PM },
 	{ 0, 0 }
 };
 
@@ -1105,7 +1104,9 @@ static int i2c_hid_probe(struct i2c_client *client,
 		goto err_mem_free;
 	}
 
-	pm_runtime_put(&client->dev);
+	if (!(ihid->quirks & I2C_HID_QUIRK_NO_RUNTIME_PM))
+		pm_runtime_put(&client->dev);
+
 	return 0;
 
 err_mem_free:
@@ -1130,7 +1131,8 @@ static int i2c_hid_remove(struct i2c_client *client)
 	struct i2c_hid *ihid = i2c_get_clientdata(client);
 	struct hid_device *hid;
 
-	pm_runtime_get_sync(&client->dev);
+	if (!(ihid->quirks & I2C_HID_QUIRK_NO_RUNTIME_PM))
+		pm_runtime_get_sync(&client->dev);
 	pm_runtime_disable(&client->dev);
 	pm_runtime_set_suspended(&client->dev);
 	pm_runtime_put_noidle(&client->dev);
@@ -1236,22 +1238,13 @@ static int i2c_hid_resume(struct device *dev)
 
 	/* Instead of resetting device, simply powers the device on. This
 	 * solves "incomplete reports" on Raydium devices 2386:3118 and
-	 * 2386:4B33
+	 * 2386:4B33 and fixes various SIS touchscreens no longer sending
+	 * data after a suspend/resume.
 	 */
 	ret = i2c_hid_set_power(client, I2C_HID_PWR_ON);
 	if (ret)
 		return ret;
 
-	/* Some devices need to re-send report descr cmd
-	 * after resume, after this it will be back normal.
-	 * otherwise it issues too many incomplete reports.
-	 */
-	if (ihid->quirks & I2C_HID_QUIRK_RESEND_REPORT_DESCR) {
-		ret = i2c_hid_command(client, &hid_report_descr_cmd, NULL, 0);
-		if (ret)
-			return ret;
-	}
-
 	if (hid->driver && hid->driver->reset_resume) {
 		ret = hid->driver->reset_resume(hid);
 		return ret;
@@ -29,6 +29,7 @@
 #define CNL_Ax_DEVICE_ID	0x9DFC
 #define GLK_Ax_DEVICE_ID	0x31A2
 #define CNL_H_DEVICE_ID	0xA37C
+#define ICL_MOBILE_DEVICE_ID	0x34FC
 #define SPT_H_DEVICE_ID	0xA135
 
 #define REVISION_ID_CHT_A0	0x6

@@ -38,6 +38,7 @@ static const struct pci_device_id ish_pci_tbl[] = {
 	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, CNL_Ax_DEVICE_ID)},
 	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, GLK_Ax_DEVICE_ID)},
 	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, CNL_H_DEVICE_ID)},
+	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, ICL_MOBILE_DEVICE_ID)},
 	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, SPT_H_DEVICE_ID)},
 	{0, }
 };
@@ -76,6 +76,7 @@ static int vmbus_negotiate_version(struct vmbus_channel_msginfo *msginfo,
 				__u32 version)
 {
 	int ret = 0;
+	unsigned int cur_cpu;
 	struct vmbus_channel_initiate_contact *msg;
 	unsigned long flags;
 
@@ -118,9 +119,10 @@ static int vmbus_negotiate_version(struct vmbus_channel_msginfo *msginfo,
 	 * the CPU attempting to connect may not be CPU 0.
 	 */
 	if (version >= VERSION_WIN8_1) {
-		msg->target_vcpu =
-			hv_cpu_number_to_vp_number(smp_processor_id());
-		vmbus_connection.connect_cpu = smp_processor_id();
+		cur_cpu = get_cpu();
+		msg->target_vcpu = hv_cpu_number_to_vp_number(cur_cpu);
+		vmbus_connection.connect_cpu = cur_cpu;
+		put_cpu();
 	} else {
 		msg->target_vcpu = 0;
 		vmbus_connection.connect_cpu = 0;
@@ -34,11 +34,11 @@ static void i2c_dw_configure_fifo_master(struct dw_i2c_dev *dev)
 
 static int i2c_dw_set_timings_master(struct dw_i2c_dev *dev)
 {
-	u32 ic_clk = i2c_dw_clk_rate(dev);
 	const char *mode_str, *fp_str = "";
 	u32 comp_param1;
 	u32 sda_falling_time, scl_falling_time;
 	struct i2c_timings *t = &dev->timings;
+	u32 ic_clk;
 	int ret;
 
 	ret = i2c_dw_acquire_lock(dev);
@@ -53,6 +53,7 @@ static int i2c_dw_set_timings_master(struct dw_i2c_dev *dev)
 
 	/* Calculate SCL timing parameters for standard mode if not set */
 	if (!dev->ss_hcnt || !dev->ss_lcnt) {
+		ic_clk = i2c_dw_clk_rate(dev);
 		dev->ss_hcnt =
 			i2c_dw_scl_hcnt(ic_clk,
 					4000,	/* tHD;STA = tHIGH = 4.0 us */
@@ -89,6 +90,7 @@ static int i2c_dw_set_timings_master(struct dw_i2c_dev *dev)
 	 * needed also in high speed mode.
 	 */
 	if (!dev->fs_hcnt || !dev->fs_lcnt) {
+		ic_clk = i2c_dw_clk_rate(dev);
 		dev->fs_hcnt =
 			i2c_dw_scl_hcnt(ic_clk,
 					600,	/* tHD;STA = tHIGH = 0.6 us */
@@ -164,7 +164,7 @@ static s32 sch_access(struct i2c_adapter *adap, u16 addr,
 		 * run ~75 kHz instead which should do no harm.
 		 */
 		dev_notice(&sch_adapter.dev,
-			"Clock divider unitialized. Setting defaults\n");
+			"Clock divider uninitialized. Setting defaults\n");
 		outw(backbone_speed / (4 * 100), SMBHSTCLK);
 	}
 
@@ -367,20 +367,26 @@ static int geni_i2c_rx_one_msg(struct geni_i2c_dev *gi2c, struct i2c_msg *msg,
 	dma_addr_t rx_dma;
 	enum geni_se_xfer_mode mode;
 	unsigned long time_left = XFER_TIMEOUT;
+	void *dma_buf;
 
 	gi2c->cur = msg;
-	mode = msg->len > 32 ? GENI_SE_DMA : GENI_SE_FIFO;
+	mode = GENI_SE_FIFO;
+	dma_buf = i2c_get_dma_safe_msg_buf(msg, 32);
+	if (dma_buf)
+		mode = GENI_SE_DMA;
+
 	geni_se_select_mode(&gi2c->se, mode);
 	writel_relaxed(msg->len, gi2c->se.base + SE_I2C_RX_TRANS_LEN);
 	geni_se_setup_m_cmd(&gi2c->se, I2C_READ, m_param);
 	if (mode == GENI_SE_DMA) {
 		int ret;
 
-		ret = geni_se_rx_dma_prep(&gi2c->se, msg->buf, msg->len,
+		ret = geni_se_rx_dma_prep(&gi2c->se, dma_buf, msg->len,
					  &rx_dma);
 		if (ret) {
 			mode = GENI_SE_FIFO;
 			geni_se_select_mode(&gi2c->se, mode);
+			i2c_put_dma_safe_msg_buf(dma_buf, msg, false);
 		}
 	}
 
@@ -393,6 +399,7 @@ static int geni_i2c_rx_one_msg(struct geni_i2c_dev *gi2c, struct i2c_msg *msg,
 		if (gi2c->err)
 			geni_i2c_rx_fsm_rst(gi2c);
 		geni_se_rx_dma_unprep(&gi2c->se, rx_dma, msg->len);
+		i2c_put_dma_safe_msg_buf(dma_buf, msg, !gi2c->err);
 	}
 	return gi2c->err;
 }
@@ -403,20 +410,26 @@ static int geni_i2c_tx_one_msg(struct geni_i2c_dev *gi2c, struct i2c_msg *msg,
 	dma_addr_t tx_dma;
 	enum geni_se_xfer_mode mode;
 	unsigned long time_left;
+	void *dma_buf;
 
 	gi2c->cur = msg;
-	mode = msg->len > 32 ? GENI_SE_DMA : GENI_SE_FIFO;
+	mode = GENI_SE_FIFO;
+	dma_buf = i2c_get_dma_safe_msg_buf(msg, 32);
+	if (dma_buf)
+		mode = GENI_SE_DMA;
+
 	geni_se_select_mode(&gi2c->se, mode);
 	writel_relaxed(msg->len, gi2c->se.base + SE_I2C_TX_TRANS_LEN);
 	geni_se_setup_m_cmd(&gi2c->se, I2C_WRITE, m_param);
 	if (mode == GENI_SE_DMA) {
 		int ret;
 
-		ret = geni_se_tx_dma_prep(&gi2c->se, msg->buf, msg->len,
+		ret = geni_se_tx_dma_prep(&gi2c->se, dma_buf, msg->len,
					  &tx_dma);
 		if (ret) {
 			mode = GENI_SE_FIFO;
 			geni_se_select_mode(&gi2c->se, mode);
+			i2c_put_dma_safe_msg_buf(dma_buf, msg, false);
 		}
 	}
 
@@ -432,6 +445,7 @@ static int geni_i2c_tx_one_msg(struct geni_i2c_dev *gi2c, struct i2c_msg *msg,
 		if (gi2c->err)
 			geni_i2c_tx_fsm_rst(gi2c);
 		geni_se_tx_dma_unprep(&gi2c->se, tx_dma, msg->len);
+		i2c_put_dma_safe_msg_buf(dma_buf, msg, !gi2c->err);
 	}
 	return gi2c->err;
 }
@@ -152,6 +152,7 @@ acpi_smbus_cmi_access(struct i2c_adapter *adap, u16 addr, unsigned short flags,
 			mt_params[3].type = ACPI_TYPE_INTEGER;
 			mt_params[3].integer.value = len;
 			mt_params[4].type = ACPI_TYPE_BUFFER;
+			mt_params[4].buffer.length = len;
 			mt_params[4].buffer.pointer = data->block + 1;
 		}
 		break;
@@ -3069,7 +3069,7 @@ static phys_addr_t amd_iommu_iova_to_phys(struct iommu_domain *dom,
 		return 0;
 
 	offset_mask = pte_pgsize - 1;
-	__pte = *pte & PM_ADDR_MASK;
+	__pte = __sme_clr(*pte & PM_ADDR_MASK);
 
 	return (__pte & ~offset_mask) | (iova & offset_mask);
 }
@@ -1455,8 +1455,8 @@ static int __load_mappings(struct dm_cache_metadata *cmd,
 		if (hints_valid) {
 			r = dm_array_cursor_next(&cmd->hint_cursor);
 			if (r) {
-				DMERR("dm_array_cursor_next for hint failed");
-				goto out;
+				dm_array_cursor_end(&cmd->hint_cursor);
+				hints_valid = false;
 			}
 		}
 
@@ -3009,8 +3009,13 @@ static dm_cblock_t get_cache_dev_size(struct cache *cache)
 
 static bool can_resize(struct cache *cache, dm_cblock_t new_size)
 {
-	if (from_cblock(new_size) > from_cblock(cache->cache_size))
-		return true;
+	if (from_cblock(new_size) > from_cblock(cache->cache_size)) {
+		if (cache->sized) {
+			DMERR("%s: unable to extend cache due to missing cache table reload",
+			      cache_device_name(cache));
+			return false;
+		}
+	}
 
 	/*
 	 * We can't drop a dirty block when shrinking the cache.
@@ -806,19 +806,19 @@ static int parse_path_selector(struct dm_arg_set *as, struct priority_group *pg,
 }
 
 static int setup_scsi_dh(struct block_device *bdev, struct multipath *m,
-			 const char *attached_handler_name, char **error)
+			 const char **attached_handler_name, char **error)
 {
 	struct request_queue *q = bdev_get_queue(bdev);
 	int r;
 
 	if (test_bit(MPATHF_RETAIN_ATTACHED_HW_HANDLER, &m->flags)) {
 retain:
-		if (attached_handler_name) {
+		if (*attached_handler_name) {
 			/*
 			 * Clear any hw_handler_params associated with a
 			 * handler that isn't already attached.
 			 */
-			if (m->hw_handler_name && strcmp(attached_handler_name, m->hw_handler_name)) {
+			if (m->hw_handler_name && strcmp(*attached_handler_name, m->hw_handler_name)) {
 				kfree(m->hw_handler_params);
 				m->hw_handler_params = NULL;
 			}
@@ -830,7 +830,8 @@ static int setup_scsi_dh(struct block_device *bdev, struct multipath *m,
 			 * handler instead of the original table passed in.
 			 */
 			kfree(m->hw_handler_name);
-			m->hw_handler_name = attached_handler_name;
+			m->hw_handler_name = *attached_handler_name;
+			*attached_handler_name = NULL;
 		}
 	}
 
@@ -867,7 +868,7 @@ static struct pgpath *parse_path(struct dm_arg_set *as, struct path_selector *ps
 	struct pgpath *p;
 	struct multipath *m = ti->private;
 	struct request_queue *q;
-	const char *attached_handler_name;
+	const char *attached_handler_name = NULL;
 
 	/* we need at least a path arg */
 	if (as->argc < 1) {
@@ -890,7 +891,7 @@ static struct pgpath *parse_path(struct dm_arg_set *as, struct path_selector *ps
 		attached_handler_name = scsi_dh_attached_handler_name(q, GFP_KERNEL);
 		if (attached_handler_name || m->hw_handler_name) {
 			INIT_DELAYED_WORK(&p->activate_path, activate_path_work);
-			r = setup_scsi_dh(p->path.dev->bdev, m, attached_handler_name, &ti->error);
+			r = setup_scsi_dh(p->path.dev->bdev, m, &attached_handler_name, &ti->error);
 			if (r) {
 				dm_put_device(ti, p->path.dev);
 				goto bad;
@@ -905,6 +906,7 @@ static struct pgpath *parse_path(struct dm_arg_set *as, struct path_selector *ps
 
 	return p;
 bad:
+	kfree(attached_handler_name);
 	free_pgpath(p);
 	return ERR_PTR(r);
 }
@@ -3353,7 +3353,7 @@ static const char *sync_str(enum sync_state state)
 };
 
 /* Return enum sync_state for @mddev derived from @recovery flags */
-static const enum sync_state decipher_sync_action(struct mddev *mddev, unsigned long recovery)
+static enum sync_state decipher_sync_action(struct mddev *mddev, unsigned long recovery)
 {
 	if (test_bit(MD_RECOVERY_FROZEN, &recovery))
 		return st_frozen;
@@ -832,10 +832,8 @@ static void __set_metadata_reserve(struct dm_pool_metadata *pmd)
 	if (r) {
 		DMERR("could not get size of metadata device");
 		pmd->metadata_reserve = max_blocks;
-	} else {
-		sector_div(total, 10);
-		pmd->metadata_reserve = min(max_blocks, total);
-	}
+	} else
+		pmd->metadata_reserve = min(max_blocks, div_u64(total, 10));
 }
 
 struct dm_pool_metadata *dm_pool_metadata_open(struct block_device *bdev,
@@ -115,14 +115,6 @@ static void __v4l2_event_queue_fh(struct v4l2_fh *fh, const struct v4l2_event *e
 	if (sev == NULL)
 		return;
 
-	/*
-	 * If the event has been added to the fh->subscribed list, but its
-	 * add op has not completed yet elems will be 0, treat this as
-	 * not being subscribed.
-	 */
-	if (!sev->elems)
-		return;
-
 	/* Increase event sequence number on fh. */
 	fh->sequence++;
 
@@ -208,6 +200,7 @@ int v4l2_event_subscribe(struct v4l2_fh *fh,
 	struct v4l2_subscribed_event *sev, *found_ev;
 	unsigned long flags;
 	unsigned i;
+	int ret = 0;
 
 	if (sub->type == V4L2_EVENT_ALL)
 		return -EINVAL;
@@ -225,31 +218,36 @@ int v4l2_event_subscribe(struct v4l2_fh *fh,
 	sev->flags = sub->flags;
 	sev->fh = fh;
 	sev->ops = ops;
+	sev->elems = elems;
+
+	mutex_lock(&fh->subscribe_lock);
 
 	spin_lock_irqsave(&fh->vdev->fh_lock, flags);
 	found_ev = v4l2_event_subscribed(fh, sub->type, sub->id);
-	if (!found_ev)
-		list_add(&sev->list, &fh->subscribed);
 	spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);
 
 	if (found_ev) {
+		/* Already listening */
 		kvfree(sev);
-		return 0; /* Already listening */
+		goto out_unlock;
 	}
 
 	if (sev->ops && sev->ops->add) {
-		int ret = sev->ops->add(sev, elems);
+		ret = sev->ops->add(sev, elems);
 		if (ret) {
-			sev->ops = NULL;
-			v4l2_event_unsubscribe(fh, sub);
-			return ret;
+			kvfree(sev);
+			goto out_unlock;
 		}
 	}
 
-	/* Mark as ready for use */
-	sev->elems = elems;
+	spin_lock_irqsave(&fh->vdev->fh_lock, flags);
+	list_add(&sev->list, &fh->subscribed);
+	spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);
 
-	return 0;
+out_unlock:
+	mutex_unlock(&fh->subscribe_lock);
+
+	return ret;
 }
 EXPORT_SYMBOL_GPL(v4l2_event_subscribe);
 
@@ -288,6 +286,8 @@ int v4l2_event_unsubscribe(struct v4l2_fh *fh,
 		return 0;
 	}
 
+	mutex_lock(&fh->subscribe_lock);
+
 	spin_lock_irqsave(&fh->vdev->fh_lock, flags);
 
 	sev = v4l2_event_subscribed(fh, sub->type, sub->id);
@@ -305,6 +305,8 @@ int v4l2_event_unsubscribe(struct v4l2_fh *fh,
 	if (sev && sev->ops && sev->ops->del)
 		sev->ops->del(sev);
 
+	mutex_unlock(&fh->subscribe_lock);
+
 	kvfree(sev);
 
 	return 0;
@@ -45,6 +45,7 @@ void v4l2_fh_init(struct v4l2_fh *fh, struct video_device *vdev)
 	INIT_LIST_HEAD(&fh->available);
 	INIT_LIST_HEAD(&fh->subscribed);
 	fh->sequence = -1;
+	mutex_init(&fh->subscribe_lock);
 }
 EXPORT_SYMBOL_GPL(v4l2_fh_init);
 
@@ -90,6 +91,7 @@ void v4l2_fh_exit(struct v4l2_fh *fh)
 		return;
 	v4l_disable_media_source(fh->vdev);
 	v4l2_event_unsubscribe_all(fh);
+	mutex_destroy(&fh->subscribe_lock);
 	fh->vdev = NULL;
 }
 EXPORT_SYMBOL_GPL(v4l2_fh_exit);
@@ -235,7 +235,7 @@ int mmc_of_parse(struct mmc_host *host)
 		host->caps |= MMC_CAP_NEEDS_POLL;
 
 	ret = mmc_gpiod_request_cd(host, "cd", 0, true,
-				   cd_debounce_delay_ms,
+				   cd_debounce_delay_ms * 1000,
 				   &cd_gpio_invert);
 	if (!ret)
 		dev_info(host->parent, "Got CD GPIO\n");

@@ -271,7 +271,7 @@ int mmc_gpiod_request_cd(struct mmc_host *host, const char *con_id,
 	if (debounce) {
 		ret = gpiod_set_debounce(desc, debounce);
 		if (ret < 0)
-			ctx->cd_debounce_delay_ms = debounce;
+			ctx->cd_debounce_delay_ms = debounce / 1000;
 	}
 
 	if (gpio_invert)
@@ -498,7 +498,8 @@ static const struct soc_device_attribute gen3_soc_whitelist[] = {
 
 static int renesas_sdhi_sys_dmac_probe(struct platform_device *pdev)
 {
-	if (of_device_get_match_data(&pdev->dev) == &of_rcar_gen3_compatible &&
+	if ((of_device_get_match_data(&pdev->dev) == &of_rcar_gen3_compatible ||
+	     of_device_get_match_data(&pdev->dev) == &of_rcar_r8a7795_compatible) &&
 	    !soc_device_match(gen3_soc_whitelist))
 		return -ENODEV;
 
@@ -210,6 +210,7 @@ static void bond_get_stats(struct net_device *bond_dev,
 static void bond_slave_arr_handler(struct work_struct *work);
 static bool bond_time_in_interval(struct bonding *bond, unsigned long last_act,
 				  int mod);
+static void bond_netdev_notify_work(struct work_struct *work);
 
 /*---------------------------- General routines -----------------------------*/
 
@@ -1170,9 +1171,27 @@ static rx_handler_result_t bond_handle_frame(struct sk_buff **pskb)
 		}
 	}
 
-	/* don't change skb->dev for link-local packets */
-	if (is_link_local_ether_addr(eth_hdr(skb)->h_dest))
+	/* Link-local multicast packets should be passed to the
+	 * stack on the link they arrive as well as pass them to the
+	 * bond-master device. These packets are mostly usable when
+	 * stack receives it with the link on which they arrive
+	 * (e.g. LLDP) they also must be available on master. Some of
+	 * the use cases include (but are not limited to): LLDP agents
+	 * that must be able to operate both on enslaved interfaces as
+	 * well as on bonds themselves; linux bridges that must be able
+	 * to process/pass BPDUs from attached bonds when any kind of
+	 * STP version is enabled on the network.
+	 */
+	if (is_link_local_ether_addr(eth_hdr(skb)->h_dest)) {
+		struct sk_buff *nskb = skb_clone(skb, GFP_ATOMIC);
+
+		if (nskb) {
+			nskb->dev = bond->dev;
+			nskb->queue_mapping = 0;
+			netif_rx(nskb);
+		}
 		return RX_HANDLER_PASS;
+	}
 	if (bond_should_deliver_exact_match(skb, slave, bond))
 		return RX_HANDLER_EXACT;
 
@@ -1269,6 +1288,8 @@ static struct slave *bond_alloc_slave(struct bonding *bond)
 			return NULL;
 		}
 	}
+	INIT_DELAYED_WORK(&slave->notify_work, bond_netdev_notify_work);
+
 	return slave;
 }
 
@@ -1276,6 +1297,7 @@ static void bond_free_slave(struct slave *slave)
 {
 	struct bonding *bond = bond_get_bond_by_slave(slave);
 
+	cancel_delayed_work_sync(&slave->notify_work);
 	if (BOND_MODE(bond) == BOND_MODE_8023AD)
 		kfree(SLAVE_AD_INFO(slave));
 
@@ -1297,39 +1319,26 @@ static void bond_fill_ifslave(struct slave *slave, struct ifslave *info)
 	info->link_failure_count = slave->link_failure_count;
 }
 
-static void bond_netdev_notify(struct net_device *dev,
-			       struct netdev_bonding_info *info)
-{
-	rtnl_lock();
-	netdev_bonding_info_change(dev, info);
-	rtnl_unlock();
-}
-
 static void bond_netdev_notify_work(struct work_struct *_work)
 {
-	struct netdev_notify_work *w =
-		container_of(_work, struct netdev_notify_work, work.work);
+	struct slave *slave = container_of(_work, struct slave,
+					   notify_work.work);
 
-	bond_netdev_notify(w->dev, &w->bonding_info);
-	dev_put(w->dev);
-	kfree(w);
+	if (rtnl_trylock()) {
+		struct netdev_bonding_info binfo;
+
+		bond_fill_ifslave(slave, &binfo.slave);
+		bond_fill_ifbond(slave->bond, &binfo.master);
+		netdev_bonding_info_change(slave->dev, &binfo);
+		rtnl_unlock();
+	} else {
+		queue_delayed_work(slave->bond->wq, &slave->notify_work, 1);
+	}
 }
 
 void bond_queue_slave_event(struct slave *slave)
 {
-	struct bonding *bond = slave->bond;
-	struct netdev_notify_work *nnw = kzalloc(sizeof(*nnw), GFP_ATOMIC);
-
-	if (!nnw)
-		return;
-
-	dev_hold(slave->dev);
-	nnw->dev = slave->dev;
-	bond_fill_ifslave(slave, &nnw->bonding_info.slave);
-	bond_fill_ifbond(bond, &nnw->bonding_info.master);
-	INIT_DELAYED_WORK(&nnw->work, bond_netdev_notify_work);
-
-	queue_delayed_work(slave->bond->wq, &nnw->work, 0);
+	queue_delayed_work(slave->bond->wq, &slave->notify_work, 0);
 }
 
 void bond_lower_state_changed(struct slave *slave)
@@ -1107,7 +1107,7 @@ void b53_vlan_add(struct dsa_switch *ds, int port,
 		b53_get_vlan_entry(dev, vid, vl);
 
 		vl->members |= BIT(port);
-		if (untagged)
+		if (untagged && !dsa_is_cpu_port(ds, port))
 			vl->untag |= BIT(port);
 		else
 			vl->untag &= ~BIT(port);
@@ -1149,7 +1149,7 @@ int b53_vlan_del(struct dsa_switch *ds, int port,
 			pvid = 0;
 		}
 
-		if (untagged)
+		if (untagged && !dsa_is_cpu_port(ds, port))
 			vl->untag &= ~(BIT(port));
 
 		b53_set_vlan_entry(dev, vid, vl);
@@ -2185,25 +2185,6 @@ static netdev_tx_t ena_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	return NETDEV_TX_OK;
 }
 
-#ifdef CONFIG_NET_POLL_CONTROLLER
-static void ena_netpoll(struct net_device *netdev)
-{
-	struct ena_adapter *adapter = netdev_priv(netdev);
-	int i;
-
-	/* Dont schedule NAPI if the driver is in the middle of reset
-	 * or netdev is down.
-	 */
-
-	if (!test_bit(ENA_FLAG_DEV_UP, &adapter->flags) ||
-	    test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags))
-		return;
-
-	for (i = 0; i < adapter->num_queues; i++)
-		napi_schedule(&adapter->ena_napi[i].napi);
-}
-#endif /* CONFIG_NET_POLL_CONTROLLER */
-
 static u16 ena_select_queue(struct net_device *dev, struct sk_buff *skb,
 			    struct net_device *sb_dev,
 			    select_queue_fallback_t fallback)
@@ -2369,9 +2350,6 @@ static const struct net_device_ops ena_netdev_ops = {
 	.ndo_change_mtu = ena_change_mtu,
 	.ndo_set_mac_address = NULL,
 	.ndo_validate_addr = eth_validate_addr,
-#ifdef CONFIG_NET_POLL_CONTROLLER
-	.ndo_poll_controller = ena_netpoll,
-#endif /* CONFIG_NET_POLL_CONTROLLER */
 };
 
 static int ena_device_validate_params(struct ena_adapter *adapter,
@@ -1031,6 +1031,7 @@ static int dec_lance_probe(struct device *bdev, const int type)
 	int i, ret;
 	unsigned long esar_base;
 	unsigned char *esar;
+	const char *desc;
 
 	if (dec_lance_debug && version_printed++ == 0)
 		printk(version);
@@ -1216,19 +1217,20 @@ static int dec_lance_probe(struct device *bdev, const int type)
 	 */
 	switch (type) {
 	case ASIC_LANCE:
-		printk("%s: IOASIC onboard LANCE", name);
+		desc = "IOASIC onboard LANCE";
 		break;
 	case PMAD_LANCE:
-		printk("%s: PMAD-AA", name);
+		desc = "PMAD-AA";
 		break;
 	case PMAX_LANCE:
-		printk("%s: PMAX onboard LANCE", name);
+		desc = "PMAX onboard LANCE";
 		break;
 	}
 	for (i = 0; i < 6; i++)
 		dev->dev_addr[i] = esar[i * 4];
 
-	printk(", addr = %pM, irq = %d\n", dev->dev_addr, dev->irq);
+	printk("%s: %s, addr = %pM, irq = %d\n",
+	       name, desc, dev->dev_addr, dev->irq);
 
 	dev->netdev_ops = &lance_netdev_ops;
 	dev->watchdog_timeo = 5*HZ;
@@ -1069,9 +1069,6 @@ static void bcm_sysport_resume_from_wol(struct bcm_sysport_priv *priv)
 {
 	u32 reg;
 
-	/* Stop monitoring MPD interrupt */
-	intrl2_0_mask_set(priv, INTRL2_0_MPD | INTRL2_0_BRCM_MATCH_TAG);
-
 	/* Disable RXCHK, active filters and Broadcom tag matching */
 	reg = rxchk_readl(priv, RXCHK_CONTROL);
 	reg &= ~(RXCHK_BRCM_TAG_MATCH_MASK <<
@@ -1081,6 +1078,17 @@ static void bcm_sysport_resume_from_wol(struct bcm_sysport_priv *priv)
 	/* Clear the MagicPacket detection logic */
 	mpd_enable_set(priv, false);
 
+	reg = intrl2_0_readl(priv, INTRL2_CPU_STATUS);
+	if (reg & INTRL2_0_MPD)
+		netdev_info(priv->netdev, "Wake-on-LAN (MPD) interrupt!\n");
+
+	if (reg & INTRL2_0_BRCM_MATCH_TAG) {
+		reg = rxchk_readl(priv, RXCHK_BRCM_TAG_MATCH_STATUS) &
+				  RXCHK_BRCM_TAG_MATCH_MASK;
+		netdev_info(priv->netdev,
+			    "Wake-on-LAN (filters 0x%02x) interrupt!\n", reg);
+	}
+
 	netif_dbg(priv, wol, priv->netdev, "resumed from WOL\n");
 }
 
@@ -1105,7 +1113,6 @@ static irqreturn_t bcm_sysport_rx_isr(int irq, void *dev_id)
 	struct bcm_sysport_priv *priv = netdev_priv(dev);
 	struct bcm_sysport_tx_ring *txr;
 	unsigned int ring, ring_bit;
-	u32 reg;
 
 	priv->irq0_stat = intrl2_0_readl(priv, INTRL2_CPU_STATUS) &
 			  ~intrl2_0_readl(priv, INTRL2_CPU_MASK_STATUS);
@@ -1131,16 +1138,6 @@ static irqreturn_t bcm_sysport_rx_isr(int irq, void *dev_id)
 	if (priv->irq0_stat & INTRL2_0_TX_RING_FULL)
 		bcm_sysport_tx_reclaim_all(priv);
 
-	if (priv->irq0_stat & INTRL2_0_MPD)
-		netdev_info(priv->netdev, "Wake-on-LAN (MPD) interrupt!\n");
-
-	if (priv->irq0_stat & INTRL2_0_BRCM_MATCH_TAG) {
-		reg = rxchk_readl(priv, RXCHK_BRCM_TAG_MATCH_STATUS) &
-				  RXCHK_BRCM_TAG_MATCH_MASK;
-		netdev_info(priv->netdev,
-			    "Wake-on-LAN (filters 0x%02x) interrupt!\n", reg);
-	}
-
 	if (!priv->is_lite)
 		goto out;
 
@@ -2641,9 +2638,6 @@ static int bcm_sysport_suspend_to_wol(struct bcm_sysport_priv *priv)
 	/* UniMAC receive needs to be turned on */
 	umac_enable_set(priv, CMD_RX_EN, 1);
 
-	/* Enable the interrupt wake-up source */
-	intrl2_0_mask_clear(priv, INTRL2_0_MPD | INTRL2_0_BRCM_MATCH_TAG);
-
 	netif_dbg(priv, wol, ndev, "entered WOL mode\n");
 
 	return 0;
@@ -1884,8 +1884,11 @@ static int bnxt_poll_work(struct bnxt *bp, struct bnxt_napi *bnapi, int budget)
 		if (TX_CMP_TYPE(txcmp) == CMP_TYPE_TX_L2_CMP) {
 			tx_pkts++;
 			/* return full budget so NAPI will complete. */
-			if (unlikely(tx_pkts > bp->tx_wake_thresh))
+			if (unlikely(tx_pkts > bp->tx_wake_thresh)) {
 				rx_pkts = budget;
+				raw_cons = NEXT_RAW_CMP(raw_cons);
+				break;
+			}
 		} else if ((TX_CMP_TYPE(txcmp) & 0x30) == 0x10) {
 			if (likely(budget))
 				rc = bnxt_rx_pkt(bp, bnapi, &raw_cons, &event);
@@ -1913,7 +1916,7 @@ static int bnxt_poll_work(struct bnxt *bp, struct bnxt_napi *bnapi, int budget)
 		}
 		raw_cons = NEXT_RAW_CMP(raw_cons);
 
-		if (rx_pkts == budget)
+		if (rx_pkts && rx_pkts == budget)
 			break;
 	}
 
@@ -2027,8 +2030,12 @@ static int bnxt_poll(struct napi_struct *napi, int budget)
 	while (1) {
 		work_done += bnxt_poll_work(bp, bnapi, budget - work_done);
 
-		if (work_done >= budget)
+		if (work_done >= budget) {
+			if (!budget)
+				BNXT_CP_DB_REARM(cpr->cp_doorbell,
+						 cpr->cp_raw_cons);
 			break;
+		}
 
 		if (!bnxt_has_work(bp, cpr)) {
 			if (napi_complete_done(napi, work_done))
@@ -3010,10 +3017,11 @@ static void bnxt_free_hwrm_resources(struct bnxt *bp)
 {
 	struct pci_dev *pdev = bp->pdev;
 
-	dma_free_coherent(&pdev->dev, PAGE_SIZE, bp->hwrm_cmd_resp_addr,
-			  bp->hwrm_cmd_resp_dma_addr);
-
-	bp->hwrm_cmd_resp_addr = NULL;
+	if (bp->hwrm_cmd_resp_addr) {
+		dma_free_coherent(&pdev->dev, PAGE_SIZE, bp->hwrm_cmd_resp_addr,
+				  bp->hwrm_cmd_resp_dma_addr);
+		bp->hwrm_cmd_resp_addr = NULL;
+	}
 }
 
 static int bnxt_alloc_hwrm_resources(struct bnxt *bp)
@@ -4643,7 +4651,7 @@ __bnxt_hwrm_reserve_pf_rings(struct bnxt *bp, struct hwrm_func_cfg_input *req,
 			      FUNC_CFG_REQ_ENABLES_NUM_STAT_CTXS : 0;
 		enables |= ring_grps ?
 			   FUNC_CFG_REQ_ENABLES_NUM_HW_RING_GRPS : 0;
-		enables |= vnics ? FUNC_VF_CFG_REQ_ENABLES_NUM_VNICS : 0;
+		enables |= vnics ? FUNC_CFG_REQ_ENABLES_NUM_VNICS : 0;
 
 		req->num_rx_rings = cpu_to_le16(rx_rings);
 		req->num_hw_ring_grps = cpu_to_le16(ring_grps);
@@ -8614,7 +8622,7 @@ static void _bnxt_get_max_rings(struct bnxt *bp, int *max_rx, int *max_tx,
 	*max_tx = hw_resc->max_tx_rings;
 	*max_rx = hw_resc->max_rx_rings;
 	*max_cp = min_t(int, bnxt_get_max_func_cp_rings_for_en(bp),
-			hw_resc->max_irqs);
+			hw_resc->max_irqs - bnxt_get_ulp_msix_num(bp));
 	*max_cp = min_t(int, *max_cp, hw_resc->max_stat_ctxs);
 	max_ring_grps = hw_resc->max_hw_ring_grps;
 	if (BNXT_CHIP_TYPE_NITRO_A0(bp) && BNXT_PF(bp)) {
@@ -9050,6 +9058,7 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 	bnxt_clear_int_mode(bp);
 
 init_err_pci_clean:
+	bnxt_free_hwrm_resources(bp);
 	bnxt_cleanup_pci(bp);
 
 init_err_free:
@@ -98,13 +98,13 @@ static int bnxt_hwrm_queue_cos2bw_cfg(struct bnxt *bp, struct ieee_ets *ets,
 
 	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_QUEUE_COS2BW_CFG, -1, -1);
 	for (i = 0; i < max_tc; i++) {
-		u8 qidx;
+		u8 qidx = bp->tc_to_qidx[i];
 
 		req.enables |= cpu_to_le32(
-			QUEUE_COS2BW_CFG_REQ_ENABLES_COS_QUEUE_ID0_VALID << i);
+			QUEUE_COS2BW_CFG_REQ_ENABLES_COS_QUEUE_ID0_VALID <<
+			qidx);
 
 		memset(&cos2bw, 0, sizeof(cos2bw));
-		qidx = bp->tc_to_qidx[i];
 		cos2bw.queue_id = bp->q_info[qidx].queue_id;
 		if (ets->tc_tsa[i] == IEEE_8021QAZ_TSA_STRICT) {
 			cos2bw.tsa =
@@ -2160,6 +2160,7 @@ static void macb_configure_dma(struct macb *bp)
 		else
 			dmacfg &= ~GEM_BIT(TXCOEN);
 
+		dmacfg &= ~GEM_BIT(ADDR64);
 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
 		if (bp->hw_dma_cap & HW_DMA_CAP_64B)
 			dmacfg |= GEM_BIT(ADDR64);
@@ -2159,6 +2159,8 @@ static int cxgb_extension_ioctl(struct net_device *dev, void __user *useraddr)
 			return -EPERM;
 		if (copy_from_user(&t, useraddr, sizeof(t)))
 			return -EFAULT;
+		if (t.cmd != CHELSIO_SET_QSET_PARAMS)
+			return -EINVAL;
 		if (t.qset_idx >= SGE_QSETS)
 			return -EINVAL;
 		if (!in_range(t.intr_lat, 0, M_NEWTIMER) ||
@@ -2258,6 +2260,9 @@ static int cxgb_extension_ioctl(struct net_device *dev, void __user *useraddr)
 		if (copy_from_user(&t, useraddr, sizeof(t)))
 			return -EFAULT;
 
+		if (t.cmd != CHELSIO_GET_QSET_PARAMS)
+			return -EINVAL;
+
 		/* Display qsets for all ports when offload enabled */
 		if (test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map)) {
 			q1 = 0;
@@ -2303,6 +2308,8 @@ static int cxgb_extension_ioctl(struct net_device *dev, void __user *useraddr)
 			return -EBUSY;
 		if (copy_from_user(&edata, useraddr, sizeof(edata)))
 			return -EFAULT;
+		if (edata.cmd != CHELSIO_SET_QSET_NUM)
+			return -EINVAL;
 		if (edata.val < 1 ||
 		    (edata.val > 1 && !(adapter->flags & USING_MSIX)))
 			return -EINVAL;
@@ -2343,6 +2350,8 @@ static int cxgb_extension_ioctl(struct net_device *dev, void __user *useraddr)
 			return -EPERM;
 		if (copy_from_user(&t, useraddr, sizeof(t)))
 			return -EFAULT;
+		if (t.cmd != CHELSIO_LOAD_FW)
+			return -EINVAL;
 		/* Check t.len sanity ? */
 		fw_data = memdup_user(useraddr + sizeof(t), t.len);
 		if (IS_ERR(fw_data))
@@ -2366,6 +2375,8 @@ static int cxgb_extension_ioctl(struct net_device *dev, void __user *useraddr)
 			return -EBUSY;
 		if (copy_from_user(&m, useraddr, sizeof(m)))
 			return -EFAULT;
+		if (m.cmd != CHELSIO_SETMTUTAB)
+			return -EINVAL;
 		if (m.nmtus != NMTUS)
 			return -EINVAL;
 		if (m.mtus[0] < 81) /* accommodate SACK */
@@ -2407,6 +2418,8 @@ static int cxgb_extension_ioctl(struct net_device *dev, void __user *useraddr)
 			return -EBUSY;
 		if (copy_from_user(&m, useraddr, sizeof(m)))
 			return -EFAULT;
+		if (m.cmd != CHELSIO_SET_PM)
+			return -EINVAL;
 		if (!is_power_of_2(m.rx_pg_sz) ||
 		    !is_power_of_2(m.tx_pg_sz))
 			return -EINVAL; /* not power of 2 */
@@ -2440,6 +2453,8 @@ static int cxgb_extension_ioctl(struct net_device *dev, void __user *useraddr)
 			return -EIO; /* need the memory controllers */
 		if (copy_from_user(&t, useraddr, sizeof(t)))
 			return -EFAULT;
+		if (t.cmd != CHELSIO_GET_MEM)
+			return -EINVAL;
 		if ((t.addr & 7) || (t.len & 7))
 			return -EINVAL;
 		if (t.mem_id == MEM_CM)
@@ -2492,6 +2507,8 @@ static int cxgb_extension_ioctl(struct net_device *dev, void __user *useraddr)
 			return -EAGAIN;
 		if (copy_from_user(&t, useraddr, sizeof(t)))
 			return -EFAULT;
+		if (t.cmd != CHELSIO_SET_TRACE_FILTER)
+			return -EINVAL;
 
 		tp = (const struct trace_params *)&t.sip;
 		if (t.config_tx)
@@ -4002,8 +4002,6 @@ static int be_enable_vxlan_offloads(struct be_adapter *adapter)
 	netdev->hw_enc_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
 				   NETIF_F_TSO | NETIF_F_TSO6 |
 				   NETIF_F_GSO_UDP_TUNNEL;
-	netdev->hw_features |= NETIF_F_GSO_UDP_TUNNEL;
-	netdev->features |= NETIF_F_GSO_UDP_TUNNEL;
 
 	dev_info(dev, "Enabled VxLAN offloads for UDP port %d\n",
 		 be16_to_cpu(port));
@@ -4025,8 +4023,6 @@ static void be_disable_vxlan_offloads(struct be_adapter *adapter)
 	adapter->vxlan_port = 0;
 
 	netdev->hw_enc_features = 0;
-	netdev->hw_features &= ~(NETIF_F_GSO_UDP_TUNNEL);
-	netdev->features &= ~(NETIF_F_GSO_UDP_TUNNEL);
 }
 
 static void be_calculate_vf_res(struct be_adapter *adapter, u16 num_vfs,
@@ -5320,6 +5316,7 @@ static void be_netdev_init(struct net_device *netdev)
 	struct be_adapter *adapter = netdev_priv(netdev);
 
 	netdev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
+		NETIF_F_GSO_UDP_TUNNEL |
 		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
 		NETIF_F_HW_VLAN_CTAG_TX;
 	if ((be_if_cap_flags(adapter) & BE_IF_FLAGS_RSS))
@@ -1158,7 +1158,7 @@ static void fec_enet_timeout_work(struct work_struct *work)
 		napi_disable(&fep->napi);
 		netif_tx_lock_bh(ndev);
 		fec_restart(ndev);
-		netif_wake_queue(ndev);
+		netif_tx_wake_all_queues(ndev);
 		netif_tx_unlock_bh(ndev);
 		napi_enable(&fep->napi);
 	}
@@ -1273,7 +1273,7 @@ fec_enet_tx_queue(struct net_device *ndev, u16 queue_id)

 	/* Since we have freed up a buffer, the ring is no longer full
 	 */
-	if (netif_queue_stopped(ndev)) {
+	if (netif_tx_queue_stopped(nq)) {
 		entries_free = fec_enet_get_free_txdesc_num(txq);
 		if (entries_free >= txq->tx_wake_threshold)
 			netif_tx_wake_queue(nq);
@@ -1746,7 +1746,7 @@ static void fec_enet_adjust_link(struct net_device *ndev)
 		napi_disable(&fep->napi);
 		netif_tx_lock_bh(ndev);
 		fec_restart(ndev);
-		netif_wake_queue(ndev);
+		netif_tx_wake_all_queues(ndev);
 		netif_tx_unlock_bh(ndev);
 		napi_enable(&fep->napi);
 	}
@@ -2247,7 +2247,7 @@ static int fec_enet_set_pauseparam(struct net_device *ndev,
 		napi_disable(&fep->napi);
 		netif_tx_lock_bh(ndev);
 		fec_restart(ndev);
-		netif_wake_queue(ndev);
+		netif_tx_wake_all_queues(ndev);
 		netif_tx_unlock_bh(ndev);
 		napi_enable(&fep->napi);
 	}

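netif_wake_queue() and netif_queue_stopped() only deal with TX queue 0, so on a multiqueue FEC the other queues could stay stopped after fec_restart(), and the wake test looked at the wrong queue. The restart paths now use the all-queue helper and the per-queue check uses the queue that was actually serviced; a condensed sketch of the restart pattern (the reconfigure step is a placeholder):

#include <linux/netdevice.h>

static void example_restart_tx(struct net_device *ndev, struct napi_struct *napi)
{
	napi_disable(napi);
	netif_tx_lock_bh(ndev);

	/* ... reprogram the MAC / reset the descriptor rings here ... */

	netif_tx_wake_all_queues(ndev);	/* wake every queue, not just queue 0 */

	netif_tx_unlock_bh(ndev);
	napi_enable(napi);
}
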
@@ -84,7 +84,7 @@ static void hnae_unmap_buffer(struct hnae_ring *ring, struct hnae_desc_cb *cb)
 	if (cb->type == DESC_TYPE_SKB)
 		dma_unmap_single(ring_to_dev(ring), cb->dma, cb->length,
 				 ring_to_dma_dir(ring));
-	else
+	else if (cb->length)
 		dma_unmap_page(ring_to_dev(ring), cb->dma, cb->length,
 			       ring_to_dma_dir(ring));
 }

@@ -40,9 +40,9 @@
 #define SKB_TMP_LEN(SKB) \
 	(((SKB)->transport_header - (SKB)->mac_header) + tcp_hdrlen(SKB))

-static void fill_v2_desc(struct hnae_ring *ring, void *priv,
-			 int size, dma_addr_t dma, int frag_end,
-			 int buf_num, enum hns_desc_type type, int mtu)
+static void fill_v2_desc_hw(struct hnae_ring *ring, void *priv, int size,
+			    int send_sz, dma_addr_t dma, int frag_end,
+			    int buf_num, enum hns_desc_type type, int mtu)
 {
 	struct hnae_desc *desc = &ring->desc[ring->next_to_use];
 	struct hnae_desc_cb *desc_cb = &ring->desc_cb[ring->next_to_use];
@@ -64,7 +64,7 @@ static void fill_v2_desc(struct hnae_ring *ring, void *priv,
 	desc_cb->type = type;

 	desc->addr = cpu_to_le64(dma);
-	desc->tx.send_size = cpu_to_le16((u16)size);
+	desc->tx.send_size = cpu_to_le16((u16)send_sz);

 	/* config bd buffer end */
 	hnae_set_bit(rrcfv, HNSV2_TXD_VLD_B, 1);
@@ -133,6 +133,14 @@ static void fill_v2_desc(struct hnae_ring *ring, void *priv,
 	ring_ptr_move_fw(ring, next_to_use);
 }

+static void fill_v2_desc(struct hnae_ring *ring, void *priv,
+			 int size, dma_addr_t dma, int frag_end,
+			 int buf_num, enum hns_desc_type type, int mtu)
+{
+	fill_v2_desc_hw(ring, priv, size, size, dma, frag_end,
+			buf_num, type, mtu);
+}
+
 static const struct acpi_device_id hns_enet_acpi_match[] = {
 	{ "HISI00C1", 0 },
 	{ "HISI00C2", 0 },
@@ -289,15 +297,15 @@ static void fill_tso_desc(struct hnae_ring *ring, void *priv,

 	/* when the frag size is bigger than hardware, split this frag */
 	for (k = 0; k < frag_buf_num; k++)
-		fill_v2_desc(ring, priv,
-			     (k == frag_buf_num - 1) ?
+		fill_v2_desc_hw(ring, priv, k == 0 ? size : 0,
+				(k == frag_buf_num - 1) ?
 				sizeoflast : BD_MAX_SEND_SIZE,
-			     dma + BD_MAX_SEND_SIZE * k,
-			     frag_end && (k == frag_buf_num - 1) ? 1 : 0,
-			     buf_num,
-			     (type == DESC_TYPE_SKB && !k) ?
+				dma + BD_MAX_SEND_SIZE * k,
+				frag_end && (k == frag_buf_num - 1) ? 1 : 0,
+				buf_num,
+				(type == DESC_TYPE_SKB && !k) ?
 				DESC_TYPE_SKB : DESC_TYPE_PAGE,
-			     mtu);
+				mtu);
 }

 netdev_tx_t hns_nic_net_xmit_hw(struct net_device *ndev,
@@ -1495,21 +1503,6 @@ static int hns_nic_do_ioctl(struct net_device *netdev, struct ifreq *ifr,
 	return phy_mii_ioctl(phy_dev, ifr, cmd);
 }

-/* use only for netconsole to poll with the device without interrupt */
-#ifdef CONFIG_NET_POLL_CONTROLLER
-static void hns_nic_poll_controller(struct net_device *ndev)
-{
-	struct hns_nic_priv *priv = netdev_priv(ndev);
-	unsigned long flags;
-	int i;
-
-	local_irq_save(flags);
-	for (i = 0; i < priv->ae_handle->q_num * 2; i++)
-		napi_schedule(&priv->ring_data[i].napi);
-	local_irq_restore(flags);
-}
-#endif
-
 static netdev_tx_t hns_nic_net_xmit(struct sk_buff *skb,
 				    struct net_device *ndev)
 {
@@ -1962,9 +1955,6 @@ static const struct net_device_ops hns_nic_netdev_ops = {
 	.ndo_set_features = hns_nic_set_features,
 	.ndo_fix_features = hns_nic_fix_features,
 	.ndo_get_stats64 = hns_nic_get_stats64,
-#ifdef CONFIG_NET_POLL_CONTROLLER
-	.ndo_poll_controller = hns_nic_poll_controller,
-#endif
 	.ndo_set_rx_mode = hns_nic_set_rx_mode,
 	.ndo_select_queue = hns_nic_select_queue,
 };

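Together with the hnae_unmap_buffer() hunk above, the fill_v2_desc_hw() split separates the length used for DMA unmapping (size, recorded once in the first descriptor's control block) from the length actually sent in each hardware descriptor (send_sz). A TSO fragment larger than one BD is spread across several descriptors, only the first of which carries a non-zero unmap length, so the "else if (cb->length)" guard unmaps each buffer exactly once. A standalone sketch of the arithmetic; the BD_MAX_SEND_SIZE value is assumed for illustration:

#include <stdio.h>

#define BD_MAX_SEND_SIZE 8192	/* assumed per-descriptor limit, illustration only */

int main(void)
{
	int size = 20000;	/* bytes in one large TSO fragment */
	int frag_buf_num = (size + BD_MAX_SEND_SIZE - 1) / BD_MAX_SEND_SIZE;
	int sizeoflast = size % BD_MAX_SEND_SIZE;
	int k;

	if (sizeoflast == 0)
		sizeoflast = BD_MAX_SEND_SIZE;

	for (k = 0; k < frag_buf_num; k++) {
		int send_sz = (k == frag_buf_num - 1) ? sizeoflast : BD_MAX_SEND_SIZE;
		int unmap_len = (k == 0) ? size : 0;	/* only BD 0 is unmapped later */

		printf("BD %d: send_sz=%d unmap_len=%d\n", k, send_sz, unmap_len);
	}
	return 0;
}
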
@@ -789,23 +789,6 @@ static void hinic_get_stats64(struct net_device *netdev,
 	stats->tx_errors = nic_tx_stats->tx_dropped;
 }

-#ifdef CONFIG_NET_POLL_CONTROLLER
-static void hinic_netpoll(struct net_device *netdev)
-{
-	struct hinic_dev *nic_dev = netdev_priv(netdev);
-	int i, num_qps;
-
-	num_qps = hinic_hwdev_num_qps(nic_dev->hwdev);
-	for (i = 0; i < num_qps; i++) {
-		struct hinic_txq *txq = &nic_dev->txqs[i];
-		struct hinic_rxq *rxq = &nic_dev->rxqs[i];
-
-		napi_schedule(&txq->napi);
-		napi_schedule(&rxq->napi);
-	}
-}
-#endif
-
 static const struct net_device_ops hinic_netdev_ops = {
 	.ndo_open = hinic_open,
 	.ndo_stop = hinic_close,
@@ -818,9 +801,6 @@ static const struct net_device_ops hinic_netdev_ops = {
 	.ndo_start_xmit = hinic_xmit_frame,
 	.ndo_tx_timeout = hinic_tx_timeout,
 	.ndo_get_stats64 = hinic_get_stats64,
-#ifdef CONFIG_NET_POLL_CONTROLLER
-	.ndo_poll_controller = hinic_netpoll,
-#endif
 };

 static void netdev_features_init(struct net_device *netdev)

@@ -921,17 +921,6 @@ static int ehea_poll(struct napi_struct *napi, int budget)
 	return rx;
 }

-#ifdef CONFIG_NET_POLL_CONTROLLER
-static void ehea_netpoll(struct net_device *dev)
-{
-	struct ehea_port *port = netdev_priv(dev);
-	int i;
-
-	for (i = 0; i < port->num_def_qps; i++)
-		napi_schedule(&port->port_res[i].napi);
-}
-#endif
-
 static irqreturn_t ehea_recv_irq_handler(int irq, void *param)
 {
 	struct ehea_port_res *pr = param;
@@ -2953,9 +2942,6 @@ static const struct net_device_ops ehea_netdev_ops = {
 	.ndo_open		= ehea_open,
 	.ndo_stop		= ehea_stop,
 	.ndo_start_xmit		= ehea_start_xmit,
-#ifdef CONFIG_NET_POLL_CONTROLLER
-	.ndo_poll_controller	= ehea_netpoll,
-#endif
 	.ndo_get_stats64	= ehea_get_stats64,
 	.ndo_set_mac_address	= ehea_set_mac_addr,
 	.ndo_validate_addr	= eth_validate_addr,

@@ -2207,19 +2207,6 @@ static int ibmvnic_poll(struct napi_struct *napi, int budget)
 	return frames_processed;
 }

-#ifdef CONFIG_NET_POLL_CONTROLLER
-static void ibmvnic_netpoll_controller(struct net_device *dev)
-{
-	struct ibmvnic_adapter *adapter = netdev_priv(dev);
-	int i;
-
-	replenish_pools(netdev_priv(dev));
-	for (i = 0; i < adapter->req_rx_queues; i++)
-		ibmvnic_interrupt_rx(adapter->rx_scrq[i]->irq,
-				     adapter->rx_scrq[i]);
-}
-#endif
-
 static int wait_for_reset(struct ibmvnic_adapter *adapter)
 {
 	int rc, ret;
@@ -2292,9 +2279,6 @@ static const struct net_device_ops ibmvnic_netdev_ops = {
 	.ndo_set_mac_address	= ibmvnic_set_mac,
 	.ndo_validate_addr	= eth_validate_addr,
 	.ndo_tx_timeout		= ibmvnic_tx_timeout,
-#ifdef CONFIG_NET_POLL_CONTROLLER
-	.ndo_poll_controller	= ibmvnic_netpoll_controller,
-#endif
 	.ndo_change_mtu		= ibmvnic_change_mtu,
 	.ndo_features_check	= ibmvnic_features_check,
 };

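The hns, hinic, ehea and ibmvnic hunks all delete the same kind of code: an .ndo_poll_controller hook whose only job was to napi_schedule() every queue. After the 4.19-era netpoll rework the core can drive a NAPI driver's poll routines itself, so such hooks add nothing, and scheduling every queue from netpoll context risks tying up the CPU; dropping the hook is the whole fix. With placeholder callbacks, the resulting ops table is simply:

#include <linux/netdevice.h>
#include <linux/skbuff.h>

static int example_open(struct net_device *ndev)  { return 0; }
static int example_close(struct net_device *ndev) { return 0; }

static netdev_tx_t example_xmit(struct sk_buff *skb, struct net_device *ndev)
{
	dev_kfree_skb_any(skb);	/* placeholder: a real driver queues the frame */
	return NETDEV_TX_OK;
}

/* No .ndo_poll_controller: netpoll will poll the driver's NAPI contexts
 * directly instead of relying on a hook that merely reschedules them. */
static const struct net_device_ops example_netdev_ops = {
	.ndo_open       = example_open,
	.ndo_stop       = example_close,
	.ndo_start_xmit = example_xmit,
};
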
@@ -3196,11 +3196,13 @@ int ixgbe_poll(struct napi_struct *napi, int budget)
 		return budget;

 	/* all work done, exit the polling mode */
-	napi_complete_done(napi, work_done);
-	if (adapter->rx_itr_setting & 1)
-		ixgbe_set_itr(q_vector);
-	if (!test_bit(__IXGBE_DOWN, &adapter->state))
-		ixgbe_irq_enable_queues(adapter, BIT_ULL(q_vector->v_idx));
+	if (likely(napi_complete_done(napi, work_done))) {
+		if (adapter->rx_itr_setting & 1)
+			ixgbe_set_itr(q_vector);
+		if (!test_bit(__IXGBE_DOWN, &adapter->state))
+			ixgbe_irq_enable_queues(adapter,
+						BIT_ULL(q_vector->v_idx));
+	}

 	return min(work_done, budget - 1);
 }

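napi_complete_done() returns false when NAPI must stay scheduled (for instance while busy polling owns the context), and in that case the driver must not re-arm its interrupt. Testing the return value is the usual way to end a poll routine; a condensed sketch with the driver-specific pieces replaced by placeholders:

#include <linux/netdevice.h>

static int example_clean_rings(struct napi_struct *napi, int budget)
{
	return 0;	/* placeholder: a real driver cleans its TX/RX rings here */
}

static void example_enable_irq(struct napi_struct *napi)
{
	/* placeholder: a real driver re-arms its queue interrupt here */
}

static int example_poll(struct napi_struct *napi, int budget)
{
	int work_done = example_clean_rings(napi, budget);

	if (work_done == budget)
		return budget;	/* more work pending: stay scheduled */

	/* Re-enable the queue interrupt only if NAPI really completed. */
	if (likely(napi_complete_done(napi, work_done)))
		example_enable_irq(napi);

	return min(work_done, budget - 1);
}
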
@@ -1725,7 +1725,7 @@ static void mvpp2_txq_desc_put(struct mvpp2_tx_queue *txq)
 }

 /* Set Tx descriptors fields relevant for CSUM calculation */
-static u32 mvpp2_txq_desc_csum(int l3_offs, int l3_proto,
+static u32 mvpp2_txq_desc_csum(int l3_offs, __be16 l3_proto,
 			       int ip_hdr_len, int l4_proto)
 {
 	u32 command;
@@ -2600,14 +2600,15 @@ static u32 mvpp2_skb_tx_csum(struct mvpp2_port *port, struct sk_buff *skb)
 	if (skb->ip_summed == CHECKSUM_PARTIAL) {
 		int ip_hdr_len = 0;
 		u8 l4_proto;
+		__be16 l3_proto = vlan_get_protocol(skb);

-		if (skb->protocol == htons(ETH_P_IP)) {
+		if (l3_proto == htons(ETH_P_IP)) {
 			struct iphdr *ip4h = ip_hdr(skb);

 			/* Calculate IPv4 checksum and L4 checksum */
 			ip_hdr_len = ip4h->ihl;
 			l4_proto = ip4h->protocol;
-		} else if (skb->protocol == htons(ETH_P_IPV6)) {
+		} else if (l3_proto == htons(ETH_P_IPV6)) {
 			struct ipv6hdr *ip6h = ipv6_hdr(skb);

 			/* Read l4_protocol from one of IPv6 extra headers */
@@ -2619,7 +2620,7 @@ static u32 mvpp2_skb_tx_csum(struct mvpp2_port *port, struct sk_buff *skb)
 		}

 		return mvpp2_txq_desc_csum(skb_network_offset(skb),
-					   skb->protocol, ip_hdr_len, l4_proto);
+					   l3_proto, ip_hdr_len, l4_proto);
 	}

 	return MVPP2_TXD_L4_CSUM_NOT | MVPP2_TXD_IP_CSUM_DISABLE;

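On VLAN-tagged traffic skb->protocol holds the 802.1Q ethertype rather than the encapsulated L3 protocol, so keying the TX checksum descriptor on it broke checksum offload for tagged frames; vlan_get_protocol() returns the inner ethertype in that case. A simplified sketch of the selection logic (IPv4/IPv6 only, helper name made up):

#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <linux/ipv6.h>

/* Returns the L4 protocol number the hardware should checksum, or -1. */
static int example_tx_csum_l4_proto(struct sk_buff *skb)
{
	__be16 l3_proto = vlan_get_protocol(skb);	/* inner type, even when tagged */

	if (l3_proto == htons(ETH_P_IP))
		return ip_hdr(skb)->protocol;
	if (l3_proto == htons(ETH_P_IPV6))
		return ipv6_hdr(skb)->nexthdr;
	return -1;	/* no offload for other protocols */
}
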
@@ -54,6 +54,7 @@
 #include "en_stats.h"
 #include "en/fs.h"

+extern const struct net_device_ops mlx5e_netdev_ops;
 struct page_pool;

 #define MLX5E_METADATA_ETHER_TYPE (0x8CE4)

@@ -16,6 +16,8 @@ struct mlx5e_tc_table {

 	DECLARE_HASHTABLE(mod_hdr_tbl, 8);
 	DECLARE_HASHTABLE(hairpin_tbl, 8);
+
+	struct notifier_block netdevice_nb;
 };

 struct mlx5e_flow_table {

@@ -4315,7 +4315,7 @@ static int mlx5e_xdp(struct net_device *dev, struct netdev_bpf *xdp)
 	}
 }

-static const struct net_device_ops mlx5e_netdev_ops = {
+const struct net_device_ops mlx5e_netdev_ops = {
 	.ndo_open                = mlx5e_open,
 	.ndo_stop                = mlx5e_close,
 	.ndo_start_xmit          = mlx5e_xmit,

@@ -1368,6 +1368,9 @@ static int __parse_cls_flower(struct mlx5e_priv *priv,

 			*match_level = MLX5_MATCH_L2;
 		}
+	} else {
+		MLX5_SET(fte_match_set_lyr_2_4, headers_c, svlan_tag, 1);
+		MLX5_SET(fte_match_set_lyr_2_4, headers_c, cvlan_tag, 1);
 	}

 	if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_CVLAN)) {
@@ -2946,14 +2949,71 @@ int mlx5e_stats_flower(struct mlx5e_priv *priv,
 	return 0;
 }

+static void mlx5e_tc_hairpin_update_dead_peer(struct mlx5e_priv *priv,
+					      struct mlx5e_priv *peer_priv)
+{
+	struct mlx5_core_dev *peer_mdev = peer_priv->mdev;
+	struct mlx5e_hairpin_entry *hpe;
+	u16 peer_vhca_id;
+	int bkt;
+
+	if (!same_hw_devs(priv, peer_priv))
+		return;
+
+	peer_vhca_id = MLX5_CAP_GEN(peer_mdev, vhca_id);
+
+	hash_for_each(priv->fs.tc.hairpin_tbl, bkt, hpe, hairpin_hlist) {
+		if (hpe->peer_vhca_id == peer_vhca_id)
+			hpe->hp->pair->peer_gone = true;
+	}
+}
+
+static int mlx5e_tc_netdev_event(struct notifier_block *this,
+				 unsigned long event, void *ptr)
+{
+	struct net_device *ndev = netdev_notifier_info_to_dev(ptr);
+	struct mlx5e_flow_steering *fs;
+	struct mlx5e_priv *peer_priv;
+	struct mlx5e_tc_table *tc;
+	struct mlx5e_priv *priv;
+
+	if (ndev->netdev_ops != &mlx5e_netdev_ops ||
+	    event != NETDEV_UNREGISTER ||
+	    ndev->reg_state == NETREG_REGISTERED)
+		return NOTIFY_DONE;
+
+	tc = container_of(this, struct mlx5e_tc_table, netdevice_nb);
+	fs = container_of(tc, struct mlx5e_flow_steering, tc);
+	priv = container_of(fs, struct mlx5e_priv, fs);
+	peer_priv = netdev_priv(ndev);
+	if (priv == peer_priv ||
+	    !(priv->netdev->features & NETIF_F_HW_TC))
+		return NOTIFY_DONE;
+
+	mlx5e_tc_hairpin_update_dead_peer(priv, peer_priv);
+
+	return NOTIFY_DONE;
+}
+
 int mlx5e_tc_nic_init(struct mlx5e_priv *priv)
 {
 	struct mlx5e_tc_table *tc = &priv->fs.tc;
+	int err;

 	hash_init(tc->mod_hdr_tbl);
 	hash_init(tc->hairpin_tbl);

-	return rhashtable_init(&tc->ht, &tc_ht_params);
+	err = rhashtable_init(&tc->ht, &tc_ht_params);
+	if (err)
+		return err;
+
+	tc->netdevice_nb.notifier_call = mlx5e_tc_netdev_event;
+	if (register_netdevice_notifier(&tc->netdevice_nb)) {
+		tc->netdevice_nb.notifier_call = NULL;
+		mlx5_core_warn(priv->mdev, "Failed to register netdev notifier\n");
+	}
+
+	return err;
 }

 static void _mlx5e_tc_del_flow(void *ptr, void *arg)
@@ -2969,6 +3029,9 @@ void mlx5e_tc_nic_cleanup(struct mlx5e_priv *priv)
 {
 	struct mlx5e_tc_table *tc = &priv->fs.tc;

+	if (tc->netdevice_nb.notifier_call)
+		unregister_netdevice_notifier(&tc->netdevice_nb);
+
 	rhashtable_free_and_destroy(&tc->ht, _mlx5e_tc_del_flow, NULL);

 	if (!IS_ERR_OR_NULL(tc->t)) {

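The notifier_block embedded in the TC table has no private data pointer, so mlx5e_tc_netdev_event() climbs back to the owning mlx5e_priv with three container_of() steps (notifier -> tc table -> flow steering -> priv); exporting mlx5e_netdev_ops in the en.h/en_main.c hunks above is what lets it recognise other mlx5e netdevs simply by comparing ops pointers. The container_of() walk itself is generic; a small self-contained illustration with simplified struct names:

#include <stdio.h>
#include <stddef.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct notifier_block { int priority; };

struct tc_table      { struct notifier_block netdevice_nb; };
struct flow_steering { struct tc_table tc; };
struct priv          { int id; struct flow_steering fs; };

static int netdev_event(struct notifier_block *nb)
{
	/* Walk from the innermost embedded member back out to the owner. */
	struct tc_table *tc      = container_of(nb, struct tc_table, netdevice_nb);
	struct flow_steering *fs = container_of(tc, struct flow_steering, tc);
	struct priv *p           = container_of(fs, struct priv, fs);

	printf("notifier belongs to priv %d\n", p->id);
	return 0;
}

int main(void)
{
	struct priv p = { .id = 42 };

	netdev_event(&p.fs.tc.netdevice_nb);	/* prints: notifier belongs to priv 42 */
	return 0;
}
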
@@ -2000,7 +2000,7 @@ static u32 calculate_vports_min_rate_divider(struct mlx5_eswitch *esw)
 	u32 max_guarantee = 0;
 	int i;

-	for (i = 0; i <= esw->total_vports; i++) {
+	for (i = 0; i < esw->total_vports; i++) {
 		evport = &esw->vports[i];
 		if (!evport->enabled || evport->info.min_rate < max_guarantee)
 			continue;
@@ -2020,7 +2020,7 @@ static int normalize_vports_min_rate(struct mlx5_eswitch *esw, u32 divider)
 	int err;
 	int i;

-	for (i = 0; i <= esw->total_vports; i++) {
+	for (i = 0; i < esw->total_vports; i++) {
 		evport = &esw->vports[i];
 		if (!evport->enabled)
 			continue;

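esw->vports[] holds esw->total_vports entries, so iterating with "<=" read one element past the end of the array; both rate helpers now stop at "<". The bound in miniature:

#include <stdio.h>

int main(void)
{
	unsigned int total_vports = 4;
	int min_rate[4] = { 10, 20, 30, 40 };
	unsigned int i, sum = 0;

	/* "i <= total_vports" would also read min_rate[4], one past the end. */
	for (i = 0; i < total_vports; i++)
		sum += min_rate[i];

	printf("sum=%u\n", sum);	/* prints: sum=100 */
	return 0;
}
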
Some files were not shown because too many files have changed in this diff.