Merge git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net

Resolved kernel/bpf/btf.c using instructions from merge commit
69138b34a7

Signed-off-by: David S. Miller <davem@davemloft.net>
David S. Miller 2020-08-02 01:02:12 -07:00
commit bd0b33b248
207 changed files with 1700 additions and 836 deletions

View File

@ -16,7 +16,16 @@ Description: Allow the root user to disable/enable in runtime the clock
gating mechanism in Gaudi. Due to how Gaudi is built, the
clock gating needs to be disabled in order to access the
registers of the TPC and MME engines. This is sometimes needed
during debug by the user and hence the user needs this option
during debug by the user and hence the user needs this option.
The user can supply a bitmask value in which each bit represents
a different engine whose clock gating feature is to be
disabled/enabled. The bitmask is composed of 20 bits:
0 - 7 : DMA channels
8 - 11 : MME engines
12 - 19 : TPC engines
The bit position of a specific engine can be determined
using (1 << GAUDI_ENGINE_ID_*). The GAUDI_ENGINE_ID_* values
are defined in the uapi habanalabs.h file, in enum gaudi_engine_id.
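
As a worked illustration of the bitmask layout described above, here is a hypothetical userspace sketch; the enum values simply mirror the bit ranges the text states for enum gaudi_engine_id in the uapi habanalabs.h file:

	/* Build a mask that disables clock gating for DMA channel 0,
	 * MME engine 0 and TPC engine 3. The enum values mirror the
	 * bit layout documented above (enum gaudi_engine_id).
	 */
	#include <stdint.h>

	enum gaudi_engine_id {
		GAUDI_ENGINE_ID_DMA_0 = 0,	/* DMA channels: bits 0 - 7   */
		GAUDI_ENGINE_ID_MME_0 = 8,	/* MME engines:  bits 8 - 11  */
		GAUDI_ENGINE_ID_TPC_3 = 15,	/* TPC engines:  bits 12 - 19 */
	};

	uint32_t gaudi_clk_gate_mask(void)
	{
		uint32_t mask = 0;

		mask |= 1u << GAUDI_ENGINE_ID_DMA_0;
		mask |= 1u << GAUDI_ENGINE_ID_MME_0;
		mask |= 1u << GAUDI_ENGINE_ID_TPC_3;
		return mask;	/* 0x8101: write this value to the debugfs file */
	}
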
What: /sys/kernel/debug/habanalabs/hl<n>/command_buffers
Date: Jan 2019

View File

@ -47,6 +47,9 @@ properties:
$ref: /schemas/types.yaml#/definitions/phandle-array
description: Phandle to the device SRAM
iommus:
maxItems: 1
memory-region:
description:
CMA pool to use for buffers allocation instead of the default

View File

@ -8,9 +8,8 @@ There are various L3 encapsulation standards using UDP being discussed to
leverage the UDP based load balancing capability of different networks.
MPLSoUDP (__ https://tools.ietf.org/html/rfc7510) is one among them.
The Bareudp tunnel module provides a generic L3 encapsulation tunnelling
support for tunnelling different L3 protocols like MPLS, IP, NSH etc. inside
a UDP tunnel.
The Bareudp tunnel module provides a generic L3 encapsulation support for
tunnelling different L3 protocols like MPLS, IP, NSH etc. inside a UDP tunnel.
Special Handling
----------------

View File

@ -486,6 +486,10 @@ narrow. The description of these groups must be added to the following table:
- Contains packet traps for packets that should be locally delivered after
routing, but do not match more specific packet traps (e.g.,
``ipv4_bgp``)
* - ``external_delivery``
- Contains packet traps for packets that should be routed through an
external interface (e.g., management interface) that does not belong to
the same device (e.g., switch ASIC) as the ingress interface
* - ``ipv6``
- Contains packet traps for various IPv6 control packets (e.g., Router
Advertisements)

View File

@ -782,7 +782,7 @@ F: include/dt-bindings/reset/altr,rst-mgr-a10sr.h
F: include/linux/mfd/altera-a10sr.h
ALTERA TRIPLE SPEED ETHERNET DRIVER
M: Thor Thayer <thor.thayer@linux.intel.com>
M: Joyce Ooi <joyce.ooi@intel.com>
L: netdev@vger.kernel.org
S: Maintained
F: drivers/net/ethernet/altera/
@ -1425,7 +1425,7 @@ F: arch/arm*/include/asm/perf_event.h
F: arch/arm*/kernel/hw_breakpoint.c
F: arch/arm*/kernel/perf_*
F: arch/arm/oprofile/common.c
F: drivers/perf/*
F: drivers/perf/
F: include/linux/perf/arm_pmu.h
ARM PORT
@ -14191,7 +14191,8 @@ F: Documentation/devicetree/bindings/net/qcom,ethqos.txt
F: drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c
QUALCOMM GENERIC INTERFACE I2C DRIVER
M: Alok Chauhan <alokc@codeaurora.org>
M: Akash Asthana <akashast@codeaurora.org>
M: Mukesh Savaliya <msavaliy@codeaurora.org>
L: linux-i2c@vger.kernel.org
L: linux-arm-msm@vger.kernel.org
S: Supported

View File

@ -2,7 +2,7 @@
VERSION = 5
PATCHLEVEL = 8
SUBLEVEL = 0
EXTRAVERSION = -rc6
EXTRAVERSION = -rc7
NAME = Kleptomaniac Octopus
# *DOCUMENTATION*
@ -568,7 +568,7 @@ ifneq ($(shell $(CC) --version 2>&1 | head -n 1 | grep clang),)
ifneq ($(CROSS_COMPILE),)
CLANG_FLAGS += --target=$(notdir $(CROSS_COMPILE:%-=%))
GCC_TOOLCHAIN_DIR := $(dir $(shell which $(CROSS_COMPILE)elfedit))
CLANG_FLAGS += --prefix=$(GCC_TOOLCHAIN_DIR)
CLANG_FLAGS += --prefix=$(GCC_TOOLCHAIN_DIR)$(notdir $(CROSS_COMPILE))
GCC_TOOLCHAIN := $(realpath $(GCC_TOOLCHAIN_DIR)/..)
endif
ifneq ($(GCC_TOOLCHAIN),)
@ -1769,7 +1769,7 @@ PHONY += descend $(build-dirs)
descend: $(build-dirs)
$(build-dirs): prepare
$(Q)$(MAKE) $(build)=$@ \
single-build=$(if $(filter-out $@/, $(filter $@/%, $(single-no-ko))),1) \
single-build=$(if $(filter-out $@/, $(filter $@/%, $(KBUILD_SINGLE_TARGETS))),1) \
need-builtin=1 need-modorder=1
clean-dirs := $(addprefix _clean_, $(clean-dirs))

View File

@ -342,7 +342,8 @@ gateclk: clock-gating-control@18220 {
comphy: phy@18300 {
compatible = "marvell,armada-380-comphy";
reg = <0x18300 0x100>;
reg-names = "comphy", "conf";
reg = <0x18300 0x100>, <0x18460 4>;
#address-cells = <1>;
#size-cells = <0>;

View File

@ -397,7 +397,7 @@ MX6QDL_PAD_SD4_DAT1__PWM3_OUT 0x1b0b1
pinctrl_usbotg: usbotggrp {
fsl,pins = <
MX6QDL_PAD_GPIO_1__USB_OTG_ID 0x17059
MX6QDL_PAD_ENET_RX_ER__USB_OTG_ID 0x17059
>;
};
@ -409,6 +409,7 @@ MX6QDL_PAD_SD1_DAT0__SD1_DATA0 0x17070
MX6QDL_PAD_SD1_DAT1__SD1_DATA1 0x17070
MX6QDL_PAD_SD1_DAT2__SD1_DATA2 0x17070
MX6QDL_PAD_SD1_DAT3__SD1_DATA3 0x17070
MX6QDL_PAD_GPIO_1__GPIO1_IO01 0x1b0b0
>;
};

View File

@ -99,7 +99,7 @@ ethphy1: ethernet-phy@1 {
&fec2 {
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_enet2>;
phy-mode = "rgmii";
phy-mode = "rgmii-id";
phy-handle = <&ethphy0>;
fsl,magic-packet;
status = "okay";

View File

@ -213,7 +213,7 @@ ethphy2: ethernet-phy@2 {
&fec2 {
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_enet2>;
phy-mode = "rgmii";
phy-mode = "rgmii-id";
phy-handle = <&ethphy2>;
status = "okay";
};

View File

@ -402,7 +402,7 @@ ethphy0: ethernet-phy@0 {
&gbe0 {
phy-handle = <&ethphy0>;
phy-mode = "rgmii-id";
phy-mode = "rgmii-rxid";
status = "okay";
};

View File

@ -198,7 +198,7 @@ reserved-memory {
default-pool {
compatible = "shared-dma-pool";
size = <0x6000000>;
alloc-ranges = <0x4a000000 0x6000000>;
alloc-ranges = <0x40000000 0x10000000>;
reusable;
linux,cma-default;
};

View File

@ -117,7 +117,7 @@ reserved-memory {
default-pool {
compatible = "shared-dma-pool";
size = <0x6000000>;
alloc-ranges = <0x4a000000 0x6000000>;
alloc-ranges = <0x40000000 0x10000000>;
reusable;
linux,cma-default;
};

View File

@ -181,7 +181,7 @@ reserved-memory {
default-pool {
compatible = "shared-dma-pool";
size = <0x6000000>;
alloc-ranges = <0x4a000000 0x6000000>;
alloc-ranges = <0x40000000 0x10000000>;
reusable;
linux,cma-default;
};

View File

@ -5,6 +5,8 @@
#ifndef _ASM_ARM_PERCPU_H_
#define _ASM_ARM_PERCPU_H_
#include <asm/thread_info.h>
/*
* Same as asm-generic/percpu.h, except that we store the per cpu offset
* in the TPIDRPRW. TPIDRPRW only exists on V6K and V7

View File

@ -683,6 +683,12 @@ static void disable_single_step(struct perf_event *bp)
arch_install_hw_breakpoint(bp);
}
static int watchpoint_fault_on_uaccess(struct pt_regs *regs,
struct arch_hw_breakpoint *info)
{
return !user_mode(regs) && info->ctrl.privilege == ARM_BREAKPOINT_USER;
}
static void watchpoint_handler(unsigned long addr, unsigned int fsr,
struct pt_regs *regs)
{
@ -742,16 +748,27 @@ static void watchpoint_handler(unsigned long addr, unsigned int fsr,
}
pr_debug("watchpoint fired: address = 0x%x\n", info->trigger);
/*
* If we triggered a user watchpoint from a uaccess routine,
* then handle the stepping ourselves since userspace really
* can't help us with this.
*/
if (watchpoint_fault_on_uaccess(regs, info))
goto step;
perf_bp_event(wp, regs);
/*
* If no overflow handler is present, insert a temporary
* mismatch breakpoint so we can single-step over the
* watchpoint trigger.
* Defer stepping to the overflow handler if one is installed.
* Otherwise, insert a temporary mismatch breakpoint so that
* we can single-step over the watchpoint trigger.
*/
if (is_default_overflow_handler(wp))
enable_single_step(wp, instruction_pointer(regs));
if (!is_default_overflow_handler(wp))
goto unlock;
step:
enable_single_step(wp, instruction_pointer(regs));
unlock:
rcu_read_unlock();
}

View File

@ -184,6 +184,7 @@ static void __init patch_vdso(void *ehdr)
if (!cntvct_ok) {
vdso_nullpatch_one(&einfo, "__vdso_gettimeofday");
vdso_nullpatch_one(&einfo, "__vdso_clock_gettime");
vdso_nullpatch_one(&einfo, "__vdso_clock_gettime64");
}
}

View File

@ -966,7 +966,7 @@ void __init create_mapping_late(struct mm_struct *mm, struct map_desc *md,
pud_t *pud;
p4d = p4d_alloc(mm, pgd_offset(mm, md->virtual), md->virtual);
if (!WARN_ON(!p4d))
if (WARN_ON(!p4d))
return;
pud = pud_alloc(mm, p4d, md->virtual);
if (WARN_ON(!pud))

View File

@ -161,6 +161,7 @@ video-codec@1c0e000 {
resets = <&ccu RST_BUS_VE>;
interrupts = <GIC_SPI 89 IRQ_TYPE_LEVEL_HIGH>;
allwinner,sram = <&ve_sram 1>;
iommus = <&iommu 3>;
};
gpu: gpu@1800000 {

View File

@ -77,9 +77,9 @@ static inline void apply_alternatives_module(void *start, size_t length) { }
"663:\n\t" \
newinstr "\n" \
"664:\n\t" \
".previous\n\t" \
".org . - (664b-663b) + (662b-661b)\n\t" \
".org . - (662b-661b) + (664b-663b)\n" \
".org . - (662b-661b) + (664b-663b)\n\t" \
".previous\n" \
".endif\n"
#define __ALTERNATIVE_CFG_CB(oldinstr, feature, cfg_enabled, cb) \

View File

@ -24,16 +24,17 @@ static inline __sum16 ip_fast_csum(const void *iph, unsigned int ihl)
{
__uint128_t tmp;
u64 sum;
int n = ihl; /* we want it signed */
tmp = *(const __uint128_t *)iph;
iph += 16;
ihl -= 4;
n -= 4;
tmp += ((tmp >> 64) | (tmp << 64));
sum = tmp >> 64;
do {
sum += *(const u32 *)iph;
iph += 4;
} while (--ihl);
} while (--n > 0);
sum += ((sum >> 32) | (sum << 32));
return csum_fold((__force u32)(sum >> 32));
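
The signed counter matters for malformed headers: ihl is an unsigned parameter, so when ihl < 5 the subtraction above wraps around and the old do/while walks far past the 20-to-60-byte header. A minimal userspace illustration of the two loop conditions (hypothetical values, not the kernel routine):

	#include <stdio.h>

	int main(void)
	{
		unsigned int ihl = 4;	/* malformed: a valid IPv4 header has ihl >= 5 */
		int n = ihl;		/* the fix: keep a signed copy */

		ihl -= 4;	/* 0; the old "while (--ihl)" then wraps to UINT_MAX
				 * and keeps summing garbage past the header */
		n -= 4;		/* 0; the new "--n > 0" test stops after one pass */

		printf("unsigned: %u -> --ihl gives %u\n", ihl, ihl - 1);
		printf("signed:   %d -> --n   gives %d\n", n, n - 1);
		return 0;
	}
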

View File

@ -30,7 +30,6 @@
#include <linux/threads.h>
#include <linux/cpumask.h>
#include <linux/thread_info.h>
#include <asm/pointer_auth.h>
DECLARE_PER_CPU_READ_MOSTLY(int, cpu_number);

View File

@ -212,6 +212,8 @@ atomic64_set(atomic64_t *v, s64 i)
_atomic_spin_unlock_irqrestore(v, flags);
}
#define atomic64_set_release(v, i) atomic64_set((v), (i))
static __inline__ s64
atomic64_read(const atomic64_t *v)
{

View File

@ -60,6 +60,7 @@ extern void __cmpxchg_called_with_bad_pointer(void);
extern unsigned long __cmpxchg_u32(volatile unsigned int *m, unsigned int old,
unsigned int new_);
extern u64 __cmpxchg_u64(volatile u64 *ptr, u64 old, u64 new_);
extern u8 __cmpxchg_u8(volatile u8 *ptr, u8 old, u8 new_);
/* don't worry...optimizer will get rid of most of this */
static inline unsigned long
@ -71,6 +72,7 @@ __cmpxchg(volatile void *ptr, unsigned long old, unsigned long new_, int size)
#endif
case 4: return __cmpxchg_u32((unsigned int *)ptr,
(unsigned int)old, (unsigned int)new_);
case 1: return __cmpxchg_u8((u8 *)ptr, (u8)old, (u8)new_);
}
__cmpxchg_called_with_bad_pointer();
return old;

View File

@ -79,3 +79,15 @@ unsigned long __cmpxchg_u32(volatile unsigned int *ptr, unsigned int old, unsign
_atomic_spin_unlock_irqrestore(ptr, flags);
return (unsigned long)prev;
}
u8 __cmpxchg_u8(volatile u8 *ptr, u8 old, u8 new)
{
unsigned long flags;
u8 prev;
_atomic_spin_lock_irqsave(ptr, flags);
if ((prev = *ptr) == old)
*ptr = new;
_atomic_spin_unlock_irqrestore(ptr, flags);
return prev;
}
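
The new byte-sized helper follows the same pattern as the existing __cmpxchg_u32()/__cmpxchg_u64() emulations, and the case 1: added to __cmpxchg() above routes byte-wide cmpxchg() calls to it. A userspace sketch of the compare-and-exchange contract being emulated (a mock-up; the kernel version takes a hashed spinlock keyed on the pointer, not one global lock):

	#include <pthread.h>
	#include <stdint.h>

	static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

	/* Returns the previous value; the store happens only when the
	 * caller's expected value matched, so prev != old means "retry".
	 */
	uint8_t cmpxchg_u8(volatile uint8_t *ptr, uint8_t old, uint8_t new)
	{
		uint8_t prev;

		pthread_mutex_lock(&lock);
		prev = *ptr;
		if (prev == old)
			*ptr = new;
		pthread_mutex_unlock(&lock);
		return prev;
	}
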

View File

@ -3072,10 +3072,18 @@ do_hash_page:
ori r0,r0,DSISR_BAD_FAULT_64S@l
and. r0,r5,r0 /* weird error? */
bne- handle_page_fault /* if not, try to insert a HPTE */
/*
* If we are in an "NMI" (e.g., an interrupt when soft-disabled), then
* don't call hash_page, just fail the fault. This is required to
* prevent re-entrancy problems in the hash code, namely perf
* interrupts hitting while something holds H_PAGE_BUSY, and taking a
* hash fault. See the comment in hash_preload().
*/
ld r11, PACA_THREAD_INFO(r13)
lwz r0,TI_PREEMPT(r11) /* If we're in an "NMI" */
andis. r0,r0,NMI_MASK@h /* (i.e. an irq when soft-disabled) */
bne 77f /* then don't call hash_page now */
lwz r0,TI_PREEMPT(r11)
andis. r0,r0,NMI_MASK@h
bne 77f
/*
* r3 contains the trap number

View File

@ -1559,6 +1559,7 @@ static void hash_preload(struct mm_struct *mm, pte_t *ptep, unsigned long ea,
pgd_t *pgdir;
int rc, ssize, update_flags = 0;
unsigned long access = _PAGE_PRESENT | _PAGE_READ | (is_exec ? _PAGE_EXEC : 0);
unsigned long flags;
BUG_ON(get_region_id(ea) != USER_REGION_ID);
@ -1592,6 +1593,28 @@ static void hash_preload(struct mm_struct *mm, pte_t *ptep, unsigned long ea,
return;
#endif /* CONFIG_PPC_64K_PAGES */
/*
* __hash_page_* must run with interrupts off, as it sets the
* H_PAGE_BUSY bit. It's possible for perf interrupts to hit at any
* time and may take a hash fault reading the user stack, see
* read_user_stack_slow() in the powerpc/perf code.
*
* If that takes a hash fault on the same page as we lock here, it
* will bail out when seeing H_PAGE_BUSY set, and retry the access
* leading to an infinite loop.
*
* Disabling interrupts here does not prevent perf interrupts, but it
* will prevent them taking hash faults (see the NMI test in
* do_hash_page), then read_user_stack's copy_from_user_nofault will
* fail and perf will fall back to read_user_stack_slow(), which
* walks the Linux page tables.
*
* Interrupts must also be off for the duration of the
* mm_is_thread_local test and update, to prevent preempt running the
* mm on another CPU (XXX: this may be racy vs kthread_use_mm).
*/
local_irq_save(flags);
/* Is that local to this CPU ? */
if (mm_is_thread_local(mm))
update_flags |= HPTE_LOCAL_UPDATE;
@ -1614,6 +1637,8 @@ static void hash_preload(struct mm_struct *mm, pte_t *ptep, unsigned long ea,
mm_ctx_user_psize(&mm->context),
mm_ctx_user_psize(&mm->context),
pte_val(*ptep));
local_irq_restore(flags);
}
/*

View File

@ -2179,6 +2179,12 @@ static void __perf_event_interrupt(struct pt_regs *regs)
perf_read_regs(regs);
/*
* If perf interrupts hit in a local_irq_disable (soft-masked) region,
* we consider them as NMIs. This is required to prevent hash faults on
* user addresses when reading callchains. See the NMI test in
* do_hash_page.
*/
nmi = perf_intr_is_nmi(regs);
if (nmi)
nmi_enter();

View File

@ -12,6 +12,7 @@ extern void pgd_free(struct mm_struct *mm, pgd_t *pgd);
extern void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd);
extern pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address);
extern void pmd_free(struct mm_struct *mm, pmd_t *pmd);
#define __pmd_free_tlb(tlb, pmdp, addr) pmd_free((tlb)->mm, (pmdp))
#endif
static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd,
@ -33,13 +34,4 @@ do { \
tlb_remove_page((tlb), (pte)); \
} while (0)
#if CONFIG_PGTABLE_LEVELS > 2
#define __pmd_free_tlb(tlb, pmdp, addr) \
do { \
struct page *page = virt_to_page(pmdp); \
pgtable_pmd_page_dtor(page); \
tlb_remove_page((tlb), page); \
} while (0);
#endif
#endif /* __ASM_SH_PGALLOC_H */

View File

@ -199,7 +199,7 @@ syscall_trace_entry:
mov.l @(OFF_R7,r15), r7 ! arg3
mov.l @(OFF_R3,r15), r3 ! syscall_nr
!
mov.l 2f, r10 ! Number of syscalls
mov.l 6f, r10 ! Number of syscalls
cmp/hs r10, r3
bf syscall_call
mov #-ENOSYS, r0
@ -353,7 +353,7 @@ ENTRY(system_call)
tst r9, r8
bf syscall_trace_entry
!
mov.l 2f, r8 ! Number of syscalls
mov.l 6f, r8 ! Number of syscalls
cmp/hs r8, r3
bt syscall_badsys
!
@ -392,7 +392,7 @@ syscall_exit:
#if !defined(CONFIG_CPU_SH2)
1: .long TRA
#endif
2: .long NR_syscalls
6: .long NR_syscalls
3: .long sys_call_table
7: .long do_syscall_trace_enter
8: .long do_syscall_trace_leave

View File

@ -947,7 +947,7 @@ enum lru_status binder_alloc_free_page(struct list_head *item,
trace_binder_unmap_user_end(alloc, index);
}
mmap_read_unlock(mm);
mmput(mm);
mmput_async(mm);
trace_binder_unmap_kernel_start(alloc, index);

View File

@ -433,9 +433,15 @@ static int atmtcp_remove_persistent(int itf)
return -EMEDIUMTYPE;
}
dev_data = PRIV(dev);
if (!dev_data->persist) return 0;
if (!dev_data->persist) {
atm_dev_put(dev);
return 0;
}
dev_data->persist = 0;
if (PRIV(dev)->vcc) return 0;
if (PRIV(dev)->vcc) {
atm_dev_put(dev);
return 0;
}
kfree(dev_data);
atm_dev_put(dev);
atm_dev_deregister(dev);

View File

@ -721,7 +721,7 @@ struct fwnode_handle *device_get_next_child_node(struct device *dev,
return next;
/* When no more children in primary, continue with secondary */
if (!IS_ERR_OR_NULL(fwnode->secondary))
if (fwnode && !IS_ERR_OR_NULL(fwnode->secondary))
next = fwnode_get_next_child_node(fwnode->secondary, child);
return next;

View File

@ -814,7 +814,8 @@ static struct inode *devmem_inode;
#ifdef CONFIG_IO_STRICT_DEVMEM
void revoke_devmem(struct resource *res)
{
struct inode *inode = READ_ONCE(devmem_inode);
/* pairs with smp_store_release() in devmem_init_inode() */
struct inode *inode = smp_load_acquire(&devmem_inode);
/*
* Check that the initialization has completed. Losing the race
@ -1028,8 +1029,11 @@ static int devmem_init_inode(void)
return rc;
}
/* publish /dev/mem initialized */
WRITE_ONCE(devmem_inode, inode);
/*
* Publish /dev/mem initialized.
* Pairs with smp_load_acquire() in revoke_devmem().
*/
smp_store_release(&devmem_inode, inode);
return 0;
}
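
The upgrade from READ_ONCE()/WRITE_ONCE() to an acquire/release pair is what guarantees the reader sees a fully initialized inode, not just a non-NULL pointer. A minimal sketch of the same publish pattern using C11 atomics (illustrative; the kernel primitives differ in spelling, not semantics):

	#include <stdatomic.h>
	#include <stddef.h>

	struct obj { int data; };

	static _Atomic(struct obj *) published;	/* plays the devmem_inode role */

	void publish(struct obj *o)
	{
		o->data = 42;				/* all initialization first... */
		atomic_store_explicit(&published, o,
				      memory_order_release);	/* ...then publish */
	}

	int consume(void)
	{
		struct obj *o = atomic_load_explicit(&published,
						     memory_order_acquire);

		return o ? o->data : -1;	/* non-NULL implies data == 42 */
	}
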

View File

@ -1277,6 +1277,7 @@ void add_interrupt_randomness(int irq, int irq_flags)
fast_mix(fast_pool);
add_interrupt_bench(cycles);
this_cpu_add(net_rand_state.s1, fast_pool->pool[cycles & 3]);
if (unlikely(crng_init == 0)) {
if ((fast_pool->count >= 64) &&

View File

@ -605,8 +605,10 @@ static int fw_cfg_register_file(const struct fw_cfg_file *f)
/* register entry under "/sys/firmware/qemu_fw_cfg/by_key/" */
err = kobject_init_and_add(&entry->kobj, &fw_cfg_sysfs_entry_ktype,
fw_cfg_sel_ko, "%d", entry->select);
if (err)
goto err_register;
if (err) {
kobject_put(&entry->kobj);
return err;
}
/* add raw binary content access */
err = sysfs_create_bin_file(&entry->kobj, &fw_cfg_sysfs_attr_raw);
@ -622,7 +624,6 @@ static int fw_cfg_register_file(const struct fw_cfg_file *f)
err_add_raw:
kobject_del(&entry->kobj);
err_register:
kfree(entry);
return err;
}
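
This follows the documented kobject lifetime rule: once kobject_init_and_add() has run, the object may only be freed through kobject_put(), which drops the reference and lets the ktype's release callback do the kfree(); a direct kfree() (as the removed err_register path did) leaks the name that kobject_init_and_add() may already have allocated. A kernel-style sketch of the rule, with hypothetical my_entry/my_ktype names:

	#include <linux/kobject.h>
	#include <linux/slab.h>

	struct my_entry {
		struct kobject kobj;
		int select;
	};

	static void my_entry_release(struct kobject *kobj)
	{
		kfree(container_of(kobj, struct my_entry, kobj));	/* the only kfree */
	}

	static struct kobj_type my_ktype = {
		.release = my_entry_release,
	};

	static int my_register(struct my_entry *entry, struct kobject *parent)
	{
		int err = kobject_init_and_add(&entry->kobj, &my_ktype, parent,
					       "%d", entry->select);
		if (err) {
			kobject_put(&entry->kobj);	/* unwinds via my_entry_release() */
			return err;
		}
		return 0;
	}
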

View File

@ -83,7 +83,8 @@ int __afu_port_disable(struct platform_device *pdev)
* on this port and minimum soft reset pulse width has elapsed.
* Driver polls port_soft_reset_ack to determine if reset done by HW.
*/
if (readq_poll_timeout(base + PORT_HDR_CTRL, v, v & PORT_CTRL_SFTRST,
if (readq_poll_timeout(base + PORT_HDR_CTRL, v,
v & PORT_CTRL_SFTRST_ACK,
RST_POLL_INVL, RST_POLL_TIMEOUT)) {
dev_err(&pdev->dev, "timeout, fail to reset device\n");
return -ETIMEDOUT;

View File

@ -227,7 +227,6 @@ static int cci_pci_sriov_configure(struct pci_dev *pcidev, int num_vfs)
{
struct cci_drvdata *drvdata = pci_get_drvdata(pcidev);
struct dfl_fpga_cdev *cdev = drvdata->cdev;
int ret = 0;
if (!num_vfs) {
/*
@ -239,6 +238,8 @@ static int cci_pci_sriov_configure(struct pci_dev *pcidev, int num_vfs)
dfl_fpga_cdev_config_ports_pf(cdev);
} else {
int ret;
/*
* before enable SRIOV, put released ports into VF access mode
* first of all.

View File

@ -692,9 +692,10 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
return n ? -EFAULT : 0;
}
case AMDGPU_INFO_DEV_INFO: {
struct drm_amdgpu_info_device dev_info = {};
struct drm_amdgpu_info_device dev_info;
uint64_t vm_size;
memset(&dev_info, 0, sizeof(dev_info));
dev_info.device_id = dev->pdev->device;
dev_info.chip_rev = adev->rev_id;
dev_info.external_rev = adev->external_rev_id;

View File

@ -778,7 +778,8 @@ static ssize_t amdgpu_set_pp_od_clk_voltage(struct device *dev,
tmp_str++;
while (isspace(*++tmp_str));
while ((sub_str = strsep(&tmp_str, delimiter)) != NULL) {
while (tmp_str[0]) {
sub_str = strsep(&tmp_str, delimiter);
ret = kstrtol(sub_str, 0, &parameter[parameter_size]);
if (ret)
return -EINVAL;
@ -1038,7 +1039,8 @@ static ssize_t amdgpu_read_mask(const char *buf, size_t count, uint32_t *mask)
memcpy(buf_cpy, buf, bytes);
buf_cpy[bytes] = '\0';
tmp = buf_cpy;
while ((sub_str = strsep(&tmp, delimiter)) != NULL) {
while (tmp[0]) {
sub_str = strsep(&tmp, delimiter);
if (strlen(sub_str)) {
ret = kstrtol(sub_str, 0, &level);
if (ret)
@ -1635,7 +1637,8 @@ static ssize_t amdgpu_set_pp_power_profile_mode(struct device *dev,
i++;
memcpy(buf_cpy, buf, count-i);
tmp_str = buf_cpy;
while ((sub_str = strsep(&tmp_str, delimiter)) != NULL) {
while (tmp_str[0]) {
sub_str = strsep(&tmp_str, delimiter);
ret = kstrtol(sub_str, 0, &parameter[parameter_size]);
if (ret)
return -EINVAL;
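
The rewritten loops test the remaining string before each strsep() call, so a trailing separator no longer yields an empty final token for kstrtol() to reject with -EINVAL. A userspace demonstration of strsep()'s contract, assuming (as these sysfs handlers appear to) that the delimiter set covers the trailing newline:

	#include <stdio.h>
	#include <string.h>

	int main(void)
	{
		char buf[] = "266 500\n";	/* typical sysfs write, newline-terminated */
		char *tmp = buf;

		while (tmp[0]) {		/* stop once only "" remains */
			char *tok = strsep(&tmp, " \n");

			printf("token: '%s'\n", tok);	/* "266", then "500" */
		}
		return 0;
	}
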

View File

@ -8717,20 +8717,38 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
* the same resource. If we have a new DC context as part of
* the DM atomic state from validation we need to free it and
* retain the existing one instead.
*
* Furthermore, since the DM atomic state only contains the DC
* context and can safely be annulled, we can free the state
* and clear the associated private object now to free
* some memory and avoid a possible use-after-free later.
*/
struct dm_atomic_state *new_dm_state, *old_dm_state;
new_dm_state = dm_atomic_get_new_state(state);
old_dm_state = dm_atomic_get_old_state(state);
for (i = 0; i < state->num_private_objs; i++) {
struct drm_private_obj *obj = state->private_objs[i].ptr;
if (new_dm_state && old_dm_state) {
if (new_dm_state->context)
dc_release_state(new_dm_state->context);
if (obj->funcs == adev->dm.atomic_obj.funcs) {
int j = state->num_private_objs-1;
new_dm_state->context = old_dm_state->context;
dm_atomic_destroy_state(obj,
state->private_objs[i].state);
if (old_dm_state->context)
dc_retain_state(old_dm_state->context);
/* If i is not at the end of the array then the
* last element needs to be moved to where i was
* before the array can safely be truncated.
*/
if (i != j)
state->private_objs[i] =
state->private_objs[j];
state->private_objs[j].ptr = NULL;
state->private_objs[j].state = NULL;
state->private_objs[j].old_state = NULL;
state->private_objs[j].new_state = NULL;
state->num_private_objs = j;
break;
}
}
}
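
The cleanup uses the classic swap-with-last removal for an unordered array: element i is overwritten by the final element, the final slot is cleared, and the count shrinks by one, all in O(1). A generic sketch of the idiom:

	#include <stddef.h>

	struct slot { void *ptr; void *state; };

	/* Remove arr[i] from an array whose element order is insignificant. */
	static void swap_remove(struct slot *arr, size_t *count, size_t i)
	{
		size_t last = *count - 1;

		if (i != last)
			arr[i] = arr[last];	/* move the tail into the hole */
		arr[last] = (struct slot){ 0 };	/* no stale copy left behind   */
		*count = last;
	}
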

View File

@ -146,6 +146,7 @@ int bochs_kms_init(struct bochs_device *bochs)
bochs->dev->mode_config.preferred_depth = 24;
bochs->dev->mode_config.prefer_shadow = 0;
bochs->dev->mode_config.prefer_shadow_fbdev = 1;
bochs->dev->mode_config.fbdev_use_iomem = true;
bochs->dev->mode_config.quirk_addfb_prefer_host_byte_order = true;
bochs->dev->mode_config.funcs = &bochs_mode_funcs;

View File

@ -1224,6 +1224,7 @@ static int adv7511_probe(struct i2c_client *i2c, const struct i2c_device_id *id)
adv7511->bridge.funcs = &adv7511_bridge_funcs;
adv7511->bridge.of_node = dev->of_node;
adv7511->bridge.type = DRM_MODE_CONNECTOR_HDMIA;
drm_bridge_add(&adv7511->bridge);

View File

@ -917,11 +917,6 @@ static int nwl_dsi_bridge_attach(struct drm_bridge *bridge,
struct drm_panel *panel;
int ret;
if (flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR) {
DRM_ERROR("Fix bridge driver to make connector optional!");
return -EINVAL;
}
ret = drm_of_find_panel_or_bridge(dsi->dev->of_node, 1, 0, &panel,
&panel_bridge);
if (ret)

View File

@ -399,7 +399,11 @@ static void drm_fb_helper_dirty_blit_real(struct drm_fb_helper *fb_helper,
unsigned int y;
for (y = clip->y1; y < clip->y2; y++) {
memcpy(dst, src, len);
if (!fb_helper->dev->mode_config.fbdev_use_iomem)
memcpy(dst, src, len);
else
memcpy_toio((void __iomem *)dst, src, len);
src += fb->pitches[0];
dst += fb->pitches[0];
}

View File

@ -871,9 +871,6 @@ drm_gem_flink_ioctl(struct drm_device *dev, void *data,
* @file_priv: drm file-private structure
*
* Open an object using the global name, returning a handle and the size.
*
* This handle (of course) holds a reference to the object, so the object
* will not go away until the handle is deleted.
*/
int
drm_gem_open_ioctl(struct drm_device *dev, void *data,
@ -898,14 +895,15 @@ drm_gem_open_ioctl(struct drm_device *dev, void *data,
/* drm_gem_handle_create_tail unlocks dev->object_name_lock. */
ret = drm_gem_handle_create_tail(file_priv, obj, &handle);
drm_gem_object_put_unlocked(obj);
if (ret)
return ret;
goto err;
args->handle = handle;
args->size = obj->size;
return 0;
err:
drm_gem_object_put_unlocked(obj);
return ret;
}
/**

View File

@ -923,7 +923,7 @@ static int mipi_dbi_spi1_transfer(struct mipi_dbi *dbi, int dc,
}
}
tr.len = chunk;
tr.len = chunk * 2;
len -= chunk;
ret = spi_sync(spi, &m);

View File

@ -322,10 +322,8 @@ static int drm_of_lvds_get_remote_pixels_type(
* configurations by passing the endpoints explicitly to
* drm_of_lvds_get_dual_link_pixel_order().
*/
if (!current_pt || pixels_type != current_pt) {
of_node_put(remote_port);
if (!current_pt || pixels_type != current_pt)
return -EINVAL;
}
}
return pixels_type;

View File

@ -1060,9 +1060,14 @@ static void mcde_display_update(struct drm_simple_display_pipe *pipe,
*/
if (fb) {
mcde_set_extsrc(mcde, drm_fb_cma_get_gem_addr(fb, pstate, 0));
if (!mcde->video_mode)
/* Send a single frame using software sync */
mcde_display_send_one_frame(mcde);
if (!mcde->video_mode) {
/*
* Send a single frame using software sync if the flow
* is not active yet.
*/
if (mcde->flow_active == 0)
mcde_display_send_one_frame(mcde);
}
dev_info_once(mcde->dev, "sent first display update\n");
} else {
/*

View File

@ -2073,7 +2073,7 @@ nv50_disp_atomic_commit_tail(struct drm_atomic_state *state)
*/
if (core->assign_windows) {
core->func->wndw.owner(core);
core->func->update(core, interlock, false);
nv50_disp_atomic_commit_core(state, interlock);
core->assign_windows = false;
interlock[NV50_DISP_INTERLOCK_CORE] = 0;
}
@ -2506,7 +2506,7 @@ nv50_display_create(struct drm_device *dev)
if (disp->disp->object.oclass >= TU102_DISP)
nouveau_display(dev)->format_modifiers = wndwc57e_modifiers;
else
if (disp->disp->object.oclass >= GF110_DISP)
if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_FERMI)
nouveau_display(dev)->format_modifiers = disp90xx_modifiers;
else
nouveau_display(dev)->format_modifiers = disp50xx_modifiers;

View File

@ -191,6 +191,7 @@ nouveau_decode_mod(struct nouveau_drm *drm,
uint32_t *tile_mode,
uint8_t *kind)
{
struct nouveau_display *disp = nouveau_display(drm->dev);
BUG_ON(!tile_mode || !kind);
if (modifier == DRM_FORMAT_MOD_LINEAR) {
@ -202,6 +203,12 @@ nouveau_decode_mod(struct nouveau_drm *drm,
* Extract the block height and kind from the corresponding
* modifier fields. See drm_fourcc.h for details.
*/
if ((modifier & (0xffull << 12)) == 0ull) {
/* Legacy modifier. Translate to this dev's 'kind.' */
modifier |= disp->format_modifiers[0] & (0xffull << 12);
}
*tile_mode = (uint32_t)(modifier & 0xF);
*kind = (uint8_t)((modifier >> 12) & 0xFF);
@ -227,6 +234,16 @@ nouveau_framebuffer_get_layout(struct drm_framebuffer *fb,
}
}
static const u64 legacy_modifiers[] = {
DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(0),
DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(1),
DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(2),
DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(3),
DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(4),
DRM_FORMAT_MOD_NVIDIA_16BX2_BLOCK(5),
DRM_FORMAT_MOD_INVALID
};
static int
nouveau_validate_decode_mod(struct nouveau_drm *drm,
uint64_t modifier,
@ -247,8 +264,14 @@ nouveau_validate_decode_mod(struct nouveau_drm *drm,
(disp->format_modifiers[mod] != modifier);
mod++);
if (disp->format_modifiers[mod] == DRM_FORMAT_MOD_INVALID)
return -EINVAL;
if (disp->format_modifiers[mod] == DRM_FORMAT_MOD_INVALID) {
for (mod = 0;
(legacy_modifiers[mod] != DRM_FORMAT_MOD_INVALID) &&
(legacy_modifiers[mod] != modifier);
mod++);
if (legacy_modifiers[mod] == DRM_FORMAT_MOD_INVALID)
return -EINVAL;
}
nouveau_decode_mod(drm, modifier, tile_mode, kind);

View File

@ -315,7 +315,7 @@ nouveau_fbcon_create(struct drm_fb_helper *helper,
struct drm_framebuffer *fb;
struct nouveau_channel *chan;
struct nouveau_bo *nvbo;
struct drm_mode_fb_cmd2 mode_cmd;
struct drm_mode_fb_cmd2 mode_cmd = {};
int ret;
mode_cmd.width = sizes->surface_width;
@ -590,6 +590,7 @@ nouveau_fbcon_init(struct drm_device *dev)
drm_fb_helper_fini(&fbcon->helper);
free:
kfree(fbcon);
drm->fbcon = NULL;
return ret;
}

View File

@ -117,15 +117,6 @@ nvkm_outp_acquire_hda(struct nvkm_outp *outp, enum nvkm_ior_type type,
{
struct nvkm_ior *ior;
/* First preference is to reuse the OR that is currently armed
* on HW, if any, in order to prevent unnecessary switching.
*/
list_for_each_entry(ior, &outp->disp->ior, head) {
if (!ior->identity && !!ior->func->hda.hpd == hda &&
!ior->asy.outp && ior->arm.outp == outp)
return nvkm_outp_acquire_ior(outp, user, ior);
}
/* Failing that, a completely unused OR is the next best thing. */
list_for_each_entry(ior, &outp->disp->ior, head) {
if (!ior->identity && !!ior->func->hda.hpd == hda &&
@ -173,6 +164,27 @@ nvkm_outp_acquire(struct nvkm_outp *outp, u8 user, bool hda)
return nvkm_outp_acquire_ior(outp, user, ior);
}
/* First preference is to reuse the OR that is currently armed
* on HW, if any, in order to prevent unnecessary switching.
*/
list_for_each_entry(ior, &outp->disp->ior, head) {
if (!ior->identity && !ior->asy.outp && ior->arm.outp == outp) {
/*XXX: For various complicated reasons, we can't outright switch
* the boot-time OR on the first modeset without some fairly
* invasive changes.
*
* The systems that were fixed by modifying the OR selection
* code to account for HDA support shouldn't regress here as
* the HDA-enabled ORs match the relevant output's pad macro
* index, and the firmware seems to select an OR this way.
*
* This warning is to make it obvious if that proves wrong.
*/
WARN_ON(hda && !ior->func->hda.hpd);
return nvkm_outp_acquire_ior(outp, user, ior);
}
}
/* If we don't need HDA, first try to acquire an OR that doesn't
* support it to leave free the ones that do.
*/

View File

@ -615,9 +615,9 @@ static const struct panel_desc boe_tv101wum_nl6_desc = {
static const struct drm_display_mode auo_kd101n80_45na_default_mode = {
.clock = 157000,
.hdisplay = 1200,
.hsync_start = 1200 + 80,
.hsync_end = 1200 + 80 + 24,
.htotal = 1200 + 80 + 24 + 36,
.hsync_start = 1200 + 60,
.hsync_end = 1200 + 60 + 24,
.htotal = 1200 + 60 + 24 + 56,
.vdisplay = 1920,
.vsync_start = 1920 + 16,
.vsync_end = 1920 + 16 + 4,

View File

@ -1250,7 +1250,21 @@ static const struct panel_desc boe_nv133fhm_n61 = {
.height = 165,
},
.delay = {
.hpd_absent_delay = 200,
/*
* When power is first given to the panel there's a short
* spike on the HPD line. It was explained that this spike
* lasts until the TCON data download is complete. On
* one system this was measured at 8 ms. We'll put 15 ms
* in the prepare delay just to be safe and take it away
* from the hpd_absent_delay (which would otherwise be 200 ms)
* to handle this. That means:
* - If HPD isn't hooked up you still have 200 ms delay.
* - If HPD is hooked up we won't try to look at it for the
* first 15 ms.
*/
.prepare = 15,
.hpd_absent_delay = 185,
.unprepare = 500,
},
.bus_format = MEDIA_BUS_FMT_RGB888_1X24,

View File

@ -18,10 +18,8 @@ int i2c_slave_register(struct i2c_client *client, i2c_slave_cb_t slave_cb)
{
int ret;
if (!client || !slave_cb) {
WARN(1, "insufficient data\n");
if (WARN(IS_ERR_OR_NULL(client) || !slave_cb, "insufficient data\n"))
return -EINVAL;
}
if (!(client->flags & I2C_CLIENT_SLAVE))
dev_warn(&client->dev, "%s: client slave flag not set. You might see address collisions\n",
@ -60,6 +58,9 @@ int i2c_slave_unregister(struct i2c_client *client)
{
int ret;
if (IS_ERR_OR_NULL(client))
return -EINVAL;
if (!client->adapter->algo->unreg_slave) {
dev_err(&client->dev, "%s: not supported by adapter\n", __func__);
return -EOPNOTSUPP;

View File

@ -72,6 +72,15 @@ static void rdma_dim_init(struct ib_cq *cq)
INIT_WORK(&dim->work, ib_cq_rdma_dim_work);
}
static void rdma_dim_destroy(struct ib_cq *cq)
{
if (!cq->dim)
return;
cancel_work_sync(&cq->dim->work);
kfree(cq->dim);
}
static int __poll_cq(struct ib_cq *cq, int num_entries, struct ib_wc *wc)
{
int rc;
@ -266,6 +275,7 @@ struct ib_cq *__ib_alloc_cq_user(struct ib_device *dev, void *private,
return cq;
out_destroy_cq:
rdma_dim_destroy(cq);
rdma_restrack_del(&cq->res);
cq->device->ops.destroy_cq(cq, udata);
out_free_wc:
@ -331,12 +341,10 @@ void ib_free_cq_user(struct ib_cq *cq, struct ib_udata *udata)
WARN_ON_ONCE(1);
}
rdma_dim_destroy(cq);
trace_cq_free(cq);
rdma_restrack_del(&cq->res);
cq->device->ops.destroy_cq(cq, udata);
if (cq->dim)
cancel_work_sync(&cq->dim->work);
kfree(cq->dim);
kfree(cq->wc);
kfree(cq);
}

View File

@ -1084,6 +1084,8 @@ static ssize_t ucma_connect(struct ucma_file *file, const char __user *inbuf,
size_t in_size;
int ret;
if (in_len < offsetofend(typeof(cmd), reserved))
return -EINVAL;
in_size = min_t(size_t, in_len, sizeof(cmd));
if (copy_from_user(&cmd, inbuf, in_size))
return -EFAULT;
@ -1141,6 +1143,8 @@ static ssize_t ucma_accept(struct ucma_file *file, const char __user *inbuf,
size_t in_size;
int ret;
if (in_len < offsetofend(typeof(cmd), reserved))
return -EINVAL;
in_size = min_t(size_t, in_len, sizeof(cmd));
if (copy_from_user(&cmd, inbuf, in_size))
return -EFAULT;
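
Both checks enforce that the command is at least long enough to contain every field the handler will read, while longer buffers from newer userspace remain acceptable; offsetofend() is the kernel helper naming "the offset just past this member". A self-contained sketch of the idiom (struct cmd and parse_cmd() are hypothetical):

	#include <stddef.h>
	#include <stdint.h>
	#include <string.h>

	#define offsetofend(TYPE, MEMBER) \
		(offsetof(TYPE, MEMBER) + sizeof(((TYPE *)0)->MEMBER))

	struct cmd {
		uint32_t id;
		uint32_t reserved;	/* last field this handler reads */
	};

	int parse_cmd(const void *inbuf, size_t in_len, struct cmd *out)
	{
		if (in_len < offsetofend(struct cmd, reserved))
			return -1;	/* too short to be valid: -EINVAL above */

		memset(out, 0, sizeof(*out));
		memcpy(out, inbuf, in_len < sizeof(*out) ? in_len : sizeof(*out));
		return 0;
	}
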

View File

@ -1797,9 +1797,7 @@ static bool init_prefetch_work(struct ib_pd *pd,
work->frags[i].mr =
get_prefetchable_mr(pd, advice, sg_list[i].lkey);
if (!work->frags[i].mr) {
work->num_sge = i - 1;
if (i)
destroy_prefetch_work(work);
work->num_sge = i;
return false;
}
@ -1865,6 +1863,7 @@ int mlx5_ib_advise_mr_prefetch(struct ib_pd *pd,
srcu_key = srcu_read_lock(&dev->odp_srcu);
if (!init_prefetch_work(pd, advice, pf_flags, work, sg_list, num_sge)) {
srcu_read_unlock(&dev->odp_srcu, srcu_key);
destroy_prefetch_work(work);
return -EINVAL;
}
queue_work(system_unbound_wq, &work->work);

View File

@ -1766,15 +1766,14 @@ static int create_rss_raw_qp_tir(struct mlx5_ib_dev *dev, struct ib_pd *pd,
}
static void configure_requester_scat_cqe(struct mlx5_ib_dev *dev,
struct mlx5_ib_qp *qp,
struct ib_qp_init_attr *init_attr,
struct mlx5_ib_create_qp *ucmd,
void *qpc)
{
int scqe_sz;
bool allow_scat_cqe = false;
if (ucmd)
allow_scat_cqe = ucmd->flags & MLX5_QP_FLAG_ALLOW_SCATTER_CQE;
allow_scat_cqe = qp->flags_en & MLX5_QP_FLAG_ALLOW_SCATTER_CQE;
if (!allow_scat_cqe && init_attr->sq_sig_type != IB_SIGNAL_ALL_WR)
return;
@ -1853,8 +1852,6 @@ static int create_xrc_tgt_qp(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
u32 *in;
int err;
mutex_init(&qp->mutex);
if (attr->sq_sig_type == IB_SIGNAL_ALL_WR)
qp->sq_signal_bits = MLX5_WQE_CTRL_CQ_UPDATE;
@ -1938,7 +1935,6 @@ static int create_user_qp(struct mlx5_ib_dev *dev, struct ib_pd *pd,
u32 *in;
int err;
mutex_init(&qp->mutex);
spin_lock_init(&qp->sq.lock);
spin_lock_init(&qp->rq.lock);
@ -2012,7 +2008,7 @@ static int create_user_qp(struct mlx5_ib_dev *dev, struct ib_pd *pd,
}
if ((qp->flags_en & MLX5_QP_FLAG_SCATTER_CQE) &&
(qp->type == MLX5_IB_QPT_DCI || qp->type == IB_QPT_RC))
configure_requester_scat_cqe(dev, init_attr, ucmd, qpc);
configure_requester_scat_cqe(dev, qp, init_attr, qpc);
if (qp->rq.wqe_cnt) {
MLX5_SET(qpc, qpc, log_rq_stride, qp->rq.wqe_shift - 4);
@ -2129,7 +2125,6 @@ static int create_kernel_qp(struct mlx5_ib_dev *dev, struct ib_pd *pd,
u32 *in;
int err;
mutex_init(&qp->mutex);
spin_lock_init(&qp->sq.lock);
spin_lock_init(&qp->rq.lock);
@ -2543,13 +2538,18 @@ static void process_vendor_flag(struct mlx5_ib_dev *dev, int *flags, int flag,
return;
}
if (flag == MLX5_QP_FLAG_SCATTER_CQE) {
switch (flag) {
case MLX5_QP_FLAG_SCATTER_CQE:
case MLX5_QP_FLAG_ALLOW_SCATTER_CQE:
/*
* We don't return error if this flag was provided,
* and mlx5 doesn't have right capability.
*/
*flags &= ~MLX5_QP_FLAG_SCATTER_CQE;
* We don't return error if these flags were provided,
* and mlx5 doesn't have right capability.
*/
*flags &= ~(MLX5_QP_FLAG_SCATTER_CQE |
MLX5_QP_FLAG_ALLOW_SCATTER_CQE);
return;
default:
break;
}
mlx5_ib_dbg(dev, "Vendor create QP flag 0x%X is not supported\n", flag);
}
@ -2589,6 +2589,8 @@ static int process_vendor_flags(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
process_vendor_flag(dev, &flags, MLX5_QP_FLAG_SIGNATURE, true, qp);
process_vendor_flag(dev, &flags, MLX5_QP_FLAG_SCATTER_CQE,
MLX5_CAP_GEN(mdev, sctr_data_cqe), qp);
process_vendor_flag(dev, &flags, MLX5_QP_FLAG_ALLOW_SCATTER_CQE,
MLX5_CAP_GEN(mdev, sctr_data_cqe), qp);
if (qp->type == IB_QPT_RAW_PACKET) {
cond = MLX5_CAP_ETH(mdev, tunnel_stateless_vxlan) ||
@ -2963,6 +2965,7 @@ struct ib_qp *mlx5_ib_create_qp(struct ib_pd *pd, struct ib_qp_init_attr *attr,
goto free_ucmd;
}
mutex_init(&qp->mutex);
qp->type = type;
if (udata) {
err = process_vendor_flags(dev, qp, params.ucmd, attr);

View File

@ -901,8 +901,6 @@ static void rvt_init_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp,
qp->s_tail_ack_queue = 0;
qp->s_acked_ack_queue = 0;
qp->s_num_rd_atomic = 0;
if (qp->r_rq.kwq)
qp->r_rq.kwq->count = qp->r_rq.size;
qp->r_sge.num_sge = 0;
atomic_set(&qp->s_reserved_used, 0);
}
@ -2366,31 +2364,6 @@ static int init_sge(struct rvt_qp *qp, struct rvt_rwqe *wqe)
return 0;
}
/**
* get_count - count numbers of request work queue entries
* in circular buffer
* @rq: data structure for request queue entry
* @tail: tail indices of the circular buffer
* @head: head indices of the circular buffer
*
* Return - total number of entries in the circular buffer
*/
static u32 get_count(struct rvt_rq *rq, u32 tail, u32 head)
{
u32 count;
count = head;
if (count >= rq->size)
count = 0;
if (count < tail)
count += rq->size - tail;
else
count -= tail;
return count;
}
/**
* get_rvt_head - get head indices of the circular buffer
* @rq: data structure for request queue entry
@ -2465,7 +2438,7 @@ int rvt_get_rwqe(struct rvt_qp *qp, bool wr_id_only)
if (kwq->count < RVT_RWQ_COUNT_THRESHOLD) {
head = get_rvt_head(rq, ip);
kwq->count = get_count(rq, tail, head);
kwq->count = rvt_get_rq_count(rq, head, tail);
}
if (unlikely(kwq->count == 0)) {
ret = 0;
@ -2500,7 +2473,9 @@ int rvt_get_rwqe(struct rvt_qp *qp, bool wr_id_only)
* the number of remaining WQEs.
*/
if (kwq->count < srq->limit) {
kwq->count = get_count(rq, tail, get_rvt_head(rq, ip));
kwq->count =
rvt_get_rq_count(rq,
get_rvt_head(rq, ip), tail);
if (kwq->count < srq->limit) {
struct ib_event ev;

View File

@ -127,9 +127,7 @@ __be32 rvt_compute_aeth(struct rvt_qp *qp)
* not atomic, which is OK, since the fuzziness is
* resolved as further ACKs go out.
*/
credits = head - tail;
if ((int)credits < 0)
credits += qp->r_rq.size;
credits = rvt_get_rq_count(&qp->r_rq, head, tail);
}
/*
* Binary search the credit table to find the code to

View File

@ -243,6 +243,7 @@ static int aggregate_requests(struct icc_node *node)
{
struct icc_provider *p = node->provider;
struct icc_req *r;
u32 avg_bw, peak_bw;
node->avg_bw = 0;
node->peak_bw = 0;
@ -251,9 +252,14 @@ static int aggregate_requests(struct icc_node *node)
p->pre_aggregate(node);
hlist_for_each_entry(r, &node->req_list, req_node) {
if (!r->enabled)
continue;
p->aggregate(node, r->tag, r->avg_bw, r->peak_bw,
if (r->enabled) {
avg_bw = r->avg_bw;
peak_bw = r->peak_bw;
} else {
avg_bw = 0;
peak_bw = 0;
}
p->aggregate(node, r->tag, avg_bw, peak_bw,
&node->avg_bw, &node->peak_bw);
}

View File

@ -197,13 +197,13 @@ DEFINE_QNODE(pcnoc_int_0, MSM8916_PNOC_INT_0, 8, -1, -1, MSM8916_PNOC_SNOC_MAS,
DEFINE_QNODE(pcnoc_int_1, MSM8916_PNOC_INT_1, 8, -1, -1, MSM8916_PNOC_SNOC_MAS);
DEFINE_QNODE(pcnoc_m_0, MSM8916_PNOC_MAS_0, 8, -1, -1, MSM8916_PNOC_INT_0);
DEFINE_QNODE(pcnoc_m_1, MSM8916_PNOC_MAS_1, 8, -1, -1, MSM8916_PNOC_SNOC_MAS);
DEFINE_QNODE(pcnoc_s_0, MSM8916_PNOC_SLV_0, 8, -1, -1, MSM8916_SLAVE_CLK_CTL, MSM8916_SLAVE_TLMM, MSM8916_SLAVE_TCSR, MSM8916_SLAVE_SECURITY, MSM8916_SLAVE_MSS);
DEFINE_QNODE(pcnoc_s_1, MSM8916_PNOC_SLV_1, 8, -1, -1, MSM8916_SLAVE_IMEM_CFG, MSM8916_SLAVE_CRYPTO_0_CFG, MSM8916_SLAVE_MSG_RAM, MSM8916_SLAVE_PDM, MSM8916_SLAVE_PRNG);
DEFINE_QNODE(pcnoc_s_2, MSM8916_PNOC_SLV_2, 8, -1, -1, MSM8916_SLAVE_SPDM, MSM8916_SLAVE_BOOT_ROM, MSM8916_SLAVE_BIMC_CFG, MSM8916_SLAVE_PNOC_CFG, MSM8916_SLAVE_PMIC_ARB);
DEFINE_QNODE(pcnoc_s_3, MSM8916_PNOC_SLV_3, 8, -1, -1, MSM8916_SLAVE_MPM, MSM8916_SLAVE_SNOC_CFG, MSM8916_SLAVE_RBCPR_CFG, MSM8916_SLAVE_QDSS_CFG, MSM8916_SLAVE_DEHR_CFG);
DEFINE_QNODE(pcnoc_s_4, MSM8916_PNOC_SLV_4, 8, -1, -1, MSM8916_SLAVE_VENUS_CFG, MSM8916_SLAVE_CAMERA_CFG, MSM8916_SLAVE_DISPLAY_CFG);
DEFINE_QNODE(pcnoc_s_8, MSM8916_PNOC_SLV_8, 8, -1, -1, MSM8916_SLAVE_USB_HS, MSM8916_SLAVE_SDCC_1, MSM8916_SLAVE_BLSP_1);
DEFINE_QNODE(pcnoc_s_9, MSM8916_PNOC_SLV_9, 8, -1, -1, MSM8916_SLAVE_SDCC_2, MSM8916_SLAVE_LPASS, MSM8916_SLAVE_GRAPHICS_3D_CFG);
DEFINE_QNODE(pcnoc_s_0, MSM8916_PNOC_SLV_0, 4, -1, -1, MSM8916_SLAVE_CLK_CTL, MSM8916_SLAVE_TLMM, MSM8916_SLAVE_TCSR, MSM8916_SLAVE_SECURITY, MSM8916_SLAVE_MSS);
DEFINE_QNODE(pcnoc_s_1, MSM8916_PNOC_SLV_1, 4, -1, -1, MSM8916_SLAVE_IMEM_CFG, MSM8916_SLAVE_CRYPTO_0_CFG, MSM8916_SLAVE_MSG_RAM, MSM8916_SLAVE_PDM, MSM8916_SLAVE_PRNG);
DEFINE_QNODE(pcnoc_s_2, MSM8916_PNOC_SLV_2, 4, -1, -1, MSM8916_SLAVE_SPDM, MSM8916_SLAVE_BOOT_ROM, MSM8916_SLAVE_BIMC_CFG, MSM8916_SLAVE_PNOC_CFG, MSM8916_SLAVE_PMIC_ARB);
DEFINE_QNODE(pcnoc_s_3, MSM8916_PNOC_SLV_3, 4, -1, -1, MSM8916_SLAVE_MPM, MSM8916_SLAVE_SNOC_CFG, MSM8916_SLAVE_RBCPR_CFG, MSM8916_SLAVE_QDSS_CFG, MSM8916_SLAVE_DEHR_CFG);
DEFINE_QNODE(pcnoc_s_4, MSM8916_PNOC_SLV_4, 4, -1, -1, MSM8916_SLAVE_VENUS_CFG, MSM8916_SLAVE_CAMERA_CFG, MSM8916_SLAVE_DISPLAY_CFG);
DEFINE_QNODE(pcnoc_s_8, MSM8916_PNOC_SLV_8, 4, -1, -1, MSM8916_SLAVE_USB_HS, MSM8916_SLAVE_SDCC_1, MSM8916_SLAVE_BLSP_1);
DEFINE_QNODE(pcnoc_s_9, MSM8916_PNOC_SLV_9, 4, -1, -1, MSM8916_SLAVE_SDCC_2, MSM8916_SLAVE_LPASS, MSM8916_SLAVE_GRAPHICS_3D_CFG);
DEFINE_QNODE(pcnoc_snoc_mas, MSM8916_PNOC_SNOC_MAS, 8, 29, -1, MSM8916_PNOC_SNOC_SLV);
DEFINE_QNODE(pcnoc_snoc_slv, MSM8916_PNOC_SNOC_SLV, 8, -1, 45, MSM8916_SNOC_INT_0, MSM8916_SNOC_INT_BIMC, MSM8916_SNOC_INT_1);
DEFINE_QNODE(qdss_int, MSM8916_SNOC_QDSS_INT, 8, -1, -1, MSM8916_SNOC_INT_0, MSM8916_SNOC_INT_BIMC);

View File

@ -499,11 +499,19 @@ static int validate_queue_index(struct hl_device *hdev,
struct asic_fixed_properties *asic = &hdev->asic_prop;
struct hw_queue_properties *hw_queue_prop;
/* This must be checked here to prevent out-of-bounds access to
* hw_queues_props array
*/
if (chunk->queue_index >= HL_MAX_QUEUES) {
dev_err(hdev->dev, "Queue index %d is invalid\n",
chunk->queue_index);
return -EINVAL;
}
hw_queue_prop = &asic->hw_queues_props[chunk->queue_index];
if ((chunk->queue_index >= HL_MAX_QUEUES) ||
(hw_queue_prop->type == QUEUE_TYPE_NA)) {
dev_err(hdev->dev, "Queue index %d is invalid\n",
if (hw_queue_prop->type == QUEUE_TYPE_NA) {
dev_err(hdev->dev, "Queue index %d is not applicable\n",
chunk->queue_index);
return -EINVAL;
}
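
The split makes the bounds test stand alone before hw_queues_props is indexed, as the new comment spells out. A minimal sketch of the check-then-index rule (hypothetical names):

	#include <stddef.h>

	struct queue_props { int type; };

	/* Validate the index before touching the array it selects into. */
	static int queue_type(const struct queue_props *props, size_t nprops,
			      size_t index, int *type_out)
	{
		if (index >= nprops)	/* bounds check first, always */
			return -1;
		*type_out = props[index].type;
		return 0;
	}
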

View File

@ -36,7 +36,7 @@ static int hl_debugfs_i2c_read(struct hl_device *hdev, u8 i2c_bus, u8 i2c_addr,
pkt.i2c_reg = i2c_reg;
rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
HL_DEVICE_TIMEOUT_USEC, (long *) val);
0, (long *) val);
if (rc)
dev_err(hdev->dev, "Failed to read from I2C, error %d\n", rc);
@ -63,7 +63,7 @@ static int hl_debugfs_i2c_write(struct hl_device *hdev, u8 i2c_bus, u8 i2c_addr,
pkt.value = cpu_to_le64(val);
rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
HL_DEVICE_TIMEOUT_USEC, NULL);
0, NULL);
if (rc)
dev_err(hdev->dev, "Failed to write to I2C, error %d\n", rc);
@ -87,7 +87,7 @@ static void hl_debugfs_led_set(struct hl_device *hdev, u8 led, u8 state)
pkt.value = cpu_to_le64(state);
rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
HL_DEVICE_TIMEOUT_USEC, NULL);
0, NULL);
if (rc)
dev_err(hdev->dev, "Failed to set LED %d, error %d\n", led, rc);
@ -981,7 +981,7 @@ static ssize_t hl_clk_gate_read(struct file *f, char __user *buf,
if (*ppos)
return 0;
sprintf(tmp_buf, "%d\n", hdev->clock_gating);
sprintf(tmp_buf, "0x%llx\n", hdev->clock_gating_mask);
rc = simple_read_from_buffer(buf, strlen(tmp_buf) + 1, ppos, tmp_buf,
strlen(tmp_buf) + 1);
@ -993,7 +993,7 @@ static ssize_t hl_clk_gate_write(struct file *f, const char __user *buf,
{
struct hl_dbg_device_entry *entry = file_inode(f)->i_private;
struct hl_device *hdev = entry->hdev;
u32 value;
u64 value;
ssize_t rc;
if (atomic_read(&hdev->in_reset)) {
@ -1002,19 +1002,12 @@ static ssize_t hl_clk_gate_write(struct file *f, const char __user *buf,
return 0;
}
rc = kstrtouint_from_user(buf, count, 10, &value);
rc = kstrtoull_from_user(buf, count, 16, &value);
if (rc)
return rc;
if (value) {
hdev->clock_gating = 1;
if (hdev->asic_funcs->enable_clock_gating)
hdev->asic_funcs->enable_clock_gating(hdev);
} else {
if (hdev->asic_funcs->disable_clock_gating)
hdev->asic_funcs->disable_clock_gating(hdev);
hdev->clock_gating = 0;
}
hdev->clock_gating_mask = value;
hdev->asic_funcs->set_clock_gating(hdev);
return count;
}

View File

@ -608,7 +608,7 @@ int hl_device_set_debug_mode(struct hl_device *hdev, bool enable)
hdev->in_debug = 0;
if (!hdev->hard_reset_pending)
hdev->asic_funcs->enable_clock_gating(hdev);
hdev->asic_funcs->set_clock_gating(hdev);
goto out;
}

View File

@ -61,7 +61,7 @@ int hl_fw_send_pci_access_msg(struct hl_device *hdev, u32 opcode)
pkt.ctl = cpu_to_le32(opcode << ARMCP_PKT_CTL_OPCODE_SHIFT);
return hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt,
sizeof(pkt), HL_DEVICE_TIMEOUT_USEC, NULL);
sizeof(pkt), 0, NULL);
}
int hl_fw_send_cpu_message(struct hl_device *hdev, u32 hw_queue_id, u32 *msg,
@ -144,7 +144,7 @@ int hl_fw_unmask_irq(struct hl_device *hdev, u16 event_type)
pkt.value = cpu_to_le64(event_type);
rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
HL_DEVICE_TIMEOUT_USEC, &result);
0, &result);
if (rc)
dev_err(hdev->dev, "failed to unmask RAZWI IRQ %d", event_type);
@ -183,7 +183,7 @@ int hl_fw_unmask_irq_arr(struct hl_device *hdev, const u32 *irq_arr,
ARMCP_PKT_CTL_OPCODE_SHIFT);
rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) pkt,
total_pkt_size, HL_DEVICE_TIMEOUT_USEC, &result);
total_pkt_size, 0, &result);
if (rc)
dev_err(hdev->dev, "failed to unmask IRQ array\n");
@ -204,7 +204,7 @@ int hl_fw_test_cpu_queue(struct hl_device *hdev)
test_pkt.value = cpu_to_le64(ARMCP_PACKET_FENCE_VAL);
rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &test_pkt,
sizeof(test_pkt), HL_DEVICE_TIMEOUT_USEC, &result);
sizeof(test_pkt), 0, &result);
if (!rc) {
if (result != ARMCP_PACKET_FENCE_VAL)
@ -248,7 +248,7 @@ int hl_fw_send_heartbeat(struct hl_device *hdev)
hb_pkt.value = cpu_to_le64(ARMCP_PACKET_FENCE_VAL);
rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &hb_pkt,
sizeof(hb_pkt), HL_DEVICE_TIMEOUT_USEC, &result);
sizeof(hb_pkt), 0, &result);
if ((rc) || (result != ARMCP_PACKET_FENCE_VAL))
rc = -EIO;

View File

@ -80,6 +80,7 @@
#define GAUDI_PLDM_QMAN0_TIMEOUT_USEC (HL_DEVICE_TIMEOUT_USEC * 30)
#define GAUDI_PLDM_TPC_KERNEL_WAIT_USEC (HL_DEVICE_TIMEOUT_USEC * 30)
#define GAUDI_BOOT_FIT_REQ_TIMEOUT_USEC 1000000 /* 1s */
#define GAUDI_MSG_TO_CPU_TIMEOUT_USEC 4000000 /* 4s */
#define GAUDI_QMAN0_FENCE_VAL 0x72E91AB9
@ -98,6 +99,11 @@
#define GAUDI_ARB_WDT_TIMEOUT 0x1000000
#define GAUDI_CLK_GATE_DEBUGFS_MASK (\
BIT(GAUDI_ENGINE_ID_MME_0) |\
BIT(GAUDI_ENGINE_ID_MME_2) |\
GENMASK_ULL(GAUDI_ENGINE_ID_TPC_7, GAUDI_ENGINE_ID_TPC_0))
static const char gaudi_irq_name[GAUDI_MSI_ENTRIES][GAUDI_MAX_STRING_LEN] = {
"gaudi cq 0_0", "gaudi cq 0_1", "gaudi cq 0_2", "gaudi cq 0_3",
"gaudi cq 1_0", "gaudi cq 1_1", "gaudi cq 1_2", "gaudi cq 1_3",
@ -106,14 +112,14 @@ static const char gaudi_irq_name[GAUDI_MSI_ENTRIES][GAUDI_MAX_STRING_LEN] = {
};
static const u8 gaudi_dma_assignment[GAUDI_DMA_MAX] = {
[GAUDI_PCI_DMA_1] = 0,
[GAUDI_PCI_DMA_2] = 1,
[GAUDI_PCI_DMA_3] = 5,
[GAUDI_HBM_DMA_1] = 2,
[GAUDI_HBM_DMA_2] = 3,
[GAUDI_HBM_DMA_3] = 4,
[GAUDI_HBM_DMA_4] = 6,
[GAUDI_HBM_DMA_5] = 7
[GAUDI_PCI_DMA_1] = GAUDI_ENGINE_ID_DMA_0,
[GAUDI_PCI_DMA_2] = GAUDI_ENGINE_ID_DMA_1,
[GAUDI_PCI_DMA_3] = GAUDI_ENGINE_ID_DMA_5,
[GAUDI_HBM_DMA_1] = GAUDI_ENGINE_ID_DMA_2,
[GAUDI_HBM_DMA_2] = GAUDI_ENGINE_ID_DMA_3,
[GAUDI_HBM_DMA_3] = GAUDI_ENGINE_ID_DMA_4,
[GAUDI_HBM_DMA_4] = GAUDI_ENGINE_ID_DMA_6,
[GAUDI_HBM_DMA_5] = GAUDI_ENGINE_ID_DMA_7
};
static const u8 gaudi_cq_assignment[NUMBER_OF_CMPLT_QUEUES] = {
@ -1819,7 +1825,7 @@ static void gaudi_init_golden_registers(struct hl_device *hdev)
gaudi_init_rate_limiter(hdev);
gaudi_disable_clock_gating(hdev);
hdev->asic_funcs->disable_clock_gating(hdev);
for (tpc_id = 0, tpc_offset = 0;
tpc_id < TPC_NUMBER_OF_ENGINES;
@ -2531,46 +2537,55 @@ static void gaudi_tpc_stall(struct hl_device *hdev)
WREG32(mmTPC7_CFG_TPC_STALL, 1 << TPC0_CFG_TPC_STALL_V_SHIFT);
}
static void gaudi_enable_clock_gating(struct hl_device *hdev)
static void gaudi_set_clock_gating(struct hl_device *hdev)
{
struct gaudi_device *gaudi = hdev->asic_specific;
u32 qman_offset;
int i;
if (!hdev->clock_gating)
return;
if (gaudi->hw_cap_initialized & HW_CAP_CLK_GATE)
return;
/* In case we are during debug session, don't enable the clock gate
* as it may interfere
*/
if (hdev->in_debug)
return;
for (i = 0, qman_offset = 0 ; i < PCI_DMA_NUMBER_OF_CHNLS ; i++) {
for (i = GAUDI_PCI_DMA_1, qman_offset = 0 ; i < GAUDI_HBM_DMA_1 ; i++) {
if (!(hdev->clock_gating_mask &
(BIT_ULL(gaudi_dma_assignment[i]))))
continue;
qman_offset = gaudi_dma_assignment[i] * DMA_QMAN_OFFSET;
WREG32(mmDMA0_QM_CGM_CFG1 + qman_offset, QMAN_CGM1_PWR_GATE_EN);
WREG32(mmDMA0_QM_CGM_CFG + qman_offset,
QMAN_UPPER_CP_CGM_PWR_GATE_EN);
}
for (; i < HBM_DMA_NUMBER_OF_CHNLS ; i++) {
for (i = GAUDI_HBM_DMA_1 ; i < GAUDI_DMA_MAX ; i++) {
if (!(hdev->clock_gating_mask &
(BIT_ULL(gaudi_dma_assignment[i]))))
continue;
qman_offset = gaudi_dma_assignment[i] * DMA_QMAN_OFFSET;
WREG32(mmDMA0_QM_CGM_CFG1 + qman_offset, QMAN_CGM1_PWR_GATE_EN);
WREG32(mmDMA0_QM_CGM_CFG + qman_offset,
QMAN_COMMON_CP_CGM_PWR_GATE_EN);
}
WREG32(mmMME0_QM_CGM_CFG1, QMAN_CGM1_PWR_GATE_EN);
WREG32(mmMME0_QM_CGM_CFG,
QMAN_COMMON_CP_CGM_PWR_GATE_EN);
WREG32(mmMME2_QM_CGM_CFG1, QMAN_CGM1_PWR_GATE_EN);
WREG32(mmMME2_QM_CGM_CFG,
QMAN_COMMON_CP_CGM_PWR_GATE_EN);
if (hdev->clock_gating_mask & (BIT_ULL(GAUDI_ENGINE_ID_MME_0))) {
WREG32(mmMME0_QM_CGM_CFG1, QMAN_CGM1_PWR_GATE_EN);
WREG32(mmMME0_QM_CGM_CFG, QMAN_COMMON_CP_CGM_PWR_GATE_EN);
}
if (hdev->clock_gating_mask & (BIT_ULL(GAUDI_ENGINE_ID_MME_2))) {
WREG32(mmMME2_QM_CGM_CFG1, QMAN_CGM1_PWR_GATE_EN);
WREG32(mmMME2_QM_CGM_CFG, QMAN_COMMON_CP_CGM_PWR_GATE_EN);
}
for (i = 0, qman_offset = 0 ; i < TPC_NUMBER_OF_ENGINES ; i++) {
if (!(hdev->clock_gating_mask &
(BIT_ULL(GAUDI_ENGINE_ID_TPC_0 + i))))
continue;
WREG32(mmTPC0_QM_CGM_CFG1 + qman_offset,
QMAN_CGM1_PWR_GATE_EN);
WREG32(mmTPC0_QM_CGM_CFG + qman_offset,
@ -2663,7 +2678,7 @@ static void gaudi_halt_engines(struct hl_device *hdev, bool hard_reset)
gaudi_stop_hbm_dma_qmans(hdev);
gaudi_stop_pci_dma_qmans(hdev);
gaudi_disable_clock_gating(hdev);
hdev->asic_funcs->disable_clock_gating(hdev);
msleep(wait_timeout_ms);
@ -3003,7 +3018,7 @@ static int gaudi_hw_init(struct hl_device *hdev)
gaudi_init_tpc_qmans(hdev);
gaudi_enable_clock_gating(hdev);
hdev->asic_funcs->set_clock_gating(hdev);
gaudi_enable_timestamp(hdev);
@ -3112,7 +3127,9 @@ static void gaudi_hw_fini(struct hl_device *hdev, bool hard_reset)
HW_CAP_HBM_DMA | HW_CAP_PLL |
HW_CAP_MMU |
HW_CAP_SRAM_SCRAMBLER |
HW_CAP_HBM_SCRAMBLER);
HW_CAP_HBM_SCRAMBLER |
HW_CAP_CLK_GATE);
memset(gaudi->events_stat, 0, sizeof(gaudi->events_stat));
}
@ -3463,6 +3480,9 @@ static int gaudi_send_cpu_message(struct hl_device *hdev, u32 *msg,
return 0;
}
if (!timeout)
timeout = GAUDI_MSG_TO_CPU_TIMEOUT_USEC;
return hl_fw_send_cpu_message(hdev, GAUDI_QUEUE_ID_CPU_PQ, msg, len,
timeout, result);
}
@ -3865,6 +3885,12 @@ static int gaudi_validate_cb(struct hl_device *hdev,
rc = -EPERM;
break;
case PACKET_WREG_BULK:
dev_err(hdev->dev,
"User not allowed to use WREG_BULK\n");
rc = -EPERM;
break;
case PACKET_LOAD_AND_EXE:
rc = gaudi_validate_load_and_exe_pkt(hdev, parser,
(struct packet_load_and_exe *) user_pkt);
@ -3880,7 +3906,6 @@ static int gaudi_validate_cb(struct hl_device *hdev,
break;
case PACKET_WREG_32:
case PACKET_WREG_BULK:
case PACKET_MSG_LONG:
case PACKET_MSG_SHORT:
case PACKET_REPEAT:
@ -4521,13 +4546,18 @@ static int gaudi_debugfs_read32(struct hl_device *hdev, u64 addr, u32 *val)
int rc = 0;
if ((addr >= CFG_BASE) && (addr < CFG_BASE + CFG_SIZE)) {
if (gaudi->hw_cap_initialized & HW_CAP_CLK_GATE) {
if ((gaudi->hw_cap_initialized & HW_CAP_CLK_GATE) &&
(hdev->clock_gating_mask &
GAUDI_CLK_GATE_DEBUGFS_MASK)) {
dev_err_ratelimited(hdev->dev,
"Can't read register - clock gating is enabled!\n");
rc = -EFAULT;
} else {
*val = RREG32(addr - CFG_BASE);
}
} else if ((addr >= SRAM_BASE_ADDR) &&
(addr < SRAM_BASE_ADDR + SRAM_BAR_SIZE)) {
*val = readl(hdev->pcie_bar[SRAM_BAR_ID] +
@ -4563,13 +4593,18 @@ static int gaudi_debugfs_write32(struct hl_device *hdev, u64 addr, u32 val)
int rc = 0;
if ((addr >= CFG_BASE) && (addr < CFG_BASE + CFG_SIZE)) {
if (gaudi->hw_cap_initialized & HW_CAP_CLK_GATE) {
if ((gaudi->hw_cap_initialized & HW_CAP_CLK_GATE) &&
(hdev->clock_gating_mask &
GAUDI_CLK_GATE_DEBUGFS_MASK)) {
dev_err_ratelimited(hdev->dev,
"Can't write register - clock gating is enabled!\n");
rc = -EFAULT;
} else {
WREG32(addr - CFG_BASE, val);
}
} else if ((addr >= SRAM_BASE_ADDR) &&
(addr < SRAM_BASE_ADDR + SRAM_BAR_SIZE)) {
writel(val, hdev->pcie_bar[SRAM_BAR_ID] +
@ -4605,7 +4640,11 @@ static int gaudi_debugfs_read64(struct hl_device *hdev, u64 addr, u64 *val)
int rc = 0;
if ((addr >= CFG_BASE) && (addr <= CFG_BASE + CFG_SIZE - sizeof(u64))) {
if (gaudi->hw_cap_initialized & HW_CAP_CLK_GATE) {
if ((gaudi->hw_cap_initialized & HW_CAP_CLK_GATE) &&
(hdev->clock_gating_mask &
GAUDI_CLK_GATE_DEBUGFS_MASK)) {
dev_err_ratelimited(hdev->dev,
"Can't read register - clock gating is enabled!\n");
rc = -EFAULT;
@ -4615,6 +4654,7 @@ static int gaudi_debugfs_read64(struct hl_device *hdev, u64 addr, u64 *val)
*val = (((u64) val_h) << 32) | val_l;
}
} else if ((addr >= SRAM_BASE_ADDR) &&
(addr <= SRAM_BASE_ADDR + SRAM_BAR_SIZE - sizeof(u64))) {
*val = readq(hdev->pcie_bar[SRAM_BAR_ID] +
@ -4651,7 +4691,11 @@ static int gaudi_debugfs_write64(struct hl_device *hdev, u64 addr, u64 val)
int rc = 0;
if ((addr >= CFG_BASE) && (addr <= CFG_BASE + CFG_SIZE - sizeof(u64))) {
if (gaudi->hw_cap_initialized & HW_CAP_CLK_GATE) {
if ((gaudi->hw_cap_initialized & HW_CAP_CLK_GATE) &&
(hdev->clock_gating_mask &
GAUDI_CLK_GATE_DEBUGFS_MASK)) {
dev_err_ratelimited(hdev->dev,
"Can't write register - clock gating is enabled!\n");
rc = -EFAULT;
@ -4660,6 +4704,7 @@ static int gaudi_debugfs_write64(struct hl_device *hdev, u64 addr, u64 val)
WREG32(addr + sizeof(u32) - CFG_BASE,
upper_32_bits(val));
}
} else if ((addr >= SRAM_BASE_ADDR) &&
(addr <= SRAM_BASE_ADDR + SRAM_BAR_SIZE - sizeof(u64))) {
writeq(val, hdev->pcie_bar[SRAM_BAR_ID] +
@ -4881,7 +4926,7 @@ static void gaudi_mmu_prepare(struct hl_device *hdev, u32 asid)
gaudi_mmu_prepare_reg(hdev, mmPSOC_GLOBAL_CONF_TRACE_ARUSER, asid);
gaudi_mmu_prepare_reg(hdev, mmPSOC_GLOBAL_CONF_TRACE_AWUSER, asid);
hdev->asic_funcs->enable_clock_gating(hdev);
hdev->asic_funcs->set_clock_gating(hdev);
mutex_unlock(&gaudi->clk_gate_mutex);
}
@ -5262,7 +5307,7 @@ static void gaudi_print_ecc_info_generic(struct hl_device *hdev,
}
if (disable_clock_gating) {
hdev->asic_funcs->enable_clock_gating(hdev);
hdev->asic_funcs->set_clock_gating(hdev);
mutex_unlock(&gaudi->clk_gate_mutex);
}
}
@ -5749,7 +5794,7 @@ static bool gaudi_tpc_read_interrupts(struct hl_device *hdev, u8 tpc_id,
/* Clear interrupts */
WREG32(mmTPC0_CFG_TPC_INTR_CAUSE + tpc_offset, 0);
hdev->asic_funcs->enable_clock_gating(hdev);
hdev->asic_funcs->set_clock_gating(hdev);
mutex_unlock(&gaudi->clk_gate_mutex);
@ -6265,7 +6310,7 @@ static bool gaudi_is_device_idle(struct hl_device *hdev, u32 *mask,
if (s)
seq_puts(s, "\n");
hdev->asic_funcs->enable_clock_gating(hdev);
hdev->asic_funcs->set_clock_gating(hdev);
mutex_unlock(&gaudi->clk_gate_mutex);
@ -6366,7 +6411,7 @@ static int gaudi_run_tpc_kernel(struct hl_device *hdev, u64 tpc_kernel,
dev_err(hdev->dev,
"Timeout while waiting for TPC%d icache prefetch\n",
tpc_id);
hdev->asic_funcs->enable_clock_gating(hdev);
hdev->asic_funcs->set_clock_gating(hdev);
mutex_unlock(&gaudi->clk_gate_mutex);
return -EIO;
}
@ -6395,7 +6440,7 @@ static int gaudi_run_tpc_kernel(struct hl_device *hdev, u64 tpc_kernel,
1000,
kernel_timeout);
hdev->asic_funcs->enable_clock_gating(hdev);
hdev->asic_funcs->set_clock_gating(hdev);
mutex_unlock(&gaudi->clk_gate_mutex);
if (rc) {
@ -6736,7 +6781,7 @@ static const struct hl_asic_funcs gaudi_funcs = {
.mmu_invalidate_cache = gaudi_mmu_invalidate_cache,
.mmu_invalidate_cache_range = gaudi_mmu_invalidate_cache_range,
.send_heartbeat = gaudi_send_heartbeat,
.enable_clock_gating = gaudi_enable_clock_gating,
.set_clock_gating = gaudi_set_clock_gating,
.disable_clock_gating = gaudi_disable_clock_gating,
.debug_coresight = gaudi_debug_coresight,
.is_device_idle = gaudi_is_device_idle,

View File

@ -88,6 +88,7 @@
#define GOYA_PLDM_MMU_TIMEOUT_USEC (MMU_CONFIG_TIMEOUT_USEC * 100)
#define GOYA_PLDM_QMAN0_TIMEOUT_USEC (HL_DEVICE_TIMEOUT_USEC * 30)
#define GOYA_BOOT_FIT_REQ_TIMEOUT_USEC 1000000 /* 1s */
#define GOYA_MSG_TO_CPU_TIMEOUT_USEC 4000000 /* 4s */
#define GOYA_QMAN0_FENCE_VAL 0xD169B243
@ -2830,6 +2831,9 @@ int goya_send_cpu_message(struct hl_device *hdev, u32 *msg, u16 len,
return 0;
}
if (!timeout)
timeout = GOYA_MSG_TO_CPU_TIMEOUT_USEC;
return hl_fw_send_cpu_message(hdev, GOYA_QUEUE_ID_CPU_PQ, msg, len,
timeout, result);
}
@ -4431,8 +4435,8 @@ static int goya_unmask_irq_arr(struct hl_device *hdev, u32 *irq_arr,
pkt->armcp_pkt.ctl = cpu_to_le32(ARMCP_PACKET_UNMASK_RAZWI_IRQ_ARRAY <<
ARMCP_PKT_CTL_OPCODE_SHIFT);
rc = goya_send_cpu_message(hdev, (u32 *) pkt, total_pkt_size,
HL_DEVICE_TIMEOUT_USEC, &result);
rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) pkt,
total_pkt_size, 0, &result);
if (rc)
dev_err(hdev->dev, "failed to unmask IRQ array\n");
@@ -4464,8 +4468,8 @@ static int goya_unmask_irq(struct hl_device *hdev, u16 event_type)
ARMCP_PKT_CTL_OPCODE_SHIFT);
pkt.value = cpu_to_le64(event_type);
rc = goya_send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
HL_DEVICE_TIMEOUT_USEC, &result);
rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
0, &result);
if (rc)
dev_err(hdev->dev, "failed to unmask RAZWI IRQ %d", event_type);
@@ -5028,14 +5032,14 @@ int goya_armcp_info_get(struct hl_device *hdev)
return 0;
}
static void goya_enable_clock_gating(struct hl_device *hdev)
static void goya_set_clock_gating(struct hl_device *hdev)
{
/* clock gating not supported in Goya */
}
static void goya_disable_clock_gating(struct hl_device *hdev)
{
/* clock gating not supported in Goya */
}
static bool goya_is_device_idle(struct hl_device *hdev, u32 *mask,
@@ -5259,7 +5263,7 @@ static const struct hl_asic_funcs goya_funcs = {
.mmu_invalidate_cache = goya_mmu_invalidate_cache,
.mmu_invalidate_cache_range = goya_mmu_invalidate_cache_range,
.send_heartbeat = goya_send_heartbeat,
.enable_clock_gating = goya_enable_clock_gating,
.set_clock_gating = goya_set_clock_gating,
.disable_clock_gating = goya_disable_clock_gating,
.debug_coresight = goya_debug_coresight,
.is_device_idle = goya_is_device_idle,

View File

@@ -578,8 +578,9 @@ enum hl_pll_frequency {
* @mmu_invalidate_cache_range: flush specific MMU STLB cache lines with
* ASID-VA-size mask.
* @send_heartbeat: send is-alive packet to ArmCP and verify response.
* @enable_clock_gating: enable clock gating for reducing power consumption.
* @disable_clock_gating: disable clock for accessing registers on HBW.
* @set_clock_gating: enable/disable clock gating per engine according to
* the clock gating mask in hdev.
* @disable_clock_gating: disable clock gating completely
* @debug_coresight: perform certain actions on Coresight for debugging.
* @is_device_idle: return true if device is idle, false otherwise.
* @soft_reset_late_init: perform certain actions needed after soft reset.
@@ -587,7 +588,11 @@ enum hl_pll_frequency {
* @hw_queues_unlock: release H/W queues lock.
* @get_pci_id: retrieve PCI ID.
* @get_eeprom_data: retrieve EEPROM data from F/W.
* @send_cpu_message: send buffer to ArmCP.
* @send_cpu_message: send message to F/W. If the message times out, the
* driver will eventually reset the device. The timeout can
* be set by the calling function, or it can be 0, in which
* case the default timeout of the specific ASIC is used.
* @get_hw_state: retrieve the H/W state
* @pci_bars_map: Map PCI BARs.
* @set_dram_bar_base: Set DRAM BAR to map specific device address. Returns
@@ -680,7 +685,7 @@ struct hl_asic_funcs {
int (*mmu_invalidate_cache_range)(struct hl_device *hdev, bool is_hard,
u32 asid, u64 va, u64 size);
int (*send_heartbeat)(struct hl_device *hdev);
void (*enable_clock_gating)(struct hl_device *hdev);
void (*set_clock_gating)(struct hl_device *hdev);
void (*disable_clock_gating)(struct hl_device *hdev);
int (*debug_coresight)(struct hl_device *hdev, void *data);
bool (*is_device_idle)(struct hl_device *hdev, u32 *mask,
@@ -1398,6 +1403,9 @@ struct hl_device_idle_busy_ts {
* @max_power: the max power of the device, as configured by the sysadmin. This
* value is saved so in case of hard-reset, the driver will restore
* this value and update the F/W after the re-initialization
* @clock_gating_mask: bitmask that enables/disables clock gating per engine.
* Each bit represents a different engine. See
* debugfs-driver-habanalabs for details.
* @in_reset: is device in reset flow.
* @curr_pll_profile: current PLL profile.
* @cs_active_cnt: number of active command submissions on this device (active
@@ -1425,7 +1433,6 @@ struct hl_device_idle_busy_ts {
* @init_done: is the initialization of the device done.
* @mmu_enable: is MMU enabled.
* @mmu_huge_page_opt: is MMU huge pages optimization enabled.
* @clock_gating: is clock gating enabled.
* @device_cpu_disabled: is the device CPU disabled (due to timeouts)
* @dma_mask: the dma mask that was set for this device
* @in_debug: is device under debug. This, together with fpriv_list, enforces
@@ -1493,6 +1500,7 @@ struct hl_device {
atomic64_t dram_used_mem;
u64 timeout_jiffies;
u64 max_power;
u64 clock_gating_mask;
atomic_t in_reset;
enum hl_pll_frequency curr_pll_profile;
int cs_active_cnt;
@@ -1514,7 +1522,6 @@ struct hl_device {
u8 dram_default_page_mapping;
u8 pmmu_huge_range;
u8 init_done;
u8 clock_gating;
u8 device_cpu_disabled;
u8 dma_mask;
u8 in_debug;
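The reworked @send_cpu_message documentation above introduces a convention that the goya hunk implements: a caller may pass a timeout of 0 and get the ASIC-specific default. A minimal sketch of that fallback, reusing the 4s constant added by the patch:

#include <stdint.h>
#include <stdio.h>

#define GOYA_MSG_TO_CPU_TIMEOUT_USEC 4000000 /* 4s, from the patch */

/* Mirrors goya_send_cpu_message(): a zero timeout from the caller is
 * replaced by the ASIC default before the message is sent to the F/W. */
static uint64_t effective_timeout(uint64_t caller_timeout_usec)
{
        return caller_timeout_usec ? caller_timeout_usec
                                   : GOYA_MSG_TO_CPU_TIMEOUT_USEC;
}

int main(void)
{
        printf("%llu\n", (unsigned long long)effective_timeout(0));
        printf("%llu\n", (unsigned long long)effective_timeout(1000000));
        return 0;
}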

View File

@@ -232,7 +232,7 @@ static void set_driver_behavior_per_device(struct hl_device *hdev)
hdev->fw_loading = 1;
hdev->cpu_queues_enable = 1;
hdev->heartbeat = 1;
hdev->clock_gating = 1;
hdev->clock_gating_mask = ULONG_MAX;
hdev->reset_pcilink = 0;
hdev->axi_drain = 0;
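With clock_gating_mask seeded to ULONG_MAX, every engine keeps clock gating enabled by default; a user who needs register access to a single engine clears just that engine's bit. A sketch of the mask arithmetic; the engine-ID value is hypothetical and stands in for the corresponding enum gaudi_engine_id entry from the uapi habanalabs.h:

#include <stdint.h>
#include <stdio.h>

enum { GAUDI_ENGINE_ID_TPC_0 = 12 }; /* hypothetical value for illustration */

int main(void)
{
        uint64_t mask = ~0ull;                    /* driver default: gate all */
        mask &= ~(1ull << GAUDI_ENGINE_ID_TPC_0); /* un-gate TPC0 for debug */
        printf("0x%llx\n", (unsigned long long)mask);
        return 0;
}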

View File

@@ -10,7 +10,6 @@
#include <linux/pci.h>
#include <linux/hwmon.h>
#define SENSORS_PKT_TIMEOUT 1000000 /* 1s */
#define HWMON_NR_SENSOR_TYPES (hwmon_pwm + 1)
int hl_build_hwmon_channel_info(struct hl_device *hdev,
@@ -323,7 +322,7 @@ int hl_get_temperature(struct hl_device *hdev,
pkt.type = __cpu_to_le16(attr);
rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
SENSORS_PKT_TIMEOUT, value);
0, value);
if (rc) {
dev_err(hdev->dev,
@@ -350,7 +349,7 @@ int hl_set_temperature(struct hl_device *hdev,
pkt.value = __cpu_to_le64(value);
rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
SENSORS_PKT_TIMEOUT, NULL);
0, NULL);
if (rc)
dev_err(hdev->dev,
@@ -374,7 +373,7 @@ int hl_get_voltage(struct hl_device *hdev,
pkt.type = __cpu_to_le16(attr);
rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
SENSORS_PKT_TIMEOUT, value);
0, value);
if (rc) {
dev_err(hdev->dev,
@@ -400,7 +399,7 @@ int hl_get_current(struct hl_device *hdev,
pkt.type = __cpu_to_le16(attr);
rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
SENSORS_PKT_TIMEOUT, value);
0, value);
if (rc) {
dev_err(hdev->dev,
@@ -426,7 +425,7 @@ int hl_get_fan_speed(struct hl_device *hdev,
pkt.type = __cpu_to_le16(attr);
rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
SENSORS_PKT_TIMEOUT, value);
0, value);
if (rc) {
dev_err(hdev->dev,
@@ -452,7 +451,7 @@ int hl_get_pwm_info(struct hl_device *hdev,
pkt.type = __cpu_to_le16(attr);
rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
SENSORS_PKT_TIMEOUT, value);
0, value);
if (rc) {
dev_err(hdev->dev,
@@ -479,7 +478,7 @@ void hl_set_pwm_info(struct hl_device *hdev, int sensor_index, u32 attr,
pkt.value = cpu_to_le64(value);
rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
SENSORS_PKT_TIMEOUT, NULL);
0, NULL);
if (rc)
dev_err(hdev->dev,
@@ -502,7 +501,7 @@ int hl_set_voltage(struct hl_device *hdev,
pkt.value = __cpu_to_le64(value);
rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
SENSORS_PKT_TIMEOUT, NULL);
0, NULL);
if (rc)
dev_err(hdev->dev,
@@ -527,7 +526,7 @@ int hl_set_current(struct hl_device *hdev,
pkt.value = __cpu_to_le64(value);
rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
SENSORS_PKT_TIMEOUT, NULL);
0, NULL);
if (rc)
dev_err(hdev->dev,

View File

@@ -9,9 +9,6 @@
#include <linux/pci.h>
#define SET_CLK_PKT_TIMEOUT 1000000 /* 1s */
#define SET_PWR_PKT_TIMEOUT 1000000 /* 1s */
long hl_get_frequency(struct hl_device *hdev, u32 pll_index, bool curr)
{
struct armcp_packet pkt;
@@ -29,7 +26,7 @@ long hl_get_frequency(struct hl_device *hdev, u32 pll_index, bool curr)
pkt.pll_index = cpu_to_le32(pll_index);
rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
SET_CLK_PKT_TIMEOUT, &result);
0, &result);
if (rc) {
dev_err(hdev->dev,
@@ -54,7 +51,7 @@ void hl_set_frequency(struct hl_device *hdev, u32 pll_index, u64 freq)
pkt.value = cpu_to_le64(freq);
rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
SET_CLK_PKT_TIMEOUT, NULL);
0, NULL);
if (rc)
dev_err(hdev->dev,
@@ -74,7 +71,7 @@ u64 hl_get_max_power(struct hl_device *hdev)
ARMCP_PKT_CTL_OPCODE_SHIFT);
rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
SET_PWR_PKT_TIMEOUT, &result);
0, &result);
if (rc) {
dev_err(hdev->dev, "Failed to get max power, error %d\n", rc);
@@ -96,7 +93,7 @@ void hl_set_max_power(struct hl_device *hdev, u64 value)
pkt.value = cpu_to_le64(value);
rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt),
SET_PWR_PKT_TIMEOUT, NULL);
0, NULL);
if (rc)
dev_err(hdev->dev, "Failed to set max power, error %d\n", rc);

View File

@@ -406,19 +406,34 @@ static int bareudp6_xmit_skb(struct sk_buff *skb, struct net_device *dev,
return err;
}
static bool bareudp_proto_valid(struct bareudp_dev *bareudp, __be16 proto)
{
if (bareudp->ethertype == proto)
return true;
if (!bareudp->multi_proto_mode)
return false;
if (bareudp->ethertype == htons(ETH_P_MPLS_UC) &&
proto == htons(ETH_P_MPLS_MC))
return true;
if (bareudp->ethertype == htons(ETH_P_IP) &&
proto == htons(ETH_P_IPV6))
return true;
return false;
}
static netdev_tx_t bareudp_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct bareudp_dev *bareudp = netdev_priv(dev);
struct ip_tunnel_info *info = NULL;
int err;
if (skb->protocol != bareudp->ethertype) {
if (!bareudp->multi_proto_mode ||
(skb->protocol != htons(ETH_P_MPLS_MC) &&
skb->protocol != htons(ETH_P_IPV6))) {
err = -EINVAL;
goto tx_error;
}
if (!bareudp_proto_valid(bareudp, skb->protocol)) {
err = -EINVAL;
goto tx_error;
}
info = skb_tunnel_info(skb);
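bareudp_xmit() now delegates the protocol check to bareudp_proto_valid(): the configured ethertype is always accepted, and multi_proto_mode additionally admits MPLS multicast on an MPLS-unicast device and IPv6 on an IPv4 device. A userspace restatement of the predicate so the accepted pairs can be exercised directly (the struct and function here are local stand-ins, not kernel symbols):

#include <arpa/inet.h>
#include <linux/if_ether.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct bareudp_cfg {
        uint16_t ethertype;     /* network byte order, like the kernel field */
        bool multi_proto_mode;
};

static bool proto_valid(const struct bareudp_cfg *b, uint16_t proto)
{
        if (b->ethertype == proto)
                return true;
        if (!b->multi_proto_mode)
                return false;
        if (b->ethertype == htons(ETH_P_MPLS_UC) &&
            proto == htons(ETH_P_MPLS_MC))
                return true;
        if (b->ethertype == htons(ETH_P_IP) &&
            proto == htons(ETH_P_IPV6))
                return true;
        return false;
}

int main(void)
{
        struct bareudp_cfg mpls = { htons(ETH_P_MPLS_UC), true };

        printf("%d\n", proto_valid(&mpls, htons(ETH_P_MPLS_MC))); /* 1 */
        printf("%d\n", proto_valid(&mpls, htons(ETH_P_IPV6)));    /* 0 */
        return 0;
}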

View File

@@ -2446,6 +2446,7 @@ static int gemini_ethernet_port_probe(struct platform_device *pdev)
port->reset = devm_reset_control_get_exclusive(dev, NULL);
if (IS_ERR(port->reset)) {
dev_err(dev, "no reset\n");
clk_disable_unprepare(port->pclk);
return PTR_ERR(port->reset);
}
reset_control_reset(port->reset);
@@ -2501,8 +2502,10 @@ static int gemini_ethernet_port_probe(struct platform_device *pdev)
IRQF_SHARED,
port_names[port->id],
port);
if (ret)
if (ret) {
clk_disable_unprepare(port->pclk);
return ret;
}
ret = register_netdev(netdev);
if (!ret) {

View File

@@ -1093,16 +1093,8 @@ static int hns3_fill_desc(struct hns3_enet_ring *ring, void *priv,
int k, sizeoflast;
dma_addr_t dma;
if (type == DESC_TYPE_SKB) {
struct sk_buff *skb = (struct sk_buff *)priv;
int ret;
ret = hns3_fill_skb_desc(ring, skb, desc);
if (unlikely(ret < 0))
return ret;
dma = dma_map_single(dev, skb->data, size, DMA_TO_DEVICE);
} else if (type == DESC_TYPE_FRAGLIST_SKB) {
if (type == DESC_TYPE_FRAGLIST_SKB ||
type == DESC_TYPE_SKB) {
struct sk_buff *skb = (struct sk_buff *)priv;
dma = dma_map_single(dev, skb->data, size, DMA_TO_DEVICE);
@@ -1439,6 +1431,10 @@ netdev_tx_t hns3_nic_net_xmit(struct sk_buff *skb, struct net_device *netdev)
next_to_use_head = ring->next_to_use;
ret = hns3_fill_skb_desc(ring, skb, &ring->desc[ring->next_to_use]);
if (unlikely(ret < 0))
goto fill_err;
ret = hns3_fill_skb_to_desc(ring, skb, DESC_TYPE_SKB);
if (unlikely(ret < 0))
goto fill_err;
@@ -4153,8 +4149,8 @@ static void hns3_link_status_change(struct hnae3_handle *handle, bool linkup)
return;
if (linkup) {
netif_carrier_on(netdev);
netif_tx_wake_all_queues(netdev);
netif_carrier_on(netdev);
if (netif_msg_link(handle))
netdev_info(netdev, "link up\n");
} else {

View File

@@ -5806,9 +5806,9 @@ static int hclge_add_fd_entry(struct hnae3_handle *handle,
/* to avoid rule conflicts, when the user configures a rule via
* ethtool, we need to clear all arfs rules first
*/
spin_lock_bh(&hdev->fd_rule_lock);
hclge_clear_arfs_rules(handle);
spin_lock_bh(&hdev->fd_rule_lock);
ret = hclge_fd_config_rule(hdev, rule);
spin_unlock_bh(&hdev->fd_rule_lock);
@@ -5851,6 +5851,7 @@ static int hclge_del_fd_entry(struct hnae3_handle *handle,
return ret;
}
/* the caller must hold fd_rule_lock */
static void hclge_del_all_fd_entries(struct hnae3_handle *handle,
bool clear_list)
{
@@ -5863,7 +5864,6 @@ static void hclge_del_all_fd_entries(struct hnae3_handle *handle,
if (!hnae3_dev_fd_supported(hdev))
return;
spin_lock_bh(&hdev->fd_rule_lock);
for_each_set_bit(location, hdev->fd_bmap,
hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1])
hclge_fd_tcam_config(hdev, HCLGE_FD_STAGE_1, true, location,
@@ -5880,8 +5880,6 @@ static void hclge_del_all_fd_entries(struct hnae3_handle *handle,
bitmap_zero(hdev->fd_bmap,
hdev->fd_cfg.rule_num[HCLGE_FD_STAGE_1]);
}
spin_unlock_bh(&hdev->fd_rule_lock);
}
static int hclge_restore_fd_entries(struct hnae3_handle *handle)
@@ -6263,7 +6261,7 @@ static int hclge_add_fd_entry_by_arfs(struct hnae3_handle *handle, u16 queue_id,
u16 flow_id, struct flow_keys *fkeys)
{
struct hclge_vport *vport = hclge_get_vport(handle);
struct hclge_fd_rule_tuples new_tuples;
struct hclge_fd_rule_tuples new_tuples = {};
struct hclge_dev *hdev = vport->back;
struct hclge_fd_rule *rule;
u16 tmp_queue_id;
@@ -6273,19 +6271,17 @@ static int hclge_add_fd_entry_by_arfs(struct hnae3_handle *handle, u16 queue_id,
if (!hnae3_dev_fd_supported(hdev))
return -EOPNOTSUPP;
memset(&new_tuples, 0, sizeof(new_tuples));
hclge_fd_get_flow_tuples(fkeys, &new_tuples);
spin_lock_bh(&hdev->fd_rule_lock);
/* when an fd rule added by the user already exists,
* arfs should not take effect
*/
spin_lock_bh(&hdev->fd_rule_lock);
if (hdev->fd_active_type == HCLGE_FD_EP_ACTIVE) {
spin_unlock_bh(&hdev->fd_rule_lock);
return -EOPNOTSUPP;
}
hclge_fd_get_flow_tuples(fkeys, &new_tuples);
/* check if a flow director filter already exists for this flow;
* if not, create a new filter for it;
* if a filter exists with a different queue id, modify the filter;
@@ -6368,6 +6364,7 @@ static void hclge_rfs_filter_expire(struct hclge_dev *hdev)
#endif
}
/* the caller must hold fd_rule_lock */
static void hclge_clear_arfs_rules(struct hnae3_handle *handle)
{
#ifdef CONFIG_RFS_ACCEL
@@ -6420,10 +6417,14 @@ static void hclge_enable_fd(struct hnae3_handle *handle, bool enable)
hdev->fd_en = enable;
clear = hdev->fd_active_type == HCLGE_FD_ARFS_ACTIVE;
if (!enable)
if (!enable) {
spin_lock_bh(&hdev->fd_rule_lock);
hclge_del_all_fd_entries(handle, clear);
else
spin_unlock_bh(&hdev->fd_rule_lock);
} else {
hclge_restore_fd_entries(handle);
}
}
static void hclge_cfg_mac_mode(struct hclge_dev *hdev, bool enable)
@@ -6886,8 +6887,9 @@ static void hclge_ae_stop(struct hnae3_handle *handle)
int i;
set_bit(HCLGE_STATE_DOWN, &hdev->state);
spin_lock_bh(&hdev->fd_rule_lock);
hclge_clear_arfs_rules(handle);
spin_unlock_bh(&hdev->fd_rule_lock);
/* If it is not PF reset, the firmware will disable the MAC,
* so it only needs to stop the phy here.
@@ -9040,11 +9042,12 @@ int hclge_set_vlan_filter(struct hnae3_handle *handle, __be16 proto,
bool writen_to_tbl = false;
int ret = 0;
/* When device is resetting, firmware is unable to handle
* mailbox. Just record the vlan id, and remove it after
/* When the device is resetting or reset has failed, the firmware is
* unable to handle the mailbox. Just record the vlan id, and remove
* it after the reset has finished.
*/
if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) && is_kill) {
if ((test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) ||
test_bit(HCLGE_STATE_RST_FAIL, &hdev->state)) && is_kill) {
set_bit(vlan_id, vport->vlan_del_fail_bmap);
return -EBUSY;
}
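The hclge hunks above move fd_rule_lock acquisition out of the helpers and into the callers, so clearing the aRFS rules and installing the new ethtool rule happen inside one critical section (see hclge_add_fd_entry() and hclge_ae_stop()). A minimal pthread sketch of that caller-side pattern, with a mutex standing in for the kernel spinlock and the helpers reduced to stubs:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t fd_rule_lock = PTHREAD_MUTEX_INITIALIZER;

/* Stand-ins for hclge_clear_arfs_rules()/hclge_fd_config_rule(); after the
 * patch, both assume fd_rule_lock is already held by the caller. */
static void clear_arfs_rules(void) { puts("arfs rules cleared"); }
static void config_rule(void)      { puts("rule installed"); }

static void add_fd_entry(void)
{
        /* one critical section covers both steps, closing the window in
         * which an aRFS rule could slip in between them */
        pthread_mutex_lock(&fd_rule_lock);
        clear_arfs_rules();
        config_rule();
        pthread_mutex_unlock(&fd_rule_lock);
}

int main(void)
{
        add_fd_entry();
        return 0;
}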

View File

@@ -1592,11 +1592,12 @@ static int hclgevf_set_vlan_filter(struct hnae3_handle *handle,
if (proto != htons(ETH_P_8021Q))
return -EPROTONOSUPPORT;
/* When device is resetting, firmware is unable to handle
* mailbox. Just record the vlan id, and remove it after
/* When the device is resetting or reset has failed, the firmware is
* unable to handle the mailbox. Just record the vlan id, and remove
* it after the reset has finished.
*/
if (test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state) && is_kill) {
if ((test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state) ||
test_bit(HCLGEVF_STATE_RST_FAIL, &hdev->state)) && is_kill) {
set_bit(vlan_id, hdev->vlan_del_fail_bmap);
return -EBUSY;
}
@@ -3439,23 +3440,36 @@ void hclgevf_update_port_base_vlan_info(struct hclgevf_dev *hdev, u16 state,
{
struct hnae3_handle *nic = &hdev->nic;
struct hclge_vf_to_pf_msg send_msg;
int ret;
rtnl_lock();
hclgevf_notify_client(hdev, HNAE3_DOWN_CLIENT);
rtnl_unlock();
if (test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state) ||
test_bit(HCLGEVF_STATE_RST_FAIL, &hdev->state)) {
dev_warn(&hdev->pdev->dev,
"is resetting when updating port based vlan info\n");
rtnl_unlock();
return;
}
ret = hclgevf_notify_client(hdev, HNAE3_DOWN_CLIENT);
if (ret) {
rtnl_unlock();
return;
}
/* send msg to PF and wait for the port based vlan info to be updated */
hclgevf_build_send_msg(&send_msg, HCLGE_MBX_SET_VLAN,
HCLGE_MBX_PORT_BASE_VLAN_CFG);
memcpy(send_msg.data, port_base_vlan_info, data_size);
hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0);
ret = hclgevf_send_mbx_msg(hdev, &send_msg, false, NULL, 0);
if (!ret) {
if (state == HNAE3_PORT_BASE_VLAN_DISABLE)
nic->port_base_vlan_state = state;
else
nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_ENABLE;
}
if (state == HNAE3_PORT_BASE_VLAN_DISABLE)
nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_DISABLE;
else
nic->port_base_vlan_state = HNAE3_PORT_BASE_VLAN_ENABLE;
rtnl_lock();
hclgevf_notify_client(hdev, HNAE3_UP_CLIENT);
rtnl_unlock();
}

View File

@@ -3206,7 +3206,7 @@ static int init_sub_crq_irqs(struct ibmvnic_adapter *adapter)
req_tx_irq_failed:
for (j = 0; j < i; j++) {
free_irq(adapter->tx_scrq[j]->irq, adapter->tx_scrq[j]);
irq_dispose_mapping(adapter->rx_scrq[j]->irq);
irq_dispose_mapping(adapter->tx_scrq[j]->irq);
}
release_sub_crqs(adapter, 1);
return rc;

View File

@@ -301,10 +301,8 @@ static s32 e1000_init_phy_workarounds_pchlan(struct e1000_hw *hw)
*/
hw->dev_spec.ich8lan.ulp_state = e1000_ulp_state_unknown;
ret_val = e1000_disable_ulp_lpt_lp(hw, true);
if (ret_val) {
if (ret_val)
e_warn("Failed to disable ULP\n");
goto out;
}
ret_val = hw->phy.ops.acquire(hw);
if (ret_val) {

View File

@@ -6215,9 +6215,18 @@ static void igb_reset_task(struct work_struct *work)
struct igb_adapter *adapter;
adapter = container_of(work, struct igb_adapter, reset_task);
rtnl_lock();
/* If we're already down or resetting, just bail */
if (test_bit(__IGB_DOWN, &adapter->state) ||
test_bit(__IGB_RESETTING, &adapter->state)) {
rtnl_unlock();
return;
}
igb_dump(adapter);
netdev_err(adapter->netdev, "Reset adapter\n");
igb_reinit_locked(adapter);
rtnl_unlock();
}
/**
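igb_reset_task() now takes the RTNL lock and re-checks the adapter state bits under it, because the device may have been taken down between scheduling the work item and running it. A small standalone model of that check-under-lock pattern (the state bits and names are illustrative, not the driver's):

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

enum { DOWN = 1u << 0, RESETTING = 1u << 1 };

static pthread_mutex_t rtnl = PTHREAD_MUTEX_INITIALIZER; /* rtnl_lock stand-in */
static atomic_uint adapter_state;

static void reset_task(void)
{
        pthread_mutex_lock(&rtnl);
        /* re-check under the lock: another path may have set the bits
         * after the work was scheduled */
        if (atomic_load(&adapter_state) & (DOWN | RESETTING)) {
                pthread_mutex_unlock(&rtnl);
                return;
        }
        puts("reinit adapter");
        pthread_mutex_unlock(&rtnl);
}

int main(void)
{
        atomic_store(&adapter_state, DOWN);
        reset_task();                    /* bails out quietly */
        atomic_store(&adapter_state, 0);
        reset_task();                    /* performs the reset */
        return 0;
}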

View File

@@ -1730,10 +1730,12 @@ static void otx2_reset_task(struct work_struct *work)
if (!netif_running(pf->netdev))
return;
rtnl_lock();
otx2_stop(pf->netdev);
pf->reset_count++;
otx2_open(pf->netdev);
netif_trans_update(pf->netdev);
rtnl_unlock();
}
static const struct net_device_ops otx2_netdev_ops = {
@@ -2111,6 +2113,7 @@ static void otx2_remove(struct pci_dev *pdev)
pf = netdev_priv(netdev);
cancel_work_sync(&pf->reset_task);
/* Disable link notifications */
otx2_cgx_config_linkevents(pf, false);

View File

@@ -617,6 +617,8 @@ static void otx2vf_remove(struct pci_dev *pdev)
vf = netdev_priv(netdev);
cancel_work_sync(&vf->reset_task);
unregister_netdev(netdev);
otx2vf_disable_mbox_intr(vf);
otx2_detach_resources(&vf->mbox);

View File

@@ -171,11 +171,21 @@ static int mt7621_gmac0_rgmii_adjust(struct mtk_eth *eth,
return 0;
}
static void mtk_gmac0_rgmii_adjust(struct mtk_eth *eth, int speed)
static void mtk_gmac0_rgmii_adjust(struct mtk_eth *eth,
phy_interface_t interface, int speed)
{
u32 val;
int ret;
if (interface == PHY_INTERFACE_MODE_TRGMII) {
mtk_w32(eth, TRGMII_MODE, INTF_MODE);
val = 500000000;
ret = clk_set_rate(eth->clks[MTK_CLK_TRGPLL], val);
if (ret)
dev_err(eth->dev, "Failed to set trgmii pll: %d\n", ret);
return;
}
val = (speed == SPEED_1000) ?
INTF_MODE_RGMII_1000 : INTF_MODE_RGMII_10_100;
mtk_w32(eth, val, INTF_MODE);
@@ -262,10 +272,9 @@ static void mtk_mac_config(struct phylink_config *config, unsigned int mode,
state->interface))
goto err_phy;
} else {
if (state->interface !=
PHY_INTERFACE_MODE_TRGMII)
mtk_gmac0_rgmii_adjust(mac->hw,
state->speed);
mtk_gmac0_rgmii_adjust(mac->hw,
state->interface,
state->speed);
/* mt7623_pad_clk_setup */
for (i = 0 ; i < NUM_TRGMII_CTRL; i++)
@@ -2887,6 +2896,8 @@ static int mtk_add_mac(struct mtk_eth *eth, struct device_node *np)
eth->netdev[id]->irq = eth->irq[0];
eth->netdev[id]->dev.of_node = np;
eth->netdev[id]->max_mtu = MTK_MAX_RX_LENGTH - MTK_RX_ETH_HLEN;
return 0;
free_netdev:

View File

@@ -4356,12 +4356,14 @@ static void mlx4_pci_resume(struct pci_dev *pdev)
static void mlx4_shutdown(struct pci_dev *pdev)
{
struct mlx4_dev_persistent *persist = pci_get_drvdata(pdev);
struct mlx4_dev *dev = persist->dev;
mlx4_info(persist->dev, "mlx4_shutdown was called\n");
mutex_lock(&persist->interface_state_mutex);
if (persist->interface_state & MLX4_INTERFACE_STATE_UP)
mlx4_unload_one(pdev);
mutex_unlock(&persist->interface_state_mutex);
mlx4_pci_disable_device(dev);
}
static const struct pci_error_handlers mlx4_err_handler = {

View File

@@ -183,13 +183,16 @@ void mlx5e_rep_bond_unslave(struct mlx5_eswitch *esw,
static bool mlx5e_rep_is_lag_netdev(struct net_device *netdev)
{
struct mlx5e_priv *priv = netdev_priv(netdev);
struct mlx5e_rep_priv *rpriv = priv->ppriv;
struct mlx5e_rep_priv *rpriv;
struct mlx5e_priv *priv;
/* Bail out if the given netdev is not a representor or not a slave in a LAG configuration */
if (!mlx5e_eswitch_rep(netdev) || !bond_slave_get_rtnl(netdev))
return false;
priv = netdev_priv(netdev);
rpriv = priv->ppriv;
/* Egress acl forward to vport is supported only non-uplink representor */
return rpriv->rep->vport != MLX5_VPORT_UPLINK;
}

View File

@@ -551,19 +551,31 @@ static bool mlx5e_restore_tunnel(struct mlx5e_priv *priv, struct sk_buff *skb,
}
}
tun_dst = tun_rx_dst(enc_opts.key.len);
if (!tun_dst) {
WARN_ON_ONCE(true);
if (key.enc_control.addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
tun_dst = __ip_tun_set_dst(key.enc_ipv4.src, key.enc_ipv4.dst,
key.enc_ip.tos, key.enc_ip.ttl,
key.enc_tp.dst, TUNNEL_KEY,
key32_to_tunnel_id(key.enc_key_id.keyid),
enc_opts.key.len);
} else if (key.enc_control.addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
tun_dst = __ipv6_tun_set_dst(&key.enc_ipv6.src, &key.enc_ipv6.dst,
key.enc_ip.tos, key.enc_ip.ttl,
key.enc_tp.dst, 0, TUNNEL_KEY,
key32_to_tunnel_id(key.enc_key_id.keyid),
enc_opts.key.len);
} else {
netdev_dbg(priv->netdev,
"Couldn't restore tunnel, unsupported addr_type: %d\n",
key.enc_control.addr_type);
return false;
}
ip_tunnel_key_init(&tun_dst->u.tun_info.key,
key.enc_ipv4.src, key.enc_ipv4.dst,
key.enc_ip.tos, key.enc_ip.ttl,
0, /* label */
key.enc_tp.src, key.enc_tp.dst,
key32_to_tunnel_id(key.enc_key_id.keyid),
TUNNEL_KEY);
if (!tun_dst) {
netdev_dbg(priv->netdev, "Couldn't restore tunnel, no tun_dst\n");
return false;
}
tun_dst->u.tun_info.key.tp_src = key.enc_tp.src;
if (enc_opts.key.len)
ip_tunnel_info_opts_set(&tun_dst->u.tun_info,

View File

@@ -301,6 +301,8 @@ static int mlx5e_tc_tun_parse_geneve_params(struct mlx5e_priv *priv,
MLX5_SET(fte_match_set_misc, misc_v, geneve_protocol_type, ETH_P_TEB);
}
spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS;
return 0;
}

View File

@@ -80,6 +80,8 @@ static int mlx5e_tc_tun_parse_gretap(struct mlx5e_priv *priv,
gre_key.key, be32_to_cpu(enc_keyid.key->keyid));
}
spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS;
return 0;
}

View File

@@ -136,6 +136,8 @@ static int mlx5e_tc_tun_parse_vxlan(struct mlx5e_priv *priv,
MLX5_SET(fte_match_set_misc, misc_v, vxlan_vni,
be32_to_cpu(enc_keyid.key->keyid));
spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS;
return 0;
}

View File

@@ -417,7 +417,7 @@ static int mlx5e_alloc_rq(struct mlx5e_channel *c,
err = mlx5_wq_ll_create(mdev, &rqp->wq, rqc_wq, &rq->mpwqe.wq,
&rq->wq_ctrl);
if (err)
return err;
goto err_rq_wq_destroy;
rq->mpwqe.wq.db = &rq->mpwqe.wq.db[MLX5_RCV_DBR];
@@ -445,7 +445,7 @@ static int mlx5e_alloc_rq(struct mlx5e_channel *c,
err = mlx5_wq_cyc_create(mdev, &rqp->wq, rqc_wq, &rq->wqe.wq,
&rq->wq_ctrl);
if (err)
return err;
goto err_rq_wq_destroy;
rq->wqe.wq.db = &rq->wqe.wq.db[MLX5_RCV_DBR];
@@ -3062,6 +3062,25 @@ void mlx5e_timestamp_init(struct mlx5e_priv *priv)
priv->tstamp.rx_filter = HWTSTAMP_FILTER_NONE;
}
static void mlx5e_modify_admin_state(struct mlx5_core_dev *mdev,
enum mlx5_port_status state)
{
struct mlx5_eswitch *esw = mdev->priv.eswitch;
int vport_admin_state;
mlx5_set_port_admin_status(mdev, state);
if (!MLX5_ESWITCH_MANAGER(mdev) || mlx5_eswitch_mode(esw) == MLX5_ESWITCH_OFFLOADS)
return;
if (state == MLX5_PORT_UP)
vport_admin_state = MLX5_VPORT_ADMIN_STATE_AUTO;
else
vport_admin_state = MLX5_VPORT_ADMIN_STATE_DOWN;
mlx5_eswitch_set_vport_state(esw, MLX5_VPORT_UPLINK, vport_admin_state);
}
int mlx5e_open_locked(struct net_device *netdev)
{
struct mlx5e_priv *priv = netdev_priv(netdev);
@@ -3094,7 +3113,7 @@ int mlx5e_open(struct net_device *netdev)
mutex_lock(&priv->state_lock);
err = mlx5e_open_locked(netdev);
if (!err)
mlx5_set_port_admin_status(priv->mdev, MLX5_PORT_UP);
mlx5e_modify_admin_state(priv->mdev, MLX5_PORT_UP);
mutex_unlock(&priv->state_lock);
return err;
@@ -3128,7 +3147,7 @@ int mlx5e_close(struct net_device *netdev)
return -ENODEV;
mutex_lock(&priv->state_lock);
mlx5_set_port_admin_status(priv->mdev, MLX5_PORT_DOWN);
mlx5e_modify_admin_state(priv->mdev, MLX5_PORT_DOWN);
err = mlx5e_close_locked(netdev);
mutex_unlock(&priv->state_lock);
@@ -5177,7 +5196,7 @@ static void mlx5e_nic_enable(struct mlx5e_priv *priv)
/* Marking the link as currently not needed by the Driver */
if (!netif_running(netdev))
mlx5_set_port_admin_status(mdev, MLX5_PORT_DOWN);
mlx5e_modify_admin_state(mdev, MLX5_PORT_DOWN);
mlx5e_set_netdev_mtu_boundaries(priv);
mlx5e_set_dev_port_mtu(priv);
@@ -5384,6 +5403,8 @@ int mlx5e_attach_netdev(struct mlx5e_priv *priv)
profile->cleanup_tx(priv);
out:
set_bit(MLX5E_STATE_DESTROYING, &priv->state);
cancel_work_sync(&priv->update_stats_work);
return err;
}

View File

@@ -936,6 +936,7 @@ static int mlx5e_init_rep_rx(struct mlx5e_priv *priv)
static void mlx5e_cleanup_rep_rx(struct mlx5e_priv *priv)
{
mlx5e_ethtool_cleanup_steering(priv);
rep_vport_rx_rule_destroy(priv);
mlx5e_destroy_rep_root_ft(priv);
mlx5e_destroy_ttc_table(priv, &priv->fs.ttc);
@@ -1080,6 +1081,8 @@ static void mlx5e_uplink_rep_enable(struct mlx5e_priv *priv)
mlx5e_rep_tc_enable(priv);
mlx5_modify_vport_admin_state(mdev, MLX5_VPORT_STATE_OP_MOD_UPLINK,
0, 0, MLX5_VPORT_ADMIN_STATE_AUTO);
mlx5_lag_add(mdev, netdev);
priv->events_nb.notifier_call = uplink_rep_async_event;
mlx5_notifier_register(mdev, &priv->events_nb);

View File

@@ -2247,6 +2247,7 @@ static int __parse_cls_flower(struct mlx5e_priv *priv,
match.key->vlan_priority);
*match_level = MLX5_MATCH_L2;
spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS;
}
}

View File

@@ -1632,7 +1632,7 @@ int mlx5_eswitch_enable_locked(struct mlx5_eswitch *esw, int mode, int num_vfs)
mlx5_reload_interface(esw->dev, MLX5_INTERFACE_PROTOCOL_IB);
mlx5_reload_interface(esw->dev, MLX5_INTERFACE_PROTOCOL_ETH);
}
esw_destroy_tsar(esw);
return err;
}
@@ -1687,8 +1687,6 @@ void mlx5_eswitch_disable_locked(struct mlx5_eswitch *esw, bool clear_vf)
else if (esw->mode == MLX5_ESWITCH_OFFLOADS)
esw_offloads_disable(esw);
esw_destroy_tsar(esw);
old_mode = esw->mode;
esw->mode = MLX5_ESWITCH_NONE;
@@ -1698,6 +1696,8 @@ void mlx5_eswitch_disable_locked(struct mlx5_eswitch *esw, bool clear_vf)
mlx5_reload_interface(esw->dev, MLX5_INTERFACE_PROTOCOL_IB);
mlx5_reload_interface(esw->dev, MLX5_INTERFACE_PROTOCOL_ETH);
}
esw_destroy_tsar(esw);
if (clear_vf)
mlx5_eswitch_clear_vf_vports_info(esw);
}
@@ -1949,6 +1949,8 @@ int mlx5_eswitch_set_vport_state(struct mlx5_eswitch *esw,
u16 vport, int link_state)
{
struct mlx5_vport *evport = mlx5_eswitch_get_vport(esw, vport);
int opmod = MLX5_VPORT_STATE_OP_MOD_ESW_VPORT;
int other_vport = 1;
int err = 0;
if (!ESW_ALLOWED(esw))
@@ -1956,15 +1958,17 @@ int mlx5_eswitch_set_vport_state(struct mlx5_eswitch *esw,
if (IS_ERR(evport))
return PTR_ERR(evport);
if (vport == MLX5_VPORT_UPLINK) {
opmod = MLX5_VPORT_STATE_OP_MOD_UPLINK;
other_vport = 0;
vport = 0;
}
mutex_lock(&esw->state_lock);
err = mlx5_modify_vport_admin_state(esw->dev,
MLX5_VPORT_STATE_OP_MOD_ESW_VPORT,
vport, 1, link_state);
err = mlx5_modify_vport_admin_state(esw->dev, opmod, vport, other_vport, link_state);
if (err) {
mlx5_core_warn(esw->dev,
"Failed to set vport %d link state, err = %d",
vport, err);
mlx5_core_warn(esw->dev, "Failed to set vport %d link state, opmod = %d, err = %d",
vport, opmod, err);
goto unlock;
}
@@ -2006,8 +2010,6 @@ int __mlx5_eswitch_set_vport_vlan(struct mlx5_eswitch *esw,
struct mlx5_vport *evport = mlx5_eswitch_get_vport(esw, vport);
int err = 0;
if (!ESW_ALLOWED(esw))
return -EPERM;
if (IS_ERR(evport))
return PTR_ERR(evport);
if (vlan > 4095 || qos > 7)
@@ -2035,6 +2037,9 @@ int mlx5_eswitch_set_vport_vlan(struct mlx5_eswitch *esw,
u8 set_flags = 0;
int err;
if (!ESW_ALLOWED(esw))
return -EPERM;
if (vlan || qos)
set_flags = SET_VLAN_STRIP | SET_VLAN_INSERT;

View File

@@ -685,6 +685,8 @@ static inline int mlx5_eswitch_enable(struct mlx5_eswitch *esw, int num_vfs) { r
static inline void mlx5_eswitch_disable(struct mlx5_eswitch *esw, bool clear_vf) {}
static inline bool mlx5_esw_lag_prereq(struct mlx5_core_dev *dev0, struct mlx5_core_dev *dev1) { return true; }
static inline bool mlx5_eswitch_is_funcs_handler(struct mlx5_core_dev *dev) { return false; }
static inline
int mlx5_eswitch_set_vport_state(struct mlx5_eswitch *esw, u16 vport, int link_state) { return 0; }
static inline const u32 *mlx5_esw_query_functions(struct mlx5_core_dev *dev)
{
return ERR_PTR(-EOPNOTSUPP);

View File

@@ -236,6 +236,15 @@ static struct mlx5_eswitch_rep *mlx5_eswitch_get_rep(struct mlx5_eswitch *esw,
return &esw->offloads.vport_reps[idx];
}
static void
mlx5_eswitch_set_rule_flow_source(struct mlx5_eswitch *esw,
struct mlx5_flow_spec *spec,
struct mlx5_esw_flow_attr *attr)
{
if (MLX5_CAP_ESW_FLOWTABLE(esw->dev, flow_source) &&
attr && attr->in_rep && attr->in_rep->vport == MLX5_VPORT_UPLINK)
spec->flow_context.flow_source = MLX5_FLOW_CONTEXT_FLOW_SOURCE_UPLINK;
}
static void
mlx5_eswitch_set_rule_source_port(struct mlx5_eswitch *esw,
@@ -259,9 +268,6 @@ mlx5_eswitch_set_rule_source_port(struct mlx5_eswitch *esw,
mlx5_eswitch_get_vport_metadata_mask());
spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS_2;
misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters);
if (memchr_inv(misc, 0, MLX5_ST_SZ_BYTES(fte_match_set_misc)))
spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS;
} else {
misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters);
MLX5_SET(fte_match_set_misc, misc, source_port, attr->in_rep->vport);
@@ -279,10 +285,6 @@ mlx5_eswitch_set_rule_source_port(struct mlx5_eswitch *esw,
spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS;
}
if (MLX5_CAP_ESW_FLOWTABLE(esw->dev, flow_source) &&
attr->in_rep->vport == MLX5_VPORT_UPLINK)
spec->flow_context.flow_source = MLX5_FLOW_CONTEXT_FLOW_SOURCE_UPLINK;
}
struct mlx5_flow_handle *
@@ -396,6 +398,8 @@ mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw,
goto err_esw_get;
}
mlx5_eswitch_set_rule_flow_source(esw, spec, attr);
if (mlx5_eswitch_termtbl_required(esw, attr, &flow_act, spec))
rule = mlx5_eswitch_add_termtbl_rule(esw, fdb, spec, attr,
&flow_act, dest, i);
@@ -462,6 +466,7 @@ mlx5_eswitch_add_fwd_rule(struct mlx5_eswitch *esw,
i++;
mlx5_eswitch_set_rule_source_port(esw, spec, attr);
mlx5_eswitch_set_rule_flow_source(esw, spec, attr);
if (attr->outer_match_level != MLX5_MATCH_NONE)
spec->match_criteria_enable |= MLX5_MATCH_OUTER_HEADERS;

View File

@@ -796,7 +796,7 @@ static struct mlx5_flow_table *find_closest_ft_recursive(struct fs_node *root,
return ft;
}
/* If reverse if false then return the first flow table in next priority of
/* If reverse is false then return the first flow table in next priority of
* prio in the tree, else return the last flow table in the previous priority
* of prio in the tree.
*/
@@ -828,34 +828,16 @@ static struct mlx5_flow_table *find_prev_chained_ft(struct fs_prio *prio)
return find_closest_ft(prio, true);
}
static struct fs_prio *find_fwd_ns_prio(struct mlx5_flow_root_namespace *root,
struct mlx5_flow_namespace *ns)
{
struct mlx5_flow_namespace *root_ns = &root->ns;
struct fs_prio *iter_prio;
struct fs_prio *prio;
fs_get_obj(prio, ns->node.parent);
list_for_each_entry(iter_prio, &root_ns->node.children, node.list) {
if (iter_prio == prio &&
!list_is_last(&prio->node.children, &iter_prio->node.list))
return list_next_entry(iter_prio, node.list);
}
return NULL;
}
static struct mlx5_flow_table *find_next_fwd_ft(struct mlx5_flow_table *ft,
struct mlx5_flow_act *flow_act)
{
struct mlx5_flow_root_namespace *root = find_root(&ft->node);
struct fs_prio *prio;
bool next_ns;
if (flow_act->action & MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_NS)
prio = find_fwd_ns_prio(root, ft->ns);
else
fs_get_obj(prio, ft->node.parent);
next_ns = flow_act->action & MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_NS;
fs_get_obj(prio, next_ns ? ft->ns->node.parent : ft->node.parent);
return (prio) ? find_next_chained_ft(prio) : NULL;
return find_next_chained_ft(prio);
}
static int connect_fts_in_prio(struct mlx5_core_dev *dev,

View File

@@ -273,17 +273,17 @@ static int mlx5_extts_configure(struct ptp_clock_info *ptp,
if (rq->extts.index >= clock->ptp_info.n_pins)
return -EINVAL;
pin = ptp_find_pin(clock->ptp, PTP_PF_EXTTS, rq->extts.index);
if (pin < 0)
return -EBUSY;
if (on) {
pin = ptp_find_pin(clock->ptp, PTP_PF_EXTTS, rq->extts.index);
if (pin < 0)
return -EBUSY;
pin_mode = MLX5_PIN_MODE_IN;
pattern = !!(rq->extts.flags & PTP_FALLING_EDGE);
field_select = MLX5_MTPPS_FS_PIN_MODE |
MLX5_MTPPS_FS_PATTERN |
MLX5_MTPPS_FS_ENABLE;
} else {
pin = rq->extts.index;
field_select = MLX5_MTPPS_FS_ENABLE;
}
@@ -331,12 +331,12 @@ static int mlx5_perout_configure(struct ptp_clock_info *ptp,
if (rq->perout.index >= clock->ptp_info.n_pins)
return -EINVAL;
if (on) {
pin = ptp_find_pin(clock->ptp, PTP_PF_PEROUT,
rq->perout.index);
if (pin < 0)
return -EBUSY;
pin = ptp_find_pin(clock->ptp, PTP_PF_PEROUT,
rq->perout.index);
if (pin < 0)
return -EBUSY;
if (on) {
pin_mode = MLX5_PIN_MODE_OUT;
pattern = MLX5_OUT_PATTERN_PERIODIC;
ts.tv_sec = rq->perout.period.sec;
@@ -362,7 +362,6 @@ static int mlx5_perout_configure(struct ptp_clock_info *ptp,
MLX5_MTPPS_FS_ENABLE |
MLX5_MTPPS_FS_TIME_STAMP;
} else {
pin = rq->perout.index;
field_select = MLX5_MTPPS_FS_ENABLE;
}
@@ -409,10 +408,31 @@ static int mlx5_ptp_enable(struct ptp_clock_info *ptp,
return 0;
}
enum {
MLX5_MTPPS_REG_CAP_PIN_X_MODE_SUPPORT_PPS_IN = BIT(0),
MLX5_MTPPS_REG_CAP_PIN_X_MODE_SUPPORT_PPS_OUT = BIT(1),
};
static int mlx5_ptp_verify(struct ptp_clock_info *ptp, unsigned int pin,
enum ptp_pin_function func, unsigned int chan)
{
return (func == PTP_PF_PHYSYNC) ? -EOPNOTSUPP : 0;
struct mlx5_clock *clock = container_of(ptp, struct mlx5_clock,
ptp_info);
switch (func) {
case PTP_PF_NONE:
return 0;
case PTP_PF_EXTTS:
return !(clock->pps_info.pin_caps[pin] &
MLX5_MTPPS_REG_CAP_PIN_X_MODE_SUPPORT_PPS_IN);
case PTP_PF_PEROUT:
return !(clock->pps_info.pin_caps[pin] &
MLX5_MTPPS_REG_CAP_PIN_X_MODE_SUPPORT_PPS_OUT);
default:
return -EOPNOTSUPP;
}
return -EOPNOTSUPP;
}
static const struct ptp_clock_info mlx5_ptp_clock_info = {
@@ -432,6 +452,38 @@ static const struct ptp_clock_info mlx5_ptp_clock_info = {
.verify = NULL,
};
static int mlx5_query_mtpps_pin_mode(struct mlx5_core_dev *mdev, u8 pin,
u32 *mtpps, u32 mtpps_size)
{
u32 in[MLX5_ST_SZ_DW(mtpps_reg)] = {};
MLX5_SET(mtpps_reg, in, pin, pin);
return mlx5_core_access_reg(mdev, in, sizeof(in), mtpps,
mtpps_size, MLX5_REG_MTPPS, 0, 0);
}
static int mlx5_get_pps_pin_mode(struct mlx5_clock *clock, u8 pin)
{
struct mlx5_core_dev *mdev = clock->mdev;
u32 out[MLX5_ST_SZ_DW(mtpps_reg)] = {};
u8 mode;
int err;
err = mlx5_query_mtpps_pin_mode(mdev, pin, out, sizeof(out));
if (err || !MLX5_GET(mtpps_reg, out, enable))
return PTP_PF_NONE;
mode = MLX5_GET(mtpps_reg, out, pin_mode);
if (mode == MLX5_PIN_MODE_IN)
return PTP_PF_EXTTS;
else if (mode == MLX5_PIN_MODE_OUT)
return PTP_PF_PEROUT;
return PTP_PF_NONE;
}
static int mlx5_init_pin_config(struct mlx5_clock *clock)
{
int i;
@@ -451,8 +503,8 @@ static int mlx5_init_pin_config(struct mlx5_clock *clock)
sizeof(clock->ptp_info.pin_config[i].name),
"mlx5_pps%d", i);
clock->ptp_info.pin_config[i].index = i;
clock->ptp_info.pin_config[i].func = PTP_PF_NONE;
clock->ptp_info.pin_config[i].chan = i;
clock->ptp_info.pin_config[i].func = mlx5_get_pps_pin_mode(clock, i);
clock->ptp_info.pin_config[i].chan = 0;
}
return 0;
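With the new .verify implementation, a userspace request to repurpose a pin is validated against the MTPPS capability bits instead of only rejecting PTP_PF_PHYSYNC. A sketch of how such a request reaches the driver through the standard PTP character device (the /dev/ptp0 path is an assumption; use whichever clock the NIC registers):

#include <fcntl.h>
#include <linux/ptp_clock.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>

int main(void)
{
        struct ptp_pin_desc desc;
        int fd = open("/dev/ptp0", O_RDWR);

        if (fd < 0) {
                perror("open");
                return 1;
        }

        memset(&desc, 0, sizeof(desc));
        desc.index = 0;           /* pin 0 */
        desc.func = PTP_PF_EXTTS; /* request external-timestamp mode */
        desc.chan = 0;

        /* the kernel routes this through the driver's .verify callback,
         * which now checks the pin's PPS-in capability bit */
        if (ioctl(fd, PTP_PIN_SETFUNC, &desc) < 0)
                perror("PTP_PIN_SETFUNC");

        close(fd);
        return 0;
}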

View File

@@ -1815,7 +1815,7 @@ static int mlxsw_core_reg_access_emad(struct mlxsw_core *mlxsw_core,
err = mlxsw_emad_reg_access(mlxsw_core, reg, payload, type, trans,
bulk_list, cb, cb_priv, tid);
if (err) {
kfree(trans);
kfree_rcu(trans, rcu);
return err;
}
return 0;
@@ -2053,11 +2053,13 @@ void mlxsw_core_skb_receive(struct mlxsw_core *mlxsw_core, struct sk_buff *skb,
break;
}
}
rcu_read_unlock();
if (!found)
if (!found) {
rcu_read_unlock();
goto drop;
}
rxl->func(skb, local_port, rxl_item->priv);
rcu_read_unlock();
return;
drop:
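Both mlxsw fixes are about object lifetime on the receive path: the trans object is now freed with kfree_rcu(), and the listener callback runs before rcu_read_unlock(), so a concurrent unregister cannot free memory the handler is still using. A crude standalone model of that ordering, with a pthread rwlock standing in for RCU (all names are illustrative):

#include <pthread.h>
#include <stdio.h>

/* read lock as a stand-in for rcu_read_lock(): the listener must not be
 * torn down while a receive path is inside its handler */
static pthread_rwlock_t listeners_lock = PTHREAD_RWLOCK_INITIALIZER;

struct listener {
        void (*func)(const char *payload);
};

static void handler(const char *payload) { printf("rx: %s\n", payload); }
static struct listener rxl = { handler };
static struct listener *registered = &rxl;

static void skb_receive(const char *payload)
{
        pthread_rwlock_rdlock(&listeners_lock);
        struct listener *l = registered;
        if (!l) {
                pthread_rwlock_unlock(&listeners_lock);
                return; /* drop */
        }
        /* call the handler *before* dropping the read side, mirroring the
         * fix that moved rxl->func() ahead of rcu_read_unlock() */
        l->func(payload);
        pthread_rwlock_unlock(&listeners_lock);
}

int main(void)
{
        skb_receive("hello");
        return 0;
}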

Some files were not shown because too many files have changed in this diff.