mirror of https://gitee.com/openkylin/linux.git
This is the 4.19-rc6 release

-----BEGIN PGP SIGNATURE-----

iQIzBAABCAAdFiEEZH8oZUiU471FcZm+ONu9yGCSaT4FAluw4MIACgkQONu9yGCS
aT7+8xAAiYnc4khUsxeInm3z44WPfRX1+UF51frTNSY5C8Nn5nvRSnTUNLuKkkrz
8RbwCL6UYyJxF9I/oZdHPsPOD4IxXkQY55tBjz7ZbSBIFEwYM6RJMm8mAGlXY7wq
VyWA5MhlpGHM9DjrguB4DMRipnrSc06CVAnC+ZyKLjzblzU1Wdf2dYu+AW9pUVXP
j4r74lFED5djPY1xfqfzEwmYRCeEGYGx7zMqT3GrrF5uFPqj1H6O5klEsAhIZvdl
IWnJTU2coC8R/Sd17g4lHWPIeQNnMUGIUbu+PhIrZ/lDwFxlocg4BvarPXEdzgYi
gdZzKBfovpEsSu5RCQsKWG4IGQxY7I1p70IOP9eqEFHZy77qT1YcHVAWrK1Y/bJd
UA08gUOSzRnhKkNR3+PsaMflUOl9WkpyHECZu394cyRGMutSS50aWkavJPJ/o1Qi
D/oGqZLLcKFyuNcchG+Met1TzY3LvYEDgSburqwqeUZWtAsGs8kmiiq7qvmXx4zV
IcgM8ERqJ8mbfhfsXQU7hwydIrPJ3JdIq19RnM5ajbv2Q4C/qJCyAKkQoacrlKR4
aiow/qvyNrP80rpXfPJB8/8PiWeDtAnnGhM+xySZNlw3t8GR6NYpUkIzf5TdkSb3
C8KuKg6FY9QAS62fv+5KK3LB/wbQanxaPNruQFGe5K1iDQ5Fvzw=
=dMl4
-----END PGP SIGNATURE-----

BackMerge v4.19-rc6 into drm-next

I have some pulls based on rc6, and I prefer to have an explicit backmerge.

Signed-off-by: Dave Airlie <airlied@redhat.com>
commit 6004f172b3
@@ -1,4 +1,4 @@
-Device-Tree bindings for input/gpio_keys.c keyboard driver
+Device-Tree bindings for input/keyboard/gpio_keys.c keyboard driver

 Required properties:
 	- compatible = "gpio-keys";
@@ -33,4 +33,3 @@ Video Function Calls
     video-clear-buffer
     video-set-streamtype
     video-set-format
-    video-set-attributes
MAINTAINERS: 22 changed lines
@@ -9721,13 +9721,6 @@ Q: http://patchwork.linuxtv.org/project/linux-media/list/
 S: Maintained
 F: drivers/media/dvb-frontends/mn88473*

-PCI DRIVER FOR MOBIVEIL PCIE IP
-M: Subrahmanya Lingappa <l.subrahmanya@mobiveil.co.in>
-L: linux-pci@vger.kernel.org
-S: Supported
-F: Documentation/devicetree/bindings/pci/mobiveil-pcie.txt
-F: drivers/pci/controller/pcie-mobiveil.c
-
 MODULE SUPPORT
 M: Jessica Yu <jeyu@kernel.org>
 T: git git://git.kernel.org/pub/scm/linux/kernel/git/jeyu/linux.git modules-next
@@ -10957,7 +10950,7 @@ M: Willy Tarreau <willy@haproxy.com>
 M: Ksenija Stanojevic <ksenija.stanojevic@gmail.com>
 S: Odd Fixes
 F: Documentation/auxdisplay/lcd-panel-cgram.txt
-F: drivers/misc/panel.c
+F: drivers/auxdisplay/panel.c

 PARALLEL PORT SUBSYSTEM
 M: Sudip Mukherjee <sudipm.mukherjee@gmail.com>
@@ -11145,6 +11138,13 @@ F: include/uapi/linux/switchtec_ioctl.h
 F: include/linux/switchtec.h
 F: drivers/ntb/hw/mscc/

+PCI DRIVER FOR MOBIVEIL PCIE IP
+M: Subrahmanya Lingappa <l.subrahmanya@mobiveil.co.in>
+L: linux-pci@vger.kernel.org
+S: Supported
+F: Documentation/devicetree/bindings/pci/mobiveil-pcie.txt
+F: drivers/pci/controller/pcie-mobiveil.c
+
 PCI DRIVER FOR MVEBU (Marvell Armada 370 and Armada XP SOC support)
 M: Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
 M: Jason Cooper <jason@lakedaemon.net>
@@ -11211,8 +11211,14 @@ F: tools/pci/

 PCI ENHANCED ERROR HANDLING (EEH) FOR POWERPC
 M: Russell Currey <ruscur@russell.cc>
+M: Sam Bobroff <sbobroff@linux.ibm.com>
+M: Oliver O'Halloran <oohall@gmail.com>
 L: linuxppc-dev@lists.ozlabs.org
 S: Supported
+F: Documentation/PCI/pci-error-recovery.txt
+F: drivers/pci/pcie/aer.c
+F: drivers/pci/pcie/dpc.c
+F: drivers/pci/pcie/err.c
 F: Documentation/powerpc/eeh-pci-error-recovery.txt
 F: arch/powerpc/kernel/eeh*.c
 F: arch/powerpc/platforms/*/eeh*.c
Makefile: 2 changed lines
@@ -2,7 +2,7 @@
 VERSION = 4
 PATCHLEVEL = 19
 SUBLEVEL = 0
-EXTRAVERSION = -rc5
+EXTRAVERSION = -rc6
 NAME = Merciless Moray

 # *DOCUMENTATION*
@@ -9,6 +9,7 @@ extern void ppc_printk_progress(char *s, unsigned short hex);

 extern unsigned int rtas_data;
 extern unsigned long long memory_limit;
+extern bool init_mem_is_free;
 extern unsigned long klimit;
 extern void *zalloc_maybe_bootmem(size_t size, gfp_t mask);
@@ -1314,9 +1314,7 @@ EXC_REAL_BEGIN(denorm_exception_hv, 0x1500, 0x100)

 #ifdef CONFIG_PPC_DENORMALISATION
     mfspr   r10,SPRN_HSRR1
-    mfspr   r11,SPRN_HSRR0          /* save HSRR0 */
     andis.  r10,r10,(HSRR1_DENORM)@h /* denorm? */
-    addi    r11,r11,-4              /* HSRR0 is next instruction */
     bne+    denorm_assist
 #endif

@@ -1382,6 +1380,8 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
  */
     XVCPSGNDP32(32)
 denorm_done:
+    mfspr   r11,SPRN_HSRR0
+    subi    r11,r11,4
     mtspr   SPRN_HSRR0,r11
     mtcrf   0x80,r9
     ld      r9,PACA_EXGEN+EX_R9(r13)
@@ -176,13 +176,27 @@ _GLOBAL(tm_reclaim)
     std     r1, PACATMSCRATCH(r13)
     ld      r1, PACAR1(r13)

-    /* Store the PPR in r11 and reset to decent value */
     std     r11, GPR11(r1)          /* Temporary stash */

+    /*
+     * Move the saved user r1 to the kernel stack in case PACATMSCRATCH is
+     * clobbered by an exception once we turn on MSR_RI below.
+     */
+    ld      r11, PACATMSCRATCH(r13)
+    std     r11, GPR1(r1)
+
+    /*
+     * Store r13 away so we can free up the scratch SPR for the SLB fault
+     * handler (needed once we start accessing the thread_struct).
+     */
+    GET_SCRATCH0(r11)
+    std     r11, GPR13(r1)
+
     /* Reset MSR RI so we can take SLB faults again */
     li      r11, MSR_RI
     mtmsrd  r11, 1

+    /* Store the PPR in r11 and reset to decent value */
     mfspr   r11, SPRN_PPR
     HMT_MEDIUM

@@ -207,11 +221,11 @@ _GLOBAL(tm_reclaim)
     SAVE_GPR(8, r7)                 /* user r8 */
     SAVE_GPR(9, r7)                 /* user r9 */
     SAVE_GPR(10, r7)                /* user r10 */
-    ld      r3, PACATMSCRATCH(r13)  /* user r1 */
+    ld      r3, GPR1(r1)            /* user r1 */
     ld      r4, GPR7(r1)            /* user r7 */
     ld      r5, GPR11(r1)           /* user r11 */
     ld      r6, GPR12(r1)           /* user r12 */
-    GET_SCRATCH0(8)                 /* user r13 */
+    ld      r8, GPR13(r1)           /* user r13 */
     std     r3, GPR1(r7)
     std     r4, GPR7(r7)
     std     r5, GPR11(r7)
@@ -443,6 +443,9 @@ _GLOBAL(csum_ipv6_magic)
     addc    r0, r8, r9
     ld      r10, 0(r4)
     ld      r11, 8(r4)
+#ifdef CONFIG_CPU_LITTLE_ENDIAN
+    rotldi  r5, r5, 8
+#endif
     adde    r0, r0, r10
     add     r5, r5, r7
     adde    r0, r0, r11
@@ -28,6 +28,12 @@ static int __patch_instruction(unsigned int *exec_addr, unsigned int instr,
 {
     int err;

+    /* Make sure we aren't patching a freed init section */
+    if (init_mem_is_free && init_section_contains(exec_addr, 4)) {
+        pr_debug("Skipping init section patching addr: 0x%px\n", exec_addr);
+        return 0;
+    }
+
     __put_user_size(instr, patch_addr, 4, err);
     if (err)
         return err;
@@ -63,6 +63,7 @@
 #endif

 unsigned long long memory_limit;
+bool init_mem_is_free;

 #ifdef CONFIG_HIGHMEM
 pte_t *kmap_pte;

@@ -396,6 +397,7 @@ void free_initmem(void)
 {
     ppc_md.progress = ppc_printk_progress;
     mark_initmem_nx();
+    init_mem_is_free = true;
     free_initmem_default(POISON_FREE_INITMEM);
 }
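The two powerpc hunks above cooperate: free_initmem() raises init_mem_is_free, and __patch_instruction() (earlier hunk) checks that flag before writing into what may now be freed init memory. A standalone C sketch of the handshake; the two flag names mirror the kernel's, everything else is illustrative:

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

static bool init_mem_is_free;           /* set once at free_initmem(), never cleared */
static char init_section[64];           /* stand-in for the kernel's init region */

static bool init_section_contains(void *addr, size_t len)
{
    char *p = addr;
    return p >= init_section && p + len <= init_section + sizeof(init_section);
}

static int patch_instruction(void *addr, unsigned int insn)
{
    /* refuse to patch memory that has already been handed back */
    if (init_mem_is_free && init_section_contains(addr, 4)) {
        printf("skipping freed init section at %p\n", addr);
        return 0;
    }
    *(unsigned int *)addr = insn;
    return 0;
}

int main(void)
{
    patch_instruction(init_section, 0x60000000);  /* still live: patched */
    init_mem_is_free = true;                      /* free_initmem() analogue */
    patch_instruction(init_section, 0x60000000);  /* now skipped */
    return 0;
}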
@@ -1204,7 +1204,9 @@ int find_and_online_cpu_nid(int cpu)
     int new_nid;

     /* Use associativity from first thread for all siblings */
-    vphn_get_associativity(cpu, associativity);
+    if (vphn_get_associativity(cpu, associativity))
+        return cpu_to_node(cpu);

     new_nid = associativity_to_nid(associativity);
     if (new_nid < 0 || !node_possible(new_nid))
         new_nid = first_online_node;

@@ -1452,7 +1454,8 @@ static struct timer_list topology_timer;

 static void reset_topology_timer(void)
 {
-    mod_timer(&topology_timer, jiffies + topology_timer_secs * HZ);
+    if (vphn_enabled)
+        mod_timer(&topology_timer, jiffies + topology_timer_secs * HZ);
 }

 #ifdef CONFIG_SMP
@@ -45,7 +45,7 @@ static void scan_pkey_feature(void)
      * Since any pkey can be used for data or execute, we will just treat
      * all keys as equal and track them as one entity.
      */
-    pkeys_total = be32_to_cpu(vals[0]);
+    pkeys_total = vals[0];
     pkeys_devtree_defined = true;
 }
@@ -276,7 +276,7 @@ long pnv_pci_ioda2_table_alloc_pages(int nid, __u64 bus_offset,
     level_shift = entries_shift + 3;
     level_shift = max_t(unsigned int, level_shift, PAGE_SHIFT);

-    if ((level_shift - 3) * levels + page_shift >= 60)
+    if ((level_shift - 3) * levels + page_shift >= 55)
         return -EINVAL;

     /* Allocate TCE table */
@@ -0,0 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_RISCV_PROTOTYPES_H
+
+#include <linux/ftrace.h>
+#include <asm-generic/asm-prototypes.h>
+
+#endif /* _ASM_RISCV_PROTOTYPES_H */
@@ -25,20 +25,6 @@ ENTRY(get_sev_encryption_bit)
     push    %ebx
     push    %ecx
     push    %edx
-    push    %edi
-
-    /*
-     * RIP-relative addressing is needed to access the encryption bit
-     * variable. Since we are running in 32-bit mode we need this call/pop
-     * sequence to get the proper relative addressing.
-     */
-    call    1f
-1:  popl    %edi
-    subl    $1b, %edi
-
-    movl    enc_bit(%edi), %eax
-    cmpl    $0, %eax
-    jge     .Lsev_exit

     /* Check if running under a hypervisor */
     movl    $1, %eax

@@ -69,15 +55,12 @@ ENTRY(get_sev_encryption_bit)

     movl    %ebx, %eax
     andl    $0x3f, %eax             /* Return the encryption bit location */
-    movl    %eax, enc_bit(%edi)
     jmp     .Lsev_exit

 .Lno_sev:
     xor     %eax, %eax
-    movl    %eax, enc_bit(%edi)

 .Lsev_exit:
-    pop     %edi
     pop     %edx
     pop     %ecx
     pop     %ebx

@@ -113,8 +96,6 @@ ENTRY(set_sev_encryption_mask)
 ENDPROC(set_sev_encryption_mask)

     .data
-enc_bit:
-    .int    0xffffffff

 #ifdef CONFIG_AMD_MEM_ENCRYPT
     .balign 8
@@ -322,16 +322,11 @@ void blk_mq_queue_tag_busy_iter(struct request_queue *q, busy_iter_fn *fn,

     /*
      * __blk_mq_update_nr_hw_queues will update the nr_hw_queues and
-     * queue_hw_ctx after freeze the queue. So we could use q_usage_counter
-     * to avoid race with it. __blk_mq_update_nr_hw_queues will users
-     * synchronize_rcu to ensure all of the users go out of the critical
-     * section below and see zeroed q_usage_counter.
+     * queue_hw_ctx after freeze the queue, so we use q_usage_counter
+     * to avoid race with it.
      */
-    rcu_read_lock();
-    if (percpu_ref_is_zero(&q->q_usage_counter)) {
-        rcu_read_unlock();
+    if (!percpu_ref_tryget(&q->q_usage_counter))
         return;
-    }

     queue_for_each_hw_ctx(q, hctx, i) {
         struct blk_mq_tags *tags = hctx->tags;

@@ -347,7 +342,7 @@ void blk_mq_queue_tag_busy_iter(struct request_queue *q, busy_iter_fn *fn,
         bt_for_each(hctx, &tags->breserved_tags, fn, priv, true);
         bt_for_each(hctx, &tags->bitmap_tags, fn, priv, false);
     }
-    rcu_read_unlock();
+    blk_queue_exit(q);
 }

 static int bt_alloc(struct sbitmap_queue *bt, unsigned int depth,
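The blk-mq change above replaces an RCU read lock plus an explicit is-zero test with a single try-get on the queue's usage counter, paired with blk_queue_exit(); holding a real reference instead of an RCU section is what allows the iteration callbacks to block. A minimal userspace sketch of the try-get pattern, with a plain C atomic standing in for the percpu refcount (all names illustrative):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_int q_usage_counter = 1;  /* 0 means the queue is frozen */

static bool queue_enter(void)           /* percpu_ref_tryget() analogue */
{
    int v = atomic_load(&q_usage_counter);
    while (v > 0)
        if (atomic_compare_exchange_weak(&q_usage_counter, &v, v + 1))
            return true;
    return false;                       /* frozen: never resurrect a zero ref */
}

static void queue_exit(void)            /* blk_queue_exit() analogue */
{
    atomic_fetch_sub(&q_usage_counter, 1);
}

static void busy_iter(void)
{
    if (!queue_enter())                 /* queue frozen: nothing to iterate */
        return;
    puts("iterating tags; callbacks may block safely while the ref is held");
    queue_exit();
}

int main(void)
{
    busy_iter();
    return 0;
}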
@@ -1628,7 +1628,7 @@ void blk_mq_flush_plug_list(struct blk_plug *plug, bool from_schedule)
         BUG_ON(!rq->q);
         if (rq->mq_ctx != this_ctx) {
             if (this_ctx) {
-                trace_block_unplug(this_q, depth, from_schedule);
+                trace_block_unplug(this_q, depth, !from_schedule);
                 blk_mq_sched_insert_requests(this_q, this_ctx,
                                              &ctx_list,
                                              from_schedule);

@@ -1648,7 +1648,7 @@ void blk_mq_flush_plug_list(struct blk_plug *plug, bool from_schedule)
      * on 'ctx_list'. Do those.
      */
     if (this_ctx) {
-        trace_block_unplug(this_q, depth, from_schedule);
+        trace_block_unplug(this_q, depth, !from_schedule);
         blk_mq_sched_insert_requests(this_q, this_ctx, &ctx_list,
                                      from_schedule);
     }
@@ -609,7 +609,7 @@ void elv_drain_elevator(struct request_queue *q)

     while (e->type->ops.sq.elevator_dispatch_fn(q, 1))
         ;
-    if (q->nr_sorted && printed++ < 10) {
+    if (q->nr_sorted && !blk_queue_is_zoned(q) && printed++ < 10 ) {
         printk(KERN_ERR "%s: forced dispatching is broken "
                "(nr_sorted=%u), please report this\n",
                q->elevator->type->elevator_name, q->nr_sorted);
@@ -2670,8 +2670,8 @@ static void purge_persistent_grants(struct blkfront_info *info)
             list_del(&gnt_list_entry->node);
             gnttab_end_foreign_access(gnt_list_entry->gref, 0, 0UL);
             rinfo->persistent_gnts_c--;
-            __free_page(gnt_list_entry->page);
-            kfree(gnt_list_entry);
+            gnt_list_entry->gref = GRANT_INVALID_REF;
+            list_add_tail(&gnt_list_entry->node, &rinfo->grants);
         }

         spin_unlock_irqrestore(&rinfo->ring_lock, flags);
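The xen-blkfront fix stops freeing purged persistent grants and instead invalidates them and returns them to the ring's grant pool, so preallocated entries are not lost. A small sketch of the recycle-instead-of-free pattern; the list layout here is illustrative, not blkfront's:

#include <stdio.h>

#define GRANT_INVALID_REF 0

struct gnt {
    int gref;
    struct gnt *next;
};

static struct gnt *free_list;

static void purge_entry(struct gnt *e)
{
    /* old code: free(e) -- the slot was permanently lost to future I/O */
    e->gref = GRANT_INVALID_REF;   /* mark unusable until re-granted */
    e->next = free_list;           /* new code: recycle onto the pool */
    free_list = e;
}

int main(void)
{
    struct gnt g = { .gref = 42 };
    purge_entry(&g);
    printf("recycled entry, gref=%d\n", free_list->gref);
    return 0;
}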
@@ -180,26 +180,29 @@ static int __init at91sam926x_pit_dt_init(struct device_node *node)
     data->base = of_iomap(node, 0);
     if (!data->base) {
         pr_err("Could not map PIT address\n");
-        return -ENXIO;
+        ret = -ENXIO;
+        goto exit;
     }

     data->mck = of_clk_get(node, 0);
     if (IS_ERR(data->mck)) {
         pr_err("Unable to get mck clk\n");
-        return PTR_ERR(data->mck);
+        ret = PTR_ERR(data->mck);
+        goto exit;
     }

     ret = clk_prepare_enable(data->mck);
     if (ret) {
         pr_err("Unable to enable mck\n");
-        return ret;
+        goto exit;
     }

     /* Get the interrupts property */
     data->irq = irq_of_parse_and_map(node, 0);
     if (!data->irq) {
         pr_err("Unable to get IRQ from DT\n");
-        return -EINVAL;
+        ret = -EINVAL;
+        goto exit;
     }

     /*

@@ -227,7 +230,7 @@ static int __init at91sam926x_pit_dt_init(struct device_node *node)
     ret = clocksource_register_hz(&data->clksrc, pit_rate);
     if (ret) {
         pr_err("Failed to register clocksource\n");
-        return ret;
+        goto exit;
     }

     /* Set up irq handler */

@@ -236,7 +239,8 @@ static int __init at91sam926x_pit_dt_init(struct device_node *node)
                       "at91_tick", data);
     if (ret) {
         pr_err("Unable to setup IRQ\n");
-        return ret;
+        clocksource_unregister(&data->clksrc);
+        goto exit;
     }

     /* Set up and register clockevents */

@@ -254,6 +258,10 @@ static int __init at91sam926x_pit_dt_init(struct device_node *node)
     clockevents_register_device(&data->clkevt);

     return 0;
+
+exit:
+    kfree(data);
+    return ret;
 }
 TIMER_OF_DECLARE(at91sam926x_pit, "atmel,at91sam9260-pit",
                  at91sam926x_pit_dt_init);
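The PIT rework above converts every early return into a goto to a single exit label that frees the allocation made at the top of the probe function, closing the leaks the old returns caused. The shape of that cleanup pattern as a self-contained sketch, with the error conditions stubbed out:

#include <stdio.h>
#include <stdlib.h>

static int init_device(void)
{
    int ret = 0;
    char *data = malloc(64);

    if (!data)
        return -1;

    if (0 /* of_iomap() failed */) {
        ret = -1;
        goto exit;          /* was: return -ENXIO, leaking data */
    }

    if (0 /* irq setup failed */) {
        ret = -1;
        goto exit;
    }

    printf("device up\n");
    return 0;

exit:
    free(data);             /* kfree(data) in the original */
    return ret;
}

int main(void)
{
    return init_device();
}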
@@ -130,13 +130,17 @@ static int fttmr010_timer_set_next_event(unsigned long cycles,
     cr &= ~fttmr010->t1_enable_val;
     writel(cr, fttmr010->base + TIMER_CR);

-    /* Setup the match register forward/backward in time */
-    cr = readl(fttmr010->base + TIMER1_COUNT);
-    if (fttmr010->count_down)
-        cr -= cycles;
-    else
-        cr += cycles;
-    writel(cr, fttmr010->base + TIMER1_MATCH1);
+    if (fttmr010->count_down) {
+        /*
+         * ASPEED Timer Controller will load TIMER1_LOAD register
+         * into TIMER1_COUNT register when the timer is re-enabled.
+         */
+        writel(cycles, fttmr010->base + TIMER1_LOAD);
+    } else {
+        /* Setup the match register forward in time */
+        cr = readl(fttmr010->base + TIMER1_COUNT);
+        writel(cr + cycles, fttmr010->base + TIMER1_MATCH1);
+    }

     /* Start */
     cr = readl(fttmr010->base + TIMER_CR);
@@ -97,6 +97,9 @@ static int __init ti_32k_timer_init(struct device_node *np)
         return -ENXIO;
     }

+    if (!of_machine_is_compatible("ti,am43"))
+        ti_32k_timer.cs.flags |= CLOCK_SOURCE_SUSPEND_NONSTOP;
+
     ti_32k_timer.counter = ti_32k_timer.base;

     /*
@@ -44,7 +44,7 @@ enum _msm8996_version {

 struct platform_device *cpufreq_dt_pdev, *kryo_cpufreq_pdev;

-static enum _msm8996_version __init qcom_cpufreq_kryo_get_msm_id(void)
+static enum _msm8996_version qcom_cpufreq_kryo_get_msm_id(void)
 {
     size_t len;
     u32 *msm_id;

@@ -222,7 +222,7 @@ static int __init qcom_cpufreq_kryo_init(void)
 }
 module_init(qcom_cpufreq_kryo_init);

-static void __init qcom_cpufreq_kryo_exit(void)
+static void __exit qcom_cpufreq_kryo_exit(void)
 {
     platform_device_unregister(kryo_cpufreq_pdev);
     platform_driver_unregister(&qcom_cpufreq_kryo_driver);
@@ -535,6 +535,11 @@ static unsigned long dax_get_unmapped_area(struct file *filp,
     return current->mm->get_unmapped_area(filp, addr, len, pgoff, flags);
 }

+static const struct address_space_operations dev_dax_aops = {
+    .set_page_dirty  = noop_set_page_dirty,
+    .invalidatepage  = noop_invalidatepage,
+};
+
 static int dax_open(struct inode *inode, struct file *filp)
 {
     struct dax_device *dax_dev = inode_dax(inode);

@@ -544,6 +549,7 @@ static int dax_open(struct inode *inode, struct file *filp)
     dev_dbg(&dev_dax->dev, "trace\n");
     inode->i_mapping = __dax_inode->i_mapping;
     inode->i_mapping->host = __dax_inode;
+    inode->i_mapping->a_ops = &dev_dax_aops;
     filp->f_mapping = inode->i_mapping;
     filp->f_wb_err = filemap_sample_wb_err(filp->f_mapping);
     filp->private_data = dev_dax;
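The device-dax hunks install an address_space_operations table whose methods are deliberate no-ops, so generic mm code always has a valid callback to invoke on these pages. A compact sketch of the idea; the struct and names below are illustrative:

#include <stdio.h>

struct aops {
    int  (*set_page_dirty)(void *page);
    void (*invalidatepage)(void *page);
};

static int noop_set_page_dirty(void *page) { (void)page; return 0; }
static void noop_invalidatepage(void *page) { (void)page; }

static const struct aops dev_dax_aops = {
    .set_page_dirty = noop_set_page_dirty,
    .invalidatepage = noop_invalidatepage,
};

int main(void)
{
    /* open() analogue: attach the table so callers can always call through it */
    const struct aops *a_ops = &dev_dax_aops;
    a_ops->set_page_dirty(NULL);
    a_ops->invalidatepage(NULL);
    puts("no-op aops invoked safely");
    return 0;
}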
@@ -258,6 +258,8 @@ int amdgpu_vce_suspend(struct amdgpu_device *adev)
 {
     int i;

+    cancel_delayed_work_sync(&adev->vce.idle_work);
+
     if (adev->vce.vcpu_bo == NULL)
         return 0;

@@ -268,7 +270,6 @@ int amdgpu_vce_suspend(struct amdgpu_device *adev)
     if (i == AMDGPU_MAX_VCE_HANDLES)
         return 0;

-    cancel_delayed_work_sync(&adev->vce.idle_work);
     /* TODO: suspending running encoding sessions isn't supported */
     return -EINVAL;
 }
|
@@ -163,11 +163,11 @@ int amdgpu_vcn_suspend(struct amdgpu_device *adev)
     unsigned size;
     void *ptr;

+    cancel_delayed_work_sync(&adev->vcn.idle_work);
+
     if (adev->vcn.vcpu_bo == NULL)
         return 0;

-    cancel_delayed_work_sync(&adev->vcn.idle_work);
-
     size = amdgpu_bo_size(adev->vcn.vcpu_bo);
     ptr = adev->vcn.cpu_addr;
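Both the VCE and VCN hunks hoist cancel_delayed_work_sync() above the early return, so the idle worker is guaranteed to be cancelled on every suspend path, not just the one that reaches the old call site. A sketch of why the ordering matters; a flag stands in for the real work queue:

#include <stdbool.h>
#include <stdio.h>

static bool idle_work_queued = true;
static void cancel_work_sync(void) { idle_work_queued = false; }

static int suspend(bool has_vcpu_bo)
{
    cancel_work_sync();         /* moved up: runs on every path */

    if (!has_vcpu_bo)
        return 0;               /* old code returned here with work still queued */

    puts("saving firmware state");
    return 0;
}

int main(void)
{
    suspend(false);
    printf("idle work still queued? %s\n", idle_work_queued ? "yes" : "no");
    return 0;
}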
@@ -718,6 +718,87 @@ amdgpu_dm_find_first_crtc_matching_connector(struct drm_atomic_state *state,
     return NULL;
 }

+static void emulated_link_detect(struct dc_link *link)
+{
+    struct dc_sink_init_data sink_init_data = { 0 };
+    struct display_sink_capability sink_caps = { 0 };
+    enum dc_edid_status edid_status;
+    struct dc_context *dc_ctx = link->ctx;
+    struct dc_sink *sink = NULL;
+    struct dc_sink *prev_sink = NULL;
+
+    link->type = dc_connection_none;
+    prev_sink = link->local_sink;
+
+    if (prev_sink != NULL)
+        dc_sink_retain(prev_sink);
+
+    switch (link->connector_signal) {
+    case SIGNAL_TYPE_HDMI_TYPE_A: {
+        sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
+        sink_caps.signal = SIGNAL_TYPE_HDMI_TYPE_A;
+        break;
+    }
+
+    case SIGNAL_TYPE_DVI_SINGLE_LINK: {
+        sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
+        sink_caps.signal = SIGNAL_TYPE_DVI_SINGLE_LINK;
+        break;
+    }
+
+    case SIGNAL_TYPE_DVI_DUAL_LINK: {
+        sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
+        sink_caps.signal = SIGNAL_TYPE_DVI_DUAL_LINK;
+        break;
+    }
+
+    case SIGNAL_TYPE_LVDS: {
+        sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C;
+        sink_caps.signal = SIGNAL_TYPE_LVDS;
+        break;
+    }
+
+    case SIGNAL_TYPE_EDP: {
+        sink_caps.transaction_type =
+            DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
+        sink_caps.signal = SIGNAL_TYPE_EDP;
+        break;
+    }
+
+    case SIGNAL_TYPE_DISPLAY_PORT: {
+        sink_caps.transaction_type =
+            DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
+        sink_caps.signal = SIGNAL_TYPE_VIRTUAL;
+        break;
+    }
+
+    default:
+        DC_ERROR("Invalid connector type! signal:%d\n",
+            link->connector_signal);
+        return;
+    }
+
+    sink_init_data.link = link;
+    sink_init_data.sink_signal = sink_caps.signal;
+
+    sink = dc_sink_create(&sink_init_data);
+    if (!sink) {
+        DC_ERROR("Failed to create sink!\n");
+        return;
+    }
+
+    link->local_sink = sink;
+
+    edid_status = dm_helpers_read_local_edid(
+            link->ctx,
+            link,
+            sink);
+
+    if (edid_status != EDID_OK)
+        DC_ERROR("Failed to read EDID");
+
+}
+
 static int dm_resume(void *handle)
 {
     struct amdgpu_device *adev = handle;

@@ -731,6 +812,7 @@ static int dm_resume(void *handle)
     struct drm_plane *plane;
     struct drm_plane_state *new_plane_state;
     struct dm_plane_state *dm_new_plane_state;
+    enum dc_connection_type new_connection_type = dc_connection_none;
     int ret;
     int i;

@@ -761,7 +843,13 @@ static int dm_resume(void *handle)
             continue;

         mutex_lock(&aconnector->hpd_lock);
-        dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD);
+        if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
+            DRM_ERROR("KMS: Failed to detect connector\n");
+
+        if (aconnector->base.force && new_connection_type == dc_connection_none)
+            emulated_link_detect(aconnector->dc_link);
+        else
+            dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD);

         if (aconnector->fake_enable && aconnector->dc_link->local_sink)
             aconnector->fake_enable = false;

@@ -1010,6 +1098,7 @@ static void handle_hpd_irq(void *param)
     struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
     struct drm_connector *connector = &aconnector->base;
     struct drm_device *dev = connector->dev;
+    enum dc_connection_type new_connection_type = dc_connection_none;

     /*
      * In case of failure or MST no need to update connector status or notify the OS

@@ -1020,7 +1109,21 @@ static void handle_hpd_irq(void *param)
     if (aconnector->fake_enable)
         aconnector->fake_enable = false;

-    if (dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD)) {
+    if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type))
+        DRM_ERROR("KMS: Failed to detect connector\n");
+
+    if (aconnector->base.force && new_connection_type == dc_connection_none) {
+        emulated_link_detect(aconnector->dc_link);
+
+        drm_modeset_lock_all(dev);
+        dm_restore_drm_connector_state(dev, connector);
+        drm_modeset_unlock_all(dev);
+
+        if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
+            drm_kms_helper_hotplug_event(dev);
+
+    } else if (dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD)) {
         amdgpu_dm_update_connector_after_detect(aconnector);

@@ -1120,6 +1223,7 @@ static void handle_hpd_rx_irq(void *param)
     struct drm_device *dev = connector->dev;
     struct dc_link *dc_link = aconnector->dc_link;
     bool is_mst_root_connector = aconnector->mst_mgr.mst_state;
+    enum dc_connection_type new_connection_type = dc_connection_none;

     /*
      * TODO:Temporary add mutex to protect hpd interrupt not have a gpio

@@ -1132,7 +1236,24 @@ static void handle_hpd_rx_irq(void *param)
     if (dc_link_handle_hpd_rx_irq(dc_link, NULL, NULL) &&
         !is_mst_root_connector) {
         /* Downstream Port status changed. */
-        if (dc_link_detect(dc_link, DETECT_REASON_HPDRX)) {
+        if (!dc_link_detect_sink(dc_link, &new_connection_type))
+            DRM_ERROR("KMS: Failed to detect connector\n");
+
+        if (aconnector->base.force && new_connection_type == dc_connection_none) {
+            emulated_link_detect(dc_link);
+
+            if (aconnector->fake_enable)
+                aconnector->fake_enable = false;
+
+            amdgpu_dm_update_connector_after_detect(aconnector);
+
+            drm_modeset_lock_all(dev);
+            dm_restore_drm_connector_state(dev, connector);
+            drm_modeset_unlock_all(dev);
+
+            drm_kms_helper_hotplug_event(dev);
+        } else if (dc_link_detect(dc_link, DETECT_REASON_HPDRX)) {

             if (aconnector->fake_enable)
                 aconnector->fake_enable = false;

@@ -1529,6 +1650,7 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
     struct amdgpu_mode_info *mode_info = &adev->mode_info;
     uint32_t link_cnt;
     int32_t total_overlay_planes, total_primary_planes;
+    enum dc_connection_type new_connection_type = dc_connection_none;

     link_cnt = dm->dc->caps.max_links;
     if (amdgpu_dm_mode_config_init(dm->adev)) {

@@ -1595,7 +1717,14 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)

         link = dc_get_link_at_index(dm->dc, i);

-        if (dc_link_detect(link, DETECT_REASON_BOOT)) {
+        if (!dc_link_detect_sink(link, &new_connection_type))
+            DRM_ERROR("KMS: Failed to detect connector\n");
+
+        if (aconnector->base.force && new_connection_type == dc_connection_none) {
+            emulated_link_detect(link);
+            amdgpu_dm_update_connector_after_detect(aconnector);
+
+        } else if (dc_link_detect(link, DETECT_REASON_BOOT)) {
             amdgpu_dm_update_connector_after_detect(aconnector);
             register_backlight_device(dm, link);
         }

@@ -2638,7 +2767,7 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
     if (dm_state && dm_state->freesync_capable)
         stream->ignore_msa_timing_param = true;
 finish:
-    if (sink && sink->sink_signal == SIGNAL_TYPE_VIRTUAL)
+    if (sink && sink->sink_signal == SIGNAL_TYPE_VIRTUAL && aconnector->base.force != DRM_FORCE_ON)
         dc_sink_release(sink);

     return stream;
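The amdgpu_dm hunks add a common fallback at every detection site: probe the sink first, and if the connector is user-forced while nothing is physically attached, synthesize (emulate) the link instead of failing. The control flow in isolation, as a hedged sketch with illustrative types and names:

#include <stdbool.h>
#include <stdio.h>

enum conn_type { CONN_NONE, CONN_REAL, CONN_EMULATED };

static bool detect_sink(enum conn_type *type)
{
    *type = CONN_NONE;              /* pretend the HPD probe saw nothing */
    return true;
}

static enum conn_type handle_hotplug(bool user_forced)
{
    enum conn_type type;

    if (!detect_sink(&type))
        fprintf(stderr, "failed to detect connector\n");

    if (user_forced && type == CONN_NONE)
        return CONN_EMULATED;       /* emulated_link_detect() analogue */

    return type;                    /* normal dc_link_detect() path */
}

int main(void)
{
    printf("forced, empty port -> %d (2 = emulated)\n", handle_hotplug(true));
    return 0;
}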
@@ -198,7 +198,7 @@ static bool program_hpd_filter(
     return result;
 }

-static bool detect_sink(struct dc_link *link, enum dc_connection_type *type)
+bool dc_link_detect_sink(struct dc_link *link, enum dc_connection_type *type)
 {
     uint32_t is_hpd_high = 0;
     struct gpio *hpd_pin;

@@ -612,7 +612,7 @@ bool dc_link_detect(struct dc_link *link, enum dc_detect_reason reason)
     if (link->connector_signal == SIGNAL_TYPE_VIRTUAL)
         return false;

-    if (false == detect_sink(link, &new_connection_type)) {
+    if (false == dc_link_detect_sink(link, &new_connection_type)) {
         BREAK_TO_DEBUGGER();
         return false;
     }
@@ -216,6 +216,7 @@ void dc_link_enable_hpd_filter(struct dc_link *link, bool enable);

 bool dc_link_is_dp_sink_present(struct dc_link *link);

+bool dc_link_detect_sink(struct dc_link *link, enum dc_connection_type *type);
 /*
  * DPCD access interfaces
  */
@@ -2537,7 +2537,7 @@ static void pplib_apply_display_requirements(
     dc->prev_display_config = *pp_display_cfg;
 }

-void dce110_set_bandwidth(
+static void dce110_set_bandwidth(
     struct dc *dc,
     struct dc_state *context,
     bool decrease_allowed)
@@ -68,11 +68,6 @@ void dce110_fill_display_configs(
     const struct dc_state *context,
     struct dm_pp_display_configuration *pp_display_cfg);

-void dce110_set_bandwidth(
-    struct dc *dc,
-    struct dc_state *context,
-    bool decrease_allowed);
-
 uint32_t dce110_get_min_vblank_time_us(const struct dc_state *context);

 void dp_receiver_power_ctrl(struct dc_link *link, bool on);
@@ -244,17 +244,6 @@ static void dce120_update_dchub(
     dh_data->dchub_info_valid = false;
 }

-static void dce120_set_bandwidth(
-    struct dc *dc,
-    struct dc_state *context,
-    bool decrease_allowed)
-{
-    if (context->stream_count <= 0)
-        return;
-
-    dce110_set_bandwidth(dc, context, decrease_allowed);
-}
-
 void dce120_hw_sequencer_construct(struct dc *dc)
 {
     /* All registers used by dce11.2 match those in dce11 in offset and

@@ -263,6 +252,5 @@ void dce120_hw_sequencer_construct(struct dc *dc)
     dce110_hw_sequencer_construct(dc);
     dc->hwss.enable_display_power_gating = dce120_enable_display_power_gating;
     dc->hwss.update_dchub = dce120_update_dchub;
-    dc->hwss.set_bandwidth = dce120_set_bandwidth;
 }
@@ -752,6 +752,7 @@ static int malidp_bind(struct device *dev)
     drm->irq_enabled = true;

     ret = drm_vblank_init(drm, drm->mode_config.num_crtc);
+    drm_crtc_vblank_reset(&malidp->crtc);
     if (ret < 0) {
         DRM_ERROR("failed to initialise vblank\n");
         goto vblank_fail;
@@ -384,7 +384,8 @@ static long malidp500_se_calc_mclk(struct malidp_hw_device *hwdev,

 static int malidp500_enable_memwrite(struct malidp_hw_device *hwdev,
                                      dma_addr_t *addrs, s32 *pitches,
-                                     int num_planes, u16 w, u16 h, u32 fmt_id)
+                                     int num_planes, u16 w, u16 h, u32 fmt_id,
+                                     const s16 *rgb2yuv_coeffs)
 {
     u32 base = MALIDP500_SE_MEMWRITE_BASE;
     u32 de_base = malidp_get_block_base(hwdev, MALIDP_DE_BLOCK);

@@ -416,6 +417,16 @@ static int malidp500_enable_memwrite(struct malidp_hw_device *hwdev,

     malidp_hw_write(hwdev, MALIDP_DE_H_ACTIVE(w) | MALIDP_DE_V_ACTIVE(h),
                     MALIDP500_SE_MEMWRITE_OUT_SIZE);
+
+    if (rgb2yuv_coeffs) {
+        int i;
+
+        for (i = 0; i < MALIDP_COLORADJ_NUM_COEFFS; i++) {
+            malidp_hw_write(hwdev, rgb2yuv_coeffs[i],
+                            MALIDP500_SE_RGB_YUV_COEFFS + i * 4);
+        }
+    }
+
     malidp_hw_setbits(hwdev, MALIDP_SE_MEMWRITE_EN, MALIDP500_SE_CONTROL);

     return 0;

@@ -658,7 +669,8 @@ static long malidp550_se_calc_mclk(struct malidp_hw_device *hwdev,

 static int malidp550_enable_memwrite(struct malidp_hw_device *hwdev,
                                      dma_addr_t *addrs, s32 *pitches,
-                                     int num_planes, u16 w, u16 h, u32 fmt_id)
+                                     int num_planes, u16 w, u16 h, u32 fmt_id,
+                                     const s16 *rgb2yuv_coeffs)
 {
     u32 base = MALIDP550_SE_MEMWRITE_BASE;
     u32 de_base = malidp_get_block_base(hwdev, MALIDP_DE_BLOCK);

@@ -689,6 +701,15 @@ static int malidp550_enable_memwrite(struct malidp_hw_device *hwdev,
     malidp_hw_setbits(hwdev, MALIDP550_SE_MEMWRITE_ONESHOT | MALIDP_SE_MEMWRITE_EN,
                       MALIDP550_SE_CONTROL);

+    if (rgb2yuv_coeffs) {
+        int i;
+
+        for (i = 0; i < MALIDP_COLORADJ_NUM_COEFFS; i++) {
+            malidp_hw_write(hwdev, rgb2yuv_coeffs[i],
+                            MALIDP550_SE_RGB_YUV_COEFFS + i * 4);
+        }
+    }
+
     return 0;
 }
@@ -191,7 +191,8 @@ struct malidp_hw {
      * @param fmt_id - internal format ID of output buffer
      */
     int (*enable_memwrite)(struct malidp_hw_device *hwdev, dma_addr_t *addrs,
-                           s32 *pitches, int num_planes, u16 w, u16 h, u32 fmt_id);
+                           s32 *pitches, int num_planes, u16 w, u16 h, u32 fmt_id,
+                           const s16 *rgb2yuv_coeffs);

     /*
      * Disable the writing to memory of the next frame's content.
@@ -26,6 +26,8 @@ struct malidp_mw_connector_state {
     s32 pitches[2];
     u8 format;
     u8 n_planes;
+    bool rgb2yuv_initialized;
+    const s16 *rgb2yuv_coeffs;
 };

 static int malidp_mw_connector_get_modes(struct drm_connector *connector)

@@ -84,7 +86,7 @@ static void malidp_mw_connector_destroy(struct drm_connector *connector)
 static struct drm_connector_state *
 malidp_mw_connector_duplicate_state(struct drm_connector *connector)
 {
-    struct malidp_mw_connector_state *mw_state;
+    struct malidp_mw_connector_state *mw_state, *mw_current_state;

     if (WARN_ON(!connector->state))
         return NULL;

@@ -93,7 +95,10 @@ malidp_mw_connector_duplicate_state(struct drm_connector *connector)
     if (!mw_state)
         return NULL;

-    /* No need to preserve any of our driver-local data */
+    mw_current_state = to_mw_state(connector->state);
+    mw_state->rgb2yuv_coeffs = mw_current_state->rgb2yuv_coeffs;
+    mw_state->rgb2yuv_initialized = mw_current_state->rgb2yuv_initialized;
+
     __drm_atomic_helper_connector_duplicate_state(connector, &mw_state->base);

     return &mw_state->base;

@@ -108,6 +113,13 @@ static const struct drm_connector_funcs malidp_mw_connector_funcs = {
     .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
 };

+static const s16 rgb2yuv_coeffs_bt709_limited[MALIDP_COLORADJ_NUM_COEFFS] = {
+    47,  157,   16,
+    -26,  -87,  112,
+    112, -102,  -10,
+    16,  128,  128
+};
+
 static int
 malidp_mw_encoder_atomic_check(struct drm_encoder *encoder,
                                struct drm_crtc_state *crtc_state,

@@ -157,6 +169,9 @@ malidp_mw_encoder_atomic_check(struct drm_encoder *encoder,
     }
     mw_state->n_planes = n_planes;

+    if (fb->format->is_yuv)
+        mw_state->rgb2yuv_coeffs = rgb2yuv_coeffs_bt709_limited;
+
     return 0;
 }

@@ -239,10 +254,12 @@ void malidp_mw_atomic_commit(struct drm_device *drm,

         drm_writeback_queue_job(mw_conn, conn_state->writeback_job);
         conn_state->writeback_job = NULL;

         hwdev->hw->enable_memwrite(hwdev, mw_state->addrs,
                                    mw_state->pitches, mw_state->n_planes,
-                                   fb->width, fb->height, mw_state->format);
+                                   fb->width, fb->height, mw_state->format,
+                                   !mw_state->rgb2yuv_initialized ?
+                                   mw_state->rgb2yuv_coeffs : NULL);
+        mw_state->rgb2yuv_initialized = !!mw_state->rgb2yuv_coeffs;
     } else {
         DRM_DEV_DEBUG_DRIVER(drm->dev, "Disable memwrite\n");
         hwdev->hw->disable_memwrite(hwdev);
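The writeback changes program the RGB-to-YUV matrix only on the first commit after the coefficients change, then latch rgb2yuv_initialized so later frames skip the register writes. A sketch of that one-shot latch, with illustrative stand-ins for the hardware writes:

#include <stdbool.h>
#include <stdio.h>

struct mw_state {
    const short *rgb2yuv_coeffs;        /* NULL when no conversion is needed */
    bool rgb2yuv_initialized;
};

static void commit(struct mw_state *s)
{
    /* pass coefficients only if they have not been programmed yet */
    const short *coeffs = s->rgb2yuv_initialized ? NULL : s->rgb2yuv_coeffs;

    if (coeffs)
        puts("writing 12 RGB->YUV coefficients to SE registers");
    else
        puts("skipping coefficient write");

    s->rgb2yuv_initialized = s->rgb2yuv_coeffs != NULL;
}

int main(void)
{
    static const short bt709[12] = { 47, 157, 16, -26, -87, 112,
                                     112, -102, -10, 16, 128, 128 };
    struct mw_state s = { .rgb2yuv_coeffs = bt709 };

    commit(&s);     /* first frame: programs the matrix */
    commit(&s);     /* later frames: skip it */
    return 0;
}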
@@ -205,6 +205,7 @@
 #define MALIDP500_SE_BASE               0x00c00
 #define MALIDP500_SE_CONTROL            0x00c0c
 #define MALIDP500_SE_MEMWRITE_OUT_SIZE  0x00c2c
+#define MALIDP500_SE_RGB_YUV_COEFFS     0x00C74
 #define MALIDP500_SE_MEMWRITE_BASE      0x00e00
 #define MALIDP500_DC_IRQ_BASE           0x00f00
 #define MALIDP500_CONFIG_VALID          0x00f00

@@ -238,6 +239,7 @@
 #define MALIDP550_SE_CONTROL            0x08010
 #define MALIDP550_SE_MEMWRITE_ONESHOT   (1 << 7)
 #define MALIDP550_SE_MEMWRITE_OUT_SIZE  0x08030
+#define MALIDP550_SE_RGB_YUV_COEFFS     0x08078
 #define MALIDP550_SE_MEMWRITE_BASE      0x08100
 #define MALIDP550_DC_BASE               0x0c000
 #define MALIDP550_DC_CONTROL            0x0c010
@@ -24,7 +24,6 @@
 #include <linux/err.h>
 #include <linux/module.h>

-#include <drm/drm_device.h>
 #include <drm/drm_crtc.h>
 #include <drm/drm_panel.h>

@@ -105,13 +104,6 @@ int drm_panel_attach(struct drm_panel *panel, struct drm_connector *connector)
     if (panel->connector)
         return -EBUSY;

-    panel->link = device_link_add(connector->dev->dev, panel->dev, 0);
-    if (!panel->link) {
-        dev_err(panel->dev, "failed to link panel to %s\n",
-                dev_name(connector->dev->dev));
-        return -EINVAL;
-    }
-
     panel->connector = connector;
     panel->drm = connector->dev;

@@ -133,8 +125,6 @@ EXPORT_SYMBOL(drm_panel_attach);
  */
 int drm_panel_detach(struct drm_panel *panel)
 {
-    device_link_del(panel->link);
-
     panel->connector = NULL;
     panel->drm = NULL;
@@ -113,6 +113,8 @@ static int drm_syncobj_fence_get_or_add_callback(struct drm_syncobj *syncobj,
 {
     int ret;

+    WARN_ON(*fence);
+
     *fence = drm_syncobj_fence_get(syncobj);
     if (*fence)
         return 1;

@@ -717,6 +719,9 @@ static signed long drm_syncobj_array_wait_timeout(struct drm_syncobj **syncobjs,

     if (flags & DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT) {
         for (i = 0; i < count; ++i) {
+            if (entries[i].fence)
+                continue;
+
             drm_syncobj_fence_get_or_add_callback(syncobjs[i],
                                                   &entries[i].fence,
                                                   &entries[i].syncobj_cb,
@@ -592,8 +592,6 @@ static int etnaviv_pdev_probe(struct platform_device *pdev)
     struct device *dev = &pdev->dev;
     struct component_match *match = NULL;

-    dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
-
     if (!dev->platform_data) {
         struct device_node *core_node;

@@ -655,13 +653,30 @@ static int __init etnaviv_init(void)
     for_each_compatible_node(np, NULL, "vivante,gc") {
         if (!of_device_is_available(np))
             continue;
-        pdev = platform_device_register_simple("etnaviv", -1,
-                                               NULL, 0);
-        if (IS_ERR(pdev)) {
-            ret = PTR_ERR(pdev);
+
+        pdev = platform_device_alloc("etnaviv", -1);
+        if (!pdev) {
+            ret = -ENOMEM;
             of_node_put(np);
             goto unregister_platform_driver;
         }
+        pdev->dev.coherent_dma_mask = DMA_BIT_MASK(40);
+        pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask;
+
+        /*
+         * Apply the same DMA configuration to the virtual etnaviv
+         * device as the GPU we found. This assumes that all Vivante
+         * GPUs in the system share the same DMA constraints.
+         */
+        of_dma_configure(&pdev->dev, np, true);
+
+        ret = platform_device_add(pdev);
+        if (ret) {
+            platform_device_put(pdev);
+            of_node_put(np);
+            goto unregister_platform_driver;
+        }
+
         etnaviv_drm = pdev;
         of_node_put(np);
         break;
@@ -139,7 +139,8 @@ static int intel_th_remove(struct device *dev)
             th->thdev[i] = NULL;
         }

-        th->num_thdevs = lowest;
+        if (lowest >= 0)
+            th->num_thdevs = lowest;
     }

     if (thdrv->attr_group)

@@ -487,7 +488,7 @@ static const struct intel_th_subdevice {
             .flags  = IORESOURCE_MEM,
         },
         {
-            .start  = TH_MMIO_SW,
+            .start  = 1, /* use resource[1] */
             .end    = 0,
             .flags  = IORESOURCE_MEM,
         },

@@ -580,6 +581,7 @@ intel_th_subdevice_alloc(struct intel_th *th,
     struct intel_th_device *thdev;
     struct resource res[3];
     unsigned int req = 0;
+    bool is64bit = false;
     int r, err;

     thdev = intel_th_device_alloc(th, subdev->type, subdev->name,

@@ -589,12 +591,18 @@ intel_th_subdevice_alloc(struct intel_th *th,

     thdev->drvdata = th->drvdata;

+    for (r = 0; r < th->num_resources; r++)
+        if (th->resource[r].flags & IORESOURCE_MEM_64) {
+            is64bit = true;
+            break;
+        }
+
     memcpy(res, subdev->res,
            sizeof(struct resource) * subdev->nres);

     for (r = 0; r < subdev->nres; r++) {
         struct resource *devres = th->resource;
-        int bar = TH_MMIO_CONFIG;
+        int bar = 0; /* cut subdevices' MMIO from resource[0] */

         /*
          * Take .end == 0 to mean 'take the whole bar',

@@ -603,6 +611,8 @@ intel_th_subdevice_alloc(struct intel_th *th,
          */
         if (!res[r].end && res[r].flags == IORESOURCE_MEM) {
             bar = res[r].start;
+            if (is64bit)
+                bar *= 2;
             res[r].start = 0;
             res[r].end = resource_size(&devres[bar]) - 1;
         }
@@ -160,6 +160,11 @@ static const struct pci_device_id intel_th_pci_id_table[] = {
         PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x18e1),
         .driver_data = (kernel_ulong_t)&intel_th_2x,
     },
+    {
+        /* Ice Lake PCH */
+        PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x34a6),
+        .driver_data = (kernel_ulong_t)&intel_th_2x,
+    },
     { 0 },
 };
@@ -337,55 +337,6 @@ static int add_roce_gid(struct ib_gid_table_entry *entry)
     return 0;
 }

-/**
- * add_modify_gid - Add or modify GID table entry
- *
- * @table: GID table in which GID to be added or modified
- * @attr:  Attributes of the GID
- *
- * Returns 0 on success or appropriate error code. It accepts zero
- * GID addition for non RoCE ports for HCA's who report them as valid
- * GID. However such zero GIDs are not added to the cache.
- */
-static int add_modify_gid(struct ib_gid_table *table,
-                          const struct ib_gid_attr *attr)
-{
-    struct ib_gid_table_entry *entry;
-    int ret = 0;
-
-    /*
-     * Invalidate any old entry in the table to make it safe to write to
-     * this index.
-     */
-    if (is_gid_entry_valid(table->data_vec[attr->index]))
-        put_gid_entry(table->data_vec[attr->index]);
-
-    /*
-     * Some HCA's report multiple GID entries with only one valid GID, and
-     * leave other unused entries as the zero GID. Convert zero GIDs to
-     * empty table entries instead of storing them.
-     */
-    if (rdma_is_zero_gid(&attr->gid))
-        return 0;
-
-    entry = alloc_gid_entry(attr);
-    if (!entry)
-        return -ENOMEM;
-
-    if (rdma_protocol_roce(attr->device, attr->port_num)) {
-        ret = add_roce_gid(entry);
-        if (ret)
-            goto done;
-    }
-
-    store_gid_entry(table, entry);
-    return 0;
-
-done:
-    put_gid_entry(entry);
-    return ret;
-}
-
 /**
  * del_gid - Delete GID table entry
  *

@@ -419,6 +370,55 @@ static void del_gid(struct ib_device *ib_dev, u8 port,
     put_gid_entry_locked(entry);
 }

+/**
+ * add_modify_gid - Add or modify GID table entry
+ *
+ * @table: GID table in which GID to be added or modified
+ * @attr:  Attributes of the GID
+ *
+ * Returns 0 on success or appropriate error code. It accepts zero
+ * GID addition for non RoCE ports for HCA's who report them as valid
+ * GID. However such zero GIDs are not added to the cache.
+ */
+static int add_modify_gid(struct ib_gid_table *table,
+                          const struct ib_gid_attr *attr)
+{
+    struct ib_gid_table_entry *entry;
+    int ret = 0;
+
+    /*
+     * Invalidate any old entry in the table to make it safe to write to
+     * this index.
+     */
+    if (is_gid_entry_valid(table->data_vec[attr->index]))
+        del_gid(attr->device, attr->port_num, table, attr->index);
+
+    /*
+     * Some HCA's report multiple GID entries with only one valid GID, and
+     * leave other unused entries as the zero GID. Convert zero GIDs to
+     * empty table entries instead of storing them.
+     */
+    if (rdma_is_zero_gid(&attr->gid))
+        return 0;
+
+    entry = alloc_gid_entry(attr);
+    if (!entry)
+        return -ENOMEM;
+
+    if (rdma_protocol_roce(attr->device, attr->port_num)) {
+        ret = add_roce_gid(entry);
+        if (ret)
+            goto done;
+    }
+
+    store_gid_entry(table, entry);
+    return 0;
+
+done:
+    put_gid_entry(entry);
+    return ret;
+}
+
 /* rwlock should be read locked, or lock should be held */
 static int find_gid(struct ib_gid_table *table, const union ib_gid *gid,
                     const struct ib_gid_attr *val, bool default_gid,
@@ -1759,6 +1759,8 @@ static int ucma_close(struct inode *inode, struct file *filp)
         mutex_lock(&mut);
         if (!ctx->closing) {
             mutex_unlock(&mut);
+            ucma_put_ctx(ctx);
+            wait_for_completion(&ctx->comp);
             /* rdma_destroy_id ensures that no event handlers are
              * inflight for that id before releasing it.
              */
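The ucma fix makes the closer drop its own context reference and then block until all concurrent handlers have dropped theirs, which is what prevents the use-after-free. A userspace sketch of the put-then-wait pattern, with a pthread condition variable standing in for the kernel completion:

#include <pthread.h>
#include <stdio.h>

static int refcount = 2;                     /* closer + one in-flight user */
static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  comp = PTHREAD_COND_INITIALIZER;

static void put_ctx(void)
{
    pthread_mutex_lock(&lock);
    if (--refcount == 0)
        pthread_cond_signal(&comp);          /* complete(&ctx->comp) analogue */
    pthread_mutex_unlock(&lock);
}

static void *user_thread(void *arg)
{
    (void)arg;
    put_ctx();                               /* in-flight handler finishing */
    return NULL;
}

int main(void)
{
    pthread_t t;
    pthread_create(&t, NULL, user_thread, NULL);

    put_ctx();                               /* ucma_put_ctx(ctx) analogue */
    pthread_mutex_lock(&lock);
    while (refcount > 0)                     /* wait_for_completion() analogue */
        pthread_cond_wait(&comp, &lock);
    pthread_mutex_unlock(&lock);

    puts("safe to destroy ctx: no handlers in flight");
    pthread_join(t, NULL);
    return 0;
}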
@@ -2027,33 +2027,55 @@ static int modify_qp(struct ib_uverbs_file *file,

     if ((cmd->base.attr_mask & IB_QP_CUR_STATE &&
         cmd->base.cur_qp_state > IB_QPS_ERR) ||
-        cmd->base.qp_state > IB_QPS_ERR) {
+        (cmd->base.attr_mask & IB_QP_STATE &&
+        cmd->base.qp_state > IB_QPS_ERR)) {
         ret = -EINVAL;
         goto release_qp;
     }

-    attr->qp_state            = cmd->base.qp_state;
-    attr->cur_qp_state        = cmd->base.cur_qp_state;
-    attr->path_mtu            = cmd->base.path_mtu;
-    attr->path_mig_state      = cmd->base.path_mig_state;
-    attr->qkey                = cmd->base.qkey;
-    attr->rq_psn              = cmd->base.rq_psn;
-    attr->sq_psn              = cmd->base.sq_psn;
-    attr->dest_qp_num         = cmd->base.dest_qp_num;
-    attr->qp_access_flags     = cmd->base.qp_access_flags;
-    attr->pkey_index          = cmd->base.pkey_index;
-    attr->alt_pkey_index      = cmd->base.alt_pkey_index;
-    attr->en_sqd_async_notify = cmd->base.en_sqd_async_notify;
-    attr->max_rd_atomic       = cmd->base.max_rd_atomic;
-    attr->max_dest_rd_atomic  = cmd->base.max_dest_rd_atomic;
-    attr->min_rnr_timer       = cmd->base.min_rnr_timer;
-    attr->port_num            = cmd->base.port_num;
-    attr->timeout             = cmd->base.timeout;
-    attr->retry_cnt           = cmd->base.retry_cnt;
-    attr->rnr_retry           = cmd->base.rnr_retry;
-    attr->alt_port_num        = cmd->base.alt_port_num;
-    attr->alt_timeout         = cmd->base.alt_timeout;
-    attr->rate_limit          = cmd->rate_limit;
+    if (cmd->base.attr_mask & IB_QP_STATE)
+        attr->qp_state = cmd->base.qp_state;
+    if (cmd->base.attr_mask & IB_QP_CUR_STATE)
+        attr->cur_qp_state = cmd->base.cur_qp_state;
+    if (cmd->base.attr_mask & IB_QP_PATH_MTU)
+        attr->path_mtu = cmd->base.path_mtu;
+    if (cmd->base.attr_mask & IB_QP_PATH_MIG_STATE)
+        attr->path_mig_state = cmd->base.path_mig_state;
+    if (cmd->base.attr_mask & IB_QP_QKEY)
+        attr->qkey = cmd->base.qkey;
+    if (cmd->base.attr_mask & IB_QP_RQ_PSN)
+        attr->rq_psn = cmd->base.rq_psn;
+    if (cmd->base.attr_mask & IB_QP_SQ_PSN)
+        attr->sq_psn = cmd->base.sq_psn;
+    if (cmd->base.attr_mask & IB_QP_DEST_QPN)
+        attr->dest_qp_num = cmd->base.dest_qp_num;
+    if (cmd->base.attr_mask & IB_QP_ACCESS_FLAGS)
+        attr->qp_access_flags = cmd->base.qp_access_flags;
+    if (cmd->base.attr_mask & IB_QP_PKEY_INDEX)
+        attr->pkey_index = cmd->base.pkey_index;
+    if (cmd->base.attr_mask & IB_QP_EN_SQD_ASYNC_NOTIFY)
+        attr->en_sqd_async_notify = cmd->base.en_sqd_async_notify;
+    if (cmd->base.attr_mask & IB_QP_MAX_QP_RD_ATOMIC)
+        attr->max_rd_atomic = cmd->base.max_rd_atomic;
+    if (cmd->base.attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
+        attr->max_dest_rd_atomic = cmd->base.max_dest_rd_atomic;
+    if (cmd->base.attr_mask & IB_QP_MIN_RNR_TIMER)
+        attr->min_rnr_timer = cmd->base.min_rnr_timer;
+    if (cmd->base.attr_mask & IB_QP_PORT)
+        attr->port_num = cmd->base.port_num;
+    if (cmd->base.attr_mask & IB_QP_TIMEOUT)
+        attr->timeout = cmd->base.timeout;
+    if (cmd->base.attr_mask & IB_QP_RETRY_CNT)
+        attr->retry_cnt = cmd->base.retry_cnt;
+    if (cmd->base.attr_mask & IB_QP_RNR_RETRY)
+        attr->rnr_retry = cmd->base.rnr_retry;
+    if (cmd->base.attr_mask & IB_QP_ALT_PATH) {
+        attr->alt_port_num = cmd->base.alt_port_num;
+        attr->alt_timeout = cmd->base.alt_timeout;
+        attr->alt_pkey_index = cmd->base.alt_pkey_index;
+    }
+    if (cmd->base.attr_mask & IB_QP_RATE_LIMIT)
+        attr->rate_limit = cmd->rate_limit;

     if (cmd->base.attr_mask & IB_QP_AV)
         copy_ah_attr_from_uverbs(qp->device, &attr->ah_attr,
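The modify_qp rework copies each attribute from the user command only when the corresponding attr_mask bit is set, so uninitialized userspace fields can no longer reach the driver. The guard pattern in miniature; the masks and fields below are illustrative:

#include <stdio.h>

#define QP_STATE (1u << 0)
#define QP_QKEY  (1u << 1)

struct cmd  { unsigned mask; int qp_state; int qkey; };
struct attr { int qp_state; int qkey; };

static void copy_masked(struct attr *a, const struct cmd *c)
{
    if (c->mask & QP_STATE)
        a->qp_state = c->qp_state;
    if (c->mask & QP_QKEY)
        a->qkey = c->qkey;      /* untouched when the bit is clear */
}

int main(void)
{
    struct cmd c = { .mask = QP_STATE, .qp_state = 3, .qkey = 0xdead };
    struct attr a = { .qkey = 7 };

    copy_masked(&a, &c);
    printf("state=%d qkey=%d (qkey kept, its mask bit was clear)\n",
           a.qp_state, a.qkey);
    return 0;
}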
@@ -440,6 +440,7 @@ static int ib_uverbs_comp_event_close(struct inode *inode, struct file *filp)
         list_del(&entry->obj_list);
         kfree(entry);
     }
+    file->ev_queue.is_closed = 1;
     spin_unlock_irq(&file->ev_queue.lock);

     uverbs_close_fd(filp);
@@ -248,6 +248,7 @@ void uverbs_destroy_api(struct uverbs_api *uapi)
         kfree(rcu_dereference_protected(*slot, true));
         radix_tree_iter_delete(&uapi->radix, &iter, slot);
     }
+    kfree(uapi);
 }

 struct uverbs_api *uverbs_alloc_api(
@@ -78,7 +78,7 @@ static struct list_head bnxt_re_dev_list = LIST_HEAD_INIT(bnxt_re_dev_list);
 /* Mutex to protect the list of bnxt_re devices added */
 static DEFINE_MUTEX(bnxt_re_dev_lock);
 static struct workqueue_struct *bnxt_re_wq;
-static void bnxt_re_ib_unreg(struct bnxt_re_dev *rdev, bool lock_wait);
+static void bnxt_re_ib_unreg(struct bnxt_re_dev *rdev);

 /* SR-IOV helper functions */

@@ -182,7 +182,7 @@ static void bnxt_re_shutdown(void *p)
     if (!rdev)
         return;

-    bnxt_re_ib_unreg(rdev, false);
+    bnxt_re_ib_unreg(rdev);
 }

 static void bnxt_re_stop_irq(void *handle)

@@ -251,7 +251,7 @@ static struct bnxt_ulp_ops bnxt_re_ulp_ops = {
 /* Driver registration routines used to let the networking driver (bnxt_en)
  * to know that the RoCE driver is now installed
  */
-static int bnxt_re_unregister_netdev(struct bnxt_re_dev *rdev, bool lock_wait)
+static int bnxt_re_unregister_netdev(struct bnxt_re_dev *rdev)
 {
     struct bnxt_en_dev *en_dev;
     int rc;

@@ -260,14 +260,9 @@ static int bnxt_re_unregister_netdev(struct bnxt_re_dev *rdev, bool lock_wait)
         return -EINVAL;

     en_dev = rdev->en_dev;
-    /* Acquire rtnl lock if it is not invokded from netdev event */
-    if (lock_wait)
-        rtnl_lock();

     rc = en_dev->en_ops->bnxt_unregister_device(rdev->en_dev,
                                                 BNXT_ROCE_ULP);
-    if (lock_wait)
-        rtnl_unlock();
     return rc;
 }

@@ -281,14 +276,12 @@ static int bnxt_re_register_netdev(struct bnxt_re_dev *rdev)

     en_dev = rdev->en_dev;

-    rtnl_lock();
     rc = en_dev->en_ops->bnxt_register_device(en_dev, BNXT_ROCE_ULP,
                                               &bnxt_re_ulp_ops, rdev);
-    rtnl_unlock();
     return rc;
 }

-static int bnxt_re_free_msix(struct bnxt_re_dev *rdev, bool lock_wait)
+static int bnxt_re_free_msix(struct bnxt_re_dev *rdev)
 {
     struct bnxt_en_dev *en_dev;
     int rc;

@@ -298,13 +291,9 @@ static int bnxt_re_free_msix(struct bnxt_re_dev *rdev, bool lock_wait)

     en_dev = rdev->en_dev;

-    if (lock_wait)
-        rtnl_lock();
-
     rc = en_dev->en_ops->bnxt_free_msix(rdev->en_dev, BNXT_ROCE_ULP);

-    if (lock_wait)
-        rtnl_unlock();
     return rc;
 }

@@ -320,7 +309,6 @@ static int bnxt_re_request_msix(struct bnxt_re_dev *rdev)

     num_msix_want = min_t(u32, BNXT_RE_MAX_MSIX, num_online_cpus());

-    rtnl_lock();
     num_msix_got = en_dev->en_ops->bnxt_request_msix(en_dev, BNXT_ROCE_ULP,
                                                      rdev->msix_entries,
                                                      num_msix_want);

@@ -335,7 +323,6 @@ static int bnxt_re_request_msix(struct bnxt_re_dev *rdev)
     }
     rdev->num_msix = num_msix_got;
 done:
-    rtnl_unlock();
     return rc;
 }

@@ -358,24 +345,18 @@ static void bnxt_re_fill_fw_msg(struct bnxt_fw_msg *fw_msg, void *msg,
     fw_msg->timeout = timeout;
 }

-static int bnxt_re_net_ring_free(struct bnxt_re_dev *rdev, u16 fw_ring_id,
-                                 bool lock_wait)
+static int bnxt_re_net_ring_free(struct bnxt_re_dev *rdev, u16 fw_ring_id)
 {
     struct bnxt_en_dev *en_dev = rdev->en_dev;
     struct hwrm_ring_free_input req = {0};
     struct hwrm_ring_free_output resp;
     struct bnxt_fw_msg fw_msg;
-    bool do_unlock = false;
     int rc = -EINVAL;

     if (!en_dev)
         return rc;

     memset(&fw_msg, 0, sizeof(fw_msg));
-    if (lock_wait) {
-        rtnl_lock();
-        do_unlock = true;
-    }

     bnxt_re_init_hwrm_hdr(rdev, (void *)&req, HWRM_RING_FREE, -1, -1);
     req.ring_type = RING_ALLOC_REQ_RING_TYPE_L2_CMPL;

@@ -386,8 +367,6 @@ static int bnxt_re_net_ring_free(struct bnxt_re_dev *rdev, u16 fw_ring_id,
     if (rc)
         dev_err(rdev_to_dev(rdev),
                 "Failed to free HW ring:%d :%#x", req.ring_id, rc);
-    if (do_unlock)
-        rtnl_unlock();
     return rc;
 }

@@ -405,7 +384,6 @@ static int bnxt_re_net_ring_alloc(struct bnxt_re_dev *rdev, dma_addr_t *dma_arr,
         return rc;

     memset(&fw_msg, 0, sizeof(fw_msg));
-    rtnl_lock();
     bnxt_re_init_hwrm_hdr(rdev, (void *)&req, HWRM_RING_ALLOC, -1, -1);
     req.enables = 0;
     req.page_tbl_addr = cpu_to_le64(dma_arr[0]);

@@ -426,27 +404,21 @@ static int bnxt_re_net_ring_alloc(struct bnxt_re_dev *rdev, dma_addr_t *dma_arr,
     if (!rc)
         *fw_ring_id = le16_to_cpu(resp.ring_id);

-    rtnl_unlock();
     return rc;
 }

 static int bnxt_re_net_stats_ctx_free(struct bnxt_re_dev *rdev,
-                                      u32 fw_stats_ctx_id, bool lock_wait)
+                                      u32 fw_stats_ctx_id)
 {
     struct bnxt_en_dev *en_dev = rdev->en_dev;
     struct hwrm_stat_ctx_free_input req = {0};
     struct bnxt_fw_msg fw_msg;
-    bool do_unlock = false;
     int rc = -EINVAL;

     if (!en_dev)
         return rc;

     memset(&fw_msg, 0, sizeof(fw_msg));
-    if (lock_wait) {
-        rtnl_lock();
-        do_unlock = true;
-    }

     bnxt_re_init_hwrm_hdr(rdev, (void *)&req, HWRM_STAT_CTX_FREE, -1, -1);
     req.stat_ctx_id = cpu_to_le32(fw_stats_ctx_id);

@@ -457,8 +429,6 @@ static int bnxt_re_net_stats_ctx_free(struct bnxt_re_dev *rdev,
         dev_err(rdev_to_dev(rdev),
                 "Failed to free HW stats context %#x", rc);

-    if (do_unlock)
-        rtnl_unlock();
     return rc;
 }

@@ -478,7 +448,6 @@ static int bnxt_re_net_stats_ctx_alloc(struct bnxt_re_dev *rdev,
         return rc;

     memset(&fw_msg, 0, sizeof(fw_msg));
-    rtnl_lock();

     bnxt_re_init_hwrm_hdr(rdev, (void *)&req, HWRM_STAT_CTX_ALLOC, -1, -1);
     req.update_period_ms = cpu_to_le32(1000);

@@ -490,7 +459,6 @@ static int bnxt_re_net_stats_ctx_alloc(struct bnxt_re_dev *rdev,
     if (!rc)
         *fw_stats_ctx_id = le32_to_cpu(resp.stat_ctx_id);

-    rtnl_unlock();
     return rc;
 }

@@ -929,19 +897,19 @@ static int bnxt_re_init_res(struct bnxt_re_dev *rdev)
     return rc;
 }

-static void bnxt_re_free_nq_res(struct bnxt_re_dev *rdev, bool lock_wait)
+static void bnxt_re_free_nq_res(struct bnxt_re_dev *rdev)
 {
     int i;

     for (i = 0; i < rdev->num_msix - 1; i++) {
-        bnxt_re_net_ring_free(rdev, rdev->nq[i].ring_id, lock_wait);
+        bnxt_re_net_ring_free(rdev, rdev->nq[i].ring_id);
         bnxt_qplib_free_nq(&rdev->nq[i]);
     }
 }

-static void bnxt_re_free_res(struct bnxt_re_dev *rdev, bool lock_wait)
+static void bnxt_re_free_res(struct bnxt_re_dev *rdev)
 {
-    bnxt_re_free_nq_res(rdev, lock_wait);
+    bnxt_re_free_nq_res(rdev);

     if (rdev->qplib_res.dpi_tbl.max) {
         bnxt_qplib_dealloc_dpi(&rdev->qplib_res,

@@ -1219,7 +1187,7 @@ static int bnxt_re_setup_qos(struct bnxt_re_dev *rdev)
     return 0;
 }

-static void bnxt_re_ib_unreg(struct bnxt_re_dev *rdev, bool lock_wait)
+static void bnxt_re_ib_unreg(struct bnxt_re_dev *rdev)
 {
     int i, rc;

@@ -1234,28 +1202,27 @@ static void bnxt_re_ib_unreg(struct bnxt_re_dev *rdev)
         cancel_delayed_work(&rdev->worker);

     bnxt_re_cleanup_res(rdev);
-    bnxt_re_free_res(rdev, lock_wait);
+    bnxt_re_free_res(rdev);

     if (test_and_clear_bit(BNXT_RE_FLAG_RCFW_CHANNEL_EN, &rdev->flags)) {
         rc = bnxt_qplib_deinit_rcfw(&rdev->rcfw);
         if (rc)
             dev_warn(rdev_to_dev(rdev),
                      "Failed to deinitialize RCFW: %#x", rc);
-        bnxt_re_net_stats_ctx_free(rdev, rdev->qplib_ctx.stats.fw_id,
-                                   lock_wait);
+        bnxt_re_net_stats_ctx_free(rdev, rdev->qplib_ctx.stats.fw_id);
         bnxt_qplib_free_ctx(rdev->en_dev->pdev, &rdev->qplib_ctx);
         bnxt_qplib_disable_rcfw_channel(&rdev->rcfw);
-        bnxt_re_net_ring_free(rdev, rdev->rcfw.creq_ring_id, lock_wait);
+        bnxt_re_net_ring_free(rdev, rdev->rcfw.creq_ring_id);
         bnxt_qplib_free_rcfw_channel(&rdev->rcfw);
     }
     if (test_and_clear_bit(BNXT_RE_FLAG_GOT_MSIX, &rdev->flags)) {
-        rc = bnxt_re_free_msix(rdev, lock_wait);
+        rc = bnxt_re_free_msix(rdev);
         if (rc)
             dev_warn(rdev_to_dev(rdev),
                      "Failed to free MSI-X vectors: %#x", rc);
     }
     if (test_and_clear_bit(BNXT_RE_FLAG_NETDEV_REGISTERED, &rdev->flags)) {
-        rc = bnxt_re_unregister_netdev(rdev, lock_wait);
+        rc = bnxt_re_unregister_netdev(rdev);
         if (rc)
             dev_warn(rdev_to_dev(rdev),
                      "Failed to unregister with netdev: %#x", rc);

@@ -1276,6 +1243,12 @@ static int bnxt_re_ib_reg(struct bnxt_re_dev *rdev)
 {
     int i, j, rc;

+    bool locked;
+
+    /* Acquire rtnl lock through out this function */
+    rtnl_lock();
+    locked = true;
+
     /* Registered a new RoCE device instance to netdev */
     rc = bnxt_re_register_netdev(rdev);
     if (rc) {

@@ -1374,12 +1347,16 @@ static int bnxt_re_ib_reg(struct bnxt_re_dev *rdev)
         schedule_delayed_work(&rdev->worker, msecs_to_jiffies(30000));
     }

+    rtnl_unlock();
+    locked = false;
+
     /* Register ib dev */
     rc = bnxt_re_register_ib(rdev);
     if (rc) {
         pr_err("Failed to register with IB: %#x\n", rc);
         goto fail;
     }
+    set_bit(BNXT_RE_FLAG_IBDEV_REGISTERED, &rdev->flags);
     dev_info(rdev_to_dev(rdev), "Device registered successfully");
     for (i = 0; i < ARRAY_SIZE(bnxt_re_attributes); i++) {
         rc = device_create_file(&rdev->ibdev.dev,

@@ -1395,7 +1372,6 @@ static int bnxt_re_ib_reg(struct bnxt_re_dev *rdev)
             goto fail;
         }
     }
-    set_bit(BNXT_RE_FLAG_IBDEV_REGISTERED, &rdev->flags);
     ib_get_eth_speed(&rdev->ibdev, 1, &rdev->active_speed,
                      &rdev->active_width);
     set_bit(BNXT_RE_FLAG_ISSUE_ROCE_STATS, &rdev->flags);

@@ -1404,17 +1380,21 @@ static int bnxt_re_ib_reg(struct bnxt_re_dev *rdev)

     return 0;
 free_sctx:
-    bnxt_re_net_stats_ctx_free(rdev, rdev->qplib_ctx.stats.fw_id, true);
+    bnxt_re_net_stats_ctx_free(rdev, rdev->qplib_ctx.stats.fw_id);
 free_ctx:
     bnxt_qplib_free_ctx(rdev->en_dev->pdev, &rdev->qplib_ctx);
 disable_rcfw:
     bnxt_qplib_disable_rcfw_channel(&rdev->rcfw);
 free_ring:
-    bnxt_re_net_ring_free(rdev, rdev->rcfw.creq_ring_id, true);
+    bnxt_re_net_ring_free(rdev, rdev->rcfw.creq_ring_id);
 free_rcfw:
     bnxt_qplib_free_rcfw_channel(&rdev->rcfw);
 fail:
-    bnxt_re_ib_unreg(rdev, true);
+    if (!locked)
+        rtnl_lock();
+    bnxt_re_ib_unreg(rdev);
+    rtnl_unlock();

     return rc;
 }

@@ -1567,7 +1547,7 @@ static int bnxt_re_netdev_event(struct notifier_block *notifier,
          */
         if (atomic_read(&rdev->sched_count) > 0)
             goto exit;
-        bnxt_re_ib_unreg(rdev, false);
+        bnxt_re_ib_unreg(rdev);
         bnxt_re_remove_one(rdev);
         bnxt_re_dev_unreg(rdev);
         break;

@@ -1646,7 +1626,10 @@ static void __exit bnxt_re_mod_exit(void)
          */
         flush_workqueue(bnxt_re_wq);
         bnxt_re_dev_stop(rdev);
-        bnxt_re_ib_unreg(rdev, true);
+        /* Acquire the rtnl_lock as the L2 resources are freed here */
+        rtnl_lock();
+        bnxt_re_ib_unreg(rdev);
+        rtnl_unlock();
         bnxt_re_remove_one(rdev);
|
||||
bnxt_re_dev_unreg(rdev);
|
||||
}
|
||||
|
|
|
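The bnxt_re hunks above all make one change: rtnl ownership moves out of the low-level helpers (the old lock_wait flag) and up to the caller, which holds the lock across the whole register or unregister sequence. A minimal sketch of that pattern follows; the example_* names are hypothetical, not the driver's real API.

	#include <linux/rtnetlink.h>

	struct example_dev { int dummy; };

	static void example_free_rings(struct example_dev *ed) { }
	static void example_free_ctx(struct example_dev *ed) { }

	/* Helpers assume the caller already holds rtnl; they never take it. */
	static void example_teardown(struct example_dev *ed)
	{
		ASSERT_RTNL();		/* documents the locking contract */
		example_free_rings(ed);
		example_free_ctx(ed);
	}

	static void example_exit(struct example_dev *ed)
	{
		rtnl_lock();		/* one acquisition per sequence */
		example_teardown(ed);
		rtnl_unlock();
	}
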
@@ -6733,6 +6733,7 @@ void start_freeze_handling(struct hfi1_pportdata *ppd, int flags)
 	struct hfi1_devdata *dd = ppd->dd;
 	struct send_context *sc;
 	int i;
+	int sc_flags;
 
 	if (flags & FREEZE_SELF)
 		write_csr(dd, CCE_CTRL, CCE_CTRL_SPC_FREEZE_SMASK);

@@ -6743,11 +6744,13 @@ void start_freeze_handling(struct hfi1_pportdata *ppd, int flags)
 	/* notify all SDMA engines that they are going into a freeze */
 	sdma_freeze_notify(dd, !!(flags & FREEZE_LINK_DOWN));
 
+	sc_flags = SCF_FROZEN | SCF_HALTED | (flags & FREEZE_LINK_DOWN ?
+					      SCF_LINK_DOWN : 0);
 	/* do halt pre-handling on all enabled send contexts */
 	for (i = 0; i < dd->num_send_contexts; i++) {
 		sc = dd->send_contexts[i].sc;
 		if (sc && (sc->flags & SCF_ENABLED))
-			sc_stop(sc, SCF_FROZEN | SCF_HALTED);
+			sc_stop(sc, sc_flags);
 	}
 
 	/* Send contexts are frozen. Notify user space */

@@ -10674,6 +10677,7 @@ int set_link_state(struct hfi1_pportdata *ppd, u32 state)
 		add_rcvctrl(dd, RCV_CTRL_RCV_PORT_ENABLE_SMASK);
 
 		handle_linkup_change(dd, 1);
+		pio_kernel_linkup(dd);
 
 		/*
 		 * After link up, a new link width will have been set.

@@ -86,6 +86,7 @@ void pio_send_control(struct hfi1_devdata *dd, int op)
 	unsigned long flags;
 	int write = 1;	/* write sendctrl back */
 	int flush = 0;	/* re-read sendctrl to make sure it is flushed */
+	int i;
 
 	spin_lock_irqsave(&dd->sendctrl_lock, flags);
 
@@ -95,9 +96,13 @@ void pio_send_control(struct hfi1_devdata *dd, int op)
 		reg |= SEND_CTRL_SEND_ENABLE_SMASK;
 	/* Fall through */
 	case PSC_DATA_VL_ENABLE:
+		mask = 0;
+		for (i = 0; i < ARRAY_SIZE(dd->vld); i++)
+			if (!dd->vld[i].mtu)
+				mask |= BIT_ULL(i);
 		/* Disallow sending on VLs not enabled */
-		mask = (((~0ull) << num_vls) & SEND_CTRL_UNSUPPORTED_VL_MASK) <<
-				SEND_CTRL_UNSUPPORTED_VL_SHIFT;
+		mask = (mask & SEND_CTRL_UNSUPPORTED_VL_MASK) <<
+			SEND_CTRL_UNSUPPORTED_VL_SHIFT;
 		reg = (reg & ~SEND_CTRL_UNSUPPORTED_VL_SMASK) | mask;
 		break;
 	case PSC_GLOBAL_DISABLE:

@@ -921,20 +926,18 @@ void sc_free(struct send_context *sc)
 void sc_disable(struct send_context *sc)
 {
 	u64 reg;
-	unsigned long flags;
 	struct pio_buf *pbuf;
 
 	if (!sc)
 		return;
 
 	/* do all steps, even if already disabled */
-	spin_lock_irqsave(&sc->alloc_lock, flags);
+	spin_lock_irq(&sc->alloc_lock);
 	reg = read_kctxt_csr(sc->dd, sc->hw_context, SC(CTRL));
 	reg &= ~SC(CTRL_CTXT_ENABLE_SMASK);
 	sc->flags &= ~SCF_ENABLED;
 	sc_wait_for_packet_egress(sc, 1);
 	write_kctxt_csr(sc->dd, sc->hw_context, SC(CTRL), reg);
-	spin_unlock_irqrestore(&sc->alloc_lock, flags);
 
 	/*
 	 * Flush any waiters. Once the context is disabled,

@@ -944,7 +947,7 @@ void sc_disable(struct send_context *sc)
 	 * proceed with the flush.
 	 */
 	udelay(1);
-	spin_lock_irqsave(&sc->release_lock, flags);
+	spin_lock(&sc->release_lock);
 	if (sc->sr) {	/* this context has a shadow ring */
 		while (sc->sr_tail != sc->sr_head) {
 			pbuf = &sc->sr[sc->sr_tail].pbuf;

@@ -955,7 +958,8 @@ void sc_disable(struct send_context *sc)
 				sc->sr_tail = 0;
 		}
 	}
-	spin_unlock_irqrestore(&sc->release_lock, flags);
+	spin_unlock(&sc->release_lock);
+	spin_unlock_irq(&sc->alloc_lock);
 }
 
 /* return SendEgressCtxtStatus.PacketOccupancy */

@@ -1178,11 +1182,39 @@ void pio_kernel_unfreeze(struct hfi1_devdata *dd)
 		sc = dd->send_contexts[i].sc;
 		if (!sc || !(sc->flags & SCF_FROZEN) || sc->type == SC_USER)
 			continue;
+		if (sc->flags & SCF_LINK_DOWN)
+			continue;
 
 		sc_enable(sc);	/* will clear the sc frozen flag */
 	}
 }
 
+/**
+ * pio_kernel_linkup() - Re-enable send contexts after linkup event
+ * @dd: valid device data
+ *
+ * When the link goes down, the freeze path is taken. However, a link down
+ * event is different from a freeze because if the send context is re-enabled
+ * whoever is sending data will start sending data again, which will hang
+ * any QP that is sending data.
+ *
+ * The freeze path now looks at the type of event that occurs and takes this
+ * path for the link down event.
+ */
+void pio_kernel_linkup(struct hfi1_devdata *dd)
+{
+	struct send_context *sc;
+	int i;
+
+	for (i = 0; i < dd->num_send_contexts; i++) {
+		sc = dd->send_contexts[i].sc;
+		if (!sc || !(sc->flags & SCF_LINK_DOWN) || sc->type == SC_USER)
+			continue;
+
+		sc_enable(sc);	/* will clear the sc link down flag */
+	}
+}
+
 /*
  * Wait for the SendPioInitCtxt.PioInitInProgress bit to clear.
  * Returns:

@@ -1382,11 +1414,10 @@ void sc_stop(struct send_context *sc, int flag)
 {
 	unsigned long flags;
 
-	/* mark the context */
-	sc->flags |= flag;
-
 	/* stop buffer allocations */
 	spin_lock_irqsave(&sc->alloc_lock, flags);
+	/* mark the context */
+	sc->flags |= flag;
 	sc->flags &= ~SCF_ENABLED;
 	spin_unlock_irqrestore(&sc->alloc_lock, flags);
 	wake_up(&sc->halt_wait);

@@ -139,6 +139,7 @@ struct send_context {
 #define SCF_IN_FREE 0x02
 #define SCF_HALTED  0x04
 #define SCF_FROZEN  0x08
+#define SCF_LINK_DOWN 0x10
 
 struct send_context_info {
 	struct send_context *sc;	/* allocated working context */

@@ -306,6 +307,7 @@ void set_pio_integrity(struct send_context *sc);
 void pio_reset_all(struct hfi1_devdata *dd);
 void pio_freeze(struct hfi1_devdata *dd);
 void pio_kernel_unfreeze(struct hfi1_devdata *dd);
+void pio_kernel_linkup(struct hfi1_devdata *dd);
 
 /* global PIO send control operations */
 #define PSC_GLOBAL_ENABLE 0

@@ -828,7 +828,7 @@ static int user_sdma_send_pkts(struct user_sdma_request *req, unsigned maxpkts)
 		if (READ_ONCE(iovec->offset) == iovec->iov.iov_len) {
 			if (++req->iov_idx == req->data_iovs) {
 				ret = -EFAULT;
-				goto free_txreq;
+				goto free_tx;
 			}
 			iovec = &req->iovs[req->iov_idx];
 			WARN_ON(iovec->offset);

@@ -1582,6 +1582,7 @@ static int hfi1_check_ah(struct ib_device *ibdev, struct rdma_ah_attr *ah_attr)
 	struct hfi1_pportdata *ppd;
 	struct hfi1_devdata *dd;
 	u8 sc5;
+	u8 sl;
 
 	if (hfi1_check_mcast(rdma_ah_get_dlid(ah_attr)) &&
 	    !(rdma_ah_get_ah_flags(ah_attr) & IB_AH_GRH))

@@ -1590,8 +1591,13 @@ static int hfi1_check_ah(struct ib_device *ibdev, struct rdma_ah_attr *ah_attr)
 	/* test the mapping for validity */
 	ibp = to_iport(ibdev, rdma_ah_get_port_num(ah_attr));
 	ppd = ppd_from_ibp(ibp);
-	sc5 = ibp->sl_to_sc[rdma_ah_get_sl(ah_attr)];
 	dd = dd_from_ppd(ppd);
+
+	sl = rdma_ah_get_sl(ah_attr);
+	if (sl >= ARRAY_SIZE(ibp->sl_to_sc))
+		return -EINVAL;
+
+	sc5 = ibp->sl_to_sc[sl];
 	if (sc_to_vlt(dd, sc5) > num_vls && sc_to_vlt(dd, sc5) != 0xf)
 		return -EINVAL;
 	return 0;

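The verbs.c hunk is a plain bounds check: the SL taken from user-controlled address attributes is validated against the size of the sl_to_sc table before it is used as an index. The same pattern in standalone, runnable form (the table and values here are placeholders):

	#include <stdio.h>

	#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

	static const unsigned char sl_to_sc[32] = { 0 };	/* placeholder map */

	static int check_sl(unsigned int sl)
	{
		if (sl >= ARRAY_SIZE(sl_to_sc))	/* reject before indexing */
			return -1;
		return sl_to_sc[sl];
	}

	int main(void)
	{
		printf("%d\n", check_sl(5));	/* in range: prints 0 */
		printf("%d\n", check_sl(64));	/* out of range: prints -1 */
		return 0;
	}
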
@@ -723,6 +723,7 @@ static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_OBJ_CREATE)(
 		attrs, MLX5_IB_ATTR_DEVX_OBJ_CREATE_HANDLE);
 	struct mlx5_ib_ucontext *c = to_mucontext(uobj->context);
 	struct mlx5_ib_dev *dev = to_mdev(c->ibucontext.device);
+	u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)];
 	struct devx_obj *obj;
 	int err;
 
@@ -754,10 +755,12 @@ static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_OBJ_CREATE)(
 
 	err = uverbs_copy_to(attrs, MLX5_IB_ATTR_DEVX_OBJ_CREATE_CMD_OUT, cmd_out, cmd_out_len);
 	if (err)
-		goto obj_free;
+		goto obj_destroy;
 
 	return 0;
 
+obj_destroy:
+	mlx5_cmd_exec(obj->mdev, obj->dinbox, obj->dinlen, out, sizeof(out));
 obj_free:
 	kfree(obj);
 	return err;

@@ -2951,7 +2951,7 @@ static int srp_reset_device(struct scsi_cmnd *scmnd)
 {
 	struct srp_target_port *target = host_to_target(scmnd->device->host);
 	struct srp_rdma_ch *ch;
-	int i;
+	int i, j;
 	u8 status;
 
 	shost_printk(KERN_ERR, target->scsi_host, "SRP reset_device called\n");

@@ -2965,8 +2965,8 @@ static int srp_reset_device(struct scsi_cmnd *scmnd)
 
 	for (i = 0; i < target->ch_count; i++) {
 		ch = &target->ch[i];
-		for (i = 0; i < target->req_ring_size; ++i) {
-			struct srp_request *req = &ch->req_ring[i];
+		for (j = 0; j < target->req_ring_size; ++j) {
+			struct srp_request *req = &ch->req_ring[j];
 
 			srp_finish_req(ch, req, scmnd->device, DID_RESET << 16);
 		}

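The ib_srp fix is the classic reused-loop-counter bug: the inner loop recycled i, so the outer walk over channels terminated after at most one pass. A small runnable illustration of why the inner loop needs its own counter (the sizes are made up):

	#include <stdio.h>

	int main(void)
	{
		int channels = 2, reqs = 3, visited = 0;
		for (int i = 0; i < channels; i++)
			for (int j = 0; j < reqs; j++)	/* 'j', not 'i': reusing 'i'
							 * would also advance the
							 * outer loop's condition */
				visited++;
		printf("visited %d\n", visited);	/* 6, one per (ch, req) pair */
		return 0;
	}
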
@@ -75,8 +75,7 @@ MODULE_LICENSE("GPL");
  */
 
 
-static unsigned char atakbd_keycode[0x72] = {	/* American layout */
-	[0]	 = KEY_GRAVE,
+static unsigned char atakbd_keycode[0x73] = {	/* American layout */
 	[1]	 = KEY_ESC,
 	[2]	 = KEY_1,
 	[3]	 = KEY_2,

@@ -117,9 +116,9 @@ static unsigned char atakbd_keycode[0x73] = {	/* American layout */
 	[38]	 = KEY_L,
 	[39]	 = KEY_SEMICOLON,
 	[40]	 = KEY_APOSTROPHE,
-	[41]	 = KEY_BACKSLASH,	/* FIXME, '#' */
+	[41]	 = KEY_GRAVE,
 	[42]	 = KEY_LEFTSHIFT,
-	[43]	 = KEY_GRAVE,	/* FIXME: '~' */
+	[43]	 = KEY_BACKSLASH,
 	[44]	 = KEY_Z,
 	[45]	 = KEY_X,
 	[46]	 = KEY_C,

@@ -145,45 +144,34 @@ static unsigned char atakbd_keycode[0x73] = {	/* American layout */
 	[66]	 = KEY_F8,
 	[67]	 = KEY_F9,
 	[68]	 = KEY_F10,
-	[69]	 = KEY_ESC,
-	[70]	 = KEY_DELETE,
-	[71]	 = KEY_KP7,
-	[72]	 = KEY_KP8,
-	[73]	 = KEY_KP9,
+	[71]	 = KEY_HOME,
+	[72]	 = KEY_UP,
 	[74]	 = KEY_KPMINUS,
-	[75]	 = KEY_KP4,
-	[76]	 = KEY_KP5,
-	[77]	 = KEY_KP6,
+	[75]	 = KEY_LEFT,
+	[77]	 = KEY_RIGHT,
 	[78]	 = KEY_KPPLUS,
-	[79]	 = KEY_KP1,
-	[80]	 = KEY_KP2,
-	[81]	 = KEY_KP3,
-	[82]	 = KEY_KP0,
-	[83]	 = KEY_KPDOT,
-	[90]	 = KEY_KPLEFTPAREN,
-	[91]	 = KEY_KPRIGHTPAREN,
-	[92]	 = KEY_KPASTERISK,	/* FIXME */
-	[93]	 = KEY_KPASTERISK,
-	[94]	 = KEY_KPPLUS,
-	[95]	 = KEY_HELP,
+	[80]	 = KEY_DOWN,
+	[82]	 = KEY_INSERT,
+	[83]	 = KEY_DELETE,
 	[96]	 = KEY_102ND,
-	[97]	 = KEY_KPASTERISK,	/* FIXME */
-	[98]	 = KEY_KPSLASH,
+	[97]	 = KEY_UNDO,
+	[98]	 = KEY_HELP,
 	[99]	 = KEY_KPLEFTPAREN,
 	[100]	 = KEY_KPRIGHTPAREN,
 	[101]	 = KEY_KPSLASH,
 	[102]	 = KEY_KPASTERISK,
-	[103]	 = KEY_UP,
-	[104]	 = KEY_KPASTERISK,	/* FIXME */
-	[105]	 = KEY_LEFT,
-	[106]	 = KEY_RIGHT,
-	[107]	 = KEY_KPASTERISK,	/* FIXME */
-	[108]	 = KEY_DOWN,
-	[109]	 = KEY_KPASTERISK,	/* FIXME */
-	[110]	 = KEY_KPASTERISK,	/* FIXME */
-	[111]	 = KEY_KPASTERISK,	/* FIXME */
-	[112]	 = KEY_KPASTERISK,	/* FIXME */
-	[113]	 = KEY_KPASTERISK	/* FIXME */
+	[103]	 = KEY_KP7,
+	[104]	 = KEY_KP8,
+	[105]	 = KEY_KP9,
+	[106]	 = KEY_KP4,
+	[107]	 = KEY_KP5,
+	[108]	 = KEY_KP6,
+	[109]	 = KEY_KP1,
+	[110]	 = KEY_KP2,
+	[111]	 = KEY_KP3,
+	[112]	 = KEY_KP0,
+	[113]	 = KEY_KPDOT,
+	[114]	 = KEY_KPENTER,
 };
 
 static struct input_dev *atakbd_dev;

@@ -191,21 +179,15 @@ static struct input_dev *atakbd_dev;
 static void atakbd_interrupt(unsigned char scancode, char down)
 {
 
-	if (scancode < 0x72) {		/* scancodes < 0xf2 are keys */
+	if (scancode < 0x73) {		/* scancodes < 0xf3 are keys */
 
 		// report raw events here?
 
 		scancode = atakbd_keycode[scancode];
 
-		if (scancode == KEY_CAPSLOCK) {	/* CapsLock is a toggle switch key on Amiga */
-			input_report_key(atakbd_dev, scancode, 1);
-			input_report_key(atakbd_dev, scancode, 0);
-			input_sync(atakbd_dev);
-		} else {
-			input_report_key(atakbd_dev, scancode, down);
-			input_sync(atakbd_dev);
-		}
-	} else				/* scancodes >= 0xf2 are mouse data, most likely */
+		input_report_key(atakbd_dev, scancode, down);
+		input_sync(atakbd_dev);
+	} else				/* scancodes >= 0xf3 are mouse data, most likely */
 		printk(KERN_INFO "atakbd: unhandled scancode %x\n", scancode);
 
 	return;

@@ -410,7 +410,7 @@ static int uinput_validate_absinfo(struct input_dev *dev, unsigned int code,
 	min = abs->minimum;
 	max = abs->maximum;
 
-	if ((min != 0 || max != 0) && max <= min) {
+	if ((min != 0 || max != 0) && max < min) {
 		printk(KERN_DEBUG
 		       "%s: invalid abs[%02x] min:%d max:%d\n",
 		       UINPUT_NAME, code, min, max);

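The uinput change relaxes max <= min to max < min, so an axis whose minimum equals its maximum (a fixed-value axis) no longer fails validation, while an inverted range is still rejected. A standalone sketch of the check:

	#include <stdio.h>

	static int validate_absinfo(int min, int max)
	{
		if ((min != 0 || max != 0) && max < min)	/* inverted range only */
			return -22;	/* stand-in for -EINVAL */
		return 0;
	}

	int main(void)
	{
		printf("%d\n", validate_absinfo(5, 5));	/* 0: fixed axis accepted */
		printf("%d\n", validate_absinfo(9, 3));	/* -22: still rejected */
		return 0;
	}
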
@@ -1178,6 +1178,8 @@ static const struct dmi_system_id elantech_dmi_has_middle_button[] = {
 static const char * const middle_button_pnp_ids[] = {
 	"LEN2131", /* ThinkPad P52 w/ NFC */
 	"LEN2132", /* ThinkPad P52 */
+	"LEN2133", /* ThinkPad P72 w/ NFC */
+	"LEN2134", /* ThinkPad P72 */
 	NULL
 };
 

@@ -241,6 +241,9 @@ static int __maybe_unused egalax_ts_suspend(struct device *dev)
 	struct i2c_client *client = to_i2c_client(dev);
 	int ret;
 
+	if (device_may_wakeup(dev))
+		return enable_irq_wake(client->irq);
+
 	ret = i2c_master_send(client, suspend_cmd, MAX_I2C_DATA_LEN);
 	return ret > 0 ? 0 : ret;
 }

@@ -249,6 +252,9 @@ static int __maybe_unused egalax_ts_resume(struct device *dev)
 {
 	struct i2c_client *client = to_i2c_client(dev);
 
+	if (device_may_wakeup(dev))
+		return disable_irq_wake(client->irq);
+
 	return egalax_wake_up_device(client);
 }
 

@@ -246,7 +246,13 @@ static u16 get_alias(struct device *dev)
 
 	/* The callers make sure that get_device_id() does not fail here */
 	devid = get_device_id(dev);
+
+	/* For ACPI HID devices, we simply return the devid as such */
+	if (!dev_is_pci(dev))
+		return devid;
+
 	ivrs_alias = amd_iommu_alias_table[devid];
+
 	pci_for_each_dma_alias(pdev, __last_alias, &pci_alias);
 
 	if (ivrs_alias == pci_alias)

@@ -2540,9 +2540,9 @@ static struct dmar_domain *dmar_insert_one_dev_info(struct intel_iommu *iommu,
 	if (dev && dev_is_pci(dev) && info->pasid_supported) {
 		ret = intel_pasid_alloc_table(dev);
 		if (ret) {
-			__dmar_remove_one_dev_info(info);
-			spin_unlock_irqrestore(&device_domain_lock, flags);
-			return NULL;
+			pr_warn("No pasid table for %s, pasid disabled\n",
+				dev_name(dev));
+			info->pasid_supported = 0;
 		}
 	}
 	spin_unlock_irqrestore(&device_domain_lock, flags);

@@ -11,7 +11,7 @@
 #define __INTEL_PASID_H
 
 #define PASID_MIN			0x1
-#define PASID_MAX			0x100000
+#define PASID_MAX			0x20000
 
 struct pasid_entry {
 	u64 val;

@@ -1241,6 +1241,12 @@ static int rk_iommu_probe(struct platform_device *pdev)
 
 static void rk_iommu_shutdown(struct platform_device *pdev)
 {
+	struct rk_iommu *iommu = platform_get_drvdata(pdev);
+	int i = 0, irq;
+
+	while ((irq = platform_get_irq(pdev, i++)) != -ENXIO)
+		devm_free_irq(iommu->dev, irq, iommu);
+
 	pm_runtime_force_suspend(&pdev->dev);
 }
 

@@ -965,6 +965,7 @@ void bch_prio_write(struct cache *ca);
 void bch_write_bdev_super(struct cached_dev *dc, struct closure *parent);
 
 extern struct workqueue_struct *bcache_wq;
+extern struct workqueue_struct *bch_journal_wq;
 extern struct mutex bch_register_lock;
 extern struct list_head bch_cache_sets;
 

@@ -485,7 +485,7 @@ static void do_journal_discard(struct cache *ca)
 
 		closure_get(&ca->set->cl);
 		INIT_WORK(&ja->discard_work, journal_discard_work);
-		schedule_work(&ja->discard_work);
+		queue_work(bch_journal_wq, &ja->discard_work);
 	}
 }
 
@@ -592,7 +592,7 @@ static void journal_write_done(struct closure *cl)
 		: &j->w[0];
 
 	__closure_wake_up(&w->wait);
-	continue_at_nobarrier(cl, journal_write, system_wq);
+	continue_at_nobarrier(cl, journal_write, bch_journal_wq);
 }
 
 static void journal_write_unlock(struct closure *cl)

@@ -627,7 +627,7 @@ static void journal_write_unlocked(struct closure *cl)
 		spin_unlock(&c->journal.lock);
 
 		btree_flush_write(c);
-		continue_at(cl, journal_write, system_wq);
+		continue_at(cl, journal_write, bch_journal_wq);
 		return;
 	}
 

@@ -47,6 +47,7 @@ static int bcache_major;
 static DEFINE_IDA(bcache_device_idx);
 static wait_queue_head_t unregister_wait;
 struct workqueue_struct *bcache_wq;
+struct workqueue_struct *bch_journal_wq;
 
 #define BTREE_MAX_PAGES		(256 * 1024 / PAGE_SIZE)
 /* limitation of partitions number on single bcache device */

@@ -2341,6 +2342,9 @@ static void bcache_exit(void)
 	kobject_put(bcache_kobj);
 	if (bcache_wq)
 		destroy_workqueue(bcache_wq);
+	if (bch_journal_wq)
+		destroy_workqueue(bch_journal_wq);
+
 	if (bcache_major)
 		unregister_blkdev(bcache_major, "bcache");
 	unregister_reboot_notifier(&reboot);

@@ -2370,6 +2374,10 @@ static int __init bcache_init(void)
 	if (!bcache_wq)
 		goto err;
 
+	bch_journal_wq = alloc_workqueue("bch_journal", WQ_MEM_RECLAIM, 0);
+	if (!bch_journal_wq)
+		goto err;
+
 	bcache_kobj = kobject_create_and_add("bcache", fs_kobj);
 	if (!bcache_kobj)
 		goto err;

|
|||
V4L2_CID_AUTO_WHITE_BALANCE,
|
||||
0, 1, 1,
|
||||
V4L2_WHITE_BALANCE_AUTO);
|
||||
if (IS_ERR_OR_NULL(mt9v111->auto_awb)) {
|
||||
ret = PTR_ERR(mt9v111->auto_awb);
|
||||
goto error_free_ctrls;
|
||||
}
|
||||
|
||||
mt9v111->auto_exp = v4l2_ctrl_new_std_menu(&mt9v111->ctrls,
|
||||
&mt9v111_ctrl_ops,
|
||||
V4L2_CID_EXPOSURE_AUTO,
|
||||
V4L2_EXPOSURE_MANUAL,
|
||||
0, V4L2_EXPOSURE_AUTO);
|
||||
if (IS_ERR_OR_NULL(mt9v111->auto_exp)) {
|
||||
ret = PTR_ERR(mt9v111->auto_exp);
|
||||
goto error_free_ctrls;
|
||||
}
|
||||
|
||||
/* Initialize timings */
|
||||
mt9v111->hblank = v4l2_ctrl_new_std(&mt9v111->ctrls, &mt9v111_ctrl_ops,
|
||||
V4L2_CID_HBLANK,
|
||||
MT9V111_CORE_R05_MIN_HBLANK,
|
||||
MT9V111_CORE_R05_MAX_HBLANK, 1,
|
||||
MT9V111_CORE_R05_DEF_HBLANK);
|
||||
if (IS_ERR_OR_NULL(mt9v111->hblank)) {
|
||||
ret = PTR_ERR(mt9v111->hblank);
|
||||
goto error_free_ctrls;
|
||||
}
|
||||
|
||||
mt9v111->vblank = v4l2_ctrl_new_std(&mt9v111->ctrls, &mt9v111_ctrl_ops,
|
||||
V4L2_CID_VBLANK,
|
||||
MT9V111_CORE_R06_MIN_VBLANK,
|
||||
MT9V111_CORE_R06_MAX_VBLANK, 1,
|
||||
MT9V111_CORE_R06_DEF_VBLANK);
|
||||
if (IS_ERR_OR_NULL(mt9v111->vblank)) {
|
||||
ret = PTR_ERR(mt9v111->vblank);
|
||||
goto error_free_ctrls;
|
||||
}
|
||||
|
||||
/* PIXEL_RATE is fixed: just expose it to user space. */
|
||||
v4l2_ctrl_new_std(&mt9v111->ctrls, &mt9v111_ctrl_ops,
|
||||
|
@ -1201,6 +1181,10 @@ static int mt9v111_probe(struct i2c_client *client)
|
|||
DIV_ROUND_CLOSEST(mt9v111->sysclk, 2), 1,
|
||||
DIV_ROUND_CLOSEST(mt9v111->sysclk, 2));
|
||||
|
||||
if (mt9v111->ctrls.error) {
|
||||
ret = mt9v111->ctrls.error;
|
||||
goto error_free_ctrls;
|
||||
}
|
||||
mt9v111->sd.ctrl_handler = &mt9v111->ctrls;
|
||||
|
||||
/* Start with default configuration: 640x480 UYVY. */
|
||||
|
@ -1226,26 +1210,27 @@ static int mt9v111_probe(struct i2c_client *client)
|
|||
mt9v111->pad.flags = MEDIA_PAD_FL_SOURCE;
|
||||
ret = media_entity_pads_init(&mt9v111->sd.entity, 1, &mt9v111->pad);
|
||||
if (ret)
|
||||
goto error_free_ctrls;
|
||||
goto error_free_entity;
|
||||
#endif
|
||||
|
||||
ret = mt9v111_chip_probe(mt9v111);
|
||||
if (ret)
|
||||
goto error_free_ctrls;
|
||||
goto error_free_entity;
|
||||
|
||||
ret = v4l2_async_register_subdev(&mt9v111->sd);
|
||||
if (ret)
|
||||
goto error_free_ctrls;
|
||||
goto error_free_entity;
|
||||
|
||||
return 0;
|
||||
|
||||
error_free_ctrls:
|
||||
v4l2_ctrl_handler_free(&mt9v111->ctrls);
|
||||
|
||||
error_free_entity:
|
||||
#if IS_ENABLED(CONFIG_MEDIA_CONTROLLER)
|
||||
media_entity_cleanup(&mt9v111->sd.entity);
|
||||
#endif
|
||||
|
||||
error_free_ctrls:
|
||||
v4l2_ctrl_handler_free(&mt9v111->ctrls);
|
||||
|
||||
mutex_destroy(&mt9v111->pwr_mutex);
|
||||
mutex_destroy(&mt9v111->stream_mutex);
|
||||
|
||||
|
@ -1259,12 +1244,12 @@ static int mt9v111_remove(struct i2c_client *client)
|
|||
|
||||
v4l2_async_unregister_subdev(sd);
|
||||
|
||||
v4l2_ctrl_handler_free(&mt9v111->ctrls);
|
||||
|
||||
#if IS_ENABLED(CONFIG_MEDIA_CONTROLLER)
|
||||
media_entity_cleanup(&sd->entity);
|
||||
#endif
|
||||
|
||||
v4l2_ctrl_handler_free(&mt9v111->ctrls);
|
||||
|
||||
mutex_destroy(&mt9v111->pwr_mutex);
|
||||
mutex_destroy(&mt9v111->stream_mutex);
|
||||
|
||||
|
|
|
@ -541,6 +541,8 @@ config VIDEO_CROS_EC_CEC
|
|||
depends on MFD_CROS_EC
|
||||
select CEC_CORE
|
||||
select CEC_NOTIFIER
|
||||
select CHROME_PLATFORMS
|
||||
select CROS_EC_PROTO
|
||||
---help---
|
||||
If you say yes here you will get support for the
|
||||
ChromeOS Embedded Controller's CEC.
|
||||
|
|
|
@@ -10,6 +10,7 @@
 #include <linux/clk.h>
 #include <linux/completion.h>
 #include <linux/interrupt.h>
+#include <linux/io.h>
 #include <linux/kernel.h>
 #include <linux/of.h>
 #include <linux/platform_device.h>

@@ -12,6 +12,7 @@
 
 #include <linux/delay.h>
 #include <linux/interrupt.h>
+#include <linux/io.h>
 
 #define CAMSS_CSI_PHY_LNn_CFG2(n)		(0x004 + 0x40 * (n))
 #define CAMSS_CSI_PHY_LNn_CFG3(n)		(0x008 + 0x40 * (n))

@@ -12,6 +12,7 @@
 
 #include <linux/delay.h>
 #include <linux/interrupt.h>
+#include <linux/io.h>
 
 #define CSIPHY_3PH_LNn_CFG1(n)			(0x000 + 0x100 * (n))
 #define CSIPHY_3PH_LNn_CFG1_SWI_REC_DLY_PRG	(BIT(7) | BIT(6))

@@ -10,6 +10,7 @@
 #include <linux/clk.h>
 #include <linux/delay.h>
 #include <linux/interrupt.h>
+#include <linux/io.h>
 #include <linux/kernel.h>
 #include <linux/of.h>
 #include <linux/platform_device.h>

@@ -10,6 +10,7 @@
 #include <linux/clk.h>
 #include <linux/completion.h>
 #include <linux/interrupt.h>
+#include <linux/io.h>
 #include <linux/iopoll.h>
 #include <linux/kernel.h>
 #include <linux/mutex.h>

@@ -1076,8 +1077,8 @@ int msm_ispif_subdev_init(struct ispif_device *ispif,
 	else
 		return -EINVAL;
 
-	ispif->line = kcalloc(ispif->line_num, sizeof(*ispif->line),
-			      GFP_KERNEL);
+	ispif->line = devm_kcalloc(dev, ispif->line_num, sizeof(*ispif->line),
+				   GFP_KERNEL);
 	if (!ispif->line)
 		return -ENOMEM;
 

@@ -9,6 +9,7 @@
  */
 
 #include <linux/interrupt.h>
+#include <linux/io.h>
 #include <linux/iopoll.h>
 
 #include "camss-vfe.h"

@@ -9,6 +9,7 @@
 */
 
 #include <linux/interrupt.h>
+#include <linux/io.h>
 #include <linux/iopoll.h>
 
 #include "camss-vfe.h"

@@ -848,17 +848,18 @@ static int camss_probe(struct platform_device *pdev)
 		return -EINVAL;
 	}
 
-	camss->csiphy = kcalloc(camss->csiphy_num, sizeof(*camss->csiphy),
-				GFP_KERNEL);
+	camss->csiphy = devm_kcalloc(dev, camss->csiphy_num,
+				     sizeof(*camss->csiphy), GFP_KERNEL);
 	if (!camss->csiphy)
 		return -ENOMEM;
 
-	camss->csid = kcalloc(camss->csid_num, sizeof(*camss->csid),
-			      GFP_KERNEL);
+	camss->csid = devm_kcalloc(dev, camss->csid_num, sizeof(*camss->csid),
+				   GFP_KERNEL);
 	if (!camss->csid)
 		return -ENOMEM;
 
-	camss->vfe = kcalloc(camss->vfe_num, sizeof(*camss->vfe), GFP_KERNEL);
+	camss->vfe = devm_kcalloc(dev, camss->vfe_num, sizeof(*camss->vfe),
+				  GFP_KERNEL);
 	if (!camss->vfe)
 		return -ENOMEM;
 
@@ -993,12 +994,12 @@ static const struct of_device_id camss_dt_match[] = {
 
 MODULE_DEVICE_TABLE(of, camss_dt_match);
 
-static int camss_runtime_suspend(struct device *dev)
+static int __maybe_unused camss_runtime_suspend(struct device *dev)
 {
 	return 0;
 }
 
-static int camss_runtime_resume(struct device *dev)
+static int __maybe_unused camss_runtime_resume(struct device *dev)
 {
 	return 0;
 }

@@ -402,8 +402,10 @@ static int af9035_i2c_master_xfer(struct i2c_adapter *adap,
 			if (msg[0].addr == state->af9033_i2c_addr[1])
 				reg |= 0x100000;
 
-			ret = af9035_wr_regs(d, reg, &msg[0].buf[3],
-					     msg[0].len - 3);
+			ret = (msg[0].len >= 3) ? af9035_wr_regs(d, reg,
+								 &msg[0].buf[3],
+								 msg[0].len - 3)
+						: -EOPNOTSUPP;
 		} else {
 			/* I2C write */
 			u8 buf[MAX_XFER_SIZE];

@@ -971,16 +971,13 @@ static void bond_poll_controller(struct net_device *bond_dev)
 	struct slave *slave = NULL;
 	struct list_head *iter;
 	struct ad_info ad_info;
-	struct netpoll_info *ni;
-	const struct net_device_ops *ops;
 
 	if (BOND_MODE(bond) == BOND_MODE_8023AD)
 		if (bond_3ad_get_active_agg_info(bond, &ad_info))
 			return;
 
 	bond_for_each_slave_rcu(bond, slave, iter) {
-		ops = slave->dev->netdev_ops;
-		if (!bond_slave_is_up(slave) || !ops->ndo_poll_controller)
+		if (!bond_slave_is_up(slave))
 			continue;
 
 		if (BOND_MODE(bond) == BOND_MODE_8023AD) {

@@ -992,11 +989,7 @@ static void bond_poll_controller(struct net_device *bond_dev)
 				continue;
 		}
 
-		ni = rcu_dereference_bh(slave->dev->npinfo);
-		if (down_trylock(&ni->dev_lock))
-			continue;
-		ops->ndo_poll_controller(slave->dev);
-		up(&ni->dev_lock);
+		netpoll_poll_dev(slave->dev);
 	}
 }
 

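The bonding hunk swaps direct calls through ndo_poll_controller for the core helper netpoll_poll_dev(), which handles the npinfo locking itself and also works for slaves whose drivers define no poll controller; the many per-driver hunks further down then delete those now-unneeded callbacks. A brief sketch of the call as used here, assuming valid slave net_device pointers:

	#include <linux/netpoll.h>

	static void example_poll_slaves(struct net_device *slaves[], int n)
	{
		int i;

		for (i = 0; i < n; i++)
			if (netif_running(slaves[i]))
				netpoll_poll_dev(slaves[i]);	/* locking + NAPI inside */
	}
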
@@ -154,7 +154,7 @@ static irqreturn_t bmac_txdma_intr(int irq, void *dev_id);
 static irqreturn_t bmac_rxdma_intr(int irq, void *dev_id);
 static void bmac_set_timeout(struct net_device *dev);
 static void bmac_tx_timeout(struct timer_list *t);
-static int bmac_output(struct sk_buff *skb, struct net_device *dev);
+static netdev_tx_t bmac_output(struct sk_buff *skb, struct net_device *dev);
 static void bmac_start(struct net_device *dev);
 
 #define	DBDMA_SET(x)	( ((x) | (x) << 16) )

@@ -1456,7 +1456,7 @@ bmac_start(struct net_device *dev)
 	spin_unlock_irqrestore(&bp->lock, flags);
 }
 
-static int
+static netdev_tx_t
 bmac_output(struct sk_buff *skb, struct net_device *dev)
 {
 	struct bmac_data *bp = netdev_priv(dev);

@@ -78,7 +78,7 @@ struct mace_data {
 
 static int mace_open(struct net_device *dev);
 static int mace_close(struct net_device *dev);
-static int mace_xmit_start(struct sk_buff *skb, struct net_device *dev);
+static netdev_tx_t mace_xmit_start(struct sk_buff *skb, struct net_device *dev);
 static void mace_set_multicast(struct net_device *dev);
 static void mace_reset(struct net_device *dev);
 static int mace_set_address(struct net_device *dev, void *addr);

@@ -525,7 +525,7 @@ static inline void mace_set_timeout(struct net_device *dev)
 	mp->timeout_active = 1;
 }
 
-static int mace_xmit_start(struct sk_buff *skb, struct net_device *dev)
+static netdev_tx_t mace_xmit_start(struct sk_buff *skb, struct net_device *dev)
 {
 	struct mace_data *mp = netdev_priv(dev);
 	volatile struct dbdma_regs __iomem *td = mp->tx_dma;

@@ -89,7 +89,7 @@ struct mace_frame {
 
 static int mace_open(struct net_device *dev);
 static int mace_close(struct net_device *dev);
-static int mace_xmit_start(struct sk_buff *skb, struct net_device *dev);
+static netdev_tx_t mace_xmit_start(struct sk_buff *skb, struct net_device *dev);
 static void mace_set_multicast(struct net_device *dev);
 static int mace_set_address(struct net_device *dev, void *addr);
 static void mace_reset(struct net_device *dev);

@@ -444,7 +444,7 @@ static int mace_close(struct net_device *dev)
 * Transmit a frame
 */
 
-static int mace_xmit_start(struct sk_buff *skb, struct net_device *dev)
+static netdev_tx_t mace_xmit_start(struct sk_buff *skb, struct net_device *dev)
 {
 	struct mace_data *mp = netdev_priv(dev);
 	unsigned long flags;

@@ -225,9 +225,10 @@ int aq_ring_rx_clean(struct aq_ring_s *self,
 		}
 
 		/* for single fragment packets use build_skb() */
-		if (buff->is_eop) {
+		if (buff->is_eop &&
+		    buff->len <= AQ_CFG_RX_FRAME_MAX - AQ_SKB_ALIGN) {
 			skb = build_skb(page_address(buff->page),
-					buff->len + AQ_SKB_ALIGN);
+					AQ_CFG_RX_FRAME_MAX);
 			if (unlikely(!skb)) {
 				err = -ENOMEM;
 				goto err_exit;

@@ -247,18 +248,21 @@ int aq_ring_rx_clean(struct aq_ring_s *self,
 					buff->len - ETH_HLEN,
 					SKB_TRUESIZE(buff->len - ETH_HLEN));
 
-			for (i = 1U, next_ = buff->next,
-			     buff_ = &self->buff_ring[next_]; true;
-			     next_ = buff_->next,
-			     buff_ = &self->buff_ring[next_], ++i) {
-				skb_add_rx_frag(skb, i, buff_->page, 0,
-						buff_->len,
-						SKB_TRUESIZE(buff->len -
-						ETH_HLEN));
-				buff_->is_cleaned = 1;
+			if (!buff->is_eop) {
+				for (i = 1U, next_ = buff->next,
+				     buff_ = &self->buff_ring[next_];
+				     true; next_ = buff_->next,
+				     buff_ = &self->buff_ring[next_], ++i) {
+					skb_add_rx_frag(skb, i,
+							buff_->page, 0,
+							buff_->len,
+							SKB_TRUESIZE(buff->len -
+							ETH_HLEN));
+					buff_->is_cleaned = 1;
 
-				if (buff_->is_eop)
-					break;
+					if (buff_->is_eop)
+						break;
+				}
 			}
 		}

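The aquantia hunk only takes the build_skb() fast path when the frame plus the reserved headroom still fits inside the receive buffer, because build_skb() places the skb_shared_info tail in that same buffer. A hedged sketch of the guard, with placeholder constants standing in for the driver's configuration values:

	#include <linux/types.h>

	#define EX_FRAME_MAX	2048U	/* stand-in for AQ_CFG_RX_FRAME_MAX */
	#define EX_SKB_ALIGN	256U	/* stand-in for the AQ_SKB_ALIGN reserve */

	/* Payload plus the reserved tail must fit the one receive buffer,
	 * otherwise fall back to allocating an skb and attaching frags. */
	static bool can_use_build_skb(unsigned int len, bool is_eop)
	{
		return is_eop && len <= EX_FRAME_MAX - EX_SKB_ALIGN;
	}
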
@@ -12894,19 +12894,6 @@ static int bnx2x_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
 	}
 }
 
-#ifdef CONFIG_NET_POLL_CONTROLLER
-static void poll_bnx2x(struct net_device *dev)
-{
-	struct bnx2x *bp = netdev_priv(dev);
-	int i;
-
-	for_each_eth_queue(bp, i) {
-		struct bnx2x_fastpath *fp = &bp->fp[i];
-		napi_schedule(&bnx2x_fp(bp, fp->index, napi));
-	}
-}
-#endif
-
 static int bnx2x_validate_addr(struct net_device *dev)
 {
 	struct bnx2x *bp = netdev_priv(dev);

@@ -13113,9 +13100,6 @@ static const struct net_device_ops bnx2x_netdev_ops = {
 	.ndo_tx_timeout		= bnx2x_tx_timeout,
 	.ndo_vlan_rx_add_vid	= bnx2x_vlan_rx_add_vid,
 	.ndo_vlan_rx_kill_vid	= bnx2x_vlan_rx_kill_vid,
-#ifdef CONFIG_NET_POLL_CONTROLLER
-	.ndo_poll_controller	= poll_bnx2x,
-#endif
 	.ndo_setup_tc		= __bnx2x_setup_tc,
 #ifdef CONFIG_BNX2X_SRIOV
 	.ndo_set_vf_mac		= bnx2x_set_vf_mac,

@@ -7672,21 +7672,6 @@ static void bnxt_tx_timeout(struct net_device *dev)
 	bnxt_queue_sp_work(bp);
 }
 
-#ifdef CONFIG_NET_POLL_CONTROLLER
-static void bnxt_poll_controller(struct net_device *dev)
-{
-	struct bnxt *bp = netdev_priv(dev);
-	int i;
-
-	/* Only process tx rings/combined rings in netpoll mode. */
-	for (i = 0; i < bp->tx_nr_rings; i++) {
-		struct bnxt_tx_ring_info *txr = &bp->tx_ring[i];
-
-		napi_schedule(&txr->bnapi->napi);
-	}
-}
-#endif
-
 static void bnxt_timer(struct timer_list *t)
 {
 	struct bnxt *bp = from_timer(bp, t, timer);

@@ -8519,9 +8504,6 @@ static const struct net_device_ops bnxt_netdev_ops = {
 	.ndo_set_vf_link_state	= bnxt_set_vf_link_state,
 	.ndo_set_vf_spoofchk	= bnxt_set_vf_spoofchk,
 	.ndo_set_vf_trust	= bnxt_set_vf_trust,
 #endif
-#ifdef CONFIG_NET_POLL_CONTROLLER
-	.ndo_poll_controller	= bnxt_poll_controller,
-#endif
 	.ndo_setup_tc		= bnxt_setup_tc,
 #ifdef CONFIG_RFS_ACCEL

@@ -46,6 +46,9 @@ static int bnxt_hwrm_nvm_req(struct bnxt *bp, u32 param_id, void *msg,
 		}
 	}
 
+	if (i == ARRAY_SIZE(nvm_params))
+		return -EOPNOTSUPP;
+
 	if (nvm_param.dir_type == BNXT_NVM_PORT_CFG)
 		idx = bp->pf.port_id;
 	else if (nvm_param.dir_type == BNXT_NVM_FUNC_CFG)

@@ -75,17 +75,23 @@ static int bnxt_tc_parse_redir(struct bnxt *bp,
 	return 0;
 }
 
-static void bnxt_tc_parse_vlan(struct bnxt *bp,
-			       struct bnxt_tc_actions *actions,
-			       const struct tc_action *tc_act)
+static int bnxt_tc_parse_vlan(struct bnxt *bp,
+			      struct bnxt_tc_actions *actions,
+			      const struct tc_action *tc_act)
 {
-	if (tcf_vlan_action(tc_act) == TCA_VLAN_ACT_POP) {
+	switch (tcf_vlan_action(tc_act)) {
+	case TCA_VLAN_ACT_POP:
 		actions->flags |= BNXT_TC_ACTION_FLAG_POP_VLAN;
-	} else if (tcf_vlan_action(tc_act) == TCA_VLAN_ACT_PUSH) {
+		break;
+	case TCA_VLAN_ACT_PUSH:
 		actions->flags |= BNXT_TC_ACTION_FLAG_PUSH_VLAN;
 		actions->push_vlan_tci = htons(tcf_vlan_push_vid(tc_act));
 		actions->push_vlan_tpid = tcf_vlan_push_proto(tc_act);
+		break;
+	default:
+		return -EOPNOTSUPP;
 	}
+	return 0;
 }
 
 static int bnxt_tc_parse_tunnel_set(struct bnxt *bp,

@@ -134,7 +140,9 @@ static int bnxt_tc_parse_actions(struct bnxt *bp,
 
 		/* Push/pop VLAN */
 		if (is_tcf_vlan(tc_act)) {
-			bnxt_tc_parse_vlan(bp, actions, tc_act);
+			rc = bnxt_tc_parse_vlan(bp, actions, tc_act);
+			if (rc)
+				return rc;
 			continue;
 		}
 

@@ -753,7 +753,6 @@ struct cpl_abort_req_rss {
 };
 
 struct cpl_abort_req_rss6 {
-	WR_HDR;
 	union opcode_tid ot;
 	__be32 srqidx_status;
 };

@@ -332,7 +332,7 @@ static int ep93xx_poll(struct napi_struct *napi, int budget)
 	return rx;
 }
 
-static int ep93xx_xmit(struct sk_buff *skb, struct net_device *dev)
+static netdev_tx_t ep93xx_xmit(struct sk_buff *skb, struct net_device *dev)
 {
 	struct ep93xx_priv *ep = netdev_priv(dev);
 	struct ep93xx_tdesc *txd;

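This and the following ethernet hunks are one mechanical change repeated across several old drivers: ndo_start_xmit implementations must return netdev_tx_t rather than int, which lets the compiler catch handlers that leak raw error codes into the transmit path. The shape of a conforming handler, as a minimal sketch rather than any real driver:

	#include <linux/netdevice.h>

	static netdev_tx_t example_start_xmit(struct sk_buff *skb,
					      struct net_device *dev)
	{
		/* a real driver would queue skb to hardware here */
		dev_kfree_skb_any(skb);		/* sketch: just consume it */
		return NETDEV_TX_OK;		/* typed return, not an int */
	}
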
@@ -113,7 +113,7 @@ struct net_local {
 
 /* Index to functions, as function prototypes. */
 static int	net_open(struct net_device *dev);
-static int	net_send_packet(struct sk_buff *skb, struct net_device *dev);
+static netdev_tx_t net_send_packet(struct sk_buff *skb, struct net_device *dev);
 static irqreturn_t net_interrupt(int irq, void *dev_id);
 static void	set_multicast_list(struct net_device *dev);
 static void	net_rx(struct net_device *dev);

@@ -324,7 +324,7 @@ net_open(struct net_device *dev)
 	return 0;
 }
 
-static int
+static netdev_tx_t
 net_send_packet(struct sk_buff *skb, struct net_device *dev)
 {
 	struct net_local *lp = netdev_priv(dev);

@@ -64,7 +64,8 @@ static unsigned int net_debug = NET_DEBUG;
 #define RX_AREA_END	0x0fc00
 
 static int ether1_open(struct net_device *dev);
-static int ether1_sendpacket(struct sk_buff *skb, struct net_device *dev);
+static netdev_tx_t ether1_sendpacket(struct sk_buff *skb,
+				     struct net_device *dev);
 static irqreturn_t ether1_interrupt(int irq, void *dev_id);
 static int ether1_close(struct net_device *dev);
 static void ether1_setmulticastlist(struct net_device *dev);

@@ -667,7 +668,7 @@ ether1_timeout(struct net_device *dev)
 	netif_wake_queue(dev);
 }
 
-static int
+static netdev_tx_t
 ether1_sendpacket (struct sk_buff *skb, struct net_device *dev)
 {
 	int tmp, tst, nopaddr, txaddr, tbdaddr, dataddr;

@@ -347,7 +347,7 @@ static const char init_setup[] =
 	0x7f /*	*multi IA */ };
 
 static int i596_open(struct net_device *dev);
-static int i596_start_xmit(struct sk_buff *skb, struct net_device *dev);
+static netdev_tx_t i596_start_xmit(struct sk_buff *skb, struct net_device *dev);
 static irqreturn_t i596_interrupt(int irq, void *dev_id);
 static int i596_close(struct net_device *dev);
 static void i596_add_cmd(struct net_device *dev, struct i596_cmd *cmd);

@@ -966,7 +966,7 @@ static void i596_tx_timeout (struct net_device *dev)
 }
 
 
-static int i596_start_xmit(struct sk_buff *skb, struct net_device *dev)
+static netdev_tx_t i596_start_xmit(struct sk_buff *skb, struct net_device *dev)
 {
 	struct i596_private *lp = netdev_priv(dev);
 	struct tx_cmd *tx_cmd;

@@ -121,7 +121,8 @@ static int     sun3_82586_probe1(struct net_device *dev,int ioaddr);
 static irqreturn_t sun3_82586_interrupt(int irq,void *dev_id);
 static int     sun3_82586_open(struct net_device *dev);
 static int     sun3_82586_close(struct net_device *dev);
-static int     sun3_82586_send_packet(struct sk_buff *,struct net_device *);
+static netdev_tx_t sun3_82586_send_packet(struct sk_buff *,
+					  struct net_device *);
 static struct  net_device_stats *sun3_82586_get_stats(struct net_device *dev);
 static void    set_multicast_list(struct net_device *dev);
 static void    sun3_82586_timeout(struct net_device *dev);

@@ -1002,7 +1003,8 @@ static void sun3_82586_timeout(struct net_device *dev)
 * send frame
 */
 
-static int sun3_82586_send_packet(struct sk_buff *skb, struct net_device *dev)
+static netdev_tx_t
+sun3_82586_send_packet(struct sk_buff *skb, struct net_device *dev)
 {
 	int len,i;
 #ifndef NO_NOPCOMMANDS

@@ -2677,12 +2677,17 @@ static int emac_init_phy(struct emac_instance *dev)
 	if (of_phy_is_fixed_link(np)) {
 		int res = emac_dt_mdio_probe(dev);
 
-		if (!res) {
-			res = of_phy_register_fixed_link(np);
-			if (res)
-				mdiobus_unregister(dev->mii_bus);
+		if (res)
+			return res;
+
+		res = of_phy_register_fixed_link(np);
+		dev->phy_dev = of_phy_find_device(np);
+		if (res || !dev->phy_dev) {
+			mdiobus_unregister(dev->mii_bus);
+			return res ? res : -EINVAL;
 		}
-		return res;
+		emac_adjust_link(dev->ndev);
+		put_device(&dev->phy_dev->mdio.dev);
 	}
 	return 0;
 }

@@ -504,9 +504,6 @@ void fm10k_update_stats(struct fm10k_intfc *interface);
 void fm10k_service_event_schedule(struct fm10k_intfc *interface);
 void fm10k_macvlan_schedule(struct fm10k_intfc *interface);
 void fm10k_update_rx_drop_en(struct fm10k_intfc *interface);
-#ifdef CONFIG_NET_POLL_CONTROLLER
-void fm10k_netpoll(struct net_device *netdev);
-#endif
 
 /* Netdev */
 struct net_device *fm10k_alloc_netdev(const struct fm10k_info *info);

@@ -1648,9 +1648,6 @@ static const struct net_device_ops fm10k_netdev_ops = {
 	.ndo_udp_tunnel_del	= fm10k_udp_tunnel_del,
 	.ndo_dfwd_add_station	= fm10k_dfwd_add_station,
 	.ndo_dfwd_del_station	= fm10k_dfwd_del_station,
-#ifdef CONFIG_NET_POLL_CONTROLLER
-	.ndo_poll_controller	= fm10k_netpoll,
-#endif
 	.ndo_features_check	= fm10k_features_check,
 };
 

@@ -1210,28 +1210,6 @@ static irqreturn_t fm10k_msix_mbx_vf(int __always_unused irq, void *data)
 	return IRQ_HANDLED;
 }
 
-#ifdef CONFIG_NET_POLL_CONTROLLER
-/**
- * fm10k_netpoll - A Polling 'interrupt' handler
- * @netdev: network interface device structure
- *
- * This is used by netconsole to send skbs without having to re-enable
- * interrupts. It's not called while the normal interrupt routine is executing.
- **/
-void fm10k_netpoll(struct net_device *netdev)
-{
-	struct fm10k_intfc *interface = netdev_priv(netdev);
-	int i;
-
-	/* if interface is down do nothing */
-	if (test_bit(__FM10K_DOWN, interface->state))
-		return;
-
-	for (i = 0; i < interface->num_q_vectors; i++)
-		fm10k_msix_clean_rings(0, interface->q_vector[i]);
-}
-
-#endif
 #define FM10K_ERR_MSG(type) case (type): error = #type; break
 static void fm10k_handle_fault(struct fm10k_intfc *interface, int type,
 			       struct fm10k_fault *fault)

@@ -396,29 +396,6 @@ static void i40evf_map_rings_to_vectors(struct i40evf_adapter *adapter)
 	adapter->aq_required |= I40EVF_FLAG_AQ_MAP_VECTORS;
 }
 
-#ifdef CONFIG_NET_POLL_CONTROLLER
-/**
- * i40evf_netpoll - A Polling 'interrupt' handler
- * @netdev: network interface device structure
- *
- * This is used by netconsole to send skbs without having to re-enable
- * interrupts. It's not called while the normal interrupt routine is executing.
- **/
-static void i40evf_netpoll(struct net_device *netdev)
-{
-	struct i40evf_adapter *adapter = netdev_priv(netdev);
-	int q_vectors = adapter->num_msix_vectors - NONQ_VECS;
-	int i;
-
-	/* if interface is down do nothing */
-	if (test_bit(__I40E_VSI_DOWN, adapter->vsi.state))
-		return;
-
-	for (i = 0; i < q_vectors; i++)
-		i40evf_msix_clean_rings(0, &adapter->q_vectors[i]);
-}
-
-#endif
 /**
  * i40evf_irq_affinity_notify - Callback for affinity changes
  * @notify: context as to what irq was changed

@@ -3229,9 +3206,6 @@ static const struct net_device_ops i40evf_netdev_ops = {
 	.ndo_features_check	= i40evf_features_check,
 	.ndo_fix_features	= i40evf_fix_features,
 	.ndo_set_features	= i40evf_set_features,
-#ifdef CONFIG_NET_POLL_CONTROLLER
-	.ndo_poll_controller	= i40evf_netpoll,
-#endif
 	.ndo_setup_tc		= i40evf_setup_tc,
 };
 

|
|||
stats->rx_length_errors = vsi_stats->rx_length_errors;
|
||||
}
|
||||
|
||||
#ifdef CONFIG_NET_POLL_CONTROLLER
|
||||
/**
|
||||
* ice_netpoll - polling "interrupt" handler
|
||||
* @netdev: network interface device structure
|
||||
*
|
||||
* Used by netconsole to send skbs without having to re-enable interrupts.
|
||||
* This is not called in the normal interrupt path.
|
||||
*/
|
||||
static void ice_netpoll(struct net_device *netdev)
|
||||
{
|
||||
struct ice_netdev_priv *np = netdev_priv(netdev);
|
||||
struct ice_vsi *vsi = np->vsi;
|
||||
struct ice_pf *pf = vsi->back;
|
||||
int i;
|
||||
|
||||
if (test_bit(__ICE_DOWN, vsi->state) ||
|
||||
!test_bit(ICE_FLAG_MSIX_ENA, pf->flags))
|
||||
return;
|
||||
|
||||
for (i = 0; i < vsi->num_q_vectors; i++)
|
||||
ice_msix_clean_rings(0, vsi->q_vectors[i]);
|
||||
}
|
||||
#endif /* CONFIG_NET_POLL_CONTROLLER */
|
||||
|
||||
/**
|
||||
* ice_napi_disable_all - Disable NAPI for all q_vectors in the VSI
|
||||
* @vsi: VSI having NAPI disabled
|
||||
|
@ -5497,9 +5473,6 @@ static const struct net_device_ops ice_netdev_ops = {
|
|||
.ndo_validate_addr = eth_validate_addr,
|
||||
.ndo_change_mtu = ice_change_mtu,
|
||||
.ndo_get_stats64 = ice_get_stats64,
|
||||
#ifdef CONFIG_NET_POLL_CONTROLLER
|
||||
.ndo_poll_controller = ice_netpoll,
|
||||
#endif /* CONFIG_NET_POLL_CONTROLLER */
|
||||
.ndo_vlan_rx_add_vid = ice_vlan_rx_add_vid,
|
||||
.ndo_vlan_rx_kill_vid = ice_vlan_rx_kill_vid,
|
||||
.ndo_set_features = ice_set_features,
|
||||
|
|
|
@@ -205,10 +205,6 @@ static struct notifier_block dca_notifier = {
 	.priority	= 0
 };
 #endif
-#ifdef CONFIG_NET_POLL_CONTROLLER
-/* for netdump / net console */
-static void igb_netpoll(struct net_device *);
-#endif
 #ifdef CONFIG_PCI_IOV
 static unsigned int max_vfs;
 module_param(max_vfs, uint, 0);

@@ -2881,9 +2877,6 @@ static const struct net_device_ops igb_netdev_ops = {
 	.ndo_set_vf_spoofchk	= igb_ndo_set_vf_spoofchk,
 	.ndo_set_vf_trust	= igb_ndo_set_vf_trust,
 	.ndo_get_vf_config	= igb_ndo_get_vf_config,
-#ifdef CONFIG_NET_POLL_CONTROLLER
-	.ndo_poll_controller	= igb_netpoll,
-#endif
 	.ndo_fix_features	= igb_fix_features,
 	.ndo_set_features	= igb_set_features,
 	.ndo_fdb_add		= igb_ndo_fdb_add,

@@ -9053,29 +9046,6 @@ static int igb_pci_sriov_configure(struct pci_dev *dev, int num_vfs)
 	return 0;
 }
 
-#ifdef CONFIG_NET_POLL_CONTROLLER
-/* Polling 'interrupt' - used by things like netconsole to send skbs
- * without having to re-enable interrupts. It's not called while
- * the interrupt routine is executing.
- */
-static void igb_netpoll(struct net_device *netdev)
-{
-	struct igb_adapter *adapter = netdev_priv(netdev);
-	struct e1000_hw *hw = &adapter->hw;
-	struct igb_q_vector *q_vector;
-	int i;
-
-	for (i = 0; i < adapter->num_q_vectors; i++) {
-		q_vector = adapter->q_vector[i];
-		if (adapter->flags & IGB_FLAG_HAS_MSIX)
-			wr32(E1000_EIMC, q_vector->eims_value);
-		else
-			igb_irq_disable(adapter);
-		napi_schedule(&q_vector->napi);
-	}
-}
-#endif /* CONFIG_NET_POLL_CONTROLLER */
-
 /**
  * igb_io_error_detected - called when PCI error is detected
  * @pdev: Pointer to PCI device

@@ -81,11 +81,6 @@ static int ixgb_vlan_rx_kill_vid(struct net_device *netdev,
 				 __be16 proto, u16 vid);
 static void ixgb_restore_vlan(struct ixgb_adapter *adapter);
 
-#ifdef CONFIG_NET_POLL_CONTROLLER
-/* for netdump / net console */
-static void ixgb_netpoll(struct net_device *dev);
-#endif
-
 static pci_ers_result_t ixgb_io_error_detected (struct pci_dev *pdev,
 						enum pci_channel_state state);
 static pci_ers_result_t ixgb_io_slot_reset (struct pci_dev *pdev);

@@ -348,9 +343,6 @@ static const struct net_device_ops ixgb_netdev_ops = {
 	.ndo_tx_timeout		= ixgb_tx_timeout,
 	.ndo_vlan_rx_add_vid	= ixgb_vlan_rx_add_vid,
 	.ndo_vlan_rx_kill_vid	= ixgb_vlan_rx_kill_vid,
-#ifdef CONFIG_NET_POLL_CONTROLLER
-	.ndo_poll_controller	= ixgb_netpoll,
-#endif
 	.ndo_fix_features	= ixgb_fix_features,
 	.ndo_set_features	= ixgb_set_features,
 };

@@ -2195,23 +2187,6 @@ ixgb_restore_vlan(struct ixgb_adapter *adapter)
 		ixgb_vlan_rx_add_vid(adapter->netdev, htons(ETH_P_8021Q), vid);
 }
 
-#ifdef CONFIG_NET_POLL_CONTROLLER
-/*
- * Polling 'interrupt' - used by things like netconsole to send skbs
- * without having to re-enable interrupts. It's not called while
- * the interrupt routine is executing.
- */
-
-static void ixgb_netpoll(struct net_device *dev)
-{
-	struct ixgb_adapter *adapter = netdev_priv(dev);
-
-	disable_irq(adapter->pdev->irq);
-	ixgb_intr(adapter->pdev->irq, dev);
-	enable_irq(adapter->pdev->irq);
-}
-#endif
-
 /**
  * ixgb_io_error_detected - called when PCI error is detected
  * @pdev: pointer to pci device with error