Merge 4.12-rc6 into usb-next

We want the USB fixes in here.

Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Greg Kroah-Hartman 2017-06-20 10:16:07 +08:00
commit 571949a40a
331 changed files with 2988 additions and 2036 deletions


@@ -3811,6 +3811,13 @@
 expediting. Set to zero to disable automatic
 expediting.
+stack_guard_gap= [MM]
+			override the default stack gap protection. The value
+			is in page units and it defines how many pages prior
+			to (for stacks growing down) resp. after (for stacks
+			growing up) the main stack are reserved for no other
+			mapping. Default value is 256 pages.
 stacktrace [FTRACE]
 		Enabled the stack tracer on boot up.
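As documented in the added text above, the gap is tuned from the kernel command line; for example, booting with

    stack_guard_gap=512

would reserve a 512-page gap instead of the default 256 (the value here is purely an illustration, not part of this merge).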


@@ -122,7 +122,7 @@ associated flow of the packet. The hash is either provided by hardware
 or will be computed in the stack. Capable hardware can pass the hash in
 the receive descriptor for the packet; this would usually be the same
 hash used for RSS (e.g. computed Toeplitz hash). The hash is saved in
-skb->rx_hash and can be used elsewhere in the stack as a hash of the
+skb->hash and can be used elsewhere in the stack as a hash of the
 packets flow.
 Each receive hardware queue has an associated list of CPUs to which


@@ -1,7 +1,7 @@
 VERSION = 4
 PATCHLEVEL = 12
 SUBLEVEL = 0
-EXTRAVERSION = -rc5
+EXTRAVERSION = -rc6
 NAME = Fearless Coyote
 # *DOCUMENTATION*


@@ -65,7 +65,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
 vma = find_vma(mm, addr);
 if (TASK_SIZE - len >= addr &&
-(!vma || addr + len <= vma->vm_start))
+(!vma || addr + len <= vm_start_gap(vma)))
 return addr;
 }
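The vma->vm_start to vm_start_gap(vma) substitution above repeats in each architecture's get_unmapped_area()/hugetlb_get_unmapped_area() hunk below, and a few callers also gain a vm_end_gap(prev) check. The helpers themselves are not part of the hunks shown here; as a rough sketch (assuming the include/linux/mm.h definitions introduced by the stack-guard-gap rework that these fixes follow), they simply widen a stack VMA by the configurable gap before the overlap test:

/* Sketch of the mm.h helpers assumed by the call sites below. */
static inline unsigned long vm_start_gap(struct vm_area_struct *vma)
{
	unsigned long vm_start = vma->vm_start;

	if (vma->vm_flags & VM_GROWSDOWN) {
		vm_start -= stack_guard_gap;	/* keep a gap below a downward-growing stack */
		if (vm_start > vma->vm_start)	/* clamp on underflow */
			vm_start = 0;
	}
	return vm_start;
}

static inline unsigned long vm_end_gap(struct vm_area_struct *vma)
{
	unsigned long vm_end = vma->vm_end;

	if (vma->vm_flags & VM_GROWSUP) {
		vm_end += stack_guard_gap;	/* keep a gap above an upward-growing stack */
		if (vm_end < vma->vm_end)	/* clamp on overflow */
			vm_end = -PAGE_SIZE;
	}
	return vm_end;
}

With that, "addr + len <= vm_start_gap(vma)" rejects mappings that would land inside the guard gap rather than merely inside the VMA itself.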


@@ -220,7 +220,7 @@ AM33XX_IOPAD(0x94c, PIN_INPUT_PULLDOWN | MUX_MODE7)
 mmc1_pins: pinmux_mmc1_pins {
 pinctrl-single,pins = <
-AM33XX_IOPAD(0x960, PIN_INPUT | MUX_MODE7) /* spi0_cs1.gpio0_6 */
+AM33XX_IOPAD(0x96c, PIN_INPUT | MUX_MODE7) /* uart0_rtsn.gpio1_9 */
 >;
 };
@@ -280,10 +280,6 @@ AM33XX_IOPAD(0x830, PIN_INPUT_PULLUP | MUX_MODE7) /* nKbdInt - gpmc_ad12.gpio1_1
 AM33XX_IOPAD(0x834, PIN_INPUT_PULLUP | MUX_MODE7) /* nKbdReset - gpmc_ad13.gpio1_13 */
 AM33XX_IOPAD(0x838, PIN_INPUT_PULLUP | MUX_MODE7) /* nDispReset - gpmc_ad14.gpio1_14 */
 AM33XX_IOPAD(0x844, PIN_INPUT_PULLUP | MUX_MODE7) /* USB1_enPower - gpmc_a1.gpio1_17 */
-/* AVR Programming - SPI Bus (bit bang) - Screen and Keyboard */
-AM33XX_IOPAD(0x954, PIN_INPUT_PULLUP | MUX_MODE7) /* Kbd/Disp/BattMOSI spi0_d0.gpio0_3 */
-AM33XX_IOPAD(0x958, PIN_INPUT_PULLUP | MUX_MODE7) /* Kbd/Disp/BattMISO spi0_d1.gpio0_4 */
-AM33XX_IOPAD(0x950, PIN_INPUT_PULLUP | MUX_MODE7) /* Kbd/Disp/BattSCLK spi0_clk.gpio0_2 */
 /* PDI Bus - Battery system */
 AM33XX_IOPAD(0x840, PIN_INPUT_PULLUP | MUX_MODE7) /* nBattReset gpmc_a0.gpio1_16 */
 AM33XX_IOPAD(0x83c, PIN_INPUT_PULLUP | MUX_MODE7) /* BattPDIData gpmc_ad15.gpio1_15 */
@@ -384,7 +380,7 @@ &mmc1 {
 pinctrl-names = "default";
 pinctrl-0 = <&mmc1_pins>;
 bus-width = <4>;
-cd-gpios = <&gpio0 6 GPIO_ACTIVE_LOW>;
+cd-gpios = <&gpio1 9 GPIO_ACTIVE_LOW>;
 vmmc-supply = <&vmmcsd_fixed>;
 };


@@ -558,10 +558,11 @@ rtc: rtc@01f00000 {
 };
 r_ccu: clock@1f01400 {
-compatible = "allwinner,sun50i-a64-r-ccu";
+compatible = "allwinner,sun8i-h3-r-ccu";
 reg = <0x01f01400 0x100>;
-clocks = <&osc24M>, <&osc32k>, <&iosc>;
-clock-names = "hosc", "losc", "iosc";
+clocks = <&osc24M>, <&osc32k>, <&iosc>,
+<&ccu 9>;
+clock-names = "hosc", "losc", "iosc", "pll-periph";
 #clock-cells = <1>;
 #reset-cells = <1>;
 };


@@ -90,7 +90,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
 vma = find_vma(mm, addr);
 if (TASK_SIZE - len >= addr &&
-(!vma || addr + len <= vma->vm_start))
+(!vma || addr + len <= vm_start_gap(vma)))
 return addr;
 }
@@ -141,7 +141,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
 addr = PAGE_ALIGN(addr);
 vma = find_vma(mm, addr);
 if (TASK_SIZE - len >= addr &&
-(!vma || addr + len <= vma->vm_start))
+(!vma || addr + len <= vm_start_gap(vma)))
 return addr;
 }


@@ -406,8 +406,9 @@ rtc: rtc@1f00000 {
 r_ccu: clock@1f01400 {
 compatible = "allwinner,sun50i-a64-r-ccu";
 reg = <0x01f01400 0x100>;
-clocks = <&osc24M>, <&osc32k>, <&iosc>;
-clock-names = "hosc", "losc", "iosc";
+clocks = <&osc24M>, <&osc32k>, <&iosc>,
+<&ccu 11>;
+clock-names = "hosc", "losc", "iosc", "pll-periph";
 #clock-cells = <1>;
 #reset-cells = <1>;
 };


@@ -40,7 +40,7 @@
 * OTHER DEALINGS IN THE SOFTWARE.
 */
-#include "sunxi-h3-h5.dtsi"
+#include <arm/sunxi-h3-h5.dtsi>
 / {
 cpus {


@@ -1 +0,0 @@
-../../../../arm/boot/dts/sunxi-h3-h5.dtsi


@@ -36,6 +36,7 @@ int bpf_jit_enable __read_mostly;
 #define TMP_REG_1 (MAX_BPF_JIT_REG + 0)
 #define TMP_REG_2 (MAX_BPF_JIT_REG + 1)
 #define TCALL_CNT (MAX_BPF_JIT_REG + 2)
+#define TMP_REG_3 (MAX_BPF_JIT_REG + 3)
 /* Map BPF registers to A64 registers */
 static const int bpf2a64[] = {
@@ -57,6 +58,7 @@ static const int bpf2a64[] = {
 /* temporary registers for internal BPF JIT */
 [TMP_REG_1] = A64_R(10),
 [TMP_REG_2] = A64_R(11),
+[TMP_REG_3] = A64_R(12),
 /* tail_call_cnt */
 [TCALL_CNT] = A64_R(26),
 /* temporary register for blinding constants */
@@ -319,6 +321,7 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx)
 const u8 src = bpf2a64[insn->src_reg];
 const u8 tmp = bpf2a64[TMP_REG_1];
 const u8 tmp2 = bpf2a64[TMP_REG_2];
+const u8 tmp3 = bpf2a64[TMP_REG_3];
 const s16 off = insn->off;
 const s32 imm = insn->imm;
 const int i = insn - ctx->prog->insnsi;
@@ -689,10 +692,10 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx)
 emit(A64_PRFM(tmp, PST, L1, STRM), ctx);
 emit(A64_LDXR(isdw, tmp2, tmp), ctx);
 emit(A64_ADD(isdw, tmp2, tmp2, src), ctx);
-emit(A64_STXR(isdw, tmp2, tmp, tmp2), ctx);
+emit(A64_STXR(isdw, tmp2, tmp, tmp3), ctx);
 jmp_offset = -3;
 check_imm19(jmp_offset);
-emit(A64_CBNZ(0, tmp2, jmp_offset), ctx);
+emit(A64_CBNZ(0, tmp3, jmp_offset), ctx);
 break;
 /* R0 = ntohx(*(size *)(((struct sk_buff *)R6)->data + imm)) */


@@ -75,7 +75,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
 addr = PAGE_ALIGN(addr);
 vma = find_vma(current->mm, addr);
 if (TASK_SIZE - len >= addr &&
-(!vma || addr + len <= vma->vm_start))
+(!vma || addr + len <= vm_start_gap(vma)))
 goto success;
 }


@@ -128,19 +128,19 @@ quiet_cmd_cpp_its_S = ITS $@
 -DADDR_BITS=$(ADDR_BITS) \
 -DADDR_CELLS=$(itb_addr_cells)
-$(obj)/vmlinux.its: $(srctree)/arch/mips/$(PLATFORM)/vmlinux.its.S FORCE
+$(obj)/vmlinux.its: $(srctree)/arch/mips/$(PLATFORM)/vmlinux.its.S $(VMLINUX) FORCE
 $(call if_changed_dep,cpp_its_S,none,vmlinux.bin)
-$(obj)/vmlinux.gz.its: $(srctree)/arch/mips/$(PLATFORM)/vmlinux.its.S FORCE
+$(obj)/vmlinux.gz.its: $(srctree)/arch/mips/$(PLATFORM)/vmlinux.its.S $(VMLINUX) FORCE
 $(call if_changed_dep,cpp_its_S,gzip,vmlinux.bin.gz)
-$(obj)/vmlinux.bz2.its: $(srctree)/arch/mips/$(PLATFORM)/vmlinux.its.S FORCE
+$(obj)/vmlinux.bz2.its: $(srctree)/arch/mips/$(PLATFORM)/vmlinux.its.S $(VMLINUX) FORCE
 $(call if_changed_dep,cpp_its_S,bzip2,vmlinux.bin.bz2)
-$(obj)/vmlinux.lzma.its: $(srctree)/arch/mips/$(PLATFORM)/vmlinux.its.S FORCE
+$(obj)/vmlinux.lzma.its: $(srctree)/arch/mips/$(PLATFORM)/vmlinux.its.S $(VMLINUX) FORCE
 $(call if_changed_dep,cpp_its_S,lzma,vmlinux.bin.lzma)
-$(obj)/vmlinux.lzo.its: $(srctree)/arch/mips/$(PLATFORM)/vmlinux.its.S FORCE
+$(obj)/vmlinux.lzo.its: $(srctree)/arch/mips/$(PLATFORM)/vmlinux.its.S $(VMLINUX) FORCE
 $(call if_changed_dep,cpp_its_S,lzo,vmlinux.bin.lzo)
 quiet_cmd_itb-image = ITB $@


@@ -35,7 +35,12 @@ extern pte_t *pkmap_page_table;
 * easily, subsequent pte tables have to be allocated in one physical
 * chunk of RAM.
 */
+#ifdef CONFIG_PHYS_ADDR_T_64BIT
+#define LAST_PKMAP 512
+#else
 #define LAST_PKMAP 1024
+#endif
 #define LAST_PKMAP_MASK (LAST_PKMAP-1)
 #define PKMAP_NR(virt) ((virt-PKMAP_BASE) >> PAGE_SHIFT)
 #define PKMAP_ADDR(nr) (PKMAP_BASE + ((nr) << PAGE_SHIFT))


@@ -43,7 +43,8 @@ typedef union mips_instruction kprobe_opcode_t;
 #define flush_insn_slot(p) \
 do { \
-flush_icache_range((unsigned long)p->addr, \
+if (p->addr) \
+flush_icache_range((unsigned long)p->addr, \
 (unsigned long)p->addr + \
 (MAX_INSN_SIZE * sizeof(kprobe_opcode_t))); \
 } while (0)


@@ -19,6 +19,10 @@
 #define __ARCH_USE_5LEVEL_HACK
 #include <asm-generic/pgtable-nopmd.h>
+#ifdef CONFIG_HIGHMEM
+#include <asm/highmem.h>
+#endif
 extern int temp_tlb_entry;
 /*
@@ -62,7 +66,8 @@ extern int add_temporary_entry(unsigned long entrylo0, unsigned long entrylo1,
 #define VMALLOC_START MAP_BASE
-#define PKMAP_BASE (0xfe000000UL)
+#define PKMAP_END ((FIXADDR_START) & ~((LAST_PKMAP << PAGE_SHIFT)-1))
+#define PKMAP_BASE (PKMAP_END - PAGE_SIZE * LAST_PKMAP)
 #ifdef CONFIG_HIGHMEM
 # define VMALLOC_END (PKMAP_BASE-2*PAGE_SIZE)


@@ -804,8 +804,10 @@ int __compute_return_epc_for_insn(struct pt_regs *regs,
 break;
 }
 /* Compact branch: BNEZC || JIALC */
-if (insn.i_format.rs)
+if (!insn.i_format.rs) {
+/* JIALC: set $31/ra */
 regs->regs[31] = epc + 4;
+}
 regs->cp0_epc += 8;
 break;
 #endif


@@ -38,20 +38,6 @@ void arch_ftrace_update_code(int command)
 #endif
-/*
- * Check if the address is in kernel space
- *
- * Clone core_kernel_text() from kernel/extable.c, but doesn't call
- * init_kernel_text() for Ftrace doesn't trace functions in init sections.
- */
-static inline int in_kernel_space(unsigned long ip)
-{
-if (ip >= (unsigned long)_stext &&
-ip <= (unsigned long)_etext)
-return 1;
-return 0;
-}
 #ifdef CONFIG_DYNAMIC_FTRACE
 #define JAL 0x0c000000 /* jump & link: ip --> ra, jump to target */
@@ -198,7 +184,7 @@ int ftrace_make_nop(struct module *mod,
 * If ip is in kernel space, no long call, otherwise, long call is
 * needed.
 */
-new = in_kernel_space(ip) ? INSN_NOP : INSN_B_1F;
+new = core_kernel_text(ip) ? INSN_NOP : INSN_B_1F;
 #ifdef CONFIG_64BIT
 return ftrace_modify_code(ip, new);
 #else
@@ -218,12 +204,12 @@ int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
 unsigned int new;
 unsigned long ip = rec->ip;
-new = in_kernel_space(ip) ? insn_jal_ftrace_caller : insn_la_mcount[0];
+new = core_kernel_text(ip) ? insn_jal_ftrace_caller : insn_la_mcount[0];
 #ifdef CONFIG_64BIT
 return ftrace_modify_code(ip, new);
 #else
-return ftrace_modify_code_2r(ip, new, in_kernel_space(ip) ?
+return ftrace_modify_code_2r(ip, new, core_kernel_text(ip) ?
 INSN_NOP : insn_la_mcount[1]);
 #endif
 }
@@ -289,7 +275,7 @@ unsigned long ftrace_get_parent_ra_addr(unsigned long self_ra, unsigned long
 * instruction "lui v1, hi_16bit_of_mcount"(offset is 24), but for
 * kernel, move after the instruction "move ra, at"(offset is 16)
 */
-ip = self_ra - (in_kernel_space(self_ra) ? 16 : 24);
+ip = self_ra - (core_kernel_text(self_ra) ? 16 : 24);
 /*
 * search the text until finding the non-store instruction or "s{d,w}
@@ -394,7 +380,7 @@ void prepare_ftrace_return(unsigned long *parent_ra_addr, unsigned long self_ra,
 * entries configured through the tracing/set_graph_function interface.
 */
-insns = in_kernel_space(self_ra) ? 2 : MCOUNT_OFFSET_INSNS + 1;
+insns = core_kernel_text(self_ra) ? 2 : MCOUNT_OFFSET_INSNS + 1;
 trace.func = self_ra - (MCOUNT_INSN_SIZE * insns);
 /* Only trace if the calling function expects to */


@@ -1597,7 +1597,6 @@ static const struct mips_perf_event *mipsxx_pmu_map_raw_event(u64 config)
 break;
 case CPU_P5600:
 case CPU_P6600:
-case CPU_I6400:
 /* 8-bit event numbers */
 raw_id = config & 0x1ff;
 base_id = raw_id & 0xff;
@@ -1610,6 +1609,11 @@
 raw_event.range = P;
 #endif
 break;
+case CPU_I6400:
+/* 8-bit event numbers */
+base_id = config & 0xff;
+raw_event.cntr_mask = CNTR_EVEN | CNTR_ODD;
+break;
 case CPU_1004K:
 if (IS_BOTH_COUNTERS_1004K_EVENT(base_id))
 raw_event.cntr_mask = CNTR_EVEN | CNTR_ODD;


@@ -93,7 +93,7 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
 vma = find_vma(mm, addr);
 if (TASK_SIZE - len >= addr &&
-(!vma || addr + len <= vma->vm_start))
+(!vma || addr + len <= vm_start_gap(vma)))
 return addr;
 }


@@ -51,15 +51,15 @@ void __init pagetable_init(void)
 /*
 * Fixed mappings:
 */
-vaddr = __fix_to_virt(__end_of_fixed_addresses - 1) & PMD_MASK;
-fixrange_init(vaddr, vaddr + FIXADDR_SIZE, pgd_base);
+vaddr = __fix_to_virt(__end_of_fixed_addresses - 1);
+fixrange_init(vaddr & PMD_MASK, vaddr + FIXADDR_SIZE, pgd_base);
 #ifdef CONFIG_HIGHMEM
 /*
 * Permanent kmaps:
 */
 vaddr = PKMAP_BASE;
-fixrange_init(vaddr, vaddr + PAGE_SIZE*LAST_PKMAP, pgd_base);
+fixrange_init(vaddr & PMD_MASK, vaddr + PAGE_SIZE*LAST_PKMAP, pgd_base);
 pgd = swapper_pg_dir + __pgd_offset(vaddr);
 pud = pud_offset(pgd, vaddr);


@@ -90,7 +90,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
 unsigned long len, unsigned long pgoff, unsigned long flags)
 {
 struct mm_struct *mm = current->mm;
-struct vm_area_struct *vma;
+struct vm_area_struct *vma, *prev;
 unsigned long task_size = TASK_SIZE;
 int do_color_align, last_mmap;
 struct vm_unmapped_area_info info;
@@ -117,9 +117,10 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
 else
 addr = PAGE_ALIGN(addr);
-vma = find_vma(mm, addr);
+vma = find_vma_prev(mm, addr, &prev);
 if (task_size - len >= addr &&
-(!vma || addr + len <= vma->vm_start))
+(!vma || addr + len <= vm_start_gap(vma)) &&
+(!prev || addr >= vm_end_gap(prev)))
 goto found_addr;
 }
@@ -143,7 +144,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
 const unsigned long len, const unsigned long pgoff,
 const unsigned long flags)
 {
-struct vm_area_struct *vma;
+struct vm_area_struct *vma, *prev;
 struct mm_struct *mm = current->mm;
 unsigned long addr = addr0;
 int do_color_align, last_mmap;
@@ -177,9 +178,11 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
 addr = COLOR_ALIGN(addr, last_mmap, pgoff);
 else
 addr = PAGE_ALIGN(addr);
-vma = find_vma(mm, addr);
+vma = find_vma_prev(mm, addr, &prev);
 if (TASK_SIZE - len >= addr &&
-(!vma || addr + len <= vma->vm_start))
+(!vma || addr + len <= vm_start_gap(vma)) &&
+(!prev || addr >= vm_end_gap(prev)))
 goto found_addr;
 }


@@ -104,7 +104,7 @@
 "1: "PPC_TLNEI" %4,0\n" \
 _EMIT_BUG_ENTRY \
 : : "i" (__FILE__), "i" (__LINE__), \
-"i" (BUGFLAG_TAINT(TAINT_WARN)), \
+"i" (BUGFLAG_WARNING|BUGFLAG_TAINT(TAINT_WARN)),\
 "i" (sizeof(struct bug_entry)), \
 "r" (__ret_warn_on)); \
 } \


@@ -94,11 +94,13 @@ struct xive_q {
 * store at 0 and some ESBs support doing a trigger via a
 * separate trigger page.
 */
-#define XIVE_ESB_GET 0x800
-#define XIVE_ESB_SET_PQ_00 0xc00
-#define XIVE_ESB_SET_PQ_01 0xd00
-#define XIVE_ESB_SET_PQ_10 0xe00
-#define XIVE_ESB_SET_PQ_11 0xf00
+#define XIVE_ESB_STORE_EOI 0x400 /* Store */
+#define XIVE_ESB_LOAD_EOI 0x000 /* Load */
+#define XIVE_ESB_GET 0x800 /* Load */
+#define XIVE_ESB_SET_PQ_00 0xc00 /* Load */
+#define XIVE_ESB_SET_PQ_01 0xd00 /* Load */
+#define XIVE_ESB_SET_PQ_10 0xe00 /* Load */
+#define XIVE_ESB_SET_PQ_11 0xf00 /* Load */
 #define XIVE_ESB_VAL_P 0x2
 #define XIVE_ESB_VAL_Q 0x1


@@ -69,7 +69,7 @@ static void GLUE(X_PFX,source_eoi)(u32 hw_irq, struct xive_irq_data *xd)
 {
 /* If the XIVE supports the new "store EOI facility, use it */
 if (xd->flags & XIVE_IRQ_FLAG_STORE_EOI)
-__x_writeq(0, __x_eoi_page(xd));
+__x_writeq(0, __x_eoi_page(xd) + XIVE_ESB_STORE_EOI);
 else if (hw_irq && xd->flags & XIVE_IRQ_FLAG_EOI_FW) {
 opal_int_eoi(hw_irq);
 } else {
@@ -89,7 +89,7 @@ static void GLUE(X_PFX,source_eoi)(u32 hw_irq, struct xive_irq_data *xd)
 * properly.
 */
 if (xd->flags & XIVE_IRQ_FLAG_LSI)
-__x_readq(__x_eoi_page(xd));
+__x_readq(__x_eoi_page(xd) + XIVE_ESB_LOAD_EOI);
 else {
 eoi_val = GLUE(X_PFX,esb_load)(xd, XIVE_ESB_SET_PQ_00);


@@ -68,7 +68,7 @@ radix__hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
 addr = ALIGN(addr, huge_page_size(h));
 vma = find_vma(mm, addr);
 if (mm->task_size - len >= addr &&
-(!vma || addr + len <= vma->vm_start))
+(!vma || addr + len <= vm_start_gap(vma)))
 return addr;
 }
 /*


@@ -112,7 +112,7 @@ radix__arch_get_unmapped_area(struct file *filp, unsigned long addr,
 addr = PAGE_ALIGN(addr);
 vma = find_vma(mm, addr);
 if (mm->task_size - len >= addr && addr >= mmap_min_addr &&
-(!vma || addr + len <= vma->vm_start))
+(!vma || addr + len <= vm_start_gap(vma)))
 return addr;
 }
@@ -157,7 +157,7 @@ radix__arch_get_unmapped_area_topdown(struct file *filp,
 addr = PAGE_ALIGN(addr);
 vma = find_vma(mm, addr);
 if (mm->task_size - len >= addr && addr >= mmap_min_addr &&
-(!vma || addr + len <= vma->vm_start))
+(!vma || addr + len <= vm_start_gap(vma)))
 return addr;
 }


@@ -99,7 +99,7 @@ static int slice_area_is_free(struct mm_struct *mm, unsigned long addr,
 if ((mm->task_size - len) < addr)
 return 0;
 vma = find_vma(mm, addr);
-return (!vma || (addr + len) <= vma->vm_start);
+return (!vma || (addr + len) <= vm_start_gap(vma));
 }
 static int slice_low_has_vma(struct mm_struct *mm, unsigned long slice)


@@ -75,7 +75,8 @@ struct pci_dev *pnv_pci_get_npu_dev(struct pci_dev *gpdev, int index)
 if (WARN_ON(!gpdev))
 return NULL;
-if (WARN_ON(!gpdev->dev.of_node))
+/* Not all PCI devices have device-tree nodes */
+if (!gpdev->dev.of_node)
 return NULL;
 /* Get assoicated PCI device */


@@ -297,7 +297,7 @@ void xive_do_source_eoi(u32 hw_irq, struct xive_irq_data *xd)
 {
 /* If the XIVE supports the new "store EOI facility, use it */
 if (xd->flags & XIVE_IRQ_FLAG_STORE_EOI)
-out_be64(xd->eoi_mmio, 0);
+out_be64(xd->eoi_mmio + XIVE_ESB_STORE_EOI, 0);
 else if (hw_irq && xd->flags & XIVE_IRQ_FLAG_EOI_FW) {
 /*
 * The FW told us to call it. This happens for some


@@ -30,6 +30,7 @@ CONFIG_USER_NS=y
 CONFIG_SCHED_AUTOGROUP=y
 CONFIG_BLK_DEV_INITRD=y
 CONFIG_EXPERT=y
+# CONFIG_SYSFS_SYSCALL is not set
 CONFIG_BPF_SYSCALL=y
 CONFIG_USERFAULTFD=y
 # CONFIG_COMPAT_BRK is not set
@@ -44,7 +45,10 @@ CONFIG_MODULE_UNLOAD=y
 CONFIG_MODULE_FORCE_UNLOAD=y
 CONFIG_MODVERSIONS=y
 CONFIG_MODULE_SRCVERSION_ALL=y
+CONFIG_BLK_DEV_INTEGRITY=y
 CONFIG_BLK_DEV_THROTTLING=y
+CONFIG_BLK_WBT=y
+CONFIG_BLK_WBT_SQ=y
 CONFIG_PARTITION_ADVANCED=y
 CONFIG_IBM_PARTITION=y
 CONFIG_BSD_DISKLABEL=y
@@ -90,6 +94,8 @@ CONFIG_UNIX=y
 CONFIG_UNIX_DIAG=m
 CONFIG_XFRM_USER=m
 CONFIG_NET_KEY=m
+CONFIG_SMC=m
+CONFIG_SMC_DIAG=m
 CONFIG_INET=y
 CONFIG_IP_MULTICAST=y
 CONFIG_IP_ADVANCED_ROUTER=y
@@ -359,6 +365,7 @@ CONFIG_NET_ACT_SIMP=m
 CONFIG_NET_ACT_SKBEDIT=m
 CONFIG_NET_ACT_CSUM=m
 CONFIG_DNS_RESOLVER=y
+CONFIG_NETLINK_DIAG=m
 CONFIG_CGROUP_NET_PRIO=y
 CONFIG_BPF_JIT=y
 CONFIG_NET_PKTGEN=m
@@ -367,16 +374,19 @@ CONFIG_DEVTMPFS=y
 CONFIG_DMA_CMA=y
 CONFIG_CMA_SIZE_MBYTES=0
 CONFIG_CONNECTOR=y
+CONFIG_ZRAM=m
 CONFIG_BLK_DEV_LOOP=m
 CONFIG_BLK_DEV_CRYPTOLOOP=m
+CONFIG_BLK_DEV_DRBD=m
 CONFIG_BLK_DEV_NBD=m
 CONFIG_BLK_DEV_OSD=m
 CONFIG_BLK_DEV_RAM=y
 CONFIG_BLK_DEV_RAM_SIZE=32768
-CONFIG_CDROM_PKTCDVD=m
-CONFIG_ATA_OVER_ETH=m
+CONFIG_BLK_DEV_RAM_DAX=y
 CONFIG_VIRTIO_BLK=y
+CONFIG_BLK_DEV_RBD=m
 CONFIG_ENCLOSURE_SERVICES=m
+CONFIG_GENWQE=m
 CONFIG_RAID_ATTRS=m
 CONFIG_SCSI=y
 CONFIG_BLK_DEV_SD=y
@@ -442,6 +452,8 @@ CONFIG_NLMON=m
 # CONFIG_NET_VENDOR_INTEL is not set
 # CONFIG_NET_VENDOR_MARVELL is not set
 CONFIG_MLX4_EN=m
+CONFIG_MLX5_CORE=m
+CONFIG_MLX5_CORE_EN=y
 # CONFIG_NET_VENDOR_NATSEMI is not set
 CONFIG_PPP=m
 CONFIG_PPP_BSDCOMP=m
@@ -452,7 +464,6 @@ CONFIG_PPTP=m
 CONFIG_PPPOL2TP=m
 CONFIG_PPP_ASYNC=m
 CONFIG_PPP_SYNC_TTY=m
-# CONFIG_INPUT_MOUSEDEV_PSAUX is not set
 # CONFIG_INPUT_KEYBOARD is not set
 # CONFIG_INPUT_MOUSE is not set
 # CONFIG_SERIO is not set
@@ -471,6 +482,7 @@ CONFIG_DIAG288_WATCHDOG=m
 CONFIG_INFINIBAND=m
 CONFIG_INFINIBAND_USER_ACCESS=m
 CONFIG_MLX4_INFINIBAND=m
+CONFIG_MLX5_INFINIBAND=m
 CONFIG_VIRTIO_BALLOON=m
 CONFIG_EXT4_FS=y
 CONFIG_EXT4_FS_POSIX_ACL=y
@@ -487,12 +499,18 @@ CONFIG_XFS_POSIX_ACL=y
 CONFIG_XFS_RT=y
 CONFIG_XFS_DEBUG=y
 CONFIG_GFS2_FS=m
+CONFIG_GFS2_FS_LOCKING_DLM=y
 CONFIG_OCFS2_FS=m
 CONFIG_BTRFS_FS=y
 CONFIG_BTRFS_FS_POSIX_ACL=y
+CONFIG_BTRFS_DEBUG=y
 CONFIG_NILFS2_FS=m
+CONFIG_FS_DAX=y
+CONFIG_EXPORTFS_BLOCK_OPS=y
 CONFIG_FANOTIFY=y
+CONFIG_FANOTIFY_ACCESS_PERMISSIONS=y
 CONFIG_QUOTA_NETLINK_INTERFACE=y
+CONFIG_QUOTA_DEBUG=y
 CONFIG_QFMT_V1=m
 CONFIG_QFMT_V2=m
 CONFIG_AUTOFS4_FS=m
@@ -558,6 +576,7 @@ CONFIG_HEADERS_CHECK=y
 CONFIG_DEBUG_SECTION_MISMATCH=y
 CONFIG_MAGIC_SYSRQ=y
 CONFIG_DEBUG_PAGEALLOC=y
+CONFIG_DEBUG_RODATA_TEST=y
 CONFIG_DEBUG_OBJECTS=y
 CONFIG_DEBUG_OBJECTS_SELFTEST=y
 CONFIG_DEBUG_OBJECTS_FREE=y
@@ -580,7 +599,6 @@ CONFIG_DETECT_HUNG_TASK=y
 CONFIG_WQ_WATCHDOG=y
 CONFIG_PANIC_ON_OOPS=y
 CONFIG_DEBUG_TIMEKEEPING=y
-CONFIG_TIMER_STATS=y
 CONFIG_DEBUG_RT_MUTEXES=y
 CONFIG_DEBUG_WW_MUTEX_SLOWPATH=y
 CONFIG_PROVE_LOCKING=y
@@ -595,6 +613,7 @@ CONFIG_RCU_TORTURE_TEST=m
 CONFIG_RCU_CPU_STALL_TIMEOUT=300
 CONFIG_NOTIFIER_ERROR_INJECTION=m
 CONFIG_PM_NOTIFIER_ERROR_INJECT=m
+CONFIG_NETDEV_NOTIFIER_ERROR_INJECT=m
 CONFIG_FAULT_INJECTION=y
 CONFIG_FAILSLAB=y
 CONFIG_FAIL_PAGE_ALLOC=y
@@ -616,13 +635,12 @@ CONFIG_HIST_TRIGGERS=y
 CONFIG_TRACE_ENUM_MAP_FILE=y
 CONFIG_LKDTM=m
 CONFIG_TEST_LIST_SORT=y
+CONFIG_TEST_SORT=y
 CONFIG_KPROBES_SANITY_TEST=y
 CONFIG_RBTREE_TEST=y
 CONFIG_INTERVAL_TREE_TEST=m
 CONFIG_PERCPU_TEST=m
 CONFIG_ATOMIC64_SELFTEST=y
-CONFIG_TEST_STRING_HELPERS=y
-CONFIG_TEST_KSTRTOX=y
 CONFIG_DMA_API_DEBUG=y
 CONFIG_TEST_BPF=m
 CONFIG_BUG_ON_DATA_CORRUPTION=y
@@ -630,6 +648,7 @@ CONFIG_S390_PTDUMP=y
 CONFIG_ENCRYPTED_KEYS=m
 CONFIG_SECURITY=y
 CONFIG_SECURITY_NETWORK=y
+CONFIG_HARDENED_USERCOPY=y
 CONFIG_SECURITY_SELINUX=y
 CONFIG_SECURITY_SELINUX_BOOTPARAM=y
 CONFIG_SECURITY_SELINUX_BOOTPARAM_VALUE=0
@@ -640,7 +659,9 @@ CONFIG_CRYPTO_RSA=m
 CONFIG_CRYPTO_DH=m
 CONFIG_CRYPTO_ECDH=m
 CONFIG_CRYPTO_USER=m
+CONFIG_CRYPTO_PCRYPT=m
 CONFIG_CRYPTO_CRYPTD=m
+CONFIG_CRYPTO_MCRYPTD=m
 CONFIG_CRYPTO_TEST=m
 CONFIG_CRYPTO_CCM=m
 CONFIG_CRYPTO_GCM=m
@@ -648,6 +669,7 @@ CONFIG_CRYPTO_CHACHA20POLY1305=m
 CONFIG_CRYPTO_LRW=m
 CONFIG_CRYPTO_PCBC=m
 CONFIG_CRYPTO_KEYWRAP=m
+CONFIG_CRYPTO_CMAC=m
 CONFIG_CRYPTO_XCBC=m
 CONFIG_CRYPTO_VMAC=m
 CONFIG_CRYPTO_CRC32=m
@@ -657,8 +679,10 @@ CONFIG_CRYPTO_RMD160=m
 CONFIG_CRYPTO_RMD256=m
 CONFIG_CRYPTO_RMD320=m
 CONFIG_CRYPTO_SHA512=m
+CONFIG_CRYPTO_SHA3=m
 CONFIG_CRYPTO_TGR192=m
 CONFIG_CRYPTO_WP512=m
+CONFIG_CRYPTO_AES_TI=m
 CONFIG_CRYPTO_ANUBIS=m
 CONFIG_CRYPTO_BLOWFISH=m
 CONFIG_CRYPTO_CAMELLIA=m
@@ -674,6 +698,7 @@ CONFIG_CRYPTO_TWOFISH=m
 CONFIG_CRYPTO_842=m
 CONFIG_CRYPTO_LZ4=m
 CONFIG_CRYPTO_LZ4HC=m
+CONFIG_CRYPTO_ANSI_CPRNG=m
 CONFIG_CRYPTO_USER_API_HASH=m
 CONFIG_CRYPTO_USER_API_SKCIPHER=m
 CONFIG_CRYPTO_USER_API_RNG=m
@@ -685,6 +710,7 @@ CONFIG_CRYPTO_SHA256_S390=m
 CONFIG_CRYPTO_SHA512_S390=m
 CONFIG_CRYPTO_DES_S390=m
 CONFIG_CRYPTO_AES_S390=m
+CONFIG_CRYPTO_PAES_S390=m
 CONFIG_CRYPTO_GHASH_S390=m
 CONFIG_CRYPTO_CRC32_S390=y
 CONFIG_ASYMMETRIC_KEY_TYPE=y
@@ -692,6 +718,7 @@ CONFIG_ASYMMETRIC_PUBLIC_KEY_SUBTYPE=m
 CONFIG_X509_CERTIFICATE_PARSER=m
 CONFIG_CRC7=m
 CONFIG_CRC8=m
+CONFIG_RANDOM32_SELFTEST=y
 CONFIG_CORDIC=m
 CONFIG_CMM=m
 CONFIG_APPLDATA_BASE=y


@@ -31,6 +31,7 @@ CONFIG_USER_NS=y
 CONFIG_SCHED_AUTOGROUP=y
 CONFIG_BLK_DEV_INITRD=y
 CONFIG_EXPERT=y
+# CONFIG_SYSFS_SYSCALL is not set
 CONFIG_BPF_SYSCALL=y
 CONFIG_USERFAULTFD=y
 # CONFIG_COMPAT_BRK is not set
@@ -46,7 +47,10 @@ CONFIG_MODULE_UNLOAD=y
 CONFIG_MODULE_FORCE_UNLOAD=y
 CONFIG_MODVERSIONS=y
 CONFIG_MODULE_SRCVERSION_ALL=y
+CONFIG_BLK_DEV_INTEGRITY=y
 CONFIG_BLK_DEV_THROTTLING=y
+CONFIG_BLK_WBT=y
+CONFIG_BLK_WBT_SQ=y
 CONFIG_PARTITION_ADVANCED=y
 CONFIG_IBM_PARTITION=y
 CONFIG_BSD_DISKLABEL=y
@@ -88,6 +92,8 @@ CONFIG_UNIX=y
 CONFIG_UNIX_DIAG=m
 CONFIG_XFRM_USER=m
 CONFIG_NET_KEY=m
+CONFIG_SMC=m
+CONFIG_SMC_DIAG=m
 CONFIG_INET=y
 CONFIG_IP_MULTICAST=y
 CONFIG_IP_ADVANCED_ROUTER=y
@@ -356,6 +362,7 @@ CONFIG_NET_ACT_SIMP=m
 CONFIG_NET_ACT_SKBEDIT=m
 CONFIG_NET_ACT_CSUM=m
 CONFIG_DNS_RESOLVER=y
+CONFIG_NETLINK_DIAG=m
 CONFIG_CGROUP_NET_PRIO=y
 CONFIG_BPF_JIT=y
 CONFIG_NET_PKTGEN=m
@@ -364,16 +371,18 @@ CONFIG_DEVTMPFS=y
 CONFIG_DMA_CMA=y
 CONFIG_CMA_SIZE_MBYTES=0
 CONFIG_CONNECTOR=y
+CONFIG_ZRAM=m
 CONFIG_BLK_DEV_LOOP=m
 CONFIG_BLK_DEV_CRYPTOLOOP=m
+CONFIG_BLK_DEV_DRBD=m
 CONFIG_BLK_DEV_NBD=m
 CONFIG_BLK_DEV_OSD=m
 CONFIG_BLK_DEV_RAM=y
 CONFIG_BLK_DEV_RAM_SIZE=32768
-CONFIG_CDROM_PKTCDVD=m
-CONFIG_ATA_OVER_ETH=m
+CONFIG_BLK_DEV_RAM_DAX=y
 CONFIG_VIRTIO_BLK=y
 CONFIG_ENCLOSURE_SERVICES=m
+CONFIG_GENWQE=m
 CONFIG_RAID_ATTRS=m
 CONFIG_SCSI=y
 CONFIG_BLK_DEV_SD=y
@@ -439,6 +448,8 @@ CONFIG_NLMON=m
 # CONFIG_NET_VENDOR_INTEL is not set
 # CONFIG_NET_VENDOR_MARVELL is not set
 CONFIG_MLX4_EN=m
+CONFIG_MLX5_CORE=m
+CONFIG_MLX5_CORE_EN=y
 # CONFIG_NET_VENDOR_NATSEMI is not set
 CONFIG_PPP=m
 CONFIG_PPP_BSDCOMP=m
@@ -449,7 +460,6 @@ CONFIG_PPTP=m
 CONFIG_PPPOL2TP=m
 CONFIG_PPP_ASYNC=m
 CONFIG_PPP_SYNC_TTY=m
-# CONFIG_INPUT_MOUSEDEV_PSAUX is not set
 # CONFIG_INPUT_KEYBOARD is not set
 # CONFIG_INPUT_MOUSE is not set
 # CONFIG_SERIO is not set
@@ -468,6 +478,7 @@ CONFIG_DIAG288_WATCHDOG=m
 CONFIG_INFINIBAND=m
 CONFIG_INFINIBAND_USER_ACCESS=m
 CONFIG_MLX4_INFINIBAND=m
+CONFIG_MLX5_INFINIBAND=m
 CONFIG_VIRTIO_BALLOON=m
 CONFIG_EXT4_FS=y
 CONFIG_EXT4_FS_POSIX_ACL=y
@@ -483,11 +494,15 @@ CONFIG_XFS_QUOTA=y
 CONFIG_XFS_POSIX_ACL=y
 CONFIG_XFS_RT=y
 CONFIG_GFS2_FS=m
+CONFIG_GFS2_FS_LOCKING_DLM=y
 CONFIG_OCFS2_FS=m
 CONFIG_BTRFS_FS=y
 CONFIG_BTRFS_FS_POSIX_ACL=y
 CONFIG_NILFS2_FS=m
+CONFIG_FS_DAX=y
+CONFIG_EXPORTFS_BLOCK_OPS=y
 CONFIG_FANOTIFY=y
+CONFIG_FANOTIFY_ACCESS_PERMISSIONS=y
 CONFIG_QUOTA_NETLINK_INTERFACE=y
 CONFIG_QFMT_V1=m
 CONFIG_QFMT_V2=m
@@ -553,7 +568,6 @@ CONFIG_UNUSED_SYMBOLS=y
 CONFIG_MAGIC_SYSRQ=y
 CONFIG_DEBUG_MEMORY_INIT=y
 CONFIG_PANIC_ON_OOPS=y
-CONFIG_TIMER_STATS=y
 CONFIG_RCU_TORTURE_TEST=m
 CONFIG_RCU_CPU_STALL_TIMEOUT=60
 CONFIG_LATENCYTOP=y
@@ -576,6 +590,7 @@ CONFIG_BIG_KEYS=y
 CONFIG_ENCRYPTED_KEYS=m
 CONFIG_SECURITY=y
 CONFIG_SECURITY_NETWORK=y
+CONFIG_HARDENED_USERCOPY=y
 CONFIG_SECURITY_SELINUX=y
 CONFIG_SECURITY_SELINUX_BOOTPARAM=y
 CONFIG_SECURITY_SELINUX_BOOTPARAM_VALUE=0
@@ -599,6 +614,7 @@ CONFIG_CRYPTO_CHACHA20POLY1305=m
 CONFIG_CRYPTO_LRW=m
 CONFIG_CRYPTO_PCBC=m
 CONFIG_CRYPTO_KEYWRAP=m
+CONFIG_CRYPTO_CMAC=m
 CONFIG_CRYPTO_XCBC=m
 CONFIG_CRYPTO_VMAC=m
 CONFIG_CRYPTO_CRC32=m
@@ -611,6 +627,7 @@ CONFIG_CRYPTO_SHA512=m
 CONFIG_CRYPTO_SHA3=m
 CONFIG_CRYPTO_TGR192=m
 CONFIG_CRYPTO_WP512=m
+CONFIG_CRYPTO_AES_TI=m
 CONFIG_CRYPTO_ANUBIS=m
 CONFIG_CRYPTO_BLOWFISH=m
 CONFIG_CRYPTO_CAMELLIA=m
@@ -626,16 +643,19 @@ CONFIG_CRYPTO_TWOFISH=m
 CONFIG_CRYPTO_842=m
 CONFIG_CRYPTO_LZ4=m
 CONFIG_CRYPTO_LZ4HC=m
+CONFIG_CRYPTO_ANSI_CPRNG=m
 CONFIG_CRYPTO_USER_API_HASH=m
 CONFIG_CRYPTO_USER_API_SKCIPHER=m
 CONFIG_CRYPTO_USER_API_RNG=m
 CONFIG_CRYPTO_USER_API_AEAD=m
 CONFIG_ZCRYPT=m
+CONFIG_PKEY=m
 CONFIG_CRYPTO_SHA1_S390=m
 CONFIG_CRYPTO_SHA256_S390=m
 CONFIG_CRYPTO_SHA512_S390=m
 CONFIG_CRYPTO_DES_S390=m
 CONFIG_CRYPTO_AES_S390=m
+CONFIG_CRYPTO_PAES_S390=m
 CONFIG_CRYPTO_GHASH_S390=m
 CONFIG_CRYPTO_CRC32_S390=y
 CONFIG_CRC7=m


@@ -31,6 +31,7 @@ CONFIG_USER_NS=y
 CONFIG_SCHED_AUTOGROUP=y
 CONFIG_BLK_DEV_INITRD=y
 CONFIG_EXPERT=y
+# CONFIG_SYSFS_SYSCALL is not set
 CONFIG_BPF_SYSCALL=y
 CONFIG_USERFAULTFD=y
 # CONFIG_COMPAT_BRK is not set
@@ -44,7 +45,10 @@ CONFIG_MODULE_UNLOAD=y
 CONFIG_MODULE_FORCE_UNLOAD=y
 CONFIG_MODVERSIONS=y
 CONFIG_MODULE_SRCVERSION_ALL=y
+CONFIG_BLK_DEV_INTEGRITY=y
 CONFIG_BLK_DEV_THROTTLING=y
+CONFIG_BLK_WBT=y
+CONFIG_BLK_WBT_SQ=y
 CONFIG_PARTITION_ADVANCED=y
 CONFIG_IBM_PARTITION=y
 CONFIG_BSD_DISKLABEL=y
@@ -86,6 +90,8 @@ CONFIG_UNIX=y
 CONFIG_UNIX_DIAG=m
 CONFIG_XFRM_USER=m
 CONFIG_NET_KEY=m
+CONFIG_SMC=m
+CONFIG_SMC_DIAG=m
 CONFIG_INET=y
 CONFIG_IP_MULTICAST=y
 CONFIG_IP_ADVANCED_ROUTER=y
@@ -354,6 +360,7 @@ CONFIG_NET_ACT_SIMP=m
 CONFIG_NET_ACT_SKBEDIT=m
 CONFIG_NET_ACT_CSUM=m
 CONFIG_DNS_RESOLVER=y
+CONFIG_NETLINK_DIAG=m
 CONFIG_CGROUP_NET_PRIO=y
 CONFIG_BPF_JIT=y
 CONFIG_NET_PKTGEN=m
@@ -362,16 +369,18 @@ CONFIG_DEVTMPFS=y
 CONFIG_DMA_CMA=y
 CONFIG_CMA_SIZE_MBYTES=0
 CONFIG_CONNECTOR=y
+CONFIG_ZRAM=m
 CONFIG_BLK_DEV_LOOP=m
 CONFIG_BLK_DEV_CRYPTOLOOP=m
+CONFIG_BLK_DEV_DRBD=m
 CONFIG_BLK_DEV_NBD=m
 CONFIG_BLK_DEV_OSD=m
 CONFIG_BLK_DEV_RAM=y
 CONFIG_BLK_DEV_RAM_SIZE=32768
-CONFIG_CDROM_PKTCDVD=m
-CONFIG_ATA_OVER_ETH=m
+CONFIG_BLK_DEV_RAM_DAX=y
 CONFIG_VIRTIO_BLK=y
 CONFIG_ENCLOSURE_SERVICES=m
+CONFIG_GENWQE=m
 CONFIG_RAID_ATTRS=m
 CONFIG_SCSI=y
 CONFIG_BLK_DEV_SD=y
@@ -437,6 +446,8 @@ CONFIG_NLMON=m
 # CONFIG_NET_VENDOR_INTEL is not set
 # CONFIG_NET_VENDOR_MARVELL is not set
 CONFIG_MLX4_EN=m
+CONFIG_MLX5_CORE=m
+CONFIG_MLX5_CORE_EN=y
 # CONFIG_NET_VENDOR_NATSEMI is not set
 CONFIG_PPP=m
 CONFIG_PPP_BSDCOMP=m
@@ -447,7 +458,6 @@ CONFIG_PPTP=m
 CONFIG_PPPOL2TP=m
 CONFIG_PPP_ASYNC=m
 CONFIG_PPP_SYNC_TTY=m
-# CONFIG_INPUT_MOUSEDEV_PSAUX is not set
 # CONFIG_INPUT_KEYBOARD is not set
 # CONFIG_INPUT_MOUSE is not set
 # CONFIG_SERIO is not set
@@ -466,6 +476,7 @@ CONFIG_DIAG288_WATCHDOG=m
 CONFIG_INFINIBAND=m
 CONFIG_INFINIBAND_USER_ACCESS=m
 CONFIG_MLX4_INFINIBAND=m
+CONFIG_MLX5_INFINIBAND=m
 CONFIG_VIRTIO_BALLOON=m
 CONFIG_EXT4_FS=y
 CONFIG_EXT4_FS_POSIX_ACL=y
@@ -481,11 +492,15 @@ CONFIG_XFS_QUOTA=y
 CONFIG_XFS_POSIX_ACL=y
 CONFIG_XFS_RT=y
 CONFIG_GFS2_FS=m
+CONFIG_GFS2_FS_LOCKING_DLM=y
 CONFIG_OCFS2_FS=m
 CONFIG_BTRFS_FS=y
 CONFIG_BTRFS_FS_POSIX_ACL=y
 CONFIG_NILFS2_FS=m
+CONFIG_FS_DAX=y
+CONFIG_EXPORTFS_BLOCK_OPS=y
 CONFIG_FANOTIFY=y
+CONFIG_FANOTIFY_ACCESS_PERMISSIONS=y
 CONFIG_QUOTA_NETLINK_INTERFACE=y
 CONFIG_QFMT_V1=m
 CONFIG_QFMT_V2=m
@@ -551,7 +566,6 @@ CONFIG_UNUSED_SYMBOLS=y
 CONFIG_MAGIC_SYSRQ=y
 CONFIG_DEBUG_MEMORY_INIT=y
 CONFIG_PANIC_ON_OOPS=y
-CONFIG_TIMER_STATS=y
 CONFIG_RCU_TORTURE_TEST=m
 CONFIG_RCU_CPU_STALL_TIMEOUT=60
 CONFIG_LATENCYTOP=y
@@ -574,6 +588,7 @@ CONFIG_BIG_KEYS=y
 CONFIG_ENCRYPTED_KEYS=m
 CONFIG_SECURITY=y
 CONFIG_SECURITY_NETWORK=y
+CONFIG_HARDENED_USERCOPY=y
 CONFIG_SECURITY_SELINUX=y
 CONFIG_SECURITY_SELINUX_BOOTPARAM=y
 CONFIG_SECURITY_SELINUX_BOOTPARAM_VALUE=0
@@ -597,6 +612,7 @@ CONFIG_CRYPTO_CHACHA20POLY1305=m
 CONFIG_CRYPTO_LRW=m
 CONFIG_CRYPTO_PCBC=m
 CONFIG_CRYPTO_KEYWRAP=m
+CONFIG_CRYPTO_CMAC=m
 CONFIG_CRYPTO_XCBC=m
 CONFIG_CRYPTO_VMAC=m
 CONFIG_CRYPTO_CRC32=m
@@ -609,6 +625,7 @@ CONFIG_CRYPTO_SHA512=m
 CONFIG_CRYPTO_SHA3=m
 CONFIG_CRYPTO_TGR192=m
 CONFIG_CRYPTO_WP512=m
+CONFIG_CRYPTO_AES_TI=m
 CONFIG_CRYPTO_ANUBIS=m
 CONFIG_CRYPTO_BLOWFISH=m
 CONFIG_CRYPTO_CAMELLIA=m
@@ -624,6 +641,7 @@ CONFIG_CRYPTO_TWOFISH=m
 CONFIG_CRYPTO_842=m
 CONFIG_CRYPTO_LZ4=m
 CONFIG_CRYPTO_LZ4HC=m
+CONFIG_CRYPTO_ANSI_CPRNG=m
 CONFIG_CRYPTO_USER_API_HASH=m
 CONFIG_CRYPTO_USER_API_SKCIPHER=m
 CONFIG_CRYPTO_USER_API_RNG=m
@@ -635,6 +653,7 @@ CONFIG_CRYPTO_SHA256_S390=m
 CONFIG_CRYPTO_SHA512_S390=m
 CONFIG_CRYPTO_DES_S390=m
 CONFIG_CRYPTO_AES_S390=m
+CONFIG_CRYPTO_PAES_S390=m
 CONFIG_CRYPTO_GHASH_S390=m
 CONFIG_CRYPTO_CRC32_S390=y
 CONFIG_CRC7=m


@@ -12,8 +12,10 @@ CONFIG_TUNE_ZEC12=y
 CONFIG_NR_CPUS=2
 # CONFIG_HOTPLUG_CPU is not set
 CONFIG_HZ_100=y
+# CONFIG_ARCH_RANDOM is not set
 # CONFIG_COMPACTION is not set
 # CONFIG_MIGRATION is not set
+# CONFIG_BOUNCE is not set
 # CONFIG_CHECK_STACK is not set
 # CONFIG_CHSC_SCH is not set
 # CONFIG_SCM_BUS is not set
@@ -36,11 +38,11 @@ CONFIG_SCSI_CONSTANTS=y
 CONFIG_SCSI_LOGGING=y
 CONFIG_SCSI_FC_ATTRS=y
 CONFIG_ZFCP=y
-# CONFIG_INPUT_MOUSEDEV_PSAUX is not set
 # CONFIG_INPUT_KEYBOARD is not set
 # CONFIG_INPUT_MOUSE is not set
 # CONFIG_SERIO is not set
 # CONFIG_HVC_IUCV is not set
+# CONFIG_HW_RANDOM_S390 is not set
 CONFIG_RAW_DRIVER=y
 # CONFIG_SCLP_ASYNC is not set
 # CONFIG_HMC_DRV is not set
@@ -54,9 +56,9 @@ CONFIG_RAW_DRIVER=y
 # CONFIG_INOTIFY_USER is not set
 CONFIG_CONFIGFS_FS=y
 # CONFIG_MISC_FILESYSTEMS is not set
+# CONFIG_NETWORK_FILESYSTEMS is not set
 CONFIG_PRINTK_TIME=y
 CONFIG_DEBUG_INFO=y
-CONFIG_DEBUG_FS=y
 CONFIG_DEBUG_KERNEL=y
 CONFIG_PANIC_ON_OOPS=y
 # CONFIG_SCHED_DEBUG is not set


@@ -28,6 +28,7 @@ CONFIG_NAMESPACES=y
 CONFIG_USER_NS=y
 CONFIG_BLK_DEV_INITRD=y
 CONFIG_EXPERT=y
+# CONFIG_SYSFS_SYSCALL is not set
 CONFIG_BPF_SYSCALL=y
 CONFIG_USERFAULTFD=y
 # CONFIG_COMPAT_BRK is not set
@@ -108,7 +109,6 @@ CONFIG_ZFCP=y
 CONFIG_SCSI_VIRTIO=y
 CONFIG_MD=y
 CONFIG_MD_LINEAR=m
-CONFIG_MD_RAID0=m
 CONFIG_MD_MULTIPATH=m
 CONFIG_BLK_DEV_DM=y
 CONFIG_DM_CRYPT=m
@@ -131,6 +131,7 @@ CONFIG_TUN=m
 CONFIG_VIRTIO_NET=y
 # CONFIG_NET_VENDOR_ALACRITECH is not set
 # CONFIG_NET_VENDOR_SOLARFLARE is not set
+# CONFIG_NET_VENDOR_SYNOPSYS is not set
 # CONFIG_INPUT is not set
 # CONFIG_SERIO is not set
 CONFIG_DEVKMEM=y
@@ -162,7 +163,6 @@ CONFIG_MAGIC_SYSRQ=y
 CONFIG_DEBUG_PAGEALLOC=y
 CONFIG_DETECT_HUNG_TASK=y
 CONFIG_PANIC_ON_OOPS=y
-CONFIG_TIMER_STATS=y
 CONFIG_DEBUG_RT_MUTEXES=y
 CONFIG_PROVE_LOCKING=y
 CONFIG_LOCK_STAT=y
@@ -172,14 +172,12 @@ CONFIG_DEBUG_LIST=y
 CONFIG_DEBUG_SG=y
 CONFIG_DEBUG_NOTIFIERS=y
 CONFIG_RCU_CPU_STALL_TIMEOUT=60
-CONFIG_RCU_TRACE=y
 CONFIG_LATENCYTOP=y
 CONFIG_SCHED_TRACER=y
 CONFIG_FTRACE_SYSCALLS=y
 CONFIG_TRACER_SNAPSHOT_PER_CPU_SWAP=y
 CONFIG_STACK_TRACER=y
 CONFIG_BLK_DEV_IO_TRACE=y
-CONFIG_UPROBE_EVENTS=y
 CONFIG_FUNCTION_PROFILER=y
 CONFIG_TRACE_ENUM_MAP_FILE=y
 CONFIG_KPROBES_SANITY_TEST=y
@@ -190,7 +188,6 @@ CONFIG_CRYPTO_CCM=m
 CONFIG_CRYPTO_GCM=m
 CONFIG_CRYPTO_CBC=y
 CONFIG_CRYPTO_CTS=m
-CONFIG_CRYPTO_ECB=m
 CONFIG_CRYPTO_LRW=m
 CONFIG_CRYPTO_PCBC=m
 CONFIG_CRYPTO_XTS=m
@@ -230,6 +227,7 @@ CONFIG_CRYPTO_USER_API_SKCIPHER=m
 CONFIG_CRYPTO_USER_API_RNG=m
 CONFIG_ZCRYPT=m
 CONFIG_PKEY=m
+CONFIG_CRYPTO_PAES_S390=m
 CONFIG_CRYPTO_SHA1_S390=m
 CONFIG_CRYPTO_SHA256_S390=m
 CONFIG_CRYPTO_SHA512_S390=m


@@ -231,12 +231,17 @@ ENTRY(sie64a)
 lctlg %c1,%c1,__LC_USER_ASCE # load primary asce
 .Lsie_done:
 # some program checks are suppressing. C code (e.g. do_protection_exception)
-# will rewind the PSW by the ILC, which is 4 bytes in case of SIE. Other
-# instructions between sie64a and .Lsie_done should not cause program
-# interrupts. So lets use a nop (47 00 00 00) as a landing pad.
+# will rewind the PSW by the ILC, which is often 4 bytes in case of SIE. There
+# are some corner cases (e.g. runtime instrumentation) where ILC is unpredictable.
+# Other instructions between sie64a and .Lsie_done should not cause program
+# interrupts. So lets use 3 nops as a landing pad for all possible rewinds.
 # See also .Lcleanup_sie
-.Lrewind_pad:
-nop 0
+.Lrewind_pad6:
+nopr 7
+.Lrewind_pad4:
+nopr 7
+.Lrewind_pad2:
+nopr 7
 .globl sie_exit
 sie_exit:
 lg %r14,__SF_EMPTY+8(%r15) # load guest register save area
@@ -249,7 +254,9 @@ sie_exit:
 stg %r14,__SF_EMPTY+16(%r15) # set exit reason code
 j sie_exit
-EX_TABLE(.Lrewind_pad,.Lsie_fault)
+EX_TABLE(.Lrewind_pad6,.Lsie_fault)
+EX_TABLE(.Lrewind_pad4,.Lsie_fault)
+EX_TABLE(.Lrewind_pad2,.Lsie_fault)
 EX_TABLE(sie_exit,.Lsie_fault)
 EXPORT_SYMBOL(sie64a)
 EXPORT_SYMBOL(sie_exit)


@@ -101,7 +101,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
 addr = PAGE_ALIGN(addr);
 vma = find_vma(mm, addr);
 if (TASK_SIZE - len >= addr && addr >= mmap_min_addr &&
-(!vma || addr + len <= vma->vm_start))
+(!vma || addr + len <= vm_start_gap(vma)))
 goto check_asce_limit;
 }
@@ -151,7 +151,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
 addr = PAGE_ALIGN(addr);
 vma = find_vma(mm, addr);
 if (TASK_SIZE - len >= addr && addr >= mmap_min_addr &&
-(!vma || addr + len <= vma->vm_start))
+(!vma || addr + len <= vm_start_gap(vma)))
 goto check_asce_limit;
 }


@@ -64,7 +64,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
 vma = find_vma(mm, addr);
 if (TASK_SIZE - len >= addr &&
-(!vma || addr + len <= vma->vm_start))
+(!vma || addr + len <= vm_start_gap(vma)))
 return addr;
 }
@@ -114,7 +114,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
 vma = find_vma(mm, addr);
 if (TASK_SIZE - len >= addr &&
-(!vma || addr + len <= vma->vm_start))
+(!vma || addr + len <= vm_start_gap(vma)))
 return addr;
 }


@@ -120,7 +120,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
 vma = find_vma(mm, addr);
 if (task_size - len >= addr &&
-(!vma || addr + len <= vma->vm_start))
+(!vma || addr + len <= vm_start_gap(vma)))
 return addr;
 }
@@ -183,7 +183,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
 vma = find_vma(mm, addr);
 if (task_size - len >= addr &&
-(!vma || addr + len <= vma->vm_start))
+(!vma || addr + len <= vm_start_gap(vma)))
 return addr;
 }


@@ -120,7 +120,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
 addr = ALIGN(addr, huge_page_size(h));
 vma = find_vma(mm, addr);
 if (task_size - len >= addr &&
-(!vma || addr + len <= vma->vm_start))
+(!vma || addr + len <= vm_start_gap(vma)))
 return addr;
 }
 if (mm->get_unmapped_area == arch_get_unmapped_area)


@@ -233,7 +233,7 @@ unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
 addr = ALIGN(addr, huge_page_size(h));
 vma = find_vma(mm, addr);
 if (TASK_SIZE - len >= addr &&
-(!vma || addr + len <= vma->vm_start))
+(!vma || addr + len <= vm_start_gap(vma)))
 return addr;
 }
 if (current->mm->get_unmapped_area == arch_get_unmapped_area)


@@ -29,6 +29,7 @@ struct pt_regs;
 } while (0)
 extern int fixup_exception(struct pt_regs *regs, int trapnr);
+extern int fixup_bug(struct pt_regs *regs, int trapnr);
 extern bool ex_has_fault_handler(unsigned long ip);
 extern void early_fixup_exception(struct pt_regs *regs, int trapnr);


@@ -144,7 +144,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
 addr = PAGE_ALIGN(addr);
 vma = find_vma(mm, addr);
 if (end - len >= addr &&
-(!vma || addr + len <= vma->vm_start))
+(!vma || addr + len <= vm_start_gap(vma)))
 return addr;
 }
@@ -187,7 +187,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
 addr = PAGE_ALIGN(addr);
 vma = find_vma(mm, addr);
 if (TASK_SIZE - len >= addr &&
-(!vma || addr + len <= vma->vm_start))
+(!vma || addr + len <= vm_start_gap(vma)))
 return addr;
 }


@@ -182,7 +182,7 @@ int is_valid_bugaddr(unsigned long addr)
 return ud == INSN_UD0 || ud == INSN_UD2;
 }
-static int fixup_bug(struct pt_regs *regs, int trapnr)
+int fixup_bug(struct pt_regs *regs, int trapnr)
 {
 if (trapnr != X86_TRAP_UD)
 return 0;


@@ -162,6 +162,9 @@ void __init early_fixup_exception(struct pt_regs *regs, int trapnr)
 if (fixup_exception(regs, trapnr))
 return;
+if (fixup_bug(regs, trapnr))
+return;
 fail:
 early_printk("PANIC: early exception 0x%02x IP %lx:%lx error %lx cr2 0x%lx\n",
 (unsigned)trapnr, (unsigned long)regs->cs, regs->ip,


@@ -148,7 +148,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
 addr = ALIGN(addr, huge_page_size(h));
 vma = find_vma(mm, addr);
 if (TASK_SIZE - len >= addr &&
-(!vma || addr + len <= vma->vm_start))
+(!vma || addr + len <= vm_start_gap(vma)))
 return addr;
 }
 if (mm->get_unmapped_area == arch_get_unmapped_area)

View File

@ -161,16 +161,16 @@ static int page_size_mask;
static void __init probe_page_size_mask(void) static void __init probe_page_size_mask(void)
{ {
#if !defined(CONFIG_KMEMCHECK)
/* /*
* For CONFIG_KMEMCHECK or pagealloc debugging, identity mapping will * For CONFIG_KMEMCHECK or pagealloc debugging, identity mapping will
* use small pages. * use small pages.
* This will simplify cpa(), which otherwise needs to support splitting * This will simplify cpa(), which otherwise needs to support splitting
* large pages into small in interrupt context, etc. * large pages into small in interrupt context, etc.
*/ */
if (boot_cpu_has(X86_FEATURE_PSE) && !debug_pagealloc_enabled()) if (boot_cpu_has(X86_FEATURE_PSE) && !debug_pagealloc_enabled() && !IS_ENABLED(CONFIG_KMEMCHECK))
page_size_mask |= 1 << PG_LEVEL_2M; page_size_mask |= 1 << PG_LEVEL_2M;
#endif else
direct_gbpages = 0;
/* Enable PSE if available */ /* Enable PSE if available */
if (boot_cpu_has(X86_FEATURE_PSE)) if (boot_cpu_has(X86_FEATURE_PSE))

View File

@ -29,7 +29,8 @@ static inline void variant_irq_disable(unsigned int irq) { }
# define PLATFORM_NR_IRQS 0 # define PLATFORM_NR_IRQS 0
#endif #endif
#define XTENSA_NR_IRQS XCHAL_NUM_INTERRUPTS #define XTENSA_NR_IRQS XCHAL_NUM_INTERRUPTS
#define NR_IRQS (XTENSA_NR_IRQS + VARIANT_NR_IRQS + PLATFORM_NR_IRQS) #define NR_IRQS (XTENSA_NR_IRQS + VARIANT_NR_IRQS + PLATFORM_NR_IRQS + 1)
#define XTENSA_PIC_LINUX_IRQ(hwirq) ((hwirq) + 1)
#if VARIANT_NR_IRQS == 0 #if VARIANT_NR_IRQS == 0
static inline void variant_init_irq(void) { } static inline void variant_init_irq(void) { }

View File

@ -34,11 +34,6 @@ asmlinkage void do_IRQ(int hwirq, struct pt_regs *regs)
{ {
int irq = irq_find_mapping(NULL, hwirq); int irq = irq_find_mapping(NULL, hwirq);
if (hwirq >= NR_IRQS) {
printk(KERN_EMERG "%s: cannot handle IRQ %d\n",
__func__, hwirq);
}
#ifdef CONFIG_DEBUG_STACKOVERFLOW #ifdef CONFIG_DEBUG_STACKOVERFLOW
/* Debugging check for stack overflow: is there less than 1KB free? */ /* Debugging check for stack overflow: is there less than 1KB free? */
{ {

View File

@ -593,8 +593,7 @@ c_show(struct seq_file *f, void *slot)
(ccount_freq/10000) % 100, (ccount_freq/10000) % 100,
loops_per_jiffy/(500000/HZ), loops_per_jiffy/(500000/HZ),
(loops_per_jiffy/(5000/HZ)) % 100); (loops_per_jiffy/(5000/HZ)) % 100);
seq_puts(f, "flags\t\t: "
seq_printf(f,"flags\t\t: "
#if XCHAL_HAVE_NMI #if XCHAL_HAVE_NMI
"nmi " "nmi "
#endif #endif

View File

@ -88,7 +88,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
/* At this point: (!vmm || addr < vmm->vm_end). */ /* At this point: (!vmm || addr < vmm->vm_end). */
if (TASK_SIZE - len < addr) if (TASK_SIZE - len < addr)
return -ENOMEM; return -ENOMEM;
if (!vmm || addr + len <= vmm->vm_start) if (!vmm || addr + len <= vm_start_gap(vmm))
return addr; return addr;
addr = vmm->vm_end; addr = vmm->vm_end;
if (flags & MAP_SHARED) if (flags & MAP_SHARED)

View File

@ -118,7 +118,7 @@ SECTIONS
SECTION_VECTOR (.KernelExceptionVector.text, KERNEL_VECTOR_VADDR) SECTION_VECTOR (.KernelExceptionVector.text, KERNEL_VECTOR_VADDR)
SECTION_VECTOR (.UserExceptionVector.literal, USER_VECTOR_VADDR - 4) SECTION_VECTOR (.UserExceptionVector.literal, USER_VECTOR_VADDR - 4)
SECTION_VECTOR (.UserExceptionVector.text, USER_VECTOR_VADDR) SECTION_VECTOR (.UserExceptionVector.text, USER_VECTOR_VADDR)
SECTION_VECTOR (.DoubleExceptionVector.literal, DOUBLEEXC_VECTOR_VADDR - 48) SECTION_VECTOR (.DoubleExceptionVector.literal, DOUBLEEXC_VECTOR_VADDR - 20)
SECTION_VECTOR (.DoubleExceptionVector.text, DOUBLEEXC_VECTOR_VADDR) SECTION_VECTOR (.DoubleExceptionVector.text, DOUBLEEXC_VECTOR_VADDR)
#endif #endif
@ -306,13 +306,13 @@ SECTIONS
.UserExceptionVector.literal) .UserExceptionVector.literal)
SECTION_VECTOR (_DoubleExceptionVector_literal, SECTION_VECTOR (_DoubleExceptionVector_literal,
.DoubleExceptionVector.literal, .DoubleExceptionVector.literal,
DOUBLEEXC_VECTOR_VADDR - 48, DOUBLEEXC_VECTOR_VADDR - 20,
SIZEOF(.UserExceptionVector.text), SIZEOF(.UserExceptionVector.text),
.UserExceptionVector.text) .UserExceptionVector.text)
SECTION_VECTOR (_DoubleExceptionVector_text, SECTION_VECTOR (_DoubleExceptionVector_text,
.DoubleExceptionVector.text, .DoubleExceptionVector.text,
DOUBLEEXC_VECTOR_VADDR, DOUBLEEXC_VECTOR_VADDR,
48, 20,
.DoubleExceptionVector.literal) .DoubleExceptionVector.literal)
. = (LOADADDR( .DoubleExceptionVector.text ) + SIZEOF( .DoubleExceptionVector.text ) + 3) & ~ 3; . = (LOADADDR( .DoubleExceptionVector.text ) + SIZEOF( .DoubleExceptionVector.text ) + 3) & ~ 3;

View File

@ -317,8 +317,7 @@ static int __init simdisk_init(void)
if (simdisk_count > MAX_SIMDISK_COUNT) if (simdisk_count > MAX_SIMDISK_COUNT)
simdisk_count = MAX_SIMDISK_COUNT; simdisk_count = MAX_SIMDISK_COUNT;
-	sddev = kmalloc(simdisk_count * sizeof(struct simdisk),
-			GFP_KERNEL);
+	sddev = kmalloc_array(simdisk_count, sizeof(*sddev), GFP_KERNEL);
if (sddev == NULL) if (sddev == NULL)
goto out_unregister; goto out_unregister;
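
kmalloc_array() checks the count * size multiplication for overflow before allocating. A user-space sketch of the same guard; alloc_array() here is a made-up helper, not a kernel API:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

static void *alloc_array(size_t n, size_t size)
{
	/* Refuse the allocation if n * size would overflow size_t. */
	if (size && n > SIZE_MAX / size)
		return NULL;
	return malloc(n * size);
}

struct simdisk_model { char name[32]; int users; };

int main(void)
{
	struct simdisk_model *sddev = alloc_array(16, sizeof(*sddev));

	if (!sddev) {
		fprintf(stderr, "allocation failed\n");
		return 1;
	}
	printf("allocated %zu bytes\n", 16 * sizeof(*sddev));
	free(sddev);
	return 0;
}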

View File

@ -24,16 +24,18 @@
/* Interrupt configuration. */ /* Interrupt configuration. */
#define PLATFORM_NR_IRQS 10 #define PLATFORM_NR_IRQS 0
/* Default assignment of LX60 devices to external interrupts. */ /* Default assignment of LX60 devices to external interrupts. */
#ifdef CONFIG_XTENSA_MX #ifdef CONFIG_XTENSA_MX
#define DUART16552_INTNUM XCHAL_EXTINT3_NUM #define DUART16552_INTNUM XCHAL_EXTINT3_NUM
#define OETH_IRQ XCHAL_EXTINT4_NUM #define OETH_IRQ XCHAL_EXTINT4_NUM
#define C67X00_IRQ XCHAL_EXTINT8_NUM
#else #else
#define DUART16552_INTNUM XCHAL_EXTINT0_NUM #define DUART16552_INTNUM XCHAL_EXTINT0_NUM
#define OETH_IRQ XCHAL_EXTINT1_NUM #define OETH_IRQ XCHAL_EXTINT1_NUM
#define C67X00_IRQ XCHAL_EXTINT5_NUM
#endif #endif
/* /*
@ -63,5 +65,5 @@
#define C67X00_PADDR (XCHAL_KIO_PADDR + 0x0D0D0000) #define C67X00_PADDR (XCHAL_KIO_PADDR + 0x0D0D0000)
#define C67X00_SIZE 0x10 #define C67X00_SIZE 0x10
#define C67X00_IRQ 5
#endif /* __XTENSA_XTAVNET_HARDWARE_H */ #endif /* __XTENSA_XTAVNET_HARDWARE_H */

View File

@ -175,8 +175,8 @@ static struct resource ethoc_res[] = {
.flags = IORESOURCE_MEM, .flags = IORESOURCE_MEM,
}, },
[2] = { /* IRQ number */ [2] = { /* IRQ number */
.start = OETH_IRQ, .start = XTENSA_PIC_LINUX_IRQ(OETH_IRQ),
.end = OETH_IRQ, .end = XTENSA_PIC_LINUX_IRQ(OETH_IRQ),
.flags = IORESOURCE_IRQ, .flags = IORESOURCE_IRQ,
}, },
}; };
@ -213,8 +213,8 @@ static struct resource c67x00_res[] = {
.flags = IORESOURCE_MEM, .flags = IORESOURCE_MEM,
}, },
[1] = { /* IRQ number */ [1] = { /* IRQ number */
.start = C67X00_IRQ, .start = XTENSA_PIC_LINUX_IRQ(C67X00_IRQ),
.end = C67X00_IRQ, .end = XTENSA_PIC_LINUX_IRQ(C67X00_IRQ),
.flags = IORESOURCE_IRQ, .flags = IORESOURCE_IRQ,
}, },
}; };
@ -247,7 +247,7 @@ static struct resource serial_resource = {
static struct plat_serial8250_port serial_platform_data[] = { static struct plat_serial8250_port serial_platform_data[] = {
[0] = { [0] = {
.mapbase = DUART16552_PADDR, .mapbase = DUART16552_PADDR,
.irq = DUART16552_INTNUM, .irq = XTENSA_PIC_LINUX_IRQ(DUART16552_INTNUM),
.flags = UPF_BOOT_AUTOCONF | UPF_SKIP_TEST | .flags = UPF_BOOT_AUTOCONF | UPF_SKIP_TEST |
UPF_IOREMAP, UPF_IOREMAP,
.iotype = XCHAL_HAVE_BE ? UPIO_MEM32BE : UPIO_MEM32, .iotype = XCHAL_HAVE_BE ? UPIO_MEM32BE : UPIO_MEM32,
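
These resource hunks apply the XTENSA_PIC_LINUX_IRQ() mapping introduced in the variant header above: Linux IRQ 0 is reserved as "no interrupt", so hardware interrupt numbers handed to drivers are shifted up by one, and NR_IRQS grows by one to make room. A trivial sketch with illustrative counts:

#include <stdio.h>

#define XTENSA_NR_IRQS 32			/* illustrative */
#define NR_IRQS (XTENSA_NR_IRQS + 1)		/* + 1 for the reserved IRQ 0 */
#define XTENSA_PIC_LINUX_IRQ(hwirq) ((hwirq) + 1)

int main(void)
{
	int oeth_hwirq = 1;	/* stand-in for OETH_IRQ */

	printf("hwirq %d -> Linux IRQ %d (of %d)\n",
	       oeth_hwirq, XTENSA_PIC_LINUX_IRQ(oeth_hwirq), NR_IRQS);
	return 0;
}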

View File

@ -777,24 +777,25 @@ static void blk_free_queue_rcu(struct rcu_head *rcu_head)
} }
 /**
- * blk_release_queue: - release a &struct request_queue when it is no longer needed
- * @kobj: the kobj belonging to the request queue to be released
+ * __blk_release_queue - release a request queue when it is no longer needed
+ * @work: pointer to the release_work member of the request queue to be released
  *
  * Description:
- *     blk_release_queue is the pair to blk_init_queue() or
- *     blk_queue_make_request(). It should be called when a request queue is
- *     being released; typically when a block device is being de-registered.
- *     Currently, its primary task it to free all the &struct request
- *     structures that were allocated to the queue and the queue itself.
+ *     blk_release_queue is the counterpart of blk_init_queue(). It should be
+ *     called when a request queue is being released; typically when a block
+ *     device is being de-registered. Its primary task it to free the queue
+ *     itself.
  *
- * Note:
+ * Notes:
  *     The low level driver must have finished any outstanding requests first
  *     via blk_cleanup_queue().
- **/
-static void blk_release_queue(struct kobject *kobj)
+ *
+ *     Although blk_release_queue() may be called with preemption disabled,
+ *     __blk_release_queue() may sleep.
+ */
+static void __blk_release_queue(struct work_struct *work)
 {
-	struct request_queue *q =
-		container_of(kobj, struct request_queue, kobj);
+	struct request_queue *q = container_of(work, typeof(*q), release_work);
if (test_bit(QUEUE_FLAG_POLL_STATS, &q->queue_flags)) if (test_bit(QUEUE_FLAG_POLL_STATS, &q->queue_flags))
blk_stat_remove_callback(q, q->poll_cb); blk_stat_remove_callback(q, q->poll_cb);
@ -834,6 +835,15 @@ static void blk_release_queue(struct kobject *kobj)
call_rcu(&q->rcu_head, blk_free_queue_rcu); call_rcu(&q->rcu_head, blk_free_queue_rcu);
} }
static void blk_release_queue(struct kobject *kobj)
{
struct request_queue *q =
container_of(kobj, struct request_queue, kobj);
INIT_WORK(&q->release_work, __blk_release_queue);
schedule_work(&q->release_work);
}
static const struct sysfs_ops queue_sysfs_ops = { static const struct sysfs_ops queue_sysfs_ops = {
.show = queue_attr_show, .show = queue_attr_show,
.store = queue_attr_store, .store = queue_attr_store,
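
A compact user-space model of the deferral pattern introduced here: the kobject release callback only queues work, and the worker recovers the enclosing queue with container_of() before doing the (possibly sleeping) teardown. The work_struct and the "scheduling" below are simplified stand-ins, not the kernel's workqueue API:

#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct work_struct {
	void (*func)(struct work_struct *work);
};

struct request_queue_model {
	int id;
	struct work_struct release_work;
};

static void __release_queue(struct work_struct *work)
{
	/* Recover the queue that embeds this work item. */
	struct request_queue_model *q =
		container_of(work, struct request_queue_model, release_work);

	printf("releasing queue %d (may sleep here)\n", q->id);
	free(q);
}

/* Stand-in for the kobject release callback: must not sleep, so defer. */
static void release_queue(struct request_queue_model *q)
{
	q->release_work.func = __release_queue;
	q->release_work.func(&q->release_work);	/* "schedule" the work */
}

int main(void)
{
	struct request_queue_model *q = malloc(sizeof(*q));

	if (!q)
		return 1;
	q->id = 42;
	release_queue(q);
	return 0;
}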

View File

@ -416,9 +416,18 @@ acpi_tb_get_table(struct acpi_table_desc *table_desc,
} }
} }
-	table_desc->validation_count++;
-	if (table_desc->validation_count == 0) {
-		table_desc->validation_count--;
+	if (table_desc->validation_count < ACPI_MAX_TABLE_VALIDATIONS) {
+		table_desc->validation_count++;
+		/*
+		 * Detect validation_count overflows to ensure that the warning
+		 * message will only be printed once.
+		 */
+		if (table_desc->validation_count >= ACPI_MAX_TABLE_VALIDATIONS) {
+			ACPI_WARNING((AE_INFO,
+				      "Table %p, Validation count overflows\n",
+				      table_desc));
+		}
 	}
*out_table = table_desc->pointer; *out_table = table_desc->pointer;
@ -445,13 +454,20 @@ void acpi_tb_put_table(struct acpi_table_desc *table_desc)
ACPI_FUNCTION_TRACE(acpi_tb_put_table); ACPI_FUNCTION_TRACE(acpi_tb_put_table);
-	if (table_desc->validation_count == 0) {
-		ACPI_WARNING((AE_INFO,
-			      "Table %p, Validation count is zero before decrement\n",
-			      table_desc));
-		return_VOID;
+	if (table_desc->validation_count < ACPI_MAX_TABLE_VALIDATIONS) {
+		table_desc->validation_count--;
+		/*
+		 * Detect validation_count underflows to ensure that the warning
+		 * message will only be printed once.
+		 */
+		if (table_desc->validation_count >= ACPI_MAX_TABLE_VALIDATIONS) {
+			ACPI_WARNING((AE_INFO,
+				      "Table %p, Validation count underflows\n",
+				      table_desc));
+			return_VOID;
+		}
 	}
-	table_desc->validation_count--;
if (table_desc->validation_count == 0) { if (table_desc->validation_count == 0) {
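
A user-space sketch of the saturating counter introduced above: once the count hits the limit (or wraps below zero) it warns once and then stays pinned, so the warning cannot flood the log. The 0xFF limit and the types here are stand-ins for ACPICA's ACPI_MAX_TABLE_VALIDATIONS and validation_count:

#include <stdio.h>

#define MAX_VALIDATIONS 0xFFu	/* stand-in for ACPI_MAX_TABLE_VALIDATIONS */

struct table_desc_model {
	unsigned int validation_count;
};

static void table_get(struct table_desc_model *t)
{
	if (t->validation_count < MAX_VALIDATIONS) {
		t->validation_count++;
		/* Warn exactly once, when the counter first saturates. */
		if (t->validation_count >= MAX_VALIDATIONS)
			fprintf(stderr, "validation count overflows\n");
	}
}

static void table_put(struct table_desc_model *t)
{
	if (t->validation_count < MAX_VALIDATIONS) {
		t->validation_count--;
		/* Unsigned wrap-around below zero trips the same check once. */
		if (t->validation_count >= MAX_VALIDATIONS)
			fprintf(stderr, "validation count underflows\n");
	}
}

int main(void)
{
	struct table_desc_model t = { 0 };

	table_get(&t);	/* 1 */
	table_put(&t);	/* back to 0 */
	table_put(&t);	/* underflow: warns once, then the count is pinned */
	table_put(&t);	/* pinned, no further warning */
	printf("final count: %u\n", t.validation_count);
	return 0;
}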

View File

@ -474,15 +474,6 @@ acpi_ut_walk_aml_resources(struct acpi_walk_state *walk_state,
return_ACPI_STATUS(AE_AML_NO_RESOURCE_END_TAG); return_ACPI_STATUS(AE_AML_NO_RESOURCE_END_TAG);
} }
/*
* The end_tag opcode must be followed by a zero byte.
* Although this byte is technically defined to be a checksum,
* in practice, all ASL compilers set this byte to zero.
*/
if (*(aml + 1) != 0) {
return_ACPI_STATUS(AE_AML_NO_RESOURCE_END_TAG);
}
/* Return the pointer to the end_tag if requested */ /* Return the pointer to the end_tag if requested */
if (!user_function) { if (!user_function) {

View File

@ -185,8 +185,8 @@ static ssize_t store_down_threshold(struct gov_attr_set *attr_set,
int ret; int ret;
ret = sscanf(buf, "%u", &input); ret = sscanf(buf, "%u", &input);
/* cannot be lower than 11 otherwise freq will not fall */ /* cannot be lower than 1 otherwise freq will not fall */
if (ret != 1 || input < 11 || input > 100 || if (ret != 1 || input < 1 || input > 100 ||
input >= dbs_data->up_threshold) input >= dbs_data->up_threshold)
return -EINVAL; return -EINVAL;
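
A minimal sketch of the relaxed validation: down_threshold may now be as low as 1%, but must still stay below up_threshold. Only the range check from this hunk is modelled; the names and sample values are illustrative.

#include <stdio.h>

static int validate_down_threshold(unsigned int input, unsigned int up_threshold)
{
	/* 1..100, and strictly below up_threshold, mirrors the new check. */
	if (input < 1 || input > 100 || input >= up_threshold)
		return -1;
	return 0;
}

int main(void)
{
	printf("%d\n", validate_down_threshold(5, 80));		/* now accepted */
	printf("%d\n", validate_down_threshold(90, 80));	/* still rejected */
	return 0;
}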

View File

@ -180,8 +180,10 @@ int dt_init_idle_driver(struct cpuidle_driver *drv,
if (!state_node) if (!state_node)
break; break;
if (!of_device_is_available(state_node)) if (!of_device_is_available(state_node)) {
of_node_put(state_node);
continue; continue;
}
if (!idle_state_valid(state_node, i, cpumask)) { if (!idle_state_valid(state_node, i, cpumask)) {
pr_warn("%s idle state not valid, bailing out\n", pr_warn("%s idle state not valid, bailing out\n",
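
A user-space model of the leak this cpuidle-dt fix closes: a reference taken at the top of the loop must be dropped on the early continue as well as on the normal path. The refcount below is a plain integer standing in for the of_node kref:

#include <stdio.h>

struct node_model {
	int refcount;
	int available;
};

static struct node_model *node_get(struct node_model *n) { n->refcount++; return n; }
static void node_put(struct node_model *n) { n->refcount--; }

int main(void)
{
	struct node_model states[3] = {
		{ .available = 1 }, { .available = 0 }, { .available = 1 },
	};

	for (int i = 0; i < 3; i++) {
		struct node_model *n = node_get(&states[i]);

		if (!n->available) {
			node_put(n);	/* without this, the reference would leak */
			continue;
		}
		/* ... validate and register the idle state ... */
		node_put(n);
	}

	for (int i = 0; i < 3; i++)
		printf("state %d refcount %d\n", i, states[i].refcount);
	return 0;
}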

View File

@ -267,7 +267,11 @@ static int exynos_nocp_probe(struct platform_device *pdev)
} }
platform_set_drvdata(pdev, nocp); platform_set_drvdata(pdev, nocp);
clk_prepare_enable(nocp->clk); ret = clk_prepare_enable(nocp->clk);
if (ret) {
dev_err(&pdev->dev, "failed to prepare ppmu clock\n");
return ret;
}
pr_info("exynos-nocp: new NoC Probe device registered: %s\n", pr_info("exynos-nocp: new NoC Probe device registered: %s\n",
dev_name(dev)); dev_name(dev));

View File

@ -44,7 +44,7 @@ struct exynos_ppmu {
{ "ppmu-event2-"#name, PPMU_PMNCNT2 }, \ { "ppmu-event2-"#name, PPMU_PMNCNT2 }, \
{ "ppmu-event3-"#name, PPMU_PMNCNT3 } { "ppmu-event3-"#name, PPMU_PMNCNT3 }
struct __exynos_ppmu_events { static struct __exynos_ppmu_events {
char *name; char *name;
int id; int id;
} ppmu_events[] = { } ppmu_events[] = {
@ -648,7 +648,11 @@ static int exynos_ppmu_probe(struct platform_device *pdev)
dev_name(&pdev->dev), desc[i].name); dev_name(&pdev->dev), desc[i].name);
} }
clk_prepare_enable(info->ppmu.clk); ret = clk_prepare_enable(info->ppmu.clk);
if (ret) {
dev_err(&pdev->dev, "failed to prepare ppmu clock\n");
return ret;
}
return 0; return 0;
} }

View File

@ -47,7 +47,7 @@ DEFINE_DMI_ATTR_WITH_SHOW(product_name, 0444, DMI_PRODUCT_NAME);
DEFINE_DMI_ATTR_WITH_SHOW(product_version, 0444, DMI_PRODUCT_VERSION); DEFINE_DMI_ATTR_WITH_SHOW(product_version, 0444, DMI_PRODUCT_VERSION);
DEFINE_DMI_ATTR_WITH_SHOW(product_serial, 0400, DMI_PRODUCT_SERIAL); DEFINE_DMI_ATTR_WITH_SHOW(product_serial, 0400, DMI_PRODUCT_SERIAL);
DEFINE_DMI_ATTR_WITH_SHOW(product_uuid, 0400, DMI_PRODUCT_UUID); DEFINE_DMI_ATTR_WITH_SHOW(product_uuid, 0400, DMI_PRODUCT_UUID);
DEFINE_DMI_ATTR_WITH_SHOW(product_family, 0400, DMI_PRODUCT_FAMILY); DEFINE_DMI_ATTR_WITH_SHOW(product_family, 0444, DMI_PRODUCT_FAMILY);
DEFINE_DMI_ATTR_WITH_SHOW(board_vendor, 0444, DMI_BOARD_VENDOR); DEFINE_DMI_ATTR_WITH_SHOW(board_vendor, 0444, DMI_BOARD_VENDOR);
DEFINE_DMI_ATTR_WITH_SHOW(board_name, 0444, DMI_BOARD_NAME); DEFINE_DMI_ATTR_WITH_SHOW(board_name, 0444, DMI_BOARD_NAME);
DEFINE_DMI_ATTR_WITH_SHOW(board_version, 0444, DMI_BOARD_VERSION); DEFINE_DMI_ATTR_WITH_SHOW(board_version, 0444, DMI_BOARD_VERSION);
@ -192,7 +192,7 @@ static void __init dmi_id_init_attr_table(void)
ADD_DMI_ATTR(product_version, DMI_PRODUCT_VERSION); ADD_DMI_ATTR(product_version, DMI_PRODUCT_VERSION);
ADD_DMI_ATTR(product_serial, DMI_PRODUCT_SERIAL); ADD_DMI_ATTR(product_serial, DMI_PRODUCT_SERIAL);
ADD_DMI_ATTR(product_uuid, DMI_PRODUCT_UUID); ADD_DMI_ATTR(product_uuid, DMI_PRODUCT_UUID);
ADD_DMI_ATTR(product_family, DMI_PRODUCT_FAMILY); ADD_DMI_ATTR(product_family, DMI_PRODUCT_FAMILY);
ADD_DMI_ATTR(board_vendor, DMI_BOARD_VENDOR); ADD_DMI_ATTR(board_vendor, DMI_BOARD_VENDOR);
ADD_DMI_ATTR(board_name, DMI_BOARD_NAME); ADD_DMI_ATTR(board_name, DMI_BOARD_NAME);
ADD_DMI_ATTR(board_version, DMI_BOARD_VERSION); ADD_DMI_ATTR(board_version, DMI_BOARD_VERSION);

View File

@ -144,7 +144,7 @@ static int __init dmi_walk_early(void (*decode)(const struct dmi_header *,
buf = dmi_early_remap(dmi_base, orig_dmi_len); buf = dmi_early_remap(dmi_base, orig_dmi_len);
if (buf == NULL) if (buf == NULL)
return -1; return -ENOMEM;
dmi_decode_table(buf, decode, NULL); dmi_decode_table(buf, decode, NULL);
@ -178,7 +178,7 @@ static void __init dmi_save_ident(const struct dmi_header *dm, int slot,
const char *d = (const char *) dm; const char *d = (const char *) dm;
const char *p; const char *p;
if (dmi_ident[slot]) if (dmi_ident[slot] || dm->length <= string)
return; return;
p = dmi_string(dm, d[string]); p = dmi_string(dm, d[string]);
@ -191,13 +191,14 @@ static void __init dmi_save_ident(const struct dmi_header *dm, int slot,
static void __init dmi_save_uuid(const struct dmi_header *dm, int slot, static void __init dmi_save_uuid(const struct dmi_header *dm, int slot,
int index) int index)
{ {
const u8 *d = (u8 *) dm + index; const u8 *d;
char *s; char *s;
int is_ff = 1, is_00 = 1, i; int is_ff = 1, is_00 = 1, i;
if (dmi_ident[slot]) if (dmi_ident[slot] || dm->length <= index + 16)
return; return;
d = (u8 *) dm + index;
for (i = 0; i < 16 && (is_ff || is_00); i++) { for (i = 0; i < 16 && (is_ff || is_00); i++) {
if (d[i] != 0x00) if (d[i] != 0x00)
is_00 = 0; is_00 = 0;
@ -228,16 +229,17 @@ static void __init dmi_save_uuid(const struct dmi_header *dm, int slot,
static void __init dmi_save_type(const struct dmi_header *dm, int slot, static void __init dmi_save_type(const struct dmi_header *dm, int slot,
int index) int index)
{ {
const u8 *d = (u8 *) dm + index; const u8 *d;
char *s; char *s;
if (dmi_ident[slot]) if (dmi_ident[slot] || dm->length <= index)
return; return;
s = dmi_alloc(4); s = dmi_alloc(4);
if (!s) if (!s)
return; return;
d = (u8 *) dm + index;
sprintf(s, "%u", *d & 0x7F); sprintf(s, "%u", *d & 0x7F);
dmi_ident[slot] = s; dmi_ident[slot] = s;
} }
@ -278,9 +280,13 @@ static void __init dmi_save_devices(const struct dmi_header *dm)
static void __init dmi_save_oem_strings_devices(const struct dmi_header *dm) static void __init dmi_save_oem_strings_devices(const struct dmi_header *dm)
{ {
int i, count = *(u8 *)(dm + 1); int i, count;
struct dmi_device *dev; struct dmi_device *dev;
if (dm->length < 0x05)
return;
count = *(u8 *)(dm + 1);
for (i = 1; i <= count; i++) { for (i = 1; i <= count; i++) {
const char *devname = dmi_string(dm, i); const char *devname = dmi_string(dm, i);
@ -353,6 +359,9 @@ static void __init dmi_save_extended_devices(const struct dmi_header *dm)
const char *name; const char *name;
const u8 *d = (u8 *)dm; const u8 *d = (u8 *)dm;
if (dm->length < 0x0B)
return;
/* Skip disabled device */ /* Skip disabled device */
if ((d[0x5] & 0x80) == 0) if ((d[0x5] & 0x80) == 0)
return; return;
@ -387,7 +396,7 @@ static void __init save_mem_devices(const struct dmi_header *dm, void *v)
const char *d = (const char *)dm; const char *d = (const char *)dm;
static int nr; static int nr;
if (dm->type != DMI_ENTRY_MEM_DEVICE) if (dm->type != DMI_ENTRY_MEM_DEVICE || dm->length < 0x12)
return; return;
if (nr >= dmi_memdev_nr) { if (nr >= dmi_memdev_nr) {
pr_warn(FW_BUG "Too many DIMM entries in SMBIOS table\n"); pr_warn(FW_BUG "Too many DIMM entries in SMBIOS table\n");
@ -649,6 +658,21 @@ void __init dmi_scan_machine(void)
if (p == NULL) if (p == NULL)
goto error; goto error;
/*
* Same logic as above, look for a 64-bit entry point
* first, and if not found, fall back to 32-bit entry point.
*/
memcpy_fromio(buf, p, 16);
for (q = p + 16; q < p + 0x10000; q += 16) {
memcpy_fromio(buf + 16, q, 16);
if (!dmi_smbios3_present(buf)) {
dmi_available = 1;
dmi_early_unmap(p, 0x10000);
goto out;
}
memcpy(buf, buf + 16, 16);
}
/* /*
* Iterate over all possible DMI header addresses q. * Iterate over all possible DMI header addresses q.
* Maintain the 32 bytes around q in buf. On the * Maintain the 32 bytes around q in buf. On the
@ -659,7 +683,7 @@ void __init dmi_scan_machine(void)
memset(buf, 0, 16); memset(buf, 0, 16);
for (q = p; q < p + 0x10000; q += 16) { for (q = p; q < p + 0x10000; q += 16) {
memcpy_fromio(buf + 16, q, 16); memcpy_fromio(buf + 16, q, 16);
if (!dmi_smbios3_present(buf) || !dmi_present(buf)) { if (!dmi_present(buf)) {
dmi_available = 1; dmi_available = 1;
dmi_early_unmap(p, 0x10000); dmi_early_unmap(p, 0x10000);
goto out; goto out;
@ -993,7 +1017,8 @@ EXPORT_SYMBOL(dmi_get_date);
* @decode: Callback function * @decode: Callback function
* @private_data: Private data to be passed to the callback function * @private_data: Private data to be passed to the callback function
* *
* Returns -1 when the DMI table can't be reached, 0 on success. * Returns 0 on success, -ENXIO if DMI is not selected or not present,
* or a different negative error code if DMI walking fails.
*/ */
int dmi_walk(void (*decode)(const struct dmi_header *, void *), int dmi_walk(void (*decode)(const struct dmi_header *, void *),
void *private_data) void *private_data)
@ -1001,11 +1026,11 @@ int dmi_walk(void (*decode)(const struct dmi_header *, void *),
u8 *buf; u8 *buf;
if (!dmi_available) if (!dmi_available)
return -1; return -ENXIO;
buf = dmi_remap(dmi_base, dmi_len); buf = dmi_remap(dmi_base, dmi_len);
if (buf == NULL) if (buf == NULL)
return -1; return -ENOMEM;
dmi_decode_table(buf, decode, private_data); dmi_decode_table(buf, decode, private_data);
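
The dmi_scan changes above all follow one rule: check the structure's declared length before dereferencing an offset or string index inside it, instead of trusting firmware. A user-space sketch of that bounds check, with a deliberately simplified header layout:

#include <stdint.h>
#include <stdio.h>

struct dmi_header_model {
	uint8_t type;
	uint8_t length;		/* length of the formatted area */
};

static const uint8_t *dmi_field(const struct dmi_header_model *dm, unsigned int offset)
{
	/* Reject offsets beyond the formatted area instead of reading junk. */
	if (dm->length <= offset)
		return NULL;
	return (const uint8_t *)dm + offset;
}

int main(void)
{
	/* A truncated 8-byte structure followed by padding. */
	uint8_t raw[16] = { 0x01, 0x08, 0, 0, 0x42, 0, 0, 0 };
	const struct dmi_header_model *dm = (const struct dmi_header_model *)raw;

	const uint8_t *ok  = dmi_field(dm, 4);	/* inside the structure */
	const uint8_t *bad = dmi_field(dm, 12);	/* beyond dm->length */

	printf("offset 4: %s (0x%02x)\n", ok ? "valid" : "rejected", ok ? *ok : 0);
	printf("offset 12: %s\n", bad ? "valid" : "rejected");
	return 0;
}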

View File

@ -1207,8 +1207,11 @@ static void dce_v10_0_program_watermarks(struct amdgpu_device *adev,
u32 tmp, wm_mask, lb_vblank_lead_lines = 0; u32 tmp, wm_mask, lb_vblank_lead_lines = 0;
if (amdgpu_crtc->base.enabled && num_heads && mode) { if (amdgpu_crtc->base.enabled && num_heads && mode) {
-		active_time = 1000000UL * (u32)mode->crtc_hdisplay / (u32)mode->clock;
-		line_time = min((u32) (1000000UL * (u32)mode->crtc_htotal / (u32)mode->clock), (u32)65535);
+		active_time = (u32) div_u64((u64)mode->crtc_hdisplay * 1000000,
+					    (u32)mode->clock);
+		line_time = (u32) div_u64((u64)mode->crtc_htotal * 1000000,
+					  (u32)mode->clock);
+		line_time = min(line_time, (u32)65535);
/* watermark for high clocks */ /* watermark for high clocks */
if (adev->pm.dpm_enabled) { if (adev->pm.dpm_enabled) {
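
This hunk, and the identical ones in the other amdgpu DCE and radeon display files below, moves the line-time math to a 64-bit intermediate before dividing. A user-space sketch of why: for large modes, htotal * 1000000 no longer fits in 32 bits. The types, the helper name and the sample 4K timing values are illustrative, with plain 64-bit division standing in for div_u64():

#include <stdint.h>
#include <stdio.h>

static uint32_t compute_line_time(uint32_t crtc_htotal, uint32_t clock_khz)
{
	/* Promote before multiplying so the product cannot wrap. */
	uint64_t t = (uint64_t)crtc_htotal * 1000000ULL;

	t /= clock_khz;			/* stand-in for div_u64() */
	return t > 65535 ? 65535 : (uint32_t)t;
}

int main(void)
{
	uint32_t htotal = 4400;		/* e.g. a 4K timing */
	uint32_t clock  = 594000;	/* kHz */

	/* 4400 * 1000000 = 4.4e9, which overflows a u32 (max ~4.29e9). */
	printf("line time (scaled): %u\n", compute_line_time(htotal, clock));
	return 0;
}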

View File

@ -1176,8 +1176,11 @@ static void dce_v11_0_program_watermarks(struct amdgpu_device *adev,
u32 tmp, wm_mask, lb_vblank_lead_lines = 0; u32 tmp, wm_mask, lb_vblank_lead_lines = 0;
if (amdgpu_crtc->base.enabled && num_heads && mode) { if (amdgpu_crtc->base.enabled && num_heads && mode) {
-		active_time = 1000000UL * (u32)mode->crtc_hdisplay / (u32)mode->clock;
-		line_time = min((u32) (1000000UL * (u32)mode->crtc_htotal / (u32)mode->clock), (u32)65535);
+		active_time = (u32) div_u64((u64)mode->crtc_hdisplay * 1000000,
+					    (u32)mode->clock);
+		line_time = (u32) div_u64((u64)mode->crtc_htotal * 1000000,
+					  (u32)mode->clock);
+		line_time = min(line_time, (u32)65535);
/* watermark for high clocks */ /* watermark for high clocks */
if (adev->pm.dpm_enabled) { if (adev->pm.dpm_enabled) {

View File

@ -983,8 +983,11 @@ static void dce_v6_0_program_watermarks(struct amdgpu_device *adev,
fixed20_12 a, b, c; fixed20_12 a, b, c;
if (amdgpu_crtc->base.enabled && num_heads && mode) { if (amdgpu_crtc->base.enabled && num_heads && mode) {
-		active_time = 1000000UL * (u32)mode->crtc_hdisplay / (u32)mode->clock;
-		line_time = min((u32) (1000000UL * (u32)mode->crtc_htotal / (u32)mode->clock), (u32)65535);
+		active_time = (u32) div_u64((u64)mode->crtc_hdisplay * 1000000,
+					    (u32)mode->clock);
+		line_time = (u32) div_u64((u64)mode->crtc_htotal * 1000000,
+					  (u32)mode->clock);
+		line_time = min(line_time, (u32)65535);
priority_a_cnt = 0; priority_a_cnt = 0;
priority_b_cnt = 0; priority_b_cnt = 0;

View File

@ -1091,8 +1091,11 @@ static void dce_v8_0_program_watermarks(struct amdgpu_device *adev,
u32 tmp, wm_mask, lb_vblank_lead_lines = 0; u32 tmp, wm_mask, lb_vblank_lead_lines = 0;
if (amdgpu_crtc->base.enabled && num_heads && mode) { if (amdgpu_crtc->base.enabled && num_heads && mode) {
-		active_time = 1000000UL * (u32)mode->crtc_hdisplay / (u32)mode->clock;
-		line_time = min((u32) (1000000UL * (u32)mode->crtc_htotal / (u32)mode->clock), (u32)65535);
+		active_time = (u32) div_u64((u64)mode->crtc_hdisplay * 1000000,
+					    (u32)mode->clock);
+		line_time = (u32) div_u64((u64)mode->crtc_htotal * 1000000,
+					  (u32)mode->clock);
+		line_time = min(line_time, (u32)65535);
/* watermark for high clocks */ /* watermark for high clocks */
if (adev->pm.dpm_enabled) { if (adev->pm.dpm_enabled) {

View File

@ -1,6 +1,7 @@
config DRM_DW_HDMI config DRM_DW_HDMI
tristate tristate
select DRM_KMS_HELPER select DRM_KMS_HELPER
select REGMAP_MMIO
config DRM_DW_HDMI_AHB_AUDIO config DRM_DW_HDMI_AHB_AUDIO
tristate "Synopsys Designware AHB Audio interface" tristate "Synopsys Designware AHB Audio interface"

View File

@ -36,10 +36,6 @@
#define VGT_VERSION_MAJOR 1 #define VGT_VERSION_MAJOR 1
#define VGT_VERSION_MINOR 0 #define VGT_VERSION_MINOR 0
#define INTEL_VGT_IF_VERSION_ENCODE(major, minor) ((major) << 16 | (minor))
#define INTEL_VGT_IF_VERSION \
INTEL_VGT_IF_VERSION_ENCODE(VGT_VERSION_MAJOR, VGT_VERSION_MINOR)
/* /*
* notifications from guest to vgpu device model * notifications from guest to vgpu device model
*/ */
@ -55,8 +51,8 @@ enum vgt_g2v_type {
struct vgt_if { struct vgt_if {
u64 magic; /* VGT_MAGIC */ u64 magic; /* VGT_MAGIC */
uint16_t version_major; u16 version_major;
uint16_t version_minor; u16 version_minor;
u32 vgt_id; /* ID of vGT instance */ u32 vgt_id; /* ID of vGT instance */
u32 rsv1[12]; /* pad to offset 0x40 */ u32 rsv1[12]; /* pad to offset 0x40 */
/* /*

View File

@ -60,8 +60,8 @@
*/ */
void i915_check_vgpu(struct drm_i915_private *dev_priv) void i915_check_vgpu(struct drm_i915_private *dev_priv)
{ {
uint64_t magic; u64 magic;
uint32_t version; u16 version_major;
BUILD_BUG_ON(sizeof(struct vgt_if) != VGT_PVINFO_SIZE); BUILD_BUG_ON(sizeof(struct vgt_if) != VGT_PVINFO_SIZE);
@ -69,10 +69,8 @@ void i915_check_vgpu(struct drm_i915_private *dev_priv)
if (magic != VGT_MAGIC) if (magic != VGT_MAGIC)
return; return;
-	version = INTEL_VGT_IF_VERSION_ENCODE(
-		__raw_i915_read16(dev_priv, vgtif_reg(version_major)),
-		__raw_i915_read16(dev_priv, vgtif_reg(version_minor)));
-	if (version != INTEL_VGT_IF_VERSION) {
+	version_major = __raw_i915_read16(dev_priv, vgtif_reg(version_major));
+	if (version_major < VGT_VERSION_MAJOR) {
DRM_INFO("VGT interface version mismatch!\n"); DRM_INFO("VGT interface version mismatch!\n");
return; return;
} }

View File

@ -4598,7 +4598,7 @@ static void cpt_verify_modeset(struct drm_device *dev, int pipe)
static int static int
skl_update_scaler(struct intel_crtc_state *crtc_state, bool force_detach, skl_update_scaler(struct intel_crtc_state *crtc_state, bool force_detach,
unsigned scaler_user, int *scaler_id, unsigned int rotation, unsigned int scaler_user, int *scaler_id,
int src_w, int src_h, int dst_w, int dst_h) int src_w, int src_h, int dst_w, int dst_h)
{ {
struct intel_crtc_scaler_state *scaler_state = struct intel_crtc_scaler_state *scaler_state =
@ -4607,9 +4607,12 @@ skl_update_scaler(struct intel_crtc_state *crtc_state, bool force_detach,
to_intel_crtc(crtc_state->base.crtc); to_intel_crtc(crtc_state->base.crtc);
int need_scaling; int need_scaling;
-	need_scaling = drm_rotation_90_or_270(rotation) ?
-		(src_h != dst_w || src_w != dst_h):
-		(src_w != dst_w || src_h != dst_h);
+	/*
+	 * Src coordinates are already rotated by 270 degrees for
+	 * the 90/270 degree plane rotation cases (to match the
+	 * GTT mapping), hence no need to account for rotation here.
+	 */
+	need_scaling = src_w != dst_w || src_h != dst_h;
/* /*
* if plane is being disabled or scaler is no more required or force detach * if plane is being disabled or scaler is no more required or force detach
@ -4671,7 +4674,7 @@ int skl_update_scaler_crtc(struct intel_crtc_state *state)
const struct drm_display_mode *adjusted_mode = &state->base.adjusted_mode; const struct drm_display_mode *adjusted_mode = &state->base.adjusted_mode;
return skl_update_scaler(state, !state->base.active, SKL_CRTC_INDEX, return skl_update_scaler(state, !state->base.active, SKL_CRTC_INDEX,
&state->scaler_state.scaler_id, DRM_ROTATE_0, &state->scaler_state.scaler_id,
state->pipe_src_w, state->pipe_src_h, state->pipe_src_w, state->pipe_src_h,
adjusted_mode->crtc_hdisplay, adjusted_mode->crtc_vdisplay); adjusted_mode->crtc_hdisplay, adjusted_mode->crtc_vdisplay);
} }
@ -4700,7 +4703,6 @@ static int skl_update_scaler_plane(struct intel_crtc_state *crtc_state,
ret = skl_update_scaler(crtc_state, force_detach, ret = skl_update_scaler(crtc_state, force_detach,
drm_plane_index(&intel_plane->base), drm_plane_index(&intel_plane->base),
&plane_state->scaler_id, &plane_state->scaler_id,
plane_state->base.rotation,
drm_rect_width(&plane_state->base.src) >> 16, drm_rect_width(&plane_state->base.src) >> 16,
drm_rect_height(&plane_state->base.src) >> 16, drm_rect_height(&plane_state->base.src) >> 16,
drm_rect_width(&plane_state->base.dst), drm_rect_width(&plane_state->base.dst),

View File

@ -3373,20 +3373,26 @@ skl_plane_downscale_amount(const struct intel_crtc_state *cstate,
/* n.b., src is 16.16 fixed point, dst is whole integer */ /* n.b., src is 16.16 fixed point, dst is whole integer */
if (plane->id == PLANE_CURSOR) { if (plane->id == PLANE_CURSOR) {
/*
* Cursors only support 0/180 degree rotation,
* hence no need to account for rotation here.
*/
src_w = pstate->base.src_w; src_w = pstate->base.src_w;
src_h = pstate->base.src_h; src_h = pstate->base.src_h;
dst_w = pstate->base.crtc_w; dst_w = pstate->base.crtc_w;
dst_h = pstate->base.crtc_h; dst_h = pstate->base.crtc_h;
} else { } else {
/*
* Src coordinates are already rotated by 270 degrees for
* the 90/270 degree plane rotation cases (to match the
* GTT mapping), hence no need to account for rotation here.
*/
src_w = drm_rect_width(&pstate->base.src); src_w = drm_rect_width(&pstate->base.src);
src_h = drm_rect_height(&pstate->base.src); src_h = drm_rect_height(&pstate->base.src);
dst_w = drm_rect_width(&pstate->base.dst); dst_w = drm_rect_width(&pstate->base.dst);
dst_h = drm_rect_height(&pstate->base.dst); dst_h = drm_rect_height(&pstate->base.dst);
} }
if (drm_rotation_90_or_270(pstate->base.rotation))
swap(dst_w, dst_h);
downscale_h = max(src_h / dst_h, (uint32_t)DRM_PLANE_HELPER_NO_SCALING); downscale_h = max(src_h / dst_h, (uint32_t)DRM_PLANE_HELPER_NO_SCALING);
downscale_w = max(src_w / dst_w, (uint32_t)DRM_PLANE_HELPER_NO_SCALING); downscale_w = max(src_w / dst_w, (uint32_t)DRM_PLANE_HELPER_NO_SCALING);
@ -3417,12 +3423,14 @@ skl_plane_relative_data_rate(const struct intel_crtc_state *cstate,
if (y && format != DRM_FORMAT_NV12) if (y && format != DRM_FORMAT_NV12)
return 0; return 0;
/*
* Src coordinates are already rotated by 270 degrees for
* the 90/270 degree plane rotation cases (to match the
* GTT mapping), hence no need to account for rotation here.
*/
width = drm_rect_width(&intel_pstate->base.src) >> 16; width = drm_rect_width(&intel_pstate->base.src) >> 16;
height = drm_rect_height(&intel_pstate->base.src) >> 16; height = drm_rect_height(&intel_pstate->base.src) >> 16;
if (drm_rotation_90_or_270(pstate->rotation))
swap(width, height);
/* for planar format */ /* for planar format */
if (format == DRM_FORMAT_NV12) { if (format == DRM_FORMAT_NV12) {
if (y) /* y-plane data rate */ if (y) /* y-plane data rate */
@ -3505,12 +3513,14 @@ skl_ddb_min_alloc(const struct drm_plane_state *pstate,
fb->modifier != I915_FORMAT_MOD_Yf_TILED) fb->modifier != I915_FORMAT_MOD_Yf_TILED)
return 8; return 8;
/*
* Src coordinates are already rotated by 270 degrees for
* the 90/270 degree plane rotation cases (to match the
* GTT mapping), hence no need to account for rotation here.
*/
src_w = drm_rect_width(&intel_pstate->base.src) >> 16; src_w = drm_rect_width(&intel_pstate->base.src) >> 16;
src_h = drm_rect_height(&intel_pstate->base.src) >> 16; src_h = drm_rect_height(&intel_pstate->base.src) >> 16;
if (drm_rotation_90_or_270(pstate->rotation))
swap(src_w, src_h);
/* Halve UV plane width and height for NV12 */ /* Halve UV plane width and height for NV12 */
if (fb->format->format == DRM_FORMAT_NV12 && !y) { if (fb->format->format == DRM_FORMAT_NV12 && !y) {
src_w /= 2; src_w /= 2;
@ -3794,13 +3804,15 @@ static int skl_compute_plane_wm(const struct drm_i915_private *dev_priv,
width = intel_pstate->base.crtc_w; width = intel_pstate->base.crtc_w;
height = intel_pstate->base.crtc_h; height = intel_pstate->base.crtc_h;
} else { } else {
/*
* Src coordinates are already rotated by 270 degrees for
* the 90/270 degree plane rotation cases (to match the
* GTT mapping), hence no need to account for rotation here.
*/
width = drm_rect_width(&intel_pstate->base.src) >> 16; width = drm_rect_width(&intel_pstate->base.src) >> 16;
height = drm_rect_height(&intel_pstate->base.src) >> 16; height = drm_rect_height(&intel_pstate->base.src) >> 16;
} }
if (drm_rotation_90_or_270(pstate->rotation))
swap(width, height);
cpp = fb->format->cpp[0]; cpp = fb->format->cpp[0];
plane_pixel_rate = skl_adjusted_plane_pixel_rate(cstate, intel_pstate); plane_pixel_rate = skl_adjusted_plane_pixel_rate(cstate, intel_pstate);

View File

@ -1173,7 +1173,10 @@ static int mga_crtc_mode_set(struct drm_crtc *crtc,
if (IS_G200_SE(mdev)) { if (IS_G200_SE(mdev)) {
-		if (mdev->unique_rev_id >= 0x02) {
+		if (mdev->unique_rev_id >= 0x04) {
+			WREG8(MGAREG_CRTCEXT_INDEX, 0x06);
+			WREG8(MGAREG_CRTCEXT_DATA, 0);
+		} else if (mdev->unique_rev_id >= 0x02) {
u8 hi_pri_lvl; u8 hi_pri_lvl;
u32 bpp; u32 bpp;
u32 mb; u32 mb;
@ -1639,6 +1642,10 @@ static int mga_vga_mode_valid(struct drm_connector *connector,
if (mga_vga_calculate_mode_bandwidth(mode, bpp) if (mga_vga_calculate_mode_bandwidth(mode, bpp)
> (30100 * 1024)) > (30100 * 1024))
return MODE_BANDWIDTH; return MODE_BANDWIDTH;
} else {
if (mga_vga_calculate_mode_bandwidth(mode, bpp)
> (55000 * 1024))
return MODE_BANDWIDTH;
} }
} else if (mdev->type == G200_WB) { } else if (mdev->type == G200_WB) {
if (mode->hdisplay > 1280) if (mode->hdisplay > 1280)

View File

@ -35,6 +35,13 @@
#include "mxsfb_drv.h" #include "mxsfb_drv.h"
#include "mxsfb_regs.h" #include "mxsfb_regs.h"
#define MXS_SET_ADDR 0x4
#define MXS_CLR_ADDR 0x8
#define MODULE_CLKGATE BIT(30)
#define MODULE_SFTRST BIT(31)
/* 1 second delay should be plenty of time for block reset */
#define RESET_TIMEOUT 1000000
static u32 set_hsync_pulse_width(struct mxsfb_drm_private *mxsfb, u32 val) static u32 set_hsync_pulse_width(struct mxsfb_drm_private *mxsfb, u32 val)
{ {
return (val & mxsfb->devdata->hs_wdth_mask) << return (val & mxsfb->devdata->hs_wdth_mask) <<
@ -159,6 +166,36 @@ static void mxsfb_disable_controller(struct mxsfb_drm_private *mxsfb)
clk_disable_unprepare(mxsfb->clk_disp_axi); clk_disable_unprepare(mxsfb->clk_disp_axi);
} }
/*
* Clear the bit and poll it cleared. This is usually called with
* a reset address and mask being either SFTRST(bit 31) or CLKGATE
* (bit 30).
*/
static int clear_poll_bit(void __iomem *addr, u32 mask)
{
u32 reg;
writel(mask, addr + MXS_CLR_ADDR);
return readl_poll_timeout(addr, reg, !(reg & mask), 0, RESET_TIMEOUT);
}
static int mxsfb_reset_block(void __iomem *reset_addr)
{
int ret;
ret = clear_poll_bit(reset_addr, MODULE_SFTRST);
if (ret)
return ret;
writel(MODULE_CLKGATE, reset_addr + MXS_CLR_ADDR);
ret = clear_poll_bit(reset_addr, MODULE_SFTRST);
if (ret)
return ret;
return clear_poll_bit(reset_addr, MODULE_CLKGATE);
}
static void mxsfb_crtc_mode_set_nofb(struct mxsfb_drm_private *mxsfb) static void mxsfb_crtc_mode_set_nofb(struct mxsfb_drm_private *mxsfb)
{ {
struct drm_display_mode *m = &mxsfb->pipe.crtc.state->adjusted_mode; struct drm_display_mode *m = &mxsfb->pipe.crtc.state->adjusted_mode;
@ -173,6 +210,11 @@ static void mxsfb_crtc_mode_set_nofb(struct mxsfb_drm_private *mxsfb)
*/ */
mxsfb_enable_axi_clk(mxsfb); mxsfb_enable_axi_clk(mxsfb);
/* Mandatory eLCDIF reset as per the Reference Manual */
err = mxsfb_reset_block(mxsfb->base);
if (err)
return;
/* Clear the FIFOs */ /* Clear the FIFOs */
writel(CTRL1_FIFO_CLEAR, mxsfb->base + LCDC_CTRL1 + REG_SET); writel(CTRL1_FIFO_CLEAR, mxsfb->base + LCDC_CTRL1 + REG_SET);
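
A user-space model of the reset sequence added above: clear SFTRST and poll it, ungate the clock, clear SFTRST again, then poll CLKGATE clear. The "register" below is a simulated variable, so the polling succeeds immediately; in the driver the waiting is done by readl_poll_timeout() against real hardware:

#include <stdint.h>
#include <stdio.h>

#define MODULE_SFTRST  (1u << 31)
#define MODULE_CLKGATE (1u << 30)

static uint32_t ctrl_reg = MODULE_SFTRST | MODULE_CLKGATE;

/* Simulated hardware: writes to the CLR address clear bits immediately. */
static void reg_clear(uint32_t mask) { ctrl_reg &= ~mask; }

static int clear_poll_bit(uint32_t mask)
{
	int timeout = 1000;

	reg_clear(mask);
	while ((ctrl_reg & mask) && --timeout)
		;	/* a real driver would use readl_poll_timeout() */
	return timeout ? 0 : -1;
}

static int reset_block(void)
{
	if (clear_poll_bit(MODULE_SFTRST))
		return -1;
	reg_clear(MODULE_CLKGATE);
	if (clear_poll_bit(MODULE_SFTRST))
		return -1;
	return clear_poll_bit(MODULE_CLKGATE);
}

int main(void)
{
	printf("reset %s, ctrl=0x%08x\n",
	       reset_block() ? "failed" : "ok", ctrl_reg);
	return 0;
}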

View File

@ -9267,8 +9267,11 @@ static void dce8_program_watermarks(struct radeon_device *rdev,
u32 tmp, wm_mask; u32 tmp, wm_mask;
if (radeon_crtc->base.enabled && num_heads && mode) { if (radeon_crtc->base.enabled && num_heads && mode) {
-		active_time = 1000000UL * (u32)mode->crtc_hdisplay / (u32)mode->clock;
-		line_time = min((u32) (1000000UL * (u32)mode->crtc_htotal / (u32)mode->clock), (u32)65535);
+		active_time = (u32) div_u64((u64)mode->crtc_hdisplay * 1000000,
+					    (u32)mode->clock);
+		line_time = (u32) div_u64((u64)mode->crtc_htotal * 1000000,
+					  (u32)mode->clock);
+		line_time = min(line_time, (u32)65535);
/* watermark for high clocks */ /* watermark for high clocks */
if ((rdev->pm.pm_method == PM_METHOD_DPM) && if ((rdev->pm.pm_method == PM_METHOD_DPM) &&

View File

@ -2266,8 +2266,11 @@ static void evergreen_program_watermarks(struct radeon_device *rdev,
fixed20_12 a, b, c; fixed20_12 a, b, c;
if (radeon_crtc->base.enabled && num_heads && mode) { if (radeon_crtc->base.enabled && num_heads && mode) {
-		active_time = 1000000UL * (u32)mode->crtc_hdisplay / (u32)mode->clock;
-		line_time = min((u32) (1000000UL * (u32)mode->crtc_htotal / (u32)mode->clock), (u32)65535);
+		active_time = (u32) div_u64((u64)mode->crtc_hdisplay * 1000000,
+					    (u32)mode->clock);
+		line_time = (u32) div_u64((u64)mode->crtc_htotal * 1000000,
+					  (u32)mode->clock);
+		line_time = min(line_time, (u32)65535);
priority_a_cnt = 0; priority_a_cnt = 0;
priority_b_cnt = 0; priority_b_cnt = 0;
dram_channels = evergreen_get_number_of_dram_channels(rdev); dram_channels = evergreen_get_number_of_dram_channels(rdev);

View File

@ -621,7 +621,7 @@ static int radeon_uvd_cs_reloc(struct radeon_cs_parser *p,
} }
/* TODO: is this still necessary on NI+ ? */ /* TODO: is this still necessary on NI+ ? */
if ((cmd == 0 || cmd == 1 || cmd == 0x3) && if ((cmd == 0 || cmd == 0x3) &&
(start >> 28) != (p->rdev->uvd.gpu_addr >> 28)) { (start >> 28) != (p->rdev->uvd.gpu_addr >> 28)) {
DRM_ERROR("msg/fb buffer %LX-%LX out of 256MB segment!\n", DRM_ERROR("msg/fb buffer %LX-%LX out of 256MB segment!\n",
start, end); start, end);

View File

@ -2284,8 +2284,11 @@ static void dce6_program_watermarks(struct radeon_device *rdev,
fixed20_12 a, b, c; fixed20_12 a, b, c;
if (radeon_crtc->base.enabled && num_heads && mode) { if (radeon_crtc->base.enabled && num_heads && mode) {
-		active_time = 1000000UL * (u32)mode->crtc_hdisplay / (u32)mode->clock;
-		line_time = min((u32) (1000000UL * (u32)mode->crtc_htotal / (u32)mode->clock), (u32)65535);
+		active_time = (u32) div_u64((u64)mode->crtc_hdisplay * 1000000,
+					    (u32)mode->clock);
+		line_time = (u32) div_u64((u64)mode->crtc_htotal * 1000000,
+					  (u32)mode->clock);
+		line_time = min(line_time, (u32)65535);
priority_a_cnt = 0; priority_a_cnt = 0;
priority_b_cnt = 0; priority_b_cnt = 0;

View File

@ -451,18 +451,6 @@ int tegra_drm_submit(struct tegra_drm_context *context,
#ifdef CONFIG_DRM_TEGRA_STAGING #ifdef CONFIG_DRM_TEGRA_STAGING
static struct tegra_drm_context *
tegra_drm_file_get_context(struct tegra_drm_file *file, u32 id)
{
struct tegra_drm_context *context;
mutex_lock(&file->lock);
context = idr_find(&file->contexts, id);
mutex_unlock(&file->lock);
return context;
}
static int tegra_gem_create(struct drm_device *drm, void *data, static int tegra_gem_create(struct drm_device *drm, void *data,
struct drm_file *file) struct drm_file *file)
{ {
@ -551,7 +539,7 @@ static int tegra_client_open(struct tegra_drm_file *fpriv,
if (err < 0) if (err < 0)
return err; return err;
err = idr_alloc(&fpriv->contexts, context, 0, 0, GFP_KERNEL); err = idr_alloc(&fpriv->contexts, context, 1, 0, GFP_KERNEL);
if (err < 0) { if (err < 0) {
client->ops->close_channel(context); client->ops->close_channel(context);
return err; return err;
@ -606,7 +594,7 @@ static int tegra_close_channel(struct drm_device *drm, void *data,
mutex_lock(&fpriv->lock); mutex_lock(&fpriv->lock);
context = tegra_drm_file_get_context(fpriv, args->context); context = idr_find(&fpriv->contexts, args->context);
if (!context) { if (!context) {
err = -EINVAL; err = -EINVAL;
goto unlock; goto unlock;
@ -631,7 +619,7 @@ static int tegra_get_syncpt(struct drm_device *drm, void *data,
mutex_lock(&fpriv->lock); mutex_lock(&fpriv->lock);
context = tegra_drm_file_get_context(fpriv, args->context); context = idr_find(&fpriv->contexts, args->context);
if (!context) { if (!context) {
err = -ENODEV; err = -ENODEV;
goto unlock; goto unlock;
@ -660,7 +648,7 @@ static int tegra_submit(struct drm_device *drm, void *data,
mutex_lock(&fpriv->lock); mutex_lock(&fpriv->lock);
context = tegra_drm_file_get_context(fpriv, args->context); context = idr_find(&fpriv->contexts, args->context);
if (!context) { if (!context) {
err = -ENODEV; err = -ENODEV;
goto unlock; goto unlock;
@ -685,7 +673,7 @@ static int tegra_get_syncpt_base(struct drm_device *drm, void *data,
mutex_lock(&fpriv->lock); mutex_lock(&fpriv->lock);
context = tegra_drm_file_get_context(fpriv, args->context); context = idr_find(&fpriv->contexts, args->context);
if (!context) { if (!context) {
err = -ENODEV; err = -ENODEV;
goto unlock; goto unlock;

View File

@ -172,7 +172,7 @@ static int host1x_probe(struct platform_device *pdev)
host->rst = devm_reset_control_get(&pdev->dev, "host1x"); host->rst = devm_reset_control_get(&pdev->dev, "host1x");
if (IS_ERR(host->rst)) { if (IS_ERR(host->rst)) {
err = PTR_ERR(host->clk); err = PTR_ERR(host->rst);
dev_err(&pdev->dev, "failed to get reset: %d\n", err); dev_err(&pdev->dev, "failed to get reset: %d\n", err);
return err; return err;
} }

View File

@ -826,11 +826,35 @@ static int hid_scan_report(struct hid_device *hid)
* hid-rmi should take care of them, * hid-rmi should take care of them,
* not hid-generic * not hid-generic
*/ */
-		if (IS_ENABLED(CONFIG_HID_RMI))
-			hid->group = HID_GROUP_RMI;
+		hid->group = HID_GROUP_RMI;
break; break;
} }
/* fall back to generic driver in case specific driver doesn't exist */
switch (hid->group) {
case HID_GROUP_MULTITOUCH_WIN_8:
/* fall-through */
case HID_GROUP_MULTITOUCH:
if (!IS_ENABLED(CONFIG_HID_MULTITOUCH))
hid->group = HID_GROUP_GENERIC;
break;
case HID_GROUP_SENSOR_HUB:
if (!IS_ENABLED(CONFIG_HID_SENSOR_HUB))
hid->group = HID_GROUP_GENERIC;
break;
case HID_GROUP_RMI:
if (!IS_ENABLED(CONFIG_HID_RMI))
hid->group = HID_GROUP_GENERIC;
break;
case HID_GROUP_WACOM:
if (!IS_ENABLED(CONFIG_HID_WACOM))
hid->group = HID_GROUP_GENERIC;
break;
case HID_GROUP_LOGITECH_DJ_DEVICE:
if (!IS_ENABLED(CONFIG_HID_LOGITECH_DJ))
hid->group = HID_GROUP_GENERIC;
break;
}
vfree(parser); vfree(parser);
return 0; return 0;
} }
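
A small user-space model of the fall-back logic added above: a scanned group only sticks if the matching special driver is built in, otherwise the device is handed back to hid-generic. The booleans below stand in for the IS_ENABLED(CONFIG_*) checks and only two groups are modelled:

#include <stdbool.h>
#include <stdio.h>

enum hid_group_model {
	GROUP_GENERIC,
	GROUP_MULTITOUCH,
	GROUP_RMI,
};

/* Pretend Kconfig results; in the kernel these come from IS_ENABLED(). */
static const bool have_multitouch_driver = true;
static const bool have_rmi_driver = false;

static enum hid_group_model pick_group(enum hid_group_model scanned)
{
	switch (scanned) {
	case GROUP_MULTITOUCH:
		return have_multitouch_driver ? scanned : GROUP_GENERIC;
	case GROUP_RMI:
		return have_rmi_driver ? scanned : GROUP_GENERIC;
	default:
		return scanned;
	}
}

int main(void)
{
	printf("RMI device binds to group %d\n", pick_group(GROUP_RMI));
	printf("MT device binds to group %d\n", pick_group(GROUP_MULTITOUCH));
	return 0;
}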
@ -1763,15 +1787,23 @@ EXPORT_SYMBOL_GPL(hid_disconnect);
* used as a driver. See hid_scan_report(). * used as a driver. See hid_scan_report().
*/ */
static const struct hid_device_id hid_have_special_driver[] = { static const struct hid_device_id hid_have_special_driver[] = {
#if IS_ENABLED(CONFIG_HID_A4TECH)
{ HID_USB_DEVICE(USB_VENDOR_ID_A4TECH, USB_DEVICE_ID_A4TECH_WCP32PU) }, { HID_USB_DEVICE(USB_VENDOR_ID_A4TECH, USB_DEVICE_ID_A4TECH_WCP32PU) },
{ HID_USB_DEVICE(USB_VENDOR_ID_A4TECH, USB_DEVICE_ID_A4TECH_X5_005D) }, { HID_USB_DEVICE(USB_VENDOR_ID_A4TECH, USB_DEVICE_ID_A4TECH_X5_005D) },
{ HID_USB_DEVICE(USB_VENDOR_ID_A4TECH, USB_DEVICE_ID_A4TECH_RP_649) }, { HID_USB_DEVICE(USB_VENDOR_ID_A4TECH, USB_DEVICE_ID_A4TECH_RP_649) },
#endif
#if IS_ENABLED(CONFIG_HID_ACCUTOUCH)
{ HID_USB_DEVICE(USB_VENDOR_ID_ELO, USB_DEVICE_ID_ELO_ACCUTOUCH_2216) },
#endif
#if IS_ENABLED(CONFIG_HID_ACRUX)
{ HID_USB_DEVICE(USB_VENDOR_ID_ACRUX, 0x0802) }, { HID_USB_DEVICE(USB_VENDOR_ID_ACRUX, 0x0802) },
{ HID_USB_DEVICE(USB_VENDOR_ID_ACRUX, 0xf705) }, { HID_USB_DEVICE(USB_VENDOR_ID_ACRUX, 0xf705) },
#endif
#if IS_ENABLED(CONFIG_HID_ALPS)
{ HID_DEVICE(HID_BUS_ANY, HID_GROUP_ANY, USB_VENDOR_ID_ALPS_JP, HID_DEVICE_ID_ALPS_U1_DUAL) }, { HID_DEVICE(HID_BUS_ANY, HID_GROUP_ANY, USB_VENDOR_ID_ALPS_JP, HID_DEVICE_ID_ALPS_U1_DUAL) },
#endif
#if IS_ENABLED(CONFIG_HID_APPLE)
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_MIGHTYMOUSE) }, { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_MIGHTYMOUSE) },
{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_MAGICMOUSE) },
{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_MAGICTRACKPAD) },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_FOUNTAIN_ANSI) }, { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_FOUNTAIN_ANSI) },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_FOUNTAIN_ISO) }, { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_FOUNTAIN_ISO) },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER_ANSI) }, { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER_ANSI) },
@ -1792,11 +1824,6 @@ static const struct hid_device_id hid_have_special_driver[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER4_HF_ANSI) }, { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER4_HF_ANSI) },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER4_HF_ISO) }, { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER4_HF_ISO) },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER4_HF_JIS) }, { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER4_HF_JIS) },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_IRCONTROL) },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_IRCONTROL2) },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_IRCONTROL3) },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_IRCONTROL4) },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_IRCONTROL5) },
{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_ANSI) }, { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_ANSI) },
{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_ISO) }, { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_ISO) },
{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_JIS) }, { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_JIS) },
@ -1851,62 +1878,100 @@ static const struct hid_device_id hid_have_special_driver[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_MAGIC_KEYBOARD_ANSI) }, { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_MAGIC_KEYBOARD_ANSI) },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_FOUNTAIN_TP_ONLY) }, { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_FOUNTAIN_TP_ONLY) },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER1_TP_ONLY) }, { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER1_TP_ONLY) },
#endif
#if IS_ENABLED(CONFIG_HID_APPLEIR)
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_IRCONTROL) },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_IRCONTROL2) },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_IRCONTROL3) },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_IRCONTROL4) },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_IRCONTROL5) },
#endif
#if IS_ENABLED(CONFIG_HID_ASUS)
{ HID_I2C_DEVICE(USB_VENDOR_ID_ASUSTEK, USB_DEVICE_ID_ASUSTEK_I2C_KEYBOARD) }, { HID_I2C_DEVICE(USB_VENDOR_ID_ASUSTEK, USB_DEVICE_ID_ASUSTEK_I2C_KEYBOARD) },
{ HID_I2C_DEVICE(USB_VENDOR_ID_ASUSTEK, USB_DEVICE_ID_ASUSTEK_I2C_TOUCHPAD) }, { HID_I2C_DEVICE(USB_VENDOR_ID_ASUSTEK, USB_DEVICE_ID_ASUSTEK_I2C_TOUCHPAD) },
{ HID_USB_DEVICE(USB_VENDOR_ID_ASUSTEK, USB_DEVICE_ID_ASUSTEK_ROG_KEYBOARD1) }, { HID_USB_DEVICE(USB_VENDOR_ID_ASUSTEK, USB_DEVICE_ID_ASUSTEK_ROG_KEYBOARD1) },
{ HID_USB_DEVICE(USB_VENDOR_ID_ASUSTEK, USB_DEVICE_ID_ASUSTEK_ROG_KEYBOARD2) }, { HID_USB_DEVICE(USB_VENDOR_ID_ASUSTEK, USB_DEVICE_ID_ASUSTEK_ROG_KEYBOARD2) },
{ HID_USB_DEVICE(USB_VENDOR_ID_ASUSTEK, USB_DEVICE_ID_ASUSTEK_T100_KEYBOARD) }, { HID_USB_DEVICE(USB_VENDOR_ID_ASUSTEK, USB_DEVICE_ID_ASUSTEK_T100_KEYBOARD) },
#endif
#if IS_ENABLED(CONFIG_HID_AUREAL)
{ HID_USB_DEVICE(USB_VENDOR_ID_AUREAL, USB_DEVICE_ID_AUREAL_W01RN) }, { HID_USB_DEVICE(USB_VENDOR_ID_AUREAL, USB_DEVICE_ID_AUREAL_W01RN) },
#endif
#if IS_ENABLED(CONFIG_HID_BELKIN)
{ HID_USB_DEVICE(USB_VENDOR_ID_BELKIN, USB_DEVICE_ID_FLIP_KVM) }, { HID_USB_DEVICE(USB_VENDOR_ID_BELKIN, USB_DEVICE_ID_FLIP_KVM) },
{ HID_USB_DEVICE(USB_VENDOR_ID_LABTEC, USB_DEVICE_ID_LABTEC_WIRELESS_KEYBOARD) },
#endif
#if IS_ENABLED(CONFIG_HID_BETOP_FF)
{ HID_USB_DEVICE(USB_VENDOR_ID_BETOP_2185BFM, 0x2208) }, { HID_USB_DEVICE(USB_VENDOR_ID_BETOP_2185BFM, 0x2208) },
{ HID_USB_DEVICE(USB_VENDOR_ID_BETOP_2185PC, 0x5506) }, { HID_USB_DEVICE(USB_VENDOR_ID_BETOP_2185PC, 0x5506) },
{ HID_USB_DEVICE(USB_VENDOR_ID_BETOP_2185V2PC, 0x1850) }, { HID_USB_DEVICE(USB_VENDOR_ID_BETOP_2185V2PC, 0x1850) },
{ HID_USB_DEVICE(USB_VENDOR_ID_BETOP_2185V2BFM, 0x5500) }, { HID_USB_DEVICE(USB_VENDOR_ID_BETOP_2185V2BFM, 0x5500) },
{ HID_USB_DEVICE(USB_VENDOR_ID_BTC, USB_DEVICE_ID_BTC_EMPREX_REMOTE) }, #endif
{ HID_USB_DEVICE(USB_VENDOR_ID_BTC, USB_DEVICE_ID_BTC_EMPREX_REMOTE_2) }, #if IS_ENABLED(CONFIG_HID_CHERRY)
{ HID_USB_DEVICE(USB_VENDOR_ID_CHERRY, USB_DEVICE_ID_CHERRY_CYMOTION) }, { HID_USB_DEVICE(USB_VENDOR_ID_CHERRY, USB_DEVICE_ID_CHERRY_CYMOTION) },
{ HID_USB_DEVICE(USB_VENDOR_ID_CHERRY, USB_DEVICE_ID_CHERRY_CYMOTION_SOLAR) }, { HID_USB_DEVICE(USB_VENDOR_ID_CHERRY, USB_DEVICE_ID_CHERRY_CYMOTION_SOLAR) },
#endif
#if IS_ENABLED(CONFIG_HID_CHICONY)
{ HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_TACTICAL_PAD) }, { HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_TACTICAL_PAD) },
{ HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_WIRELESS) },
{ HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_WIRELESS2) }, { HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_WIRELESS2) },
{ HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_AK1D) }, { HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_AK1D) },
{ HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_ACER_SWITCH12) }, { HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_ACER_SWITCH12) },
{ HID_USB_DEVICE(USB_VENDOR_ID_JESS, USB_DEVICE_ID_JESS_ZEN_AIO_KBD) },
#endif
#if IS_ENABLED(CONFIG_HID_CMEDIA)
{ HID_USB_DEVICE(USB_VENDOR_ID_CMEDIA, USB_DEVICE_ID_CM6533) },
#endif
#if IS_ENABLED(CONFIG_HID_CORSAIR)
{ HID_USB_DEVICE(USB_VENDOR_ID_CORSAIR, USB_DEVICE_ID_CORSAIR_K90) }, { HID_USB_DEVICE(USB_VENDOR_ID_CORSAIR, USB_DEVICE_ID_CORSAIR_K90) },
{ HID_USB_DEVICE(USB_VENDOR_ID_CORSAIR, USB_DEVICE_ID_CORSAIR_SCIMITAR_PRO_RGB) }, { HID_USB_DEVICE(USB_VENDOR_ID_CORSAIR, USB_DEVICE_ID_CORSAIR_SCIMITAR_PRO_RGB) },
{ HID_USB_DEVICE(USB_VENDOR_ID_CREATIVELABS, USB_DEVICE_ID_PRODIKEYS_PCMIDI) }, #endif
#if IS_ENABLED(CONFIG_HID_CP2112)
{ HID_USB_DEVICE(USB_VENDOR_ID_CYGNAL, USB_DEVICE_ID_CYGNAL_CP2112) }, { HID_USB_DEVICE(USB_VENDOR_ID_CYGNAL, USB_DEVICE_ID_CYGNAL_CP2112) },
#endif
#if IS_ENABLED(CONFIG_HID_CYPRESS)
{ HID_USB_DEVICE(USB_VENDOR_ID_CYPRESS, USB_DEVICE_ID_CYPRESS_BARCODE_1) }, { HID_USB_DEVICE(USB_VENDOR_ID_CYPRESS, USB_DEVICE_ID_CYPRESS_BARCODE_1) },
{ HID_USB_DEVICE(USB_VENDOR_ID_CYPRESS, USB_DEVICE_ID_CYPRESS_BARCODE_2) }, { HID_USB_DEVICE(USB_VENDOR_ID_CYPRESS, USB_DEVICE_ID_CYPRESS_BARCODE_2) },
{ HID_USB_DEVICE(USB_VENDOR_ID_CYPRESS, USB_DEVICE_ID_CYPRESS_BARCODE_3) }, { HID_USB_DEVICE(USB_VENDOR_ID_CYPRESS, USB_DEVICE_ID_CYPRESS_BARCODE_3) },
{ HID_USB_DEVICE(USB_VENDOR_ID_CYPRESS, USB_DEVICE_ID_CYPRESS_BARCODE_4) }, { HID_USB_DEVICE(USB_VENDOR_ID_CYPRESS, USB_DEVICE_ID_CYPRESS_BARCODE_4) },
{ HID_USB_DEVICE(USB_VENDOR_ID_CYPRESS, USB_DEVICE_ID_CYPRESS_MOUSE) }, { HID_USB_DEVICE(USB_VENDOR_ID_CYPRESS, USB_DEVICE_ID_CYPRESS_MOUSE) },
{ HID_USB_DEVICE(USB_VENDOR_ID_DELCOM, USB_DEVICE_ID_DELCOM_VISUAL_IND) }, #endif
#if IS_ENABLED(CONFIG_HID_DRAGONRISE)
{ HID_USB_DEVICE(USB_VENDOR_ID_DRAGONRISE, 0x0006) }, { HID_USB_DEVICE(USB_VENDOR_ID_DRAGONRISE, 0x0006) },
{ HID_USB_DEVICE(USB_VENDOR_ID_DRAGONRISE, 0x0011) }, { HID_USB_DEVICE(USB_VENDOR_ID_DRAGONRISE, 0x0011) },
#if IS_ENABLED(CONFIG_HID_MAYFLASH)
{ HID_USB_DEVICE(USB_VENDOR_ID_DRAGONRISE, USB_DEVICE_ID_DRAGONRISE_PS3) },
{ HID_USB_DEVICE(USB_VENDOR_ID_DRAGONRISE, USB_DEVICE_ID_DRAGONRISE_DOLPHINBAR) },
{ HID_USB_DEVICE(USB_VENDOR_ID_DRAGONRISE, USB_DEVICE_ID_DRAGONRISE_GAMECUBE1) },
{ HID_USB_DEVICE(USB_VENDOR_ID_DRAGONRISE, USB_DEVICE_ID_DRAGONRISE_GAMECUBE2) },
#endif #endif
{ HID_USB_DEVICE(USB_VENDOR_ID_DREAM_CHEEKY, USB_DEVICE_ID_DREAM_CHEEKY_WN) }, #if IS_ENABLED(CONFIG_HID_ELECOM)
{ HID_USB_DEVICE(USB_VENDOR_ID_DREAM_CHEEKY, USB_DEVICE_ID_DREAM_CHEEKY_FA) },
{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_BM084) }, { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_BM084) },
{ HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_DEFT_WIRED) }, { HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_DEFT_WIRED) },
{ HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_DEFT_WIRELESS) }, { HID_USB_DEVICE(USB_VENDOR_ID_ELECOM, USB_DEVICE_ID_ELECOM_DEFT_WIRELESS) },
#endif
#if IS_ENABLED(CONFIG_HID_ELO)
{ HID_USB_DEVICE(USB_VENDOR_ID_ELO, 0x0009) }, { HID_USB_DEVICE(USB_VENDOR_ID_ELO, 0x0009) },
{ HID_USB_DEVICE(USB_VENDOR_ID_ELO, 0x0030) }, { HID_USB_DEVICE(USB_VENDOR_ID_ELO, 0x0030) },
{ HID_USB_DEVICE(USB_VENDOR_ID_ELO, USB_DEVICE_ID_ELO_ACCUTOUCH_2216) }, #endif
#if IS_ENABLED(CONFIG_HID_EMS_FF)
{ HID_USB_DEVICE(USB_VENDOR_ID_EMS, USB_DEVICE_ID_EMS_TRIO_LINKER_PLUS_II) }, { HID_USB_DEVICE(USB_VENDOR_ID_EMS, USB_DEVICE_ID_EMS_TRIO_LINKER_PLUS_II) },
#endif
#if IS_ENABLED(CONFIG_HID_EZKEY)
{ HID_USB_DEVICE(USB_VENDOR_ID_EZKEY, USB_DEVICE_ID_BTC_8193) }, { HID_USB_DEVICE(USB_VENDOR_ID_EZKEY, USB_DEVICE_ID_BTC_8193) },
{ HID_USB_DEVICE(USB_VENDOR_ID_GAMERON, USB_DEVICE_ID_GAMERON_DUAL_PSX_ADAPTOR) }, #endif
{ HID_USB_DEVICE(USB_VENDOR_ID_GAMERON, USB_DEVICE_ID_GAMERON_DUAL_PCS_ADAPTOR) }, #if IS_ENABLED(CONFIG_HID_GEMBIRD)
{ HID_USB_DEVICE(USB_VENDOR_ID_GEMBIRD, USB_DEVICE_ID_GEMBIRD_JPD_DUALFORCE2) }, { HID_USB_DEVICE(USB_VENDOR_ID_GEMBIRD, USB_DEVICE_ID_GEMBIRD_JPD_DUALFORCE2) },
{ HID_USB_DEVICE(USB_VENDOR_ID_GREENASIA, 0x0003) }, #endif
#if IS_ENABLED(CONFIG_HID_GFRM)
{ HID_BLUETOOTH_DEVICE(0x58, 0x2000) },
{ HID_BLUETOOTH_DEVICE(0x471, 0x2210) },
#endif
#if IS_ENABLED(CONFIG_HID_GREENASIA)
{ HID_USB_DEVICE(USB_VENDOR_ID_GREENASIA, 0x0012) }, { HID_USB_DEVICE(USB_VENDOR_ID_GREENASIA, 0x0012) },
#endif
#if IS_ENABLED(CONFIG_HID_GT683R)
{ HID_USB_DEVICE(USB_VENDOR_ID_MSI, USB_DEVICE_ID_MSI_GT683R_LED_PANEL) },
#endif
#if IS_ENABLED(CONFIG_HID_GYRATION)
{ HID_USB_DEVICE(USB_VENDOR_ID_GYRATION, USB_DEVICE_ID_GYRATION_REMOTE) }, { HID_USB_DEVICE(USB_VENDOR_ID_GYRATION, USB_DEVICE_ID_GYRATION_REMOTE) },
{ HID_USB_DEVICE(USB_VENDOR_ID_GYRATION, USB_DEVICE_ID_GYRATION_REMOTE_2) }, { HID_USB_DEVICE(USB_VENDOR_ID_GYRATION, USB_DEVICE_ID_GYRATION_REMOTE_2) },
{ HID_USB_DEVICE(USB_VENDOR_ID_GYRATION, USB_DEVICE_ID_GYRATION_REMOTE_3) }, { HID_USB_DEVICE(USB_VENDOR_ID_GYRATION, USB_DEVICE_ID_GYRATION_REMOTE_3) },
#endif
#if IS_ENABLED(CONFIG_HID_HOLTEK)
{ HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK, USB_DEVICE_ID_HOLTEK_ON_LINE_GRIP) }, { HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK, USB_DEVICE_ID_HOLTEK_ON_LINE_GRIP) },
{ HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK_ALT, USB_DEVICE_ID_HOLTEK_ALT_KEYBOARD) }, { HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK_ALT, USB_DEVICE_ID_HOLTEK_ALT_KEYBOARD) },
{ HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK_ALT, USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A04A) }, { HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK_ALT, USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A04A) },
@ -1915,12 +1980,17 @@ static const struct hid_device_id hid_have_special_driver[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK_ALT, USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A072) }, { HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK_ALT, USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A072) },
{ HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK_ALT, USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A081) }, { HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK_ALT, USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A081) },
{ HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK_ALT, USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A0C2) }, { HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK_ALT, USB_DEVICE_ID_HOLTEK_ALT_MOUSE_A0C2) },
{ HID_USB_DEVICE(USB_VENDOR_ID_HUION, USB_DEVICE_ID_HUION_TABLET) }, #endif
{ HID_USB_DEVICE(USB_VENDOR_ID_JESS, USB_DEVICE_ID_JESS_ZEN_AIO_KBD) }, #if IS_ENABLED(CONFIG_HID_ICADE)
{ HID_USB_DEVICE(USB_VENDOR_ID_JESS2, USB_DEVICE_ID_JESS2_COLOR_RUMBLE_PAD) },
{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_ION, USB_DEVICE_ID_ICADE) }, { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_ION, USB_DEVICE_ID_ICADE) },
#endif
#if IS_ENABLED(CONFIG_HID_KENSINGTON)
{ HID_USB_DEVICE(USB_VENDOR_ID_KENSINGTON, USB_DEVICE_ID_KS_SLIMBLADE) }, { HID_USB_DEVICE(USB_VENDOR_ID_KENSINGTON, USB_DEVICE_ID_KS_SLIMBLADE) },
#endif
#if IS_ENABLED(CONFIG_HID_KEYTOUCH)
{ HID_USB_DEVICE(USB_VENDOR_ID_KEYTOUCH, USB_DEVICE_ID_KEYTOUCH_IEC) }, { HID_USB_DEVICE(USB_VENDOR_ID_KEYTOUCH, USB_DEVICE_ID_KEYTOUCH_IEC) },
#endif
#if IS_ENABLED(CONFIG_HID_KYE)
{ HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_GENIUS_GILA_GAMING_MOUSE) }, { HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_GENIUS_GILA_GAMING_MOUSE) },
{ HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_GENIUS_MANTICORE) }, { HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_GENIUS_MANTICORE) },
{ HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_GENIUS_GX_IMPERATOR) }, { HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_GENIUS_GX_IMPERATOR) },
@ -1930,21 +2000,29 @@ static const struct hid_device_id hid_have_special_driver[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_MOUSEPEN_I608X_V2) }, { HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_MOUSEPEN_I608X_V2) },
{ HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_EASYPEN_M610X) }, { HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_EASYPEN_M610X) },
{ HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_PENSKETCH_M912) }, { HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_PENSKETCH_M912) },
{ HID_USB_DEVICE(USB_VENDOR_ID_LABTEC, USB_DEVICE_ID_LABTEC_WIRELESS_KEYBOARD) }, #endif
#if IS_ENABLED(CONFIG_HID_LCPOWER)
{ HID_USB_DEVICE(USB_VENDOR_ID_LCPOWER, USB_DEVICE_ID_LCPOWER_LC1000 ) }, { HID_USB_DEVICE(USB_VENDOR_ID_LCPOWER, USB_DEVICE_ID_LCPOWER_LC1000 ) },
#endif
#if IS_ENABLED(CONFIG_HID_LED)
{ HID_USB_DEVICE(USB_VENDOR_ID_DELCOM, USB_DEVICE_ID_DELCOM_VISUAL_IND) },
{ HID_USB_DEVICE(USB_VENDOR_ID_DREAM_CHEEKY, USB_DEVICE_ID_DREAM_CHEEKY_WN) },
{ HID_USB_DEVICE(USB_VENDOR_ID_DREAM_CHEEKY, USB_DEVICE_ID_DREAM_CHEEKY_FA) },
{ HID_USB_DEVICE(USB_VENDOR_ID_MICROCHIP, USB_DEVICE_ID_LUXAFOR) },
{ HID_USB_DEVICE(USB_VENDOR_ID_RISO_KAGAKU, USB_DEVICE_ID_RI_KA_WEBMAIL) },
{ HID_USB_DEVICE(USB_VENDOR_ID_THINGM, USB_DEVICE_ID_BLINK1) },
#endif
#if IS_ENABLED(CONFIG_HID_LENOVO) #if IS_ENABLED(CONFIG_HID_LENOVO)
{ HID_USB_DEVICE(USB_VENDOR_ID_LENOVO, USB_DEVICE_ID_LENOVO_TPKBD) }, { HID_USB_DEVICE(USB_VENDOR_ID_LENOVO, USB_DEVICE_ID_LENOVO_TPKBD) },
{ HID_USB_DEVICE(USB_VENDOR_ID_LENOVO, USB_DEVICE_ID_LENOVO_CUSBKBD) }, { HID_USB_DEVICE(USB_VENDOR_ID_LENOVO, USB_DEVICE_ID_LENOVO_CUSBKBD) },
{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_LENOVO, USB_DEVICE_ID_LENOVO_CBTKBD) }, { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_LENOVO, USB_DEVICE_ID_LENOVO_CBTKBD) },
{ HID_USB_DEVICE(USB_VENDOR_ID_LENOVO, USB_DEVICE_ID_LENOVO_TPPRODOCK) }, { HID_USB_DEVICE(USB_VENDOR_ID_LENOVO, USB_DEVICE_ID_LENOVO_TPPRODOCK) },
#endif #endif
{ HID_USB_DEVICE(USB_VENDOR_ID_LG, USB_DEVICE_ID_LG_MELFAS_MT) }, #if IS_ENABLED(CONFIG_HID_LOGITECH)
{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_MX3000_RECEIVER) }, { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_MX3000_RECEIVER) },
{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_S510_RECEIVER) }, { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_S510_RECEIVER) },
{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_S510_RECEIVER_2) }, { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_S510_RECEIVER_2) },
{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_RECEIVER) }, { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_RECEIVER) },
{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_HARMONY_PS3) },
{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_T651) },
{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_DINOVO_DESKTOP) }, { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_DINOVO_DESKTOP) },
{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_DINOVO_EDGE) }, { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_DINOVO_EDGE) },
{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_DINOVO_MINI) }, { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_DINOVO_MINI) },
@ -1957,7 +2035,6 @@ static const struct hid_device_id hid_have_special_driver[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_RUMBLEPAD) }, { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_RUMBLEPAD) },
{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_RUMBLEPAD2_2) }, { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_RUMBLEPAD2_2) },
{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_G29_WHEEL) }, { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_G29_WHEEL) },
{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_G920_WHEEL) },
{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_WINGMAN_F3D) }, { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_WINGMAN_F3D) },
{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_WINGMAN_FFG ) }, { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_WINGMAN_FFG ) },
{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_FORCE3D_PRO) }, { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_FORCE3D_PRO) },
@ -1969,17 +2046,30 @@ static const struct hid_device_id hid_have_special_driver[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_DFGT_WHEEL) }, { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_DFGT_WHEEL) },
{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_G25_WHEEL) }, { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_G25_WHEEL) },
{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_G27_WHEEL) }, { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_G27_WHEEL) },
#if IS_ENABLED(CONFIG_HID_LOGITECH_DJ)
{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_UNIFYING_RECEIVER) },
{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_UNIFYING_RECEIVER_2) },
#endif
{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_WII_WHEEL) }, { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_WII_WHEEL) },
{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_RUMBLEPAD2) }, { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_RUMBLEPAD2) },
{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_SPACETRAVELLER) }, { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_SPACETRAVELLER) },
{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_SPACENAVIGATOR) }, { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_SPACENAVIGATOR) },
{ HID_USB_DEVICE(USB_VENDOR_ID_MICROCHIP, USB_DEVICE_ID_PICOLCD) }, #endif
{ HID_USB_DEVICE(USB_VENDOR_ID_MICROCHIP, USB_DEVICE_ID_PICOLCD_BOOTLOADER) }, #if IS_ENABLED(CONFIG_HID_LOGITECH_HIDPP)
{ HID_USB_DEVICE(USB_VENDOR_ID_MICROCHIP, USB_DEVICE_ID_LUXAFOR) }, { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_T651) },
{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_G920_WHEEL) },
#endif
#if IS_ENABLED(CONFIG_HID_LOGITECH_DJ)
{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_UNIFYING_RECEIVER) },
{ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_UNIFYING_RECEIVER_2) },
#endif
#if IS_ENABLED(CONFIG_HID_MAGICMOUSE)
{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_MAGICMOUSE) },
{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_MAGICTRACKPAD) },
#endif
#if IS_ENABLED(CONFIG_HID_MAYFLASH)
{ HID_USB_DEVICE(USB_VENDOR_ID_DRAGONRISE, USB_DEVICE_ID_DRAGONRISE_PS3) },
{ HID_USB_DEVICE(USB_VENDOR_ID_DRAGONRISE, USB_DEVICE_ID_DRAGONRISE_DOLPHINBAR) },
{ HID_USB_DEVICE(USB_VENDOR_ID_DRAGONRISE, USB_DEVICE_ID_DRAGONRISE_GAMECUBE1) },
{ HID_USB_DEVICE(USB_VENDOR_ID_DRAGONRISE, USB_DEVICE_ID_DRAGONRISE_GAMECUBE2) },
#endif
#if IS_ENABLED(CONFIG_HID_MICROSOFT)
{ HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_COMFORT_MOUSE_4500) }, { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_COMFORT_MOUSE_4500) },
{ HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_COMFORT_KEYBOARD) }, { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_COMFORT_KEYBOARD) },
{ HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_SIDEWINDER_GV) }, { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_SIDEWINDER_GV) },
@ -1995,9 +2085,22 @@ static const struct hid_device_id hid_have_special_driver[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_DIGITAL_MEDIA_600) }, { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_DIGITAL_MEDIA_600) },
{ HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_DIGITAL_MEDIA_3KV1) }, { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_DIGITAL_MEDIA_3KV1) },
{ HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_POWER_COVER) }, { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_POWER_COVER) },
{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_PRESENTER_8K_BT) },
#endif
#if IS_ENABLED(CONFIG_HID_MONTEREY)
{ HID_USB_DEVICE(USB_VENDOR_ID_MONTEREY, USB_DEVICE_ID_GENIUS_KB29E) }, { HID_USB_DEVICE(USB_VENDOR_ID_MONTEREY, USB_DEVICE_ID_GENIUS_KB29E) },
{ HID_USB_DEVICE(USB_VENDOR_ID_MSI, USB_DEVICE_ID_MSI_GT683R_LED_PANEL) }, #endif
#if IS_ENABLED(CONFIG_HID_MULTITOUCH)
{ HID_USB_DEVICE(USB_VENDOR_ID_LG, USB_DEVICE_ID_LG_MELFAS_MT) },
#endif
#if IS_ENABLED(CONFIG_HID_WIIMOTE)
{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_NINTENDO, USB_DEVICE_ID_NINTENDO_WIIMOTE) },
{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_NINTENDO, USB_DEVICE_ID_NINTENDO_WIIMOTE2) },
#endif
#if IS_ENABLED(CONFIG_HID_NTI)
{ HID_USB_DEVICE(USB_VENDOR_ID_NTI, USB_DEVICE_ID_USB_SUN) }, { HID_USB_DEVICE(USB_VENDOR_ID_NTI, USB_DEVICE_ID_USB_SUN) },
#endif
#if IS_ENABLED(CONFIG_HID_NTRIG)
{ HID_USB_DEVICE(USB_VENDOR_ID_NTRIG, USB_DEVICE_ID_NTRIG_TOUCH_SCREEN) }, { HID_USB_DEVICE(USB_VENDOR_ID_NTRIG, USB_DEVICE_ID_NTRIG_TOUCH_SCREEN) },
{ HID_USB_DEVICE(USB_VENDOR_ID_NTRIG, USB_DEVICE_ID_NTRIG_TOUCH_SCREEN_1) }, { HID_USB_DEVICE(USB_VENDOR_ID_NTRIG, USB_DEVICE_ID_NTRIG_TOUCH_SCREEN_1) },
{ HID_USB_DEVICE(USB_VENDOR_ID_NTRIG, USB_DEVICE_ID_NTRIG_TOUCH_SCREEN_2) }, { HID_USB_DEVICE(USB_VENDOR_ID_NTRIG, USB_DEVICE_ID_NTRIG_TOUCH_SCREEN_2) },
@ -2017,13 +2120,41 @@ static const struct hid_device_id hid_have_special_driver[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_NTRIG, USB_DEVICE_ID_NTRIG_TOUCH_SCREEN_16) }, { HID_USB_DEVICE(USB_VENDOR_ID_NTRIG, USB_DEVICE_ID_NTRIG_TOUCH_SCREEN_16) },
{ HID_USB_DEVICE(USB_VENDOR_ID_NTRIG, USB_DEVICE_ID_NTRIG_TOUCH_SCREEN_17) }, { HID_USB_DEVICE(USB_VENDOR_ID_NTRIG, USB_DEVICE_ID_NTRIG_TOUCH_SCREEN_17) },
{ HID_USB_DEVICE(USB_VENDOR_ID_NTRIG, USB_DEVICE_ID_NTRIG_TOUCH_SCREEN_18) }, { HID_USB_DEVICE(USB_VENDOR_ID_NTRIG, USB_DEVICE_ID_NTRIG_TOUCH_SCREEN_18) },
#endif
#if IS_ENABLED(CONFIG_HID_ORTEK)
{ HID_USB_DEVICE(USB_VENDOR_ID_ORTEK, USB_DEVICE_ID_ORTEK_PKB1700) }, { HID_USB_DEVICE(USB_VENDOR_ID_ORTEK, USB_DEVICE_ID_ORTEK_PKB1700) },
{ HID_USB_DEVICE(USB_VENDOR_ID_ORTEK, USB_DEVICE_ID_ORTEK_WKB2000) }, { HID_USB_DEVICE(USB_VENDOR_ID_ORTEK, USB_DEVICE_ID_ORTEK_WKB2000) },
{ HID_USB_DEVICE(USB_VENDOR_ID_SKYCABLE, USB_DEVICE_ID_SKYCABLE_WIRELESS_PRESENTER) },
#endif
#if IS_ENABLED(CONFIG_HID_PANTHERLORD)
{ HID_USB_DEVICE(USB_VENDOR_ID_GAMERON, USB_DEVICE_ID_GAMERON_DUAL_PSX_ADAPTOR) },
{ HID_USB_DEVICE(USB_VENDOR_ID_GAMERON, USB_DEVICE_ID_GAMERON_DUAL_PCS_ADAPTOR) },
{ HID_USB_DEVICE(USB_VENDOR_ID_GREENASIA, 0x0003) },
{ HID_USB_DEVICE(USB_VENDOR_ID_JESS2, USB_DEVICE_ID_JESS2_COLOR_RUMBLE_PAD) },
#endif
#if IS_ENABLED(CONFIG_HID_PENMOUNT)
{ HID_USB_DEVICE(USB_VENDOR_ID_PENMOUNT, USB_DEVICE_ID_PENMOUNT_6000) }, { HID_USB_DEVICE(USB_VENDOR_ID_PENMOUNT, USB_DEVICE_ID_PENMOUNT_6000) },
#endif
#if IS_ENABLED(CONFIG_HID_PETALYNX)
{ HID_USB_DEVICE(USB_VENDOR_ID_PETALYNX, USB_DEVICE_ID_PETALYNX_MAXTER_REMOTE) }, { HID_USB_DEVICE(USB_VENDOR_ID_PETALYNX, USB_DEVICE_ID_PETALYNX_MAXTER_REMOTE) },
#endif
#if IS_ENABLED(CONFIG_HID_PICOLCD)
{ HID_USB_DEVICE(USB_VENDOR_ID_MICROCHIP, USB_DEVICE_ID_PICOLCD) },
{ HID_USB_DEVICE(USB_VENDOR_ID_MICROCHIP, USB_DEVICE_ID_PICOLCD_BOOTLOADER) },
#endif
#if IS_ENABLED(CONFIG_HID_PLANTRONICS)
{ HID_USB_DEVICE(USB_VENDOR_ID_PLANTRONICS, HID_ANY_ID) }, { HID_USB_DEVICE(USB_VENDOR_ID_PLANTRONICS, HID_ANY_ID) },
#endif
#if IS_ENABLED(CONFIG_HID_PRIMAX)
{ HID_USB_DEVICE(USB_VENDOR_ID_PRIMAX, USB_DEVICE_ID_PRIMAX_KEYBOARD) }, { HID_USB_DEVICE(USB_VENDOR_ID_PRIMAX, USB_DEVICE_ID_PRIMAX_KEYBOARD) },
{ HID_USB_DEVICE(USB_VENDOR_ID_RISO_KAGAKU, USB_DEVICE_ID_RI_KA_WEBMAIL) }, #endif
#if IS_ENABLED(CONFIG_HID_PRODIKEYS)
{ HID_USB_DEVICE(USB_VENDOR_ID_CREATIVELABS, USB_DEVICE_ID_PRODIKEYS_PCMIDI) },
#endif
#if IS_ENABLED(CONFIG_HID_RMI)
{ HID_USB_DEVICE(USB_VENDOR_ID_LENOVO, USB_DEVICE_ID_LENOVO_X1_COVER) },
{ HID_USB_DEVICE(USB_VENDOR_ID_RAZER, USB_DEVICE_ID_RAZER_BLADE_14) },
#endif
#if IS_ENABLED(CONFIG_HID_ROCCAT) #if IS_ENABLED(CONFIG_HID_ROCCAT)
{ HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_ARVO) }, { HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_ARVO) },
{ HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_ISKU) }, { HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_ISKU) },
@ -2051,9 +2182,21 @@ static const struct hid_device_id hid_have_special_driver[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_MADCATZ, USB_DEVICE_ID_MADCATZ_RAT5) }, { HID_USB_DEVICE(USB_VENDOR_ID_MADCATZ, USB_DEVICE_ID_MADCATZ_RAT5) },
{ HID_USB_DEVICE(USB_VENDOR_ID_MADCATZ, USB_DEVICE_ID_MADCATZ_RAT9) }, { HID_USB_DEVICE(USB_VENDOR_ID_MADCATZ, USB_DEVICE_ID_MADCATZ_RAT9) },
#endif #endif
#if IS_ENABLED(CONFIG_HID_SAMSUNG)
{ HID_USB_DEVICE(USB_VENDOR_ID_SAMSUNG, USB_DEVICE_ID_SAMSUNG_IR_REMOTE) }, { HID_USB_DEVICE(USB_VENDOR_ID_SAMSUNG, USB_DEVICE_ID_SAMSUNG_IR_REMOTE) },
{ HID_USB_DEVICE(USB_VENDOR_ID_SAMSUNG, USB_DEVICE_ID_SAMSUNG_WIRELESS_KBD_MOUSE) }, { HID_USB_DEVICE(USB_VENDOR_ID_SAMSUNG, USB_DEVICE_ID_SAMSUNG_WIRELESS_KBD_MOUSE) },
{ HID_USB_DEVICE(USB_VENDOR_ID_SKYCABLE, USB_DEVICE_ID_SKYCABLE_WIRELESS_PRESENTER) }, #endif
#if IS_ENABLED(CONFIG_HID_SMARTJOYPLUS)
{ HID_USB_DEVICE(USB_VENDOR_ID_PLAYDOTCOM, USB_DEVICE_ID_PLAYDOTCOM_EMS_USBII) },
{ HID_USB_DEVICE(USB_VENDOR_ID_WISEGROUP, USB_DEVICE_ID_SMARTJOY_PLUS) },
{ HID_USB_DEVICE(USB_VENDOR_ID_WISEGROUP, USB_DEVICE_ID_SUPER_JOY_BOX_3) },
{ HID_USB_DEVICE(USB_VENDOR_ID_WISEGROUP, USB_DEVICE_ID_DUAL_USB_JOYPAD) },
{ HID_USB_DEVICE(USB_VENDOR_ID_WISEGROUP_LTD, USB_DEVICE_ID_SUPER_JOY_BOX_3_PRO) },
{ HID_USB_DEVICE(USB_VENDOR_ID_WISEGROUP_LTD, USB_DEVICE_ID_SUPER_DUAL_BOX_PRO) },
{ HID_USB_DEVICE(USB_VENDOR_ID_WISEGROUP_LTD, USB_DEVICE_ID_SUPER_JOY_BOX_5_PRO) },
#endif
#if IS_ENABLED(CONFIG_HID_SONY)
{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_HARMONY_PS3) },
{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_SMK, USB_DEVICE_ID_SMK_PS3_BDREMOTE) }, { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_SMK, USB_DEVICE_ID_SMK_PS3_BDREMOTE) },
{ HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_BUZZ_CONTROLLER) }, { HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_BUZZ_CONTROLLER) },
{ HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_WIRELESS_BUZZ_CONTROLLER) }, { HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_WIRELESS_BUZZ_CONTROLLER) },
@ -2072,9 +2215,17 @@ static const struct hid_device_id hid_have_special_driver[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_VAIO_VGX_MOUSE) }, { HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_VAIO_VGX_MOUSE) },
{ HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_VAIO_VGP_MOUSE) }, { HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_VAIO_VGP_MOUSE) },
{ HID_USB_DEVICE(USB_VENDOR_ID_SINO_LITE, USB_DEVICE_ID_SINO_LITE_CONTROLLER) }, { HID_USB_DEVICE(USB_VENDOR_ID_SINO_LITE, USB_DEVICE_ID_SINO_LITE_CONTROLLER) },
#endif
#if IS_ENABLED(CONFIG_HID_SPEEDLINK)
{ HID_USB_DEVICE(USB_VENDOR_ID_X_TENSIONS, USB_DEVICE_ID_SPEEDLINK_VAD_CEZANNE) },
#endif
#if IS_ENABLED(CONFIG_HID_STEELSERIES)
{ HID_USB_DEVICE(USB_VENDOR_ID_STEELSERIES, USB_DEVICE_ID_STEELSERIES_SRWS1) }, { HID_USB_DEVICE(USB_VENDOR_ID_STEELSERIES, USB_DEVICE_ID_STEELSERIES_SRWS1) },
#endif
#if IS_ENABLED(CONFIG_HID_SUNPLUS)
{ HID_USB_DEVICE(USB_VENDOR_ID_SUNPLUS, USB_DEVICE_ID_SUNPLUS_WDESKTOP) }, { HID_USB_DEVICE(USB_VENDOR_ID_SUNPLUS, USB_DEVICE_ID_SUNPLUS_WDESKTOP) },
{ HID_USB_DEVICE(USB_VENDOR_ID_THINGM, USB_DEVICE_ID_BLINK1) }, #endif
#if IS_ENABLED(CONFIG_HID_THRUSTMASTER)
{ HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb300) }, { HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb300) },
{ HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb304) }, { HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb304) },
{ HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb323) }, { HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb323) },
@ -2083,12 +2234,25 @@ static const struct hid_device_id hid_have_special_driver[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb653) }, { HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb653) },
{ HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb654) }, { HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb654) },
{ HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb65a) }, { HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb65a) },
#endif
#if IS_ENABLED(CONFIG_HID_TIVO)
{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_TIVO, USB_DEVICE_ID_TIVO_SLIDE_BT) }, { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_TIVO, USB_DEVICE_ID_TIVO_SLIDE_BT) },
{ HID_USB_DEVICE(USB_VENDOR_ID_TIVO, USB_DEVICE_ID_TIVO_SLIDE) }, { HID_USB_DEVICE(USB_VENDOR_ID_TIVO, USB_DEVICE_ID_TIVO_SLIDE) },
{ HID_USB_DEVICE(USB_VENDOR_ID_TIVO, USB_DEVICE_ID_TIVO_SLIDE_PRO) }, { HID_USB_DEVICE(USB_VENDOR_ID_TIVO, USB_DEVICE_ID_TIVO_SLIDE_PRO) },
#endif
#if IS_ENABLED(CONFIG_HID_TOPSEED)
{ HID_USB_DEVICE(USB_VENDOR_ID_BTC, USB_DEVICE_ID_BTC_EMPREX_REMOTE) },
{ HID_USB_DEVICE(USB_VENDOR_ID_BTC, USB_DEVICE_ID_BTC_EMPREX_REMOTE_2) },
{ HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_WIRELESS) },
{ HID_USB_DEVICE(USB_VENDOR_ID_TOPSEED, USB_DEVICE_ID_TOPSEED_CYBERLINK) }, { HID_USB_DEVICE(USB_VENDOR_ID_TOPSEED, USB_DEVICE_ID_TOPSEED_CYBERLINK) },
{ HID_USB_DEVICE(USB_VENDOR_ID_TOPSEED2, USB_DEVICE_ID_TOPSEED2_RF_COMBO) }, { HID_USB_DEVICE(USB_VENDOR_ID_TOPSEED2, USB_DEVICE_ID_TOPSEED2_RF_COMBO) },
#endif
#if IS_ENABLED(CONFIG_HID_TWINHAN)
{ HID_USB_DEVICE(USB_VENDOR_ID_TWINHAN, USB_DEVICE_ID_TWINHAN_IR_REMOTE) }, { HID_USB_DEVICE(USB_VENDOR_ID_TWINHAN, USB_DEVICE_ID_TWINHAN_IR_REMOTE) },
#endif
#if IS_ENABLED(CONFIG_HID_UCLOGIC)
{ HID_USB_DEVICE(USB_VENDOR_ID_HUION, USB_DEVICE_ID_HUION_TABLET) },
{ HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_HUION_TABLET) },
{ HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_TABLET_PF1209) }, { HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_TABLET_PF1209) },
{ HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_TABLET_WP4030U) }, { HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_TABLET_WP4030U) },
{ HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_TABLET_WP5540U) }, { HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_TABLET_WP5540U) },
@ -2096,20 +2260,17 @@ static const struct hid_device_id hid_have_special_driver[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_TABLET_WP1062) }, { HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_TABLET_WP1062) },
{ HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_WIRELESS_TABLET_TWHL850) }, { HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_WIRELESS_TABLET_TWHL850) },
{ HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_TABLET_TWHA60) }, { HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_TABLET_TWHA60) },
{ HID_USB_DEVICE(USB_VENDOR_ID_THQ, USB_DEVICE_ID_THQ_PS3_UDRAW) },
{ HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_YIYNOVA_TABLET) }, { HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_YIYNOVA_TABLET) },
{ HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UGEE_TABLET_81) }, { HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UGEE_TABLET_81) },
{ HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UGEE_TABLET_45) }, { HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UGEE_TABLET_45) },
{ HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_DRAWIMAGE_G3) }, { HID_USB_DEVICE(USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_DRAWIMAGE_G3) },
{ HID_USB_DEVICE(USB_VENDOR_ID_UGTIZER, USB_DEVICE_ID_UGTIZER_TABLET_GP0610) },
{ HID_USB_DEVICE(USB_VENDOR_ID_UGEE, USB_DEVICE_ID_UGEE_TABLET_EX07S) }, { HID_USB_DEVICE(USB_VENDOR_ID_UGEE, USB_DEVICE_ID_UGEE_TABLET_EX07S) },
{ HID_USB_DEVICE(USB_VENDOR_ID_WISEGROUP, USB_DEVICE_ID_SMARTJOY_PLUS) }, { HID_USB_DEVICE(USB_VENDOR_ID_UGTIZER, USB_DEVICE_ID_UGTIZER_TABLET_GP0610) },
{ HID_USB_DEVICE(USB_VENDOR_ID_WISEGROUP, USB_DEVICE_ID_SUPER_JOY_BOX_3) }, #endif
{ HID_USB_DEVICE(USB_VENDOR_ID_WISEGROUP, USB_DEVICE_ID_DUAL_USB_JOYPAD) }, #if IS_ENABLED(CONFIG_HID_UDRAW_PS3)
{ HID_USB_DEVICE(USB_VENDOR_ID_WISEGROUP_LTD, USB_DEVICE_ID_SUPER_JOY_BOX_3_PRO) }, { HID_USB_DEVICE(USB_VENDOR_ID_THQ, USB_DEVICE_ID_THQ_PS3_UDRAW) },
{ HID_USB_DEVICE(USB_VENDOR_ID_WISEGROUP_LTD, USB_DEVICE_ID_SUPER_DUAL_BOX_PRO) }, #endif
{ HID_USB_DEVICE(USB_VENDOR_ID_WISEGROUP_LTD, USB_DEVICE_ID_SUPER_JOY_BOX_5_PRO) }, #if IS_ENABLED(CONFIG_HID_WALTOP)
{ HID_USB_DEVICE(USB_VENDOR_ID_PLAYDOTCOM, USB_DEVICE_ID_PLAYDOTCOM_EMS_USBII) },
{ HID_USB_DEVICE(USB_VENDOR_ID_WALTOP, USB_DEVICE_ID_WALTOP_SLIM_TABLET_5_8_INCH) }, { HID_USB_DEVICE(USB_VENDOR_ID_WALTOP, USB_DEVICE_ID_WALTOP_SLIM_TABLET_5_8_INCH) },
{ HID_USB_DEVICE(USB_VENDOR_ID_WALTOP, USB_DEVICE_ID_WALTOP_SLIM_TABLET_12_1_INCH) }, { HID_USB_DEVICE(USB_VENDOR_ID_WALTOP, USB_DEVICE_ID_WALTOP_SLIM_TABLET_12_1_INCH) },
{ HID_USB_DEVICE(USB_VENDOR_ID_WALTOP, USB_DEVICE_ID_WALTOP_Q_PAD) }, { HID_USB_DEVICE(USB_VENDOR_ID_WALTOP, USB_DEVICE_ID_WALTOP_Q_PAD) },
@ -2117,19 +2278,18 @@ static const struct hid_device_id hid_have_special_driver[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_WALTOP, USB_DEVICE_ID_WALTOP_MEDIA_TABLET_10_6_INCH) }, { HID_USB_DEVICE(USB_VENDOR_ID_WALTOP, USB_DEVICE_ID_WALTOP_MEDIA_TABLET_10_6_INCH) },
{ HID_USB_DEVICE(USB_VENDOR_ID_WALTOP, USB_DEVICE_ID_WALTOP_MEDIA_TABLET_14_1_INCH) }, { HID_USB_DEVICE(USB_VENDOR_ID_WALTOP, USB_DEVICE_ID_WALTOP_MEDIA_TABLET_14_1_INCH) },
{ HID_USB_DEVICE(USB_VENDOR_ID_WALTOP, USB_DEVICE_ID_WALTOP_SIRIUS_BATTERY_FREE_TABLET) }, { HID_USB_DEVICE(USB_VENDOR_ID_WALTOP, USB_DEVICE_ID_WALTOP_SIRIUS_BATTERY_FREE_TABLET) },
{ HID_USB_DEVICE(USB_VENDOR_ID_X_TENSIONS, USB_DEVICE_ID_SPEEDLINK_VAD_CEZANNE) }, #endif
#if IS_ENABLED(CONFIG_HID_XINMO)
{ HID_USB_DEVICE(USB_VENDOR_ID_XIN_MO, USB_DEVICE_ID_XIN_MO_DUAL_ARCADE) }, { HID_USB_DEVICE(USB_VENDOR_ID_XIN_MO, USB_DEVICE_ID_XIN_MO_DUAL_ARCADE) },
{ HID_USB_DEVICE(USB_VENDOR_ID_XIN_MO, USB_DEVICE_ID_THT_2P_ARCADE) }, { HID_USB_DEVICE(USB_VENDOR_ID_XIN_MO, USB_DEVICE_ID_THT_2P_ARCADE) },
#endif
#if IS_ENABLED(CONFIG_HID_ZEROPLUS)
{ HID_USB_DEVICE(USB_VENDOR_ID_ZEROPLUS, 0x0005) }, { HID_USB_DEVICE(USB_VENDOR_ID_ZEROPLUS, 0x0005) },
{ HID_USB_DEVICE(USB_VENDOR_ID_ZEROPLUS, 0x0030) }, { HID_USB_DEVICE(USB_VENDOR_ID_ZEROPLUS, 0x0030) },
#endif
#if IS_ENABLED(CONFIG_HID_ZYDACRON)
{ HID_USB_DEVICE(USB_VENDOR_ID_ZYDACRON, USB_DEVICE_ID_ZYDACRON_REMOTE_CONTROL) }, { HID_USB_DEVICE(USB_VENDOR_ID_ZYDACRON, USB_DEVICE_ID_ZYDACRON_REMOTE_CONTROL) },
#endif
{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_PRESENTER_8K_BT) },
{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_NINTENDO, USB_DEVICE_ID_NINTENDO_WIIMOTE) },
{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_NINTENDO, USB_DEVICE_ID_NINTENDO_WIIMOTE2) },
{ HID_USB_DEVICE(USB_VENDOR_ID_RAZER, USB_DEVICE_ID_RAZER_BLADE_14) },
{ HID_USB_DEVICE(USB_VENDOR_ID_CMEDIA, USB_DEVICE_ID_CM6533) },
{ HID_USB_DEVICE(USB_VENDOR_ID_LENOVO, USB_DEVICE_ID_LENOVO_X1_COVER) },
{ } { }
}; };
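The hunks above rearrange hid_have_special_driver[] so that each block of entries sits under an IS_ENABLED() guard for its driver; entries whose driver is not configured disappear from the table at compile time instead of claiming devices that no driver will actually handle. A minimal stand-alone sketch of that compile-time-table pattern, using simplified stand-in macros rather than the kernel's own:

#include <stdio.h>

/* Simplified stand-in for the kernel's IS_ENABLED(); the real macro also
 * copes with =m options. CONFIG_EXAMPLE_DRIVER is a hypothetical symbol. */
#define CONFIG_EXAMPLE_DRIVER 1
#define IS_ENABLED(cfg) (cfg)

struct example_id { unsigned short vendor, product; };

static const struct example_id have_special_driver[] = {
#if IS_ENABLED(CONFIG_EXAMPLE_DRIVER)
	{ 0x1234, 0x5678 },	/* kept only when the driver can be built */
#endif
	{ 0, 0 }		/* terminator */
};

int main(void)
{
	printf("entries (incl. terminator): %zu\n",
	       sizeof(have_special_driver) / sizeof(have_special_driver[0]));
	return 0;
}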

View File

@ -1066,7 +1066,7 @@ static void ssip_pn_setup(struct net_device *dev)
dev->addr_len = 1; dev->addr_len = 1;
dev->tx_queue_len = SSIP_TXQUEUE_LEN; dev->tx_queue_len = SSIP_TXQUEUE_LEN;
dev->destructor = free_netdev; dev->needs_free_netdev = true;
dev->header_ops = &phonet_header_ops; dev->header_ops = &phonet_header_ops;
} }
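Setting dev->needs_free_netdev here replaces the removed dev->destructor = free_netdev assignment: the networking core then calls free_netdev() itself once the device is unregistered, and drivers that still need their own teardown use the separate priv_destructor hook instead.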

View File

@ -584,7 +584,7 @@ static int ismt_access(struct i2c_adapter *adap, u16 addr,
/* unmap the data buffer */ /* unmap the data buffer */
if (dma_size != 0) if (dma_size != 0)
dma_unmap_single(&adap->dev, dma_addr, dma_size, dma_direction); dma_unmap_single(dev, dma_addr, dma_size, dma_direction);
if (unlikely(!time_left)) { if (unlikely(!time_left)) {
dev_err(dev, "completion wait timed out\n"); dev_err(dev, "completion wait timed out\n");
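The dma_unmap_single() change above matters because a streaming mapping has to be torn down against the same struct device that created it; in this driver that is the PCI device behind dev, not the I2C adapter's logical device, and a mismatched device shows up as DMA-API debug warnings.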

View File

@ -319,7 +319,7 @@ static void rcar_i2c_dma_unmap(struct rcar_i2c_priv *priv)
rcar_i2c_write(priv, ICFBSCR, TCYC06); rcar_i2c_write(priv, ICFBSCR, TCYC06);
dma_unmap_single(chan->device->dev, sg_dma_address(&priv->sg), dma_unmap_single(chan->device->dev, sg_dma_address(&priv->sg),
priv->msg->len, priv->dma_direction); sg_dma_len(&priv->sg), priv->dma_direction);
priv->dma_direction = DMA_NONE; priv->dma_direction = DMA_NONE;
} }
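dma_unmap_single() must likewise be given the length that was actually mapped; the hunk above therefore takes it from sg_dma_len(&priv->sg) rather than priv->msg->len, since the two are not guaranteed to match for every transfer.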

View File

@ -468,13 +468,13 @@ static void meson_sar_adc_unlock(struct iio_dev *indio_dev)
static void meson_sar_adc_clear_fifo(struct iio_dev *indio_dev) static void meson_sar_adc_clear_fifo(struct iio_dev *indio_dev)
{ {
struct meson_sar_adc_priv *priv = iio_priv(indio_dev); struct meson_sar_adc_priv *priv = iio_priv(indio_dev);
int count; unsigned int count, tmp;
for (count = 0; count < MESON_SAR_ADC_MAX_FIFO_SIZE; count++) { for (count = 0; count < MESON_SAR_ADC_MAX_FIFO_SIZE; count++) {
if (!meson_sar_adc_get_fifo_count(indio_dev)) if (!meson_sar_adc_get_fifo_count(indio_dev))
break; break;
regmap_read(priv->regmap, MESON_SAR_ADC_FIFO_RD, 0); regmap_read(priv->regmap, MESON_SAR_ADC_FIFO_RD, &tmp);
} }
} }
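regmap_read() always stores the value it read through its third argument, so the FIFO-drain loop above has to pass a real variable (tmp) even though the value is discarded; the bare 0 used before was effectively a null output pointer. A tiny stand-alone sketch of the same read-and-discard shape, with a made-up read_reg() helper rather than the regmap API:

#include <stdio.h>

/* Hypothetical register-read helper with regmap_read()-style semantics:
 * the result always goes out through the pointer argument. */
static int read_reg(unsigned int reg, unsigned int *val)
{
	*val = reg ^ 0xffu;	/* pretend hardware value */
	return 0;
}

int main(void)
{
	unsigned int tmp;
	int i;

	/* Draining and discarding still needs somewhere to put each value. */
	for (i = 0; i < 4; i++)
		read_reg(0x10, &tmp);

	printf("last discarded value: %#x\n", tmp);
	return 0;
}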

View File

@ -718,9 +718,12 @@ static int mxs_lradc_adc_probe(struct platform_device *pdev)
adc->dev = dev; adc->dev = dev;
iores = platform_get_resource(pdev, IORESOURCE_MEM, 0); iores = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!iores)
return -EINVAL;
adc->base = devm_ioremap(dev, iores->start, resource_size(iores)); adc->base = devm_ioremap(dev, iores->start, resource_size(iores));
if (IS_ERR(adc->base)) if (!adc->base)
return PTR_ERR(adc->base); return -ENOMEM;
init_completion(&adc->completion); init_completion(&adc->completion);
spin_lock_init(&adc->lock); spin_lock_init(&adc->lock);
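devm_ioremap() reports failure with a plain NULL rather than an ERR_PTR() value, so the old IS_ERR()/PTR_ERR() pair could never trigger; the added check also bails out early when the memory resource itself is missing. A minimal userspace sketch of that NULL-on-failure convention, with a hypothetical map_region() helper standing in for the mapping call:

#include <stdio.h>
#include <stdlib.h>
#include <errno.h>

/* Mimics devm_ioremap(): NULL on failure, never an encoded error pointer. */
static void *map_region(size_t size)
{
	return size ? malloc(size) : NULL;
}

static int probe(size_t size)
{
	void *base = map_region(size);

	if (!base)		/* NULL check, not IS_ERR() */
		return -ENOMEM;

	free(base);
	return 0;
}

int main(void)
{
	printf("probe(0)  -> %d\n", probe(0));
	printf("probe(64) -> %d\n", probe(64));
	return 0;
}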

View File

@ -14,6 +14,7 @@
#include <linux/sched.h> #include <linux/sched.h>
#include <linux/poll.h> #include <linux/poll.h>
#include <linux/iio/buffer.h> #include <linux/iio/buffer.h>
#include <linux/iio/buffer_impl.h>
#include <linux/iio/buffer-dma.h> #include <linux/iio/buffer-dma.h>
#include <linux/dma-mapping.h> #include <linux/dma-mapping.h>
#include <linux/sizes.h> #include <linux/sizes.h>

View File

@ -14,6 +14,7 @@
#include <linux/iio/iio.h> #include <linux/iio/iio.h>
#include <linux/iio/buffer.h> #include <linux/iio/buffer.h>
#include <linux/iio/buffer_impl.h>
#include <linux/iio/buffer-dma.h> #include <linux/iio/buffer-dma.h>
#include <linux/iio/buffer-dmaengine.h> #include <linux/iio/buffer-dmaengine.h>

View File

@ -41,6 +41,7 @@ static const int accel_scale[] = {598, 1196, 2392, 4785};
static const struct inv_mpu6050_reg_map reg_set_6500 = { static const struct inv_mpu6050_reg_map reg_set_6500 = {
.sample_rate_div = INV_MPU6050_REG_SAMPLE_RATE_DIV, .sample_rate_div = INV_MPU6050_REG_SAMPLE_RATE_DIV,
.lpf = INV_MPU6050_REG_CONFIG, .lpf = INV_MPU6050_REG_CONFIG,
.accel_lpf = INV_MPU6500_REG_ACCEL_CONFIG_2,
.user_ctrl = INV_MPU6050_REG_USER_CTRL, .user_ctrl = INV_MPU6050_REG_USER_CTRL,
.fifo_en = INV_MPU6050_REG_FIFO_EN, .fifo_en = INV_MPU6050_REG_FIFO_EN,
.gyro_config = INV_MPU6050_REG_GYRO_CONFIG, .gyro_config = INV_MPU6050_REG_GYRO_CONFIG,
@ -210,6 +211,37 @@ int inv_mpu6050_set_power_itg(struct inv_mpu6050_state *st, bool power_on)
} }
EXPORT_SYMBOL_GPL(inv_mpu6050_set_power_itg); EXPORT_SYMBOL_GPL(inv_mpu6050_set_power_itg);
/**
* inv_mpu6050_set_lpf_regs() - set low pass filter registers, chip dependent
*
* MPU60xx/MPU9150 use only 1 register for accelerometer + gyroscope
* MPU6500 and above have a dedicated register for accelerometer
*/
static int inv_mpu6050_set_lpf_regs(struct inv_mpu6050_state *st,
enum inv_mpu6050_filter_e val)
{
int result;
result = regmap_write(st->map, st->reg->lpf, val);
if (result)
return result;
switch (st->chip_type) {
case INV_MPU6050:
case INV_MPU6000:
case INV_MPU9150:
/* old chips, nothing to do */
result = 0;
break;
default:
/* set accel lpf */
result = regmap_write(st->map, st->reg->accel_lpf, val);
break;
}
return result;
}
/** /**
* inv_mpu6050_init_config() - Initialize hardware, disable FIFO. * inv_mpu6050_init_config() - Initialize hardware, disable FIFO.
* *
@ -233,8 +265,7 @@ static int inv_mpu6050_init_config(struct iio_dev *indio_dev)
if (result) if (result)
return result; return result;
d = INV_MPU6050_FILTER_20HZ; result = inv_mpu6050_set_lpf_regs(st, INV_MPU6050_FILTER_20HZ);
result = regmap_write(st->map, st->reg->lpf, d);
if (result) if (result)
return result; return result;
@ -537,6 +568,8 @@ static int inv_mpu6050_write_raw(struct iio_dev *indio_dev,
* would be alising. This function basically search for the * would be alising. This function basically search for the
* correct low pass parameters based on the fifo rate, e.g, * correct low pass parameters based on the fifo rate, e.g,
* correct low pass parameters based on the fifo rate, e.g, * correct low pass parameters based on the fifo rate, e.g,
* sampling frequency. * sampling frequency.
*
* lpf is set automatically when setting sampling rate to avoid any aliases.
*/ */
static int inv_mpu6050_set_lpf(struct inv_mpu6050_state *st, int rate) static int inv_mpu6050_set_lpf(struct inv_mpu6050_state *st, int rate)
{ {
@ -552,7 +585,7 @@ static int inv_mpu6050_set_lpf(struct inv_mpu6050_state *st, int rate)
while ((h < hz[i]) && (i < ARRAY_SIZE(d) - 1)) while ((h < hz[i]) && (i < ARRAY_SIZE(d) - 1))
i++; i++;
data = d[i]; data = d[i];
result = regmap_write(st->map, st->reg->lpf, data); result = inv_mpu6050_set_lpf_regs(st, data);
if (result) if (result)
return result; return result;
st->chip_config.lpf = data; st->chip_config.lpf = data;
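inv_mpu6050_set_lpf_regs() above writes the shared CONFIG register on every chip and additionally mirrors the bandwidth into ACCEL_CONFIG_2 on MPU6500-class parts, which carry a separate accelerometer filter. A stand-alone sketch of that shape, with an invented write_reg() helper in place of regmap_write():

#include <stdio.h>

enum chip_type { CHIP_MPU6050, CHIP_MPU6000, CHIP_MPU9150, CHIP_MPU6500 };

/* Hypothetical register-write helper standing in for regmap_write(). */
static int write_reg(const char *name, unsigned int val)
{
	printf("write %-14s <- %u\n", name, val);
	return 0;
}

/* One shared LPF register everywhere, plus a dedicated accelerometer LPF
 * register on the newer parts. */
static int set_lpf_regs(enum chip_type chip, unsigned int bandwidth)
{
	int ret = write_reg("CONFIG(lpf)", bandwidth);

	if (ret)
		return ret;

	switch (chip) {
	case CHIP_MPU6050:
	case CHIP_MPU6000:
	case CHIP_MPU9150:
		return 0;	/* older chips: nothing more to do */
	default:
		return write_reg("ACCEL_CONFIG_2", bandwidth);
	}
}

int main(void)
{
	set_lpf_regs(CHIP_MPU9150, 1);
	set_lpf_regs(CHIP_MPU6500, 1);
	return 0;
}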

View File

@ -28,6 +28,7 @@
* struct inv_mpu6050_reg_map - Notable registers. * struct inv_mpu6050_reg_map - Notable registers.
* @sample_rate_div: Divider applied to gyro output rate. * @sample_rate_div: Divider applied to gyro output rate.
* @lpf: Configures internal low pass filter. * @lpf: Configures internal low pass filter.
* @accel_lpf: Configures accelerometer low pass filter.
* @user_ctrl: Enables/resets the FIFO. * @user_ctrl: Enables/resets the FIFO.
* @fifo_en: Determines which data will appear in FIFO. * @fifo_en: Determines which data will appear in FIFO.
* @gyro_config: gyro config register. * @gyro_config: gyro config register.
@ -47,6 +48,7 @@
struct inv_mpu6050_reg_map { struct inv_mpu6050_reg_map {
u8 sample_rate_div; u8 sample_rate_div;
u8 lpf; u8 lpf;
u8 accel_lpf;
u8 user_ctrl; u8 user_ctrl;
u8 fifo_en; u8 fifo_en;
u8 gyro_config; u8 gyro_config;
@ -188,6 +190,7 @@ struct inv_mpu6050_state {
#define INV_MPU6050_FIFO_THRESHOLD 500 #define INV_MPU6050_FIFO_THRESHOLD 500
/* mpu6500 registers */ /* mpu6500 registers */
#define INV_MPU6500_REG_ACCEL_CONFIG_2 0x1D
#define INV_MPU6500_REG_ACCEL_OFFSET 0x77 #define INV_MPU6500_REG_ACCEL_OFFSET 0x77
/* delay time in milliseconds */ /* delay time in milliseconds */

View File

@ -449,12 +449,7 @@ static int addr6_resolve(struct sockaddr_in6 *src_in,
return ret; return ret;
rt = (struct rt6_info *)dst; rt = (struct rt6_info *)dst;
if (ipv6_addr_any(&fl6.saddr)) { if (ipv6_addr_any(&src_in->sin6_addr)) {
ret = ipv6_dev_get_saddr(addr->net, ip6_dst_idev(dst)->dev,
&fl6.daddr, 0, &fl6.saddr);
if (ret)
goto put;
src_in->sin6_family = AF_INET6; src_in->sin6_family = AF_INET6;
src_in->sin6_addr = fl6.saddr; src_in->sin6_addr = fl6.saddr;
} }
@ -471,9 +466,6 @@ static int addr6_resolve(struct sockaddr_in6 *src_in,
*pdst = dst; *pdst = dst;
return 0; return 0;
put:
dst_release(dst);
return ret;
} }
#else #else
static int addr6_resolve(struct sockaddr_in6 *src_in, static int addr6_resolve(struct sockaddr_in6 *src_in,

View File

@ -56,6 +56,10 @@
#define BNXT_RE_MAX_SRQC_COUNT (64 * 1024) #define BNXT_RE_MAX_SRQC_COUNT (64 * 1024)
#define BNXT_RE_MAX_CQ_COUNT (64 * 1024) #define BNXT_RE_MAX_CQ_COUNT (64 * 1024)
#define BNXT_RE_UD_QP_HW_STALL 0x400000
#define BNXT_RE_RQ_WQE_THRESHOLD 32
struct bnxt_re_work { struct bnxt_re_work {
struct work_struct work; struct work_struct work;
unsigned long event; unsigned long event;

View File

@ -61,6 +61,48 @@
#include "ib_verbs.h" #include "ib_verbs.h"
#include <rdma/bnxt_re-abi.h> #include <rdma/bnxt_re-abi.h>
static int __from_ib_access_flags(int iflags)
{
int qflags = 0;
if (iflags & IB_ACCESS_LOCAL_WRITE)
qflags |= BNXT_QPLIB_ACCESS_LOCAL_WRITE;
if (iflags & IB_ACCESS_REMOTE_READ)
qflags |= BNXT_QPLIB_ACCESS_REMOTE_READ;
if (iflags & IB_ACCESS_REMOTE_WRITE)
qflags |= BNXT_QPLIB_ACCESS_REMOTE_WRITE;
if (iflags & IB_ACCESS_REMOTE_ATOMIC)
qflags |= BNXT_QPLIB_ACCESS_REMOTE_ATOMIC;
if (iflags & IB_ACCESS_MW_BIND)
qflags |= BNXT_QPLIB_ACCESS_MW_BIND;
if (iflags & IB_ZERO_BASED)
qflags |= BNXT_QPLIB_ACCESS_ZERO_BASED;
if (iflags & IB_ACCESS_ON_DEMAND)
qflags |= BNXT_QPLIB_ACCESS_ON_DEMAND;
return qflags;
};
static enum ib_access_flags __to_ib_access_flags(int qflags)
{
enum ib_access_flags iflags = 0;
if (qflags & BNXT_QPLIB_ACCESS_LOCAL_WRITE)
iflags |= IB_ACCESS_LOCAL_WRITE;
if (qflags & BNXT_QPLIB_ACCESS_REMOTE_WRITE)
iflags |= IB_ACCESS_REMOTE_WRITE;
if (qflags & BNXT_QPLIB_ACCESS_REMOTE_READ)
iflags |= IB_ACCESS_REMOTE_READ;
if (qflags & BNXT_QPLIB_ACCESS_REMOTE_ATOMIC)
iflags |= IB_ACCESS_REMOTE_ATOMIC;
if (qflags & BNXT_QPLIB_ACCESS_MW_BIND)
iflags |= IB_ACCESS_MW_BIND;
if (qflags & BNXT_QPLIB_ACCESS_ZERO_BASED)
iflags |= IB_ZERO_BASED;
if (qflags & BNXT_QPLIB_ACCESS_ON_DEMAND)
iflags |= IB_ACCESS_ON_DEMAND;
return iflags;
};
static int bnxt_re_build_sgl(struct ib_sge *ib_sg_list, static int bnxt_re_build_sgl(struct ib_sge *ib_sg_list,
struct bnxt_qplib_sge *sg_list, int num) struct bnxt_qplib_sge *sg_list, int num)
{ {
@ -149,8 +191,8 @@ int bnxt_re_query_device(struct ib_device *ibdev,
ib_attr->max_total_mcast_qp_attach = 0; ib_attr->max_total_mcast_qp_attach = 0;
ib_attr->max_ah = dev_attr->max_ah; ib_attr->max_ah = dev_attr->max_ah;
ib_attr->max_fmr = dev_attr->max_fmr; ib_attr->max_fmr = 0;
ib_attr->max_map_per_fmr = 1; /* ? */ ib_attr->max_map_per_fmr = 0;
ib_attr->max_srq = dev_attr->max_srq; ib_attr->max_srq = dev_attr->max_srq;
ib_attr->max_srq_wr = dev_attr->max_srq_wqes; ib_attr->max_srq_wr = dev_attr->max_srq_wqes;
@ -410,6 +452,158 @@ enum rdma_link_layer bnxt_re_get_link_layer(struct ib_device *ibdev,
return IB_LINK_LAYER_ETHERNET; return IB_LINK_LAYER_ETHERNET;
} }
#define BNXT_RE_FENCE_PBL_SIZE DIV_ROUND_UP(BNXT_RE_FENCE_BYTES, PAGE_SIZE)
static void bnxt_re_create_fence_wqe(struct bnxt_re_pd *pd)
{
struct bnxt_re_fence_data *fence = &pd->fence;
struct ib_mr *ib_mr = &fence->mr->ib_mr;
struct bnxt_qplib_swqe *wqe = &fence->bind_wqe;
memset(wqe, 0, sizeof(*wqe));
wqe->type = BNXT_QPLIB_SWQE_TYPE_BIND_MW;
wqe->wr_id = BNXT_QPLIB_FENCE_WRID;
wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_SIGNAL_COMP;
wqe->flags |= BNXT_QPLIB_SWQE_FLAGS_UC_FENCE;
wqe->bind.zero_based = false;
wqe->bind.parent_l_key = ib_mr->lkey;
wqe->bind.va = (u64)(unsigned long)fence->va;
wqe->bind.length = fence->size;
wqe->bind.access_cntl = __from_ib_access_flags(IB_ACCESS_REMOTE_READ);
wqe->bind.mw_type = SQ_BIND_MW_TYPE_TYPE1;
/* Save the initial rkey in fence structure for now;
* wqe->bind.r_key will be set at (re)bind time.
*/
fence->bind_rkey = ib_inc_rkey(fence->mw->rkey);
}
static int bnxt_re_bind_fence_mw(struct bnxt_qplib_qp *qplib_qp)
{
struct bnxt_re_qp *qp = container_of(qplib_qp, struct bnxt_re_qp,
qplib_qp);
struct ib_pd *ib_pd = qp->ib_qp.pd;
struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
struct bnxt_re_fence_data *fence = &pd->fence;
struct bnxt_qplib_swqe *fence_wqe = &fence->bind_wqe;
struct bnxt_qplib_swqe wqe;
int rc;
memcpy(&wqe, fence_wqe, sizeof(wqe));
wqe.bind.r_key = fence->bind_rkey;
fence->bind_rkey = ib_inc_rkey(fence->bind_rkey);
dev_dbg(rdev_to_dev(qp->rdev),
"Posting bind fence-WQE: rkey: %#x QP: %d PD: %p\n",
wqe.bind.r_key, qp->qplib_qp.id, pd);
rc = bnxt_qplib_post_send(&qp->qplib_qp, &wqe);
if (rc) {
dev_err(rdev_to_dev(qp->rdev), "Failed to bind fence-WQE\n");
return rc;
}
bnxt_qplib_post_send_db(&qp->qplib_qp);
return rc;
}
static void bnxt_re_destroy_fence_mr(struct bnxt_re_pd *pd)
{
struct bnxt_re_fence_data *fence = &pd->fence;
struct bnxt_re_dev *rdev = pd->rdev;
struct device *dev = &rdev->en_dev->pdev->dev;
struct bnxt_re_mr *mr = fence->mr;
if (fence->mw) {
bnxt_re_dealloc_mw(fence->mw);
fence->mw = NULL;
}
if (mr) {
if (mr->ib_mr.rkey)
bnxt_qplib_dereg_mrw(&rdev->qplib_res, &mr->qplib_mr,
true);
if (mr->ib_mr.lkey)
bnxt_qplib_free_mrw(&rdev->qplib_res, &mr->qplib_mr);
kfree(mr);
fence->mr = NULL;
}
if (fence->dma_addr) {
dma_unmap_single(dev, fence->dma_addr, BNXT_RE_FENCE_BYTES,
DMA_BIDIRECTIONAL);
fence->dma_addr = 0;
}
}
static int bnxt_re_create_fence_mr(struct bnxt_re_pd *pd)
{
int mr_access_flags = IB_ACCESS_LOCAL_WRITE | IB_ACCESS_MW_BIND;
struct bnxt_re_fence_data *fence = &pd->fence;
struct bnxt_re_dev *rdev = pd->rdev;
struct device *dev = &rdev->en_dev->pdev->dev;
struct bnxt_re_mr *mr = NULL;
dma_addr_t dma_addr = 0;
struct ib_mw *mw;
u64 pbl_tbl;
int rc;
dma_addr = dma_map_single(dev, fence->va, BNXT_RE_FENCE_BYTES,
DMA_BIDIRECTIONAL);
rc = dma_mapping_error(dev, dma_addr);
if (rc) {
dev_err(rdev_to_dev(rdev), "Failed to dma-map fence-MR-mem\n");
rc = -EIO;
fence->dma_addr = 0;
goto fail;
}
fence->dma_addr = dma_addr;
/* Allocate a MR */
mr = kzalloc(sizeof(*mr), GFP_KERNEL);
if (!mr) {
rc = -ENOMEM;
goto fail;
}
fence->mr = mr;
mr->rdev = rdev;
mr->qplib_mr.pd = &pd->qplib_pd;
mr->qplib_mr.type = CMDQ_ALLOCATE_MRW_MRW_FLAGS_PMR;
mr->qplib_mr.flags = __from_ib_access_flags(mr_access_flags);
rc = bnxt_qplib_alloc_mrw(&rdev->qplib_res, &mr->qplib_mr);
if (rc) {
dev_err(rdev_to_dev(rdev), "Failed to alloc fence-HW-MR\n");
goto fail;
}
/* Register MR */
mr->ib_mr.lkey = mr->qplib_mr.lkey;
mr->qplib_mr.va = (u64)(unsigned long)fence->va;
mr->qplib_mr.total_size = BNXT_RE_FENCE_BYTES;
pbl_tbl = dma_addr;
rc = bnxt_qplib_reg_mr(&rdev->qplib_res, &mr->qplib_mr, &pbl_tbl,
BNXT_RE_FENCE_PBL_SIZE, false);
if (rc) {
dev_err(rdev_to_dev(rdev), "Failed to register fence-MR\n");
goto fail;
}
mr->ib_mr.rkey = mr->qplib_mr.rkey;
/* Create a fence MW only for kernel consumers */
mw = bnxt_re_alloc_mw(&pd->ib_pd, IB_MW_TYPE_1, NULL);
if (!mw) {
dev_err(rdev_to_dev(rdev),
"Failed to create fence-MW for PD: %p\n", pd);
rc = -EINVAL;
goto fail;
}
fence->mw = mw;
bnxt_re_create_fence_wqe(pd);
return 0;
fail:
bnxt_re_destroy_fence_mr(pd);
return rc;
}
/* Protection Domains */ /* Protection Domains */
int bnxt_re_dealloc_pd(struct ib_pd *ib_pd) int bnxt_re_dealloc_pd(struct ib_pd *ib_pd)
{ {
@ -417,6 +611,7 @@ int bnxt_re_dealloc_pd(struct ib_pd *ib_pd)
struct bnxt_re_dev *rdev = pd->rdev; struct bnxt_re_dev *rdev = pd->rdev;
int rc; int rc;
bnxt_re_destroy_fence_mr(pd);
if (ib_pd->uobject && pd->dpi.dbr) { if (ib_pd->uobject && pd->dpi.dbr) {
struct ib_ucontext *ib_uctx = ib_pd->uobject->context; struct ib_ucontext *ib_uctx = ib_pd->uobject->context;
struct bnxt_re_ucontext *ucntx; struct bnxt_re_ucontext *ucntx;
@ -498,6 +693,10 @@ struct ib_pd *bnxt_re_alloc_pd(struct ib_device *ibdev,
} }
} }
if (!udata)
if (bnxt_re_create_fence_mr(pd))
dev_warn(rdev_to_dev(rdev),
"Failed to create Fence-MR\n");
return &pd->ib_pd; return &pd->ib_pd;
dbfail: dbfail:
(void)bnxt_qplib_dealloc_pd(&rdev->qplib_res, &rdev->qplib_res.pd_tbl, (void)bnxt_qplib_dealloc_pd(&rdev->qplib_res, &rdev->qplib_res.pd_tbl,
@ -849,12 +1048,16 @@ static struct bnxt_re_qp *bnxt_re_create_shadow_qp
/* Shadow QP SQ depth should be same as QP1 RQ depth */ /* Shadow QP SQ depth should be same as QP1 RQ depth */
qp->qplib_qp.sq.max_wqe = qp1_qp->rq.max_wqe; qp->qplib_qp.sq.max_wqe = qp1_qp->rq.max_wqe;
qp->qplib_qp.sq.max_sge = 2; qp->qplib_qp.sq.max_sge = 2;
/* Q full delta can be 1 since it is internal QP */
qp->qplib_qp.sq.q_full_delta = 1;
qp->qplib_qp.scq = qp1_qp->scq; qp->qplib_qp.scq = qp1_qp->scq;
qp->qplib_qp.rcq = qp1_qp->rcq; qp->qplib_qp.rcq = qp1_qp->rcq;
qp->qplib_qp.rq.max_wqe = qp1_qp->rq.max_wqe; qp->qplib_qp.rq.max_wqe = qp1_qp->rq.max_wqe;
qp->qplib_qp.rq.max_sge = qp1_qp->rq.max_sge; qp->qplib_qp.rq.max_sge = qp1_qp->rq.max_sge;
/* Q full delta can be 1 since it is internal QP */
qp->qplib_qp.rq.q_full_delta = 1;
qp->qplib_qp.mtu = qp1_qp->mtu; qp->qplib_qp.mtu = qp1_qp->mtu;
@ -917,10 +1120,6 @@ struct ib_qp *bnxt_re_create_qp(struct ib_pd *ib_pd,
qp->qplib_qp.sig_type = ((qp_init_attr->sq_sig_type == qp->qplib_qp.sig_type = ((qp_init_attr->sq_sig_type ==
IB_SIGNAL_ALL_WR) ? true : false); IB_SIGNAL_ALL_WR) ? true : false);
entries = roundup_pow_of_two(qp_init_attr->cap.max_send_wr + 1);
qp->qplib_qp.sq.max_wqe = min_t(u32, entries,
dev_attr->max_qp_wqes + 1);
qp->qplib_qp.sq.max_sge = qp_init_attr->cap.max_send_sge; qp->qplib_qp.sq.max_sge = qp_init_attr->cap.max_send_sge;
if (qp->qplib_qp.sq.max_sge > dev_attr->max_qp_sges) if (qp->qplib_qp.sq.max_sge > dev_attr->max_qp_sges)
qp->qplib_qp.sq.max_sge = dev_attr->max_qp_sges; qp->qplib_qp.sq.max_sge = dev_attr->max_qp_sges;
@ -959,6 +1158,9 @@ struct ib_qp *bnxt_re_create_qp(struct ib_pd *ib_pd,
qp->qplib_qp.rq.max_wqe = min_t(u32, entries, qp->qplib_qp.rq.max_wqe = min_t(u32, entries,
dev_attr->max_qp_wqes + 1); dev_attr->max_qp_wqes + 1);
qp->qplib_qp.rq.q_full_delta = qp->qplib_qp.rq.max_wqe -
qp_init_attr->cap.max_recv_wr;
qp->qplib_qp.rq.max_sge = qp_init_attr->cap.max_recv_sge; qp->qplib_qp.rq.max_sge = qp_init_attr->cap.max_recv_sge;
if (qp->qplib_qp.rq.max_sge > dev_attr->max_qp_sges) if (qp->qplib_qp.rq.max_sge > dev_attr->max_qp_sges)
qp->qplib_qp.rq.max_sge = dev_attr->max_qp_sges; qp->qplib_qp.rq.max_sge = dev_attr->max_qp_sges;
@ -967,6 +1169,12 @@ struct ib_qp *bnxt_re_create_qp(struct ib_pd *ib_pd,
qp->qplib_qp.mtu = ib_mtu_enum_to_int(iboe_get_mtu(rdev->netdev->mtu)); qp->qplib_qp.mtu = ib_mtu_enum_to_int(iboe_get_mtu(rdev->netdev->mtu));
if (qp_init_attr->qp_type == IB_QPT_GSI) { if (qp_init_attr->qp_type == IB_QPT_GSI) {
/* Allocate 1 more than what's provided */
entries = roundup_pow_of_two(qp_init_attr->cap.max_send_wr + 1);
qp->qplib_qp.sq.max_wqe = min_t(u32, entries,
dev_attr->max_qp_wqes + 1);
qp->qplib_qp.sq.q_full_delta = qp->qplib_qp.sq.max_wqe -
qp_init_attr->cap.max_send_wr;
qp->qplib_qp.rq.max_sge = dev_attr->max_qp_sges; qp->qplib_qp.rq.max_sge = dev_attr->max_qp_sges;
if (qp->qplib_qp.rq.max_sge > dev_attr->max_qp_sges) if (qp->qplib_qp.rq.max_sge > dev_attr->max_qp_sges)
qp->qplib_qp.rq.max_sge = dev_attr->max_qp_sges; qp->qplib_qp.rq.max_sge = dev_attr->max_qp_sges;
@ -1006,6 +1214,22 @@ struct ib_qp *bnxt_re_create_qp(struct ib_pd *ib_pd,
} }
} else { } else {
/* Allocate 128 + 1 more than what's provided */
entries = roundup_pow_of_two(qp_init_attr->cap.max_send_wr +
BNXT_QPLIB_RESERVED_QP_WRS + 1);
qp->qplib_qp.sq.max_wqe = min_t(u32, entries,
dev_attr->max_qp_wqes +
BNXT_QPLIB_RESERVED_QP_WRS + 1);
qp->qplib_qp.sq.q_full_delta = BNXT_QPLIB_RESERVED_QP_WRS + 1;
/*
* Reserving one slot for Phantom WQE. Application can
* post one extra entry in this case. But allowing this to avoid
* unexpected Queue full condition
*/
qp->qplib_qp.sq.q_full_delta -= 1;
qp->qplib_qp.max_rd_atomic = dev_attr->max_qp_rd_atom; qp->qplib_qp.max_rd_atomic = dev_attr->max_qp_rd_atom;
qp->qplib_qp.max_dest_rd_atomic = dev_attr->max_qp_init_rd_atom; qp->qplib_qp.max_dest_rd_atomic = dev_attr->max_qp_init_rd_atom;
if (udata) { if (udata) {
@ -1025,6 +1249,7 @@ struct ib_qp *bnxt_re_create_qp(struct ib_pd *ib_pd,
qp->ib_qp.qp_num = qp->qplib_qp.id; qp->ib_qp.qp_num = qp->qplib_qp.id;
spin_lock_init(&qp->sq_lock); spin_lock_init(&qp->sq_lock);
spin_lock_init(&qp->rq_lock);
if (udata) { if (udata) {
struct bnxt_re_qp_resp resp; struct bnxt_re_qp_resp resp;
@ -1129,48 +1354,6 @@ static enum ib_mtu __to_ib_mtu(u32 mtu)
} }
} }
static int __from_ib_access_flags(int iflags)
{
int qflags = 0;
if (iflags & IB_ACCESS_LOCAL_WRITE)
qflags |= BNXT_QPLIB_ACCESS_LOCAL_WRITE;
if (iflags & IB_ACCESS_REMOTE_READ)
qflags |= BNXT_QPLIB_ACCESS_REMOTE_READ;
if (iflags & IB_ACCESS_REMOTE_WRITE)
qflags |= BNXT_QPLIB_ACCESS_REMOTE_WRITE;
if (iflags & IB_ACCESS_REMOTE_ATOMIC)
qflags |= BNXT_QPLIB_ACCESS_REMOTE_ATOMIC;
if (iflags & IB_ACCESS_MW_BIND)
qflags |= BNXT_QPLIB_ACCESS_MW_BIND;
if (iflags & IB_ZERO_BASED)
qflags |= BNXT_QPLIB_ACCESS_ZERO_BASED;
if (iflags & IB_ACCESS_ON_DEMAND)
qflags |= BNXT_QPLIB_ACCESS_ON_DEMAND;
return qflags;
};
static enum ib_access_flags __to_ib_access_flags(int qflags)
{
enum ib_access_flags iflags = 0;
if (qflags & BNXT_QPLIB_ACCESS_LOCAL_WRITE)
iflags |= IB_ACCESS_LOCAL_WRITE;
if (qflags & BNXT_QPLIB_ACCESS_REMOTE_WRITE)
iflags |= IB_ACCESS_REMOTE_WRITE;
if (qflags & BNXT_QPLIB_ACCESS_REMOTE_READ)
iflags |= IB_ACCESS_REMOTE_READ;
if (qflags & BNXT_QPLIB_ACCESS_REMOTE_ATOMIC)
iflags |= IB_ACCESS_REMOTE_ATOMIC;
if (qflags & BNXT_QPLIB_ACCESS_MW_BIND)
iflags |= IB_ACCESS_MW_BIND;
if (qflags & BNXT_QPLIB_ACCESS_ZERO_BASED)
iflags |= IB_ZERO_BASED;
if (qflags & BNXT_QPLIB_ACCESS_ON_DEMAND)
iflags |= IB_ACCESS_ON_DEMAND;
return iflags;
};
static int bnxt_re_modify_shadow_qp(struct bnxt_re_dev *rdev, static int bnxt_re_modify_shadow_qp(struct bnxt_re_dev *rdev,
struct bnxt_re_qp *qp1_qp, struct bnxt_re_qp *qp1_qp,
int qp_attr_mask) int qp_attr_mask)
@ -1378,11 +1561,21 @@ int bnxt_re_modify_qp(struct ib_qp *ib_qp, struct ib_qp_attr *qp_attr,
entries = roundup_pow_of_two(qp_attr->cap.max_send_wr); entries = roundup_pow_of_two(qp_attr->cap.max_send_wr);
qp->qplib_qp.sq.max_wqe = min_t(u32, entries, qp->qplib_qp.sq.max_wqe = min_t(u32, entries,
dev_attr->max_qp_wqes + 1); dev_attr->max_qp_wqes + 1);
qp->qplib_qp.sq.q_full_delta = qp->qplib_qp.sq.max_wqe -
qp_attr->cap.max_send_wr;
/*
* Reserving one slot for Phantom WQE. Some application can
* post one extra entry in this case. Allowing this to avoid
* unexpected Queue full condition
*/
qp->qplib_qp.sq.q_full_delta -= 1;
qp->qplib_qp.sq.max_sge = qp_attr->cap.max_send_sge; qp->qplib_qp.sq.max_sge = qp_attr->cap.max_send_sge;
if (qp->qplib_qp.rq.max_wqe) { if (qp->qplib_qp.rq.max_wqe) {
entries = roundup_pow_of_two(qp_attr->cap.max_recv_wr); entries = roundup_pow_of_two(qp_attr->cap.max_recv_wr);
qp->qplib_qp.rq.max_wqe = qp->qplib_qp.rq.max_wqe =
min_t(u32, entries, dev_attr->max_qp_wqes + 1); min_t(u32, entries, dev_attr->max_qp_wqes + 1);
qp->qplib_qp.rq.q_full_delta = qp->qplib_qp.rq.max_wqe -
qp_attr->cap.max_recv_wr;
qp->qplib_qp.rq.max_sge = qp_attr->cap.max_recv_sge; qp->qplib_qp.rq.max_sge = qp_attr->cap.max_recv_sge;
} else { } else {
/* SRQ was used prior, just ignore the RQ caps */ /* SRQ was used prior, just ignore the RQ caps */
@ -1883,6 +2076,22 @@ static int bnxt_re_copy_wr_payload(struct bnxt_re_dev *rdev,
return payload_sz; return payload_sz;
} }
static void bnxt_ud_qp_hw_stall_workaround(struct bnxt_re_qp *qp)
{
if ((qp->ib_qp.qp_type == IB_QPT_UD ||
qp->ib_qp.qp_type == IB_QPT_GSI ||
qp->ib_qp.qp_type == IB_QPT_RAW_ETHERTYPE) &&
qp->qplib_qp.wqe_cnt == BNXT_RE_UD_QP_HW_STALL) {
int qp_attr_mask;
struct ib_qp_attr qp_attr;
qp_attr_mask = IB_QP_STATE;
qp_attr.qp_state = IB_QPS_RTS;
bnxt_re_modify_qp(&qp->ib_qp, &qp_attr, qp_attr_mask, NULL);
qp->qplib_qp.wqe_cnt = 0;
}
}
static int bnxt_re_post_send_shadow_qp(struct bnxt_re_dev *rdev, static int bnxt_re_post_send_shadow_qp(struct bnxt_re_dev *rdev,
struct bnxt_re_qp *qp, struct bnxt_re_qp *qp,
struct ib_send_wr *wr) struct ib_send_wr *wr)
@ -1928,6 +2137,7 @@ static int bnxt_re_post_send_shadow_qp(struct bnxt_re_dev *rdev,
wr = wr->next; wr = wr->next;
} }
bnxt_qplib_post_send_db(&qp->qplib_qp); bnxt_qplib_post_send_db(&qp->qplib_qp);
bnxt_ud_qp_hw_stall_workaround(qp);
spin_unlock_irqrestore(&qp->sq_lock, flags); spin_unlock_irqrestore(&qp->sq_lock, flags);
return rc; return rc;
} }
@ -2024,6 +2234,7 @@ int bnxt_re_post_send(struct ib_qp *ib_qp, struct ib_send_wr *wr,
wr = wr->next; wr = wr->next;
} }
bnxt_qplib_post_send_db(&qp->qplib_qp); bnxt_qplib_post_send_db(&qp->qplib_qp);
bnxt_ud_qp_hw_stall_workaround(qp);
spin_unlock_irqrestore(&qp->sq_lock, flags); spin_unlock_irqrestore(&qp->sq_lock, flags);
return rc; return rc;
@ -2071,7 +2282,10 @@ int bnxt_re_post_recv(struct ib_qp *ib_qp, struct ib_recv_wr *wr,
struct bnxt_re_qp *qp = container_of(ib_qp, struct bnxt_re_qp, ib_qp); struct bnxt_re_qp *qp = container_of(ib_qp, struct bnxt_re_qp, ib_qp);
struct bnxt_qplib_swqe wqe; struct bnxt_qplib_swqe wqe;
int rc = 0, payload_sz = 0; int rc = 0, payload_sz = 0;
unsigned long flags;
u32 count = 0;
spin_lock_irqsave(&qp->rq_lock, flags);
while (wr) { while (wr) {
/* House keeping */ /* House keeping */
memset(&wqe, 0, sizeof(wqe)); memset(&wqe, 0, sizeof(wqe));
@ -2100,9 +2314,21 @@ int bnxt_re_post_recv(struct ib_qp *ib_qp, struct ib_recv_wr *wr,
*bad_wr = wr; *bad_wr = wr;
break; break;
} }
/* Ring DB if the RQEs posted reaches a threshold value */
if (++count >= BNXT_RE_RQ_WQE_THRESHOLD) {
bnxt_qplib_post_recv_db(&qp->qplib_qp);
count = 0;
}
wr = wr->next; wr = wr->next;
} }
bnxt_qplib_post_recv_db(&qp->qplib_qp);
if (count)
bnxt_qplib_post_recv_db(&qp->qplib_qp);
spin_unlock_irqrestore(&qp->rq_lock, flags);
return rc; return rc;
} }
@ -2643,12 +2869,36 @@ static void bnxt_re_process_res_ud_wc(struct ib_wc *wc,
wc->opcode = IB_WC_RECV_RDMA_WITH_IMM; wc->opcode = IB_WC_RECV_RDMA_WITH_IMM;
} }
static int send_phantom_wqe(struct bnxt_re_qp *qp)
{
struct bnxt_qplib_qp *lib_qp = &qp->qplib_qp;
unsigned long flags;
int rc = 0;
spin_lock_irqsave(&qp->sq_lock, flags);
rc = bnxt_re_bind_fence_mw(lib_qp);
if (!rc) {
lib_qp->sq.phantom_wqe_cnt++;
dev_dbg(&lib_qp->sq.hwq.pdev->dev,
"qp %#x sq->prod %#x sw_prod %#x phantom_wqe_cnt %d\n",
lib_qp->id, lib_qp->sq.hwq.prod,
HWQ_CMP(lib_qp->sq.hwq.prod, &lib_qp->sq.hwq),
lib_qp->sq.phantom_wqe_cnt);
}
spin_unlock_irqrestore(&qp->sq_lock, flags);
return rc;
}
int bnxt_re_poll_cq(struct ib_cq *ib_cq, int num_entries, struct ib_wc *wc) int bnxt_re_poll_cq(struct ib_cq *ib_cq, int num_entries, struct ib_wc *wc)
{ {
struct bnxt_re_cq *cq = container_of(ib_cq, struct bnxt_re_cq, ib_cq); struct bnxt_re_cq *cq = container_of(ib_cq, struct bnxt_re_cq, ib_cq);
struct bnxt_re_qp *qp; struct bnxt_re_qp *qp;
struct bnxt_qplib_cqe *cqe; struct bnxt_qplib_cqe *cqe;
int i, ncqe, budget; int i, ncqe, budget;
struct bnxt_qplib_q *sq;
struct bnxt_qplib_qp *lib_qp;
u32 tbl_idx; u32 tbl_idx;
struct bnxt_re_sqp_entries *sqp_entry = NULL; struct bnxt_re_sqp_entries *sqp_entry = NULL;
unsigned long flags; unsigned long flags;
@ -2661,7 +2911,21 @@ int bnxt_re_poll_cq(struct ib_cq *ib_cq, int num_entries, struct ib_wc *wc)
} }
cqe = &cq->cql[0]; cqe = &cq->cql[0];
while (budget) { while (budget) {
ncqe = bnxt_qplib_poll_cq(&cq->qplib_cq, cqe, budget); lib_qp = NULL;
ncqe = bnxt_qplib_poll_cq(&cq->qplib_cq, cqe, budget, &lib_qp);
if (lib_qp) {
sq = &lib_qp->sq;
if (sq->send_phantom) {
qp = container_of(lib_qp,
struct bnxt_re_qp, qplib_qp);
if (send_phantom_wqe(qp) == -ENOMEM)
dev_err(rdev_to_dev(cq->rdev),
"Phantom failed! Scheduled to send again\n");
else
sq->send_phantom = false;
}
}
if (!ncqe) if (!ncqe)
break; break;
@ -2822,6 +3086,12 @@ int bnxt_re_dereg_mr(struct ib_mr *ib_mr)
struct bnxt_re_dev *rdev = mr->rdev; struct bnxt_re_dev *rdev = mr->rdev;
int rc; int rc;
rc = bnxt_qplib_free_mrw(&rdev->qplib_res, &mr->qplib_mr);
if (rc) {
dev_err(rdev_to_dev(rdev), "Dereg MR failed: %#x\n", rc);
return rc;
}
if (mr->npages && mr->pages) { if (mr->npages && mr->pages) {
rc = bnxt_qplib_free_fast_reg_page_list(&rdev->qplib_res, rc = bnxt_qplib_free_fast_reg_page_list(&rdev->qplib_res,
&mr->qplib_frpl); &mr->qplib_frpl);
@ -2829,8 +3099,6 @@ int bnxt_re_dereg_mr(struct ib_mr *ib_mr)
mr->npages = 0; mr->npages = 0;
mr->pages = NULL; mr->pages = NULL;
} }
rc = bnxt_qplib_free_mrw(&rdev->qplib_res, &mr->qplib_mr);
if (!IS_ERR_OR_NULL(mr->ib_umem)) if (!IS_ERR_OR_NULL(mr->ib_umem))
ib_umem_release(mr->ib_umem); ib_umem_release(mr->ib_umem);
@ -2914,97 +3182,52 @@ struct ib_mr *bnxt_re_alloc_mr(struct ib_pd *ib_pd, enum ib_mr_type type,
return ERR_PTR(rc); return ERR_PTR(rc);
} }
/* Fast Memory Regions */ struct ib_mw *bnxt_re_alloc_mw(struct ib_pd *ib_pd, enum ib_mw_type type,
struct ib_fmr *bnxt_re_alloc_fmr(struct ib_pd *ib_pd, int mr_access_flags, struct ib_udata *udata)
struct ib_fmr_attr *fmr_attr)
{ {
struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd); struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
struct bnxt_re_dev *rdev = pd->rdev; struct bnxt_re_dev *rdev = pd->rdev;
struct bnxt_re_fmr *fmr; struct bnxt_re_mw *mw;
int rc; int rc;
if (fmr_attr->max_pages > MAX_PBL_LVL_2_PGS || mw = kzalloc(sizeof(*mw), GFP_KERNEL);
fmr_attr->max_maps > rdev->dev_attr.max_map_per_fmr) { if (!mw)
dev_err(rdev_to_dev(rdev), "Allocate FMR exceeded Max limit");
return ERR_PTR(-ENOMEM);
}
fmr = kzalloc(sizeof(*fmr), GFP_KERNEL);
if (!fmr)
return ERR_PTR(-ENOMEM); return ERR_PTR(-ENOMEM);
mw->rdev = rdev;
mw->qplib_mw.pd = &pd->qplib_pd;
fmr->rdev = rdev; mw->qplib_mw.type = (type == IB_MW_TYPE_1 ?
fmr->qplib_fmr.pd = &pd->qplib_pd; CMDQ_ALLOCATE_MRW_MRW_FLAGS_MW_TYPE1 :
fmr->qplib_fmr.type = CMDQ_ALLOCATE_MRW_MRW_FLAGS_PMR; CMDQ_ALLOCATE_MRW_MRW_FLAGS_MW_TYPE2B);
rc = bnxt_qplib_alloc_mrw(&rdev->qplib_res, &mw->qplib_mw);
rc = bnxt_qplib_alloc_mrw(&rdev->qplib_res, &fmr->qplib_fmr); if (rc) {
if (rc) dev_err(rdev_to_dev(rdev), "Allocate MW failed!");
goto fail; goto fail;
}
mw->ib_mw.rkey = mw->qplib_mw.rkey;
fmr->qplib_fmr.flags = __from_ib_access_flags(mr_access_flags); atomic_inc(&rdev->mw_count);
fmr->ib_fmr.lkey = fmr->qplib_fmr.lkey; return &mw->ib_mw;
fmr->ib_fmr.rkey = fmr->ib_fmr.lkey;
atomic_inc(&rdev->mr_count);
return &fmr->ib_fmr;
fail: fail:
kfree(fmr); kfree(mw);
return ERR_PTR(rc); return ERR_PTR(rc);
} }
int bnxt_re_map_phys_fmr(struct ib_fmr *ib_fmr, u64 *page_list, int list_len, int bnxt_re_dealloc_mw(struct ib_mw *ib_mw)
u64 iova)
{ {
struct bnxt_re_fmr *fmr = container_of(ib_fmr, struct bnxt_re_fmr, struct bnxt_re_mw *mw = container_of(ib_mw, struct bnxt_re_mw, ib_mw);
ib_fmr); struct bnxt_re_dev *rdev = mw->rdev;
struct bnxt_re_dev *rdev = fmr->rdev;
int rc; int rc;
fmr->qplib_fmr.va = iova; rc = bnxt_qplib_free_mrw(&rdev->qplib_res, &mw->qplib_mw);
fmr->qplib_fmr.total_size = list_len * PAGE_SIZE; if (rc) {
dev_err(rdev_to_dev(rdev), "Free MW failed: %#x\n", rc);
rc = bnxt_qplib_reg_mr(&rdev->qplib_res, &fmr->qplib_fmr, page_list, return rc;
list_len, true);
if (rc)
dev_err(rdev_to_dev(rdev), "Failed to map FMR for lkey = 0x%x!",
fmr->ib_fmr.lkey);
return rc;
}
int bnxt_re_unmap_fmr(struct list_head *fmr_list)
{
struct bnxt_re_dev *rdev;
struct bnxt_re_fmr *fmr;
struct ib_fmr *ib_fmr;
int rc = 0;
/* Validate each FMRs inside the fmr_list */
list_for_each_entry(ib_fmr, fmr_list, list) {
fmr = container_of(ib_fmr, struct bnxt_re_fmr, ib_fmr);
rdev = fmr->rdev;
if (rdev) {
rc = bnxt_qplib_dereg_mrw(&rdev->qplib_res,
&fmr->qplib_fmr, true);
if (rc)
break;
}
} }
return rc;
}
int bnxt_re_dealloc_fmr(struct ib_fmr *ib_fmr) kfree(mw);
{ atomic_dec(&rdev->mw_count);
struct bnxt_re_fmr *fmr = container_of(ib_fmr, struct bnxt_re_fmr,
ib_fmr);
struct bnxt_re_dev *rdev = fmr->rdev;
int rc;
rc = bnxt_qplib_free_mrw(&rdev->qplib_res, &fmr->qplib_fmr);
if (rc)
dev_err(rdev_to_dev(rdev), "Failed to free FMR");
kfree(fmr);
atomic_dec(&rdev->mr_count);
return rc; return rc;
} }
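One detail worth noting in bnxt_re_post_recv() above is the doorbell batching: the receive doorbell is rung once every BNXT_RE_RQ_WQE_THRESHOLD (32) posted WQEs and once more at the end for any remainder, instead of once per request. A small stand-alone sketch of that rhythm, with a stub doorbell in place of bnxt_qplib_post_recv_db():

#include <stdio.h>

#define RQ_WQE_THRESHOLD 32	/* mirrors BNXT_RE_RQ_WQE_THRESHOLD */

/* Stub standing in for the hardware doorbell write. */
static void ring_doorbell(void)
{
	puts("doorbell");
}

static void post_recv_batch(int nr_wqes)
{
	int count = 0;
	int i;

	for (i = 0; i < nr_wqes; i++) {
		/* ... queue one receive WQE here ... */
		if (++count >= RQ_WQE_THRESHOLD) {
			ring_doorbell();
			count = 0;
		}
	}
	if (count)		/* flush the tail of the batch */
		ring_doorbell();
}

int main(void)
{
	post_recv_batch(70);	/* expect three doorbells: 32 + 32 + 6 */
	return 0;
}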

View File

@ -44,11 +44,23 @@ struct bnxt_re_gid_ctx {
u32 refcnt; u32 refcnt;
}; };
#define BNXT_RE_FENCE_BYTES 64
struct bnxt_re_fence_data {
u32 size;
u8 va[BNXT_RE_FENCE_BYTES];
dma_addr_t dma_addr;
struct bnxt_re_mr *mr;
struct ib_mw *mw;
struct bnxt_qplib_swqe bind_wqe;
u32 bind_rkey;
};
struct bnxt_re_pd { struct bnxt_re_pd {
struct bnxt_re_dev *rdev; struct bnxt_re_dev *rdev;
struct ib_pd ib_pd; struct ib_pd ib_pd;
struct bnxt_qplib_pd qplib_pd; struct bnxt_qplib_pd qplib_pd;
struct bnxt_qplib_dpi dpi; struct bnxt_qplib_dpi dpi;
struct bnxt_re_fence_data fence;
}; };
struct bnxt_re_ah { struct bnxt_re_ah {
@ -62,6 +74,7 @@ struct bnxt_re_qp {
struct bnxt_re_dev *rdev; struct bnxt_re_dev *rdev;
struct ib_qp ib_qp; struct ib_qp ib_qp;
spinlock_t sq_lock; /* protect sq */ spinlock_t sq_lock; /* protect sq */
spinlock_t rq_lock; /* protect rq */
struct bnxt_qplib_qp qplib_qp; struct bnxt_qplib_qp qplib_qp;
struct ib_umem *sumem; struct ib_umem *sumem;
struct ib_umem *rumem; struct ib_umem *rumem;
@ -181,12 +194,9 @@ int bnxt_re_map_mr_sg(struct ib_mr *ib_mr, struct scatterlist *sg, int sg_nents,
struct ib_mr *bnxt_re_alloc_mr(struct ib_pd *ib_pd, enum ib_mr_type mr_type, struct ib_mr *bnxt_re_alloc_mr(struct ib_pd *ib_pd, enum ib_mr_type mr_type,
u32 max_num_sg); u32 max_num_sg);
int bnxt_re_dereg_mr(struct ib_mr *mr); int bnxt_re_dereg_mr(struct ib_mr *mr);
struct ib_fmr *bnxt_re_alloc_fmr(struct ib_pd *pd, int mr_access_flags, struct ib_mw *bnxt_re_alloc_mw(struct ib_pd *ib_pd, enum ib_mw_type type,
struct ib_fmr_attr *fmr_attr); struct ib_udata *udata);
int bnxt_re_map_phys_fmr(struct ib_fmr *fmr, u64 *page_list, int list_len, int bnxt_re_dealloc_mw(struct ib_mw *mw);
u64 iova);
int bnxt_re_unmap_fmr(struct list_head *fmr_list);
int bnxt_re_dealloc_fmr(struct ib_fmr *fmr);
struct ib_mr *bnxt_re_reg_user_mr(struct ib_pd *pd, u64 start, u64 length, struct ib_mr *bnxt_re_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
u64 virt_addr, int mr_access_flags, u64 virt_addr, int mr_access_flags,
struct ib_udata *udata); struct ib_udata *udata);

View File

@ -507,10 +507,6 @@ static int bnxt_re_register_ib(struct bnxt_re_dev *rdev)
ibdev->dereg_mr = bnxt_re_dereg_mr; ibdev->dereg_mr = bnxt_re_dereg_mr;
ibdev->alloc_mr = bnxt_re_alloc_mr; ibdev->alloc_mr = bnxt_re_alloc_mr;
ibdev->map_mr_sg = bnxt_re_map_mr_sg; ibdev->map_mr_sg = bnxt_re_map_mr_sg;
ibdev->alloc_fmr = bnxt_re_alloc_fmr;
ibdev->map_phys_fmr = bnxt_re_map_phys_fmr;
ibdev->unmap_fmr = bnxt_re_unmap_fmr;
ibdev->dealloc_fmr = bnxt_re_dealloc_fmr;
ibdev->reg_user_mr = bnxt_re_reg_user_mr; ibdev->reg_user_mr = bnxt_re_reg_user_mr;
ibdev->alloc_ucontext = bnxt_re_alloc_ucontext; ibdev->alloc_ucontext = bnxt_re_alloc_ucontext;

View File

@ -284,7 +284,7 @@ int bnxt_qplib_create_qp1(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
{ {
struct bnxt_qplib_rcfw *rcfw = res->rcfw; struct bnxt_qplib_rcfw *rcfw = res->rcfw;
struct cmdq_create_qp1 req; struct cmdq_create_qp1 req;
struct creq_create_qp1_resp *resp; struct creq_create_qp1_resp resp;
struct bnxt_qplib_pbl *pbl; struct bnxt_qplib_pbl *pbl;
struct bnxt_qplib_q *sq = &qp->sq; struct bnxt_qplib_q *sq = &qp->sq;
struct bnxt_qplib_q *rq = &qp->rq; struct bnxt_qplib_q *rq = &qp->rq;
@ -394,31 +394,12 @@ int bnxt_qplib_create_qp1(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
req.pd_id = cpu_to_le32(qp->pd->id); req.pd_id = cpu_to_le32(qp->pd->id);
resp = (struct creq_create_qp1_resp *) rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, (void *)&resp, NULL, 0);
NULL, 0); if (rc)
if (!resp) {
dev_err(&res->pdev->dev, "QPLIB: FP: CREATE_QP1 send failed");
rc = -EINVAL;
goto fail; goto fail;
}
if (!bnxt_qplib_rcfw_wait_for_resp(rcfw, le16_to_cpu(req.cookie))) { qp->id = le32_to_cpu(resp.xid);
/* Cmd timed out */
dev_err(&rcfw->pdev->dev, "QPLIB: FP: CREATE_QP1 timed out");
rc = -ETIMEDOUT;
goto fail;
}
if (resp->status ||
le16_to_cpu(resp->cookie) != le16_to_cpu(req.cookie)) {
dev_err(&rcfw->pdev->dev, "QPLIB: FP: CREATE_QP1 failed ");
dev_err(&rcfw->pdev->dev,
"QPLIB: with status 0x%x cmdq 0x%x resp 0x%x",
resp->status, le16_to_cpu(req.cookie),
le16_to_cpu(resp->cookie));
rc = -EINVAL;
goto fail;
}
qp->id = le32_to_cpu(resp->xid);
qp->cur_qp_state = CMDQ_MODIFY_QP_NEW_STATE_RESET; qp->cur_qp_state = CMDQ_MODIFY_QP_NEW_STATE_RESET;
sq->flush_in_progress = false; sq->flush_in_progress = false;
rq->flush_in_progress = false; rq->flush_in_progress = false;
@ -442,7 +423,7 @@ int bnxt_qplib_create_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
struct bnxt_qplib_rcfw *rcfw = res->rcfw; struct bnxt_qplib_rcfw *rcfw = res->rcfw;
struct sq_send *hw_sq_send_hdr, **hw_sq_send_ptr; struct sq_send *hw_sq_send_hdr, **hw_sq_send_ptr;
struct cmdq_create_qp req; struct cmdq_create_qp req;
struct creq_create_qp_resp *resp; struct creq_create_qp_resp resp;
struct bnxt_qplib_pbl *pbl; struct bnxt_qplib_pbl *pbl;
struct sq_psn_search **psn_search_ptr; struct sq_psn_search **psn_search_ptr;
unsigned long int psn_search, poff = 0; unsigned long int psn_search, poff = 0;
@ -627,31 +608,12 @@ int bnxt_qplib_create_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
} }
req.pd_id = cpu_to_le32(qp->pd->id); req.pd_id = cpu_to_le32(qp->pd->id);
resp = (struct creq_create_qp_resp *) rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, (void *)&resp, NULL, 0);
NULL, 0); if (rc)
if (!resp) {
dev_err(&rcfw->pdev->dev, "QPLIB: FP: CREATE_QP send failed");
rc = -EINVAL;
goto fail; goto fail;
}
if (!bnxt_qplib_rcfw_wait_for_resp(rcfw, le16_to_cpu(req.cookie))) { qp->id = le32_to_cpu(resp.xid);
/* Cmd timed out */
dev_err(&rcfw->pdev->dev, "QPLIB: FP: CREATE_QP timed out");
rc = -ETIMEDOUT;
goto fail;
}
if (resp->status ||
le16_to_cpu(resp->cookie) != le16_to_cpu(req.cookie)) {
dev_err(&rcfw->pdev->dev, "QPLIB: FP: CREATE_QP failed ");
dev_err(&rcfw->pdev->dev,
"QPLIB: with status 0x%x cmdq 0x%x resp 0x%x",
resp->status, le16_to_cpu(req.cookie),
le16_to_cpu(resp->cookie));
rc = -EINVAL;
goto fail;
}
qp->id = le32_to_cpu(resp->xid);
qp->cur_qp_state = CMDQ_MODIFY_QP_NEW_STATE_RESET; qp->cur_qp_state = CMDQ_MODIFY_QP_NEW_STATE_RESET;
sq->flush_in_progress = false; sq->flush_in_progress = false;
rq->flush_in_progress = false; rq->flush_in_progress = false;
@ -769,10 +731,11 @@ int bnxt_qplib_modify_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
{ {
struct bnxt_qplib_rcfw *rcfw = res->rcfw; struct bnxt_qplib_rcfw *rcfw = res->rcfw;
struct cmdq_modify_qp req; struct cmdq_modify_qp req;
struct creq_modify_qp_resp *resp; struct creq_modify_qp_resp resp;
u16 cmd_flags = 0, pkey; u16 cmd_flags = 0, pkey;
u32 temp32[4]; u32 temp32[4];
u32 bmask; u32 bmask;
int rc;
RCFW_CMD_PREP(req, MODIFY_QP, cmd_flags); RCFW_CMD_PREP(req, MODIFY_QP, cmd_flags);
@ -862,27 +825,10 @@ int bnxt_qplib_modify_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
req.vlan_pcp_vlan_dei_vlan_id = cpu_to_le16(qp->vlan_id); req.vlan_pcp_vlan_dei_vlan_id = cpu_to_le16(qp->vlan_id);
resp = (struct creq_modify_qp_resp *) rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, (void *)&resp, NULL, 0);
NULL, 0); if (rc)
if (!resp) { return rc;
dev_err(&rcfw->pdev->dev, "QPLIB: FP: MODIFY_QP send failed");
return -EINVAL;
}
if (!bnxt_qplib_rcfw_wait_for_resp(rcfw, le16_to_cpu(req.cookie))) {
/* Cmd timed out */
dev_err(&rcfw->pdev->dev, "QPLIB: FP: MODIFY_QP timed out");
return -ETIMEDOUT;
}
if (resp->status ||
le16_to_cpu(resp->cookie) != le16_to_cpu(req.cookie)) {
dev_err(&rcfw->pdev->dev, "QPLIB: FP: MODIFY_QP failed ");
dev_err(&rcfw->pdev->dev,
"QPLIB: with status 0x%x cmdq 0x%x resp 0x%x",
resp->status, le16_to_cpu(req.cookie),
le16_to_cpu(resp->cookie));
return -EINVAL;
}
qp->cur_qp_state = qp->state; qp->cur_qp_state = qp->state;
return 0; return 0;
} }
@ -891,37 +837,26 @@ int bnxt_qplib_query_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
{ {
struct bnxt_qplib_rcfw *rcfw = res->rcfw; struct bnxt_qplib_rcfw *rcfw = res->rcfw;
struct cmdq_query_qp req; struct cmdq_query_qp req;
struct creq_query_qp_resp *resp; struct creq_query_qp_resp resp;
struct bnxt_qplib_rcfw_sbuf *sbuf;
struct creq_query_qp_resp_sb *sb; struct creq_query_qp_resp_sb *sb;
u16 cmd_flags = 0; u16 cmd_flags = 0;
u32 temp32[4]; u32 temp32[4];
int i; int i, rc = 0;
RCFW_CMD_PREP(req, QUERY_QP, cmd_flags); RCFW_CMD_PREP(req, QUERY_QP, cmd_flags);
sbuf = bnxt_qplib_rcfw_alloc_sbuf(rcfw, sizeof(*sb));
if (!sbuf)
return -ENOMEM;
sb = sbuf->sb;
req.qp_cid = cpu_to_le32(qp->id); req.qp_cid = cpu_to_le32(qp->id);
req.resp_size = sizeof(*sb) / BNXT_QPLIB_CMDQE_UNITS; req.resp_size = sizeof(*sb) / BNXT_QPLIB_CMDQE_UNITS;
resp = (struct creq_query_qp_resp *) rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, (void *)&resp,
bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, (void *)sbuf, 0);
(void **)&sb, 0); if (rc)
if (!resp) { goto bail;
dev_err(&rcfw->pdev->dev, "QPLIB: FP: QUERY_QP send failed");
return -EINVAL;
}
if (!bnxt_qplib_rcfw_wait_for_resp(rcfw, le16_to_cpu(req.cookie))) {
/* Cmd timed out */
dev_err(&rcfw->pdev->dev, "QPLIB: FP: QUERY_QP timed out");
return -ETIMEDOUT;
}
if (resp->status ||
le16_to_cpu(resp->cookie) != le16_to_cpu(req.cookie)) {
dev_err(&rcfw->pdev->dev, "QPLIB: FP: QUERY_QP failed ");
dev_err(&rcfw->pdev->dev,
"QPLIB: with status 0x%x cmdq 0x%x resp 0x%x",
resp->status, le16_to_cpu(req.cookie),
le16_to_cpu(resp->cookie));
return -EINVAL;
}
/* Extract the context from the side buffer */ /* Extract the context from the side buffer */
qp->state = sb->en_sqd_async_notify_state & qp->state = sb->en_sqd_async_notify_state &
CREQ_QUERY_QP_RESP_SB_STATE_MASK; CREQ_QUERY_QP_RESP_SB_STATE_MASK;
@ -976,7 +911,9 @@ int bnxt_qplib_query_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
qp->dest_qpn = le32_to_cpu(sb->dest_qp_id); qp->dest_qpn = le32_to_cpu(sb->dest_qp_id);
memcpy(qp->smac, sb->src_mac, 6); memcpy(qp->smac, sb->src_mac, 6);
qp->vlan_id = le16_to_cpu(sb->vlan_pcp_vlan_dei_vlan_id); qp->vlan_id = le16_to_cpu(sb->vlan_pcp_vlan_dei_vlan_id);
return 0; bail:
bnxt_qplib_rcfw_free_sbuf(rcfw, sbuf);
return rc;
} }
static void __clean_cq(struct bnxt_qplib_cq *cq, u64 qp) static void __clean_cq(struct bnxt_qplib_cq *cq, u64 qp)
@ -1021,34 +958,18 @@ int bnxt_qplib_destroy_qp(struct bnxt_qplib_res *res,
{ {
struct bnxt_qplib_rcfw *rcfw = res->rcfw; struct bnxt_qplib_rcfw *rcfw = res->rcfw;
struct cmdq_destroy_qp req; struct cmdq_destroy_qp req;
struct creq_destroy_qp_resp *resp; struct creq_destroy_qp_resp resp;
unsigned long flags; unsigned long flags;
u16 cmd_flags = 0; u16 cmd_flags = 0;
int rc;
RCFW_CMD_PREP(req, DESTROY_QP, cmd_flags); RCFW_CMD_PREP(req, DESTROY_QP, cmd_flags);
req.qp_cid = cpu_to_le32(qp->id); req.qp_cid = cpu_to_le32(qp->id);
resp = (struct creq_destroy_qp_resp *) rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, (void *)&resp, NULL, 0);
NULL, 0); if (rc)
if (!resp) { return rc;
dev_err(&rcfw->pdev->dev, "QPLIB: FP: DESTROY_QP send failed");
return -EINVAL;
}
if (!bnxt_qplib_rcfw_wait_for_resp(rcfw, le16_to_cpu(req.cookie))) {
/* Cmd timed out */
dev_err(&rcfw->pdev->dev, "QPLIB: FP: DESTROY_QP timed out");
return -ETIMEDOUT;
}
if (resp->status ||
le16_to_cpu(resp->cookie) != le16_to_cpu(req.cookie)) {
dev_err(&rcfw->pdev->dev, "QPLIB: FP: DESTROY_QP failed ");
dev_err(&rcfw->pdev->dev,
"QPLIB: with status 0x%x cmdq 0x%x resp 0x%x",
resp->status, le16_to_cpu(req.cookie),
le16_to_cpu(resp->cookie));
return -EINVAL;
}
/* Must walk the associated CQs to nullified the QP ptr */ /* Must walk the associated CQs to nullified the QP ptr */
spin_lock_irqsave(&qp->scq->hwq.lock, flags); spin_lock_irqsave(&qp->scq->hwq.lock, flags);
@ -1162,8 +1083,12 @@ int bnxt_qplib_post_send(struct bnxt_qplib_qp *qp,
rc = -EINVAL; rc = -EINVAL;
goto done; goto done;
} }
if (HWQ_CMP((sq->hwq.prod + 1), &sq->hwq) ==
HWQ_CMP(sq->hwq.cons, &sq->hwq)) { if (bnxt_qplib_queue_full(sq)) {
dev_err(&sq->hwq.pdev->dev,
"QPLIB: prod = %#x cons = %#x qdepth = %#x delta = %#x",
sq->hwq.prod, sq->hwq.cons, sq->hwq.max_elements,
sq->q_full_delta);
rc = -ENOMEM; rc = -ENOMEM;
goto done; goto done;
} }
@ -1373,6 +1298,9 @@ int bnxt_qplib_post_send(struct bnxt_qplib_qp *qp,
} }
sq->hwq.prod++; sq->hwq.prod++;
qp->wqe_cnt++;
done: done:
return rc; return rc;
} }
@ -1411,8 +1339,7 @@ int bnxt_qplib_post_recv(struct bnxt_qplib_qp *qp,
rc = -EINVAL; rc = -EINVAL;
goto done; goto done;
} }
if (HWQ_CMP((rq->hwq.prod + 1), &rq->hwq) == if (bnxt_qplib_queue_full(rq)) {
HWQ_CMP(rq->hwq.cons, &rq->hwq)) {
dev_err(&rq->hwq.pdev->dev, dev_err(&rq->hwq.pdev->dev,
"QPLIB: FP: QP (0x%x) RQ is full!", qp->id); "QPLIB: FP: QP (0x%x) RQ is full!", qp->id);
rc = -EINVAL; rc = -EINVAL;
@ -1483,7 +1410,7 @@ int bnxt_qplib_create_cq(struct bnxt_qplib_res *res, struct bnxt_qplib_cq *cq)
{ {
struct bnxt_qplib_rcfw *rcfw = res->rcfw; struct bnxt_qplib_rcfw *rcfw = res->rcfw;
struct cmdq_create_cq req; struct cmdq_create_cq req;
struct creq_create_cq_resp *resp; struct creq_create_cq_resp resp;
struct bnxt_qplib_pbl *pbl; struct bnxt_qplib_pbl *pbl;
u16 cmd_flags = 0; u16 cmd_flags = 0;
int rc; int rc;
@ -1525,30 +1452,12 @@ int bnxt_qplib_create_cq(struct bnxt_qplib_res *res, struct bnxt_qplib_cq *cq)
(cq->cnq_hw_ring_id & CMDQ_CREATE_CQ_CNQ_ID_MASK) << (cq->cnq_hw_ring_id & CMDQ_CREATE_CQ_CNQ_ID_MASK) <<
CMDQ_CREATE_CQ_CNQ_ID_SFT); CMDQ_CREATE_CQ_CNQ_ID_SFT);
resp = (struct creq_create_cq_resp *) rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, (void *)&resp, NULL, 0);
NULL, 0); if (rc)
if (!resp) {
dev_err(&rcfw->pdev->dev, "QPLIB: FP: CREATE_CQ send failed");
return -EINVAL;
}
if (!bnxt_qplib_rcfw_wait_for_resp(rcfw, le16_to_cpu(req.cookie))) {
/* Cmd timed out */
dev_err(&rcfw->pdev->dev, "QPLIB: FP: CREATE_CQ timed out");
rc = -ETIMEDOUT;
goto fail; goto fail;
}
if (resp->status || cq->id = le32_to_cpu(resp.xid);
le16_to_cpu(resp->cookie) != le16_to_cpu(req.cookie)) {
dev_err(&rcfw->pdev->dev, "QPLIB: FP: CREATE_CQ failed ");
dev_err(&rcfw->pdev->dev,
"QPLIB: with status 0x%x cmdq 0x%x resp 0x%x",
resp->status, le16_to_cpu(req.cookie),
le16_to_cpu(resp->cookie));
rc = -EINVAL;
goto fail;
}
cq->id = le32_to_cpu(resp->xid);
cq->dbr_base = res->dpi_tbl.dbr_bar_reg_iomem; cq->dbr_base = res->dpi_tbl.dbr_bar_reg_iomem;
cq->period = BNXT_QPLIB_QUEUE_START_PERIOD; cq->period = BNXT_QPLIB_QUEUE_START_PERIOD;
init_waitqueue_head(&cq->waitq); init_waitqueue_head(&cq->waitq);
@ -1566,33 +1475,17 @@ int bnxt_qplib_destroy_cq(struct bnxt_qplib_res *res, struct bnxt_qplib_cq *cq)
{ {
struct bnxt_qplib_rcfw *rcfw = res->rcfw; struct bnxt_qplib_rcfw *rcfw = res->rcfw;
struct cmdq_destroy_cq req; struct cmdq_destroy_cq req;
struct creq_destroy_cq_resp *resp; struct creq_destroy_cq_resp resp;
u16 cmd_flags = 0; u16 cmd_flags = 0;
int rc;
RCFW_CMD_PREP(req, DESTROY_CQ, cmd_flags); RCFW_CMD_PREP(req, DESTROY_CQ, cmd_flags);
req.cq_cid = cpu_to_le32(cq->id); req.cq_cid = cpu_to_le32(cq->id);
resp = (struct creq_destroy_cq_resp *) rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, (void *)&resp, NULL, 0);
NULL, 0); if (rc)
if (!resp) { return rc;
dev_err(&rcfw->pdev->dev, "QPLIB: FP: DESTROY_CQ send failed");
return -EINVAL;
}
if (!bnxt_qplib_rcfw_wait_for_resp(rcfw, le16_to_cpu(req.cookie))) {
/* Cmd timed out */
dev_err(&rcfw->pdev->dev, "QPLIB: FP: DESTROY_CQ timed out");
return -ETIMEDOUT;
}
if (resp->status ||
le16_to_cpu(resp->cookie) != le16_to_cpu(req.cookie)) {
dev_err(&rcfw->pdev->dev, "QPLIB: FP: DESTROY_CQ failed ");
dev_err(&rcfw->pdev->dev,
"QPLIB: with status 0x%x cmdq 0x%x resp 0x%x",
resp->status, le16_to_cpu(req.cookie),
le16_to_cpu(resp->cookie));
return -EINVAL;
}
bnxt_qplib_free_hwq(res->pdev, &cq->hwq); bnxt_qplib_free_hwq(res->pdev, &cq->hwq);
return 0; return 0;
} }
@ -1664,14 +1557,113 @@ static int __flush_rq(struct bnxt_qplib_q *rq, struct bnxt_qplib_qp *qp,
return rc; return rc;
} }
/* Note: SQE is valid from sw_sq_cons up to cqe_sq_cons (exclusive)
 * CQE is tracked from sw_cq_cons to max_elements but valid only if VALID=1
*/
static int do_wa9060(struct bnxt_qplib_qp *qp, struct bnxt_qplib_cq *cq,
u32 cq_cons, u32 sw_sq_cons, u32 cqe_sq_cons)
{
struct bnxt_qplib_q *sq = &qp->sq;
struct bnxt_qplib_swq *swq;
u32 peek_sw_cq_cons, peek_raw_cq_cons, peek_sq_cons_idx;
struct cq_base *peek_hwcqe, **peek_hw_cqe_ptr;
struct cq_req *peek_req_hwcqe;
struct bnxt_qplib_qp *peek_qp;
struct bnxt_qplib_q *peek_sq;
int i, rc = 0;
/* Normal mode */
/* Check for the psn_search marking before completing */
swq = &sq->swq[sw_sq_cons];
if (swq->psn_search &&
le32_to_cpu(swq->psn_search->flags_next_psn) & 0x80000000) {
/* Unmark */
swq->psn_search->flags_next_psn = cpu_to_le32
(le32_to_cpu(swq->psn_search->flags_next_psn)
& ~0x80000000);
dev_dbg(&cq->hwq.pdev->dev,
"FP: Process Req cq_cons=0x%x qp=0x%x sq cons sw=0x%x cqe=0x%x marked!\n",
cq_cons, qp->id, sw_sq_cons, cqe_sq_cons);
sq->condition = true;
sq->send_phantom = true;
/* TODO: Only ARM if the previous SQE is ARMALL */
bnxt_qplib_arm_cq(cq, DBR_DBR_TYPE_CQ_ARMALL);
rc = -EAGAIN;
goto out;
}
if (sq->condition) {
/* Peek at the completions */
peek_raw_cq_cons = cq->hwq.cons;
peek_sw_cq_cons = cq_cons;
i = cq->hwq.max_elements;
while (i--) {
peek_sw_cq_cons = HWQ_CMP((peek_sw_cq_cons), &cq->hwq);
peek_hw_cqe_ptr = (struct cq_base **)cq->hwq.pbl_ptr;
peek_hwcqe = &peek_hw_cqe_ptr[CQE_PG(peek_sw_cq_cons)]
[CQE_IDX(peek_sw_cq_cons)];
/* If the next hwcqe is VALID */
if (CQE_CMP_VALID(peek_hwcqe, peek_raw_cq_cons,
cq->hwq.max_elements)) {
/* If the next hwcqe is a REQ */
if ((peek_hwcqe->cqe_type_toggle &
CQ_BASE_CQE_TYPE_MASK) ==
CQ_BASE_CQE_TYPE_REQ) {
peek_req_hwcqe = (struct cq_req *)
peek_hwcqe;
peek_qp = (struct bnxt_qplib_qp *)
((unsigned long)
le64_to_cpu
(peek_req_hwcqe->qp_handle));
peek_sq = &peek_qp->sq;
peek_sq_cons_idx = HWQ_CMP(le16_to_cpu(
peek_req_hwcqe->sq_cons_idx) - 1
, &sq->hwq);
/* If the hwcqe's sq's wr_id matches */
if (peek_sq == sq &&
sq->swq[peek_sq_cons_idx].wr_id ==
BNXT_QPLIB_FENCE_WRID) {
/*
* Unbreak only if the phantom
* comes back
*/
dev_dbg(&cq->hwq.pdev->dev,
"FP:Got Phantom CQE");
sq->condition = false;
sq->single = true;
rc = 0;
goto out;
}
}
/* Valid but not the phantom, so keep looping */
} else {
/* Not valid yet, just exit and wait */
rc = -EINVAL;
goto out;
}
peek_sw_cq_cons++;
peek_raw_cq_cons++;
}
dev_err(&cq->hwq.pdev->dev,
"Should not have come here! cq_cons=0x%x qp=0x%x sq cons sw=0x%x hw=0x%x",
cq_cons, qp->id, sw_sq_cons, cqe_sq_cons);
rc = -EINVAL;
}
out:
return rc;
}
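A hedged sketch of the caller-side contract for the workaround above; the ib_verbs-side handler is not part of this hunk, so bnxt_re_post_phantom_send() is a hypothetical name. When polling bails out with lib_qp set, one extra send WQE tagged BNXT_QPLIB_FENCE_WRID is expected to be posted so the stalled SQ completion stream can resynchronize; process_req later skips that wr_id when fabricating user-visible CQEs.

	struct bnxt_qplib_qp *lib_qp = NULL;
	int ncqe;

	ncqe = bnxt_qplib_poll_cq(&cq->qplib_cq, cqe, budget, &lib_qp);
	if (lib_qp)
		bnxt_re_post_phantom_send(lib_qp);	/* hypothetical helper */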
static int bnxt_qplib_cq_process_req(struct bnxt_qplib_cq *cq, static int bnxt_qplib_cq_process_req(struct bnxt_qplib_cq *cq,
struct cq_req *hwcqe, struct cq_req *hwcqe,
struct bnxt_qplib_cqe **pcqe, int *budget) struct bnxt_qplib_cqe **pcqe, int *budget,
u32 cq_cons, struct bnxt_qplib_qp **lib_qp)
{ {
struct bnxt_qplib_qp *qp; struct bnxt_qplib_qp *qp;
struct bnxt_qplib_q *sq; struct bnxt_qplib_q *sq;
struct bnxt_qplib_cqe *cqe; struct bnxt_qplib_cqe *cqe;
u32 sw_cons, cqe_cons; u32 sw_sq_cons, cqe_sq_cons;
struct bnxt_qplib_swq *swq;
int rc = 0; int rc = 0;
qp = (struct bnxt_qplib_qp *)((unsigned long) qp = (struct bnxt_qplib_qp *)((unsigned long)
@ -1683,13 +1675,13 @@ static int bnxt_qplib_cq_process_req(struct bnxt_qplib_cq *cq,
} }
sq = &qp->sq; sq = &qp->sq;
cqe_cons = HWQ_CMP(le16_to_cpu(hwcqe->sq_cons_idx), &sq->hwq); cqe_sq_cons = HWQ_CMP(le16_to_cpu(hwcqe->sq_cons_idx), &sq->hwq);
if (cqe_cons > sq->hwq.max_elements) { if (cqe_sq_cons > sq->hwq.max_elements) {
dev_err(&cq->hwq.pdev->dev, dev_err(&cq->hwq.pdev->dev,
"QPLIB: FP: CQ Process req reported "); "QPLIB: FP: CQ Process req reported ");
dev_err(&cq->hwq.pdev->dev, dev_err(&cq->hwq.pdev->dev,
"QPLIB: sq_cons_idx 0x%x which exceeded max 0x%x", "QPLIB: sq_cons_idx 0x%x which exceeded max 0x%x",
cqe_cons, sq->hwq.max_elements); cqe_sq_cons, sq->hwq.max_elements);
return -EINVAL; return -EINVAL;
} }
/* If we were in the middle of flushing the SQ, continue */ /* If we were in the middle of flushing the SQ, continue */
@ -1698,53 +1690,74 @@ static int bnxt_qplib_cq_process_req(struct bnxt_qplib_cq *cq,
/* Require to walk the sq's swq to fabricate CQEs for all previously /* Require to walk the sq's swq to fabricate CQEs for all previously
* signaled SWQEs due to CQE aggregation from the current sq cons * signaled SWQEs due to CQE aggregation from the current sq cons
* to the cqe_cons * to the cqe_sq_cons
*/ */
cqe = *pcqe; cqe = *pcqe;
while (*budget) { while (*budget) {
sw_cons = HWQ_CMP(sq->hwq.cons, &sq->hwq); sw_sq_cons = HWQ_CMP(sq->hwq.cons, &sq->hwq);
if (sw_cons == cqe_cons) if (sw_sq_cons == cqe_sq_cons)
/* Done */
break; break;
swq = &sq->swq[sw_sq_cons];
memset(cqe, 0, sizeof(*cqe)); memset(cqe, 0, sizeof(*cqe));
cqe->opcode = CQ_BASE_CQE_TYPE_REQ; cqe->opcode = CQ_BASE_CQE_TYPE_REQ;
cqe->qp_handle = (u64)(unsigned long)qp; cqe->qp_handle = (u64)(unsigned long)qp;
cqe->src_qp = qp->id; cqe->src_qp = qp->id;
cqe->wr_id = sq->swq[sw_cons].wr_id; cqe->wr_id = swq->wr_id;
cqe->type = sq->swq[sw_cons].type; if (cqe->wr_id == BNXT_QPLIB_FENCE_WRID)
goto skip;
cqe->type = swq->type;
/* For the last CQE, check for status. For errors, regardless /* For the last CQE, check for status. For errors, regardless
* of the request being signaled or not, it must complete with * of the request being signaled or not, it must complete with
* the hwcqe error status * the hwcqe error status
*/ */
if (HWQ_CMP((sw_cons + 1), &sq->hwq) == cqe_cons && if (HWQ_CMP((sw_sq_cons + 1), &sq->hwq) == cqe_sq_cons &&
hwcqe->status != CQ_REQ_STATUS_OK) { hwcqe->status != CQ_REQ_STATUS_OK) {
cqe->status = hwcqe->status; cqe->status = hwcqe->status;
dev_err(&cq->hwq.pdev->dev, dev_err(&cq->hwq.pdev->dev,
"QPLIB: FP: CQ Processed Req "); "QPLIB: FP: CQ Processed Req ");
dev_err(&cq->hwq.pdev->dev, dev_err(&cq->hwq.pdev->dev,
"QPLIB: wr_id[%d] = 0x%llx with status 0x%x", "QPLIB: wr_id[%d] = 0x%llx with status 0x%x",
sw_cons, cqe->wr_id, cqe->status); sw_sq_cons, cqe->wr_id, cqe->status);
cqe++; cqe++;
(*budget)--; (*budget)--;
sq->flush_in_progress = true; sq->flush_in_progress = true;
/* Must block new posting of SQ and RQ */ /* Must block new posting of SQ and RQ */
qp->state = CMDQ_MODIFY_QP_NEW_STATE_ERR; qp->state = CMDQ_MODIFY_QP_NEW_STATE_ERR;
sq->condition = false;
sq->single = false;
} else { } else {
if (sq->swq[sw_cons].flags & if (swq->flags & SQ_SEND_FLAGS_SIGNAL_COMP) {
SQ_SEND_FLAGS_SIGNAL_COMP) { /* Before we complete, do WA 9060 */
if (do_wa9060(qp, cq, cq_cons, sw_sq_cons,
cqe_sq_cons)) {
*lib_qp = qp;
goto out;
}
cqe->status = CQ_REQ_STATUS_OK; cqe->status = CQ_REQ_STATUS_OK;
cqe++; cqe++;
(*budget)--; (*budget)--;
} }
} }
skip:
sq->hwq.cons++; sq->hwq.cons++;
if (sq->single)
break;
} }
out:
*pcqe = cqe; *pcqe = cqe;
if (!*budget && HWQ_CMP(sq->hwq.cons, &sq->hwq) != cqe_cons) { if (HWQ_CMP(sq->hwq.cons, &sq->hwq) != cqe_sq_cons) {
/* Out of budget */ /* Out of budget */
rc = -EAGAIN; rc = -EAGAIN;
goto done; goto done;
} }
/*
* Back to normal completion mode only after it has completed all of
* the WC for this CQE
*/
sq->single = false;
if (!sq->flush_in_progress) if (!sq->flush_in_progress)
goto done; goto done;
flush: flush:
@ -2074,7 +2087,7 @@ static int bnxt_qplib_cq_process_cutoff(struct bnxt_qplib_cq *cq,
} }
int bnxt_qplib_poll_cq(struct bnxt_qplib_cq *cq, struct bnxt_qplib_cqe *cqe, int bnxt_qplib_poll_cq(struct bnxt_qplib_cq *cq, struct bnxt_qplib_cqe *cqe,
int num_cqes) int num_cqes, struct bnxt_qplib_qp **lib_qp)
{ {
struct cq_base *hw_cqe, **hw_cqe_ptr; struct cq_base *hw_cqe, **hw_cqe_ptr;
unsigned long flags; unsigned long flags;
@ -2099,7 +2112,8 @@ int bnxt_qplib_poll_cq(struct bnxt_qplib_cq *cq, struct bnxt_qplib_cqe *cqe,
case CQ_BASE_CQE_TYPE_REQ: case CQ_BASE_CQE_TYPE_REQ:
rc = bnxt_qplib_cq_process_req(cq, rc = bnxt_qplib_cq_process_req(cq,
(struct cq_req *)hw_cqe, (struct cq_req *)hw_cqe,
&cqe, &budget); &cqe, &budget,
sw_cons, lib_qp);
break; break;
case CQ_BASE_CQE_TYPE_RES_RC: case CQ_BASE_CQE_TYPE_RES_RC:
rc = bnxt_qplib_cq_process_res_rc(cq, rc = bnxt_qplib_cq_process_res_rc(cq,

View File

@ -88,6 +88,7 @@ struct bnxt_qplib_swq {
struct bnxt_qplib_swqe { struct bnxt_qplib_swqe {
/* General */ /* General */
#define BNXT_QPLIB_FENCE_WRID 0x46454E43 /* "FENC" */
u64 wr_id; u64 wr_id;
u8 reqs_type; u8 reqs_type;
u8 type; u8 type;
@ -216,9 +217,16 @@ struct bnxt_qplib_q {
struct scatterlist *sglist; struct scatterlist *sglist;
u32 nmap; u32 nmap;
u32 max_wqe; u32 max_wqe;
u16 q_full_delta;
u16 max_sge; u16 max_sge;
u32 psn; u32 psn;
bool flush_in_progress; bool flush_in_progress;
bool condition;
bool single;
bool send_phantom;
u32 phantom_wqe_cnt;
u32 phantom_cqe_cnt;
u32 next_cq_cons;
}; };
struct bnxt_qplib_qp { struct bnxt_qplib_qp {
@ -242,6 +250,7 @@ struct bnxt_qplib_qp {
u8 timeout; u8 timeout;
u8 retry_cnt; u8 retry_cnt;
u8 rnr_retry; u8 rnr_retry;
u64 wqe_cnt;
u32 min_rnr_timer; u32 min_rnr_timer;
u32 max_rd_atomic; u32 max_rd_atomic;
u32 max_dest_rd_atomic; u32 max_dest_rd_atomic;
@ -301,6 +310,13 @@ struct bnxt_qplib_qp {
(!!((hdr)->cqe_type_toggle & CQ_BASE_TOGGLE) == \ (!!((hdr)->cqe_type_toggle & CQ_BASE_TOGGLE) == \
!((raw_cons) & (cp_bit))) !((raw_cons) & (cp_bit)))
static inline bool bnxt_qplib_queue_full(struct bnxt_qplib_q *qplib_q)
{
return HWQ_CMP((qplib_q->hwq.prod + qplib_q->q_full_delta),
&qplib_q->hwq) == HWQ_CMP(qplib_q->hwq.cons,
&qplib_q->hwq);
}
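A standalone illustration (not driver code) of the arithmetic behind the inline above: with a power-of-two ring, masking the index wraps it, so the queue is reported full once the producer is q_full_delta slots short of wrapping onto the consumer, reserving those slots for fence/phantom WQEs. The ring size and deltas below are made-up numbers.

	#include <stdio.h>

	#define RING_SIZE	16u			/* must be a power of two */
	#define RING_CMP(idx)	((idx) & (RING_SIZE - 1))

	static int ring_full(unsigned int prod, unsigned int cons,
			     unsigned int full_delta)
	{
		/* Mirrors bnxt_qplib_queue_full(): full when prod + delta
		 * lands on cons modulo the ring size. */
		return RING_CMP(prod + full_delta) == RING_CMP(cons);
	}

	int main(void)
	{
		printf("%d\n", ring_full(14, 0, 2));	/* 1: 14 used, 2 reserved */
		printf("%d\n", ring_full(13, 0, 2));	/* 0: still one free slot */
		return 0;
	}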
struct bnxt_qplib_cqe { struct bnxt_qplib_cqe {
u8 status; u8 status;
u8 type; u8 type;
@ -432,7 +448,7 @@ int bnxt_qplib_post_recv(struct bnxt_qplib_qp *qp,
int bnxt_qplib_create_cq(struct bnxt_qplib_res *res, struct bnxt_qplib_cq *cq); int bnxt_qplib_create_cq(struct bnxt_qplib_res *res, struct bnxt_qplib_cq *cq);
int bnxt_qplib_destroy_cq(struct bnxt_qplib_res *res, struct bnxt_qplib_cq *cq); int bnxt_qplib_destroy_cq(struct bnxt_qplib_res *res, struct bnxt_qplib_cq *cq);
int bnxt_qplib_poll_cq(struct bnxt_qplib_cq *cq, struct bnxt_qplib_cqe *cqe, int bnxt_qplib_poll_cq(struct bnxt_qplib_cq *cq, struct bnxt_qplib_cqe *cqe,
int num); int num, struct bnxt_qplib_qp **qp);
void bnxt_qplib_req_notify_cq(struct bnxt_qplib_cq *cq, u32 arm_type); void bnxt_qplib_req_notify_cq(struct bnxt_qplib_cq *cq, u32 arm_type);
void bnxt_qplib_free_nq(struct bnxt_qplib_nq *nq); void bnxt_qplib_free_nq(struct bnxt_qplib_nq *nq);
int bnxt_qplib_alloc_nq(struct pci_dev *pdev, struct bnxt_qplib_nq *nq); int bnxt_qplib_alloc_nq(struct pci_dev *pdev, struct bnxt_qplib_nq *nq);

View File

@ -39,72 +39,55 @@
#include <linux/spinlock.h> #include <linux/spinlock.h>
#include <linux/pci.h> #include <linux/pci.h>
#include <linux/prefetch.h> #include <linux/prefetch.h>
#include <linux/delay.h>
#include "roce_hsi.h" #include "roce_hsi.h"
#include "qplib_res.h" #include "qplib_res.h"
#include "qplib_rcfw.h" #include "qplib_rcfw.h"
static void bnxt_qplib_service_creq(unsigned long data); static void bnxt_qplib_service_creq(unsigned long data);
/* Hardware communication channel */ /* Hardware communication channel */
int bnxt_qplib_rcfw_wait_for_resp(struct bnxt_qplib_rcfw *rcfw, u16 cookie) static int __wait_for_resp(struct bnxt_qplib_rcfw *rcfw, u16 cookie)
{ {
u16 cbit; u16 cbit;
int rc; int rc;
cookie &= RCFW_MAX_COOKIE_VALUE;
cbit = cookie % RCFW_MAX_OUTSTANDING_CMD; cbit = cookie % RCFW_MAX_OUTSTANDING_CMD;
if (!test_bit(cbit, rcfw->cmdq_bitmap))
dev_warn(&rcfw->pdev->dev,
"QPLIB: CMD bit %d for cookie 0x%x is not set?",
cbit, cookie);
rc = wait_event_timeout(rcfw->waitq, rc = wait_event_timeout(rcfw->waitq,
!test_bit(cbit, rcfw->cmdq_bitmap), !test_bit(cbit, rcfw->cmdq_bitmap),
msecs_to_jiffies(RCFW_CMD_WAIT_TIME_MS)); msecs_to_jiffies(RCFW_CMD_WAIT_TIME_MS));
if (!rc) { return rc ? 0 : -ETIMEDOUT;
dev_warn(&rcfw->pdev->dev,
"QPLIB: Bono Error: timeout %d msec, msg {0x%x}\n",
RCFW_CMD_WAIT_TIME_MS, cookie);
}
return rc;
}; };
int bnxt_qplib_rcfw_block_for_resp(struct bnxt_qplib_rcfw *rcfw, u16 cookie) static int __block_for_resp(struct bnxt_qplib_rcfw *rcfw, u16 cookie)
{ {
u32 count = -1; u32 count = RCFW_BLOCKED_CMD_WAIT_COUNT;
u16 cbit; u16 cbit;
cookie &= RCFW_MAX_COOKIE_VALUE;
cbit = cookie % RCFW_MAX_OUTSTANDING_CMD; cbit = cookie % RCFW_MAX_OUTSTANDING_CMD;
if (!test_bit(cbit, rcfw->cmdq_bitmap)) if (!test_bit(cbit, rcfw->cmdq_bitmap))
goto done; goto done;
do { do {
mdelay(1); /* 1m sec */
bnxt_qplib_service_creq((unsigned long)rcfw); bnxt_qplib_service_creq((unsigned long)rcfw);
} while (test_bit(cbit, rcfw->cmdq_bitmap) && --count); } while (test_bit(cbit, rcfw->cmdq_bitmap) && --count);
done: done:
return count; return count ? 0 : -ETIMEDOUT;
}; };
void *bnxt_qplib_rcfw_send_message(struct bnxt_qplib_rcfw *rcfw, static int __send_message(struct bnxt_qplib_rcfw *rcfw, struct cmdq_base *req,
struct cmdq_base *req, void **crsbe, struct creq_base *resp, void *sb, u8 is_block)
u8 is_block)
{ {
struct bnxt_qplib_crsq *crsq = &rcfw->crsq;
struct bnxt_qplib_cmdqe *cmdqe, **cmdq_ptr; struct bnxt_qplib_cmdqe *cmdqe, **cmdq_ptr;
struct bnxt_qplib_hwq *cmdq = &rcfw->cmdq; struct bnxt_qplib_hwq *cmdq = &rcfw->cmdq;
struct bnxt_qplib_hwq *crsb = &rcfw->crsb; struct bnxt_qplib_crsq *crsqe;
struct bnxt_qplib_crsqe *crsqe = NULL;
struct bnxt_qplib_crsbe **crsb_ptr;
u32 sw_prod, cmdq_prod; u32 sw_prod, cmdq_prod;
u8 retry_cnt = 0xFF;
dma_addr_t dma_addr;
unsigned long flags; unsigned long flags;
u32 size, opcode; u32 size, opcode;
u16 cookie, cbit; u16 cookie, cbit;
int pg, idx; int pg, idx;
u8 *preq; u8 *preq;
retry:
opcode = req->opcode; opcode = req->opcode;
if (!test_bit(FIRMWARE_INITIALIZED_FLAG, &rcfw->flags) && if (!test_bit(FIRMWARE_INITIALIZED_FLAG, &rcfw->flags) &&
(opcode != CMDQ_BASE_OPCODE_QUERY_FUNC && (opcode != CMDQ_BASE_OPCODE_QUERY_FUNC &&
@ -112,63 +95,50 @@ void *bnxt_qplib_rcfw_send_message(struct bnxt_qplib_rcfw *rcfw,
dev_err(&rcfw->pdev->dev, dev_err(&rcfw->pdev->dev,
"QPLIB: RCFW not initialized, reject opcode 0x%x", "QPLIB: RCFW not initialized, reject opcode 0x%x",
opcode); opcode);
return NULL; return -EINVAL;
} }
if (test_bit(FIRMWARE_INITIALIZED_FLAG, &rcfw->flags) && if (test_bit(FIRMWARE_INITIALIZED_FLAG, &rcfw->flags) &&
opcode == CMDQ_BASE_OPCODE_INITIALIZE_FW) { opcode == CMDQ_BASE_OPCODE_INITIALIZE_FW) {
dev_err(&rcfw->pdev->dev, "QPLIB: RCFW already initialized!"); dev_err(&rcfw->pdev->dev, "QPLIB: RCFW already initialized!");
return NULL; return -EINVAL;
} }
/* Cmdq are in 16-byte units, each request can consume 1 or more /* Cmdq are in 16-byte units, each request can consume 1 or more
* cmdqe * cmdqe
*/ */
spin_lock_irqsave(&cmdq->lock, flags); spin_lock_irqsave(&cmdq->lock, flags);
if (req->cmd_size > cmdq->max_elements - if (req->cmd_size >= HWQ_FREE_SLOTS(cmdq)) {
((HWQ_CMP(cmdq->prod, cmdq) - HWQ_CMP(cmdq->cons, cmdq)) &
(cmdq->max_elements - 1))) {
dev_err(&rcfw->pdev->dev, "QPLIB: RCFW: CMDQ is full!"); dev_err(&rcfw->pdev->dev, "QPLIB: RCFW: CMDQ is full!");
spin_unlock_irqrestore(&cmdq->lock, flags); spin_unlock_irqrestore(&cmdq->lock, flags);
return -EAGAIN;
if (!retry_cnt--)
return NULL;
goto retry;
} }
retry_cnt = 0xFF;
cookie = atomic_inc_return(&rcfw->seq_num) & RCFW_MAX_COOKIE_VALUE; cookie = rcfw->seq_num & RCFW_MAX_COOKIE_VALUE;
cbit = cookie % RCFW_MAX_OUTSTANDING_CMD; cbit = cookie % RCFW_MAX_OUTSTANDING_CMD;
if (is_block) if (is_block)
cookie |= RCFW_CMD_IS_BLOCKING; cookie |= RCFW_CMD_IS_BLOCKING;
set_bit(cbit, rcfw->cmdq_bitmap);
req->cookie = cpu_to_le16(cookie); req->cookie = cpu_to_le16(cookie);
if (test_and_set_bit(cbit, rcfw->cmdq_bitmap)) { crsqe = &rcfw->crsqe_tbl[cbit];
dev_err(&rcfw->pdev->dev, if (crsqe->resp) {
"QPLIB: RCFW MAX outstanding cmd reached!");
atomic_dec(&rcfw->seq_num);
spin_unlock_irqrestore(&cmdq->lock, flags); spin_unlock_irqrestore(&cmdq->lock, flags);
return -EBUSY;
if (!retry_cnt--)
return NULL;
goto retry;
} }
/* Reserve a resp buffer slot if requested */ memset(resp, 0, sizeof(*resp));
if (req->resp_size && crsbe) { crsqe->resp = (struct creq_qp_event *)resp;
spin_lock(&crsb->lock); crsqe->resp->cookie = req->cookie;
sw_prod = HWQ_CMP(crsb->prod, crsb); crsqe->req_size = req->cmd_size;
crsb_ptr = (struct bnxt_qplib_crsbe **)crsb->pbl_ptr; if (req->resp_size && sb) {
*crsbe = (void *)&crsb_ptr[get_crsb_pg(sw_prod)] struct bnxt_qplib_rcfw_sbuf *sbuf = sb;
[get_crsb_idx(sw_prod)];
bnxt_qplib_crsb_dma_next(crsb->pbl_dma_ptr, sw_prod, &dma_addr);
req->resp_addr = cpu_to_le64(dma_addr);
crsb->prod++;
spin_unlock(&crsb->lock);
req->resp_size = (sizeof(struct bnxt_qplib_crsbe) + req->resp_addr = cpu_to_le64(sbuf->dma_addr);
BNXT_QPLIB_CMDQE_UNITS - 1) / req->resp_size = (sbuf->size + BNXT_QPLIB_CMDQE_UNITS - 1) /
BNXT_QPLIB_CMDQE_UNITS; BNXT_QPLIB_CMDQE_UNITS;
} }
cmdq_ptr = (struct bnxt_qplib_cmdqe **)cmdq->pbl_ptr; cmdq_ptr = (struct bnxt_qplib_cmdqe **)cmdq->pbl_ptr;
preq = (u8 *)req; preq = (u8 *)req;
size = req->cmd_size * BNXT_QPLIB_CMDQE_UNITS; size = req->cmd_size * BNXT_QPLIB_CMDQE_UNITS;
@ -190,23 +160,24 @@ void *bnxt_qplib_rcfw_send_message(struct bnxt_qplib_rcfw *rcfw,
preq += min_t(u32, size, sizeof(*cmdqe)); preq += min_t(u32, size, sizeof(*cmdqe));
size -= min_t(u32, size, sizeof(*cmdqe)); size -= min_t(u32, size, sizeof(*cmdqe));
cmdq->prod++; cmdq->prod++;
rcfw->seq_num++;
} while (size > 0); } while (size > 0);
rcfw->seq_num++;
cmdq_prod = cmdq->prod; cmdq_prod = cmdq->prod;
if (rcfw->flags & FIRMWARE_FIRST_FLAG) { if (rcfw->flags & FIRMWARE_FIRST_FLAG) {
/* The very first doorbell write is required to set this flag /* The very first doorbell write
* which prompts the FW to reset its internal pointers * is required to set this flag
* which prompts the FW to reset
* its internal pointers
*/ */
cmdq_prod |= FIRMWARE_FIRST_FLAG; cmdq_prod |= FIRMWARE_FIRST_FLAG;
rcfw->flags &= ~FIRMWARE_FIRST_FLAG; rcfw->flags &= ~FIRMWARE_FIRST_FLAG;
} }
sw_prod = HWQ_CMP(crsq->prod, crsq);
crsqe = &crsq->crsq[sw_prod];
memset(crsqe, 0, sizeof(*crsqe));
crsq->prod++;
crsqe->req_size = req->cmd_size;
/* ring CMDQ DB */ /* ring CMDQ DB */
wmb();
writel(cmdq_prod, rcfw->cmdq_bar_reg_iomem + writel(cmdq_prod, rcfw->cmdq_bar_reg_iomem +
rcfw->cmdq_bar_reg_prod_off); rcfw->cmdq_bar_reg_prod_off);
writel(RCFW_CMDQ_TRIG_VAL, rcfw->cmdq_bar_reg_iomem + writel(RCFW_CMDQ_TRIG_VAL, rcfw->cmdq_bar_reg_iomem +
@ -214,9 +185,56 @@ void *bnxt_qplib_rcfw_send_message(struct bnxt_qplib_rcfw *rcfw,
done: done:
spin_unlock_irqrestore(&cmdq->lock, flags); spin_unlock_irqrestore(&cmdq->lock, flags);
/* Return the CREQ response pointer */ /* Return the CREQ response pointer */
return crsqe ? &crsqe->qp_event : NULL; return 0;
} }
int bnxt_qplib_rcfw_send_message(struct bnxt_qplib_rcfw *rcfw,
struct cmdq_base *req,
struct creq_base *resp,
void *sb, u8 is_block)
{
struct creq_qp_event *evnt = (struct creq_qp_event *)resp;
u16 cookie;
u8 opcode, retry_cnt = 0xFF;
int rc = 0;
do {
opcode = req->opcode;
rc = __send_message(rcfw, req, resp, sb, is_block);
cookie = le16_to_cpu(req->cookie) & RCFW_MAX_COOKIE_VALUE;
if (!rc)
break;
if (!retry_cnt || (rc != -EAGAIN && rc != -EBUSY)) {
/* send failed */
dev_err(&rcfw->pdev->dev, "QPLIB: cmdq[%#x]=%#x send failed",
cookie, opcode);
return rc;
}
is_block ? mdelay(1) : usleep_range(500, 1000);
} while (retry_cnt--);
if (is_block)
rc = __block_for_resp(rcfw, cookie);
else
rc = __wait_for_resp(rcfw, cookie);
if (rc) {
/* timed out */
dev_err(&rcfw->pdev->dev, "QPLIB: cmdq[%#x]=%#x timedout (%d)msec",
cookie, opcode, RCFW_CMD_WAIT_TIME_MS);
return rc;
}
if (evnt->status) {
/* failed with status */
dev_err(&rcfw->pdev->dev, "QPLIB: cmdq[%#x]=%#x status %#x",
cookie, opcode, evnt->status);
rc = -EFAULT;
}
return rc;
}
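Every firmware command in this series is converted to the same synchronous pattern around the wrapper above. For reference, the shape as taken from the DESTROY_CQ conversion earlier in this diff, with the surrounding function context (rcfw, cq) assumed:

	struct cmdq_destroy_cq req;
	struct creq_destroy_cq_resp resp;
	u16 cmd_flags = 0;
	int rc;

	RCFW_CMD_PREP(req, DESTROY_CQ, cmd_flags);
	req.cq_cid = cpu_to_le32(cq->id);
	rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, (void *)&resp,
					  NULL, 0);
	if (rc)
		return rc;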
/* Completions */ /* Completions */
static int bnxt_qplib_process_func_event(struct bnxt_qplib_rcfw *rcfw, static int bnxt_qplib_process_func_event(struct bnxt_qplib_rcfw *rcfw,
struct creq_func_event *func_event) struct creq_func_event *func_event)
@ -260,12 +278,12 @@ static int bnxt_qplib_process_func_event(struct bnxt_qplib_rcfw *rcfw,
static int bnxt_qplib_process_qp_event(struct bnxt_qplib_rcfw *rcfw, static int bnxt_qplib_process_qp_event(struct bnxt_qplib_rcfw *rcfw,
struct creq_qp_event *qp_event) struct creq_qp_event *qp_event)
{ {
struct bnxt_qplib_crsq *crsq = &rcfw->crsq;
struct bnxt_qplib_hwq *cmdq = &rcfw->cmdq; struct bnxt_qplib_hwq *cmdq = &rcfw->cmdq;
struct bnxt_qplib_crsqe *crsqe; struct bnxt_qplib_crsq *crsqe;
u16 cbit, cookie, blocked = 0;
unsigned long flags; unsigned long flags;
u32 sw_cons; u16 cbit, blocked = 0;
u16 cookie;
__le16 mcookie;
switch (qp_event->event) { switch (qp_event->event) {
case CREQ_QP_EVENT_EVENT_QP_ERROR_NOTIFICATION: case CREQ_QP_EVENT_EVENT_QP_ERROR_NOTIFICATION:
@ -275,24 +293,31 @@ static int bnxt_qplib_process_qp_event(struct bnxt_qplib_rcfw *rcfw,
default: default:
/* Command Response */ /* Command Response */
spin_lock_irqsave(&cmdq->lock, flags); spin_lock_irqsave(&cmdq->lock, flags);
sw_cons = HWQ_CMP(crsq->cons, crsq); cookie = le16_to_cpu(qp_event->cookie);
crsqe = &crsq->crsq[sw_cons]; mcookie = qp_event->cookie;
crsq->cons++;
memcpy(&crsqe->qp_event, qp_event, sizeof(crsqe->qp_event));
cookie = le16_to_cpu(crsqe->qp_event.cookie);
blocked = cookie & RCFW_CMD_IS_BLOCKING; blocked = cookie & RCFW_CMD_IS_BLOCKING;
cookie &= RCFW_MAX_COOKIE_VALUE; cookie &= RCFW_MAX_COOKIE_VALUE;
cbit = cookie % RCFW_MAX_OUTSTANDING_CMD; cbit = cookie % RCFW_MAX_OUTSTANDING_CMD;
crsqe = &rcfw->crsqe_tbl[cbit];
if (crsqe->resp &&
crsqe->resp->cookie == mcookie) {
memcpy(crsqe->resp, qp_event, sizeof(*qp_event));
crsqe->resp = NULL;
} else {
dev_err(&rcfw->pdev->dev,
"QPLIB: CMD %s resp->cookie = %#x, evnt->cookie = %#x",
crsqe->resp ? "mismatch" : "collision",
crsqe->resp ? crsqe->resp->cookie : 0, mcookie);
}
if (!test_and_clear_bit(cbit, rcfw->cmdq_bitmap)) if (!test_and_clear_bit(cbit, rcfw->cmdq_bitmap))
dev_warn(&rcfw->pdev->dev, dev_warn(&rcfw->pdev->dev,
"QPLIB: CMD bit %d was not requested", cbit); "QPLIB: CMD bit %d was not requested", cbit);
cmdq->cons += crsqe->req_size; cmdq->cons += crsqe->req_size;
spin_unlock_irqrestore(&cmdq->lock, flags); crsqe->req_size = 0;
if (!blocked) if (!blocked)
wake_up(&rcfw->waitq); wake_up(&rcfw->waitq);
break; spin_unlock_irqrestore(&cmdq->lock, flags);
} }
return 0; return 0;
} }
@ -305,12 +330,12 @@ static void bnxt_qplib_service_creq(unsigned long data)
struct creq_base *creqe, **creq_ptr; struct creq_base *creqe, **creq_ptr;
u32 sw_cons, raw_cons; u32 sw_cons, raw_cons;
unsigned long flags; unsigned long flags;
u32 type; u32 type, budget = CREQ_ENTRY_POLL_BUDGET;
/* Service the CREQ until empty */ /* Service the CREQ until budget is over */
spin_lock_irqsave(&creq->lock, flags); spin_lock_irqsave(&creq->lock, flags);
raw_cons = creq->cons; raw_cons = creq->cons;
while (1) { while (budget > 0) {
sw_cons = HWQ_CMP(raw_cons, creq); sw_cons = HWQ_CMP(raw_cons, creq);
creq_ptr = (struct creq_base **)creq->pbl_ptr; creq_ptr = (struct creq_base **)creq->pbl_ptr;
creqe = &creq_ptr[get_creq_pg(sw_cons)][get_creq_idx(sw_cons)]; creqe = &creq_ptr[get_creq_pg(sw_cons)][get_creq_idx(sw_cons)];
@ -320,15 +345,9 @@ static void bnxt_qplib_service_creq(unsigned long data)
type = creqe->type & CREQ_BASE_TYPE_MASK; type = creqe->type & CREQ_BASE_TYPE_MASK;
switch (type) { switch (type) {
case CREQ_BASE_TYPE_QP_EVENT: case CREQ_BASE_TYPE_QP_EVENT:
if (!bnxt_qplib_process_qp_event bnxt_qplib_process_qp_event
(rcfw, (struct creq_qp_event *)creqe)) (rcfw, (struct creq_qp_event *)creqe);
rcfw->creq_qp_event_processed++; rcfw->creq_qp_event_processed++;
else {
dev_warn(&rcfw->pdev->dev, "QPLIB: crsqe with");
dev_warn(&rcfw->pdev->dev,
"QPLIB: type = 0x%x not handled",
type);
}
break; break;
case CREQ_BASE_TYPE_FUNC_EVENT: case CREQ_BASE_TYPE_FUNC_EVENT:
if (!bnxt_qplib_process_func_event if (!bnxt_qplib_process_func_event
@ -346,7 +365,9 @@ static void bnxt_qplib_service_creq(unsigned long data)
break; break;
} }
raw_cons++; raw_cons++;
budget--;
} }
if (creq->cons != raw_cons) { if (creq->cons != raw_cons) {
creq->cons = raw_cons; creq->cons = raw_cons;
CREQ_DB_REARM(rcfw->creq_bar_reg_iomem, raw_cons, CREQ_DB_REARM(rcfw->creq_bar_reg_iomem, raw_cons,
@ -375,23 +396,16 @@ static irqreturn_t bnxt_qplib_creq_irq(int irq, void *dev_instance)
/* RCFW */ /* RCFW */
int bnxt_qplib_deinit_rcfw(struct bnxt_qplib_rcfw *rcfw) int bnxt_qplib_deinit_rcfw(struct bnxt_qplib_rcfw *rcfw)
{ {
struct creq_deinitialize_fw_resp *resp;
struct cmdq_deinitialize_fw req; struct cmdq_deinitialize_fw req;
struct creq_deinitialize_fw_resp resp;
u16 cmd_flags = 0; u16 cmd_flags = 0;
int rc;
RCFW_CMD_PREP(req, DEINITIALIZE_FW, cmd_flags); RCFW_CMD_PREP(req, DEINITIALIZE_FW, cmd_flags);
resp = (struct creq_deinitialize_fw_resp *) rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, (void *)&resp,
bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, NULL, 0);
NULL, 0); if (rc)
if (!resp) return rc;
return -EINVAL;
if (!bnxt_qplib_rcfw_wait_for_resp(rcfw, le16_to_cpu(req.cookie)))
return -ETIMEDOUT;
if (resp->status ||
le16_to_cpu(resp->cookie) != le16_to_cpu(req.cookie))
return -EFAULT;
clear_bit(FIRMWARE_INITIALIZED_FLAG, &rcfw->flags); clear_bit(FIRMWARE_INITIALIZED_FLAG, &rcfw->flags);
return 0; return 0;
@ -417,9 +431,10 @@ static int __get_pbl_pg_idx(struct bnxt_qplib_pbl *pbl)
int bnxt_qplib_init_rcfw(struct bnxt_qplib_rcfw *rcfw, int bnxt_qplib_init_rcfw(struct bnxt_qplib_rcfw *rcfw,
struct bnxt_qplib_ctx *ctx, int is_virtfn) struct bnxt_qplib_ctx *ctx, int is_virtfn)
{ {
struct creq_initialize_fw_resp *resp;
struct cmdq_initialize_fw req; struct cmdq_initialize_fw req;
struct creq_initialize_fw_resp resp;
u16 cmd_flags = 0, level; u16 cmd_flags = 0, level;
int rc;
RCFW_CMD_PREP(req, INITIALIZE_FW, cmd_flags); RCFW_CMD_PREP(req, INITIALIZE_FW, cmd_flags);
@ -482,37 +497,19 @@ int bnxt_qplib_init_rcfw(struct bnxt_qplib_rcfw *rcfw,
skip_ctx_setup: skip_ctx_setup:
req.stat_ctx_id = cpu_to_le32(ctx->stats.fw_id); req.stat_ctx_id = cpu_to_le32(ctx->stats.fw_id);
resp = (struct creq_initialize_fw_resp *) rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, (void *)&resp,
bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, NULL, 0);
NULL, 0); if (rc)
if (!resp) { return rc;
dev_err(&rcfw->pdev->dev,
"QPLIB: RCFW: INITIALIZE_FW send failed");
return -EINVAL;
}
if (!bnxt_qplib_rcfw_wait_for_resp(rcfw, le16_to_cpu(req.cookie))) {
/* Cmd timed out */
dev_err(&rcfw->pdev->dev,
"QPLIB: RCFW: INITIALIZE_FW timed out");
return -ETIMEDOUT;
}
if (resp->status ||
le16_to_cpu(resp->cookie) != le16_to_cpu(req.cookie)) {
dev_err(&rcfw->pdev->dev,
"QPLIB: RCFW: INITIALIZE_FW failed");
return -EINVAL;
}
set_bit(FIRMWARE_INITIALIZED_FLAG, &rcfw->flags); set_bit(FIRMWARE_INITIALIZED_FLAG, &rcfw->flags);
return 0; return 0;
} }
void bnxt_qplib_free_rcfw_channel(struct bnxt_qplib_rcfw *rcfw) void bnxt_qplib_free_rcfw_channel(struct bnxt_qplib_rcfw *rcfw)
{ {
bnxt_qplib_free_hwq(rcfw->pdev, &rcfw->crsb); kfree(rcfw->crsqe_tbl);
kfree(rcfw->crsq.crsq);
bnxt_qplib_free_hwq(rcfw->pdev, &rcfw->cmdq); bnxt_qplib_free_hwq(rcfw->pdev, &rcfw->cmdq);
bnxt_qplib_free_hwq(rcfw->pdev, &rcfw->creq); bnxt_qplib_free_hwq(rcfw->pdev, &rcfw->creq);
rcfw->pdev = NULL; rcfw->pdev = NULL;
} }
@ -539,21 +536,11 @@ int bnxt_qplib_alloc_rcfw_channel(struct pci_dev *pdev,
goto fail; goto fail;
} }
rcfw->crsq.max_elements = rcfw->cmdq.max_elements; rcfw->crsqe_tbl = kcalloc(rcfw->cmdq.max_elements,
rcfw->crsq.crsq = kcalloc(rcfw->crsq.max_elements, sizeof(*rcfw->crsqe_tbl), GFP_KERNEL);
sizeof(*rcfw->crsq.crsq), GFP_KERNEL); if (!rcfw->crsqe_tbl)
if (!rcfw->crsq.crsq)
goto fail; goto fail;
rcfw->crsb.max_elements = BNXT_QPLIB_CRSBE_MAX_CNT;
if (bnxt_qplib_alloc_init_hwq(rcfw->pdev, &rcfw->crsb, NULL, 0,
&rcfw->crsb.max_elements,
BNXT_QPLIB_CRSBE_UNITS, 0, PAGE_SIZE,
HWQ_TYPE_CTX)) {
dev_err(&rcfw->pdev->dev,
"QPLIB: HW channel CRSB allocation failed");
goto fail;
}
return 0; return 0;
fail: fail:
@ -606,7 +593,7 @@ int bnxt_qplib_enable_rcfw_channel(struct pci_dev *pdev,
int rc; int rc;
/* General */ /* General */
atomic_set(&rcfw->seq_num, 0); rcfw->seq_num = 0;
rcfw->flags = FIRMWARE_FIRST_FLAG; rcfw->flags = FIRMWARE_FIRST_FLAG;
bmap_size = BITS_TO_LONGS(RCFW_MAX_OUTSTANDING_CMD * bmap_size = BITS_TO_LONGS(RCFW_MAX_OUTSTANDING_CMD *
sizeof(unsigned long)); sizeof(unsigned long));
@ -636,10 +623,6 @@ int bnxt_qplib_enable_rcfw_channel(struct pci_dev *pdev,
rcfw->cmdq_bar_reg_trig_off = RCFW_COMM_TRIG_OFFSET; rcfw->cmdq_bar_reg_trig_off = RCFW_COMM_TRIG_OFFSET;
/* CRSQ */
rcfw->crsq.prod = 0;
rcfw->crsq.cons = 0;
/* CREQ */ /* CREQ */
rcfw->creq_bar_reg = RCFW_COMM_CONS_PCI_BAR_REGION; rcfw->creq_bar_reg = RCFW_COMM_CONS_PCI_BAR_REGION;
res_base = pci_resource_start(pdev, rcfw->creq_bar_reg); res_base = pci_resource_start(pdev, rcfw->creq_bar_reg);
@ -692,3 +675,34 @@ int bnxt_qplib_enable_rcfw_channel(struct pci_dev *pdev,
__iowrite32_copy(rcfw->cmdq_bar_reg_iomem, &init, sizeof(init) / 4); __iowrite32_copy(rcfw->cmdq_bar_reg_iomem, &init, sizeof(init) / 4);
return 0; return 0;
} }
struct bnxt_qplib_rcfw_sbuf *bnxt_qplib_rcfw_alloc_sbuf(
struct bnxt_qplib_rcfw *rcfw,
u32 size)
{
struct bnxt_qplib_rcfw_sbuf *sbuf;
sbuf = kzalloc(sizeof(*sbuf), GFP_ATOMIC);
if (!sbuf)
return NULL;
sbuf->size = size;
sbuf->sb = dma_zalloc_coherent(&rcfw->pdev->dev, sbuf->size,
&sbuf->dma_addr, GFP_ATOMIC);
if (!sbuf->sb)
goto bail;
return sbuf;
bail:
kfree(sbuf);
return NULL;
}
void bnxt_qplib_rcfw_free_sbuf(struct bnxt_qplib_rcfw *rcfw,
struct bnxt_qplib_rcfw_sbuf *sbuf)
{
if (sbuf->sb)
dma_free_coherent(&rcfw->pdev->dev, sbuf->size,
sbuf->sb, sbuf->dma_addr);
kfree(sbuf);
}
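The side-buffer helpers above pair with the new send path as in the QUERY_QP conversion earlier in this diff; a condensed sketch, assuming req, resp and rc are declared as in that hunk:

	struct bnxt_qplib_rcfw_sbuf *sbuf;
	struct creq_query_qp_resp_sb *sb;

	sbuf = bnxt_qplib_rcfw_alloc_sbuf(rcfw, sizeof(*sb));
	if (!sbuf)
		return -ENOMEM;
	sb = sbuf->sb;
	req.resp_size = sizeof(*sb) / BNXT_QPLIB_CMDQE_UNITS;

	rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, (void *)&resp,
					  (void *)sbuf, 0);
	if (!rc) {
		/* parse the response block in *sb */
	}
	bnxt_qplib_rcfw_free_sbuf(rcfw, sbuf);
	return rc;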

View File

@ -73,6 +73,7 @@
#define RCFW_MAX_OUTSTANDING_CMD BNXT_QPLIB_CMDQE_MAX_CNT #define RCFW_MAX_OUTSTANDING_CMD BNXT_QPLIB_CMDQE_MAX_CNT
#define RCFW_MAX_COOKIE_VALUE 0x7FFF #define RCFW_MAX_COOKIE_VALUE 0x7FFF
#define RCFW_CMD_IS_BLOCKING 0x8000 #define RCFW_CMD_IS_BLOCKING 0x8000
#define RCFW_BLOCKED_CMD_WAIT_COUNT 0x4E20
/* Cmdq contains a fix number of a 16-Byte slots */ /* Cmdq contains a fix number of a 16-Byte slots */
struct bnxt_qplib_cmdqe { struct bnxt_qplib_cmdqe {
@ -94,32 +95,6 @@ struct bnxt_qplib_crsbe {
u8 data[1024]; u8 data[1024];
}; };
/* CRSQ SB */
#define BNXT_QPLIB_CRSBE_MAX_CNT 4
#define BNXT_QPLIB_CRSBE_UNITS sizeof(struct bnxt_qplib_crsbe)
#define BNXT_QPLIB_CRSBE_CNT_PER_PG (PAGE_SIZE / BNXT_QPLIB_CRSBE_UNITS)
#define MAX_CRSB_IDX (BNXT_QPLIB_CRSBE_MAX_CNT - 1)
#define MAX_CRSB_IDX_PER_PG (BNXT_QPLIB_CRSBE_CNT_PER_PG - 1)
static inline u32 get_crsb_pg(u32 val)
{
return (val & ~MAX_CRSB_IDX_PER_PG) / BNXT_QPLIB_CRSBE_CNT_PER_PG;
}
static inline u32 get_crsb_idx(u32 val)
{
return val & MAX_CRSB_IDX_PER_PG;
}
static inline void bnxt_qplib_crsb_dma_next(dma_addr_t *pg_map_arr,
u32 prod, dma_addr_t *dma_addr)
{
*dma_addr = pg_map_arr[(prod) / BNXT_QPLIB_CRSBE_CNT_PER_PG];
*dma_addr += ((prod) % BNXT_QPLIB_CRSBE_CNT_PER_PG) *
BNXT_QPLIB_CRSBE_UNITS;
}
/* CREQ */ /* CREQ */
/* Allocate 1 per QP for async error notification for now */ /* Allocate 1 per QP for async error notification for now */
#define BNXT_QPLIB_CREQE_MAX_CNT (64 * 1024) #define BNXT_QPLIB_CREQE_MAX_CNT (64 * 1024)
@ -158,17 +133,19 @@ static inline u32 get_creq_idx(u32 val)
#define CREQ_DB(db, raw_cons, cp_bit) \ #define CREQ_DB(db, raw_cons, cp_bit) \
writel(CREQ_DB_CP_FLAGS | ((raw_cons) & ((cp_bit) - 1)), db) writel(CREQ_DB_CP_FLAGS | ((raw_cons) & ((cp_bit) - 1)), db)
#define CREQ_ENTRY_POLL_BUDGET 0x100
/* HWQ */ /* HWQ */
struct bnxt_qplib_crsqe {
struct creq_qp_event qp_event; struct bnxt_qplib_crsq {
struct creq_qp_event *resp;
u32 req_size; u32 req_size;
}; };
struct bnxt_qplib_crsq { struct bnxt_qplib_rcfw_sbuf {
struct bnxt_qplib_crsqe *crsq; void *sb;
u32 prod; dma_addr_t dma_addr;
u32 cons; u32 size;
u32 max_elements;
}; };
/* RCFW Communication Channels */ /* RCFW Communication Channels */
@ -185,7 +162,7 @@ struct bnxt_qplib_rcfw {
wait_queue_head_t waitq; wait_queue_head_t waitq;
int (*aeq_handler)(struct bnxt_qplib_rcfw *, int (*aeq_handler)(struct bnxt_qplib_rcfw *,
struct creq_func_event *); struct creq_func_event *);
atomic_t seq_num; u32 seq_num;
/* Bar region info */ /* Bar region info */
void __iomem *cmdq_bar_reg_iomem; void __iomem *cmdq_bar_reg_iomem;
@ -203,8 +180,7 @@ struct bnxt_qplib_rcfw {
/* Actual Cmd and Resp Queues */ /* Actual Cmd and Resp Queues */
struct bnxt_qplib_hwq cmdq; struct bnxt_qplib_hwq cmdq;
struct bnxt_qplib_crsq crsq; struct bnxt_qplib_crsq *crsqe_tbl;
struct bnxt_qplib_hwq crsb;
}; };
void bnxt_qplib_free_rcfw_channel(struct bnxt_qplib_rcfw *rcfw); void bnxt_qplib_free_rcfw_channel(struct bnxt_qplib_rcfw *rcfw);
@ -219,11 +195,14 @@ int bnxt_qplib_enable_rcfw_channel(struct pci_dev *pdev,
(struct bnxt_qplib_rcfw *, (struct bnxt_qplib_rcfw *,
struct creq_func_event *)); struct creq_func_event *));
int bnxt_qplib_rcfw_block_for_resp(struct bnxt_qplib_rcfw *rcfw, u16 cookie); struct bnxt_qplib_rcfw_sbuf *bnxt_qplib_rcfw_alloc_sbuf(
int bnxt_qplib_rcfw_wait_for_resp(struct bnxt_qplib_rcfw *rcfw, u16 cookie); struct bnxt_qplib_rcfw *rcfw,
void *bnxt_qplib_rcfw_send_message(struct bnxt_qplib_rcfw *rcfw, u32 size);
struct cmdq_base *req, void **crsbe, void bnxt_qplib_rcfw_free_sbuf(struct bnxt_qplib_rcfw *rcfw,
u8 is_block); struct bnxt_qplib_rcfw_sbuf *sbuf);
int bnxt_qplib_rcfw_send_message(struct bnxt_qplib_rcfw *rcfw,
struct cmdq_base *req, struct creq_base *resp,
void *sbuf, u8 is_block);
int bnxt_qplib_deinit_rcfw(struct bnxt_qplib_rcfw *rcfw); int bnxt_qplib_deinit_rcfw(struct bnxt_qplib_rcfw *rcfw);
int bnxt_qplib_init_rcfw(struct bnxt_qplib_rcfw *rcfw, int bnxt_qplib_init_rcfw(struct bnxt_qplib_rcfw *rcfw,
