Merge branch 'x86/urgent' into x86/asm

Get the cr4 fixes so we can apply the final cleanup
Thomas Gleixner 2016-09-30 12:38:28 +02:00
commit d7e25c66c9
153 changed files with 1277 additions and 744 deletions


@ -13,6 +13,7 @@ Required properties:
- touchscreen-size-y : See touchscreen.txt
Optional properties:
- firmware-name : File basename (string) for board specific firmware
- touchscreen-inverted-x : See touchscreen.txt
- touchscreen-inverted-y : See touchscreen.txt
- touchscreen-swapped-x-y : See touchscreen.txt


@ -144,7 +144,7 @@ logical address types are already defined will return with error ``EBUSY``.
- ``flags``
- Flags. No flags are defined yet, so set this to 0.
- Flags. See :ref:`cec-log-addrs-flags` for a list of available flags.
- .. row 7
@ -201,6 +201,25 @@ logical address types are already defined will return with error ``EBUSY``.
give the CEC framework more information about the device type, even
though the framework won't use it directly in the CEC message.
.. _cec-log-addrs-flags:
.. flat-table:: Flags for struct cec_log_addrs
:header-rows: 0
:stub-columns: 0
:widths: 3 1 4
- .. _`CEC-LOG-ADDRS-FL-ALLOW-UNREG-FALLBACK`:
- ``CEC_LOG_ADDRS_FL_ALLOW_UNREG_FALLBACK``
- 1
- By default, if no logical address of the requested type can be claimed, the
adapter goes back to the unconfigured state. If this flag is set, it will
fall back to the Unregistered logical address instead. Note that if the
Unregistered logical address was explicitly requested, this flag has no
effect.
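For illustration, a minimal userspace sketch of claiming a Playback address with this fallback flag set, using the standard ``linux/cec.h`` uAPI (the device node path is hypothetical):

#include <fcntl.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/cec.h>

int claim_with_fallback(void)
{
	struct cec_log_addrs laddrs;
	int fd = open("/dev/cec0", O_RDWR);	/* hypothetical node */

	memset(&laddrs, 0, sizeof(laddrs));
	laddrs.cec_version = CEC_OP_CEC_VERSION_2_0;
	laddrs.num_log_addrs = 1;
	laddrs.log_addr_type[0] = CEC_LOG_ADDR_TYPE_PLAYBACK;
	laddrs.primary_device_type[0] = CEC_OP_PRIM_DEVTYPE_PLAYBACK;
	laddrs.flags = CEC_LOG_ADDRS_FL_ALLOW_UNREG_FALLBACK;
	strcpy(laddrs.osd_name, "demo");

	/* Falls back to the Unregistered address when no Playback
	 * address can be claimed. */
	return ioctl(fd, CEC_ADAP_S_LOG_ADDRS, &laddrs);
}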
.. _cec-versions:
.. flat-table:: CEC Versions


@ -64,7 +64,8 @@ it is guaranteed that the state did change in between the two events.
- ``phys_addr``
- The current physical address.
- The current physical address. This is ``CEC_PHYS_ADDR_INVALID`` if no
valid physical address is set.
- .. row 2
@ -72,7 +73,10 @@ it is guaranteed that the state did change in between the two events.
- ``log_addr_mask``
- The current set of claimed logical addresses.
- The current set of claimed logical addresses. This is 0 if no logical
addresses are claimed or if ``phys_addr`` is ``CEC_PHYS_ADDR_INVALID``.
If bit 15 is set (``1 << CEC_LOG_ADDR_UNREGISTERED``) then this device
has the unregistered logical address. In that case all other bits are 0.
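As a rough sketch of how a caller might act on these rules when dequeuing state-change events (``fd`` is an open CEC device node; the stock uAPI is assumed):

#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/cec.h>

void report_state(int fd)
{
	struct cec_event ev;

	if (ioctl(fd, CEC_DQEVENT, &ev) || ev.event != CEC_EVENT_STATE_CHANGE)
		return;
	if (ev.state_change.phys_addr == CEC_PHYS_ADDR_INVALID)
		printf("unconfigured (log_addr_mask is 0)\n");
	else if (ev.state_change.log_addr_mask & (1 << CEC_LOG_ADDR_UNREGISTERED))
		printf("unregistered logical address only\n");	/* other bits 0 */
	else
		printf("claimed addresses: mask 0x%x\n",
		       ev.state_change.log_addr_mask);
}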


@ -2501,7 +2501,7 @@ S: Supported
F: kernel/bpf/
BROADCOM B44 10/100 ETHERNET DRIVER
M: Gary Zambrano <zambrano@broadcom.com>
M: Michael Chan <michael.chan@broadcom.com>
L: netdev@vger.kernel.org
S: Supported
F: drivers/net/ethernet/broadcom/b44.*
@ -8161,6 +8161,15 @@ S: Maintained
W: https://fedorahosted.org/dropwatch/
F: net/core/drop_monitor.c
NETWORKING [DSA]
M: Andrew Lunn <andrew@lunn.ch>
M: Vivien Didelot <vivien.didelot@savoirfairelinux.com>
M: Florian Fainelli <f.fainelli@gmail.com>
S: Maintained
F: net/dsa/
F: include/net/dsa.h
F: drivers/net/dsa/
NETWORKING [GENERAL]
M: "David S. Miller" <davem@davemloft.net>
L: netdev@vger.kernel.org


@ -1,7 +1,7 @@
VERSION = 4
PATCHLEVEL = 8
SUBLEVEL = 0
EXTRAVERSION = -rc7
EXTRAVERSION = -rc8
NAME = Psychotic Stoned Sheep
# *DOCUMENTATION*


@ -61,8 +61,6 @@
#define AARCH64_BREAK_KGDB_DYN_DBG \
(AARCH64_BREAK_MON | (KGDB_DYN_DBG_BRK_IMM << 5))
#define KGDB_DYN_BRK_INS_BYTE(x) \
((AARCH64_BREAK_KGDB_DYN_DBG >> (8 * (x))) & 0xff)
#define CACHE_FLUSH_IS_SAFE 1


@ -19,10 +19,13 @@
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <linux/bug.h>
#include <linux/irq.h>
#include <linux/kdebug.h>
#include <linux/kgdb.h>
#include <linux/kprobes.h>
#include <asm/debug-monitors.h>
#include <asm/insn.h>
#include <asm/traps.h>
struct dbg_reg_def_t dbg_reg_def[DBG_MAX_REG_NUM] = {
@ -338,15 +341,24 @@ void kgdb_arch_exit(void)
unregister_die_notifier(&kgdb_notifier);
}
/*
* ARM instructions are always in LE.
* Break instruction is encoded in LE format
*/
struct kgdb_arch arch_kgdb_ops = {
.gdb_bpt_instr = {
KGDB_DYN_BRK_INS_BYTE(0),
KGDB_DYN_BRK_INS_BYTE(1),
KGDB_DYN_BRK_INS_BYTE(2),
KGDB_DYN_BRK_INS_BYTE(3),
}
};
struct kgdb_arch arch_kgdb_ops;
int kgdb_arch_set_breakpoint(struct kgdb_bkpt *bpt)
{
int err;
BUILD_BUG_ON(AARCH64_INSN_SIZE != BREAK_INSTR_SIZE);
err = aarch64_insn_read((void *)bpt->bpt_addr, (u32 *)bpt->saved_instr);
if (err)
return err;
return aarch64_insn_write((void *)bpt->bpt_addr,
(u32)AARCH64_BREAK_KGDB_DYN_DBG);
}
int kgdb_arch_remove_breakpoint(struct kgdb_bkpt *bpt)
{
return aarch64_insn_write((void *)bpt->bpt_addr,
*(u32 *)bpt->saved_instr);
}


@ -201,12 +201,6 @@ int __cpu_up(unsigned int cpu, struct task_struct *idle)
return ret;
}
static void smp_store_cpu_info(unsigned int cpuid)
{
store_cpu_topology(cpuid);
numa_store_cpu_info(cpuid);
}
/*
* This is the secondary CPU boot entry. We're using this CPU's
* idle thread stack, but a set of temporary page tables.
@ -254,7 +248,7 @@ asmlinkage void secondary_start_kernel(void)
*/
notify_cpu_starting(cpu);
smp_store_cpu_info(cpu);
store_cpu_topology(cpu);
/*
* OK, now it's safe to let the boot CPU continue. Wait for
@ -689,10 +683,13 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
{
int err;
unsigned int cpu;
unsigned int this_cpu;
init_cpu_topology();
smp_store_cpu_info(smp_processor_id());
this_cpu = smp_processor_id();
store_cpu_topology(this_cpu);
numa_store_cpu_info(this_cpu);
/*
* If UP is mandated by "nosmp" (which implies "maxcpus=0"), don't set
@ -719,6 +716,7 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
continue;
set_cpu_present(cpu, true);
numa_store_cpu_info(cpu);
}
}


@ -65,6 +65,7 @@ config MIPS
select ARCH_CLOCKSOURCE_DATA
select HANDLE_DOMAIN_IRQ
select HAVE_EXIT_THREAD
select HAVE_REGS_AND_STACK_ACCESS_API
menu "Machine selection"


@ -113,42 +113,6 @@ config SPINLOCK_TEST
help
Add several files to the debugfs to test spinlock speed.
if CPU_MIPSR6
choice
prompt "Compact branch policy"
default MIPS_COMPACT_BRANCHES_OPTIMAL
config MIPS_COMPACT_BRANCHES_NEVER
bool "Never (force delay slot branches)"
help
Pass the -mcompact-branches=never flag to the compiler in order to
force it to always emit branches with delay slots, and make no use
of the compact branch instructions introduced by MIPSr6. This is
useful if you suspect there may be an issue with compact branches in
either the compiler or the CPU.
config MIPS_COMPACT_BRANCHES_OPTIMAL
bool "Optimal (use where beneficial)"
help
Pass the -mcompact-branches=optimal flag to the compiler in order for
it to make use of compact branch instructions where it deems them
beneficial, and use branches with delay slots elsewhere. This is the
default compiler behaviour, and should be used unless you have a
reason to choose otherwise.
config MIPS_COMPACT_BRANCHES_ALWAYS
bool "Always (force compact branches)"
help
Pass the -mcompact-branches=always flag to the compiler in order to
force it to always emit compact branches, making no use of branch
instructions with delay slots. This can result in more compact code
which may be beneficial in some scenarios.
endchoice
endif # CPU_MIPSR6
config SCACHE_DEBUGFS
bool "L2 cache debugfs entries"
depends on DEBUG_FS


@ -203,10 +203,6 @@ endif
toolchain-virt := $(call cc-option-yn,$(mips-cflags) -mvirt)
cflags-$(toolchain-virt) += -DTOOLCHAIN_SUPPORTS_VIRT
cflags-$(CONFIG_MIPS_COMPACT_BRANCHES_NEVER) += -mcompact-branches=never
cflags-$(CONFIG_MIPS_COMPACT_BRANCHES_OPTIMAL) += -mcompact-branches=optimal
cflags-$(CONFIG_MIPS_COMPACT_BRANCHES_ALWAYS) += -mcompact-branches=always
#
# Firmware support
#


@ -96,7 +96,7 @@ static struct clk * __init ath79_reg_ffclk(const char *name,
struct clk *clk;
clk = clk_register_fixed_factor(NULL, name, parent_name, 0, mult, div);
if (!clk)
if (IS_ERR(clk))
panic("failed to allocate %s clock structure", name);
return clk;


@ -1059,7 +1059,7 @@ static int __init octeon_publish_devices(void)
{
return of_platform_bus_probe(NULL, octeon_ids, NULL);
}
device_initcall(octeon_publish_devices);
arch_initcall(octeon_publish_devices);
MODULE_AUTHOR("David Daney <ddaney@caviumnetworks.com>");
MODULE_LICENSE("GPL");


@ -157,6 +157,7 @@
ldc1 $f28, THREAD_FPR28(\thread)
ldc1 $f30, THREAD_FPR30(\thread)
ctc1 \tmp, fcr31
.set pop
.endm
.macro fpu_restore_16odd thread


@ -15,8 +15,8 @@
static inline bool __should_swizzle_bits(volatile void *a)
{
extern const bool octeon_should_swizzle_table[];
u64 did = ((u64)(uintptr_t)a >> 40) & 0xff;
unsigned long did = ((unsigned long)a >> 40) & 0xff;
return octeon_should_swizzle_table[did];
}
@ -29,7 +29,7 @@ static inline bool __should_swizzle_bits(volatile void *a)
#define __should_swizzle_bits(a) false
static inline bool __should_swizzle_addr(unsigned long p)
static inline bool __should_swizzle_addr(u64 p)
{
/* boot bus? */
return ((p >> 40) & 0xff) == 0;


@ -11,11 +11,13 @@
#define CP0_EBASE $15, 1
.macro kernel_entry_setup
#ifdef CONFIG_SMP
mfc0 t0, CP0_EBASE
andi t0, t0, 0x3ff # CPUNum
beqz t0, 1f
# CPUs other than zero goto smp_bootstrap
j smp_bootstrap
#endif /* CONFIG_SMP */
1:
.endm


@ -1164,7 +1164,9 @@ int mipsr2_decoder(struct pt_regs *regs, u32 inst, unsigned long *fcr31)
regs->regs[31] = r31;
regs->cp0_epc = epc;
if (!used_math()) { /* First time FPU user. */
preempt_disable();
err = init_fpu();
preempt_enable();
set_used_math();
}
lose_fpu(1); /* Save FPU state for the emulator. */


@ -605,14 +605,14 @@ int mips_set_process_fp_mode(struct task_struct *task, unsigned int value)
return -EOPNOTSUPP;
/* Avoid inadvertently triggering emulation */
if ((value & PR_FP_MODE_FR) && cpu_has_fpu &&
!(current_cpu_data.fpu_id & MIPS_FPIR_F64))
if ((value & PR_FP_MODE_FR) && raw_cpu_has_fpu &&
!(raw_current_cpu_data.fpu_id & MIPS_FPIR_F64))
return -EOPNOTSUPP;
if ((value & PR_FP_MODE_FRE) && cpu_has_fpu && !cpu_has_fre)
if ((value & PR_FP_MODE_FRE) && raw_cpu_has_fpu && !cpu_has_fre)
return -EOPNOTSUPP;
/* FR = 0 not supported in MIPS R6 */
if (!(value & PR_FP_MODE_FR) && cpu_has_fpu && cpu_has_mips_r6)
if (!(value & PR_FP_MODE_FR) && raw_cpu_has_fpu && cpu_has_mips_r6)
return -EOPNOTSUPP;
/* Proceed with the mode switch */


@ -87,6 +87,13 @@ void __init add_memory_region(phys_addr_t start, phys_addr_t size, long type)
int x = boot_mem_map.nr_map;
int i;
/*
* If the region reaches the top of the physical address space, adjust
* the size slightly so that (start + size) doesn't overflow
*/
if (start + size - 1 == (phys_addr_t)ULLONG_MAX)
--size;
/* Sanity check */
if (start + size < start) {
pr_warn("Trying to add an invalid memory region, skipped\n");


@ -322,6 +322,9 @@ asmlinkage void start_secondary(void)
cpumask_set_cpu(cpu, &cpu_coherent_mask);
notify_cpu_starting(cpu);
cpumask_set_cpu(cpu, &cpu_callin_map);
synchronise_count_slave(cpu);
set_cpu_online(cpu, true);
set_cpu_sibling_map(cpu);
@ -329,10 +332,6 @@ asmlinkage void start_secondary(void)
calculate_cpu_foreign_map();
cpumask_set_cpu(cpu, &cpu_callin_map);
synchronise_count_slave(cpu);
/*
* irq will be enabled in ->smp_finish(), enabling it too early
* is dangerous.


@ -222,7 +222,7 @@ int arch_uprobe_exception_notify(struct notifier_block *self,
return NOTIFY_DONE;
switch (val) {
case DIE_BREAK:
case DIE_UPROBE:
if (uprobe_pre_sstep_notifier(regs))
return NOTIFY_STOP;
break;


@ -39,16 +39,16 @@ static struct vm_special_mapping vdso_vvar_mapping = {
static void __init init_vdso_image(struct mips_vdso_image *image)
{
unsigned long num_pages, i;
unsigned long data_pfn;
BUG_ON(!PAGE_ALIGNED(image->data));
BUG_ON(!PAGE_ALIGNED(image->size));
num_pages = image->size / PAGE_SIZE;
for (i = 0; i < num_pages; i++) {
image->mapping.pages[i] =
virt_to_page(image->data + (i * PAGE_SIZE));
}
data_pfn = __phys_to_pfn(__pa_symbol(image->data));
for (i = 0; i < num_pages; i++)
image->mapping.pages[i] = pfn_to_page(data_pfn + i);
}
static int __init init_vdso(void)


@ -298,5 +298,6 @@ bool do_dsemulret(struct pt_regs *xcp)
/* Set EPC to return to post-branch instruction */
xcp->cp0_epc = current->thread.bd_emu_cont_pc;
pr_debug("dsemulret to 0x%08lx\n", xcp->cp0_epc);
MIPS_FPU_EMU_INC_STATS(ds_emul);
return true;
}


@ -800,7 +800,7 @@ static void r4k_flush_icache_range(unsigned long start, unsigned long end)
* If address-based cache ops don't require an SMP call, then
* use them exclusively for small flushes.
*/
size = start - end;
size = end - start;
cache_size = icache_size;
if (!cpu_has_ic_fills_f_dc) {
size *= 2;


@ -261,7 +261,6 @@ unsigned __weak platform_maar_init(unsigned num_pairs)
{
struct maar_config cfg[BOOT_MEM_MAP_MAX];
unsigned i, num_configured, num_cfg = 0;
phys_addr_t skip;
for (i = 0; i < boot_mem_map.nr_map; i++) {
switch (boot_mem_map.map[i].type) {
@ -272,14 +271,14 @@ unsigned __weak platform_maar_init(unsigned num_pairs)
continue;
}
skip = 0x10000 - (boot_mem_map.map[i].addr & 0xffff);
/* Round lower up */
cfg[num_cfg].lower = boot_mem_map.map[i].addr;
cfg[num_cfg].lower += skip;
cfg[num_cfg].lower = (cfg[num_cfg].lower + 0xffff) & ~0xffff;
cfg[num_cfg].upper = cfg[num_cfg].lower;
cfg[num_cfg].upper += boot_mem_map.map[i].size - 1;
cfg[num_cfg].upper -= skip;
/* Round upper down */
cfg[num_cfg].upper = boot_mem_map.map[i].addr +
boot_mem_map.map[i].size;
cfg[num_cfg].upper = (cfg[num_cfg].upper & ~0xffff) - 1;
cfg[num_cfg].attrs = MIPS_MAAR_S;
num_cfg++;
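To see the rounding in action, take a hypothetical region at 0x9000 with size 0x20000 (a sketch of the arithmetic only):

/* Lower bound rounded up to the next 64 KiB boundary: */
lower = (0x9000 + 0xffff) & ~0xffff;		/* -> 0x10000 */

/* Upper bound rounded down, kept as an inclusive end address: */
upper = ((0x9000 + 0x20000) & ~0xffff) - 1;	/* -> 0x1ffff */

/* The MAAR pair covers [0x10000, 0x1ffff], strictly inside the
 * original region, so speculation can never reach bytes outside it. */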


@ -124,6 +124,13 @@ static inline bool pnv_pci_is_m64(struct pnv_phb *phb, struct resource *r)
r->start < (phb->ioda.m64_base + phb->ioda.m64_size));
}
static inline bool pnv_pci_is_m64_flags(unsigned long resource_flags)
{
unsigned long flags = (IORESOURCE_MEM_64 | IORESOURCE_PREFETCH);
return (resource_flags & flags) == flags;
}
static struct pnv_ioda_pe *pnv_ioda_init_pe(struct pnv_phb *phb, int pe_no)
{
phb->ioda.pe_array[pe_no].phb = phb;
@ -2871,7 +2878,7 @@ static void pnv_pci_ioda_fixup_iov_resources(struct pci_dev *pdev)
res = &pdev->resource[i + PCI_IOV_RESOURCES];
if (!res->flags || res->parent)
continue;
if (!pnv_pci_is_m64(phb, res)) {
if (!pnv_pci_is_m64_flags(res->flags)) {
dev_warn(&pdev->dev, "Don't support SR-IOV with"
" non M64 VF BAR%d: %pR. \n",
i, res);
@ -3096,7 +3103,7 @@ static resource_size_t pnv_pci_window_alignment(struct pci_bus *bus,
* alignment for any 64-bit resource, PCIe doesn't care and
* bridges only do 64-bit prefetchable anyway.
*/
if (phb->ioda.m64_segsize && (type & IORESOURCE_MEM_64))
if (phb->ioda.m64_segsize && pnv_pci_is_m64_flags(type))
return phb->ioda.m64_segsize;
if (type & IORESOURCE_MEM)
return phb->ioda.m32_segsize;


@ -60,7 +60,7 @@ static inline int atomic_fetch_##op(int i, atomic_t *v) \
" movco.l %0, @%3 \n" \
" bf 1b \n" \
" synco \n" \
: "=&z" (temp), "=&z" (res) \
: "=&z" (temp), "=&r" (res) \
: "r" (i), "r" (&v->counter) \
: "t"); \
\


@ -22,7 +22,7 @@ static void BITSFUNC(go)(void *raw_addr, size_t raw_len,
ELF(Phdr) *pt = (ELF(Phdr) *)(raw_addr + GET_LE(&hdr->e_phoff));
if (hdr->e_type != ET_DYN)
if (GET_LE(&hdr->e_type) != ET_DYN)
fail("input is not a shared object\n");
/* Walk the segment table. */


@ -455,7 +455,7 @@ int intel_bts_interrupt(void)
* The only surefire way of knowing if this NMI is ours is by checking
* the write ptr against the PMI threshold.
*/
if (ds->bts_index >= ds->bts_interrupt_threshold)
if (ds && (ds->bts_index >= ds->bts_interrupt_threshold))
handled = 1;
/*
@ -584,7 +584,8 @@ static __init int bts_init(void)
if (!boot_cpu_has(X86_FEATURE_DTES64) || !x86_pmu.bts)
return -ENODEV;
bts_pmu.capabilities = PERF_PMU_CAP_AUX_NO_SG | PERF_PMU_CAP_ITRACE;
bts_pmu.capabilities = PERF_PMU_CAP_AUX_NO_SG | PERF_PMU_CAP_ITRACE |
PERF_PMU_CAP_EXCLUSIVE;
bts_pmu.task_ctx_nr = perf_sw_context;
bts_pmu.event_init = bts_event_init;
bts_pmu.add = bts_event_add;


@ -81,7 +81,7 @@ DECLARE_PER_CPU_SHARED_ALIGNED(struct tlb_state, cpu_tlbstate);
/* Initialize cr4 shadow for this CPU. */
static inline void cr4_init_shadow(void)
{
this_cpu_write(cpu_tlbstate.cr4, __read_cr4());
this_cpu_write(cpu_tlbstate.cr4, __read_cr4_safe());
}
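For reference, a sketch of what the "safe" variant has to do on CPUs that predate CR4 (this mirrors the idea behind __read_cr4_safe(); treat the details as an assumption, not the exact implementation):

static inline unsigned long cr4_read_safe_sketch(void)
{
	unsigned long val = 0;	/* CPUs without CR4 report 0 */

	/* If "mov %cr4" faults because the CPU has no CR4, the
	 * exception table entry skips the instruction and val
	 * keeps its zero initializer. */
	asm volatile("1: mov %%cr4, %0\n"
		     "2:\n"
		     _ASM_EXTABLE(1b, 2b)
		     : "+r" (val));
	return val;
}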
/* Set in this cpu's CR4. */


@ -1137,9 +1137,7 @@ void __init setup_arch(char **cmdline_p)
* auditing all the early-boot CR4 manipulation would be needed to
* rule it out.
*/
if (boot_cpu_data.cpuid_level >= 0)
/* A CPU has %cr4 if and only if it has CPUID. */
mmu_cr4_features = __read_cr4();
mmu_cr4_features = __read_cr4_safe();
memblock_set_current_limit(get_max_mapped());


@ -917,11 +917,11 @@ static void populate_pte(struct cpa_data *cpa,
}
}
static int populate_pmd(struct cpa_data *cpa,
unsigned long start, unsigned long end,
unsigned num_pages, pud_t *pud, pgprot_t pgprot)
static long populate_pmd(struct cpa_data *cpa,
unsigned long start, unsigned long end,
unsigned num_pages, pud_t *pud, pgprot_t pgprot)
{
unsigned int cur_pages = 0;
long cur_pages = 0;
pmd_t *pmd;
pgprot_t pmd_pgprot;
@ -991,12 +991,12 @@ static int populate_pmd(struct cpa_data *cpa,
return num_pages;
}
static int populate_pud(struct cpa_data *cpa, unsigned long start, pgd_t *pgd,
pgprot_t pgprot)
static long populate_pud(struct cpa_data *cpa, unsigned long start, pgd_t *pgd,
pgprot_t pgprot)
{
pud_t *pud;
unsigned long end;
int cur_pages = 0;
long cur_pages = 0;
pgprot_t pud_pgprot;
end = start + (cpa->numpages << PAGE_SHIFT);
@ -1052,7 +1052,7 @@ static int populate_pud(struct cpa_data *cpa, unsigned long start, pgd_t *pgd,
/* Map trailing leftover */
if (start < end) {
int tmp;
long tmp;
pud = pud_offset(pgd, start);
if (pud_none(*pud))
@ -1078,7 +1078,7 @@ static int populate_pgd(struct cpa_data *cpa, unsigned long addr)
pgprot_t pgprot = __pgprot(_KERNPG_TABLE);
pud_t *pud = NULL; /* shut up gcc */
pgd_t *pgd_entry;
int ret;
long ret;
pgd_entry = cpa->pgd + pgd_index(addr);
@ -1327,7 +1327,8 @@ static int cpa_process_alias(struct cpa_data *cpa)
static int __change_page_attr_set_clr(struct cpa_data *cpa, int checkalias)
{
int ret, numpages = cpa->numpages;
unsigned long numpages = cpa->numpages;
int ret;
while (numpages) {
/*


@ -245,7 +245,7 @@ int __init efi_setup_page_tables(unsigned long pa_memmap, unsigned num_pages)
* text and allocate a new stack because we can't rely on the
* stack pointer being < 4GB.
*/
if (!IS_ENABLED(CONFIG_EFI_MIXED))
if (!IS_ENABLED(CONFIG_EFI_MIXED) || efi_is_native())
return 0;
/*


@ -296,17 +296,29 @@ struct request *blk_mq_alloc_request_hctx(struct request_queue *q, int rw,
if (ret)
return ERR_PTR(ret);
/*
* Check if the hardware context is actually mapped to anything.
* If not tell the caller that it should skip this queue.
*/
hctx = q->queue_hw_ctx[hctx_idx];
if (!blk_mq_hw_queue_mapped(hctx)) {
ret = -EXDEV;
goto out_queue_exit;
}
ctx = __blk_mq_get_ctx(q, cpumask_first(hctx->cpumask));
blk_mq_set_alloc_data(&alloc_data, q, flags, ctx, hctx);
rq = __blk_mq_alloc_request(&alloc_data, rw, 0);
if (!rq) {
blk_queue_exit(q);
return ERR_PTR(-EWOULDBLOCK);
ret = -EWOULDBLOCK;
goto out_queue_exit;
}
return rq;
out_queue_exit:
blk_queue_exit(q);
return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(blk_mq_alloc_request_hctx);


@ -780,9 +780,11 @@ static bool tg_may_dispatch(struct throtl_grp *tg, struct bio *bio,
/*
* If previous slice expired, start a new one otherwise renew/extend
* existing slice to make sure it is at least throtl_slice interval
* long since now.
* long since now. A new slice is started only for an empty throttle
* group. If there is a queued bio, that means there should be an active
* slice and it should be extended instead.
*/
if (throtl_slice_used(tg, rw))
if (throtl_slice_used(tg, rw) && !(tg->service_queue.nr_queued[rw]))
throtl_start_new_slice(tg, rw);
else {
if (time_before(tg->slice_end[rw], jiffies + throtl_slice))


@ -298,41 +298,48 @@ static int pkcs1pad_decrypt_complete(struct akcipher_request *req, int err)
struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req);
struct pkcs1pad_ctx *ctx = akcipher_tfm_ctx(tfm);
struct pkcs1pad_request *req_ctx = akcipher_request_ctx(req);
unsigned int dst_len;
unsigned int pos;
if (err == -EOVERFLOW)
/* Decrypted value had no leading 0 byte */
err = -EINVAL;
u8 *out_buf;
if (err)
goto done;
if (req_ctx->child_req.dst_len != ctx->key_size - 1) {
err = -EINVAL;
err = -EINVAL;
dst_len = req_ctx->child_req.dst_len;
if (dst_len < ctx->key_size - 1)
goto done;
out_buf = req_ctx->out_buf;
if (dst_len == ctx->key_size) {
if (out_buf[0] != 0x00)
/* Decrypted value had no leading 0 byte */
goto done;
dst_len--;
out_buf++;
}
if (req_ctx->out_buf[0] != 0x02) {
err = -EINVAL;
if (out_buf[0] != 0x02)
goto done;
}
for (pos = 1; pos < req_ctx->child_req.dst_len; pos++)
if (req_ctx->out_buf[pos] == 0x00)
for (pos = 1; pos < dst_len; pos++)
if (out_buf[pos] == 0x00)
break;
if (pos < 9 || pos == req_ctx->child_req.dst_len) {
err = -EINVAL;
if (pos < 9 || pos == dst_len)
goto done;
}
pos++;
if (req->dst_len < req_ctx->child_req.dst_len - pos)
err = 0;
if (req->dst_len < dst_len - pos)
err = -EOVERFLOW;
req->dst_len = req_ctx->child_req.dst_len - pos;
req->dst_len = dst_len - pos;
if (!err)
sg_copy_from_buffer(req->dst,
sg_nents_for_len(req->dst, req->dst_len),
req_ctx->out_buf + pos, req->dst_len);
out_buf + pos, req->dst_len);
done:
kzfree(req_ctx->out_buf);
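For orientation, the block being unpacked above is EMSA-PKCS1-v1_5: ``0x00 || 0x02 || PS || 0x00 || M``, where PS is at least eight nonzero padding bytes. A condensed sketch of the same parse (hypothetical helper; error reporting simplified):

/* Returns the offset of the message M inside buf[0..len), or -1. */
static int pkcs1_v15_unpad_sketch(const u8 *buf, unsigned int len,
				  unsigned int key_size)
{
	unsigned int pos;

	if (len == key_size) {		/* leading 0x00 still present */
		if (buf[0] != 0x00)
			return -1;
		buf++;
		len--;
	}
	if (buf[0] != 0x02)		/* block type 2 */
		return -1;
	for (pos = 1; pos < len && buf[pos] != 0x00; pos++)
		;
	if (pos < 9 || pos == len)	/* PS >= 8 bytes, 0x00 must exist */
		return -1;
	return pos + 1;			/* M starts right after the 0x00 */
}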


@ -1475,7 +1475,11 @@ int _regmap_raw_write(struct regmap *map, unsigned int reg,
kfree(buf);
} else if (ret != 0 && !map->cache_bypass && map->format.parse_val) {
regcache_drop_region(map, reg, reg + 1);
/* regcache_drop_region() takes lock that we already have,
* thus call map->cache_ops->drop() directly
*/
if (map->cache_ops && map->cache_ops->drop)
map->cache_ops->drop(map, reg, reg + 1);
}
trace_regmap_hw_write_done(map, reg, val_len / map->format.val_bytes);


@ -773,13 +773,6 @@ static int pch_i2c_probe(struct pci_dev *pdev,
/* Set the number of I2C channel instance */
adap_info->ch_num = id->driver_data;
ret = request_irq(pdev->irq, pch_i2c_handler, IRQF_SHARED,
KBUILD_MODNAME, adap_info);
if (ret) {
pch_pci_err(pdev, "request_irq FAILED\n");
goto err_request_irq;
}
for (i = 0; i < adap_info->ch_num; i++) {
pch_adap = &adap_info->pch_data[i].pch_adapter;
adap_info->pch_i2c_suspended = false;
@ -797,6 +790,17 @@ static int pch_i2c_probe(struct pci_dev *pdev,
pch_adap->dev.of_node = pdev->dev.of_node;
pch_adap->dev.parent = &pdev->dev;
}
ret = request_irq(pdev->irq, pch_i2c_handler, IRQF_SHARED,
KBUILD_MODNAME, adap_info);
if (ret) {
pch_pci_err(pdev, "request_irq FAILED\n");
goto err_request_irq;
}
for (i = 0; i < adap_info->ch_num; i++) {
pch_adap = &adap_info->pch_data[i].pch_adapter;
pch_i2c_init(&adap_info->pch_data[i]);


@ -1599,7 +1599,8 @@ static int qup_i2c_pm_resume_runtime(struct device *device)
#ifdef CONFIG_PM_SLEEP
static int qup_i2c_suspend(struct device *device)
{
qup_i2c_pm_suspend_runtime(device);
if (!pm_runtime_suspended(device))
return qup_i2c_pm_suspend_runtime(device);
return 0;
}


@ -164,7 +164,7 @@ static int pca954x_select_chan(struct i2c_mux_core *muxc, u32 chan)
/* Only select the channel if its different from the last channel */
if (data->last_chan != regval) {
ret = pca954x_reg_write(muxc->parent, client, regval);
data->last_chan = regval;
data->last_chan = ret ? 0 : regval;
}
return ret;


@ -390,9 +390,10 @@ static void silead_ts_read_props(struct i2c_client *client)
data->max_fingers = 5; /* Most devices handle up-to 5 fingers */
}
error = device_property_read_string(dev, "touchscreen-fw-name", &str);
error = device_property_read_string(dev, "firmware-name", &str);
if (!error)
snprintf(data->fw_name, sizeof(data->fw_name), "%s", str);
snprintf(data->fw_name, sizeof(data->fw_name),
"silead/%s", str);
else
dev_dbg(dev, "Firmware file name read error. Using default.");
}
@ -410,14 +411,14 @@ static int silead_ts_set_default_fw_name(struct silead_ts_data *data,
if (!acpi_id)
return -ENODEV;
snprintf(data->fw_name, sizeof(data->fw_name), "%s.fw",
acpi_id->id);
snprintf(data->fw_name, sizeof(data->fw_name),
"silead/%s.fw", acpi_id->id);
for (i = 0; i < strlen(data->fw_name); i++)
data->fw_name[i] = tolower(data->fw_name[i]);
} else {
snprintf(data->fw_name, sizeof(data->fw_name), "%s.fw",
id->name);
snprintf(data->fw_name, sizeof(data->fw_name),
"silead/%s.fw", id->name);
}
return 0;
@ -426,7 +427,8 @@ static int silead_ts_set_default_fw_name(struct silead_ts_data *data,
static int silead_ts_set_default_fw_name(struct silead_ts_data *data,
const struct i2c_device_id *id)
{
snprintf(data->fw_name, sizeof(data->fw_name), "%s.fw", id->name);
snprintf(data->fw_name, sizeof(data->fw_name),
"silead/%s.fw", id->name);
return 0;
}
#endif


@ -548,7 +548,7 @@ static int gic_starting_cpu(unsigned int cpu)
static u16 gic_compute_target_list(int *base_cpu, const struct cpumask *mask,
unsigned long cluster_id)
{
int cpu = *base_cpu;
int next_cpu, cpu = *base_cpu;
unsigned long mpidr = cpu_logical_map(cpu);
u16 tlist = 0;
@ -562,9 +562,10 @@ static u16 gic_compute_target_list(int *base_cpu, const struct cpumask *mask,
tlist |= 1 << (mpidr & 0xf);
cpu = cpumask_next(cpu, mask);
if (cpu >= nr_cpu_ids)
next_cpu = cpumask_next(cpu, mask);
if (next_cpu >= nr_cpu_ids)
goto out;
cpu = next_cpu;
mpidr = cpu_logical_map(cpu);


@ -638,27 +638,6 @@ static int gic_local_irq_domain_map(struct irq_domain *d, unsigned int virq,
if (!gic_local_irq_is_routable(intr))
return -EPERM;
/*
* HACK: These are all really percpu interrupts, but the rest
* of the MIPS kernel code does not use the percpu IRQ API for
* the CP0 timer and performance counter interrupts.
*/
switch (intr) {
case GIC_LOCAL_INT_TIMER:
case GIC_LOCAL_INT_PERFCTR:
case GIC_LOCAL_INT_FDC:
irq_set_chip_and_handler(virq,
&gic_all_vpes_local_irq_controller,
handle_percpu_irq);
break;
default:
irq_set_chip_and_handler(virq,
&gic_local_irq_controller,
handle_percpu_devid_irq);
irq_set_percpu_devid(virq);
break;
}
spin_lock_irqsave(&gic_lock, flags);
for (i = 0; i < gic_vpes; i++) {
u32 val = GIC_MAP_TO_PIN_MSK | gic_cpu_pin;
@ -724,16 +703,42 @@ static int gic_shared_irq_domain_map(struct irq_domain *d, unsigned int virq,
return 0;
}
static int gic_irq_domain_map(struct irq_domain *d, unsigned int virq,
irq_hw_number_t hw)
static int gic_setup_dev_chip(struct irq_domain *d, unsigned int virq,
unsigned int hwirq)
{
if (GIC_HWIRQ_TO_LOCAL(hw) < GIC_NUM_LOCAL_INTRS)
return gic_local_irq_domain_map(d, virq, hw);
struct irq_chip *chip;
int err;
irq_set_chip_and_handler(virq, &gic_level_irq_controller,
handle_level_irq);
if (hwirq >= GIC_SHARED_HWIRQ_BASE) {
err = irq_domain_set_hwirq_and_chip(d, virq, hwirq,
&gic_level_irq_controller,
NULL);
} else {
switch (GIC_HWIRQ_TO_LOCAL(hwirq)) {
case GIC_LOCAL_INT_TIMER:
case GIC_LOCAL_INT_PERFCTR:
case GIC_LOCAL_INT_FDC:
/*
* HACK: These are all really percpu interrupts, but
* the rest of the MIPS kernel code does not use the
* percpu IRQ API for them.
*/
chip = &gic_all_vpes_local_irq_controller;
irq_set_handler(virq, handle_percpu_irq);
break;
return gic_shared_irq_domain_map(d, virq, hw, 0);
default:
chip = &gic_local_irq_controller;
irq_set_handler(virq, handle_percpu_devid_irq);
irq_set_percpu_devid(virq);
break;
}
err = irq_domain_set_hwirq_and_chip(d, virq, hwirq,
chip, NULL);
}
return err;
}
static int gic_irq_domain_alloc(struct irq_domain *d, unsigned int virq,
@ -744,15 +749,12 @@ static int gic_irq_domain_alloc(struct irq_domain *d, unsigned int virq,
int cpu, ret, i;
if (spec->type == GIC_DEVICE) {
/* verify that it doesn't conflict with an IPI irq */
if (test_bit(spec->hwirq, ipi_resrv))
/* verify that shared irqs don't conflict with an IPI irq */
if ((spec->hwirq >= GIC_SHARED_HWIRQ_BASE) &&
test_bit(GIC_HWIRQ_TO_SHARED(spec->hwirq), ipi_resrv))
return -EBUSY;
hwirq = GIC_SHARED_TO_HWIRQ(spec->hwirq);
return irq_domain_set_hwirq_and_chip(d, virq, hwirq,
&gic_level_irq_controller,
NULL);
return gic_setup_dev_chip(d, virq, spec->hwirq);
} else {
base_hwirq = find_first_bit(ipi_resrv, gic_shared_intrs);
if (base_hwirq == gic_shared_intrs) {
@ -821,7 +823,6 @@ int gic_irq_domain_match(struct irq_domain *d, struct device_node *node,
}
static const struct irq_domain_ops gic_irq_domain_ops = {
.map = gic_irq_domain_map,
.alloc = gic_irq_domain_alloc,
.free = gic_irq_domain_free,
.match = gic_irq_domain_match,
@ -852,29 +853,20 @@ static int gic_dev_domain_alloc(struct irq_domain *d, unsigned int virq,
struct irq_fwspec *fwspec = arg;
struct gic_irq_spec spec = {
.type = GIC_DEVICE,
.hwirq = fwspec->param[1],
};
int i, ret;
bool is_shared = fwspec->param[0] == GIC_SHARED;
if (is_shared) {
ret = irq_domain_alloc_irqs_parent(d, virq, nr_irqs, &spec);
if (ret)
return ret;
}
if (fwspec->param[0] == GIC_SHARED)
spec.hwirq = GIC_SHARED_TO_HWIRQ(fwspec->param[1]);
else
spec.hwirq = GIC_LOCAL_TO_HWIRQ(fwspec->param[1]);
ret = irq_domain_alloc_irqs_parent(d, virq, nr_irqs, &spec);
if (ret)
return ret;
for (i = 0; i < nr_irqs; i++) {
irq_hw_number_t hwirq;
if (is_shared)
hwirq = GIC_SHARED_TO_HWIRQ(spec.hwirq + i);
else
hwirq = GIC_LOCAL_TO_HWIRQ(spec.hwirq + i);
ret = irq_domain_set_hwirq_and_chip(d, virq + i,
hwirq,
&gic_level_irq_controller,
NULL);
ret = gic_setup_dev_chip(d, virq + i, spec.hwirq + i);
if (ret)
goto error;
}
@ -896,7 +888,10 @@ void gic_dev_domain_free(struct irq_domain *d, unsigned int virq,
static void gic_dev_domain_activate(struct irq_domain *domain,
struct irq_data *d)
{
gic_shared_irq_domain_map(domain, d->irq, d->hwirq, 0);
if (GIC_HWIRQ_TO_LOCAL(d->hwirq) < GIC_NUM_LOCAL_INTRS)
gic_local_irq_domain_map(domain, d->irq, d->hwirq);
else
gic_shared_irq_domain_map(domain, d->irq, d->hwirq, 0);
}
static struct irq_domain_ops gic_dev_domain_ops = {


@ -70,7 +70,10 @@ static unsigned int cec_get_edid_spa_location(const u8 *edid, unsigned int size)
u8 tag = edid[i] >> 5;
u8 len = edid[i] & 0x1f;
if (tag == 3 && len >= 5 && i + len <= end)
if (tag == 3 && len >= 5 && i + len <= end &&
edid[i + 1] == 0x03 &&
edid[i + 2] == 0x0c &&
edid[i + 3] == 0x00)
return i + 4;
i += len + 1;
} while (i < end);
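The added bytes pin the match to the HDMI Vendor-Specific Data Block: tag 3 with the IEEE OUI 00-0C-03, stored little-endian as 0x03, 0x0c, 0x00, with the Source Physical Address in the two bytes at offset 4. Once located, the address unpacks as below (a sketch; it assumes the helper returns 0 when no SPA block is found):

unsigned int loc = cec_get_edid_spa_location(edid, size);
u16 phys_addr = CEC_PHYS_ADDR_INVALID;

if (loc)
	phys_addr = (edid[loc] << 8) | edid[loc + 1];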


@ -1552,6 +1552,7 @@ int cx23885_417_register(struct cx23885_dev *dev)
q->mem_ops = &vb2_dma_sg_memops;
q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
q->lock = &dev->lock;
q->dev = &dev->pci->dev;
err = vb2_queue_init(q);
if (err < 0)


@ -1238,6 +1238,7 @@ static int dvb_init(struct saa7134_dev *dev)
q->buf_struct_size = sizeof(struct saa7134_buf);
q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
q->lock = &dev->lock;
q->dev = &dev->pci->dev;
ret = vb2_queue_init(q);
if (ret) {
vb2_dvb_dealloc_frontends(&dev->frontends);


@ -295,6 +295,7 @@ static int empress_init(struct saa7134_dev *dev)
q->buf_struct_size = sizeof(struct saa7134_buf);
q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
q->lock = &dev->lock;
q->dev = &dev->pci->dev;
err = vb2_queue_init(q);
if (err)
return err;


@ -169,7 +169,7 @@ config VIDEO_MEDIATEK_VPU
config VIDEO_MEDIATEK_VCODEC
tristate "Mediatek Video Codec driver"
depends on MTK_IOMMU || COMPILE_TEST
depends on VIDEO_DEV && VIDEO_V4L2
depends on VIDEO_DEV && VIDEO_V4L2 && HAS_DMA
depends on ARCH_MEDIATEK || COMPILE_TEST
select VIDEOBUF2_DMA_CONTIG
select V4L2_MEM2MEM_DEV


@ -23,7 +23,6 @@
#include <media/v4l2-ioctl.h>
#include <media/videobuf2-core.h>
#include "mtk_vcodec_util.h"
#define MTK_VCODEC_DRV_NAME "mtk_vcodec_drv"
#define MTK_VCODEC_ENC_NAME "mtk-vcodec-enc"


@ -487,7 +487,6 @@ static int vidioc_venc_s_fmt_out(struct file *file, void *priv,
struct mtk_q_data *q_data;
int ret, i;
struct mtk_video_fmt *fmt;
unsigned int pitch_w_div16;
struct v4l2_pix_format_mplane *pix_fmt_mp = &f->fmt.pix_mp;
vq = v4l2_m2m_get_vq(ctx->m2m_ctx, f->type);
@ -530,15 +529,6 @@ static int vidioc_venc_s_fmt_out(struct file *file, void *priv,
q_data->coded_width = f->fmt.pix_mp.width;
q_data->coded_height = f->fmt.pix_mp.height;
pitch_w_div16 = DIV_ROUND_UP(q_data->visible_width, 16);
if (pitch_w_div16 % 8 != 0) {
/* Adjust returned width/height, so application could correctly
* allocate hw required memory
*/
q_data->visible_height += 32;
vidioc_try_fmt(f, q_data->fmt);
}
q_data->field = f->fmt.pix_mp.field;
ctx->colorspace = f->fmt.pix_mp.colorspace;
ctx->ycbcr_enc = f->fmt.pix_mp.ycbcr_enc;
@ -878,7 +868,8 @@ static int mtk_venc_encode_header(void *priv)
{
struct mtk_vcodec_ctx *ctx = priv;
int ret;
struct vb2_buffer *dst_buf;
struct vb2_buffer *src_buf, *dst_buf;
struct vb2_v4l2_buffer *dst_vb2_v4l2, *src_vb2_v4l2;
struct mtk_vcodec_mem bs_buf;
struct venc_done_result enc_result;
@ -911,6 +902,15 @@ static int mtk_venc_encode_header(void *priv)
mtk_v4l2_err("venc_if_encode failed=%d", ret);
return -EINVAL;
}
src_buf = v4l2_m2m_next_src_buf(ctx->m2m_ctx);
if (src_buf) {
src_vb2_v4l2 = to_vb2_v4l2_buffer(src_buf);
dst_vb2_v4l2 = to_vb2_v4l2_buffer(dst_buf);
dst_buf->timestamp = src_buf->timestamp;
dst_vb2_v4l2->timecode = src_vb2_v4l2->timecode;
} else {
mtk_v4l2_err("No timestamp for the header buffer.");
}
ctx->state = MTK_STATE_HEADER;
dst_buf->planes[0].bytesused = enc_result.bs_size;
@ -1003,7 +1003,7 @@ static void mtk_venc_worker(struct work_struct *work)
struct mtk_vcodec_mem bs_buf;
struct venc_done_result enc_result;
int ret, i;
struct vb2_v4l2_buffer *vb2_v4l2;
struct vb2_v4l2_buffer *dst_vb2_v4l2, *src_vb2_v4l2;
/* check dst_buf, dst_buf may be removed in device_run
* to store the encoded header, so we need to check dst_buf and
@ -1043,9 +1043,14 @@ static void mtk_venc_worker(struct work_struct *work)
ret = venc_if_encode(ctx, VENC_START_OPT_ENCODE_FRAME,
&frm_buf, &bs_buf, &enc_result);
vb2_v4l2 = container_of(dst_buf, struct vb2_v4l2_buffer, vb2_buf);
src_vb2_v4l2 = to_vb2_v4l2_buffer(src_buf);
dst_vb2_v4l2 = to_vb2_v4l2_buffer(dst_buf);
dst_buf->timestamp = src_buf->timestamp;
dst_vb2_v4l2->timecode = src_vb2_v4l2->timecode;
if (enc_result.is_key_frm)
vb2_v4l2->flags |= V4L2_BUF_FLAG_KEYFRAME;
dst_vb2_v4l2->flags |= V4L2_BUF_FLAG_KEYFRAME;
if (ret) {
v4l2_m2m_buf_done(to_vb2_v4l2_buffer(src_buf),
@ -1217,7 +1222,7 @@ int mtk_vcodec_enc_ctrls_setup(struct mtk_vcodec_ctx *ctx)
0, V4L2_MPEG_VIDEO_HEADER_MODE_SEPARATE);
v4l2_ctrl_new_std_menu(handler, ops, V4L2_CID_MPEG_VIDEO_H264_PROFILE,
V4L2_MPEG_VIDEO_H264_PROFILE_HIGH,
0, V4L2_MPEG_VIDEO_H264_PROFILE_MAIN);
0, V4L2_MPEG_VIDEO_H264_PROFILE_HIGH);
v4l2_ctrl_new_std_menu(handler, ops, V4L2_CID_MPEG_VIDEO_H264_LEVEL,
V4L2_MPEG_VIDEO_H264_LEVEL_4_2,
0, V4L2_MPEG_VIDEO_H264_LEVEL_4_0);
@ -1288,5 +1293,10 @@ int mtk_venc_lock(struct mtk_vcodec_ctx *ctx)
void mtk_vcodec_enc_release(struct mtk_vcodec_ctx *ctx)
{
venc_if_deinit(ctx);
int ret = venc_if_deinit(ctx);
if (ret)
mtk_v4l2_err("venc_if_deinit failed=%d", ret);
ctx->state = MTK_STATE_FREE;
}


@ -218,11 +218,15 @@ static int fops_vcodec_release(struct file *file)
mtk_v4l2_debug(1, "[%d] encoder", ctx->id);
mutex_lock(&dev->dev_mutex);
/*
* Call v4l2_m2m_ctx_release to make sure the worker thread is not
* running after venc_if_deinit.
*/
v4l2_m2m_ctx_release(ctx->m2m_ctx);
mtk_vcodec_enc_release(ctx);
v4l2_fh_del(&ctx->fh);
v4l2_fh_exit(&ctx->fh);
v4l2_ctrl_handler_free(&ctx->ctrl_hdl);
v4l2_m2m_ctx_release(ctx->m2m_ctx);
list_del_init(&ctx->list);
dev->num_instances--;


@ -16,7 +16,6 @@
#define _MTK_VCODEC_INTR_H_
#define MTK_INST_IRQ_RECEIVED 0x1
#define MTK_INST_WORK_THREAD_ABORT_DONE 0x2
struct mtk_vcodec_ctx;


@ -61,6 +61,8 @@ enum venc_h264_bs_mode {
/*
* struct venc_h264_vpu_config - Structure for h264 encoder configuration
* AP-W/R : AP is writer/reader on this item
* VPU-W/R: VPU is writer/reader on this item
* @input_fourcc: input fourcc
* @bitrate: target bitrate (in bps)
* @pic_w: picture width. Picture size is visible stream resolution, in pixels,
@ -94,13 +96,13 @@ struct venc_h264_vpu_config {
/*
* struct venc_h264_vpu_buf - Structure for buffer information
* @align: buffer alignment (in bytes)
* AP-W/R : AP is writer/reader on this item
* VPU-W/R: VPU is writer/reader on this item
* @iova: IO virtual address
* @vpua: VPU side memory addr which is used by RC_CODE
* @size: buffer size (in bytes)
*/
struct venc_h264_vpu_buf {
u32 align;
u32 iova;
u32 vpua;
u32 size;
@ -108,6 +110,8 @@ struct venc_h264_vpu_buf {
/*
* struct venc_h264_vsi - Structure for VPU driver control and info share
* AP-W/R : AP is writer/reader on this item
* VPU-W/R: VPU is writer/reader on this item
* This structure is allocated in VPU side and shared to AP side.
* @config: h264 encoder configuration
* @work_bufs: working buffer information in VPU side
@ -150,12 +154,6 @@ struct venc_h264_inst {
struct mtk_vcodec_ctx *ctx;
};
static inline void h264_write_reg(struct venc_h264_inst *inst, u32 addr,
u32 val)
{
writel(val, inst->hw_base + addr);
}
static inline u32 h264_read_reg(struct venc_h264_inst *inst, u32 addr)
{
return readl(inst->hw_base + addr);
@ -214,6 +212,8 @@ static unsigned int h264_get_level(struct venc_h264_inst *inst,
return 40;
case V4L2_MPEG_VIDEO_H264_LEVEL_4_1:
return 41;
case V4L2_MPEG_VIDEO_H264_LEVEL_4_2:
return 42;
default:
mtk_vcodec_debug(inst, "unsupported level %d", level);
return 31;


@ -56,6 +56,8 @@ enum venc_vp8_vpu_work_buf {
/*
* struct venc_vp8_vpu_config - Structure for vp8 encoder configuration
* AP-W/R : AP is writer/reader on this item
* VPU-W/R: VPU is writer/reader on this item
* @input_fourcc: input fourcc
* @bitrate: target bitrate (in bps)
* @pic_w: picture width. Picture size is visible stream resolution, in pixels,
@ -83,14 +85,14 @@ struct venc_vp8_vpu_config {
};
/*
* struct venc_vp8_vpu_buf -Structure for buffer information
* @align: buffer alignment (in bytes)
* struct venc_vp8_vpu_buf - Structure for buffer information
* AP-W/R : AP is writer/reader on this item
* VPU-W/R: VPU is writer/reader on this item
* @iova: IO virtual address
* @vpua: VPU side memory addr which is used by RC_CODE
* @size: buffer size (in bytes)
*/
struct venc_vp8_vpu_buf {
u32 align;
u32 iova;
u32 vpua;
u32 size;
@ -98,6 +100,8 @@ struct venc_vp8_vpu_buf {
/*
* struct venc_vp8_vsi - Structure for VPU driver control and info share
* AP-W/R : AP is writer/reader on this item
* VPU-W/R: VPU is writer/reader on this item
* This structure is allocated in VPU side and shared to AP side.
* @config: vp8 encoder configuration
* @work_bufs: working buffer information in VPU side
@ -138,12 +142,6 @@ struct venc_vp8_inst {
struct mtk_vcodec_ctx *ctx;
};
static inline void vp8_enc_write_reg(struct venc_vp8_inst *inst, u32 addr,
u32 val)
{
writel(val, inst->hw_base + addr);
}
static inline u32 vp8_enc_read_reg(struct venc_vp8_inst *inst, u32 addr)
{
return readl(inst->hw_base + addr);


@ -99,10 +99,16 @@ EXPORT_SYMBOL_GPL(rcar_fcp_put);
*/
int rcar_fcp_enable(struct rcar_fcp_device *fcp)
{
int error;
if (!fcp)
return 0;
return pm_runtime_get_sync(fcp->dev);
error = pm_runtime_get_sync(fcp->dev);
if (error < 0)
return error;
return 0;
}
EXPORT_SYMBOL_GPL(rcar_fcp_enable);


@ -1112,11 +1112,12 @@ static void dw_mci_setup_bus(struct dw_mci_slot *slot, bool force_clkinit)
div = (host->bus_hz != clock) ? DIV_ROUND_UP(div, 2) : 0;
dev_info(&slot->mmc->class_dev,
"Bus speed (slot %d) = %dHz (slot req %dHz, actual %dHZ div = %d)\n",
slot->id, host->bus_hz, clock,
div ? ((host->bus_hz / div) >> 1) :
host->bus_hz, div);
if (clock != slot->__clk_old || force_clkinit)
dev_info(&slot->mmc->class_dev,
"Bus speed (slot %d) = %dHz (slot req %dHz, actual %dHZ div = %d)\n",
slot->id, host->bus_hz, clock,
div ? ((host->bus_hz / div) >> 1) :
host->bus_hz, div);
/* disable clock */
mci_writel(host, CLKENA, 0);
@ -1139,6 +1140,9 @@ static void dw_mci_setup_bus(struct dw_mci_slot *slot, bool force_clkinit)
/* inform CIU */
mci_send_cmd(slot, sdmmc_cmd_bits, 0);
/* keep the last clock value that was requested from core */
slot->__clk_old = clock;
}
host->current_speed = clock;


@ -249,6 +249,8 @@ extern int dw_mci_resume(struct dw_mci *host);
* @queue_node: List node for placing this node in the @queue list of
* &struct dw_mci.
* @clock: Clock rate configured by set_ios(). Protected by host->lock.
* @__clk_old: The last clock value that was requested from core.
* Keeping track of this helps us to avoid spamming the console.
* @flags: Random state bits associated with the slot.
* @id: Number of this slot.
* @sdio_id: Number of this slot in the SDIO interrupt registers.
@ -263,6 +265,7 @@ struct dw_mci_slot {
struct list_head queue_node;
unsigned int clock;
unsigned int __clk_old;
unsigned long flags;
#define DW_MMC_CARD_PRESENT 0


@ -366,7 +366,8 @@ int mtk_ecc_encode(struct mtk_ecc *ecc, struct mtk_ecc_config *config,
u8 *data, u32 bytes)
{
dma_addr_t addr;
u32 *p, len, i;
u8 *p;
u32 len, i, val;
int ret = 0;
addr = dma_map_single(ecc->dev, data, bytes, DMA_TO_DEVICE);
@ -392,11 +393,14 @@ int mtk_ecc_encode(struct mtk_ecc *ecc, struct mtk_ecc_config *config,
/* Program ECC bytes to OOB: per sector oob = FDM + ECC + SPARE */
len = (config->strength * ECC_PARITY_BITS + 7) >> 3;
p = (u32 *)(data + bytes);
p = data + bytes;
/* write the parity bytes generated by the ECC back to the OOB region */
for (i = 0; i < len; i++)
p[i] = readl(ecc->regs + ECC_ENCPAR(i));
for (i = 0; i < len; i++) {
if ((i % 4) == 0)
val = readl(ecc->regs + ECC_ENCPAR(i / 4));
p[i] = (val >> ((i % 4) * 8)) & 0xff;
}
timeout:
dma_unmap_single(ecc->dev, addr, bytes, DMA_TO_DEVICE);
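The unpacking works because each ECC_ENCPAR register holds four parity bytes; one worked pass, assuming the first register reads back 0xddccbbaa:

/* val = 0xddccbbaa, read once from ECC_ENCPAR(0): */
p[0] = (val >> 0)  & 0xff;	/* 0xaa */
p[1] = (val >> 8)  & 0xff;	/* 0xbb */
p[2] = (val >> 16) & 0xff;	/* 0xcc */
p[3] = (val >> 24) & 0xff;	/* 0xdd */
/* i == 4 triggers a fresh read from ECC_ENCPAR(1), and so on.
 * Writing one u8 at a time also avoids the out-of-bounds u32 stores
 * the old code could issue when len was not a multiple of 4. */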


@ -93,6 +93,9 @@
#define NFI_FSM_MASK (0xf << 16)
#define NFI_ADDRCNTR (0x70)
#define CNTR_MASK GENMASK(16, 12)
#define ADDRCNTR_SEC_SHIFT (12)
#define ADDRCNTR_SEC(val) \
(((val) & CNTR_MASK) >> ADDRCNTR_SEC_SHIFT)
#define NFI_STRADDR (0x80)
#define NFI_BYTELEN (0x84)
#define NFI_CSEL (0x90)
@ -699,7 +702,7 @@ static int mtk_nfc_do_write_page(struct mtd_info *mtd, struct nand_chip *chip,
}
ret = readl_poll_timeout_atomic(nfc->regs + NFI_ADDRCNTR, reg,
(reg & CNTR_MASK) >= chip->ecc.steps,
ADDRCNTR_SEC(reg) >= chip->ecc.steps,
10, MTK_TIMEOUT);
if (ret)
dev_err(dev, "hwecc write timeout\n");
@ -902,7 +905,7 @@ static int mtk_nfc_read_subpage(struct mtd_info *mtd, struct nand_chip *chip,
dev_warn(nfc->dev, "read ahb/dma done timeout\n");
rc = readl_poll_timeout_atomic(nfc->regs + NFI_BYTELEN, reg,
(reg & CNTR_MASK) >= sectors, 10,
ADDRCNTR_SEC(reg) >= sectors, 10,
MTK_TIMEOUT);
if (rc < 0) {
dev_err(nfc->dev, "subpage done timeout\n");


@ -943,7 +943,7 @@ static int mxc_v2_ooblayout_free(struct mtd_info *mtd, int section,
struct nand_chip *nand_chip = mtd_to_nand(mtd);
int stepsize = nand_chip->ecc.bytes == 9 ? 16 : 26;
if (section > nand_chip->ecc.steps)
if (section >= nand_chip->ecc.steps)
return -ERANGE;
if (!section) {


@ -1268,11 +1268,10 @@ static int __maybe_unused flexcan_suspend(struct device *device)
struct flexcan_priv *priv = netdev_priv(dev);
int err;
err = flexcan_chip_disable(priv);
if (err)
return err;
if (netif_running(dev)) {
err = flexcan_chip_disable(priv);
if (err)
return err;
netif_stop_queue(dev);
netif_device_detach(dev);
}
@ -1285,13 +1284,17 @@ static int __maybe_unused flexcan_resume(struct device *device)
{
struct net_device *dev = dev_get_drvdata(device);
struct flexcan_priv *priv = netdev_priv(dev);
int err;
priv->can.state = CAN_STATE_ERROR_ACTIVE;
if (netif_running(dev)) {
netif_device_attach(dev);
netif_start_queue(dev);
err = flexcan_chip_enable(priv);
if (err)
return err;
}
return flexcan_chip_enable(priv);
return 0;
}
static SIMPLE_DEV_PM_OPS(flexcan_pm_ops, flexcan_suspend, flexcan_resume);


@ -81,6 +81,10 @@
#define IFI_CANFD_TIME_SET_TIMEA_4_12_6_6 BIT(15)
#define IFI_CANFD_TDELAY 0x1c
#define IFI_CANFD_TDELAY_DEFAULT 0xb
#define IFI_CANFD_TDELAY_MASK 0x3fff
#define IFI_CANFD_TDELAY_ABS BIT(14)
#define IFI_CANFD_TDELAY_EN BIT(15)
#define IFI_CANFD_ERROR 0x20
#define IFI_CANFD_ERROR_TX_OFFSET 0
@ -641,7 +645,7 @@ static void ifi_canfd_set_bittiming(struct net_device *ndev)
struct ifi_canfd_priv *priv = netdev_priv(ndev);
const struct can_bittiming *bt = &priv->can.bittiming;
const struct can_bittiming *dbt = &priv->can.data_bittiming;
u16 brp, sjw, tseg1, tseg2;
u16 brp, sjw, tseg1, tseg2, tdc;
/* Configure bit timing */
brp = bt->brp - 2;
@ -664,6 +668,11 @@ static void ifi_canfd_set_bittiming(struct net_device *ndev)
(brp << IFI_CANFD_TIME_PRESCALE_OFF) |
(sjw << IFI_CANFD_TIME_SJW_OFF_7_9_8_8),
priv->base + IFI_CANFD_FTIME);
/* Configure transmitter delay */
tdc = (dbt->brp * (dbt->phase_seg1 + 1)) & IFI_CANFD_TDELAY_MASK;
writel(IFI_CANFD_TDELAY_EN | IFI_CANFD_TDELAY_ABS | tdc,
priv->base + IFI_CANFD_TDELAY);
}
static void ifi_canfd_set_filter(struct net_device *ndev, const u32 id,


@ -6356,10 +6356,6 @@ bnx2_open(struct net_device *dev)
struct bnx2 *bp = netdev_priv(dev);
int rc;
rc = bnx2_request_firmware(bp);
if (rc < 0)
goto out;
netif_carrier_off(dev);
bnx2_disable_int(bp);
@ -6428,7 +6424,6 @@ bnx2_open(struct net_device *dev)
bnx2_free_irq(bp);
bnx2_free_mem(bp);
bnx2_del_napi(bp);
bnx2_release_firmware(bp);
goto out;
}
@ -8575,6 +8570,12 @@ bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
pci_set_drvdata(pdev, dev);
rc = bnx2_request_firmware(bp);
if (rc < 0)
goto error;
bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET);
memcpy(dev->dev_addr, bp->mac_addr, ETH_ALEN);
dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_SG |
@ -8607,6 +8608,7 @@ bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
return 0;
error:
bnx2_release_firmware(bp);
pci_iounmap(pdev, bp->regview);
pci_release_regions(pdev);
pci_disable_device(pdev);


@ -31,15 +31,10 @@
#define BNAD_NUM_TXF_COUNTERS 12
#define BNAD_NUM_RXF_COUNTERS 10
#define BNAD_NUM_CQ_COUNTERS (3 + 5)
#define BNAD_NUM_RXQ_COUNTERS 6
#define BNAD_NUM_RXQ_COUNTERS 7
#define BNAD_NUM_TXQ_COUNTERS 5
#define BNAD_ETHTOOL_STATS_NUM \
(sizeof(struct rtnl_link_stats64) / sizeof(u64) + \
sizeof(struct bnad_drv_stats) / sizeof(u64) + \
offsetof(struct bfi_enet_stats, rxf_stats[0]) / sizeof(u64))
static const char *bnad_net_stats_strings[BNAD_ETHTOOL_STATS_NUM] = {
static const char *bnad_net_stats_strings[] = {
"rx_packets",
"tx_packets",
"rx_bytes",
@ -50,22 +45,10 @@ static const char *bnad_net_stats_strings[BNAD_ETHTOOL_STATS_NUM] = {
"tx_dropped",
"multicast",
"collisions",
"rx_length_errors",
"rx_over_errors",
"rx_crc_errors",
"rx_frame_errors",
"rx_fifo_errors",
"rx_missed_errors",
"tx_aborted_errors",
"tx_carrier_errors",
"tx_fifo_errors",
"tx_heartbeat_errors",
"tx_window_errors",
"rx_compressed",
"tx_compressed",
"netif_queue_stop",
"netif_queue_wakeup",
@ -254,6 +237,8 @@ static const char *bnad_net_stats_strings[BNAD_ETHTOOL_STATS_NUM] = {
"fc_tx_fid_parity_errors",
};
#define BNAD_ETHTOOL_STATS_NUM ARRAY_SIZE(bnad_net_stats_strings)
static int
bnad_get_settings(struct net_device *netdev, struct ethtool_cmd *cmd)
{
@ -658,6 +643,8 @@ bnad_get_strings(struct net_device *netdev, u32 stringset, u8 *string)
string += ETH_GSTRING_LEN;
sprintf(string, "rxq%d_allocbuf_failed", q_num);
string += ETH_GSTRING_LEN;
sprintf(string, "rxq%d_mapbuf_failed", q_num);
string += ETH_GSTRING_LEN;
sprintf(string, "rxq%d_producer_index", q_num);
string += ETH_GSTRING_LEN;
sprintf(string, "rxq%d_consumer_index", q_num);
@ -678,6 +665,9 @@ bnad_get_strings(struct net_device *netdev, u32 stringset, u8 *string)
sprintf(string, "rxq%d_allocbuf_failed",
q_num);
string += ETH_GSTRING_LEN;
sprintf(string, "rxq%d_mapbuf_failed",
q_num);
string += ETH_GSTRING_LEN;
sprintf(string, "rxq%d_producer_index",
q_num);
string += ETH_GSTRING_LEN;
@ -854,9 +844,9 @@ bnad_get_ethtool_stats(struct net_device *netdev, struct ethtool_stats *stats,
u64 *buf)
{
struct bnad *bnad = netdev_priv(netdev);
int i, j, bi;
int i, j, bi = 0;
unsigned long flags;
struct rtnl_link_stats64 *net_stats64;
struct rtnl_link_stats64 net_stats64;
u64 *stats64;
u32 bmap;
@ -871,14 +861,25 @@ bnad_get_ethtool_stats(struct net_device *netdev, struct ethtool_stats *stats,
* under the same lock
*/
spin_lock_irqsave(&bnad->bna_lock, flags);
bi = 0;
memset(buf, 0, stats->n_stats * sizeof(u64));
net_stats64 = (struct rtnl_link_stats64 *)buf;
bnad_netdev_qstats_fill(bnad, net_stats64);
bnad_netdev_hwstats_fill(bnad, net_stats64);
memset(&net_stats64, 0, sizeof(net_stats64));
bnad_netdev_qstats_fill(bnad, &net_stats64);
bnad_netdev_hwstats_fill(bnad, &net_stats64);
bi = sizeof(*net_stats64) / sizeof(u64);
buf[bi++] = net_stats64.rx_packets;
buf[bi++] = net_stats64.tx_packets;
buf[bi++] = net_stats64.rx_bytes;
buf[bi++] = net_stats64.tx_bytes;
buf[bi++] = net_stats64.rx_errors;
buf[bi++] = net_stats64.tx_errors;
buf[bi++] = net_stats64.rx_dropped;
buf[bi++] = net_stats64.tx_dropped;
buf[bi++] = net_stats64.multicast;
buf[bi++] = net_stats64.collisions;
buf[bi++] = net_stats64.rx_length_errors;
buf[bi++] = net_stats64.rx_crc_errors;
buf[bi++] = net_stats64.rx_frame_errors;
buf[bi++] = net_stats64.tx_fifo_errors;
/* Get netif_queue_stopped from stack */
bnad->stats.drv_stats.netif_queue_stopped = netif_queue_stopped(netdev);


@ -419,8 +419,8 @@ struct link_config {
unsigned short supported; /* link capabilities */
unsigned short advertising; /* advertised capabilities */
unsigned short lp_advertising; /* peer advertised capabilities */
unsigned short requested_speed; /* speed user has requested */
unsigned short speed; /* actual link speed */
unsigned int requested_speed; /* speed user has requested */
unsigned int speed; /* actual link speed */
unsigned char requested_fc; /* flow control user has requested */
unsigned char fc; /* actual link flow control */
unsigned char autoneg; /* autonegotiating? */


@ -4305,10 +4305,17 @@ static const struct pci_error_handlers cxgb4_eeh = {
.resume = eeh_resume,
};
/* Return true if the Link Configuration supports "High Speeds" (those greater
* than 1Gb/s).
*/
static inline bool is_x_10g_port(const struct link_config *lc)
{
return (lc->supported & FW_PORT_CAP_SPEED_10G) != 0 ||
(lc->supported & FW_PORT_CAP_SPEED_40G) != 0;
unsigned int speeds, high_speeds;
speeds = FW_PORT_CAP_SPEED_V(FW_PORT_CAP_SPEED_G(lc->supported));
high_speeds = speeds & ~(FW_PORT_CAP_SPEED_100M | FW_PORT_CAP_SPEED_1G);
return high_speeds != 0;
}
static inline void init_rspq(struct adapter *adap, struct sge_rspq *q,
@ -4756,8 +4763,12 @@ static void print_port_info(const struct net_device *dev)
bufp += sprintf(bufp, "1000/");
if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_10G)
bufp += sprintf(bufp, "10G/");
if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_25G)
bufp += sprintf(bufp, "25G/");
if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_40G)
bufp += sprintf(bufp, "40G/");
if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_100G)
bufp += sprintf(bufp, "100G/");
if (bufp != buf)
--bufp;
sprintf(bufp, "BASE-%s", t4_get_port_type_description(pi->port_type));


@ -3627,7 +3627,8 @@ void t4_ulprx_read_la(struct adapter *adap, u32 *la_buf)
}
#define ADVERT_MASK (FW_PORT_CAP_SPEED_100M | FW_PORT_CAP_SPEED_1G |\
FW_PORT_CAP_SPEED_10G | FW_PORT_CAP_SPEED_40G | \
FW_PORT_CAP_SPEED_10G | FW_PORT_CAP_SPEED_25G | \
FW_PORT_CAP_SPEED_40G | FW_PORT_CAP_SPEED_100G | \
FW_PORT_CAP_ANEG)
/**
@ -7196,8 +7197,12 @@ void t4_handle_get_port_info(struct port_info *pi, const __be64 *rpl)
speed = 1000;
else if (stat & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_10G))
speed = 10000;
else if (stat & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_25G))
speed = 25000;
else if (stat & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_40G))
speed = 40000;
else if (stat & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_100G))
speed = 100000;
lc = &pi->link_cfg;


@ -2265,6 +2265,12 @@ enum fw_port_cap {
FW_PORT_CAP_802_3_ASM_DIR = 0x8000,
};
#define FW_PORT_CAP_SPEED_S 0
#define FW_PORT_CAP_SPEED_M 0x3f
#define FW_PORT_CAP_SPEED_V(x) ((x) << FW_PORT_CAP_SPEED_S)
#define FW_PORT_CAP_SPEED_G(x) \
(((x) >> FW_PORT_CAP_SPEED_S) & FW_PORT_CAP_SPEED_M)
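With this getter/setter pair, the is_x_10g_port() check shown earlier reduces to masking off the sub-10G bits; a small sketch using the capability bits from this header:

/* Suppose a port advertises 1G and 40G: */
unsigned int supported = FW_PORT_CAP_SPEED_1G | FW_PORT_CAP_SPEED_40G;
unsigned int speeds = FW_PORT_CAP_SPEED_V(FW_PORT_CAP_SPEED_G(supported));
unsigned int high = speeds & ~(FW_PORT_CAP_SPEED_100M | FW_PORT_CAP_SPEED_1G);
/* high == FW_PORT_CAP_SPEED_40G, i.e. nonzero, so the port counts as
 * "high speed" -- and 25G/100G ports are now covered the same way. */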
enum fw_port_mdi {
FW_PORT_CAP_MDI_UNCHANGED,
FW_PORT_CAP_MDI_AUTO,


@ -108,8 +108,8 @@ struct link_config {
unsigned int supported; /* link capabilities */
unsigned int advertising; /* advertised capabilities */
unsigned short lp_advertising; /* peer advertised capabilities */
unsigned short requested_speed; /* speed user has requested */
unsigned short speed; /* actual link speed */
unsigned int requested_speed; /* speed user has requested */
unsigned int speed; /* actual link speed */
unsigned char requested_fc; /* flow control user has requested */
unsigned char fc; /* actual link flow control */
unsigned char autoneg; /* autonegotiating? */
@ -271,10 +271,17 @@ static inline bool is_10g_port(const struct link_config *lc)
return (lc->supported & FW_PORT_CAP_SPEED_10G) != 0;
}
/* Return true if the Link Configuration supports "High Speeds" (those greater
* than 1Gb/s).
*/
static inline bool is_x_10g_port(const struct link_config *lc)
{
return (lc->supported & FW_PORT_CAP_SPEED_10G) != 0 ||
(lc->supported & FW_PORT_CAP_SPEED_40G) != 0;
unsigned int speeds, high_speeds;
speeds = FW_PORT_CAP_SPEED_V(FW_PORT_CAP_SPEED_G(lc->supported));
high_speeds = speeds & ~(FW_PORT_CAP_SPEED_100M | FW_PORT_CAP_SPEED_1G);
return high_speeds != 0;
}
static inline unsigned int core_ticks_per_usec(const struct adapter *adapter)


@ -314,8 +314,9 @@ int t4vf_wr_mbox_core(struct adapter *adapter, const void *cmd, int size,
}
#define ADVERT_MASK (FW_PORT_CAP_SPEED_100M | FW_PORT_CAP_SPEED_1G |\
FW_PORT_CAP_SPEED_10G | FW_PORT_CAP_SPEED_40G | \
FW_PORT_CAP_SPEED_100G | FW_PORT_CAP_ANEG)
FW_PORT_CAP_SPEED_10G | FW_PORT_CAP_SPEED_25G | \
FW_PORT_CAP_SPEED_40G | FW_PORT_CAP_SPEED_100G | \
FW_PORT_CAP_ANEG)
/**
* init_link_config - initialize a link's SW state
@ -1712,8 +1713,12 @@ int t4vf_handle_fw_rpl(struct adapter *adapter, const __be64 *rpl)
speed = 1000;
else if (stat & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_10G))
speed = 10000;
else if (stat & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_25G))
speed = 25000;
else if (stat & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_40G))
speed = 40000;
else if (stat & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_100G))
speed = 100000;
/*
* Scan all of our "ports" (Virtual Interfaces) looking for


@ -977,7 +977,37 @@ static void emac_set_multicast_list(struct net_device *ndev)
dev->mcast_pending = 1;
return;
}
mutex_lock(&dev->link_lock);
__emac_set_multicast_list(dev);
mutex_unlock(&dev->link_lock);
}
static int emac_set_mac_address(struct net_device *ndev, void *sa)
{
struct emac_instance *dev = netdev_priv(ndev);
struct sockaddr *addr = sa;
struct emac_regs __iomem *p = dev->emacp;
if (!is_valid_ether_addr(addr->sa_data))
return -EADDRNOTAVAIL;
mutex_lock(&dev->link_lock);
memcpy(ndev->dev_addr, addr->sa_data, ndev->addr_len);
emac_rx_disable(dev);
emac_tx_disable(dev);
out_be32(&p->iahr, (ndev->dev_addr[0] << 8) | ndev->dev_addr[1]);
out_be32(&p->ialr, (ndev->dev_addr[2] << 24) |
(ndev->dev_addr[3] << 16) | (ndev->dev_addr[4] << 8) |
ndev->dev_addr[5]);
emac_tx_enable(dev);
emac_rx_enable(dev);
mutex_unlock(&dev->link_lock);
return 0;
}
static int emac_resize_rx_ring(struct emac_instance *dev, int new_mtu)
@ -2686,7 +2716,7 @@ static const struct net_device_ops emac_netdev_ops = {
.ndo_do_ioctl = emac_ioctl,
.ndo_tx_timeout = emac_tx_timeout,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = eth_mac_addr,
.ndo_set_mac_address = emac_set_mac_address,
.ndo_start_xmit = emac_start_xmit,
.ndo_change_mtu = eth_change_mtu,
};
@ -2699,7 +2729,7 @@ static const struct net_device_ops emac_gige_netdev_ops = {
.ndo_do_ioctl = emac_ioctl,
.ndo_tx_timeout = emac_tx_timeout,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = eth_mac_addr,
.ndo_set_mac_address = emac_set_mac_address,
.ndo_start_xmit = emac_start_xmit_sg,
.ndo_change_mtu = emac_change_mtu,
};


@ -1923,6 +1923,7 @@ const struct of_device_id of_mtk_match[] = {
{ .compatible = "mediatek,mt7623-eth" },
{},
};
MODULE_DEVICE_TABLE(of, of_mtk_match);
static struct platform_driver mtk_driver = {
.probe = mtk_probe,


@ -1305,8 +1305,8 @@ int mlx4_init_eq_table(struct mlx4_dev *dev)
return 0;
err_out_unmap:
while (i >= 0)
mlx4_free_eq(dev, &priv->eq_table.eq[i--]);
while (i > 0)
mlx4_free_eq(dev, &priv->eq_table.eq[--i]);
#ifdef CONFIG_RFS_ACCEL
for (i = 1; i <= dev->caps.num_ports; i++) {
if (mlx4_priv(dev)->port[i].rmap) {


@ -2970,6 +2970,7 @@ static int mlx4_init_port_info(struct mlx4_dev *dev, int port)
mlx4_err(dev, "Failed to create mtu file for port %d\n", port);
device_remove_file(&info->dev->persist->pdev->dev,
&info->port_attr);
devlink_port_unregister(&info->devlink_port);
info->port = -1;
}
@ -2984,6 +2985,8 @@ static void mlx4_cleanup_port_info(struct mlx4_port_info *info)
device_remove_file(&info->dev->persist->pdev->dev, &info->port_attr);
device_remove_file(&info->dev->persist->pdev->dev,
&info->port_mtu_attr);
devlink_port_unregister(&info->devlink_port);
#ifdef CONFIG_RFS_ACCEL
free_irq_cpu_rmap(info->rmap);
info->rmap = NULL;


@ -1554,6 +1554,7 @@ int mlx5_eswitch_enable_sriov(struct mlx5_eswitch *esw, int nvfs, int mode)
abort:
esw_enable_vport(esw, 0, UC_ADDR_CHANGE);
esw->mode = SRIOV_NONE;
return err;
}

View File

@ -446,7 +446,7 @@ mlx5_eswitch_create_vport_rx_rule(struct mlx5_eswitch *esw, int vport, u32 tirn)
static int esw_offloads_start(struct mlx5_eswitch *esw)
{
int err, num_vfs = esw->dev->priv.sriov.num_vfs;
int err, err1, num_vfs = esw->dev->priv.sriov.num_vfs;
if (esw->mode != SRIOV_LEGACY) {
esw_warn(esw->dev, "Can't set offloads mode, SRIOV legacy not enabled\n");
@ -455,8 +455,12 @@ static int esw_offloads_start(struct mlx5_eswitch *esw)
mlx5_eswitch_disable_sriov(esw);
err = mlx5_eswitch_enable_sriov(esw, num_vfs, SRIOV_OFFLOADS);
if (err)
esw_warn(esw->dev, "Failed set eswitch to offloads, err %d\n", err);
if (err) {
esw_warn(esw->dev, "Failed setting eswitch to offloads, err %d\n", err);
err1 = mlx5_eswitch_enable_sriov(esw, num_vfs, SRIOV_LEGACY);
if (err1)
esw_warn(esw->dev, "Failed setting eswitch back to legacy, err %d\n", err);
}
return err;
}
@ -508,12 +512,16 @@ int esw_offloads_init(struct mlx5_eswitch *esw, int nvports)
static int esw_offloads_stop(struct mlx5_eswitch *esw)
{
int err, num_vfs = esw->dev->priv.sriov.num_vfs;
int err, err1, num_vfs = esw->dev->priv.sriov.num_vfs;
mlx5_eswitch_disable_sriov(esw);
err = mlx5_eswitch_enable_sriov(esw, num_vfs, SRIOV_LEGACY);
if (err)
esw_warn(esw->dev, "Failed set eswitch legacy mode. err %d\n", err);
if (err) {
esw_warn(esw->dev, "Failed setting eswitch to legacy, err %d\n", err);
err1 = mlx5_eswitch_enable_sriov(esw, num_vfs, SRIOV_OFFLOADS);
if (err1)
esw_warn(esw->dev, "Failed setting eswitch back to offloads, err %d\n", err);
}
return err;
}

View File

@ -425,11 +425,11 @@ struct mlx5_cmd_fc_bulk *
mlx5_cmd_fc_bulk_alloc(struct mlx5_core_dev *dev, u16 id, int num)
{
struct mlx5_cmd_fc_bulk *b;
int outlen = sizeof(*b) +
int outlen =
MLX5_ST_SZ_BYTES(query_flow_counter_out) +
MLX5_ST_SZ_BYTES(traffic_counter) * num;
b = kzalloc(outlen, GFP_KERNEL);
b = kzalloc(sizeof(*b) + outlen, GFP_KERNEL);
if (!b)
return NULL;
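The allocation fix above separates two sizes the old code conflated: outlen must describe only the firmware output area (it is what the query command is given), while the kzalloc() adds sizeof(*b) for the bookkeeping header. A hedged userspace analogue using a trailing flexible array:

#include <stdlib.h>

struct fc_bulk {
	size_t outlen;		/* size of out[] only, not the header */
	unsigned char out[];	/* trailing buffer the device fills */
};

static struct fc_bulk *fc_bulk_alloc(size_t counters, size_t counter_sz)
{
	size_t outlen = counters * counter_sz;
	struct fc_bulk *b = calloc(1, sizeof(*b) + outlen);

	if (!b)
		return NULL;
	b->outlen = outlen;	/* must not include sizeof(*b) */
	return b;
}

int main(void)
{
	struct fc_bulk *b = fc_bulk_alloc(4, 16);

	free(b);
	return 0;
}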

View File

@ -2044,12 +2044,16 @@ static int nfp_net_netdev_open(struct net_device *netdev)
nn->rx_rings = kcalloc(nn->num_rx_rings, sizeof(*nn->rx_rings),
GFP_KERNEL);
if (!nn->rx_rings)
if (!nn->rx_rings) {
err = -ENOMEM;
goto err_free_lsc;
}
nn->tx_rings = kcalloc(nn->num_tx_rings, sizeof(*nn->tx_rings),
GFP_KERNEL);
if (!nn->tx_rings)
if (!nn->tx_rings) {
err = -ENOMEM;
goto err_free_rx_rings;
}
for (r = 0; r < nn->num_r_vecs; r++) {
err = nfp_net_prepare_vector(nn, &nn->r_vecs[r], r);
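Both added branches above fix the same slip: jumping into the error ladder without setting err, so the caller could see a stale success code for a failed open. A minimal sketch of the ladder with each failure setting its own code (names hypothetical):

#include <errno.h>
#include <stdlib.h>

/* Two-stage allocation unwound with goto labels; every jump into the
 * ladder must set err first, which is what the patch adds. */
static int open_rings(int **rx, int **tx, int nrx, int ntx)
{
	int err;

	*rx = calloc(nrx, sizeof(**rx));
	if (!*rx) {
		err = -ENOMEM;
		goto err_out;
	}
	*tx = calloc(ntx, sizeof(**tx));
	if (!*tx) {
		err = -ENOMEM;
		goto err_free_rx;
	}
	return 0;

err_free_rx:
	free(*rx);
	*rx = NULL;
err_out:
	return err;
}

int main(void)
{
	int *rx, *tx;

	if (open_rings(&rx, &tx, 8, 8) == 0) {
		free(tx);
		free(rx);
	}
	return 0;
}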

View File

@ -1153,8 +1153,8 @@ qed_mcp_send_drv_version(struct qed_hwfn *p_hwfn,
p_drv_version = &union_data.drv_version;
p_drv_version->version = p_ver->version;
for (i = 0; i < MCP_DRV_VER_STR_SIZE - 1; i += 4) {
val = cpu_to_be32(p_ver->name[i]);
for (i = 0; i < (MCP_DRV_VER_STR_SIZE - 4) / sizeof(u32); i++) {
val = cpu_to_be32(*((u32 *)&p_ver->name[i * sizeof(u32)]));
*(__be32 *)&p_drv_version->name[i * sizeof(u32)] = val;
}
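The corrected loop copies the version string four bytes at a time, converting each word to big-endian; the old loop converted a single char per iteration and stepped past the buffer. A standalone sketch of the word-wise packing (size constant hypothetical; memcpy stands in for the aliasing cast):

#include <arpa/inet.h>	/* htonl(), the userspace cpu_to_be32() */
#include <stdint.h>
#include <string.h>

#define VER_STR_SIZE 32

static void pack_ver_name(uint8_t dst[VER_STR_SIZE],
			  const char src[VER_STR_SIZE])
{
	uint32_t val;
	unsigned int i;

	for (i = 0; i < (VER_STR_SIZE - 4) / sizeof(uint32_t); i++) {
		memcpy(&val, &src[i * sizeof(uint32_t)], sizeof(val));
		val = htonl(val);
		memcpy(&dst[i * sizeof(uint32_t)], &val, sizeof(val));
	}
}

int main(void)
{
	uint8_t out[VER_STR_SIZE] = { 0 };
	char name[VER_STR_SIZE] = "8.10.10.20";

	pack_ver_name(out, name);
	return 0;
}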

View File

@ -261,7 +261,7 @@ static void dwmac1000_pmt(struct mac_device_info *hw, unsigned long mode)
}
if (mode & WAKE_UCAST) {
pr_debug("GMAC: WOL on global unicast\n");
pmt |= global_unicast;
pmt |= power_down | global_unicast | wake_up_frame_en;
}
writel(pmt, ioaddr + GMAC_PMT);

View File

@ -102,7 +102,7 @@ static void dwmac4_pmt(struct mac_device_info *hw, unsigned long mode)
}
if (mode & WAKE_UCAST) {
pr_debug("GMAC: WOL on global unicast\n");
pmt |= global_unicast;
pmt |= power_down | global_unicast | wake_up_frame_en;
}
writel(pmt, ioaddr + GMAC_PMT);

View File

@ -424,10 +424,8 @@ static int xgene_mdio_remove(struct platform_device *pdev)
mdiobus_unregister(mdio_bus);
mdiobus_free(mdio_bus);
if (dev->of_node) {
if (IS_ERR(pdata->clk))
clk_disable_unprepare(pdata->clk);
}
if (dev->of_node)
clk_disable_unprepare(pdata->clk);
return 0;
}

View File

@ -32,7 +32,7 @@
#define NETNEXT_VERSION "08"
/* Information for net */
#define NET_VERSION "5"
#define NET_VERSION "6"
#define DRIVER_VERSION "v1." NETNEXT_VERSION "." NET_VERSION
#define DRIVER_AUTHOR "Realtek linux nic maintainers <nic_swsd@realtek.com>"
@ -2552,6 +2552,77 @@ static void r8152_aldps_en(struct r8152 *tp, bool enable)
}
}
static inline void r8152_mmd_indirect(struct r8152 *tp, u16 dev, u16 reg)
{
ocp_reg_write(tp, OCP_EEE_AR, FUN_ADDR | dev);
ocp_reg_write(tp, OCP_EEE_DATA, reg);
ocp_reg_write(tp, OCP_EEE_AR, FUN_DATA | dev);
}
static u16 r8152_mmd_read(struct r8152 *tp, u16 dev, u16 reg)
{
u16 data;
r8152_mmd_indirect(tp, dev, reg);
data = ocp_reg_read(tp, OCP_EEE_DATA);
ocp_reg_write(tp, OCP_EEE_AR, 0x0000);
return data;
}
static void r8152_mmd_write(struct r8152 *tp, u16 dev, u16 reg, u16 data)
{
r8152_mmd_indirect(tp, dev, reg);
ocp_reg_write(tp, OCP_EEE_DATA, data);
ocp_reg_write(tp, OCP_EEE_AR, 0x0000);
}
static void r8152_eee_en(struct r8152 *tp, bool enable)
{
u16 config1, config2, config3;
u32 ocp_data;
ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_EEE_CR);
config1 = ocp_reg_read(tp, OCP_EEE_CONFIG1) & ~sd_rise_time_mask;
config2 = ocp_reg_read(tp, OCP_EEE_CONFIG2);
config3 = ocp_reg_read(tp, OCP_EEE_CONFIG3) & ~fast_snr_mask;
if (enable) {
ocp_data |= EEE_RX_EN | EEE_TX_EN;
config1 |= EEE_10_CAP | EEE_NWAY_EN | TX_QUIET_EN | RX_QUIET_EN;
config1 |= sd_rise_time(1);
config2 |= RG_DACQUIET_EN | RG_LDVQUIET_EN;
config3 |= fast_snr(42);
} else {
ocp_data &= ~(EEE_RX_EN | EEE_TX_EN);
config1 &= ~(EEE_10_CAP | EEE_NWAY_EN | TX_QUIET_EN |
RX_QUIET_EN);
config1 |= sd_rise_time(7);
config2 &= ~(RG_DACQUIET_EN | RG_LDVQUIET_EN);
config3 |= fast_snr(511);
}
ocp_write_word(tp, MCU_TYPE_PLA, PLA_EEE_CR, ocp_data);
ocp_reg_write(tp, OCP_EEE_CONFIG1, config1);
ocp_reg_write(tp, OCP_EEE_CONFIG2, config2);
ocp_reg_write(tp, OCP_EEE_CONFIG3, config3);
}
static void r8152b_enable_eee(struct r8152 *tp)
{
r8152_eee_en(tp, true);
r8152_mmd_write(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, MDIO_EEE_100TX);
}
static void r8152b_enable_fc(struct r8152 *tp)
{
u16 anar;
anar = r8152_mdio_read(tp, MII_ADVERTISE);
anar |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
r8152_mdio_write(tp, MII_ADVERTISE, anar);
}
static void rtl8152_disable(struct r8152 *tp)
{
r8152_aldps_en(tp, false);
@ -2561,13 +2632,9 @@ static void rtl8152_disable(struct r8152 *tp)
static void r8152b_hw_phy_cfg(struct r8152 *tp)
{
u16 data;
data = r8152_mdio_read(tp, MII_BMCR);
if (data & BMCR_PDOWN) {
data &= ~BMCR_PDOWN;
r8152_mdio_write(tp, MII_BMCR, data);
}
r8152b_enable_eee(tp);
r8152_aldps_en(tp, true);
r8152b_enable_fc(tp);
set_bit(PHY_RESET, &tp->flags);
}
@ -2701,20 +2768,52 @@ static void r8152b_enter_oob(struct r8152 *tp)
ocp_write_dword(tp, MCU_TYPE_PLA, PLA_RCR, ocp_data);
}
static void r8153_aldps_en(struct r8152 *tp, bool enable)
{
u16 data;
data = ocp_reg_read(tp, OCP_POWER_CFG);
if (enable) {
data |= EN_ALDPS;
ocp_reg_write(tp, OCP_POWER_CFG, data);
} else {
data &= ~EN_ALDPS;
ocp_reg_write(tp, OCP_POWER_CFG, data);
msleep(20);
}
}
static void r8153_eee_en(struct r8152 *tp, bool enable)
{
u32 ocp_data;
u16 config;
ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_EEE_CR);
config = ocp_reg_read(tp, OCP_EEE_CFG);
if (enable) {
ocp_data |= EEE_RX_EN | EEE_TX_EN;
config |= EEE10_EN;
} else {
ocp_data &= ~(EEE_RX_EN | EEE_TX_EN);
config &= ~EEE10_EN;
}
ocp_write_word(tp, MCU_TYPE_PLA, PLA_EEE_CR, ocp_data);
ocp_reg_write(tp, OCP_EEE_CFG, config);
}
static void r8153_hw_phy_cfg(struct r8152 *tp)
{
u32 ocp_data;
u16 data;
if (tp->version == RTL_VER_03 || tp->version == RTL_VER_04 ||
tp->version == RTL_VER_05)
ocp_reg_write(tp, OCP_ADC_CFG, CKADSEL_L | ADC_EN | EN_EMI_L);
/* disable ALDPS before updating the PHY parameters */
r8153_aldps_en(tp, false);
data = r8152_mdio_read(tp, MII_BMCR);
if (data & BMCR_PDOWN) {
data &= ~BMCR_PDOWN;
r8152_mdio_write(tp, MII_BMCR, data);
}
/* disable EEE before updating the PHY parameters */
r8153_eee_en(tp, false);
ocp_reg_write(tp, OCP_EEE_ADV, 0);
if (tp->version == RTL_VER_03) {
data = ocp_reg_read(tp, OCP_EEE_CFG);
@ -2745,6 +2844,12 @@ static void r8153_hw_phy_cfg(struct r8152 *tp)
sram_write(tp, SRAM_10M_AMP1, 0x00af);
sram_write(tp, SRAM_10M_AMP2, 0x0208);
r8153_eee_en(tp, true);
ocp_reg_write(tp, OCP_EEE_ADV, MDIO_EEE_1000T | MDIO_EEE_100TX);
r8153_aldps_en(tp, true);
r8152b_enable_fc(tp);
set_bit(PHY_RESET, &tp->flags);
}
@ -2866,21 +2971,6 @@ static void r8153_enter_oob(struct r8152 *tp)
ocp_write_dword(tp, MCU_TYPE_PLA, PLA_RCR, ocp_data);
}
static void r8153_aldps_en(struct r8152 *tp, bool enable)
{
u16 data;
data = ocp_reg_read(tp, OCP_POWER_CFG);
if (enable) {
data |= EN_ALDPS;
ocp_reg_write(tp, OCP_POWER_CFG, data);
} else {
data &= ~EN_ALDPS;
ocp_reg_write(tp, OCP_POWER_CFG, data);
msleep(20);
}
}
static void rtl8153_disable(struct r8152 *tp)
{
r8153_aldps_en(tp, false);
@ -3246,103 +3336,6 @@ static int rtl8152_close(struct net_device *netdev)
return res;
}
static inline void r8152_mmd_indirect(struct r8152 *tp, u16 dev, u16 reg)
{
ocp_reg_write(tp, OCP_EEE_AR, FUN_ADDR | dev);
ocp_reg_write(tp, OCP_EEE_DATA, reg);
ocp_reg_write(tp, OCP_EEE_AR, FUN_DATA | dev);
}
static u16 r8152_mmd_read(struct r8152 *tp, u16 dev, u16 reg)
{
u16 data;
r8152_mmd_indirect(tp, dev, reg);
data = ocp_reg_read(tp, OCP_EEE_DATA);
ocp_reg_write(tp, OCP_EEE_AR, 0x0000);
return data;
}
static void r8152_mmd_write(struct r8152 *tp, u16 dev, u16 reg, u16 data)
{
r8152_mmd_indirect(tp, dev, reg);
ocp_reg_write(tp, OCP_EEE_DATA, data);
ocp_reg_write(tp, OCP_EEE_AR, 0x0000);
}
static void r8152_eee_en(struct r8152 *tp, bool enable)
{
u16 config1, config2, config3;
u32 ocp_data;
ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_EEE_CR);
config1 = ocp_reg_read(tp, OCP_EEE_CONFIG1) & ~sd_rise_time_mask;
config2 = ocp_reg_read(tp, OCP_EEE_CONFIG2);
config3 = ocp_reg_read(tp, OCP_EEE_CONFIG3) & ~fast_snr_mask;
if (enable) {
ocp_data |= EEE_RX_EN | EEE_TX_EN;
config1 |= EEE_10_CAP | EEE_NWAY_EN | TX_QUIET_EN | RX_QUIET_EN;
config1 |= sd_rise_time(1);
config2 |= RG_DACQUIET_EN | RG_LDVQUIET_EN;
config3 |= fast_snr(42);
} else {
ocp_data &= ~(EEE_RX_EN | EEE_TX_EN);
config1 &= ~(EEE_10_CAP | EEE_NWAY_EN | TX_QUIET_EN |
RX_QUIET_EN);
config1 |= sd_rise_time(7);
config2 &= ~(RG_DACQUIET_EN | RG_LDVQUIET_EN);
config3 |= fast_snr(511);
}
ocp_write_word(tp, MCU_TYPE_PLA, PLA_EEE_CR, ocp_data);
ocp_reg_write(tp, OCP_EEE_CONFIG1, config1);
ocp_reg_write(tp, OCP_EEE_CONFIG2, config2);
ocp_reg_write(tp, OCP_EEE_CONFIG3, config3);
}
static void r8152b_enable_eee(struct r8152 *tp)
{
r8152_eee_en(tp, true);
r8152_mmd_write(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, MDIO_EEE_100TX);
}
static void r8153_eee_en(struct r8152 *tp, bool enable)
{
u32 ocp_data;
u16 config;
ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_EEE_CR);
config = ocp_reg_read(tp, OCP_EEE_CFG);
if (enable) {
ocp_data |= EEE_RX_EN | EEE_TX_EN;
config |= EEE10_EN;
} else {
ocp_data &= ~(EEE_RX_EN | EEE_TX_EN);
config &= ~EEE10_EN;
}
ocp_write_word(tp, MCU_TYPE_PLA, PLA_EEE_CR, ocp_data);
ocp_reg_write(tp, OCP_EEE_CFG, config);
}
static void r8153_enable_eee(struct r8152 *tp)
{
r8153_eee_en(tp, true);
ocp_reg_write(tp, OCP_EEE_ADV, MDIO_EEE_1000T | MDIO_EEE_100TX);
}
static void r8152b_enable_fc(struct r8152 *tp)
{
u16 anar;
anar = r8152_mdio_read(tp, MII_ADVERTISE);
anar |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
r8152_mdio_write(tp, MII_ADVERTISE, anar);
}
static void rtl_tally_reset(struct r8152 *tp)
{
u32 ocp_data;
@ -3355,10 +3348,17 @@ static void rtl_tally_reset(struct r8152 *tp)
static void r8152b_init(struct r8152 *tp)
{
u32 ocp_data;
u16 data;
if (test_bit(RTL8152_UNPLUG, &tp->flags))
return;
data = r8152_mdio_read(tp, MII_BMCR);
if (data & BMCR_PDOWN) {
data &= ~BMCR_PDOWN;
r8152_mdio_write(tp, MII_BMCR, data);
}
r8152_aldps_en(tp, false);
if (tp->version == RTL_VER_01) {
@ -3380,9 +3380,6 @@ static void r8152b_init(struct r8152 *tp)
SPDWN_RXDV_MSK | SPDWN_LINKCHG_MSK;
ocp_write_word(tp, MCU_TYPE_PLA, PLA_GPHY_INTR_IMR, ocp_data);
r8152b_enable_eee(tp);
r8152_aldps_en(tp, true);
r8152b_enable_fc(tp);
rtl_tally_reset(tp);
/* enable rx aggregation */
@ -3394,12 +3391,12 @@ static void r8152b_init(struct r8152 *tp)
static void r8153_init(struct r8152 *tp)
{
u32 ocp_data;
u16 data;
int i;
if (test_bit(RTL8152_UNPLUG, &tp->flags))
return;
r8153_aldps_en(tp, false);
r8153_u1u2en(tp, false);
for (i = 0; i < 500; i++) {
@ -3416,6 +3413,23 @@ static void r8153_init(struct r8152 *tp)
msleep(20);
}
if (tp->version == RTL_VER_03 || tp->version == RTL_VER_04 ||
tp->version == RTL_VER_05)
ocp_reg_write(tp, OCP_ADC_CFG, CKADSEL_L | ADC_EN | EN_EMI_L);
data = r8152_mdio_read(tp, MII_BMCR);
if (data & BMCR_PDOWN) {
data &= ~BMCR_PDOWN;
r8152_mdio_write(tp, MII_BMCR, data);
}
for (i = 0; i < 500; i++) {
ocp_data = ocp_reg_read(tp, OCP_PHY_STATUS) & PHY_STAT_MASK;
if (ocp_data == PHY_STAT_LAN_ON)
break;
msleep(20);
}
usb_disable_lpm(tp->udev);
r8153_u2p3en(tp, false);
@ -3483,9 +3497,6 @@ static void r8153_init(struct r8152 *tp)
ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL3, 0);
ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL4, 0);
r8153_enable_eee(tp);
r8153_aldps_en(tp, true);
r8152b_enable_fc(tp);
rtl_tally_reset(tp);
r8153_u2p3en(tp, true);
}

View File

@ -513,6 +513,15 @@ int iwl_mvm_tx_skb_non_sta(struct iwl_mvm *mvm, struct sk_buff *skb)
int hdrlen = ieee80211_hdrlen(hdr->frame_control);
int queue;
/* IWL_MVM_OFFCHANNEL_QUEUE is used for ROC packets that can be used
* in 2 different types of vifs, P2P & STATION. P2P uses the offchannel
* queue. STATION (HS2.0) uses the auxiliary context of the FW,
* and hence needs to be sent on the aux queue
*/
if (IEEE80211_SKB_CB(skb)->hw_queue == IWL_MVM_OFFCHANNEL_QUEUE &&
skb_info->control.vif->type == NL80211_IFTYPE_STATION)
IEEE80211_SKB_CB(skb)->hw_queue = mvm->aux_queue;
memcpy(&info, skb->cb, sizeof(info));
if (WARN_ON_ONCE(info.flags & IEEE80211_TX_CTL_AMPDU))
@ -526,16 +535,6 @@ int iwl_mvm_tx_skb_non_sta(struct iwl_mvm *mvm, struct sk_buff *skb)
/* This holds the amsdu headers length */
skb_info->driver_data[0] = (void *)(uintptr_t)0;
/*
* IWL_MVM_OFFCHANNEL_QUEUE is used for ROC packets that can be used
* in 2 different types of vifs, P2P & STATION. P2P uses the offchannel
* queue. STATION (HS2.0) uses the auxiliary context of the FW,
* and hence needs to be sent on the aux queue
*/
if (IEEE80211_SKB_CB(skb)->hw_queue == IWL_MVM_OFFCHANNEL_QUEUE &&
info.control.vif->type == NL80211_IFTYPE_STATION)
IEEE80211_SKB_CB(skb)->hw_queue = mvm->aux_queue;
queue = info.hw_queue;
/*

View File

@ -271,6 +271,11 @@ static int netback_probe(struct xenbus_device *dev,
be->dev = dev;
dev_set_drvdata(&dev->dev, be);
be->state = XenbusStateInitialising;
err = xenbus_switch_state(dev, XenbusStateInitialising);
if (err)
goto fail;
sg = 1;
do {
@ -383,11 +388,6 @@ static int netback_probe(struct xenbus_device *dev,
be->hotplug_script = script;
err = xenbus_switch_state(dev, XenbusStateInitWait);
if (err)
goto fail;
be->state = XenbusStateInitWait;
/* This kicks hotplug scripts, so do it immediately. */
err = backend_create_xenvif(be);
@ -492,20 +492,20 @@ static inline void backend_switch_state(struct backend_info *be,
/* Handle backend state transitions:
*
* The backend state starts in InitWait and the following transitions are
* The backend state starts in Initialising and the following transitions are
* allowed.
*
 * InitWait -> Connected
 *
 *    ^    \         |
 *    |     \        |
 *    |      \       |
 *    |       \      |
 *    |        \     |
 *    |         \    |
 *    |          V   V
 *
 *  Closed  <-> Closing
 *
 * Initialising -> InitWait -> Connected
 *          \
 *           \        ^    \         |
 *            \       |     \        |
 *             \      |      \       |
 *              \     |       \      |
 *               \    |        \     |
 *                \   |         \    |
 *                 V  |          V   V
 *
 *                  Closed  <-> Closing
*
* The state argument specifies the eventual state of the backend and the
* function transitions to that state via the shortest path.
@ -515,6 +515,20 @@ static void set_backend_state(struct backend_info *be,
{
while (be->state != state) {
switch (be->state) {
case XenbusStateInitialising:
switch (state) {
case XenbusStateInitWait:
case XenbusStateConnected:
case XenbusStateClosing:
backend_switch_state(be, XenbusStateInitWait);
break;
case XenbusStateClosed:
backend_switch_state(be, XenbusStateClosed);
break;
default:
BUG();
}
break;
case XenbusStateClosed:
switch (state) {
case XenbusStateInitWait:

View File

@ -561,7 +561,6 @@ static int nvme_rdma_init_queue(struct nvme_rdma_ctrl *ctrl,
queue = &ctrl->queues[idx];
queue->ctrl = ctrl;
queue->flags = 0;
init_completion(&queue->cm_done);
if (idx > 0)
@ -595,6 +594,7 @@ static int nvme_rdma_init_queue(struct nvme_rdma_ctrl *ctrl,
goto out_destroy_cm_id;
}
clear_bit(NVME_RDMA_Q_DELETING, &queue->flags);
set_bit(NVME_RDMA_Q_CONNECTED, &queue->flags);
return 0;

View File

@ -999,6 +999,7 @@ struct qeth_cmd_buffer *qeth_get_setassparms_cmd(struct qeth_card *,
__u16, __u16,
enum qeth_prot_versions);
int qeth_set_features(struct net_device *, netdev_features_t);
int qeth_recover_features(struct net_device *);
netdev_features_t qeth_fix_features(struct net_device *, netdev_features_t);
/* exports for OSN */

View File

@ -3619,7 +3619,8 @@ static void qeth_qdio_cq_handler(struct qeth_card *card,
int e;
e = 0;
while (buffer->element[e].addr) {
while ((e < QDIO_MAX_ELEMENTS_PER_BUFFER) &&
buffer->element[e].addr) {
unsigned long phys_aob_addr;
phys_aob_addr = (unsigned long) buffer->element[e].addr;
@ -6131,6 +6132,35 @@ static int qeth_set_ipa_tso(struct qeth_card *card, int on)
return rc;
}
/* try to restore device features on a device after recovery */
int qeth_recover_features(struct net_device *dev)
{
struct qeth_card *card = dev->ml_priv;
netdev_features_t recover = dev->features;
if (recover & NETIF_F_IP_CSUM) {
if (qeth_set_ipa_csum(card, 1, IPA_OUTBOUND_CHECKSUM))
recover ^= NETIF_F_IP_CSUM;
}
if (recover & NETIF_F_RXCSUM) {
if (qeth_set_ipa_csum(card, 1, IPA_INBOUND_CHECKSUM))
recover ^= NETIF_F_RXCSUM;
}
if (recover & NETIF_F_TSO) {
if (qeth_set_ipa_tso(card, 1))
recover ^= NETIF_F_TSO;
}
if (recover == dev->features)
return 0;
dev_warn(&card->gdev->dev,
"Device recovery failed to restore all offload features\n");
dev->features = recover;
return -EIO;
}
EXPORT_SYMBOL_GPL(qeth_recover_features);
int qeth_set_features(struct net_device *dev, netdev_features_t features)
{
struct qeth_card *card = dev->ml_priv;

View File

@ -1124,14 +1124,11 @@ static int qeth_l2_setup_netdev(struct qeth_card *card)
card->dev->hw_features |= NETIF_F_RXCSUM;
card->dev->vlan_features |= NETIF_F_RXCSUM;
}
/* Turn on SG per default */
card->dev->features |= NETIF_F_SG;
}
card->info.broadcast_capable = 1;
qeth_l2_request_initial_mac(card);
card->dev->gso_max_size = (QETH_MAX_BUFFER_ELEMENTS(card) - 1) *
PAGE_SIZE;
card->dev->gso_max_segs = (QETH_MAX_BUFFER_ELEMENTS(card) - 1);
SET_NETDEV_DEV(card->dev, &card->gdev->dev);
netif_napi_add(card->dev, &card->napi, qeth_l2_poll, QETH_NAPI_WEIGHT);
netif_carrier_off(card->dev);
@ -1246,6 +1243,9 @@ static int __qeth_l2_set_online(struct ccwgroup_device *gdev, int recovery_mode)
}
/* this also sets saved unicast addresses */
qeth_l2_set_rx_mode(card->dev);
rtnl_lock();
qeth_recover_features(card->dev);
rtnl_unlock();
}
/* let user_space know that device is online */
kobject_uevent(&gdev->dev.kobj, KOBJ_CHANGE);

View File

@ -257,6 +257,11 @@ int qeth_l3_delete_ip(struct qeth_card *card, struct qeth_ipaddr *tmp_addr)
if (addr->in_progress)
return -EINPROGRESS;
if (!qeth_card_hw_is_reachable(card)) {
addr->disp_flag = QETH_DISP_ADDR_DELETE;
return 0;
}
rc = qeth_l3_deregister_addr_entry(card, addr);
hash_del(&addr->hnode);
@ -296,6 +301,11 @@ int qeth_l3_add_ip(struct qeth_card *card, struct qeth_ipaddr *tmp_addr)
hash_add(card->ip_htable, &addr->hnode,
qeth_l3_ipaddr_hash(addr));
if (!qeth_card_hw_is_reachable(card)) {
addr->disp_flag = QETH_DISP_ADDR_ADD;
return 0;
}
/* qeth_l3_register_addr_entry can go to sleep
* if we add a IPV4 addr. It is caused by the reason
* that SETIP ipa cmd starts ARP staff for IPV4 addr.
@ -390,12 +400,16 @@ static void qeth_l3_recover_ip(struct qeth_card *card)
int i;
int rc;
QETH_CARD_TEXT(card, 4, "recoverip");
QETH_CARD_TEXT(card, 4, "recovrip");
spin_lock_bh(&card->ip_lock);
hash_for_each_safe(card->ip_htable, i, tmp, addr, hnode) {
if (addr->disp_flag == QETH_DISP_ADDR_ADD) {
if (addr->disp_flag == QETH_DISP_ADDR_DELETE) {
qeth_l3_deregister_addr_entry(card, addr);
hash_del(&addr->hnode);
kfree(addr);
} else if (addr->disp_flag == QETH_DISP_ADDR_ADD) {
if (addr->proto == QETH_PROT_IPV4) {
addr->in_progress = 1;
spin_unlock_bh(&card->ip_lock);
@ -407,10 +421,8 @@ static void qeth_l3_recover_ip(struct qeth_card *card)
if (!rc) {
addr->disp_flag = QETH_DISP_ADDR_DO_NOTHING;
if (addr->ref_counter < 1) {
if (addr->ref_counter < 1)
qeth_l3_delete_ip(card, addr);
kfree(addr);
}
} else {
hash_del(&addr->hnode);
kfree(addr);
@ -689,7 +701,7 @@ int qeth_l3_add_vipa(struct qeth_card *card, enum qeth_prot_versions proto,
spin_lock_bh(&card->ip_lock);
if (!qeth_l3_ip_from_hash(card, ipaddr))
if (qeth_l3_ip_from_hash(card, ipaddr))
rc = -EEXIST;
else
qeth_l3_add_ip(card, ipaddr);
@ -757,7 +769,7 @@ int qeth_l3_add_rxip(struct qeth_card *card, enum qeth_prot_versions proto,
spin_lock_bh(&card->ip_lock);
if (!qeth_l3_ip_from_hash(card, ipaddr))
if (qeth_l3_ip_from_hash(card, ipaddr))
rc = -EEXIST;
else
qeth_l3_add_ip(card, ipaddr);
@ -3108,7 +3120,6 @@ static int qeth_l3_setup_netdev(struct qeth_card *card)
card->dev->vlan_features = NETIF_F_SG |
NETIF_F_RXCSUM | NETIF_F_IP_CSUM |
NETIF_F_TSO;
card->dev->features = NETIF_F_SG;
}
}
} else if (card->info.type == QETH_CARD_TYPE_IQD) {
@ -3136,7 +3147,6 @@ static int qeth_l3_setup_netdev(struct qeth_card *card)
netif_keep_dst(card->dev);
card->dev->gso_max_size = (QETH_MAX_BUFFER_ELEMENTS(card) - 1) *
PAGE_SIZE;
card->dev->gso_max_segs = (QETH_MAX_BUFFER_ELEMENTS(card) - 1);
SET_NETDEV_DEV(card->dev, &card->gdev->dev);
netif_napi_add(card->dev, &card->napi, qeth_l3_poll, QETH_NAPI_WEIGHT);
@ -3269,6 +3279,7 @@ static int __qeth_l3_set_online(struct ccwgroup_device *gdev, int recovery_mode)
else
dev_open(card->dev);
qeth_l3_set_multicast_list(card->dev);
qeth_recover_features(card->dev);
rtnl_unlock();
}
qeth_trace_features(card);

View File

@ -297,7 +297,9 @@ static ssize_t qeth_l3_dev_hsuid_store(struct device *dev,
addr->u.a6.pfxlen = 0;
addr->type = QETH_IP_TYPE_NORMAL;
spin_lock_bh(&card->ip_lock);
qeth_l3_delete_ip(card, addr);
spin_unlock_bh(&card->ip_lock);
kfree(addr);
}
@ -329,7 +331,10 @@ static ssize_t qeth_l3_dev_hsuid_store(struct device *dev,
addr->type = QETH_IP_TYPE_NORMAL;
} else
return -ENOMEM;
spin_lock_bh(&card->ip_lock);
qeth_l3_add_ip(card, addr);
spin_unlock_bh(&card->ip_lock);
kfree(addr);
return count;

View File

@ -12,6 +12,7 @@ Hopefully this will happen later in 2016.
Other TODOs:
- There are two possible replies to CEC_MSG_INITIATE_ARC. How to handle that?
- Add a flag to inhibit passing CEC RC messages to the rc subsystem.
Applications should be able to choose this when calling S_LOG_ADDRS.
- If the reply field of cec_msg is set then when the reply arrives it

View File

@ -124,10 +124,10 @@ static void cec_queue_event(struct cec_adapter *adap,
u64 ts = ktime_get_ns();
struct cec_fh *fh;
mutex_lock(&adap->devnode.fhs_lock);
mutex_lock(&adap->devnode.lock);
list_for_each_entry(fh, &adap->devnode.fhs, list)
cec_queue_event_fh(fh, ev, ts);
mutex_unlock(&adap->devnode.fhs_lock);
mutex_unlock(&adap->devnode.lock);
}
/*
@ -191,12 +191,12 @@ static void cec_queue_msg_monitor(struct cec_adapter *adap,
u32 monitor_mode = valid_la ? CEC_MODE_MONITOR :
CEC_MODE_MONITOR_ALL;
mutex_lock(&adap->devnode.fhs_lock);
mutex_lock(&adap->devnode.lock);
list_for_each_entry(fh, &adap->devnode.fhs, list) {
if (fh->mode_follower >= monitor_mode)
cec_queue_msg_fh(fh, msg);
}
mutex_unlock(&adap->devnode.fhs_lock);
mutex_unlock(&adap->devnode.lock);
}
/*
@ -207,12 +207,12 @@ static void cec_queue_msg_followers(struct cec_adapter *adap,
{
struct cec_fh *fh;
mutex_lock(&adap->devnode.fhs_lock);
mutex_lock(&adap->devnode.lock);
list_for_each_entry(fh, &adap->devnode.fhs, list) {
if (fh->mode_follower == CEC_MODE_FOLLOWER)
cec_queue_msg_fh(fh, msg);
}
mutex_unlock(&adap->devnode.fhs_lock);
mutex_unlock(&adap->devnode.lock);
}
/* Notify userspace of an adapter state change. */
@ -851,6 +851,9 @@ void cec_received_msg(struct cec_adapter *adap, struct cec_msg *msg)
if (!valid_la || msg->len <= 1)
return;
if (adap->log_addrs.log_addr_mask == 0)
return;
/*
* Process the message on the protocol level. If is_reply is true,
* then cec_receive_notify() won't pass on the reply to the listener(s)
@ -1047,11 +1050,17 @@ static int cec_config_thread_func(void *arg)
dprintk(1, "could not claim LA %d\n", i);
}
if (adap->log_addrs.log_addr_mask == 0 &&
!(las->flags & CEC_LOG_ADDRS_FL_ALLOW_UNREG_FALLBACK))
goto unconfigure;
configured:
if (adap->log_addrs.log_addr_mask == 0) {
/* Fall back to unregistered */
las->log_addr[0] = CEC_LOG_ADDR_UNREGISTERED;
las->log_addr_mask = 1 << las->log_addr[0];
for (i = 1; i < las->num_log_addrs; i++)
las->log_addr[i] = CEC_LOG_ADDR_INVALID;
}
adap->is_configured = true;
adap->is_configuring = false;
@ -1070,6 +1079,8 @@ static int cec_config_thread_func(void *arg)
cec_report_features(adap, i);
cec_report_phys_addr(adap, i);
}
for (i = las->num_log_addrs; i < CEC_MAX_LOG_ADDRS; i++)
las->log_addr[i] = CEC_LOG_ADDR_INVALID;
mutex_lock(&adap->lock);
adap->kthread_config = NULL;
mutex_unlock(&adap->lock);
@ -1398,7 +1409,6 @@ static int cec_receive_notify(struct cec_adapter *adap, struct cec_msg *msg,
u8 init_laddr = cec_msg_initiator(msg);
u8 devtype = cec_log_addr2dev(adap, dest_laddr);
int la_idx = cec_log_addr2idx(adap, dest_laddr);
bool is_directed = la_idx >= 0;
bool from_unregistered = init_laddr == 0xf;
struct cec_msg tx_cec_msg = { };
@ -1560,7 +1570,7 @@ static int cec_receive_notify(struct cec_adapter *adap, struct cec_msg *msg,
* Unprocessed messages are aborted if userspace isn't doing
* any processing either.
*/
if (is_directed && !is_reply && !adap->follower_cnt &&
if (!is_broadcast && !is_reply && !adap->follower_cnt &&
!adap->cec_follower && msg->msg[1] != CEC_MSG_FEATURE_ABORT)
return cec_feature_abort(adap, msg);
break;
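The fallback wired up in cec_config_thread_func() above is opt-in from userspace through the new CEC_LOG_ADDRS_FL_ALLOW_UNREG_FALLBACK flag (the ioctl plumbing is in the next file). A hedged sketch, assuming a v4.8-era <linux/cec.h>; the device path and OSD name are placeholders:

#include <fcntl.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/cec.h>

/* Try to claim a playback logical address; if none of that type is
 * free, let the adapter fall back to Unregistered instead of going
 * back to the unconfigured state. */
int claim_playback_with_fallback(const char *path /* e.g. "/dev/cec0" */)
{
	struct cec_log_addrs laddrs;
	int fd, ret;

	fd = open(path, O_RDWR);
	if (fd < 0)
		return -1;

	memset(&laddrs, 0, sizeof(laddrs));
	laddrs.cec_version = CEC_OP_CEC_VERSION_2_0;
	laddrs.vendor_id = CEC_VENDOR_ID_NONE;
	laddrs.num_log_addrs = 1;
	laddrs.log_addr_type[0] = CEC_LOG_ADDR_TYPE_PLAYBACK;
	laddrs.primary_device_type[0] = CEC_OP_PRIM_DEVTYPE_PLAYBACK;
	laddrs.all_device_types[0] = CEC_OP_ALL_DEVTYPE_PLAYBACK;
	laddrs.flags = CEC_LOG_ADDRS_FL_ALLOW_UNREG_FALLBACK;
	strcpy(laddrs.osd_name, "demo");

	ret = ioctl(fd, CEC_ADAP_S_LOG_ADDRS, &laddrs);
	close(fd);
	return ret;
}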

View File

@ -162,7 +162,7 @@ static long cec_adap_s_log_addrs(struct cec_adapter *adap, struct cec_fh *fh,
return -ENOTTY;
if (copy_from_user(&log_addrs, parg, sizeof(log_addrs)))
return -EFAULT;
log_addrs.flags = 0;
log_addrs.flags &= CEC_LOG_ADDRS_FL_ALLOW_UNREG_FALLBACK;
mutex_lock(&adap->lock);
if (!adap->is_configuring &&
(!log_addrs.num_log_addrs || !adap->is_configured) &&
@ -435,7 +435,7 @@ static long cec_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
void __user *parg = (void __user *)arg;
if (!devnode->registered)
return -EIO;
return -ENODEV;
switch (cmd) {
case CEC_ADAP_G_CAPS:
@ -508,14 +508,14 @@ static int cec_open(struct inode *inode, struct file *filp)
filp->private_data = fh;
mutex_lock(&devnode->fhs_lock);
mutex_lock(&devnode->lock);
/* Queue up initial state events */
ev_state.state_change.phys_addr = adap->phys_addr;
ev_state.state_change.log_addr_mask = adap->log_addrs.log_addr_mask;
cec_queue_event_fh(fh, &ev_state, 0);
list_add(&fh->list, &devnode->fhs);
mutex_unlock(&devnode->fhs_lock);
mutex_unlock(&devnode->lock);
return 0;
}
@ -540,9 +540,9 @@ static int cec_release(struct inode *inode, struct file *filp)
cec_monitor_all_cnt_dec(adap);
mutex_unlock(&adap->lock);
mutex_lock(&devnode->fhs_lock);
mutex_lock(&devnode->lock);
list_del(&fh->list);
mutex_unlock(&devnode->fhs_lock);
mutex_unlock(&devnode->lock);
/* Unhook pending transmits from this filehandle. */
mutex_lock(&adap->lock);

View File

@ -51,31 +51,29 @@ int cec_get_device(struct cec_devnode *devnode)
{
/*
* Check if the cec device is available. This needs to be done with
* the cec_devnode_lock held to prevent an open/unregister race:
* the devnode->lock held to prevent an open/unregister race:
* without the lock, the device could be unregistered and freed between
* the devnode->registered check and get_device() calls, leading to
* a crash.
*/
mutex_lock(&cec_devnode_lock);
mutex_lock(&devnode->lock);
/*
* return ENXIO if the cec device has been removed
* already or if it is not registered anymore.
*/
if (!devnode->registered) {
mutex_unlock(&cec_devnode_lock);
mutex_unlock(&devnode->lock);
return -ENXIO;
}
/* and increase the device refcount */
get_device(&devnode->dev);
mutex_unlock(&cec_devnode_lock);
mutex_unlock(&devnode->lock);
return 0;
}
void cec_put_device(struct cec_devnode *devnode)
{
mutex_lock(&cec_devnode_lock);
put_device(&devnode->dev);
mutex_unlock(&cec_devnode_lock);
}
/* Called when the last user of the cec device exits. */
@ -84,11 +82,10 @@ static void cec_devnode_release(struct device *cd)
struct cec_devnode *devnode = to_cec_devnode(cd);
mutex_lock(&cec_devnode_lock);
/* Mark device node number as free */
clear_bit(devnode->minor, cec_devnode_nums);
mutex_unlock(&cec_devnode_lock);
cec_delete_adapter(to_cec_adapter(devnode));
}
@ -117,7 +114,7 @@ static int __must_check cec_devnode_register(struct cec_devnode *devnode,
/* Initialization */
INIT_LIST_HEAD(&devnode->fhs);
mutex_init(&devnode->fhs_lock);
mutex_init(&devnode->lock);
/* Part 1: Find a free minor number */
mutex_lock(&cec_devnode_lock);
@ -160,7 +157,9 @@ static int __must_check cec_devnode_register(struct cec_devnode *devnode,
cdev_del:
cdev_del(&devnode->cdev);
clr_bit:
mutex_lock(&cec_devnode_lock);
clear_bit(devnode->minor, cec_devnode_nums);
mutex_unlock(&cec_devnode_lock);
return ret;
}
@ -177,17 +176,21 @@ static void cec_devnode_unregister(struct cec_devnode *devnode)
{
struct cec_fh *fh;
/* Check if devnode was never registered or already unregistered */
if (!devnode->registered || devnode->unregistered)
return;
mutex_lock(&devnode->lock);
/* Check if devnode was never registered or already unregistered */
if (!devnode->registered || devnode->unregistered) {
mutex_unlock(&devnode->lock);
return;
}
mutex_lock(&devnode->fhs_lock);
list_for_each_entry(fh, &devnode->fhs, list)
wake_up_interruptible(&fh->wait);
mutex_unlock(&devnode->fhs_lock);
devnode->registered = false;
devnode->unregistered = true;
mutex_unlock(&devnode->lock);
device_del(&devnode->dev);
cdev_del(&devnode->cdev);
put_device(&devnode->dev);

View File

@ -114,14 +114,11 @@ static void pulse8_irq_work_handler(struct work_struct *work)
cec_transmit_done(pulse8->adap, CEC_TX_STATUS_OK,
0, 0, 0, 0);
break;
case MSGCODE_TRANSMIT_FAILED_LINE:
cec_transmit_done(pulse8->adap, CEC_TX_STATUS_ARB_LOST,
1, 0, 0, 0);
break;
case MSGCODE_TRANSMIT_FAILED_ACK:
cec_transmit_done(pulse8->adap, CEC_TX_STATUS_NACK,
0, 1, 0, 0);
break;
case MSGCODE_TRANSMIT_FAILED_LINE:
case MSGCODE_TRANSMIT_FAILED_TIMEOUT_DATA:
case MSGCODE_TRANSMIT_FAILED_TIMEOUT_LINE:
cec_transmit_done(pulse8->adap, CEC_TX_STATUS_ERROR,
@ -170,6 +167,9 @@ static irqreturn_t pulse8_interrupt(struct serio *serio, unsigned char data,
case MSGCODE_TRANSMIT_FAILED_TIMEOUT_LINE:
schedule_work(&pulse8->work);
break;
case MSGCODE_HIGH_ERROR:
case MSGCODE_LOW_ERROR:
case MSGCODE_RECEIVE_FAILED:
case MSGCODE_TIMEOUT_ERROR:
break;
case MSGCODE_COMMAND_ACCEPTED:
@ -388,7 +388,7 @@ static int pulse8_cec_adap_transmit(struct cec_adapter *adap, u8 attempts,
int err;
cmd[0] = MSGCODE_TRANSMIT_IDLETIME;
cmd[1] = 3;
cmd[1] = signal_free_time;
err = pulse8_send_and_wait(pulse8, cmd, 2,
MSGCODE_COMMAND_ACCEPTED, 1);
cmd[0] = MSGCODE_TRANSMIT_ACK_POLARITY;

View File

@ -4271,13 +4271,10 @@ int btrfs_check_data_free_space(struct inode *inode, u64 start, u64 len)
if (ret < 0)
return ret;
/*
* Use new btrfs_qgroup_reserve_data to reserve precious data space
*
* TODO: Find a good method to avoid reserve data space for NOCOW
* range, but don't impact performance on quota disable case.
*/
/* Use new btrfs_qgroup_reserve_data to reserve precious data space. */
ret = btrfs_qgroup_reserve_data(inode, start, len);
if (ret)
btrfs_free_reserved_data_space_noquota(inode, start, len);
return ret;
}

View File

@ -1634,6 +1634,9 @@ static noinline int btrfs_ioctl_snap_create_transid(struct file *file,
int namelen;
int ret = 0;
if (!S_ISDIR(file_inode(file)->i_mode))
return -ENOTDIR;
ret = mnt_want_write_file(file);
if (ret)
goto out;
@ -1691,6 +1694,9 @@ static noinline int btrfs_ioctl_snap_create(struct file *file,
struct btrfs_ioctl_vol_args *vol_args;
int ret;
if (!S_ISDIR(file_inode(file)->i_mode))
return -ENOTDIR;
vol_args = memdup_user(arg, sizeof(*vol_args));
if (IS_ERR(vol_args))
return PTR_ERR(vol_args);
@ -1714,6 +1720,9 @@ static noinline int btrfs_ioctl_snap_create_v2(struct file *file,
bool readonly = false;
struct btrfs_qgroup_inherit *inherit = NULL;
if (!S_ISDIR(file_inode(file)->i_mode))
return -ENOTDIR;
vol_args = memdup_user(arg, sizeof(*vol_args));
if (IS_ERR(vol_args))
return PTR_ERR(vol_args);
@ -2357,6 +2366,9 @@ static noinline int btrfs_ioctl_snap_destroy(struct file *file,
int ret;
int err = 0;
if (!S_ISDIR(dir->i_mode))
return -ENOTDIR;
vol_args = memdup_user(arg, sizeof(*vol_args));
if (IS_ERR(vol_args))
return PTR_ERR(vol_args);

View File

@ -333,6 +333,7 @@ configfs_write_bin_file(struct file *file, const char __user *buf,
if (bin_attr->cb_max_size &&
*ppos + count > bin_attr->cb_max_size) {
len = -EFBIG;
goto out;
}
tbuf = vmalloc(*ppos + count);
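The one-line configfs fix restores the missing jump: without goto out, the function decided the write was too big and then fell through to vmalloc(*ppos + count) anyway. A simplified userspace shape of the intended flow (names hypothetical):

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

static long write_bin(size_t cb_max_size, size_t pos, size_t count)
{
	long len = (long)(pos + count);
	void *tbuf = NULL;

	if (cb_max_size && pos + count > cb_max_size) {
		len = -EFBIG;
		goto out;	/* the jump the patch adds back */
	}

	tbuf = malloc(pos + count);
	if (!tbuf)
		len = -ENOMEM;
	/* ... copy the payload into tbuf here ... */
out:
	free(tbuf);
	return len;
}

int main(void)
{
	printf("%ld\n", write_bin(16, 8, 16));	/* -EFBIG: 24 > 16 */
	return 0;
}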

View File

@ -430,6 +430,7 @@ static void elf_kcore_store_hdr(char *bufp, int nphdr, int dataoff)
static ssize_t
read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
{
char *buf = file->private_data;
ssize_t acc = 0;
size_t size, tsz;
size_t elf_buflen;
@ -500,23 +501,20 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
if (clear_user(buffer, tsz))
return -EFAULT;
} else if (is_vmalloc_or_module_addr((void *)start)) {
char * elf_buf;
elf_buf = kzalloc(tsz, GFP_KERNEL);
if (!elf_buf)
return -ENOMEM;
vread(elf_buf, (char *)start, tsz);
vread(buf, (char *)start, tsz);
/* we have to zero-fill user buffer even if no read */
if (copy_to_user(buffer, elf_buf, tsz)) {
kfree(elf_buf);
if (copy_to_user(buffer, buf, tsz))
return -EFAULT;
}
kfree(elf_buf);
} else {
if (kern_addr_valid(start)) {
unsigned long n;
n = copy_to_user(buffer, (char *)start, tsz);
/*
* Using bounce buffer to bypass the
* hardened user copy kernel text checks.
*/
memcpy(buf, (char *) start, tsz);
n = copy_to_user(buffer, buf, tsz);
/*
* We cannot distinguish between fault on source
* and fault on destination. When this happens
@ -549,6 +547,11 @@ static int open_kcore(struct inode *inode, struct file *filp)
{
if (!capable(CAP_SYS_RAWIO))
return -EPERM;
filp->private_data = kmalloc(PAGE_SIZE, GFP_KERNEL);
if (!filp->private_data)
return -ENOMEM;
if (kcore_need_update)
kcore_update_ram();
if (i_size_read(inode) != proc_root_kcore->size) {
@ -559,10 +562,16 @@ static int open_kcore(struct inode *inode, struct file *filp)
return 0;
}
static int release_kcore(struct inode *inode, struct file *file)
{
kfree(file->private_data);
return 0;
}
static const struct file_operations proc_kcore_operations = {
.read = read_kcore,
.open = open_kcore,
.release = release_kcore,
.llseek = default_llseek,
};
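The read_kcore() rework keys a scratch page to each open file through file->private_data, so the bounce buffer survives across reads and is reclaimed in release(). A minimal sketch of that lifecycle as a standalone misc-device module; the device name is hypothetical:

#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/miscdevice.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/uaccess.h>

/* Allocate the per-open bounce buffer in open() ... */
static int demo_open(struct inode *inode, struct file *filp)
{
	filp->private_data = kmalloc(PAGE_SIZE, GFP_KERNEL);
	return filp->private_data ? 0 : -ENOMEM;
}

/* ... reuse it on every read() ... */
static ssize_t demo_read(struct file *filp, char __user *buf,
			 size_t len, loff_t *ppos)
{
	char *bounce = filp->private_data;

	len = min_t(size_t, len, PAGE_SIZE);
	memset(bounce, 'x', len);	/* stand-in for the real data source */
	if (copy_to_user(buf, bounce, len))
		return -EFAULT;
	*ppos += len;
	return len;
}

/* ... and free it exactly once, in release(). */
static int demo_release(struct inode *inode, struct file *filp)
{
	kfree(filp->private_data);
	return 0;
}

static const struct file_operations demo_fops = {
	.owner = THIS_MODULE,
	.open = demo_open,
	.read = demo_read,
	.release = demo_release,
};

static struct miscdevice demo_dev = {
	.minor = MISC_DYNAMIC_MINOR,
	.name = "bounce_demo",
	.fops = &demo_fops,
};

static int __init demo_init(void)
{
	return misc_register(&demo_dev);
}
module_init(demo_init);

static void __exit demo_exit(void)
{
	misc_deregister(&demo_dev);
}
module_exit(demo_exit);

MODULE_LICENSE("GPL");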

Some files were not shown because too many files have changed in this diff.