mirror of https://gitee.com/openkylin/linux.git
Merge 4.4-rc6 into tty-next
We want the serial/tty fixes in here as well.

Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
commit 462a1196a5
@@ -22,8 +22,7 @@ Required properties:
 Optional properties:
 - ti,hwmods: Name of the hwmods associated to the eDMA CC
 - ti,edma-memcpy-channels: List of channels allocated to be used for memcpy, iow
-                           these channels will be SW triggered channels. The list must
-                           contain 16 bits numbers, see example.
+                           these channels will be SW triggered channels. See example.
 - ti,edma-reserved-slot-ranges: PaRAM slot ranges which should not be used by
                            the driver, they are allocated to be used by for example the
                            DSP. See example.
@@ -56,10 +55,9 @@ edma: edma@49000000 {
 	ti,tptcs = <&edma_tptc0 7>, <&edma_tptc1 7>, <&edma_tptc2 0>;

 	/* Channel 20 and 21 is allocated for memcpy */
-	ti,edma-memcpy-channels = /bits/ 16 <20 21>;
-	/* The following PaRAM slots are reserved: 35-45 and 100-110 */
-	ti,edma-reserved-slot-ranges = /bits/ 16 <35 10>,
-				       /bits/ 16 <100 10>;
+	ti,edma-memcpy-channels = <20 21>;
+	/* The following PaRAM slots are reserved: 35-44 and 100-109 */
+	ti,edma-reserved-slot-ranges = <35 10>, <100 10>;
 };

 edma_tptc0: tptc@49800000 {
@@ -12,7 +12,7 @@ Each key is represented as a sub-node of "allwinner,sun4i-a10-lradc-keys":
 Required subnode-properties:
 	- label: Descriptive name of the key.
 	- linux,code: Keycode to emit.
-	- channel: Channel this key is attached to, mut be 0 or 1.
+	- channel: Channel this key is attached to, must be 0 or 1.
 	- voltage: Voltage in µV at lradc input when this key is pressed.

 Example:
@@ -6,7 +6,9 @@ used for what purposes, but which don't use an on-flash partition table such
 as RedBoot.

 The partition table should be a subnode of the mtd node and should be named
-'partitions'. Partitions are defined in subnodes of the partitions node.
+'partitions'. This node should have the following property:
+- compatible : (required) must be "fixed-partitions"
+Partitions are then defined in subnodes of the partitions node.

 For backwards compatibility partitions as direct subnodes of the mtd device are
 supported. This use is discouraged.
@@ -36,6 +38,7 @@ Examples:

 flash@0 {
 	partitions {
+		compatible = "fixed-partitions";
 		#address-cells = <1>;
 		#size-cells = <1>;

@@ -53,6 +56,7 @@ flash@0 {

 flash@1 {
 	partitions {
+		compatible = "fixed-partitions";
 		#address-cells = <1>;
 		#size-cells = <2>;

@@ -66,6 +70,7 @@ flash@1 {

 flash@2 {
 	partitions {
+		compatible = "fixed-partitions";
 		#address-cells = <2>;
 		#size-cells = <2>;

@@ -181,17 +181,3 @@ For general information, go to the Intel support website at:
 If an issue is identified with the released source code on the supported
 kernel with a supported adapter, email the specific information related to the
 issue to e1000-devel@lists.sourceforge.net.
-
-
-License
-=======
-
-This software program is released under the terms of a license agreement
-between you ('Licensee') and Intel. Do not use or load this software or any
-associated materials (collectively, the 'Software') until you have carefully
-read the full terms and conditions of the file COPYING located in this software
-package. By loading or using the Software, you agree to the terms of this
-Agreement. If you do not agree with the terms of this Agreement, do not install
-or use the Software.
-
-* Other names and brands may be claimed as the property of others.
MAINTAINERS | 17
@@ -5578,7 +5578,7 @@ R: Jesse Brandeburg <jesse.brandeburg@intel.com>
 R: Shannon Nelson <shannon.nelson@intel.com>
 R: Carolyn Wyborny <carolyn.wyborny@intel.com>
 R: Don Skidmore <donald.c.skidmore@intel.com>
-R: Matthew Vick <matthew.vick@intel.com>
+R: Bruce Allan <bruce.w.allan@intel.com>
 R: John Ronciak <john.ronciak@intel.com>
 R: Mitch Williams <mitch.a.williams@intel.com>
 L: intel-wired-lan@lists.osuosl.org
@@ -8380,6 +8380,14 @@ L: linux-samsung-soc@vger.kernel.org (moderated for non-subscribers)
 S: Maintained
 F: drivers/pinctrl/samsung/

+PIN CONTROLLER - SINGLE
+M: Tony Lindgren <tony@atomide.com>
+M: Haojian Zhuang <haojian.zhuang@linaro.org>
+L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
+L: linux-omap@vger.kernel.org
+S: Maintained
+F: drivers/pinctrl/pinctrl-single.c
+
 PIN CONTROLLER - ST SPEAR
 M: Viresh Kumar <vireshk@kernel.org>
 L: spear-devel@list.st.com
@@ -8946,6 +8954,13 @@ F: drivers/rpmsg/
 F: Documentation/rpmsg.txt
 F: include/linux/rpmsg.h

+RENESAS ETHERNET DRIVERS
+R: Sergei Shtylyov <sergei.shtylyov@cogentembedded.com>
+L: netdev@vger.kernel.org
+L: linux-sh@vger.kernel.org
+F: drivers/net/ethernet/renesas/
+F: include/linux/sh_eth.h
+
 RESET CONTROLLER FRAMEWORK
 M: Philipp Zabel <p.zabel@pengutronix.de>
 S: Maintained
Makefile | 2
@@ -1,7 +1,7 @@
 VERSION = 4
 PATCHLEVEL = 4
 SUBLEVEL = 0
-EXTRAVERSION = -rc5
+EXTRAVERSION = -rc6
 NAME = Blurry Fish Butt

 # *DOCUMENTATION*
@@ -445,6 +445,7 @@ config LINUX_LINK_BASE
	  However some customers have peripherals mapped at this addr, so
	  Linux needs to be scooted a bit.
	  If you don't know what the above means, leave this setting alone.
+	  This needs to match memory start address specified in Device Tree

 config HIGHMEM
	bool "High Memory Support"
@@ -46,6 +46,7 @@ ethernet@0x18000 {
			snps,pbl = < 32 >;
			clocks = <&apbclk>;
			clock-names = "stmmaceth";
+			max-speed = <100>;
		};

		ehci@0x40000 {
@@ -17,7 +17,8 @@ / {

	memory {
		device_type = "memory";
-		reg = <0x0 0x80000000 0x0 0x40000000	/* 1 GB low mem */
+		/* CONFIG_LINUX_LINK_BASE needs to match low mem start */
+		reg = <0x0 0x80000000 0x0 0x20000000	/* 512 MB low mem */
		       0x1 0x00000000 0x0 0x40000000>;	/* 1 GB highmem */
	};

@@ -23,7 +23,7 @@
 * @dt_compat:		Array of device tree 'compatible' strings
 *			(XXX: although only 1st entry is looked at)
 * @init_early:		Very early callback [called from setup_arch()]
- * @init_cpu_smp:	for each CPU as it is coming up (SMP as well as UP)
+ * @init_per_cpu:	for each CPU as it is coming up (SMP as well as UP)
 *			[(M):init_IRQ(), (o):start_kernel_secondary()]
 * @init_machine:	arch initcall level callback (e.g. populate static
 *			platform devices or parse Devicetree)
@@ -35,7 +35,7 @@ struct machine_desc {
	const char		**dt_compat;
	void			(*init_early)(void);
 #ifdef CONFIG_SMP
-	void			(*init_cpu_smp)(unsigned int);
+	void			(*init_per_cpu)(unsigned int);
 #endif
	void			(*init_machine)(void);
	void			(*init_late)(void);
@@ -48,7 +48,7 @@ extern int smp_ipi_irq_setup(int cpu, int irq);
 * @init_early_smp:	A SMP specific h/w block can init itself
 *			Could be common across platforms so not covered by
 *			mach_desc->init_early()
- * @init_irq_cpu:	Called for each core so SMP h/w block driver can do
+ * @init_per_cpu:	Called for each core so SMP h/w block driver can do
 *			any needed setup per cpu (e.g. IPI request)
 * @cpu_kick:		For Master to kickstart a cpu (optionally at a PC)
 * @ipi_send:		To send IPI to a @cpu
@@ -57,7 +57,7 @@ extern int smp_ipi_irq_setup(int cpu, int irq);
 struct plat_smp_ops {
	const char	*info;
	void		(*init_early_smp)(void);
-	void		(*init_irq_cpu)(int cpu);
+	void		(*init_per_cpu)(int cpu);
	void		(*cpu_kick)(int cpu, unsigned long pc);
	void		(*ipi_send)(int cpu);
	void		(*ipi_clear)(int irq);
@@ -112,7 +112,6 @@ struct unwind_frame_info {

 extern int arc_unwind(struct unwind_frame_info *frame);
 extern void arc_unwind_init(void);
-extern void arc_unwind_setup(void);
 extern void *unwind_add_table(struct module *module, const void *table_start,
			      unsigned long table_size);
 extern void unwind_remove_table(void *handle, int init_only);
@@ -152,9 +151,6 @@ static inline void arc_unwind_init(void)
 {
 }

-static inline void arc_unwind_setup(void)
-{
-}
 #define unwind_add_table(a, b, c)
 #define unwind_remove_table(a, b)

@@ -106,10 +106,21 @@ static struct irq_chip arcv2_irq_chip = {
 static int arcv2_irq_map(struct irq_domain *d, unsigned int irq,
			 irq_hw_number_t hw)
 {
-	if (irq == TIMER0_IRQ || irq == IPI_IRQ)
+	/*
+	 * core intc IRQs [16, 23]:
+	 * Statically assigned always private-per-core (Timers, WDT, IPI, PCT)
+	 */
+	if (hw < 24) {
+		/*
+		 * A subsequent request_percpu_irq() fails if percpu_devid is
+		 * not set. That in turns sets NOAUTOEN, meaning each core needs
+		 * to call enable_percpu_irq()
+		 */
+		irq_set_percpu_devid(irq);
		irq_set_chip_and_handler(irq, &arcv2_irq_chip, handle_percpu_irq);
-	else
+	} else {
		irq_set_chip_and_handler(irq, &arcv2_irq_chip, handle_level_irq);
+	}

	return 0;
 }
@@ -29,11 +29,11 @@ void __init init_IRQ(void)

 #ifdef CONFIG_SMP
	/* a SMP H/w block could do IPI IRQ request here */
-	if (plat_smp_ops.init_irq_cpu)
-		plat_smp_ops.init_irq_cpu(smp_processor_id());
+	if (plat_smp_ops.init_per_cpu)
+		plat_smp_ops.init_per_cpu(smp_processor_id());

-	if (machine_desc->init_cpu_smp)
-		machine_desc->init_cpu_smp(smp_processor_id());
+	if (machine_desc->init_per_cpu)
+		machine_desc->init_per_cpu(smp_processor_id());
 #endif
 }

@@ -51,6 +51,18 @@ void arch_do_IRQ(unsigned int irq, struct pt_regs *regs)
	set_irq_regs(old_regs);
 }

+/*
+ * API called for requesting percpu interrupts - called by each CPU
+ *  - For boot CPU, actually request the IRQ with genirq core + enables
+ *  - For subsequent callers only enable called locally
+ *
+ * Relies on being called by boot cpu first (i.e. request called ahead) of
+ * any enable as expected by genirq. Hence Suitable only for TIMER, IPI
+ * which are guaranteed to be setup on boot core first.
+ * Late probed peripherals such as perf can't use this as there no guarantee
+ * of being called on boot CPU first.
+ */
+
 void arc_request_percpu_irq(int irq, int cpu,
			    irqreturn_t (*isr)(int irq, void *dev),
			    const char *irq_nm,
@@ -60,14 +72,17 @@ void arc_request_percpu_irq(int irq, int cpu,
	if (!cpu) {
		int rc;

+#ifdef CONFIG_ISA_ARCOMPACT
		/*
-		 * These 2 calls are essential to making percpu IRQ APIs work
-		 * Ideally these details could be hidden in irq chip map function
-		 * but the issue is IPIs IRQs being static (non-DT) and platform
-		 * specific, so we can't identify them there.
+		 * A subsequent request_percpu_irq() fails if percpu_devid is
+		 * not set. That in turns sets NOAUTOEN, meaning each core needs
+		 * to call enable_percpu_irq()
+		 *
+		 * For ARCv2, this is done in irq map function since we know
+		 * which irqs are strictly per cpu
		 */
		irq_set_percpu_devid(irq);
-		irq_modify_status(irq, IRQ_NOAUTOEN, 0);	/* @irq, @clr, @set */
+#endif

		rc = request_percpu_irq(irq, isr, irq_nm, percpu_dev);
		if (rc)
@@ -132,7 +132,7 @@ static void mcip_probe_n_setup(void)
 struct plat_smp_ops plat_smp_ops = {
	.info		= smp_cpuinfo_buf,
	.init_early_smp	= mcip_probe_n_setup,
-	.init_irq_cpu	= mcip_setup_per_cpu,
+	.init_per_cpu	= mcip_setup_per_cpu,
	.ipi_send	= mcip_ipi_send,
	.ipi_clear	= mcip_ipi_clear,
 };
@@ -428,12 +428,11 @@ static irqreturn_t arc_pmu_intr(int irq, void *dev)

 #endif /* CONFIG_ISA_ARCV2 */

-void arc_cpu_pmu_irq_init(void)
+static void arc_cpu_pmu_irq_init(void *data)
 {
-	struct arc_pmu_cpu *pmu_cpu = this_cpu_ptr(&arc_pmu_cpu);
+	int irq = *(int *)data;

-	arc_request_percpu_irq(arc_pmu->irq, smp_processor_id(), arc_pmu_intr,
-			       "ARC perf counters", pmu_cpu);
+	enable_percpu_irq(irq, IRQ_TYPE_NONE);

	/* Clear all pending interrupt flags */
	write_aux_reg(ARC_REG_PCT_INT_ACT, 0xffffffff);
@@ -515,7 +514,6 @@ static int arc_pmu_device_probe(struct platform_device *pdev)

	if (has_interrupts) {
		int irq = platform_get_irq(pdev, 0);
-		unsigned long flags;

		if (irq < 0) {
			pr_err("Cannot get IRQ number for the platform\n");
@@ -524,24 +522,12 @@ static int arc_pmu_device_probe(struct platform_device *pdev)

		arc_pmu->irq = irq;

-		/*
-		 * arc_cpu_pmu_irq_init() needs to be called on all cores for
-		 * their respective local PMU.
-		 * However we use opencoded on_each_cpu() to ensure it is called
-		 * on core0 first, so that arc_request_percpu_irq() sets up
-		 * AUTOEN etc. Otherwise enable_percpu_irq() fails to enable
-		 * perf IRQ on non master cores.
-		 * see arc_request_percpu_irq()
-		 */
-		preempt_disable();
-		local_irq_save(flags);
-		arc_cpu_pmu_irq_init();
-		local_irq_restore(flags);
-		smp_call_function((smp_call_func_t)arc_cpu_pmu_irq_init, 0, 1);
-		preempt_enable();
+		/* intc map function ensures irq_set_percpu_devid() called */
+		request_percpu_irq(irq, arc_pmu_intr, "ARC perf counters",
+				   this_cpu_ptr(&arc_pmu_cpu));
+
+		on_each_cpu(arc_cpu_pmu_irq_init, &irq, 1);

		/* Clean all pending interrupt flags */
		write_aux_reg(ARC_REG_PCT_INT_ACT, 0xffffffff);
	} else
		arc_pmu->pmu.capabilities |= PERF_PMU_CAP_NO_INTERRUPT;
@@ -429,7 +429,6 @@ void __init setup_arch(char **cmdline_p)
 #endif

	arc_unwind_init();
-	arc_unwind_setup();
 }

 static int __init customize_machine(void)
@@ -132,11 +132,11 @@ void start_kernel_secondary(void)
	pr_info("## CPU%u LIVE ##: Executing Code...\n", cpu);

	/* Some SMP H/w setup - for each cpu */
-	if (plat_smp_ops.init_irq_cpu)
-		plat_smp_ops.init_irq_cpu(cpu);
+	if (plat_smp_ops.init_per_cpu)
+		plat_smp_ops.init_per_cpu(cpu);

-	if (machine_desc->init_cpu_smp)
-		machine_desc->init_cpu_smp(cpu);
+	if (machine_desc->init_per_cpu)
+		machine_desc->init_per_cpu(cpu);

	arc_local_timer_setup();
@@ -170,6 +170,23 @@ static struct unwind_table *find_table(unsigned long pc)

 static unsigned long read_pointer(const u8 **pLoc,
				  const void *end, signed ptrType);
+static void init_unwind_hdr(struct unwind_table *table,
+			    void *(*alloc) (unsigned long));
+
+/*
+ * wrappers for header alloc (vs. calling one vs. other at call site)
+ * to elide section mismatches warnings
+ */
+static void *__init unw_hdr_alloc_early(unsigned long sz)
+{
+	return __alloc_bootmem_nopanic(sz, sizeof(unsigned int),
+				       MAX_DMA_ADDRESS);
+}
+
+static void *unw_hdr_alloc(unsigned long sz)
+{
+	return kmalloc(sz, GFP_KERNEL);
+}

 static void init_unwind_table(struct unwind_table *table, const char *name,
			      const void *core_start, unsigned long core_size,
@@ -209,6 +226,8 @@ void __init arc_unwind_init(void)
			  __start_unwind, __end_unwind - __start_unwind,
			  NULL, 0);
	  /*__start_unwind_hdr, __end_unwind_hdr - __start_unwind_hdr);*/
+
+	init_unwind_hdr(&root_table, unw_hdr_alloc_early);
 }

 static const u32 bad_cie, not_fde;
@@ -241,8 +260,8 @@ static void swap_eh_frame_hdr_table_entries(void *p1, void *p2, int size)
	e2->fde = v;
 }

-static void __init setup_unwind_table(struct unwind_table *table,
-				      void *(*alloc) (unsigned long))
+static void init_unwind_hdr(struct unwind_table *table,
+			    void *(*alloc) (unsigned long))
 {
	const u8 *ptr;
	unsigned long tableSize = table->size, hdrSize;
@@ -274,13 +293,13 @@ static void __init setup_unwind_table(struct unwind_table *table,
		const u32 *cie = cie_for_fde(fde, table);
		signed ptrType;

-		if (cie == &not_fde)
+		if (cie == &not_fde)	/* only process FDE here */
			continue;
		if (cie == NULL || cie == &bad_cie)
-			return;
+			continue;	/* say FDE->CIE.version != 1 */
		ptrType = fde_pointer_type(cie);
		if (ptrType < 0)
-			return;
+			continue;

		ptr = (const u8 *)(fde + 2);
		if (!read_pointer(&ptr, (const u8 *)(fde + 1) + *fde,
@@ -300,9 +319,11 @@ static void __init setup_unwind_table(struct unwind_table *table,

	hdrSize = 4 + sizeof(unsigned long) + sizeof(unsigned int)
		+ 2 * n * sizeof(unsigned long);
+
	header = alloc(hdrSize);
	if (!header)
		return;
+
	header->version = 1;
	header->eh_frame_ptr_enc = DW_EH_PE_abs | DW_EH_PE_native;
	header->fde_count_enc = DW_EH_PE_abs | DW_EH_PE_data4;
@@ -322,6 +343,10 @@ static void __init setup_unwind_table(struct unwind_table *table,

		if (fde[1] == 0xffffffff)
			continue;	/* this is a CIE */
+
+		if (*(u8 *)(cie + 2) != 1)
+			continue;	/* FDE->CIE.version not supported */
+
		ptr = (const u8 *)(fde + 2);
		header->table[n].start = read_pointer(&ptr,
						      (const u8 *)(fde + 1) +
@@ -342,18 +367,6 @@ static void __init setup_unwind_table(struct unwind_table *table,
	table->header = (const void *)header;
 }

-static void *__init balloc(unsigned long sz)
-{
-	return __alloc_bootmem_nopanic(sz,
-				       sizeof(unsigned int),
-				       __pa(MAX_DMA_ADDRESS));
-}
-
-void __init arc_unwind_setup(void)
-{
-	setup_unwind_table(&root_table, balloc);
-}
-
 #ifdef CONFIG_MODULES

 static struct unwind_table *last_table;

@@ -377,6 +390,8 @@ void *unwind_add_table(struct module *module, const void *table_start,
			  table_start, table_size,
			  NULL, 0);

+	init_unwind_hdr(table, unw_hdr_alloc);
+
 #ifdef UNWIND_DEBUG
	unw_debug("Table added for [%s] %lx %lx\n",
		  module->name, table->core.pc, table->core.range);
@@ -439,6 +454,7 @@ void unwind_remove_table(void *handle, int init_only)
	info.init_only = init_only;

	unlink_table(&info); /* XXX: SMP */
+	kfree(table->header);
	kfree(table);
 }

@@ -507,7 +523,8 @@ static const u32 *cie_for_fde(const u32 *fde, const struct unwind_table *table)

	if (*cie <= sizeof(*cie) + 4 || *cie >= fde[1] - sizeof(*fde)
	    || (*cie & (sizeof(*cie) - 1))
-	    || (cie[1] != 0xffffffff))
+	    || (cie[1] != 0xffffffff)
+	    || ( *(u8 *)(cie + 2) != 1))   /* version 1 supported */
		return NULL;	/* this is not a (valid) CIE */
	return cie;
 }
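For illustration outside the patch: the unwind rework above funnels both the boot-time path and the module-load path through one header builder that takes its allocator as a function pointer. A minimal userspace sketch of that shape, with calloc()/malloc() standing in for __alloc_bootmem_nopanic() and kmalloc(), and all names hypothetical:

    #include <stdio.h>
    #include <stdlib.h>

    /* boot-time allocator stand-in */
    static void *hdr_alloc_early(unsigned long sz)
    {
        return calloc(1, sz);
    }

    /* module-load-time allocator stand-in */
    static void *hdr_alloc(unsigned long sz)
    {
        return malloc(sz);
    }

    /* one builder, parameterized by allocator */
    static void init_unwind_hdr_sketch(const char *name,
                                       void *(*alloc)(unsigned long))
    {
        void *header = alloc(128);

        if (!header)
            return;
        printf("%s: header built\n", name);
        free(header);
    }

    int main(void)
    {
        init_unwind_hdr_sketch("kernel", hdr_alloc_early);  /* boot path */
        init_unwind_hdr_sketch("module", hdr_alloc);        /* module path */
        return 0;
    }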
@@ -51,7 +51,9 @@ void __init early_init_dt_add_memory_arch(u64 base, u64 size)
	int in_use = 0;

	if (!low_mem_sz) {
-		BUG_ON(base != low_mem_start);
+		if (base != low_mem_start)
+			panic("CONFIG_LINUX_LINK_BASE != DT memory { }");
+
		low_mem_sz = size;
		in_use = 1;
	} else {
@@ -510,10 +510,14 @@ __copy_to_user_std(void __user *to, const void *from, unsigned long n);
 static inline unsigned long __must_check
 __copy_to_user(void __user *to, const void *from, unsigned long n)
 {
+#ifndef CONFIG_UACCESS_WITH_MEMCPY
	unsigned int __ua_flags = uaccess_save_and_enable();
	n = arm_copy_to_user(to, from, n);
	uaccess_restore(__ua_flags);
	return n;
+#else
+	return arm_copy_to_user(to, from, n);
+#endif
 }

 extern unsigned long __must_check
@@ -95,6 +95,22 @@ void __show_regs(struct pt_regs *regs)
 {
	unsigned long flags;
	char buf[64];
+#ifndef CONFIG_CPU_V7M
+	unsigned int domain;
+#ifdef CONFIG_CPU_SW_DOMAIN_PAN
+	/*
+	 * Get the domain register for the parent context. In user
+	 * mode, we don't save the DACR, so lets use what it should
+	 * be. For other modes, we place it after the pt_regs struct.
+	 */
+	if (user_mode(regs))
+		domain = DACR_UACCESS_ENABLE;
+	else
+		domain = *(unsigned int *)(regs + 1);
+#else
+	domain = get_domain();
+#endif
+#endif

	show_regs_print_info(KERN_DEFAULT);

@@ -123,21 +139,8 @@ void __show_regs(struct pt_regs *regs)

 #ifndef CONFIG_CPU_V7M
	{
-		unsigned int domain = get_domain();
		const char *segment;

-#ifdef CONFIG_CPU_SW_DOMAIN_PAN
-		/*
-		 * Get the domain register for the parent context. In user
-		 * mode, we don't save the DACR, so lets use what it should
-		 * be. For other modes, we place it after the pt_regs struct.
-		 */
-		if (user_mode(regs))
-			domain = DACR_UACCESS_ENABLE;
-		else
-			domain = *(unsigned int *)(regs + 1);
-#endif
-
		if ((domain & domain_mask(DOMAIN_USER)) ==
		    domain_val(DOMAIN_USER, DOMAIN_NOACCESS))
			segment = "none";
@@ -163,11 +166,11 @@ void __show_regs(struct pt_regs *regs)
	buf[0] = '\0';
 #ifdef CONFIG_CPU_CP15_MMU
	{
-		unsigned int transbase, dac = get_domain();
+		unsigned int transbase;
		asm("mrc p15, 0, %0, c2, c0\n\t"
		    : "=r" (transbase));
		snprintf(buf, sizeof(buf), "  Table: %08x  DAC: %08x",
-			 transbase, dac);
+			 transbase, domain);
	}
 #endif
	asm("mrc p15, 0, %0, c1, c0\n" : "=r" (ctrl));
@@ -36,10 +36,10 @@
 */
 #define __user_swpX_asm(data, addr, res, temp, B)	\
	__asm__ __volatile__(				\
-	"	mov		%2, %1\n"		\
-	"0:	ldrex"B"	%1, [%3]\n"		\
-	"1:	strex"B"	%0, %2, [%3]\n"		\
+	"0:	ldrex"B"	%2, [%3]\n"		\
+	"1:	strex"B"	%0, %1, [%3]\n"		\
	"	cmp		%0, #0\n"		\
+	"	moveq		%1, %2\n"		\
	"	movne		%0, %4\n"		\
	"2:\n"						\
	"	.section	 .text.fixup,\"ax\"\n"	\
@@ -88,6 +88,7 @@ pin_page_for_write(const void __user *_addr, pte_t **ptep, spinlock_t **ptlp)
 static unsigned long noinline
 __copy_to_user_memcpy(void __user *to, const void *from, unsigned long n)
 {
+	unsigned long ua_flags;
	int atomic;

	if (unlikely(segment_eq(get_fs(), KERNEL_DS))) {
@@ -118,7 +119,9 @@ __copy_to_user_memcpy(void __user *to, const void *from, unsigned long n)
		if (tocopy > n)
			tocopy = n;

+		ua_flags = uaccess_save_and_enable();
		memcpy((void *)to, from, tocopy);
+		uaccess_restore(ua_flags);
		to += tocopy;
		from += tocopy;
		n -= tocopy;
@@ -145,14 +148,21 @@ arm_copy_to_user(void __user *to, const void *from, unsigned long n)
	 * With frame pointer disabled, tail call optimization kicks in
	 * as well making this test almost invisible.
	 */
-	if (n < 64)
-		return __copy_to_user_std(to, from, n);
-	return __copy_to_user_memcpy(to, from, n);
+	if (n < 64) {
+		unsigned long ua_flags = uaccess_save_and_enable();
+		n = __copy_to_user_std(to, from, n);
+		uaccess_restore(ua_flags);
+	} else {
+		n = __copy_to_user_memcpy(to, from, n);
+	}
+	return n;
 }

 static unsigned long noinline
 __clear_user_memset(void __user *addr, unsigned long n)
 {
+	unsigned long ua_flags;
+
	if (unlikely(segment_eq(get_fs(), KERNEL_DS))) {
		memset((void *)addr, 0, n);
		return 0;
@@ -175,7 +185,9 @@ __clear_user_memset(void __user *addr, unsigned long n)
		if (tocopy > n)
			tocopy = n;

+		ua_flags = uaccess_save_and_enable();
		memset((void *)addr, 0, tocopy);
+		uaccess_restore(ua_flags);
		addr += tocopy;
		n -= tocopy;

@@ -193,9 +205,14 @@ __clear_user_memset(void __user *addr, unsigned long n)
 unsigned long arm_clear_user(void __user *addr, unsigned long n)
 {
	/* See rational for this in __copy_to_user() above. */
-	if (n < 64)
-		return __clear_user_std(addr, n);
-	return __clear_user_memset(addr, n);
+	if (n < 64) {
+		unsigned long ua_flags = uaccess_save_and_enable();
+		n = __clear_user_std(addr, n);
+		uaccess_restore(ua_flags);
+	} else {
+		n = __clear_user_memset(addr, n);
+	}
+	return n;
 }

 #if 0
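For illustration outside the patch: the pattern in these uaccess changes is that the caller-side save/restore bracket is only needed around the raw assembly path, while the memcpy-based path opens user access around each chunk itself. A self-contained sketch of that dispatch, with a flag variable standing in for the ARM domain/PAN state (all names hypothetical):

    #include <stdio.h>
    #include <string.h>

    static int ua_open;    /* stands in for the PAN/domain user-access state */

    static unsigned long uaccess_save_and_enable(void)
    {
        unsigned long old = ua_open;
        ua_open = 1;
        return old;
    }

    static void uaccess_restore(unsigned long old) { ua_open = (int)old; }

    static unsigned long copy_std(void *to, const void *from, unsigned long n)
    {
        memcpy(to, from, n);    /* stands in for the assembly copy loop */
        return 0;               /* bytes NOT copied */
    }

    static unsigned long copy_memcpy(void *to, const void *from, unsigned long n)
    {
        unsigned long f = uaccess_save_and_enable(); /* opens access itself */
        memcpy(to, from, n);
        uaccess_restore(f);
        return 0;
    }

    /* small copies take the raw path, which needs the caller's bracket */
    unsigned long copy_to_user_sketch(void *to, const void *from, unsigned long n)
    {
        if (n < 64) {
            unsigned long f = uaccess_save_and_enable();
            n = copy_std(to, from, n);
            uaccess_restore(f);
        } else {
            n = copy_memcpy(to, from, n);
        }
        return n;
    }

    int main(void)
    {
        char a[8], b[8] = "hi";
        return (int)copy_to_user_sketch(a, b, sizeof(b));
    }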
@@ -165,13 +165,28 @@ static void flush_context(unsigned int cpu)
	__flush_icache_all();
 }

-static int is_reserved_asid(u64 asid)
+static bool check_update_reserved_asid(u64 asid, u64 newasid)
 {
	int cpu;
-	for_each_possible_cpu(cpu)
-		if (per_cpu(reserved_asids, cpu) == asid)
-			return 1;
-	return 0;
+	bool hit = false;
+
+	/*
+	 * Iterate over the set of reserved ASIDs looking for a match.
+	 * If we find one, then we can update our mm to use newasid
+	 * (i.e. the same ASID in the current generation) but we can't
+	 * exit the loop early, since we need to ensure that all copies
+	 * of the old ASID are updated to reflect the mm. Failure to do
+	 * so could result in us missing the reserved ASID in a future
+	 * generation.
+	 */
+	for_each_possible_cpu(cpu) {
+		if (per_cpu(reserved_asids, cpu) == asid) {
+			hit = true;
+			per_cpu(reserved_asids, cpu) = newasid;
+		}
+	}
+
+	return hit;
 }

 static u64 new_context(struct mm_struct *mm, unsigned int cpu)
@@ -181,12 +196,14 @@ static u64 new_context(struct mm_struct *mm, unsigned int cpu)
	u64 generation = atomic64_read(&asid_generation);

	if (asid != 0) {
+		u64 newasid = generation | (asid & ~ASID_MASK);
+
		/*
		 * If our current ASID was active during a rollover, we
		 * can continue to use it and this was just a false alarm.
		 */
-		if (is_reserved_asid(asid))
-			return generation | (asid & ~ASID_MASK);
+		if (check_update_reserved_asid(asid, newasid))
+			return newasid;

		/*
		 * We had a valid ASID in a previous life, so try to re-use
@@ -194,7 +211,7 @@ static u64 new_context(struct mm_struct *mm, unsigned int cpu)
		 */
		asid &= ~ASID_MASK;
		if (!__test_and_set_bit(asid, asid_map))
-			goto bump_gen;
+			return newasid;
	}

	/*
@@ -216,11 +233,8 @@ static u64 new_context(struct mm_struct *mm, unsigned int cpu)

	__set_bit(asid, asid_map);
	cur_idx = asid;
-
-bump_gen:
-	asid |= generation;
	cpumask_clear(mm_cpumask(mm));
-	return asid;
+	return asid | generation;
 }

 void check_and_switch_context(struct mm_struct *mm, struct task_struct *tsk)
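For illustration outside the patch: the key point of check_update_reserved_asid() is that the loop must not exit on the first match, because every per-cpu copy of the old ASID has to be rewritten to the new one. A small standalone sketch of that invariant (NR_CPUS and the plain array are hypothetical stand-ins for the per-cpu data):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define NR_CPUS 4
    static uint64_t reserved_asids[NR_CPUS];

    /* Visit every CPU even after a hit; breaking early would leave a stale
     * copy behind and lose the reservation in a later generation. */
    static bool check_update_reserved(uint64_t asid, uint64_t newasid)
    {
        bool hit = false;
        int cpu;

        for (cpu = 0; cpu < NR_CPUS; cpu++) {
            if (reserved_asids[cpu] == asid) {
                hit = true;
                reserved_asids[cpu] = newasid;  /* keep updating, don't break */
            }
        }
        return hit;
    }

    int main(void)
    {
        reserved_asids[0] = reserved_asids[3] = 5;  /* two CPUs hold ASID 5 */
        printf("%d\n", check_update_reserved(5, (1ULL << 8) | 5));
        printf("cpu3 now %llu\n", (unsigned long long)reserved_asids[3]);
        return 0;
    }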
@@ -1521,7 +1521,7 @@ static int __map_sg_chunk(struct device *dev, struct scatterlist *sg,
		return -ENOMEM;

	for (count = 0, s = sg; count < (size >> PAGE_SHIFT); s = sg_next(s)) {
-		phys_addr_t phys = sg_phys(s) & PAGE_MASK;
+		phys_addr_t phys = page_to_phys(sg_page(s));
		unsigned int len = PAGE_ALIGN(s->offset + s->length);

		if (!is_coherent &&
@@ -22,6 +22,7 @@
 #include <linux/memblock.h>
 #include <linux/dma-contiguous.h>
 #include <linux/sizes.h>
+#include <linux/stop_machine.h>

 #include <asm/cp15.h>
 #include <asm/mach-types.h>
@@ -627,12 +628,10 @@ static struct section_perm ro_perms[] = {
 * safe to be called with preemption disabled, as under stop_machine().
 */
 static inline void section_update(unsigned long addr, pmdval_t mask,
-				  pmdval_t prot)
+				  pmdval_t prot, struct mm_struct *mm)
 {
-	struct mm_struct *mm;
	pmd_t *pmd;

-	mm = current->active_mm;
	pmd = pmd_offset(pud_offset(pgd_offset(mm, addr), addr), addr);

 #ifdef CONFIG_ARM_LPAE
@@ -656,49 +655,82 @@ static inline bool arch_has_strict_perms(void)
	return !!(get_cr() & CR_XP);
 }

-#define set_section_perms(perms, field)	{				\
-	size_t i;							\
-	unsigned long addr;						\
-									\
-	if (!arch_has_strict_perms())					\
-		return;							\
-									\
-	for (i = 0; i < ARRAY_SIZE(perms); i++) {			\
-		if (!IS_ALIGNED(perms[i].start, SECTION_SIZE) ||	\
-		    !IS_ALIGNED(perms[i].end, SECTION_SIZE)) {		\
-			pr_err("BUG: section %lx-%lx not aligned to %lx\n", \
-				perms[i].start, perms[i].end,		\
-				SECTION_SIZE);				\
-			continue;					\
-		}							\
-									\
-		for (addr = perms[i].start;				\
-		     addr < perms[i].end;				\
-		     addr += SECTION_SIZE)				\
-			section_update(addr, perms[i].mask,		\
-				       perms[i].field);			\
-	}								\
+void set_section_perms(struct section_perm *perms, int n, bool set,
+		       struct mm_struct *mm)
+{
+	size_t i;
+	unsigned long addr;
+
+	if (!arch_has_strict_perms())
+		return;
+
+	for (i = 0; i < n; i++) {
+		if (!IS_ALIGNED(perms[i].start, SECTION_SIZE) ||
+		    !IS_ALIGNED(perms[i].end, SECTION_SIZE)) {
+			pr_err("BUG: section %lx-%lx not aligned to %lx\n",
+				perms[i].start, perms[i].end,
+				SECTION_SIZE);
+			continue;
+		}
+
+		for (addr = perms[i].start;
+		     addr < perms[i].end;
+		     addr += SECTION_SIZE)
+			section_update(addr, perms[i].mask,
+				       set ? perms[i].prot : perms[i].clear, mm);
+	}
+
 }

-static inline void fix_kernmem_perms(void)
+static void update_sections_early(struct section_perm perms[], int n)
 {
-	set_section_perms(nx_perms, prot);
+	struct task_struct *t, *s;
+
+	read_lock(&tasklist_lock);
+	for_each_process(t) {
+		if (t->flags & PF_KTHREAD)
+			continue;
+		for_each_thread(t, s)
+			set_section_perms(perms, n, true, s->mm);
+	}
+	read_unlock(&tasklist_lock);
+	set_section_perms(perms, n, true, current->active_mm);
+	set_section_perms(perms, n, true, &init_mm);
+}
+
+int __fix_kernmem_perms(void *unused)
+{
+	update_sections_early(nx_perms, ARRAY_SIZE(nx_perms));
+	return 0;
+}
+
+void fix_kernmem_perms(void)
+{
+	stop_machine(__fix_kernmem_perms, NULL, NULL);
 }

 #ifdef CONFIG_DEBUG_RODATA
+int __mark_rodata_ro(void *unused)
+{
+	update_sections_early(ro_perms, ARRAY_SIZE(ro_perms));
+	return 0;
+}
+
 void mark_rodata_ro(void)
 {
-	set_section_perms(ro_perms, prot);
+	stop_machine(__mark_rodata_ro, NULL, NULL);
 }

 void set_kernel_text_rw(void)
 {
-	set_section_perms(ro_perms, clear);
+	set_section_perms(ro_perms, ARRAY_SIZE(ro_perms), false,
+			  current->active_mm);
 }

 void set_kernel_text_ro(void)
 {
-	set_section_perms(ro_perms, prot);
+	set_section_perms(ro_perms, ARRAY_SIZE(ro_perms), true,
+			  current->active_mm);
 }
 #endif /* CONFIG_DEBUG_RODATA */
@@ -95,7 +95,7 @@ ENDPROC(cpu_v7_dcache_clean_area)
.equ	cpu_v7_suspend_size, 4 * 9
 #ifdef CONFIG_ARM_CPU_SUSPEND
 ENTRY(cpu_v7_do_suspend)
-	stmfd	sp!, {r4 - r10, lr}
+	stmfd	sp!, {r4 - r11, lr}
	mrc	p15, 0, r4, c13, c0, 0	@ FCSE/PID
	mrc	p15, 0, r5, c13, c0, 3	@ User r/o thread ID
	stmia	r0!, {r4 - r5}
@@ -112,7 +112,7 @@ ENTRY(cpu_v7_do_suspend)
	mrc	p15, 0, r9, c1, c0, 1	@ Auxiliary control register
	mrc	p15, 0, r10, c1, c0, 2	@ Co-processor access control
	stmia	r0, {r5 - r11}
-	ldmfd	sp!, {r4 - r10, pc}
+	ldmfd	sp!, {r4 - r11, pc}
 ENDPROC(cpu_v7_do_suspend)

 ENTRY(cpu_v7_do_resume)
@@ -11,7 +11,7 @@

-#define NR_syscalls		322 /* length of syscall table */
+#define NR_syscalls		323 /* length of syscall table */

 /*
 * The following defines stop scripts/checksyscalls.sh from complaining about
@@ -335,5 +335,6 @@
 #define __NR_userfaultfd		1343
 #define __NR_membarrier			1344
 #define __NR_kcmp			1345
+#define __NR_mlock2			1346

 #endif /* _UAPI_ASM_IA64_UNISTD_H */
@@ -1771,5 +1771,6 @@ sys_call_table:
	data8 sys_userfaultfd
	data8 sys_membarrier
	data8 sys_kcmp				// 1345
+	data8 sys_mlock2

	.org sys_call_table + 8*NR_syscalls	// guard against failures to increase NR_syscalls
@@ -61,7 +61,8 @@ static int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl,
	/* FIXME this part of code is untested */
	for_each_sg(sgl, sg, nents, i) {
		sg->dma_address = sg_phys(sg);
-		__dma_sync(sg_phys(sg), sg->length, direction);
+		__dma_sync(page_to_phys(sg_page(sg)) + sg->offset,
+			   sg->length, direction);
	}

	return nents;
@@ -370,16 +370,16 @@ COMPAT_SYS(execveat)
 PPC64ONLY(switch_endian)
 SYSCALL_SPU(userfaultfd)
 SYSCALL_SPU(membarrier)
-SYSCALL(semop)
-SYSCALL(semget)
-COMPAT_SYS(semctl)
-COMPAT_SYS(semtimedop)
-COMPAT_SYS(msgsnd)
-COMPAT_SYS(msgrcv)
-SYSCALL(msgget)
-COMPAT_SYS(msgctl)
-COMPAT_SYS(shmat)
-SYSCALL(shmdt)
-SYSCALL(shmget)
-COMPAT_SYS(shmctl)
+SYSCALL(ni_syscall)
+SYSCALL(ni_syscall)
+SYSCALL(ni_syscall)
+SYSCALL(ni_syscall)
+SYSCALL(ni_syscall)
+SYSCALL(ni_syscall)
+SYSCALL(ni_syscall)
+SYSCALL(ni_syscall)
+SYSCALL(ni_syscall)
+SYSCALL(ni_syscall)
+SYSCALL(ni_syscall)
+SYSCALL(ni_syscall)
 SYSCALL(mlock2)
@@ -388,18 +388,6 @@
 #define __NR_switch_endian	363
 #define __NR_userfaultfd	364
 #define __NR_membarrier		365
-#define __NR_semop		366
-#define __NR_semget		367
-#define __NR_semctl		368
-#define __NR_semtimedop		369
-#define __NR_msgsnd		370
-#define __NR_msgrcv		371
-#define __NR_msgget		372
-#define __NR_msgctl		373
-#define __NR_shmat		374
-#define __NR_shmdt		375
-#define __NR_shmget		376
-#define __NR_shmctl		377
 #define __NR_mlock2		378

 #endif /* _UAPI_ASM_POWERPC_UNISTD_H_ */
@@ -83,7 +83,19 @@ static void opal_event_unmask(struct irq_data *d)
	set_bit(d->hwirq, &opal_event_irqchip.mask);

	opal_poll_events(&events);
-	opal_handle_events(be64_to_cpu(events));
+	last_outstanding_events = be64_to_cpu(events);
+
+	/*
+	 * We can't just handle the events now with opal_handle_events().
+	 * If we did we would deadlock when opal_event_unmask() is called from
+	 * handle_level_irq() with the irq descriptor lock held, because
+	 * calling opal_handle_events() would call generic_handle_irq() and
+	 * then handle_level_irq() which would try to take the descriptor lock
+	 * again. Instead queue the events for later.
+	 */
+	if (last_outstanding_events & opal_event_irqchip.mask)
+		/* Need to retrigger the interrupt */
+		irq_work_queue(&opal_event_irq_work);
 }

 static int opal_event_set_type(struct irq_data *d, unsigned int flow_type)
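For illustration outside the patch: the deadlock described in that comment is plain lock re-entry, since the unmask path already holds the irq descriptor lock. A minimal pthread sketch of the queue-instead-of-handle-inline pattern, where the mutex and the flag are hypothetical stand-ins for the descriptor lock and the irq_work item:

    #include <pthread.h>
    #include <stdbool.h>
    #include <stdio.h>

    static pthread_mutex_t desc_lock = PTHREAD_MUTEX_INITIALIZER;
    static bool deferred_pending;

    static void handle_events(void)
    {
        /* needs desc_lock again: deadlock if called from unmask() inline */
        pthread_mutex_lock(&desc_lock);
        printf("handling events\n");
        pthread_mutex_unlock(&desc_lock);
    }

    static void unmask(void)
    {
        pthread_mutex_lock(&desc_lock);  /* caller context: lock held */
        deferred_pending = true;         /* queue instead of handling inline */
        pthread_mutex_unlock(&desc_lock);
    }

    int main(void)
    {
        unmask();
        if (deferred_pending)            /* "irq_work" runs later, lock free */
            handle_events();
        return 0;
    }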
@@ -278,7 +278,7 @@ static void opal_handle_message(void)

	/* Sanity check */
	if (type >= OPAL_MSG_TYPE_MAX) {
-		pr_warning("%s: Unknown message type: %u\n", __func__, type);
+		pr_warn_once("%s: Unknown message type: %u\n", __func__, type);
		return;
	}
	opal_message_do_notify(type, (void *)&msg);
@@ -89,7 +89,7 @@ static struct addr_marker address_markers[] = {
	{ 0/* VMALLOC_START */, "vmalloc() Area" },
	{ 0/*VMALLOC_END*/,   "vmalloc() End" },
 # ifdef CONFIG_HIGHMEM
-	{ 0/*PKMAP_BASE*/,    "Persisent kmap() Area" },
+	{ 0/*PKMAP_BASE*/,    "Persistent kmap() Area" },
 # endif
	{ 0/*FIXADDR_START*/, "Fixmap Area" },
 #endif
@@ -2495,14 +2495,9 @@ void __init xen_init_mmu_ops(void)
 {
	x86_init.paging.pagetable_init = xen_pagetable_init;

-	/* Optimization - we can use the HVM one but it has no idea which
-	 * VCPUs are descheduled - which means that it will needlessly IPI
-	 * them. Xen knows so let it do the job.
-	 */
-	if (xen_feature(XENFEAT_auto_translated_physmap)) {
-		pv_mmu_ops.flush_tlb_others = xen_flush_tlb_others;
+	if (xen_feature(XENFEAT_auto_translated_physmap))
		return;
-	}
+
	pv_mmu_ops = xen_mmu_ops;

	memset(dummy_mapping, 0xff, PAGE_SIZE);
@@ -68,26 +68,16 @@ static void xen_pv_post_suspend(int suspend_cancelled)

 void xen_arch_pre_suspend(void)
 {
-	int cpu;
-
-	for_each_online_cpu(cpu)
-		xen_pmu_finish(cpu);
-
	if (xen_pv_domain())
		xen_pv_pre_suspend();
 }

 void xen_arch_post_suspend(int cancelled)
 {
-	int cpu;
-
	if (xen_pv_domain())
		xen_pv_post_suspend(cancelled);
	else
		xen_hvm_post_suspend(cancelled);
-
-	for_each_online_cpu(cpu)
-		xen_pmu_init(cpu);
 }

 static void xen_vcpu_notify_restore(void *data)
@@ -106,10 +96,20 @@ static void xen_vcpu_notify_suspend(void *data)

 void xen_arch_resume(void)
 {
+	int cpu;
+
	on_each_cpu(xen_vcpu_notify_restore, NULL, 1);
+
+	for_each_online_cpu(cpu)
+		xen_pmu_init(cpu);
 }

 void xen_arch_suspend(void)
 {
+	int cpu;
+
+	for_each_online_cpu(cpu)
+		xen_pmu_finish(cpu);
+
	on_each_cpu(xen_vcpu_notify_suspend, NULL, 1);
 }
@@ -277,12 +277,12 @@ static int ablkcipher_walk_first(struct ablkcipher_request *req,
	if (WARN_ON_ONCE(in_irq()))
		return -EDEADLK;

+	walk->iv = req->info;
	walk->nbytes = walk->total;
	if (unlikely(!walk->total))
		return 0;

	walk->iv_buffer = NULL;
-	walk->iv = req->info;
	if (unlikely(((unsigned long)walk->iv & alignmask))) {
		int err = ablkcipher_copy_iv(walk, tfm, alignmask);

@@ -326,12 +326,12 @@ static int blkcipher_walk_first(struct blkcipher_desc *desc,
	if (WARN_ON_ONCE(in_irq()))
		return -EDEADLK;

+	walk->iv = desc->info;
	walk->nbytes = walk->total;
	if (unlikely(!walk->total))
		return 0;

	walk->buffer = NULL;
-	walk->iv = desc->info;
	if (unlikely(((unsigned long)walk->iv & walk->alignmask))) {
		int err = blkcipher_copy_iv(walk);
		if (err)
@@ -1810,7 +1810,7 @@ static void acpi_nfit_notify(struct acpi_device *adev, u32 event)
	if (!dev->driver) {
		/* dev->driver may be null if we're being removed */
		dev_dbg(dev, "%s: no driver found for dev\n", __func__);
-		return;
+		goto out_unlock;
	}

	if (!acpi_desc) {
@@ -390,6 +390,7 @@ static int pm_genpd_runtime_suspend(struct device *dev)
	struct generic_pm_domain *genpd;
	bool (*stop_ok)(struct device *__dev);
	struct gpd_timing_data *td = &dev_gpd_data(dev)->td;
+	bool runtime_pm = pm_runtime_enabled(dev);
	ktime_t time_start;
	s64 elapsed_ns;
	int ret;
@@ -400,12 +401,19 @@ static int pm_genpd_runtime_suspend(struct device *dev)
	if (IS_ERR(genpd))
		return -EINVAL;

+	/*
+	 * A runtime PM centric subsystem/driver may re-use the runtime PM
+	 * callbacks for other purposes than runtime PM. In those scenarios
+	 * runtime PM is disabled. Under these circumstances, we shall skip
+	 * validating/measuring the PM QoS latency.
+	 */
	stop_ok = genpd->gov ? genpd->gov->stop_ok : NULL;
-	if (stop_ok && !stop_ok(dev))
+	if (runtime_pm && stop_ok && !stop_ok(dev))
		return -EBUSY;

	/* Measure suspend latency. */
-	time_start = ktime_get();
+	if (runtime_pm)
+		time_start = ktime_get();

	ret = genpd_save_dev(genpd, dev);
	if (ret)
@@ -418,13 +426,15 @@ static int pm_genpd_runtime_suspend(struct device *dev)
	}

	/* Update suspend latency value if the measured time exceeds it. */
-	elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start));
-	if (elapsed_ns > td->suspend_latency_ns) {
-		td->suspend_latency_ns = elapsed_ns;
-		dev_dbg(dev, "suspend latency exceeded, %lld ns\n",
-			elapsed_ns);
-		genpd->max_off_time_changed = true;
-		td->constraint_changed = true;
+	if (runtime_pm) {
+		elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start));
+		if (elapsed_ns > td->suspend_latency_ns) {
+			td->suspend_latency_ns = elapsed_ns;
+			dev_dbg(dev, "suspend latency exceeded, %lld ns\n",
+				elapsed_ns);
+			genpd->max_off_time_changed = true;
+			td->constraint_changed = true;
+		}
	}

	/*
@@ -453,6 +463,7 @@ static int pm_genpd_runtime_resume(struct device *dev)
 {
	struct generic_pm_domain *genpd;
	struct gpd_timing_data *td = &dev_gpd_data(dev)->td;
+	bool runtime_pm = pm_runtime_enabled(dev);
	ktime_t time_start;
	s64 elapsed_ns;
	int ret;
@@ -479,14 +490,14 @@ static int pm_genpd_runtime_resume(struct device *dev)

 out:
	/* Measure resume latency. */
-	if (timed)
+	if (timed && runtime_pm)
		time_start = ktime_get();

	genpd_start_dev(genpd, dev);
	genpd_restore_dev(genpd, dev);

	/* Update resume latency value if the measured time exceeds it. */
-	if (timed) {
+	if (timed && runtime_pm) {
		elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start));
		if (elapsed_ns > td->resume_latency_ns) {
			td->resume_latency_ns = elapsed_ns;
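For illustration outside the patch: the genpd change gates both the stop_ok check and the latency bookkeeping on whether runtime PM is actually enabled for the device. A userspace sketch of measure-only-when-enabled, with clock_gettime() standing in for ktime_get() and all names hypothetical:

    #include <stdbool.h>
    #include <stdio.h>
    #include <time.h>

    static long long elapsed_ns(struct timespec a, struct timespec b)
    {
        return (b.tv_sec - a.tv_sec) * 1000000000LL + (b.tv_nsec - a.tv_nsec);
    }

    int suspend_dev(bool runtime_pm)
    {
        struct timespec start, end;
        static long long worst_ns;  /* stands in for td->suspend_latency_ns */

        if (runtime_pm)
            clock_gettime(CLOCK_MONOTONIC, &start);

        /* ... the actual save/stop work would go here ... */

        if (runtime_pm) {
            long long ns;

            clock_gettime(CLOCK_MONOTONIC, &end);
            ns = elapsed_ns(start, end);
            if (ns > worst_ns) {
                worst_ns = ns;
                printf("suspend latency exceeded, %lld ns\n", ns);
            }
        }
        return 0;
    }

    int main(void) { return suspend_dev(true); }

When the flag is false, 'start' is never read, which mirrors why the kernel code only touches time_start under the same condition.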
@@ -950,6 +950,8 @@ static int xen_blkbk_parse_indirect(struct blkif_request *req,
		goto unmap;

	for (n = 0, i = 0; n < nseg; n++) {
+		uint8_t first_sect, last_sect;
+
		if ((n % SEGS_PER_INDIRECT_FRAME) == 0) {
			/* Map indirect segments */
			if (segments)
@@ -957,15 +959,18 @@ static int xen_blkbk_parse_indirect(struct blkif_request *req,
			segments = kmap_atomic(pages[n/SEGS_PER_INDIRECT_FRAME]->page);
		}
		i = n % SEGS_PER_INDIRECT_FRAME;
+
		pending_req->segments[n]->gref = segments[i].gref;
-		seg[n].nsec = segments[i].last_sect -
-			segments[i].first_sect + 1;
-		seg[n].offset = (segments[i].first_sect << 9);
-		if ((segments[i].last_sect >= (XEN_PAGE_SIZE >> 9)) ||
-		    (segments[i].last_sect < segments[i].first_sect)) {
+
+		first_sect = READ_ONCE(segments[i].first_sect);
+		last_sect = READ_ONCE(segments[i].last_sect);
+		if (last_sect >= (XEN_PAGE_SIZE >> 9) || last_sect < first_sect) {
			rc = -EINVAL;
			goto unmap;
		}
+
+		seg[n].nsec = last_sect - first_sect + 1;
+		seg[n].offset = first_sect << 9;
		preq->nr_sects += seg[n].nsec;
	}

@@ -408,8 +408,8 @@ static inline void blkif_get_x86_32_req(struct blkif_request *dst,
					struct blkif_x86_32_request *src)
 {
	int i, n = BLKIF_MAX_SEGMENTS_PER_REQUEST, j;
-	dst->operation = src->operation;
-	switch (src->operation) {
+	dst->operation = READ_ONCE(src->operation);
+	switch (dst->operation) {
	case BLKIF_OP_READ:
	case BLKIF_OP_WRITE:
	case BLKIF_OP_WRITE_BARRIER:
@@ -456,8 +456,8 @@ static inline void blkif_get_x86_64_req(struct blkif_request *dst,
					struct blkif_x86_64_request *src)
 {
	int i, n = BLKIF_MAX_SEGMENTS_PER_REQUEST, j;
-	dst->operation = src->operation;
-	switch (src->operation) {
+	dst->operation = READ_ONCE(src->operation);
+	switch (dst->operation) {
	case BLKIF_OP_READ:
	case BLKIF_OP_WRITE:
	case BLKIF_OP_WRITE_BARRIER:
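For illustration outside the patch: both blkif changes above close double-fetch races, where a value in guest-shared memory is read once into a local, validated, and only the validated snapshot is used afterwards. A standalone sketch of snapshot-then-validate (READ_ONCE_U8 and struct seg are illustrative stand-ins, not xen-blkback's types):

    #include <stdint.h>
    #include <stdio.h>

    #define READ_ONCE_U8(x) (*(volatile uint8_t *)&(x))

    struct seg { uint8_t first_sect, last_sect; };

    /* 'shared' stands in for a ring entry the frontend can rewrite anytime */
    int parse(struct seg *shared)
    {
        /* Snapshot once; later uses see the validated values, not re-reads
         * that the other side could have changed after the check. */
        uint8_t first = READ_ONCE_U8(shared->first_sect);
        uint8_t last  = READ_ONCE_U8(shared->last_sect);

        if (last < first)       /* validate the snapshot */
            return -1;

        printf("nsec = %d\n", last - first + 1);  /* uses the same snapshot */
        return 0;
    }

    int main(void)
    {
        struct seg s = { 1, 4 };
        return parse(&s);
    }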
@@ -226,7 +226,7 @@ config ARM_TEGRA20_CPUFREQ

 config ARM_TEGRA124_CPUFREQ
	tristate "Tegra124 CPUFreq support"
-	depends on ARCH_TEGRA && CPUFREQ_DT
+	depends on ARCH_TEGRA && CPUFREQ_DT && REGULATOR
	default y
	help
	  This adds the CPUFreq driver support for Tegra124 SOCs.
@@ -1123,7 +1123,7 @@ static int intel_pstate_set_policy(struct cpufreq_policy *policy)
				       limits->max_sysfs_pct);
	limits->max_perf_pct = max(limits->min_policy_pct,
				   limits->max_perf_pct);
-	limits->max_perf = round_up(limits->max_perf, 8);
+	limits->max_perf = round_up(limits->max_perf, FRAC_BITS);

	/* Make sure min_perf_pct <= max_perf_pct */
	limits->min_perf_pct = min(limits->max_perf_pct, limits->min_perf_pct);
@@ -156,7 +156,7 @@
 #define		AT_XDMAC_CC_WRIP	(0x1 << 23)	/* Write in Progress (read only) */
 #define			AT_XDMAC_CC_WRIP_DONE		(0x0 << 23)
 #define			AT_XDMAC_CC_WRIP_IN_PROGRESS	(0x1 << 23)
-#define		AT_XDMAC_CC_PERID(i)	(0x7f & (h) << 24)	/* Channel Peripheral Identifier */
+#define		AT_XDMAC_CC_PERID(i)	(0x7f & (i) << 24)	/* Channel Peripheral Identifier */
 #define AT_XDMAC_CDS_MSP	0x2C	/* Channel Data Stride Memory Set Pattern */
 #define AT_XDMAC_CSUS		0x30	/* Channel Source Microblock Stride */
 #define AT_XDMAC_CDUS		0x34	/* Channel Destination Microblock Stride */
@@ -965,7 +965,9 @@ at_xdmac_prep_interleaved(struct dma_chan *chan,
					       NULL,
					       src_addr, dst_addr,
					       xt, xt->sgl);
-		for (i = 0; i < xt->numf; i++)
+
+		/* Length of the block is (BLEN+1) microblocks. */
+		for (i = 0; i < xt->numf - 1; i++)
			at_xdmac_increment_block_count(chan, first);

		dev_dbg(chan2dev(chan), "%s: add desc 0x%p to descs_list 0x%p\n",
@@ -1086,6 +1088,7 @@ at_xdmac_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
		/* Check remaining length and change data width if needed. */
		dwidth = at_xdmac_align_width(chan,
					      src_addr | dst_addr | xfer_size);
+		chan_cc &= ~AT_XDMAC_CC_DWIDTH_MASK;
		chan_cc |= AT_XDMAC_CC_DWIDTH(dwidth);

		ublen = xfer_size >> dwidth;
@@ -1333,7 +1336,7 @@ at_xdmac_prep_dma_memset_sg(struct dma_chan *chan, struct scatterlist *sgl,
		 * since we don't care about the stride anymore.
		 */
		if ((i == (sg_len - 1)) &&
-		    sg_dma_len(ppsg) == sg_dma_len(psg)) {
+		    sg_dma_len(psg) == sg_dma_len(sg)) {
			dev_dbg(chan2dev(chan),
				"%s: desc 0x%p can be merged with desc 0x%p\n",
				__func__, desc, pdesc);
@@ -31,6 +31,7 @@
 */
 #include <linux/dmaengine.h>
 #include <linux/dma-mapping.h>
+#include <linux/dmapool.h>
 #include <linux/err.h>
 #include <linux/init.h>
 #include <linux/interrupt.h>
@@ -62,6 +63,11 @@ struct bcm2835_dma_cb {
	uint32_t pad[2];
 };

+struct bcm2835_cb_entry {
+	struct bcm2835_dma_cb *cb;
+	dma_addr_t paddr;
+};
+
 struct bcm2835_chan {
	struct virt_dma_chan vc;
	struct list_head node;
@@ -72,18 +78,18 @@ struct bcm2835_chan {

	int ch;
	struct bcm2835_desc *desc;
+	struct dma_pool *cb_pool;

	void __iomem *chan_base;
	int irq_number;
 };

 struct bcm2835_desc {
+	struct bcm2835_chan *c;
	struct virt_dma_desc vd;
	enum dma_transfer_direction dir;

-	unsigned int control_block_size;
-	struct bcm2835_dma_cb *control_block_base;
-	dma_addr_t control_block_base_phys;
+	struct bcm2835_cb_entry *cb_list;

	unsigned int frames;
	size_t size;
@@ -143,10 +149,13 @@ static inline struct bcm2835_desc *to_bcm2835_dma_desc(
 static void bcm2835_dma_desc_free(struct virt_dma_desc *vd)
 {
	struct bcm2835_desc *desc = container_of(vd, struct bcm2835_desc, vd);
-	dma_free_coherent(desc->vd.tx.chan->device->dev,
-			desc->control_block_size,
-			desc->control_block_base,
-			desc->control_block_base_phys);
+	int i;
+
+	for (i = 0; i < desc->frames; i++)
+		dma_pool_free(desc->c->cb_pool, desc->cb_list[i].cb,
+			      desc->cb_list[i].paddr);
+
+	kfree(desc->cb_list);
	kfree(desc);
 }

@@ -199,7 +208,7 @@ static void bcm2835_dma_start_desc(struct bcm2835_chan *c)

	c->desc = d = to_bcm2835_dma_desc(&vd->tx);

-	writel(d->control_block_base_phys, c->chan_base + BCM2835_DMA_ADDR);
+	writel(d->cb_list[0].paddr, c->chan_base + BCM2835_DMA_ADDR);
	writel(BCM2835_DMA_ACTIVE, c->chan_base + BCM2835_DMA_CS);
 }

@@ -232,9 +241,16 @@ static irqreturn_t bcm2835_dma_callback(int irq, void *data)
 static int bcm2835_dma_alloc_chan_resources(struct dma_chan *chan)
 {
	struct bcm2835_chan *c = to_bcm2835_dma_chan(chan);
+	struct device *dev = c->vc.chan.device->dev;
+
+	dev_dbg(dev, "Allocating DMA channel %d\n", c->ch);

-	dev_dbg(c->vc.chan.device->dev,
-			"Allocating DMA channel %d\n", c->ch);
+	c->cb_pool = dma_pool_create(dev_name(dev), dev,
+				     sizeof(struct bcm2835_dma_cb), 0, 0);
+	if (!c->cb_pool) {
+		dev_err(dev, "unable to allocate descriptor pool\n");
+		return -ENOMEM;
+	}

	return request_irq(c->irq_number,
			bcm2835_dma_callback, 0, "DMA IRQ", c);
@@ -246,6 +262,7 @@ static void bcm2835_dma_free_chan_resources(struct dma_chan *chan)

	vchan_free_chan_resources(&c->vc);
	free_irq(c->irq_number, c);
+	dma_pool_destroy(c->cb_pool);

	dev_dbg(c->vc.chan.device->dev, "Freeing DMA channel %u\n", c->ch);
 }
@@ -261,8 +278,7 @@ static size_t bcm2835_dma_desc_size_pos(struct bcm2835_desc *d, dma_addr_t addr)
	size_t size;

	for (size = i = 0; i < d->frames; i++) {
-		struct bcm2835_dma_cb *control_block =
-			&d->control_block_base[i];
+		struct bcm2835_dma_cb *control_block = d->cb_list[i].cb;
		size_t this_size = control_block->length;
		dma_addr_t dma;

@@ -343,6 +359,7 @@ static struct dma_async_tx_descriptor *bcm2835_dma_prep_dma_cyclic(
	dma_addr_t dev_addr;
	unsigned int es, sync_type;
	unsigned int frame;
+	int i;

	/* Grab configuration */
	if (!is_slave_direction(direction)) {
@@ -374,27 +391,31 @@ static struct dma_async_tx_descriptor *bcm2835_dma_prep_dma_cyclic(
	if (!d)
		return NULL;

+	d->c = c;
	d->dir = direction;
	d->frames = buf_len / period_len;

-	/* Allocate memory for control blocks */
-	d->control_block_size = d->frames * sizeof(struct bcm2835_dma_cb);
-	d->control_block_base = dma_zalloc_coherent(chan->device->dev,
-			d->control_block_size, &d->control_block_base_phys,
-			GFP_NOWAIT);
-
-	if (!d->control_block_base) {
+	d->cb_list = kcalloc(d->frames, sizeof(*d->cb_list), GFP_KERNEL);
+	if (!d->cb_list) {
		kfree(d);
		return NULL;
	}
+	/* Allocate memory for control blocks */
+	for (i = 0; i < d->frames; i++) {
+		struct bcm2835_cb_entry *cb_entry = &d->cb_list[i];
+
+		cb_entry->cb = dma_pool_zalloc(c->cb_pool, GFP_ATOMIC,
+					       &cb_entry->paddr);
+		if (!cb_entry->cb)
+			goto error_cb;
+	}

	/*
	 * Iterate over all frames, create a control block
	 * for each frame and link them together.
	 */
	for (frame = 0; frame < d->frames; frame++) {
-		struct bcm2835_dma_cb *control_block =
-			&d->control_block_base[frame];
+		struct bcm2835_dma_cb *control_block = d->cb_list[frame].cb;

		/* Setup adresses */
		if (d->dir == DMA_DEV_TO_MEM) {
@@ -428,12 +449,21 @@ static struct dma_async_tx_descriptor *bcm2835_dma_prep_dma_cyclic(
		 * This DMA engine driver currently only supports cyclic DMA.
		 * Therefore, wrap around at number of frames.
		 */
-		control_block->next = d->control_block_base_phys +
-			sizeof(struct bcm2835_dma_cb)
-			* ((frame + 1) % d->frames);
+		control_block->next = d->cb_list[((frame + 1) % d->frames)].paddr;
	}

	return vchan_tx_prep(&c->vc, &d->vd, flags);
+error_cb:
+	i--;
+	for (; i >= 0; i--) {
+		struct bcm2835_cb_entry *cb_entry = &d->cb_list[i];
+
+		dma_pool_free(c->cb_pool, cb_entry->cb, cb_entry->paddr);
+	}
+
+	kfree(d->cb_list);
+	kfree(d);
+	return NULL;
 }

 static int bcm2835_dma_slave_config(struct dma_chan *chan,
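For illustration outside the patch: the bcm2835 conversion replaces one big coherent allocation with one pool allocation per frame, which requires unwinding partial allocations on failure. A sketch of that allocate-N-or-roll-back shape, with malloc()/free() standing in for dma_pool_zalloc()/dma_pool_free() and all names hypothetical:

    #include <stdlib.h>

    struct cb_entry { void *cb; };

    /* allocate one item per frame; on failure free what was allocated so far */
    struct cb_entry *alloc_cb_list(int frames)
    {
        struct cb_entry *list = calloc(frames, sizeof(*list));
        int i;

        if (!list)
            return NULL;

        for (i = 0; i < frames; i++) {
            list[i].cb = malloc(64);    /* stands in for dma_pool_zalloc() */
            if (!list[i].cb)
                goto error_cb;
        }
        return list;

    error_cb:
        while (--i >= 0)                /* unwind in reverse order */
            free(list[i].cb);
        free(list);
        return NULL;
    }

    int main(void)
    {
        struct cb_entry *l = alloc_cb_list(8);

        if (l) {
            for (int i = 0; i < 8; i++)
                free(l[i].cb);
            free(l);
        }
        return 0;
    }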
@@ -1752,16 +1752,14 @@ static enum dma_status edma_tx_status(struct dma_chan *chan,
 	return ret;
 }

-static bool edma_is_memcpy_channel(int ch_num, u16 *memcpy_channels)
+static bool edma_is_memcpy_channel(int ch_num, s32 *memcpy_channels)
 {
-	s16 *memcpy_ch = memcpy_channels;
-
 	if (!memcpy_channels)
 		return false;
-	while (*memcpy_ch != -1) {
-		if (*memcpy_ch == ch_num)
+	while (*memcpy_channels != -1) {
+		if (*memcpy_channels == ch_num)
 			return true;
-		memcpy_ch++;
+		memcpy_channels++;
 	}
 	return false;
 }

@@ -1775,7 +1773,7 @@ static void edma_dma_init(struct edma_cc *ecc, bool legacy_mode)
 {
 	struct dma_device *s_ddev = &ecc->dma_slave;
 	struct dma_device *m_ddev = NULL;
-	s16 *memcpy_channels = ecc->info->memcpy_channels;
+	s32 *memcpy_channels = ecc->info->memcpy_channels;
 	int i, j;

 	dma_cap_zero(s_ddev->cap_mask);

@@ -1996,16 +1994,16 @@ static struct edma_soc_info *edma_setup_info_from_dt(struct device *dev,
 	prop = of_find_property(dev->of_node, "ti,edma-memcpy-channels", &sz);
 	if (prop) {
 		const char pname[] = "ti,edma-memcpy-channels";
-		size_t nelm = sz / sizeof(s16);
-		s16 *memcpy_ch;
+		size_t nelm = sz / sizeof(s32);
+		s32 *memcpy_ch;

-		memcpy_ch = devm_kcalloc(dev, nelm + 1, sizeof(s16),
+		memcpy_ch = devm_kcalloc(dev, nelm + 1, sizeof(s32),
					 GFP_KERNEL);
 		if (!memcpy_ch)
 			return ERR_PTR(-ENOMEM);

-		ret = of_property_read_u16_array(dev->of_node, pname,
-						 (u16 *)memcpy_ch, nelm);
+		ret = of_property_read_u32_array(dev->of_node, pname,
+						 (u32 *)memcpy_ch, nelm);
 		if (ret)
 			return ERR_PTR(ret);

@@ -2017,31 +2015,50 @@ static struct edma_soc_info *edma_setup_info_from_dt(struct device *dev,
					&sz);
 	if (prop) {
 		const char pname[] = "ti,edma-reserved-slot-ranges";
+		u32 (*tmp)[2];
 		s16 (*rsv_slots)[2];
-		size_t nelm = sz / sizeof(*rsv_slots);
+		size_t nelm = sz / sizeof(*tmp);
 		struct edma_rsv_info *rsv_info;
+		int i;

 		if (!nelm)
 			return info;

-		rsv_info = devm_kzalloc(dev, sizeof(*rsv_info), GFP_KERNEL);
-		if (!rsv_info)
+		tmp = kcalloc(nelm, sizeof(*tmp), GFP_KERNEL);
+		if (!tmp)
 			return ERR_PTR(-ENOMEM);

+		rsv_info = devm_kzalloc(dev, sizeof(*rsv_info), GFP_KERNEL);
+		if (!rsv_info) {
+			kfree(tmp);
+			return ERR_PTR(-ENOMEM);
+		}
+
 		rsv_slots = devm_kcalloc(dev, nelm + 1, sizeof(*rsv_slots),
					 GFP_KERNEL);
-		if (!rsv_slots)
+		if (!rsv_slots) {
+			kfree(tmp);
 			return ERR_PTR(-ENOMEM);
+		}

-		ret = of_property_read_u16_array(dev->of_node, pname,
-						 (u16 *)rsv_slots, nelm * 2);
-		if (ret)
+		ret = of_property_read_u32_array(dev->of_node, pname,
+						 (u32 *)tmp, nelm * 2);
+		if (ret) {
+			kfree(tmp);
 			return ERR_PTR(ret);
+		}

+		for (i = 0; i < nelm; i++) {
+			rsv_slots[i][0] = tmp[i][0];
+			rsv_slots[i][1] = tmp[i][1];
+		}
 		rsv_slots[nelm][0] = -1;
 		rsv_slots[nelm][1] = -1;

 		info->rsv = rsv_info;
 		info->rsv->rsv_slots = (const s16 (*)[2])rsv_slots;
+
+		kfree(tmp);
 	}

 	return info;

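The conversions above exist because device-tree cells are 32-bit, while the eDMA driver keeps its reserved-slot list as signed 16-bit pairs terminated by -1, so the values are read as u32 and copied down through a temporary buffer. A standalone sketch of the same read-as-u32, store-as-s16-pairs, -1-terminate pattern (sample property values, plain malloc in place of the devm helpers):

#include <stdio.h>
#include <stdlib.h>

typedef short s16;
typedef unsigned int u32;

int main(void)
{
	/* Stand-in for a "ti,edma-reserved-slot-ranges"-style property:
	 * each range is a <start count> pair stored as 32-bit DT cells. */
	u32 prop[] = { 35, 10, 100, 10 };
	size_t nelm = sizeof(prop) / sizeof(u32) / 2;
	size_t i;

	/* Copy into the -1 terminated s16 pair array the driver keeps. */
	s16 (*rsv_slots)[2] = calloc(nelm + 1, sizeof(*rsv_slots));
	if (!rsv_slots)
		return 1;

	for (i = 0; i < nelm; i++) {
		rsv_slots[i][0] = (s16)prop[2 * i];
		rsv_slots[i][1] = (s16)prop[2 * i + 1];
	}
	rsv_slots[nelm][0] = -1;
	rsv_slots[nelm][1] = -1;

	for (i = 0; rsv_slots[i][0] != -1; i++)
		printf("reserved slots: %d..%d\n", rsv_slots[i][0],
		       rsv_slots[i][0] + rsv_slots[i][1] - 1);
	free(rsv_slots);
	return 0;
}
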
@@ -317,6 +317,7 @@ mic_dma_prep_memcpy_lock(struct dma_chan *ch, dma_addr_t dma_dest,
 	struct mic_dma_chan *mic_ch = to_mic_dma_chan(ch);
 	struct device *dev = mic_dma_ch_to_device(mic_ch);
 	int result;
+	struct dma_async_tx_descriptor *tx = NULL;

 	if (!len && !flags)
 		return NULL;

@@ -324,10 +325,13 @@ mic_dma_prep_memcpy_lock(struct dma_chan *ch, dma_addr_t dma_dest,
 	spin_lock(&mic_ch->prep_lock);
 	result = mic_dma_do_dma(mic_ch, flags, dma_src, dma_dest, len);
 	if (result >= 0)
-		return allocate_tx(mic_ch);
-	dev_err(dev, "Error enqueueing dma, error=%d\n", result);
+		tx = allocate_tx(mic_ch);
+
+	if (!tx)
+		dev_err(dev, "Error enqueueing dma, error=%d\n", result);
+
 	spin_unlock(&mic_ch->prep_lock);
-	return NULL;
+	return tx;
 }

 static struct dma_async_tx_descriptor *
@@ -335,13 +339,14 @@ mic_dma_prep_interrupt_lock(struct dma_chan *ch, unsigned long flags)
 {
 	struct mic_dma_chan *mic_ch = to_mic_dma_chan(ch);
 	int ret;
+	struct dma_async_tx_descriptor *tx = NULL;

 	spin_lock(&mic_ch->prep_lock);
 	ret = mic_dma_do_dma(mic_ch, flags, 0, 0, 0);
 	if (!ret)
-		return allocate_tx(mic_ch);
+		tx = allocate_tx(mic_ch);
 	spin_unlock(&mic_ch->prep_lock);
-	return NULL;
+	return tx;
 }

 /* Return the status of the transaction */

@@ -113,7 +113,7 @@ static int ar934x_gpio_direction_output(struct gpio_chip *chip, unsigned offset,
 		__raw_writel(BIT(offset), ctrl->base + AR71XX_GPIO_REG_CLEAR);

 	__raw_writel(
-		__raw_readl(ctrl->base + AR71XX_GPIO_REG_OE) & BIT(offset),
+		__raw_readl(ctrl->base + AR71XX_GPIO_REG_OE) & ~BIT(offset),
 		ctrl->base + AR71XX_GPIO_REG_OE);

 	spin_unlock_irqrestore(&ctrl->lock, flags);

@@ -141,9 +141,9 @@ static int bgpio_get_set(struct gpio_chip *gc, unsigned int gpio)
 	unsigned long pinmask = bgc->pin2mask(bgc, gpio);

 	if (bgc->dir & pinmask)
-		return bgc->read_reg(bgc->reg_set) & pinmask;
+		return !!(bgc->read_reg(bgc->reg_set) & pinmask);
 	else
-		return bgc->read_reg(bgc->reg_dat) & pinmask;
+		return !!(bgc->read_reg(bgc->reg_dat) & pinmask);
 }

 static int bgpio_get(struct gpio_chip *gc, unsigned int gpio)

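The added !! matters because the raw masked register value for a high GPIO bit does not fit the int return type. A minimal userspace illustration (hypothetical register snapshot, not the gpio-generic code):

#include <stdio.h>

/* Hypothetical 32-bit register snapshot with bit 31 set. */
static unsigned long reg = 0x80000000UL;

static int get_raw(unsigned long pinmask)
{
	return reg & pinmask;		/* truncates/overflows into int */
}

static int get_clamped(unsigned long pinmask)
{
	return !!(reg & pinmask);	/* always exactly 0 or 1 */
}

int main(void)
{
	unsigned long pinmask = 1UL << 31;

	printf("raw:     %d\n", get_raw(pinmask));	/* -2147483648 on typical LP64 */
	printf("clamped: %d\n", get_clamped(pinmask));	/* 1 */
	return 0;
}
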
@@ -1279,7 +1279,13 @@ static int _gpiod_get_raw_value(const struct gpio_desc *desc)
 	chip = desc->chip;
 	offset = gpio_chip_hwgpio(desc);
 	value = chip->get ? chip->get(chip, offset) : -EIO;
-	value = value < 0 ? value : !!value;
+	/*
+	 * FIXME: fix all drivers to clamp to [0,1] or return negative,
+	 * then change this to:
+	 * value = value < 0 ? value : !!value;
+	 * so we can properly propagate error codes.
+	 */
+	value = !!value;
 	trace_gpio_value(desc_to_gpio(desc), 1, value);
 	return value;
 }

@@ -229,7 +229,8 @@ static int drm_helper_probe_single_connector_modes_merge_bits(struct drm_connector *connector,
 		mode_flags |= DRM_MODE_FLAG_3D_MASK;

 	list_for_each_entry(mode, &connector->modes, head) {
-		mode->status = drm_mode_validate_basic(mode);
+		if (mode->status == MODE_OK)
+			mode->status = drm_mode_validate_basic(mode);

 		if (mode->status == MODE_OK)
 			mode->status = drm_mode_validate_size(mode, maxX, maxY);

@@ -141,8 +141,6 @@ static void i915_gem_context_clean(struct intel_context *ctx)
 	if (!ppgtt)
 		return;

-	WARN_ON(!list_empty(&ppgtt->base.active_list));
-
 	list_for_each_entry_safe(vma, next, &ppgtt->base.inactive_list,
				 mm_list) {
 		if (WARN_ON(__i915_vma_unbind_no_wait(vma)))

@@ -6309,9 +6309,11 @@ static void intel_crtc_disable_noatomic(struct drm_crtc *crtc)
 	if (to_intel_plane_state(crtc->primary->state)->visible) {
 		intel_crtc_wait_for_pending_flips(crtc);
 		intel_pre_disable_primary(crtc);
+
+		intel_crtc_disable_planes(crtc, 1 << drm_plane_index(crtc->primary));
+		to_intel_plane_state(crtc->primary->state)->visible = false;
 	}

-	intel_crtc_disable_planes(crtc, crtc->state->plane_mask);
 	dev_priv->display.crtc_disable(crtc);
 	intel_crtc->active = false;
 	intel_update_watermarks(crtc);

@@ -4782,8 +4782,7 @@ static void gen9_enable_rc6(struct drm_device *dev)
 	/* 2b: Program RC6 thresholds.*/

 	/* WaRsDoubleRc6WrlWithCoarsePowerGating: Doubling WRL only when CPG is enabled */
-	if (IS_SKYLAKE(dev) && !((IS_SKL_GT3(dev) || IS_SKL_GT4(dev)) &&
-				 (INTEL_REVID(dev) <= SKL_REVID_E0)))
+	if (IS_SKYLAKE(dev))
 		I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 108 << 16);
 	else
 		I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 54 << 16);

@@ -4825,7 +4824,7 @@ static void gen9_enable_rc6(struct drm_device *dev)
	 * WaRsDisableCoarsePowerGating:skl,bxt - Render/Media PG need to be disabled with RC6.
	 */
 	if ((IS_BROXTON(dev) && (INTEL_REVID(dev) < BXT_REVID_B0)) ||
-	    ((IS_SKL_GT3(dev) || IS_SKL_GT4(dev)) && (INTEL_REVID(dev) <= SKL_REVID_E0)))
+	    ((IS_SKL_GT3(dev) || IS_SKL_GT4(dev)) && (INTEL_REVID(dev) <= SKL_REVID_F0)))
 		I915_WRITE(GEN9_PG_ENABLE, 0);
 	else
 		I915_WRITE(GEN9_PG_ENABLE, (rc6_mask & GEN6_RC_CTL_RC6_ENABLE) ?

@@ -112,11 +112,8 @@ static int omap_fbdev_create(struct drm_fb_helper *helper,
 	dma_addr_t paddr;
 	int ret;

-	/* only doing ARGB32 since this is what is needed to alpha-blend
-	 * with video overlays:
-	 */
 	sizes->surface_bpp = 32;
-	sizes->surface_depth = 32;
+	sizes->surface_depth = 24;

 	DBG("create fbdev: %dx%d@%d (%dx%d)", sizes->surface_width,
			sizes->surface_height, sizes->surface_bpp,

@@ -1217,6 +1217,7 @@ config SENSORS_PWM_FAN
 config SENSORS_SHT15
	tristate "Sensiron humidity and temperature sensors. SHT15 and compat."
	depends on GPIOLIB || COMPILE_TEST
+	select BITREVERSE
	help
	  If you say yes here you get support for the Sensiron SHT10, SHT11,
	  SHT15, SHT71, SHT75 humidity and temperature sensors.

@@ -58,6 +58,7 @@ struct tmp102 {
	u16 config_orig;
	unsigned long last_update;
	int temp[3];
+	bool first_time;
 };

 /* convert left adjusted 13-bit TMP102 register value to milliCelsius */

@@ -93,6 +94,7 @@ static struct tmp102 *tmp102_update_device(struct device *dev)
			tmp102->temp[i] = tmp102_reg_to_mC(status);
		}
		tmp102->last_update = jiffies;
+		tmp102->first_time = false;
	}
	mutex_unlock(&tmp102->lock);
	return tmp102;

@@ -102,6 +104,12 @@ static int tmp102_read_temp(void *dev, int *temp)
 {
	struct tmp102 *tmp102 = tmp102_update_device(dev);

+	/* Is it too early even to return a conversion? */
+	if (tmp102->first_time) {
+		dev_dbg(dev, "%s: Conversion not ready yet..\n", __func__);
+		return -EAGAIN;
+	}
+
	*temp = tmp102->temp[0];

	return 0;

@@ -114,6 +122,10 @@ static ssize_t tmp102_show_temp(struct device *dev,
	struct sensor_device_attribute *sda = to_sensor_dev_attr(attr);
	struct tmp102 *tmp102 = tmp102_update_device(dev);

+	/* Is it too early even to return a read? */
+	if (tmp102->first_time)
+		return -EAGAIN;
+
	return sprintf(buf, "%d\n", tmp102->temp[sda->index]);
 }

@@ -207,7 +219,9 @@ static int tmp102_probe(struct i2c_client *client,
		status = -ENODEV;
		goto fail_restore_config;
	}
-	tmp102->last_update = jiffies - HZ;
+	tmp102->last_update = jiffies;
+	/* Mark that we are not ready with data until conversion is complete */
+	tmp102->first_time = true;
	mutex_init(&tmp102->lock);

	hwmon_dev = hwmon_device_register_with_groups(dev, client->name,

|
@@ -202,8 +202,15 @@ static void i2c_davinci_calc_clk_dividers(struct davinci_i2c_dev *dev)
	 * d is always 6 on Keystone I2C controller
	 */

-	/* get minimum of 7 MHz clock, but max of 12 MHz */
-	psc = (input_clock / 7000000) - 1;
+	/*
+	 * Both Davinci and current Keystone User Guides recommend a value
+	 * between 7MHz and 12MHz. In reality 7MHz module clock doesn't
+	 * always produce enough margin between SDA and SCL transitions.
+	 * Measurements show that the higher the module clock is, the
+	 * bigger is the margin, providing more reliable communication.
+	 * So we better target for 12MHz.
+	 */
+	psc = (input_clock / 12000000) - 1;
+	if ((input_clock / (psc + 1)) > 12000000)
+		psc++; /* better to run under spec than over */
	d = (psc >= 2) ? 5 : 7 - psc;

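The new divider selection rounds the prescaler so the module clock lands at or just under 12 MHz. The same arithmetic in isolation (sample 50 MHz input clock; any rate works the same way):

#include <stdio.h>

int main(void)
{
	unsigned int input_clock = 50000000;	/* example: 50 MHz */
	unsigned int psc, module_clk;

	/* Target the highest module clock not above 12 MHz. */
	psc = (input_clock / 12000000) - 1;
	if ((input_clock / (psc + 1)) > 12000000)
		psc++;			/* better to run under spec than over */

	module_clk = input_clock / (psc + 1);
	printf("psc=%u -> module clock %u Hz\n", psc, module_clk);
	return 0;
}
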
@@ -813,6 +813,12 @@ static irqreturn_t i2c_dw_isr(int this_irq, void *dev_id)
 tx_aborted:
	if ((stat & (DW_IC_INTR_TX_ABRT | DW_IC_INTR_STOP_DET)) || dev->msg_err)
		complete(&dev->cmd_complete);
+	else if (unlikely(dev->accessor_flags & ACCESS_INTR_MASK)) {
+		/* workaround to trigger pending interrupt */
+		stat = dw_readl(dev, DW_IC_INTR_MASK);
+		i2c_dw_disable_int(dev);
+		dw_writel(dev, stat, DW_IC_INTR_MASK);
+	}

	return IRQ_HANDLED;
 }

@@ -111,6 +111,7 @@ struct dw_i2c_dev {

 #define ACCESS_SWAP		0x00000001
 #define ACCESS_16BIT		0x00000002
+#define ACCESS_INTR_MASK	0x00000004

 extern int i2c_dw_init(struct dw_i2c_dev *dev);
 extern void i2c_dw_disable(struct dw_i2c_dev *dev);

@@ -93,6 +93,7 @@ static void dw_i2c_acpi_params(struct platform_device *pdev, char method[],
 static int dw_i2c_acpi_configure(struct platform_device *pdev)
 {
	struct dw_i2c_dev *dev = platform_get_drvdata(pdev);
+	const struct acpi_device_id *id;

	dev->adapter.nr = -1;
	dev->tx_fifo_depth = 32;

@@ -106,6 +107,10 @@ static int dw_i2c_acpi_configure(struct platform_device *pdev)
	dw_i2c_acpi_params(pdev, "FMCN", &dev->fs_hcnt, &dev->fs_lcnt,
			   &dev->sda_hold_time);

+	id = acpi_match_device(pdev->dev.driver->acpi_match_table, &pdev->dev);
+	if (id && id->driver_data)
+		dev->accessor_flags |= (u32)id->driver_data;
+
	return 0;
 }

@@ -116,7 +121,7 @@ static const struct acpi_device_id dw_i2c_acpi_match[] = {
	{ "INT3433", 0 },
	{ "80860F41", 0 },
	{ "808622C1", 0 },
-	{ "AMD0010", 0 },
+	{ "AMD0010", ACCESS_INTR_MASK },
	{ }
 };
 MODULE_DEVICE_TABLE(acpi, dw_i2c_acpi_match);

@@ -240,12 +245,10 @@ static int dw_i2c_plat_probe(struct platform_device *pdev)
	}

	r = i2c_dw_probe(dev);
-	if (r) {
+	if (r && !dev->pm_runtime_disabled)
		pm_runtime_disable(&pdev->dev);
-		return r;
-	}

-	return 0;
+	return r;
 }

 static int dw_i2c_plat_remove(struct platform_device *pdev)

@@ -260,7 +263,8 @@ static int dw_i2c_plat_remove(struct platform_device *pdev)

	pm_runtime_dont_use_autosuspend(&pdev->dev);
	pm_runtime_put_sync(&pdev->dev);
-	pm_runtime_disable(&pdev->dev);
+	if (!dev->pm_runtime_disabled)
+		pm_runtime_disable(&pdev->dev);

	return 0;
 }

@@ -1119,6 +1119,8 @@ static int i2c_imx_probe(struct platform_device *pdev)
			i2c_imx, IMX_I2C_I2CR);
	imx_i2c_write_reg(i2c_imx->hwdata->i2sr_clr_opcode, i2c_imx, IMX_I2C_I2SR);

+	i2c_imx_init_recovery_info(i2c_imx, pdev);
+
	/* Add I2C adapter */
	ret = i2c_add_numbered_adapter(&i2c_imx->adapter);
	if (ret < 0) {

@@ -1126,8 +1128,6 @@ static int i2c_imx_probe(struct platform_device *pdev)
		goto clk_disable;
	}

-	i2c_imx_init_recovery_info(i2c_imx, pdev);
-
	/* Set up platform driver data */
	platform_set_drvdata(pdev, i2c_imx);
	clk_disable_unprepare(i2c_imx->clk);

@@ -146,6 +146,8 @@ struct mv64xxx_i2c_data {
	bool			errata_delay;
	struct reset_control	*rstc;
	bool			irq_clear_inverted;
+	/* Clk div is 2 to the power n, not 2 to the power n + 1 */
+	bool			clk_n_base_0;
 };

 static struct mv64xxx_i2c_regs mv64xxx_i2c_regs_mv64xxx = {

@@ -757,25 +759,29 @@ MODULE_DEVICE_TABLE(of, mv64xxx_i2c_of_match_table);
 #ifdef CONFIG_OF
 #ifdef CONFIG_HAVE_CLK
 static int
-mv64xxx_calc_freq(const int tclk, const int n, const int m)
+mv64xxx_calc_freq(struct mv64xxx_i2c_data *drv_data,
+		  const int tclk, const int n, const int m)
 {
-	return tclk / (10 * (m + 1) * (2 << n));
+	if (drv_data->clk_n_base_0)
+		return tclk / (10 * (m + 1) * (1 << n));
+	else
+		return tclk / (10 * (m + 1) * (2 << n));
 }

 static bool
-mv64xxx_find_baud_factors(const u32 req_freq, const u32 tclk, u32 *best_n,
-			  u32 *best_m)
+mv64xxx_find_baud_factors(struct mv64xxx_i2c_data *drv_data,
+			  const u32 req_freq, const u32 tclk)
 {
	int freq, delta, best_delta = INT_MAX;
	int m, n;

	for (n = 0; n <= 7; n++)
		for (m = 0; m <= 15; m++) {
-			freq = mv64xxx_calc_freq(tclk, n, m);
+			freq = mv64xxx_calc_freq(drv_data, tclk, n, m);
			delta = req_freq - freq;
			if (delta >= 0 && delta < best_delta) {
-				*best_m = m;
-				*best_n = n;
+				drv_data->freq_m = m;
+				drv_data->freq_n = n;
				best_delta = delta;
			}
			if (best_delta == 0)

@@ -813,8 +819,11 @@ mv64xxx_of_config(struct mv64xxx_i2c_data *drv_data,
	if (of_property_read_u32(np, "clock-frequency", &bus_freq))
		bus_freq = 100000; /* 100kHz by default */

-	if (!mv64xxx_find_baud_factors(bus_freq, tclk,
-				       &drv_data->freq_n, &drv_data->freq_m)) {
+	if (of_device_is_compatible(np, "allwinner,sun4i-a10-i2c") ||
+	    of_device_is_compatible(np, "allwinner,sun6i-a31-i2c"))
+		drv_data->clk_n_base_0 = true;
+
+	if (!mv64xxx_find_baud_factors(drv_data, bus_freq, tclk)) {
		rc = -EINVAL;
		goto out;
	}

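The clk_n_base_0 flag selects between a 2^n and a 2^(n+1) divider inside the baud-factor search. A self-contained version of that search under the Allwinner-style divider (placeholder tclk and bus frequency, not the driver code):

#include <limits.h>
#include <stdio.h>

#define TCLK     100000000	/* assumed 100 MHz input clock */
#define BUS_FREQ 100000		/* assumed 100 kHz target */

static int calc_freq(int clk_n_base_0, int tclk, int n, int m)
{
	/* Allwinner variants divide by 2^n, the original IP by 2^(n+1). */
	if (clk_n_base_0)
		return tclk / (10 * (m + 1) * (1 << n));
	return tclk / (10 * (m + 1) * (2 << n));
}

int main(void)
{
	int n, m, best_n = 0, best_m = 0, best_delta = INT_MAX;

	for (n = 0; n <= 7; n++)
		for (m = 0; m <= 15; m++) {
			int delta = BUS_FREQ - calc_freq(1, TCLK, n, m);
			if (delta >= 0 && delta < best_delta) {
				best_n = n;
				best_m = m;
				best_delta = delta;
			}
		}
	printf("n=%d m=%d -> %d Hz\n", best_n, best_m,
	       calc_freq(1, TCLK, best_n, best_m));
	return 0;
}
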
@@ -576,7 +576,7 @@ static int rcar_reg_slave(struct i2c_client *slave)
	if (slave->flags & I2C_CLIENT_TEN)
		return -EAFNOSUPPORT;

-	pm_runtime_forbid(rcar_i2c_priv_to_dev(priv));
+	pm_runtime_get_sync(rcar_i2c_priv_to_dev(priv));

	priv->slave = slave;
	rcar_i2c_write(priv, ICSAR, slave->addr);

@@ -598,7 +598,7 @@ static int rcar_unreg_slave(struct i2c_client *slave)

	priv->slave = NULL;

-	pm_runtime_allow(rcar_i2c_priv_to_dev(priv));
+	pm_runtime_put(rcar_i2c_priv_to_dev(priv));

	return 0;
 }

@@ -908,7 +908,7 @@ static int rk3x_i2c_probe(struct platform_device *pdev)
				 &i2c->scl_fall_ns))
		i2c->scl_fall_ns = 300;
	if (of_property_read_u32(pdev->dev.of_node, "i2c-sda-falling-time-ns",
-				 &i2c->scl_fall_ns))
+				 &i2c->sda_fall_ns))
		i2c->sda_fall_ns = i2c->scl_fall_ns;

	strlcpy(i2c->adap.name, "rk3x-i2c", sizeof(i2c->adap.name));

@@ -822,7 +822,7 @@ static int st_i2c_probe(struct platform_device *pdev)

	adap = &i2c_dev->adap;
	i2c_set_adapdata(adap, i2c_dev);
-	snprintf(adap->name, sizeof(adap->name), "ST I2C(0x%pa)", &res->start);
+	snprintf(adap->name, sizeof(adap->name), "ST I2C(%pa)", &res->start);
	adap->owner = THIS_MODULE;
	adap->timeout = 2 * HZ;
	adap->retries = 0;

@@ -592,6 +592,7 @@ static void db9_attach(struct parport *pp)
		return;
	}

+	memset(&db9_parport_cb, 0, sizeof(db9_parport_cb));
	db9_parport_cb.flags = PARPORT_FLAG_EXCL;

	pd = parport_register_dev_model(pp, "db9", &db9_parport_cb, port_idx);

@@ -951,6 +951,7 @@ static void gc_attach(struct parport *pp)
	pads = gc_cfg[port_idx].args + 1;
	n_pads = gc_cfg[port_idx].nargs - 1;

+	memset(&gc_parport_cb, 0, sizeof(gc_parport_cb));
	gc_parport_cb.flags = PARPORT_FLAG_EXCL;

	pd = parport_register_dev_model(pp, "gamecon", &gc_parport_cb,

@@ -181,6 +181,7 @@ static void tgfx_attach(struct parport *pp)
	n_buttons = tgfx_cfg[port_idx].args + 1;
	n_devs = tgfx_cfg[port_idx].nargs - 1;

+	memset(&tgfx_parport_cb, 0, sizeof(tgfx_parport_cb));
	tgfx_parport_cb.flags = PARPORT_FLAG_EXCL;

	pd = parport_register_dev_model(pp, "turbografx", &tgfx_parport_cb,

@@ -218,6 +218,7 @@ static void walkera0701_attach(struct parport *pp)

	w->parport = pp;

+	memset(&walkera0701_parport_cb, 0, sizeof(walkera0701_parport_cb));
	walkera0701_parport_cb.flags = PARPORT_FLAG_EXCL;
	walkera0701_parport_cb.irq_func = walkera0701_irq_handler;
	walkera0701_parport_cb.private = w;

@@ -97,8 +97,7 @@ static void arizona_haptics_work(struct work_struct *work)

		ret = regmap_update_bits(arizona->regmap,
					 ARIZONA_HAPTICS_CONTROL_1,
-					 ARIZONA_HAP_CTRL_MASK,
-					 1 << ARIZONA_HAP_CTRL_SHIFT);
+					 ARIZONA_HAP_CTRL_MASK, 0);
		if (ret != 0) {
			dev_err(arizona->dev, "Failed to stop haptics: %d\n",
				ret);

@@ -41,6 +41,7 @@

 #define DRIVER_NAME		"elan_i2c"
 #define ELAN_DRIVER_VERSION	"1.6.1"
+#define ELAN_VENDOR_ID		0x04f3
 #define ETP_MAX_PRESSURE	255
 #define ETP_FWIDTH_REDUCE	90
 #define ETP_FINGER_WIDTH	15

@@ -914,6 +915,8 @@ static int elan_setup_input_device(struct elan_tp_data *data)

	input->name = "Elan Touchpad";
	input->id.bustype = BUS_I2C;
+	input->id.vendor = ELAN_VENDOR_ID;
+	input->id.product = data->product_id;
	input_set_drvdata(input, data);

	error = input_mt_init_slots(input, ETP_MAX_FINGERS,

@@ -145,6 +145,7 @@ static int parkbd_getport(struct parport *pp)
 {
	struct pardev_cb parkbd_parport_cb;

+	memset(&parkbd_parport_cb, 0, sizeof(parkbd_parport_cb));
	parkbd_parport_cb.irq_func = parkbd_interrupt;
	parkbd_parport_cb.flags = PARPORT_FLAG_EXCL;

@@ -1819,6 +1819,14 @@ aiptek_probe(struct usb_interface *intf, const struct usb_device_id *id)
	input_set_abs_params(inputdev, ABS_TILT_Y, AIPTEK_TILT_MIN, AIPTEK_TILT_MAX, 0, 0);
	input_set_abs_params(inputdev, ABS_WHEEL, AIPTEK_WHEEL_MIN, AIPTEK_WHEEL_MAX - 1, 0, 0);

+	/* Verify that a device really has an endpoint */
+	if (intf->altsetting[0].desc.bNumEndpoints < 1) {
+		dev_err(&intf->dev,
+			"interface has %d endpoints, but must have minimum 1\n",
+			intf->altsetting[0].desc.bNumEndpoints);
+		err = -EINVAL;
+		goto fail3;
+	}
	endpoint = &intf->altsetting[0].endpoint[0].desc;

	/* Go set up our URB, which is called when the tablet receives

@@ -1861,6 +1869,7 @@ aiptek_probe(struct usb_interface *intf, const struct usb_device_id *id)
	if (i == ARRAY_SIZE(speeds)) {
		dev_info(&intf->dev,
			 "Aiptek tried all speeds, no sane response\n");
+		err = -EINVAL;
		goto fail3;
	}

@@ -2487,6 +2487,31 @@ static struct mxt_acpi_platform_data samus_platform_data[] = {
	{ }
 };

+static unsigned int chromebook_tp_buttons[] = {
+	KEY_RESERVED,
+	KEY_RESERVED,
+	KEY_RESERVED,
+	KEY_RESERVED,
+	KEY_RESERVED,
+	BTN_LEFT
+};
+
+static struct mxt_acpi_platform_data chromebook_platform_data[] = {
+	{
+		/* Touchpad */
+		.hid	= "ATML0000",
+		.pdata	= {
+			.t19_num_keys	= ARRAY_SIZE(chromebook_tp_buttons),
+			.t19_keymap	= chromebook_tp_buttons,
+		},
+	},
+	{
+		/* Touchscreen */
+		.hid	= "ATML0001",
+	},
+	{ }
+};
+
 static const struct dmi_system_id mxt_dmi_table[] = {
	{
		/* 2015 Google Pixel */

@@ -2497,6 +2522,14 @@ static const struct dmi_system_id mxt_dmi_table[] = {
		},
		.driver_data = samus_platform_data,
	},
+	{
+		/* Other Google Chromebooks */
+		.ident = "Chromebook",
+		.matches = {
+			DMI_MATCH(DMI_SYS_VENDOR, "GOOGLE"),
+		},
+		.driver_data = chromebook_platform_data,
+	},
	{ }
 };

@@ -2701,6 +2734,7 @@ static const struct i2c_device_id mxt_id[] = {
	{ "qt602240_ts", 0 },
	{ "atmel_mxt_ts", 0 },
	{ "atmel_mxt_tp", 0 },
+	{ "maxtouch", 0 },
	{ "mXT224", 0 },
	{ }
 };

@@ -1316,7 +1316,13 @@ static int __maybe_unused elants_i2c_suspend(struct device *dev)

	disable_irq(client->irq);

-	if (device_may_wakeup(dev) || ts->keep_power_in_suspend) {
+	if (device_may_wakeup(dev)) {
+		/*
+		 * The device will automatically enter idle mode
+		 * that has reduced power consumption.
+		 */
+		ts->wake_irq_enabled = (enable_irq_wake(client->irq) == 0);
+	} else if (ts->keep_power_in_suspend) {
		for (retry_cnt = 0; retry_cnt < MAX_RETRIES; retry_cnt++) {
			error = elants_i2c_send(client, set_sleep_cmd,
						sizeof(set_sleep_cmd));

@@ -1326,10 +1332,6 @@ static int __maybe_unused elants_i2c_suspend(struct device *dev)
			dev_err(&client->dev,
				"suspend command failed: %d\n", error);
		}
-
-		if (device_may_wakeup(dev))
-			ts->wake_irq_enabled =
-					(enable_irq_wake(client->irq) == 0);
	} else {
		elants_i2c_power_off(ts);
	}

@@ -1345,10 +1347,11 @@ static int __maybe_unused elants_i2c_resume(struct device *dev)
	int retry_cnt;
	int error;

-	if (device_may_wakeup(dev) && ts->wake_irq_enabled)
-		disable_irq_wake(client->irq);
-
-	if (ts->keep_power_in_suspend) {
+	if (device_may_wakeup(dev)) {
+		if (ts->wake_irq_enabled)
+			disable_irq_wake(client->irq);
+		elants_i2c_sw_reset(client);
+	} else if (ts->keep_power_in_suspend) {
		for (retry_cnt = 0; retry_cnt < MAX_RETRIES; retry_cnt++) {
			error = elants_i2c_send(client, set_active_cmd,
						sizeof(set_active_cmd));

@@ -494,6 +494,22 @@ static void handle_fault_error(struct fault *fault)
	}
 }

+static bool access_error(struct vm_area_struct *vma, struct fault *fault)
+{
+	unsigned long requested = 0;
+
+	if (fault->flags & PPR_FAULT_EXEC)
+		requested |= VM_EXEC;
+
+	if (fault->flags & PPR_FAULT_READ)
+		requested |= VM_READ;
+
+	if (fault->flags & PPR_FAULT_WRITE)
+		requested |= VM_WRITE;
+
+	return (requested & ~vma->vm_flags) != 0;
+}
+
 static void do_fault(struct work_struct *work)
 {
	struct fault *fault = container_of(work, struct fault, work);

@@ -516,8 +532,8 @@ static void do_fault(struct work_struct *work)
		goto out;
	}

-	if (!(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE))) {
-		/* handle_mm_fault would BUG_ON() */
+	/* Check if we have the right permissions on the vma */
+	if (access_error(vma, fault)) {
		up_read(&mm->mmap_sem);
		handle_fault_error(fault);
		goto out;

@@ -2159,7 +2159,7 @@ static int __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
			sg_res = aligned_nrpages(sg->offset, sg->length);
			sg->dma_address = ((dma_addr_t)iov_pfn << VTD_PAGE_SHIFT) + sg->offset;
			sg->dma_length = sg->length;
-			pteval = (sg_phys(sg) & PAGE_MASK) | prot;
+			pteval = page_to_phys(sg_page(sg)) | prot;
			phys_pfn = pteval >> VTD_PAGE_SHIFT;
		}

@@ -3704,7 +3704,7 @@ static int intel_nontranslate_map_sg(struct device *hddev,

	for_each_sg(sglist, sg, nelems, i) {
		BUG_ON(!sg_page(sg));
-		sg->dma_address = sg_phys(sg);
+		sg->dma_address = page_to_phys(sg_page(sg)) + sg->offset;
		sg->dma_length = sg->length;
	}
	return nelems;

@@ -484,6 +484,23 @@ struct page_req_dsc {
 };

 #define PRQ_RING_MASK ((0x1000 << PRQ_ORDER) - 0x10)
+
+static bool access_error(struct vm_area_struct *vma, struct page_req_dsc *req)
+{
+	unsigned long requested = 0;
+
+	if (req->exe_req)
+		requested |= VM_EXEC;
+
+	if (req->rd_req)
+		requested |= VM_READ;
+
+	if (req->wr_req)
+		requested |= VM_WRITE;
+
+	return (requested & ~vma->vm_flags) != 0;
+}
+
 static irqreturn_t prq_event_thread(int irq, void *d)
 {
	struct intel_iommu *iommu = d;

@@ -539,6 +556,9 @@ static irqreturn_t prq_event_thread(int irq, void *d)
		if (!vma || address < vma->vm_start)
			goto invalid;

+		if (access_error(vma, req))
+			goto invalid;
+
		ret = handle_mm_fault(svm->mm, vma, address,
				      req->wr_req ? FAULT_FLAG_WRITE : 0);
		if (ret & VM_FAULT_ERROR)

@@ -1430,7 +1430,7 @@ size_t default_iommu_map_sg(struct iommu_domain *domain, unsigned long iova,
	min_pagesz = 1 << __ffs(domain->ops->pgsize_bitmap);

	for_each_sg(sg, s, nents, i) {
-		phys_addr_t phys = sg_phys(s);
+		phys_addr_t phys = page_to_phys(sg_page(s)) + s->offset;

		/*
		 * We are mapping on IOMMU page boundaries, so offset within

@@ -67,8 +67,7 @@ static int write_modem(struct cardstate *cs)
	struct sk_buff *skb = bcs->tx_skb;
	int sent = -EOPNOTSUPP;

-	if (!tty || !tty->driver || !skb)
-		return -EINVAL;
+	WARN_ON(!tty || !tty->ops || !skb);

	if (!skb->len) {
		dev_kfree_skb_any(skb);

@@ -109,8 +108,7 @@ static int send_cb(struct cardstate *cs)
	unsigned long flags;
	int sent = 0;

-	if (!tty || !tty->driver)
-		return -EFAULT;
+	WARN_ON(!tty || !tty->ops);

	cb = cs->cmdbuf;
	if (!cb)

@@ -370,19 +368,18 @@ static void gigaset_freecshw(struct cardstate *cs)
	tasklet_kill(&cs->write_tasklet);
	if (!cs->hw.ser)
		return;
-	dev_set_drvdata(&cs->hw.ser->dev.dev, NULL);
	platform_device_unregister(&cs->hw.ser->dev);
-	kfree(cs->hw.ser);
-	cs->hw.ser = NULL;
 }

 static void gigaset_device_release(struct device *dev)
 {
	struct platform_device *pdev = to_platform_device(dev);
+	struct cardstate *cs = dev_get_drvdata(dev);

	/* adapted from platform_device_release() in drivers/base/platform.c */
	kfree(dev->platform_data);
	kfree(pdev->resource);
+	if (!cs)
+		return;
+	dev_set_drvdata(dev, NULL);
+	kfree(cs->hw.ser);
+	cs->hw.ser = NULL;
 }

 /*

@@ -432,7 +429,9 @@ static int gigaset_set_modem_ctrl(struct cardstate *cs, unsigned old_state,
	struct tty_struct *tty = cs->hw.ser->tty;
	unsigned int set, clear;

-	if (!tty || !tty->driver || !tty->ops->tiocmset)
+	WARN_ON(!tty || !tty->ops);
+	/* tiocmset is an optional tty driver method */
+	if (!tty->ops->tiocmset)
		return -EINVAL;
	set = new_state & ~old_state;
	clear = old_state & ~new_state;

@@ -1170,7 +1170,7 @@ mISDNipac_irq(struct ipac_hw *ipac, int maxloop)

	if (ipac->type & IPAC_TYPE_IPACX) {
		ista = ReadIPAC(ipac, ISACX_ISTA);
-		while (ista && cnt--) {
+		while (ista && --cnt) {
			pr_debug("%s: ISTA %02x\n", ipac->name, ista);
			if (ista & IPACX__ICA)
				ipac_irq(&ipac->hscx[0], ista);

@@ -1182,7 +1182,7 @@ mISDNipac_irq(struct ipac_hw *ipac, int maxloop)
		}
	} else if (ipac->type & IPAC_TYPE_IPAC) {
		ista = ReadIPAC(ipac, IPAC_ISTA);
-		while (ista && cnt--) {
+		while (ista && --cnt) {
			pr_debug("%s: ISTA %02x\n", ipac->name, ista);
			if (ista & (IPAC__ICD | IPAC__EXD)) {
				istad = ReadISAC(isac, ISAC_ISTA);

@@ -1200,7 +1200,7 @@ mISDNipac_irq(struct ipac_hw *ipac, int maxloop)
			ista = ReadIPAC(ipac, IPAC_ISTA);
		}
	} else if (ipac->type & IPAC_TYPE_HSCX) {
-		while (cnt) {
+		while (--cnt) {
			ista = ReadIPAC(ipac, IPAC_ISTAB + ipac->hscx[1].off);
			pr_debug("%s: B2 ISTA %02x\n", ipac->name, ista);
			if (ista)

@@ -1211,7 +1211,6 @@ mISDNipac_irq(struct ipac_hw *ipac, int maxloop)
			mISDNisac_irq(isac, istad);
			if (0 == (ista | istad))
				break;
-			cnt--;
		}
	}
	if (cnt > maxloop) /* only for ISAC/HSCX without PCI IRQ test */

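The cnt-- to --cnt changes are not cosmetic: a post-decrement loop guard leaves the counter at -1 once the budget is exhausted, which skews leftover-count checks like the `if (cnt > maxloop)` above (and the `if (!count)` tests in similar polling loops elsewhere in this merge). The two guards in isolation:

#include <stdio.h>

int main(void)
{
	int cnt;

	/* Post-decrement: tests the old value, so cnt ends at -1. */
	cnt = 3;
	while (cnt--)
		;
	printf("post-decrement leaves cnt = %d\n", cnt);	/* -1 */

	/* Pre-decrement: tests the new value, so cnt ends at 0. */
	cnt = 3;
	while (--cnt)
		;
	printf("pre-decrement leaves cnt = %d\n", cnt);	/* 0 */
	return 0;
}
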
@@ -314,8 +314,8 @@ static blk_qc_t md_make_request(struct request_queue *q, struct bio *bio)
  */
 void mddev_suspend(struct mddev *mddev)
 {
-	BUG_ON(mddev->suspended);
-	mddev->suspended = 1;
+	if (mddev->suspended++)
+		return;
	synchronize_rcu();
	wait_event(mddev->sb_wait, atomic_read(&mddev->active_io) == 0);
	mddev->pers->quiesce(mddev, 1);

@@ -326,7 +326,8 @@ EXPORT_SYMBOL_GPL(mddev_suspend);

 void mddev_resume(struct mddev *mddev)
 {
-	mddev->suspended = 0;
+	if (--mddev->suspended)
+		return;
	wake_up(&mddev->sb_wait);
	mddev->pers->quiesce(mddev, 0);

@@ -1652,7 +1653,7 @@ static int super_1_validate(struct mddev *mddev, struct md_rdev *rdev)
		rdev->journal_tail = le64_to_cpu(sb->journal_tail);
		if (mddev->recovery_cp == MaxSector)
			set_bit(MD_JOURNAL_CLEAN, &mddev->flags);
-		rdev->raid_disk = mddev->raid_disks;
+		rdev->raid_disk = 0;
		break;
	default:
		rdev->saved_raid_disk = role;

@@ -2773,6 +2774,7 @@ slot_store(struct md_rdev *rdev, const char *buf, size_t len)
		/* Activating a spare .. or possibly reactivating
		 * if we ever get bitmaps working here.
		 */
+		int err;

		if (rdev->raid_disk != -1)
			return -EBUSY;

@@ -2794,9 +2796,15 @@ slot_store(struct md_rdev *rdev, const char *buf, size_t len)
		rdev->saved_raid_disk = -1;
		clear_bit(In_sync, &rdev->flags);
		clear_bit(Bitmap_sync, &rdev->flags);
-		remove_and_add_spares(rdev->mddev, rdev);
-		if (rdev->raid_disk == -1)
-			return -EBUSY;
+		err = rdev->mddev->pers->
+			hot_add_disk(rdev->mddev, rdev);
+		if (err) {
+			rdev->raid_disk = -1;
+			return err;
+		} else
+			sysfs_notify_dirent_safe(rdev->sysfs_state);
		if (sysfs_link_rdev(rdev->mddev, rdev))
			/* failure here is OK */;
		/* don't wakeup anyone, leave that to userspace. */
	} else {
		if (slot >= rdev->mddev->raid_disks &&

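The mddev_suspend()/mddev_resume() hunks turn a boolean into a nesting counter: only the outermost suspend quiesces, and only the matching outermost resume wakes the array. A toy single-threaded model of the counter logic (not the md code itself):

#include <stdio.h>

/* Only the first caller actually quiesces; only the last one resumes. */
static int suspended;

static void dev_suspend(void)
{
	if (suspended++)
		return;		/* an outer caller already suspended */
	printf("quiesce\n");
}

static void dev_resume(void)
{
	if (--suspended)
		return;		/* an outer suspend is still active */
	printf("resume\n");
}

int main(void)
{
	dev_suspend();	/* prints "quiesce" */
	dev_suspend();	/* nested: no-op */
	dev_resume();	/* nested: no-op */
	dev_resume();	/* prints "resume" */
	return 0;
}
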
@@ -566,7 +566,9 @@ static inline char * mdname (struct mddev * mddev)
 static inline int sysfs_link_rdev(struct mddev *mddev, struct md_rdev *rdev)
 {
	char nm[20];
-	if (!test_bit(Replacement, &rdev->flags) && mddev->kobj.sd) {
+	if (!test_bit(Replacement, &rdev->flags) &&
+	    !test_bit(Journal, &rdev->flags) &&
+	    mddev->kobj.sd) {
		sprintf(nm, "rd%d", rdev->raid_disk);
		return sysfs_create_link(&mddev->kobj, &rdev->kobj, nm);
	} else

@@ -576,7 +578,9 @@ static inline int sysfs_link_rdev(struct mddev *mddev, struct md_rdev *rdev)
 static inline void sysfs_unlink_rdev(struct mddev *mddev, struct md_rdev *rdev)
 {
	char nm[20];
-	if (!test_bit(Replacement, &rdev->flags) && mddev->kobj.sd) {
+	if (!test_bit(Replacement, &rdev->flags) &&
+	    !test_bit(Journal, &rdev->flags) &&
+	    mddev->kobj.sd) {
		sprintf(nm, "rd%d", rdev->raid_disk);
		sysfs_remove_link(&mddev->kobj, nm);
	}

@@ -1946,6 +1946,8 @@ static void sync_request_write(struct mddev *mddev, struct r10bio *r10_bio)

	first = i;
	fbio = r10_bio->devs[i].bio;
+	fbio->bi_iter.bi_size = r10_bio->sectors << 9;
+	fbio->bi_iter.bi_idx = 0;

	vcnt = (r10_bio->sectors + (PAGE_SIZE >> 9) - 1) >> (PAGE_SHIFT - 9);
	/* now find blocks with errors */

@@ -1989,7 +1991,7 @@ static void sync_request_write(struct mddev *mddev, struct r10bio *r10_bio)
		bio_reset(tbio);

		tbio->bi_vcnt = vcnt;
-		tbio->bi_iter.bi_size = r10_bio->sectors << 9;
+		tbio->bi_iter.bi_size = fbio->bi_iter.bi_size;
		tbio->bi_rw = WRITE;
		tbio->bi_private = r10_bio;
		tbio->bi_iter.bi_sector = r10_bio->devs[i].addr;

@@ -805,11 +805,11 @@ static void ivtv_init_struct2(struct ivtv *itv)
 {
	int i;

-	for (i = 0; i < IVTV_CARD_MAX_VIDEO_INPUTS - 1; i++)
+	for (i = 0; i < IVTV_CARD_MAX_VIDEO_INPUTS; i++)
		if (itv->card->video_inputs[i].video_type == 0)
			break;
	itv->nof_inputs = i;
-	for (i = 0; i < IVTV_CARD_MAX_AUDIO_INPUTS - 1; i++)
+	for (i = 0; i < IVTV_CARD_MAX_AUDIO_INPUTS; i++)
		if (itv->card->audio_inputs[i].audio_type == 0)
			break;
	itv->nof_audio_inputs = i;

@@ -134,7 +134,7 @@ struct airspy {
	int urbs_submitted;

	/* USB control message buffer */
-	#define BUF_SIZE 24
+	#define BUF_SIZE 128
	u8 buf[BUF_SIZE];

	/* Current configuration */

@@ -24,6 +24,15 @@
 #include <media/videobuf2-v4l2.h>
 #include <media/videobuf2-vmalloc.h>

+/*
+ * Used Avago MGA-81563 RF amplifier could be destroyed pretty easily with too
+ * strong signal or transmitting to bad antenna.
+ * Set RF gain control to 'grabbed' state by default for sure.
+ */
+static bool hackrf_enable_rf_gain_ctrl;
+module_param_named(enable_rf_gain_ctrl, hackrf_enable_rf_gain_ctrl, bool, 0644);
+MODULE_PARM_DESC(enable_rf_gain_ctrl, "enable RX/TX RF amplifier control (warn: could damage amplifier)");
+
 /* HackRF USB API commands (from HackRF Library) */
 enum {
	CMD_SET_TRANSCEIVER_MODE = 0x01,

@@ -1451,6 +1460,7 @@ static int hackrf_probe(struct usb_interface *intf,
		dev_err(dev->dev, "Could not initialize controls\n");
		goto err_v4l2_ctrl_handler_free_rx;
	}
+	v4l2_ctrl_grab(dev->rx_rf_gain, !hackrf_enable_rf_gain_ctrl);
	v4l2_ctrl_handler_setup(&dev->rx_ctrl_handler);

	/* Register controls for transmitter */

@@ -1471,6 +1481,7 @@ static int hackrf_probe(struct usb_interface *intf,
		dev_err(dev->dev, "Could not initialize controls\n");
		goto err_v4l2_ctrl_handler_free_tx;
	}
+	v4l2_ctrl_grab(dev->tx_rf_gain, !hackrf_enable_rf_gain_ctrl);
	v4l2_ctrl_handler_setup(&dev->tx_ctrl_handler);

	/* Register the v4l2_device structure */

@@ -1530,7 +1541,7 @@ static int hackrf_probe(struct usb_interface *intf,
 err_kfree:
	kfree(dev);
 err:
-	dev_dbg(dev->dev, "failed=%d\n", ret);
+	dev_dbg(&intf->dev, "failed=%d\n", ret);
	return ret;
 }

@@ -46,10 +46,18 @@ static int parse_ofpart_partitions(struct mtd_info *master,

	ofpart_node = of_get_child_by_name(mtd_node, "partitions");
	if (!ofpart_node) {
-		pr_warn("%s: 'partitions' subnode not found on %s. Trying to parse direct subnodes as partitions.\n",
-			master->name, mtd_node->full_name);
+		/*
+		 * We might get here even when ofpart isn't used at all (e.g.,
+		 * when using another parser), so don't be louder than
+		 * KERN_DEBUG
+		 */
+		pr_debug("%s: 'partitions' subnode not found on %s. Trying to parse direct subnodes as partitions.\n",
+			 master->name, mtd_node->full_name);
		ofpart_node = mtd_node;
		dedicated = false;
+	} else if (!of_device_is_compatible(ofpart_node, "fixed-partitions")) {
+		/* The 'partitions' subnode might be used by another parser */
+		return 0;
	}

	/* First count the subnodes */

@@ -1849,7 +1849,7 @@ static int xgbe_exit(struct xgbe_prv_data *pdata)
	usleep_range(10, 15);

	/* Poll Until Poll Condition */
-	while (count-- && XGMAC_IOREAD_BITS(pdata, DMA_MR, SWR))
+	while (--count && XGMAC_IOREAD_BITS(pdata, DMA_MR, SWR))
		usleep_range(500, 600);

	if (!count)

@@ -1873,7 +1873,7 @@ static int xgbe_flush_tx_queues(struct xgbe_prv_data *pdata)
	/* Poll Until Poll Condition */
	for (i = 0; i < pdata->tx_q_count; i++) {
		count = 2000;
-		while (count-- && XGMAC_MTL_IOREAD_BITS(pdata, i,
+		while (--count && XGMAC_MTL_IOREAD_BITS(pdata, i,
							MTL_Q_TQOMR, FTQ))
			usleep_range(500, 600);

@@ -289,6 +289,7 @@ static int xgene_enet_setup_tx_desc(struct xgene_enet_desc_ring *tx_ring,
				    struct sk_buff *skb)
 {
	struct device *dev = ndev_to_dev(tx_ring->ndev);
+	struct xgene_enet_pdata *pdata = netdev_priv(tx_ring->ndev);
	struct xgene_enet_raw_desc *raw_desc;
	__le64 *exp_desc = NULL, *exp_bufs = NULL;
	dma_addr_t dma_addr, pbuf_addr, *frag_dma_addr;

@@ -419,6 +420,7 @@ static int xgene_enet_setup_tx_desc(struct xgene_enet_desc_ring *tx_ring,
	raw_desc->m0 = cpu_to_le64(SET_VAL(LL, ll) | SET_VAL(NV, nv) |
				   SET_VAL(USERINFO, tx_ring->tail));
	tx_ring->cp_ring->cp_skb[tx_ring->tail] = skb;
+	pdata->tx_level += count;
	tx_ring->tail = tail;

	return count;

@@ -429,14 +431,13 @@ static netdev_tx_t xgene_enet_start_xmit(struct sk_buff *skb,
 {
	struct xgene_enet_pdata *pdata = netdev_priv(ndev);
	struct xgene_enet_desc_ring *tx_ring = pdata->tx_ring;
-	struct xgene_enet_desc_ring *cp_ring = tx_ring->cp_ring;
-	u32 tx_level, cq_level;
+	u32 tx_level = pdata->tx_level;
	int count;

-	tx_level = pdata->ring_ops->len(tx_ring);
-	cq_level = pdata->ring_ops->len(cp_ring);
-	if (unlikely(tx_level > pdata->tx_qcnt_hi ||
-		     cq_level > pdata->cp_qcnt_hi)) {
+	if (tx_level < pdata->txc_level)
+		tx_level += ((typeof(pdata->tx_level))~0U);
+
+	if ((tx_level - pdata->txc_level) > pdata->tx_qcnt_hi) {
		netif_stop_queue(ndev);
		return NETDEV_TX_BUSY;
	}

@@ -539,10 +540,13 @@ static int xgene_enet_process_ring(struct xgene_enet_desc_ring *ring,
	struct xgene_enet_raw_desc *raw_desc, *exp_desc;
	u16 head = ring->head;
	u16 slots = ring->slots - 1;
-	int ret, count = 0, processed = 0;
+	int ret, desc_count, count = 0, processed = 0;
+	bool is_completion;

	do {
		raw_desc = &ring->raw_desc[head];
+		desc_count = 0;
+		is_completion = false;
		exp_desc = NULL;
		if (unlikely(xgene_enet_is_desc_slot_empty(raw_desc)))
			break;

@@ -559,18 +563,24 @@ static int xgene_enet_process_ring(struct xgene_enet_desc_ring *ring,
			}
			dma_rmb();
			count++;
+			desc_count++;
		}
-		if (is_rx_desc(raw_desc))
+		if (is_rx_desc(raw_desc)) {
			ret = xgene_enet_rx_frame(ring, raw_desc);
-		else
+		} else {
			ret = xgene_enet_tx_completion(ring, raw_desc);
+			is_completion = true;
+		}
		xgene_enet_mark_desc_slot_empty(raw_desc);
		if (exp_desc)
			xgene_enet_mark_desc_slot_empty(exp_desc);

		head = (head + 1) & slots;
		count++;
+		desc_count++;
		processed++;
+		if (is_completion)
+			pdata->txc_level += desc_count;

		if (ret)
			break;

@@ -580,10 +590,8 @@ static int xgene_enet_process_ring(struct xgene_enet_desc_ring *ring,
		pdata->ring_ops->wr_cmd(ring, -count);
		ring->head = head;

-		if (netif_queue_stopped(ring->ndev)) {
-			if (pdata->ring_ops->len(ring) < pdata->cp_qcnt_low)
-				netif_wake_queue(ring->ndev);
-		}
+		if (netif_queue_stopped(ring->ndev))
+			netif_start_queue(ring->ndev);
	}

	return processed;

@@ -1033,9 +1041,7 @@ static int xgene_enet_create_desc_rings(struct net_device *ndev)
	pdata->tx_ring->cp_ring = cp_ring;
	pdata->tx_ring->dst_ring_num = xgene_enet_dst_ring_num(cp_ring);

-	pdata->tx_qcnt_hi = pdata->tx_ring->slots / 2;
-	pdata->cp_qcnt_hi = pdata->rx_ring->slots / 2;
-	pdata->cp_qcnt_low = pdata->cp_qcnt_hi / 2;
+	pdata->tx_qcnt_hi = pdata->tx_ring->slots - 128;

	return 0;

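tx_level and txc_level above are free-running u16 counters, so the producer normalizes tx_level before subtracting to stay correct across wrap-around. The wrap-safe distance computation on its own (made-up counter values; the +0xffff offset mirrors the driver's ~0U idiom):

#include <stdint.h>
#include <stdio.h>

/* Free-running 16-bit producer/consumer counters, as in the driver. */
static uint32_t pending(uint16_t tx_level, uint16_t txc_level)
{
	uint32_t tx = tx_level;

	/* If the producer counter wrapped past the consumer counter,
	 * shift it up so the subtraction stays meaningful. */
	if (tx < txc_level)
		tx += (uint16_t)~0U;	/* 0xffff */

	return tx - txc_level;
}

int main(void)
{
	printf("%u\n", pending(100, 50));	/* 50 descriptors in flight */
	printf("%u\n", pending(10, 65500));	/* wrapped: 45 in flight */
	return 0;
}
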
@@ -155,11 +155,11 @@ struct xgene_enet_pdata {
	enum xgene_enet_id enet_id;
	struct xgene_enet_desc_ring *tx_ring;
	struct xgene_enet_desc_ring *rx_ring;
+	u16 tx_level;
+	u16 txc_level;
	char *dev_name;
	u32 rx_buff_cnt;
	u32 tx_qcnt_hi;
-	u32 cp_qcnt_hi;
-	u32 cp_qcnt_low;
	u32 rx_irq;
	u32 txc_irq;
	u8 cq_cnt;

@@ -1016,13 +1016,12 @@ static int atl1c_setup_ring_resources(struct atl1c_adapter *adapter)
		sizeof(struct atl1c_recv_ret_status) * rx_desc_count +
		8 * 4;

-	ring_header->desc = pci_alloc_consistent(pdev, ring_header->size,
-						 &ring_header->dma);
+	ring_header->desc = dma_zalloc_coherent(&pdev->dev, ring_header->size,
+						&ring_header->dma, GFP_KERNEL);
	if (unlikely(!ring_header->desc)) {
-		dev_err(&pdev->dev, "pci_alloc_consistend failed\n");
+		dev_err(&pdev->dev, "could not get memory for DMA buffer\n");
		goto err_nomem;
	}
-	memset(ring_header->desc, 0, ring_header->size);
	/* init TPD ring */

	tpd_ring[0].dma = roundup(ring_header->dma, 8);

@@ -13,6 +13,7 @@ if NET_VENDOR_AURORA

 config AURORA_NB8800
	tristate "Aurora AU-NB8800 support"
+	depends on HAS_DMA
	select PHYLIB
	help
	  Support for the AU-NB8800 gigabit Ethernet controller.

@@ -2693,17 +2693,16 @@ static int bnxt_hwrm_func_drv_rgtr(struct bnxt *bp)
	req.ver_upd = DRV_VER_UPD;

	if (BNXT_PF(bp)) {
-		unsigned long vf_req_snif_bmap[4];
+		DECLARE_BITMAP(vf_req_snif_bmap, 256);
		u32 *data = (u32 *)vf_req_snif_bmap;

-		memset(vf_req_snif_bmap, 0, 32);
+		memset(vf_req_snif_bmap, 0, sizeof(vf_req_snif_bmap));
		for (i = 0; i < ARRAY_SIZE(bnxt_vf_req_snif); i++)
			__set_bit(bnxt_vf_req_snif[i], vf_req_snif_bmap);

-		for (i = 0; i < 8; i++) {
-			req.vf_req_fwd[i] = cpu_to_le32(*data);
-			data++;
-		}
+		for (i = 0; i < 8; i++)
+			req.vf_req_fwd[i] = cpu_to_le32(data[i]);

		req.enables |=
			cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_VF_REQ_FWD);
	}

@@ -4603,7 +4602,7 @@ static int __bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
		bp->nge_port_cnt = 1;
	}

-	bp->state = BNXT_STATE_OPEN;
+	set_bit(BNXT_STATE_OPEN, &bp->state);
	bnxt_enable_int(bp);
	/* Enable TX queues */
	bnxt_tx_enable(bp);

@@ -4679,8 +4678,10 @@ int bnxt_close_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
	/* Change device state to avoid TX queue wake up's */
	bnxt_tx_disable(bp);

-	bp->state = BNXT_STATE_CLOSED;
-	cancel_work_sync(&bp->sp_task);
+	clear_bit(BNXT_STATE_OPEN, &bp->state);
+	smp_mb__after_atomic();
+	while (test_bit(BNXT_STATE_IN_SP_TASK, &bp->state))
+		msleep(20);

	/* Flush rings before disabling interrupts */
	bnxt_shutdown_nic(bp, irq_re_init);

@@ -5030,8 +5031,10 @@ static void bnxt_dbg_dump_states(struct bnxt *bp)
 static void bnxt_reset_task(struct bnxt *bp)
 {
	bnxt_dbg_dump_states(bp);
-	if (netif_running(bp->dev))
-		bnxt_tx_disable(bp); /* prevent tx timout again */
+	if (netif_running(bp->dev)) {
+		bnxt_close_nic(bp, false, false);
+		bnxt_open_nic(bp, false, false);
+	}
 }

 static void bnxt_tx_timeout(struct net_device *dev)

@@ -5081,8 +5084,12 @@ static void bnxt_sp_task(struct work_struct *work)
	struct bnxt *bp = container_of(work, struct bnxt, sp_task);
	int rc;

-	if (bp->state != BNXT_STATE_OPEN)
+	set_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
+	smp_mb__after_atomic();
+	if (!test_bit(BNXT_STATE_OPEN, &bp->state)) {
+		clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
		return;
+	}

	if (test_and_clear_bit(BNXT_RX_MASK_SP_EVENT, &bp->sp_event))
		bnxt_cfg_rx_mode(bp);

@@ -5106,8 +5113,19 @@ static void bnxt_sp_task(struct work_struct *work)
		bnxt_hwrm_tunnel_dst_port_free(
			bp, TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN);
	}
-	if (test_and_clear_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event))
+	if (test_and_clear_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event)) {
+		/* bnxt_reset_task() calls bnxt_close_nic() which waits
+		 * for BNXT_STATE_IN_SP_TASK to clear.
+		 */
+		clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
+		rtnl_lock();
		bnxt_reset_task(bp);
+		set_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
+		rtnl_unlock();
+	}
+
+	smp_mb__before_atomic();
+	clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
 }

 static int bnxt_init_board(struct pci_dev *pdev, struct net_device *dev)

@@ -5186,7 +5204,7 @@ static int bnxt_init_board(struct pci_dev *pdev, struct net_device *dev)
	bp->timer.function = bnxt_timer;
	bp->current_interval = BNXT_TIMER_INTERVAL;

-	bp->state = BNXT_STATE_CLOSED;
+	clear_bit(BNXT_STATE_OPEN, &bp->state);

	return 0;

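The bnxt state rework above replaces a plain int state with atomic bit flags so the close path and the slow-path worker can hand off without a lock: the worker advertises itself with IN_SP_TASK and re-checks OPEN, while close clears OPEN and waits for IN_SP_TASK to drain. A freestanding approximation using C11 atomics in place of the kernel's set_bit()/clear_bit()/test_bit() (names mirror the driver, but this is a sketch, not the driver code):

#include <stdatomic.h>
#include <stdio.h>

#define STATE_OPEN		0
#define STATE_IN_SP_TASK	1

static atomic_ulong state;

static void set_bit_(int nr)   { atomic_fetch_or(&state, 1UL << nr); }
static void clear_bit_(int nr) { atomic_fetch_and(&state, ~(1UL << nr)); }
static int  test_bit_(int nr)  { return (int)((atomic_load(&state) >> nr) & 1); }

static void sp_task(void)
{
	set_bit_(STATE_IN_SP_TASK);
	if (!test_bit_(STATE_OPEN)) {	/* closed underneath us? */
		clear_bit_(STATE_IN_SP_TASK);
		puts("sp_task: device closed, bailing out");
		return;
	}
	puts("sp_task: doing slow-path work");
	clear_bit_(STATE_IN_SP_TASK);
}

int main(void)
{
	set_bit_(STATE_OPEN);	/* open path */
	sp_task();		/* runs normally */

	clear_bit_(STATE_OPEN);	/* close path: new workers must bail */
	while (test_bit_(STATE_IN_SP_TASK))
		;		/* close waits for in-flight worker (the driver sleeps) */
	sp_task();		/* bails out immediately */
	return 0;
}
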
@@ -925,9 +925,9 @@ struct bnxt {

	struct timer_list	timer;

-	int			state;
-#define BNXT_STATE_CLOSED	0
-#define BNXT_STATE_OPEN		1
+	unsigned long		state;
+#define BNXT_STATE_OPEN		0
+#define BNXT_STATE_IN_SP_TASK	1

	struct bnxt_irq	*irq_tbl;
	u8		mac_addr[ETH_ALEN];

Some files were not shown because too many files have changed in this diff.