Merge 5.5-rc6 into usb-next

We need the USB fixes in here as well.

Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
commit 845f081002
@@ -251,11 +251,11 @@ selectively from different subsystems.
 .. code-block:: c
 
     struct kcov_remote_arg {
-        unsigned        trace_mode;
-        unsigned        area_size;
-        unsigned        num_handles;
-        uint64_t        common_handle;
-        uint64_t        handles[0];
+        __u32           trace_mode;
+        __u32           area_size;
+        __u32           num_handles;
+        __aligned_u64   common_handle;
+        __aligned_u64   handles[0];
     };
 
     #define KCOV_INIT_TRACE            _IOR('c', 1, unsigned long)
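The structure above is passed to the KCOV_REMOTE_ENABLE ioctl. A minimal
user-space sketch, adapted from the kcov documentation; the handle values are
illustrative, error handling is abbreviated, and the ioctl numbers are assumed
to match the 5.5 uapi header:

	#include <fcntl.h>
	#include <stdio.h>
	#include <stdlib.h>
	#include <sys/ioctl.h>
	#include <sys/mman.h>
	#include <linux/types.h>

	struct kcov_remote_arg {
		__u32		trace_mode;
		__u32		area_size;
		__u32		num_handles;
		__aligned_u64	common_handle;
		__aligned_u64	handles[0];
	};

	#define KCOV_INIT_TRACE		_IOR('c', 1, unsigned long)
	#define KCOV_DISABLE		_IO('c', 101)
	#define KCOV_REMOTE_ENABLE	_IOW('c', 102, struct kcov_remote_arg)
	#define KCOV_TRACE_PC		0
	#define COVER_SIZE		(64 << 10)

	/* Handles encode a subsystem id in the top byte, an instance id below. */
	#define KCOV_SUBSYSTEM_COMMON	(0x00ull << 56)
	#define KCOV_SUBSYSTEM_USB	(0x01ull << 56)

	int main(void)
	{
		struct kcov_remote_arg *arg;
		unsigned long *cover;
		int fd;

		fd = open("/sys/kernel/debug/kcov", O_RDWR);
		if (fd == -1)
			return 1;
		if (ioctl(fd, KCOV_INIT_TRACE, COVER_SIZE))
			return 1;
		cover = mmap(NULL, COVER_SIZE * sizeof(unsigned long),
			     PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
		if (cover == MAP_FAILED)
			return 1;

		/* One extra handle besides the common one (USB bus 1 here). */
		arg = calloc(1, sizeof(*arg) + sizeof(__u64));
		arg->trace_mode = KCOV_TRACE_PC;
		arg->area_size = COVER_SIZE;
		arg->num_handles = 1;
		arg->common_handle = KCOV_SUBSYSTEM_COMMON + 0x42; /* arbitrary id */
		arg->handles[0] = KCOV_SUBSYSTEM_USB + 1;          /* USB bus number */
		if (ioctl(fd, KCOV_REMOTE_ENABLE, arg))
			return 1;

		/* ... trigger the kernel activity of interest here ... */

		printf("%lu PCs collected\n",
		       __atomic_load_n(&cover[0], __ATOMIC_RELAXED));
		ioctl(fd, KCOV_DISABLE, 0);
		return 0;
	}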
@@ -24,19 +24,16 @@ The wrapper can be run with:
 For more information on this wrapper (also called kunit_tool) checkout the
 :doc:`kunit-tool` page.
 
-Creating a kunitconfig
-======================
+Creating a .kunitconfig
+=======================
 The Python script is a thin wrapper around Kbuild. As such, it needs to be
-configured with a ``kunitconfig`` file. This file essentially contains the
+configured with a ``.kunitconfig`` file. This file essentially contains the
 regular Kernel config, with the specific test targets as well.
 
 .. code-block:: bash
-	git clone -b master https://kunit.googlesource.com/kunitconfig $PATH_TO_KUNITCONFIG_REPO
-	cd $PATH_TO_LINUX_REPO
-	ln -s $PATH_TO_KUNIT_CONFIG_REPO/kunitconfig kunitconfig
 
-You may want to add kunitconfig to your local gitignore.
+	cp arch/um/configs/kunit_defconfig .kunitconfig
 
 Verifying KUnit Works
 ---------------------
 
@@ -151,7 +148,7 @@ and the following to ``drivers/misc/Makefile``:
 
 	obj-$(CONFIG_MISC_EXAMPLE_TEST) += example-test.o
 
-Now add it to your ``kunitconfig``:
+Now add it to your ``.kunitconfig``:
 
 .. code-block:: none
@@ -18,8 +18,10 @@ Optional properties:
 - dma-names: should contain "tx" and "rx".
 - atmel,fifo-size: maximum number of data the RX and TX FIFOs can store for FIFO
   capable I2C controllers.
-- i2c-sda-hold-time-ns: TWD hold time, only available for "atmel,sama5d4-i2c"
-  and "atmel,sama5d2-i2c".
+- i2c-sda-hold-time-ns: TWD hold time, only available for:
+	"atmel,sama5d4-i2c",
+	"atmel,sama5d2-i2c",
+	"microchip,sam9x60-i2c".
 - Child nodes conforming to i2c bus binding
 
 Examples :
@@ -111,7 +111,7 @@ patternProperties:
       spi-rx-bus-width:
         allOf:
           - $ref: /schemas/types.yaml#/definitions/uint32
-          - enum: [ 1, 2, 4 ]
+          - enum: [ 1, 2, 4, 8 ]
           - default: 1
         description:
           Bus width to the SPI bus used for MISO.

@@ -123,7 +123,7 @@ patternProperties:
       spi-tx-bus-width:
         allOf:
           - $ref: /schemas/types.yaml#/definitions/uint32
-          - enum: [ 1, 2, 4 ]
+          - enum: [ 1, 2, 4, 8 ]
          - default: 1
         description:
           Bus width to the SPI bus used for MOSI.
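With the enum widened, a slave node may now declare octal (x8) data lines.
A hypothetical devicetree fragment; the controller label, compatible string
and frequency below are placeholders, not taken from this change:

	&qspi0 {
		flash@0 {
			compatible = "jedec,spi-nor";
			reg = <0>;
			spi-max-frequency = <100000000>;
			spi-tx-bus-width = <8>;	/* newly permitted value */
			spi-rx-bus-width = <8>;
		};
	};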
@@ -23,7 +23,7 @@
     |    openrisc: | TODO |
     |      parisc: | TODO |
     |     powerpc: |  ok  |
-    |       riscv: | TODO |
+    |       riscv: |  ok  |
     |        s390: |  ok  |
     |          sh: |  ok  |
     |       sparc: | TODO |
@@ -230,12 +230,6 @@ simultaneously on two ports. The driver checks the consistency of the schedules
 against this restriction and errors out when appropriate. Schedule analysis is
 needed to avoid this, which is outside the scope of the document.
 
-At the moment, the time-aware scheduler can only be triggered based on a
-standalone clock and not based on PTP time. This means the base-time argument
-from tc-taprio is ignored and the schedule starts right away. It also means it
-is more difficult to phase-align the scheduler with the other devices in the
-network.
-
 Device Tree bindings and board design
 =====================================
 
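The paragraph removed above referred to the base-time argument of tc-taprio.
For context, a time-aware schedule is installed with something along these
lines; the interface name, gate masks and intervals are made up for
illustration and are not part of this commit:

	tc qdisc replace dev swp2 parent root taprio \
		num_tc 8 \
		map 0 1 2 3 4 5 6 7 \
		queues 1@0 1@1 1@2 1@3 1@4 1@5 1@6 1@7 \
		base-time 1578400000000000000 \
		sched-entry S 01 100000 \
		sched-entry S fe 100000 \
		flags 2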
@@ -603,7 +603,7 @@ tcp_synack_retries - INTEGER
 	with the current initial RTO of 1second. With this the final timeout
 	for a passive TCP connection will happen after 63seconds.
 
-tcp_syncookies - BOOLEAN
+tcp_syncookies - INTEGER
 	Only valid when the kernel was compiled with CONFIG_SYN_COOKIES
 	Send out syncookies when the syn backlog queue of a socket
 	overflows. This is to prevent against the common 'SYN flood attack'
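The knob itself is unchanged; only its documented type is corrected. Assuming
a standard procps sysctl, it can be inspected or set with:

	sysctl net.ipv4.tcp_syncookies        # print the current value
	sysctl -w net.ipv4.tcp_syncookies=1   # send cookies when the backlog overflows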
@@ -34,8 +34,8 @@ the names, the ``net`` tree is for fixes to existing code already in the
 mainline tree from Linus, and ``net-next`` is where the new code goes
 for the future release. You can find the trees here:
 
-- https://git.kernel.org/pub/scm/linux/kernel/git/davem/net.git
-- https://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next.git
+- https://git.kernel.org/pub/scm/linux/kernel/git/netdev/net.git
+- https://git.kernel.org/pub/scm/linux/kernel/git/netdev/net-next.git
 
 Q: How often do changes from these trees make it to the mainline Linus tree?
 ----------------------------------------------------------------------------
@@ -60,6 +60,7 @@ lack of a better place.
    volatile-considered-harmful
    botching-up-ioctls
    clang-format
+   ../riscv/patch-acceptance
 
 .. only:: subproject and html
 
@@ -7,6 +7,7 @@ RISC-V architecture
 
     boot-image-header
     pmu
+    patch-acceptance
 
 .. only:: subproject and html
 
@@ -0,0 +1,35 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+arch/riscv maintenance guidelines for developers
+================================================
+
+Overview
+--------
+The RISC-V instruction set architecture is developed in the open:
+in-progress drafts are available for all to review and to experiment
+with implementations. New module or extension drafts can change
+during the development process - sometimes in ways that are
+incompatible with previous drafts. This flexibility can present a
+challenge for RISC-V Linux maintenance. Linux maintainers disapprove
+of churn, and the Linux development process prefers well-reviewed and
+tested code over experimental code. We wish to extend these same
+principles to the RISC-V-related code that will be accepted for
+inclusion in the kernel.
+
+Submit Checklist Addendum
+-------------------------
+We'll only accept patches for new modules or extensions if the
+specifications for those modules or extensions are listed as being
+"Frozen" or "Ratified" by the RISC-V Foundation. (Developers may, of
+course, maintain their own Linux kernel trees that contain code for
+any draft extensions that they wish.)
+
+Additionally, the RISC-V specification allows implementors to create
+their own custom extensions. These custom extensions aren't required
+to go through any review or ratification process by the RISC-V
+Foundation. To avoid the maintenance complexity and potential
+performance impact of adding kernel code for implementor-specific
+RISC-V extensions, we'll only to accept patches for extensions that
+have been officially frozen or ratified by the RISC-V Foundation.
+(Implementors, may, of course, maintain their own Linux kernel trees
+containing code for any custom extensions that they wish.)
MAINTAINERS (17 lines changed)

@@ -771,6 +771,8 @@ F: drivers/thermal/thermal_mmio.c
 
 AMAZON ETHERNET DRIVERS
 M:	Netanel Belgazal <netanel@amazon.com>
+M:	Arthur Kiyanovski <akiyano@amazon.com>
+R:	Guy Tzalik <gtzalik@amazon.com>
 R:	Saeed Bishara <saeedb@amazon.com>
 R:	Zorik Machulsky <zorik@amazon.com>
 L:	netdev@vger.kernel.org
@@ -7034,6 +7036,7 @@ L: linux-acpi@vger.kernel.org
 S:	Maintained
 F:	Documentation/firmware-guide/acpi/gpio-properties.rst
 F:	drivers/gpio/gpiolib-acpi.c
+F:	drivers/gpio/gpiolib-acpi.h
 
 GPIO IR Transmitter
 M:	Sean Young <sean@mess.org>
@@ -11457,8 +11460,8 @@ M: "David S. Miller" <davem@davemloft.net>
 L:	netdev@vger.kernel.org
 W:	http://www.linuxfoundation.org/en/Net
 Q:	http://patchwork.ozlabs.org/project/netdev/list/
-T:	git git://git.kernel.org/pub/scm/linux/kernel/git/davem/net.git
-T:	git git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next.git
+T:	git git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net.git
+T:	git git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net-next.git
 S:	Odd Fixes
 F:	Documentation/devicetree/bindings/net/
 F:	drivers/net/
@@ -11499,8 +11502,8 @@ M: "David S. Miller" <davem@davemloft.net>
 L:	netdev@vger.kernel.org
 W:	http://www.linuxfoundation.org/en/Net
 Q:	http://patchwork.ozlabs.org/project/netdev/list/
-T:	git git://git.kernel.org/pub/scm/linux/kernel/git/davem/net.git
-T:	git git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next.git
+T:	git git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net.git
+T:	git git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net-next.git
 B:	mailto:netdev@vger.kernel.org
 S:	Maintained
 F:	net/
@@ -11545,7 +11548,7 @@ M: "David S. Miller" <davem@davemloft.net>
 M:	Alexey Kuznetsov <kuznet@ms2.inr.ac.ru>
 M:	Hideaki YOSHIFUJI <yoshfuji@linux-ipv6.org>
 L:	netdev@vger.kernel.org
-T:	git git://git.kernel.org/pub/scm/linux/kernel/git/davem/net.git
+T:	git git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net.git
 S:	Maintained
 F:	net/ipv4/
 F:	net/ipv6/
@@ -13676,7 +13679,6 @@ F: drivers/net/ethernet/qualcomm/emac/
 
 QUALCOMM ETHQOS ETHERNET DRIVER
 M:	Vinod Koul <vkoul@kernel.org>
-M:	Niklas Cassel <niklas.cassel@linaro.org>
 L:	netdev@vger.kernel.org
 S:	Maintained
 F:	drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c
@@ -14118,6 +14120,7 @@ M: Paul Walmsley <paul.walmsley@sifive.com>
 M:	Palmer Dabbelt <palmer@dabbelt.com>
 M:	Albert Ou <aou@eecs.berkeley.edu>
 L:	linux-riscv@lists.infradead.org
+P:	Documentation/riscv/patch-acceptance.rst
 T:	git git://git.kernel.org/pub/scm/linux/kernel/git/riscv/linux.git
 S:	Supported
 F:	arch/riscv/
@@ -14545,8 +14548,6 @@ F: include/linux/platform_data/spi-s3c64xx.h
 
 SAMSUNG SXGBE DRIVERS
 M:	Byungho An <bh74.an@samsung.com>
-M:	Girish K S <ks.giri@samsung.com>
-M:	Vipul Pandya <vipul.pandya@samsung.com>
 S:	Supported
 L:	netdev@vger.kernel.org
 F:	drivers/net/ethernet/samsung/sxgbe/
Makefile (2 lines changed)

@@ -2,7 +2,7 @@
 VERSION = 5
 PATCHLEVEL = 5
 SUBLEVEL = 0
-EXTRAVERSION = -rc3
+EXTRAVERSION = -rc6
 NAME = Kleptomaniac Octopus
 
 # *DOCUMENTATION*
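These four fields combine into the kernel release string, so a tree at this
point should identify itself as 5.5.0-rc6; for example (the output shown is
simply what the version fields above imply):

	$ make kernelversion
	5.5.0-rc6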
@ -162,7 +162,7 @@
|
|||
#endif
|
||||
|
||||
#ifdef CONFIG_ARC_HAS_ACCL_REGS
|
||||
ST2 r58, r59, PT_sp + 12
|
||||
ST2 r58, r59, PT_r58
|
||||
#endif
|
||||
|
||||
.endm
|
||||
|
@ -172,8 +172,8 @@
|
|||
|
||||
LD2 gp, fp, PT_r26 ; gp (r26), fp (r27)
|
||||
|
||||
ld r12, [sp, PT_sp + 4]
|
||||
ld r30, [sp, PT_sp + 8]
|
||||
ld r12, [sp, PT_r12]
|
||||
ld r30, [sp, PT_r30]
|
||||
|
||||
; Restore SP (into AUX_USER_SP) only if returning to U mode
|
||||
; - for K mode, it will be implicitly restored as stack is unwound
|
||||
|
@ -190,7 +190,7 @@
|
|||
#endif
|
||||
|
||||
#ifdef CONFIG_ARC_HAS_ACCL_REGS
|
||||
LD2 r58, r59, PT_sp + 12
|
||||
LD2 r58, r59, PT_r58
|
||||
#endif
|
||||
.endm
|
||||
|
||||
|
|
|
@ -8,7 +8,6 @@
|
|||
#define _ASM_ARC_HUGEPAGE_H
|
||||
|
||||
#include <linux/types.h>
|
||||
#define __ARCH_USE_5LEVEL_HACK
|
||||
#include <asm-generic/pgtable-nopmd.h>
|
||||
|
||||
static inline pte_t pmd_pte(pmd_t pmd)
|
||||
|
|
|
@ -66,7 +66,15 @@ int main(void)
|
|||
|
||||
DEFINE(SZ_CALLEE_REGS, sizeof(struct callee_regs));
|
||||
DEFINE(SZ_PT_REGS, sizeof(struct pt_regs));
|
||||
DEFINE(PT_user_r25, offsetof(struct pt_regs, user_r25));
|
||||
|
||||
#ifdef CONFIG_ISA_ARCV2
|
||||
OFFSET(PT_r12, pt_regs, r12);
|
||||
OFFSET(PT_r30, pt_regs, r30);
|
||||
#endif
|
||||
#ifdef CONFIG_ARC_HAS_ACCL_REGS
|
||||
OFFSET(PT_r58, pt_regs, r58);
|
||||
OFFSET(PT_r59, pt_regs, r59);
|
||||
#endif
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
|
|
@ -7,7 +7,7 @@
|
|||
menuconfig ARC_PLAT_EZNPS
|
||||
bool "\"EZchip\" ARC dev platform"
|
||||
select CPU_BIG_ENDIAN
|
||||
select CLKSRC_NPS
|
||||
select CLKSRC_NPS if !PHYS_ADDR_T_64BIT
|
||||
select EZNPS_GIC
|
||||
select EZCHIP_NPS_MANAGEMENT_ENET if ETHERNET
|
||||
help
|
||||
|
|
|
@ -72,6 +72,7 @@ config ARM
|
|||
select HAVE_ARM_SMCCC if CPU_V7
|
||||
select HAVE_EBPF_JIT if !CPU_ENDIAN_BE32
|
||||
select HAVE_CONTEXT_TRACKING
|
||||
select HAVE_COPY_THREAD_TLS
|
||||
select HAVE_C_RECORDMCOUNT
|
||||
select HAVE_DEBUG_KMEMLEAK
|
||||
select HAVE_DMA_CONTIGUOUS if MMU
|
||||
|
|
|
@ -226,8 +226,8 @@ void release_thread(struct task_struct *dead_task)
|
|||
asmlinkage void ret_from_fork(void) __asm__("ret_from_fork");
|
||||
|
||||
int
|
||||
copy_thread(unsigned long clone_flags, unsigned long stack_start,
|
||||
unsigned long stk_sz, struct task_struct *p)
|
||||
copy_thread_tls(unsigned long clone_flags, unsigned long stack_start,
|
||||
unsigned long stk_sz, struct task_struct *p, unsigned long tls)
|
||||
{
|
||||
struct thread_info *thread = task_thread_info(p);
|
||||
struct pt_regs *childregs = task_pt_regs(p);
|
||||
|
@ -261,7 +261,7 @@ copy_thread(unsigned long clone_flags, unsigned long stack_start,
|
|||
clear_ptrace_hw_breakpoint(p);
|
||||
|
||||
if (clone_flags & CLONE_SETTLS)
|
||||
thread->tp_value[0] = childregs->ARM_r3;
|
||||
thread->tp_value[0] = tls;
|
||||
thread->tp_value[1] = get_tpuser();
|
||||
|
||||
thread_notify(THREAD_NOTIFY_COPY, thread);
|
||||
|
|
|
@ -138,6 +138,7 @@ config ARM64
|
|||
select HAVE_CMPXCHG_DOUBLE
|
||||
select HAVE_CMPXCHG_LOCAL
|
||||
select HAVE_CONTEXT_TRACKING
|
||||
select HAVE_COPY_THREAD_TLS
|
||||
select HAVE_DEBUG_BUGVERBOSE
|
||||
select HAVE_DEBUG_KMEMLEAK
|
||||
select HAVE_DMA_CONTIGUOUS
|
||||
|
|
|
@ -85,13 +85,12 @@
|
|||
#define PAGE_SHARED_EXEC __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_RDONLY | PTE_NG | PTE_PXN | PTE_WRITE)
|
||||
#define PAGE_READONLY __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_RDONLY | PTE_NG | PTE_PXN | PTE_UXN)
|
||||
#define PAGE_READONLY_EXEC __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_RDONLY | PTE_NG | PTE_PXN)
|
||||
#define PAGE_EXECONLY __pgprot(_PAGE_DEFAULT | PTE_RDONLY | PTE_NG | PTE_PXN)
|
||||
|
||||
#define __P000 PAGE_NONE
|
||||
#define __P001 PAGE_READONLY
|
||||
#define __P010 PAGE_READONLY
|
||||
#define __P011 PAGE_READONLY
|
||||
#define __P100 PAGE_EXECONLY
|
||||
#define __P100 PAGE_READONLY_EXEC
|
||||
#define __P101 PAGE_READONLY_EXEC
|
||||
#define __P110 PAGE_READONLY_EXEC
|
||||
#define __P111 PAGE_READONLY_EXEC
|
||||
|
@ -100,7 +99,7 @@
|
|||
#define __S001 PAGE_READONLY
|
||||
#define __S010 PAGE_SHARED
|
||||
#define __S011 PAGE_SHARED
|
||||
#define __S100 PAGE_EXECONLY
|
||||
#define __S100 PAGE_READONLY_EXEC
|
||||
#define __S101 PAGE_READONLY_EXEC
|
||||
#define __S110 PAGE_SHARED_EXEC
|
||||
#define __S111 PAGE_SHARED_EXEC
|
||||
|
|
|
@ -96,12 +96,8 @@ extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
|
|||
#define pte_dirty(pte) (pte_sw_dirty(pte) || pte_hw_dirty(pte))
|
||||
|
||||
#define pte_valid(pte) (!!(pte_val(pte) & PTE_VALID))
|
||||
/*
|
||||
* Execute-only user mappings do not have the PTE_USER bit set. All valid
|
||||
* kernel mappings have the PTE_UXN bit set.
|
||||
*/
|
||||
#define pte_valid_not_user(pte) \
|
||||
((pte_val(pte) & (PTE_VALID | PTE_USER | PTE_UXN)) == (PTE_VALID | PTE_UXN))
|
||||
((pte_val(pte) & (PTE_VALID | PTE_USER)) == PTE_VALID)
|
||||
#define pte_valid_young(pte) \
|
||||
((pte_val(pte) & (PTE_VALID | PTE_AF)) == (PTE_VALID | PTE_AF))
|
||||
#define pte_valid_user(pte) \
|
||||
|
@ -117,8 +113,8 @@ extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
|
|||
|
||||
/*
|
||||
* p??_access_permitted() is true for valid user mappings (subject to the
|
||||
* write permission check) other than user execute-only which do not have the
|
||||
* PTE_USER bit set. PROT_NONE mappings do not have the PTE_VALID bit set.
|
||||
* write permission check). PROT_NONE mappings do not have the PTE_VALID bit
|
||||
* set.
|
||||
*/
|
||||
#define pte_access_permitted(pte, write) \
|
||||
(pte_valid_user(pte) && (!(write) || pte_write(pte)))
|
||||
|
|
|
@ -42,7 +42,6 @@
|
|||
#endif
|
||||
|
||||
#define __ARCH_WANT_SYS_CLONE
|
||||
#define __ARCH_WANT_SYS_CLONE3
|
||||
|
||||
#ifndef __COMPAT_SYSCALL_NR
|
||||
#include <uapi/asm/unistd.h>
|
||||
|
|
|
@ -19,5 +19,6 @@
|
|||
#define __ARCH_WANT_NEW_STAT
|
||||
#define __ARCH_WANT_SET_GET_RLIMIT
|
||||
#define __ARCH_WANT_TIME32_SYSCALLS
|
||||
#define __ARCH_WANT_SYS_CLONE3
|
||||
|
||||
#include <asm-generic/unistd.h>
|
||||
|
|
|
@ -360,8 +360,8 @@ int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
|
|||
|
||||
asmlinkage void ret_from_fork(void) asm("ret_from_fork");
|
||||
|
||||
int copy_thread(unsigned long clone_flags, unsigned long stack_start,
|
||||
unsigned long stk_sz, struct task_struct *p)
|
||||
int copy_thread_tls(unsigned long clone_flags, unsigned long stack_start,
|
||||
unsigned long stk_sz, struct task_struct *p, unsigned long tls)
|
||||
{
|
||||
struct pt_regs *childregs = task_pt_regs(p);
|
||||
|
||||
|
@ -394,11 +394,11 @@ int copy_thread(unsigned long clone_flags, unsigned long stack_start,
|
|||
}
|
||||
|
||||
/*
|
||||
* If a TLS pointer was passed to clone (4th argument), use it
|
||||
* for the new thread.
|
||||
* If a TLS pointer was passed to clone, use it for the new
|
||||
* thread.
|
||||
*/
|
||||
if (clone_flags & CLONE_SETTLS)
|
||||
p->thread.uw.tp_value = childregs->regs[3];
|
||||
p->thread.uw.tp_value = tls;
|
||||
} else {
|
||||
memset(childregs, 0, sizeof(struct pt_regs));
|
||||
childregs->pstate = PSR_MODE_EL1h;
|
||||
|
|
|
@ -445,7 +445,7 @@ static int __kprobes do_page_fault(unsigned long addr, unsigned int esr,
|
|||
const struct fault_info *inf;
|
||||
struct mm_struct *mm = current->mm;
|
||||
vm_fault_t fault, major = 0;
|
||||
unsigned long vm_flags = VM_READ | VM_WRITE;
|
||||
unsigned long vm_flags = VM_READ | VM_WRITE | VM_EXEC;
|
||||
unsigned int mm_flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
|
||||
|
||||
if (kprobe_page_fault(regs, esr))
|
||||
|
|
|
@ -1070,7 +1070,6 @@ void arch_remove_memory(int nid, u64 start, u64 size,
|
|||
{
|
||||
unsigned long start_pfn = start >> PAGE_SHIFT;
|
||||
unsigned long nr_pages = size >> PAGE_SHIFT;
|
||||
struct zone *zone;
|
||||
|
||||
/*
|
||||
* FIXME: Cleanup page tables (also in arch_add_memory() in case
|
||||
|
@ -1079,7 +1078,6 @@ void arch_remove_memory(int nid, u64 start, u64 size,
|
|||
* unplug. ARCH_ENABLE_MEMORY_HOTREMOVE must not be
|
||||
* unlocked yet.
|
||||
*/
|
||||
zone = page_zone(pfn_to_page(start_pfn));
|
||||
__remove_pages(zone, start_pfn, nr_pages, altmap);
|
||||
__remove_pages(start_pfn, nr_pages, altmap);
|
||||
}
|
||||
#endif
|
||||
|
|
|
@ -91,7 +91,7 @@ static inline void atomic_##op(int i, atomic_t *v) \
|
|||
"1: %0 = memw_locked(%1);\n" \
|
||||
" %0 = "#op "(%0,%2);\n" \
|
||||
" memw_locked(%1,P3)=%0;\n" \
|
||||
" if !P3 jump 1b;\n" \
|
||||
" if (!P3) jump 1b;\n" \
|
||||
: "=&r" (output) \
|
||||
: "r" (&v->counter), "r" (i) \
|
||||
: "memory", "p3" \
|
||||
|
@ -107,7 +107,7 @@ static inline int atomic_##op##_return(int i, atomic_t *v) \
|
|||
"1: %0 = memw_locked(%1);\n" \
|
||||
" %0 = "#op "(%0,%2);\n" \
|
||||
" memw_locked(%1,P3)=%0;\n" \
|
||||
" if !P3 jump 1b;\n" \
|
||||
" if (!P3) jump 1b;\n" \
|
||||
: "=&r" (output) \
|
||||
: "r" (&v->counter), "r" (i) \
|
||||
: "memory", "p3" \
|
||||
|
@ -124,7 +124,7 @@ static inline int atomic_fetch_##op(int i, atomic_t *v) \
|
|||
"1: %0 = memw_locked(%2);\n" \
|
||||
" %1 = "#op "(%0,%3);\n" \
|
||||
" memw_locked(%2,P3)=%1;\n" \
|
||||
" if !P3 jump 1b;\n" \
|
||||
" if (!P3) jump 1b;\n" \
|
||||
: "=&r" (output), "=&r" (val) \
|
||||
: "r" (&v->counter), "r" (i) \
|
||||
: "memory", "p3" \
|
||||
|
@ -173,7 +173,7 @@ static inline int atomic_fetch_add_unless(atomic_t *v, int a, int u)
|
|||
" }"
|
||||
" memw_locked(%2, p3) = %1;"
|
||||
" {"
|
||||
" if !p3 jump 1b;"
|
||||
" if (!p3) jump 1b;"
|
||||
" }"
|
||||
"2:"
|
||||
: "=&r" (__oldval), "=&r" (tmp)
|
||||
|
|
|
@ -38,7 +38,7 @@ static inline int test_and_clear_bit(int nr, volatile void *addr)
|
|||
"1: R12 = memw_locked(R10);\n"
|
||||
" { P0 = tstbit(R12,R11); R12 = clrbit(R12,R11); }\n"
|
||||
" memw_locked(R10,P1) = R12;\n"
|
||||
" {if !P1 jump 1b; %0 = mux(P0,#1,#0);}\n"
|
||||
" {if (!P1) jump 1b; %0 = mux(P0,#1,#0);}\n"
|
||||
: "=&r" (oldval)
|
||||
: "r" (addr), "r" (nr)
|
||||
: "r10", "r11", "r12", "p0", "p1", "memory"
|
||||
|
@ -62,7 +62,7 @@ static inline int test_and_set_bit(int nr, volatile void *addr)
|
|||
"1: R12 = memw_locked(R10);\n"
|
||||
" { P0 = tstbit(R12,R11); R12 = setbit(R12,R11); }\n"
|
||||
" memw_locked(R10,P1) = R12;\n"
|
||||
" {if !P1 jump 1b; %0 = mux(P0,#1,#0);}\n"
|
||||
" {if (!P1) jump 1b; %0 = mux(P0,#1,#0);}\n"
|
||||
: "=&r" (oldval)
|
||||
: "r" (addr), "r" (nr)
|
||||
: "r10", "r11", "r12", "p0", "p1", "memory"
|
||||
|
@ -88,7 +88,7 @@ static inline int test_and_change_bit(int nr, volatile void *addr)
|
|||
"1: R12 = memw_locked(R10);\n"
|
||||
" { P0 = tstbit(R12,R11); R12 = togglebit(R12,R11); }\n"
|
||||
" memw_locked(R10,P1) = R12;\n"
|
||||
" {if !P1 jump 1b; %0 = mux(P0,#1,#0);}\n"
|
||||
" {if (!P1) jump 1b; %0 = mux(P0,#1,#0);}\n"
|
||||
: "=&r" (oldval)
|
||||
: "r" (addr), "r" (nr)
|
||||
: "r10", "r11", "r12", "p0", "p1", "memory"
|
||||
|
@ -223,7 +223,7 @@ static inline int ffs(int x)
|
|||
int r;
|
||||
|
||||
asm("{ P0 = cmp.eq(%1,#0); %0 = ct0(%1);}\n"
|
||||
"{ if P0 %0 = #0; if !P0 %0 = add(%0,#1);}\n"
|
||||
"{ if (P0) %0 = #0; if (!P0) %0 = add(%0,#1);}\n"
|
||||
: "=&r" (r)
|
||||
: "r" (x)
|
||||
: "p0");
|
||||
|
|
|
@ -30,7 +30,7 @@ static inline unsigned long __xchg(unsigned long x, volatile void *ptr,
|
|||
__asm__ __volatile__ (
|
||||
"1: %0 = memw_locked(%1);\n" /* load into retval */
|
||||
" memw_locked(%1,P0) = %2;\n" /* store into memory */
|
||||
" if !P0 jump 1b;\n"
|
||||
" if (!P0) jump 1b;\n"
|
||||
: "=&r" (retval)
|
||||
: "r" (ptr), "r" (x)
|
||||
: "memory", "p0"
|
||||
|
|
|
@ -16,7 +16,7 @@
|
|||
/* For example: %1 = %4 */ \
|
||||
insn \
|
||||
"2: memw_locked(%3,p2) = %1;\n" \
|
||||
" if !p2 jump 1b;\n" \
|
||||
" if (!p2) jump 1b;\n" \
|
||||
" %1 = #0;\n" \
|
||||
"3:\n" \
|
||||
".section .fixup,\"ax\"\n" \
|
||||
|
@ -84,10 +84,10 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, u32 oldval,
|
|||
"1: %1 = memw_locked(%3)\n"
|
||||
" {\n"
|
||||
" p2 = cmp.eq(%1,%4)\n"
|
||||
" if !p2.new jump:NT 3f\n"
|
||||
" if (!p2.new) jump:NT 3f\n"
|
||||
" }\n"
|
||||
"2: memw_locked(%3,p2) = %5\n"
|
||||
" if !p2 jump 1b\n"
|
||||
" if (!p2) jump 1b\n"
|
||||
"3:\n"
|
||||
".section .fixup,\"ax\"\n"
|
||||
"4: %0 = #%6\n"
|
||||
|
|
|
@ -173,6 +173,7 @@ static inline void writel(u32 data, volatile void __iomem *addr)
|
|||
|
||||
void __iomem *ioremap(unsigned long phys_addr, unsigned long size);
|
||||
#define ioremap_nocache ioremap
|
||||
#define ioremap_uc(X, Y) ioremap((X), (Y))
|
||||
|
||||
|
||||
#define __raw_writel writel
|
||||
|
|
|
@ -30,9 +30,9 @@ static inline void arch_read_lock(arch_rwlock_t *lock)
|
|||
__asm__ __volatile__(
|
||||
"1: R6 = memw_locked(%0);\n"
|
||||
" { P3 = cmp.ge(R6,#0); R6 = add(R6,#1);}\n"
|
||||
" { if !P3 jump 1b; }\n"
|
||||
" { if (!P3) jump 1b; }\n"
|
||||
" memw_locked(%0,P3) = R6;\n"
|
||||
" { if !P3 jump 1b; }\n"
|
||||
" { if (!P3) jump 1b; }\n"
|
||||
:
|
||||
: "r" (&lock->lock)
|
||||
: "memory", "r6", "p3"
|
||||
|
@ -46,7 +46,7 @@ static inline void arch_read_unlock(arch_rwlock_t *lock)
|
|||
"1: R6 = memw_locked(%0);\n"
|
||||
" R6 = add(R6,#-1);\n"
|
||||
" memw_locked(%0,P3) = R6\n"
|
||||
" if !P3 jump 1b;\n"
|
||||
" if (!P3) jump 1b;\n"
|
||||
:
|
||||
: "r" (&lock->lock)
|
||||
: "memory", "r6", "p3"
|
||||
|
@ -61,7 +61,7 @@ static inline int arch_read_trylock(arch_rwlock_t *lock)
|
|||
__asm__ __volatile__(
|
||||
" R6 = memw_locked(%1);\n"
|
||||
" { %0 = #0; P3 = cmp.ge(R6,#0); R6 = add(R6,#1);}\n"
|
||||
" { if !P3 jump 1f; }\n"
|
||||
" { if (!P3) jump 1f; }\n"
|
||||
" memw_locked(%1,P3) = R6;\n"
|
||||
" { %0 = P3 }\n"
|
||||
"1:\n"
|
||||
|
@ -78,9 +78,9 @@ static inline void arch_write_lock(arch_rwlock_t *lock)
|
|||
__asm__ __volatile__(
|
||||
"1: R6 = memw_locked(%0)\n"
|
||||
" { P3 = cmp.eq(R6,#0); R6 = #-1;}\n"
|
||||
" { if !P3 jump 1b; }\n"
|
||||
" { if (!P3) jump 1b; }\n"
|
||||
" memw_locked(%0,P3) = R6;\n"
|
||||
" { if !P3 jump 1b; }\n"
|
||||
" { if (!P3) jump 1b; }\n"
|
||||
:
|
||||
: "r" (&lock->lock)
|
||||
: "memory", "r6", "p3"
|
||||
|
@ -94,7 +94,7 @@ static inline int arch_write_trylock(arch_rwlock_t *lock)
|
|||
__asm__ __volatile__(
|
||||
" R6 = memw_locked(%1)\n"
|
||||
" { %0 = #0; P3 = cmp.eq(R6,#0); R6 = #-1;}\n"
|
||||
" { if !P3 jump 1f; }\n"
|
||||
" { if (!P3) jump 1f; }\n"
|
||||
" memw_locked(%1,P3) = R6;\n"
|
||||
" %0 = P3;\n"
|
||||
"1:\n"
|
||||
|
@ -117,9 +117,9 @@ static inline void arch_spin_lock(arch_spinlock_t *lock)
|
|||
__asm__ __volatile__(
|
||||
"1: R6 = memw_locked(%0);\n"
|
||||
" P3 = cmp.eq(R6,#0);\n"
|
||||
" { if !P3 jump 1b; R6 = #1; }\n"
|
||||
" { if (!P3) jump 1b; R6 = #1; }\n"
|
||||
" memw_locked(%0,P3) = R6;\n"
|
||||
" { if !P3 jump 1b; }\n"
|
||||
" { if (!P3) jump 1b; }\n"
|
||||
:
|
||||
: "r" (&lock->lock)
|
||||
: "memory", "r6", "p3"
|
||||
|
@ -139,7 +139,7 @@ static inline unsigned int arch_spin_trylock(arch_spinlock_t *lock)
|
|||
__asm__ __volatile__(
|
||||
" R6 = memw_locked(%1);\n"
|
||||
" P3 = cmp.eq(R6,#0);\n"
|
||||
" { if !P3 jump 1f; R6 = #1; %0 = #0; }\n"
|
||||
" { if (!P3) jump 1f; R6 = #1; %0 = #0; }\n"
|
||||
" memw_locked(%1,P3) = R6;\n"
|
||||
" %0 = P3;\n"
|
||||
"1:\n"
|
||||
|
|
|
@ -11,8 +11,6 @@
|
|||
#include <linux/thread_info.h>
|
||||
#include <linux/module.h>
|
||||
|
||||
register unsigned long current_frame_pointer asm("r30");
|
||||
|
||||
struct stackframe {
|
||||
unsigned long fp;
|
||||
unsigned long rets;
|
||||
|
@ -30,7 +28,7 @@ void save_stack_trace(struct stack_trace *trace)
|
|||
|
||||
low = (unsigned long)task_stack_page(current);
|
||||
high = low + THREAD_SIZE;
|
||||
fp = current_frame_pointer;
|
||||
fp = (unsigned long)__builtin_frame_address(0);
|
||||
|
||||
while (fp >= low && fp <= (high - sizeof(*frame))) {
|
||||
frame = (struct stackframe *)fp;
|
||||
|
|
|
@ -369,7 +369,7 @@ ret_from_fork:
|
|||
R26.L = #LO(do_work_pending);
|
||||
R0 = #VM_INT_DISABLE;
|
||||
}
|
||||
if P0 jump check_work_pending
|
||||
if (P0) jump check_work_pending
|
||||
{
|
||||
R0 = R25;
|
||||
callr R24
|
||||
|
|
|
@ -689,9 +689,7 @@ void arch_remove_memory(int nid, u64 start, u64 size,
|
|||
{
|
||||
unsigned long start_pfn = start >> PAGE_SHIFT;
|
||||
unsigned long nr_pages = size >> PAGE_SHIFT;
|
||||
struct zone *zone;
|
||||
|
||||
zone = page_zone(pfn_to_page(start_pfn));
|
||||
__remove_pages(zone, start_pfn, nr_pages, altmap);
|
||||
__remove_pages(start_pfn, nr_pages, altmap);
|
||||
}
|
||||
#endif
|
||||
|
|
|
@ -47,7 +47,7 @@ config MIPS
|
|||
select HAVE_ARCH_TRACEHOOK
|
||||
select HAVE_ARCH_TRANSPARENT_HUGEPAGE if CPU_SUPPORTS_HUGEPAGES
|
||||
select HAVE_ASM_MODVERSIONS
|
||||
select HAVE_EBPF_JIT if (!CPU_MICROMIPS)
|
||||
select HAVE_EBPF_JIT if 64BIT && !CPU_MICROMIPS && TARGET_ISA_REV >= 2
|
||||
select HAVE_CONTEXT_TRACKING
|
||||
select HAVE_COPY_THREAD_TLS
|
||||
select HAVE_C_RECORDMCOUNT
|
||||
|
|
|
@ -29,6 +29,9 @@ KBUILD_AFLAGS := $(KBUILD_AFLAGS) -D__ASSEMBLY__ \
|
|||
-DBOOT_HEAP_SIZE=$(BOOT_HEAP_SIZE) \
|
||||
-DKERNEL_ENTRY=$(VMLINUX_ENTRY_ADDRESS)
|
||||
|
||||
# Prevents link failures: __sanitizer_cov_trace_pc() is not linked in.
|
||||
KCOV_INSTRUMENT := n
|
||||
|
||||
# decompressor objects (linked with vmlinuz)
|
||||
vmlinuzobjs-y := $(obj)/head.o $(obj)/decompress.o $(obj)/string.o
|
||||
|
||||
|
|
|
@ -15,7 +15,8 @@
|
|||
static inline int __pure __get_cpu_type(const int cpu_type)
|
||||
{
|
||||
switch (cpu_type) {
|
||||
#if defined(CONFIG_SYS_HAS_CPU_LOONGSON2EF)
|
||||
#if defined(CONFIG_SYS_HAS_CPU_LOONGSON2E) || \
|
||||
defined(CONFIG_SYS_HAS_CPU_LOONGSON2F)
|
||||
case CPU_LOONGSON2EF:
|
||||
#endif
|
||||
|
||||
|
|
|
@ -49,8 +49,26 @@ struct thread_info {
|
|||
.addr_limit = KERNEL_DS, \
|
||||
}
|
||||
|
||||
/* How to get the thread information struct from C. */
|
||||
/*
|
||||
* A pointer to the struct thread_info for the currently executing thread is
|
||||
* held in register $28/$gp.
|
||||
*
|
||||
* We declare __current_thread_info as a global register variable rather than a
|
||||
* local register variable within current_thread_info() because clang doesn't
|
||||
* support explicit local register variables.
|
||||
*
|
||||
* When building the VDSO we take care not to declare the global register
|
||||
* variable because this causes GCC to not preserve the value of $28/$gp in
|
||||
* functions that change its value (which is common in the PIC VDSO when
|
||||
* accessing the GOT). Since the VDSO shouldn't be accessing
|
||||
* __current_thread_info anyway we declare it extern in order to cause a link
|
||||
* failure if it's referenced.
|
||||
*/
|
||||
#ifdef __VDSO__
|
||||
extern struct thread_info *__current_thread_info;
|
||||
#else
|
||||
register struct thread_info *__current_thread_info __asm__("$28");
|
||||
#endif
|
||||
|
||||
static inline struct thread_info *current_thread_info(void)
|
||||
{
|
||||
|
|
|
@ -26,8 +26,6 @@
|
|||
|
||||
#define __VDSO_USE_SYSCALL ULLONG_MAX
|
||||
|
||||
#ifdef CONFIG_MIPS_CLOCK_VSYSCALL
|
||||
|
||||
static __always_inline long gettimeofday_fallback(
|
||||
struct __kernel_old_timeval *_tv,
|
||||
struct timezone *_tz)
|
||||
|
@ -48,17 +46,6 @@ static __always_inline long gettimeofday_fallback(
|
|||
return error ? -ret : ret;
|
||||
}
|
||||
|
||||
#else
|
||||
|
||||
static __always_inline long gettimeofday_fallback(
|
||||
struct __kernel_old_timeval *_tv,
|
||||
struct timezone *_tz)
|
||||
{
|
||||
return -1;
|
||||
}
|
||||
|
||||
#endif
|
||||
|
||||
static __always_inline long clock_gettime_fallback(
|
||||
clockid_t _clkid,
|
||||
struct __kernel_timespec *_ts)
|
||||
|
|
|
@ -50,6 +50,25 @@ static int __init_cache_level(unsigned int cpu)
|
|||
return 0;
|
||||
}
|
||||
|
||||
static void fill_cpumask_siblings(int cpu, cpumask_t *cpu_map)
|
||||
{
|
||||
int cpu1;
|
||||
|
||||
for_each_possible_cpu(cpu1)
|
||||
if (cpus_are_siblings(cpu, cpu1))
|
||||
cpumask_set_cpu(cpu1, cpu_map);
|
||||
}
|
||||
|
||||
static void fill_cpumask_cluster(int cpu, cpumask_t *cpu_map)
|
||||
{
|
||||
int cpu1;
|
||||
int cluster = cpu_cluster(&cpu_data[cpu]);
|
||||
|
||||
for_each_possible_cpu(cpu1)
|
||||
if (cpu_cluster(&cpu_data[cpu1]) == cluster)
|
||||
cpumask_set_cpu(cpu1, cpu_map);
|
||||
}
|
||||
|
||||
static int __populate_cache_leaves(unsigned int cpu)
|
||||
{
|
||||
struct cpuinfo_mips *c = ¤t_cpu_data;
|
||||
|
@ -57,14 +76,20 @@ static int __populate_cache_leaves(unsigned int cpu)
|
|||
struct cacheinfo *this_leaf = this_cpu_ci->info_list;
|
||||
|
||||
if (c->icache.waysize) {
|
||||
/* L1 caches are per core */
|
||||
fill_cpumask_siblings(cpu, &this_leaf->shared_cpu_map);
|
||||
populate_cache(dcache, this_leaf, 1, CACHE_TYPE_DATA);
|
||||
fill_cpumask_siblings(cpu, &this_leaf->shared_cpu_map);
|
||||
populate_cache(icache, this_leaf, 1, CACHE_TYPE_INST);
|
||||
} else {
|
||||
populate_cache(dcache, this_leaf, 1, CACHE_TYPE_UNIFIED);
|
||||
}
|
||||
|
||||
if (c->scache.waysize)
|
||||
if (c->scache.waysize) {
|
||||
/* L2 cache is per cluster */
|
||||
fill_cpumask_cluster(cpu, &this_leaf->shared_cpu_map);
|
||||
populate_cache(scache, this_leaf, 2, CACHE_TYPE_UNIFIED);
|
||||
}
|
||||
|
||||
if (c->tcache.waysize)
|
||||
populate_cache(tcache, this_leaf, 3, CACHE_TYPE_UNIFIED);
|
||||
|
|
|
@ -1804,7 +1804,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
|
|||
unsigned int image_size;
|
||||
u8 *image_ptr;
|
||||
|
||||
if (!prog->jit_requested || MIPS_ISA_REV < 2)
|
||||
if (!prog->jit_requested)
|
||||
return prog;
|
||||
|
||||
tmp = bpf_jit_blind_constants(prog);
|
||||
|
|
|
@ -17,12 +17,22 @@ int __vdso_clock_gettime(clockid_t clock,
|
|||
return __cvdso_clock_gettime32(clock, ts);
|
||||
}
|
||||
|
||||
#ifdef CONFIG_MIPS_CLOCK_VSYSCALL
|
||||
|
||||
/*
|
||||
* This is behind the ifdef so that we don't provide the symbol when there's no
|
||||
* possibility of there being a usable clocksource, because there's nothing we
|
||||
* can do without it. When libc fails the symbol lookup it should fall back on
|
||||
* the standard syscall path.
|
||||
*/
|
||||
int __vdso_gettimeofday(struct __kernel_old_timeval *tv,
|
||||
struct timezone *tz)
|
||||
{
|
||||
return __cvdso_gettimeofday(tv, tz);
|
||||
}
|
||||
|
||||
#endif /* CONFIG_MIPS_CLOCK_VSYSCALL */
|
||||
|
||||
int __vdso_clock_getres(clockid_t clock_id,
|
||||
struct old_timespec32 *res)
|
||||
{
|
||||
|
@ -43,12 +53,22 @@ int __vdso_clock_gettime(clockid_t clock,
|
|||
return __cvdso_clock_gettime(clock, ts);
|
||||
}
|
||||
|
||||
#ifdef CONFIG_MIPS_CLOCK_VSYSCALL
|
||||
|
||||
/*
|
||||
* This is behind the ifdef so that we don't provide the symbol when there's no
|
||||
* possibility of there being a usable clocksource, because there's nothing we
|
||||
* can do without it. When libc fails the symbol lookup it should fall back on
|
||||
* the standard syscall path.
|
||||
*/
|
||||
int __vdso_gettimeofday(struct __kernel_old_timeval *tv,
|
||||
struct timezone *tz)
|
||||
{
|
||||
return __cvdso_gettimeofday(tv, tz);
|
||||
}
|
||||
|
||||
#endif /* CONFIG_MIPS_CLOCK_VSYSCALL */
|
||||
|
||||
int __vdso_clock_getres(clockid_t clock_id,
|
||||
struct __kernel_timespec *res)
|
||||
{
|
||||
|
|
|
@ -62,6 +62,7 @@ config PARISC
|
|||
select HAVE_FTRACE_MCOUNT_RECORD if HAVE_DYNAMIC_FTRACE
|
||||
select HAVE_KPROBES_ON_FTRACE
|
||||
select HAVE_DYNAMIC_FTRACE_WITH_REGS
|
||||
select HAVE_COPY_THREAD_TLS
|
||||
|
||||
help
|
||||
The PA-RISC microprocessor is designed by Hewlett-Packard and used
|
||||
|
|
|
@ -208,8 +208,8 @@ arch_initcall(parisc_idle_init);
|
|||
* Copy architecture-specific thread state
|
||||
*/
|
||||
int
|
||||
copy_thread(unsigned long clone_flags, unsigned long usp,
|
||||
unsigned long kthread_arg, struct task_struct *p)
|
||||
copy_thread_tls(unsigned long clone_flags, unsigned long usp,
|
||||
unsigned long kthread_arg, struct task_struct *p, unsigned long tls)
|
||||
{
|
||||
struct pt_regs *cregs = &(p->thread.regs);
|
||||
void *stack = task_stack_page(p);
|
||||
|
@ -254,9 +254,9 @@ copy_thread(unsigned long clone_flags, unsigned long usp,
|
|||
cregs->ksp = (unsigned long)stack + THREAD_SZ_ALGN + FRAME_SIZE;
|
||||
cregs->kpc = (unsigned long) &child_return;
|
||||
|
||||
/* Setup thread TLS area from the 4th parameter in clone */
|
||||
/* Setup thread TLS area */
|
||||
if (clone_flags & CLONE_SETTLS)
|
||||
cregs->cr27 = cregs->gr[23];
|
||||
cregs->cr27 = tls;
|
||||
}
|
||||
|
||||
return 0;
|
||||
|
|
|
@ -15,6 +15,7 @@
|
|||
*
|
||||
* (the type definitions are in asm/spinlock_types.h)
|
||||
*/
|
||||
#include <linux/jump_label.h>
|
||||
#include <linux/irqflags.h>
|
||||
#ifdef CONFIG_PPC64
|
||||
#include <asm/paca.h>
|
||||
|
|
|
@ -151,10 +151,9 @@ void __ref arch_remove_memory(int nid, u64 start, u64 size,
|
|||
{
|
||||
unsigned long start_pfn = start >> PAGE_SHIFT;
|
||||
unsigned long nr_pages = size >> PAGE_SHIFT;
|
||||
struct page *page = pfn_to_page(start_pfn) + vmem_altmap_offset(altmap);
|
||||
int ret;
|
||||
|
||||
__remove_pages(page_zone(page), start_pfn, nr_pages, altmap);
|
||||
__remove_pages(start_pfn, nr_pages, altmap);
|
||||
|
||||
/* Remove htab bolted mappings for this section of memory */
|
||||
start = (unsigned long)__va(start);
|
||||
|
|
|
@ -50,7 +50,7 @@ static void slice_print_mask(const char *label, const struct slice_mask *mask) {
|
|||
|
||||
#endif
|
||||
|
||||
static inline bool slice_addr_is_low(unsigned long addr)
|
||||
static inline notrace bool slice_addr_is_low(unsigned long addr)
|
||||
{
|
||||
u64 tmp = (u64)addr;
|
||||
|
||||
|
@ -659,7 +659,7 @@ unsigned long arch_get_unmapped_area_topdown(struct file *filp,
|
|||
mm_ctx_user_psize(¤t->mm->context), 1);
|
||||
}
|
||||
|
||||
unsigned int get_slice_psize(struct mm_struct *mm, unsigned long addr)
|
||||
unsigned int notrace get_slice_psize(struct mm_struct *mm, unsigned long addr)
|
||||
{
|
||||
unsigned char *psizes;
|
||||
int index, mask_index;
|
||||
|
|
|
@ -64,6 +64,8 @@ config RISCV
|
|||
select SPARSEMEM_STATIC if 32BIT
|
||||
select ARCH_WANT_DEFAULT_TOPDOWN_MMAP_LAYOUT if MMU
|
||||
select HAVE_ARCH_MMAP_RND_BITS if MMU
|
||||
select ARCH_HAS_GCOV_PROFILE_ALL
|
||||
select HAVE_COPY_THREAD_TLS
|
||||
|
||||
config ARCH_MMAP_RND_BITS_MIN
|
||||
default 18 if 64BIT
|
||||
|
|
|
@ -54,6 +54,7 @@ cpu1: cpu@1 {
|
|||
reg = <1>;
|
||||
riscv,isa = "rv64imafdc";
|
||||
tlb-split;
|
||||
next-level-cache = <&l2cache>;
|
||||
cpu1_intc: interrupt-controller {
|
||||
#interrupt-cells = <1>;
|
||||
compatible = "riscv,cpu-intc";
|
||||
|
@ -77,6 +78,7 @@ cpu2: cpu@2 {
|
|||
reg = <2>;
|
||||
riscv,isa = "rv64imafdc";
|
||||
tlb-split;
|
||||
next-level-cache = <&l2cache>;
|
||||
cpu2_intc: interrupt-controller {
|
||||
#interrupt-cells = <1>;
|
||||
compatible = "riscv,cpu-intc";
|
||||
|
@ -100,6 +102,7 @@ cpu3: cpu@3 {
|
|||
reg = <3>;
|
||||
riscv,isa = "rv64imafdc";
|
||||
tlb-split;
|
||||
next-level-cache = <&l2cache>;
|
||||
cpu3_intc: interrupt-controller {
|
||||
#interrupt-cells = <1>;
|
||||
compatible = "riscv,cpu-intc";
|
||||
|
@ -123,6 +126,7 @@ cpu4: cpu@4 {
|
|||
reg = <4>;
|
||||
riscv,isa = "rv64imafdc";
|
||||
tlb-split;
|
||||
next-level-cache = <&l2cache>;
|
||||
cpu4_intc: interrupt-controller {
|
||||
#interrupt-cells = <1>;
|
||||
compatible = "riscv,cpu-intc";
|
||||
|
@ -253,6 +257,17 @@ pwm1: pwm@10021000 {
|
|||
#pwm-cells = <3>;
|
||||
status = "disabled";
|
||||
};
|
||||
l2cache: cache-controller@2010000 {
|
||||
compatible = "sifive,fu540-c000-ccache", "cache";
|
||||
cache-block-size = <64>;
|
||||
cache-level = <2>;
|
||||
cache-sets = <1024>;
|
||||
cache-size = <2097152>;
|
||||
cache-unified;
|
||||
interrupt-parent = <&plic0>;
|
||||
interrupts = <1 2 3>;
|
||||
reg = <0x0 0x2010000 0x0 0x1000>;
|
||||
};
|
||||
|
||||
};
|
||||
};
|
||||
|
|
|
@ -116,9 +116,9 @@
|
|||
# define SR_PIE SR_MPIE
|
||||
# define SR_PP SR_MPP
|
||||
|
||||
# define IRQ_SOFT IRQ_M_SOFT
|
||||
# define IRQ_TIMER IRQ_M_TIMER
|
||||
# define IRQ_EXT IRQ_M_EXT
|
||||
# define RV_IRQ_SOFT IRQ_M_SOFT
|
||||
# define RV_IRQ_TIMER IRQ_M_TIMER
|
||||
# define RV_IRQ_EXT IRQ_M_EXT
|
||||
#else /* CONFIG_RISCV_M_MODE */
|
||||
# define CSR_STATUS CSR_SSTATUS
|
||||
# define CSR_IE CSR_SIE
|
||||
|
@ -133,15 +133,15 @@
|
|||
# define SR_PIE SR_SPIE
|
||||
# define SR_PP SR_SPP
|
||||
|
||||
# define IRQ_SOFT IRQ_S_SOFT
|
||||
# define IRQ_TIMER IRQ_S_TIMER
|
||||
# define IRQ_EXT IRQ_S_EXT
|
||||
# define RV_IRQ_SOFT IRQ_S_SOFT
|
||||
# define RV_IRQ_TIMER IRQ_S_TIMER
|
||||
# define RV_IRQ_EXT IRQ_S_EXT
|
||||
#endif /* CONFIG_RISCV_M_MODE */
|
||||
|
||||
/* IE/IP (Supervisor/Machine Interrupt Enable/Pending) flags */
|
||||
#define IE_SIE (_AC(0x1, UL) << IRQ_SOFT)
|
||||
#define IE_TIE (_AC(0x1, UL) << IRQ_TIMER)
|
||||
#define IE_EIE (_AC(0x1, UL) << IRQ_EXT)
|
||||
#define IE_SIE (_AC(0x1, UL) << RV_IRQ_SOFT)
|
||||
#define IE_TIE (_AC(0x1, UL) << RV_IRQ_TIMER)
|
||||
#define IE_EIE (_AC(0x1, UL) << RV_IRQ_EXT)
|
||||
|
||||
#ifndef __ASSEMBLY__
|
||||
|
||||
|
|
|
@ -246,6 +246,7 @@ check_syscall_nr:
|
|||
*/
|
||||
li t1, -1
|
||||
beq a7, t1, ret_from_syscall_rejected
|
||||
blt a7, t1, 1f
|
||||
/* Call syscall */
|
||||
la s0, sys_call_table
|
||||
slli t0, a7, RISCV_LGPTR
|
||||
|
|
|
@ -142,7 +142,7 @@ void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr,
|
|||
*/
|
||||
old = *parent;
|
||||
|
||||
if (function_graph_enter(old, self_addr, frame_pointer, parent))
|
||||
if (!function_graph_enter(old, self_addr, frame_pointer, parent))
|
||||
*parent = return_hooker;
|
||||
}
|
||||
|
||||
|
|
|
@ -251,7 +251,7 @@ ENTRY(reset_regs)
|
|||
#ifdef CONFIG_FPU
|
||||
csrr t0, CSR_MISA
|
||||
andi t0, t0, (COMPAT_HWCAP_ISA_F | COMPAT_HWCAP_ISA_D)
|
||||
bnez t0, .Lreset_regs_done
|
||||
beqz t0, .Lreset_regs_done
|
||||
|
||||
li t1, SR_FS
|
||||
csrs CSR_STATUS, t1
|
||||
|
|
|
@ -23,11 +23,11 @@ asmlinkage __visible void __irq_entry do_IRQ(struct pt_regs *regs)
|
|||
|
||||
irq_enter();
|
||||
switch (regs->cause & ~CAUSE_IRQ_FLAG) {
|
||||
case IRQ_TIMER:
|
||||
case RV_IRQ_TIMER:
|
||||
riscv_timer_interrupt();
|
||||
break;
|
||||
#ifdef CONFIG_SMP
|
||||
case IRQ_SOFT:
|
||||
case RV_IRQ_SOFT:
|
||||
/*
|
||||
* We only use software interrupts to pass IPIs, so if a non-SMP
|
||||
* system gets one, then we don't know what to do.
|
||||
|
@ -35,7 +35,7 @@ asmlinkage __visible void __irq_entry do_IRQ(struct pt_regs *regs)
|
|||
riscv_software_interrupt();
|
||||
break;
|
||||
#endif
|
||||
case IRQ_EXT:
|
||||
case RV_IRQ_EXT:
|
||||
handle_arch_irq(regs);
|
||||
break;
|
||||
default:
|
||||
|
|
|
@ -99,8 +99,8 @@ int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
|
|||
return 0;
|
||||
}
|
||||
|
||||
int copy_thread(unsigned long clone_flags, unsigned long usp,
|
||||
unsigned long arg, struct task_struct *p)
|
||||
int copy_thread_tls(unsigned long clone_flags, unsigned long usp,
|
||||
unsigned long arg, struct task_struct *p, unsigned long tls)
|
||||
{
|
||||
struct pt_regs *childregs = task_pt_regs(p);
|
||||
|
||||
|
@ -121,7 +121,7 @@ int copy_thread(unsigned long clone_flags, unsigned long usp,
|
|||
if (usp) /* User fork */
|
||||
childregs->sp = usp;
|
||||
if (clone_flags & CLONE_SETTLS)
|
||||
childregs->tp = childregs->a5;
|
||||
childregs->tp = tls;
|
||||
childregs->a0 = 0; /* Return value of fork() */
|
||||
p->thread.ra = (unsigned long)ret_from_fork;
|
||||
}
|
||||
|
|
|
@ -9,8 +9,5 @@
|
|||
/*
|
||||
* Assembly functions that may be used (directly or indirectly) by modules
|
||||
*/
|
||||
EXPORT_SYMBOL(__clear_user);
|
||||
EXPORT_SYMBOL(__asm_copy_to_user);
|
||||
EXPORT_SYMBOL(__asm_copy_from_user);
|
||||
EXPORT_SYMBOL(memset);
|
||||
EXPORT_SYMBOL(memcpy);
|
||||
|
|
|
@ -1,4 +1,5 @@
|
|||
#include <linux/linkage.h>
|
||||
#include <asm-generic/export.h>
|
||||
#include <asm/asm.h>
|
||||
#include <asm/csr.h>
|
||||
|
||||
|
@ -66,6 +67,8 @@ ENTRY(__asm_copy_from_user)
|
|||
j 3b
|
||||
ENDPROC(__asm_copy_to_user)
|
||||
ENDPROC(__asm_copy_from_user)
|
||||
EXPORT_SYMBOL(__asm_copy_to_user)
|
||||
EXPORT_SYMBOL(__asm_copy_from_user)
|
||||
|
||||
|
||||
ENTRY(__clear_user)
|
||||
|
@ -108,6 +111,7 @@ ENTRY(__clear_user)
|
|||
bltu a0, a3, 5b
|
||||
j 3b
|
||||
ENDPROC(__clear_user)
|
||||
EXPORT_SYMBOL(__clear_user)
|
||||
|
||||
.section .fixup,"ax"
|
||||
.balign 4
|
||||
|
|
|
@ -22,6 +22,7 @@ void flush_icache_all(void)
|
|||
else
|
||||
on_each_cpu(ipi_remote_fence_i, NULL, 1);
|
||||
}
|
||||
EXPORT_SYMBOL(flush_icache_all);
|
||||
|
||||
/*
|
||||
* Performs an icache flush for the given MM context. RISC-V has no direct
|
||||
|
|
|
@ -99,13 +99,13 @@ static void __init setup_initrd(void)
|
|||
pr_info("initrd not found or empty");
|
||||
goto disable;
|
||||
}
|
||||
if (__pa(initrd_end) > PFN_PHYS(max_low_pfn)) {
|
||||
if (__pa_symbol(initrd_end) > PFN_PHYS(max_low_pfn)) {
|
||||
pr_err("initrd extends beyond end of memory");
|
||||
goto disable;
|
||||
}
|
||||
|
||||
size = initrd_end - initrd_start;
|
||||
memblock_reserve(__pa(initrd_start), size);
|
||||
memblock_reserve(__pa_symbol(initrd_start), size);
|
||||
initrd_below_start_ok = 1;
|
||||
|
||||
pr_info("Initial ramdisk at: 0x%p (%lu bytes)\n",
|
||||
|
@ -124,8 +124,8 @@ void __init setup_bootmem(void)
|
|||
{
|
||||
struct memblock_region *reg;
|
||||
phys_addr_t mem_size = 0;
|
||||
phys_addr_t vmlinux_end = __pa(&_end);
|
||||
phys_addr_t vmlinux_start = __pa(&_start);
|
||||
phys_addr_t vmlinux_end = __pa_symbol(&_end);
|
||||
phys_addr_t vmlinux_start = __pa_symbol(&_start);
|
||||
|
||||
/* Find the memory region containing the kernel */
|
||||
for_each_memblock(memory, reg) {
|
||||
|
@ -445,7 +445,7 @@ static void __init setup_vm_final(void)
|
|||
|
||||
/* Setup swapper PGD for fixmap */
|
||||
create_pgd_mapping(swapper_pg_dir, FIXADDR_START,
|
||||
__pa(fixmap_pgd_next),
|
||||
__pa_symbol(fixmap_pgd_next),
|
||||
PGDIR_SIZE, PAGE_TABLE);
|
||||
|
||||
/* Map all memory banks */
|
||||
|
@ -474,7 +474,7 @@ static void __init setup_vm_final(void)
|
|||
clear_fixmap(FIX_PMD);
|
||||
|
||||
/* Move to swapper page table */
|
||||
csr_write(CSR_SATP, PFN_DOWN(__pa(swapper_pg_dir)) | SATP_MODE);
|
||||
csr_write(CSR_SATP, PFN_DOWN(__pa_symbol(swapper_pg_dir)) | SATP_MODE);
|
||||
local_flush_tlb_all();
|
||||
}
|
||||
#else
|
||||
|
|
|
@ -292,10 +292,8 @@ void arch_remove_memory(int nid, u64 start, u64 size,
|
|||
{
|
||||
unsigned long start_pfn = start >> PAGE_SHIFT;
|
||||
unsigned long nr_pages = size >> PAGE_SHIFT;
|
||||
struct zone *zone;
|
||||
|
||||
zone = page_zone(pfn_to_page(start_pfn));
|
||||
__remove_pages(zone, start_pfn, nr_pages, altmap);
|
||||
__remove_pages(start_pfn, nr_pages, altmap);
|
||||
vmem_remove_mapping(start, size);
|
||||
}
|
||||
#endif /* CONFIG_MEMORY_HOTPLUG */
|
||||
|
|
|
@ -434,9 +434,7 @@ void arch_remove_memory(int nid, u64 start, u64 size,
|
|||
{
|
||||
unsigned long start_pfn = PFN_DOWN(start);
|
||||
unsigned long nr_pages = size >> PAGE_SHIFT;
|
||||
struct zone *zone;
|
||||
|
||||
zone = page_zone(pfn_to_page(start_pfn));
|
||||
__remove_pages(zone, start_pfn, nr_pages, altmap);
|
||||
__remove_pages(start_pfn, nr_pages, altmap);
|
||||
}
|
||||
#endif /* CONFIG_MEMORY_HOTPLUG */
|
||||
|
|
|
@ -14,6 +14,7 @@ config UML
|
|||
select HAVE_FUTEX_CMPXCHG if FUTEX
|
||||
select HAVE_DEBUG_KMEMLEAK
|
||||
select HAVE_DEBUG_BUGVERBOSE
|
||||
select HAVE_COPY_THREAD_TLS
|
||||
select GENERIC_IRQ_SHOW
|
||||
select GENERIC_CPU_DEVICES
|
||||
select GENERIC_CLOCKEVENTS
|
||||
|
|
|
@ -36,7 +36,7 @@ extern long subarch_ptrace(struct task_struct *child, long request,
|
|||
extern unsigned long getreg(struct task_struct *child, int regno);
|
||||
extern int putreg(struct task_struct *child, int regno, unsigned long value);
|
||||
|
||||
extern int arch_copy_tls(struct task_struct *new);
|
||||
extern int arch_set_tls(struct task_struct *new, unsigned long tls);
|
||||
extern void clear_flushed_tls(struct task_struct *task);
|
||||
extern int syscall_trace_enter(struct pt_regs *regs);
|
||||
extern void syscall_trace_leave(struct pt_regs *regs);
|
||||
|
|
|
@ -153,8 +153,8 @@ void fork_handler(void)
|
|||
userspace(¤t->thread.regs.regs, current_thread_info()->aux_fp_regs);
|
||||
}
|
||||
|
||||
int copy_thread(unsigned long clone_flags, unsigned long sp,
|
||||
unsigned long arg, struct task_struct * p)
|
||||
int copy_thread_tls(unsigned long clone_flags, unsigned long sp,
|
||||
unsigned long arg, struct task_struct * p, unsigned long tls)
|
||||
{
|
||||
void (*handler)(void);
|
||||
int kthread = current->flags & PF_KTHREAD;
|
||||
|
@ -188,7 +188,7 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
|
|||
* Set a new TLS for the child thread?
|
||||
*/
|
||||
if (clone_flags & CLONE_SETTLS)
|
||||
ret = arch_copy_tls(p);
|
||||
ret = arch_set_tls(p, tls);
|
||||
}
|
||||
|
||||
return ret;
|
||||
|
|
|
@ -865,10 +865,8 @@ void arch_remove_memory(int nid, u64 start, u64 size,
|
|||
{
|
||||
unsigned long start_pfn = start >> PAGE_SHIFT;
|
||||
unsigned long nr_pages = size >> PAGE_SHIFT;
|
||||
struct zone *zone;
|
||||
|
||||
zone = page_zone(pfn_to_page(start_pfn));
|
||||
__remove_pages(zone, start_pfn, nr_pages, altmap);
|
||||
__remove_pages(start_pfn, nr_pages, altmap);
|
||||
}
|
||||
#endif
|
||||
|
||||
|
|
|
@ -1212,10 +1212,8 @@ void __ref arch_remove_memory(int nid, u64 start, u64 size,
|
|||
{
|
||||
unsigned long start_pfn = start >> PAGE_SHIFT;
|
||||
unsigned long nr_pages = size >> PAGE_SHIFT;
|
||||
struct page *page = pfn_to_page(start_pfn) + vmem_altmap_offset(altmap);
|
||||
struct zone *zone = page_zone(page);
|
||||
|
||||
__remove_pages(zone, start_pfn, nr_pages, altmap);
|
||||
__remove_pages(start_pfn, nr_pages, altmap);
|
||||
kernel_physical_mapping_remove(start, start + size);
|
||||
}
|
||||
#endif /* CONFIG_MEMORY_HOTPLUG */
|
||||
|
|
|
@ -215,14 +215,12 @@ static int set_tls_entry(struct task_struct* task, struct user_desc *info,
|
|||
return 0;
|
||||
}
|
||||
|
||||
int arch_copy_tls(struct task_struct *new)
|
||||
int arch_set_tls(struct task_struct *new, unsigned long tls)
|
||||
{
|
||||
struct user_desc info;
|
||||
int idx, ret = -EFAULT;
|
||||
|
||||
if (copy_from_user(&info,
|
||||
(void __user *) UPT_SI(&new->thread.regs.regs),
|
||||
sizeof(info)))
|
||||
if (copy_from_user(&info, (void __user *) tls, sizeof(info)))
|
||||
goto out;
|
||||
|
||||
ret = -EINVAL;
|
||||
|
|
|
@ -6,14 +6,13 @@ void clear_flushed_tls(struct task_struct *task)
|
|||
{
|
||||
}
|
||||
|
||||
int arch_copy_tls(struct task_struct *t)
|
||||
int arch_set_tls(struct task_struct *t, unsigned long tls)
|
||||
{
|
||||
/*
|
||||
* If CLONE_SETTLS is set, we need to save the thread id
|
||||
* (which is argument 5, child_tid, of clone) so it can be set
|
||||
* during context switches.
|
||||
* so it can be set during context switches.
|
||||
*/
|
||||
t->thread.arch.fs = t->thread.regs.regs.gp[R8 / sizeof(long)];
|
||||
t->thread.arch.fs = tls;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
|
|
@ -24,6 +24,7 @@ config XTENSA
|
|||
select HAVE_ARCH_JUMP_LABEL if !XIP_KERNEL
|
||||
select HAVE_ARCH_KASAN if MMU && !XIP_KERNEL
|
||||
select HAVE_ARCH_TRACEHOOK
|
||||
select HAVE_COPY_THREAD_TLS
|
||||
select HAVE_DEBUG_KMEMLEAK
|
||||
select HAVE_DMA_CONTIGUOUS
|
||||
select HAVE_EXIT_THREAD
|
||||
|
|
|
@ -202,8 +202,9 @@ int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
|
|||
* involved. Much simpler to just not copy those live frames across.
|
||||
*/
|
||||
|
||||
int copy_thread(unsigned long clone_flags, unsigned long usp_thread_fn,
|
||||
unsigned long thread_fn_arg, struct task_struct *p)
|
||||
int copy_thread_tls(unsigned long clone_flags, unsigned long usp_thread_fn,
|
||||
unsigned long thread_fn_arg, struct task_struct *p,
|
||||
unsigned long tls)
|
||||
{
|
||||
struct pt_regs *childregs = task_pt_regs(p);
|
||||
|
||||
|
@ -266,9 +267,8 @@ int copy_thread(unsigned long clone_flags, unsigned long usp_thread_fn,
|
|||
|
||||
childregs->syscall = regs->syscall;
|
||||
|
||||
/* The thread pointer is passed in the '4th argument' (= a5) */
|
||||
if (clone_flags & CLONE_SETTLS)
|
||||
childregs->threadptr = childregs->areg[5];
|
||||
childregs->threadptr = tls;
|
||||
} else {
|
||||
p->thread.ra = MAKE_RA_FOR_CALL(
|
||||
(unsigned long)ret_from_kernel_thread, 1);
|
||||
|
|
block/bio.c (49 lines changed)

@@ -538,6 +538,55 @@ void zero_fill_bio_iter(struct bio *bio, struct bvec_iter start)
 }
 EXPORT_SYMBOL(zero_fill_bio_iter);
 
+/**
+ * bio_truncate - truncate the bio to small size of @new_size
+ * @bio:	the bio to be truncated
+ * @new_size:	new size for truncating the bio
+ *
+ * Description:
+ *   Truncate the bio to new size of @new_size. If bio_op(bio) is
+ *   REQ_OP_READ, zero the truncated part. This function should only
+ *   be used for handling corner cases, such as bio eod.
+ */
+void bio_truncate(struct bio *bio, unsigned new_size)
+{
+	struct bio_vec bv;
+	struct bvec_iter iter;
+	unsigned int done = 0;
+	bool truncated = false;
+
+	if (new_size >= bio->bi_iter.bi_size)
+		return;
+
+	if (bio_op(bio) != REQ_OP_READ)
+		goto exit;
+
+	bio_for_each_segment(bv, bio, iter) {
+		if (done + bv.bv_len > new_size) {
+			unsigned offset;
+
+			if (!truncated)
+				offset = new_size - done;
+			else
+				offset = 0;
+			zero_user(bv.bv_page, offset, bv.bv_len - offset);
+			truncated = true;
+		}
+		done += bv.bv_len;
+	}
+
+ exit:
+	/*
+	 * Don't touch bvec table here and make it really immutable, since
+	 * fs bio user has to retrieve all pages via bio_for_each_segment_all
+	 * in its .end_bio() callback.
+	 *
+	 * It is enough to truncate bio by updating .bi_size since we can make
+	 * correct bvec with the updated .bi_size for drivers.
+	 */
+	bio->bi_iter.bi_size = new_size;
+}
+
 /**
  * bio_put - release a reference to a bio
  * @bio:   bio to release reference to
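A sketch of the kind of caller this helper is meant for, namely clamping a
read bio at the end of the device. The function below is illustrative only;
the name and surrounding logic are assumptions, not the exact upstream
end-of-device handling:

	#include <linux/bio.h>
	#include <linux/blkdev.h>

	/* Trim @bio so it does not extend past @maxsector (device capacity). */
	static void clamp_bio_to_capacity(struct bio *bio, sector_t maxsector)
	{
		sector_t start = bio->bi_iter.bi_sector;
		unsigned int sectors = bio_sectors(bio);

		if (start >= maxsector) {
			/* Entirely out of range: let the submitter fail it. */
			bio->bi_status = BLK_STS_IOERR;
			return;
		}

		if (start + sectors > maxsector)
			/* Keep the in-range prefix; truncated reads are zero-filled. */
			bio_truncate(bio, (maxsector - start) << SECTOR_SHIFT);
	}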
@ -157,16 +157,14 @@ static inline unsigned get_max_io_size(struct request_queue *q,
|
|||
return sectors & (lbs - 1);
|
||||
}
|
||||
|
||||
static unsigned get_max_segment_size(const struct request_queue *q,
|
||||
unsigned offset)
|
||||
static inline unsigned get_max_segment_size(const struct request_queue *q,
|
||||
struct page *start_page,
|
||||
unsigned long offset)
|
||||
{
|
||||
unsigned long mask = queue_segment_boundary(q);
|
||||
|
||||
/* default segment boundary mask means no boundary limit */
|
||||
if (mask == BLK_SEG_BOUNDARY_MASK)
|
||||
return queue_max_segment_size(q);
|
||||
|
||||
return min_t(unsigned long, mask - (mask & offset) + 1,
|
||||
offset = mask & (page_to_phys(start_page) + offset);
|
||||
return min_t(unsigned long, mask - offset + 1,
|
||||
queue_max_segment_size(q));
|
||||
}
|
||||
|
||||
|
@ -201,7 +199,8 @@ static bool bvec_split_segs(const struct request_queue *q,
|
|||
unsigned seg_size = 0;
|
||||
|
||||
while (len && *nsegs < max_segs) {
|
||||
seg_size = get_max_segment_size(q, bv->bv_offset + total_len);
|
||||
seg_size = get_max_segment_size(q, bv->bv_page,
|
||||
bv->bv_offset + total_len);
|
||||
seg_size = min(seg_size, len);
|
||||
|
||||
(*nsegs)++;
|
||||
|
@ -419,7 +418,8 @@ static unsigned blk_bvec_map_sg(struct request_queue *q,
|
|||
|
||||
while (nbytes > 0) {
|
||||
unsigned offset = bvec->bv_offset + total;
|
||||
unsigned len = min(get_max_segment_size(q, offset), nbytes);
|
||||
unsigned len = min(get_max_segment_size(q, bvec->bv_page,
|
||||
offset), nbytes);
|
||||
struct page *page = bvec->bv_page;
|
||||
|
||||
/*
|
||||
|
|
|
@ -6,6 +6,7 @@
|
|||
#include <linux/compat.h>
|
||||
#include <linux/elevator.h>
|
||||
#include <linux/hdreg.h>
|
||||
#include <linux/pr.h>
|
||||
#include <linux/slab.h>
|
||||
#include <linux/syscalls.h>
|
||||
#include <linux/types.h>
|
||||
|
@ -354,6 +355,13 @@ long compat_blkdev_ioctl(struct file *file, unsigned cmd, unsigned long arg)
|
|||
* but we call blkdev_ioctl, which gets the lock for us
|
||||
*/
|
||||
case BLKRRPART:
|
||||
case BLKREPORTZONE:
|
||||
case BLKRESETZONE:
|
||||
case BLKOPENZONE:
|
||||
case BLKCLOSEZONE:
|
||||
case BLKFINISHZONE:
|
||||
case BLKGETZONESZ:
|
||||
case BLKGETNRZONES:
|
||||
return blkdev_ioctl(bdev, mode, cmd,
|
||||
(unsigned long)compat_ptr(arg));
|
||||
case BLKBSZSET_32:
|
||||
|
@ -401,6 +409,14 @@ long compat_blkdev_ioctl(struct file *file, unsigned cmd, unsigned long arg)
|
|||
case BLKTRACETEARDOWN: /* compatible */
|
||||
ret = blk_trace_ioctl(bdev, cmd, compat_ptr(arg));
|
||||
return ret;
|
||||
case IOC_PR_REGISTER:
|
||||
case IOC_PR_RESERVE:
|
||||
case IOC_PR_RELEASE:
|
||||
case IOC_PR_PREEMPT:
|
||||
case IOC_PR_PREEMPT_ABORT:
|
||||
case IOC_PR_CLEAR:
|
||||
return blkdev_ioctl(bdev, mode, cmd,
|
||||
(unsigned long)compat_ptr(arg));
|
||||
default:
|
||||
if (disk->fops->compat_ioctl)
|
||||
ret = disk->fops->compat_ioctl(bdev, mode, cmd, arg);
|
||||
|
|
|
@@ -76,8 +76,7 @@ enum brcm_ahci_version {
};

enum brcm_ahci_quirks {
    BRCM_AHCI_QUIRK_NO_NCQ = BIT(0),
    BRCM_AHCI_QUIRK_SKIP_PHY_ENABLE = BIT(1),
    BRCM_AHCI_QUIRK_SKIP_PHY_ENABLE = BIT(0),
};

struct brcm_ahci_priv {

@@ -213,19 +212,12 @@ static void brcm_sata_phys_disable(struct brcm_ahci_priv *priv)
        brcm_sata_phy_disable(priv, i);
}

static u32 brcm_ahci_get_portmask(struct platform_device *pdev,
static u32 brcm_ahci_get_portmask(struct ahci_host_priv *hpriv,
                                  struct brcm_ahci_priv *priv)
{
    void __iomem *ahci;
    struct resource *res;
    u32 impl;

    res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "ahci");
    ahci = devm_ioremap_resource(&pdev->dev, res);
    if (IS_ERR(ahci))
        return 0;

    impl = readl(ahci + HOST_PORTS_IMPL);
    impl = readl(hpriv->mmio + HOST_PORTS_IMPL);

    if (fls(impl) > SATA_TOP_MAX_PHYS)
        dev_warn(priv->dev, "warning: more ports than PHYs (%#x)\n",

@@ -233,9 +225,6 @@ static u32 brcm_ahci_get_portmask(struct platform_device *pdev,
    else if (!impl)
        dev_info(priv->dev, "no ports found\n");

    devm_iounmap(&pdev->dev, ahci);
    devm_release_mem_region(&pdev->dev, res->start, resource_size(res));

    return impl;
}

@@ -285,6 +274,13 @@ static unsigned int brcm_ahci_read_id(struct ata_device *dev,
    /* Perform the SATA PHY reset sequence */
    brcm_sata_phy_disable(priv, ap->port_no);

    /* Reset the SATA clock */
    ahci_platform_disable_clks(hpriv);
    msleep(10);

    ahci_platform_enable_clks(hpriv);
    msleep(10);

    /* Bring the PHY back on */
    brcm_sata_phy_enable(priv, ap->port_no);

@@ -347,11 +343,10 @@ static int brcm_ahci_suspend(struct device *dev)
    struct ata_host *host = dev_get_drvdata(dev);
    struct ahci_host_priv *hpriv = host->private_data;
    struct brcm_ahci_priv *priv = hpriv->plat_data;
    int ret;

    ret = ahci_platform_suspend(dev);
    brcm_sata_phys_disable(priv);
    return ret;

    return ahci_platform_suspend(dev);
}

static int brcm_ahci_resume(struct device *dev)

@@ -359,11 +354,44 @@ static int brcm_ahci_resume(struct device *dev)
    struct ata_host *host = dev_get_drvdata(dev);
    struct ahci_host_priv *hpriv = host->private_data;
    struct brcm_ahci_priv *priv = hpriv->plat_data;
    int ret;

    /* Make sure clocks are turned on before re-configuration */
    ret = ahci_platform_enable_clks(hpriv);
    if (ret)
        return ret;

    brcm_sata_init(priv);
    brcm_sata_phys_enable(priv);
    brcm_sata_alpm_init(hpriv);
    return ahci_platform_resume(dev);

    /* Since we had to enable clocks earlier on, we cannot use
     * ahci_platform_resume() as-is since a second call to
     * ahci_platform_enable_resources() would bump up the resources
     * (regulators, clocks, PHYs) count artificially so we copy the part
     * after ahci_platform_enable_resources().
     */
    ret = ahci_platform_enable_phys(hpriv);
    if (ret)
        goto out_disable_phys;

    ret = ahci_platform_resume_host(dev);
    if (ret)
        goto out_disable_platform_phys;

    /* We resumed so update PM runtime state */
    pm_runtime_disable(dev);
    pm_runtime_set_active(dev);
    pm_runtime_enable(dev);

    return 0;

out_disable_platform_phys:
    ahci_platform_disable_phys(hpriv);
out_disable_phys:
    brcm_sata_phys_disable(priv);
    ahci_platform_disable_clks(hpriv);
    return ret;
}
#endif

@@ -410,44 +438,71 @@ static int brcm_ahci_probe(struct platform_device *pdev)
    if (!IS_ERR_OR_NULL(priv->rcdev))
        reset_control_deassert(priv->rcdev);

    if ((priv->version == BRCM_SATA_BCM7425) ||
        (priv->version == BRCM_SATA_NSP)) {
        priv->quirks |= BRCM_AHCI_QUIRK_NO_NCQ;
        priv->quirks |= BRCM_AHCI_QUIRK_SKIP_PHY_ENABLE;
    hpriv = ahci_platform_get_resources(pdev, 0);
    if (IS_ERR(hpriv)) {
        ret = PTR_ERR(hpriv);
        goto out_reset;
    }

    hpriv->plat_data = priv;
    hpriv->flags = AHCI_HFLAG_WAKE_BEFORE_STOP | AHCI_HFLAG_NO_WRITE_TO_RO;

    switch (priv->version) {
    case BRCM_SATA_BCM7425:
        hpriv->flags |= AHCI_HFLAG_DELAY_ENGINE;
        /* fall through */
    case BRCM_SATA_NSP:
        hpriv->flags |= AHCI_HFLAG_NO_NCQ;
        priv->quirks |= BRCM_AHCI_QUIRK_SKIP_PHY_ENABLE;
        break;
    default:
        break;
    }

    ret = ahci_platform_enable_clks(hpriv);
    if (ret)
        goto out_reset;

    /* Must be first so as to configure endianness including that
     * of the standard AHCI register space.
     */
    brcm_sata_init(priv);

    priv->port_mask = brcm_ahci_get_portmask(pdev, priv);
    if (!priv->port_mask)
        return -ENODEV;
    /* Initializes priv->port_mask which is used below */
    priv->port_mask = brcm_ahci_get_portmask(hpriv, priv);
    if (!priv->port_mask) {
        ret = -ENODEV;
        goto out_disable_clks;
    }

    /* Must be done before ahci_platform_enable_phys() */
    brcm_sata_phys_enable(priv);

    hpriv = ahci_platform_get_resources(pdev, 0);
    if (IS_ERR(hpriv))
        return PTR_ERR(hpriv);
    hpriv->plat_data = priv;
    hpriv->flags = AHCI_HFLAG_WAKE_BEFORE_STOP;

    brcm_sata_alpm_init(hpriv);

    ret = ahci_platform_enable_resources(hpriv);
    ret = ahci_platform_enable_phys(hpriv);
    if (ret)
        return ret;

    if (priv->quirks & BRCM_AHCI_QUIRK_NO_NCQ)
        hpriv->flags |= AHCI_HFLAG_NO_NCQ;
    hpriv->flags |= AHCI_HFLAG_NO_WRITE_TO_RO;
        goto out_disable_phys;

    ret = ahci_platform_init_host(pdev, hpriv, &ahci_brcm_port_info,
                                  &ahci_platform_sht);
    if (ret)
        return ret;
        goto out_disable_platform_phys;

    dev_info(dev, "Broadcom AHCI SATA3 registered\n");

    return 0;

out_disable_platform_phys:
    ahci_platform_disable_phys(hpriv);
out_disable_phys:
    brcm_sata_phys_disable(priv);
out_disable_clks:
    ahci_platform_disable_clks(hpriv);
out_reset:
    if (!IS_ERR_OR_NULL(priv->rcdev))
        reset_control_assert(priv->rcdev);
    return ret;
}

static int brcm_ahci_remove(struct platform_device *pdev)

@@ -457,12 +512,12 @@ static int brcm_ahci_remove(struct platform_device *pdev)
    struct brcm_ahci_priv *priv = hpriv->plat_data;
    int ret;

    brcm_sata_phys_disable(priv);

    ret = ata_platform_remove_one(pdev);
    if (ret)
        return ret;

    brcm_sata_phys_disable(priv);

    return 0;
}

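The comment in the resume path above is about reference counting: the clocks had to be enabled early so brcm_sata_init() could run, and calling the full ahci_platform_resume() afterwards would enable the same resources a second time, leaving their enable counts one too high. A toy standalone sketch of that imbalance, with made-up helpers rather than the real clock API:

#include <assert.h>

static int clk_enable_count;

static void clk_enable(void)  { clk_enable_count++; }
static void clk_disable(void) { clk_enable_count--; }

int main(void)
{
    clk_enable();       /* enabled early, before reconfiguring the controller */
    clk_enable();       /* a full "resume everything" helper enables it again */

    clk_disable();      /* a later suspend disables only once ... */
    assert(clk_enable_count == 1);  /* ... so the clock is still counted as enabled */
    return 0;
}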
@@ -43,7 +43,7 @@ EXPORT_SYMBOL_GPL(ahci_platform_ops);
 * RETURNS:
 * 0 on success otherwise a negative error code
 */
static int ahci_platform_enable_phys(struct ahci_host_priv *hpriv)
int ahci_platform_enable_phys(struct ahci_host_priv *hpriv)
{
    int rc, i;

@@ -74,6 +74,7 @@ static int ahci_platform_enable_phys(struct ahci_host_priv *hpriv)
    }
    return rc;
}
EXPORT_SYMBOL_GPL(ahci_platform_enable_phys);

/**
 * ahci_platform_disable_phys - Disable PHYs

@@ -81,7 +82,7 @@ static int ahci_platform_enable_phys(struct ahci_host_priv *hpriv)
 *
 * This function disables all PHYs found in hpriv->phys.
 */
static void ahci_platform_disable_phys(struct ahci_host_priv *hpriv)
void ahci_platform_disable_phys(struct ahci_host_priv *hpriv)
{
    int i;

@@ -90,6 +91,7 @@ static void ahci_platform_disable_phys(struct ahci_host_priv *hpriv)
        phy_exit(hpriv->phys[i]);
    }
}
EXPORT_SYMBOL_GPL(ahci_platform_disable_phys);

/**
 * ahci_platform_enable_clks - Enable platform clocks
@@ -5328,6 +5328,30 @@ void ata_qc_complete(struct ata_queued_cmd *qc)
    }
}

/**
 *	ata_qc_get_active - get bitmask of active qcs
 *	@ap: port in question
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 *
 *	RETURNS:
 *	Bitmask of active qcs
 */
u64 ata_qc_get_active(struct ata_port *ap)
{
    u64 qc_active = ap->qc_active;

    /* ATA_TAG_INTERNAL is sent to hw as tag 0 */
    if (qc_active & (1ULL << ATA_TAG_INTERNAL)) {
        qc_active |= (1 << 0);
        qc_active &= ~(1ULL << ATA_TAG_INTERNAL);
    }

    return qc_active;
}
EXPORT_SYMBOL_GPL(ata_qc_get_active);

/**
 *	ata_qc_complete_multiple - Complete multiple qcs successfully
 *	@ap: port in question
@@ -1280,7 +1280,7 @@ static void sata_fsl_host_intr(struct ata_port *ap)
            i, ioread32(hcr_base + CC),
            ioread32(hcr_base + CA));
    }
    ata_qc_complete_multiple(ap, ap->qc_active ^ done_mask);
    ata_qc_complete_multiple(ap, ata_qc_get_active(ap) ^ done_mask);
    return;

    } else if ((ap->qc_active & (1ULL << ATA_TAG_INTERNAL))) {

@@ -2829,7 +2829,7 @@ static void mv_process_crpb_entries(struct ata_port *ap, struct mv_port_priv *pp
    }

    if (work_done) {
        ata_qc_complete_multiple(ap, ap->qc_active ^ done_mask);
        ata_qc_complete_multiple(ap, ata_qc_get_active(ap) ^ done_mask);

        /* Update the software queue position index in hardware */
        writelfl((pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK) |

@@ -984,7 +984,7 @@ static irqreturn_t nv_adma_interrupt(int irq, void *dev_instance)
                check_commands = 0;
            check_commands &= ~(1 << pos);
        }
        ata_qc_complete_multiple(ap, ap->qc_active ^ done_mask);
        ata_qc_complete_multiple(ap, ata_qc_get_active(ap) ^ done_mask);
    }
}

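The helper added in the libata hunk maps the internal-command bit down to tag 0 before a driver compares it with its hardware completion mask, and the three callers above now XOR that result with done_mask instead of the raw ap->qc_active. A small standalone sketch of the bit manipulation; TAG_INTERNAL here is an illustrative stand-in, not the libata definition:

#include <stdint.h>
#include <stdio.h>

#define TAG_INTERNAL 32     /* illustrative stand-in for ATA_TAG_INTERNAL */

/* Mirror of the remapping: the internal command is issued to hardware as tag 0. */
static uint64_t qc_get_active(uint64_t qc_active)
{
    if (qc_active & (1ULL << TAG_INTERNAL)) {
        qc_active |= 1ULL << 0;
        qc_active &= ~(1ULL << TAG_INTERNAL);
    }
    return qc_active;
}

int main(void)
{
    uint64_t active = (1ULL << TAG_INTERNAL) | (1ULL << 5);
    uint64_t done_mask = (1ULL << 0) | (1ULL << 5);   /* hardware reports tags 0 and 5 done */

    /* Without the remap, tag 0 in done_mask would never match the internal command. */
    printf("still active: %#llx\n",
           (unsigned long long)(qc_get_active(active) ^ done_mask));   /* prints 0 */
    return 0;
}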
@@ -374,7 +374,7 @@ static int do_rx_dma(struct atm_vcc *vcc,struct sk_buff *skb,
        here = (eni_vcc->descr+skip) & (eni_vcc->words-1);
        dma[j++] = (here << MID_DMA_COUNT_SHIFT) | (vcc->vci
            << MID_DMA_VCI_SHIFT) | MID_DT_JK;
        j++;
        dma[j++] = 0;
    }
    here = (eni_vcc->descr+size+skip) & (eni_vcc->words-1);
    if (!eff) size += skip;

@@ -447,7 +447,7 @@ static int do_rx_dma(struct atm_vcc *vcc,struct sk_buff *skb,
    if (size != eff) {
        dma[j++] = (here << MID_DMA_COUNT_SHIFT) |
            (vcc->vci << MID_DMA_VCI_SHIFT) | MID_DT_JK;
        j++;
        dma[j++] = 0;
    }
    if (!j || j > 2*RX_DMA_BUF) {
        printk(KERN_CRIT DEV_LABEL "!j or j too big!!!\n");
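Both hunks above replace a bare j++ with an explicit store: merely advancing the index leaves that DMA word holding whatever stale data was already in the buffer. A minimal standalone illustration of the difference (values are made up):

#include <stdio.h>

int main(void)
{
    unsigned int dma[4] = { 0xdead, 0xdead, 0xdead, 0xdead };
    int j = 0;

    dma[j++] = 0x1;     /* descriptor word */
    j++;                /* old style: slot 1 keeps its stale 0xdead contents */
    dma[j++] = 0x3;

    printf("slot 1 = %#x\n", dma[1]);   /* prints 0xdead, not 0 */

    j = 1;
    dma[j++] = 0;       /* new style: the slot is cleared explicitly */
    printf("slot 1 = %#x\n", dma[1]);   /* prints 0 */
    return 0;
}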
@@ -186,7 +186,10 @@ static blk_status_t null_zone_mgmt(struct nullb_cmd *cmd, enum req_opf op,
        if (zone->cond == BLK_ZONE_COND_FULL)
            return BLK_STS_IOERR;

        zone->cond = BLK_ZONE_COND_CLOSED;
        if (zone->wp == zone->start)
            zone->cond = BLK_ZONE_COND_EMPTY;
        else
            zone->cond = BLK_ZONE_COND_CLOSED;
        break;
    case REQ_OP_ZONE_FINISH:
        if (zone->type == BLK_ZONE_TYPE_CONVENTIONAL)
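The change above follows the zone state machine: closing a zone whose write pointer is still at the zone start leaves it empty rather than closed. A compact sketch of that transition rule; the types and names below are illustrative, not the null_blk structures:

#include <assert.h>

enum zone_cond { ZONE_EMPTY, ZONE_IMP_OPEN, ZONE_CLOSED, ZONE_FULL };

struct zone {
    unsigned long long start, wp;   /* first sector and current write pointer */
    enum zone_cond cond;
};

/* Close a zone: if nothing has been written yet it goes back to EMPTY. */
static void zone_close(struct zone *z)
{
    if (z->wp == z->start)
        z->cond = ZONE_EMPTY;
    else
        z->cond = ZONE_CLOSED;
}

int main(void)
{
    struct zone untouched = { .start = 0, .wp = 0, .cond = ZONE_IMP_OPEN };
    struct zone partial = { .start = 0, .wp = 8, .cond = ZONE_IMP_OPEN };

    zone_close(&untouched);
    zone_close(&partial);
    assert(untouched.cond == ZONE_EMPTY);
    assert(partial.cond == ZONE_CLOSED);
    return 0;
}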
@@ -2707,7 +2707,7 @@ static const struct block_device_operations pktcdvd_ops = {
    .release =      pkt_close,
    .ioctl =        pkt_ioctl,
#ifdef CONFIG_COMPAT
    .ioctl =        pkt_compat_ioctl,
    .compat_ioctl =     pkt_compat_ioctl,
#endif
    .check_events =     pkt_check_events,
};
@@ -84,7 +84,6 @@ static int agp_3_5_isochronous_node_enable(struct agp_bridge_data *bridge,
    unsigned int cdev = 0;
    u32 mnistat, tnistat, tstatus, mcmd;
    u16 tnicmd, mnicmd;
    u8 mcapndx;
    u32 tot_bw = 0, tot_n = 0, tot_rq = 0, y_max, rq_isoch, rq_async;
    u32 step, rem, rem_isoch, rem_async;
    int ret = 0;

@@ -138,8 +137,6 @@ static int agp_3_5_isochronous_node_enable(struct agp_bridge_data *bridge,
        cur = list_entry(pos, struct agp_3_5_dev, list);
        dev = cur->dev;

        mcapndx = cur->capndx;

        pci_read_config_dword(dev, cur->capndx+AGPNISTAT, &mnistat);

        master[cdev].maxbw = (mnistat >> 16) & 0xff;

@@ -251,8 +248,6 @@ static int agp_3_5_isochronous_node_enable(struct agp_bridge_data *bridge,
        cur = master[cdev].dev;
        dev = cur->dev;

        mcapndx = cur->capndx;

        master[cdev].rq += (cdev == ndevs - 1)
                      ? (rem_async + rem_isoch) : step;

@@ -319,7 +314,7 @@ int agp_3_5_enable(struct agp_bridge_data *bridge)
{
    struct pci_dev *td = bridge->dev, *dev = NULL;
    u8 mcapndx;
    u32 isoch, arqsz;
    u32 isoch;
    u32 tstatus, mstatus, ncapid;
    u32 mmajor;
    u16 mpstat;

@@ -334,8 +329,6 @@ int agp_3_5_enable(struct agp_bridge_data *bridge)
    if (isoch == 0) /* isoch xfers not available, bail out. */
        return -ENODEV;

    arqsz = (tstatus >> 13) & 0x7;

    /*
     * Allocate a head for our AGP 3.5 device list
     * (multiple AGP v3 devices are allowed behind a single bridge).
@@ -130,7 +130,7 @@ ssize_t tpm_common_read(struct file *file, char __user *buf,
        priv->response_read = true;

        ret_size = min_t(ssize_t, size, priv->response_length);
        if (!ret_size) {
        if (ret_size <= 0) {
            priv->response_length = 0;
            goto out;
        }

@@ -14,7 +14,7 @@ struct file_priv {
    struct work_struct timeout_work;
    struct work_struct async_work;
    wait_queue_head_t async_wait;
    size_t response_length;
    ssize_t response_length;
    bool response_read;
    bool command_enqueued;

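The two hunks above belong together: once response_length is a signed ssize_t it can also carry a negative error code, and the read path then has to treat anything that is not a positive count as "nothing to copy". A tiny sketch of why the signed comparison matters; the values and the local min_t macro are made up for illustration:

#include <stdio.h>
#include <sys/types.h>

#define min_t(type, a, b) ((type)(a) < (type)(b) ? (type)(a) : (type)(b))

int main(void)
{
    ssize_t response_length = -14;  /* e.g. a stored error from an earlier failure */
    size_t user_size = 4096;

    ssize_t ret_size = min_t(ssize_t, user_size, response_length);

    /* With a signed type the error is visible; "if (!ret_size)" would miss it. */
    if (ret_size <= 0)
        printf("nothing to copy (%zd)\n", ret_size);
    return 0;
}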
@@ -978,13 +978,13 @@ int tpm_tis_core_init(struct device *dev, struct tpm_tis_data *priv, int irq,

    if (wait_startup(chip, 0) != 0) {
        rc = -ENODEV;
        goto err_start;
        goto out_err;
    }

    /* Take control of the TPM's interrupt hardware and shut it off */
    rc = tpm_tis_read32(priv, TPM_INT_ENABLE(priv->locality), &intmask);
    if (rc < 0)
        goto err_start;
        goto out_err;

    intmask |= TPM_INTF_CMD_READY_INT | TPM_INTF_LOCALITY_CHANGE_INT |
           TPM_INTF_DATA_AVAIL_INT | TPM_INTF_STS_VALID_INT;

@@ -993,21 +993,21 @@ int tpm_tis_core_init(struct device *dev, struct tpm_tis_data *priv, int irq,

    rc = tpm_chip_start(chip);
    if (rc)
        goto err_start;

        goto out_err;
    rc = tpm2_probe(chip);
    tpm_chip_stop(chip);
    if (rc)
        goto err_probe;
        goto out_err;

    rc = tpm_tis_read32(priv, TPM_DID_VID(0), &vendor);
    if (rc < 0)
        goto err_probe;
        goto out_err;

    priv->manufacturer_id = vendor;

    rc = tpm_tis_read8(priv, TPM_RID(0), &rid);
    if (rc < 0)
        goto err_probe;
        goto out_err;

    dev_info(dev, "%s TPM (device-id 0x%X, rev-id %d)\n",
         (chip->flags & TPM_CHIP_FLAG_TPM2) ? "2.0" : "1.2",

@@ -1016,13 +1016,13 @@ int tpm_tis_core_init(struct device *dev, struct tpm_tis_data *priv, int irq,
        probe = probe_itpm(chip);
        if (probe < 0) {
            rc = -ENODEV;
            goto err_probe;
            goto out_err;
        }

    /* Figure out the capabilities */
    rc = tpm_tis_read32(priv, TPM_INTF_CAPS(priv->locality), &intfcaps);
    if (rc < 0)
        goto err_probe;
        goto out_err;

    dev_dbg(dev, "TPM interface capabilities (0x%x):\n",
        intfcaps);

@@ -1056,10 +1056,9 @@ int tpm_tis_core_init(struct device *dev, struct tpm_tis_data *priv, int irq,
    if (tpm_get_timeouts(chip)) {
        dev_err(dev, "Could not get TPM timeouts and durations\n");
        rc = -ENODEV;
        goto err_probe;
        goto out_err;
    }

    chip->flags |= TPM_CHIP_FLAG_IRQ;
    if (irq) {
        tpm_tis_probe_irq_single(chip, intmask, IRQF_SHARED,
                     irq);

@@ -1071,18 +1070,15 @@ int tpm_tis_core_init(struct device *dev, struct tpm_tis_data *priv, int irq,
        }
    }

    tpm_chip_stop(chip);

    rc = tpm_chip_register(chip);
    if (rc)
        goto err_start;
        goto out_err;

    if (chip->ops->clk_enable != NULL)
        chip->ops->clk_enable(chip, false);

    return 0;

err_probe:
    tpm_chip_stop(chip);

err_start:
out_err:
    if ((chip->ops != NULL) && (chip->ops->clk_enable != NULL))
        chip->ops->clk_enable(chip, false);

@@ -56,7 +56,7 @@ static unsigned long long riscv_clocksource_rdtime(struct clocksource *cs)
    return get_cycles64();
}

static u64 riscv_sched_clock(void)
static u64 notrace riscv_sched_clock(void)
{
    return get_cycles64();
}
@@ -121,6 +121,8 @@ static const struct of_device_id blacklist[] __initconst = {
    { .compatible = "mediatek,mt8176", },
    { .compatible = "mediatek,mt8183", },

    { .compatible = "nvidia,tegra20", },
    { .compatible = "nvidia,tegra30", },
    { .compatible = "nvidia,tegra124", },
    { .compatible = "nvidia,tegra210", },

@@ -83,7 +83,6 @@ config ARM_EXYNOS_BUS_DEVFREQ
	select DEVFREQ_GOV_PASSIVE
	select DEVFREQ_EVENT_EXYNOS_PPMU
	select PM_DEVFREQ_EVENT
	select PM_OPP
	help
	  This adds the common DEVFREQ driver for Exynos Memory bus. Exynos
	  Memory bus has one more group of memory bus (e.g, MIF and INT block).

@@ -98,7 +97,7 @@ config ARM_TEGRA_DEVFREQ
		ARCH_TEGRA_132_SOC || ARCH_TEGRA_124_SOC || \
		ARCH_TEGRA_210_SOC || \
		COMPILE_TEST
	select PM_OPP
	depends on COMMON_CLK
	help
	  This adds the DEVFREQ driver for the Tegra family of SoCs.
	  It reads ACTMON counters of memory controllers and adjusts the

@@ -109,7 +108,6 @@ config ARM_TEGRA20_DEVFREQ
	depends on (TEGRA_MC && TEGRA20_EMC) || COMPILE_TEST
	depends on COMMON_CLK
	select DEVFREQ_GOV_SIMPLE_ONDEMAND
	select PM_OPP
	help
	  This adds the DEVFREQ driver for the Tegra20 family of SoCs.
	  It reads Memory Controller counters and adjusts the operating

@@ -121,7 +119,6 @@ config ARM_RK3399_DMC_DEVFREQ
	select DEVFREQ_EVENT_ROCKCHIP_DFI
	select DEVFREQ_GOV_SIMPLE_ONDEMAND
	select PM_DEVFREQ_EVENT
	select PM_OPP
	help
	  This adds the DEVFREQ driver for the RK3399 DMC(Dynamic Memory Controller).
	  It sets the frequency for the memory controller and reads the usage counts
@@ -999,7 +999,8 @@ static const struct jz4780_dma_soc_data jz4740_dma_soc_data = {
static const struct jz4780_dma_soc_data jz4725b_dma_soc_data = {
    .nb_channels = 6,
    .transfer_ord_max = 5,
    .flags = JZ_SOC_DATA_PER_CHAN_PM | JZ_SOC_DATA_NO_DCKES_DCKEC,
    .flags = JZ_SOC_DATA_PER_CHAN_PM | JZ_SOC_DATA_NO_DCKES_DCKEC |
         JZ_SOC_DATA_BREAK_LINKS,
};

static const struct jz4780_dma_soc_data jz4770_dma_soc_data = {
@@ -377,10 +377,11 @@ ioat_alloc_ring(struct dma_chan *c, int order, gfp_t flags)

        descs->virt = dma_alloc_coherent(to_dev(ioat_chan),
                         SZ_2M, &descs->hw, flags);
        if (!descs->virt && (i > 0)) {
        if (!descs->virt) {
            int idx;

            for (idx = 0; idx < i; idx++) {
                descs = &ioat_chan->descs[idx];
                dma_free_coherent(to_dev(ioat_chan), SZ_2M,
                          descs->virt, descs->hw);
                descs->virt = NULL;
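The fix above makes the error path unwind even when the very first allocation in the loop fails. The general pattern — free exactly the i entries that already succeeded, then bail out — looks like this in a standalone sketch, with plain malloc standing in for dma_alloc_coherent:

#include <stdlib.h>

/* Allocate n fixed-size buffers, or none at all on failure. */
static void **alloc_ring(size_t n, size_t size)
{
    void **ring = calloc(n, sizeof(*ring));
    size_t i;

    if (!ring)
        return NULL;

    for (i = 0; i < n; i++) {
        ring[i] = malloc(size);
        if (!ring[i]) {
            /* Roll back the i buffers that were already allocated. */
            while (i--)
                free(ring[i]);
            free(ring);
            return NULL;
        }
    }
    return ring;
}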
@@ -229,9 +229,11 @@ static irqreturn_t k3_dma_int_handler(int irq, void *dev_id)
            c = p->vchan;
            if (c && (tc1 & BIT(i))) {
                spin_lock_irqsave(&c->vc.lock, flags);
                vchan_cookie_complete(&p->ds_run->vd);
                p->ds_done = p->ds_run;
                p->ds_run = NULL;
                if (p->ds_run != NULL) {
                    vchan_cookie_complete(&p->ds_run->vd);
                    p->ds_done = p->ds_run;
                    p->ds_run = NULL;
                }
                spin_unlock_irqrestore(&c->vc.lock, flags);
            }
            if (c && (tc2 & BIT(i))) {

@@ -271,6 +273,10 @@ static int k3_dma_start_txd(struct k3_dma_chan *c)
    if (BIT(c->phy->idx) & k3_dma_get_chan_stat(d))
        return -EAGAIN;

    /* Avoid losing track of ds_run if a transaction is in flight */
    if (c->phy->ds_run)
        return -EAGAIN;

    if (vd) {
        struct k3_dma_desc_sw *ds =
            container_of(vd, struct k3_dma_desc_sw, vd);
@@ -104,9 +104,8 @@ static void vchan_complete(unsigned long arg)
        dmaengine_desc_get_callback(&vd->tx, &cb);

        list_del(&vd->node);
        vchan_vdesc_fini(vd);

        dmaengine_desc_callback_invoke(&cb, &vd->tx_result);
        vchan_vdesc_fini(vd);
    }
}

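Moving vchan_vdesc_fini() after the callback matters because the completion callback may still look at the descriptor (its tx_result, for instance); freeing or recycling it first would be a use-after-free. A generic sketch of the safe ordering, using a hypothetical descriptor type rather than the dmaengine structures:

#include <stdio.h>
#include <stdlib.h>

struct desc {
    int result;
    void (*callback)(const struct desc *d);
};

static void on_complete(const struct desc *d)
{
    /* The callback is still allowed to read the descriptor here. */
    printf("completed with result %d\n", d->result);
}

static void complete_desc(struct desc *d)
{
    /* Invoke the callback first ... */
    if (d->callback)
        d->callback(d);
    /* ... and only then release the descriptor. */
    free(d);
}

int main(void)
{
    struct desc *d = malloc(sizeof(*d));

    if (!d)
        return 1;
    d->result = 0;
    d->callback = on_complete;
    complete_desc(d);
    return 0;
}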
@@ -10,7 +10,7 @@
#include <linux/edac.h>
#include <linux/platform_device.h>
#include "edac_module.h"
#include <asm/sifive_l2_cache.h>
#include <soc/sifive/sifive_l2_cache.h>

#define DRVNAME "sifive_edac"

@@ -215,7 +215,6 @@ static int tee_bnxt_fw_probe(struct device *dev)
    fw_shm_pool = tee_shm_alloc(pvt_data.ctx, MAX_SHM_MEM_SZ,
                    TEE_SHM_MAPPED | TEE_SHM_DMA_BUF);
    if (IS_ERR(fw_shm_pool)) {
        tee_client_close_context(pvt_data.ctx);
        dev_err(pvt_data.dev, "tee_shm_alloc failed\n");
        err = PTR_ERR(fw_shm_pool);
        goto out_sess;
@@ -553,8 +553,8 @@ config GPIO_TEGRA

config GPIO_TEGRA186
	tristate "NVIDIA Tegra186 GPIO support"
	default ARCH_TEGRA_186_SOC
	depends on ARCH_TEGRA_186_SOC || COMPILE_TEST
	default ARCH_TEGRA_186_SOC || ARCH_TEGRA_194_SOC
	depends on ARCH_TEGRA_186_SOC || ARCH_TEGRA_194_SOC || COMPILE_TEST
	depends on OF_GPIO
	select GPIOLIB_IRQCHIP
	select IRQ_DOMAIN_HIERARCHY

@@ -1148,6 +1148,7 @@ config GPIO_MADERA
config GPIO_MAX77620
	tristate "GPIO support for PMIC MAX77620 and MAX20024"
	depends on MFD_MAX77620
	select GPIOLIB_IRQCHIP
	help
	  GPIO driver for MAX77620 and MAX20024 PMIC from Maxim Semiconductor.
	  MAX77620 PMIC has 8 pins that can be configured as GPIOs. The
@@ -107,7 +107,7 @@ static void __iomem *bank_reg(struct aspeed_sgpio *gpio,
        return gpio->base + bank->irq_regs + GPIO_IRQ_STATUS;
    default:
        /* acturally if code runs to here, it's an error case */
        BUG_ON(1);
        BUG();
    }
}

@@ -156,7 +156,7 @@ static int gpio_mockup_apply_pull(struct gpio_mockup_chip *chip,
    mutex_lock(&chip->lock);

    if (test_bit(FLAG_REQUESTED, &desc->flags) &&
        !test_bit(FLAG_IS_OUT, &desc->flags)) {
        !test_bit(FLAG_IS_OUT, &desc->flags)) {
        curr = __gpio_mockup_get(chip, offset);
        if (curr == value)
            goto out;

@@ -165,7 +165,7 @@ static int gpio_mockup_apply_pull(struct gpio_mockup_chip *chip,
        irq_type = irq_get_trigger_type(irq);

        if ((value == 1 && (irq_type & IRQ_TYPE_EDGE_RISING)) ||
            (value == 0 && (irq_type & IRQ_TYPE_EDGE_FALLING)))
            (value == 0 && (irq_type & IRQ_TYPE_EDGE_FALLING)))
            irq_sim_fire(sim, offset);
    }

@@ -226,7 +226,7 @@ static int gpio_mockup_get_direction(struct gpio_chip *gc, unsigned int offset)
    int direction;

    mutex_lock(&chip->lock);
    direction = !chip->lines[offset].dir;
    direction = chip->lines[offset].dir;
    mutex_unlock(&chip->lock);

    return direction;

@@ -395,7 +395,7 @@ static int gpio_mockup_probe(struct platform_device *pdev)
    struct gpio_chip *gc;
    struct device *dev;
    const char *name;
    int rv, base;
    int rv, base, i;
    u16 ngpio;

    dev = &pdev->dev;

@@ -447,6 +447,9 @@ static int gpio_mockup_probe(struct platform_device *pdev)
    if (!chip->lines)
        return -ENOMEM;

    for (i = 0; i < gc->ngpio; i++)
        chip->lines[i].dir = GPIO_LINE_DIRECTION_IN;

    if (device_property_read_bool(dev, "named-gpio-lines")) {
        rv = gpio_mockup_name_lines(dev, chip);
        if (rv)
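After this change the mockup chip stores and returns the GPIO core's direction encoding directly instead of inverting a private flag, and every line starts out as an input. A hedged sketch of the idea; the two constants below are illustrative stand-ins mirroring the gpiolib encoding (input = 1, output = 0), not the kernel headers:

#include <assert.h>

/* Illustrative stand-ins for the gpiolib direction encoding. */
#define LINE_DIRECTION_OUT 0
#define LINE_DIRECTION_IN  1

struct mock_line {
    int dir;    /* stored in the same encoding the core expects */
};

static int mock_get_direction(const struct mock_line *line)
{
    /* Return the stored value directly; no inversion needed. */
    return line->dir;
}

int main(void)
{
    struct mock_line line = { .dir = LINE_DIRECTION_IN };   /* lines default to input */

    assert(mock_get_direction(&line) == LINE_DIRECTION_IN);
    line.dir = LINE_DIRECTION_OUT;
    assert(mock_get_direction(&line) == LINE_DIRECTION_OUT);
    return 0;
}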