Linux 4.16-rc2
-----BEGIN PGP SIGNATURE-----

iQFSBAABCAA8FiEEq68RxlopcLEwq+PEeb4+QwBBGIYFAlqKKI0eHHRvcnZhbGRz
QGxpbnV4LWZvdW5kYXRpb24ub3JnAAoJEHm+PkMAQRiGRNAH/0v3+nuJ0oiHE1Cl
fH89F9Ma17j8oTo28byRPi7X5XJfJAqANhHa209rguvnC27y3ew/l9k93HoxG12i
ttvyKFDQulQbytfJZXw8lhUyYGXVsTpyNaihPe/NtqPdIxNgfrXsUN9EIEtcnuS2
SiAj51jUySDRNR4ST6TOx4ulDm1zLrmA28WHOBNOTvDi4jTQMt1TsngHfF5AySBB
lD4RTRDDwWDWtdMI7euYSq019TiDXCxmwQ94vZjrqmjmSQcl/yCK/JzEV33SZslg
4WqGIllxONvP/UlwxZLaJ+RrslqxNgDVqQKwJdfYhGaWvpgPFtS1s86zW6IgyXny
02jJfD0=
=DLWn
-----END PGP SIGNATURE-----

Merge tag 'v4.16-rc2' into x86/platform, to pick up fixes

Signed-off-by: Ingo Molnar <mingo@kernel.org>
commit 24e8db62c1
@@ -0,0 +1,39 @@
+What:		/sys/devices/platform/dock.N/docked
+Date:		Dec, 2006
+KernelVersion:	2.6.19
+Contact:	linux-acpi@vger.kernel.org
+Description:
+		(RO) Value 1 or 0 indicates whether the software believes the
+		laptop is docked in a docking station.
+
+What:		/sys/devices/platform/dock.N/undock
+Date:		Dec, 2006
+KernelVersion:	2.6.19
+Contact:	linux-acpi@vger.kernel.org
+Description:
+		(WO) Writing to this file causes the software to initiate an
+		undock request to the firmware.
+
+What:		/sys/devices/platform/dock.N/uid
+Date:		Feb, 2007
+KernelVersion:	v2.6.21
+Contact:	linux-acpi@vger.kernel.org
+Description:
+		(RO) Displays the docking station the laptop is docked to.
+
+What:		/sys/devices/platform/dock.N/flags
+Date:		May, 2007
+KernelVersion:	v2.6.21
+Contact:	linux-acpi@vger.kernel.org
+Description:
+		(RO) Show dock station flags, useful for checking if undock
+		request has been made by the user (from the immediate_undock
+		option).
+
+What:		/sys/devices/platform/dock.N/type
+Date:		Aug, 2008
+KernelVersion:	v2.6.27
+Contact:	linux-acpi@vger.kernel.org
+Description:
+		(RO) Display the dock station type- dock_station, ata_bay or
+		battery_bay.
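As a usage note (not part of the patch): the dock attributes above are plain
text files, so a short userspace program can read them directly. This is a
hedged sketch; the dock.0 instance is an assumption, and systems without an
ACPI dock simply will not have the directory.

#include <stdio.h>

int main(void)
{
	/* "docked" is documented above as (RO), containing 1 or 0. */
	FILE *f = fopen("/sys/devices/platform/dock.0/docked", "r");
	int docked;

	if (!f) {
		perror("dock.0 not present on this system");
		return 1;
	}
	if (fscanf(f, "%d", &docked) == 1)
		printf("laptop is %sdocked\n", docked ? "" : "not ");
	fclose(f);
	return 0;
}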
@@ -108,6 +108,8 @@ Description:	CPU topology files that describe a logical CPU's relationship
 
 What:		/sys/devices/system/cpu/cpuidle/current_driver
+		/sys/devices/system/cpu/cpuidle/current_governer_ro
+		/sys/devices/system/cpu/cpuidle/available_governors
 		/sys/devices/system/cpu/cpuidle/current_governor
 Date:		September 2007
 Contact:	Linux kernel mailing list <linux-kernel@vger.kernel.org>
 Description:	Discover cpuidle policy and mechanism
 
@@ -119,13 +121,84 @@ Description:	Discover cpuidle policy and mechanism
 		Idle policy (governor) is differentiated from idle mechanism
 		(driver)
 
-		current_driver: displays current idle mechanism
+		current_driver: (RO) displays current idle mechanism
 
-		current_governor_ro: displays current idle policy
+		current_governor_ro: (RO) displays current idle policy
 
+		With the cpuidle_sysfs_switch boot option enabled (meant for
+		developer testing), the following three attributes are visible
+		instead:
+
+		current_driver: same as described above
+
+		available_governors: (RO) displays a space separated list of
+		available governors
+
+		current_governor: (RW) displays current idle policy. Users can
+		switch the governor at runtime by writing to this file.
+
+		See files in Documentation/cpuidle/ for more information.
+
+
+What:		/sys/devices/system/cpu/cpuX/cpuidle/stateN/name
+		/sys/devices/system/cpu/cpuX/cpuidle/stateN/latency
+		/sys/devices/system/cpu/cpuX/cpuidle/stateN/power
+		/sys/devices/system/cpu/cpuX/cpuidle/stateN/time
+		/sys/devices/system/cpu/cpuX/cpuidle/stateN/usage
+Date:		September 2007
+KernelVersion:	v2.6.24
+Contact:	Linux power management list <linux-pm@vger.kernel.org>
+Description:
+		The directory /sys/devices/system/cpu/cpuX/cpuidle contains per
+		logical CPU specific cpuidle information for each online cpu X.
+		The processor idle states which are available for use have the
+		following attributes:
+
+		name: (RO) Name of the idle state (string).
+
+		latency: (RO) The latency to exit out of this idle state (in
+		microseconds).
+
+		power: (RO) The power consumed while in this idle state (in
+		milliwatts).
+
+		time: (RO) The total time spent in this idle state (in microseconds).
+
+		usage: (RO) Number of times this state was entered (a count).
+
+
+What:		/sys/devices/system/cpu/cpuX/cpuidle/stateN/desc
+Date:		February 2008
+KernelVersion:	v2.6.25
+Contact:	Linux power management list <linux-pm@vger.kernel.org>
+Description:
+		(RO) A small description about the idle state (string).
+
+
+What:		/sys/devices/system/cpu/cpuX/cpuidle/stateN/disable
+Date:		March 2012
+KernelVersion:	v3.10
+Contact:	Linux power management list <linux-pm@vger.kernel.org>
+Description:
+		(RW) Option to disable this idle state (bool). The behavior and
+		the effect of the disable variable depends on the implementation
+		of a particular governor. In the ladder governor, for example,
+		it is not coherent, i.e. if one is disabling a light state, then
+		all deeper states are disabled as well, but the disable variable
+		does not reflect it. Likewise, if one enables a deep state but a
+		lighter state still is disabled, then this has no effect.
+
+
+What:		/sys/devices/system/cpu/cpuX/cpuidle/stateN/residency
+Date:		March 2014
+KernelVersion:	v3.15
+Contact:	Linux power management list <linux-pm@vger.kernel.org>
+Description:
+		(RO) Display the target residency i.e. the minimum amount of
+		time (in microseconds) this cpu should spend in this idle state
+		to make the transition worth the effort.
+
+
 What:		/sys/devices/system/cpu/cpu#/cpufreq/*
 Date:		pre-git history
 Contact:	linux-pm@vger.kernel.org
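A hedged illustration of the per-state layout documented above: the loop below
probes state0, state1, ... under cpu0 until a directory is missing. The paths
follow the What: entries; the choice of cpu0 and the buffer sizes are
assumptions for the sketch.

#include <stdio.h>
#include <string.h>

static int read_line(const char *path, char *buf, size_t len)
{
	FILE *f = fopen(path, "r");

	if (!f || !fgets(buf, (int)len, f)) {
		if (f)
			fclose(f);
		return -1;
	}
	buf[strcspn(buf, "\n")] = '\0';	/* strip trailing newline */
	fclose(f);
	return 0;
}

int main(void)
{
	char path[128], name[64], usage[64];
	int n;

	for (n = 0; ; n++) {
		snprintf(path, sizeof(path),
			 "/sys/devices/system/cpu/cpu0/cpuidle/state%d/name", n);
		if (read_line(path, name, sizeof(name)))
			break;	/* stateN missing: no more states */
		snprintf(path, sizeof(path),
			 "/sys/devices/system/cpu/cpu0/cpuidle/state%d/usage", n);
		if (read_line(path, usage, sizeof(usage)))
			break;
		printf("state%d: name=%s usage=%s\n", n, name, usage);
	}
	return 0;
}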
@@ -0,0 +1,40 @@
+What:		/sys/bus/platform/devices/INT3407:00/dptf_power/charger_type
+Date:		Jul, 2016
+KernelVersion:	v4.10
+Contact:	linux-acpi@vger.kernel.org
+Description:
+		(RO) The charger type - Traditional, Hybrid or NVDC.
+
+What:		/sys/bus/platform/devices/INT3407:00/dptf_power/adapter_rating_mw
+Date:		Jul, 2016
+KernelVersion:	v4.10
+Contact:	linux-acpi@vger.kernel.org
+Description:
+		(RO) Adapter rating in milliwatts (the maximum Adapter power).
+		Must be 0 if no AC Adaptor is plugged in.
+
+What:		/sys/bus/platform/devices/INT3407:00/dptf_power/max_platform_power_mw
+Date:		Jul, 2016
+KernelVersion:	v4.10
+Contact:	linux-acpi@vger.kernel.org
+Description:
+		(RO) Maximum platform power that can be supported by the battery
+		in milliwatts.
+
+What:		/sys/bus/platform/devices/INT3407:00/dptf_power/platform_power_source
+Date:		Jul, 2016
+KernelVersion:	v4.10
+Contact:	linux-acpi@vger.kernel.org
+Description:
+		(RO) Display the platform power source
+		0x00 = DC
+		0x01 = AC
+		0x02 = USB
+		0x03 = Wireless Charger
+
+What:		/sys/bus/platform/devices/INT3407:00/dptf_power/battery_steady_power
+Date:		Jul, 2016
+KernelVersion:	v4.10
+Contact:	linux-acpi@vger.kernel.org
+Description:
+		(RO) The maximum sustained power for battery in milliwatts.
@@ -58,7 +58,12 @@ Like with atomic_t, the rule of thumb is:
 
 - RMW operations that have a return value are fully ordered.
 
-Except for test_and_set_bit_lock() which has ACQUIRE semantics and
+- RMW operations that are conditional are unordered on FAILURE,
+  otherwise the above rules apply. In the case of test_and_{}_bit() operations,
+  if the bit in memory is unchanged by the operation then it is deemed to have
+  failed.
+
+Except for a successful test_and_set_bit_lock() which has ACQUIRE semantics and
 clear_bit_unlock() which has RELEASE semantics.
 
 Since a platform only has a single means of achieving atomic operations
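The ACQUIRE/RELEASE pairing described above can be modeled in userspace with
C11 atomics. This is a sketch of the semantics, not the kernel's
implementation; note the kernel only guarantees ACQUIRE on a *successful*
test_and_set_bit_lock(), whereas the fetch-or below orders the failed case too
(strictly stronger, which is safe but not required).

#include <stdatomic.h>
#include <stdbool.h>

static _Atomic unsigned long word;	/* bit 0 is the lock bit */

static bool test_and_set_bit_lock_model(void)
{
	/* Conditional RMW: "succeeds" only if the bit was previously
	 * clear; that is the case where ACQUIRE ordering is needed. */
	unsigned long old = atomic_fetch_or_explicit(&word, 1UL,
						     memory_order_acquire);
	return (old & 1UL) == 0;	/* true = lock acquired */
}

static void clear_bit_unlock_model(void)
{
	/* RELEASE: accesses in the critical section are ordered before
	 * the bit clear becomes visible to the next acquirer. */
	atomic_fetch_and_explicit(&word, ~1UL, memory_order_release);
}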
@@ -0,0 +1,62 @@
+#
+# Feature name:          membarrier-sync-core
+#         Kconfig:       ARCH_HAS_MEMBARRIER_SYNC_CORE
+#         description:   arch supports core serializing membarrier
+#
+# Architecture requirements
+#
+# * arm64
+#
+# Rely on eret context synchronization when returning from IPI handler, and
+# when returning to user-space.
+#
+# * x86
+#
+# x86-32 uses IRET as return from interrupt, which takes care of the IPI.
+# However, it uses both IRET and SYSEXIT to go back to user-space. The IRET
+# instruction is core serializing, but not SYSEXIT.
+#
+# x86-64 uses IRET as return from interrupt, which takes care of the IPI.
+# However, it can return to user-space through either SYSRETL (compat code),
+# SYSRETQ, or IRET.
+#
+# Given that neither SYSRET{L,Q}, nor SYSEXIT, are core serializing, we rely
+# instead on write_cr3() performed by switch_mm() to provide core serialization
+# after changing the current mm, and deal with the special case of kthread ->
+# uthread (temporarily keeping current mm into active_mm) by issuing a
+# sync_core_before_usermode() in that specific case.
+#
+    -----------------------
+    |         arch |status|
+    -----------------------
+    |       alpha: | TODO |
+    |         arc: | TODO |
+    |         arm: | TODO |
+    |       arm64: |  ok  |
+    |    blackfin: | TODO |
+    |         c6x: | TODO |
+    |        cris: | TODO |
+    |         frv: | TODO |
+    |       h8300: | TODO |
+    |     hexagon: | TODO |
+    |        ia64: | TODO |
+    |        m32r: | TODO |
+    |        m68k: | TODO |
+    |       metag: | TODO |
+    |  microblaze: | TODO |
+    |        mips: | TODO |
+    |     mn10300: | TODO |
+    |       nios2: | TODO |
+    |    openrisc: | TODO |
+    |      parisc: | TODO |
+    |     powerpc: | TODO |
+    |        s390: | TODO |
+    |       score: | TODO |
+    |          sh: | TODO |
+    |       sparc: | TODO |
+    |        tile: | TODO |
+    |          um: | TODO |
+    |   unicore32: | TODO |
+    |         x86: |  ok  |
+    |      xtensa: | TODO |
+    -----------------------
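For context, the userspace side of this feature looks roughly as follows. The
command constants are taken from <linux/membarrier.h> as it stands in v4.16;
this is a hedged sketch, and it fails cleanly (ENOSYS/EINVAL) on kernels or
architectures without sync-core support.

#include <linux/membarrier.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <stdio.h>

int main(void)
{
	/* A process must register intent before using the private
	 * expedited sync-core command. */
	if (syscall(__NR_membarrier,
		    MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED_SYNC_CORE, 0)) {
		perror("membarrier register");
		return 1;
	}
	/* Every running thread of this process executes a core
	 * serializing instruction before returning to user-space. */
	if (syscall(__NR_membarrier,
		    MEMBARRIER_CMD_PRIVATE_EXPEDITED_SYNC_CORE, 0)) {
		perror("membarrier sync-core");
		return 1;
	}
	puts("sync-core membarrier completed");
	return 0;
}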
@@ -21,37 +21,23 @@ Implementation
 --------------
 
 Mutexes are represented by 'struct mutex', defined in include/linux/mutex.h
-and implemented in kernel/locking/mutex.c. These locks use a three
-state atomic counter (->count) to represent the different possible
-transitions that can occur during the lifetime of a lock:
-
-	  1: unlocked
-	  0: locked, no waiters
-   negative: locked, with potential waiters
-
-In its most basic form it also includes a wait-queue and a spinlock
-that serializes access to it. CONFIG_SMP systems can also include
-a pointer to the lock task owner (->owner) as well as a spinner MCS
-lock (->osq), both described below in (ii).
+and implemented in kernel/locking/mutex.c. These locks use an atomic variable
+(->owner) to keep track of the lock state during its lifetime. Field owner
+actually contains 'struct task_struct *' to the current lock owner and it is
+therefore NULL if not currently owned. Since task_struct pointers are aligned
+at at least L1_CACHE_BYTES, low bits (3) are used to store extra state (e.g.,
+if waiter list is non-empty). In its most basic form it also includes a
+wait-queue and a spinlock that serializes access to it. Furthermore,
+CONFIG_MUTEX_SPIN_ON_OWNER=y systems use a spinner MCS lock (->osq), described
+below in (ii).
 
 When acquiring a mutex, there are three possible paths that can be
 taken, depending on the state of the lock:
 
-(i) fastpath: tries to atomically acquire the lock by decrementing the
-    counter. If it was already taken by another task it goes to the next
-    possible path. This logic is architecture specific. On x86-64, the
-    locking fastpath is 2 instructions:
-
-    0000000000000e10 <mutex_lock>:
-    e21:   f0 ff 0b		lock decl (%rbx)
-    e24:   79 08		jns    e2e <mutex_lock+0x1e>
-
-    the unlocking fastpath is equally tight:
-
-    0000000000000bc0 <mutex_unlock>:
-    bc8:   f0 ff 07		lock incl (%rdi)
-    bcb:   7f 0a		jg     bd7 <mutex_unlock+0x17>
-
+(i) fastpath: tries to atomically acquire the lock by cmpxchg()ing the owner with
+    the current task. This only works in the uncontended case (cmpxchg() checks
+    against 0UL, so all 3 state bits above have to be 0). If the lock is
+    contended it goes to the next possible path.
 
 (ii) midpath: aka optimistic spinning, tries to spin for acquisition
      while the lock owner is running and there are no other tasks ready
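A minimal userspace model of the new fastpath, using C11 atomics: the lock
word stores the owner task pointer, whose three low bits double as flag bits,
and the uncontended acquire is a single compare-and-exchange of 0UL against
the current task. The names here (struct task, MUTEX_FLAGS) are stand-ins for
illustration, not the kernel's definitions.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

struct task { int dummy; };		/* stand-in for task_struct */

#define MUTEX_FLAGS	0x07UL		/* low 3 bits hold extra state */

struct model_mutex {
	_Atomic uintptr_t owner;	/* task pointer | flag bits */
};

static bool mutex_trylock_fast(struct model_mutex *lock, struct task *current)
{
	uintptr_t zero = 0UL;

	/* Succeeds only if the owner and all three flag bits are 0 --
	 * exactly the "cmpxchg() checks against 0UL" case above. */
	return atomic_compare_exchange_strong_explicit(&lock->owner, &zero,
			(uintptr_t)current, memory_order_acquire,
			memory_order_relaxed);
}

static struct task *mutex_owner(struct model_mutex *lock)
{
	/* Mask out the flag bits to recover the task pointer. */
	return (struct task *)(atomic_load_explicit(&lock->owner,
			memory_order_relaxed) & ~MUTEX_FLAGS);
}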
@@ -143,11 +129,10 @@ Test if the mutex is taken:
 Disadvantages
 -------------
 
-Unlike its original design and purpose, 'struct mutex' is larger than
-most locks in the kernel. E.g: on x86-64 it is 40 bytes, almost twice
-as large as 'struct semaphore' (24 bytes) and tied, along with rwsems,
-for the largest lock in the kernel. Larger structure sizes mean more
-CPU cache and memory footprint.
+Unlike its original design and purpose, 'struct mutex' is among the largest
+locks in the kernel. E.g: on x86-64 it is 32 bytes, where 'struct semaphore'
+is 24 bytes and rw_semaphore is 40 bytes. Larger structure sizes mean more CPU
+cache and memory footprint.
 
 When to use mutexes
 -------------------
 
diff --git a/Makefile b/Makefile
@@ -2,7 +2,7 @@
 VERSION = 4
 PATCHLEVEL = 16
 SUBLEVEL = 0
-EXTRAVERSION = -rc1
+EXTRAVERSION = -rc2
 NAME = Fearless Coyote
 
 # *DOCUMENTATION*
@@ -20,7 +20,7 @@
 
 #define MPIDR_UP_BITMASK	(0x1 << 30)
 #define MPIDR_MT_BITMASK	(0x1 << 24)
-#define MPIDR_HWID_BITMASK	0xff00ffffff
+#define MPIDR_HWID_BITMASK	0xff00ffffffUL
 
 #define MPIDR_LEVEL_BITS_SHIFT	3
 #define MPIDR_LEVEL_BITS	(1 << MPIDR_LEVEL_BITS_SHIFT)
@@ -22,7 +22,7 @@
 
 static inline pte_t huge_ptep_get(pte_t *ptep)
 {
-	return *ptep;
+	return READ_ONCE(*ptep);
 }
 
 
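Why this change (and the many like it below) matters, sketched outside the
kernel: for scalar types, READ_ONCE()/WRITE_ONCE() boil down to volatile
accesses, which stop the compiler from tearing, refetching, or caching a
page-table entry that the hardware walker may update concurrently. A
simplified model, not the kernel's exact definition:

#define READ_ONCE_MODEL(x)	(*(const volatile __typeof__(x) *)&(x))
#define WRITE_ONCE_MODEL(x, v)	(*(volatile __typeof__(x) *)&(x) = (v))

/* Forces a single, untorn load: plain "return *p;" would permit the
 * compiler to re-read the location or split the access. */
unsigned long example_load(unsigned long *p)
{
	return READ_ONCE_MODEL(*p);
}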
@@ -185,42 +185,42 @@ static inline pmd_t kvm_s2pmd_mkexec(pmd_t pmd)
 	return pmd;
 }
 
-static inline void kvm_set_s2pte_readonly(pte_t *pte)
+static inline void kvm_set_s2pte_readonly(pte_t *ptep)
 {
 	pteval_t old_pteval, pteval;
 
-	pteval = READ_ONCE(pte_val(*pte));
+	pteval = READ_ONCE(pte_val(*ptep));
 	do {
 		old_pteval = pteval;
 		pteval &= ~PTE_S2_RDWR;
 		pteval |= PTE_S2_RDONLY;
-		pteval = cmpxchg_relaxed(&pte_val(*pte), old_pteval, pteval);
+		pteval = cmpxchg_relaxed(&pte_val(*ptep), old_pteval, pteval);
 	} while (pteval != old_pteval);
 }
 
-static inline bool kvm_s2pte_readonly(pte_t *pte)
+static inline bool kvm_s2pte_readonly(pte_t *ptep)
 {
-	return (pte_val(*pte) & PTE_S2_RDWR) == PTE_S2_RDONLY;
+	return (READ_ONCE(pte_val(*ptep)) & PTE_S2_RDWR) == PTE_S2_RDONLY;
 }
 
-static inline bool kvm_s2pte_exec(pte_t *pte)
+static inline bool kvm_s2pte_exec(pte_t *ptep)
 {
-	return !(pte_val(*pte) & PTE_S2_XN);
+	return !(READ_ONCE(pte_val(*ptep)) & PTE_S2_XN);
 }
 
-static inline void kvm_set_s2pmd_readonly(pmd_t *pmd)
+static inline void kvm_set_s2pmd_readonly(pmd_t *pmdp)
 {
-	kvm_set_s2pte_readonly((pte_t *)pmd);
+	kvm_set_s2pte_readonly((pte_t *)pmdp);
 }
 
-static inline bool kvm_s2pmd_readonly(pmd_t *pmd)
+static inline bool kvm_s2pmd_readonly(pmd_t *pmdp)
 {
-	return kvm_s2pte_readonly((pte_t *)pmd);
+	return kvm_s2pte_readonly((pte_t *)pmdp);
 }
 
-static inline bool kvm_s2pmd_exec(pmd_t *pmd)
+static inline bool kvm_s2pmd_exec(pmd_t *pmdp)
 {
-	return !(pmd_val(*pmd) & PMD_S2_XN);
+	return !(READ_ONCE(pmd_val(*pmdp)) & PMD_S2_XN);
 }
 
 static inline bool kvm_page_empty(void *ptr)
@@ -141,13 +141,13 @@ static inline void cpu_install_idmap(void)
  * Atomically replaces the active TTBR1_EL1 PGD with a new VA-compatible PGD,
  * avoiding the possibility of conflicting TLB entries being allocated.
  */
-static inline void cpu_replace_ttbr1(pgd_t *pgd)
+static inline void cpu_replace_ttbr1(pgd_t *pgdp)
 {
 	typedef void (ttbr_replace_func)(phys_addr_t);
 	extern ttbr_replace_func idmap_cpu_replace_ttbr1;
 	ttbr_replace_func *replace_phys;
 
-	phys_addr_t pgd_phys = virt_to_phys(pgd);
+	phys_addr_t pgd_phys = virt_to_phys(pgdp);
 
 	replace_phys = (void *)__pa_symbol(idmap_cpu_replace_ttbr1);
 
@@ -36,23 +36,23 @@ static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
 	return (pmd_t *)__get_free_page(PGALLOC_GFP);
 }
 
-static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
+static inline void pmd_free(struct mm_struct *mm, pmd_t *pmdp)
 {
-	BUG_ON((unsigned long)pmd & (PAGE_SIZE-1));
-	free_page((unsigned long)pmd);
+	BUG_ON((unsigned long)pmdp & (PAGE_SIZE-1));
+	free_page((unsigned long)pmdp);
 }
 
-static inline void __pud_populate(pud_t *pud, phys_addr_t pmd, pudval_t prot)
+static inline void __pud_populate(pud_t *pudp, phys_addr_t pmdp, pudval_t prot)
 {
-	set_pud(pud, __pud(__phys_to_pud_val(pmd) | prot));
+	set_pud(pudp, __pud(__phys_to_pud_val(pmdp) | prot));
 }
 
-static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
+static inline void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmdp)
 {
-	__pud_populate(pud, __pa(pmd), PMD_TYPE_TABLE);
+	__pud_populate(pudp, __pa(pmdp), PMD_TYPE_TABLE);
 }
 #else
-static inline void __pud_populate(pud_t *pud, phys_addr_t pmd, pudval_t prot)
+static inline void __pud_populate(pud_t *pudp, phys_addr_t pmdp, pudval_t prot)
 {
 	BUILD_BUG();
 }
@@ -65,30 +65,30 @@ static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
 	return (pud_t *)__get_free_page(PGALLOC_GFP);
 }
 
-static inline void pud_free(struct mm_struct *mm, pud_t *pud)
+static inline void pud_free(struct mm_struct *mm, pud_t *pudp)
 {
-	BUG_ON((unsigned long)pud & (PAGE_SIZE-1));
-	free_page((unsigned long)pud);
+	BUG_ON((unsigned long)pudp & (PAGE_SIZE-1));
+	free_page((unsigned long)pudp);
 }
 
-static inline void __pgd_populate(pgd_t *pgdp, phys_addr_t pud, pgdval_t prot)
+static inline void __pgd_populate(pgd_t *pgdp, phys_addr_t pudp, pgdval_t prot)
 {
-	set_pgd(pgdp, __pgd(__phys_to_pgd_val(pud) | prot));
+	set_pgd(pgdp, __pgd(__phys_to_pgd_val(pudp) | prot));
 }
 
-static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pud_t *pud)
+static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgdp, pud_t *pudp)
 {
-	__pgd_populate(pgd, __pa(pud), PUD_TYPE_TABLE);
+	__pgd_populate(pgdp, __pa(pudp), PUD_TYPE_TABLE);
 }
 #else
-static inline void __pgd_populate(pgd_t *pgdp, phys_addr_t pud, pgdval_t prot)
+static inline void __pgd_populate(pgd_t *pgdp, phys_addr_t pudp, pgdval_t prot)
 {
 	BUILD_BUG();
 }
 #endif /* CONFIG_PGTABLE_LEVELS > 3 */
 
 extern pgd_t *pgd_alloc(struct mm_struct *mm);
-extern void pgd_free(struct mm_struct *mm, pgd_t *pgd);
+extern void pgd_free(struct mm_struct *mm, pgd_t *pgdp);
 
 static inline pte_t *
 pte_alloc_one_kernel(struct mm_struct *mm, unsigned long addr)
@@ -114,10 +114,10 @@ pte_alloc_one(struct mm_struct *mm, unsigned long addr)
 /*
  * Free a PTE table.
  */
-static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
+static inline void pte_free_kernel(struct mm_struct *mm, pte_t *ptep)
 {
-	if (pte)
-		free_page((unsigned long)pte);
+	if (ptep)
+		free_page((unsigned long)ptep);
 }
 
 static inline void pte_free(struct mm_struct *mm, pgtable_t pte)
@@ -126,10 +126,10 @@ static inline void pte_free(struct mm_struct *mm, pgtable_t pte)
 	__free_page(pte);
 }
 
-static inline void __pmd_populate(pmd_t *pmdp, phys_addr_t pte,
+static inline void __pmd_populate(pmd_t *pmdp, phys_addr_t ptep,
 				  pmdval_t prot)
 {
-	set_pmd(pmdp, __pmd(__phys_to_pmd_val(pte) | prot));
+	set_pmd(pmdp, __pmd(__phys_to_pmd_val(ptep) | prot));
 }
 
 /*
@@ -218,7 +218,7 @@ static inline pmd_t pmd_mkcont(pmd_t pmd)
 
 static inline void set_pte(pte_t *ptep, pte_t pte)
 {
-	*ptep = pte;
+	WRITE_ONCE(*ptep, pte);
 
 	/*
 	 * Only if the new pte is valid and kernel, otherwise TLB maintenance
@@ -250,6 +250,8 @@ extern void __sync_icache_dcache(pte_t pteval, unsigned long addr);
 static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
 			      pte_t *ptep, pte_t pte)
 {
+	pte_t old_pte;
+
 	if (pte_present(pte) && pte_user_exec(pte) && !pte_special(pte))
 		__sync_icache_dcache(pte, addr);
 
@@ -258,14 +260,15 @@ static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
 	 * hardware updates of the pte (ptep_set_access_flags safely changes
 	 * valid ptes without going through an invalid entry).
 	 */
-	if (IS_ENABLED(CONFIG_DEBUG_VM) && pte_valid(*ptep) && pte_valid(pte) &&
+	old_pte = READ_ONCE(*ptep);
+	if (IS_ENABLED(CONFIG_DEBUG_VM) && pte_valid(old_pte) && pte_valid(pte) &&
 	    (mm == current->active_mm || atomic_read(&mm->mm_users) > 1)) {
 		VM_WARN_ONCE(!pte_young(pte),
 			     "%s: racy access flag clearing: 0x%016llx -> 0x%016llx",
-			     __func__, pte_val(*ptep), pte_val(pte));
-		VM_WARN_ONCE(pte_write(*ptep) && !pte_dirty(pte),
+			     __func__, pte_val(old_pte), pte_val(pte));
+		VM_WARN_ONCE(pte_write(old_pte) && !pte_dirty(pte),
 			     "%s: racy dirty state clearing: 0x%016llx -> 0x%016llx",
-			     __func__, pte_val(*ptep), pte_val(pte));
+			     __func__, pte_val(old_pte), pte_val(pte));
 	}
 
 	set_pte(ptep, pte);
@@ -431,7 +434,7 @@ extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
 
 static inline void set_pmd(pmd_t *pmdp, pmd_t pmd)
 {
-	*pmdp = pmd;
+	WRITE_ONCE(*pmdp, pmd);
 	dsb(ishst);
 	isb();
 }
@@ -482,7 +485,7 @@ static inline phys_addr_t pmd_page_paddr(pmd_t pmd)
 
 static inline void set_pud(pud_t *pudp, pud_t pud)
 {
-	*pudp = pud;
+	WRITE_ONCE(*pudp, pud);
 	dsb(ishst);
 	isb();
 }
@@ -500,7 +503,7 @@ static inline phys_addr_t pud_page_paddr(pud_t pud)
 /* Find an entry in the second-level page table. */
 #define pmd_index(addr)		(((addr) >> PMD_SHIFT) & (PTRS_PER_PMD - 1))
 
-#define pmd_offset_phys(dir, addr)	(pud_page_paddr(*(dir)) + pmd_index(addr) * sizeof(pmd_t))
+#define pmd_offset_phys(dir, addr)	(pud_page_paddr(READ_ONCE(*(dir))) + pmd_index(addr) * sizeof(pmd_t))
 #define pmd_offset(dir, addr)		((pmd_t *)__va(pmd_offset_phys((dir), (addr))))
 
 #define pmd_set_fixmap(addr)		((pmd_t *)set_fixmap_offset(FIX_PMD, addr))
@@ -535,7 +538,7 @@ static inline phys_addr_t pud_page_paddr(pud_t pud)
 
 static inline void set_pgd(pgd_t *pgdp, pgd_t pgd)
 {
-	*pgdp = pgd;
+	WRITE_ONCE(*pgdp, pgd);
 	dsb(ishst);
 }
 
@@ -552,7 +555,7 @@ static inline phys_addr_t pgd_page_paddr(pgd_t pgd)
 /* Find an entry in the frst-level page table. */
 #define pud_index(addr)		(((addr) >> PUD_SHIFT) & (PTRS_PER_PUD - 1))
 
-#define pud_offset_phys(dir, addr)	(pgd_page_paddr(*(dir)) + pud_index(addr) * sizeof(pud_t))
+#define pud_offset_phys(dir, addr)	(pgd_page_paddr(READ_ONCE(*(dir))) + pud_index(addr) * sizeof(pud_t))
 #define pud_offset(dir, addr)		((pud_t *)__va(pud_offset_phys((dir), (addr))))
 
 #define pud_set_fixmap(addr)		((pud_t *)set_fixmap_offset(FIX_PUD, addr))
@@ -406,6 +406,15 @@ const struct arm64_cpu_capabilities arm64_errata[] = {
 		.capability = ARM64_HARDEN_BP_POST_GUEST_EXIT,
 		MIDR_ALL_VERSIONS(MIDR_QCOM_FALKOR_V1),
 	},
+	{
+		.capability = ARM64_HARDEN_BRANCH_PREDICTOR,
+		MIDR_ALL_VERSIONS(MIDR_QCOM_FALKOR),
+		.enable = qcom_enable_link_stack_sanitization,
+	},
+	{
+		.capability = ARM64_HARDEN_BP_POST_GUEST_EXIT,
+		MIDR_ALL_VERSIONS(MIDR_QCOM_FALKOR),
+	},
 	{
 		.capability = ARM64_HARDEN_BRANCH_PREDICTOR,
 		MIDR_ALL_VERSIONS(MIDR_BRCM_VULCAN),
@@ -90,7 +90,7 @@ static int __init set_permissions(pte_t *ptep, pgtable_t token,
 				  unsigned long addr, void *data)
 {
 	efi_memory_desc_t *md = data;
-	pte_t pte = *ptep;
+	pte_t pte = READ_ONCE(*ptep);
 
 	if (md->attribute & EFI_MEMORY_RO)
 		pte = set_pte_bit(pte, __pgprot(PTE_RDONLY));
@@ -202,10 +202,10 @@ static int create_safe_exec_page(void *src_start, size_t length,
 				 gfp_t mask)
 {
 	int rc = 0;
-	pgd_t *pgd;
-	pud_t *pud;
-	pmd_t *pmd;
-	pte_t *pte;
+	pgd_t *pgdp;
+	pud_t *pudp;
+	pmd_t *pmdp;
+	pte_t *ptep;
 	unsigned long dst = (unsigned long)allocator(mask);
 
 	if (!dst) {
@@ -216,38 +216,38 @@ static int create_safe_exec_page(void *src_start, size_t length,
 	memcpy((void *)dst, src_start, length);
 	flush_icache_range(dst, dst + length);
 
-	pgd = pgd_offset_raw(allocator(mask), dst_addr);
-	if (pgd_none(*pgd)) {
-		pud = allocator(mask);
-		if (!pud) {
+	pgdp = pgd_offset_raw(allocator(mask), dst_addr);
+	if (pgd_none(READ_ONCE(*pgdp))) {
+		pudp = allocator(mask);
+		if (!pudp) {
 			rc = -ENOMEM;
 			goto out;
 		}
-		pgd_populate(&init_mm, pgd, pud);
+		pgd_populate(&init_mm, pgdp, pudp);
 	}
 
-	pud = pud_offset(pgd, dst_addr);
-	if (pud_none(*pud)) {
-		pmd = allocator(mask);
-		if (!pmd) {
+	pudp = pud_offset(pgdp, dst_addr);
+	if (pud_none(READ_ONCE(*pudp))) {
+		pmdp = allocator(mask);
+		if (!pmdp) {
 			rc = -ENOMEM;
 			goto out;
 		}
-		pud_populate(&init_mm, pud, pmd);
+		pud_populate(&init_mm, pudp, pmdp);
 	}
 
-	pmd = pmd_offset(pud, dst_addr);
-	if (pmd_none(*pmd)) {
-		pte = allocator(mask);
-		if (!pte) {
+	pmdp = pmd_offset(pudp, dst_addr);
+	if (pmd_none(READ_ONCE(*pmdp))) {
+		ptep = allocator(mask);
+		if (!ptep) {
 			rc = -ENOMEM;
 			goto out;
 		}
-		pmd_populate_kernel(&init_mm, pmd, pte);
+		pmd_populate_kernel(&init_mm, pmdp, ptep);
 	}
 
-	pte = pte_offset_kernel(pmd, dst_addr);
-	set_pte(pte, pfn_pte(virt_to_pfn(dst), PAGE_KERNEL_EXEC));
+	ptep = pte_offset_kernel(pmdp, dst_addr);
+	set_pte(ptep, pfn_pte(virt_to_pfn(dst), PAGE_KERNEL_EXEC));
 
 	/*
 	 * Load our new page tables. A strict BBM approach requires that we
@@ -263,7 +263,7 @@ static int create_safe_exec_page(void *src_start, size_t length,
 	 */
 	cpu_set_reserved_ttbr0();
 	local_flush_tlb_all();
-	write_sysreg(phys_to_ttbr(virt_to_phys(pgd)), ttbr0_el1);
+	write_sysreg(phys_to_ttbr(virt_to_phys(pgdp)), ttbr0_el1);
 	isb();
 
 	*phys_dst_addr = virt_to_phys((void *)dst);
@@ -320,9 +320,9 @@ int swsusp_arch_suspend(void)
 	return ret;
 }
 
-static void _copy_pte(pte_t *dst_pte, pte_t *src_pte, unsigned long addr)
+static void _copy_pte(pte_t *dst_ptep, pte_t *src_ptep, unsigned long addr)
 {
-	pte_t pte = *src_pte;
+	pte_t pte = READ_ONCE(*src_ptep);
 
 	if (pte_valid(pte)) {
 		/*
@@ -330,7 +330,7 @@ static void _copy_pte(pte_t *dst_pte, pte_t *src_pte, unsigned long addr)
 		 * read only (code, rodata). Clear the RDONLY bit from
 		 * the temporary mappings we use during restore.
 		 */
-		set_pte(dst_pte, pte_mkwrite(pte));
+		set_pte(dst_ptep, pte_mkwrite(pte));
 	} else if (debug_pagealloc_enabled() && !pte_none(pte)) {
 		/*
 		 * debug_pagealloc will removed the PTE_VALID bit if
@@ -343,112 +343,116 @@ static void _copy_pte(pte_t *dst_pte, pte_t *src_pte, unsigned long addr)
 		 */
 		BUG_ON(!pfn_valid(pte_pfn(pte)));
 
-		set_pte(dst_pte, pte_mkpresent(pte_mkwrite(pte)));
+		set_pte(dst_ptep, pte_mkpresent(pte_mkwrite(pte)));
 	}
 }
 
-static int copy_pte(pmd_t *dst_pmd, pmd_t *src_pmd, unsigned long start,
+static int copy_pte(pmd_t *dst_pmdp, pmd_t *src_pmdp, unsigned long start,
 		    unsigned long end)
 {
-	pte_t *src_pte;
-	pte_t *dst_pte;
+	pte_t *src_ptep;
+	pte_t *dst_ptep;
 	unsigned long addr = start;
 
-	dst_pte = (pte_t *)get_safe_page(GFP_ATOMIC);
-	if (!dst_pte)
+	dst_ptep = (pte_t *)get_safe_page(GFP_ATOMIC);
+	if (!dst_ptep)
 		return -ENOMEM;
-	pmd_populate_kernel(&init_mm, dst_pmd, dst_pte);
-	dst_pte = pte_offset_kernel(dst_pmd, start);
+	pmd_populate_kernel(&init_mm, dst_pmdp, dst_ptep);
+	dst_ptep = pte_offset_kernel(dst_pmdp, start);
 
-	src_pte = pte_offset_kernel(src_pmd, start);
+	src_ptep = pte_offset_kernel(src_pmdp, start);
 	do {
-		_copy_pte(dst_pte, src_pte, addr);
-	} while (dst_pte++, src_pte++, addr += PAGE_SIZE, addr != end);
+		_copy_pte(dst_ptep, src_ptep, addr);
+	} while (dst_ptep++, src_ptep++, addr += PAGE_SIZE, addr != end);
 
 	return 0;
 }
 
-static int copy_pmd(pud_t *dst_pud, pud_t *src_pud, unsigned long start,
+static int copy_pmd(pud_t *dst_pudp, pud_t *src_pudp, unsigned long start,
 		    unsigned long end)
 {
-	pmd_t *src_pmd;
-	pmd_t *dst_pmd;
+	pmd_t *src_pmdp;
+	pmd_t *dst_pmdp;
 	unsigned long next;
 	unsigned long addr = start;
 
-	if (pud_none(*dst_pud)) {
-		dst_pmd = (pmd_t *)get_safe_page(GFP_ATOMIC);
-		if (!dst_pmd)
+	if (pud_none(READ_ONCE(*dst_pudp))) {
+		dst_pmdp = (pmd_t *)get_safe_page(GFP_ATOMIC);
+		if (!dst_pmdp)
 			return -ENOMEM;
-		pud_populate(&init_mm, dst_pud, dst_pmd);
+		pud_populate(&init_mm, dst_pudp, dst_pmdp);
 	}
-	dst_pmd = pmd_offset(dst_pud, start);
+	dst_pmdp = pmd_offset(dst_pudp, start);
 
-	src_pmd = pmd_offset(src_pud, start);
+	src_pmdp = pmd_offset(src_pudp, start);
 	do {
+		pmd_t pmd = READ_ONCE(*src_pmdp);
+
 		next = pmd_addr_end(addr, end);
-		if (pmd_none(*src_pmd))
+		if (pmd_none(pmd))
 			continue;
-		if (pmd_table(*src_pmd)) {
-			if (copy_pte(dst_pmd, src_pmd, addr, next))
+		if (pmd_table(pmd)) {
+			if (copy_pte(dst_pmdp, src_pmdp, addr, next))
 				return -ENOMEM;
 		} else {
-			set_pmd(dst_pmd,
-				__pmd(pmd_val(*src_pmd) & ~PMD_SECT_RDONLY));
+			set_pmd(dst_pmdp,
+				__pmd(pmd_val(pmd) & ~PMD_SECT_RDONLY));
 		}
-	} while (dst_pmd++, src_pmd++, addr = next, addr != end);
+	} while (dst_pmdp++, src_pmdp++, addr = next, addr != end);
 
 	return 0;
 }
 
-static int copy_pud(pgd_t *dst_pgd, pgd_t *src_pgd, unsigned long start,
+static int copy_pud(pgd_t *dst_pgdp, pgd_t *src_pgdp, unsigned long start,
 		    unsigned long end)
 {
-	pud_t *dst_pud;
-	pud_t *src_pud;
+	pud_t *dst_pudp;
+	pud_t *src_pudp;
 	unsigned long next;
 	unsigned long addr = start;
 
-	if (pgd_none(*dst_pgd)) {
-		dst_pud = (pud_t *)get_safe_page(GFP_ATOMIC);
-		if (!dst_pud)
+	if (pgd_none(READ_ONCE(*dst_pgdp))) {
+		dst_pudp = (pud_t *)get_safe_page(GFP_ATOMIC);
+		if (!dst_pudp)
 			return -ENOMEM;
-		pgd_populate(&init_mm, dst_pgd, dst_pud);
+		pgd_populate(&init_mm, dst_pgdp, dst_pudp);
 	}
-	dst_pud = pud_offset(dst_pgd, start);
+	dst_pudp = pud_offset(dst_pgdp, start);
 
-	src_pud = pud_offset(src_pgd, start);
+	src_pudp = pud_offset(src_pgdp, start);
 	do {
+		pud_t pud = READ_ONCE(*src_pudp);
+
 		next = pud_addr_end(addr, end);
-		if (pud_none(*src_pud))
+		if (pud_none(pud))
 			continue;
-		if (pud_table(*(src_pud))) {
-			if (copy_pmd(dst_pud, src_pud, addr, next))
+		if (pud_table(pud)) {
+			if (copy_pmd(dst_pudp, src_pudp, addr, next))
 				return -ENOMEM;
 		} else {
-			set_pud(dst_pud,
-				__pud(pud_val(*src_pud) & ~PMD_SECT_RDONLY));
+			set_pud(dst_pudp,
+				__pud(pud_val(pud) & ~PMD_SECT_RDONLY));
 		}
-	} while (dst_pud++, src_pud++, addr = next, addr != end);
+	} while (dst_pudp++, src_pudp++, addr = next, addr != end);
 
 	return 0;
 }
 
-static int copy_page_tables(pgd_t *dst_pgd, unsigned long start,
+static int copy_page_tables(pgd_t *dst_pgdp, unsigned long start,
 			    unsigned long end)
 {
 	unsigned long next;
 	unsigned long addr = start;
-	pgd_t *src_pgd = pgd_offset_k(start);
+	pgd_t *src_pgdp = pgd_offset_k(start);
 
-	dst_pgd = pgd_offset_raw(dst_pgd, start);
+	dst_pgdp = pgd_offset_raw(dst_pgdp, start);
 	do {
 		next = pgd_addr_end(addr, end);
-		if (pgd_none(*src_pgd))
+		if (pgd_none(READ_ONCE(*src_pgdp)))
 			continue;
-		if (copy_pud(dst_pgd, src_pgd, addr, next))
+		if (copy_pud(dst_pgdp, src_pgdp, addr, next))
 			return -ENOMEM;
-	} while (dst_pgd++, src_pgd++, addr = next, addr != end);
+	} while (dst_pgdp++, src_pgdp++, addr = next, addr != end);
 
 	return 0;
 }
@@ -407,8 +407,10 @@ int __hyp_text __kvm_vcpu_run(struct kvm_vcpu *vcpu)
 		u32 midr = read_cpuid_id();
 
 		/* Apply BTAC predictors mitigation to all Falkor chips */
-		if ((midr & MIDR_CPU_MODEL_MASK) == MIDR_QCOM_FALKOR_V1)
+		if (((midr & MIDR_CPU_MODEL_MASK) == MIDR_QCOM_FALKOR) ||
+		    ((midr & MIDR_CPU_MODEL_MASK) == MIDR_QCOM_FALKOR_V1)) {
 			__qcom_hyp_sanitize_btac_predictors();
+		}
 	}
 
 	fp_enabled = __fpsimd_enabled();
@@ -286,48 +286,52 @@ static void note_page(struct pg_state *st, unsigned long addr, unsigned level,
 
 }
 
-static void walk_pte(struct pg_state *st, pmd_t *pmd, unsigned long start)
+static void walk_pte(struct pg_state *st, pmd_t *pmdp, unsigned long start)
 {
-	pte_t *pte = pte_offset_kernel(pmd, 0UL);
+	pte_t *ptep = pte_offset_kernel(pmdp, 0UL);
 	unsigned long addr;
 	unsigned i;
 
-	for (i = 0; i < PTRS_PER_PTE; i++, pte++) {
+	for (i = 0; i < PTRS_PER_PTE; i++, ptep++) {
 		addr = start + i * PAGE_SIZE;
-		note_page(st, addr, 4, pte_val(*pte));
+		note_page(st, addr, 4, READ_ONCE(pte_val(*ptep)));
 	}
 }
 
-static void walk_pmd(struct pg_state *st, pud_t *pud, unsigned long start)
+static void walk_pmd(struct pg_state *st, pud_t *pudp, unsigned long start)
 {
-	pmd_t *pmd = pmd_offset(pud, 0UL);
+	pmd_t *pmdp = pmd_offset(pudp, 0UL);
 	unsigned long addr;
 	unsigned i;
 
-	for (i = 0; i < PTRS_PER_PMD; i++, pmd++) {
+	for (i = 0; i < PTRS_PER_PMD; i++, pmdp++) {
+		pmd_t pmd = READ_ONCE(*pmdp);
+
 		addr = start + i * PMD_SIZE;
-		if (pmd_none(*pmd) || pmd_sect(*pmd)) {
-			note_page(st, addr, 3, pmd_val(*pmd));
+		if (pmd_none(pmd) || pmd_sect(pmd)) {
+			note_page(st, addr, 3, pmd_val(pmd));
 		} else {
-			BUG_ON(pmd_bad(*pmd));
-			walk_pte(st, pmd, addr);
+			BUG_ON(pmd_bad(pmd));
+			walk_pte(st, pmdp, addr);
 		}
 	}
 }
 
-static void walk_pud(struct pg_state *st, pgd_t *pgd, unsigned long start)
+static void walk_pud(struct pg_state *st, pgd_t *pgdp, unsigned long start)
 {
-	pud_t *pud = pud_offset(pgd, 0UL);
+	pud_t *pudp = pud_offset(pgdp, 0UL);
 	unsigned long addr;
 	unsigned i;
 
-	for (i = 0; i < PTRS_PER_PUD; i++, pud++) {
+	for (i = 0; i < PTRS_PER_PUD; i++, pudp++) {
+		pud_t pud = READ_ONCE(*pudp);
+
 		addr = start + i * PUD_SIZE;
-		if (pud_none(*pud) || pud_sect(*pud)) {
-			note_page(st, addr, 2, pud_val(*pud));
+		if (pud_none(pud) || pud_sect(pud)) {
+			note_page(st, addr, 2, pud_val(pud));
 		} else {
-			BUG_ON(pud_bad(*pud));
-			walk_pmd(st, pud, addr);
+			BUG_ON(pud_bad(pud));
+			walk_pmd(st, pudp, addr);
 		}
 	}
 }
@@ -335,17 +339,19 @@ static void walk_pud(struct pg_state *st, pgd_t *pgd, unsigned long start)
 static void walk_pgd(struct pg_state *st, struct mm_struct *mm,
 		     unsigned long start)
 {
-	pgd_t *pgd = pgd_offset(mm, 0UL);
+	pgd_t *pgdp = pgd_offset(mm, 0UL);
 	unsigned i;
 	unsigned long addr;
 
-	for (i = 0; i < PTRS_PER_PGD; i++, pgd++) {
+	for (i = 0; i < PTRS_PER_PGD; i++, pgdp++) {
+		pgd_t pgd = READ_ONCE(*pgdp);
+
 		addr = start + i * PGDIR_SIZE;
-		if (pgd_none(*pgd)) {
-			note_page(st, addr, 1, pgd_val(*pgd));
+		if (pgd_none(pgd)) {
+			note_page(st, addr, 1, pgd_val(pgd));
 		} else {
-			BUG_ON(pgd_bad(*pgd));
-			walk_pud(st, pgd, addr);
+			BUG_ON(pgd_bad(pgd));
+			walk_pud(st, pgdp, addr);
 		}
 	}
 }
@@ -130,7 +130,8 @@ static void mem_abort_decode(unsigned int esr)
 void show_pte(unsigned long addr)
 {
 	struct mm_struct *mm;
-	pgd_t *pgd;
+	pgd_t *pgdp;
+	pgd_t pgd;
 
 	if (addr < TASK_SIZE) {
 		/* TTBR0 */
@@ -149,33 +150,37 @@ void show_pte(unsigned long addr)
 		return;
 	}
 
-	pr_alert("%s pgtable: %luk pages, %u-bit VAs, pgd = %p\n",
+	pr_alert("%s pgtable: %luk pages, %u-bit VAs, pgdp = %p\n",
 		 mm == &init_mm ? "swapper" : "user", PAGE_SIZE / SZ_1K,
 		 VA_BITS, mm->pgd);
-	pgd = pgd_offset(mm, addr);
-	pr_alert("[%016lx] *pgd=%016llx", addr, pgd_val(*pgd));
+	pgdp = pgd_offset(mm, addr);
+	pgd = READ_ONCE(*pgdp);
+	pr_alert("[%016lx] pgd=%016llx", addr, pgd_val(pgd));
 
 	do {
-		pud_t *pud;
-		pmd_t *pmd;
-		pte_t *pte;
+		pud_t *pudp, pud;
+		pmd_t *pmdp, pmd;
+		pte_t *ptep, pte;
 
-		if (pgd_none(*pgd) || pgd_bad(*pgd))
+		if (pgd_none(pgd) || pgd_bad(pgd))
 			break;
 
-		pud = pud_offset(pgd, addr);
-		pr_cont(", *pud=%016llx", pud_val(*pud));
-		if (pud_none(*pud) || pud_bad(*pud))
+		pudp = pud_offset(pgdp, addr);
+		pud = READ_ONCE(*pudp);
+		pr_cont(", pud=%016llx", pud_val(pud));
+		if (pud_none(pud) || pud_bad(pud))
 			break;
 
-		pmd = pmd_offset(pud, addr);
-		pr_cont(", *pmd=%016llx", pmd_val(*pmd));
-		if (pmd_none(*pmd) || pmd_bad(*pmd))
+		pmdp = pmd_offset(pudp, addr);
+		pmd = READ_ONCE(*pmdp);
+		pr_cont(", pmd=%016llx", pmd_val(pmd));
+		if (pmd_none(pmd) || pmd_bad(pmd))
 			break;
 
-		pte = pte_offset_map(pmd, addr);
-		pr_cont(", *pte=%016llx", pte_val(*pte));
-		pte_unmap(pte);
+		ptep = pte_offset_map(pmdp, addr);
+		pte = READ_ONCE(*ptep);
+		pr_cont(", pte=%016llx", pte_val(pte));
+		pte_unmap(ptep);
 	} while(0);
 
 	pr_cont("\n");
@@ -196,8 +201,9 @@ int ptep_set_access_flags(struct vm_area_struct *vma,
 			  pte_t entry, int dirty)
 {
 	pteval_t old_pteval, pteval;
+	pte_t pte = READ_ONCE(*ptep);
 
-	if (pte_same(*ptep, entry))
+	if (pte_same(pte, entry))
 		return 0;
 
 	/* only preserve the access flags and write permission */
@@ -210,7 +216,7 @@ int ptep_set_access_flags(struct vm_area_struct *vma,
 	 * (calculated as: a & b == ~(~a | ~b)).
 	 */
 	pte_val(entry) ^= PTE_RDONLY;
-	pteval = READ_ONCE(pte_val(*ptep));
+	pteval = pte_val(pte);
 	do {
 		old_pteval = pteval;
 		pteval ^= PTE_RDONLY;
@@ -54,14 +54,14 @@ static inline pgprot_t pte_pgprot(pte_t pte)
 static int find_num_contig(struct mm_struct *mm, unsigned long addr,
 			   pte_t *ptep, size_t *pgsize)
 {
-	pgd_t *pgd = pgd_offset(mm, addr);
-	pud_t *pud;
-	pmd_t *pmd;
+	pgd_t *pgdp = pgd_offset(mm, addr);
+	pud_t *pudp;
+	pmd_t *pmdp;
 
 	*pgsize = PAGE_SIZE;
-	pud = pud_offset(pgd, addr);
-	pmd = pmd_offset(pud, addr);
-	if ((pte_t *)pmd == ptep) {
+	pudp = pud_offset(pgdp, addr);
+	pmdp = pmd_offset(pudp, addr);
+	if ((pte_t *)pmdp == ptep) {
 		*pgsize = PMD_SIZE;
 		return CONT_PMDS;
 	}
@@ -181,11 +181,8 @@ void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
 
 	clear_flush(mm, addr, ptep, pgsize, ncontig);
 
-	for (i = 0; i < ncontig; i++, ptep++, addr += pgsize, pfn += dpfn) {
-		pr_debug("%s: set pte %p to 0x%llx\n", __func__, ptep,
-			 pte_val(pfn_pte(pfn, hugeprot)));
+	for (i = 0; i < ncontig; i++, ptep++, addr += pgsize, pfn += dpfn)
 		set_pte_at(mm, addr, ptep, pfn_pte(pfn, hugeprot));
-	}
 }
 
 void set_huge_swap_pte_at(struct mm_struct *mm, unsigned long addr,
@@ -203,20 +200,20 @@ void set_huge_swap_pte_at(struct mm_struct *mm, unsigned long addr,
 pte_t *huge_pte_alloc(struct mm_struct *mm,
 		      unsigned long addr, unsigned long sz)
 {
-	pgd_t *pgd;
-	pud_t *pud;
-	pte_t *pte = NULL;
+	pgd_t *pgdp;
+	pud_t *pudp;
+	pmd_t *pmdp;
+	pte_t *ptep = NULL;
 
-	pr_debug("%s: addr:0x%lx sz:0x%lx\n", __func__, addr, sz);
-	pgd = pgd_offset(mm, addr);
-	pud = pud_alloc(mm, pgd, addr);
-	if (!pud)
+	pgdp = pgd_offset(mm, addr);
+	pudp = pud_alloc(mm, pgdp, addr);
+	if (!pudp)
 		return NULL;
 
 	if (sz == PUD_SIZE) {
-		pte = (pte_t *)pud;
+		ptep = (pte_t *)pudp;
 	} else if (sz == (PAGE_SIZE * CONT_PTES)) {
-		pmd_t *pmd = pmd_alloc(mm, pud, addr);
+		pmdp = pmd_alloc(mm, pudp, addr);
 
 		WARN_ON(addr & (sz - 1));
 		/*
@@ -226,60 +223,55 @@ pte_t *huge_pte_alloc(struct mm_struct *mm,
 		 * will be no pte_unmap() to correspond with this
 		 * pte_alloc_map().
 		 */
-		pte = pte_alloc_map(mm, pmd, addr);
+		ptep = pte_alloc_map(mm, pmdp, addr);
 	} else if (sz == PMD_SIZE) {
 		if (IS_ENABLED(CONFIG_ARCH_WANT_HUGE_PMD_SHARE) &&
-		    pud_none(*pud))
-			pte = huge_pmd_share(mm, addr, pud);
+		    pud_none(READ_ONCE(*pudp)))
+			ptep = huge_pmd_share(mm, addr, pudp);
 		else
-			pte = (pte_t *)pmd_alloc(mm, pud, addr);
+			ptep = (pte_t *)pmd_alloc(mm, pudp, addr);
 	} else if (sz == (PMD_SIZE * CONT_PMDS)) {
-		pmd_t *pmd;
-
-		pmd = pmd_alloc(mm, pud, addr);
+		pmdp = pmd_alloc(mm, pudp, addr);
 		WARN_ON(addr & (sz - 1));
-		return (pte_t *)pmd;
+		return (pte_t *)pmdp;
 	}
 
-	pr_debug("%s: addr:0x%lx sz:0x%lx ret pte=%p/0x%llx\n", __func__, addr,
-		 sz, pte, pte_val(*pte));
-	return pte;
+	return ptep;
 }
 
 pte_t *huge_pte_offset(struct mm_struct *mm,
 		       unsigned long addr, unsigned long sz)
 {
-	pgd_t *pgd;
-	pud_t *pud;
-	pmd_t *pmd;
+	pgd_t *pgdp;
+	pud_t *pudp, pud;
+	pmd_t *pmdp, pmd;
 
-	pgd = pgd_offset(mm, addr);
-	pr_debug("%s: addr:0x%lx pgd:%p\n", __func__, addr, pgd);
-	if (!pgd_present(*pgd))
+	pgdp = pgd_offset(mm, addr);
+	if (!pgd_present(READ_ONCE(*pgdp)))
 		return NULL;
 
-	pud = pud_offset(pgd, addr);
-	if (sz != PUD_SIZE && pud_none(*pud))
+	pudp = pud_offset(pgdp, addr);
+	pud = READ_ONCE(*pudp);
+	if (sz != PUD_SIZE && pud_none(pud))
 		return NULL;
 	/* hugepage or swap? */
-	if (pud_huge(*pud) || !pud_present(*pud))
-		return (pte_t *)pud;
+	if (pud_huge(pud) || !pud_present(pud))
+		return (pte_t *)pudp;
 	/* table; check the next level */
 
 	if (sz == CONT_PMD_SIZE)
 		addr &= CONT_PMD_MASK;
 
-	pmd = pmd_offset(pud, addr);
+	pmdp = pmd_offset(pudp, addr);
+	pmd = READ_ONCE(*pmdp);
 	if (!(sz == PMD_SIZE || sz == CONT_PMD_SIZE) &&
-	    pmd_none(*pmd))
+	    pmd_none(pmd))
 		return NULL;
-	if (pmd_huge(*pmd) || !pmd_present(*pmd))
-		return (pte_t *)pmd;
+	if (pmd_huge(pmd) || !pmd_present(pmd))
+		return (pte_t *)pmdp;
 
-	if (sz == CONT_PTE_SIZE) {
-		pte_t *pte = pte_offset_kernel(pmd, (addr & CONT_PTE_MASK));
-		return pte;
-	}
+	if (sz == CONT_PTE_SIZE)
+		return pte_offset_kernel(pmdp, (addr & CONT_PTE_MASK));
 
 	return NULL;
 }
@@ -367,7 +359,7 @@ void huge_ptep_set_wrprotect(struct mm_struct *mm,
 	size_t pgsize;
 	pte_t pte;
 
-	if (!pte_cont(*ptep)) {
+	if (!pte_cont(READ_ONCE(*ptep))) {
 		ptep_set_wrprotect(mm, addr, ptep);
 		return;
 	}
@@ -391,7 +383,7 @@ void huge_ptep_clear_flush(struct vm_area_struct *vma,
 	size_t pgsize;
 	int ncontig;
 
-	if (!pte_cont(*ptep)) {
+	if (!pte_cont(READ_ONCE(*ptep))) {
 		ptep_clear_flush(vma, addr, ptep);
 		return;
 	}
@@ -44,92 +44,92 @@ static phys_addr_t __init kasan_alloc_zeroed_page(int node)
 	return __pa(p);
 }
 
-static pte_t *__init kasan_pte_offset(pmd_t *pmd, unsigned long addr, int node,
+static pte_t *__init kasan_pte_offset(pmd_t *pmdp, unsigned long addr, int node,
 				      bool early)
 {
-	if (pmd_none(*pmd)) {
+	if (pmd_none(READ_ONCE(*pmdp))) {
 		phys_addr_t pte_phys = early ? __pa_symbol(kasan_zero_pte)
 					     : kasan_alloc_zeroed_page(node);
-		__pmd_populate(pmd, pte_phys, PMD_TYPE_TABLE);
+		__pmd_populate(pmdp, pte_phys, PMD_TYPE_TABLE);
 	}
 
-	return early ? pte_offset_kimg(pmd, addr)
-		     : pte_offset_kernel(pmd, addr);
+	return early ? pte_offset_kimg(pmdp, addr)
+		     : pte_offset_kernel(pmdp, addr);
 }
 
-static pmd_t *__init kasan_pmd_offset(pud_t *pud, unsigned long addr, int node,
+static pmd_t *__init kasan_pmd_offset(pud_t *pudp, unsigned long addr, int node,
 				      bool early)
 {
-	if (pud_none(*pud)) {
+	if (pud_none(READ_ONCE(*pudp))) {
 		phys_addr_t pmd_phys = early ? __pa_symbol(kasan_zero_pmd)
 					     : kasan_alloc_zeroed_page(node);
-		__pud_populate(pud, pmd_phys, PMD_TYPE_TABLE);
+		__pud_populate(pudp, pmd_phys, PMD_TYPE_TABLE);
 	}
 
-	return early ? pmd_offset_kimg(pud, addr) : pmd_offset(pud, addr);
+	return early ? pmd_offset_kimg(pudp, addr) : pmd_offset(pudp, addr);
 }
 
-static pud_t *__init kasan_pud_offset(pgd_t *pgd, unsigned long addr, int node,
+static pud_t *__init kasan_pud_offset(pgd_t *pgdp, unsigned long addr, int node,
 				      bool early)
 {
-	if (pgd_none(*pgd)) {
+	if (pgd_none(READ_ONCE(*pgdp))) {
 		phys_addr_t pud_phys = early ? __pa_symbol(kasan_zero_pud)
 					     : kasan_alloc_zeroed_page(node);
-		__pgd_populate(pgd, pud_phys, PMD_TYPE_TABLE);
+		__pgd_populate(pgdp, pud_phys, PMD_TYPE_TABLE);
 	}
 
-	return early ? pud_offset_kimg(pgd, addr) : pud_offset(pgd, addr);
+	return early ? pud_offset_kimg(pgdp, addr) : pud_offset(pgdp, addr);
 }
 
-static void __init kasan_pte_populate(pmd_t *pmd, unsigned long addr,
+static void __init kasan_pte_populate(pmd_t *pmdp, unsigned long addr,
 				      unsigned long end, int node, bool early)
 {
 	unsigned long next;
-	pte_t *pte = kasan_pte_offset(pmd, addr, node, early);
+	pte_t *ptep = kasan_pte_offset(pmdp, addr, node, early);
 
 	do {
 		phys_addr_t page_phys = early ? __pa_symbol(kasan_zero_page)
 					      : kasan_alloc_zeroed_page(node);
 		next = addr + PAGE_SIZE;
-		set_pte(pte, pfn_pte(__phys_to_pfn(page_phys), PAGE_KERNEL));
-	} while (pte++, addr = next, addr != end && pte_none(*pte));
+		set_pte(ptep, pfn_pte(__phys_to_pfn(page_phys), PAGE_KERNEL));
+	} while (ptep++, addr = next, addr != end && pte_none(READ_ONCE(*ptep)));
 }
 
-static void __init kasan_pmd_populate(pud_t *pud, unsigned long addr,
+static void __init kasan_pmd_populate(pud_t *pudp, unsigned long addr,
 				      unsigned long end, int node, bool early)
 {
 	unsigned long next;
-	pmd_t *pmd = kasan_pmd_offset(pud, addr, node, early);
+	pmd_t *pmdp = kasan_pmd_offset(pudp, addr, node, early);
 
 	do {
 		next = pmd_addr_end(addr, end);
-		kasan_pte_populate(pmd, addr, next, node, early);
-	} while (pmd++, addr = next, addr != end && pmd_none(*pmd));
+		kasan_pte_populate(pmdp, addr, next, node, early);
+	} while (pmdp++, addr = next, addr != end && pmd_none(READ_ONCE(*pmdp)));
 }
 
-static void __init kasan_pud_populate(pgd_t *pgd, unsigned long addr,
+static void __init kasan_pud_populate(pgd_t *pgdp, unsigned long addr,
 				      unsigned long end, int node, bool early)
 {
 	unsigned long next;
-	pud_t *pud = kasan_pud_offset(pgd, addr, node, early);
+	pud_t *pudp = kasan_pud_offset(pgdp, addr, node, early);
 
 	do {
 		next = pud_addr_end(addr, end);
-		kasan_pmd_populate(pud, addr, next, node, early);
-	} while (pud++, addr = next, addr != end && pud_none(*pud));
+		kasan_pmd_populate(pudp, addr, next, node, early);
+	} while (pudp++, addr = next, addr != end && pud_none(READ_ONCE(*pudp)));
 }
 
 static void __init kasan_pgd_populate(unsigned long addr, unsigned long end,
 				      int node, bool early)
 {
 	unsigned long next;
-	pgd_t *pgd;
+	pgd_t *pgdp;
 
-	pgd = pgd_offset_k(addr);
+	pgdp = pgd_offset_k(addr);
 	do {
 		next = pgd_addr_end(addr, end);
-		kasan_pud_populate(pgd, addr, next, node, early);
-	} while (pgd++, addr = next, addr != end);
+		kasan_pud_populate(pgdp, addr, next, node, early);
+	} while (pgdp++, addr = next, addr != end);
 }
 
 /* The early shadow maps everything to a single page of zeroes */
@@ -155,14 +155,14 @@ static void __init kasan_map_populate(unsigned long start, unsigned long end,
  */
 void __init kasan_copy_shadow(pgd_t *pgdir)
 {
-	pgd_t *pgd, *pgd_new, *pgd_end;
+	pgd_t *pgdp, *pgdp_new, *pgdp_end;
 
-	pgd = pgd_offset_k(KASAN_SHADOW_START);
-	pgd_end = pgd_offset_k(KASAN_SHADOW_END);
-	pgd_new = pgd_offset_raw(pgdir, KASAN_SHADOW_START);
+	pgdp = pgd_offset_k(KASAN_SHADOW_START);
+	pgdp_end = pgd_offset_k(KASAN_SHADOW_END);
+	pgdp_new = pgd_offset_raw(pgdir, KASAN_SHADOW_START);
 	do {
-		set_pgd(pgd_new, *pgd);
-	} while (pgd++, pgd_new++, pgd != pgd_end);
+		set_pgd(pgdp_new, READ_ONCE(*pgdp));
+	} while (pgdp++, pgdp_new++, pgdp != pgdp_end);
 }
 
 static void __init clear_pgds(unsigned long start,
@@ -125,45 +125,48 @@ static bool pgattr_change_is_safe(u64 old, u64 new)
 	return ((old ^ new) & ~mask) == 0;
 }
 
-static void init_pte(pmd_t *pmd, unsigned long addr, unsigned long end,
+static void init_pte(pmd_t *pmdp, unsigned long addr, unsigned long end,
 		     phys_addr_t phys, pgprot_t prot)
 {
-	pte_t *pte;
+	pte_t *ptep;
 
-	pte = pte_set_fixmap_offset(pmd, addr);
+	ptep = pte_set_fixmap_offset(pmdp, addr);
 	do {
-		pte_t old_pte = *pte;
+		pte_t old_pte = READ_ONCE(*ptep);
 
-		set_pte(pte, pfn_pte(__phys_to_pfn(phys), prot));
+		set_pte(ptep, pfn_pte(__phys_to_pfn(phys), prot));
 
 		/*
 		 * After the PTE entry has been populated once, we
 		 * only allow updates to the permission attributes.
 		 */
-		BUG_ON(!pgattr_change_is_safe(pte_val(old_pte), pte_val(*pte)));
+		BUG_ON(!pgattr_change_is_safe(pte_val(old_pte),
+					      READ_ONCE(pte_val(*ptep))));
 
 		phys += PAGE_SIZE;
-	} while (pte++, addr += PAGE_SIZE, addr != end);
+	} while (ptep++, addr += PAGE_SIZE, addr != end);
 
 	pte_clear_fixmap();
 }
 
-static void alloc_init_cont_pte(pmd_t *pmd, unsigned long addr,
+static void alloc_init_cont_pte(pmd_t *pmdp, unsigned long addr,
 				unsigned long end, phys_addr_t phys,
 				pgprot_t prot,
 				phys_addr_t (*pgtable_alloc)(void),
 				int flags)
 {
 	unsigned long next;
+	pmd_t pmd = READ_ONCE(*pmdp);
 
-	BUG_ON(pmd_sect(*pmd));
-	if (pmd_none(*pmd)) {
+	BUG_ON(pmd_sect(pmd));
+	if (pmd_none(pmd)) {
 		phys_addr_t pte_phys;
 		BUG_ON(!pgtable_alloc);
 		pte_phys = pgtable_alloc();
-		__pmd_populate(pmd, pte_phys, PMD_TYPE_TABLE);
+		__pmd_populate(pmdp, pte_phys, PMD_TYPE_TABLE);
+		pmd = READ_ONCE(*pmdp);
 	}
-	BUG_ON(pmd_bad(*pmd));
+	BUG_ON(pmd_bad(pmd));
 
 	do {
 		pgprot_t __prot = prot;
@@ -175,67 +178,69 @@ static void alloc_init_cont_pte(pmd_t *pmd, unsigned long addr,
 		    (flags & NO_CONT_MAPPINGS) == 0)
 			__prot = __pgprot(pgprot_val(prot) | PTE_CONT);
 
-		init_pte(pmd, addr, next, phys, __prot);
+		init_pte(pmdp, addr, next, phys, __prot);
 
 		phys += next - addr;
 	} while (addr = next, addr != end);
 }
 
-static void init_pmd(pud_t *pud, unsigned long addr, unsigned long end,
+static void init_pmd(pud_t *pudp, unsigned long addr, unsigned long end,
 		     phys_addr_t phys, pgprot_t prot,
 		     phys_addr_t (*pgtable_alloc)(void), int flags)
 {
 	unsigned long next;
-	pmd_t *pmd;
+	pmd_t *pmdp;
 
-	pmd = pmd_set_fixmap_offset(pud, addr);
+	pmdp = pmd_set_fixmap_offset(pudp, addr);
 	do {
-		pmd_t old_pmd = *pmd;
+		pmd_t old_pmd = READ_ONCE(*pmdp);
 
 		next = pmd_addr_end(addr, end);
 
 		/* try section mapping first */
 		if (((addr | next | phys) & ~SECTION_MASK) == 0 &&
 		    (flags & NO_BLOCK_MAPPINGS) == 0) {
-			pmd_set_huge(pmd, phys, prot);
+			pmd_set_huge(pmdp, phys, prot);
 
 			/*
 			 * After the PMD entry has been populated once, we
 			 * only allow updates to the permission attributes.
 			 */
 			BUG_ON(!pgattr_change_is_safe(pmd_val(old_pmd),
-						      pmd_val(*pmd)));
+						      READ_ONCE(pmd_val(*pmdp))));
 		} else {
-			alloc_init_cont_pte(pmd, addr, next, phys, prot,
+			alloc_init_cont_pte(pmdp, addr, next, phys, prot,
 					    pgtable_alloc, flags);
 
 			BUG_ON(pmd_val(old_pmd) != 0 &&
-			       pmd_val(old_pmd) != pmd_val(*pmd));
+			       pmd_val(old_pmd) != READ_ONCE(pmd_val(*pmdp)));
 		}
 		phys += next - addr;
-	} while (pmd++, addr = next, addr != end);
+	} while (pmdp++, addr = next, addr != end);
 
 	pmd_clear_fixmap();
 }
 
-static void alloc_init_cont_pmd(pud_t *pud, unsigned long addr,
+static void alloc_init_cont_pmd(pud_t *pudp, unsigned long addr,
 				unsigned long end, phys_addr_t phys,
 				pgprot_t prot,
 				phys_addr_t (*pgtable_alloc)(void), int flags)
 {
 	unsigned long next;
+	pud_t pud = READ_ONCE(*pudp);
 
 	/*
 	 * Check for initial section mappings in the pgd/pud.
 	 */
-	BUG_ON(pud_sect(*pud));
-	if (pud_none(*pud)) {
+	BUG_ON(pud_sect(pud));
+	if (pud_none(pud)) {
 		phys_addr_t pmd_phys;
 		BUG_ON(!pgtable_alloc);
 		pmd_phys = pgtable_alloc();
-		__pud_populate(pud, pmd_phys, PUD_TYPE_TABLE);
+		__pud_populate(pudp, pmd_phys, PUD_TYPE_TABLE);
+		pud = READ_ONCE(*pudp);
 	}
-	BUG_ON(pud_bad(*pud));
+	BUG_ON(pud_bad(pud));
 
 	do {
 		pgprot_t __prot = prot;
@ -247,7 +252,7 @@ static void alloc_init_cont_pmd(pud_t *pud, unsigned long addr,
|
|||
(flags & NO_CONT_MAPPINGS) == 0)
|
||||
__prot = __pgprot(pgprot_val(prot) | PTE_CONT);
|
||||
|
||||
init_pmd(pud, addr, next, phys, __prot, pgtable_alloc, flags);
|
||||
init_pmd(pudp, addr, next, phys, __prot, pgtable_alloc, flags);
|
||||
|
||||
phys += next - addr;
|
||||
} while (addr = next, addr != end);
|
||||
|
@ -265,25 +270,27 @@ static inline bool use_1G_block(unsigned long addr, unsigned long next,
|
|||
return true;
|
||||
}
|
||||
|
||||
static void alloc_init_pud(pgd_t *pgd, unsigned long addr, unsigned long end,
|
||||
phys_addr_t phys, pgprot_t prot,
|
||||
phys_addr_t (*pgtable_alloc)(void),
|
||||
int flags)
|
||||
static void alloc_init_pud(pgd_t *pgdp, unsigned long addr, unsigned long end,
|
||||
phys_addr_t phys, pgprot_t prot,
|
||||
phys_addr_t (*pgtable_alloc)(void),
|
||||
int flags)
|
||||
{
|
||||
pud_t *pud;
|
||||
unsigned long next;
|
||||
pud_t *pudp;
|
||||
pgd_t pgd = READ_ONCE(*pgdp);
|
||||
|
||||
if (pgd_none(*pgd)) {
|
||||
if (pgd_none(pgd)) {
|
||||
phys_addr_t pud_phys;
|
||||
BUG_ON(!pgtable_alloc);
|
||||
pud_phys = pgtable_alloc();
|
||||
__pgd_populate(pgd, pud_phys, PUD_TYPE_TABLE);
|
||||
__pgd_populate(pgdp, pud_phys, PUD_TYPE_TABLE);
|
||||
pgd = READ_ONCE(*pgdp);
|
||||
}
|
||||
BUG_ON(pgd_bad(*pgd));
|
||||
BUG_ON(pgd_bad(pgd));
|
||||
|
||||
pud = pud_set_fixmap_offset(pgd, addr);
|
||||
pudp = pud_set_fixmap_offset(pgdp, addr);
|
||||
do {
|
||||
pud_t old_pud = *pud;
|
||||
pud_t old_pud = READ_ONCE(*pudp);
|
||||
|
||||
next = pud_addr_end(addr, end);
|
||||
|
||||
|
@ -292,23 +299,23 @@ static void alloc_init_pud(pgd_t *pgd, unsigned long addr, unsigned long end,
|
|||
*/
|
||||
if (use_1G_block(addr, next, phys) &&
|
||||
(flags & NO_BLOCK_MAPPINGS) == 0) {
|
||||
pud_set_huge(pud, phys, prot);
|
||||
pud_set_huge(pudp, phys, prot);
|
||||
|
||||
/*
|
||||
* After the PUD entry has been populated once, we
|
||||
* only allow updates to the permission attributes.
|
||||
*/
|
||||
BUG_ON(!pgattr_change_is_safe(pud_val(old_pud),
|
||||
pud_val(*pud)));
|
||||
READ_ONCE(pud_val(*pudp))));
|
||||
} else {
|
||||
alloc_init_cont_pmd(pud, addr, next, phys, prot,
|
||||
alloc_init_cont_pmd(pudp, addr, next, phys, prot,
|
||||
pgtable_alloc, flags);
|
||||
|
||||
BUG_ON(pud_val(old_pud) != 0 &&
|
||||
pud_val(old_pud) != pud_val(*pud));
|
||||
pud_val(old_pud) != READ_ONCE(pud_val(*pudp)));
|
||||
}
|
||||
phys += next - addr;
|
||||
} while (pud++, addr = next, addr != end);
|
||||
} while (pudp++, addr = next, addr != end);
|
||||
|
||||
pud_clear_fixmap();
|
||||
}
|
||||
|
@ -320,7 +327,7 @@ static void __create_pgd_mapping(pgd_t *pgdir, phys_addr_t phys,
|
|||
int flags)
|
||||
{
|
||||
unsigned long addr, length, end, next;
|
||||
pgd_t *pgd = pgd_offset_raw(pgdir, virt);
|
||||
pgd_t *pgdp = pgd_offset_raw(pgdir, virt);
|
||||
|
||||
/*
|
||||
* If the virtual and physical address don't have the same offset
|
||||
|
@ -336,10 +343,10 @@ static void __create_pgd_mapping(pgd_t *pgdir, phys_addr_t phys,
|
|||
end = addr + length;
|
||||
do {
|
||||
next = pgd_addr_end(addr, end);
|
||||
alloc_init_pud(pgd, addr, next, phys, prot, pgtable_alloc,
|
||||
alloc_init_pud(pgdp, addr, next, phys, prot, pgtable_alloc,
|
||||
flags);
|
||||
phys += next - addr;
|
||||
} while (pgd++, addr = next, addr != end);
|
||||
} while (pgdp++, addr = next, addr != end);
|
||||
}
|
||||
|
||||
static phys_addr_t pgd_pgtable_alloc(void)
|
||||
|
@ -401,10 +408,10 @@ static void update_mapping_prot(phys_addr_t phys, unsigned long virt,
|
|||
flush_tlb_kernel_range(virt, virt + size);
|
||||
}
|
||||
|
||||
static void __init __map_memblock(pgd_t *pgd, phys_addr_t start,
|
||||
static void __init __map_memblock(pgd_t *pgdp, phys_addr_t start,
|
||||
phys_addr_t end, pgprot_t prot, int flags)
|
||||
{
|
||||
__create_pgd_mapping(pgd, start, __phys_to_virt(start), end - start,
|
||||
__create_pgd_mapping(pgdp, start, __phys_to_virt(start), end - start,
|
||||
prot, early_pgtable_alloc, flags);
|
||||
}
|
||||
|
||||
|
@ -418,7 +425,7 @@ void __init mark_linear_text_alias_ro(void)
|
|||
PAGE_KERNEL_RO);
|
||||
}
|
||||
|
||||
static void __init map_mem(pgd_t *pgd)
|
||||
static void __init map_mem(pgd_t *pgdp)
|
||||
{
|
||||
phys_addr_t kernel_start = __pa_symbol(_text);
|
||||
phys_addr_t kernel_end = __pa_symbol(__init_begin);
|
||||
|
@ -451,7 +458,7 @@ static void __init map_mem(pgd_t *pgd)
|
|||
if (memblock_is_nomap(reg))
|
||||
continue;
|
||||
|
||||
__map_memblock(pgd, start, end, PAGE_KERNEL, flags);
|
||||
__map_memblock(pgdp, start, end, PAGE_KERNEL, flags);
|
||||
}
|
||||
|
||||
/*
|
||||
|
@ -464,7 +471,7 @@ static void __init map_mem(pgd_t *pgd)
|
|||
* Note that contiguous mappings cannot be remapped in this way,
|
||||
* so we should avoid them here.
|
||||
*/
|
||||
__map_memblock(pgd, kernel_start, kernel_end,
|
||||
__map_memblock(pgdp, kernel_start, kernel_end,
|
||||
PAGE_KERNEL, NO_CONT_MAPPINGS);
|
||||
memblock_clear_nomap(kernel_start, kernel_end - kernel_start);
|
||||
|
||||
|
@ -475,7 +482,7 @@ static void __init map_mem(pgd_t *pgd)
|
|||
* through /sys/kernel/kexec_crash_size interface.
|
||||
*/
|
||||
if (crashk_res.end) {
|
||||
__map_memblock(pgd, crashk_res.start, crashk_res.end + 1,
|
||||
__map_memblock(pgdp, crashk_res.start, crashk_res.end + 1,
|
||||
PAGE_KERNEL,
|
||||
NO_BLOCK_MAPPINGS | NO_CONT_MAPPINGS);
|
||||
memblock_clear_nomap(crashk_res.start,
|
||||
|
@ -499,7 +506,7 @@ void mark_rodata_ro(void)
|
|||
debug_checkwx();
|
||||
}
|
||||
|
||||
static void __init map_kernel_segment(pgd_t *pgd, void *va_start, void *va_end,
|
||||
static void __init map_kernel_segment(pgd_t *pgdp, void *va_start, void *va_end,
|
||||
pgprot_t prot, struct vm_struct *vma,
|
||||
int flags, unsigned long vm_flags)
|
||||
{
|
||||
|
@ -509,7 +516,7 @@ static void __init map_kernel_segment(pgd_t *pgd, void *va_start, void *va_end,
|
|||
BUG_ON(!PAGE_ALIGNED(pa_start));
|
||||
BUG_ON(!PAGE_ALIGNED(size));
|
||||
|
||||
__create_pgd_mapping(pgd, pa_start, (unsigned long)va_start, size, prot,
|
||||
__create_pgd_mapping(pgdp, pa_start, (unsigned long)va_start, size, prot,
|
||||
early_pgtable_alloc, flags);
|
||||
|
||||
if (!(vm_flags & VM_NO_GUARD))
|
||||
|
@ -562,7 +569,7 @@ core_initcall(map_entry_trampoline);
|
|||
/*
|
||||
* Create fine-grained mappings for the kernel.
|
||||
*/
|
||||
static void __init map_kernel(pgd_t *pgd)
|
||||
static void __init map_kernel(pgd_t *pgdp)
|
||||
{
|
||||
static struct vm_struct vmlinux_text, vmlinux_rodata, vmlinux_inittext,
|
||||
vmlinux_initdata, vmlinux_data;
|
||||
|
@ -578,24 +585,24 @@ static void __init map_kernel(pgd_t *pgd)
|
|||
* Only rodata will be remapped with different permissions later on,
|
||||
* all other segments are allowed to use contiguous mappings.
|
||||
*/
|
||||
map_kernel_segment(pgd, _text, _etext, text_prot, &vmlinux_text, 0,
|
||||
map_kernel_segment(pgdp, _text, _etext, text_prot, &vmlinux_text, 0,
|
||||
VM_NO_GUARD);
|
||||
map_kernel_segment(pgd, __start_rodata, __inittext_begin, PAGE_KERNEL,
|
||||
map_kernel_segment(pgdp, __start_rodata, __inittext_begin, PAGE_KERNEL,
|
||||
&vmlinux_rodata, NO_CONT_MAPPINGS, VM_NO_GUARD);
|
||||
map_kernel_segment(pgd, __inittext_begin, __inittext_end, text_prot,
|
||||
map_kernel_segment(pgdp, __inittext_begin, __inittext_end, text_prot,
|
||||
&vmlinux_inittext, 0, VM_NO_GUARD);
|
||||
map_kernel_segment(pgd, __initdata_begin, __initdata_end, PAGE_KERNEL,
|
||||
map_kernel_segment(pgdp, __initdata_begin, __initdata_end, PAGE_KERNEL,
|
||||
&vmlinux_initdata, 0, VM_NO_GUARD);
|
||||
map_kernel_segment(pgd, _data, _end, PAGE_KERNEL, &vmlinux_data, 0, 0);
|
||||
map_kernel_segment(pgdp, _data, _end, PAGE_KERNEL, &vmlinux_data, 0, 0);
|
||||
|
||||
if (!pgd_val(*pgd_offset_raw(pgd, FIXADDR_START))) {
|
||||
if (!READ_ONCE(pgd_val(*pgd_offset_raw(pgdp, FIXADDR_START)))) {
|
||||
/*
|
||||
* The fixmap falls in a separate pgd to the kernel, and doesn't
|
||||
* live in the carveout for the swapper_pg_dir. We can simply
|
||||
* re-use the existing dir for the fixmap.
|
||||
*/
|
||||
set_pgd(pgd_offset_raw(pgd, FIXADDR_START),
|
||||
*pgd_offset_k(FIXADDR_START));
|
||||
set_pgd(pgd_offset_raw(pgdp, FIXADDR_START),
|
||||
READ_ONCE(*pgd_offset_k(FIXADDR_START)));
|
||||
} else if (CONFIG_PGTABLE_LEVELS > 3) {
|
||||
/*
|
||||
* The fixmap shares its top level pgd entry with the kernel
|
||||
|
@ -604,14 +611,15 @@ static void __init map_kernel(pgd_t *pgd)
|
|||
* entry instead.
|
||||
*/
|
||||
BUG_ON(!IS_ENABLED(CONFIG_ARM64_16K_PAGES));
|
||||
pud_populate(&init_mm, pud_set_fixmap_offset(pgd, FIXADDR_START),
|
||||
pud_populate(&init_mm,
|
||||
pud_set_fixmap_offset(pgdp, FIXADDR_START),
|
||||
lm_alias(bm_pmd));
|
||||
pud_clear_fixmap();
|
||||
} else {
|
||||
BUG();
|
||||
}
|
||||
|
||||
kasan_copy_shadow(pgd);
|
||||
kasan_copy_shadow(pgdp);
|
||||
}
|
||||
|
||||
/*
|
||||
|
@ -621,10 +629,10 @@ static void __init map_kernel(pgd_t *pgd)
|
|||
void __init paging_init(void)
|
||||
{
|
||||
phys_addr_t pgd_phys = early_pgtable_alloc();
|
||||
pgd_t *pgd = pgd_set_fixmap(pgd_phys);
|
||||
pgd_t *pgdp = pgd_set_fixmap(pgd_phys);
|
||||
|
||||
map_kernel(pgd);
|
||||
map_mem(pgd);
|
||||
map_kernel(pgdp);
|
||||
map_mem(pgdp);
|
||||
|
||||
/*
|
||||
* We want to reuse the original swapper_pg_dir so we don't have to
|
||||
|
@ -635,7 +643,7 @@ void __init paging_init(void)
|
|||
* To do this we need to go via a temporary pgd.
|
||||
*/
|
||||
cpu_replace_ttbr1(__va(pgd_phys));
|
||||
memcpy(swapper_pg_dir, pgd, PGD_SIZE);
|
||||
memcpy(swapper_pg_dir, pgdp, PGD_SIZE);
|
||||
cpu_replace_ttbr1(lm_alias(swapper_pg_dir));
|
||||
|
||||
pgd_clear_fixmap();
|
||||
|
@ -655,37 +663,40 @@ void __init paging_init(void)
|
|||
*/
|
||||
int kern_addr_valid(unsigned long addr)
|
||||
{
|
||||
pgd_t *pgd;
|
||||
pud_t *pud;
|
||||
pmd_t *pmd;
|
||||
pte_t *pte;
|
||||
pgd_t *pgdp;
|
||||
pud_t *pudp, pud;
|
||||
pmd_t *pmdp, pmd;
|
||||
pte_t *ptep, pte;
|
||||
|
||||
if ((((long)addr) >> VA_BITS) != -1UL)
|
||||
return 0;
|
||||
|
||||
pgd = pgd_offset_k(addr);
|
||||
if (pgd_none(*pgd))
|
||||
pgdp = pgd_offset_k(addr);
|
||||
if (pgd_none(READ_ONCE(*pgdp)))
|
||||
return 0;
|
||||
|
||||
pud = pud_offset(pgd, addr);
|
||||
if (pud_none(*pud))
|
||||
pudp = pud_offset(pgdp, addr);
|
||||
pud = READ_ONCE(*pudp);
|
||||
if (pud_none(pud))
|
||||
return 0;
|
||||
|
||||
if (pud_sect(*pud))
|
||||
return pfn_valid(pud_pfn(*pud));
|
||||
if (pud_sect(pud))
|
||||
return pfn_valid(pud_pfn(pud));
|
||||
|
||||
pmd = pmd_offset(pud, addr);
|
||||
if (pmd_none(*pmd))
|
||||
pmdp = pmd_offset(pudp, addr);
|
||||
pmd = READ_ONCE(*pmdp);
|
||||
if (pmd_none(pmd))
|
||||
return 0;
|
||||
|
||||
if (pmd_sect(*pmd))
|
||||
return pfn_valid(pmd_pfn(*pmd));
|
||||
if (pmd_sect(pmd))
|
||||
return pfn_valid(pmd_pfn(pmd));
|
||||
|
||||
pte = pte_offset_kernel(pmd, addr);
|
||||
if (pte_none(*pte))
|
||||
ptep = pte_offset_kernel(pmdp, addr);
|
||||
pte = READ_ONCE(*ptep);
|
||||
if (pte_none(pte))
|
||||
return 0;
|
||||
|
||||
return pfn_valid(pte_pfn(*pte));
|
||||
return pfn_valid(pte_pfn(pte));
|
||||
}
|
||||
#ifdef CONFIG_SPARSEMEM_VMEMMAP
|
||||
#if !ARM64_SWAPPER_USES_SECTION_MAPS
|
||||
|
@ -700,32 +711,32 @@ int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node,
|
|||
{
|
||||
unsigned long addr = start;
|
||||
unsigned long next;
|
||||
pgd_t *pgd;
|
||||
pud_t *pud;
|
||||
pmd_t *pmd;
|
||||
pgd_t *pgdp;
|
||||
pud_t *pudp;
|
||||
pmd_t *pmdp;
|
||||
|
||||
do {
|
||||
next = pmd_addr_end(addr, end);
|
||||
|
||||
pgd = vmemmap_pgd_populate(addr, node);
|
||||
if (!pgd)
|
||||
pgdp = vmemmap_pgd_populate(addr, node);
|
||||
if (!pgdp)
|
||||
return -ENOMEM;
|
||||
|
||||
pud = vmemmap_pud_populate(pgd, addr, node);
|
||||
if (!pud)
|
||||
pudp = vmemmap_pud_populate(pgdp, addr, node);
|
||||
if (!pudp)
|
||||
return -ENOMEM;
|
||||
|
||||
pmd = pmd_offset(pud, addr);
|
||||
if (pmd_none(*pmd)) {
|
||||
pmdp = pmd_offset(pudp, addr);
|
||||
if (pmd_none(READ_ONCE(*pmdp))) {
|
||||
void *p = NULL;
|
||||
|
||||
p = vmemmap_alloc_block_buf(PMD_SIZE, node);
|
||||
if (!p)
|
||||
return -ENOMEM;
|
||||
|
||||
pmd_set_huge(pmd, __pa(p), __pgprot(PROT_SECT_NORMAL));
|
||||
pmd_set_huge(pmdp, __pa(p), __pgprot(PROT_SECT_NORMAL));
|
||||
} else
|
||||
vmemmap_verify((pte_t *)pmd, node, addr, next);
|
||||
vmemmap_verify((pte_t *)pmdp, node, addr, next);
|
||||
} while (addr = next, addr != end);
|
||||
|
||||
return 0;
|
||||
|
@ -739,20 +750,22 @@ void vmemmap_free(unsigned long start, unsigned long end,
|
|||
|
||||
static inline pud_t * fixmap_pud(unsigned long addr)
|
||||
{
|
||||
pgd_t *pgd = pgd_offset_k(addr);
|
||||
pgd_t *pgdp = pgd_offset_k(addr);
|
||||
pgd_t pgd = READ_ONCE(*pgdp);
|
||||
|
||||
BUG_ON(pgd_none(*pgd) || pgd_bad(*pgd));
|
||||
BUG_ON(pgd_none(pgd) || pgd_bad(pgd));
|
||||
|
||||
return pud_offset_kimg(pgd, addr);
|
||||
return pud_offset_kimg(pgdp, addr);
|
||||
}
|
||||
|
||||
static inline pmd_t * fixmap_pmd(unsigned long addr)
|
||||
{
|
||||
pud_t *pud = fixmap_pud(addr);
|
||||
pud_t *pudp = fixmap_pud(addr);
|
||||
pud_t pud = READ_ONCE(*pudp);
|
||||
|
||||
BUG_ON(pud_none(*pud) || pud_bad(*pud));
|
||||
BUG_ON(pud_none(pud) || pud_bad(pud));
|
||||
|
||||
return pmd_offset_kimg(pud, addr);
|
||||
return pmd_offset_kimg(pudp, addr);
|
||||
}
|
||||
|
||||
static inline pte_t * fixmap_pte(unsigned long addr)
|
||||
|
@ -768,30 +781,31 @@ static inline pte_t * fixmap_pte(unsigned long addr)
|
|||
*/
|
||||
void __init early_fixmap_init(void)
|
||||
{
|
||||
pgd_t *pgd;
|
||||
pud_t *pud;
|
||||
pmd_t *pmd;
|
||||
pgd_t *pgdp, pgd;
|
||||
pud_t *pudp;
|
||||
pmd_t *pmdp;
|
||||
unsigned long addr = FIXADDR_START;
|
||||
|
||||
pgd = pgd_offset_k(addr);
|
||||
pgdp = pgd_offset_k(addr);
|
||||
pgd = READ_ONCE(*pgdp);
|
||||
if (CONFIG_PGTABLE_LEVELS > 3 &&
|
||||
!(pgd_none(*pgd) || pgd_page_paddr(*pgd) == __pa_symbol(bm_pud))) {
|
||||
!(pgd_none(pgd) || pgd_page_paddr(pgd) == __pa_symbol(bm_pud))) {
|
||||
/*
|
||||
* We only end up here if the kernel mapping and the fixmap
|
||||
* share the top level pgd entry, which should only happen on
|
||||
* 16k/4 levels configurations.
|
||||
*/
|
||||
BUG_ON(!IS_ENABLED(CONFIG_ARM64_16K_PAGES));
|
||||
pud = pud_offset_kimg(pgd, addr);
|
||||
pudp = pud_offset_kimg(pgdp, addr);
|
||||
} else {
|
||||
if (pgd_none(*pgd))
|
||||
__pgd_populate(pgd, __pa_symbol(bm_pud), PUD_TYPE_TABLE);
|
||||
pud = fixmap_pud(addr);
|
||||
if (pgd_none(pgd))
|
||||
__pgd_populate(pgdp, __pa_symbol(bm_pud), PUD_TYPE_TABLE);
|
||||
pudp = fixmap_pud(addr);
|
||||
}
|
||||
if (pud_none(*pud))
|
||||
__pud_populate(pud, __pa_symbol(bm_pmd), PMD_TYPE_TABLE);
|
||||
pmd = fixmap_pmd(addr);
|
||||
__pmd_populate(pmd, __pa_symbol(bm_pte), PMD_TYPE_TABLE);
|
||||
if (pud_none(READ_ONCE(*pudp)))
|
||||
__pud_populate(pudp, __pa_symbol(bm_pmd), PMD_TYPE_TABLE);
|
||||
pmdp = fixmap_pmd(addr);
|
||||
__pmd_populate(pmdp, __pa_symbol(bm_pte), PMD_TYPE_TABLE);
|
||||
|
||||
/*
|
||||
* The boot-ioremap range spans multiple pmds, for which
|
||||
|
@ -800,11 +814,11 @@ void __init early_fixmap_init(void)
|
|||
BUILD_BUG_ON((__fix_to_virt(FIX_BTMAP_BEGIN) >> PMD_SHIFT)
|
||||
!= (__fix_to_virt(FIX_BTMAP_END) >> PMD_SHIFT));
|
||||
|
||||
if ((pmd != fixmap_pmd(fix_to_virt(FIX_BTMAP_BEGIN)))
|
||||
|| pmd != fixmap_pmd(fix_to_virt(FIX_BTMAP_END))) {
|
||||
if ((pmdp != fixmap_pmd(fix_to_virt(FIX_BTMAP_BEGIN)))
|
||||
|| pmdp != fixmap_pmd(fix_to_virt(FIX_BTMAP_END))) {
|
||||
WARN_ON(1);
|
||||
pr_warn("pmd %p != %p, %p\n",
|
||||
pmd, fixmap_pmd(fix_to_virt(FIX_BTMAP_BEGIN)),
|
||||
pr_warn("pmdp %p != %p, %p\n",
|
||||
pmdp, fixmap_pmd(fix_to_virt(FIX_BTMAP_BEGIN)),
|
||||
fixmap_pmd(fix_to_virt(FIX_BTMAP_END)));
|
||||
pr_warn("fix_to_virt(FIX_BTMAP_BEGIN): %08lx\n",
|
||||
fix_to_virt(FIX_BTMAP_BEGIN));
|
||||
|
@ -824,16 +838,16 @@ void __set_fixmap(enum fixed_addresses idx,
|
|||
phys_addr_t phys, pgprot_t flags)
|
||||
{
|
||||
unsigned long addr = __fix_to_virt(idx);
|
||||
pte_t *pte;
|
||||
pte_t *ptep;
|
||||
|
||||
BUG_ON(idx <= FIX_HOLE || idx >= __end_of_fixed_addresses);
|
||||
|
||||
pte = fixmap_pte(addr);
|
||||
ptep = fixmap_pte(addr);
|
||||
|
||||
if (pgprot_val(flags)) {
|
||||
set_pte(pte, pfn_pte(phys >> PAGE_SHIFT, flags));
|
||||
set_pte(ptep, pfn_pte(phys >> PAGE_SHIFT, flags));
|
||||
} else {
|
||||
pte_clear(&init_mm, addr, pte);
|
||||
pte_clear(&init_mm, addr, ptep);
|
||||
flush_tlb_kernel_range(addr, addr+PAGE_SIZE);
|
||||
}
|
||||
}
|
||||
|
@ -915,36 +929,36 @@ int __init arch_ioremap_pmd_supported(void)
|
|||
return 1;
|
||||
}
|
||||
|
||||
int pud_set_huge(pud_t *pud, phys_addr_t phys, pgprot_t prot)
|
||||
int pud_set_huge(pud_t *pudp, phys_addr_t phys, pgprot_t prot)
|
||||
{
|
||||
pgprot_t sect_prot = __pgprot(PUD_TYPE_SECT |
|
||||
pgprot_val(mk_sect_prot(prot)));
|
||||
BUG_ON(phys & ~PUD_MASK);
|
||||
set_pud(pud, pfn_pud(__phys_to_pfn(phys), sect_prot));
|
||||
set_pud(pudp, pfn_pud(__phys_to_pfn(phys), sect_prot));
|
||||
return 1;
|
||||
}
|
||||
|
||||
int pmd_set_huge(pmd_t *pmd, phys_addr_t phys, pgprot_t prot)
|
||||
int pmd_set_huge(pmd_t *pmdp, phys_addr_t phys, pgprot_t prot)
|
||||
{
|
||||
pgprot_t sect_prot = __pgprot(PMD_TYPE_SECT |
|
||||
pgprot_val(mk_sect_prot(prot)));
|
||||
BUG_ON(phys & ~PMD_MASK);
|
||||
set_pmd(pmd, pfn_pmd(__phys_to_pfn(phys), sect_prot));
|
||||
set_pmd(pmdp, pfn_pmd(__phys_to_pfn(phys), sect_prot));
|
||||
return 1;
|
||||
}
|
||||
|
||||
int pud_clear_huge(pud_t *pud)
|
||||
int pud_clear_huge(pud_t *pudp)
|
||||
{
|
||||
if (!pud_sect(*pud))
|
||||
if (!pud_sect(READ_ONCE(*pudp)))
|
||||
return 0;
|
||||
pud_clear(pud);
|
||||
pud_clear(pudp);
|
||||
return 1;
|
||||
}
|
||||
|
||||
int pmd_clear_huge(pmd_t *pmd)
|
||||
int pmd_clear_huge(pmd_t *pmdp)
|
||||
{
|
||||
if (!pmd_sect(*pmd))
|
||||
if (!pmd_sect(READ_ONCE(*pmdp)))
|
||||
return 0;
|
||||
pmd_clear(pmd);
|
||||
pmd_clear(pmdp);
|
||||
return 1;
|
||||
}
|
||||
|
|
|
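The arm64 hunks above all follow one pattern: dereference the page-table pointer exactly once with READ_ONCE(), stash the entry in a local, and run every subsequent check on that snapshot, so a concurrent hardware or CPU update cannot be observed half-way through the walk. A minimal userspace sketch of the idea (the READ_ONCE macro below is a simplified stand-in for the kernel's, and the table/entry names are invented for illustration):

#include <stdio.h>

/* Simplified stand-in for the kernel macro: force a single volatile load. */
#define READ_ONCE(x) (*(const volatile __typeof__(x) *)&(x))

static unsigned long table_slot;	/* stands in for *pmdp, may change underfoot */

int main(void)
{
	unsigned long pmd = READ_ONCE(table_slot);	/* one load ... */

	if (pmd == 0)		/* ... every later test uses the snapshot */
		puts("none: would allocate and repopulate");
	else
		printf("entry: %#lx\n", pmd);
	return 0;
}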
@@ -29,7 +29,7 @@ static int change_page_range(pte_t *ptep, pgtable_t token, unsigned long addr,
 			void *data)
 {
 	struct page_change_data *cdata = data;
-	pte_t pte = *ptep;
+	pte_t pte = READ_ONCE(*ptep);

 	pte = clear_pte_bit(pte, cdata->clear_mask);
 	pte = set_pte_bit(pte, cdata->set_mask);
@@ -156,30 +156,32 @@ void __kernel_map_pages(struct page *page, int numpages, int enable)
 */
 bool kernel_page_present(struct page *page)
 {
-	pgd_t *pgd;
-	pud_t *pud;
-	pmd_t *pmd;
-	pte_t *pte;
+	pgd_t *pgdp;
+	pud_t *pudp, pud;
+	pmd_t *pmdp, pmd;
+	pte_t *ptep;
 	unsigned long addr = (unsigned long)page_address(page);

-	pgd = pgd_offset_k(addr);
-	if (pgd_none(*pgd))
+	pgdp = pgd_offset_k(addr);
+	if (pgd_none(READ_ONCE(*pgdp)))
 		return false;

-	pud = pud_offset(pgd, addr);
-	if (pud_none(*pud))
+	pudp = pud_offset(pgdp, addr);
+	pud = READ_ONCE(*pudp);
+	if (pud_none(pud))
 		return false;
-	if (pud_sect(*pud))
+	if (pud_sect(pud))
 		return true;

-	pmd = pmd_offset(pud, addr);
-	if (pmd_none(*pmd))
+	pmdp = pmd_offset(pudp, addr);
+	pmd = READ_ONCE(*pmdp);
+	if (pmd_none(pmd))
 		return false;
-	if (pmd_sect(*pmd))
+	if (pmd_sect(pmd))
 		return true;

-	pte = pte_offset_kernel(pmd, addr);
-	return pte_valid(*pte);
+	ptep = pte_offset_kernel(pmdp, addr);
+	return pte_valid(READ_ONCE(*ptep));
 }
 #endif /* CONFIG_HIBERNATION */
 #endif /* CONFIG_DEBUG_PAGEALLOC */
@@ -205,7 +205,8 @@ ENDPROC(idmap_cpu_replace_ttbr1)
 	dc	cvac, cur_\()\type\()p		// Ensure any existing dirty
 	dmb	sy				// lines are written back before
 	ldr	\type, [cur_\()\type\()p]	// loading the entry
-	tbz	\type, #0, next_\()\type	// Skip invalid entries
+	tbz	\type, #0, skip_\()\type	// Skip invalid and
+	tbnz	\type, #11, skip_\()\type	// non-global entries
 	.endm

 	.macro __idmap_kpti_put_pgtable_ent_ng, type
@@ -265,8 +266,9 @@ ENTRY(idmap_kpti_install_ng_mappings)
 	add	end_pgdp, cur_pgdp, #(PTRS_PER_PGD * 8)
 do_pgd:	__idmap_kpti_get_pgtable_ent	pgd
 	tbnz	pgd, #1, walk_puds
-	__idmap_kpti_put_pgtable_ent_ng	pgd
 next_pgd:
+	__idmap_kpti_put_pgtable_ent_ng	pgd
+skip_pgd:
 	add	cur_pgdp, cur_pgdp, #8
 	cmp	cur_pgdp, end_pgdp
 	b.ne	do_pgd
@@ -294,8 +296,9 @@ walk_puds:
 	add	end_pudp, cur_pudp, #(PTRS_PER_PUD * 8)
 do_pud:	__idmap_kpti_get_pgtable_ent	pud
 	tbnz	pud, #1, walk_pmds
-	__idmap_kpti_put_pgtable_ent_ng	pud
 next_pud:
+	__idmap_kpti_put_pgtable_ent_ng	pud
+skip_pud:
 	add	cur_pudp, cur_pudp, 8
 	cmp	cur_pudp, end_pudp
 	b.ne	do_pud
@@ -314,8 +317,9 @@ walk_pmds:
 	add	end_pmdp, cur_pmdp, #(PTRS_PER_PMD * 8)
 do_pmd:	__idmap_kpti_get_pgtable_ent	pmd
 	tbnz	pmd, #1, walk_ptes
-	__idmap_kpti_put_pgtable_ent_ng	pmd
 next_pmd:
+	__idmap_kpti_put_pgtable_ent_ng	pmd
+skip_pmd:
 	add	cur_pmdp, cur_pmdp, #8
 	cmp	cur_pmdp, end_pmdp
 	b.ne	do_pmd
@@ -333,7 +337,7 @@ walk_ptes:
 	add	end_ptep, cur_ptep, #(PTRS_PER_PTE * 8)
do_pte:	__idmap_kpti_get_pgtable_ent	pte
 	__idmap_kpti_put_pgtable_ent_ng	pte
-next_pte:
+skip_pte:
 	add	cur_ptep, cur_ptep, #8
 	cmp	cur_ptep, end_ptep
 	b.ne	do_pte
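For reference, the two bit tests the macro above performs are on descriptor bit 0 (the valid bit) and bit 11 (nG, not-global, in the lower attributes of an arm64 descriptor). A hedged C restatement of the skip condition the assembly implements, purely as a reading aid:

#include <stdbool.h>

/* Sketch only: mirrors the tbz #0 / tbnz #11 tests in the macro above. */
static bool skip_entry(unsigned long desc)
{
	return !(desc & (1UL << 0)) ||	/* invalid entry */
	        (desc & (1UL << 11));	/* already non-global */
}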
@@ -41,7 +41,6 @@ ifneq ($(CONFIG_IA64_ESI),)
 obj-y				+= esi_stub.o	# must be in kernel proper
 endif
 obj-$(CONFIG_INTEL_IOMMU)	+= pci-dma.o
 obj-$(CONFIG_SWIOTLB)		+= pci-swiotlb.o

 obj-$(CONFIG_BINFMT_ELF)	+= elfcore.o
@@ -375,6 +375,7 @@ static void __init bootmem_init(void)
 	unsigned long reserved_end;
 	unsigned long mapstart = ~0UL;
 	unsigned long bootmap_size;
+	phys_addr_t ramstart = (phys_addr_t)ULLONG_MAX;
 	bool bootmap_valid = false;
 	int i;
@@ -395,7 +396,8 @@ static void __init bootmem_init(void)
 	max_low_pfn = 0;

 	/*
-	 * Find the highest page frame number we have available.
+	 * Find the highest page frame number we have available
+	 * and the lowest used RAM address
 	 */
 	for (i = 0; i < boot_mem_map.nr_map; i++) {
 		unsigned long start, end;
@@ -407,6 +409,8 @@ static void __init bootmem_init(void)
 		end = PFN_DOWN(boot_mem_map.map[i].addr
 				+ boot_mem_map.map[i].size);

+		ramstart = min(ramstart, boot_mem_map.map[i].addr);
+
#ifndef CONFIG_HIGHMEM
 		/*
 		 * Skip highmem here so we get an accurate max_low_pfn if low
@@ -436,6 +440,13 @@ static void __init bootmem_init(void)
 		mapstart = max(reserved_end, start);
 	}

+	/*
+	 * Reserve any memory between the start of RAM and PHYS_OFFSET
+	 */
+	if (ramstart > PHYS_OFFSET)
+		add_memory_region(PHYS_OFFSET, ramstart - PHYS_OFFSET,
+				  BOOT_MEM_RESERVED);
+
 	if (min_low_pfn >= max_low_pfn)
 		panic("Incorrect memory mapping !!!");
 	if (min_low_pfn > ARCH_PFN_OFFSET) {
@@ -664,9 +675,6 @@ static int __init early_parse_mem(char *p)

 	add_memory_region(start, size, BOOT_MEM_RAM);

-	if (start && start > PHYS_OFFSET)
-		add_memory_region(PHYS_OFFSET, start - PHYS_OFFSET,
-				  BOOT_MEM_RESERVED);
 	return 0;
 }
 early_param("mem", early_parse_mem);
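The bootmem_init() change above tracks the lowest RAM address seen while scanning the boot memory map and reserves the [PHYS_OFFSET, ramstart) hole once, replacing the per-"mem=" reservation dropped from early_parse_mem(). A standalone model of that bookkeeping (map contents and offsets are made up):

#include <stdint.h>
#include <stdio.h>

#define PHYS_OFFSET 0x0ULL

int main(void)
{
	uint64_t map[] = { 0x80000, 0x100000 };	/* region base addresses */
	uint64_t ramstart = UINT64_MAX;

	for (unsigned int i = 0; i < 2; i++)
		if (map[i] < ramstart)
			ramstart = map[i];	/* lowest used RAM address */

	if (ramstart > PHYS_OFFSET)		/* reserve the gap below RAM */
		printf("reserve %#llx..%#llx\n",
		       (unsigned long long)PHYS_OFFSET,
		       (unsigned long long)ramstart);
	return 0;
}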
@@ -572,7 +572,7 @@ asmlinkage void __weak plat_wired_tlb_setup(void)
 	 */
 }

-void __init bmips_cpu_setup(void)
+void bmips_cpu_setup(void)
 {
 	void __iomem __maybe_unused *cbr = BMIPS_GET_CBR();
 	u32 __maybe_unused cfg;
@@ -81,6 +81,9 @@ static inline int numa_update_cpu_topology(bool cpus_locked)
 {
 	return 0;
 }
+
+static inline void update_numa_cpu_lookup_table(unsigned int cpu, int node) {}
+
 #endif /* CONFIG_NUMA */

 #if defined(CONFIG_NUMA) && defined(CONFIG_PPC_SPLPAR)
@@ -788,7 +788,8 @@ static int register_cpu_online(unsigned int cpu)
 	if (cpu_has_feature(CPU_FTR_PPCAS_ARCH_V2))
 		device_create_file(s, &dev_attr_pir);

-	if (cpu_has_feature(CPU_FTR_ARCH_206))
+	if (cpu_has_feature(CPU_FTR_ARCH_206) &&
+		!firmware_has_feature(FW_FEATURE_LPAR))
 		device_create_file(s, &dev_attr_tscr);
 #endif /* CONFIG_PPC64 */
@@ -873,7 +874,8 @@ static int unregister_cpu_online(unsigned int cpu)
 	if (cpu_has_feature(CPU_FTR_PPCAS_ARCH_V2))
 		device_remove_file(s, &dev_attr_pir);

-	if (cpu_has_feature(CPU_FTR_ARCH_206))
+	if (cpu_has_feature(CPU_FTR_ARCH_206) &&
+		!firmware_has_feature(FW_FEATURE_LPAR))
 		device_remove_file(s, &dev_attr_tscr);
 #endif /* CONFIG_PPC64 */
@@ -216,6 +216,8 @@ static void __init __walk_drmem_v1_lmbs(const __be32 *prop, const __be32 *usm,
 	u32 i, n_lmbs;

 	n_lmbs = of_read_number(prop++, 1);
+	if (n_lmbs == 0)
+		return;

 	for (i = 0; i < n_lmbs; i++) {
 		read_drconf_v1_cell(&lmb, &prop);
@@ -245,6 +247,8 @@ static void __init __walk_drmem_v2_lmbs(const __be32 *prop, const __be32 *usm,
 	u32 i, j, lmb_sets;

 	lmb_sets = of_read_number(prop++, 1);
+	if (lmb_sets == 0)
+		return;

 	for (i = 0; i < lmb_sets; i++) {
 		read_drconf_v2_cell(&dr_cell, &prop);
@@ -354,6 +358,8 @@ static void __init init_drmem_v1_lmbs(const __be32 *prop)
 	struct drmem_lmb *lmb;

 	drmem_info->n_lmbs = of_read_number(prop++, 1);
+	if (drmem_info->n_lmbs == 0)
+		return;

 	drmem_info->lmbs = kcalloc(drmem_info->n_lmbs, sizeof(*lmb),
 				   GFP_KERNEL);
@@ -373,6 +379,8 @@ static void __init init_drmem_v2_lmbs(const __be32 *prop)
 	int lmb_index;

 	lmb_sets = of_read_number(prop++, 1);
+	if (lmb_sets == 0)
+		return;

 	/* first pass, calculate the number of LMBs */
 	p = prop;
@@ -199,9 +199,11 @@ static void disable_nest_pmu_counters(void)
 	const struct cpumask *l_cpumask;

 	get_online_cpus();
-	for_each_online_node(nid) {
+	for_each_node_with_cpus(nid) {
 		l_cpumask = cpumask_of_node(nid);
-		cpu = cpumask_first(l_cpumask);
+		cpu = cpumask_first_and(l_cpumask, cpu_online_mask);
+		if (cpu >= nr_cpu_ids)
+			continue;
 		opal_imc_counters_stop(OPAL_IMC_COUNTERS_NEST,
 				       get_hard_smp_processor_id(cpu));
 	}
@@ -356,7 +356,8 @@ static int xive_spapr_configure_queue(u32 target, struct xive_q *q, u8 prio,

 	rc = plpar_int_get_queue_info(0, target, prio, &esn_page, &esn_size);
 	if (rc) {
-		pr_err("Error %lld getting queue info prio %d\n", rc, prio);
+		pr_err("Error %lld getting queue info CPU %d prio %d\n", rc,
+		       target, prio);
 		rc = -EIO;
 		goto fail;
 	}
@@ -370,7 +371,8 @@ static int xive_spapr_configure_queue(u32 target, struct xive_q *q, u8 prio,
 	/* Configure and enable the queue in HW */
 	rc = plpar_int_set_queue_config(flags, target, prio, qpage_phys, order);
 	if (rc) {
-		pr_err("Error %lld setting queue for prio %d\n", rc, prio);
+		pr_err("Error %lld setting queue for CPU %d prio %d\n", rc,
+		       target, prio);
 		rc = -EIO;
 	} else {
 		q->qpage = qpage;
@@ -389,8 +391,8 @@ static int xive_spapr_setup_queue(unsigned int cpu, struct xive_cpu *xc,
 	if (IS_ERR(qpage))
 		return PTR_ERR(qpage);

-	return xive_spapr_configure_queue(cpu, q, prio, qpage,
-					  xive_queue_shift);
+	return xive_spapr_configure_queue(get_hard_smp_processor_id(cpu),
+					  q, prio, qpage, xive_queue_shift);
 }

 static void xive_spapr_cleanup_queue(unsigned int cpu, struct xive_cpu *xc,
@@ -399,10 +401,12 @@ static void xive_spapr_cleanup_queue(unsigned int cpu, struct xive_cpu *xc,
 	struct xive_q *q = &xc->queue[prio];
 	unsigned int alloc_order;
 	long rc;
+	int hw_cpu = get_hard_smp_processor_id(cpu);

-	rc = plpar_int_set_queue_config(0, cpu, prio, 0, 0);
+	rc = plpar_int_set_queue_config(0, hw_cpu, prio, 0, 0);
 	if (rc)
-		pr_err("Error %ld setting queue for prio %d\n", rc, prio);
+		pr_err("Error %ld setting queue for CPU %d prio %d\n", rc,
+		       hw_cpu, prio);

 	alloc_order = xive_alloc_order(xive_queue_shift);
 	free_pages((unsigned long)q->qpage, alloc_order);
@@ -430,6 +430,8 @@ config SPARC_LEON
 	depends on SPARC32
 	select USB_EHCI_BIG_ENDIAN_MMIO
 	select USB_EHCI_BIG_ENDIAN_DESC
+	select USB_UHCI_BIG_ENDIAN_MMIO
+	select USB_UHCI_BIG_ENDIAN_DESC
 	---help---
 	  If you say Y here if you are running on a SPARC-LEON processor.
 	  The LEON processor is a synthesizable VHDL model of the
@@ -1404,7 +1404,7 @@ config HIGHMEM4G

 config HIGHMEM64G
 	bool "64GB"
-	depends on !M486
+	depends on !M486 && !M586 && !M586TSC && !M586MMX && !MGEODE_LX && !MGEODEGX1 && !MCYRIXIII && !MELAN && !MWINCHIPC6 && !MWINCHIP3D && !MK6
 	select X86_PAE
 	---help---
 	  Select this if you have a 32-bit processor and more than 4
@@ -374,7 +374,7 @@ config X86_TSC

 config X86_CMPXCHG64
 	def_bool y
-	depends on X86_PAE || X86_64 || MCORE2 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MATOM
+	depends on X86_PAE || X86_64 || MCORE2 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || M586TSC || M586MMX || MATOM || MGEODE_LX || MGEODEGX1 || MK6 || MK7 || MK8

 # this should be set for all -march=.. options where the compiler
 # generates cmov.
@@ -385,7 +385,7 @@ config X86_CMOV
 config X86_MINIMUM_CPU_FAMILY
 	int
 	default "64" if X86_64
-	default "6" if X86_32 && X86_P6_NOP
+	default "6" if X86_32 && (MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MEFFICEON || MATOM || MCRUSOE || MCORE2 || MK7 || MK8)
 	default "5" if X86_32 && X86_CMPXCHG64
 	default "4"
@@ -129,6 +129,7 @@ static inline void arch_send_call_function_ipi_mask(const struct cpumask *mask)
 void cpu_disable_common(void);
 void native_smp_prepare_boot_cpu(void);
 void native_smp_prepare_cpus(unsigned int max_cpus);
+void calculate_max_logical_packages(void);
 void native_smp_cpus_done(unsigned int max_cpus);
 void common_cpu_up(unsigned int cpunum, struct task_struct *tidle);
 int native_cpu_up(unsigned int cpunum, struct task_struct *tidle);
@@ -1281,11 +1281,10 @@ void __init native_smp_prepare_boot_cpu(void)
 	cpu_set_state_online(me);
 }

-void __init native_smp_cpus_done(unsigned int max_cpus)
+void __init calculate_max_logical_packages(void)
 {
 	int ncpus;

-	pr_debug("Boot done\n");
 	/*
 	 * Today neither Intel nor AMD support heterogenous systems so
 	 * extrapolate the boot cpu's data to all packages.
@@ -1293,6 +1292,13 @@ void __init native_smp_cpus_done(unsigned int max_cpus)
 	ncpus = cpu_data(0).booted_cores * topology_max_smt_threads();
 	__max_logical_packages = DIV_ROUND_UP(nr_cpu_ids, ncpus);
 	pr_info("Max logical packages: %u\n", __max_logical_packages);
+}
+
+void __init native_smp_cpus_done(unsigned int max_cpus)
+{
+	pr_debug("Boot done\n");
+
+	calculate_max_logical_packages();

 	if (x86_has_numa_in_package)
 		set_sched_topology(x86_numa_in_package_topology);
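The extraction above does not change the package arithmetic; it only moves it into a function that Xen can call without the rest of native_smp_cpus_done(). The computation itself, modeled standalone with example numbers:

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	/* illustrative values: 4 boot-CPU cores, 2 SMT threads, 16 CPU ids */
	int booted_cores = 4, max_smt_threads = 2, nr_cpu_ids = 16;
	int ncpus = booted_cores * max_smt_threads;

	printf("Max logical packages: %d\n", DIV_ROUND_UP(nr_cpu_ids, ncpus));
	return 0;
}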
@@ -122,6 +122,8 @@ void __init xen_smp_cpus_done(unsigned int max_cpus)

 	if (xen_hvm_domain())
 		native_smp_cpus_done(max_cpus);
+	else
+		calculate_max_logical_packages();

 	if (xen_have_vcpu_info_placement)
 		return;
@@ -3164,6 +3164,7 @@ static bool __blk_mq_poll(struct blk_mq_hw_ctx *hctx, struct request *rq)
 		cpu_relax();
 	}

+	__set_current_state(TASK_RUNNING);
 	return false;
 }
@@ -660,13 +660,15 @@ struct acpi_device *acpi_companion_match(const struct device *dev)
 * acpi_of_match_device - Match device object using the "compatible" property.
 * @adev: ACPI device object to match.
 * @of_match_table: List of device IDs to match against.
+ * @of_id: OF ID if matched
 *
 * If @dev has an ACPI companion which has ACPI_DT_NAMESPACE_HID in its list of
 * identifiers and a _DSD object with the "compatible" property, use that
 * property to match against the given list of identifiers.
 */
 static bool acpi_of_match_device(struct acpi_device *adev,
-				 const struct of_device_id *of_match_table)
+				 const struct of_device_id *of_match_table,
+				 const struct of_device_id **of_id)
 {
 	const union acpi_object *of_compatible, *obj;
 	int i, nval;
@@ -690,8 +692,11 @@ static bool acpi_of_match_device(struct acpi_device *adev,
 		const struct of_device_id *id;

 		for (id = of_match_table; id->compatible[0]; id++)
-			if (!strcasecmp(obj->string.pointer, id->compatible))
+			if (!strcasecmp(obj->string.pointer, id->compatible)) {
+				if (of_id)
+					*of_id = id;
 				return true;
+			}
 	}

 	return false;
@@ -762,10 +767,11 @@ static bool __acpi_match_device_cls(const struct acpi_device_id *id,
 	return true;
 }

-static const struct acpi_device_id *__acpi_match_device(
-	struct acpi_device *device,
-	const struct acpi_device_id *ids,
-	const struct of_device_id *of_ids)
+static bool __acpi_match_device(struct acpi_device *device,
+				const struct acpi_device_id *acpi_ids,
+				const struct of_device_id *of_ids,
+				const struct acpi_device_id **acpi_id,
+				const struct of_device_id **of_id)
 {
 	const struct acpi_device_id *id;
 	struct acpi_hardware_id *hwid;
@@ -775,30 +781,32 @@ static const struct acpi_device_id *__acpi_match_device(
 	 * driver for it.
 	 */
 	if (!device || !device->status.present)
-		return NULL;
+		return false;

 	list_for_each_entry(hwid, &device->pnp.ids, list) {
 		/* First, check the ACPI/PNP IDs provided by the caller. */
-		for (id = ids; id->id[0] || id->cls; id++) {
-			if (id->id[0] && !strcmp((char *) id->id, hwid->id))
-				return id;
-			else if (id->cls && __acpi_match_device_cls(id, hwid))
-				return id;
+		if (acpi_ids) {
+			for (id = acpi_ids; id->id[0] || id->cls; id++) {
+				if (id->id[0] && !strcmp((char *)id->id, hwid->id))
+					goto out_acpi_match;
+				if (id->cls && __acpi_match_device_cls(id, hwid))
+					goto out_acpi_match;
+			}
 		}

 		/*
 		 * Next, check ACPI_DT_NAMESPACE_HID and try to match the
 		 * "compatible" property if found.
-		 *
-		 * The id returned by the below is not valid, but the only
-		 * caller passing non-NULL of_ids here is only interested in
-		 * whether or not the return value is NULL.
 		 */
-		if (!strcmp(ACPI_DT_NAMESPACE_HID, hwid->id)
-		    && acpi_of_match_device(device, of_ids))
-			return id;
+		if (!strcmp(ACPI_DT_NAMESPACE_HID, hwid->id))
+			return acpi_of_match_device(device, of_ids, of_id);
 	}
-	return NULL;
+	return false;
+
+out_acpi_match:
+	if (acpi_id)
+		*acpi_id = id;
+	return true;
 }

 /**
@@ -815,32 +823,29 @@ static const struct acpi_device_id *__acpi_match_device(
 const struct acpi_device_id *acpi_match_device(const struct acpi_device_id *ids,
 					       const struct device *dev)
 {
-	return __acpi_match_device(acpi_companion_match(dev), ids, NULL);
+	const struct acpi_device_id *id = NULL;
+
+	__acpi_match_device(acpi_companion_match(dev), ids, NULL, &id, NULL);
+	return id;
 }
 EXPORT_SYMBOL_GPL(acpi_match_device);

-void *acpi_get_match_data(const struct device *dev)
+const void *acpi_device_get_match_data(const struct device *dev)
 {
 	const struct acpi_device_id *match;

-	if (!dev->driver)
-		return NULL;
-
-	if (!dev->driver->acpi_match_table)
-		return NULL;
-
 	match = acpi_match_device(dev->driver->acpi_match_table, dev);
 	if (!match)
 		return NULL;

-	return (void *)match->driver_data;
+	return (const void *)match->driver_data;
 }
-EXPORT_SYMBOL_GPL(acpi_get_match_data);
+EXPORT_SYMBOL_GPL(acpi_device_get_match_data);

 int acpi_match_device_ids(struct acpi_device *device,
 			  const struct acpi_device_id *ids)
 {
-	return __acpi_match_device(device, ids, NULL) ? 0 : -ENOENT;
+	return __acpi_match_device(device, ids, NULL, NULL, NULL) ? 0 : -ENOENT;
 }
 EXPORT_SYMBOL(acpi_match_device_ids);
@@ -849,10 +854,12 @@ bool acpi_driver_match_device(struct device *dev,
 {
 	if (!drv->acpi_match_table)
 		return acpi_of_match_device(ACPI_COMPANION(dev),
-					    drv->of_match_table);
+					    drv->of_match_table,
+					    NULL);

-	return !!__acpi_match_device(acpi_companion_match(dev),
-				     drv->acpi_match_table, drv->of_match_table);
+	return __acpi_match_device(acpi_companion_match(dev),
+				   drv->acpi_match_table, drv->of_match_table,
+				   NULL, NULL);
 }
 EXPORT_SYMBOL_GPL(acpi_driver_match_device);
@@ -1927,6 +1927,9 @@ static int acpi_ec_suspend_noirq(struct device *dev)
 	    ec->reference_count >= 1)
 		acpi_set_gpe(NULL, ec->gpe, ACPI_GPE_DISABLE);

+	if (acpi_sleep_no_ec_events())
+		acpi_ec_enter_noirq(ec);
+
 	return 0;
 }
@@ -1934,6 +1937,9 @@ static int acpi_ec_resume_noirq(struct device *dev)
 {
 	struct acpi_ec *ec = acpi_driver_data(to_acpi_device(dev));

+	if (acpi_sleep_no_ec_events())
+		acpi_ec_leave_noirq(ec);
+
 	if (ec_no_wakeup && test_bit(EC_FLAGS_STARTED, &ec->flags) &&
 	    ec->reference_count >= 1)
 		acpi_set_gpe(NULL, ec->gpe, ACPI_GPE_ENABLE);
@@ -1271,11 +1271,11 @@ static int acpi_fwnode_graph_parse_endpoint(const struct fwnode_handle *fwnode,
 	return 0;
 }

-static void *
+static const void *
 acpi_fwnode_device_get_match_data(const struct fwnode_handle *fwnode,
 				  const struct device *dev)
 {
-	return acpi_get_match_data(dev);
+	return acpi_device_get_match_data(dev);
 }

 #define DECLARE_ACPI_FWNODE_OPS(ops) \
@@ -115,6 +115,7 @@ int __init acpi_parse_spcr(bool enable_earlycon, bool enable_console)
 				table->serial_port.access_width))) {
 	default:
 		pr_err("Unexpected SPCR Access Width. Defaulting to byte size\n");
+		/* fall through */
 	case 8:
 		iotype = "mmio";
 		break;
@@ -310,6 +310,9 @@ static void __device_link_del(struct device_link *link)
 	dev_info(link->consumer, "Dropping the link to %s\n",
 		 dev_name(link->supplier));

+	if (link->flags & DL_FLAG_PM_RUNTIME)
+		pm_runtime_drop_link(link->consumer);
+
 	list_del(&link->s_node);
 	list_del(&link->c_node);
 	device_link_free(link);
@@ -321,7 +321,8 @@ void dev_pm_arm_wake_irq(struct wake_irq *wirq)
 		return;

 	if (device_may_wakeup(wirq->dev)) {
-		if (wirq->status & WAKE_IRQ_DEDICATED_ALLOCATED)
+		if (wirq->status & WAKE_IRQ_DEDICATED_ALLOCATED &&
+		    !pm_runtime_status_suspended(wirq->dev))
 			enable_irq(wirq->irq);

 		enable_irq_wake(wirq->irq);
@@ -343,7 +344,8 @@ void dev_pm_disarm_wake_irq(struct wake_irq *wirq)
 	if (device_may_wakeup(wirq->dev)) {
 		disable_irq_wake(wirq->irq);

-		if (wirq->status & WAKE_IRQ_DEDICATED_ALLOCATED)
+		if (wirq->status & WAKE_IRQ_DEDICATED_ALLOCATED &&
+		    !pm_runtime_status_suspended(wirq->dev))
 			disable_irq_nosync(wirq->irq);
 	}
 }
@@ -1410,9 +1410,8 @@ int fwnode_graph_parse_endpoint(const struct fwnode_handle *fwnode,
 }
 EXPORT_SYMBOL(fwnode_graph_parse_endpoint);

-void *device_get_match_data(struct device *dev)
+const void *device_get_match_data(struct device *dev)
 {
-	return fwnode_call_ptr_op(dev_fwnode(dev), device_get_match_data,
-				  dev);
+	return fwnode_call_ptr_op(dev_fwnode(dev), device_get_match_data, dev);
 }
 EXPORT_SYMBOL_GPL(device_get_match_data);
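With both acpi_device_get_match_data() and device_get_match_data() constified above, callers have to hold the result in a const pointer. A hypothetical consumer, purely as a sketch (the driver, struct, and field names below are invented for illustration):

#include <linux/device.h>
#include <linux/errno.h>
#include <linux/property.h>

struct example_chip_info {		/* hypothetical per-device data */
	int num_ports;
};

static int example_probe(struct device *dev)
{
	/* must be const now that the API returns const void * */
	const struct example_chip_info *info = device_get_match_data(dev);

	if (!info)
		return -ENODEV;
	dev_info(dev, "%d ports\n", info->num_ports);
	return 0;
}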
@@ -568,6 +568,7 @@ static const struct amdgpu_px_quirk amdgpu_px_quirk_list[] = {
 	/* HG _PR3 doesn't seem to work on this A+A weston board */
 	{ 0x1002, 0x6900, 0x1002, 0x0124, AMDGPU_PX_QUIRK_FORCE_ATPX },
 	{ 0x1002, 0x6900, 0x1028, 0x0812, AMDGPU_PX_QUIRK_FORCE_ATPX },
+	{ 0x1002, 0x6900, 0x1028, 0x0813, AMDGPU_PX_QUIRK_FORCE_ATPX },
 	{ 0, 0, 0, 0, 0 },
 };
@@ -733,6 +733,25 @@ static ssize_t intel_vgpu_rw(struct mdev_device *mdev, char *buf,
 	return ret == 0 ? count : ret;
 }

+static bool gtt_entry(struct mdev_device *mdev, loff_t *ppos)
+{
+	struct intel_vgpu *vgpu = mdev_get_drvdata(mdev);
+	unsigned int index = VFIO_PCI_OFFSET_TO_INDEX(*ppos);
+	struct intel_gvt *gvt = vgpu->gvt;
+	int offset;
+
+	/* Only allow MMIO GGTT entry access */
+	if (index != PCI_BASE_ADDRESS_0)
+		return false;
+
+	offset = (u64)(*ppos & VFIO_PCI_OFFSET_MASK) -
+		intel_vgpu_get_bar_gpa(vgpu, PCI_BASE_ADDRESS_0);
+
+	return (offset >= gvt->device_info.gtt_start_offset &&
+		offset < gvt->device_info.gtt_start_offset + gvt_ggtt_sz(gvt)) ?
+			true : false;
+}
+
 static ssize_t intel_vgpu_read(struct mdev_device *mdev, char __user *buf,
 			size_t count, loff_t *ppos)
 {
@@ -742,7 +761,21 @@ static ssize_t intel_vgpu_read(struct mdev_device *mdev, char __user *buf,
 	while (count) {
 		size_t filled;

-		if (count >= 4 && !(*ppos % 4)) {
+		/* Only support GGTT entry 8 bytes read */
+		if (count >= 8 && !(*ppos % 8) &&
+			gtt_entry(mdev, ppos)) {
+			u64 val;
+
+			ret = intel_vgpu_rw(mdev, (char *)&val, sizeof(val),
+					ppos, false);
+			if (ret <= 0)
+				goto read_err;
+
+			if (copy_to_user(buf, &val, sizeof(val)))
+				goto read_err;
+
+			filled = 8;
+		} else if (count >= 4 && !(*ppos % 4)) {
 			u32 val;

 			ret = intel_vgpu_rw(mdev, (char *)&val, sizeof(val),
@@ -802,7 +835,21 @@ static ssize_t intel_vgpu_write(struct mdev_device *mdev,
 	while (count) {
 		size_t filled;

-		if (count >= 4 && !(*ppos % 4)) {
+		/* Only support GGTT entry 8 bytes write */
+		if (count >= 8 && !(*ppos % 8) &&
+			gtt_entry(mdev, ppos)) {
+			u64 val;
+
+			if (copy_from_user(&val, buf, sizeof(val)))
+				goto write_err;
+
+			ret = intel_vgpu_rw(mdev, (char *)&val, sizeof(val),
+					ppos, true);
+			if (ret <= 0)
+				goto write_err;
+
+			filled = 8;
+		} else if (count >= 4 && !(*ppos % 4)) {
 			u32 val;

 			if (copy_from_user(&val, buf, sizeof(val)))
@@ -118,6 +118,7 @@ static struct engine_mmio gen9_engine_mmio_list[] __cacheline_aligned = {
 	{RCS, HALF_SLICE_CHICKEN3, 0xffff, true}, /* 0xe184 */
 	{RCS, GEN9_HALF_SLICE_CHICKEN5, 0xffff, true}, /* 0xe188 */
 	{RCS, GEN9_HALF_SLICE_CHICKEN7, 0xffff, true}, /* 0xe194 */
+	{RCS, GEN8_ROW_CHICKEN, 0xffff, true}, /* 0xe4f0 */
 	{RCS, TRVATTL3PTRDW(0), 0, false}, /* 0x4de0 */
 	{RCS, TRVATTL3PTRDW(1), 0, false}, /* 0x4de4 */
 	{RCS, TRNULLDETCT, 0, false}, /* 0x4de8 */
@@ -333,7 +333,7 @@ TRACE_EVENT(render_mmio,
 	TP_PROTO(int old_id, int new_id, char *action, unsigned int reg,
 		 unsigned int old_val, unsigned int new_val),

-	TP_ARGS(old_id, new_id, action, reg, new_val, old_val),
+	TP_ARGS(old_id, new_id, action, reg, old_val, new_val),

 	TP_STRUCT__entry(
 		__field(int, old_id)
@@ -1433,19 +1433,7 @@ void i915_driver_unload(struct drm_device *dev)

 	intel_modeset_cleanup(dev);

-	/*
-	 * free the memory space allocated for the child device
-	 * config parsed from VBT
-	 */
-	if (dev_priv->vbt.child_dev && dev_priv->vbt.child_dev_num) {
-		kfree(dev_priv->vbt.child_dev);
-		dev_priv->vbt.child_dev = NULL;
-		dev_priv->vbt.child_dev_num = 0;
-	}
-	kfree(dev_priv->vbt.sdvo_lvds_vbt_mode);
-	dev_priv->vbt.sdvo_lvds_vbt_mode = NULL;
-	kfree(dev_priv->vbt.lfp_lvds_vbt_mode);
-	dev_priv->vbt.lfp_lvds_vbt_mode = NULL;
+	intel_bios_cleanup(dev_priv);

 	vga_switcheroo_unregister_client(pdev);
 	vga_client_register(pdev, NULL, NULL, NULL);
@@ -1349,6 +1349,7 @@ struct intel_vbt_data {
 		u32 size;
 		u8 *data;
 		const u8 *sequence[MIPI_SEQ_MAX];
+		u8 *deassert_seq; /* Used by fixup_mipi_sequences() */
 	} dsi;

 	int crt_ddc_pin;
@@ -3657,6 +3658,7 @@ extern void intel_i2c_reset(struct drm_i915_private *dev_priv);

 /* intel_bios.c */
 void intel_bios_init(struct drm_i915_private *dev_priv);
+void intel_bios_cleanup(struct drm_i915_private *dev_priv);
 bool intel_bios_is_valid_vbt(const void *buf, size_t size);
 bool intel_bios_is_tv_present(struct drm_i915_private *dev_priv);
 bool intel_bios_is_lvds_present(struct drm_i915_private *dev_priv, u8 *i2c_pin);
@@ -803,7 +803,7 @@ int i915_gem_context_setparam_ioctl(struct drm_device *dev, void *data,

 	case I915_CONTEXT_PARAM_PRIORITY:
 		{
-			int priority = args->value;
+			s64 priority = args->value;

 			if (args->size)
 				ret = -EINVAL;
@@ -84,9 +84,9 @@ show_test_oa_id(struct device *kdev, struct device_attribute *attr, char *buf)
 void
 i915_perf_load_test_config_cflgt3(struct drm_i915_private *dev_priv)
 {
-	strncpy(dev_priv->perf.oa.test_config.uuid,
+	strlcpy(dev_priv->perf.oa.test_config.uuid,
 		"577e8e2c-3fa0-4875-8743-3538d585e3b0",
-		UUID_STRING_LEN);
+		sizeof(dev_priv->perf.oa.test_config.uuid));
 	dev_priv->perf.oa.test_config.id = 1;

 	dev_priv->perf.oa.test_config.mux_regs = mux_config_test_oa;
@@ -96,9 +96,9 @@ show_test_oa_id(struct device *kdev, struct device_attribute *attr, char *buf)
 void
 i915_perf_load_test_config_cnl(struct drm_i915_private *dev_priv)
 {
-	strncpy(dev_priv->perf.oa.test_config.uuid,
+	strlcpy(dev_priv->perf.oa.test_config.uuid,
 		"db41edd4-d8e7-4730-ad11-b9a2d6833503",
-		UUID_STRING_LEN);
+		sizeof(dev_priv->perf.oa.test_config.uuid));
 	dev_priv->perf.oa.test_config.id = 1;

 	dev_priv->perf.oa.test_config.mux_regs = mux_config_test_oa;
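Both OA hunks fix the same latent bug: strncpy() does not NUL-terminate when the source fills the size limit, while strlcpy()-style copies always do. A userspace demonstration, using snprintf() as a portable stand-in since strlcpy() is not in ISO C:

#include <stdio.h>
#include <string.h>

int main(void)
{
	char trunc[8], safe[8];

	strncpy(trunc, "0123456789", sizeof(trunc));	 /* no terminator! */
	snprintf(safe, sizeof(safe), "%s", "0123456789");/* "0123456" + NUL */

	printf("safe = \"%s\"\n", safe);
	printf("trunc terminated? %s\n",
	       memchr(trunc, '\0', sizeof(trunc)) ? "yes" : "no");
	return 0;
}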
@@ -285,26 +285,41 @@ static u64 count_interrupts(struct drm_i915_private *i915)
 	return sum;
 }

-static void i915_pmu_event_destroy(struct perf_event *event)
-{
-	WARN_ON(event->parent);
-}
-
-static int engine_event_init(struct perf_event *event)
+static void engine_event_destroy(struct perf_event *event)
 {
 	struct drm_i915_private *i915 =
 		container_of(event->pmu, typeof(*i915), pmu.base);
+	struct intel_engine_cs *engine;

-	if (!intel_engine_lookup_user(i915, engine_event_class(event),
-				      engine_event_instance(event)))
-		return -ENODEV;
+	engine = intel_engine_lookup_user(i915,
+					  engine_event_class(event),
+					  engine_event_instance(event));
+	if (WARN_ON_ONCE(!engine))
+		return;

-	switch (engine_event_sample(event)) {
+	if (engine_event_sample(event) == I915_SAMPLE_BUSY &&
+	    intel_engine_supports_stats(engine))
+		intel_disable_engine_stats(engine);
+}
+
+static void i915_pmu_event_destroy(struct perf_event *event)
+{
+	WARN_ON(event->parent);
+
+	if (is_engine_event(event))
+		engine_event_destroy(event);
+}
+
+static int
+engine_event_status(struct intel_engine_cs *engine,
+		    enum drm_i915_pmu_engine_sample sample)
+{
+	switch (sample) {
 	case I915_SAMPLE_BUSY:
 	case I915_SAMPLE_WAIT:
 		break;
 	case I915_SAMPLE_SEMA:
-		if (INTEL_GEN(i915) < 6)
+		if (INTEL_GEN(engine->i915) < 6)
 			return -ENODEV;
 		break;
 	default:
@@ -314,6 +329,30 @@ static int engine_event_init(struct perf_event *event)
 	return 0;
 }

+static int engine_event_init(struct perf_event *event)
+{
+	struct drm_i915_private *i915 =
+		container_of(event->pmu, typeof(*i915), pmu.base);
+	struct intel_engine_cs *engine;
+	u8 sample;
+	int ret;
+
+	engine = intel_engine_lookup_user(i915, engine_event_class(event),
+					  engine_event_instance(event));
+	if (!engine)
+		return -ENODEV;
+
+	sample = engine_event_sample(event);
+	ret = engine_event_status(engine, sample);
+	if (ret)
+		return ret;
+
+	if (sample == I915_SAMPLE_BUSY && intel_engine_supports_stats(engine))
+		ret = intel_enable_engine_stats(engine);
+
+	return ret;
+}
+
 static int i915_pmu_event_init(struct perf_event *event)
 {
 	struct drm_i915_private *i915 =
@@ -370,7 +409,94 @@ static int i915_pmu_event_init(struct perf_event *event)
 	return 0;
 }

-static u64 __i915_pmu_event_read(struct perf_event *event)
+static u64 __get_rc6(struct drm_i915_private *i915)
+{
+	u64 val;
+
+	val = intel_rc6_residency_ns(i915,
+				     IS_VALLEYVIEW(i915) ?
+				     VLV_GT_RENDER_RC6 :
+				     GEN6_GT_GFX_RC6);
+
+	if (HAS_RC6p(i915))
+		val += intel_rc6_residency_ns(i915, GEN6_GT_GFX_RC6p);
+
+	if (HAS_RC6pp(i915))
+		val += intel_rc6_residency_ns(i915, GEN6_GT_GFX_RC6pp);
+
+	return val;
+}
+
+static u64 get_rc6(struct drm_i915_private *i915, bool locked)
+{
+#if IS_ENABLED(CONFIG_PM)
+	unsigned long flags;
+	u64 val;
+
+	if (intel_runtime_pm_get_if_in_use(i915)) {
+		val = __get_rc6(i915);
+		intel_runtime_pm_put(i915);
+
+		/*
+		 * If we are coming back from being runtime suspended we must
+		 * be careful not to report a larger value than returned
+		 * previously.
+		 */
+
+		if (!locked)
+			spin_lock_irqsave(&i915->pmu.lock, flags);
+
+		if (val >= i915->pmu.sample[__I915_SAMPLE_RC6_ESTIMATED].cur) {
+			i915->pmu.sample[__I915_SAMPLE_RC6_ESTIMATED].cur = 0;
+			i915->pmu.sample[__I915_SAMPLE_RC6].cur = val;
+		} else {
+			val = i915->pmu.sample[__I915_SAMPLE_RC6_ESTIMATED].cur;
+		}
+
+		if (!locked)
+			spin_unlock_irqrestore(&i915->pmu.lock, flags);
+	} else {
+		struct pci_dev *pdev = i915->drm.pdev;
+		struct device *kdev = &pdev->dev;
+		unsigned long flags2;
+
+		/*
+		 * We are runtime suspended.
+		 *
+		 * Report the delta from when the device was suspended to now,
+		 * on top of the last known real value, as the approximated RC6
+		 * counter value.
+		 */
+		if (!locked)
+			spin_lock_irqsave(&i915->pmu.lock, flags);
+
+		spin_lock_irqsave(&kdev->power.lock, flags2);
+
+		if (!i915->pmu.sample[__I915_SAMPLE_RC6_ESTIMATED].cur)
+			i915->pmu.suspended_jiffies_last =
+						kdev->power.suspended_jiffies;
+
+		val = kdev->power.suspended_jiffies -
+		      i915->pmu.suspended_jiffies_last;
+		val += jiffies - kdev->power.accounting_timestamp;
+
+		spin_unlock_irqrestore(&kdev->power.lock, flags2);
+
+		val = jiffies_to_nsecs(val);
+		val += i915->pmu.sample[__I915_SAMPLE_RC6].cur;
+		i915->pmu.sample[__I915_SAMPLE_RC6_ESTIMATED].cur = val;
+
+		if (!locked)
+			spin_unlock_irqrestore(&i915->pmu.lock, flags);
+	}
+
+	return val;
+#else
+	return __get_rc6(i915);
+#endif
+}
+
+static u64 __i915_pmu_event_read(struct perf_event *event, bool locked)
 {
 	struct drm_i915_private *i915 =
 		container_of(event->pmu, typeof(*i915), pmu.base);
@@ -387,7 +513,7 @@ static u64 __i915_pmu_event_read(struct perf_event *event)
 		if (WARN_ON_ONCE(!engine)) {
 			/* Do nothing */
 		} else if (sample == I915_SAMPLE_BUSY &&
-			   engine->pmu.busy_stats) {
+			   intel_engine_supports_stats(engine)) {
 			val = ktime_to_ns(intel_engine_get_busy_time(engine));
 		} else {
 			val = engine->pmu.sample[sample].cur;
@@ -408,18 +534,7 @@ static u64 __i915_pmu_event_read(struct perf_event *event)
 			val = count_interrupts(i915);
 			break;
 		case I915_PMU_RC6_RESIDENCY:
-			intel_runtime_pm_get(i915);
-			val = intel_rc6_residency_ns(i915,
-						     IS_VALLEYVIEW(i915) ?
-						     VLV_GT_RENDER_RC6 :
-						     GEN6_GT_GFX_RC6);
-			if (HAS_RC6p(i915))
-				val += intel_rc6_residency_ns(i915,
-							      GEN6_GT_GFX_RC6p);
-			if (HAS_RC6pp(i915))
-				val += intel_rc6_residency_ns(i915,
-							      GEN6_GT_GFX_RC6pp);
-			intel_runtime_pm_put(i915);
+			val = get_rc6(i915, locked);
 			break;
 		}
 	}
@@ -434,7 +549,7 @@ static void i915_pmu_event_read(struct perf_event *event)

again:
 	prev = local64_read(&hwc->prev_count);
-	new = __i915_pmu_event_read(event);
+	new = __i915_pmu_event_read(event, false);

 	if (local64_cmpxchg(&hwc->prev_count, prev, new) != prev)
 		goto again;
@@ -442,12 +557,6 @@ static void i915_pmu_event_read(struct perf_event *event)
 	local64_add(new - prev, &event->count);
 }

-static bool engine_needs_busy_stats(struct intel_engine_cs *engine)
-{
-	return intel_engine_supports_stats(engine) &&
-	       (engine->pmu.enable & BIT(I915_SAMPLE_BUSY));
-}
-
 static void i915_pmu_enable(struct perf_event *event)
 {
 	struct drm_i915_private *i915 =
@@ -487,21 +596,7 @@ static void i915_pmu_enable(struct perf_event *event)

 		GEM_BUG_ON(sample >= I915_PMU_SAMPLE_BITS);
 		GEM_BUG_ON(engine->pmu.enable_count[sample] == ~0);
-		if (engine->pmu.enable_count[sample]++ == 0) {
-			/*
-			 * Enable engine busy stats tracking if needed or
-			 * alternatively cancel the scheduled disable.
-			 *
-			 * If the delayed disable was pending, cancel it and
-			 * in this case do not enable since it already is.
-			 */
-			if (engine_needs_busy_stats(engine) &&
-			    !engine->pmu.busy_stats) {
-				engine->pmu.busy_stats = true;
-				if (!cancel_delayed_work(&engine->pmu.disable_busy_stats))
-					intel_enable_engine_stats(engine);
-			}
-		}
+		engine->pmu.enable_count[sample]++;
 	}

 	/*
@@ -509,19 +604,11 @@ static void i915_pmu_enable(struct perf_event *event)
 	 * for all listeners. Even when the event was already enabled and has
 	 * an existing non-zero value.
 	 */
-	local64_set(&event->hw.prev_count, __i915_pmu_event_read(event));
+	local64_set(&event->hw.prev_count, __i915_pmu_event_read(event, true));

 	spin_unlock_irqrestore(&i915->pmu.lock, flags);
 }

-static void __disable_busy_stats(struct work_struct *work)
-{
-	struct intel_engine_cs *engine =
-	       container_of(work, typeof(*engine), pmu.disable_busy_stats.work);
-
-	intel_disable_engine_stats(engine);
-}
-
 static void i915_pmu_disable(struct perf_event *event)
 {
 	struct drm_i915_private *i915 =
@@ -545,26 +632,8 @@ static void i915_pmu_disable(struct perf_event *event)
 		 * Decrement the reference count and clear the enabled
 		 * bitmask when the last listener on an event goes away.
 		 */
-		if (--engine->pmu.enable_count[sample] == 0) {
+		if (--engine->pmu.enable_count[sample] == 0)
 			engine->pmu.enable &= ~BIT(sample);
-			if (!engine_needs_busy_stats(engine) &&
-			    engine->pmu.busy_stats) {
-				engine->pmu.busy_stats = false;
-				/*
-				 * We request a delayed disable to handle the
-				 * rapid on/off cycles on events, which can
-				 * happen when tools like perf stat start, in a
-				 * nicer way.
-				 *
-				 * In addition, this also helps with busy stats
-				 * accuracy with background CPU offline/online
-				 * migration events.
-				 */
-				queue_delayed_work(system_wq,
-						   &engine->pmu.disable_busy_stats,
-						   round_jiffies_up_relative(HZ));
-			}
-		}
 	}

 	GEM_BUG_ON(bit >= I915_PMU_MASK_BITS);
@@ -797,8 +866,6 @@ static void i915_pmu_unregister_cpuhp_state(struct drm_i915_private *i915)

 void i915_pmu_register(struct drm_i915_private *i915)
 {
-	struct intel_engine_cs *engine;
-	enum intel_engine_id id;
 	int ret;

 	if (INTEL_GEN(i915) <= 2) {
@@ -820,10 +887,6 @@ void i915_pmu_register(struct drm_i915_private *i915)
 	hrtimer_init(&i915->pmu.timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
 	i915->pmu.timer.function = i915_sample;

-	for_each_engine(engine, i915, id)
-		INIT_DELAYED_WORK(&engine->pmu.disable_busy_stats,
-				  __disable_busy_stats);
-
 	ret = perf_pmu_register(&i915->pmu.base, "i915", -1);
 	if (ret)
 		goto err;
@@ -843,9 +906,6 @@ void i915_pmu_register(struct drm_i915_private *i915)

 void i915_pmu_unregister(struct drm_i915_private *i915)
 {
-	struct intel_engine_cs *engine;
-	enum intel_engine_id id;
-
 	if (!i915->pmu.base.event_init)
 		return;
@@ -853,11 +913,6 @@ void i915_pmu_unregister(struct drm_i915_private *i915)

 	hrtimer_cancel(&i915->pmu.timer);

-	for_each_engine(engine, i915, id) {
-		GEM_BUG_ON(engine->pmu.busy_stats);
-		flush_delayed_work(&engine->pmu.disable_busy_stats);
-	}
-
 	i915_pmu_unregister_cpuhp_state(i915);

 	perf_pmu_unregister(&i915->pmu.base);
@@ -27,6 +27,8 @@
enum {
	__I915_SAMPLE_FREQ_ACT = 0,
	__I915_SAMPLE_FREQ_REQ,
+	__I915_SAMPLE_RC6,
+	__I915_SAMPLE_RC6_ESTIMATED,
	__I915_NUM_PMU_SAMPLERS
};

@@ -94,6 +96,10 @@ struct i915_pmu {
	 * struct intel_engine_cs.
	 */
	struct i915_pmu_sample sample[__I915_NUM_PMU_SAMPLERS];
+	/**
+	 * @suspended_jiffies_last: Cached suspend time from PM core.
+	 */
+	unsigned long suspended_jiffies_last;
};

#ifdef CONFIG_PERF_EVENTS
@@ -947,6 +947,86 @@ static int goto_next_sequence_v3(const u8 *data, int index, int total)
	return 0;
}

+/*
+ * Get len of pre-fixed deassert fragment from a v1 init OTP sequence,
+ * skip all delay + gpio operands and stop at the first DSI packet op.
+ */
+static int get_init_otp_deassert_fragment_len(struct drm_i915_private *dev_priv)
+{
+	const u8 *data = dev_priv->vbt.dsi.sequence[MIPI_SEQ_INIT_OTP];
+	int index, len;
+
+	if (WARN_ON(!data || dev_priv->vbt.dsi.seq_version != 1))
+		return 0;
+
+	/* index = 1 to skip sequence byte */
+	for (index = 1; data[index] != MIPI_SEQ_ELEM_END; index += len) {
+		switch (data[index]) {
+		case MIPI_SEQ_ELEM_SEND_PKT:
+			return index == 1 ? 0 : index;
+		case MIPI_SEQ_ELEM_DELAY:
+			len = 5; /* 1 byte for operand + uint32 */
+			break;
+		case MIPI_SEQ_ELEM_GPIO:
+			len = 3; /* 1 byte for op, 1 for gpio_nr, 1 for value */
+			break;
+		default:
+			return 0;
+		}
+	}
+
+	return 0;
+}
+
+/*
+ * Some v1 VBT MIPI sequences do the deassert in the init OTP sequence.
+ * The deassert must be done before calling intel_dsi_device_ready, so for
+ * these devices we split the init OTP sequence into a deassert sequence and
+ * the actual init OTP part.
+ */
+static void fixup_mipi_sequences(struct drm_i915_private *dev_priv)
+{
+	u8 *init_otp;
+	int len;
+
+	/* Limit this to VLV for now. */
+	if (!IS_VALLEYVIEW(dev_priv))
+		return;
+
+	/* Limit this to v1 vid-mode sequences */
+	if (dev_priv->vbt.dsi.config->is_cmd_mode ||
+	    dev_priv->vbt.dsi.seq_version != 1)
+		return;
+
+	/* Only do this if there are otp and assert seqs and no deassert seq */
+	if (!dev_priv->vbt.dsi.sequence[MIPI_SEQ_INIT_OTP] ||
+	    !dev_priv->vbt.dsi.sequence[MIPI_SEQ_ASSERT_RESET] ||
+	    dev_priv->vbt.dsi.sequence[MIPI_SEQ_DEASSERT_RESET])
+		return;
+
+	/* The deassert-sequence ends at the first DSI packet */
+	len = get_init_otp_deassert_fragment_len(dev_priv);
+	if (!len)
+		return;
+
+	DRM_DEBUG_KMS("Using init OTP fragment to deassert reset\n");
+
+	/* Copy the fragment, update seq byte and terminate it */
+	init_otp = (u8 *)dev_priv->vbt.dsi.sequence[MIPI_SEQ_INIT_OTP];
+	dev_priv->vbt.dsi.deassert_seq = kmemdup(init_otp, len + 1, GFP_KERNEL);
+	if (!dev_priv->vbt.dsi.deassert_seq)
+		return;
+	dev_priv->vbt.dsi.deassert_seq[0] = MIPI_SEQ_DEASSERT_RESET;
+	dev_priv->vbt.dsi.deassert_seq[len] = MIPI_SEQ_ELEM_END;
+	/* Use the copy for deassert */
+	dev_priv->vbt.dsi.sequence[MIPI_SEQ_DEASSERT_RESET] =
+		dev_priv->vbt.dsi.deassert_seq;
+	/* Replace the last byte of the fragment with init OTP seq byte */
+	init_otp[len - 1] = MIPI_SEQ_INIT_OTP;
+	/* And make MIPI_SEQ_INIT_OTP point to it */
+	dev_priv->vbt.dsi.sequence[MIPI_SEQ_INIT_OTP] = init_otp + len - 1;
+}
+
static void
parse_mipi_sequence(struct drm_i915_private *dev_priv,
		    const struct bdb_header *bdb)

@@ -1016,6 +1096,8 @@ parse_mipi_sequence(struct drm_i915_private *dev_priv,
	dev_priv->vbt.dsi.size = seq_size;
	dev_priv->vbt.dsi.seq_version = sequence->version;

+	fixup_mipi_sequences(dev_priv);
+
	DRM_DEBUG_DRIVER("MIPI related VBT parsing complete\n");
	return;

@@ -1588,6 +1670,29 @@ void intel_bios_init(struct drm_i915_private *dev_priv)
	pci_unmap_rom(pdev, bios);
}

+/**
+ * intel_bios_cleanup - Free any resources allocated by intel_bios_init()
+ * @dev_priv: i915 device instance
+ */
+void intel_bios_cleanup(struct drm_i915_private *dev_priv)
+{
+	kfree(dev_priv->vbt.child_dev);
+	dev_priv->vbt.child_dev = NULL;
+	dev_priv->vbt.child_dev_num = 0;
+	kfree(dev_priv->vbt.sdvo_lvds_vbt_mode);
+	dev_priv->vbt.sdvo_lvds_vbt_mode = NULL;
+	kfree(dev_priv->vbt.lfp_lvds_vbt_mode);
+	dev_priv->vbt.lfp_lvds_vbt_mode = NULL;
+	kfree(dev_priv->vbt.dsi.data);
+	dev_priv->vbt.dsi.data = NULL;
+	kfree(dev_priv->vbt.dsi.pps);
+	dev_priv->vbt.dsi.pps = NULL;
+	kfree(dev_priv->vbt.dsi.config);
+	dev_priv->vbt.dsi.config = NULL;
+	kfree(dev_priv->vbt.dsi.deassert_seq);
+	dev_priv->vbt.dsi.deassert_seq = NULL;
+}
+
/**
 * intel_bios_is_tv_present - is integrated TV present in VBT
 * @dev_priv: i915 device instance
@@ -594,29 +594,16 @@ void intel_engine_remove_wait(struct intel_engine_cs *engine,
	spin_unlock_irq(&b->rb_lock);
}

-static bool signal_valid(const struct drm_i915_gem_request *request)
-{
-	return intel_wait_check_request(&request->signaling.wait, request);
-}
-
static bool signal_complete(const struct drm_i915_gem_request *request)
{
	if (!request)
		return false;

-	/* If another process served as the bottom-half it may have already
-	 * signalled that this wait is already completed.
-	 */
-	if (intel_wait_complete(&request->signaling.wait))
-		return signal_valid(request);
-
-	/* Carefully check if the request is complete, giving time for the
+	/*
+	 * Carefully check if the request is complete, giving time for the
	 * seqno to be visible or if the GPU hung.
	 */
-	if (__i915_request_irq_complete(request))
-		return true;
-
-	return false;
+	return __i915_request_irq_complete(request);
}

static struct drm_i915_gem_request *to_signaler(struct rb_node *rb)

@@ -659,9 +646,13 @@ static int intel_breadcrumbs_signaler(void *arg)
		request = i915_gem_request_get_rcu(request);
		rcu_read_unlock();
		if (signal_complete(request)) {
-			local_bh_disable();
-			dma_fence_signal(&request->fence);
-			local_bh_enable(); /* kick start the tasklets */
+			if (!test_bit(DMA_FENCE_FLAG_SIGNALED_BIT,
+				      &request->fence.flags)) {
+				local_bh_disable();
+				dma_fence_signal(&request->fence);
+				GEM_BUG_ON(!i915_gem_request_completed(request));
+				local_bh_enable(); /* kick start the tasklets */
+			}

			spin_lock_irq(&b->rb_lock);
@@ -1952,6 +1952,14 @@ int intel_crtc_compute_min_cdclk(const struct intel_crtc_state *crtc_state)
	if (crtc_state->has_audio && INTEL_GEN(dev_priv) >= 9)
		min_cdclk = max(2 * 96000, min_cdclk);

+	/*
+	 * On Valleyview some DSI panels lose (v|h)sync when the clock is lower
+	 * than 320000KHz.
+	 */
+	if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI) &&
+	    IS_VALLEYVIEW(dev_priv))
+		min_cdclk = max(320000, min_cdclk);
+
	if (min_cdclk > dev_priv->max_cdclk_freq) {
		DRM_DEBUG_KMS("required cdclk (%d kHz) exceeds max (%d kHz)\n",
			      min_cdclk, dev_priv->max_cdclk_freq);
@@ -1458,7 +1458,9 @@ static bool ring_is_idle(struct intel_engine_cs *engine)
	struct drm_i915_private *dev_priv = engine->i915;
	bool idle = true;

-	intel_runtime_pm_get(dev_priv);
+	/* If the whole device is asleep, the engine must be idle */
+	if (!intel_runtime_pm_get_if_in_use(dev_priv))
+		return true;

	/* First check that no commands are left in the ring */
	if ((I915_READ_HEAD(engine) & HEAD_ADDR) !=

@@ -1943,16 +1945,22 @@ intel_engine_lookup_user(struct drm_i915_private *i915, u8 class, u8 instance)
 */
int intel_enable_engine_stats(struct intel_engine_cs *engine)
{
+	struct intel_engine_execlists *execlists = &engine->execlists;
	unsigned long flags;
+	int err = 0;

	if (!intel_engine_supports_stats(engine))
		return -ENODEV;

+	tasklet_disable(&execlists->tasklet);
	spin_lock_irqsave(&engine->stats.lock, flags);
-	if (engine->stats.enabled == ~0)
-		goto busy;
+
+	if (unlikely(engine->stats.enabled == ~0)) {
+		err = -EBUSY;
+		goto unlock;
+	}
+
	if (engine->stats.enabled++ == 0) {
-		struct intel_engine_execlists *execlists = &engine->execlists;
		const struct execlist_port *port = execlists->port;
		unsigned int num_ports = execlists_num_ports(execlists);

@@ -1967,14 +1975,12 @@ int intel_enable_engine_stats(struct intel_engine_cs *engine)
		if (engine->stats.active)
			engine->stats.start = engine->stats.enabled_at;
	}

+unlock:
	spin_unlock_irqrestore(&engine->stats.lock, flags);
+	tasklet_enable(&execlists->tasklet);

-	return 0;
-
-busy:
-	spin_unlock_irqrestore(&engine->stats.lock, flags);
-
-	return -EBUSY;
+	return err;
}

static ktime_t __intel_engine_get_busy_time(struct intel_engine_cs *engine)
@@ -366,20 +366,6 @@ struct intel_engine_cs {
		 */
#define I915_ENGINE_SAMPLE_MAX (I915_SAMPLE_SEMA + 1)
		struct i915_pmu_sample sample[I915_ENGINE_SAMPLE_MAX];
-		/**
-		 * @busy_stats: Has enablement of engine stats tracking been
-		 * requested.
-		 */
-		bool busy_stats;
-		/**
-		 * @disable_busy_stats: Work item for busy stats disabling.
-		 *
-		 * Same as with @enable_busy_stats action, with the difference
-		 * that we delay it in case there are rapid enable-disable
-		 * actions, which can happen during tool startup (like perf
-		 * stat).
-		 */
-		struct delayed_work disable_busy_stats;
	} pmu;

	/*
@@ -301,7 +301,7 @@ nvkm_therm_attr_set(struct nvkm_therm *therm,
void
nvkm_therm_clkgate_enable(struct nvkm_therm *therm)
{
-	if (!therm->func->clkgate_enable || !therm->clkgating_enabled)
+	if (!therm || !therm->func->clkgate_enable || !therm->clkgating_enabled)
		return;

	nvkm_debug(&therm->subdev,

@@ -312,7 +312,7 @@ nvkm_therm_clkgate_enable(struct nvkm_therm *therm)
void
nvkm_therm_clkgate_fini(struct nvkm_therm *therm, bool suspend)
{
-	if (!therm->func->clkgate_fini || !therm->clkgating_enabled)
+	if (!therm || !therm->func->clkgate_fini || !therm->clkgating_enabled)
		return;

	nvkm_debug(&therm->subdev,

@@ -395,7 +395,7 @@ void
nvkm_therm_clkgate_init(struct nvkm_therm *therm,
			const struct nvkm_therm_clkgate_pack *p)
{
-	if (!therm->func->clkgate_init || !therm->clkgating_enabled)
+	if (!therm || !therm->func->clkgate_init || !therm->clkgating_enabled)
		return;

	therm->func->clkgate_init(therm, p);
@@ -129,7 +129,10 @@ static ssize_t temp1_input_show(struct device *dev,

	data->read_tempreg(data->pdev, &regval);
	temp = (regval >> 21) * 125;
-	temp -= data->temp_offset;
+	if (temp > data->temp_offset)
+		temp -= data->temp_offset;
+	else
+		temp = 0;

	return sprintf(buf, "%u\n", temp);
}
@@ -339,9 +339,6 @@ int __init bcm7038_l1_of_init(struct device_node *dn,
		goto out_unmap;
	}

-	pr_info("registered BCM7038 L1 intc (mem: 0x%p, IRQs: %d)\n",
-		intc->cpus[0]->map_base, IRQS_PER_WORD * intc->n_words);
-
	return 0;

out_unmap:

@@ -318,9 +318,6 @@ static int __init bcm7120_l2_intc_probe(struct device_node *dn,
		}
	}

-	pr_info("registered %s intc (mem: 0x%p, parent IRQ(s): %d)\n",
-		intc_name, data->map_base[0], data->num_parent_irqs);
-
	return 0;

out_free_domain:

@@ -262,9 +262,6 @@ static int __init brcmstb_l2_intc_of_init(struct device_node *np,
		ct->chip.irq_set_wake = irq_gc_set_wake;
	}

-	pr_info("registered L2 intc (mem: 0x%p, parent irq: %d)\n",
-		base, parent_irq);
-
	return 0;

out_free_domain:
@@ -94,7 +94,7 @@ static struct irq_chip gicv2m_msi_irq_chip = {

static struct msi_domain_info gicv2m_msi_domain_info = {
	.flags	= (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
-		   MSI_FLAG_PCI_MSIX),
+		   MSI_FLAG_PCI_MSIX | MSI_FLAG_MULTI_PCI_MSI),
	.chip	= &gicv2m_msi_irq_chip,
};

@@ -155,18 +155,12 @@ static int gicv2m_irq_gic_domain_alloc(struct irq_domain *domain,
	return 0;
}

-static void gicv2m_unalloc_msi(struct v2m_data *v2m, unsigned int hwirq)
+static void gicv2m_unalloc_msi(struct v2m_data *v2m, unsigned int hwirq,
+			       int nr_irqs)
{
-	int pos;
-
-	pos = hwirq - v2m->spi_start;
-	if (pos < 0 || pos >= v2m->nr_spis) {
-		pr_err("Failed to teardown msi. Invalid hwirq %d\n", hwirq);
-		return;
-	}
-
	spin_lock(&v2m_lock);
-	__clear_bit(pos, v2m->bm);
+	bitmap_release_region(v2m->bm, hwirq - v2m->spi_start,
+			      get_count_order(nr_irqs));
	spin_unlock(&v2m_lock);
}

@@ -174,13 +168,13 @@ static int gicv2m_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
				   unsigned int nr_irqs, void *args)
{
	struct v2m_data *v2m = NULL, *tmp;
-	int hwirq, offset, err = 0;
+	int hwirq, offset, i, err = 0;

	spin_lock(&v2m_lock);
	list_for_each_entry(tmp, &v2m_nodes, entry) {
-		offset = find_first_zero_bit(tmp->bm, tmp->nr_spis);
-		if (offset < tmp->nr_spis) {
-			__set_bit(offset, tmp->bm);
+		offset = bitmap_find_free_region(tmp->bm, tmp->nr_spis,
+						 get_count_order(nr_irqs));
+		if (offset >= 0) {
			v2m = tmp;
			break;
		}

@@ -192,16 +186,21 @@ static int gicv2m_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,

	hwirq = v2m->spi_start + offset;

-	err = gicv2m_irq_gic_domain_alloc(domain, virq, hwirq);
-	if (err) {
-		gicv2m_unalloc_msi(v2m, hwirq);
-		return err;
+	for (i = 0; i < nr_irqs; i++) {
+		err = gicv2m_irq_gic_domain_alloc(domain, virq + i, hwirq + i);
+		if (err)
+			goto fail;
+
+		irq_domain_set_hwirq_and_chip(domain, virq + i, hwirq + i,
+					      &gicv2m_irq_chip, v2m);
	}

-	irq_domain_set_hwirq_and_chip(domain, virq, hwirq,
-				      &gicv2m_irq_chip, v2m);
-
	return 0;
+
+fail:
+	irq_domain_free_irqs_parent(domain, virq, nr_irqs);
+	gicv2m_unalloc_msi(v2m, hwirq, get_count_order(nr_irqs));
+	return err;
}

static void gicv2m_irq_domain_free(struct irq_domain *domain,

@@ -210,8 +209,7 @@ static void gicv2m_irq_domain_free(struct irq_domain *domain,
	struct irq_data *d = irq_domain_get_irq_data(domain, virq);
	struct v2m_data *v2m = irq_data_get_irq_chip_data(d);

-	BUG_ON(nr_irqs != 1);
-	gicv2m_unalloc_msi(v2m, d->hwirq);
+	gicv2m_unalloc_msi(v2m, d->hwirq, nr_irqs);
	irq_domain_free_irqs_parent(domain, virq, nr_irqs);
}
@@ -132,6 +132,8 @@ static int __init its_pci_of_msi_init(void)

	for (np = of_find_matching_node(NULL, its_device_id); np;
	     np = of_find_matching_node(np, its_device_id)) {
+		if (!of_device_is_available(np))
+			continue;
		if (!of_property_read_bool(np, "msi-controller"))
			continue;

@@ -154,6 +154,8 @@ static void __init its_pmsi_of_init(void)

	for (np = of_find_matching_node(NULL, its_device_id); np;
	     np = of_find_matching_node(np, its_device_id)) {
+		if (!of_device_is_available(np))
+			continue;
		if (!of_property_read_bool(np, "msi-controller"))
			continue;

@@ -3314,6 +3314,8 @@ static int __init its_of_probe(struct device_node *node)

	for (np = of_find_matching_node(node, its_device_id); np;
	     np = of_find_matching_node(np, its_device_id)) {
+		if (!of_device_is_available(np))
+			continue;
		if (!of_property_read_bool(np, "msi-controller")) {
			pr_warn("%pOF: no msi-controller property, ITS ignored\n",
				np);
@@ -673,7 +673,7 @@ static void gic_send_sgi(u64 cluster_id, u16 tlist, unsigned int irq)
	       MPIDR_TO_SGI_RS(cluster_id)		|
	       tlist << ICC_SGI1R_TARGET_LIST_SHIFT);

-	pr_debug("CPU%d: ICC_SGI1R_EL1 %llx\n", smp_processor_id(), val);
+	pr_devel("CPU%d: ICC_SGI1R_EL1 %llx\n", smp_processor_id(), val);
	gic_write_sgi1r(val);
}

@@ -688,7 +688,7 @@ static void gic_raise_softirq(const struct cpumask *mask, unsigned int irq)
	 * Ensure that stores to Normal memory are visible to the
	 * other CPUs before issuing the IPI.
	 */
-	smp_wmb();
+	wmb();

	for_each_cpu(cpu, mask) {
		u64 cluster_id = MPIDR_TO_SGI_CLUSTER_ID(cpu_logical_map(cpu));
@@ -424,8 +424,6 @@ static int gic_shared_irq_domain_map(struct irq_domain *d, unsigned int virq,
	spin_lock_irqsave(&gic_lock, flags);
	write_gic_map_pin(intr, GIC_MAP_PIN_MAP_TO_PIN | gic_cpu_pin);
	write_gic_map_vp(intr, BIT(mips_cm_vp_id(cpu)));
-	gic_clear_pcpu_masks(intr);
-	set_bit(intr, per_cpu_ptr(pcpu_masks, cpu));
	irq_data_update_effective_affinity(data, cpumask_of(cpu));
	spin_unlock_irqrestore(&gic_lock, flags);
@@ -375,6 +375,7 @@ static struct macio_dev * macio_add_one_device(struct macio_chip *chip,
	dev->ofdev.dev.of_node = np;
	dev->ofdev.archdata.dma_mask = 0xffffffffUL;
	dev->ofdev.dev.dma_mask = &dev->ofdev.archdata.dma_mask;
+	dev->ofdev.dev.coherent_dma_mask = dev->ofdev.archdata.dma_mask;
	dev->ofdev.dev.parent = parent;
	dev->ofdev.dev.bus = &macio_bus_type;
	dev->ofdev.dev.release = macio_release_dev;
@@ -903,7 +903,8 @@ static void dec_pending(struct dm_io *io, blk_status_t error)
			queue_io(md, bio);
		} else {
			/* done with normal IO or empty flush */
-			bio->bi_status = io_error;
+			if (io_error)
+				bio->bi_status = io_error;
			bio_endio(bio);
		}
	}
@@ -1265,7 +1265,8 @@ static int bcm2835_add_host(struct bcm2835_host *host)
	char pio_limit_string[20];
	int ret;

-	mmc->f_max = host->max_clk;
+	if (!mmc->f_max || mmc->f_max > host->max_clk)
+		mmc->f_max = host->max_clk;
	mmc->f_min = host->max_clk / SDCDIV_MAX_CDIV;

	mmc->max_busy_timeout = ~0 / (mmc->f_max / 1000);
@@ -717,22 +717,6 @@ static int meson_mmc_clk_phase_tuning(struct mmc_host *mmc, u32 opcode,
static int meson_mmc_execute_tuning(struct mmc_host *mmc, u32 opcode)
{
	struct meson_host *host = mmc_priv(mmc);
-	int ret;
-
-	/*
-	 * If this is the initial tuning, try to get a sane Rx starting
-	 * phase before doing the actual tuning.
-	 */
-	if (!mmc->doing_retune) {
-		ret = meson_mmc_clk_phase_tuning(mmc, opcode, host->rx_clk);
-
-		if (ret)
-			return ret;
-	}
-
-	ret = meson_mmc_clk_phase_tuning(mmc, opcode, host->tx_clk);
-	if (ret)
-		return ret;

	return meson_mmc_clk_phase_tuning(mmc, opcode, host->rx_clk);
}

@@ -763,9 +747,8 @@ static void meson_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
		if (!IS_ERR(mmc->supply.vmmc))
			mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, ios->vdd);

-		/* Reset phases */
+		/* Reset rx phase */
		clk_set_phase(host->rx_clk, 0);
-		clk_set_phase(host->tx_clk, 270);

		break;
@@ -328,7 +328,7 @@ config MTD_NAND_MARVELL
	tristate "NAND controller support on Marvell boards"
	depends on PXA3xx || ARCH_MMP || PLAT_ORION || ARCH_MVEBU || \
		   COMPILE_TEST
-	depends on HAS_IOMEM
+	depends on HAS_IOMEM && HAS_DMA
	help
	  This enables the NAND flash controller driver for Marvell boards,
	  including:
@@ -752,10 +752,8 @@ static int vf610_nfc_probe(struct platform_device *pdev)
	if (mtd->oobsize > 64)
		mtd->oobsize = 64;

-	/*
-	 * mtd->ecclayout is not specified here because we're using the
-	 * default large page ECC layout defined in NAND core.
-	 */
+	/* Use default large page ECC layout defined in NAND core */
	mtd_set_ooblayout(mtd, &nand_ooblayout_lp_ops);
	if (chip->ecc.strength == 32) {
		nfc->ecc_mode = ECC_60_BYTE;
		chip->ecc.bytes = 60;
@@ -120,8 +120,12 @@ int nvme_reset_ctrl_sync(struct nvme_ctrl *ctrl)
	int ret;

	ret = nvme_reset_ctrl(ctrl);
-	if (!ret)
+	if (!ret) {
		flush_work(&ctrl->reset_work);
+		if (ctrl->state != NVME_CTRL_LIVE)
+			ret = -ENETRESET;
+	}

	return ret;
}
EXPORT_SYMBOL_GPL(nvme_reset_ctrl_sync);

@@ -265,7 +269,7 @@ bool nvme_change_ctrl_state(struct nvme_ctrl *ctrl,
	switch (new_state) {
	case NVME_CTRL_ADMIN_ONLY:
		switch (old_state) {
-		case NVME_CTRL_RECONNECTING:
+		case NVME_CTRL_CONNECTING:
			changed = true;
			/* FALLTHRU */
		default:

@@ -276,7 +280,7 @@ bool nvme_change_ctrl_state(struct nvme_ctrl *ctrl,
		switch (old_state) {
		case NVME_CTRL_NEW:
		case NVME_CTRL_RESETTING:
-		case NVME_CTRL_RECONNECTING:
+		case NVME_CTRL_CONNECTING:
			changed = true;
			/* FALLTHRU */
		default:

@@ -294,9 +298,9 @@ bool nvme_change_ctrl_state(struct nvme_ctrl *ctrl,
			break;
		}
		break;
-	case NVME_CTRL_RECONNECTING:
+	case NVME_CTRL_CONNECTING:
		switch (old_state) {
-		case NVME_CTRL_LIVE:
+		case NVME_CTRL_NEW:
		case NVME_CTRL_RESETTING:
			changed = true;
			/* FALLTHRU */

@@ -309,7 +313,7 @@ bool nvme_change_ctrl_state(struct nvme_ctrl *ctrl,
		case NVME_CTRL_LIVE:
		case NVME_CTRL_ADMIN_ONLY:
		case NVME_CTRL_RESETTING:
-		case NVME_CTRL_RECONNECTING:
+		case NVME_CTRL_CONNECTING:
			changed = true;
			/* FALLTHRU */
		default:

@@ -518,9 +522,11 @@ static blk_status_t nvme_setup_discard(struct nvme_ns *ns, struct request *req,
		u64 slba = nvme_block_nr(ns, bio->bi_iter.bi_sector);
		u32 nlb = bio->bi_iter.bi_size >> ns->lba_shift;

-		range[n].cattr = cpu_to_le32(0);
-		range[n].nlb = cpu_to_le32(nlb);
-		range[n].slba = cpu_to_le64(slba);
+		if (n < segments) {
+			range[n].cattr = cpu_to_le32(0);
+			range[n].nlb = cpu_to_le32(nlb);
+			range[n].slba = cpu_to_le64(slba);
+		}
		n++;
	}

@@ -794,13 +800,9 @@ static void nvme_keep_alive_end_io(struct request *rq, blk_status_t status)

static int nvme_keep_alive(struct nvme_ctrl *ctrl)
{
-	struct nvme_command c;
	struct request *rq;

-	memset(&c, 0, sizeof(c));
-	c.common.opcode = nvme_admin_keep_alive;
-
-	rq = nvme_alloc_request(ctrl->admin_q, &c, BLK_MQ_REQ_RESERVED,
+	rq = nvme_alloc_request(ctrl->admin_q, &ctrl->ka_cmd, BLK_MQ_REQ_RESERVED,
			NVME_QID_ANY);
	if (IS_ERR(rq))
		return PTR_ERR(rq);

@@ -832,6 +834,8 @@ void nvme_start_keep_alive(struct nvme_ctrl *ctrl)
		return;

	INIT_DELAYED_WORK(&ctrl->ka_work, nvme_keep_alive_work);
+	memset(&ctrl->ka_cmd, 0, sizeof(ctrl->ka_cmd));
+	ctrl->ka_cmd.common.opcode = nvme_admin_keep_alive;
	schedule_delayed_work(&ctrl->ka_work, ctrl->kato * HZ);
}
EXPORT_SYMBOL_GPL(nvme_start_keep_alive);

@@ -1117,14 +1121,19 @@ static u32 nvme_passthru_start(struct nvme_ctrl *ctrl, struct nvme_ns *ns,

static void nvme_update_formats(struct nvme_ctrl *ctrl)
{
-	struct nvme_ns *ns;
+	struct nvme_ns *ns, *next;
+	LIST_HEAD(rm_list);

	mutex_lock(&ctrl->namespaces_mutex);
	list_for_each_entry(ns, &ctrl->namespaces, list) {
-		if (ns->disk && nvme_revalidate_disk(ns->disk))
-			nvme_ns_remove(ns);
+		if (ns->disk && nvme_revalidate_disk(ns->disk)) {
+			list_move_tail(&ns->list, &rm_list);
+		}
	}
	mutex_unlock(&ctrl->namespaces_mutex);

+	list_for_each_entry_safe(ns, next, &rm_list, list)
+		nvme_ns_remove(ns);
}

static void nvme_passthru_end(struct nvme_ctrl *ctrl, u32 effects)

@@ -2687,7 +2696,7 @@ static ssize_t nvme_sysfs_show_state(struct device *dev,
		[NVME_CTRL_LIVE]	= "live",
		[NVME_CTRL_ADMIN_ONLY]	= "only-admin",
		[NVME_CTRL_RESETTING]	= "resetting",
-		[NVME_CTRL_RECONNECTING]= "reconnecting",
+		[NVME_CTRL_CONNECTING]	= "connecting",
		[NVME_CTRL_DELETING]	= "deleting",
		[NVME_CTRL_DEAD]	= "dead",
	};
@@ -171,13 +171,14 @@ static inline blk_status_t nvmf_check_init_req(struct nvme_ctrl *ctrl,
	    cmd->common.opcode != nvme_fabrics_command ||
	    cmd->fabrics.fctype != nvme_fabrics_type_connect) {
		/*
-		 * Reconnecting state means transport disruption, which can take
-		 * a long time and even might fail permanently, fail fast to
-		 * give upper layers a chance to failover.
+		 * Connecting state means transport disruption or initial
+		 * establishment, which can take a long time and even might
+		 * fail permanently, fail fast to give upper layers a chance
+		 * to failover.
		 * Deleting state means that the ctrl will never accept commands
		 * again, fail it permanently.
		 */
-		if (ctrl->state == NVME_CTRL_RECONNECTING ||
+		if (ctrl->state == NVME_CTRL_CONNECTING ||
		    ctrl->state == NVME_CTRL_DELETING) {
			nvme_req(rq)->status = NVME_SC_ABORT_REQ;
			return BLK_STS_IOERR;
@@ -55,9 +55,7 @@ struct nvme_fc_queue {

enum nvme_fcop_flags {
	FCOP_FLAGS_TERMIO	= (1 << 0),
-	FCOP_FLAGS_RELEASED	= (1 << 1),
-	FCOP_FLAGS_COMPLETE	= (1 << 2),
-	FCOP_FLAGS_AEN		= (1 << 3),
+	FCOP_FLAGS_AEN		= (1 << 1),
};

struct nvmefc_ls_req_op {

@@ -532,7 +530,7 @@ nvme_fc_resume_controller(struct nvme_fc_ctrl *ctrl)
{
	switch (ctrl->ctrl.state) {
	case NVME_CTRL_NEW:
-	case NVME_CTRL_RECONNECTING:
+	case NVME_CTRL_CONNECTING:
		/*
		 * As all reconnects were suppressed, schedule a
		 * connect.

@@ -777,7 +775,7 @@ nvme_fc_ctrl_connectivity_loss(struct nvme_fc_ctrl *ctrl)
		}
		break;

-	case NVME_CTRL_RECONNECTING:
+	case NVME_CTRL_CONNECTING:
		/*
		 * The association has already been terminated and the
		 * controller is attempting reconnects. No need to do anything

@@ -1470,7 +1468,6 @@ nvme_fc_xmt_disconnect_assoc(struct nvme_fc_ctrl *ctrl)

/* *********************** NVME Ctrl Routines **************************** */

-static void __nvme_fc_final_op_cleanup(struct request *rq);
static void nvme_fc_error_recovery(struct nvme_fc_ctrl *ctrl, char *errmsg);

static int

@@ -1512,13 +1509,19 @@ nvme_fc_exit_request(struct blk_mq_tag_set *set, struct request *rq,
static int
__nvme_fc_abort_op(struct nvme_fc_ctrl *ctrl, struct nvme_fc_fcp_op *op)
{
-	int state;
+	unsigned long flags;
+	int opstate;

-	state = atomic_xchg(&op->state, FCPOP_STATE_ABORTED);
-	if (state != FCPOP_STATE_ACTIVE) {
-		atomic_set(&op->state, state);
+	spin_lock_irqsave(&ctrl->lock, flags);
+	opstate = atomic_xchg(&op->state, FCPOP_STATE_ABORTED);
+	if (opstate != FCPOP_STATE_ACTIVE)
+		atomic_set(&op->state, opstate);
+	else if (ctrl->flags & FCCTRL_TERMIO)
+		ctrl->iocnt++;
+	spin_unlock_irqrestore(&ctrl->lock, flags);
+
+	if (opstate != FCPOP_STATE_ACTIVE)
		return -ECANCELED;
-	}

	ctrl->lport->ops->fcp_abort(&ctrl->lport->localport,
				    &ctrl->rport->remoteport,

@@ -1532,60 +1535,26 @@ static void
nvme_fc_abort_aen_ops(struct nvme_fc_ctrl *ctrl)
{
	struct nvme_fc_fcp_op *aen_op = ctrl->aen_ops;
-	unsigned long flags;
-	int i, ret;
+	int i;

-	for (i = 0; i < NVME_NR_AEN_COMMANDS; i++, aen_op++) {
-		if (atomic_read(&aen_op->state) != FCPOP_STATE_ACTIVE)
-			continue;
-
-		spin_lock_irqsave(&ctrl->lock, flags);
-		if (ctrl->flags & FCCTRL_TERMIO) {
-			ctrl->iocnt++;
-			aen_op->flags |= FCOP_FLAGS_TERMIO;
-		}
-		spin_unlock_irqrestore(&ctrl->lock, flags);
-
-		ret = __nvme_fc_abort_op(ctrl, aen_op);
-		if (ret) {
-			/*
-			 * if __nvme_fc_abort_op failed the io wasn't
-			 * active. Thus this call path is running in
-			 * parallel to the io complete. Treat as non-error.
-			 */
-
-			/* back out the flags/counters */
-			spin_lock_irqsave(&ctrl->lock, flags);
-			if (ctrl->flags & FCCTRL_TERMIO)
-				ctrl->iocnt--;
-			aen_op->flags &= ~FCOP_FLAGS_TERMIO;
-			spin_unlock_irqrestore(&ctrl->lock, flags);
-			return;
-		}
-	}
+	for (i = 0; i < NVME_NR_AEN_COMMANDS; i++, aen_op++)
+		__nvme_fc_abort_op(ctrl, aen_op);
}

-static inline int
+static inline void
__nvme_fc_fcpop_chk_teardowns(struct nvme_fc_ctrl *ctrl,
-		struct nvme_fc_fcp_op *op)
+		struct nvme_fc_fcp_op *op, int opstate)
{
	unsigned long flags;
-	bool complete_rq = false;

-	spin_lock_irqsave(&ctrl->lock, flags);
-	if (unlikely(op->flags & FCOP_FLAGS_TERMIO)) {
+	if (opstate == FCPOP_STATE_ABORTED) {
+		spin_lock_irqsave(&ctrl->lock, flags);
		if (ctrl->flags & FCCTRL_TERMIO) {
			if (!--ctrl->iocnt)
				wake_up(&ctrl->ioabort_wait);
		}
+		spin_unlock_irqrestore(&ctrl->lock, flags);
	}
-	if (op->flags & FCOP_FLAGS_RELEASED)
-		complete_rq = true;
-	else
-		op->flags |= FCOP_FLAGS_COMPLETE;
-	spin_unlock_irqrestore(&ctrl->lock, flags);
-
-	return complete_rq;
}

@@ -1601,6 +1570,7 @@ nvme_fc_fcpio_done(struct nvmefc_fcp_req *req)
	__le16 status = cpu_to_le16(NVME_SC_SUCCESS << 1);
	union nvme_result result;
	bool terminate_assoc = true;
+	int opstate;

	/*
	 * WARNING:

@@ -1639,11 +1609,12 @@ nvme_fc_fcpio_done(struct nvmefc_fcp_req *req)
	 * association to be terminated.
	 */

+	opstate = atomic_xchg(&op->state, FCPOP_STATE_COMPLETE);
+
	fc_dma_sync_single_for_cpu(ctrl->lport->dev, op->fcp_req.rspdma,
				   sizeof(op->rsp_iu), DMA_FROM_DEVICE);

-	if (atomic_read(&op->state) == FCPOP_STATE_ABORTED ||
-	    op->flags & FCOP_FLAGS_TERMIO)
+	if (opstate == FCPOP_STATE_ABORTED)
		status = cpu_to_le16(NVME_SC_ABORT_REQ << 1);
	else if (freq->status)
		status = cpu_to_le16(NVME_SC_INTERNAL << 1);

@@ -1708,7 +1679,7 @@ nvme_fc_fcpio_done(struct nvmefc_fcp_req *req)
done:
	if (op->flags & FCOP_FLAGS_AEN) {
		nvme_complete_async_event(&queue->ctrl->ctrl, status, &result);
-		__nvme_fc_fcpop_chk_teardowns(ctrl, op);
+		__nvme_fc_fcpop_chk_teardowns(ctrl, op, opstate);
		atomic_set(&op->state, FCPOP_STATE_IDLE);
		op->flags = FCOP_FLAGS_AEN;	/* clear other flags */
		nvme_fc_ctrl_put(ctrl);

@@ -1722,13 +1693,11 @@ nvme_fc_fcpio_done(struct nvmefc_fcp_req *req)
	if (status &&
	    (blk_queue_dying(rq->q) ||
	     ctrl->ctrl.state == NVME_CTRL_NEW ||
-	     ctrl->ctrl.state == NVME_CTRL_RECONNECTING))
+	     ctrl->ctrl.state == NVME_CTRL_CONNECTING))
		status |= cpu_to_le16(NVME_SC_DNR << 1);

-	if (__nvme_fc_fcpop_chk_teardowns(ctrl, op))
-		__nvme_fc_final_op_cleanup(rq);
-	else
-		nvme_end_request(rq, status, result);
+	__nvme_fc_fcpop_chk_teardowns(ctrl, op, opstate);
+	nvme_end_request(rq, status, result);

check_error:
	if (terminate_assoc)

@@ -2415,46 +2384,16 @@ nvme_fc_submit_async_event(struct nvme_ctrl *arg)
}

static void
-__nvme_fc_final_op_cleanup(struct request *rq)
+nvme_fc_complete_rq(struct request *rq)
{
	struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(rq);
	struct nvme_fc_ctrl *ctrl = op->ctrl;

	atomic_set(&op->state, FCPOP_STATE_IDLE);
-	op->flags &= ~(FCOP_FLAGS_TERMIO | FCOP_FLAGS_RELEASED |
-			FCOP_FLAGS_COMPLETE);

	nvme_fc_unmap_data(ctrl, rq, op);
	nvme_complete_rq(rq);
	nvme_fc_ctrl_put(ctrl);
-
}
-
-static void
-nvme_fc_complete_rq(struct request *rq)
-{
-	struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(rq);
-	struct nvme_fc_ctrl *ctrl = op->ctrl;
-	unsigned long flags;
-	bool completed = false;
-
-	/*
-	 * the core layer, on controller resets after calling
-	 * nvme_shutdown_ctrl(), calls complete_rq without our
-	 * calling blk_mq_complete_request(), thus there may still
-	 * be live i/o outstanding with the LLDD. Means transport has
-	 * to track complete calls vs fcpio_done calls to know what
-	 * path to take on completes and dones.
-	 */
-	spin_lock_irqsave(&ctrl->lock, flags);
-	if (op->flags & FCOP_FLAGS_COMPLETE)
-		completed = true;
-	else
-		op->flags |= FCOP_FLAGS_RELEASED;
-	spin_unlock_irqrestore(&ctrl->lock, flags);
-
-	if (completed)
-		__nvme_fc_final_op_cleanup(rq);
-}

/*

@@ -2476,35 +2415,11 @@ nvme_fc_terminate_exchange(struct request *req, void *data, bool reserved)
	struct nvme_ctrl *nctrl = data;
	struct nvme_fc_ctrl *ctrl = to_fc_ctrl(nctrl);
	struct nvme_fc_fcp_op *op = blk_mq_rq_to_pdu(req);
-	unsigned long flags;
-	int status;

	if (!blk_mq_request_started(req))
		return;

-	spin_lock_irqsave(&ctrl->lock, flags);
-	if (ctrl->flags & FCCTRL_TERMIO) {
-		ctrl->iocnt++;
-		op->flags |= FCOP_FLAGS_TERMIO;
-	}
-	spin_unlock_irqrestore(&ctrl->lock, flags);
-
-	status = __nvme_fc_abort_op(ctrl, op);
-	if (status) {
-		/*
-		 * if __nvme_fc_abort_op failed the io wasn't
-		 * active. Thus this call path is running in
-		 * parallel to the io complete. Treat as non-error.
-		 */
-
-		/* back out the flags/counters */
-		spin_lock_irqsave(&ctrl->lock, flags);
-		if (ctrl->flags & FCCTRL_TERMIO)
-			ctrl->iocnt--;
-		op->flags &= ~FCOP_FLAGS_TERMIO;
-		spin_unlock_irqrestore(&ctrl->lock, flags);
-		return;
-	}
+	__nvme_fc_abort_op(ctrl, op);
}

@@ -2943,7 +2858,7 @@ nvme_fc_reconnect_or_delete(struct nvme_fc_ctrl *ctrl, int status)
	unsigned long recon_delay = ctrl->ctrl.opts->reconnect_delay * HZ;
	bool recon = true;

-	if (ctrl->ctrl.state != NVME_CTRL_RECONNECTING)
+	if (ctrl->ctrl.state != NVME_CTRL_CONNECTING)
		return;

	if (portptr->port_state == FC_OBJSTATE_ONLINE)

@@ -2991,10 +2906,10 @@ nvme_fc_reset_ctrl_work(struct work_struct *work)
	/* will block while waiting for io to terminate */
	nvme_fc_delete_association(ctrl);

-	if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_RECONNECTING)) {
+	if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_CONNECTING)) {
		dev_err(ctrl->ctrl.device,
			"NVME-FC{%d}: error_recovery: Couldn't change state "
-			"to RECONNECTING\n", ctrl->cnum);
+			"to CONNECTING\n", ctrl->cnum);
		return;
	}

@@ -3195,7 +3110,7 @@ nvme_fc_init_ctrl(struct device *dev, struct nvmf_ctrl_options *opts,
 * transport errors (frame drop, LS failure) inherently must kill
 * the association. The transport is coded so that any command used
 * to create the association (prior to a LIVE state transition
- * while NEW or RECONNECTING) will fail if it completes in error or
+ * while NEW or CONNECTING) will fail if it completes in error or
 * times out.
 *
 * As such: as the connect request was most likely due to a
@@ -123,7 +123,7 @@ enum nvme_ctrl_state {
	NVME_CTRL_LIVE,
	NVME_CTRL_ADMIN_ONLY,    /* Only admin queue live */
	NVME_CTRL_RESETTING,
-	NVME_CTRL_RECONNECTING,
+	NVME_CTRL_CONNECTING,
	NVME_CTRL_DELETING,
	NVME_CTRL_DEAD,
};

@@ -183,6 +183,7 @@ struct nvme_ctrl {
	struct work_struct scan_work;
	struct work_struct async_event_work;
	struct delayed_work ka_work;
+	struct nvme_command ka_cmd;
	struct work_struct fw_act_work;

	/* Power saving configuration */
@@ -1141,7 +1141,7 @@ static bool nvme_should_reset(struct nvme_dev *dev, u32 csts)
	/* If there is a reset/reinit ongoing, we shouldn't reset again. */
	switch (dev->ctrl.state) {
	case NVME_CTRL_RESETTING:
-	case NVME_CTRL_RECONNECTING:
+	case NVME_CTRL_CONNECTING:
		return false;
	default:
		break;

@@ -1215,13 +1215,17 @@ static enum blk_eh_timer_return nvme_timeout(struct request *req, bool reserved)
	 * cancellation error. All outstanding requests are completed on
	 * shutdown, so we return BLK_EH_HANDLED.
	 */
-	if (dev->ctrl.state == NVME_CTRL_RESETTING) {
+	switch (dev->ctrl.state) {
+	case NVME_CTRL_CONNECTING:
+	case NVME_CTRL_RESETTING:
		dev_warn(dev->ctrl.device,
			 "I/O %d QID %d timeout, disable controller\n",
			 req->tag, nvmeq->qid);
		nvme_dev_disable(dev, false);
		nvme_req(req)->flags |= NVME_REQ_CANCELLED;
		return BLK_EH_HANDLED;
+	default:
+		break;
	}

	/*

@@ -1364,18 +1368,14 @@ static int nvme_cmb_qdepth(struct nvme_dev *dev, int nr_io_queues,
static int nvme_alloc_sq_cmds(struct nvme_dev *dev, struct nvme_queue *nvmeq,
				int qid, int depth)
{
-	if (qid && dev->cmb && use_cmb_sqes && (dev->cmbsz & NVME_CMBSZ_SQS)) {
-		unsigned offset = (qid - 1) * roundup(SQ_SIZE(depth),
-						      dev->ctrl.page_size);
-		nvmeq->sq_dma_addr = dev->cmb_bus_addr + offset;
-		nvmeq->sq_cmds_io = dev->cmb + offset;
-	} else {
-		nvmeq->sq_cmds = dma_alloc_coherent(dev->dev, SQ_SIZE(depth),
-					&nvmeq->sq_dma_addr, GFP_KERNEL);
-		if (!nvmeq->sq_cmds)
-			return -ENOMEM;
-	}
+	/* CMB SQEs will be mapped before creation */
+	if (qid && dev->cmb && use_cmb_sqes && (dev->cmbsz & NVME_CMBSZ_SQS))
+		return 0;

+	nvmeq->sq_cmds = dma_alloc_coherent(dev->dev, SQ_SIZE(depth),
+					&nvmeq->sq_dma_addr, GFP_KERNEL);
+	if (!nvmeq->sq_cmds)
+		return -ENOMEM;
	return 0;
}

@@ -1449,6 +1449,13 @@ static int nvme_create_queue(struct nvme_queue *nvmeq, int qid)
	struct nvme_dev *dev = nvmeq->dev;
	int result;

+	if (dev->cmb && use_cmb_sqes && (dev->cmbsz & NVME_CMBSZ_SQS)) {
+		unsigned offset = (qid - 1) * roundup(SQ_SIZE(nvmeq->q_depth),
+						      dev->ctrl.page_size);
+		nvmeq->sq_dma_addr = dev->cmb_bus_addr + offset;
+		nvmeq->sq_cmds_io = dev->cmb + offset;
+	}
+
	nvmeq->cq_vector = qid - 1;
	result = adapter_alloc_cq(dev, qid, nvmeq);
	if (result < 0)

@@ -2288,12 +2295,12 @@ static void nvme_reset_work(struct work_struct *work)
		nvme_dev_disable(dev, false);

	/*
-	 * Introduce RECONNECTING state from nvme-fc/rdma transports to mark the
+	 * Introduce CONNECTING state from nvme-fc/rdma transports to mark the
	 * initializing procedure here.
	 */
-	if (!nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_RECONNECTING)) {
+	if (!nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_CONNECTING)) {
		dev_warn(dev->ctrl.device,
-			"failed to mark controller RECONNECTING\n");
+			"failed to mark controller CONNECTING\n");
		goto out;
	}
@@ -887,7 +887,7 @@ static void nvme_rdma_free_ctrl(struct nvme_ctrl *nctrl)
static void nvme_rdma_reconnect_or_remove(struct nvme_rdma_ctrl *ctrl)
{
	/* If we are resetting/deleting then do nothing */
-	if (ctrl->ctrl.state != NVME_CTRL_RECONNECTING) {
+	if (ctrl->ctrl.state != NVME_CTRL_CONNECTING) {
		WARN_ON_ONCE(ctrl->ctrl.state == NVME_CTRL_NEW ||
			     ctrl->ctrl.state == NVME_CTRL_LIVE);
		return;

@@ -973,7 +973,7 @@ static void nvme_rdma_error_recovery_work(struct work_struct *work)
	blk_mq_unquiesce_queue(ctrl->ctrl.admin_q);
	nvme_start_queues(&ctrl->ctrl);

-	if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_RECONNECTING)) {
+	if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_CONNECTING)) {
		/* state change failure should never happen */
		WARN_ON_ONCE(1);
		return;

@@ -1756,7 +1756,7 @@ static void nvme_rdma_reset_ctrl_work(struct work_struct *work)
	nvme_stop_ctrl(&ctrl->ctrl);
	nvme_rdma_shutdown_ctrl(ctrl, false);

-	if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_RECONNECTING)) {
+	if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_CONNECTING)) {
		/* state change failure should never happen */
		WARN_ON_ONCE(1);
		return;

@@ -1784,11 +1784,8 @@ static void nvme_rdma_reset_ctrl_work(struct work_struct *work)
	return;

out_fail:
-	dev_warn(ctrl->ctrl.device, "Removing after reset failure\n");
-	nvme_remove_namespaces(&ctrl->ctrl);
-	nvme_rdma_shutdown_ctrl(ctrl, true);
-	nvme_uninit_ctrl(&ctrl->ctrl);
-	nvme_put_ctrl(&ctrl->ctrl);
+	++ctrl->ctrl.nr_reconnects;
+	nvme_rdma_reconnect_or_remove(ctrl);
}

static const struct nvme_ctrl_ops nvme_rdma_ctrl_ops = {

@@ -1942,6 +1939,9 @@ static struct nvme_ctrl *nvme_rdma_create_ctrl(struct device *dev,
	if (!ctrl->queues)
		goto out_uninit_ctrl;

+	changed = nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_CONNECTING);
+	WARN_ON_ONCE(!changed);
+
	ret = nvme_rdma_configure_admin_queue(ctrl, true);
	if (ret)
		goto out_kfree_queues;
@@ -105,10 +105,13 @@ static void nvmet_execute_flush(struct nvmet_req *req)
static u16 nvmet_discard_range(struct nvmet_ns *ns,
		struct nvme_dsm_range *range, struct bio **bio)
{
-	if (__blkdev_issue_discard(ns->bdev,
+	int ret;
+
+	ret = __blkdev_issue_discard(ns->bdev,
			le64_to_cpu(range->slba) << (ns->blksize_shift - 9),
			le32_to_cpu(range->nlb) << (ns->blksize_shift - 9),
-			GFP_KERNEL, 0, bio))
+			GFP_KERNEL, 0, bio);
+	if (ret && ret != -EOPNOTSUPP)
		return NVME_SC_INTERNAL | NVME_SC_DNR;
	return 0;
}
@@ -977,11 +977,11 @@ static int of_fwnode_graph_parse_endpoint(const struct fwnode_handle *fwnode,
	return 0;
}

-static void *
+static const void *
of_fwnode_device_get_match_data(const struct fwnode_handle *fwnode,
				const struct device *dev)
{
-	return (void *)of_device_get_match_data(dev);
+	return of_device_get_match_data(dev);
}

const struct fwnode_operations of_fwnode_ops = {
@@ -55,7 +55,7 @@ int dev_pm_opp_init_cpufreq_table(struct device *dev,
	if (max_opps <= 0)
		return max_opps ? max_opps : -ENODATA;

-	freq_table = kcalloc((max_opps + 1), sizeof(*freq_table), GFP_ATOMIC);
+	freq_table = kcalloc((max_opps + 1), sizeof(*freq_table), GFP_KERNEL);
	if (!freq_table)
		return -ENOMEM;
@@ -126,24 +126,6 @@ static const struct dmi_system_id dell_device_table[] __initconst = {
			DMI_MATCH(DMI_CHASSIS_TYPE, "32"), /*Detachable*/
		},
	},
-	{
-		.matches = {
-			DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
-			DMI_MATCH(DMI_CHASSIS_TYPE, "30"), /*Tablet*/
-		},
-	},
-	{
-		.matches = {
-			DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
-			DMI_MATCH(DMI_CHASSIS_TYPE, "31"), /*Convertible*/
-		},
-	},
-	{
-		.matches = {
-			DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
-			DMI_MATCH(DMI_CHASSIS_TYPE, "32"), /*Detachable*/
-		},
-	},
	{
		.ident = "Dell Computer Corporation",
		.matches = {

@@ -1279,7 +1261,7 @@ static int kbd_get_state(struct kbd_state *state)
	struct calling_interface_buffer buffer;
	int ret;

-	dell_fill_request(&buffer, 0, 0, 0, 0);
+	dell_fill_request(&buffer, 0x1, 0, 0, 0);
	ret = dell_send_request(&buffer,
				CLASS_KBD_BACKLIGHT, SELECT_KBD_BACKLIGHT);
	if (ret)
@@ -113,7 +113,7 @@ MODULE_PARM_DESC(no_bt_rfkill, "No rfkill for bluetooth.");
/*
 * ACPI Helpers
 */
-#define IDEAPAD_EC_TIMEOUT	(100) /* in ms */
+#define IDEAPAD_EC_TIMEOUT	(200) /* in ms */

static int read_method_int(acpi_handle handle, const char *method, int *val)
{
@@ -933,7 +933,7 @@ static int wmi_dev_probe(struct device *dev)
		goto probe_failure;
	}

-	buf = kmalloc(strlen(wdriver->driver.name) + 4, GFP_KERNEL);
+	buf = kmalloc(strlen(wdriver->driver.name) + 5, GFP_KERNEL);
	if (!buf) {
		ret = -ENOMEM;
		goto probe_string_failure;
@@ -1297,6 +1297,9 @@ static int virtio_ccw_cio_notify(struct ccw_device *cdev, int event)
		vcdev->device_lost = true;
		rc = NOTIFY_DONE;
		break;
+	case CIO_OPER:
+		rc = NOTIFY_OK;
+		break;
	default:
		rc = NOTIFY_DONE;
		break;

@@ -1309,6 +1312,27 @@ static struct ccw_device_id virtio_ids[] = {
	{},
};

+#ifdef CONFIG_PM_SLEEP
+static int virtio_ccw_freeze(struct ccw_device *cdev)
+{
+	struct virtio_ccw_device *vcdev = dev_get_drvdata(&cdev->dev);
+
+	return virtio_device_freeze(&vcdev->vdev);
+}
+
+static int virtio_ccw_restore(struct ccw_device *cdev)
+{
+	struct virtio_ccw_device *vcdev = dev_get_drvdata(&cdev->dev);
+	int ret;
+
+	ret = virtio_ccw_set_transport_rev(vcdev);
+	if (ret)
+		return ret;
+
+	return virtio_device_restore(&vcdev->vdev);
+}
+#endif
+
static struct ccw_driver virtio_ccw_driver = {
	.driver = {
		.owner = THIS_MODULE,

@@ -1321,6 +1345,11 @@ static struct ccw_driver virtio_ccw_driver = {
	.set_online = virtio_ccw_online,
	.notify = virtio_ccw_cio_notify,
	.int_class = IRQIO_VIR,
+#ifdef CONFIG_PM_SLEEP
+	.freeze = virtio_ccw_freeze,
+	.thaw = virtio_ccw_restore,
+	.restore = virtio_ccw_restore,
+#endif
};

static int __init pure_hex(char **cp, unsigned int *val, int min_digit,
@@ -73,6 +73,8 @@ static int __init its_fsl_mc_msi_init(void)

	for (np = of_find_matching_node(NULL, its_device_id); np;
	     np = of_find_matching_node(np, its_device_id)) {
+		if (!of_device_is_available(np))
+			continue;
		if (!of_property_read_bool(np, "msi-controller"))
			continue;
@@ -19,6 +19,12 @@ config USB_EHCI_BIG_ENDIAN_MMIO
config USB_EHCI_BIG_ENDIAN_DESC
	bool

+config USB_UHCI_BIG_ENDIAN_MMIO
+	bool
+
+config USB_UHCI_BIG_ENDIAN_DESC
+	bool
+
menuconfig USB_SUPPORT
	bool "USB support"
	depends on HAS_IOMEM

@@ -633,14 +633,6 @@ config USB_UHCI_ASPEED
	bool
	default y if ARCH_ASPEED

-config USB_UHCI_BIG_ENDIAN_MMIO
-	bool
-	default y if SPARC_LEON
-
-config USB_UHCI_BIG_ENDIAN_DESC
-	bool
-	default y if SPARC_LEON
-
config USB_FHCI_HCD
	tristate "Freescale QE USB Host Controller support"
	depends on OF_GPIO && QE_GPIO && QUICC_ENGINE
|
|||
bool active_socket;
|
||||
struct list_head list;
|
||||
struct socket *sock;
|
||||
atomic_t refcount;
|
||||
union {
|
||||
struct {
|
||||
int irq;
|
||||
|
@ -93,6 +94,32 @@ struct sock_mapping {
|
|||
};
|
||||
};
|
||||
|
||||
static inline struct sock_mapping *pvcalls_enter_sock(struct socket *sock)
|
||||
{
|
||||
struct sock_mapping *map;
|
||||
|
||||
if (!pvcalls_front_dev ||
|
||||
dev_get_drvdata(&pvcalls_front_dev->dev) == NULL)
|
||||
return ERR_PTR(-ENOTCONN);
|
||||
|
||||
map = (struct sock_mapping *)sock->sk->sk_send_head;
|
||||
if (map == NULL)
|
||||
return ERR_PTR(-ENOTSOCK);
|
||||
|
||||
pvcalls_enter();
|
||||
atomic_inc(&map->refcount);
|
||||
return map;
|
||||
}
|
||||
|
||||
static inline void pvcalls_exit_sock(struct socket *sock)
|
||||
{
|
||||
struct sock_mapping *map;
|
||||
|
||||
map = (struct sock_mapping *)sock->sk->sk_send_head;
|
||||
atomic_dec(&map->refcount);
|
||||
pvcalls_exit();
|
||||
}
|
||||
|
||||
static inline int get_request(struct pvcalls_bedata *bedata, int *req_id)
|
||||
{
|
||||
*req_id = bedata->ring.req_prod_pvt & (RING_SIZE(&bedata->ring) - 1);
|
||||
|
@ -369,31 +396,23 @@ int pvcalls_front_connect(struct socket *sock, struct sockaddr *addr,
|
|||
if (addr->sa_family != AF_INET || sock->type != SOCK_STREAM)
|
||||
return -EOPNOTSUPP;
|
||||
|
||||
pvcalls_enter();
|
||||
if (!pvcalls_front_dev) {
|
||||
pvcalls_exit();
|
||||
return -ENOTCONN;
|
||||
}
|
||||
map = pvcalls_enter_sock(sock);
|
||||
if (IS_ERR(map))
|
||||
return PTR_ERR(map);
|
||||
|
||||
bedata = dev_get_drvdata(&pvcalls_front_dev->dev);
|
||||
|
||||
map = (struct sock_mapping *)sock->sk->sk_send_head;
|
||||
if (!map) {
|
||||
pvcalls_exit();
|
||||
return -ENOTSOCK;
|
||||
}
|
||||
|
||||
spin_lock(&bedata->socket_lock);
|
||||
ret = get_request(bedata, &req_id);
|
||||
if (ret < 0) {
|
||||
spin_unlock(&bedata->socket_lock);
|
||||
pvcalls_exit();
|
||||
pvcalls_exit_sock(sock);
|
||||
return ret;
|
||||
}
|
||||
ret = create_active(map, &evtchn);
|
||||
if (ret < 0) {
|
||||
spin_unlock(&bedata->socket_lock);
|
||||
pvcalls_exit();
|
||||
pvcalls_exit_sock(sock);
|
||||
return ret;
|
||||
}
|
||||
|
||||
|
@ -423,7 +442,7 @@ int pvcalls_front_connect(struct socket *sock, struct sockaddr *addr,
|
|||
smp_rmb();
|
||||
ret = bedata->rsp[req_id].ret;
|
||||
bedata->rsp[req_id].req_id = PVCALLS_INVALID_ID;
|
||||
pvcalls_exit();
|
||||
pvcalls_exit_sock(sock);
|
||||
return ret;
|
||||
}
|
||||
|
||||
|
@ -488,23 +507,15 @@ int pvcalls_front_sendmsg(struct socket *sock, struct msghdr *msg,
|
|||
if (flags & (MSG_CONFIRM|MSG_DONTROUTE|MSG_EOR|MSG_OOB))
|
||||
return -EOPNOTSUPP;
|
||||
|
||||
pvcalls_enter();
|
||||
if (!pvcalls_front_dev) {
|
||||
pvcalls_exit();
|
||||
return -ENOTCONN;
|
||||
}
|
||||
map = pvcalls_enter_sock(sock);
|
||||
if (IS_ERR(map))
|
||||
return PTR_ERR(map);
|
||||
bedata = dev_get_drvdata(&pvcalls_front_dev->dev);
|
||||
|
||||
map = (struct sock_mapping *) sock->sk->sk_send_head;
|
||||
if (!map) {
|
||||
pvcalls_exit();
|
||||
return -ENOTSOCK;
|
||||
}
|
||||
|
||||
mutex_lock(&map->active.out_mutex);
|
||||
if ((flags & MSG_DONTWAIT) && !pvcalls_front_write_todo(map)) {
|
||||
mutex_unlock(&map->active.out_mutex);
|
||||
pvcalls_exit();
|
||||
pvcalls_exit_sock(sock);
|
||||
return -EAGAIN;
|
||||
}
|
||||
if (len > INT_MAX)
|
||||
|
@ -526,7 +537,7 @@ int pvcalls_front_sendmsg(struct socket *sock, struct msghdr *msg,
|
|||
tot_sent = sent;
|
||||
|
||||
mutex_unlock(&map->active.out_mutex);
|
||||
pvcalls_exit();
|
||||
pvcalls_exit_sock(sock);
|
||||
return tot_sent;
|
||||
}
|
||||
|
||||
|
@ -591,19 +602,11 @@ int pvcalls_front_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
|
|||
if (flags & (MSG_CMSG_CLOEXEC|MSG_ERRQUEUE|MSG_OOB|MSG_TRUNC))
|
||||
return -EOPNOTSUPP;
|
||||
|
||||
pvcalls_enter();
|
||||
if (!pvcalls_front_dev) {
|
||||
pvcalls_exit();
|
||||
return -ENOTCONN;
|
||||
}
|
||||
map = pvcalls_enter_sock(sock);
|
||||
if (IS_ERR(map))
|
||||
return PTR_ERR(map);
|
||||
bedata = dev_get_drvdata(&pvcalls_front_dev->dev);
|
||||
|
||||
map = (struct sock_mapping *) sock->sk->sk_send_head;
|
||||
if (!map) {
|
||||
pvcalls_exit();
|
||||
return -ENOTSOCK;
|
||||
}
|
||||
|
||||
mutex_lock(&map->active.in_mutex);
|
||||
if (len > XEN_FLEX_RING_SIZE(PVCALLS_RING_ORDER))
|
||||
len = XEN_FLEX_RING_SIZE(PVCALLS_RING_ORDER);
|
||||
|
@ -623,7 +626,7 @@ int pvcalls_front_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
|
|||
ret = 0;
|
||||
|
||||
mutex_unlock(&map->active.in_mutex);
|
||||
pvcalls_exit();
|
||||
pvcalls_exit_sock(sock);
|
||||
return ret;
|
||||
}
|
||||
|
||||
|
@@ -637,24 +640,16 @@ int pvcalls_front_bind(struct socket *sock, struct sockaddr *addr, int addr_len)
 	if (addr->sa_family != AF_INET || sock->type != SOCK_STREAM)
 		return -EOPNOTSUPP;
 
-	pvcalls_enter();
-	if (!pvcalls_front_dev) {
-		pvcalls_exit();
-		return -ENOTCONN;
-	}
+	map = pvcalls_enter_sock(sock);
+	if (IS_ERR(map))
+		return PTR_ERR(map);
 	bedata = dev_get_drvdata(&pvcalls_front_dev->dev);
 
-	map = (struct sock_mapping *) sock->sk->sk_send_head;
-	if (map == NULL) {
-		pvcalls_exit();
-		return -ENOTSOCK;
-	}
-
 	spin_lock(&bedata->socket_lock);
 	ret = get_request(bedata, &req_id);
 	if (ret < 0) {
 		spin_unlock(&bedata->socket_lock);
-		pvcalls_exit();
+		pvcalls_exit_sock(sock);
 		return ret;
 	}
 	req = RING_GET_REQUEST(&bedata->ring, req_id);
@@ -684,7 +679,7 @@ int pvcalls_front_bind(struct socket *sock, struct sockaddr *addr, int addr_len)
 	bedata->rsp[req_id].req_id = PVCALLS_INVALID_ID;
 
 	map->passive.status = PVCALLS_STATUS_BIND;
-	pvcalls_exit();
+	pvcalls_exit_sock(sock);
 	return 0;
 }
 
@@ -695,21 +690,13 @@ int pvcalls_front_listen(struct socket *sock, int backlog)
 	struct xen_pvcalls_request *req;
 	int notify, req_id, ret;
 
-	pvcalls_enter();
-	if (!pvcalls_front_dev) {
-		pvcalls_exit();
-		return -ENOTCONN;
-	}
+	map = pvcalls_enter_sock(sock);
+	if (IS_ERR(map))
+		return PTR_ERR(map);
 	bedata = dev_get_drvdata(&pvcalls_front_dev->dev);
 
-	map = (struct sock_mapping *) sock->sk->sk_send_head;
-	if (!map) {
-		pvcalls_exit();
-		return -ENOTSOCK;
-	}
-
 	if (map->passive.status != PVCALLS_STATUS_BIND) {
-		pvcalls_exit();
+		pvcalls_exit_sock(sock);
 		return -EOPNOTSUPP;
 	}
 
@@ -717,7 +704,7 @@ int pvcalls_front_listen(struct socket *sock, int backlog)
 	ret = get_request(bedata, &req_id);
 	if (ret < 0) {
 		spin_unlock(&bedata->socket_lock);
-		pvcalls_exit();
+		pvcalls_exit_sock(sock);
 		return ret;
 	}
 	req = RING_GET_REQUEST(&bedata->ring, req_id);
@@ -741,7 +728,7 @@ int pvcalls_front_listen(struct socket *sock, int backlog)
 	bedata->rsp[req_id].req_id = PVCALLS_INVALID_ID;
 
 	map->passive.status = PVCALLS_STATUS_LISTEN;
-	pvcalls_exit();
+	pvcalls_exit_sock(sock);
 	return ret;
 }
 
@@ -753,21 +740,13 @@ int pvcalls_front_accept(struct socket *sock, struct socket *newsock, int flags)
 	struct xen_pvcalls_request *req;
 	int notify, req_id, ret, evtchn, nonblock;
 
-	pvcalls_enter();
-	if (!pvcalls_front_dev) {
-		pvcalls_exit();
-		return -ENOTCONN;
-	}
+	map = pvcalls_enter_sock(sock);
+	if (IS_ERR(map))
+		return PTR_ERR(map);
 	bedata = dev_get_drvdata(&pvcalls_front_dev->dev);
 
-	map = (struct sock_mapping *) sock->sk->sk_send_head;
-	if (!map) {
-		pvcalls_exit();
-		return -ENOTSOCK;
-	}
-
 	if (map->passive.status != PVCALLS_STATUS_LISTEN) {
-		pvcalls_exit();
+		pvcalls_exit_sock(sock);
 		return -EINVAL;
 	}
 
@@ -785,13 +764,13 @@ int pvcalls_front_accept(struct socket *sock, struct socket *newsock, int flags)
 			goto received;
 		}
 		if (nonblock) {
-			pvcalls_exit();
+			pvcalls_exit_sock(sock);
 			return -EAGAIN;
 		}
 		if (wait_event_interruptible(map->passive.inflight_accept_req,
 			!test_and_set_bit(PVCALLS_FLAG_ACCEPT_INFLIGHT,
 					  (void *)&map->passive.flags))) {
-			pvcalls_exit();
+			pvcalls_exit_sock(sock);
 			return -EINTR;
 		}
 	}
@@ -802,7 +781,7 @@ int pvcalls_front_accept(struct socket *sock, struct socket *newsock, int flags)
 		clear_bit(PVCALLS_FLAG_ACCEPT_INFLIGHT,
 			  (void *)&map->passive.flags);
 		spin_unlock(&bedata->socket_lock);
-		pvcalls_exit();
+		pvcalls_exit_sock(sock);
 		return ret;
 	}
 	map2 = kzalloc(sizeof(*map2), GFP_ATOMIC);
@@ -810,7 +789,7 @@ int pvcalls_front_accept(struct socket *sock, struct socket *newsock, int flags)
 		clear_bit(PVCALLS_FLAG_ACCEPT_INFLIGHT,
 			  (void *)&map->passive.flags);
 		spin_unlock(&bedata->socket_lock);
-		pvcalls_exit();
+		pvcalls_exit_sock(sock);
 		return -ENOMEM;
 	}
 	ret = create_active(map2, &evtchn);
@@ -819,7 +798,7 @@ int pvcalls_front_accept(struct socket *sock, struct socket *newsock, int flags)
 		clear_bit(PVCALLS_FLAG_ACCEPT_INFLIGHT,
 			  (void *)&map->passive.flags);
 		spin_unlock(&bedata->socket_lock);
-		pvcalls_exit();
+		pvcalls_exit_sock(sock);
 		return ret;
 	}
 	list_add_tail(&map2->list, &bedata->socket_mappings);
@@ -841,13 +820,13 @@ int pvcalls_front_accept(struct socket *sock, struct socket *newsock, int flags)
 	/* We could check if we have received a response before returning. */
 	if (nonblock) {
 		WRITE_ONCE(map->passive.inflight_req_id, req_id);
-		pvcalls_exit();
+		pvcalls_exit_sock(sock);
 		return -EAGAIN;
 	}
 
 	if (wait_event_interruptible(bedata->inflight_req,
 		READ_ONCE(bedata->rsp[req_id].req_id) == req_id)) {
-		pvcalls_exit();
+		pvcalls_exit_sock(sock);
 		return -EINTR;
 	}
 	/* read req_id, then the content */
@@ -862,7 +841,7 @@ int pvcalls_front_accept(struct socket *sock, struct socket *newsock, int flags)
 		clear_bit(PVCALLS_FLAG_ACCEPT_INFLIGHT,
 			  (void *)&map->passive.flags);
 		pvcalls_front_free_map(bedata, map2);
-		pvcalls_exit();
+		pvcalls_exit_sock(sock);
 		return -ENOMEM;
 	}
 	newsock->sk->sk_send_head = (void *)map2;
@@ -874,7 +853,7 @@ int pvcalls_front_accept(struct socket *sock, struct socket *newsock, int flags)
 	clear_bit(PVCALLS_FLAG_ACCEPT_INFLIGHT, (void *)&map->passive.flags);
 	wake_up(&map->passive.inflight_accept_req);
 
-	pvcalls_exit();
+	pvcalls_exit_sock(sock);
 	return ret;
 }
 
@@ -965,23 +944,16 @@ __poll_t pvcalls_front_poll(struct file *file, struct socket *sock,
 	struct sock_mapping *map;
 	__poll_t ret;
 
-	pvcalls_enter();
-	if (!pvcalls_front_dev) {
-		pvcalls_exit();
+	map = pvcalls_enter_sock(sock);
+	if (IS_ERR(map))
 		return EPOLLNVAL;
-	}
 	bedata = dev_get_drvdata(&pvcalls_front_dev->dev);
 
-	map = (struct sock_mapping *) sock->sk->sk_send_head;
-	if (!map) {
-		pvcalls_exit();
-		return EPOLLNVAL;
-	}
 	if (map->active_socket)
 		ret = pvcalls_front_poll_active(file, bedata, map, wait);
 	else
 		ret = pvcalls_front_poll_passive(file, bedata, map, wait);
-	pvcalls_exit();
+	pvcalls_exit_sock(sock);
 	return ret;
 }
 
@@ -995,25 +967,20 @@ int pvcalls_front_release(struct socket *sock)
 	if (sock->sk == NULL)
 		return 0;
 
-	pvcalls_enter();
-	if (!pvcalls_front_dev) {
-		pvcalls_exit();
-		return -EIO;
+	map = pvcalls_enter_sock(sock);
+	if (IS_ERR(map)) {
+		if (PTR_ERR(map) == -ENOTCONN)
+			return -EIO;
+		else
+			return 0;
 	}
 
 	bedata = dev_get_drvdata(&pvcalls_front_dev->dev);
 
-	map = (struct sock_mapping *) sock->sk->sk_send_head;
-	if (map == NULL) {
-		pvcalls_exit();
-		return 0;
-	}
-
 	spin_lock(&bedata->socket_lock);
 	ret = get_request(bedata, &req_id);
 	if (ret < 0) {
 		spin_unlock(&bedata->socket_lock);
-		pvcalls_exit();
+		pvcalls_exit_sock(sock);
 		return ret;
 	}
 	sock->sk->sk_send_head = NULL;
@@ -1043,14 +1010,20 @@ int pvcalls_front_release(struct socket *sock)
 		/*
 		 * We need to make sure that sendmsg/recvmsg on this socket have
 		 * not started before we've cleared sk_send_head here. The
-		 * easiest (though not optimal) way to guarantee this is to see
-		 * that no pvcall (other than us) is in progress.
+		 * easiest way to guarantee this is to see that no pvcalls
+		 * (other than us) is in progress on this socket.
 		 */
-		while (atomic_read(&pvcalls_refcount) > 1)
+		while (atomic_read(&map->refcount) > 1)
 			cpu_relax();
 
 		pvcalls_front_free_map(bedata, map);
 	} else {
+		wake_up(&bedata->inflight_req);
+		wake_up(&map->passive.inflight_accept_req);
+
+		while (atomic_read(&map->refcount) > 1)
+			cpu_relax();
+
 		spin_lock(&bedata->socket_lock);
 		list_del(&map->list);
 		spin_unlock(&bedata->socket_lock);
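
For context, every converted call site above follows the same pattern: pvcalls_enter_sock() looks up the socket's sock_mapping, takes a per-socket reference, and returns an ERR_PTR on failure, while pvcalls_exit_sock() drops that reference again. Below is a minimal sketch of the helper pair these call sites imply, reconstructed from usage in this diff rather than copied from the commit, so the real definitions may differ in detail:

    /*
     * Sketch only: inferred from the call sites in this diff.
     * The real helpers are defined elsewhere in the commit.
     */
    static inline struct sock_mapping *pvcalls_enter_sock(struct socket *sock)
    {
            struct sock_mapping *map;

            if (!pvcalls_front_dev ||
                dev_get_drvdata(&pvcalls_front_dev->dev) == NULL)
                    return ERR_PTR(-ENOTCONN);  /* frontend is gone */

            map = (struct sock_mapping *)sock->sk->sk_send_head;
            if (map == NULL)
                    return ERR_PTR(-ENOTSOCK);  /* not a pvcalls socket */

            pvcalls_enter();
            atomic_inc(&map->refcount);         /* per-socket count used by release */
            return map;
    }

    static inline void pvcalls_exit_sock(struct socket *sock)
    {
            struct sock_mapping *map;

            map = (struct sock_mapping *)sock->sk->sk_send_head;
            atomic_dec(&map->refcount);
            pvcalls_exit();
    }

This also explains the release hunks above: once sk_send_head is cleared, spinning until map->refcount drops to 1 guarantees that no sendmsg/recvmsg is still inside the enter/exit window for this particular socket, instead of stalling on the driver-wide pvcalls_refcount.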
@@ -76,6 +76,7 @@ struct xb_req_data {
 	struct list_head list;
 	wait_queue_head_t wq;
 	struct xsd_sockmsg msg;
+	uint32_t caller_req_id;
 	enum xsd_sockmsg_type type;
 	char *body;
 	const struct kvec *vec;
@@ -309,6 +309,7 @@ static int process_msg(void)
 			goto out;
 
 		if (req->state == xb_req_state_wait_reply) {
+			req->msg.req_id = req->caller_req_id;
 			req->msg.type = state.msg.type;
 			req->msg.len = state.msg.len;
 			req->body = state.body;
@@ -227,6 +227,8 @@ static void xs_send(struct xb_req_data *req, struct xsd_sockmsg *msg)
 	req->state = xb_req_state_queued;
 	init_waitqueue_head(&req->wq);
 
+	/* Save the caller req_id and restore it later in the reply */
+	req->caller_req_id = req->msg.req_id;
 	req->msg.req_id = xs_request_enter(req);
 
 	mutex_lock(&xb_write_mutex);
@@ -310,6 +312,7 @@ static void *xs_talkv(struct xenbus_transaction t,
 	req->num_vecs = num_vecs;
 	req->cb = xs_wake_up;
 
+	msg.req_id = 0;
 	msg.tx_id = t.id;
 	msg.type = type;
 	msg.len = 0;
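
The three xenbus hunks above implement a single pattern: xs_send() saves the id the caller placed in msg.req_id before xs_request_enter() substitutes the internal ring id, and process_msg() restores the saved id when completing the request, so the caller always gets its own id back in the reply. A self-contained toy version of that save/substitute/restore pattern (illustrative names only, not the kernel's):

    /* Toy model of the req_id save/substitute/restore pattern. */
    #include <stdio.h>
    #include <stdint.h>

    struct toy_req {
            uint32_t req_id;         /* id visible on the ring */
            uint32_t caller_req_id;  /* id the caller handed us */
    };

    static uint32_t next_ring_id = 100;

    static void toy_send(struct toy_req *req)
    {
            req->caller_req_id = req->req_id;  /* save the caller's id */
            req->req_id = next_ring_id++;      /* substitute internal id */
            printf("on the ring: %u\n", req->req_id);
    }

    static void toy_complete(struct toy_req *req)
    {
            req->req_id = req->caller_req_id;  /* restore before replying */
            printf("caller sees: %u\n", req->req_id);
    }

    int main(void)
    {
            struct toy_req r = { .req_id = 7 };

            toy_send(&r);      /* prints "on the ring: 100" */
            toy_complete(&r);  /* prints "caller sees: 7" */
            return 0;
    }

Without the saved copy, the internal id assigned for the ring would leak back to the caller and a reply could be matched against the wrong outstanding request.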
@@ -1264,7 +1264,16 @@ static int find_parent_nodes(struct btrfs_trans_handle *trans,
 	while (node) {
 		ref = rb_entry(node, struct prelim_ref, rbnode);
 		node = rb_next(&ref->rbnode);
-		WARN_ON(ref->count < 0);
+		/*
+		 * ref->count < 0 can happen here if there are delayed
+		 * refs with a node->action of BTRFS_DROP_DELAYED_REF.
+		 * prelim_ref_insert() relies on this when merging
+		 * identical refs to keep the overall count correct.
+		 * prelim_ref_insert() will merge only those refs
+		 * which compare identically. Any refs having
+		 * e.g. different offsets would not be merged,
+		 * and would retain their original ref->count < 0.
+		 */
 		if (roots && ref->count && ref->root_id && ref->parent == 0) {
 			if (sc && sc->root_objectid &&
 			    ref->root_id != sc->root_objectid) {
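
The comment added above replaces a WARN_ON that could fire on legitimate input: with delayed refs, a BTRFS_DROP_DELAYED_REF (-1) can be accounted before the matching add (+1), so ref->count may be transiently negative until prelim_ref_insert() merges the identical refs. A toy illustration of that arithmetic (not btrfs code):

    /* Toy arithmetic for a transiently negative ref count. */
    #include <stdio.h>

    int main(void)
    {
            int count = 0;

            count += -1;   /* drop ref visited first: count is now -1 */
            printf("after drop:  %d\n", count);

            count += 1;    /* merging the identical add ref restores it */
            printf("after merge: %d\n", count);
            return 0;
    }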
Some files were not shown because too many files have changed in this diff.