mirror of https://gitee.com/openkylin/linux.git

commit f75ee9a881
Merge branch 'device-properties' into acpi-soc
@@ -94,14 +94,11 @@ has a requirements for a minimum number of vectors the driver can pass a
 min_vecs argument set to this limit, and the PCI core will return -ENOSPC
 if it can't meet the minimum number of vectors.

-The flags argument should normally be set to 0, but can be used to pass the
-PCI_IRQ_NOMSI and PCI_IRQ_NOMSIX flag in case a device claims to support
-MSI or MSI-X, but the support is broken, or to pass PCI_IRQ_NOLEGACY in
-case the device does not support legacy interrupt lines.
-
-By default this function will spread the interrupts around the available
-CPUs, but this feature can be disabled by passing the PCI_IRQ_NOAFFINITY
-flag.
+The flags argument is used to specify which type of interrupt can be used
+by the device and the driver (PCI_IRQ_LEGACY, PCI_IRQ_MSI, PCI_IRQ_MSIX).
+A convenient short-hand (PCI_IRQ_ALL_TYPES) is also available to ask for
+any possible kind of interrupt. If the PCI_IRQ_AFFINITY flag is set,
+pci_alloc_irq_vectors() will spread the interrupts around the available CPUs.

 To get the Linux IRQ numbers passed to request_irq() and free_irq() and the
 vectors, use the following function:

@@ -131,7 +128,7 @@ larger than the number supported by the device it will automatically be
 capped to the supported limit, so there is no need to query the number of
 vectors supported beforehand:

-	nvec = pci_alloc_irq_vectors(pdev, 1, nvec, 0);
+	nvec = pci_alloc_irq_vectors(pdev, 1, nvec, PCI_IRQ_ALL_TYPES)
 	if (nvec < 0)
 		goto out_err;

@@ -140,7 +137,7 @@ interrupts it can request a particular number of interrupts by passing that
 number to pci_alloc_irq_vectors() function as both 'min_vecs' and
 'max_vecs' parameters:

-	ret = pci_alloc_irq_vectors(pdev, nvec, nvec, 0);
+	ret = pci_alloc_irq_vectors(pdev, nvec, nvec, PCI_IRQ_ALL_TYPES);
 	if (ret < 0)
 		goto out_err;

@@ -148,15 +145,14 @@ The most notorious example of the request type described above is enabling
 the single MSI mode for a device. It could be done by passing two 1s as
 'min_vecs' and 'max_vecs':

-	ret = pci_alloc_irq_vectors(pdev, 1, 1, 0);
+	ret = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_ALL_TYPES);
 	if (ret < 0)
 		goto out_err;

 Some devices might not support using legacy line interrupts, in which case
-the PCI_IRQ_NOLEGACY flag can be used to fail the request if the platform
-can't provide MSI or MSI-X interrupts:
+the driver can specify that only MSI or MSI-X is acceptable:

-	nvec = pci_alloc_irq_vectors(pdev, 1, nvec, PCI_IRQ_NOLEGACY);
+	nvec = pci_alloc_irq_vectors(pdev, 1, nvec, PCI_IRQ_MSI | PCI_IRQ_MSIX);
 	if (nvec < 0)
 		goto out_err;
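As a rough illustration of the interface described in the hunks above (not code from this commit), a driver would typically pair pci_alloc_irq_vectors() with pci_irq_vector() to obtain the Linux IRQ numbers; the foo_* names and the vector count here are placeholders:

#include <linux/interrupt.h>
#include <linux/pci.h>

static irqreturn_t foo_handler(int irq, void *data)
{
	return IRQ_HANDLED;	/* placeholder handler */
}

static int foo_setup_irqs(struct pci_dev *pdev)
{
	int i, err, nvec;

	/* Accept anything from 1 to 8 vectors of any supported type. */
	nvec = pci_alloc_irq_vectors(pdev, 1, 8, PCI_IRQ_ALL_TYPES);
	if (nvec < 0)
		return nvec;

	for (i = 0; i < nvec; i++) {
		err = request_irq(pci_irq_vector(pdev, i), foo_handler, 0,
				  "foo", pdev);
		if (err)
			goto out_free;
	}
	return 0;

out_free:
	while (--i >= 0)
		free_irq(pci_irq_vector(pdev, i), pdev);
	pci_free_irq_vectors(pdev);
	return err;
}

pci_free_irq_vectors() undoes the allocation on the error path, mirroring the out_err labels used in the documentation examples.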
@@ -53,6 +53,7 @@ stable kernels.
 | ARM | Cortex-A57 | #832075 | ARM64_ERRATUM_832075 |
 | ARM | Cortex-A57 | #852523 | N/A |
 | ARM | Cortex-A57 | #834220 | ARM64_ERRATUM_834220 |
+| ARM | Cortex-A72 | #853709 | N/A |
 | ARM | MMU-500 | #841119,#826419 | N/A |
 | | | | |
 | Cavium | ThunderX ITS | #22375, #24313 | CAVIUM_ERRATUM_22375 |
@@ -131,7 +131,7 @@ pygments_style = 'sphinx'
 todo_include_todos = False

 primary_domain = 'C'
-highlight_language = 'C'
+highlight_language = 'guess'

 # -- Options for HTML output ----------------------------------------------

@@ -8,8 +8,6 @@ Required properties:
 - interrupts: Interrupt number for McPDM
 - interrupt-parent: The parent interrupt controller
 - ti,hwmods: Name of the hwmod associated to the McPDM
-- clocks: phandle for the pdmclk provider, likely <&twl6040>
-- clock-names: Must be "pdmclk"

 Example:

@@ -21,11 +19,3 @@ mcpdm: mcpdm@40132000 {
 	interrupt-parent = <&gic>;
 	ti,hwmods = "mcpdm";
 };
-
-In board DTS file the pdmclk needs to be added:
-
-&mcpdm {
-	clocks = <&twl6040>;
-	clock-names = "pdmclk";
-	status = "okay";
-};
@@ -62,7 +62,7 @@ For more examples of cooling devices, refer to the example sections below.
 Required properties:
 - #cooling-cells: Used to provide cooling device specific information
   Type: unsigned   while referring to it. Must be at least 2, in order
-  Size: one cell   to specify minimum and maximum cooling state used
+  Size: one cell   to specify minimum and maximum cooling state used
                    in the reference. The first cell is the minimum
                    cooling state requested and the second cell is
                    the maximum cooling state requested in the reference.

@@ -119,7 +119,7 @@ Required properties:
 Optional property:
 - contribution:    The cooling contribution to the thermal zone of the
   Type: unsigned   referred cooling device at the referred trip point.
-  Size: one cell   The contribution is a ratio of the sum
+  Size: one cell   The contribution is a ratio of the sum
                    of all cooling contributions within a thermal zone.

 Note: Using the THERMAL_NO_LIMIT (-1UL) constant in the cooling-device phandle

@@ -145,7 +145,7 @@ Required properties:
   Size: one cell

 - thermal-sensors: A list of thermal sensor phandles and sensor specifier
-  Type: list of    used while monitoring the thermal zone.
+  Type: list of    used while monitoring the thermal zone.
   phandles + sensor
   specifier

@@ -473,7 +473,7 @@ thermal-zones {
 		<&adc>;		/* pcb north */

 	/* hotspot = 100 * bandgap - 120 * adc + 484 */
-	coefficients =		<100	-120	484>;
+	coefficients =		<100	-120	484>;

 	trips {
 		...

@@ -502,7 +502,7 @@ from the ADC sensor. The binding would be then:
 	thermal-sensors =  <&adc>;

 	/* hotspot = 1 * adc + 6000 */
-	coefficients =		<1	6000>;
+	coefficients =		<1	6000>;

 (d) - Board thermal
@@ -19,5 +19,5 @@ enhancements. It can monitor up to 4 voltages, 16 temperatures and
 implemented in this driver.

 Specification of the chip can be found here:
-ftp:///pub/Mainboard-OEM-Sales/Services/Software&Tools/Linux_SystemMonitoring&Watchdog&GPIO/BMC-Teutates_Specification_V1.21.pdf
-ftp:///pub/Mainboard-OEM-Sales/Services/Software&Tools/Linux_SystemMonitoring&Watchdog&GPIO/Fujitsu_mainboards-1-Sensors_HowTo-en-US.pdf
+ftp://ftp.ts.fujitsu.com/pub/Mainboard-OEM-Sales/Services/Software&Tools/Linux_SystemMonitoring&Watchdog&GPIO/BMC-Teutates_Specification_V1.21.pdf
+ftp://ftp.ts.fujitsu.com/pub/Mainboard-OEM-Sales/Services/Software&Tools/Linux_SystemMonitoring&Watchdog&GPIO/Fujitsu_mainboards-1-Sensors_HowTo-en-US.pdf
@@ -366,8 +366,6 @@ Domain`_ references.
 Cross-referencing from reStructuredText
 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

-.. highlight:: none
-
 To cross-reference the functions and types defined in the kernel-doc comments
 from reStructuredText documents, please use the `Sphinx C Domain`_
 references. For example::
@@ -390,8 +388,6 @@ For further details, please refer to the `Sphinx C Domain`_ documentation.
 Function documentation
 ----------------------

-.. highlight:: c
-
 The general format of a function and function-like macro kernel-doc comment is::

   /**
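The example comment in the hunk above is cut off at the hunk boundary; for reference, a complete kernel-doc comment generally follows this shape (the function and parameter names here are illustrative only, not taken from the patch):

/**
 * foo_do_thing() - Short description of what the function does.
 * @dev: The device being operated on.
 * @flags: Behaviour flags (FOO_NONBLOCK is a made-up example).
 *
 * A longer description can follow here, possibly spanning
 * several paragraphs.
 *
 * Return: 0 on success or a negative errno on failure.
 */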
@@ -572,8 +568,6 @@ DocBook XML [DEPRECATED]
 Converting DocBook to Sphinx
 ----------------------------

-.. highlight:: none
-
 Over time, we expect all of the documents under ``Documentation/DocBook`` to be
 converted to Sphinx and reStructuredText. For most DocBook XML documents, a good
 enough solution is to use the simple ``Documentation/sphinx/tmplcvt`` script,
@@ -3032,6 +3032,10 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
 				PAGE_SIZE is used as alignment.
 				PCI-PCI bridge can be specified, if resource
 				windows need to be expanded.
+				To specify the alignment for several
+				instances of a device, the PCI vendor,
+				device, subvendor, and subdevice may be
+				specified, e.g., 4096@pci:8086:9c22:103c:198f
 		ecrc=		Enable/disable PCIe ECRC (transaction layer
 				end-to-end CRC checking).
 				bios: Use BIOS/firmware settings. This is the
@@ -790,13 +790,12 @@ The kernel interface functions are as follows:
 Data messages can have their contents extracted with the usual bunch of
 socket buffer manipulation functions. A data message can be determined to
 be the last one in a sequence with rxrpc_kernel_is_data_last(). When a
-data message has been used up, rxrpc_kernel_data_delivered() should be
-called on it..
+data message has been used up, rxrpc_kernel_data_consumed() should be
+called on it.

-Non-data messages should be handled to rxrpc_kernel_free_skb() to dispose
-of. It is possible to get extra refs on all types of message for later
-freeing, but this may pin the state of a call until the message is finally
-freed.
+Messages should be handled to rxrpc_kernel_free_skb() to dispose of. It
+is possible to get extra refs on all types of message for later freeing,
+but this may pin the state of a call until the message is finally freed.

 (*) Accept an incoming call.

@@ -821,12 +820,14 @@ The kernel interface functions are as follows:
 Other errors may be returned if the call had been aborted (-ECONNABORTED)
 or had timed out (-ETIME).

-(*) Record the delivery of a data message and free it.
+(*) Record the delivery of a data message.

-	void rxrpc_kernel_data_delivered(struct sk_buff *skb);
+	void rxrpc_kernel_data_consumed(struct rxrpc_call *call,
+					struct sk_buff *skb);

-This is used to record a data message as having been delivered and to
-update the ACK state for the call. The socket buffer will be freed.
+This is used to record a data message as having been consumed and to
+update the ACK state for the call. The message must still be passed to
+rxrpc_kernel_free_skb() for disposal by the caller.

 (*) Free a message.
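A rough sketch of the receive-side pattern implied by the updated text, using only the calls named above ('call' and 'skb' are assumed to come from the in-kernel user's rxrpc message handling; this is not code from the patch):

#include <net/af_rxrpc.h>

/* Hypothetical helper invoked for each received data message. */
static void foo_process_data(struct rxrpc_call *call, struct sk_buff *skb)
{
	bool last = rxrpc_kernel_is_data_last(skb);

	/* ... extract the payload with the usual skb helpers ... */

	/* Record consumption and update the call's ACK state, ... */
	rxrpc_kernel_data_consumed(call, skb);
	/* ... then dispose of the message explicitly. */
	rxrpc_kernel_free_skb(skb);

	if (last) {
		/* end of the sequence; reply or complete the call here */
	}
}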
@@ -164,7 +164,32 @@ load n/2 modules more and try again.
 Again, if you find the offending module(s), it(they) must be unloaded every time
 before hibernation, and please report the problem with it(them).

-c) Advanced debugging
+c) Using the "test_resume" hibernation option
+
+/sys/power/disk generally tells the kernel what to do after creating a
+hibernation image. One of the available options is "test_resume" which
+causes the just created image to be used for immediate restoration. Namely,
+after doing:
+
+# echo test_resume > /sys/power/disk
+# echo disk > /sys/power/state
+
+a hibernation image will be created and a resume from it will be triggered
+immediately without involving the platform firmware in any way.
+
+That test can be used to check if failures to resume from hibernation are
+related to bad interactions with the platform firmware. That is, if the above
+works every time, but resume from actual hibernation does not work or is
+unreliable, the platform firmware may be responsible for the failures.
+
+On architectures and platforms that support using different kernels to restore
+hibernation images (that is, the kernel used to read the image from storage and
+load it into memory is different from the one included in the image) or support
+kernel address space randomization, it also can be used to check if failures
+to resume may be related to the differences between the restore and image
+kernels.
+
+d) Advanced debugging

 In case that hibernation does not work on your system even in the minimal
 configuration and compiling more drivers as modules is not practical or some
@@ -1,75 +1,76 @@
-Power Management Interface
+Power Management Interface for System Sleep

+Copyright (c) 2016 Intel Corp., Rafael J. Wysocki <rafael.j.wysocki@intel.com>
+
-The power management subsystem provides a unified sysfs interface to
-userspace, regardless of what architecture or platform one is
-running. The interface exists in /sys/power/ directory (assuming sysfs
-is mounted at /sys).
+The power management subsystem provides userspace with a unified sysfs interface
+for system sleep regardless of the underlying system architecture or platform.
+The interface is located in the /sys/power/ directory (assuming that sysfs is
+mounted at /sys).

-/sys/power/state controls system power state. Reading from this file
-returns what states are supported, which is hard-coded to 'freeze',
-'standby' (Power-On Suspend), 'mem' (Suspend-to-RAM), and 'disk'
-(Suspend-to-Disk).
+/sys/power/state is the system sleep state control file.

-Writing to this file one of those strings causes the system to
-transition into that state. Please see the file
-Documentation/power/states.txt for a description of each of those
-states.
+Reading from it returns a list of supported sleep states, encoded as:
+
+'freeze' (Suspend-to-Idle)
+'standby' (Power-On Suspend)
+'mem' (Suspend-to-RAM)
+'disk' (Suspend-to-Disk)

-/sys/power/disk controls the operating mode of the suspend-to-disk
-mechanism. Suspend-to-disk can be handled in several ways. We have a
-few options for putting the system to sleep - using the platform driver
-(e.g. ACPI or other suspend_ops), powering off the system or rebooting the
-system (for testing).
+Suspend-to-Idle is always supported. Suspend-to-Disk is always supported
+too as long the kernel has been configured to support hibernation at all
+(ie. CONFIG_HIBERNATION is set in the kernel configuration file). Support
+for Suspend-to-RAM and Power-On Suspend depends on the capabilities of the
+platform.

-Additionally, /sys/power/disk can be used to turn on one of the two testing
-modes of the suspend-to-disk mechanism: 'testproc' or 'test'. If the
-suspend-to-disk mechanism is in the 'testproc' mode, writing 'disk' to
-/sys/power/state will cause the kernel to disable nonboot CPUs and freeze
-tasks, wait for 5 seconds, unfreeze tasks and enable nonboot CPUs. If it is
-in the 'test' mode, writing 'disk' to /sys/power/state will cause the kernel
-to disable nonboot CPUs and freeze tasks, shrink memory, suspend devices, wait
-for 5 seconds, resume devices, unfreeze tasks and enable nonboot CPUs. Then,
-we are able to look in the log messages and work out, for example, which code
-is being slow and which device drivers are misbehaving.
+If one of the strings listed in /sys/power/state is written to it, the system
+will attempt to transition into the corresponding sleep state. Refer to
+Documentation/power/states.txt for a description of each of those states.

-Reading from this file will display all supported modes and the currently
-selected one in brackets, for example
+/sys/power/disk controls the operating mode of hibernation (Suspend-to-Disk).
+Specifically, it tells the kernel what to do after creating a hibernation image.

-	[shutdown] reboot test testproc
+Reading from it returns a list of supported options encoded as:

-Writing to this file will accept one of
+'platform' (put the system into sleep using a platform-provided method)
+'shutdown' (shut the system down)
+'reboot' (reboot the system)
+'suspend' (trigger a Suspend-to-RAM transition)
+'test_resume' (resume-after-hibernation test mode)

-'platform' (only if the platform supports it)
-'shutdown'
-'reboot'
-'testproc'
-'test'
+The currently selected option is printed in square brackets.

-/sys/power/image_size controls the size of the image created by
-the suspend-to-disk mechanism. It can be written a string
-representing a non-negative integer that will be used as an upper
-limit of the image size, in bytes. The suspend-to-disk mechanism will
-do its best to ensure the image size will not exceed that number. However,
-if this turns out to be impossible, it will try to suspend anyway using the
-smallest image possible. In particular, if "0" is written to this file, the
-suspend image will be as small as possible.
+The 'platform' option is only available if the platform provides a special
+mechanism to put the system to sleep after creating a hibernation image (ACPI
+does that, for example). The 'suspend' option is available if Suspend-to-RAM
+is supported. Refer to Documentation/power/basic_pm_debugging.txt for the
+description of the 'test_resume' option.

-Reading from this file will display the current image size limit, which
-is set to 2/5 of available RAM by default.
+To select an option, write the string representing it to /sys/power/disk.

-/sys/power/pm_trace controls the code which saves the last PM event point in
-the RTC across reboots, so that you can debug a machine that just hangs
-during suspend (or more commonly, during resume). Namely, the RTC is only
-used to save the last PM event point if this file contains '1'. Initially it
-contains '0' which may be changed to '1' by writing a string representing a
-nonzero integer into it.
+/sys/power/image_size controls the size of hibernation images.

-To use this debugging feature you should attempt to suspend the machine, then
-reboot it and run
+It can be written a string representing a non-negative integer that will be
+used as a best-effort upper limit of the image size, in bytes. The hibernation
+core will do its best to ensure that the image size will not exceed that number.
+However, if that turns out to be impossible to achieve, a hibernation image will
+still be created and its size will be as small as possible. In particular,
+writing '0' to this file will enforce hibernation images to be as small as
+possible.

-	dmesg -s 1000000 | grep 'hash matches'
+Reading from this file returns the current image size limit, which is set to
+around 2/5 of available RAM by default.

-CAUTION: Using it will cause your machine's real-time (CMOS) clock to be
-set to a random invalid time after a resume.
+/sys/power/pm_trace controls the PM trace mechanism saving the last suspend
+or resume event point in the RTC across reboots.
+
+It helps to debug hard lockups or reboots due to device driver failures that
+occur during system suspend or resume (which is more common) more effectively.
+
+If /sys/power/pm_trace contains '1', the fingerprint of each suspend/resume
+event point in turn will be stored in the RTC memory (overwriting the actual
+RTC information), so it will survive a system crash if one occurs right after
+storing it and it can be used later to identify the driver that caused the crash
+to happen (see Documentation/power/s2ram.txt for more information).
+
+Initially it contains '0' which may be changed to '1' by writing a string
+representing a nonzero integer into it.
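As a trivial userspace illustration of the sleep-state control file described above (equivalent to 'echo disk > /sys/power/state'; assumes root privileges and a kernel with hibernation support — this program is not part of the patch):

#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/sys/power/state", "w");

	if (!f) {
		perror("/sys/power/state");
		return 1;
	}
	/* Writing one of the strings listed by the file starts the transition. */
	fputs("disk", f);
	return fclose(f) ? 1 : 0;
}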
@@ -42,11 +42,12 @@
 caption a.headerlink { opacity: 0; }
 caption a.headerlink:hover { opacity: 1; }

-/* inline literal: drop the borderbox and red color */
+/* inline literal: drop the borderbox, padding and red color */

 code, .rst-content tt, .rst-content code {
     color: inherit;
     border: none;
+    padding: unset;
     background: inherit;
     font-size: 85%;
 }
MAINTAINERS

@@ -881,6 +881,15 @@ S: Supported
 F:	drivers/gpu/drm/arc/
 F:	Documentation/devicetree/bindings/display/snps,arcpgu.txt

+ARM ARCHITECTED TIMER DRIVER
+M:	Mark Rutland <mark.rutland@arm.com>
+M:	Marc Zyngier <marc.zyngier@arm.com>
+L:	linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
+S:	Maintained
+F:	arch/arm/include/asm/arch_timer.h
+F:	arch/arm64/include/asm/arch_timer.h
+F:	drivers/clocksource/arm_arch_timer.c
+
 ARM HDLCD DRM DRIVER
 M:	Liviu Dudau <liviu.dudau@arm.com>
 S:	Supported

@@ -4525,6 +4534,12 @@ L: linux-edac@vger.kernel.org
 S:	Maintained
 F:	drivers/edac/sb_edac.c

+EDAC-SKYLAKE
+M:	Tony Luck <tony.luck@intel.com>
+L:	linux-edac@vger.kernel.org
+S:	Maintained
+F:	drivers/edac/skx_edac.c
+
 EDAC-XGENE
 APPLIED MICRO (APM) X-GENE SOC EDAC
 M:	Loc Ho <lho@apm.com>

@@ -7655,7 +7670,7 @@ L: linux-rdma@vger.kernel.org
 S:	Supported
 W:	https://github.com/SoftRoCE/rxe-dev/wiki/rxe-dev:-Home
 Q:	http://patchwork.kernel.org/project/linux-rdma/list/
-F:	drivers/infiniband/hw/rxe/
+F:	drivers/infiniband/sw/rxe/
 F:	include/uapi/rdma/rdma_user_rxe.h

 MEMBARRIER SUPPORT
Makefile

@@ -1,7 +1,7 @@
 VERSION = 4
 PATCHLEVEL = 8
 SUBLEVEL = 0
-EXTRAVERSION = -rc2
+EXTRAVERSION = -rc4
 NAME = Psychotic Stoned Sheep

 # *DOCUMENTATION*
||||
|
|
|
@ -142,7 +142,7 @@
|
|||
|
||||
#ifdef CONFIG_ARC_CURR_IN_REG
|
||||
; Retrieve orig r25 and save it with rest of callee_regs
|
||||
ld.as r12, [r12, PT_user_r25]
|
||||
ld r12, [r12, PT_user_r25]
|
||||
PUSH r12
|
||||
#else
|
||||
PUSH r25
|
||||
|
@ -198,7 +198,7 @@
|
|||
|
||||
; SP is back to start of pt_regs
|
||||
#ifdef CONFIG_ARC_CURR_IN_REG
|
||||
st.as r12, [sp, PT_user_r25]
|
||||
st r12, [sp, PT_user_r25]
|
||||
#endif
|
||||
.endm
|
||||
|
||||
|
|
|
@ -188,10 +188,10 @@ static inline int arch_irqs_disabled(void)
|
|||
.endm
|
||||
|
||||
.macro IRQ_ENABLE scratch
|
||||
TRACE_ASM_IRQ_ENABLE
|
||||
lr \scratch, [status32]
|
||||
or \scratch, \scratch, (STATUS_E1_MASK | STATUS_E2_MASK)
|
||||
flag \scratch
|
||||
TRACE_ASM_IRQ_ENABLE
|
||||
.endm
|
||||
|
||||
#endif /* __ASSEMBLY__ */
|
||||
|
|
|
@ -280,7 +280,7 @@ static inline void pmd_set(pmd_t *pmdp, pte_t *ptep)
|
|||
|
||||
#define pte_page(pte) pfn_to_page(pte_pfn(pte))
|
||||
#define mk_pte(page, prot) pfn_pte(page_to_pfn(page), prot)
|
||||
#define pfn_pte(pfn, prot) (__pte(((pte_t)(pfn) << PAGE_SHIFT) | pgprot_val(prot)))
|
||||
#define pfn_pte(pfn, prot) __pte(((pfn) << PAGE_SHIFT) | pgprot_val(prot))
|
||||
|
||||
/* Don't use virt_to_pfn for macros below: could cause truncations for PAE40*/
|
||||
#define pte_pfn(pte) (pte_val(pte) >> PAGE_SHIFT)
|
||||
|
|
|
@ -13,8 +13,15 @@
|
|||
|
||||
/* Machine specific ELF Hdr flags */
|
||||
#define EF_ARC_OSABI_MSK 0x00000f00
|
||||
#define EF_ARC_OSABI_ORIG 0x00000000 /* MUST be zero for back-compat */
|
||||
#define EF_ARC_OSABI_CURRENT 0x00000300 /* v3 (no legacy syscalls) */
|
||||
|
||||
#define EF_ARC_OSABI_V3 0x00000300 /* v3 (no legacy syscalls) */
|
||||
#define EF_ARC_OSABI_V4 0x00000400 /* v4 (64bit data any reg align) */
|
||||
|
||||
#if __GNUC__ < 6
|
||||
#define EF_ARC_OSABI_CURRENT EF_ARC_OSABI_V3
|
||||
#else
|
||||
#define EF_ARC_OSABI_CURRENT EF_ARC_OSABI_V4
|
||||
#endif
|
||||
|
||||
typedef unsigned long elf_greg_t;
|
||||
typedef unsigned long elf_fpregset_t;
|
||||
|
|
|
@ -28,6 +28,7 @@ extern void __muldf3(void);
|
|||
extern void __divdf3(void);
|
||||
extern void __floatunsidf(void);
|
||||
extern void __floatunsisf(void);
|
||||
extern void __udivdi3(void);
|
||||
|
||||
EXPORT_SYMBOL(__ashldi3);
|
||||
EXPORT_SYMBOL(__ashrdi3);
|
||||
|
@ -45,6 +46,7 @@ EXPORT_SYMBOL(__muldf3);
|
|||
EXPORT_SYMBOL(__divdf3);
|
||||
EXPORT_SYMBOL(__floatunsidf);
|
||||
EXPORT_SYMBOL(__floatunsisf);
|
||||
EXPORT_SYMBOL(__udivdi3);
|
||||
|
||||
/* ARC optimised assembler routines */
|
||||
EXPORT_SYMBOL(memset);
|
||||
|
|
|
@ -199,7 +199,7 @@ int elf_check_arch(const struct elf32_hdr *x)
|
|||
}
|
||||
|
||||
eflags = x->e_flags;
|
||||
if ((eflags & EF_ARC_OSABI_MSK) < EF_ARC_OSABI_CURRENT) {
|
||||
if ((eflags & EF_ARC_OSABI_MSK) != EF_ARC_OSABI_CURRENT) {
|
||||
pr_err("ABI mismatch - you need newer toolchain\n");
|
||||
force_sigsegv(SIGSEGV, current);
|
||||
return 0;
|
||||
|
|
|
@ -291,8 +291,10 @@ static char *arc_extn_mumbojumbo(int cpu_id, char *buf, int len)
|
|||
cpu->dccm.base_addr, TO_KB(cpu->dccm.sz),
|
||||
cpu->iccm.base_addr, TO_KB(cpu->iccm.sz));
|
||||
|
||||
n += scnprintf(buf + n, len - n,
|
||||
"OS ABI [v3]\t: no-legacy-syscalls\n");
|
||||
n += scnprintf(buf + n, len - n, "OS ABI [v%d]\t: %s\n",
|
||||
EF_ARC_OSABI_CURRENT >> 8,
|
||||
EF_ARC_OSABI_CURRENT == EF_ARC_OSABI_V3 ?
|
||||
"no-legacy-syscalls" : "64-bit data any register aligned");
|
||||
|
||||
return buf;
|
||||
}
|
||||
|
|
|
@ -921,6 +921,15 @@ void arc_cache_init(void)
|
|||
|
||||
printk(arc_cache_mumbojumbo(0, str, sizeof(str)));
|
||||
|
||||
/*
|
||||
* Only master CPU needs to execute rest of function:
|
||||
* - Assume SMP so all cores will have same cache config so
|
||||
* any geomtry checks will be same for all
|
||||
* - IOC setup / dma callbacks only need to be setup once
|
||||
*/
|
||||
if (cpu)
|
||||
return;
|
||||
|
||||
if (IS_ENABLED(CONFIG_ARC_HAS_ICACHE)) {
|
||||
struct cpuinfo_arc_cache *ic = &cpuinfo_arc700[cpu].icache;
|
||||
|
||||
|
|
|
@ -61,6 +61,7 @@ void *kmap(struct page *page)
|
|||
|
||||
return kmap_high(page);
|
||||
}
|
||||
EXPORT_SYMBOL(kmap);
|
||||
|
||||
void *kmap_atomic(struct page *page)
|
||||
{
|
||||
|
|
|
@ -295,6 +295,7 @@ __und_svc_fault:
|
|||
bl __und_fault
|
||||
|
||||
__und_svc_finish:
|
||||
get_thread_info tsk
|
||||
ldr r5, [sp, #S_PSR] @ Get SVC cpsr
|
||||
svc_exit r5 @ return from exception
|
||||
UNWIND(.fnend )
|
||||
|
|
|
@ -1309,7 +1309,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
|
|||
smp_rmb();
|
||||
|
||||
pfn = gfn_to_pfn_prot(kvm, gfn, write_fault, &writable);
|
||||
if (is_error_pfn(pfn))
|
||||
if (is_error_noslot_pfn(pfn))
|
||||
return -EFAULT;
|
||||
|
||||
if (kvm_is_device_pfn(pfn)) {
|
||||
|
|
|
@ -271,6 +271,12 @@ static int __init imx_gpc_init(struct device_node *node,
|
|||
for (i = 0; i < IMR_NUM; i++)
|
||||
writel_relaxed(~0, gpc_base + GPC_IMR1 + i * 4);
|
||||
|
||||
/*
|
||||
* Clear the OF_POPULATED flag set in of_irq_init so that
|
||||
* later the GPC power domain driver will not be skipped.
|
||||
*/
|
||||
of_node_clear_flag(node, OF_POPULATED);
|
||||
|
||||
return 0;
|
||||
}
|
||||
IRQCHIP_DECLARE(imx_gpc, "fsl,imx6q-gpc", imx_gpc_init);
|
||||
|
|
|
@ -728,7 +728,8 @@ static void *__init late_alloc(unsigned long sz)
|
|||
{
|
||||
void *ptr = (void *)__get_free_pages(PGALLOC_GFP, get_order(sz));
|
||||
|
||||
BUG_ON(!ptr);
|
||||
if (!ptr || !pgtable_page_ctor(virt_to_page(ptr)))
|
||||
BUG();
|
||||
return ptr;
|
||||
}
|
||||
|
||||
|
@ -1155,10 +1156,19 @@ void __init sanity_check_meminfo(void)
|
|||
{
|
||||
phys_addr_t memblock_limit = 0;
|
||||
int highmem = 0;
|
||||
phys_addr_t vmalloc_limit = __pa(vmalloc_min - 1) + 1;
|
||||
u64 vmalloc_limit;
|
||||
struct memblock_region *reg;
|
||||
bool should_use_highmem = false;
|
||||
|
||||
/*
|
||||
* Let's use our own (unoptimized) equivalent of __pa() that is
|
||||
* not affected by wrap-arounds when sizeof(phys_addr_t) == 4.
|
||||
* The result is used as the upper bound on physical memory address
|
||||
* and may itself be outside the valid range for which phys_addr_t
|
||||
* and therefore __pa() is defined.
|
||||
*/
|
||||
vmalloc_limit = (u64)(uintptr_t)vmalloc_min - PAGE_OFFSET + PHYS_OFFSET;
|
||||
|
||||
for_each_memblock(memory, reg) {
|
||||
phys_addr_t block_start = reg->base;
|
||||
phys_addr_t block_end = reg->base + reg->size;
|
||||
|
@ -1183,10 +1193,11 @@ void __init sanity_check_meminfo(void)
|
|||
if (reg->size > size_limit) {
|
||||
phys_addr_t overlap_size = reg->size - size_limit;
|
||||
|
||||
pr_notice("Truncating RAM at %pa-%pa to -%pa",
|
||||
&block_start, &block_end, &vmalloc_limit);
|
||||
memblock_remove(vmalloc_limit, overlap_size);
|
||||
pr_notice("Truncating RAM at %pa-%pa",
|
||||
&block_start, &block_end);
|
||||
block_end = vmalloc_limit;
|
||||
pr_cont(" to -%pa", &block_end);
|
||||
memblock_remove(vmalloc_limit, overlap_size);
|
||||
should_use_highmem = true;
|
||||
}
|
||||
}
|
||||
|
|
|
@ -50,7 +50,7 @@ DEFINE_PER_CPU(struct vcpu_info *, xen_vcpu);
|
|||
static struct vcpu_info __percpu *xen_vcpu_info;
|
||||
|
||||
/* Linux <-> Xen vCPU id mapping */
|
||||
DEFINE_PER_CPU(int, xen_vcpu_id) = -1;
|
||||
DEFINE_PER_CPU(uint32_t, xen_vcpu_id);
|
||||
EXPORT_PER_CPU_SYMBOL(xen_vcpu_id);
|
||||
|
||||
/* These are unused until we support booting "pre-ballooned" */
|
||||
|
|
|
@ -757,6 +757,9 @@ ENTRY(__enable_mmu)
|
|||
isb
|
||||
bl __create_page_tables // recreate kernel mapping
|
||||
|
||||
tlbi vmalle1 // Remove any stale TLB entries
|
||||
dsb nsh
|
||||
|
||||
msr sctlr_el1, x19 // re-enable the MMU
|
||||
isb
|
||||
ic iallu // flush instructions fetched
|
||||
|
|
|
@ -101,12 +101,20 @@ ENTRY(cpu_resume)
|
|||
bl el2_setup // if in EL2 drop to EL1 cleanly
|
||||
/* enable the MMU early - so we can access sleep_save_stash by va */
|
||||
adr_l lr, __enable_mmu /* __cpu_setup will return here */
|
||||
ldr x27, =_cpu_resume /* __enable_mmu will branch here */
|
||||
adr_l x27, _resume_switched /* __enable_mmu will branch here */
|
||||
adrp x25, idmap_pg_dir
|
||||
adrp x26, swapper_pg_dir
|
||||
b __cpu_setup
|
||||
ENDPROC(cpu_resume)
|
||||
|
||||
.pushsection ".idmap.text", "ax"
|
||||
_resume_switched:
|
||||
ldr x8, =_cpu_resume
|
||||
br x8
|
||||
ENDPROC(_resume_switched)
|
||||
.ltorg
|
||||
.popsection
|
||||
|
||||
ENTRY(_cpu_resume)
|
||||
mrs x1, mpidr_el1
|
||||
adrp x8, mpidr_hash
|
||||
|
|
|
@ -256,7 +256,7 @@ static int __hyp_text __guest_run(struct kvm_vcpu *vcpu)
|
|||
|
||||
/*
|
||||
* We must restore the 32-bit state before the sysregs, thanks
|
||||
* to Cortex-A57 erratum #852523.
|
||||
* to erratum #852523 (Cortex-A57) or #853709 (Cortex-A72).
|
||||
*/
|
||||
__sysreg32_restore_state(vcpu);
|
||||
__sysreg_restore_guest_state(guest_ctxt);
|
||||
|
|
|
@ -823,14 +823,6 @@ static bool access_pmuserenr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
|
|||
* Architected system registers.
|
||||
* Important: Must be sorted ascending by Op0, Op1, CRn, CRm, Op2
|
||||
*
|
||||
* We could trap ID_DFR0 and tell the guest we don't support performance
|
||||
* monitoring. Unfortunately the patch to make the kernel check ID_DFR0 was
|
||||
* NAKed, so it will read the PMCR anyway.
|
||||
*
|
||||
* Therefore we tell the guest we have 0 counters. Unfortunately, we
|
||||
* must always support PMCCNTR (the cycle counter): we just RAZ/WI for
|
||||
* all PM registers, which doesn't crash the guest kernel at least.
|
||||
*
|
||||
* Debug handling: We do trap most, if not all debug related system
|
||||
* registers. The implementation is good enough to ensure that a guest
|
||||
* can use these with minimal performance degradation. The drawback is
|
||||
|
@ -1360,7 +1352,7 @@ static const struct sys_reg_desc cp15_regs[] = {
|
|||
{ Op1( 0), CRn(10), CRm( 3), Op2( 1), access_vm_reg, NULL, c10_AMAIR1 },
|
||||
|
||||
/* ICC_SRE */
|
||||
{ Op1( 0), CRn(12), CRm(12), Op2( 5), trap_raz_wi },
|
||||
{ Op1( 0), CRn(12), CRm(12), Op2( 5), access_gic_sre },
|
||||
|
||||
{ Op1( 0), CRn(13), CRm( 0), Op2( 1), access_vm_reg, NULL, c13_CID },
|
||||
|
||||
|
|
|
@ -242,7 +242,7 @@ static void note_page(struct pg_state *st, unsigned long addr, unsigned level,
|
|||
|
||||
static void walk_pte(struct pg_state *st, pmd_t *pmd, unsigned long start)
|
||||
{
|
||||
pte_t *pte = pte_offset_kernel(pmd, 0);
|
||||
pte_t *pte = pte_offset_kernel(pmd, 0UL);
|
||||
unsigned long addr;
|
||||
unsigned i;
|
||||
|
||||
|
@ -254,7 +254,7 @@ static void walk_pte(struct pg_state *st, pmd_t *pmd, unsigned long start)
|
|||
|
||||
static void walk_pmd(struct pg_state *st, pud_t *pud, unsigned long start)
|
||||
{
|
||||
pmd_t *pmd = pmd_offset(pud, 0);
|
||||
pmd_t *pmd = pmd_offset(pud, 0UL);
|
||||
unsigned long addr;
|
||||
unsigned i;
|
||||
|
||||
|
@ -271,7 +271,7 @@ static void walk_pmd(struct pg_state *st, pud_t *pud, unsigned long start)
|
|||
|
||||
static void walk_pud(struct pg_state *st, pgd_t *pgd, unsigned long start)
|
||||
{
|
||||
pud_t *pud = pud_offset(pgd, 0);
|
||||
pud_t *pud = pud_offset(pgd, 0UL);
|
||||
unsigned long addr;
|
||||
unsigned i;
|
||||
|
||||
|
|
|
@ -23,6 +23,8 @@
|
|||
#include <linux/module.h>
|
||||
#include <linux/of.h>
|
||||
|
||||
#include <asm/acpi.h>
|
||||
|
||||
struct pglist_data *node_data[MAX_NUMNODES] __read_mostly;
|
||||
EXPORT_SYMBOL(node_data);
|
||||
nodemask_t numa_nodes_parsed __initdata;
|
||||
|
|
|
@ -164,7 +164,7 @@ typedef struct { unsigned long pgprot; } pgprot_t;
|
|||
*/
|
||||
static inline unsigned long ___pa(unsigned long x)
|
||||
{
|
||||
if (config_enabled(CONFIG_64BIT)) {
|
||||
if (IS_ENABLED(CONFIG_64BIT)) {
|
||||
/*
|
||||
* For MIPS64 the virtual address may either be in one of
|
||||
* the compatibility segements ckseg0 or ckseg1, or it may
|
||||
|
@ -173,7 +173,7 @@ static inline unsigned long ___pa(unsigned long x)
|
|||
return x < CKSEG0 ? XPHYSADDR(x) : CPHYSADDR(x);
|
||||
}
|
||||
|
||||
if (!config_enabled(CONFIG_EVA)) {
|
||||
if (!IS_ENABLED(CONFIG_EVA)) {
|
||||
/*
|
||||
* We're using the standard MIPS32 legacy memory map, ie.
|
||||
* the address x is going to be in kseg0 or kseg1. We can
|
||||
|
|
|
@ -40,7 +40,7 @@ static int kvm_mips_map_page(struct kvm *kvm, gfn_t gfn)
|
|||
srcu_idx = srcu_read_lock(&kvm->srcu);
|
||||
pfn = gfn_to_pfn(kvm, gfn);
|
||||
|
||||
if (is_error_pfn(pfn)) {
|
||||
if (is_error_noslot_pfn(pfn)) {
|
||||
kvm_err("Couldn't get pfn for gfn %#llx!\n", gfn);
|
||||
err = -EFAULT;
|
||||
goto out;
|
||||
|
|
|
@ -97,10 +97,10 @@
|
|||
#define ENOTCONN 235 /* Transport endpoint is not connected */
|
||||
#define ESHUTDOWN 236 /* Cannot send after transport endpoint shutdown */
|
||||
#define ETOOMANYREFS 237 /* Too many references: cannot splice */
|
||||
#define EREFUSED ECONNREFUSED /* for HP's NFS apparently */
|
||||
#define ETIMEDOUT 238 /* Connection timed out */
|
||||
#define ECONNREFUSED 239 /* Connection refused */
|
||||
#define EREMOTERELEASE 240 /* Remote peer released connection */
|
||||
#define EREFUSED ECONNREFUSED /* for HP's NFS apparently */
|
||||
#define EREMOTERELEASE 240 /* Remote peer released connection */
|
||||
#define EHOSTDOWN 241 /* Host is down */
|
||||
#define EHOSTUNREACH 242 /* No route to host */
|
||||
|
||||
|
|
|
@ -51,8 +51,6 @@ EXPORT_SYMBOL(_parisc_requires_coherency);
|
|||
|
||||
DEFINE_PER_CPU(struct cpuinfo_parisc, cpu_data);
|
||||
|
||||
extern int update_cr16_clocksource(void); /* from time.c */
|
||||
|
||||
/*
|
||||
** PARISC CPU driver - claim "device" and initialize CPU data structures.
|
||||
**
|
||||
|
@ -228,12 +226,6 @@ static int processor_probe(struct parisc_device *dev)
|
|||
}
|
||||
#endif
|
||||
|
||||
/* If we've registered more than one cpu,
|
||||
* we'll use the jiffies clocksource since cr16
|
||||
* is not synchronized between CPUs.
|
||||
*/
|
||||
update_cr16_clocksource();
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
|
|
@ -221,18 +221,6 @@ static struct clocksource clocksource_cr16 = {
|
|||
.flags = CLOCK_SOURCE_IS_CONTINUOUS,
|
||||
};
|
||||
|
||||
int update_cr16_clocksource(void)
|
||||
{
|
||||
/* since the cr16 cycle counters are not synchronized across CPUs,
|
||||
we'll check if we should switch to a safe clocksource: */
|
||||
if (clocksource_cr16.rating != 0 && num_online_cpus() > 1) {
|
||||
clocksource_change_rating(&clocksource_cr16, 0);
|
||||
return 1;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
void __init start_cpu_itimer(void)
|
||||
{
|
||||
unsigned int cpu = smp_processor_id();
|
||||
|
|
|
@ -21,16 +21,21 @@ ENTRY(startup_continue)
|
|||
lg %r15,.Lstack-.LPG1(%r13)
|
||||
aghi %r15,-160
|
||||
brasl %r14,decompress_kernel
|
||||
# setup registers for memory mover & branch to target
|
||||
# Set up registers for memory mover. We move the decompressed image to
|
||||
# 0x11000, starting at offset 0x11000 in the decompressed image so
|
||||
# that code living at 0x11000 in the image will end up at 0x11000 in
|
||||
# memory.
|
||||
lgr %r4,%r2
|
||||
lg %r2,.Loffset-.LPG1(%r13)
|
||||
la %r4,0(%r2,%r4)
|
||||
lg %r3,.Lmvsize-.LPG1(%r13)
|
||||
lgr %r5,%r3
|
||||
# move the memory mover someplace safe
|
||||
# Move the memory mover someplace safe so it doesn't overwrite itself.
|
||||
la %r1,0x200
|
||||
mvc 0(mover_end-mover,%r1),mover-.LPG1(%r13)
|
||||
# decompress image is started at 0x11000
|
||||
# When the memory mover is done we pass control to
|
||||
# arch/s390/kernel/head64.S:startup_continue which lives at 0x11000 in
|
||||
# the decompressed image.
|
||||
lgr %r6,%r2
|
||||
br %r1
|
||||
mover:
|
||||
|
|
|
@ -678,7 +678,7 @@ CONFIG_CRYPTO_SHA512_S390=m
|
|||
CONFIG_CRYPTO_DES_S390=m
|
||||
CONFIG_CRYPTO_AES_S390=m
|
||||
CONFIG_CRYPTO_GHASH_S390=m
|
||||
CONFIG_CRYPTO_CRC32_S390=m
|
||||
CONFIG_CRYPTO_CRC32_S390=y
|
||||
CONFIG_ASYMMETRIC_KEY_TYPE=y
|
||||
CONFIG_ASYMMETRIC_PUBLIC_KEY_SUBTYPE=m
|
||||
CONFIG_X509_CERTIFICATE_PARSER=m
|
||||
|
|
|
@ -616,7 +616,7 @@ CONFIG_CRYPTO_SHA512_S390=m
|
|||
CONFIG_CRYPTO_DES_S390=m
|
||||
CONFIG_CRYPTO_AES_S390=m
|
||||
CONFIG_CRYPTO_GHASH_S390=m
|
||||
CONFIG_CRYPTO_CRC32_S390=m
|
||||
CONFIG_CRYPTO_CRC32_S390=y
|
||||
CONFIG_ASYMMETRIC_KEY_TYPE=y
|
||||
CONFIG_ASYMMETRIC_PUBLIC_KEY_SUBTYPE=m
|
||||
CONFIG_X509_CERTIFICATE_PARSER=m
|
||||
|
|
|
@ -615,7 +615,7 @@ CONFIG_CRYPTO_SHA512_S390=m
|
|||
CONFIG_CRYPTO_DES_S390=m
|
||||
CONFIG_CRYPTO_AES_S390=m
|
||||
CONFIG_CRYPTO_GHASH_S390=m
|
||||
CONFIG_CRYPTO_CRC32_S390=m
|
||||
CONFIG_CRYPTO_CRC32_S390=y
|
||||
CONFIG_ASYMMETRIC_KEY_TYPE=y
|
||||
CONFIG_ASYMMETRIC_PUBLIC_KEY_SUBTYPE=m
|
||||
CONFIG_X509_CERTIFICATE_PARSER=m
|
||||
|
|
|
@ -51,6 +51,9 @@ u32 crc32c_le_vgfm_16(u32 crc, unsigned char const *buf, size_t size);
|
|||
struct kernel_fpu vxstate; \
|
||||
unsigned long prealign, aligned, remaining; \
|
||||
\
|
||||
if (datalen < VX_MIN_LEN + VX_ALIGN_MASK) \
|
||||
return ___crc32_sw(crc, data, datalen); \
|
||||
\
|
||||
if ((unsigned long)data & VX_ALIGN_MASK) { \
|
||||
prealign = VX_ALIGNMENT - \
|
||||
((unsigned long)data & VX_ALIGN_MASK); \
|
||||
|
@ -59,9 +62,6 @@ u32 crc32c_le_vgfm_16(u32 crc, unsigned char const *buf, size_t size);
|
|||
data = (void *)((unsigned long)data + prealign); \
|
||||
} \
|
||||
\
|
||||
if (datalen < VX_MIN_LEN) \
|
||||
return ___crc32_sw(crc, data, datalen); \
|
||||
\
|
||||
aligned = datalen & ~VX_ALIGN_MASK; \
|
||||
remaining = datalen & VX_ALIGN_MASK; \
|
||||
\
|
||||
|
|
|
@ -234,7 +234,7 @@ CONFIG_CRYPTO_SHA256_S390=m
|
|||
CONFIG_CRYPTO_SHA512_S390=m
|
||||
CONFIG_CRYPTO_DES_S390=m
|
||||
CONFIG_CRYPTO_AES_S390=m
|
||||
CONFIG_CRYPTO_CRC32_S390=m
|
||||
CONFIG_CRYPTO_CRC32_S390=y
|
||||
CONFIG_CRC7=m
|
||||
# CONFIG_XZ_DEC_X86 is not set
|
||||
# CONFIG_XZ_DEC_POWERPC is not set
|
||||
|
|
|
@ -309,7 +309,9 @@ ENTRY(startup_kdump)
|
|||
l %r15,.Lstack-.LPG0(%r13)
|
||||
ahi %r15,-STACK_FRAME_OVERHEAD
|
||||
brasl %r14,verify_facilities
|
||||
/* Continue with startup code in head64.S */
|
||||
# For uncompressed images, continue in
|
||||
# arch/s390/kernel/head64.S. For compressed images, continue in
|
||||
# arch/s390/boot/compressed/head.S.
|
||||
jg startup_continue
|
||||
|
||||
.Lstack:
|
||||
|
|
|
@ -204,11 +204,9 @@ static void __init conmode_default(void)
|
|||
#endif
|
||||
}
|
||||
} else if (MACHINE_IS_KVM) {
|
||||
if (sclp.has_vt220 &&
|
||||
config_enabled(CONFIG_SCLP_VT220_CONSOLE))
|
||||
if (sclp.has_vt220 && IS_ENABLED(CONFIG_SCLP_VT220_CONSOLE))
|
||||
SET_CONSOLE_VT220;
|
||||
else if (sclp.has_linemode &&
|
||||
config_enabled(CONFIG_SCLP_CONSOLE))
|
||||
else if (sclp.has_linemode && IS_ENABLED(CONFIG_SCLP_CONSOLE))
|
||||
SET_CONSOLE_SCLP;
|
||||
else
|
||||
SET_CONSOLE_HVC;
|
||||
|
|
|
@ -237,11 +237,10 @@ char * strrchr(const char * s, int c)
|
|||
EXPORT_SYMBOL(strrchr);
|
||||
|
||||
static inline int clcle(const char *s1, unsigned long l1,
|
||||
const char *s2, unsigned long l2,
|
||||
int *diff)
|
||||
const char *s2, unsigned long l2)
|
||||
{
|
||||
register unsigned long r2 asm("2") = (unsigned long) s1;
|
||||
register unsigned long r3 asm("3") = (unsigned long) l2;
|
||||
register unsigned long r3 asm("3") = (unsigned long) l1;
|
||||
register unsigned long r4 asm("4") = (unsigned long) s2;
|
||||
register unsigned long r5 asm("5") = (unsigned long) l2;
|
||||
int cc;
|
||||
|
@ -252,7 +251,6 @@ static inline int clcle(const char *s1, unsigned long l1,
|
|||
" srl %0,28"
|
||||
: "=&d" (cc), "+a" (r2), "+a" (r3),
|
||||
"+a" (r4), "+a" (r5) : : "cc");
|
||||
*diff = *(char *)r2 - *(char *)r4;
|
||||
return cc;
|
||||
}
|
||||
|
||||
|
@ -270,9 +268,9 @@ char * strstr(const char * s1,const char * s2)
|
|||
return (char *) s1;
|
||||
l1 = __strend(s1) - s1;
|
||||
while (l1-- >= l2) {
|
||||
int cc, dummy;
|
||||
int cc;
|
||||
|
||||
cc = clcle(s1, l1, s2, l2, &dummy);
|
||||
cc = clcle(s1, l2, s2, l2);
|
||||
if (!cc)
|
||||
return (char *) s1;
|
||||
s1++;
|
||||
|
@ -313,11 +311,11 @@ EXPORT_SYMBOL(memchr);
|
|||
*/
|
||||
int memcmp(const void *cs, const void *ct, size_t n)
|
||||
{
|
||||
int ret, diff;
|
||||
int ret;
|
||||
|
||||
ret = clcle(cs, n, ct, n, &diff);
|
||||
ret = clcle(cs, n, ct, n);
|
||||
if (ret)
|
||||
ret = diff;
|
||||
ret = ret == 1 ? -1 : 1;
|
||||
return ret;
|
||||
}
|
||||
EXPORT_SYMBOL(memcmp);
|
||||
|
|
|
@ -252,6 +252,8 @@ static int change_page_attr(unsigned long addr, unsigned long end,
|
|||
int rc = -EINVAL;
|
||||
pgd_t *pgdp;
|
||||
|
||||
if (addr == end)
|
||||
return 0;
|
||||
if (end >= MODULES_END)
|
||||
return -EINVAL;
|
||||
mutex_lock(&cpa_mutex);
|
||||
|
|
|
@ -81,7 +81,7 @@
|
|||
.altinstr_replacement : { *(.altinstr_replacement) }
|
||||
/* .exit.text is discard at runtime, not link time, to deal with references
|
||||
from .altinstructions and .eh_frame */
|
||||
.exit.text : { *(.exit.text) }
|
||||
.exit.text : { EXIT_TEXT }
|
||||
.exit.data : { *(.exit.data) }
|
||||
|
||||
.preinit_array : {
|
||||
|
|
|
@ -485,10 +485,10 @@ static int sha_complete_job(struct mcryptd_hash_request_ctx *rctx,
|
|||
|
||||
req = cast_mcryptd_ctx_to_req(req_ctx);
|
||||
if (irqs_disabled())
|
||||
rctx->complete(&req->base, ret);
|
||||
req_ctx->complete(&req->base, ret);
|
||||
else {
|
||||
local_bh_disable();
|
||||
rctx->complete(&req->base, ret);
|
||||
req_ctx->complete(&req->base, ret);
|
||||
local_bh_enable();
|
||||
}
|
||||
}
|
||||
|
|
|
@ -265,13 +265,14 @@ ENTRY(sha256_mb_mgr_get_comp_job_avx2)
|
|||
vpinsrd $1, _args_digest+1*32(state, idx, 4), %xmm0, %xmm0
|
||||
vpinsrd $2, _args_digest+2*32(state, idx, 4), %xmm0, %xmm0
|
||||
vpinsrd $3, _args_digest+3*32(state, idx, 4), %xmm0, %xmm0
|
||||
movl _args_digest+4*32(state, idx, 4), tmp2_w
|
||||
vmovd _args_digest(state , idx, 4) , %xmm0
|
||||
vpinsrd $1, _args_digest+5*32(state, idx, 4), %xmm1, %xmm1
|
||||
vpinsrd $2, _args_digest+6*32(state, idx, 4), %xmm1, %xmm1
|
||||
vpinsrd $3, _args_digest+7*32(state, idx, 4), %xmm1, %xmm1
|
||||
|
||||
vmovdqu %xmm0, _result_digest(job_rax)
|
||||
movl tmp2_w, _result_digest+1*16(job_rax)
|
||||
vmovdqu %xmm0, _result_digest(job_rax)
|
||||
offset = (_result_digest + 1*16)
|
||||
vmovdqu %xmm1, offset(job_rax)
|
||||
|
||||
pop %rbx
|
||||
|
||||
|
|
|
@ -497,10 +497,10 @@ static int sha_complete_job(struct mcryptd_hash_request_ctx *rctx,
|
|||
|
||||
req = cast_mcryptd_ctx_to_req(req_ctx);
|
||||
if (irqs_disabled())
|
||||
rctx->complete(&req->base, ret);
|
||||
req_ctx->complete(&req->base, ret);
|
||||
else {
|
||||
local_bh_disable();
|
||||
rctx->complete(&req->base, ret);
|
||||
req_ctx->complete(&req->base, ret);
|
||||
local_bh_enable();
|
||||
}
|
||||
}
|
||||
|
|
|
@ -1623,6 +1623,9 @@ void __init enable_IR_x2apic(void)
|
|||
unsigned long flags;
|
||||
int ret, ir_stat;
|
||||
|
||||
if (skip_ioapic_setup)
|
||||
return;
|
||||
|
||||
ir_stat = irq_remapping_prepare();
|
||||
if (ir_stat < 0 && !x2apic_supported())
|
||||
return;
|
||||
|
|
|
@ -355,6 +355,7 @@ void load_ucode_amd_ap(void)
|
|||
unsigned int cpu = smp_processor_id();
|
||||
struct equiv_cpu_entry *eq;
|
||||
struct microcode_amd *mc;
|
||||
u8 *cont = container;
|
||||
u32 rev, eax;
|
||||
u16 eq_id;
|
||||
|
||||
|
@ -371,8 +372,11 @@ void load_ucode_amd_ap(void)
|
|||
if (check_current_patch_level(&rev, false))
|
||||
return;
|
||||
|
||||
/* Add CONFIG_RANDOMIZE_MEMORY offset. */
|
||||
cont += PAGE_OFFSET - __PAGE_OFFSET_BASE;
|
||||
|
||||
eax = cpuid_eax(0x00000001);
|
||||
eq = (struct equiv_cpu_entry *)(container + CONTAINER_HDR_SZ);
|
||||
eq = (struct equiv_cpu_entry *)(cont + CONTAINER_HDR_SZ);
|
||||
|
||||
eq_id = find_equiv_id(eq, eax);
|
||||
if (!eq_id)
|
||||
|
@ -434,6 +438,9 @@ int __init save_microcode_in_initrd_amd(void)
|
|||
else
|
||||
container = cont_va;
|
||||
|
||||
/* Add CONFIG_RANDOMIZE_MEMORY offset. */
|
||||
container += PAGE_OFFSET - __PAGE_OFFSET_BASE;
|
||||
|
||||
eax = cpuid_eax(0x00000001);
|
||||
eax = ((eax >> 8) & 0xf) + ((eax >> 20) & 0xff);
|
||||
|
||||
|
|
|
@ -100,10 +100,11 @@ EXPORT_PER_CPU_SYMBOL(cpu_info);
|
|||
/* Logical package management. We might want to allocate that dynamically */
|
||||
static int *physical_to_logical_pkg __read_mostly;
|
||||
static unsigned long *physical_package_map __read_mostly;;
|
||||
static unsigned long *logical_package_map __read_mostly;
|
||||
static unsigned int max_physical_pkg_id __read_mostly;
|
||||
unsigned int __max_logical_packages __read_mostly;
|
||||
EXPORT_SYMBOL(__max_logical_packages);
|
||||
static unsigned int logical_packages __read_mostly;
|
||||
static bool logical_packages_frozen __read_mostly;
|
||||
|
||||
/* Maximum number of SMT threads on any online core */
|
||||
int __max_smt_threads __read_mostly;
|
||||
|
@ -277,14 +278,14 @@ int topology_update_package_map(unsigned int apicid, unsigned int cpu)
|
|||
if (test_and_set_bit(pkg, physical_package_map))
|
||||
goto found;
|
||||
|
||||
new = find_first_zero_bit(logical_package_map, __max_logical_packages);
|
||||
if (new >= __max_logical_packages) {
|
||||
if (logical_packages_frozen) {
|
||||
physical_to_logical_pkg[pkg] = -1;
|
||||
pr_warn("APIC(%x) Package %u exceeds logical package map\n",
|
||||
pr_warn("APIC(%x) Package %u exceeds logical package max\n",
|
||||
apicid, pkg);
|
||||
return -ENOSPC;
|
||||
}
|
||||
set_bit(new, logical_package_map);
|
||||
|
||||
new = logical_packages++;
|
||||
pr_info("APIC(%x) Converting physical %u to logical package %u\n",
|
||||
apicid, pkg, new);
|
||||
physical_to_logical_pkg[pkg] = new;
|
||||
|
@ -341,6 +342,7 @@ static void __init smp_init_package_map(void)
|
|||
}
|
||||
|
||||
__max_logical_packages = DIV_ROUND_UP(total_cpus, ncpus);
|
||||
logical_packages = 0;
|
||||
|
||||
/*
|
||||
* Possibly larger than what we need as the number of apic ids per
|
||||
|
@ -352,10 +354,6 @@ static void __init smp_init_package_map(void)
|
|||
memset(physical_to_logical_pkg, 0xff, size);
|
||||
size = BITS_TO_LONGS(max_physical_pkg_id) * sizeof(unsigned long);
|
||||
physical_package_map = kzalloc(size, GFP_KERNEL);
|
||||
size = BITS_TO_LONGS(__max_logical_packages) * sizeof(unsigned long);
|
||||
logical_package_map = kzalloc(size, GFP_KERNEL);
|
||||
|
||||
pr_info("Max logical packages: %u\n", __max_logical_packages);
|
||||
|
||||
for_each_present_cpu(cpu) {
|
||||
unsigned int apicid = apic->cpu_present_to_apicid(cpu);
|
||||
|
@ -369,6 +367,15 @@ static void __init smp_init_package_map(void)
|
|||
set_cpu_possible(cpu, false);
|
||||
set_cpu_present(cpu, false);
|
||||
}
|
||||
|
||||
if (logical_packages > __max_logical_packages) {
|
||||
pr_warn("Detected more packages (%u), then computed by BIOS data (%u).\n",
|
||||
logical_packages, __max_logical_packages);
|
||||
logical_packages_frozen = true;
|
||||
__max_logical_packages = logical_packages;
|
||||
}
|
||||
|
||||
pr_info("Max logical packages: %u\n", __max_logical_packages);
|
||||
}
|
||||
|
||||
void __init smp_store_boot_cpu_info(void)
|
||||
|
|
|
@ -422,6 +422,7 @@ struct nested_vmx {
|
|||
struct list_head vmcs02_pool;
|
||||
int vmcs02_num;
|
||||
u64 vmcs01_tsc_offset;
|
||||
bool change_vmcs01_virtual_x2apic_mode;
|
||||
/* L2 must run next, and mustn't decide to exit to L1. */
|
||||
bool nested_run_pending;
|
||||
/*
|
||||
|
@ -435,6 +436,8 @@ struct nested_vmx {
|
|||
bool pi_pending;
|
||||
u16 posted_intr_nv;
|
||||
|
||||
unsigned long *msr_bitmap;
|
||||
|
||||
struct hrtimer preemption_timer;
|
||||
bool preemption_timer_expired;
|
||||
|
||||
|
@ -924,7 +927,6 @@ static unsigned long *vmx_msr_bitmap_legacy;
|
|||
static unsigned long *vmx_msr_bitmap_longmode;
|
||||
static unsigned long *vmx_msr_bitmap_legacy_x2apic;
|
||||
static unsigned long *vmx_msr_bitmap_longmode_x2apic;
|
||||
static unsigned long *vmx_msr_bitmap_nested;
|
||||
static unsigned long *vmx_vmread_bitmap;
|
||||
static unsigned long *vmx_vmwrite_bitmap;
|
||||
|
||||
|
@ -2198,6 +2200,12 @@ static void vmx_vcpu_pi_load(struct kvm_vcpu *vcpu, int cpu)
|
|||
new.control) != old.control);
|
||||
}
|
||||
|
||||
static void decache_tsc_multiplier(struct vcpu_vmx *vmx)
|
||||
{
|
||||
vmx->current_tsc_ratio = vmx->vcpu.arch.tsc_scaling_ratio;
|
||||
vmcs_write64(TSC_MULTIPLIER, vmx->current_tsc_ratio);
|
||||
}
|
||||
|
||||
/*
|
||||
* Switches to specified vcpu, until a matching vcpu_put(), but assumes
|
||||
* vcpu mutex is already taken.
|
||||
|
@ -2256,10 +2264,8 @@ static void vmx_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
|
|||
|
||||
/* Setup TSC multiplier */
|
||||
if (kvm_has_tsc_control &&
|
||||
vmx->current_tsc_ratio != vcpu->arch.tsc_scaling_ratio) {
|
||||
vmx->current_tsc_ratio = vcpu->arch.tsc_scaling_ratio;
|
||||
vmcs_write64(TSC_MULTIPLIER, vmx->current_tsc_ratio);
|
||||
}
|
||||
vmx->current_tsc_ratio != vcpu->arch.tsc_scaling_ratio)
|
||||
decache_tsc_multiplier(vmx);
|
||||
|
||||
vmx_vcpu_pi_load(vcpu, cpu);
|
||||
vmx->host_pkru = read_pkru();
|
||||
|
@ -2508,7 +2514,7 @@ static void vmx_set_msr_bitmap(struct kvm_vcpu *vcpu)
|
|||
unsigned long *msr_bitmap;
|
||||
|
||||
if (is_guest_mode(vcpu))
|
||||
msr_bitmap = vmx_msr_bitmap_nested;
|
||||
msr_bitmap = to_vmx(vcpu)->nested.msr_bitmap;
|
||||
else if (cpu_has_secondary_exec_ctrls() &&
|
||||
(vmcs_read32(SECONDARY_VM_EXEC_CONTROL) &
|
||||
SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE)) {
|
||||
|
@ -6363,13 +6369,6 @@ static __init int hardware_setup(void)
|
|||
if (!vmx_msr_bitmap_longmode_x2apic)
|
||||
goto out4;
|
||||
|
||||
if (nested) {
|
||||
vmx_msr_bitmap_nested =
|
||||
(unsigned long *)__get_free_page(GFP_KERNEL);
|
||||
if (!vmx_msr_bitmap_nested)
|
||||
goto out5;
|
||||
}
|
||||
|
||||
vmx_vmread_bitmap = (unsigned long *)__get_free_page(GFP_KERNEL);
|
||||
if (!vmx_vmread_bitmap)
|
||||
goto out6;
|
||||
|
@ -6392,8 +6391,6 @@ static __init int hardware_setup(void)
|
|||
|
||||
memset(vmx_msr_bitmap_legacy, 0xff, PAGE_SIZE);
|
||||
memset(vmx_msr_bitmap_longmode, 0xff, PAGE_SIZE);
|
||||
if (nested)
|
||||
memset(vmx_msr_bitmap_nested, 0xff, PAGE_SIZE);
|
||||
|
||||
if (setup_vmcs_config(&vmcs_config) < 0) {
|
||||
r = -EIO;
|
||||
|
@ -6529,9 +6526,6 @@ static __init int hardware_setup(void)
|
|||
out7:
|
||||
free_page((unsigned long)vmx_vmread_bitmap);
|
||||
out6:
|
||||
if (nested)
|
||||
free_page((unsigned long)vmx_msr_bitmap_nested);
|
||||
out5:
|
||||
free_page((unsigned long)vmx_msr_bitmap_longmode_x2apic);
|
||||
out4:
|
||||
free_page((unsigned long)vmx_msr_bitmap_longmode);
|
||||
|
@ -6557,8 +6551,6 @@ static __exit void hardware_unsetup(void)
|
|||
free_page((unsigned long)vmx_io_bitmap_a);
|
||||
free_page((unsigned long)vmx_vmwrite_bitmap);
|
||||
free_page((unsigned long)vmx_vmread_bitmap);
|
||||
if (nested)
|
||||
free_page((unsigned long)vmx_msr_bitmap_nested);
|
||||
|
||||
free_kvm_area();
|
||||
}
|
||||
|
@ -6995,16 +6987,21 @@ static int handle_vmon(struct kvm_vcpu *vcpu)
|
|||
return 1;
|
||||
}
|
||||
|
||||
	if (cpu_has_vmx_msr_bitmap()) {
		vmx->nested.msr_bitmap =
			(unsigned long *)__get_free_page(GFP_KERNEL);
		if (!vmx->nested.msr_bitmap)
			goto out_msr_bitmap;
	}

	vmx->nested.cached_vmcs12 = kmalloc(VMCS12_SIZE, GFP_KERNEL);
	if (!vmx->nested.cached_vmcs12)
		return -ENOMEM;
		goto out_cached_vmcs12;

	if (enable_shadow_vmcs) {
		shadow_vmcs = alloc_vmcs();
		if (!shadow_vmcs) {
			kfree(vmx->nested.cached_vmcs12);
			return -ENOMEM;
		}
		if (!shadow_vmcs)
			goto out_shadow_vmcs;
		/* mark vmcs as shadow */
		shadow_vmcs->revision_id |= (1u << 31);
		/* init shadow vmcs */

@@ -7024,6 +7021,15 @@ static int handle_vmon(struct kvm_vcpu *vcpu)
	skip_emulated_instruction(vcpu);
	nested_vmx_succeed(vcpu);
	return 1;

out_shadow_vmcs:
	kfree(vmx->nested.cached_vmcs12);

out_cached_vmcs12:
	free_page((unsigned long)vmx->nested.msr_bitmap);

out_msr_bitmap:
	return -ENOMEM;
}

/*

@@ -7098,6 +7104,10 @@ static void free_nested(struct vcpu_vmx *vmx)
	vmx->nested.vmxon = false;
	free_vpid(vmx->nested.vpid02);
	nested_release_vmcs12(vmx);
	if (vmx->nested.msr_bitmap) {
		free_page((unsigned long)vmx->nested.msr_bitmap);
		vmx->nested.msr_bitmap = NULL;
	}
	if (enable_shadow_vmcs)
		free_vmcs(vmx->nested.current_shadow_vmcs);
	kfree(vmx->nested.cached_vmcs12);

@@ -8419,6 +8429,12 @@ static void vmx_set_virtual_x2apic_mode(struct kvm_vcpu *vcpu, bool set)
{
	u32 sec_exec_control;

	/* Postpone execution until vmcs01 is the current VMCS. */
	if (is_guest_mode(vcpu)) {
		to_vmx(vcpu)->nested.change_vmcs01_virtual_x2apic_mode = true;
		return;
	}

	/*
	 * There is not point to enable virtualize x2apic without enable
	 * apicv

@@ -9472,8 +9488,10 @@ static inline bool nested_vmx_merge_msr_bitmap(struct kvm_vcpu *vcpu,
{
	int msr;
	struct page *page;
	unsigned long *msr_bitmap;
	unsigned long *msr_bitmap_l1;
	unsigned long *msr_bitmap_l0 = to_vmx(vcpu)->nested.msr_bitmap;

	/* This shortcut is ok because we support only x2APIC MSRs so far. */
	if (!nested_cpu_has_virt_x2apic_mode(vmcs12))
		return false;

@@ -9482,63 +9500,37 @@ static inline bool nested_vmx_merge_msr_bitmap(struct kvm_vcpu *vcpu,
		WARN_ON(1);
		return false;
	}
	msr_bitmap = (unsigned long *)kmap(page);
	if (!msr_bitmap) {
	msr_bitmap_l1 = (unsigned long *)kmap(page);
	if (!msr_bitmap_l1) {
		nested_release_page_clean(page);
		WARN_ON(1);
		return false;
	}

	memset(msr_bitmap_l0, 0xff, PAGE_SIZE);

	if (nested_cpu_has_virt_x2apic_mode(vmcs12)) {
		if (nested_cpu_has_apic_reg_virt(vmcs12))
			for (msr = 0x800; msr <= 0x8ff; msr++)
				nested_vmx_disable_intercept_for_msr(
					msr_bitmap,
					vmx_msr_bitmap_nested,
					msr_bitmap_l1, msr_bitmap_l0,
					msr, MSR_TYPE_R);
		/* TPR is allowed */
		nested_vmx_disable_intercept_for_msr(msr_bitmap,
				vmx_msr_bitmap_nested,

		nested_vmx_disable_intercept_for_msr(
				msr_bitmap_l1, msr_bitmap_l0,
				APIC_BASE_MSR + (APIC_TASKPRI >> 4),
				MSR_TYPE_R | MSR_TYPE_W);

		if (nested_cpu_has_vid(vmcs12)) {
			/* EOI and self-IPI are allowed */
			nested_vmx_disable_intercept_for_msr(
				msr_bitmap,
				vmx_msr_bitmap_nested,
				msr_bitmap_l1, msr_bitmap_l0,
				APIC_BASE_MSR + (APIC_EOI >> 4),
				MSR_TYPE_W);
			nested_vmx_disable_intercept_for_msr(
				msr_bitmap,
				vmx_msr_bitmap_nested,
				msr_bitmap_l1, msr_bitmap_l0,
				APIC_BASE_MSR + (APIC_SELF_IPI >> 4),
				MSR_TYPE_W);
		}
	} else {
		/*
		 * Enable reading intercept of all the x2apic
		 * MSRs. We should not rely on vmcs12 to do any
		 * optimizations here, it may have been modified
		 * by L1.
		 */
		for (msr = 0x800; msr <= 0x8ff; msr++)
			__vmx_enable_intercept_for_msr(
					vmx_msr_bitmap_nested,
					msr,
					MSR_TYPE_R);

		__vmx_enable_intercept_for_msr(
				vmx_msr_bitmap_nested,
				APIC_BASE_MSR + (APIC_TASKPRI >> 4),
				MSR_TYPE_W);
		__vmx_enable_intercept_for_msr(
				vmx_msr_bitmap_nested,
				APIC_BASE_MSR + (APIC_EOI >> 4),
				MSR_TYPE_W);
		__vmx_enable_intercept_for_msr(
				vmx_msr_bitmap_nested,
				APIC_BASE_MSR + (APIC_SELF_IPI >> 4),
				MSR_TYPE_W);
	}
	kunmap(page);
	nested_release_page_clean(page);

@@ -9957,10 +9949,10 @@ static void prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
	}

	if (cpu_has_vmx_msr_bitmap() &&
	    exec_control & CPU_BASED_USE_MSR_BITMAPS) {
		nested_vmx_merge_msr_bitmap(vcpu, vmcs12);
		/* MSR_BITMAP will be set by following vmx_set_efer. */
	} else
	    exec_control & CPU_BASED_USE_MSR_BITMAPS &&
	    nested_vmx_merge_msr_bitmap(vcpu, vmcs12))
		; /* MSR_BITMAP will be set by following vmx_set_efer. */
	else
		exec_control &= ~CPU_BASED_USE_MSR_BITMAPS;

	/*

@@ -10011,6 +10003,8 @@ static void prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
			vmx->nested.vmcs01_tsc_offset + vmcs12->tsc_offset);
	else
		vmcs_write64(TSC_OFFSET, vmx->nested.vmcs01_tsc_offset);
	if (kvm_has_tsc_control)
		decache_tsc_multiplier(vmx);

	if (enable_vpid) {
		/*

@@ -10767,6 +10761,14 @@ static void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 exit_reason,
	else
		vmcs_set_bits(PIN_BASED_VM_EXEC_CONTROL,
			      PIN_BASED_VMX_PREEMPTION_TIMER);
	if (kvm_has_tsc_control)
		decache_tsc_multiplier(vmx);

	if (vmx->nested.change_vmcs01_virtual_x2apic_mode) {
		vmx->nested.change_vmcs01_virtual_x2apic_mode = false;
		vmx_set_virtual_x2apic_mode(vcpu,
				vcpu->arch.apic_base & X2APIC_ENABLE);
	}

	/* This is needed for same reason as it was needed in prepare_vmcs02 */
	vmx->host_rsp = 0;
|
||||
|
|
|
@@ -77,7 +77,7 @@ static inline unsigned long get_padding(struct kaslr_memory_region *region)
 */
static inline bool kaslr_memory_enabled(void)
{
	return kaslr_enabled() && !config_enabled(CONFIG_KASAN);
	return kaslr_enabled() && !IS_ENABLED(CONFIG_KASAN);
}

/* Initialize base and padding for each memory region randomized with KASLR */
|
|
@@ -41,6 +41,7 @@ static DEFINE_RAW_SPINLOCK(list_lock);
 * @node: list item for parent traversal.
 * @rcu: RCU callback item for freeing.
 * @irq: back pointer to parent.
 * @enabled: true if driver enabled IRQ
 * @virq: the virtual IRQ value provided to the requesting driver.
 *
 * Every MSI/MSI-X IRQ requested for a device in a VMD domain will be mapped to

@@ -50,6 +51,7 @@ struct vmd_irq {
	struct list_head node;
	struct rcu_head rcu;
	struct vmd_irq_list *irq;
	bool enabled;
	unsigned int virq;
};

@@ -122,7 +124,9 @@ static void vmd_irq_enable(struct irq_data *data)
	unsigned long flags;

	raw_spin_lock_irqsave(&list_lock, flags);
	WARN_ON(vmdirq->enabled);
	list_add_tail_rcu(&vmdirq->node, &vmdirq->irq->irq_list);
	vmdirq->enabled = true;
	raw_spin_unlock_irqrestore(&list_lock, flags);

	data->chip->irq_unmask(data);

@@ -136,8 +140,10 @@ static void vmd_irq_disable(struct irq_data *data)
	data->chip->irq_mask(data);

	raw_spin_lock_irqsave(&list_lock, flags);
	list_del_rcu(&vmdirq->node);
	INIT_LIST_HEAD_RCU(&vmdirq->node);
	if (vmdirq->enabled) {
		list_del_rcu(&vmdirq->node);
		vmdirq->enabled = false;
	}
	raw_spin_unlock_irqrestore(&list_lock, flags);
}
|
|
@@ -113,7 +113,7 @@ static int set_up_temporary_mappings(void)
		return result;
	}

	temp_level4_pgt = (unsigned long)pgd - __PAGE_OFFSET;
	temp_level4_pgt = __pa(pgd);
	return 0;
}
|
|
@@ -118,7 +118,7 @@ DEFINE_PER_CPU(struct vcpu_info *, xen_vcpu);
DEFINE_PER_CPU(struct vcpu_info, xen_vcpu_info);

/* Linux <-> Xen vCPU id mapping */
DEFINE_PER_CPU(int, xen_vcpu_id) = -1;
DEFINE_PER_CPU(uint32_t, xen_vcpu_id);
EXPORT_PER_CPU_SYMBOL(xen_vcpu_id);

enum xen_domain_type xen_domain_type = XEN_NATIVE;
|
|
block/bio.c

@@ -667,18 +667,19 @@ struct bio *bio_clone_bioset(struct bio *bio_src, gfp_t gfp_mask,
	bio->bi_iter.bi_sector = bio_src->bi_iter.bi_sector;
	bio->bi_iter.bi_size = bio_src->bi_iter.bi_size;

	if (bio_op(bio) == REQ_OP_DISCARD)
		goto integrity_clone;

	if (bio_op(bio) == REQ_OP_WRITE_SAME) {
	switch (bio_op(bio)) {
	case REQ_OP_DISCARD:
	case REQ_OP_SECURE_ERASE:
		break;
	case REQ_OP_WRITE_SAME:
		bio->bi_io_vec[bio->bi_vcnt++] = bio_src->bi_io_vec[0];
		goto integrity_clone;
		break;
	default:
		bio_for_each_segment(bv, bio_src, iter)
			bio->bi_io_vec[bio->bi_vcnt++] = bv;
		break;
	}

	bio_for_each_segment(bv, bio_src, iter)
		bio->bi_io_vec[bio->bi_vcnt++] = bv;

integrity_clone:
	if (bio_integrity(bio_src)) {
		int ret;

@@ -1788,7 +1789,7 @@ struct bio *bio_split(struct bio *bio, int sectors,
	 * Discards need a mutable bio_vec to accommodate the payload
	 * required by the DSM TRIM and UNMAP commands.
	 */
	if (bio_op(bio) == REQ_OP_DISCARD)
	if (bio_op(bio) == REQ_OP_DISCARD || bio_op(bio) == REQ_OP_SECURE_ERASE)
		split = bio_clone_bioset(bio, gfp, bs);
	else
		split = bio_clone_fast(bio, gfp, bs);
|
|
@@ -515,7 +515,9 @@ EXPORT_SYMBOL_GPL(blk_queue_bypass_end);

void blk_set_queue_dying(struct request_queue *q)
{
	queue_flag_set_unlocked(QUEUE_FLAG_DYING, q);
	spin_lock_irq(q->queue_lock);
	queue_flag_set(QUEUE_FLAG_DYING, q);
	spin_unlock_irq(q->queue_lock);

	if (q->mq_ops)
		blk_mq_wake_waiters(q);
|
|
@ -94,8 +94,30 @@ static struct bio *blk_bio_segment_split(struct request_queue *q,
|
|||
bool do_split = true;
|
||||
struct bio *new = NULL;
|
||||
const unsigned max_sectors = get_max_io_size(q, bio);
|
||||
unsigned bvecs = 0;
|
||||
|
||||
bio_for_each_segment(bv, bio, iter) {
|
||||
/*
|
||||
* With arbitrary bio size, the incoming bio may be very
|
||||
* big. We have to split the bio into small bios so that
|
||||
* each holds at most BIO_MAX_PAGES bvecs because
|
||||
* bio_clone() can fail to allocate big bvecs.
|
||||
*
|
||||
* It should have been better to apply the limit per
|
||||
* request queue in which bio_clone() is involved,
|
||||
* instead of globally. The biggest blocker is the
|
||||
* bio_clone() in bio bounce.
|
||||
*
|
||||
* If bio is splitted by this reason, we should have
|
||||
* allowed to continue bios merging, but don't do
|
||||
* that now for making the change simple.
|
||||
*
|
||||
* TODO: deal with bio bounce's bio_clone() gracefully
|
||||
* and convert the global limit into per-queue limit.
|
||||
*/
|
||||
if (bvecs++ >= BIO_MAX_PAGES)
|
||||
goto split;
|
||||
|
||||
/*
|
||||
* If the queue doesn't support SG gaps and adding this
|
||||
* offset would create a gap, disallow it.
|
||||
|
@ -172,12 +194,18 @@ void blk_queue_split(struct request_queue *q, struct bio **bio,
|
|||
struct bio *split, *res;
|
||||
unsigned nsegs;
|
||||
|
||||
if (bio_op(*bio) == REQ_OP_DISCARD)
|
||||
switch (bio_op(*bio)) {
|
||||
case REQ_OP_DISCARD:
|
||||
case REQ_OP_SECURE_ERASE:
|
||||
split = blk_bio_discard_split(q, *bio, bs, &nsegs);
|
||||
else if (bio_op(*bio) == REQ_OP_WRITE_SAME)
|
||||
break;
|
||||
case REQ_OP_WRITE_SAME:
|
||||
split = blk_bio_write_same_split(q, *bio, bs, &nsegs);
|
||||
else
|
||||
break;
|
||||
default:
|
||||
split = blk_bio_segment_split(q, *bio, q->bio_split, &nsegs);
|
||||
break;
|
||||
}
|
||||
|
||||
/* physical segments can be figured out during splitting */
|
||||
res = split ? split : *bio;
|
||||
|
@ -213,7 +241,7 @@ static unsigned int __blk_recalc_rq_segments(struct request_queue *q,
|
|||
* This should probably be returning 0, but blk_add_request_payload()
|
||||
* (Christoph!!!!)
|
||||
*/
|
||||
if (bio_op(bio) == REQ_OP_DISCARD)
|
||||
if (bio_op(bio) == REQ_OP_DISCARD || bio_op(bio) == REQ_OP_SECURE_ERASE)
|
||||
return 1;
|
||||
|
||||
if (bio_op(bio) == REQ_OP_WRITE_SAME)
|
||||
|
@ -385,7 +413,9 @@ static int __blk_bios_map_sg(struct request_queue *q, struct bio *bio,
|
|||
nsegs = 0;
|
||||
cluster = blk_queue_cluster(q);
|
||||
|
||||
if (bio_op(bio) == REQ_OP_DISCARD) {
|
||||
switch (bio_op(bio)) {
|
||||
case REQ_OP_DISCARD:
|
||||
case REQ_OP_SECURE_ERASE:
|
||||
/*
|
||||
* This is a hack - drivers should be neither modifying the
|
||||
* biovec, nor relying on bi_vcnt - but because of
|
||||
|
@ -393,19 +423,16 @@ static int __blk_bios_map_sg(struct request_queue *q, struct bio *bio,
|
|||
* a payload we need to set up here (thank you Christoph) and
|
||||
* bi_vcnt is really the only way of telling if we need to.
|
||||
*/
|
||||
|
||||
if (bio->bi_vcnt)
|
||||
goto single_segment;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
if (bio_op(bio) == REQ_OP_WRITE_SAME) {
|
||||
single_segment:
|
||||
if (!bio->bi_vcnt)
|
||||
return 0;
|
||||
/* Fall through */
|
||||
case REQ_OP_WRITE_SAME:
|
||||
*sg = sglist;
|
||||
bvec = bio_iovec(bio);
|
||||
sg_set_page(*sg, bvec.bv_page, bvec.bv_len, bvec.bv_offset);
|
||||
return 1;
|
||||
default:
|
||||
break;
|
||||
}
|
||||
|
||||
for_each_bio(bio)
|
||||
|
|
|
@ -793,11 +793,12 @@ static void __blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx)
|
|||
struct list_head *dptr;
|
||||
int queued;
|
||||
|
||||
WARN_ON(!cpumask_test_cpu(raw_smp_processor_id(), hctx->cpumask));
|
||||
|
||||
if (unlikely(test_bit(BLK_MQ_S_STOPPED, &hctx->state)))
|
||||
return;
|
||||
|
||||
WARN_ON(!cpumask_test_cpu(raw_smp_processor_id(), hctx->cpumask) &&
|
||||
cpu_online(hctx->next_cpu));
|
||||
|
||||
hctx->run++;
|
||||
|
||||
/*
|
||||
|
@ -1036,10 +1037,11 @@ void blk_mq_delay_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs)
|
|||
EXPORT_SYMBOL(blk_mq_delay_queue);
|
||||
|
||||
static inline void __blk_mq_insert_req_list(struct blk_mq_hw_ctx *hctx,
|
||||
struct blk_mq_ctx *ctx,
|
||||
struct request *rq,
|
||||
bool at_head)
|
||||
{
|
||||
struct blk_mq_ctx *ctx = rq->mq_ctx;
|
||||
|
||||
trace_block_rq_insert(hctx->queue, rq);
|
||||
|
||||
if (at_head)
|
||||
|
@ -1053,20 +1055,16 @@ static void __blk_mq_insert_request(struct blk_mq_hw_ctx *hctx,
|
|||
{
|
||||
struct blk_mq_ctx *ctx = rq->mq_ctx;
|
||||
|
||||
__blk_mq_insert_req_list(hctx, ctx, rq, at_head);
|
||||
__blk_mq_insert_req_list(hctx, rq, at_head);
|
||||
blk_mq_hctx_mark_pending(hctx, ctx);
|
||||
}
|
||||
|
||||
void blk_mq_insert_request(struct request *rq, bool at_head, bool run_queue,
|
||||
bool async)
|
||||
bool async)
|
||||
{
|
||||
struct blk_mq_ctx *ctx = rq->mq_ctx;
|
||||
struct request_queue *q = rq->q;
|
||||
struct blk_mq_hw_ctx *hctx;
|
||||
struct blk_mq_ctx *ctx = rq->mq_ctx, *current_ctx;
|
||||
|
||||
current_ctx = blk_mq_get_ctx(q);
|
||||
if (!cpu_online(ctx->cpu))
|
||||
rq->mq_ctx = ctx = current_ctx;
|
||||
|
||||
hctx = q->mq_ops->map_queue(q, ctx->cpu);
|
||||
|
||||
|
@ -1076,8 +1074,6 @@ void blk_mq_insert_request(struct request *rq, bool at_head, bool run_queue,
|
|||
|
||||
if (run_queue)
|
||||
blk_mq_run_hw_queue(hctx, async);
|
||||
|
||||
blk_mq_put_ctx(current_ctx);
|
||||
}
|
||||
|
||||
static void blk_mq_insert_requests(struct request_queue *q,
|
||||
|
@ -1088,14 +1084,9 @@ static void blk_mq_insert_requests(struct request_queue *q,
|
|||
|
||||
{
|
||||
struct blk_mq_hw_ctx *hctx;
|
||||
struct blk_mq_ctx *current_ctx;
|
||||
|
||||
trace_block_unplug(q, depth, !from_schedule);
|
||||
|
||||
current_ctx = blk_mq_get_ctx(q);
|
||||
|
||||
if (!cpu_online(ctx->cpu))
|
||||
ctx = current_ctx;
|
||||
hctx = q->mq_ops->map_queue(q, ctx->cpu);
|
||||
|
||||
/*
|
||||
|
@ -1107,15 +1098,14 @@ static void blk_mq_insert_requests(struct request_queue *q,
|
|||
struct request *rq;
|
||||
|
||||
rq = list_first_entry(list, struct request, queuelist);
|
||||
BUG_ON(rq->mq_ctx != ctx);
|
||||
list_del_init(&rq->queuelist);
|
||||
rq->mq_ctx = ctx;
|
||||
__blk_mq_insert_req_list(hctx, ctx, rq, false);
|
||||
__blk_mq_insert_req_list(hctx, rq, false);
|
||||
}
|
||||
blk_mq_hctx_mark_pending(hctx, ctx);
|
||||
spin_unlock(&ctx->lock);
|
||||
|
||||
blk_mq_run_hw_queue(hctx, from_schedule);
|
||||
blk_mq_put_ctx(current_ctx);
|
||||
}
|
||||
|
||||
static int plug_ctx_cmp(void *priv, struct list_head *a, struct list_head *b)
|
||||
|
@ -1630,16 +1620,17 @@ static int blk_mq_alloc_bitmap(struct blk_mq_ctxmap *bitmap, int node)
|
|||
return 0;
|
||||
}
|
||||
|
||||
/*
|
||||
* 'cpu' is going away. splice any existing rq_list entries from this
|
||||
* software queue to the hw queue dispatch list, and ensure that it
|
||||
* gets run.
|
||||
*/
|
||||
static int blk_mq_hctx_cpu_offline(struct blk_mq_hw_ctx *hctx, int cpu)
|
||||
{
|
||||
struct request_queue *q = hctx->queue;
|
||||
struct blk_mq_ctx *ctx;
|
||||
LIST_HEAD(tmp);
|
||||
|
||||
/*
|
||||
* Move ctx entries to new CPU, if this one is going away.
|
||||
*/
|
||||
ctx = __blk_mq_get_ctx(q, cpu);
|
||||
ctx = __blk_mq_get_ctx(hctx->queue, cpu);
|
||||
|
||||
spin_lock(&ctx->lock);
|
||||
if (!list_empty(&ctx->rq_list)) {
|
||||
|
@ -1651,24 +1642,11 @@ static int blk_mq_hctx_cpu_offline(struct blk_mq_hw_ctx *hctx, int cpu)
|
|||
if (list_empty(&tmp))
|
||||
return NOTIFY_OK;
|
||||
|
||||
ctx = blk_mq_get_ctx(q);
|
||||
spin_lock(&ctx->lock);
|
||||
|
||||
while (!list_empty(&tmp)) {
|
||||
struct request *rq;
|
||||
|
||||
rq = list_first_entry(&tmp, struct request, queuelist);
|
||||
rq->mq_ctx = ctx;
|
||||
list_move_tail(&rq->queuelist, &ctx->rq_list);
|
||||
}
|
||||
|
||||
hctx = q->mq_ops->map_queue(q, ctx->cpu);
|
||||
blk_mq_hctx_mark_pending(hctx, ctx);
|
||||
|
||||
spin_unlock(&ctx->lock);
|
||||
spin_lock(&hctx->lock);
|
||||
list_splice_tail_init(&tmp, &hctx->dispatch);
|
||||
spin_unlock(&hctx->lock);
|
||||
|
||||
blk_mq_run_hw_queue(hctx, true);
|
||||
blk_mq_put_ctx(ctx);
|
||||
return NOTIFY_OK;
|
||||
}
|
||||
|
||||
|
|
|
@@ -366,7 +366,7 @@ void elv_dispatch_sort(struct request_queue *q, struct request *rq)
	list_for_each_prev(entry, &q->queue_head) {
		struct request *pos = list_entry_rq(entry);

		if ((req_op(rq) == REQ_OP_DISCARD) != (req_op(pos) == REQ_OP_DISCARD))
		if (req_op(rq) != req_op(pos))
			break;
		if (rq_data_dir(rq) != rq_data_dir(pos))
			break;
|
|
@@ -439,7 +439,7 @@ config CRYPTO_CRC32C_INTEL

config CRYPT_CRC32C_VPMSUM
	tristate "CRC32c CRC algorithm (powerpc64)"
	depends on PPC64
	depends on PPC64 && ALTIVEC
	select CRYPTO_HASH
	select CRC32
	help
|
|
@@ -24,14 +24,14 @@
#define ROTL64(x, y) (((x) << (y)) | ((x) >> (64 - (y))))

static const u64 keccakf_rndc[24] = {
	0x0000000000000001, 0x0000000000008082, 0x800000000000808a,
	0x8000000080008000, 0x000000000000808b, 0x0000000080000001,
	0x8000000080008081, 0x8000000000008009, 0x000000000000008a,
	0x0000000000000088, 0x0000000080008009, 0x000000008000000a,
	0x000000008000808b, 0x800000000000008b, 0x8000000000008089,
	0x8000000000008003, 0x8000000000008002, 0x8000000000000080,
	0x000000000000800a, 0x800000008000000a, 0x8000000080008081,
	0x8000000000008080, 0x0000000080000001, 0x8000000080008008
	0x0000000000000001ULL, 0x0000000000008082ULL, 0x800000000000808aULL,
	0x8000000080008000ULL, 0x000000000000808bULL, 0x0000000080000001ULL,
	0x8000000080008081ULL, 0x8000000000008009ULL, 0x000000000000008aULL,
	0x0000000000000088ULL, 0x0000000080008009ULL, 0x000000008000000aULL,
	0x000000008000808bULL, 0x800000000000008bULL, 0x8000000000008089ULL,
	0x8000000000008003ULL, 0x8000000000008002ULL, 0x8000000000000080ULL,
	0x000000000000800aULL, 0x800000008000000aULL, 0x8000000080008081ULL,
	0x8000000000008080ULL, 0x0000000080000001ULL, 0x8000000080008008ULL
};

static const int keccakf_rotc[24] = {
|
|
@@ -42,6 +42,7 @@ struct apd_private_data;
struct apd_device_desc {
	unsigned int flags;
	unsigned int fixed_clk_rate;
	struct property_entry *properties;
	int (*setup)(struct apd_private_data *pdata);
};

@@ -76,9 +77,17 @@ static struct apd_device_desc cz_i2c_desc = {
	.fixed_clk_rate = 133000000,
};

static struct property_entry uart_properties[] = {
	PROPERTY_ENTRY_U32("reg-io-width", 4),
	PROPERTY_ENTRY_U32("reg-shift", 2),
	PROPERTY_ENTRY_BOOL("snps,uart-16550-compatible"),
	{ },
};

static struct apd_device_desc cz_uart_desc = {
	.setup = acpi_apd_setup,
	.fixed_clk_rate = 48000000,
	.properties = uart_properties,
};
#endif

@@ -130,6 +139,12 @@ static int acpi_apd_create_device(struct acpi_device *adev,
		goto err_out;
	}

	if (dev_desc->properties) {
		ret = device_add_properties(&adev->dev, dev_desc->properties);
		if (ret)
			goto err_out;
	}

	adev->driver_data = pdata;
	pdev = acpi_create_platform_device(adev);
	if (!IS_ERR_OR_NULL(pdev))
|
|
@@ -75,6 +75,7 @@ struct lpss_device_desc {
	const char *clk_con_id;
	unsigned int prv_offset;
	size_t prv_size_override;
	struct property_entry *properties;
	void (*setup)(struct lpss_private_data *pdata);
};

@@ -163,11 +164,19 @@ static const struct lpss_device_desc lpt_i2c_dev_desc = {
	.prv_offset = 0x800,
};

static struct property_entry uart_properties[] = {
	PROPERTY_ENTRY_U32("reg-io-width", 4),
	PROPERTY_ENTRY_U32("reg-shift", 2),
	PROPERTY_ENTRY_BOOL("snps,uart-16550-compatible"),
	{ },
};

static const struct lpss_device_desc lpt_uart_dev_desc = {
	.flags = LPSS_CLK | LPSS_CLK_GATE | LPSS_CLK_DIVIDER | LPSS_LTR,
	.clk_con_id = "baudclk",
	.prv_offset = 0x800,
	.setup = lpss_uart_setup,
	.properties = uart_properties,
};

static const struct lpss_device_desc lpt_sdio_dev_desc = {

@@ -189,6 +198,7 @@ static const struct lpss_device_desc byt_uart_dev_desc = {
	.clk_con_id = "baudclk",
	.prv_offset = 0x800,
	.setup = lpss_uart_setup,
	.properties = uart_properties,
};

static const struct lpss_device_desc bsw_uart_dev_desc = {

@@ -197,6 +207,7 @@ static const struct lpss_device_desc bsw_uart_dev_desc = {
	.clk_con_id = "baudclk",
	.prv_offset = 0x800,
	.setup = lpss_uart_setup,
	.properties = uart_properties,
};

static const struct lpss_device_desc byt_spi_dev_desc = {

@@ -440,6 +451,12 @@ static int acpi_lpss_create_device(struct acpi_device *adev,
		goto err_out;
	}

	if (dev_desc->properties) {
		ret = device_add_properties(&adev->dev, dev_desc->properties);
		if (ret)
			goto err_out;
	}

	adev->driver_data = pdata;
	pdev = acpi_create_platform_device(adev);
	if (!IS_ERR_OR_NULL(pdev)) {
|
|
@@ -1266,6 +1266,7 @@ void device_del(struct device *dev)
	bus_remove_device(dev);
	device_pm_remove(dev);
	driver_deferred_probe_del(dev);
	device_remove_properties(dev);

	/* Notify the platform of the removal, in case they
	 * need to do anything...
|
|
@ -3706,22 +3706,21 @@ static int floppy_open(struct block_device *bdev, fmode_t mode)
|
|||
if (UFDCS->rawcmd == 1)
|
||||
UFDCS->rawcmd = 2;
|
||||
|
||||
if (mode & (FMODE_READ|FMODE_WRITE)) {
|
||||
UDRS->last_checked = 0;
|
||||
clear_bit(FD_OPEN_SHOULD_FAIL_BIT, &UDRS->flags);
|
||||
check_disk_change(bdev);
|
||||
if (test_bit(FD_DISK_CHANGED_BIT, &UDRS->flags))
|
||||
goto out;
|
||||
if (test_bit(FD_OPEN_SHOULD_FAIL_BIT, &UDRS->flags))
|
||||
if (!(mode & FMODE_NDELAY)) {
|
||||
if (mode & (FMODE_READ|FMODE_WRITE)) {
|
||||
UDRS->last_checked = 0;
|
||||
clear_bit(FD_OPEN_SHOULD_FAIL_BIT, &UDRS->flags);
|
||||
check_disk_change(bdev);
|
||||
if (test_bit(FD_DISK_CHANGED_BIT, &UDRS->flags))
|
||||
goto out;
|
||||
if (test_bit(FD_OPEN_SHOULD_FAIL_BIT, &UDRS->flags))
|
||||
goto out;
|
||||
}
|
||||
res = -EROFS;
|
||||
if ((mode & FMODE_WRITE) &&
|
||||
!test_bit(FD_DISK_WRITABLE_BIT, &UDRS->flags))
|
||||
goto out;
|
||||
}
|
||||
|
||||
res = -EROFS;
|
||||
|
||||
if ((mode & FMODE_WRITE) &&
|
||||
!test_bit(FD_DISK_WRITABLE_BIT, &UDRS->flags))
|
||||
goto out;
|
||||
|
||||
mutex_unlock(&open_lock);
|
||||
mutex_unlock(&floppy_mutex);
|
||||
return 0;
|
||||
|
|
|
@ -189,6 +189,8 @@ struct blkfront_info
|
|||
struct mutex mutex;
|
||||
struct xenbus_device *xbdev;
|
||||
struct gendisk *gd;
|
||||
u16 sector_size;
|
||||
unsigned int physical_sector_size;
|
||||
int vdevice;
|
||||
blkif_vdev_t handle;
|
||||
enum blkif_state connected;
|
||||
|
@ -910,9 +912,45 @@ static struct blk_mq_ops blkfront_mq_ops = {
|
|||
.map_queue = blk_mq_map_queue,
|
||||
};
|
||||
|
||||
static void blkif_set_queue_limits(struct blkfront_info *info)
|
||||
{
|
||||
struct request_queue *rq = info->rq;
|
||||
struct gendisk *gd = info->gd;
|
||||
unsigned int segments = info->max_indirect_segments ? :
|
||||
BLKIF_MAX_SEGMENTS_PER_REQUEST;
|
||||
|
||||
queue_flag_set_unlocked(QUEUE_FLAG_VIRT, rq);
|
||||
|
||||
if (info->feature_discard) {
|
||||
queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, rq);
|
||||
blk_queue_max_discard_sectors(rq, get_capacity(gd));
|
||||
rq->limits.discard_granularity = info->discard_granularity;
|
||||
rq->limits.discard_alignment = info->discard_alignment;
|
||||
if (info->feature_secdiscard)
|
||||
queue_flag_set_unlocked(QUEUE_FLAG_SECERASE, rq);
|
||||
}
|
||||
|
||||
/* Hard sector size and max sectors impersonate the equiv. hardware. */
|
||||
blk_queue_logical_block_size(rq, info->sector_size);
|
||||
blk_queue_physical_block_size(rq, info->physical_sector_size);
|
||||
blk_queue_max_hw_sectors(rq, (segments * XEN_PAGE_SIZE) / 512);
|
||||
|
||||
/* Each segment in a request is up to an aligned page in size. */
|
||||
blk_queue_segment_boundary(rq, PAGE_SIZE - 1);
|
||||
blk_queue_max_segment_size(rq, PAGE_SIZE);
|
||||
|
||||
/* Ensure a merged request will fit in a single I/O ring slot. */
|
||||
blk_queue_max_segments(rq, segments / GRANTS_PER_PSEG);
|
||||
|
||||
/* Make sure buffer addresses are sector-aligned. */
|
||||
blk_queue_dma_alignment(rq, 511);
|
||||
|
||||
/* Make sure we don't use bounce buffers. */
|
||||
blk_queue_bounce_limit(rq, BLK_BOUNCE_ANY);
|
||||
}
|
||||
|
||||
static int xlvbd_init_blk_queue(struct gendisk *gd, u16 sector_size,
|
||||
unsigned int physical_sector_size,
|
||||
unsigned int segments)
|
||||
unsigned int physical_sector_size)
|
||||
{
|
||||
struct request_queue *rq;
|
||||
struct blkfront_info *info = gd->private_data;
|
||||
|
@ -944,36 +982,11 @@ static int xlvbd_init_blk_queue(struct gendisk *gd, u16 sector_size,
|
|||
}
|
||||
|
||||
rq->queuedata = info;
|
||||
queue_flag_set_unlocked(QUEUE_FLAG_VIRT, rq);
|
||||
|
||||
if (info->feature_discard) {
|
||||
queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, rq);
|
||||
blk_queue_max_discard_sectors(rq, get_capacity(gd));
|
||||
rq->limits.discard_granularity = info->discard_granularity;
|
||||
rq->limits.discard_alignment = info->discard_alignment;
|
||||
if (info->feature_secdiscard)
|
||||
queue_flag_set_unlocked(QUEUE_FLAG_SECERASE, rq);
|
||||
}
|
||||
|
||||
/* Hard sector size and max sectors impersonate the equiv. hardware. */
|
||||
blk_queue_logical_block_size(rq, sector_size);
|
||||
blk_queue_physical_block_size(rq, physical_sector_size);
|
||||
blk_queue_max_hw_sectors(rq, (segments * XEN_PAGE_SIZE) / 512);
|
||||
|
||||
/* Each segment in a request is up to an aligned page in size. */
|
||||
blk_queue_segment_boundary(rq, PAGE_SIZE - 1);
|
||||
blk_queue_max_segment_size(rq, PAGE_SIZE);
|
||||
|
||||
/* Ensure a merged request will fit in a single I/O ring slot. */
|
||||
blk_queue_max_segments(rq, segments / GRANTS_PER_PSEG);
|
||||
|
||||
/* Make sure buffer addresses are sector-aligned. */
|
||||
blk_queue_dma_alignment(rq, 511);
|
||||
|
||||
/* Make sure we don't use bounce buffers. */
|
||||
blk_queue_bounce_limit(rq, BLK_BOUNCE_ANY);
|
||||
|
||||
gd->queue = rq;
|
||||
info->rq = gd->queue = rq;
|
||||
info->gd = gd;
|
||||
info->sector_size = sector_size;
|
||||
info->physical_sector_size = physical_sector_size;
|
||||
blkif_set_queue_limits(info);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
@ -1136,16 +1149,11 @@ static int xlvbd_alloc_gendisk(blkif_sector_t capacity,
|
|||
gd->private_data = info;
|
||||
set_capacity(gd, capacity);
|
||||
|
||||
if (xlvbd_init_blk_queue(gd, sector_size, physical_sector_size,
|
||||
info->max_indirect_segments ? :
|
||||
BLKIF_MAX_SEGMENTS_PER_REQUEST)) {
|
||||
if (xlvbd_init_blk_queue(gd, sector_size, physical_sector_size)) {
|
||||
del_gendisk(gd);
|
||||
goto release;
|
||||
}
|
||||
|
||||
info->rq = gd->queue;
|
||||
info->gd = gd;
|
||||
|
||||
xlvbd_flush(info);
|
||||
|
||||
if (vdisk_info & VDISK_READONLY)
|
||||
|
@ -1315,7 +1323,7 @@ static void blkif_free_ring(struct blkfront_ring_info *rinfo)
|
|||
rinfo->ring_ref[i] = GRANT_INVALID_REF;
|
||||
}
|
||||
}
|
||||
free_pages((unsigned long)rinfo->ring.sring, get_order(info->nr_ring_pages * PAGE_SIZE));
|
||||
free_pages((unsigned long)rinfo->ring.sring, get_order(info->nr_ring_pages * XEN_PAGE_SIZE));
|
||||
rinfo->ring.sring = NULL;
|
||||
|
||||
if (rinfo->irq)
|
||||
|
@ -2007,8 +2015,10 @@ static int blkif_recover(struct blkfront_info *info)
|
|||
struct split_bio *split_bio;
|
||||
|
||||
blkfront_gather_backend_features(info);
|
||||
/* Reset limits changed by blk_mq_update_nr_hw_queues(). */
|
||||
blkif_set_queue_limits(info);
|
||||
segs = info->max_indirect_segments ? : BLKIF_MAX_SEGMENTS_PER_REQUEST;
|
||||
blk_queue_max_segments(info->rq, segs);
|
||||
blk_queue_max_segments(info->rq, segs / GRANTS_PER_PSEG);
|
||||
|
||||
for (r_index = 0; r_index < info->nr_rings; r_index++) {
|
||||
struct blkfront_ring_info *rinfo = &info->rinfo[r_index];
|
||||
|
@ -2432,7 +2442,7 @@ static void blkfront_connect(struct blkfront_info *info)
|
|||
if (err) {
|
||||
xenbus_dev_fatal(info->xbdev, err, "xlvbd_add at %s",
|
||||
info->xbdev->otherend);
|
||||
return;
|
||||
goto fail;
|
||||
}
|
||||
|
||||
xenbus_switch_state(info->xbdev, XenbusStateConnected);
|
||||
|
@ -2445,6 +2455,11 @@ static void blkfront_connect(struct blkfront_info *info)
|
|||
device_add_disk(&info->xbdev->dev, info->gd);
|
||||
|
||||
info->is_ready = 1;
|
||||
return;
|
||||
|
||||
fail:
|
||||
blkif_free(info, 0);
|
||||
return;
|
||||
}
|
||||
|
||||
/**
|
||||
|
|
|
@ -66,10 +66,10 @@ static void kona_timer_disable_and_clear(void __iomem *base)
|
|||
|
||||
}
|
||||
|
||||
static void
|
||||
static int
|
||||
kona_timer_get_counter(void __iomem *timer_base, uint32_t *msw, uint32_t *lsw)
|
||||
{
|
||||
int loop_limit = 4;
|
||||
int loop_limit = 3;
|
||||
|
||||
/*
|
||||
* Read 64-bit free running counter
|
||||
|
@ -83,18 +83,19 @@ kona_timer_get_counter(void __iomem *timer_base, uint32_t *msw, uint32_t *lsw)
|
|||
* if new hi-word is equal to previously read hi-word then stop.
|
||||
*/
|
||||
|
||||
while (--loop_limit) {
|
||||
do {
|
||||
*msw = readl(timer_base + KONA_GPTIMER_STCHI_OFFSET);
|
||||
*lsw = readl(timer_base + KONA_GPTIMER_STCLO_OFFSET);
|
||||
if (*msw == readl(timer_base + KONA_GPTIMER_STCHI_OFFSET))
|
||||
break;
|
||||
}
|
||||
} while (--loop_limit);
|
||||
if (!loop_limit) {
|
||||
pr_err("bcm_kona_timer: getting counter failed.\n");
|
||||
pr_err(" Timer will be impacted\n");
|
||||
return -ETIMEDOUT;
|
||||
}
|
||||
|
||||
return;
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int kona_timer_set_next_event(unsigned long clc,
|
||||
|
@ -112,8 +113,11 @@ static int kona_timer_set_next_event(unsigned long clc,
|
|||
|
||||
uint32_t lsw, msw;
|
||||
uint32_t reg;
|
||||
int ret;
|
||||
|
||||
kona_timer_get_counter(timers.tmr_regs, &msw, &lsw);
|
||||
ret = kona_timer_get_counter(timers.tmr_regs, &msw, &lsw);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
/* Load the "next" event tick value */
|
||||
writel(lsw + clc, timers.tmr_regs + KONA_GPTIMER_STCM0_OFFSET);
|
||||
|
|
|
@ -164,7 +164,7 @@ void __init gic_clocksource_init(unsigned int frequency)
|
|||
gic_start_count();
|
||||
}
|
||||
|
||||
static void __init gic_clocksource_of_init(struct device_node *node)
|
||||
static int __init gic_clocksource_of_init(struct device_node *node)
|
||||
{
|
||||
struct clk *clk;
|
||||
int ret;
|
||||
|
|
|
@ -21,6 +21,8 @@
|
|||
#include <linux/of_irq.h>
|
||||
#include <linux/sched_clock.h>
|
||||
|
||||
#include <clocksource/pxa.h>
|
||||
|
||||
#include <asm/div64.h>
|
||||
|
||||
#define OSMR0 0x00 /* OS Timer 0 Match Register */
|
||||
|
|
|
@ -123,12 +123,16 @@ static struct clock_event_device sun4i_clockevent = {
|
|||
.set_next_event = sun4i_clkevt_next_event,
|
||||
};
|
||||
|
||||
static void sun4i_timer_clear_interrupt(void)
|
||||
{
|
||||
writel(TIMER_IRQ_EN(0), timer_base + TIMER_IRQ_ST_REG);
|
||||
}
|
||||
|
||||
static irqreturn_t sun4i_timer_interrupt(int irq, void *dev_id)
|
||||
{
|
||||
struct clock_event_device *evt = (struct clock_event_device *)dev_id;
|
||||
|
||||
writel(0x1, timer_base + TIMER_IRQ_ST_REG);
|
||||
sun4i_timer_clear_interrupt();
|
||||
evt->event_handler(evt);
|
||||
|
||||
return IRQ_HANDLED;
|
||||
|
@ -208,6 +212,9 @@ static int __init sun4i_timer_init(struct device_node *node)
|
|||
/* Make sure timer is stopped before playing with interrupts */
|
||||
sun4i_clkevt_time_stop(0);
|
||||
|
||||
/* clear timer0 interrupt */
|
||||
sun4i_timer_clear_interrupt();
|
||||
|
||||
sun4i_clockevent.cpumask = cpu_possible_mask;
|
||||
sun4i_clockevent.irq = irq;
|
||||
|
||||
|
|
|
@ -338,7 +338,6 @@ static int __init armada_xp_timer_init(struct device_node *np)
|
|||
struct clk *clk = of_clk_get_by_name(np, "fixed");
|
||||
int ret;
|
||||
|
||||
clk = of_clk_get(np, 0);
|
||||
if (IS_ERR(clk)) {
|
||||
pr_err("Failed to get clock");
|
||||
return PTR_ERR(clk);
|
||||
|
|
|
@ -202,10 +202,10 @@ static int __init pistachio_clksrc_of_init(struct device_node *node)
|
|||
rate = clk_get_rate(fast_clk);
|
||||
|
||||
/* Disable irq's for clocksource usage */
|
||||
gpt_writel(&pcs_gpt.base, 0, TIMER_IRQ_MASK, 0);
|
||||
gpt_writel(&pcs_gpt.base, 0, TIMER_IRQ_MASK, 1);
|
||||
gpt_writel(&pcs_gpt.base, 0, TIMER_IRQ_MASK, 2);
|
||||
gpt_writel(&pcs_gpt.base, 0, TIMER_IRQ_MASK, 3);
|
||||
gpt_writel(pcs_gpt.base, 0, TIMER_IRQ_MASK, 0);
|
||||
gpt_writel(pcs_gpt.base, 0, TIMER_IRQ_MASK, 1);
|
||||
gpt_writel(pcs_gpt.base, 0, TIMER_IRQ_MASK, 2);
|
||||
gpt_writel(pcs_gpt.base, 0, TIMER_IRQ_MASK, 3);
|
||||
|
||||
/* Enable timer block */
|
||||
writel(TIMER_ME_GLOBAL, pcs_gpt.base);
|
||||
|
|
|
@ -261,6 +261,12 @@ static int __init at91sam926x_pit_dt_init(struct device_node *node)
|
|||
return PTR_ERR(data->mck);
|
||||
}
|
||||
|
||||
ret = clk_prepare_enable(data->mck);
|
||||
if (ret) {
|
||||
pr_err("Unable to enable mck\n");
|
||||
return ret;
|
||||
}
|
||||
|
||||
/* Get the interrupts property */
|
||||
data->irq = irq_of_parse_and_map(node, 0);
|
||||
if (!data->irq) {
|
||||
|
|
|
@ -441,6 +441,9 @@ static int aead_set_sh_desc(struct crypto_aead *aead)
|
|||
OP_ALG_AAI_CTR_MOD128);
|
||||
const bool is_rfc3686 = alg->caam.rfc3686;
|
||||
|
||||
if (!ctx->authsize)
|
||||
return 0;
|
||||
|
||||
/* NULL encryption / decryption */
|
||||
if (!ctx->enckeylen)
|
||||
return aead_null_set_sh_desc(aead);
|
||||
|
@ -614,7 +617,7 @@ static int aead_set_sh_desc(struct crypto_aead *aead)
|
|||
keys_fit_inline = true;
|
||||
|
||||
/* aead_givencrypt shared descriptor */
|
||||
desc = ctx->sh_desc_givenc;
|
||||
desc = ctx->sh_desc_enc;
|
||||
|
||||
/* Note: Context registers are saved. */
|
||||
init_sh_desc_key_aead(desc, ctx, keys_fit_inline, is_rfc3686);
|
||||
|
@ -645,13 +648,13 @@ static int aead_set_sh_desc(struct crypto_aead *aead)
|
|||
append_operation(desc, ctx->class2_alg_type |
|
||||
OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
|
||||
|
||||
/* ivsize + cryptlen = seqoutlen - authsize */
|
||||
append_math_sub_imm_u32(desc, REG3, SEQOUTLEN, IMM, ctx->authsize);
|
||||
|
||||
/* Read and write assoclen bytes */
|
||||
append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ);
|
||||
append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ);
|
||||
|
||||
/* ivsize + cryptlen = seqoutlen - authsize */
|
||||
append_math_sub_imm_u32(desc, REG3, SEQOUTLEN, IMM, ctx->authsize);
|
||||
|
||||
/* Skip assoc data */
|
||||
append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF);
|
||||
|
||||
|
@ -697,7 +700,7 @@ static int aead_set_sh_desc(struct crypto_aead *aead)
|
|||
ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc,
|
||||
desc_bytes(desc),
|
||||
DMA_TO_DEVICE);
|
||||
if (dma_mapping_error(jrdev, ctx->sh_desc_givenc_dma)) {
|
||||
if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma)) {
|
||||
dev_err(jrdev, "unable to map shared descriptor\n");
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
|
|
@@ -1898,6 +1898,7 @@ caam_hash_alloc(struct caam_hash_template *template,
			 template->name);
		snprintf(alg->cra_driver_name, CRYPTO_MAX_ALG_NAME, "%s",
			 template->driver_name);
		t_alg->ahash_alg.setkey = NULL;
	}
	alg->cra_module = THIS_MODULE;
	alg->cra_init = caam_hash_cra_init;
|
|
@@ -116,6 +116,9 @@ static int dax_pmem_probe(struct device *dev)
	if (rc)
		return rc;

	/* adjust the dax_region resource to the start of data */
	res.start += le64_to_cpu(pfn_sb->dataoff);

	nd_region = to_nd_region(dev->parent);
	dax_region = alloc_dax_region(dev, nd_region->id, &res,
			le32_to_cpu(pfn_sb->align), addr, PFN_DEV|PFN_MAP);
|
|
@@ -251,6 +251,14 @@ config EDAC_SBRIDGE
	  Support for error detection and correction the Intel
	  Sandy Bridge, Ivy Bridge and Haswell Integrated Memory Controllers.

config EDAC_SKX
	tristate "Intel Skylake server Integrated MC"
	depends on EDAC_MM_EDAC && PCI && X86_64 && X86_MCE_INTEL
	depends on PCI_MMCONFIG
	help
	  Support for error detection and correction the Intel
	  Skylake server Integrated Memory Controllers.

config EDAC_MPC85XX
	tristate "Freescale MPC83xx / MPC85xx"
	depends on EDAC_MM_EDAC && FSL_SOC
|
|
@@ -31,6 +31,7 @@ obj-$(CONFIG_EDAC_I5400) += i5400_edac.o
obj-$(CONFIG_EDAC_I7300) += i7300_edac.o
obj-$(CONFIG_EDAC_I7CORE) += i7core_edac.o
obj-$(CONFIG_EDAC_SBRIDGE) += sb_edac.o
obj-$(CONFIG_EDAC_SKX) += skx_edac.o
obj-$(CONFIG_EDAC_E7XXX) += e7xxx_edac.o
obj-$(CONFIG_EDAC_E752X) += e752x_edac.o
obj-$(CONFIG_EDAC_I82443BXGX) += i82443bxgx_edac.o
|
|
@ -552,9 +552,9 @@ static const struct pci_id_table pci_dev_descr_haswell_table[] = {
|
|||
/* Knight's Landing Support */
|
||||
/*
|
||||
* KNL's memory channels are swizzled between memory controllers.
|
||||
* MC0 is mapped to CH3,5,6 and MC1 is mapped to CH0,1,2
|
||||
* MC0 is mapped to CH3,4,5 and MC1 is mapped to CH0,1,2
|
||||
*/
|
||||
#define knl_channel_remap(channel) ((channel + 3) % 6)
|
||||
#define knl_channel_remap(mc, chan) ((mc) ? (chan) : (chan) + 3)
|
||||
|
||||
/* Memory controller, TAD tables, error injection - 2-8-0, 2-9-0 (2 of these) */
|
||||
#define PCI_DEVICE_ID_INTEL_KNL_IMC_MC 0x7840
|
||||
|
@ -1286,7 +1286,7 @@ static u32 knl_get_mc_route(int entry, u32 reg)
|
|||
mc = GET_BITFIELD(reg, entry*3, (entry*3)+2);
|
||||
chan = GET_BITFIELD(reg, (entry*2) + 18, (entry*2) + 18 + 1);
|
||||
|
||||
return knl_channel_remap(mc*3 + chan);
|
||||
return knl_channel_remap(mc, chan);
|
||||
}
|
||||
|
||||
/*
|
||||
|
@ -2997,8 +2997,15 @@ static void sbridge_mce_output_error(struct mem_ctl_info *mci,
|
|||
} else {
|
||||
char A = *("A");
|
||||
|
||||
channel = knl_channel_remap(channel);
|
||||
/*
|
||||
* Reported channel is in range 0-2, so we can't map it
|
||||
* back to mc. To figure out mc we check machine check
|
||||
* bank register that reported this error.
|
||||
* bank15 means mc0 and bank16 means mc1.
|
||||
*/
|
||||
channel = knl_channel_remap(m->bank == 16, channel);
|
||||
channel_mask = 1 << channel;
|
||||
|
||||
snprintf(msg, sizeof(msg),
|
||||
"%s%s err_code:%04x:%04x channel:%d (DIMM_%c)",
|
||||
overflow ? " OVERFLOW" : "",
|
||||
|
|
|
@ -50,6 +50,7 @@ config GPIO_DEVRES
|
|||
config OF_GPIO
|
||||
def_bool y
|
||||
depends on OF
|
||||
depends on HAS_IOMEM
|
||||
|
||||
config GPIO_ACPI
|
||||
def_bool y
|
||||
|
@ -188,7 +189,7 @@ config GPIO_EP93XX
|
|||
config GPIO_ETRAXFS
|
||||
bool "Axis ETRAX FS General I/O"
|
||||
depends on CRIS || COMPILE_TEST
|
||||
depends on OF
|
||||
depends on OF_GPIO
|
||||
select GPIO_GENERIC
|
||||
select GPIOLIB_IRQCHIP
|
||||
help
|
||||
|
@ -214,7 +215,7 @@ config GPIO_GENERIC_PLATFORM
|
|||
|
||||
config GPIO_GRGPIO
|
||||
tristate "Aeroflex Gaisler GRGPIO support"
|
||||
depends on OF
|
||||
depends on OF_GPIO
|
||||
select GPIO_GENERIC
|
||||
select IRQ_DOMAIN
|
||||
help
|
||||
|
@ -312,7 +313,7 @@ config GPIO_MPC8XXX
|
|||
config GPIO_MVEBU
|
||||
def_bool y
|
||||
depends on PLAT_ORION
|
||||
depends on OF
|
||||
depends on OF_GPIO
|
||||
select GENERIC_IRQ_CHIP
|
||||
|
||||
config GPIO_MXC
|
||||
|
@ -405,7 +406,7 @@ config GPIO_TEGRA
|
|||
bool "NVIDIA Tegra GPIO support"
|
||||
default ARCH_TEGRA
|
||||
depends on ARCH_TEGRA || COMPILE_TEST
|
||||
depends on OF
|
||||
depends on OF_GPIO
|
||||
help
|
||||
Say yes here to support GPIO pins on NVIDIA Tegra SoCs.
|
||||
|
||||
|
@ -1099,7 +1100,7 @@ menu "SPI GPIO expanders"
|
|||
|
||||
config GPIO_74X164
|
||||
tristate "74x164 serial-in/parallel-out 8-bits shift register"
|
||||
depends on OF
|
||||
depends on OF_GPIO
|
||||
help
|
||||
Driver for 74x164 compatible serial-in/parallel-out 8-outputs
|
||||
shift registers. This driver can be used to provide access
|
||||
|
|
|
@ -192,6 +192,10 @@ int __max730x_probe(struct max7301 *ts)
|
|||
ts->chip.parent = dev;
|
||||
ts->chip.owner = THIS_MODULE;
|
||||
|
||||
ret = gpiochip_add_data(&ts->chip, ts);
|
||||
if (ret)
|
||||
goto exit_destroy;
|
||||
|
||||
/*
|
||||
* initialize pullups according to platform data and cache the
|
||||
* register values for later use.
|
||||
|
@ -213,10 +217,6 @@ int __max730x_probe(struct max7301 *ts)
|
|||
}
|
||||
}
|
||||
|
||||
ret = gpiochip_add_data(&ts->chip, ts);
|
||||
if (ret)
|
||||
goto exit_destroy;
|
||||
|
||||
return ret;
|
||||
|
||||
exit_destroy:
|
||||
|
|
|
@ -426,6 +426,8 @@ struct amdgpu_mman {
|
|||
|
||||
/* custom LRU management */
|
||||
struct amdgpu_mman_lru log2_size[AMDGPU_TTM_LRU_SIZE];
|
||||
/* guard for log2_size array, don't add anything in between */
|
||||
struct amdgpu_mman_lru guard;
|
||||
};
|
||||
|
||||
int amdgpu_copy_buffer(struct amdgpu_ring *ring,
|
||||
|
@ -646,9 +648,9 @@ int amdgpu_gart_table_vram_pin(struct amdgpu_device *adev);
|
|||
void amdgpu_gart_table_vram_unpin(struct amdgpu_device *adev);
|
||||
int amdgpu_gart_init(struct amdgpu_device *adev);
|
||||
void amdgpu_gart_fini(struct amdgpu_device *adev);
|
||||
void amdgpu_gart_unbind(struct amdgpu_device *adev, unsigned offset,
|
||||
void amdgpu_gart_unbind(struct amdgpu_device *adev, uint64_t offset,
|
||||
int pages);
|
||||
int amdgpu_gart_bind(struct amdgpu_device *adev, unsigned offset,
|
||||
int amdgpu_gart_bind(struct amdgpu_device *adev, uint64_t offset,
|
||||
int pages, struct page **pagelist,
|
||||
dma_addr_t *dma_addr, uint32_t flags);
|
||||
|
||||
|
|
|
@ -321,6 +321,19 @@ bool amdgpu_atombios_get_connector_info_from_object_table(struct amdgpu_device *
|
|||
(le16_to_cpu(path->usConnObjectId) &
|
||||
OBJECT_TYPE_MASK) >> OBJECT_TYPE_SHIFT;
|
||||
|
||||
/* Skip TV/CV support */
|
||||
if ((le16_to_cpu(path->usDeviceTag) ==
|
||||
ATOM_DEVICE_TV1_SUPPORT) ||
|
||||
(le16_to_cpu(path->usDeviceTag) ==
|
||||
ATOM_DEVICE_CV_SUPPORT))
|
||||
continue;
|
||||
|
||||
if (con_obj_id >= ARRAY_SIZE(object_connector_convert)) {
|
||||
DRM_ERROR("invalid con_obj_id %d for device tag 0x%04x\n",
|
||||
con_obj_id, le16_to_cpu(path->usDeviceTag));
|
||||
continue;
|
||||
}
|
||||
|
||||
connector_type =
|
||||
object_connector_convert[con_obj_id];
|
||||
connector_object_id = con_obj_id;
|
||||
|
|
|
@ -200,16 +200,7 @@ static int amdgpu_atpx_validate(struct amdgpu_atpx *atpx)
|
|||
atpx->is_hybrid = false;
|
||||
if (valid_bits & ATPX_MS_HYBRID_GFX_SUPPORTED) {
|
||||
printk("ATPX Hybrid Graphics\n");
|
||||
#if 1
|
||||
/* This is a temporary hack until the D3 cold support
|
||||
* makes it upstream. The ATPX power_control method seems
|
||||
* to still work on even if the system should be using
|
||||
* the new standardized hybrid D3 cold ACPI interface.
|
||||
*/
|
||||
atpx->functions.power_cntl = true;
|
||||
#else
|
||||
atpx->functions.power_cntl = false;
|
||||
#endif
|
||||
atpx->is_hybrid = true;
|
||||
}
|
||||
|
||||
|
|
|
@ -221,7 +221,7 @@ void amdgpu_gart_table_vram_free(struct amdgpu_device *adev)
|
|||
* Unbinds the requested pages from the gart page table and
|
||||
* replaces them with the dummy page (all asics).
|
||||
*/
|
||||
void amdgpu_gart_unbind(struct amdgpu_device *adev, unsigned offset,
|
||||
void amdgpu_gart_unbind(struct amdgpu_device *adev, uint64_t offset,
|
||||
int pages)
|
||||
{
|
||||
unsigned t;
|
||||
|
@ -268,7 +268,7 @@ void amdgpu_gart_unbind(struct amdgpu_device *adev, unsigned offset,
|
|||
* (all asics).
|
||||
* Returns 0 for success, -EINVAL for failure.
|
||||
*/
|
||||
int amdgpu_gart_bind(struct amdgpu_device *adev, unsigned offset,
|
||||
int amdgpu_gart_bind(struct amdgpu_device *adev, uint64_t offset,
|
||||
int pages, struct page **pagelist, dma_addr_t *dma_addr,
|
||||
uint32_t flags)
|
||||
{
|
||||
|
|
|
@ -251,8 +251,8 @@ static int amdgpu_move_blit(struct ttm_buffer_object *bo,
|
|||
|
||||
adev = amdgpu_get_adev(bo->bdev);
|
||||
ring = adev->mman.buffer_funcs_ring;
|
||||
old_start = old_mem->start << PAGE_SHIFT;
|
||||
new_start = new_mem->start << PAGE_SHIFT;
|
||||
old_start = (u64)old_mem->start << PAGE_SHIFT;
|
||||
new_start = (u64)new_mem->start << PAGE_SHIFT;
|
||||
|
||||
switch (old_mem->mem_type) {
|
||||
case TTM_PL_VRAM:
|
||||
|
@ -950,6 +950,8 @@ static struct list_head *amdgpu_ttm_lru_tail(struct ttm_buffer_object *tbo)
|
|||
struct list_head *res = lru->lru[tbo->mem.mem_type];
|
||||
|
||||
lru->lru[tbo->mem.mem_type] = &tbo->lru;
|
||||
while ((++lru)->lru[tbo->mem.mem_type] == res)
|
||||
lru->lru[tbo->mem.mem_type] = &tbo->lru;
|
||||
|
||||
return res;
|
||||
}
|
||||
|
@ -960,6 +962,8 @@ static struct list_head *amdgpu_ttm_swap_lru_tail(struct ttm_buffer_object *tbo)
|
|||
struct list_head *res = lru->swap_lru;
|
||||
|
||||
lru->swap_lru = &tbo->swap;
|
||||
while ((++lru)->swap_lru == res)
|
||||
lru->swap_lru = &tbo->swap;
|
||||
|
||||
return res;
|
||||
}
|
||||
|
@ -1011,6 +1015,10 @@ int amdgpu_ttm_init(struct amdgpu_device *adev)
|
|||
lru->swap_lru = &adev->mman.bdev.glob->swap_lru;
|
||||
}
|
||||
|
||||
for (j = 0; j < TTM_NUM_MEM_TYPES; ++j)
|
||||
adev->mman.guard.lru[j] = NULL;
|
||||
adev->mman.guard.swap_lru = NULL;
|
||||
|
||||
adev->mman.initialized = true;
|
||||
r = ttm_bo_init_mm(&adev->mman.bdev, TTM_PL_VRAM,
|
||||
adev->mc.real_vram_size >> PAGE_SHIFT);
|
||||
|
|
|
@@ -1187,7 +1187,8 @@ int amdgpu_uvd_ring_test_ib(struct amdgpu_ring *ring, long timeout)
		r = 0;
	}

error:
	fence_put(fence);

error:
	return r;
}
|
|
@ -1535,7 +1535,7 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm)
|
|||
r = amd_sched_entity_init(&ring->sched, &vm->entity,
|
||||
rq, amdgpu_sched_jobs);
|
||||
if (r)
|
||||
return r;
|
||||
goto err;
|
||||
|
||||
vm->page_directory_fence = NULL;
|
||||
|
||||
|
@ -1565,6 +1565,9 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm)
|
|||
error_free_sched_entity:
|
||||
amd_sched_entity_fini(&ring->sched, &vm->entity);
|
||||
|
||||
err:
|
||||
drm_free_large(vm->page_tables);
|
||||
|
||||
return r;
|
||||
}
|
||||
|
||||
|
|
|
@ -714,7 +714,7 @@ static int sdma_v2_4_ring_test_ib(struct amdgpu_ring *ring, long timeout)
|
|||
DRM_ERROR("amdgpu: IB test timed out\n");
|
||||
r = -ETIMEDOUT;
|
||||
goto err1;
|
||||
} else if (r) {
|
||||
} else if (r < 0) {
|
||||
DRM_ERROR("amdgpu: fence wait failed (%ld).\n", r);
|
||||
goto err1;
|
||||
}
|
||||
|
|
|
@@ -184,7 +184,7 @@ u32 __iomem *kfd_get_kernel_doorbell(struct kfd_dev *kfd,
					sizeof(u32)) + inx;

	pr_debug("kfd: get kernel queue doorbell\n"
			 "     doorbell offset   == 0x%08d\n"
			 "     doorbell offset   == 0x%08X\n"
			 "     kernel address    == 0x%08lX\n",
		*doorbell_off, (uintptr_t)(kfd->doorbell_kernel_ptr + inx));
|
|
|
@@ -405,7 +405,7 @@ void amd_sched_job_recovery(struct amd_gpu_scheduler *sched)
	spin_lock(&sched->job_list_lock);
	s_job = list_first_entry_or_null(&sched->ring_mirror_list,
					 struct amd_sched_job, node);
	if (s_job)
	if (s_job && sched->timeout != MAX_SCHEDULE_TIMEOUT)
		schedule_delayed_work(&s_job->work_tdr, sched->timeout);

	list_for_each_entry_safe(s_job, tmp, &sched->ring_mirror_list, node) {
|
|
@@ -475,7 +475,7 @@ int drm_atomic_crtc_set_property(struct drm_crtc *crtc,
					val,
					-1,
					&replaced);
		state->color_mgmt_changed = replaced;
		state->color_mgmt_changed |= replaced;
		return ret;
	} else if (property == config->ctm_property) {
		ret = drm_atomic_replace_property_blob_from_id(crtc,

@@ -483,7 +483,7 @@ int drm_atomic_crtc_set_property(struct drm_crtc *crtc,
					val,
					sizeof(struct drm_color_ctm),
					&replaced);
		state->color_mgmt_changed = replaced;
		state->color_mgmt_changed |= replaced;
		return ret;
	} else if (property == config->gamma_lut_property) {
		ret = drm_atomic_replace_property_blob_from_id(crtc,

@@ -491,7 +491,7 @@ int drm_atomic_crtc_set_property(struct drm_crtc *crtc,
					val,
					-1,
					&replaced);
		state->color_mgmt_changed = replaced;
		state->color_mgmt_changed |= replaced;
		return ret;
	} else if (crtc->funcs->atomic_set_property)
		return crtc->funcs->atomic_set_property(crtc, state, property, val);
|
|