mirror of https://gitee.com/openkylin/linux.git
Linux 3.12-rc6
-----BEGIN PGP SIGNATURE-----
Version: GnuPG v1.4.15 (GNU/Linux)

iQEcBAABAgAGBQJSYt1vAAoJEHm+PkMAQRiGSosH/RrKaWc9bgfq7AoEXUC3vnhZ
MX1btSuXMQadTQ8HQo3U33JdWQlqgz8Rj1rOEXPGw/7W3Oyog+jiWmqhdt4C4CKc
74/RU1y+Rp+tynyUZyq08eaPo0iXTF/pFVuFFhPlqZbDlchjDKg0K3F+SynJlh92
gD6liV/UjQZExfYGZ8KpoIMfA0cB8jqIYeOCiRu5uaoCz4xWXML2vWWkA4/15gWd
8K12QVoaH5YcGK6JzkIJGiL2tcUrojTRZIGi3ERozfi7ulkIn14DoRpWZv5Q54/z
FzNwgfNgcmo6q8ZdscCZa3kaA83BP4W40lYIZyZhpN7i5p0/1Dcw+DgOA2OqSx4=
=pOPR
-----END PGP SIGNATURE-----

Merge tag 'v3.12-rc6' into devel

Linux 3.12-rc6

Conflicts:
	drivers/gpio/gpio-lynxpoint.c
commit b41fb43911
@ -37,8 +37,8 @@ Description:
that the USB device has been connected to the machine. This
file is read-only.
Users:
PowerTOP <power@bughost.org>
http://www.lesswatts.org/projects/powertop/
PowerTOP <powertop@lists.01.org>
https://01.org/powertop/

What: /sys/bus/usb/device/.../power/active_duration
Date: January 2008

@ -57,8 +57,8 @@ Description:
will give an integer percentage. Note that this does not
account for counter wrap.
Users:
PowerTOP <power@bughost.org>
http://www.lesswatts.org/projects/powertop/
PowerTOP <powertop@lists.01.org>
https://01.org/powertop/

What: /sys/bus/usb/devices/<busnum>-<port[.port]>...:<config num>-<interface num>/supports_autosuspend
Date: January 2008

@ -1,6 +1,6 @@
What: /sys/devices/.../power/
Date: January 2009
Contact: Rafael J. Wysocki <rjw@sisk.pl>
Contact: Rafael J. Wysocki <rjw@rjwysocki.net>
Description:
The /sys/devices/.../power directory contains attributes
allowing the user space to check and modify some power

@ -8,7 +8,7 @@ Description:

What: /sys/devices/.../power/wakeup
Date: January 2009
Contact: Rafael J. Wysocki <rjw@sisk.pl>
Contact: Rafael J. Wysocki <rjw@rjwysocki.net>
Description:
The /sys/devices/.../power/wakeup attribute allows the user
space to check if the device is enabled to wake up the system

@ -34,7 +34,7 @@ Description:

What: /sys/devices/.../power/control
Date: January 2009
Contact: Rafael J. Wysocki <rjw@sisk.pl>
Contact: Rafael J. Wysocki <rjw@rjwysocki.net>
Description:
The /sys/devices/.../power/control attribute allows the user
space to control the run-time power management of the device.

@ -53,7 +53,7 @@ Description:

What: /sys/devices/.../power/async
Date: January 2009
Contact: Rafael J. Wysocki <rjw@sisk.pl>
Contact: Rafael J. Wysocki <rjw@rjwysocki.net>
Description:
The /sys/devices/.../async attribute allows the user space to
enable or diasble the device's suspend and resume callbacks to

@ -79,7 +79,7 @@ Description:

What: /sys/devices/.../power/wakeup_count
Date: September 2010
Contact: Rafael J. Wysocki <rjw@sisk.pl>
Contact: Rafael J. Wysocki <rjw@rjwysocki.net>
Description:
The /sys/devices/.../wakeup_count attribute contains the number
of signaled wakeup events associated with the device. This

@ -88,7 +88,7 @@ Description:

What: /sys/devices/.../power/wakeup_active_count
Date: September 2010
Contact: Rafael J. Wysocki <rjw@sisk.pl>
Contact: Rafael J. Wysocki <rjw@rjwysocki.net>
Description:
The /sys/devices/.../wakeup_active_count attribute contains the
number of times the processing of wakeup events associated with

@ -98,7 +98,7 @@ Description:

What: /sys/devices/.../power/wakeup_abort_count
Date: February 2012
Contact: Rafael J. Wysocki <rjw@sisk.pl>
Contact: Rafael J. Wysocki <rjw@rjwysocki.net>
Description:
The /sys/devices/.../wakeup_abort_count attribute contains the
number of times the processing of a wakeup event associated with

@ -109,7 +109,7 @@ Description:

What: /sys/devices/.../power/wakeup_expire_count
Date: February 2012
Contact: Rafael J. Wysocki <rjw@sisk.pl>
Contact: Rafael J. Wysocki <rjw@rjwysocki.net>
Description:
The /sys/devices/.../wakeup_expire_count attribute contains the
number of times a wakeup event associated with the device has

@ -119,7 +119,7 @@ Description:

What: /sys/devices/.../power/wakeup_active
Date: September 2010
Contact: Rafael J. Wysocki <rjw@sisk.pl>
Contact: Rafael J. Wysocki <rjw@rjwysocki.net>
Description:
The /sys/devices/.../wakeup_active attribute contains either 1,
or 0, depending on whether or not a wakeup event associated with

@ -129,7 +129,7 @@ Description:

What: /sys/devices/.../power/wakeup_total_time_ms
Date: September 2010
Contact: Rafael J. Wysocki <rjw@sisk.pl>
Contact: Rafael J. Wysocki <rjw@rjwysocki.net>
Description:
The /sys/devices/.../wakeup_total_time_ms attribute contains
the total time of processing wakeup events associated with the

@ -139,7 +139,7 @@ Description:

What: /sys/devices/.../power/wakeup_max_time_ms
Date: September 2010
Contact: Rafael J. Wysocki <rjw@sisk.pl>
Contact: Rafael J. Wysocki <rjw@rjwysocki.net>
Description:
The /sys/devices/.../wakeup_max_time_ms attribute contains
the maximum time of processing a single wakeup event associated

@ -149,7 +149,7 @@ Description:

What: /sys/devices/.../power/wakeup_last_time_ms
Date: September 2010
Contact: Rafael J. Wysocki <rjw@sisk.pl>
Contact: Rafael J. Wysocki <rjw@rjwysocki.net>
Description:
The /sys/devices/.../wakeup_last_time_ms attribute contains
the value of the monotonic clock corresponding to the time of

@ -160,7 +160,7 @@ Description:

What: /sys/devices/.../power/wakeup_prevent_sleep_time_ms
Date: February 2012
Contact: Rafael J. Wysocki <rjw@sisk.pl>
Contact: Rafael J. Wysocki <rjw@rjwysocki.net>
Description:
The /sys/devices/.../wakeup_prevent_sleep_time_ms attribute
contains the total time the device has been preventing

@ -189,7 +189,7 @@ Description:

What: /sys/devices/.../power/pm_qos_latency_us
Date: March 2012
Contact: Rafael J. Wysocki <rjw@sisk.pl>
Contact: Rafael J. Wysocki <rjw@rjwysocki.net>
Description:
The /sys/devices/.../power/pm_qos_resume_latency_us attribute
contains the PM QoS resume latency limit for the given device,

@ -207,7 +207,7 @@ Description:

What: /sys/devices/.../power/pm_qos_no_power_off
Date: September 2012
Contact: Rafael J. Wysocki <rjw@sisk.pl>
Contact: Rafael J. Wysocki <rjw@rjwysocki.net>
Description:
The /sys/devices/.../power/pm_qos_no_power_off attribute
is used for manipulating the PM QoS "no power off" flag. If

@ -222,7 +222,7 @@ Description:

What: /sys/devices/.../power/pm_qos_remote_wakeup
Date: September 2012
Contact: Rafael J. Wysocki <rjw@sisk.pl>
Contact: Rafael J. Wysocki <rjw@rjwysocki.net>
Description:
The /sys/devices/.../power/pm_qos_remote_wakeup attribute
is used for manipulating the PM QoS "remote wakeup required"

@ -1,6 +1,6 @@
What: /sys/power/
Date: August 2006
Contact: Rafael J. Wysocki <rjw@sisk.pl>
Contact: Rafael J. Wysocki <rjw@rjwysocki.net>
Description:
The /sys/power directory will contain files that will
provide a unified interface to the power management

@ -8,7 +8,7 @@ Description:

What: /sys/power/state
Date: August 2006
Contact: Rafael J. Wysocki <rjw@sisk.pl>
Contact: Rafael J. Wysocki <rjw@rjwysocki.net>
Description:
The /sys/power/state file controls the system power state.
Reading from this file returns what states are supported,

@ -22,7 +22,7 @@ Description:

What: /sys/power/disk
Date: September 2006
Contact: Rafael J. Wysocki <rjw@sisk.pl>
Contact: Rafael J. Wysocki <rjw@rjwysocki.net>
Description:
The /sys/power/disk file controls the operating mode of the
suspend-to-disk mechanism. Reading from this file returns

@ -67,7 +67,7 @@ Description:

What: /sys/power/image_size
Date: August 2006
Contact: Rafael J. Wysocki <rjw@sisk.pl>
Contact: Rafael J. Wysocki <rjw@rjwysocki.net>
Description:
The /sys/power/image_size file controls the size of the image
created by the suspend-to-disk mechanism. It can be written a

@ -84,7 +84,7 @@ Description:

What: /sys/power/pm_trace
Date: August 2006
Contact: Rafael J. Wysocki <rjw@sisk.pl>
Contact: Rafael J. Wysocki <rjw@rjwysocki.net>
Description:
The /sys/power/pm_trace file controls the code which saves the
last PM event point in the RTC across reboots, so that you can

@ -133,7 +133,7 @@ Description:

What: /sys/power/pm_async
Date: January 2009
Contact: Rafael J. Wysocki <rjw@sisk.pl>
Contact: Rafael J. Wysocki <rjw@rjwysocki.net>
Description:
The /sys/power/pm_async file controls the switch allowing the
user space to enable or disable asynchronous suspend and resume

@ -146,7 +146,7 @@ Description:

What: /sys/power/wakeup_count
Date: July 2010
Contact: Rafael J. Wysocki <rjw@sisk.pl>
Contact: Rafael J. Wysocki <rjw@rjwysocki.net>
Description:
The /sys/power/wakeup_count file allows user space to put the
system into a sleep state while taking into account the

@ -161,7 +161,7 @@ Description:

What: /sys/power/reserved_size
Date: May 2011
Contact: Rafael J. Wysocki <rjw@sisk.pl>
Contact: Rafael J. Wysocki <rjw@rjwysocki.net>
Description:
The /sys/power/reserved_size file allows user space to control
the amount of memory reserved for allocations made by device

@ -175,7 +175,7 @@ Description:

What: /sys/power/autosleep
Date: April 2012
Contact: Rafael J. Wysocki <rjw@sisk.pl>
Contact: Rafael J. Wysocki <rjw@rjwysocki.net>
Description:
The /sys/power/autosleep file can be written one of the strings
returned by reads from /sys/power/state. If that happens, a

@ -192,7 +192,7 @@ Description:

What: /sys/power/wake_lock
Date: February 2012
Contact: Rafael J. Wysocki <rjw@sisk.pl>
Contact: Rafael J. Wysocki <rjw@rjwysocki.net>
Description:
The /sys/power/wake_lock file allows user space to create
wakeup source objects and activate them on demand (if one of

@ -219,7 +219,7 @@ Description:

What: /sys/power/wake_unlock
Date: February 2012
Contact: Rafael J. Wysocki <rjw@sisk.pl>
Contact: Rafael J. Wysocki <rjw@rjwysocki.net>
Description:
The /sys/power/wake_unlock file allows user space to deactivate
wakeup sources created with the help of /sys/power/wake_lock.

@ -4,4 +4,4 @@ CONFIG_ACPI_CUSTOM_DSDT builds the image into the kernel.

When to use this method is described in detail on the
Linux/ACPI home page:
http://www.lesswatts.org/projects/acpi/overridingDSDT.php
https://01.org/linux-acpi/documentation/overriding-dsdt

@ -1,168 +0,0 @@
|
|||
*** Memory binding ***
|
||||
|
||||
The /memory node provides basic information about the address and size
|
||||
of the physical memory. This node is usually filled or updated by the
|
||||
bootloader, depending on the actual memory configuration of the given
|
||||
hardware.
|
||||
|
||||
The memory layout is described by the following node:
|
||||
|
||||
/ {
|
||||
#address-cells = <(n)>;
|
||||
#size-cells = <(m)>;
|
||||
memory {
|
||||
device_type = "memory";
|
||||
reg = <(baseaddr1) (size1)
|
||||
(baseaddr2) (size2)
|
||||
...
|
||||
(baseaddrN) (sizeN)>;
|
||||
};
|
||||
...
|
||||
};
|
||||
|
||||
A memory node follows the typical device tree rules for "reg" property:
|
||||
n: number of cells used to store base address value
|
||||
m: number of cells used to store size value
|
||||
baseaddrX: defines a base address of the defined memory bank
|
||||
sizeX: the size of the defined memory bank
|
||||
|
||||
|
||||
More than one memory bank can be defined.
|
||||
|
||||
|
||||
*** Reserved memory regions ***
|
||||
|
||||
In /memory/reserved-memory node one can create child nodes describing
|
||||
particular reserved (excluded from normal use) memory regions. Such
|
||||
memory regions are usually designed for the special usage by various
|
||||
device drivers. A good example are contiguous memory allocations or
|
||||
memory sharing with other operating system on the same hardware board.
|
||||
Those special memory regions might depend on the board configuration and
|
||||
devices used on the target system.
|
||||
|
||||
Parameters for each memory region can be encoded into the device tree
|
||||
with the following convention:
|
||||
|
||||
[(label):] (name) {
|
||||
compatible = "linux,contiguous-memory-region", "reserved-memory-region";
|
||||
reg = <(address) (size)>;
|
||||
(linux,default-contiguous-region);
|
||||
};
|
||||
|
||||
compatible: one or more of:
|
||||
- "linux,contiguous-memory-region" - enables binding of this
|
||||
region to Contiguous Memory Allocator (special region for
|
||||
contiguous memory allocations, shared with movable system
|
||||
memory, Linux kernel-specific).
|
||||
- "reserved-memory-region" - compatibility is defined, given
|
||||
region is assigned for exclusive usage for by the respective
|
||||
devices.
|
||||
|
||||
reg: standard property defining the base address and size of
|
||||
the memory region
|
||||
|
||||
linux,default-contiguous-region: property indicating that the region
|
||||
is the default region for all contiguous memory
|
||||
allocations, Linux specific (optional)
|
||||
|
||||
It is optional to specify the base address, so if one wants to use
|
||||
autoconfiguration of the base address, '0' can be specified as a base
|
||||
address in the 'reg' property.
|
||||
|
||||
The /memory/reserved-memory node must contain the same #address-cells
|
||||
and #size-cells value as the root node.
|
||||
|
||||
|
||||
*** Device node's properties ***
|
||||
|
||||
Once regions in the /memory/reserved-memory node have been defined, they
|
||||
may be referenced by other device nodes. Bindings that wish to reference
|
||||
memory regions should explicitly document their use of the following
|
||||
property:
|
||||
|
||||
memory-region = <&phandle_to_defined_region>;
|
||||
|
||||
This property indicates that the device driver should use the memory
|
||||
region pointed by the given phandle.
|
||||
|
||||
|
||||
*** Example ***
|
||||
|
||||
This example defines a memory consisting of 4 memory banks. 3 contiguous
|
||||
regions are defined for Linux kernel, one default of all device drivers
|
||||
(named contig_mem, placed at 0x72000000, 64MiB), one dedicated to the
|
||||
framebuffer device (labelled display_mem, placed at 0x78000000, 8MiB)
|
||||
and one for multimedia processing (labelled multimedia_mem, placed at
|
||||
0x77000000, 64MiB). 'display_mem' region is then assigned to fb@12300000
|
||||
device for DMA memory allocations (Linux kernel drivers will use CMA is
|
||||
available or dma-exclusive usage otherwise). 'multimedia_mem' is
|
||||
assigned to scaler@12500000 and codec@12600000 devices for contiguous
|
||||
memory allocations when CMA driver is enabled.
|
||||
|
||||
The reason for creating a separate region for framebuffer device is to
|
||||
match the framebuffer base address to the one configured by bootloader,
|
||||
so once Linux kernel drivers starts no glitches on the displayed boot
|
||||
logo appears. Scaller and codec drivers should share the memory
|
||||
allocations.
|
||||
|
||||
/ {
|
||||
#address-cells = <1>;
|
||||
#size-cells = <1>;
|
||||
|
||||
/* ... */
|
||||
|
||||
memory {
|
||||
reg = <0x40000000 0x10000000
|
||||
0x50000000 0x10000000
|
||||
0x60000000 0x10000000
|
||||
0x70000000 0x10000000>;
|
||||
|
||||
reserved-memory {
|
||||
#address-cells = <1>;
|
||||
#size-cells = <1>;
|
||||
|
||||
/*
|
||||
* global autoconfigured region for contiguous allocations
|
||||
* (used only with Contiguous Memory Allocator)
|
||||
*/
|
||||
contig_region@0 {
|
||||
compatible = "linux,contiguous-memory-region";
|
||||
reg = <0x0 0x4000000>;
|
||||
linux,default-contiguous-region;
|
||||
};
|
||||
|
||||
/*
|
||||
* special region for framebuffer
|
||||
*/
|
||||
display_region: region@78000000 {
|
||||
compatible = "linux,contiguous-memory-region", "reserved-memory-region";
|
||||
reg = <0x78000000 0x800000>;
|
||||
};
|
||||
|
||||
/*
|
||||
* special region for multimedia processing devices
|
||||
*/
|
||||
multimedia_region: region@77000000 {
|
||||
compatible = "linux,contiguous-memory-region";
|
||||
reg = <0x77000000 0x4000000>;
|
||||
};
|
||||
};
|
||||
};
|
||||
|
||||
/* ... */
|
||||
|
||||
fb0: fb@12300000 {
|
||||
status = "okay";
|
||||
memory-region = <&display_region>;
|
||||
};
|
||||
|
||||
scaler: scaler@12500000 {
|
||||
status = "okay";
|
||||
memory-region = <&multimedia_region>;
|
||||
};
|
||||
|
||||
codec: codec@12600000 {
|
||||
status = "okay";
|
||||
memory-region = <&multimedia_region>;
|
||||
};
|
||||
};
|
|
@ -28,6 +28,7 @@ ALC269/270/275/276/28x/29x
alc269-dmic Enable ALC269(VA) digital mic workaround
alc271-dmic Enable ALC271X digital mic workaround
inv-dmic Inverted internal mic workaround
headset-mic Indicates a combined headset (headphone+mic) jack
lenovo-dock Enables docking station I/O for some Lenovos
dell-headset-multi Headset jack, which can also be used as mic-in
dell-headset-dock Headset jack (without mic-in), and also dock I/O

MAINTAINERS
|
@ -237,11 +237,11 @@ F: drivers/platform/x86/acer-wmi.c
|
|||
|
||||
ACPI
|
||||
M: Len Brown <lenb@kernel.org>
|
||||
M: Rafael J. Wysocki <rjw@sisk.pl>
|
||||
M: Rafael J. Wysocki <rjw@rjwysocki.net>
|
||||
L: linux-acpi@vger.kernel.org
|
||||
W: http://www.lesswatts.org/projects/acpi/
|
||||
Q: http://patchwork.kernel.org/project/linux-acpi/list/
|
||||
T: git git://git.kernel.org/pub/scm/linux/kernel/git/lenb/linux
|
||||
W: https://01.org/linux-acpi
|
||||
Q: https://patchwork.kernel.org/project/linux-acpi/list/
|
||||
T: git git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm
|
||||
S: Supported
|
||||
F: drivers/acpi/
|
||||
F: drivers/pnp/pnpacpi/
|
||||
|
@ -256,21 +256,21 @@ F: drivers/pci/*/*/*acpi*
|
|||
ACPI FAN DRIVER
|
||||
M: Zhang Rui <rui.zhang@intel.com>
|
||||
L: linux-acpi@vger.kernel.org
|
||||
W: http://www.lesswatts.org/projects/acpi/
|
||||
W: https://01.org/linux-acpi
|
||||
S: Supported
|
||||
F: drivers/acpi/fan.c
|
||||
|
||||
ACPI THERMAL DRIVER
|
||||
M: Zhang Rui <rui.zhang@intel.com>
|
||||
L: linux-acpi@vger.kernel.org
|
||||
W: http://www.lesswatts.org/projects/acpi/
|
||||
W: https://01.org/linux-acpi
|
||||
S: Supported
|
||||
F: drivers/acpi/*thermal*
|
||||
|
||||
ACPI VIDEO DRIVER
|
||||
M: Zhang Rui <rui.zhang@intel.com>
|
||||
L: linux-acpi@vger.kernel.org
|
||||
W: http://www.lesswatts.org/projects/acpi/
|
||||
W: https://01.org/linux-acpi
|
||||
S: Supported
|
||||
F: drivers/acpi/video.c
|
||||
|
||||
|
@ -2300,7 +2300,7 @@ S: Maintained
|
|||
F: drivers/net/ethernet/ti/cpmac.c
|
||||
|
||||
CPU FREQUENCY DRIVERS
|
||||
M: Rafael J. Wysocki <rjw@sisk.pl>
|
||||
M: Rafael J. Wysocki <rjw@rjwysocki.net>
|
||||
M: Viresh Kumar <viresh.kumar@linaro.org>
|
||||
L: cpufreq@vger.kernel.org
|
||||
L: linux-pm@vger.kernel.org
|
||||
|
@ -2331,7 +2331,7 @@ S: Maintained
|
|||
F: drivers/cpuidle/cpuidle-big_little.c
|
||||
|
||||
CPUIDLE DRIVERS
|
||||
M: Rafael J. Wysocki <rjw@sisk.pl>
|
||||
M: Rafael J. Wysocki <rjw@rjwysocki.net>
|
||||
M: Daniel Lezcano <daniel.lezcano@linaro.org>
|
||||
L: linux-pm@vger.kernel.org
|
||||
S: Maintained
|
||||
|
@ -3553,7 +3553,7 @@ F: fs/freevxfs/
|
|||
|
||||
FREEZER
|
||||
M: Pavel Machek <pavel@ucw.cz>
|
||||
M: "Rafael J. Wysocki" <rjw@sisk.pl>
|
||||
M: "Rafael J. Wysocki" <rjw@rjwysocki.net>
|
||||
L: linux-pm@vger.kernel.org
|
||||
S: Supported
|
||||
F: Documentation/power/freezing-of-tasks.txt
|
||||
|
@ -3624,6 +3624,12 @@ L: linux-scsi@vger.kernel.org
|
|||
S: Odd Fixes (e.g., new signatures)
|
||||
F: drivers/scsi/fdomain.*
|
||||
|
||||
GCOV BASED KERNEL PROFILING
|
||||
M: Peter Oberparleiter <oberpar@linux.vnet.ibm.com>
|
||||
S: Maintained
|
||||
F: kernel/gcov/
|
||||
F: Documentation/gcov.txt
|
||||
|
||||
GDT SCSI DISK ARRAY CONTROLLER DRIVER
|
||||
M: Achim Leubner <achim_leubner@adaptec.com>
|
||||
L: linux-scsi@vger.kernel.org
|
||||
|
@ -3889,7 +3895,7 @@ F: drivers/video/hgafb.c
|
|||
|
||||
HIBERNATION (aka Software Suspend, aka swsusp)
|
||||
M: Pavel Machek <pavel@ucw.cz>
|
||||
M: "Rafael J. Wysocki" <rjw@sisk.pl>
|
||||
M: "Rafael J. Wysocki" <rjw@rjwysocki.net>
|
||||
L: linux-pm@vger.kernel.org
|
||||
S: Supported
|
||||
F: arch/x86/power/
|
||||
|
@ -4339,7 +4345,7 @@ F: drivers/video/i810/
|
|||
INTEL MENLOW THERMAL DRIVER
|
||||
M: Sujith Thomas <sujith.thomas@intel.com>
|
||||
L: platform-driver-x86@vger.kernel.org
|
||||
W: http://www.lesswatts.org/projects/acpi/
|
||||
W: https://01.org/linux-acpi
|
||||
S: Supported
|
||||
F: drivers/platform/x86/intel_menlow.c
|
||||
|
||||
|
@ -8101,7 +8107,7 @@ F: drivers/sh/
|
|||
SUSPEND TO RAM
|
||||
M: Len Brown <len.brown@intel.com>
|
||||
M: Pavel Machek <pavel@ucw.cz>
|
||||
M: "Rafael J. Wysocki" <rjw@sisk.pl>
|
||||
M: "Rafael J. Wysocki" <rjw@rjwysocki.net>
|
||||
L: linux-pm@vger.kernel.org
|
||||
S: Supported
|
||||
F: Documentation/power/
|
||||
|
|
Makefile
@ -1,7 +1,7 @@
VERSION = 3
PATCHLEVEL = 12
SUBLEVEL = 0
EXTRAVERSION = -rc4
EXTRAVERSION = -rc6
NAME = One Giant Leap for Frogkind

# *DOCUMENTATION*

|
|
@ -102,7 +102,7 @@ static int genregs_set(struct task_struct *target,
|
|||
REG_IGNORE_ONE(pad2);
|
||||
REG_IN_CHUNK(callee, efa, cregs); /* callee_regs[r25..r13] */
|
||||
REG_IGNORE_ONE(efa); /* efa update invalid */
|
||||
REG_IN_ONE(stop_pc, &ptregs->ret); /* stop_pc: PC update */
|
||||
REG_IGNORE_ONE(stop_pc); /* PC updated via @ret */
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
|
|
@ -296,10 +296,15 @@ archprepare:
|
|||
# Convert bzImage to zImage
|
||||
bzImage: zImage
|
||||
|
||||
zImage Image xipImage bootpImage uImage: vmlinux
|
||||
BOOT_TARGETS = zImage Image xipImage bootpImage uImage
|
||||
INSTALL_TARGETS = zinstall uinstall install
|
||||
|
||||
PHONY += bzImage $(BOOT_TARGETS) $(INSTALL_TARGETS)
|
||||
|
||||
$(BOOT_TARGETS): vmlinux
|
||||
$(Q)$(MAKE) $(build)=$(boot) MACHINE=$(MACHINE) $(boot)/$@
|
||||
|
||||
zinstall uinstall install: vmlinux
|
||||
$(INSTALL_TARGETS):
|
||||
$(Q)$(MAKE) $(build)=$(boot) MACHINE=$(MACHINE) $@
|
||||
|
||||
%.dtb: | scripts
|
||||
|
|
|
@ -95,24 +95,24 @@ initrd:
|
|||
@test "$(INITRD)" != "" || \
|
||||
(echo You must specify INITRD; exit -1)
|
||||
|
||||
install: $(obj)/Image
|
||||
$(CONFIG_SHELL) $(srctree)/$(src)/install.sh $(KERNELRELEASE) \
|
||||
install:
|
||||
$(CONFIG_SHELL) $(srctree)/$(src)/install.sh "$(KERNELRELEASE)" \
|
||||
$(obj)/Image System.map "$(INSTALL_PATH)"
|
||||
|
||||
zinstall: $(obj)/zImage
|
||||
$(CONFIG_SHELL) $(srctree)/$(src)/install.sh $(KERNELRELEASE) \
|
||||
zinstall:
|
||||
$(CONFIG_SHELL) $(srctree)/$(src)/install.sh "$(KERNELRELEASE)" \
|
||||
$(obj)/zImage System.map "$(INSTALL_PATH)"
|
||||
|
||||
uinstall: $(obj)/uImage
|
||||
$(CONFIG_SHELL) $(srctree)/$(src)/install.sh $(KERNELRELEASE) \
|
||||
uinstall:
|
||||
$(CONFIG_SHELL) $(srctree)/$(src)/install.sh "$(KERNELRELEASE)" \
|
||||
$(obj)/uImage System.map "$(INSTALL_PATH)"
|
||||
|
||||
zi:
|
||||
$(CONFIG_SHELL) $(srctree)/$(src)/install.sh $(KERNELRELEASE) \
|
||||
$(CONFIG_SHELL) $(srctree)/$(src)/install.sh "$(KERNELRELEASE)" \
|
||||
$(obj)/zImage System.map "$(INSTALL_PATH)"
|
||||
|
||||
i:
|
||||
$(CONFIG_SHELL) $(srctree)/$(src)/install.sh $(KERNELRELEASE) \
|
||||
$(CONFIG_SHELL) $(srctree)/$(src)/install.sh "$(KERNELRELEASE)" \
|
||||
$(obj)/Image System.map "$(INSTALL_PATH)"
|
||||
|
||||
subdir- := bootp compressed dts
|
||||
|
|
|
@ -96,6 +96,11 @@ timer {
|
|||
<1 14 0xf08>,
|
||||
<1 11 0xf08>,
|
||||
<1 10 0xf08>;
|
||||
/* Unfortunately we need this since some versions of U-Boot
|
||||
* on Exynos don't set the CNTFRQ register, so we need the
|
||||
* value from DT.
|
||||
*/
|
||||
clock-frequency = <24000000>;
|
||||
};
|
||||
|
||||
mct@101C0000 {
|
||||
|
|
|
@ -11,7 +11,7 @@
|
|||
|
||||
/ {
|
||||
model = "TI OMAP3 BeagleBoard xM";
|
||||
compatible = "ti,omap3-beagle-xm", "ti,omap3-beagle", "ti,omap3";
|
||||
compatible = "ti,omap3-beagle-xm", "ti,omap36xx", "ti,omap3";
|
||||
|
||||
cpus {
|
||||
cpu@0 {
|
||||
|
|
|
@ -108,7 +108,7 @@ omap3_pmx_core: pinmux@48002030 {
|
|||
#address-cells = <1>;
|
||||
#size-cells = <0>;
|
||||
pinctrl-single,register-width = <16>;
|
||||
pinctrl-single,function-mask = <0x7f1f>;
|
||||
pinctrl-single,function-mask = <0xff1f>;
|
||||
};
|
||||
|
||||
omap3_pmx_wkup: pinmux@0x48002a00 {
|
||||
|
@ -117,7 +117,7 @@ omap3_pmx_wkup: pinmux@0x48002a00 {
|
|||
#address-cells = <1>;
|
||||
#size-cells = <0>;
|
||||
pinctrl-single,register-width = <16>;
|
||||
pinctrl-single,function-mask = <0x7f1f>;
|
||||
pinctrl-single,function-mask = <0xff1f>;
|
||||
};
|
||||
|
||||
gpio1: gpio@48310000 {
|
||||
|
|
|
@ -20,6 +20,20 @@
|
|||
# $4 - default install path (blank if root directory)
|
||||
#
|
||||
|
||||
verify () {
|
||||
if [ ! -f "$1" ]; then
|
||||
echo "" 1>&2
|
||||
echo " *** Missing file: $1" 1>&2
|
||||
echo ' *** You need to run "make" before "make install".' 1>&2
|
||||
echo "" 1>&2
|
||||
exit 1
|
||||
fi
|
||||
}
|
||||
|
||||
# Make sure the files actually exist
|
||||
verify "$2"
|
||||
verify "$3"
|
||||
|
||||
# User may have a custom install script
|
||||
if [ -x ~/bin/${INSTALLKERNEL} ]; then exec ~/bin/${INSTALLKERNEL} "$@"; fi
|
||||
if [ -x /sbin/${INSTALLKERNEL} ]; then exec /sbin/${INSTALLKERNEL} "$@"; fi
|
||||
|
|
|
@ -51,7 +51,8 @@ void mcpm_cpu_power_down(void)
|
|||
{
|
||||
phys_reset_t phys_reset;
|
||||
|
||||
BUG_ON(!platform_ops);
|
||||
if (WARN_ON_ONCE(!platform_ops || !platform_ops->power_down))
|
||||
return;
|
||||
BUG_ON(!irqs_disabled());
|
||||
|
||||
/*
|
||||
|
@ -93,7 +94,8 @@ void mcpm_cpu_suspend(u64 expected_residency)
|
|||
{
|
||||
phys_reset_t phys_reset;
|
||||
|
||||
BUG_ON(!platform_ops);
|
||||
if (WARN_ON_ONCE(!platform_ops || !platform_ops->suspend))
|
||||
return;
|
||||
BUG_ON(!irqs_disabled());
|
||||
|
||||
/* Very similar to mcpm_cpu_power_down() */
|
||||
|
|
|
@ -15,6 +15,7 @@
|
|||
#include <linux/module.h>
|
||||
#include <linux/string.h>
|
||||
#include <asm/mach/sharpsl_param.h>
|
||||
#include <asm/memory.h>
|
||||
|
||||
/*
|
||||
* Certain hardware parameters determined at the time of device manufacture,
|
||||
|
@ -25,8 +26,10 @@
|
|||
*/
|
||||
#ifdef CONFIG_ARCH_SA1100
|
||||
#define PARAM_BASE 0xe8ffc000
|
||||
#define param_start(x) (void *)(x)
|
||||
#else
|
||||
#define PARAM_BASE 0xa0000a00
|
||||
#define param_start(x) __va(x)
|
||||
#endif
|
||||
#define MAGIC_CHG(a,b,c,d) ( ( d << 24 ) | ( c << 16 ) | ( b << 8 ) | a )
|
||||
|
||||
|
@ -41,7 +44,7 @@ EXPORT_SYMBOL(sharpsl_param);
|
|||
|
||||
void sharpsl_save_param(void)
|
||||
{
|
||||
memcpy(&sharpsl_param, (void *)PARAM_BASE, sizeof(struct sharpsl_param_info));
|
||||
memcpy(&sharpsl_param, param_start(PARAM_BASE), sizeof(struct sharpsl_param_info));
|
||||
|
||||
if (sharpsl_param.comadj_keyword != COMADJ_MAGIC)
|
||||
sharpsl_param.comadj=-1;
|
||||
|
|
|
@ -31,5 +31,4 @@ generic-y += termbits.h
|
|||
generic-y += termios.h
|
||||
generic-y += timex.h
|
||||
generic-y += trace_clock.h
|
||||
generic-y += types.h
|
||||
generic-y += unaligned.h
|
||||
|
|
|
@ -16,7 +16,7 @@
|
|||
|
||||
static __always_inline bool arch_static_branch(struct static_key *key)
|
||||
{
|
||||
asm goto("1:\n\t"
|
||||
asm_volatile_goto("1:\n\t"
|
||||
JUMP_LABEL_NOP "\n\t"
|
||||
".pushsection __jump_table, \"aw\"\n\t"
|
||||
".word 1b, %l[l_yes], %c0\n\t"
|
||||
|
|
|
@ -76,8 +76,11 @@ int mcpm_cpu_power_up(unsigned int cpu, unsigned int cluster);
|
|||
*
|
||||
* This must be called with interrupts disabled.
|
||||
*
|
||||
* This does not return. Re-entry in the kernel is expected via
|
||||
* mcpm_entry_point.
|
||||
* On success this does not return. Re-entry in the kernel is expected
|
||||
* via mcpm_entry_point.
|
||||
*
|
||||
* This will return if mcpm_platform_register() has not been called
|
||||
* previously in which case the caller should take appropriate action.
|
||||
*/
|
||||
void mcpm_cpu_power_down(void);
|
||||
|
||||
|
@ -98,8 +101,11 @@ void mcpm_cpu_power_down(void);
|
|||
*
|
||||
* This must be called with interrupts disabled.
|
||||
*
|
||||
* This does not return. Re-entry in the kernel is expected via
|
||||
* mcpm_entry_point.
|
||||
* On success this does not return. Re-entry in the kernel is expected
|
||||
* via mcpm_entry_point.
|
||||
*
|
||||
* This will return if mcpm_platform_register() has not been called
|
||||
* previously in which case the caller should take appropriate action.
|
||||
*/
|
||||
void mcpm_cpu_suspend(u64 expected_residency);
|
||||
|
||||
|
|
|
@ -57,6 +57,9 @@ static inline void syscall_get_arguments(struct task_struct *task,
|
|||
unsigned int i, unsigned int n,
|
||||
unsigned long *args)
|
||||
{
|
||||
if (n == 0)
|
||||
return;
|
||||
|
||||
if (i + n > SYSCALL_MAX_ARGS) {
|
||||
unsigned long *args_bad = args + SYSCALL_MAX_ARGS - i;
|
||||
unsigned int n_bad = n + i - SYSCALL_MAX_ARGS;
|
||||
|
@ -81,6 +84,9 @@ static inline void syscall_set_arguments(struct task_struct *task,
|
|||
unsigned int i, unsigned int n,
|
||||
const unsigned long *args)
|
||||
{
|
||||
if (n == 0)
|
||||
return;
|
||||
|
||||
if (i + n > SYSCALL_MAX_ARGS) {
|
||||
pr_warning("%s called with max args %d, handling only %d\n",
|
||||
__func__, i + n, SYSCALL_MAX_ARGS);
|
||||
|
|
|
@ -487,7 +487,26 @@ __fixup_smp:
|
|||
mrc p15, 0, r0, c0, c0, 5 @ read MPIDR
|
||||
and r0, r0, #0xc0000000 @ multiprocessing extensions and
|
||||
teq r0, #0x80000000 @ not part of a uniprocessor system?
|
||||
moveq pc, lr @ yes, assume SMP
|
||||
bne __fixup_smp_on_up @ no, assume UP
|
||||
|
||||
@ Core indicates it is SMP. Check for Aegis SOC where a single
|
||||
@ Cortex-A9 CPU is present but SMP operations fault.
|
||||
mov r4, #0x41000000
|
||||
orr r4, r4, #0x0000c000
|
||||
orr r4, r4, #0x00000090
|
||||
teq r3, r4 @ Check for ARM Cortex-A9
|
||||
movne pc, lr @ Not ARM Cortex-A9,
|
||||
|
||||
@ If a future SoC *does* use 0x0 as the PERIPH_BASE, then the
|
||||
@ below address check will need to be #ifdef'd or equivalent
|
||||
@ for the Aegis platform.
|
||||
mrc p15, 4, r0, c15, c0 @ get SCU base address
|
||||
teq r0, #0x0 @ '0' on actual UP A9 hardware
|
||||
beq __fixup_smp_on_up @ So its an A9 UP
|
||||
ldr r0, [r0, #4] @ read SCU Config
|
||||
and r0, r0, #0x3 @ number of CPUs
|
||||
teq r0, #0x0 @ is 1?
|
||||
movne pc, lr
|
||||
|
||||
__fixup_smp_on_up:
|
||||
adr r0, 1f
|
||||
|
|
|
@ -129,6 +129,24 @@ DT_MACHINE_START(OMAP3_DT, "Generic OMAP3 (Flattened Device Tree)")
|
|||
.restart = omap3xxx_restart,
|
||||
MACHINE_END
|
||||
|
||||
static const char *omap36xx_boards_compat[] __initdata = {
|
||||
"ti,omap36xx",
|
||||
NULL,
|
||||
};
|
||||
|
||||
DT_MACHINE_START(OMAP36XX_DT, "Generic OMAP36xx (Flattened Device Tree)")
|
||||
.reserve = omap_reserve,
|
||||
.map_io = omap3_map_io,
|
||||
.init_early = omap3630_init_early,
|
||||
.init_irq = omap_intc_of_init,
|
||||
.handle_irq = omap3_intc_handle_irq,
|
||||
.init_machine = omap_generic_init,
|
||||
.init_late = omap3_init_late,
|
||||
.init_time = omap3_sync32k_timer_init,
|
||||
.dt_compat = omap36xx_boards_compat,
|
||||
.restart = omap3xxx_restart,
|
||||
MACHINE_END
|
||||
|
||||
static const char *omap3_gp_boards_compat[] __initdata = {
|
||||
"ti,omap3-beagle",
|
||||
"timll,omap3-devkit8000",
|
||||
|
|
|
@ -167,38 +167,47 @@ static struct lp55xx_led_config rx51_lp5523_led_config[] = {
|
|||
.name = "lp5523:kb1",
|
||||
.chan_nr = 0,
|
||||
.led_current = 50,
|
||||
.max_current = 100,
|
||||
}, {
|
||||
.name = "lp5523:kb2",
|
||||
.chan_nr = 1,
|
||||
.led_current = 50,
|
||||
.max_current = 100,
|
||||
}, {
|
||||
.name = "lp5523:kb3",
|
||||
.chan_nr = 2,
|
||||
.led_current = 50,
|
||||
.max_current = 100,
|
||||
}, {
|
||||
.name = "lp5523:kb4",
|
||||
.chan_nr = 3,
|
||||
.led_current = 50,
|
||||
.max_current = 100,
|
||||
}, {
|
||||
.name = "lp5523:b",
|
||||
.chan_nr = 4,
|
||||
.led_current = 50,
|
||||
.max_current = 100,
|
||||
}, {
|
||||
.name = "lp5523:g",
|
||||
.chan_nr = 5,
|
||||
.led_current = 50,
|
||||
.max_current = 100,
|
||||
}, {
|
||||
.name = "lp5523:r",
|
||||
.chan_nr = 6,
|
||||
.led_current = 50,
|
||||
.max_current = 100,
|
||||
}, {
|
||||
.name = "lp5523:kb5",
|
||||
.chan_nr = 7,
|
||||
.led_current = 50,
|
||||
.max_current = 100,
|
||||
}, {
|
||||
.name = "lp5523:kb6",
|
||||
.chan_nr = 8,
|
||||
.led_current = 50,
|
||||
.max_current = 100,
|
||||
}
|
||||
};
|
||||
|
||||
|
|
|
@ -272,9 +272,19 @@ static int omap2_onenand_setup_async(void __iomem *onenand_base)
|
|||
struct gpmc_timings t;
|
||||
int ret;
|
||||
|
||||
if (gpmc_onenand_data->of_node)
|
||||
if (gpmc_onenand_data->of_node) {
|
||||
gpmc_read_settings_dt(gpmc_onenand_data->of_node,
|
||||
&onenand_async);
|
||||
if (onenand_async.sync_read || onenand_async.sync_write) {
|
||||
if (onenand_async.sync_write)
|
||||
gpmc_onenand_data->flags |=
|
||||
ONENAND_SYNC_READWRITE;
|
||||
else
|
||||
gpmc_onenand_data->flags |= ONENAND_SYNC_READ;
|
||||
onenand_async.sync_read = false;
|
||||
onenand_async.sync_write = false;
|
||||
}
|
||||
}
|
||||
|
||||
omap2_onenand_set_async_mode(onenand_base);
|
||||
|
||||
|
|
|
@ -28,7 +28,7 @@
|
|||
#define OMAP_PULL_UP (1 << 4)
|
||||
#define OMAP_ALTELECTRICALSEL (1 << 5)
|
||||
|
||||
/* 34xx specific mux bit defines */
|
||||
/* omap3/4/5 specific mux bit defines */
|
||||
#define OMAP_INPUT_EN (1 << 8)
|
||||
#define OMAP_OFF_EN (1 << 9)
|
||||
#define OMAP_OFFOUT_EN (1 << 10)
|
||||
|
@ -36,8 +36,6 @@
|
|||
#define OMAP_OFF_PULL_EN (1 << 12)
|
||||
#define OMAP_OFF_PULL_UP (1 << 13)
|
||||
#define OMAP_WAKEUP_EN (1 << 14)
|
||||
|
||||
/* 44xx specific mux bit defines */
|
||||
#define OMAP_WAKEUP_EVENT (1 << 15)
|
||||
|
||||
/* Active pin states */
|
||||
|
|
|
@ -628,7 +628,7 @@ void __init omap4_local_timer_init(void)
|
|||
#endif /* CONFIG_HAVE_ARM_TWD */
|
||||
#endif /* CONFIG_ARCH_OMAP4 */
|
||||
|
||||
#ifdef CONFIG_SOC_OMAP5
|
||||
#if defined(CONFIG_SOC_OMAP5) || defined(CONFIG_SOC_DRA7XX)
|
||||
void __init omap5_realtime_timer_init(void)
|
||||
{
|
||||
omap4_sync32k_timer_init();
|
||||
|
@ -636,7 +636,7 @@ void __init omap5_realtime_timer_init(void)
|
|||
|
||||
clocksource_of_init();
|
||||
}
|
||||
#endif /* CONFIG_SOC_OMAP5 */
|
||||
#endif /* CONFIG_SOC_OMAP5 || CONFIG_SOC_DRA7XX */
|
||||
|
||||
/**
|
||||
* omap_timer_init - build and register timer device with an
|
||||
|
|
|
@ -1232,7 +1232,8 @@ __iommu_create_mapping(struct device *dev, struct page **pages, size_t size)
|
|||
break;
|
||||
|
||||
len = (j - i) << PAGE_SHIFT;
|
||||
ret = iommu_map(mapping->domain, iova, phys, len, 0);
|
||||
ret = iommu_map(mapping->domain, iova, phys, len,
|
||||
IOMMU_READ|IOMMU_WRITE);
|
||||
if (ret < 0)
|
||||
goto fail;
|
||||
iova += len;
|
||||
|
@ -1431,6 +1432,27 @@ static int arm_iommu_get_sgtable(struct device *dev, struct sg_table *sgt,
|
|||
GFP_KERNEL);
|
||||
}
|
||||
|
||||
static int __dma_direction_to_prot(enum dma_data_direction dir)
|
||||
{
|
||||
int prot;
|
||||
|
||||
switch (dir) {
|
||||
case DMA_BIDIRECTIONAL:
|
||||
prot = IOMMU_READ | IOMMU_WRITE;
|
||||
break;
|
||||
case DMA_TO_DEVICE:
|
||||
prot = IOMMU_READ;
|
||||
break;
|
||||
case DMA_FROM_DEVICE:
|
||||
prot = IOMMU_WRITE;
|
||||
break;
|
||||
default:
|
||||
prot = 0;
|
||||
}
|
||||
|
||||
return prot;
|
||||
}
|
||||
|
||||
/*
|
||||
* Map a part of the scatter-gather list into contiguous io address space
|
||||
*/
|
||||
|
@ -1444,6 +1466,7 @@ static int __map_sg_chunk(struct device *dev, struct scatterlist *sg,
|
|||
int ret = 0;
|
||||
unsigned int count;
|
||||
struct scatterlist *s;
|
||||
int prot;
|
||||
|
||||
size = PAGE_ALIGN(size);
|
||||
*handle = DMA_ERROR_CODE;
|
||||
|
@ -1460,7 +1483,9 @@ static int __map_sg_chunk(struct device *dev, struct scatterlist *sg,
|
|||
!dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs))
|
||||
__dma_page_cpu_to_dev(sg_page(s), s->offset, s->length, dir);
|
||||
|
||||
ret = iommu_map(mapping->domain, iova, phys, len, 0);
|
||||
prot = __dma_direction_to_prot(dir);
|
||||
|
||||
ret = iommu_map(mapping->domain, iova, phys, len, prot);
|
||||
if (ret < 0)
|
||||
goto fail;
|
||||
count += len >> PAGE_SHIFT;
|
||||
|
@ -1665,19 +1690,7 @@ static dma_addr_t arm_coherent_iommu_map_page(struct device *dev, struct page *p
|
|||
if (dma_addr == DMA_ERROR_CODE)
|
||||
return dma_addr;
|
||||
|
||||
switch (dir) {
|
||||
case DMA_BIDIRECTIONAL:
|
||||
prot = IOMMU_READ | IOMMU_WRITE;
|
||||
break;
|
||||
case DMA_TO_DEVICE:
|
||||
prot = IOMMU_READ;
|
||||
break;
|
||||
case DMA_FROM_DEVICE:
|
||||
prot = IOMMU_WRITE;
|
||||
break;
|
||||
default:
|
||||
prot = 0;
|
||||
}
|
||||
prot = __dma_direction_to_prot(dir);
|
||||
|
||||
ret = iommu_map(mapping->domain, dma_addr, page_to_phys(page), len, prot);
|
||||
if (ret < 0)
|
||||
|
|
|
@ -17,7 +17,6 @@
|
|||
#include <linux/nodemask.h>
|
||||
#include <linux/initrd.h>
|
||||
#include <linux/of_fdt.h>
|
||||
#include <linux/of_reserved_mem.h>
|
||||
#include <linux/highmem.h>
|
||||
#include <linux/gfp.h>
|
||||
#include <linux/memblock.h>
|
||||
|
@ -379,8 +378,6 @@ void __init arm_memblock_init(struct meminfo *mi,
|
|||
if (mdesc->reserve)
|
||||
mdesc->reserve();
|
||||
|
||||
early_init_dt_scan_reserved_mem();
|
||||
|
||||
/*
|
||||
* reserve memory for DMA contigouos allocations,
|
||||
* must come from DMA area inside low memory
|
||||
|
|
|
@ -22,7 +22,7 @@
|
|||
|
||||
static __always_inline bool arch_static_branch(struct static_key *key)
|
||||
{
|
||||
asm goto("1:\tnop\n\t"
|
||||
asm_volatile_goto("1:\tnop\n\t"
|
||||
"nop\n\t"
|
||||
".pushsection __jump_table, \"aw\"\n\t"
|
||||
WORD_INSN " 1b, %l[l_yes], %0\n\t"
|
||||
|
|
|
@ -73,7 +73,7 @@
|
|||
3:
|
||||
|
||||
#if defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_SMP)
|
||||
PTR_L t8, __stack_chk_guard
|
||||
PTR_LA t8, __stack_chk_guard
|
||||
LONG_L t9, TASK_STACK_CANARY(a1)
|
||||
LONG_S t9, 0(t8)
|
||||
#endif
|
||||
|
|
|
@ -67,7 +67,7 @@ LEAF(resume)
|
|||
1:
|
||||
|
||||
#if defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_SMP)
|
||||
PTR_L t8, __stack_chk_guard
|
||||
PTR_LA t8, __stack_chk_guard
|
||||
LONG_L t9, TASK_STACK_CANARY(a1)
|
||||
LONG_S t9, 0(t8)
|
||||
#endif
|
||||
|
|
|
@ -69,7 +69,7 @@
|
|||
1:
|
||||
|
||||
#if defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_SMP)
|
||||
PTR_L t8, __stack_chk_guard
|
||||
PTR_LA t8, __stack_chk_guard
|
||||
LONG_L t9, TASK_STACK_CANARY(a1)
|
||||
LONG_S t9, 0(t8)
|
||||
#endif
|
||||
|
|
|
@ -6,7 +6,7 @@ struct pt_regs;
|
|||
|
||||
/* traps.c */
|
||||
void parisc_terminate(char *msg, struct pt_regs *regs,
|
||||
int code, unsigned long offset);
|
||||
int code, unsigned long offset) __noreturn __cold;
|
||||
|
||||
/* mm/fault.c */
|
||||
void do_page_fault(struct pt_regs *regs, unsigned long code,
|
||||
|
|
|
@ -602,6 +602,7 @@ flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr, unsigned long
|
|||
__flush_cache_page(vma, vmaddr, PFN_PHYS(pfn));
|
||||
}
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(flush_cache_page);
|
||||
|
||||
#ifdef CONFIG_PARISC_TMPALIAS
|
||||
|
||||
|
|
|
@ -72,7 +72,6 @@ enum ipi_message_type {
|
|||
IPI_NOP=0,
|
||||
IPI_RESCHEDULE=1,
|
||||
IPI_CALL_FUNC,
|
||||
IPI_CALL_FUNC_SINGLE,
|
||||
IPI_CPU_START,
|
||||
IPI_CPU_STOP,
|
||||
IPI_CPU_TEST
|
||||
|
@ -164,11 +163,6 @@ ipi_interrupt(int irq, void *dev_id)
|
|||
generic_smp_call_function_interrupt();
|
||||
break;
|
||||
|
||||
case IPI_CALL_FUNC_SINGLE:
|
||||
smp_debug(100, KERN_DEBUG "CPU%d IPI_CALL_FUNC_SINGLE\n", this_cpu);
|
||||
generic_smp_call_function_single_interrupt();
|
||||
break;
|
||||
|
||||
case IPI_CPU_START:
|
||||
smp_debug(100, KERN_DEBUG "CPU%d IPI_CPU_START\n", this_cpu);
|
||||
break;
|
||||
|
@ -260,7 +254,7 @@ void arch_send_call_function_ipi_mask(const struct cpumask *mask)
|
|||
|
||||
void arch_send_call_function_single_ipi(int cpu)
|
||||
{
|
||||
send_IPI_single(cpu, IPI_CALL_FUNC_SINGLE);
|
||||
send_IPI_single(cpu, IPI_CALL_FUNC);
|
||||
}
|
||||
|
||||
/*
|
||||
|
|
|
@ -291,11 +291,6 @@ void die_if_kernel(char *str, struct pt_regs *regs, long err)
|
|||
do_exit(SIGSEGV);
|
||||
}
|
||||
|
||||
int syscall_ipi(int (*syscall) (struct pt_regs *), struct pt_regs *regs)
|
||||
{
|
||||
return syscall(regs);
|
||||
}
|
||||
|
||||
/* gdb uses break 4,8 */
|
||||
#define GDB_BREAK_INSN 0x10004
|
||||
static void handle_gdb_break(struct pt_regs *regs, int wot)
|
||||
|
@ -805,14 +800,14 @@ void notrace handle_interruption(int code, struct pt_regs *regs)
|
|||
else {
|
||||
|
||||
/*
|
||||
* The kernel should never fault on its own address space.
|
||||
* The kernel should never fault on its own address space,
|
||||
* unless pagefault_disable() was called before.
|
||||
*/
|
||||
|
||||
if (fault_space == 0)
|
||||
if (fault_space == 0 && !in_atomic())
|
||||
{
|
||||
pdc_chassis_send_status(PDC_CHASSIS_DIRECT_PANIC);
|
||||
parisc_terminate("Kernel Fault", regs, code, fault_address);
|
||||
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
@ -56,7 +56,7 @@
|
|||
#ifdef __KERNEL__
|
||||
#include <linux/module.h>
|
||||
#include <linux/compiler.h>
|
||||
#include <asm/uaccess.h>
|
||||
#include <linux/uaccess.h>
|
||||
#define s_space "%%sr1"
|
||||
#define d_space "%%sr2"
|
||||
#else
|
||||
|
@ -524,4 +524,17 @@ EXPORT_SYMBOL(copy_to_user);
|
|||
EXPORT_SYMBOL(copy_from_user);
|
||||
EXPORT_SYMBOL(copy_in_user);
|
||||
EXPORT_SYMBOL(memcpy);
|
||||
|
||||
long probe_kernel_read(void *dst, const void *src, size_t size)
|
||||
{
|
||||
unsigned long addr = (unsigned long)src;
|
||||
|
||||
if (size < 0 || addr < PAGE_SIZE)
|
||||
return -EFAULT;
|
||||
|
||||
/* check for I/O space F_EXTEND(0xfff00000) access as well? */
|
||||
|
||||
return __probe_kernel_read(dst, src, size);
|
||||
}
|
||||
|
||||
#endif
|
||||
|
|
|
@ -171,20 +171,25 @@ void do_page_fault(struct pt_regs *regs, unsigned long code,
|
|||
unsigned long address)
|
||||
{
|
||||
struct vm_area_struct *vma, *prev_vma;
|
||||
struct task_struct *tsk = current;
|
||||
struct mm_struct *mm = tsk->mm;
|
||||
struct task_struct *tsk;
|
||||
struct mm_struct *mm;
|
||||
unsigned long acc_type;
|
||||
int fault;
|
||||
unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
|
||||
unsigned int flags;
|
||||
|
||||
if (in_atomic() || !mm)
|
||||
if (in_atomic())
|
||||
goto no_context;
|
||||
|
||||
tsk = current;
|
||||
mm = tsk->mm;
|
||||
if (!mm)
|
||||
goto no_context;
|
||||
|
||||
flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
|
||||
if (user_mode(regs))
|
||||
flags |= FAULT_FLAG_USER;
|
||||
|
||||
acc_type = parisc_acctyp(code, regs->iir);
|
||||
|
||||
if (acc_type & VM_WRITE)
|
||||
flags |= FAULT_FLAG_WRITE;
|
||||
retry:
|
||||
|
|
|
@ -19,7 +19,7 @@
|
|||
|
||||
static __always_inline bool arch_static_branch(struct static_key *key)
|
||||
{
|
||||
asm goto("1:\n\t"
|
||||
asm_volatile_goto("1:\n\t"
|
||||
"nop\n\t"
|
||||
".pushsection __jump_table, \"aw\"\n\t"
|
||||
JUMP_ENTRY_TYPE "1b, %l[l_yes], %c0\n\t"
|
||||
|
|
|
@ -495,14 +495,15 @@ void __do_irq(struct pt_regs *regs)
|
|||
void do_IRQ(struct pt_regs *regs)
|
||||
{
|
||||
struct pt_regs *old_regs = set_irq_regs(regs);
|
||||
struct thread_info *curtp, *irqtp;
|
||||
struct thread_info *curtp, *irqtp, *sirqtp;
|
||||
|
||||
/* Switch to the irq stack to handle this */
|
||||
curtp = current_thread_info();
|
||||
irqtp = hardirq_ctx[raw_smp_processor_id()];
|
||||
sirqtp = softirq_ctx[raw_smp_processor_id()];
|
||||
|
||||
/* Already there ? */
|
||||
if (unlikely(curtp == irqtp)) {
|
||||
if (unlikely(curtp == irqtp || curtp == sirqtp)) {
|
||||
__do_irq(regs);
|
||||
set_irq_regs(old_regs);
|
||||
return;
|
||||
|
|
|
@ -1066,7 +1066,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
|
|||
BEGIN_FTR_SECTION
|
||||
mfspr r8, SPRN_DSCR
|
||||
ld r7, HSTATE_DSCR(r13)
|
||||
std r8, VCPU_DSCR(r7)
|
||||
std r8, VCPU_DSCR(r9)
|
||||
mtspr SPRN_DSCR, r7
|
||||
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
|
||||
|
||||
|
|
|
@ -332,6 +332,13 @@ static inline int kvmppc_e500_shadow_map(struct kvmppc_vcpu_e500 *vcpu_e500,
|
|||
unsigned long hva;
|
||||
int pfnmap = 0;
|
||||
int tsize = BOOK3E_PAGESZ_4K;
|
||||
int ret = 0;
|
||||
unsigned long mmu_seq;
|
||||
struct kvm *kvm = vcpu_e500->vcpu.kvm;
|
||||
|
||||
/* used to check for invalidations in progress */
|
||||
mmu_seq = kvm->mmu_notifier_seq;
|
||||
smp_rmb();
|
||||
|
||||
/*
|
||||
* Translate guest physical to true physical, acquiring
|
||||
|
@ -449,6 +456,12 @@ static inline int kvmppc_e500_shadow_map(struct kvmppc_vcpu_e500 *vcpu_e500,
|
|||
gvaddr &= ~((tsize_pages << PAGE_SHIFT) - 1);
|
||||
}
|
||||
|
||||
spin_lock(&kvm->mmu_lock);
|
||||
if (mmu_notifier_retry(kvm, mmu_seq)) {
|
||||
ret = -EAGAIN;
|
||||
goto out;
|
||||
}
|
||||
|
||||
kvmppc_e500_ref_setup(ref, gtlbe, pfn);
|
||||
|
||||
kvmppc_e500_setup_stlbe(&vcpu_e500->vcpu, gtlbe, tsize,
|
||||
|
@ -457,10 +470,13 @@ static inline int kvmppc_e500_shadow_map(struct kvmppc_vcpu_e500 *vcpu_e500,
|
|||
/* Clear i-cache for new pages */
|
||||
kvmppc_mmu_flush_icache(pfn);
|
||||
|
||||
out:
|
||||
spin_unlock(&kvm->mmu_lock);
|
||||
|
||||
/* Drop refcount on page, so that mmu notifiers can clear it */
|
||||
kvm_release_pfn_clean(pfn);
|
||||
|
||||
return 0;
|
||||
return ret;
|
||||
}
|
||||
|
||||
/* XXX only map the one-one case, for now use TLB0 */
|
||||
|
|
|
@ -15,7 +15,7 @@
|
|||
|
||||
static __always_inline bool arch_static_branch(struct static_key *key)
|
||||
{
|
||||
asm goto("0: brcl 0,0\n"
|
||||
asm_volatile_goto("0: brcl 0,0\n"
|
||||
".pushsection __jump_table, \"aw\"\n"
|
||||
ASM_ALIGN "\n"
|
||||
ASM_PTR " 0b, %l[label], %0\n"
|
||||
|
|
|
@ -40,28 +40,26 @@ static inline void *load_real_addr(void *addr)
|
|||
}
|
||||
|
||||
/*
|
||||
* Copy up to one page to vmalloc or real memory
|
||||
* Copy real to virtual or real memory
|
||||
*/
|
||||
static ssize_t copy_page_real(void *buf, void *src, size_t csize)
|
||||
static int copy_from_realmem(void *dest, void *src, size_t count)
|
||||
{
|
||||
size_t size;
|
||||
unsigned long size;
|
||||
int rc;
|
||||
|
||||
if (is_vmalloc_addr(buf)) {
|
||||
BUG_ON(csize >= PAGE_SIZE);
|
||||
/* If buf is not page aligned, copy first part */
|
||||
size = min(roundup(__pa(buf), PAGE_SIZE) - __pa(buf), csize);
|
||||
if (size) {
|
||||
if (memcpy_real(load_real_addr(buf), src, size))
|
||||
if (!count)
|
||||
return 0;
|
||||
if (!is_vmalloc_or_module_addr(dest))
|
||||
return memcpy_real(dest, src, count);
|
||||
do {
|
||||
size = min(count, PAGE_SIZE - (__pa(dest) & ~PAGE_MASK));
|
||||
if (memcpy_real(load_real_addr(dest), src, size))
|
||||
return -EFAULT;
|
||||
buf += size;
|
||||
count -= size;
|
||||
dest += size;
|
||||
src += size;
|
||||
}
|
||||
/* Copy second part */
|
||||
size = csize - size;
|
||||
return (size) ? memcpy_real(load_real_addr(buf), src, size) : 0;
|
||||
} else {
|
||||
return memcpy_real(buf, src, csize);
|
||||
}
|
||||
} while (count);
|
||||
return 0;
|
||||
}
|
||||
|
||||
/*
|
||||
|
@ -114,7 +112,7 @@ static ssize_t copy_oldmem_page_kdump(char *buf, size_t csize,
|
|||
rc = copy_to_user_real((void __force __user *) buf,
|
||||
(void *) src, csize);
|
||||
else
|
||||
rc = copy_page_real(buf, (void *) src, csize);
|
||||
rc = copy_from_realmem(buf, (void *) src, csize);
|
||||
return (rc == 0) ? rc : csize;
|
||||
}
|
||||
|
||||
|
@ -210,7 +208,7 @@ int copy_from_oldmem(void *dest, void *src, size_t count)
|
|||
if (OLDMEM_BASE) {
|
||||
if ((unsigned long) src < OLDMEM_SIZE) {
|
||||
copied = min(count, OLDMEM_SIZE - (unsigned long) src);
|
||||
rc = memcpy_real(dest, src + OLDMEM_BASE, copied);
|
||||
rc = copy_from_realmem(dest, src + OLDMEM_BASE, copied);
|
||||
if (rc)
|
||||
return rc;
|
||||
}
|
||||
|
@ -223,7 +221,7 @@ int copy_from_oldmem(void *dest, void *src, size_t count)
|
|||
return rc;
|
||||
}
|
||||
}
|
||||
return memcpy_real(dest + copied, src + copied, count - copied);
|
||||
return copy_from_realmem(dest + copied, src + copied, count - copied);
|
||||
}
|
||||
|
||||
/*
|
||||
|
|
|
@ -266,6 +266,7 @@ sysc_sigpending:
|
|||
tm __TI_flags+3(%r12),_TIF_SYSCALL
|
||||
jno sysc_return
|
||||
lm %r2,%r7,__PT_R2(%r11) # load svc arguments
|
||||
l %r10,__TI_sysc_table(%r12) # 31 bit system call table
|
||||
xr %r8,%r8 # svc 0 returns -ENOSYS
|
||||
clc __PT_INT_CODE+2(2,%r11),BASED(.Lnr_syscalls+2)
|
||||
jnl sysc_nr_ok # invalid svc number -> do svc 0
|
||||
|
|
|
@ -297,6 +297,7 @@ sysc_sigpending:
|
|||
tm __TI_flags+7(%r12),_TIF_SYSCALL
|
||||
jno sysc_return
|
||||
lmg %r2,%r7,__PT_R2(%r11) # load svc arguments
|
||||
lg %r10,__TI_sysc_table(%r12) # address of system call table
|
||||
lghi %r8,0 # svc 0 returns -ENOSYS
|
||||
llgh %r1,__PT_INT_CODE+2(%r11) # load new svc number
|
||||
cghi %r1,NR_syscalls
|
||||
|
|
|
@ -67,6 +67,11 @@ static int __kprobes is_prohibited_opcode(kprobe_opcode_t *insn)
|
|||
case 0xac: /* stnsm */
|
||||
case 0xad: /* stosm */
|
||||
return -EINVAL;
|
||||
case 0xc6:
|
||||
switch (insn[0] & 0x0f) {
|
||||
case 0x00: /* exrl */
|
||||
return -EINVAL;
|
||||
}
|
||||
}
|
||||
switch (insn[0]) {
|
||||
case 0x0101: /* pr */
|
||||
|
@ -180,7 +185,6 @@ static int __kprobes is_insn_relative_long(kprobe_opcode_t *insn)
|
|||
break;
|
||||
case 0xc6:
|
||||
switch (insn[0] & 0x0f) {
|
||||
case 0x00: /* exrl */
|
||||
case 0x02: /* pfdrl */
|
||||
case 0x04: /* cghrl */
|
||||
case 0x05: /* chrl */
|
||||
|
|
|
@ -9,7 +9,7 @@
|
|||
|
||||
static __always_inline bool arch_static_branch(struct static_key *key)
|
||||
{
|
||||
asm goto("1:\n\t"
|
||||
asm_volatile_goto("1:\n\t"
|
||||
"nop\n\t"
|
||||
"nop\n\t"
|
||||
".pushsection __jump_table, \"aw\"\n\t"
|
||||
|
|
|
@ -166,7 +166,7 @@ static inline int atomic_cmpxchg(atomic_t *v, int o, int n)
|
|||
*
|
||||
* Atomically sets @v to @i and returns old @v
|
||||
*/
|
||||
static inline u64 atomic64_xchg(atomic64_t *v, u64 n)
|
||||
static inline long long atomic64_xchg(atomic64_t *v, long long n)
|
||||
{
|
||||
return xchg64(&v->counter, n);
|
||||
}
|
||||
|
@ -180,7 +180,8 @@ static inline u64 atomic64_xchg(atomic64_t *v, u64 n)
|
|||
* Atomically checks if @v holds @o and replaces it with @n if so.
|
||||
* Returns the old value at @v.
|
||||
*/
|
||||
static inline u64 atomic64_cmpxchg(atomic64_t *v, u64 o, u64 n)
|
||||
static inline long long atomic64_cmpxchg(atomic64_t *v, long long o,
|
||||
long long n)
|
||||
{
|
||||
return cmpxchg64(&v->counter, o, n);
|
||||
}
|
||||
|
|
|
@ -80,7 +80,7 @@ static inline void atomic_set(atomic_t *v, int n)
|
|||
/* A 64bit atomic type */
|
||||
|
||||
typedef struct {
|
||||
u64 __aligned(8) counter;
|
||||
long long counter;
|
||||
} atomic64_t;
|
||||
|
||||
#define ATOMIC64_INIT(val) { (val) }
|
||||
|
@ -91,14 +91,14 @@ typedef struct {
|
|||
*
|
||||
* Atomically reads the value of @v.
|
||||
*/
|
||||
static inline u64 atomic64_read(const atomic64_t *v)
|
||||
static inline long long atomic64_read(const atomic64_t *v)
|
||||
{
|
||||
/*
|
||||
* Requires an atomic op to read both 32-bit parts consistently.
|
||||
* Casting away const is safe since the atomic support routines
|
||||
* do not write to memory if the value has not been modified.
|
||||
*/
|
||||
return _atomic64_xchg_add((u64 *)&v->counter, 0);
|
||||
return _atomic64_xchg_add((long long *)&v->counter, 0);
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -108,7 +108,7 @@ static inline u64 atomic64_read(const atomic64_t *v)
|
|||
*
|
||||
* Atomically adds @i to @v.
|
||||
*/
|
||||
static inline void atomic64_add(u64 i, atomic64_t *v)
|
||||
static inline void atomic64_add(long long i, atomic64_t *v)
|
||||
{
|
||||
_atomic64_xchg_add(&v->counter, i);
|
||||
}
|
||||
|
@ -120,7 +120,7 @@ static inline void atomic64_add(u64 i, atomic64_t *v)
|
|||
*
|
||||
* Atomically adds @i to @v and returns @i + @v
|
||||
*/
|
||||
static inline u64 atomic64_add_return(u64 i, atomic64_t *v)
|
||||
static inline long long atomic64_add_return(long long i, atomic64_t *v)
|
||||
{
|
||||
smp_mb(); /* barrier for proper semantics */
|
||||
return _atomic64_xchg_add(&v->counter, i) + i;
|
||||
|
@ -135,7 +135,8 @@ static inline u64 atomic64_add_return(u64 i, atomic64_t *v)
|
|||
* Atomically adds @a to @v, so long as @v was not already @u.
|
||||
* Returns non-zero if @v was not @u, and zero otherwise.
|
||||
*/
|
||||
static inline u64 atomic64_add_unless(atomic64_t *v, u64 a, u64 u)
|
||||
static inline long long atomic64_add_unless(atomic64_t *v, long long a,
|
||||
long long u)
|
||||
{
|
||||
smp_mb(); /* barrier for proper semantics */
|
||||
return _atomic64_xchg_add_unless(&v->counter, a, u) != u;
|
||||
|
@ -151,7 +152,7 @@ static inline u64 atomic64_add_unless(atomic64_t *v, u64 a, u64 u)
|
|||
* atomic64_set() can't be just a raw store, since it would be lost if it
|
||||
* fell between the load and store of one of the other atomic ops.
|
||||
*/
|
||||
static inline void atomic64_set(atomic64_t *v, u64 n)
|
||||
static inline void atomic64_set(atomic64_t *v, long long n)
|
||||
{
|
||||
_atomic64_xchg(&v->counter, n);
|
||||
}
|
||||
|
@ -236,11 +237,13 @@ extern struct __get_user __atomic_xchg_add_unless(volatile int *p,
|
|||
extern struct __get_user __atomic_or(volatile int *p, int *lock, int n);
|
||||
extern struct __get_user __atomic_andn(volatile int *p, int *lock, int n);
|
||||
extern struct __get_user __atomic_xor(volatile int *p, int *lock, int n);
|
||||
extern u64 __atomic64_cmpxchg(volatile u64 *p, int *lock, u64 o, u64 n);
|
||||
extern u64 __atomic64_xchg(volatile u64 *p, int *lock, u64 n);
|
||||
extern u64 __atomic64_xchg_add(volatile u64 *p, int *lock, u64 n);
|
||||
extern u64 __atomic64_xchg_add_unless(volatile u64 *p,
|
||||
int *lock, u64 o, u64 n);
|
||||
extern long long __atomic64_cmpxchg(volatile long long *p, int *lock,
|
||||
long long o, long long n);
|
||||
extern long long __atomic64_xchg(volatile long long *p, int *lock, long long n);
|
||||
extern long long __atomic64_xchg_add(volatile long long *p, int *lock,
|
||||
long long n);
|
||||
extern long long __atomic64_xchg_add_unless(volatile long long *p,
|
||||
int *lock, long long o, long long n);
|
||||
|
||||
/* Return failure from the atomic wrappers. */
|
||||
struct __get_user __atomic_bad_address(int __user *addr);
|
||||
|
|
|
@ -35,10 +35,10 @@ int _atomic_xchg(int *ptr, int n);
int _atomic_xchg_add(int *v, int i);
int _atomic_xchg_add_unless(int *v, int a, int u);
int _atomic_cmpxchg(int *ptr, int o, int n);
u64 _atomic64_xchg(u64 *v, u64 n);
u64 _atomic64_xchg_add(u64 *v, u64 i);
u64 _atomic64_xchg_add_unless(u64 *v, u64 a, u64 u);
u64 _atomic64_cmpxchg(u64 *v, u64 o, u64 n);
long long _atomic64_xchg(long long *v, long long n);
long long _atomic64_xchg_add(long long *v, long long i);
long long _atomic64_xchg_add_unless(long long *v, long long a, long long u);
long long _atomic64_cmpxchg(long long *v, long long o, long long n);

#define xchg(ptr, n) \
	({ \

@ -53,7 +53,8 @@ u64 _atomic64_cmpxchg(u64 *v, u64 o, u64 n);
		if (sizeof(*(ptr)) != 4) \
			__cmpxchg_called_with_bad_pointer(); \
		smp_mb(); \
		(typeof(*(ptr)))_atomic_cmpxchg((int *)ptr, (int)o, (int)n); \
		(typeof(*(ptr)))_atomic_cmpxchg((int *)ptr, (int)o, \
						(int)n); \
	})

#define xchg64(ptr, n) \

@ -61,7 +62,8 @@ u64 _atomic64_cmpxchg(u64 *v, u64 o, u64 n);
		if (sizeof(*(ptr)) != 8) \
			__xchg_called_with_bad_pointer(); \
		smp_mb(); \
		(typeof(*(ptr)))_atomic64_xchg((u64 *)(ptr), (u64)(n)); \
		(typeof(*(ptr)))_atomic64_xchg((long long *)(ptr), \
					       (long long)(n)); \
	})

#define cmpxchg64(ptr, o, n) \

@ -69,7 +71,8 @@ u64 _atomic64_cmpxchg(u64 *v, u64 o, u64 n);
		if (sizeof(*(ptr)) != 8) \
			__cmpxchg_called_with_bad_pointer(); \
		smp_mb(); \
		(typeof(*(ptr)))_atomic64_cmpxchg((u64 *)ptr, (u64)o, (u64)n); \
		(typeof(*(ptr)))_atomic64_cmpxchg((long long *)ptr, \
						  (long long)o, (long long)n); \
	})

#else

@ -81,7 +84,8 @@ u64 _atomic64_cmpxchg(u64 *v, u64 o, u64 n);
	switch (sizeof(*(ptr))) { \
	case 4: \
		__x = (typeof(__x))(unsigned long) \
			__insn_exch4((ptr), (u32)(unsigned long)(n)); \
			__insn_exch4((ptr), \
				     (u32)(unsigned long)(n)); \
		break; \
	case 8: \
		__x = (typeof(__x)) \

@ -103,10 +107,12 @@ u64 _atomic64_cmpxchg(u64 *v, u64 o, u64 n);
	switch (sizeof(*(ptr))) { \
	case 4: \
		__x = (typeof(__x))(unsigned long) \
			__insn_cmpexch4((ptr), (u32)(unsigned long)(n)); \
			__insn_cmpexch4((ptr), \
					(u32)(unsigned long)(n)); \
		break; \
	case 8: \
		__x = (typeof(__x))__insn_cmpexch((ptr), (u64)(n)); \
		__x = (typeof(__x))__insn_cmpexch((ptr), \
						  (long long)(n)); \
		break; \
	default: \
		__cmpxchg_called_with_bad_pointer(); \
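
Purely as an illustration (not code from this patch): cmpxchg64() above returns the value it actually found at *ptr, so the standard way to use it is a compare-and-swap retry loop. A sketch under that assumption, with a hypothetical counter64_add() helper; a torn initial read on a 32-bit machine is harmless here because the cmpxchg validates the snapshot and the loop simply retries:

#include <linux/types.h>

static inline void counter64_add(u64 *counter, u64 delta)
{
	u64 old;

	do {
		old = *counter;		/* snapshot, revalidated below */
	} while (cmpxchg64(counter, old, old + delta) != old);
}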
@ -15,9 +15,37 @@
#ifndef _ASM_TILE_PERCPU_H
#define _ASM_TILE_PERCPU_H

register unsigned long __my_cpu_offset __asm__("tp");
#define __my_cpu_offset __my_cpu_offset
#define set_my_cpu_offset(tp) (__my_cpu_offset = (tp))
register unsigned long my_cpu_offset_reg asm("tp");

#ifdef CONFIG_PREEMPT
/*
 * For full preemption, we can't just use the register variable
 * directly, since we need barrier() to hazard against it, causing the
 * compiler to reload anything computed from a previous "tp" value.
 * But we also don't want to use volatile asm, since we'd like the
 * compiler to be able to cache the value across multiple percpu reads.
 * So we use a fake stack read as a hazard against barrier().
 * The 'U' constraint is like 'm' but disallows postincrement.
 */
static inline unsigned long __my_cpu_offset(void)
{
	unsigned long tp;
	register unsigned long *sp asm("sp");
	asm("move %0, tp" : "=r" (tp) : "U" (*sp));
	return tp;
}
#define __my_cpu_offset __my_cpu_offset()
#else
/*
 * We don't need to hazard against barrier() since "tp" doesn't ever
 * change with PREEMPT_NONE, and with PREEMPT_VOLUNTARY it only
 * changes at function call points, at which we are already re-reading
 * the value of "tp" due to "my_cpu_offset_reg" being a global variable.
 */
#define __my_cpu_offset my_cpu_offset_reg
#endif

#define set_my_cpu_offset(tp) (my_cpu_offset_reg = (tp))

#include <asm-generic/percpu.h>
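
A short note on the reasoning above (illustration only, not part of the patch): the hazard matters because per-cpu accessors are normally bracketed by preempt_disable()/preempt_enable(), which contain barrier(). If the compiler could keep a value derived from "tp" cached across that barrier, a later access might still use the offset of a CPU the task has since migrated away from. A minimal sketch, with a hypothetical per-cpu counter:

#include <linux/percpu.h>
#include <linux/preempt.h>

static DEFINE_PER_CPU(unsigned long, hits);	/* hypothetical counter */

static void count_hit(void)
{
	preempt_disable();		/* barrier(): no "tp"-derived value may be reused from before */
	__this_cpu_add(hits, 1);	/* reads __my_cpu_offset internally */
	preempt_enable();		/* barrier() again on the way out */
}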
@ -66,7 +66,7 @@ static struct hardwall_type hardwall_types[] = {
	0,
	"udn",
	LIST_HEAD_INIT(hardwall_types[HARDWALL_UDN].list),
	__SPIN_LOCK_INITIALIZER(hardwall_types[HARDWALL_UDN].lock),
	__SPIN_LOCK_UNLOCKED(hardwall_types[HARDWALL_UDN].lock),
	NULL
	},
#ifndef __tilepro__

@ -77,7 +77,7 @@ static struct hardwall_type hardwall_types[] = {
	1, /* disabled pending hypervisor support */
	"idn",
	LIST_HEAD_INIT(hardwall_types[HARDWALL_IDN].list),
	__SPIN_LOCK_INITIALIZER(hardwall_types[HARDWALL_IDN].lock),
	__SPIN_LOCK_UNLOCKED(hardwall_types[HARDWALL_IDN].lock),
	NULL
	},
	{ /* access to user-space IPI */

@ -87,7 +87,7 @@ static struct hardwall_type hardwall_types[] = {
	0,
	"ipi",
	LIST_HEAD_INIT(hardwall_types[HARDWALL_IPI].list),
	__SPIN_LOCK_INITIALIZER(hardwall_types[HARDWALL_IPI].lock),
	__SPIN_LOCK_UNLOCKED(hardwall_types[HARDWALL_IPI].lock),
	NULL
	},
#endif
@ -815,6 +815,9 @@ STD_ENTRY(interrupt_return)
	}
	bzt r28, 1f
	bnz r29, 1f
	/* Disable interrupts explicitly for preemption. */
	IRQ_DISABLE(r20,r21)
	TRACE_IRQS_OFF
	jal preempt_schedule_irq
	FEEDBACK_REENTER(interrupt_return)
1:

@ -841,6 +841,9 @@ STD_ENTRY(interrupt_return)
	}
	beqzt r28, 1f
	bnez r29, 1f
	/* Disable interrupts explicitly for preemption. */
	IRQ_DISABLE(r20,r21)
	TRACE_IRQS_OFF
	jal preempt_schedule_irq
	FEEDBACK_REENTER(interrupt_return)
1:
@ -23,6 +23,7 @@
#include <linux/mmzone.h>
#include <linux/dcache.h>
#include <linux/fs.h>
#include <linux/string.h>
#include <asm/backtrace.h>
#include <asm/page.h>
#include <asm/ucontext.h>

@ -332,21 +333,18 @@ static void describe_addr(struct KBacktraceIterator *kbt,
	}

	if (vma->vm_file) {
		char *s;
		p = d_path(&vma->vm_file->f_path, buf, bufsize);
		if (IS_ERR(p))
			p = "?";
		s = strrchr(p, '/');
		if (s)
			p = s+1;
		name = kbasename(p);
	} else {
		p = "anon";
		name = "anon";
	}

	/* Generate a string description of the vma info. */
	namelen = strlen(p);
	namelen = strlen(name);
	remaining = (bufsize - 1) - namelen;
	memmove(buf, p, namelen);
	memmove(buf, name, namelen);
	snprintf(buf + namelen, remaining, "[%lx+%lx] ",
		 vma->vm_start, vma->vm_end - vma->vm_start);
}
@ -107,19 +107,19 @@ unsigned long _atomic_xor(volatile unsigned long *p, unsigned long mask)
EXPORT_SYMBOL(_atomic_xor);


u64 _atomic64_xchg(u64 *v, u64 n)
long long _atomic64_xchg(long long *v, long long n)
{
	return __atomic64_xchg(v, __atomic_setup(v), n);
}
EXPORT_SYMBOL(_atomic64_xchg);

u64 _atomic64_xchg_add(u64 *v, u64 i)
long long _atomic64_xchg_add(long long *v, long long i)
{
	return __atomic64_xchg_add(v, __atomic_setup(v), i);
}
EXPORT_SYMBOL(_atomic64_xchg_add);

u64 _atomic64_xchg_add_unless(u64 *v, u64 a, u64 u)
long long _atomic64_xchg_add_unless(long long *v, long long a, long long u)
{
	/*
	 * Note: argument order is switched here since it is easier

@ -130,7 +130,7 @@ u64 _atomic64_xchg_add_unless(u64 *v, u64 a, u64 u)
}
EXPORT_SYMBOL(_atomic64_xchg_add_unless);

u64 _atomic64_cmpxchg(u64 *v, u64 o, u64 n)
long long _atomic64_cmpxchg(long long *v, long long o, long long n)
{
	return __atomic64_cmpxchg(v, __atomic_setup(v), o, n);
}
@ -860,7 +860,7 @@ source "kernel/Kconfig.preempt"
|
|||
|
||||
config X86_UP_APIC
|
||||
bool "Local APIC support on uniprocessors"
|
||||
depends on X86_32 && !SMP && !X86_32_NON_STANDARD
|
||||
depends on X86_32 && !SMP && !X86_32_NON_STANDARD && !PCI_MSI
|
||||
---help---
|
||||
A local APIC (Advanced Programmable Interrupt Controller) is an
|
||||
integrated interrupt controller in the CPU. If you have a single-CPU
|
||||
|
@ -885,11 +885,11 @@ config X86_UP_IOAPIC
|
|||
|
||||
config X86_LOCAL_APIC
|
||||
def_bool y
|
||||
depends on X86_64 || SMP || X86_32_NON_STANDARD || X86_UP_APIC
|
||||
depends on X86_64 || SMP || X86_32_NON_STANDARD || X86_UP_APIC || PCI_MSI
|
||||
|
||||
config X86_IO_APIC
|
||||
def_bool y
|
||||
depends on X86_64 || SMP || X86_32_NON_STANDARD || X86_UP_IOAPIC
|
||||
depends on X86_64 || SMP || X86_32_NON_STANDARD || X86_UP_IOAPIC || PCI_MSI
|
||||
|
||||
config X86_VISWS_APIC
|
||||
def_bool y
|
||||
|
@ -1033,6 +1033,7 @@ config X86_REBOOTFIXUPS
|
|||
|
||||
config MICROCODE
|
||||
tristate "CPU microcode loading support"
|
||||
depends on CPU_SUP_AMD || CPU_SUP_INTEL
|
||||
select FW_LOADER
|
||||
---help---
|
||||
|
||||
|
|
|
@ -374,7 +374,7 @@ static __always_inline __pure bool __static_cpu_has(u16 bit)
|
|||
* Catch too early usage of this before alternatives
|
||||
* have run.
|
||||
*/
|
||||
asm goto("1: jmp %l[t_warn]\n"
|
||||
asm_volatile_goto("1: jmp %l[t_warn]\n"
|
||||
"2:\n"
|
||||
".section .altinstructions,\"a\"\n"
|
||||
" .long 1b - .\n"
|
||||
|
@ -388,7 +388,7 @@ static __always_inline __pure bool __static_cpu_has(u16 bit)
|
|||
|
||||
#endif
|
||||
|
||||
asm goto("1: jmp %l[t_no]\n"
|
||||
asm_volatile_goto("1: jmp %l[t_no]\n"
|
||||
"2:\n"
|
||||
".section .altinstructions,\"a\"\n"
|
||||
" .long 1b - .\n"
|
||||
|
@ -453,7 +453,7 @@ static __always_inline __pure bool _static_cpu_has_safe(u16 bit)
|
|||
* have. Thus, we force the jump to the widest, 4-byte, signed relative
|
||||
* offset even though the last would often fit in less bytes.
|
||||
*/
|
||||
asm goto("1: .byte 0xe9\n .long %l[t_dynamic] - 2f\n"
|
||||
asm_volatile_goto("1: .byte 0xe9\n .long %l[t_dynamic] - 2f\n"
|
||||
"2:\n"
|
||||
".section .altinstructions,\"a\"\n"
|
||||
" .long 1b - .\n" /* src offset */
|
||||
|
|
|
@ -18,7 +18,7 @@
|
|||
|
||||
static __always_inline bool arch_static_branch(struct static_key *key)
|
||||
{
|
||||
asm goto("1:"
|
||||
asm_volatile_goto("1:"
|
||||
".byte " __stringify(STATIC_KEY_INIT_NOP) "\n\t"
|
||||
".pushsection __jump_table, \"aw\" \n\t"
|
||||
_ASM_ALIGN "\n\t"
|
||||
|
|
|
@ -20,7 +20,7 @@
|
|||
static inline void __mutex_fastpath_lock(atomic_t *v,
|
||||
void (*fail_fn)(atomic_t *))
|
||||
{
|
||||
asm volatile goto(LOCK_PREFIX " decl %0\n"
|
||||
asm_volatile_goto(LOCK_PREFIX " decl %0\n"
|
||||
" jns %l[exit]\n"
|
||||
: : "m" (v->counter)
|
||||
: "memory", "cc"
|
||||
|
@ -75,7 +75,7 @@ static inline int __mutex_fastpath_lock_retval(atomic_t *count)
|
|||
static inline void __mutex_fastpath_unlock(atomic_t *v,
|
||||
void (*fail_fn)(atomic_t *))
|
||||
{
|
||||
asm volatile goto(LOCK_PREFIX " incl %0\n"
|
||||
asm_volatile_goto(LOCK_PREFIX " incl %0\n"
|
||||
" jg %l[exit]\n"
|
||||
: : "m" (v->counter)
|
||||
: "memory", "cc"
|
||||
|
|
|
@ -113,7 +113,7 @@ static int __init early_get_pnodeid(void)
|
|||
break;
|
||||
case UV3_HUB_PART_NUMBER:
|
||||
case UV3_HUB_PART_NUMBER_X:
|
||||
uv_min_hub_revision_id += UV3_HUB_REVISION_BASE - 1;
|
||||
uv_min_hub_revision_id += UV3_HUB_REVISION_BASE;
|
||||
break;
|
||||
}
|
||||
|
||||
|
|
|
@ -1888,10 +1888,7 @@ void arch_perf_update_userpage(struct perf_event_mmap_page *userpg, u64 now)
|
|||
userpg->cap_user_rdpmc = x86_pmu.attr_rdpmc;
|
||||
userpg->pmc_width = x86_pmu.cntval_bits;
|
||||
|
||||
if (!boot_cpu_has(X86_FEATURE_CONSTANT_TSC))
|
||||
return;
|
||||
|
||||
if (!boot_cpu_has(X86_FEATURE_NONSTOP_TSC))
|
||||
if (!sched_clock_stable)
|
||||
return;
|
||||
|
||||
userpg->cap_user_time = 1;
|
||||
|
@ -1899,11 +1896,9 @@ void arch_perf_update_userpage(struct perf_event_mmap_page *userpg, u64 now)
|
|||
userpg->time_shift = CYC2NS_SCALE_FACTOR;
|
||||
userpg->time_offset = this_cpu_read(cyc2ns_offset) - now;
|
||||
|
||||
if (sched_clock_stable && !check_tsc_disabled()) {
|
||||
userpg->cap_user_time_zero = 1;
|
||||
userpg->time_zero = this_cpu_read(cyc2ns_offset);
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
* callchain support
|
||||
|
|
|
@ -775,11 +775,22 @@ void __init kvm_spinlock_init(void)
|
|||
if (!kvm_para_has_feature(KVM_FEATURE_PV_UNHALT))
|
||||
return;
|
||||
|
||||
printk(KERN_INFO "KVM setup paravirtual spinlock\n");
|
||||
|
||||
static_key_slow_inc(¶virt_ticketlocks_enabled);
|
||||
|
||||
pv_lock_ops.lock_spinning = PV_CALLEE_SAVE(kvm_lock_spinning);
|
||||
pv_lock_ops.unlock_kick = kvm_unlock_kick;
|
||||
}
|
||||
|
||||
static __init int kvm_spinlock_init_jump(void)
|
||||
{
|
||||
if (!kvm_para_available())
|
||||
return 0;
|
||||
if (!kvm_para_has_feature(KVM_FEATURE_PV_UNHALT))
|
||||
return 0;
|
||||
|
||||
static_key_slow_inc(¶virt_ticketlocks_enabled);
|
||||
printk(KERN_INFO "KVM setup paravirtual spinlock\n");
|
||||
|
||||
return 0;
|
||||
}
|
||||
early_initcall(kvm_spinlock_init_jump);
|
||||
|
||||
#endif /* CONFIG_PARAVIRT_SPINLOCKS */
|
||||
|
|
|
@ -326,6 +326,14 @@ static struct dmi_system_id __initdata reboot_dmi_table[] = {
|
|||
DMI_MATCH(DMI_PRODUCT_NAME, "Latitude E6320"),
|
||||
},
|
||||
},
|
||||
{ /* Handle problems with rebooting on the Latitude E5410. */
|
||||
.callback = set_pci_reboot,
|
||||
.ident = "Dell Latitude E5410",
|
||||
.matches = {
|
||||
DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
|
||||
DMI_MATCH(DMI_PRODUCT_NAME, "Latitude E5410"),
|
||||
},
|
||||
},
|
||||
{ /* Handle problems with rebooting on the Latitude E5420. */
|
||||
.callback = set_pci_reboot,
|
||||
.ident = "Dell Latitude E5420",
|
||||
|
|
|
@ -3255,25 +3255,29 @@ static void vmx_decache_cr4_guest_bits(struct kvm_vcpu *vcpu)
|
|||
|
||||
static void ept_load_pdptrs(struct kvm_vcpu *vcpu)
|
||||
{
|
||||
struct kvm_mmu *mmu = vcpu->arch.walk_mmu;
|
||||
|
||||
if (!test_bit(VCPU_EXREG_PDPTR,
|
||||
(unsigned long *)&vcpu->arch.regs_dirty))
|
||||
return;
|
||||
|
||||
if (is_paging(vcpu) && is_pae(vcpu) && !is_long_mode(vcpu)) {
|
||||
vmcs_write64(GUEST_PDPTR0, vcpu->arch.mmu.pdptrs[0]);
|
||||
vmcs_write64(GUEST_PDPTR1, vcpu->arch.mmu.pdptrs[1]);
|
||||
vmcs_write64(GUEST_PDPTR2, vcpu->arch.mmu.pdptrs[2]);
|
||||
vmcs_write64(GUEST_PDPTR3, vcpu->arch.mmu.pdptrs[3]);
|
||||
vmcs_write64(GUEST_PDPTR0, mmu->pdptrs[0]);
|
||||
vmcs_write64(GUEST_PDPTR1, mmu->pdptrs[1]);
|
||||
vmcs_write64(GUEST_PDPTR2, mmu->pdptrs[2]);
|
||||
vmcs_write64(GUEST_PDPTR3, mmu->pdptrs[3]);
|
||||
}
|
||||
}
|
||||
|
||||
static void ept_save_pdptrs(struct kvm_vcpu *vcpu)
|
||||
{
|
||||
struct kvm_mmu *mmu = vcpu->arch.walk_mmu;
|
||||
|
||||
if (is_paging(vcpu) && is_pae(vcpu) && !is_long_mode(vcpu)) {
|
||||
vcpu->arch.mmu.pdptrs[0] = vmcs_read64(GUEST_PDPTR0);
|
||||
vcpu->arch.mmu.pdptrs[1] = vmcs_read64(GUEST_PDPTR1);
|
||||
vcpu->arch.mmu.pdptrs[2] = vmcs_read64(GUEST_PDPTR2);
|
||||
vcpu->arch.mmu.pdptrs[3] = vmcs_read64(GUEST_PDPTR3);
|
||||
mmu->pdptrs[0] = vmcs_read64(GUEST_PDPTR0);
|
||||
mmu->pdptrs[1] = vmcs_read64(GUEST_PDPTR1);
|
||||
mmu->pdptrs[2] = vmcs_read64(GUEST_PDPTR2);
|
||||
mmu->pdptrs[3] = vmcs_read64(GUEST_PDPTR3);
|
||||
}
|
||||
|
||||
__set_bit(VCPU_EXREG_PDPTR,
|
||||
|
@ -7777,10 +7781,6 @@ static void prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
|
|||
vmcs_write64(GUEST_PDPTR1, vmcs12->guest_pdptr1);
|
||||
vmcs_write64(GUEST_PDPTR2, vmcs12->guest_pdptr2);
|
||||
vmcs_write64(GUEST_PDPTR3, vmcs12->guest_pdptr3);
|
||||
__clear_bit(VCPU_EXREG_PDPTR,
|
||||
(unsigned long *)&vcpu->arch.regs_avail);
|
||||
__clear_bit(VCPU_EXREG_PDPTR,
|
||||
(unsigned long *)&vcpu->arch.regs_dirty);
|
||||
}
|
||||
|
||||
kvm_register_write(vcpu, VCPU_REGS_RSP, vmcs12->guest_rsp);
|
||||
|
|
|
@ -278,6 +278,15 @@ static void __init xen_smp_prepare_boot_cpu(void)
|
|||
old memory can be recycled */
|
||||
make_lowmem_page_readwrite(xen_initial_gdt);
|
||||
|
||||
#ifdef CONFIG_X86_32
|
||||
/*
|
||||
* Xen starts us with XEN_FLAT_RING1_DS, but linux code
|
||||
* expects __USER_DS
|
||||
*/
|
||||
loadsegment(ds, __USER_DS);
|
||||
loadsegment(es, __USER_DS);
|
||||
#endif
|
||||
|
||||
xen_filter_cpu_maps();
|
||||
xen_setup_vcpu_info_placement();
|
||||
}
|
||||
|
|
|
@ -222,11 +222,16 @@ static int is_pmbr_valid(legacy_mbr *mbr, sector_t total_sectors)
	 * the disk size.
	 *
	 * Hybrid MBRs do not necessarily comply with this.
	 *
	 * Consider a bad value here to be a warning to support dd'ing
	 * an image from a smaller disk to a larger disk.
	 */
	if (ret == GPT_MBR_PROTECTIVE) {
		sz = le32_to_cpu(mbr->partition_record[part].size_in_lba);
		if (sz != (uint32_t) total_sectors - 1 && sz != 0xFFFFFFFF)
			ret = 0;
			pr_debug("GPT: mbr size in lba (%u) different than whole disk (%u).\n",
				 sz, min_t(uint32_t,
					   total_sectors - 1, 0xFFFFFFFF));
	}
done:
	return ret;
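
For context (an illustration, not code from this patch): the value being checked above is the size_in_lba field of the protective MBR's single 0xEE partition, which for a compliant disk covers everything after LBA 0, capped at the 32-bit maximum. A hypothetical helper expressing that expectation:

#include <linux/kernel.h>
#include <linux/types.h>

static inline uint32_t expected_pmbr_size_in_lba(uint64_t total_sectors)
{
	/* whole disk minus the MBR sector itself, saturating at 0xFFFFFFFF */
	return min_t(uint64_t, total_sectors - 1, 0xFFFFFFFFULL);
}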
@ -24,7 +24,7 @@ menuconfig ACPI
|
|||
are configured, ACPI is used.
|
||||
|
||||
The project home page for the Linux ACPI subsystem is here:
|
||||
<http://www.lesswatts.org/projects/acpi/>
|
||||
<https://01.org/linux-acpi>
|
||||
|
||||
Linux support for ACPI is based on Intel Corporation's ACPI
|
||||
Component Architecture (ACPI CA). For more information on the
|
||||
|
@ -123,9 +123,9 @@ config ACPI_BUTTON
|
|||
default y
|
||||
help
|
||||
This driver handles events on the power, sleep, and lid buttons.
|
||||
A daemon reads /proc/acpi/event and perform user-defined actions
|
||||
such as shutting down the system. This is necessary for
|
||||
software-controlled poweroff.
|
||||
A daemon reads events from input devices or via netlink and
|
||||
performs user-defined actions such as shutting down the system.
|
||||
This is necessary for software-controlled poweroff.
|
||||
|
||||
To compile this driver as a module, choose M here:
|
||||
the module will be called button.
|
||||
|
|
|
@ -1025,60 +1025,4 @@ void acpi_dev_pm_detach(struct device *dev, bool power_off)
|
|||
}
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(acpi_dev_pm_detach);
|
||||
|
||||
/**
|
||||
* acpi_dev_pm_add_dependent - Add physical device depending for PM.
|
||||
* @handle: Handle of ACPI device node.
|
||||
* @depdev: Device depending on that node for PM.
|
||||
*/
|
||||
void acpi_dev_pm_add_dependent(acpi_handle handle, struct device *depdev)
|
||||
{
|
||||
struct acpi_device_physical_node *dep;
|
||||
struct acpi_device *adev;
|
||||
|
||||
if (!depdev || acpi_bus_get_device(handle, &adev))
|
||||
return;
|
||||
|
||||
mutex_lock(&adev->physical_node_lock);
|
||||
|
||||
list_for_each_entry(dep, &adev->power_dependent, node)
|
||||
if (dep->dev == depdev)
|
||||
goto out;
|
||||
|
||||
dep = kzalloc(sizeof(*dep), GFP_KERNEL);
|
||||
if (dep) {
|
||||
dep->dev = depdev;
|
||||
list_add_tail(&dep->node, &adev->power_dependent);
|
||||
}
|
||||
|
||||
out:
|
||||
mutex_unlock(&adev->physical_node_lock);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(acpi_dev_pm_add_dependent);
|
||||
|
||||
/**
|
||||
* acpi_dev_pm_remove_dependent - Remove physical device depending for PM.
|
||||
* @handle: Handle of ACPI device node.
|
||||
* @depdev: Device depending on that node for PM.
|
||||
*/
|
||||
void acpi_dev_pm_remove_dependent(acpi_handle handle, struct device *depdev)
|
||||
{
|
||||
struct acpi_device_physical_node *dep;
|
||||
struct acpi_device *adev;
|
||||
|
||||
if (!depdev || acpi_bus_get_device(handle, &adev))
|
||||
return;
|
||||
|
||||
mutex_lock(&adev->physical_node_lock);
|
||||
|
||||
list_for_each_entry(dep, &adev->power_dependent, node)
|
||||
if (dep->dev == depdev) {
|
||||
list_del(&dep->node);
|
||||
kfree(dep);
|
||||
break;
|
||||
}
|
||||
|
||||
mutex_unlock(&adev->physical_node_lock);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(acpi_dev_pm_remove_dependent);
|
||||
#endif /* CONFIG_PM */
|
||||
|
|
|
@ -59,16 +59,9 @@ ACPI_MODULE_NAME("power");
|
|||
#define ACPI_POWER_RESOURCE_STATE_ON 0x01
|
||||
#define ACPI_POWER_RESOURCE_STATE_UNKNOWN 0xFF
|
||||
|
||||
struct acpi_power_dependent_device {
|
||||
struct list_head node;
|
||||
struct acpi_device *adev;
|
||||
struct work_struct work;
|
||||
};
|
||||
|
||||
struct acpi_power_resource {
|
||||
struct acpi_device device;
|
||||
struct list_head list_node;
|
||||
struct list_head dependent;
|
||||
char *name;
|
||||
u32 system_level;
|
||||
u32 order;
|
||||
|
@ -233,32 +226,6 @@ static int acpi_power_get_list_state(struct list_head *list, int *state)
|
|||
return 0;
|
||||
}
|
||||
|
||||
static void acpi_power_resume_dependent(struct work_struct *work)
|
||||
{
|
||||
struct acpi_power_dependent_device *dep;
|
||||
struct acpi_device_physical_node *pn;
|
||||
struct acpi_device *adev;
|
||||
int state;
|
||||
|
||||
dep = container_of(work, struct acpi_power_dependent_device, work);
|
||||
adev = dep->adev;
|
||||
if (acpi_power_get_inferred_state(adev, &state))
|
||||
return;
|
||||
|
||||
if (state > ACPI_STATE_D0)
|
||||
return;
|
||||
|
||||
mutex_lock(&adev->physical_node_lock);
|
||||
|
||||
list_for_each_entry(pn, &adev->physical_node_list, node)
|
||||
pm_request_resume(pn->dev);
|
||||
|
||||
list_for_each_entry(pn, &adev->power_dependent, node)
|
||||
pm_request_resume(pn->dev);
|
||||
|
||||
mutex_unlock(&adev->physical_node_lock);
|
||||
}
|
||||
|
||||
static int __acpi_power_on(struct acpi_power_resource *resource)
|
||||
{
|
||||
acpi_status status = AE_OK;
|
||||
|
@ -283,14 +250,8 @@ static int acpi_power_on_unlocked(struct acpi_power_resource *resource)
|
|||
resource->name));
|
||||
} else {
|
||||
result = __acpi_power_on(resource);
|
||||
if (result) {
|
||||
if (result)
|
||||
resource->ref_count--;
|
||||
} else {
|
||||
struct acpi_power_dependent_device *dep;
|
||||
|
||||
list_for_each_entry(dep, &resource->dependent, node)
|
||||
schedule_work(&dep->work);
|
||||
}
|
||||
}
|
||||
return result;
|
||||
}
|
||||
|
@ -390,52 +351,6 @@ static int acpi_power_on_list(struct list_head *list)
|
|||
return result;
|
||||
}
|
||||
|
||||
static void acpi_power_add_dependent(struct acpi_power_resource *resource,
|
||||
struct acpi_device *adev)
|
||||
{
|
||||
struct acpi_power_dependent_device *dep;
|
||||
|
||||
mutex_lock(&resource->resource_lock);
|
||||
|
||||
list_for_each_entry(dep, &resource->dependent, node)
|
||||
if (dep->adev == adev)
|
||||
goto out;
|
||||
|
||||
dep = kzalloc(sizeof(*dep), GFP_KERNEL);
|
||||
if (!dep)
|
||||
goto out;
|
||||
|
||||
dep->adev = adev;
|
||||
INIT_WORK(&dep->work, acpi_power_resume_dependent);
|
||||
list_add_tail(&dep->node, &resource->dependent);
|
||||
|
||||
out:
|
||||
mutex_unlock(&resource->resource_lock);
|
||||
}
|
||||
|
||||
static void acpi_power_remove_dependent(struct acpi_power_resource *resource,
|
||||
struct acpi_device *adev)
|
||||
{
|
||||
struct acpi_power_dependent_device *dep;
|
||||
struct work_struct *work = NULL;
|
||||
|
||||
mutex_lock(&resource->resource_lock);
|
||||
|
||||
list_for_each_entry(dep, &resource->dependent, node)
|
||||
if (dep->adev == adev) {
|
||||
list_del(&dep->node);
|
||||
work = &dep->work;
|
||||
break;
|
||||
}
|
||||
|
||||
mutex_unlock(&resource->resource_lock);
|
||||
|
||||
if (work) {
|
||||
cancel_work_sync(work);
|
||||
kfree(dep);
|
||||
}
|
||||
}
|
||||
|
||||
static struct attribute *attrs[] = {
|
||||
NULL,
|
||||
};
|
||||
|
@ -524,8 +439,6 @@ static void acpi_power_expose_hide(struct acpi_device *adev,
|
|||
|
||||
void acpi_power_add_remove_device(struct acpi_device *adev, bool add)
|
||||
{
|
||||
struct acpi_device_power_state *ps;
|
||||
struct acpi_power_resource_entry *entry;
|
||||
int state;
|
||||
|
||||
if (adev->wakeup.flags.valid)
|
||||
|
@ -535,16 +448,6 @@ void acpi_power_add_remove_device(struct acpi_device *adev, bool add)
|
|||
if (!adev->power.flags.power_resources)
|
||||
return;
|
||||
|
||||
ps = &adev->power.states[ACPI_STATE_D0];
|
||||
list_for_each_entry(entry, &ps->resources, node) {
|
||||
struct acpi_power_resource *resource = entry->resource;
|
||||
|
||||
if (add)
|
||||
acpi_power_add_dependent(resource, adev);
|
||||
else
|
||||
acpi_power_remove_dependent(resource, adev);
|
||||
}
|
||||
|
||||
for (state = ACPI_STATE_D0; state <= ACPI_STATE_D3_HOT; state++)
|
||||
acpi_power_expose_hide(adev,
|
||||
&adev->power.states[state].resources,
|
||||
|
@ -882,7 +785,6 @@ int acpi_add_power_resource(acpi_handle handle)
|
|||
acpi_init_device_object(device, handle, ACPI_BUS_TYPE_POWER,
|
||||
ACPI_STA_DEFAULT);
|
||||
mutex_init(&resource->resource_lock);
|
||||
INIT_LIST_HEAD(&resource->dependent);
|
||||
INIT_LIST_HEAD(&resource->list_node);
|
||||
resource->name = device->pnp.bus_id;
|
||||
strcpy(acpi_device_name(device), ACPI_POWER_DEVICE_NAME);
|
||||
|
@ -936,8 +838,10 @@ void acpi_resume_power_resources(void)
|
|||
mutex_lock(&resource->resource_lock);
|
||||
|
||||
result = acpi_power_get_state(resource->device.handle, &state);
|
||||
if (result)
|
||||
if (result) {
|
||||
mutex_unlock(&resource->resource_lock);
|
||||
continue;
|
||||
}
|
||||
|
||||
if (state == ACPI_POWER_RESOURCE_STATE_OFF
|
||||
&& resource->ref_count) {
|
||||
|
|
|
@ -999,7 +999,6 @@ int acpi_device_add(struct acpi_device *device,
|
|||
INIT_LIST_HEAD(&device->wakeup_list);
|
||||
INIT_LIST_HEAD(&device->physical_node_list);
|
||||
mutex_init(&device->physical_node_lock);
|
||||
INIT_LIST_HEAD(&device->power_dependent);
|
||||
|
||||
new_bus_id = kzalloc(sizeof(struct acpi_device_bus_id), GFP_KERNEL);
|
||||
if (!new_bus_id) {
|
||||
|
|
|
@ -1035,17 +1035,3 @@ void ata_acpi_on_disable(struct ata_device *dev)
|
|||
{
|
||||
ata_acpi_clear_gtf(dev);
|
||||
}
|
||||
|
||||
void ata_scsi_acpi_bind(struct ata_device *dev)
|
||||
{
|
||||
acpi_handle handle = ata_dev_acpi_handle(dev);
|
||||
if (handle)
|
||||
acpi_dev_pm_add_dependent(handle, &dev->sdev->sdev_gendev);
|
||||
}
|
||||
|
||||
void ata_scsi_acpi_unbind(struct ata_device *dev)
|
||||
{
|
||||
acpi_handle handle = ata_dev_acpi_handle(dev);
|
||||
if (handle)
|
||||
acpi_dev_pm_remove_dependent(handle, &dev->sdev->sdev_gendev);
|
||||
}
|
||||
|
|
|
@ -3679,7 +3679,6 @@ void ata_scsi_scan_host(struct ata_port *ap, int sync)
|
|||
if (!IS_ERR(sdev)) {
|
||||
dev->sdev = sdev;
|
||||
scsi_device_put(sdev);
|
||||
ata_scsi_acpi_bind(dev);
|
||||
} else {
|
||||
dev->sdev = NULL;
|
||||
}
|
||||
|
@ -3767,8 +3766,6 @@ static void ata_scsi_remove_dev(struct ata_device *dev)
|
|||
struct scsi_device *sdev;
|
||||
unsigned long flags;
|
||||
|
||||
ata_scsi_acpi_unbind(dev);
|
||||
|
||||
/* Alas, we need to grab scan_mutex to ensure SCSI device
|
||||
* state doesn't change underneath us and thus
|
||||
* scsi_device_get() always succeeds. The mutex locking can
|
||||
|
|
|
@ -121,8 +121,6 @@ extern void ata_acpi_set_state(struct ata_port *ap, pm_message_t state);
|
|||
extern void ata_acpi_bind_port(struct ata_port *ap);
|
||||
extern void ata_acpi_bind_dev(struct ata_device *dev);
|
||||
extern acpi_handle ata_dev_acpi_handle(struct ata_device *dev);
|
||||
extern void ata_scsi_acpi_bind(struct ata_device *dev);
|
||||
extern void ata_scsi_acpi_unbind(struct ata_device *dev);
|
||||
#else
|
||||
static inline void ata_acpi_dissociate(struct ata_host *host) { }
|
||||
static inline int ata_acpi_on_suspend(struct ata_port *ap) { return 0; }
|
||||
|
@ -133,8 +131,6 @@ static inline void ata_acpi_set_state(struct ata_port *ap,
|
|||
pm_message_t state) { }
|
||||
static inline void ata_acpi_bind_port(struct ata_port *ap) {}
|
||||
static inline void ata_acpi_bind_dev(struct ata_device *dev) {}
|
||||
static inline void ata_scsi_acpi_bind(struct ata_device *dev) {}
|
||||
static inline void ata_scsi_acpi_unbind(struct ata_device *dev) {}
|
||||
#endif
|
||||
|
||||
/* libata-scsi.c */
|
||||
|
|
|
@ -333,8 +333,10 @@ store_mem_state(struct device *dev,
|
|||
online_type = ONLINE_KEEP;
|
||||
else if (!strncmp(buf, "offline", min_t(int, count, 7)))
|
||||
online_type = -1;
|
||||
else
|
||||
return -EINVAL;
|
||||
else {
|
||||
ret = -EINVAL;
|
||||
goto err;
|
||||
}
|
||||
|
||||
switch (online_type) {
|
||||
case ONLINE_KERNEL:
|
||||
|
@ -357,6 +359,7 @@ store_mem_state(struct device *dev,
|
|||
ret = -EINVAL; /* should never happen */
|
||||
}
|
||||
|
||||
err:
|
||||
unlock_device_hotplug();
|
||||
|
||||
if (ret)
|
||||
|
|
|
@ -640,7 +640,7 @@ struct timer_rand_state {
|
|||
*/
|
||||
void add_device_randomness(const void *buf, unsigned int size)
|
||||
{
|
||||
unsigned long time = get_cycles() ^ jiffies;
|
||||
unsigned long time = random_get_entropy() ^ jiffies;
|
||||
|
||||
mix_pool_bytes(&input_pool, buf, size, NULL);
|
||||
mix_pool_bytes(&input_pool, &time, sizeof(time), NULL);
|
||||
|
@ -677,7 +677,7 @@ static void add_timer_randomness(struct timer_rand_state *state, unsigned num)
|
|||
goto out;
|
||||
|
||||
sample.jiffies = jiffies;
|
||||
sample.cycles = get_cycles();
|
||||
sample.cycles = random_get_entropy();
|
||||
sample.num = num;
|
||||
mix_pool_bytes(&input_pool, &sample, sizeof(sample), NULL);
|
||||
|
||||
|
@ -744,7 +744,7 @@ void add_interrupt_randomness(int irq, int irq_flags)
|
|||
struct fast_pool *fast_pool = &__get_cpu_var(irq_randomness);
|
||||
struct pt_regs *regs = get_irq_regs();
|
||||
unsigned long now = jiffies;
|
||||
__u32 input[4], cycles = get_cycles();
|
||||
__u32 input[4], cycles = random_get_entropy();
|
||||
|
||||
input[0] = cycles ^ jiffies;
|
||||
input[1] = irq;
|
||||
|
@ -1459,12 +1459,11 @@ struct ctl_table random_table[] = {
|
|||
|
||||
static u32 random_int_secret[MD5_MESSAGE_BYTES / 4] ____cacheline_aligned;
|
||||
|
||||
static int __init random_int_secret_init(void)
|
||||
int random_int_secret_init(void)
|
||||
{
|
||||
get_random_bytes(random_int_secret, sizeof(random_int_secret));
|
||||
return 0;
|
||||
}
|
||||
late_initcall(random_int_secret_init);
|
||||
|
||||
/*
|
||||
* Get a random word for internal kernel use only. Similar to urandom but
|
||||
|
@ -1483,7 +1482,7 @@ unsigned int get_random_int(void)
|
|||
|
||||
hash = get_cpu_var(get_random_int_hash);
|
||||
|
||||
hash[0] += current->pid + jiffies + get_cycles();
|
||||
hash[0] += current->pid + jiffies + random_get_entropy();
|
||||
md5_transform(hash, random_int_secret);
|
||||
ret = hash[0];
|
||||
put_cpu_var(get_random_int_hash);
|
||||
|
|
|
@ -10,6 +10,7 @@
|
|||
#include <linux/errno.h>
|
||||
#include <linux/err.h>
|
||||
#include <linux/interrupt.h>
|
||||
#include <xen/xen.h>
|
||||
#include <xen/events.h>
|
||||
#include <xen/interface/io/tpmif.h>
|
||||
#include <xen/grant_table.h>
|
||||
|
|
|
@ -383,6 +383,7 @@ static void intel_pstate_get_min_max(struct cpudata *cpu, int *min, int *max)
|
|||
static void intel_pstate_set_pstate(struct cpudata *cpu, int pstate)
|
||||
{
|
||||
int max_perf, min_perf;
|
||||
u64 val;
|
||||
|
||||
intel_pstate_get_min_max(cpu, &min_perf, &max_perf);
|
||||
|
||||
|
@ -394,11 +395,11 @@ static void intel_pstate_set_pstate(struct cpudata *cpu, int pstate)
|
|||
trace_cpu_frequency(pstate * 100000, cpu->cpu);
|
||||
|
||||
cpu->pstate.current_pstate = pstate;
|
||||
val = pstate << 8;
|
||||
if (limits.no_turbo)
|
||||
wrmsrl(MSR_IA32_PERF_CTL, BIT(32) | (pstate << 8));
|
||||
else
|
||||
wrmsrl(MSR_IA32_PERF_CTL, pstate << 8);
|
||||
val |= (u64)1 << 32;
|
||||
|
||||
wrmsrl(MSR_IA32_PERF_CTL, val);
|
||||
}
|
||||
|
||||
static inline void intel_pstate_pstate_increase(struct cpudata *cpu, int steps)
|
||||
|
@ -637,8 +638,8 @@ static int intel_pstate_cpu_exit(struct cpufreq_policy *policy)
|
|||
|
||||
static int intel_pstate_cpu_init(struct cpufreq_policy *policy)
|
||||
{
|
||||
int rc, min_pstate, max_pstate;
|
||||
struct cpudata *cpu;
|
||||
int rc;
|
||||
|
||||
rc = intel_pstate_init_cpu(policy->cpu);
|
||||
if (rc)
|
||||
|
@ -652,9 +653,8 @@ static int intel_pstate_cpu_init(struct cpufreq_policy *policy)
|
|||
else
|
||||
policy->policy = CPUFREQ_POLICY_POWERSAVE;
|
||||
|
||||
intel_pstate_get_min_max(cpu, &min_pstate, &max_pstate);
|
||||
policy->min = min_pstate * 100000;
|
||||
policy->max = max_pstate * 100000;
|
||||
policy->min = cpu->pstate.min_pstate * 100000;
|
||||
policy->max = cpu->pstate.turbo_pstate * 100000;
|
||||
|
||||
/* cpuinfo and default policy values */
|
||||
policy->cpuinfo.min_freq = cpu->pstate.min_pstate * 100000;
|
||||
|
|
|
@ -166,7 +166,7 @@ static void __init s3c64xx_cpufreq_config_regulator(void)
|
|||
if (freq->frequency == CPUFREQ_ENTRY_INVALID)
|
||||
continue;
|
||||
|
||||
dvfs = &s3c64xx_dvfs_table[freq->index];
|
||||
dvfs = &s3c64xx_dvfs_table[freq->driver_data];
|
||||
found = 0;
|
||||
|
||||
for (i = 0; i < count; i++) {
|
||||
|
|
|
@ -306,6 +306,7 @@ static struct dma_async_tx_descriptor *edma_prep_slave_sg(
|
|||
EDMA_SLOT_ANY);
|
||||
if (echan->slot[i] < 0) {
|
||||
dev_err(dev, "Failed to allocate slot\n");
|
||||
kfree(edesc);
|
||||
return NULL;
|
||||
}
|
||||
}
|
||||
|
|
|
@ -93,6 +93,7 @@ struct hpb_dmae_chan {
|
|||
void __iomem *base;
|
||||
const struct hpb_dmae_slave_config *cfg;
|
||||
char dev_id[16]; /* unique name per DMAC of channel */
|
||||
dma_addr_t slave_addr;
|
||||
};
|
||||
|
||||
struct hpb_dmae_device {
|
||||
|
@ -432,7 +433,6 @@ hpb_dmae_alloc_chan_resources(struct hpb_dmae_chan *hpb_chan,
|
|||
hpb_chan->xfer_mode = XFER_DOUBLE;
|
||||
} else {
|
||||
dev_err(hpb_chan->shdma_chan.dev, "DCR setting error");
|
||||
shdma_free_irq(&hpb_chan->shdma_chan);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
|
@ -446,7 +446,8 @@ hpb_dmae_alloc_chan_resources(struct hpb_dmae_chan *hpb_chan,
|
|||
return 0;
|
||||
}
|
||||
|
||||
static int hpb_dmae_set_slave(struct shdma_chan *schan, int slave_id, bool try)
|
||||
static int hpb_dmae_set_slave(struct shdma_chan *schan, int slave_id,
|
||||
dma_addr_t slave_addr, bool try)
|
||||
{
|
||||
struct hpb_dmae_chan *chan = to_chan(schan);
|
||||
const struct hpb_dmae_slave_config *sc =
|
||||
|
@ -457,6 +458,7 @@ static int hpb_dmae_set_slave(struct shdma_chan *schan, int slave_id, bool try)
|
|||
if (try)
|
||||
return 0;
|
||||
chan->cfg = sc;
|
||||
chan->slave_addr = slave_addr ? : sc->addr;
|
||||
return hpb_dmae_alloc_chan_resources(chan, sc);
|
||||
}
|
||||
|
||||
|
@ -468,7 +470,7 @@ static dma_addr_t hpb_dmae_slave_addr(struct shdma_chan *schan)
|
|||
{
|
||||
struct hpb_dmae_chan *chan = to_chan(schan);
|
||||
|
||||
return chan->cfg->addr;
|
||||
return chan->slave_addr;
|
||||
}
|
||||
|
||||
static struct shdma_desc *hpb_dmae_embedded_desc(void *buf, int i)
|
||||
|
@ -614,7 +616,6 @@ static void hpb_dmae_chan_remove(struct hpb_dmae_device *hpbdev)
|
|||
shdma_for_each_chan(schan, &hpbdev->shdma_dev, i) {
|
||||
BUG_ON(!schan);
|
||||
|
||||
shdma_free_irq(schan);
|
||||
shdma_chan_remove(schan);
|
||||
}
|
||||
dma_dev->chancnt = 0;
|
||||
|
|
|
@ -248,13 +248,14 @@ static void lp_gpio_irq_handler(unsigned hwirq, struct irq_desc *desc)
|
|||
struct lp_gpio *lg = irq_data_get_irq_handler_data(data);
|
||||
struct irq_chip *chip = irq_data_get_irq_chip(data);
|
||||
u32 base, pin, mask;
|
||||
unsigned long reg, pending;
|
||||
unsigned long reg, ena, pending;
|
||||
|
||||
/* check from GPIO controller which pin triggered the interrupt */
|
||||
for (base = 0; base < lg->chip.ngpio; base += 32) {
|
||||
reg = lp_gpio_reg(&lg->chip, base, LP_INT_STAT);
|
||||
ena = lp_gpio_reg(&lg->chip, base, LP_INT_ENABLE);
|
||||
|
||||
while ((pending = inl(reg))) {
|
||||
while ((pending = (inl(reg) & inl(ena)))) {
|
||||
unsigned irq;
|
||||
|
||||
pin = __ffs(pending);
|
||||
|
|
|
@ -183,7 +183,7 @@ static struct gpio_desc *gpiochip_offset_to_desc(struct gpio_chip *chip,
|
|||
*/
|
||||
static int desc_to_gpio(const struct gpio_desc *desc)
|
||||
{
|
||||
return desc->chip->base + gpio_chip_hwgpio(desc);
|
||||
return desc - &gpio_desc[0];
|
||||
}
|
||||
|
||||
|
||||
|
@ -1452,7 +1452,7 @@ static int gpiod_request(struct gpio_desc *desc, const char *label)
|
|||
int status = -EPROBE_DEFER;
|
||||
unsigned long flags;
|
||||
|
||||
if (!desc || !desc->chip) {
|
||||
if (!desc) {
|
||||
pr_warn("%s: invalid GPIO\n", __func__);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
@ -1460,6 +1460,8 @@ static int gpiod_request(struct gpio_desc *desc, const char *label)
|
|||
spin_lock_irqsave(&gpio_lock, flags);
|
||||
|
||||
chip = desc->chip;
|
||||
if (chip == NULL)
|
||||
goto done;
|
||||
|
||||
if (!try_module_get(chip->owner))
|
||||
goto done;
|
||||
|
|
|
@ -2925,6 +2925,8 @@ int drm_edid_to_speaker_allocation(struct edid *edid, u8 **sadb)
|
|||
/* Speaker Allocation Data Block */
|
||||
if (dbl == 3) {
|
||||
*sadb = kmalloc(dbl, GFP_KERNEL);
|
||||
if (!*sadb)
|
||||
return -ENOMEM;
|
||||
memcpy(*sadb, &db[1], dbl);
|
||||
count = dbl;
|
||||
break;
|
||||
|
|
|
@ -407,14 +407,6 @@ static void drm_fb_helper_dpms(struct fb_info *info, int dpms_mode)
|
|||
struct drm_connector *connector;
|
||||
int i, j;
|
||||
|
||||
/*
|
||||
* fbdev->blank can be called from irq context in case of a panic.
|
||||
* Since we already have our own special panic handler which will
|
||||
* restore the fbdev console mode completely, just bail out early.
|
||||
*/
|
||||
if (oops_in_progress)
|
||||
return;
|
||||
|
||||
/*
|
||||
* fbdev->blank can be called from irq context in case of a panic.
|
||||
* Since we already have our own special panic handler which will
|
||||
|
|
|
@ -204,6 +204,7 @@ static int psb_gtt_attach_pages(struct gtt_range *gt)
|
|||
if (IS_ERR(pages))
|
||||
return PTR_ERR(pages);
|
||||
|
||||
gt->npage = gt->gem.size / PAGE_SIZE;
|
||||
gt->pages = pages;
|
||||
|
||||
return 0;
|
||||
|
|
|
@ -1290,12 +1290,9 @@ static int i915_load_modeset_init(struct drm_device *dev)
|
|||
* then we do not take part in VGA arbitration and the
|
||||
* vga_client_register() fails with -ENODEV.
|
||||
*/
|
||||
if (!HAS_PCH_SPLIT(dev)) {
|
||||
ret = vga_client_register(dev->pdev, dev, NULL,
|
||||
i915_vga_set_decode);
|
||||
ret = vga_client_register(dev->pdev, dev, NULL, i915_vga_set_decode);
|
||||
if (ret && ret != -ENODEV)
|
||||
goto out;
|
||||
}
|
||||
|
||||
intel_register_dsm_handler();
|
||||
|
||||
|
@ -1351,12 +1348,6 @@ static int i915_load_modeset_init(struct drm_device *dev)
|
|||
*/
|
||||
intel_fbdev_initial_config(dev);
|
||||
|
||||
/*
|
||||
* Must do this after fbcon init so that
|
||||
* vgacon_save_screen() works during the handover.
|
||||
*/
|
||||
i915_disable_vga_mem(dev);
|
||||
|
||||
/* Only enable hotplug handling once the fbdev is fully set up. */
|
||||
dev_priv->enable_hotplug_processing = true;
|
||||
|
||||
|
|
|
@ -3881,6 +3881,9 @@
|
|||
#define GEN7_SQ_CHICKEN_MBCUNIT_CONFIG 0x9030
|
||||
#define GEN7_SQ_CHICKEN_MBCUNIT_SQINTMOB (1<<11)
|
||||
|
||||
#define HSW_SCRATCH1 0xb038
|
||||
#define HSW_SCRATCH1_L3_DATA_ATOMICS_DISABLE (1<<27)
|
||||
|
||||
#define HSW_FUSE_STRAP 0x42014
|
||||
#define HSW_CDCLK_LIMIT (1 << 24)
|
||||
|
||||
|
@ -4728,6 +4731,9 @@
|
|||
#define GEN7_ROW_CHICKEN2_GT2 0xf4f4
|
||||
#define DOP_CLOCK_GATING_DISABLE (1<<0)
|
||||
|
||||
#define HSW_ROW_CHICKEN3 0xe49c
|
||||
#define HSW_ROW_CHICKEN3_L3_GLOBAL_ATOMICS_DISABLE (1 << 6)
|
||||
|
||||
#define G4X_AUD_VID_DID (dev_priv->info->display_mmio_offset + 0x62020)
|
||||
#define INTEL_AUDIO_DEVCL 0x808629FB
|
||||
#define INTEL_AUDIO_DEVBLC 0x80862801
|
||||
|
|
|
@ -3941,8 +3941,6 @@ static void intel_connector_check_state(struct intel_connector *connector)
|
|||
* consider. */
|
||||
void intel_connector_dpms(struct drm_connector *connector, int mode)
|
||||
{
|
||||
struct intel_encoder *encoder = intel_attached_encoder(connector);
|
||||
|
||||
/* All the simple cases only support two dpms states. */
|
||||
if (mode != DRM_MODE_DPMS_ON)
|
||||
mode = DRM_MODE_DPMS_OFF;
|
||||
|
@ -3953,10 +3951,8 @@ void intel_connector_dpms(struct drm_connector *connector, int mode)
|
|||
connector->dpms = mode;
|
||||
|
||||
/* Only need to change hw state when actually enabled */
|
||||
if (encoder->base.crtc)
|
||||
intel_encoder_dpms(encoder, mode);
|
||||
else
|
||||
WARN_ON(encoder->connectors_active != false);
|
||||
if (connector->encoder)
|
||||
intel_encoder_dpms(to_intel_encoder(connector->encoder), mode);
|
||||
|
||||
intel_modeset_check_state(connector->dev);
|
||||
}
|
||||
|
@ -10049,33 +10045,6 @@ static void i915_disable_vga(struct drm_device *dev)
|
|||
POSTING_READ(vga_reg);
|
||||
}
|
||||
|
||||
static void i915_enable_vga_mem(struct drm_device *dev)
|
||||
{
|
||||
/* Enable VGA memory on Intel HD */
|
||||
if (HAS_PCH_SPLIT(dev)) {
|
||||
vga_get_uninterruptible(dev->pdev, VGA_RSRC_LEGACY_IO);
|
||||
outb(inb(VGA_MSR_READ) | VGA_MSR_MEM_EN, VGA_MSR_WRITE);
|
||||
vga_set_legacy_decoding(dev->pdev, VGA_RSRC_LEGACY_IO |
|
||||
VGA_RSRC_LEGACY_MEM |
|
||||
VGA_RSRC_NORMAL_IO |
|
||||
VGA_RSRC_NORMAL_MEM);
|
||||
vga_put(dev->pdev, VGA_RSRC_LEGACY_IO);
|
||||
}
|
||||
}
|
||||
|
||||
void i915_disable_vga_mem(struct drm_device *dev)
|
||||
{
|
||||
/* Disable VGA memory on Intel HD */
|
||||
if (HAS_PCH_SPLIT(dev)) {
|
||||
vga_get_uninterruptible(dev->pdev, VGA_RSRC_LEGACY_IO);
|
||||
outb(inb(VGA_MSR_READ) & ~VGA_MSR_MEM_EN, VGA_MSR_WRITE);
|
||||
vga_set_legacy_decoding(dev->pdev, VGA_RSRC_LEGACY_IO |
|
||||
VGA_RSRC_NORMAL_IO |
|
||||
VGA_RSRC_NORMAL_MEM);
|
||||
vga_put(dev->pdev, VGA_RSRC_LEGACY_IO);
|
||||
}
|
||||
}
|
||||
|
||||
void intel_modeset_init_hw(struct drm_device *dev)
|
||||
{
|
||||
intel_init_power_well(dev);
|
||||
|
@ -10354,7 +10323,6 @@ void i915_redisable_vga(struct drm_device *dev)
|
|||
if (I915_READ(vga_reg) != VGA_DISP_DISABLE) {
|
||||
DRM_DEBUG_KMS("Something enabled VGA plane, disabling it\n");
|
||||
i915_disable_vga(dev);
|
||||
i915_disable_vga_mem(dev);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -10568,8 +10536,6 @@ void intel_modeset_cleanup(struct drm_device *dev)
|
|||
|
||||
intel_disable_fbc(dev);
|
||||
|
||||
i915_enable_vga_mem(dev);
|
||||
|
||||
intel_disable_gt_powersave(dev);
|
||||
|
||||
ironlake_teardown_rc6(dev);
|
||||
|
|
|
@ -1467,7 +1467,7 @@ static void intel_edp_psr_setup(struct intel_dp *intel_dp)
|
|||
|
||||
/* Avoid continuous PSR exit by masking memup and hpd */
|
||||
I915_WRITE(EDP_PSR_DEBUG_CTL, EDP_PSR_DEBUG_MASK_MEMUP |
|
||||
EDP_PSR_DEBUG_MASK_HPD);
|
||||
EDP_PSR_DEBUG_MASK_HPD | EDP_PSR_DEBUG_MASK_LPSP);
|
||||
|
||||
intel_dp->psr_setup_done = true;
|
||||
}
|
||||
|
|
|
@ -793,6 +793,5 @@ extern void hsw_pc8_disable_interrupts(struct drm_device *dev);
|
|||
extern void hsw_pc8_restore_interrupts(struct drm_device *dev);
|
||||
extern void intel_aux_display_runtime_get(struct drm_i915_private *dev_priv);
|
||||
extern void intel_aux_display_runtime_put(struct drm_i915_private *dev_priv);
|
||||
extern void i915_disable_vga_mem(struct drm_device *dev);
|
||||
|
||||
#endif /* __INTEL_DRV_H__ */
|
||||
|
|
|
@ -3864,8 +3864,6 @@ static void valleyview_enable_rps(struct drm_device *dev)
|
|||
dev_priv->rps.rpe_delay),
|
||||
dev_priv->rps.rpe_delay);
|
||||
|
||||
INIT_DELAYED_WORK(&dev_priv->rps.vlv_work, vlv_rps_timer_work);
|
||||
|
||||
valleyview_set_rps(dev_priv->dev, dev_priv->rps.rpe_delay);
|
||||
|
||||
gen6_enable_rps_interrupts(dev);
|
||||
|
@ -4955,6 +4953,11 @@ static void haswell_init_clock_gating(struct drm_device *dev)
|
|||
I915_WRITE(GEN7_L3_CHICKEN_MODE_REGISTER,
|
||||
GEN7_WA_L3_CHICKEN_MODE);
|
||||
|
||||
/* L3 caching of data atomics doesn't work -- disable it. */
|
||||
I915_WRITE(HSW_SCRATCH1, HSW_SCRATCH1_L3_DATA_ATOMICS_DISABLE);
|
||||
I915_WRITE(HSW_ROW_CHICKEN3,
|
||||
_MASKED_BIT_ENABLE(HSW_ROW_CHICKEN3_L3_GLOBAL_ATOMICS_DISABLE));
|
||||
|
||||
/* This is required by WaCatErrorRejectionIssue:hsw */
|
||||
I915_WRITE(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG,
|
||||
I915_READ(GEN7_SQ_CHICKEN_MBCUNIT_CONFIG) |
|
||||
|
@ -5681,5 +5684,7 @@ void intel_pm_init(struct drm_device *dev)
|
|||
|
||||
INIT_DELAYED_WORK(&dev_priv->rps.delayed_resume_work,
|
||||
intel_gen6_powersave_work);
|
||||
|
||||
INIT_DELAYED_WORK(&dev_priv->rps.vlv_work, vlv_rps_timer_work);
|
||||
}
|
||||
|
||||
|
|
|
@ -113,7 +113,7 @@ nouveau_mc_create_(struct nouveau_object *parent, struct nouveau_object *engine,
		pmc->use_msi = false;
		break;
	default:
		pmc->use_msi = nouveau_boolopt(device->cfgopt, "NvMSI", true);
		pmc->use_msi = nouveau_boolopt(device->cfgopt, "NvMSI", false);
		if (pmc->use_msi) {
			pmc->use_msi = pci_enable_msi(device->pdev) == 0;
			if (pmc->use_msi) {
@ -1930,7 +1930,7 @@ static int btc_set_mc_special_registers(struct radeon_device *rdev,
|
|||
}
|
||||
j++;
|
||||
|
||||
if (j > SMC_EVERGREEN_MC_REGISTER_ARRAY_SIZE)
|
||||
if (j >= SMC_EVERGREEN_MC_REGISTER_ARRAY_SIZE)
|
||||
return -EINVAL;
|
||||
|
||||
tmp = RREG32(MC_PMG_CMD_MRS);
|
||||
|
@ -1945,7 +1945,7 @@ static int btc_set_mc_special_registers(struct radeon_device *rdev,
|
|||
}
|
||||
j++;
|
||||
|
||||
if (j > SMC_EVERGREEN_MC_REGISTER_ARRAY_SIZE)
|
||||
if (j >= SMC_EVERGREEN_MC_REGISTER_ARRAY_SIZE)
|
||||
return -EINVAL;
|
||||
break;
|
||||
case MC_SEQ_RESERVE_M >> 2:
|
||||
|
@ -1959,7 +1959,7 @@ static int btc_set_mc_special_registers(struct radeon_device *rdev,
|
|||
}
|
||||
j++;
|
||||
|
||||
if (j > SMC_EVERGREEN_MC_REGISTER_ARRAY_SIZE)
|
||||
if (j >= SMC_EVERGREEN_MC_REGISTER_ARRAY_SIZE)
|
||||
return -EINVAL;
|
||||
break;
|
||||
default:
|
||||
|
|
|
@ -77,6 +77,8 @@ static void cik_pcie_gen3_enable(struct radeon_device *rdev);
|
|||
static void cik_program_aspm(struct radeon_device *rdev);
|
||||
static void cik_init_pg(struct radeon_device *rdev);
|
||||
static void cik_init_cg(struct radeon_device *rdev);
|
||||
static void cik_fini_pg(struct radeon_device *rdev);
|
||||
static void cik_fini_cg(struct radeon_device *rdev);
|
||||
static void cik_enable_gui_idle_interrupt(struct radeon_device *rdev,
|
||||
bool enable);
|
||||
|
||||
|
@ -4185,6 +4187,10 @@ static void cik_gpu_soft_reset(struct radeon_device *rdev, u32 reset_mask)
|
|||
dev_info(rdev->dev, " VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n",
|
||||
RREG32(VM_CONTEXT1_PROTECTION_FAULT_STATUS));
|
||||
|
||||
/* disable CG/PG */
|
||||
cik_fini_pg(rdev);
|
||||
cik_fini_cg(rdev);
|
||||
|
||||
/* stop the rlc */
|
||||
cik_rlc_stop(rdev);
|
||||
|
||||
|
|
|
@ -3131,7 +3131,7 @@ static void evergreen_gpu_init(struct radeon_device *rdev)
|
|||
rdev->config.evergreen.sx_max_export_size = 256;
|
||||
rdev->config.evergreen.sx_max_export_pos_size = 64;
|
||||
rdev->config.evergreen.sx_max_export_smx_size = 192;
|
||||
rdev->config.evergreen.max_hw_contexts = 8;
|
||||
rdev->config.evergreen.max_hw_contexts = 4;
|
||||
rdev->config.evergreen.sq_num_cf_insts = 2;
|
||||
|
||||
rdev->config.evergreen.sc_prim_fifo_size = 0x40;
|
||||
|
|
|
@ -288,8 +288,7 @@ void evergreen_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode
|
|||
/* fglrx clears sth in AFMT_AUDIO_PACKET_CONTROL2 here */
|
||||
|
||||
WREG32(HDMI_ACR_PACKET_CONTROL + offset,
|
||||
HDMI_ACR_AUTO_SEND | /* allow hw to sent ACR packets when required */
|
||||
HDMI_ACR_SOURCE); /* select SW CTS value */
|
||||
HDMI_ACR_AUTO_SEND); /* allow hw to sent ACR packets when required */
|
||||
|
||||
evergreen_hdmi_update_ACR(encoder, mode->clock);
|
||||
|
||||
|
|
|
@ -1501,7 +1501,7 @@
|
|||
* 6. COMMAND [29:22] | BYTE_COUNT [20:0]
|
||||
*/
|
||||
# define PACKET3_CP_DMA_DST_SEL(x) ((x) << 20)
|
||||
/* 0 - SRC_ADDR
|
||||
/* 0 - DST_ADDR
|
||||
* 1 - GDS
|
||||
*/
|
||||
# define PACKET3_CP_DMA_ENGINE(x) ((x) << 27)
|
||||
|
@ -1516,7 +1516,7 @@
|
|||
# define PACKET3_CP_DMA_CP_SYNC (1 << 31)
|
||||
/* COMMAND */
|
||||
# define PACKET3_CP_DMA_DIS_WC (1 << 21)
|
||||
# define PACKET3_CP_DMA_CMD_SRC_SWAP(x) ((x) << 23)
|
||||
# define PACKET3_CP_DMA_CMD_SRC_SWAP(x) ((x) << 22)
|
||||
/* 0 - none
|
||||
* 1 - 8 in 16
|
||||
* 2 - 8 in 32
|
||||
|
|
Some files were not shown because too many files have changed in this diff.