mirror of https://gitee.com/openkylin/linux.git
Merge git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net
The mptcp conflict was overlapping additions. The SMC conflict was an addition and a removal happening at the same time.

Signed-off-by: David S. Miller <davem@davemloft.net>

commit 9f6e055907
@@ -100,6 +100,10 @@ modules.order
 /include/ksym/
 /arch/*/include/generated/
 
+# Generated lkdtm tests
+/tools/testing/selftests/lkdtm/*.sh
+!/tools/testing/selftests/lkdtm/run.sh
+
 # stgit generated dirs
 patches-*
CREDITS (5 changes)

@@ -567,6 +567,11 @@ D: Original author of Amiga FFS filesystem
 S: Orlando, Florida
 S: USA
 
+N: Paul Burton
+E: paulburton@kernel.org
+W: https://pburton.com
+D: MIPS maintainer 2018-2020
+
 N: Lennert Buytenhek
 E: kernel@wantstofly.org
 D: Original (2.4) rewrite of the ethernet bridging code
@@ -62,6 +62,30 @@ Or more shorter, written as following::
 In both styles, same key words are automatically merged when parsing it
 at boot time. So you can append similar trees or key-values.
 
+Same-key Values
+---------------
+
+It is prohibited that two or more values or arrays share a same-key.
+For example,::
+
+ foo = bar, baz
+ foo = qux  # !ERROR! we can not re-define same key
+
+If you want to append the value to existing key as an array member,
+you can use ``+=`` operator. For example::
+
+ foo = bar, baz
+ foo += qux
+
+In this case, the key ``foo`` has ``bar``, ``baz`` and ``qux``.
+
+However, a sub-key and a value can not co-exist under a parent key.
+For example, following config is NOT allowed.::
+
+ foo = value1
+ foo.bar = value2 # !ERROR! subkey "bar" and value "value1" can NOT co-exist
+
 Comments
 --------
@@ -102,9 +126,13 @@ Boot Kernel With a Boot Config
 ==============================
 
 Since the boot configuration file is loaded with initrd, it will be added
-to the end of the initrd (initramfs) image file. The Linux kernel decodes
-the last part of the initrd image in memory to get the boot configuration
-data.
+to the end of the initrd (initramfs) image file with size, checksum and
+12-byte magic word as below.
+
+[initrd][bootconfig][size(u32)][checksum(u32)][#BOOTCONFIG\n]
+
+The Linux kernel decodes the last part of the initrd image in memory to
+get the boot configuration data.
 Because of this "piggyback" method, there is no need to change or
 update the boot loader and the kernel image itself.
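For illustration only (not part of this commit): a minimal C sketch of how a tool could append a boot configuration blob to an initrd using the layout documented above. It assumes the checksum is a plain 32-bit byte-wise sum on a little-endian host; the in-tree tools/bootconfig utility is the authoritative implementation.

/* Hypothetical helper: append a bootconfig blob to an initrd image.
 * Layout assumed from the documentation above:
 *   [initrd][bootconfig][size(u32)][checksum(u32)]["#BOOTCONFIG\n"]
 */
#include <stdint.h>
#include <stdio.h>

static uint32_t sum32(const unsigned char *p, uint32_t len)
{
	uint32_t csum = 0;	/* assumed: simple byte-wise sum */

	while (len--)
		csum += *p++;
	return csum;
}

int append_bootconfig(FILE *initrd, const unsigned char *cfg, uint32_t size)
{
	uint32_t csum = sum32(cfg, size);
	static const char magic[] = "#BOOTCONFIG\n";	/* 12 bytes, no NUL */

	if (fseek(initrd, 0, SEEK_END) ||
	    fwrite(cfg, 1, size, initrd) != size ||
	    fwrite(&size, 4, 1, initrd) != 1 ||	/* little-endian host assumed */
	    fwrite(&csum, 4, 1, initrd) != 1 ||
	    fwrite(magic, 1, sizeof(magic) - 1, initrd) != sizeof(magic) - 1)
		return -1;
	return 0;
}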
@@ -129,7 +129,7 @@ this logic.
 
 As a single binary will need to support both 48-bit and 52-bit VA
 spaces, the VMEMMAP must be sized large enough for 52-bit VAs and
-also must be sized large enought to accommodate a fixed PAGE_OFFSET.
+also must be sized large enough to accommodate a fixed PAGE_OFFSET.
 
 Most code in the kernel should not need to consider the VA_BITS, for
 code that does need to know the VA size the variables are
@@ -44,8 +44,15 @@ The AArch64 Tagged Address ABI has two stages of relaxation depending
 how the user addresses are used by the kernel:
 
 1. User addresses not accessed by the kernel but used for address space
-   management (e.g. ``mmap()``, ``mprotect()``, ``madvise()``). The use
-   of valid tagged pointers in this context is always allowed.
+   management (e.g. ``mprotect()``, ``madvise()``). The use of valid
+   tagged pointers in this context is allowed with the exception of
+   ``brk()``, ``mmap()`` and the ``new_address`` argument to
+   ``mremap()`` as these have the potential to alias with existing
+   user addresses.
+
+   NOTE: This behaviour changed in v5.6 and so some earlier kernels may
+   incorrectly accept valid tagged pointers for the ``brk()``,
+   ``mmap()`` and ``mremap()`` system calls.
 
 2. User addresses accessed by the kernel (e.g. ``write()``). This ABI
    relaxation is disabled by default and the application thread needs to
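For illustration only (not part of this commit): a hedged user-space sketch of the relaxed rule above — a tagged pointer is passed to mprotect(), which the ABI permits, while mmap()/mremap() are given only untagged addresses. It assumes the tagged address ABI has been enabled for the thread via prctl(PR_SET_TAGGED_ADDR_CTRL, PR_TAGGED_ADDR_ENABLE, ...).

#include <stdint.h>
#include <sys/mman.h>
#include <sys/prctl.h>
#include <unistd.h>

#ifndef PR_SET_TAGGED_ADDR_CTRL
#define PR_SET_TAGGED_ADDR_CTRL	55
#define PR_TAGGED_ADDR_ENABLE	(1UL << 0)
#endif

int main(void)
{
	long page = sysconf(_SC_PAGESIZE);

	/* Opt in to the tagged address ABI for this thread. */
	if (prctl(PR_SET_TAGGED_ADDR_CTRL, PR_TAGGED_ADDR_ENABLE, 0, 0, 0))
		return 1;

	/* mmap() gets an untagged hint; tagged hints are rejected per the ABI. */
	void *p = mmap(NULL, page, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (p == MAP_FAILED)
		return 1;

	/* Insert a tag in bits 56-63; address-space management calls accept it. */
	void *tagged = (void *)((uintptr_t)p | (0x2aUL << 56));

	return mprotect(tagged, page, PROT_READ) ? 1 : 0;
}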
@@ -551,6 +551,7 @@ options to your ``.config``:
 Once the kernel is built and installed, a simple
 
 .. code-block:: bash
 
 	modprobe example-test
 
 ...will run the tests.
@@ -33,24 +33,40 @@ properties:
     maxItems: 1
 
   clocks:
-    minItems: 2
-    maxItems: 3
-    items:
-      - description: The CSI interface clock
-      - description: The CSI ISP clock
-      - description: The CSI DRAM clock
+    oneOf:
+      - items:
+          - description: The CSI interface clock
+          - description: The CSI DRAM clock
+      - items:
+          - description: The CSI interface clock
+          - description: The CSI ISP clock
+          - description: The CSI DRAM clock
 
   clock-names:
-    minItems: 2
-    maxItems: 3
-    items:
-      - const: bus
-      - const: isp
-      - const: ram
+    oneOf:
+      - items:
+          - const: bus
+          - const: ram
+      - items:
+          - const: bus
+          - const: isp
+          - const: ram
 
   resets:
     maxItems: 1
 
+  # FIXME: This should be made required eventually once every SoC will
+  # have the MBUS declared.
+  interconnects:
+    maxItems: 1
+
+  # FIXME: This should be made required eventually once every SoC will
+  # have the MBUS declared.
+  interconnect-names:
+    const: dma-mem
+
   # See ./video-interfaces.txt for details
   port:
     type: object
@@ -347,6 +347,7 @@ examples:
         interrupts = <GIC_SPI 77 IRQ_TYPE_LEVEL_HIGH>;
 
         #iommu-cells = <1>;
+        #reset-cells = <1>;
     };
 
     external-memory-controller@7001b000 {
@@ -363,20 +364,23 @@ examples:
             timing-0 {
                 clock-frequency = <12750000>;
 
-                nvidia,emc-zcal-cnt-long = <0x00000042>;
-                nvidia,emc-auto-cal-interval = <0x001fffff>;
-                nvidia,emc-ctt-term-ctrl = <0x00000802>;
-                nvidia,emc-cfg = <0x73240000>;
-                nvidia,emc-cfg-2 = <0x000008c5>;
-                nvidia,emc-sel-dpd-ctrl = <0x00040128>;
-                nvidia,emc-bgbias-ctl0 = <0x00000008>;
                 nvidia,emc-auto-cal-config = <0xa1430000>;
                 nvidia,emc-auto-cal-config2 = <0x00000000>;
                 nvidia,emc-auto-cal-config3 = <0x00000000>;
-                nvidia,emc-mode-reset = <0x80001221>;
+                nvidia,emc-auto-cal-interval = <0x001fffff>;
+                nvidia,emc-bgbias-ctl0 = <0x00000008>;
+                nvidia,emc-cfg = <0x73240000>;
+                nvidia,emc-cfg-2 = <0x000008c5>;
+                nvidia,emc-ctt-term-ctrl = <0x00000802>;
                 nvidia,emc-mode-1 = <0x80100003>;
                 nvidia,emc-mode-2 = <0x80200008>;
                 nvidia,emc-mode-4 = <0x00000000>;
+                nvidia,emc-mode-reset = <0x80001221>;
+                nvidia,emc-mrs-wait-cnt = <0x000e000e>;
+                nvidia,emc-sel-dpd-ctrl = <0x00040128>;
+                nvidia,emc-xm2dqspadctrl2 = <0x0130b118>;
+                nvidia,emc-zcal-cnt-long = <0x00000042>;
+                nvidia,emc-zcal-interval = <0x00000000>;
 
                 nvidia,emc-configuration = <
                     0x00000000 /* EMC_RC */
@@ -124,7 +124,7 @@ not every application needs SDIO irq, e.g. MMC cards.
 	pinctrl-1 = <&mmc1_idle>;
 	pinctrl-2 = <&mmc1_sleep>;
 	...
-	interrupts-extended = <&intc 64 &gpio2 28 GPIO_ACTIVE_LOW>;
+	interrupts-extended = <&intc 64 &gpio2 28 IRQ_TYPE_LEVEL_LOW>;
 };
 
 mmc1_idle : pinmux_cirq_pin {
@@ -56,7 +56,6 @@ patternProperties:
 examples:
   - |
     davinci_mdio: mdio@5c030000 {
-        compatible = "ti,davinci_mdio";
         reg = <0x5c030000 0x1000>;
         #address-cells = <1>;
         #size-cells = <0>;
@@ -134,7 +134,7 @@ Sequential zone files can only be written sequentially, starting from the file
 end, that is, write operations can only be append writes. Zonefs makes no
 attempt at accepting random writes and will fail any write request that has a
 start offset not corresponding to the end of the file, or to the end of the last
-write issued and still in-flight (for asynchrnous I/O operations).
+write issued and still in-flight (for asynchronous I/O operations).
 
 Since dirty page writeback by the page cache does not guarantee a sequential
 write pattern, zonefs prevents buffered writes and writeable shared mappings
@@ -142,7 +142,7 @@ on sequential files. Only direct I/O writes are accepted for these files.
 zonefs relies on the sequential delivery of write I/O requests to the device
 implemented by the block layer elevator. An elevator implementing the sequential
 write feature for zoned block device (ELEVATOR_F_ZBD_SEQ_WRITE elevator feature)
-must be used. This type of elevator (e.g. mq-deadline) is the set by default
+must be used. This type of elevator (e.g. mq-deadline) is set by default
 for zoned block devices on device initialization.
 
 There are no restrictions on the type of I/O used for read operations in
@@ -196,7 +196,7 @@ additional conditions that result in I/O errors.
 may still happen in the case of a partial failure of a very large direct I/O
 operation split into multiple BIOs/requests or asynchronous I/O operations.
 If one of the write request within the set of sequential write requests
-issued to the device fails, all write requests after queued after it will
+issued to the device fails, all write requests queued after it will
 become unaligned and fail.
 
 * Delayed write errors: similarly to regular block devices, if the device side
@@ -207,7 +207,7 @@ additional conditions that result in I/O errors.
 causing all data to be dropped after the sector that caused the error.
 
 All I/O errors detected by zonefs are notified to the user with an error code
-return for the system call that trigered or detected the error. The recovery
+return for the system call that triggered or detected the error. The recovery
 actions taken by zonefs in response to I/O errors depend on the I/O type (read
 vs write) and on the reason for the error (bad sector, unaligned writes or zone
 condition change).
@@ -222,7 +222,7 @@ condition change).
 * A zone condition change to read-only or offline also always triggers zonefs
   I/O error recovery.
 
-Zonefs minimal I/O error recovery may change a file size and a file access
+Zonefs minimal I/O error recovery may change a file size and file access
 permissions.
 
 * File size changes:
@@ -237,7 +237,7 @@ permissions.
 A file size may also be reduced to reflect a delayed write error detected on
 fsync(): in this case, the amount of data effectively written in the zone may
 be less than originally indicated by the file inode size. After such I/O
-error, zonefs always fixes a file inode size to reflect the amount of data
+error, zonefs always fixes the file inode size to reflect the amount of data
 persistently stored in the file zone.
 
 * Access permission changes:
@@ -281,11 +281,11 @@ Further notes:
   permissions to read-only applies to all files. The file system is remounted
   read-only.
 * Access permission and file size changes due to the device transitioning zones
-  to the offline condition are permanent. Remounting or reformating the device
+  to the offline condition are permanent. Remounting or reformatting the device
   with mkfs.zonefs (mkzonefs) will not change back offline zone files to a good
   state.
 * File access permission changes to read-only due to the device transitioning
-  zones to the read-only condition are permanent. Remounting or reformating
+  zones to the read-only condition are permanent. Remounting or reformatting
   the device will not re-enable file write access.
 * File access permission changes implied by the remount-ro, zone-ro and
   zone-offline mount options are temporary for zones in a good condition.
@@ -301,13 +301,13 @@ Mount options
 
 zonefs define the "errors=<behavior>" mount option to allow the user to specify
 zonefs behavior in response to I/O errors, inode size inconsistencies or zone
-condition chages. The defined behaviors are as follow:
+condition changes. The defined behaviors are as follow:
 * remount-ro (default)
 * zone-ro
 * zone-offline
 * repair
 
-The I/O error actions defined for each behavior is detailed in the previous
+The I/O error actions defined for each behavior are detailed in the previous
 section.
 
 Zonefs User Space Tools
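For illustration only (not part of this commit): a minimal sketch of an append write to a zonefs sequential zone file, following the rules described above (direct I/O only, writes only at the current end of file). It assumes a hypothetical path /mnt/seq/0 under a mounted zonefs volume and a 4 KiB-aligned buffer matching the device's logical block size.

#define _GNU_SOURCE
#include <fcntl.h>
#include <stdlib.h>
#include <sys/stat.h>
#include <unistd.h>

int main(void)
{
	/* Hypothetical path: first sequential zone file of a zonefs mount. */
	int fd = open("/mnt/seq/0", O_WRONLY | O_DIRECT);
	struct stat st;
	void *buf;

	if (fd < 0 || fstat(fd, &st))
		return 1;

	/* Sequential files only accept direct I/O writes... */
	if (posix_memalign(&buf, 4096, 4096))
		return 1;

	/* ...and only at the current end of the file (append writes). */
	ssize_t ret = pwrite(fd, buf, 4096, st.st_size);

	free(buf);
	close(fd);
	return ret == 4096 ? 0 : 1;
}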
@@ -24,6 +24,7 @@ This driver implements support for Infineon Multi-phase XDPE122 family
 dual loop voltage regulators.
 The family includes XDPE12284 and XDPE12254 devices.
 The devices from this family complaint with:
 
 - Intel VR13 and VR13HC rev 1.3, IMVP8 rev 1.2 and IMPVP9 rev 1.3 DC-DC
   converter specification.
 - Intel SVID rev 1.9. protocol.
@@ -765,7 +765,7 @@ is not sufficient this sometimes needs to be explicit.
 Example::
 
 	#arch/x86/boot/Makefile
-	subdir- := compressed/
+	subdir- := compressed
 
 The above assignment instructs kbuild to descend down in the
 directory compressed/ when "make clean" is executed.
@@ -1379,9 +1379,6 @@ See subsequent chapter for the syntax of the Kbuild file.
 	in arch/$(ARCH)/include/(uapi/)/asm, Kbuild will automatically generate
 	a wrapper of the asm-generic one.
 
-	The convention is to list one subdir per line and
-	preferably in alphabetic order.
-
 8 Kbuild Variables
 ==================
@@ -487,8 +487,9 @@ phy_register_fixup_for_id()::
 The stubs set one of the two matching criteria, and set the other one to
 match anything.
 
-When phy_register_fixup() or \*_for_uid()/\*_for_id() is called at module,
-unregister fixup and free allocate memory are required.
+When phy_register_fixup() or \*_for_uid()/\*_for_id() is called at module load
+time, the module needs to unregister the fixup and free allocated memory when
+it's unloaded.
 
 Call one of following function before unloading module::
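For illustration only (not part of this commit): a hedged sketch of the register-on-load / unregister-on-unload pattern described above, using the phy_register_fixup_for_uid()/phy_unregister_fixup_for_uid() pair from include/linux/phy.h. The PHY ID below is hypothetical.

#include <linux/module.h>
#include <linux/phy.h>

/* Hypothetical PHY ID used only for this example. */
#define EXAMPLE_PHY_ID		0x00112233
#define EXAMPLE_PHY_ID_MASK	0xffffffff

static int example_phy_fixup(struct phy_device *phydev)
{
	/* Board-specific tweak would go here. */
	return 0;
}

static int __init example_init(void)
{
	return phy_register_fixup_for_uid(EXAMPLE_PHY_ID, EXAMPLE_PHY_ID_MASK,
					  example_phy_fixup);
}

static void __exit example_exit(void)
{
	/* Undo the registration before the module text goes away. */
	phy_unregister_fixup_for_uid(EXAMPLE_PHY_ID, EXAMPLE_PHY_ID_MASK);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");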
@@ -30,4 +30,4 @@ if [ -n "$parallel" ] ; then
 	parallel="-j$parallel"
 fi
 
-exec "$sphinx" "$parallel" "$@"
+exec "$sphinx" $parallel "$@"
@@ -4611,35 +4611,38 @@ unpins the VPA pages and releases all the device pages that are used to
 track the secure pages by hypervisor.
 
 4.122 KVM_S390_NORMAL_RESET
+---------------------------
 
-Capability: KVM_CAP_S390_VCPU_RESETS
-Architectures: s390
-Type: vcpu ioctl
-Parameters: none
-Returns: 0
+:Capability: KVM_CAP_S390_VCPU_RESETS
+:Architectures: s390
+:Type: vcpu ioctl
+:Parameters: none
+:Returns: 0
 
 This ioctl resets VCPU registers and control structures according to
 the cpu reset definition in the POP (Principles Of Operation).
 
 4.123 KVM_S390_INITIAL_RESET
+----------------------------
 
-Capability: none
-Architectures: s390
-Type: vcpu ioctl
-Parameters: none
-Returns: 0
+:Capability: none
+:Architectures: s390
+:Type: vcpu ioctl
+:Parameters: none
+:Returns: 0
 
 This ioctl resets VCPU registers and control structures according to
 the initial cpu reset definition in the POP. However, the cpu is not
 put into ESA mode. This reset is a superset of the normal reset.
 
 4.124 KVM_S390_CLEAR_RESET
+--------------------------
 
-Capability: KVM_CAP_S390_VCPU_RESETS
-Architectures: s390
-Type: vcpu ioctl
-Parameters: none
-Returns: 0
+:Capability: KVM_CAP_S390_VCPU_RESETS
+:Architectures: s390
+:Type: vcpu ioctl
+:Parameters: none
+:Returns: 0
 
 This ioctl resets VCPU registers and control structures according to
 the clear cpu reset definition in the POP. However, the cpu is not put
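For illustration only (not part of this commit): a hedged sketch of invoking one of the vcpu reset ioctls documented above after probing the KVM_CAP_S390_VCPU_RESETS capability. It assumes vm_fd and vcpu_fd were obtained from /dev/kvm in the usual way.

#include <linux/kvm.h>
#include <sys/ioctl.h>

/* Returns 0 on success; vm_fd/vcpu_fd are assumed to be valid KVM fds. */
static int s390_normal_reset(int vm_fd, int vcpu_fd)
{
	/* The capability gates KVM_S390_NORMAL_RESET and KVM_S390_CLEAR_RESET. */
	if (ioctl(vm_fd, KVM_CHECK_EXTENSION, KVM_CAP_S390_VCPU_RESETS) <= 0)
		return -1;

	/* vcpu ioctl, no parameters, returns 0 on success. */
	return ioctl(vcpu_fd, KVM_S390_NORMAL_RESET, 0);
}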
@@ -19,7 +19,6 @@ x86-specific Documentation
    tlb
    mtrr
    pat
-   intel_mpx
    intel-iommu
    intel_txt
    amd-memory-encryption
@@ -3649,6 +3649,7 @@ F:	sound/pci/oxygen/
 
 C-SKY ARCHITECTURE
 M:	Guo Ren <guoren@kernel.org>
+L:	linux-csky@vger.kernel.org
 T:	git https://github.com/c-sky/csky-linux.git
 S:	Supported
 F:	arch/csky/
@@ -11114,14 +11115,12 @@ S:	Maintained
 F:	drivers/usb/image/microtek.*
 
 MIPS
-M:	Ralf Baechle <ralf@linux-mips.org>
-M:	Paul Burton <paulburton@kernel.org>
+M:	Thomas Bogendoerfer <tsbogend@alpha.franken.de>
 L:	linux-mips@vger.kernel.org
 W:	http://www.linux-mips.org/
-T:	git git://git.linux-mips.org/pub/scm/ralf/linux.git
 T:	git git://git.kernel.org/pub/scm/linux/kernel/git/mips/linux.git
 Q:	http://patchwork.linux-mips.org/project/linux-mips/list/
-S:	Supported
+S:	Maintained
 F:	Documentation/devicetree/bindings/mips/
 F:	Documentation/mips/
 F:	arch/mips/
Makefile (6 changes)

@@ -2,7 +2,7 @@
 VERSION = 5
 PATCHLEVEL = 6
 SUBLEVEL = 0
-EXTRAVERSION = -rc2
+EXTRAVERSION = -rc3
 NAME = Kleptomaniac Octopus
 
 # *DOCUMENTATION*
@@ -68,6 +68,7 @@ unexport GREP_OPTIONS
 #
 # If KBUILD_VERBOSE equals 0 then the above command will be hidden.
 # If KBUILD_VERBOSE equals 1 then the above command is displayed.
+# If KBUILD_VERBOSE equals 2 then give the reason why each target is rebuilt.
 #
 # To put more focus on warnings, be less verbose as default
 # Use 'make V=1' to see the full commands
@@ -1238,7 +1239,7 @@ ifneq ($(dtstree),)
 %.dtb: include/config/kernel.release scripts_dtc
 	$(Q)$(MAKE) $(build)=$(dtstree) $(dtstree)/$@
 
-PHONY += dtbs dtbs_install dt_binding_check
+PHONY += dtbs dtbs_install dtbs_check
 dtbs dtbs_check: include/config/kernel.release scripts_dtc
 	$(Q)$(MAKE) $(build)=$(dtstree)
 
@@ -1258,6 +1259,7 @@ PHONY += scripts_dtc
 scripts_dtc: scripts_basic
 	$(Q)$(MAKE) $(build)=scripts/dtc
 
+PHONY += dt_binding_check
 dt_binding_check: scripts_dtc
 	$(Q)$(MAKE) $(build)=Documentation/devicetree/bindings
@@ -6,7 +6,7 @@
 
 #ifdef CONFIG_ARM64_LSE_ATOMICS
 
-#define __LSE_PREAMBLE	".arch armv8-a+lse\n"
+#define __LSE_PREAMBLE	".arch_extension lse\n"
 
 #include <linux/compiler_types.h>
 #include <linux/export.h>
@@ -213,7 +213,7 @@ static inline unsigned long kaslr_offset(void)
 	((__force __typeof__(addr))sign_extend64((__force u64)(addr), 55))
 
 #define untagged_addr(addr)	({					\
-	u64 __addr = (__force u64)addr;					\
+	u64 __addr = (__force u64)(addr);				\
 	__addr &= __untagged_addr(__addr);				\
 	(__force __typeof__(addr))__addr;				\
 })
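The change above only adds parentheses around the macro argument. As an aside (not part of the commit), a tiny user-space sketch of why that matters when a cast is applied to a compound argument; the macros here are hypothetical stand-ins, not the kernel ones.

#include <stdint.h>
#include <stdio.h>

#define TRUNC_BAD(x)  ((uint8_t)x)	/* cast binds to the first token only */
#define TRUNC_GOOD(x) ((uint8_t)(x))	/* cast applies to the whole argument */

int main(void)
{
	unsigned int a = 0x1ff, b = 1;

	/* Expands to (uint8_t)a + b = 0xff + 1 = 256 */
	printf("bad:  %u\n", (unsigned)TRUNC_BAD(a + b));
	/* Expands to (uint8_t)(a + b) = 0x200 truncated to 0 */
	printf("good: %u\n", (unsigned)TRUNC_GOOD(a + b));
	return 0;
}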
@@ -9,7 +9,6 @@ config CSKY
 	select ARCH_USE_QUEUED_RWLOCKS if NR_CPUS>2
 	select COMMON_CLK
 	select CLKSRC_MMIO
-	select CLKSRC_OF
 	select CSKY_MPINTC if CPU_CK860
 	select CSKY_MP_TIMER if CPU_CK860
 	select CSKY_APB_INTC
@@ -37,6 +36,7 @@ config CSKY
 	select GX6605S_TIMER if CPU_CK610
 	select HAVE_ARCH_TRACEHOOK
 	select HAVE_ARCH_AUDITSYSCALL
+	select HAVE_COPY_THREAD_TLS
 	select HAVE_DYNAMIC_FTRACE
 	select HAVE_FUNCTION_TRACER
 	select HAVE_FUNCTION_GRAPH_TRACER
@@ -47,8 +47,8 @@ config CSKY
 	select HAVE_PERF_EVENTS
 	select HAVE_PERF_REGS
 	select HAVE_PERF_USER_STACK_DUMP
-	select HAVE_DMA_API_DEBUG
 	select HAVE_DMA_CONTIGUOUS
+	select HAVE_STACKPROTECTOR
 	select HAVE_SYSCALL_TRACEPOINTS
 	select MAY_HAVE_SPARSE_IRQ
 	select MODULES_USE_ELF_RELA if MODULES
@@ -59,6 +59,11 @@ config CSKY
 	select TIMER_OF
 	select USB_ARCH_HAS_EHCI
 	select USB_ARCH_HAS_OHCI
+	select GENERIC_PCI_IOMAP
+	select HAVE_PCI
+	select PCI_DOMAINS_GENERIC if PCI
+	select PCI_SYSCALL if PCI
+	select PCI_MSI if PCI
 
 config CPU_HAS_CACHEV2
 	bool
@@ -75,7 +80,7 @@ config CPU_HAS_TLBI
 config CPU_HAS_LDSTEX
 	bool
 	help
-	  For SMP, CPU needs "ldex&stex" instrcutions to atomic operations.
+	  For SMP, CPU needs "ldex&stex" instructions for atomic operations.
 
 config CPU_NEED_TLBSYNC
 	bool
@@ -188,6 +193,40 @@ config CPU_PM_STOP
 	bool "stop"
 endchoice
 
+menuconfig HAVE_TCM
+	bool "Tightly-Coupled/Sram Memory"
+	select GENERIC_ALLOCATOR
+	help
+	  The implementation are not only used by TCM (Tightly-Coupled Meory)
+	  but also used by sram on SOC bus. It follow existed linux tcm
+	  software interface, so that old tcm application codes could be
+	  re-used directly.
+
+if HAVE_TCM
+config ITCM_RAM_BASE
+	hex "ITCM ram base"
+	default 0xffffffff
+
+config ITCM_NR_PAGES
+	int "Page count of ITCM size: NR*4KB"
+	range 1 256
+	default 32
+
+config HAVE_DTCM
+	bool "DTCM Support"
+
+config DTCM_RAM_BASE
+	hex "DTCM ram base"
+	depends on HAVE_DTCM
+	default 0xffffffff
+
+config DTCM_NR_PAGES
+	int "Page count of DTCM size: NR*4KB"
+	depends on HAVE_DTCM
+	range 1 256
+	default 32
+endif
+
 config CPU_HAS_VDSP
 	bool "CPU has VDSP coprocessor"
 	depends on CPU_HAS_FPU && CPU_HAS_FPUV2
@@ -196,6 +235,10 @@ config CPU_HAS_FPU
 	bool "CPU has FPU coprocessor"
 	depends on CPU_CK807 || CPU_CK810 || CPU_CK860
 
+config CPU_HAS_ICACHE_INS
+	bool "CPU has Icache invalidate instructions"
+	depends on CPU_HAS_CACHEV2
+
 config CPU_HAS_TEE
 	bool "CPU has Trusted Execution Environment"
 	depends on CPU_CK810
@@ -235,4 +278,6 @@ config HOTPLUG_CPU
 	  Say N if you want to disable CPU hotplug.
 endmenu
 
+source "arch/csky/Kconfig.platforms"
+
 source "kernel/Kconfig.hz"
@@ -0,0 +1,9 @@
+menu "Platform drivers selection"
+
+config ARCH_CSKY_DW_APB_ICTL
+	bool "Select dw-apb interrupt controller"
+	select DW_APB_ICTL
+	default y
+	help
+	  This enables support for snps dw-apb-ictl
+endmenu
@@ -48,9 +48,8 @@ extern void flush_cache_range(struct vm_area_struct *vma, unsigned long start, u
 
 #define flush_icache_page(vma, page)		do {} while (0);
 #define flush_icache_range(start, end)		cache_wbinv_range(start, end)
-#define flush_icache_user_range(vma,page,addr,len) \
-	flush_dcache_page(page)
+#define flush_icache_mm_range(mm, start, end)	cache_wbinv_range(start, end)
+#define flush_icache_deferred(mm)		do {} while (0);
 
 #define copy_from_user_page(vma, page, vaddr, dst, src, len) \
 do { \
@@ -16,14 +16,16 @@
 #define LSAVE_A4	40
 #define LSAVE_A5	44
 
+#define usp ss1
+
 .macro USPTOKSP
-	mtcr	sp, ss1
+	mtcr	sp, usp
 	mfcr	sp, ss0
 .endm
 
 .macro KSPTOUSP
 	mtcr	sp, ss0
-	mfcr	sp, ss1
+	mfcr	sp, usp
 .endm
 
 .macro SAVE_ALL epc_inc
@@ -45,7 +47,13 @@
 	add	lr, r13
 	stw	lr, (sp, 8)
 
+	mov	lr, sp
+	addi	lr, 32
+	addi	lr, 32
+	addi	lr, 16
+	bt	2f
 	mfcr	lr, ss1
+2:
 	stw	lr, (sp, 16)
 
 	stw	a0, (sp, 20)
@@ -79,9 +87,10 @@
 	ldw	a0, (sp, 12)
 	mtcr	a0, epsr
 	btsti	a0, 31
+	bt	1f
 	ldw	a0, (sp, 16)
 	mtcr	a0, ss1
+1:
 	ldw	a0, (sp, 24)
 	ldw	a1, (sp, 28)
 	ldw	a2, (sp, 32)
@@ -102,9 +111,9 @@
 	addi	sp, 32
 	addi	sp, 8
 
-	bt	1f
+	bt	2f
 	KSPTOUSP
-1:
+2:
 	rte
 .endm
@@ -6,46 +6,80 @@
 #include <linux/mm.h>
 #include <asm/cache.h>
 
-void flush_icache_page(struct vm_area_struct *vma, struct page *page)
-{
-	unsigned long start;
-
-	start = (unsigned long) kmap_atomic(page);
-
-	cache_wbinv_range(start, start + PAGE_SIZE);
-
-	kunmap_atomic((void *)start);
-}
-
-void flush_icache_user_range(struct vm_area_struct *vma, struct page *page,
-			     unsigned long vaddr, int len)
-{
-	unsigned long kaddr;
-
-	kaddr = (unsigned long) kmap_atomic(page) + (vaddr & ~PAGE_MASK);
-
-	cache_wbinv_range(kaddr, kaddr + len);
-
-	kunmap_atomic((void *)kaddr);
-}
-
 void update_mmu_cache(struct vm_area_struct *vma, unsigned long address,
 		      pte_t *pte)
 {
-	unsigned long addr, pfn;
+	unsigned long addr;
 	struct page *page;
 
-	pfn = pte_pfn(*pte);
-	if (unlikely(!pfn_valid(pfn)))
+	page = pfn_to_page(pte_pfn(*pte));
+	if (page == ZERO_PAGE(0))
 		return;
 
-	page = pfn_to_page(pfn);
-	if (page == ZERO_PAGE(0))
+	if (test_and_set_bit(PG_dcache_clean, &page->flags))
 		return;
 
 	addr = (unsigned long) kmap_atomic(page);
 
-	cache_wbinv_range(addr, addr + PAGE_SIZE);
+	dcache_wb_range(addr, addr + PAGE_SIZE);
+
+	if (vma->vm_flags & VM_EXEC)
+		icache_inv_range(addr, addr + PAGE_SIZE);
 
 	kunmap_atomic((void *) addr);
 }
+
+void flush_icache_deferred(struct mm_struct *mm)
+{
+	unsigned int cpu = smp_processor_id();
+	cpumask_t *mask = &mm->context.icache_stale_mask;
+
+	if (cpumask_test_cpu(cpu, mask)) {
+		cpumask_clear_cpu(cpu, mask);
+		/*
+		 * Ensure the remote hart's writes are visible to this hart.
+		 * This pairs with a barrier in flush_icache_mm.
+		 */
+		smp_mb();
+		local_icache_inv_all(NULL);
+	}
+}
+
+void flush_icache_mm_range(struct mm_struct *mm,
+		unsigned long start, unsigned long end)
+{
+	unsigned int cpu;
+	cpumask_t others, *mask;
+
+	preempt_disable();
+
+#ifdef CONFIG_CPU_HAS_ICACHE_INS
+	if (mm == current->mm) {
+		icache_inv_range(start, end);
+		preempt_enable();
+		return;
+	}
+#endif
+
+	/* Mark every hart's icache as needing a flush for this MM. */
+	mask = &mm->context.icache_stale_mask;
+	cpumask_setall(mask);
+
+	/* Flush this hart's I$ now, and mark it as flushed. */
+	cpu = smp_processor_id();
+	cpumask_clear_cpu(cpu, mask);
+	local_icache_inv_all(NULL);
+
+	/*
+	 * Flush the I$ of other harts concurrently executing, and mark them as
+	 * flushed.
+	 */
+	cpumask_andnot(&others, mm_cpumask(mm), cpumask_of(cpu));
+
+	if (mm != current->active_mm || !cpumask_empty(&others)) {
+		on_each_cpu_mask(&others, local_icache_inv_all, NULL, 1);
+		cpumask_clear(mask);
+	}
+
+	preempt_enable();
+}
@@ -13,24 +13,27 @@
 #define flush_cache_all()			do { } while (0)
 #define flush_cache_mm(mm)			do { } while (0)
 #define flush_cache_dup_mm(mm)			do { } while (0)
-#define flush_cache_range(vma, start, end) \
-	do { \
-		if (vma->vm_flags & VM_EXEC) \
-			icache_inv_all(); \
-	} while (0)
+#define flush_cache_range(vma, start, end)	do { } while (0)
 
 #define flush_cache_page(vma, vmaddr, pfn)	do { } while (0)
-#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 0
-#define flush_dcache_page(page)			do { } while (0)
+#define PG_dcache_clean				PG_arch_1
+
+#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
+static inline void flush_dcache_page(struct page *page)
+{
+	if (test_bit(PG_dcache_clean, &page->flags))
+		clear_bit(PG_dcache_clean, &page->flags);
+}
+
 #define flush_dcache_mmap_lock(mapping)		do { } while (0)
 #define flush_dcache_mmap_unlock(mapping)	do { } while (0)
+#define flush_icache_page(vma, page)		do { } while (0)
 
 #define flush_icache_range(start, end)		cache_wbinv_range(start, end)
 
-void flush_icache_page(struct vm_area_struct *vma, struct page *page);
-void flush_icache_user_range(struct vm_area_struct *vma, struct page *page,
-			     unsigned long vaddr, int len);
+void flush_icache_mm_range(struct mm_struct *mm,
+			   unsigned long start, unsigned long end);
+void flush_icache_deferred(struct mm_struct *mm);
 
 #define flush_cache_vmap(start, end)		do { } while (0)
 #define flush_cache_vunmap(start, end)		do { } while (0)
@@ -38,7 +41,13 @@ void flush_icache_user_range(struct vm_area_struct *vma, struct page *page,
 #define copy_to_user_page(vma, page, vaddr, dst, src, len) \
 do { \
 	memcpy(dst, src, len); \
-	cache_wbinv_range((unsigned long)dst, (unsigned long)dst + len); \
+	if (vma->vm_flags & VM_EXEC) { \
+		dcache_wb_range((unsigned long)dst, \
+				(unsigned long)dst + len); \
+		flush_icache_mm_range(current->mm, \
+				(unsigned long)dst, \
+				(unsigned long)dst + len); \
+	} \
 } while (0)
 #define copy_from_user_page(vma, page, vaddr, dst, src, len) \
 	memcpy(dst, src, len)
@@ -31,7 +31,13 @@
 
 	mfcr	lr, epsr
 	stw	lr, (sp, 12)
+	btsti	lr, 31
+	bf	1f
+	addi	lr, sp, 152
+	br	2f
+1:
 	mfcr	lr, usp
+2:
 	stw	lr, (sp, 16)
 
 	stw	a0, (sp, 20)
@@ -64,8 +70,10 @@
 	mtcr	a0, epc
 	ldw	a0, (sp, 12)
 	mtcr	a0, epsr
+	btsti	a0, 31
 	ldw	a0, (sp, 16)
 	mtcr	a0, usp
+	mtcr	a0, ss0
 
 #ifdef CONFIG_CPU_HAS_HILO
 	ldw	a0, (sp, 140)
@@ -86,6 +94,9 @@
 	addi	sp, 40
 	ldm	r16-r30, (sp)
 	addi	sp, 72
+	bf	1f
+	mfcr	sp, ss0
+1:
 	rte
 .endm
@@ -10,9 +10,6 @@ CONFIG_BSD_PROCESS_ACCT=y
 CONFIG_BSD_PROCESS_ACCT_V3=y
 CONFIG_MODULES=y
 CONFIG_MODULE_UNLOAD=y
-CONFIG_DEFAULT_DEADLINE=y
-CONFIG_CPU_CK807=y
-CONFIG_CPU_HAS_FPU=y
 CONFIG_NET=y
 CONFIG_PACKET=y
 CONFIG_UNIX=y
@@ -27,10 +24,7 @@ CONFIG_SERIAL_NONSTANDARD=y
 CONFIG_SERIAL_8250=y
 CONFIG_SERIAL_8250_CONSOLE=y
 CONFIG_SERIAL_OF_PLATFORM=y
-CONFIG_TTY_PRINTK=y
 # CONFIG_VGA_CONSOLE is not set
-CONFIG_CSKY_MPTIMER=y
-CONFIG_GX6605S_TIMER=y
 CONFIG_PM_DEVFREQ=y
 CONFIG_DEVFREQ_GOV_SIMPLE_ONDEMAND=y
 CONFIG_DEVFREQ_GOV_PERFORMANCE=y
@@ -56,6 +50,4 @@ CONFIG_CRAMFS=y
 CONFIG_ROMFS_FS=y
 CONFIG_NFS_FS=y
 CONFIG_PRINTK_TIME=y
-CONFIG_DEBUG_INFO=y
-CONFIG_DEBUG_FS=y
 CONFIG_MAGIC_SYSRQ=y
@@ -28,7 +28,6 @@ generic-y += local64.h
 generic-y += mm-arch-hooks.h
 generic-y += mmiowb.h
 generic-y += module.h
-generic-y += pci.h
 generic-y += percpu.h
 generic-y += preempt.h
 generic-y += qrwlock.h
@@ -16,6 +16,7 @@ void dcache_wb_line(unsigned long start);
 
 void icache_inv_range(unsigned long start, unsigned long end);
 void icache_inv_all(void);
+void local_icache_inv_all(void *priv);
 
 void dcache_wb_range(unsigned long start, unsigned long end);
 void dcache_wbinv_all(void);
@@ -4,6 +4,7 @@
 #ifndef __ASM_CSKY_CACHEFLUSH_H
 #define __ASM_CSKY_CACHEFLUSH_H
 
+#include <linux/mm.h>
 #include <abi/cacheflush.h>
 
 #endif /* __ASM_CSKY_CACHEFLUSH_H */
@@ -5,12 +5,16 @@
 #define __ASM_CSKY_FIXMAP_H
 
 #include <asm/page.h>
+#include <asm/memory.h>
 #ifdef CONFIG_HIGHMEM
 #include <linux/threads.h>
 #include <asm/kmap_types.h>
 #endif
 
 enum fixed_addresses {
+#ifdef CONFIG_HAVE_TCM
+	FIX_TCM = TCM_NR_PAGES,
+#endif
 #ifdef CONFIG_HIGHMEM
 	FIX_KMAP_BEGIN,
 	FIX_KMAP_END = FIX_KMAP_BEGIN + (KM_TYPE_NR * NR_CPUS) - 1,
@@ -18,10 +22,13 @@ enum fixed_addresses {
 	__end_of_fixed_addresses
 };
 
-#define FIXADDR_TOP	0xffffc000
 #define FIXADDR_SIZE	(__end_of_fixed_addresses << PAGE_SHIFT)
 #define FIXADDR_START	(FIXADDR_TOP - FIXADDR_SIZE)
 
 #include <asm-generic/fixmap.h>
 
+extern void fixrange_init(unsigned long start, unsigned long end,
+	pgd_t *pgd_base);
+extern void __init fixaddr_init(void);
+
 #endif /* __ASM_CSKY_FIXMAP_H */
@@ -0,0 +1,25 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef __ASM_CSKY_MEMORY_H
+#define __ASM_CSKY_MEMORY_H
+
+#include <linux/compiler.h>
+#include <linux/const.h>
+#include <linux/types.h>
+#include <linux/sizes.h>
+
+#define FIXADDR_TOP	_AC(0xffffc000, UL)
+#define PKMAP_BASE	_AC(0xff800000, UL)
+#define VMALLOC_START	_AC(0xc0008000, UL)
+#define VMALLOC_END	(PKMAP_BASE - (PAGE_SIZE * 2))
+
+#ifdef CONFIG_HAVE_TCM
+#ifdef CONFIG_HAVE_DTCM
+#define TCM_NR_PAGES	(CONFIG_ITCM_NR_PAGES + CONFIG_DTCM_NR_PAGES)
+#else
+#define TCM_NR_PAGES	(CONFIG_ITCM_NR_PAGES)
+#endif
+#define FIXADDR_TCM	_AC(FIXADDR_TOP - (TCM_NR_PAGES * PAGE_SIZE), UL)
+#endif
+
+#endif
@@ -7,6 +7,7 @@
 typedef struct {
 	atomic64_t	asid;
 	void		*vdso;
+	cpumask_t	icache_stale_mask;
 } mm_context_t;
 
 #endif /* __ASM_CSKY_MMU_H */
@@ -43,5 +43,7 @@ switch_mm(struct mm_struct *prev, struct mm_struct *next,
 
 	TLBMISS_HANDLER_SETUP_PGD(next->pgd);
 	write_mmu_entryhi(next->context.asid.counter);
+
+	flush_icache_deferred(next);
 }
 #endif /* __ASM_CSKY_MMU_CONTEXT_H */
@@ -0,0 +1,34 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+
+#ifndef __ASM_CSKY_PCI_H
+#define __ASM_CSKY_PCI_H
+
+#include <linux/types.h>
+#include <linux/slab.h>
+#include <linux/dma-mapping.h>
+
+#include <asm/io.h>
+
+#define PCIBIOS_MIN_IO		0
+#define PCIBIOS_MIN_MEM		0
+
+/* C-SKY shim does not initialize PCI bus */
+#define pcibios_assign_all_busses() 1
+
+extern int isa_dma_bridge_buggy;
+
+#ifdef CONFIG_PCI
+static inline int pci_get_legacy_ide_irq(struct pci_dev *dev, int channel)
+{
+	/* no legacy IRQ on csky */
+	return -ENODEV;
+}
+
+static inline int pci_proc_domain(struct pci_bus *bus)
+{
+	/* always show the domain in /proc */
+	return 1;
+}
+#endif  /* CONFIG_PCI */
+
+#endif  /* __ASM_CSKY_PCI_H */
@@ -5,12 +5,16 @@
 #define __ASM_CSKY_PGTABLE_H
 
 #include <asm/fixmap.h>
+#include <asm/memory.h>
 #include <asm/addrspace.h>
 #include <abi/pgtable-bits.h>
 #include <asm-generic/pgtable-nopmd.h>
@@ -16,11 +17,6 @@
 #define USER_PTRS_PER_PGD	(0x80000000UL/PGDIR_SIZE)
 #define FIRST_USER_ADDRESS	0UL
 
-#define PKMAP_BASE		(0xff800000)
-
-#define VMALLOC_START		(0xc0008000)
-#define VMALLOC_END		(PKMAP_BASE - 2*PAGE_SIZE)
-
 /*
  * C-SKY is two-level paging structure:
  */
@@ -0,0 +1,29 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_STACKPROTECTOR_H
+#define _ASM_STACKPROTECTOR_H 1
+
+#include <linux/random.h>
+#include <linux/version.h>
+
+extern unsigned long __stack_chk_guard;
+
+/*
+ * Initialize the stackprotector canary value.
+ *
+ * NOTE: this must only be called from functions that never return,
+ * and it must always be inlined.
+ */
+static __always_inline void boot_init_stack_canary(void)
+{
+	unsigned long canary;
+
+	/* Try to get a semi random initial value. */
+	get_random_bytes(&canary, sizeof(canary));
+	canary ^= LINUX_VERSION_CODE;
+	canary &= CANARY_MASK;
+
+	current->stack_canary = canary;
+	__stack_chk_guard = current->stack_canary;
+}
+
+#endif	/* __ASM_SH_STACKPROTECTOR_H */
@@ -0,0 +1,24 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef __ASM_CSKY_TCM_H
+#define __ASM_CSKY_TCM_H
+
+#ifndef CONFIG_HAVE_TCM
+#error "You should not be including tcm.h unless you have a TCM!"
+#endif
+
+#include <linux/compiler.h>
+
+/* Tag variables with this */
+#define __tcmdata __section(.tcm.data)
+/* Tag constants with this */
+#define __tcmconst __section(.tcm.rodata)
+/* Tag functions inside TCM called from outside TCM with this */
+#define __tcmfunc __section(.tcm.text) noinline
+/* Tag function inside TCM called from inside TCM with this */
+#define __tcmlocalfunc __section(.tcm.text)
+
+void *tcm_alloc(size_t len);
+void tcm_free(void *addr, size_t len);
+
+#endif
@@ -1,7 +1,10 @@
 /* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
 // Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.
 
+#define __ARCH_WANT_STAT64
+#define __ARCH_WANT_NEW_STAT
 #define __ARCH_WANT_SYS_CLONE
+#define __ARCH_WANT_SYS_CLONE3
 #define __ARCH_WANT_SET_GET_RLIMIT
 #define __ARCH_WANT_TIME32_SYSCALLS
 #include <asm-generic/unistd.h>
@@ -17,10 +17,12 @@ ENTRY(csky_cmpxchg)
 	mfcr	a3, epc
 	addi	a3, TRAP0_SIZE
 
-	subi	sp, 8
+	subi	sp, 16
 	stw	a3, (sp, 0)
 	mfcr	a3, epsr
 	stw	a3, (sp, 4)
+	mfcr	a3, usp
+	stw	a3, (sp, 8)
 
 	psrset	ee
 #ifdef CONFIG_CPU_HAS_LDSTEX
@@ -47,7 +49,9 @@ ENTRY(csky_cmpxchg)
 	mtcr	a3, epc
 	ldw	a3, (sp, 4)
 	mtcr	a3, epsr
-	addi	sp, 8
+	ldw	a3, (sp, 8)
+	mtcr	a3, usp
+	addi	sp, 16
 	KSPTOUSP
 	rte
 END(csky_cmpxchg)
@@ -16,6 +16,12 @@
 
 struct cpuinfo_csky cpu_data[NR_CPUS];
 
+#ifdef CONFIG_STACKPROTECTOR
+#include <linux/stackprotector.h>
+unsigned long __stack_chk_guard __read_mostly;
+EXPORT_SYMBOL(__stack_chk_guard);
+#endif
+
 asmlinkage void ret_from_fork(void);
 asmlinkage void ret_from_kernel_thread(void);
 
@@ -34,10 +40,11 @@ unsigned long thread_saved_pc(struct task_struct *tsk)
 	return sw->r15;
 }
 
-int copy_thread(unsigned long clone_flags,
+int copy_thread_tls(unsigned long clone_flags,
 		unsigned long usp,
 		unsigned long kthread_arg,
-		struct task_struct *p)
+		struct task_struct *p,
+		unsigned long tls)
 {
 	struct switch_stack *childstack;
 	struct pt_regs *childregs = task_pt_regs(p);
@@ -64,7 +71,7 @@ int copy_thread(unsigned long clone_flags,
 	childregs->usp = usp;
 	if (clone_flags & CLONE_SETTLS)
 		task_thread_info(p)->tp_value = childregs->tls
-						= childregs->regs[0];
+						= tls;
 
 	childregs->a0 = 0;
 	childstack->r15 = (unsigned long) ret_from_fork;
|
|
@@ -47,9 +47,6 @@ static void __init csky_memblock_init(void)
     signed long size;

     memblock_reserve(__pa(_stext), _end - _stext);
-#ifdef CONFIG_BLK_DEV_INITRD
-    memblock_reserve(__pa(initrd_start), initrd_end - initrd_start);
-#endif

     early_init_fdt_reserve_self();
     early_init_fdt_scan_reserved_mem();
@@ -133,6 +130,8 @@ void __init setup_arch(char **cmdline_p)

     sparse_init();

+    fixaddr_init();
+
 #ifdef CONFIG_HIGHMEM
     kmap_init();
 #endif
@@ -120,7 +120,7 @@ void __init setup_smp_ipi(void)
     int rc;

     if (ipi_irq == 0)
-        panic("%s IRQ mapping failed\n", __func__);
+        return;

     rc = request_percpu_irq(ipi_irq, handle_ipi, "IPI Interrupt",
                 &ipi_dummy_dev);
@@ -1,8 +1,8 @@
 // SPDX-License-Identifier: GPL-2.0
 // Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.

-#include <linux/clk-provider.h>
 #include <linux/clocksource.h>
+#include <linux/of_clk.h>

 void __init time_init(void)
 {
@@ -2,6 +2,7 @@

 #include <asm/vmlinux.lds.h>
 #include <asm/page.h>
+#include <asm/memory.h>

 OUTPUT_ARCH(csky)
 ENTRY(_start)
@@ -53,6 +54,54 @@ SECTIONS
     RW_DATA(L1_CACHE_BYTES, PAGE_SIZE, THREAD_SIZE)
     _edata = .;

+#ifdef CONFIG_HAVE_TCM
+    .tcm_start : {
+        . = ALIGN(PAGE_SIZE);
+        __tcm_start = .;
+    }
+
+    .text_data_tcm FIXADDR_TCM : AT(__tcm_start)
+    {
+        . = ALIGN(4);
+        __stcm_text_data = .;
+        *(.tcm.text)
+        *(.tcm.rodata)
+#ifndef CONFIG_HAVE_DTCM
+        *(.tcm.data)
+#endif
+        . = ALIGN(4);
+        __etcm_text_data = .;
+    }
+
+    . = ADDR(.tcm_start) + SIZEOF(.tcm_start) + SIZEOF(.text_data_tcm);
+
+#ifdef CONFIG_HAVE_DTCM
+#define ITCM_SIZE CONFIG_ITCM_NR_PAGES * PAGE_SIZE
+
+    .dtcm_start : {
+        __dtcm_start = .;
+    }
+
+    .data_tcm FIXADDR_TCM + ITCM_SIZE : AT(__dtcm_start)
+    {
+        . = ALIGN(4);
+        __stcm_data = .;
+        *(.tcm.data)
+        . = ALIGN(4);
+        __etcm_data = .;
+    }
+
+    . = ADDR(.dtcm_start) + SIZEOF(.data_tcm);
+
+    .tcm_end : AT(ADDR(.dtcm_start) + SIZEOF(.data_tcm)) {
+#else
+    .tcm_end : AT(ADDR(.tcm_start) + SIZEOF(.text_data_tcm)) {
+#endif
+        . = ALIGN(PAGE_SIZE);
+        __tcm_end = .;
+    }
+#endif
+
     EXCEPTION_TABLE(L1_CACHE_BYTES)
     BSS_SECTION(L1_CACHE_BYTES, PAGE_SIZE, L1_CACHE_BYTES)
     VBR_BASE
@@ -1,8 +1,10 @@
 # SPDX-License-Identifier: GPL-2.0-only
 ifeq ($(CONFIG_CPU_HAS_CACHEV2),y)
 obj-y += cachev2.o
+CFLAGS_REMOVE_cachev2.o = $(CC_FLAGS_FTRACE)
 else
 obj-y += cachev1.o
+CFLAGS_REMOVE_cachev1.o = $(CC_FLAGS_FTRACE)
 endif

 obj-y += dma-mapping.o
@@ -14,3 +16,4 @@ obj-y += syscache.o
 obj-y += tlb.o
 obj-y += asid.o
 obj-y += context.o
+obj-$(CONFIG_HAVE_TCM) += tcm.o
@@ -94,6 +94,11 @@ void icache_inv_all(void)
     cache_op_all(INS_CACHE|CACHE_INV, 0);
 }

+void local_icache_inv_all(void *priv)
+{
+    cache_op_all(INS_CACHE|CACHE_INV, 0);
+}
+
 void dcache_wb_range(unsigned long start, unsigned long end)
 {
     cache_op_range(start, end, DATA_CACHE|CACHE_CLR, 0);
@@ -3,15 +3,25 @@

 #include <linux/spinlock.h>
 #include <linux/smp.h>
+#include <linux/mm.h>
 #include <asm/cache.h>
 #include <asm/barrier.h>

-inline void dcache_wb_line(unsigned long start)
+#define INS_CACHE (1 << 0)
+#define CACHE_INV (1 << 4)
+
+void local_icache_inv_all(void *priv)
 {
-    asm volatile("dcache.cval1 %0\n"::"r"(start):"memory");
+    mtcr("cr17", INS_CACHE|CACHE_INV);
     sync_is();
 }

+void icache_inv_all(void)
+{
+    on_each_cpu(local_icache_inv_all, NULL, 1);
+}
+
+#ifdef CONFIG_CPU_HAS_ICACHE_INS
 void icache_inv_range(unsigned long start, unsigned long end)
 {
     unsigned long i = start & ~(L1_CACHE_BYTES - 1);
@@ -20,10 +30,16 @@ void icache_inv_range(unsigned long start, unsigned long end)
         asm volatile("icache.iva %0\n"::"r"(i):"memory");
     sync_is();
 }
-
-void icache_inv_all(void)
+#else
+void icache_inv_range(unsigned long start, unsigned long end)
 {
-    asm volatile("icache.ialls\n":::"memory");
+    icache_inv_all();
+}
+#endif
+
+inline void dcache_wb_line(unsigned long start)
+{
+    asm volatile("dcache.cval1 %0\n"::"r"(start):"memory");
     sync_is();
 }

@@ -36,27 +52,10 @@ void dcache_wb_range(unsigned long start, unsigned long end)
     sync_is();
 }

-void dcache_inv_range(unsigned long start, unsigned long end)
-{
-    unsigned long i = start & ~(L1_CACHE_BYTES - 1);
-
-    for (; i < end; i += L1_CACHE_BYTES)
-        asm volatile("dcache.civa %0\n"::"r"(i):"memory");
-    sync_is();
-}
-
 void cache_wbinv_range(unsigned long start, unsigned long end)
 {
-    unsigned long i = start & ~(L1_CACHE_BYTES - 1);
-
-    for (; i < end; i += L1_CACHE_BYTES)
-        asm volatile("dcache.cval1 %0\n"::"r"(i):"memory");
-    sync_is();
-
-    i = start & ~(L1_CACHE_BYTES - 1);
-    for (; i < end; i += L1_CACHE_BYTES)
-        asm volatile("icache.iva %0\n"::"r"(i):"memory");
-    sync_is();
+    dcache_wb_range(start, end);
+    icache_inv_range(start, end);
 }
 EXPORT_SYMBOL(cache_wbinv_range);

@ -117,85 +117,29 @@ struct page *kmap_atomic_to_page(void *ptr)
|
||||||
return pte_page(*pte);
|
return pte_page(*pte);
|
||||||
}
|
}
|
||||||
|
|
||||||
static void __init fixrange_init(unsigned long start, unsigned long end,
|
static void __init kmap_pages_init(void)
|
||||||
pgd_t *pgd_base)
|
|
||||||
{
|
|
||||||
#ifdef CONFIG_HIGHMEM
|
|
||||||
pgd_t *pgd;
|
|
||||||
pud_t *pud;
|
|
||||||
pmd_t *pmd;
|
|
||||||
pte_t *pte;
|
|
||||||
int i, j, k;
|
|
||||||
unsigned long vaddr;
|
|
||||||
|
|
||||||
vaddr = start;
|
|
||||||
i = __pgd_offset(vaddr);
|
|
||||||
j = __pud_offset(vaddr);
|
|
||||||
k = __pmd_offset(vaddr);
|
|
||||||
pgd = pgd_base + i;
|
|
||||||
|
|
||||||
for ( ; (i < PTRS_PER_PGD) && (vaddr != end); pgd++, i++) {
|
|
||||||
pud = (pud_t *)pgd;
|
|
||||||
for ( ; (j < PTRS_PER_PUD) && (vaddr != end); pud++, j++) {
|
|
||||||
pmd = (pmd_t *)pud;
|
|
||||||
for (; (k < PTRS_PER_PMD) && (vaddr != end); pmd++, k++) {
|
|
||||||
if (pmd_none(*pmd)) {
|
|
||||||
pte = (pte_t *) memblock_alloc_low(PAGE_SIZE, PAGE_SIZE);
|
|
||||||
if (!pte)
|
|
||||||
panic("%s: Failed to allocate %lu bytes align=%lx\n",
|
|
||||||
__func__, PAGE_SIZE,
|
|
||||||
PAGE_SIZE);
|
|
||||||
|
|
||||||
set_pmd(pmd, __pmd(__pa(pte)));
|
|
||||||
BUG_ON(pte != pte_offset_kernel(pmd, 0));
|
|
||||||
}
|
|
||||||
vaddr += PMD_SIZE;
|
|
||||||
}
|
|
||||||
k = 0;
|
|
||||||
}
|
|
||||||
j = 0;
|
|
||||||
}
|
|
||||||
#endif
|
|
||||||
}
|
|
||||||
|
|
||||||
void __init fixaddr_kmap_pages_init(void)
|
|
||||||
{
|
{
|
||||||
unsigned long vaddr;
|
unsigned long vaddr;
|
||||||
pgd_t *pgd_base;
|
|
||||||
#ifdef CONFIG_HIGHMEM
|
|
||||||
pgd_t *pgd;
|
pgd_t *pgd;
|
||||||
pmd_t *pmd;
|
pmd_t *pmd;
|
||||||
pud_t *pud;
|
pud_t *pud;
|
||||||
pte_t *pte;
|
pte_t *pte;
|
||||||
#endif
|
|
||||||
pgd_base = swapper_pg_dir;
|
|
||||||
|
|
||||||
/*
|
|
||||||
* Fixed mappings:
|
|
||||||
*/
|
|
||||||
vaddr = __fix_to_virt(__end_of_fixed_addresses - 1) & PMD_MASK;
|
|
||||||
fixrange_init(vaddr, 0, pgd_base);
|
|
||||||
|
|
||||||
#ifdef CONFIG_HIGHMEM
|
|
||||||
/*
|
|
||||||
* Permanent kmaps:
|
|
||||||
*/
|
|
||||||
vaddr = PKMAP_BASE;
|
vaddr = PKMAP_BASE;
|
||||||
fixrange_init(vaddr, vaddr + PAGE_SIZE*LAST_PKMAP, pgd_base);
|
fixrange_init(vaddr, vaddr + PAGE_SIZE*LAST_PKMAP, swapper_pg_dir);
|
||||||
|
|
||||||
pgd = swapper_pg_dir + __pgd_offset(vaddr);
|
pgd = swapper_pg_dir + __pgd_offset(vaddr);
|
||||||
pud = (pud_t *)pgd;
|
pud = (pud_t *)pgd;
|
||||||
pmd = pmd_offset(pud, vaddr);
|
pmd = pmd_offset(pud, vaddr);
|
||||||
pte = pte_offset_kernel(pmd, vaddr);
|
pte = pte_offset_kernel(pmd, vaddr);
|
||||||
pkmap_page_table = pte;
|
pkmap_page_table = pte;
|
||||||
#endif
|
|
||||||
}
|
}
|
||||||
|
|
||||||
void __init kmap_init(void)
|
void __init kmap_init(void)
|
||||||
{
|
{
|
||||||
unsigned long vaddr;
|
unsigned long vaddr;
|
||||||
|
|
||||||
fixaddr_kmap_pages_init();
|
kmap_pages_init();
|
||||||
|
|
||||||
vaddr = __fix_to_virt(FIX_KMAP_BEGIN);
|
vaddr = __fix_to_virt(FIX_KMAP_BEGIN);
|
||||||
|
|
||||||
|
|
|
@ -19,6 +19,7 @@
|
||||||
#include <linux/swap.h>
|
#include <linux/swap.h>
|
||||||
#include <linux/proc_fs.h>
|
#include <linux/proc_fs.h>
|
||||||
#include <linux/pfn.h>
|
#include <linux/pfn.h>
|
||||||
|
#include <linux/initrd.h>
|
||||||
|
|
||||||
#include <asm/setup.h>
|
#include <asm/setup.h>
|
||||||
#include <asm/cachectl.h>
|
#include <asm/cachectl.h>
|
||||||
|
@ -31,10 +32,50 @@
|
||||||
|
|
||||||
pgd_t swapper_pg_dir[PTRS_PER_PGD] __page_aligned_bss;
|
pgd_t swapper_pg_dir[PTRS_PER_PGD] __page_aligned_bss;
|
||||||
pte_t invalid_pte_table[PTRS_PER_PTE] __page_aligned_bss;
|
pte_t invalid_pte_table[PTRS_PER_PTE] __page_aligned_bss;
|
||||||
|
EXPORT_SYMBOL(invalid_pte_table);
|
||||||
unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)]
|
unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)]
|
||||||
__page_aligned_bss;
|
__page_aligned_bss;
|
||||||
EXPORT_SYMBOL(empty_zero_page);
|
EXPORT_SYMBOL(empty_zero_page);
|
||||||
|
|
||||||
|
#ifdef CONFIG_BLK_DEV_INITRD
|
||||||
|
static void __init setup_initrd(void)
|
||||||
|
{
|
||||||
|
unsigned long size;
|
||||||
|
|
||||||
|
if (initrd_start >= initrd_end) {
|
||||||
|
pr_err("initrd not found or empty");
|
||||||
|
goto disable;
|
||||||
|
}
|
||||||
|
|
||||||
|
if (__pa(initrd_end) > PFN_PHYS(max_low_pfn)) {
|
||||||
|
pr_err("initrd extends beyond end of memory");
|
||||||
|
goto disable;
|
||||||
|
}
|
||||||
|
|
||||||
|
size = initrd_end - initrd_start;
|
||||||
|
|
||||||
|
if (memblock_is_region_reserved(__pa(initrd_start), size)) {
|
||||||
|
pr_err("INITRD: 0x%08lx+0x%08lx overlaps in-use memory region",
|
||||||
|
__pa(initrd_start), size);
|
||||||
|
goto disable;
|
||||||
|
}
|
||||||
|
|
||||||
|
memblock_reserve(__pa(initrd_start), size);
|
||||||
|
|
||||||
|
pr_info("Initial ramdisk at: 0x%p (%lu bytes)\n",
|
||||||
|
(void *)(initrd_start), size);
|
||||||
|
|
||||||
|
initrd_below_start_ok = 1;
|
||||||
|
|
||||||
|
return;
|
||||||
|
|
||||||
|
disable:
|
||||||
|
initrd_start = initrd_end = 0;
|
||||||
|
|
||||||
|
pr_err(" - disabling initrd\n");
|
||||||
|
}
|
||||||
|
#endif
|
||||||
|
|
||||||
void __init mem_init(void)
|
void __init mem_init(void)
|
||||||
{
|
{
|
||||||
#ifdef CONFIG_HIGHMEM
|
#ifdef CONFIG_HIGHMEM
|
||||||
|
@ -46,6 +87,10 @@ void __init mem_init(void)
|
||||||
#endif
|
#endif
|
||||||
high_memory = (void *) __va(max_low_pfn << PAGE_SHIFT);
|
high_memory = (void *) __va(max_low_pfn << PAGE_SHIFT);
|
||||||
|
|
||||||
|
#ifdef CONFIG_BLK_DEV_INITRD
|
||||||
|
setup_initrd();
|
||||||
|
#endif
|
||||||
|
|
||||||
memblock_free_all();
|
memblock_free_all();
|
||||||
|
|
||||||
#ifdef CONFIG_HIGHMEM
|
#ifdef CONFIG_HIGHMEM
|
||||||
|
@ -101,3 +146,50 @@ void __init pre_mmu_init(void)
|
||||||
/* Setup page mask to 4k */
|
/* Setup page mask to 4k */
|
||||||
write_mmu_pagemask(0);
|
write_mmu_pagemask(0);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
void __init fixrange_init(unsigned long start, unsigned long end,
|
||||||
|
pgd_t *pgd_base)
|
||||||
|
{
|
||||||
|
pgd_t *pgd;
|
||||||
|
pud_t *pud;
|
||||||
|
pmd_t *pmd;
|
||||||
|
pte_t *pte;
|
||||||
|
int i, j, k;
|
||||||
|
unsigned long vaddr;
|
||||||
|
|
||||||
|
vaddr = start;
|
||||||
|
i = __pgd_offset(vaddr);
|
||||||
|
j = __pud_offset(vaddr);
|
||||||
|
k = __pmd_offset(vaddr);
|
||||||
|
pgd = pgd_base + i;
|
||||||
|
|
||||||
|
for ( ; (i < PTRS_PER_PGD) && (vaddr != end); pgd++, i++) {
|
||||||
|
pud = (pud_t *)pgd;
|
||||||
|
for ( ; (j < PTRS_PER_PUD) && (vaddr != end); pud++, j++) {
|
||||||
|
pmd = (pmd_t *)pud;
|
||||||
|
for (; (k < PTRS_PER_PMD) && (vaddr != end); pmd++, k++) {
|
||||||
|
if (pmd_none(*pmd)) {
|
||||||
|
pte = (pte_t *) memblock_alloc_low(PAGE_SIZE, PAGE_SIZE);
|
||||||
|
if (!pte)
|
||||||
|
panic("%s: Failed to allocate %lu bytes align=%lx\n",
|
||||||
|
__func__, PAGE_SIZE,
|
||||||
|
PAGE_SIZE);
|
||||||
|
|
||||||
|
set_pmd(pmd, __pmd(__pa(pte)));
|
||||||
|
BUG_ON(pte != pte_offset_kernel(pmd, 0));
|
||||||
|
}
|
||||||
|
vaddr += PMD_SIZE;
|
||||||
|
}
|
||||||
|
k = 0;
|
||||||
|
}
|
||||||
|
j = 0;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
void __init fixaddr_init(void)
|
||||||
|
{
|
||||||
|
unsigned long vaddr;
|
||||||
|
|
||||||
|
vaddr = __fix_to_virt(__end_of_fixed_addresses - 1) & PMD_MASK;
|
||||||
|
fixrange_init(vaddr, vaddr + PMD_SIZE, swapper_pg_dir);
|
||||||
|
}
|
||||||
|
|
|
@@ -3,7 +3,7 @@

 #include <linux/syscalls.h>
 #include <asm/page.h>
-#include <asm/cache.h>
+#include <asm/cacheflush.h>
 #include <asm/cachectl.h>

 SYSCALL_DEFINE3(cacheflush,
@@ -13,17 +13,14 @@ SYSCALL_DEFINE3(cacheflush,
 {
     switch (cache) {
     case ICACHE:
-        icache_inv_range((unsigned long)addr,
-                 (unsigned long)addr + bytes);
-        break;
+    case BCACHE:
+        flush_icache_mm_range(current->mm,
+                (unsigned long)addr,
+                (unsigned long)addr + bytes);
     case DCACHE:
         dcache_wb_range((unsigned long)addr,
                 (unsigned long)addr + bytes);
         break;
-    case BCACHE:
-        cache_wbinv_range((unsigned long)addr,
-                 (unsigned long)addr + bytes);
-        break;
     default:
         return -EINVAL;
     }
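The hunk above makes ICACHE and BCACHE requests of the csky cacheflush() syscall go through flush_icache_mm_range(). What follows is only a hedged userspace sketch of driving that syscall, not part of the patch; it assumes the target toolchain exposes __NR_cacheflush via <sys/syscall.h> and the ICACHE/DCACHE/BCACHE selectors via <asm/cachectl.h>, as csky and MIPS conventionally do.

#define _GNU_SOURCE
#include <unistd.h>        /* syscall() */
#include <sys/syscall.h>   /* __NR_cacheflush (assumed available on the target) */
#include <asm/cachectl.h>  /* ICACHE, DCACHE, BCACHE selectors */

/* Make a freshly written code buffer visible to the instruction stream. */
static int flush_code_buffer(void *addr, unsigned long bytes)
{
    /* BCACHE asks for both caches; per the hunk it also falls through to DCACHE */
    return syscall(__NR_cacheflush, addr, bytes, BCACHE);
}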
@ -0,0 +1,169 @@
|
||||||
|
// SPDX-License-Identifier: GPL-2.0
|
||||||
|
|
||||||
|
#include <linux/highmem.h>
|
||||||
|
#include <linux/genalloc.h>
|
||||||
|
#include <asm/tlbflush.h>
|
||||||
|
#include <asm/fixmap.h>
|
||||||
|
|
||||||
|
#if (CONFIG_ITCM_RAM_BASE == 0xffffffff)
|
||||||
|
#error "You should define ITCM_RAM_BASE"
|
||||||
|
#endif
|
||||||
|
|
||||||
|
#ifdef CONFIG_HAVE_DTCM
|
||||||
|
#if (CONFIG_DTCM_RAM_BASE == 0xffffffff)
|
||||||
|
#error "You should define DTCM_RAM_BASE"
|
||||||
|
#endif
|
||||||
|
|
||||||
|
#if (CONFIG_DTCM_RAM_BASE == CONFIG_ITCM_RAM_BASE)
|
||||||
|
#error "You should define correct DTCM_RAM_BASE"
|
||||||
|
#endif
|
||||||
|
#endif
|
||||||
|
|
||||||
|
extern char __tcm_start, __tcm_end, __dtcm_start;
|
||||||
|
|
||||||
|
static struct gen_pool *tcm_pool;
|
||||||
|
|
||||||
|
static void __init tcm_mapping_init(void)
|
||||||
|
{
|
||||||
|
pte_t *tcm_pte;
|
||||||
|
unsigned long vaddr, paddr;
|
||||||
|
int i;
|
||||||
|
|
||||||
|
paddr = CONFIG_ITCM_RAM_BASE;
|
||||||
|
|
||||||
|
if (pfn_valid(PFN_DOWN(CONFIG_ITCM_RAM_BASE)))
|
||||||
|
goto panic;
|
||||||
|
|
||||||
|
#ifndef CONFIG_HAVE_DTCM
|
||||||
|
for (i = 0; i < TCM_NR_PAGES; i++) {
|
||||||
|
#else
|
||||||
|
for (i = 0; i < CONFIG_ITCM_NR_PAGES; i++) {
|
||||||
|
#endif
|
||||||
|
vaddr = __fix_to_virt(FIX_TCM - i);
|
||||||
|
|
||||||
|
tcm_pte =
|
||||||
|
pte_offset_kernel((pmd_t *)pgd_offset_k(vaddr), vaddr);
|
||||||
|
|
||||||
|
set_pte(tcm_pte, pfn_pte(__phys_to_pfn(paddr), PAGE_KERNEL));
|
||||||
|
|
||||||
|
flush_tlb_one(vaddr);
|
||||||
|
|
||||||
|
paddr = paddr + PAGE_SIZE;
|
||||||
|
}
|
||||||
|
|
||||||
|
#ifdef CONFIG_HAVE_DTCM
|
||||||
|
if (pfn_valid(PFN_DOWN(CONFIG_DTCM_RAM_BASE)))
|
||||||
|
goto panic;
|
||||||
|
|
||||||
|
paddr = CONFIG_DTCM_RAM_BASE;
|
||||||
|
|
||||||
|
for (i = 0; i < CONFIG_DTCM_NR_PAGES; i++) {
|
||||||
|
vaddr = __fix_to_virt(FIX_TCM - CONFIG_ITCM_NR_PAGES - i);
|
||||||
|
|
||||||
|
tcm_pte =
|
||||||
|
pte_offset_kernel((pmd_t *) pgd_offset_k(vaddr), vaddr);
|
||||||
|
|
||||||
|
set_pte(tcm_pte, pfn_pte(__phys_to_pfn(paddr), PAGE_KERNEL));
|
||||||
|
|
||||||
|
flush_tlb_one(vaddr);
|
||||||
|
|
||||||
|
paddr = paddr + PAGE_SIZE;
|
||||||
|
}
|
||||||
|
#endif
|
||||||
|
|
||||||
|
#ifndef CONFIG_HAVE_DTCM
|
||||||
|
memcpy((void *)__fix_to_virt(FIX_TCM),
|
||||||
|
&__tcm_start, &__tcm_end - &__tcm_start);
|
||||||
|
|
||||||
|
pr_info("%s: mapping tcm va:0x%08lx to pa:0x%08x\n",
|
||||||
|
__func__, __fix_to_virt(FIX_TCM), CONFIG_ITCM_RAM_BASE);
|
||||||
|
|
||||||
|
pr_info("%s: __tcm_start va:0x%08lx size:%d\n",
|
||||||
|
__func__, (unsigned long)&__tcm_start, &__tcm_end - &__tcm_start);
|
||||||
|
#else
|
||||||
|
memcpy((void *)__fix_to_virt(FIX_TCM),
|
||||||
|
&__tcm_start, &__dtcm_start - &__tcm_start);
|
||||||
|
|
||||||
|
pr_info("%s: mapping itcm va:0x%08lx to pa:0x%08x\n",
|
||||||
|
__func__, __fix_to_virt(FIX_TCM), CONFIG_ITCM_RAM_BASE);
|
||||||
|
|
||||||
|
pr_info("%s: __itcm_start va:0x%08lx size:%d\n",
|
||||||
|
__func__, (unsigned long)&__tcm_start, &__dtcm_start - &__tcm_start);
|
||||||
|
|
||||||
|
memcpy((void *)__fix_to_virt(FIX_TCM - CONFIG_ITCM_NR_PAGES),
|
||||||
|
&__dtcm_start, &__tcm_end - &__dtcm_start);
|
||||||
|
|
||||||
|
pr_info("%s: mapping dtcm va:0x%08lx to pa:0x%08x\n",
|
||||||
|
__func__, __fix_to_virt(FIX_TCM - CONFIG_ITCM_NR_PAGES),
|
||||||
|
CONFIG_DTCM_RAM_BASE);
|
||||||
|
|
||||||
|
pr_info("%s: __dtcm_start va:0x%08lx size:%d\n",
|
||||||
|
__func__, (unsigned long)&__dtcm_start, &__tcm_end - &__dtcm_start);
|
||||||
|
|
||||||
|
#endif
|
||||||
|
return;
|
||||||
|
panic:
|
||||||
|
panic("TCM init error");
|
||||||
|
}
|
||||||
|
|
||||||
|
void *tcm_alloc(size_t len)
|
||||||
|
{
|
||||||
|
unsigned long vaddr;
|
||||||
|
|
||||||
|
if (!tcm_pool)
|
||||||
|
return NULL;
|
||||||
|
|
||||||
|
vaddr = gen_pool_alloc(tcm_pool, len);
|
||||||
|
if (!vaddr)
|
||||||
|
return NULL;
|
||||||
|
|
||||||
|
return (void *) vaddr;
|
||||||
|
}
|
||||||
|
EXPORT_SYMBOL(tcm_alloc);
|
||||||
|
|
||||||
|
void tcm_free(void *addr, size_t len)
|
||||||
|
{
|
||||||
|
gen_pool_free(tcm_pool, (unsigned long) addr, len);
|
||||||
|
}
|
||||||
|
EXPORT_SYMBOL(tcm_free);
|
||||||
|
|
||||||
|
static int __init tcm_setup_pool(void)
|
||||||
|
{
|
||||||
|
#ifndef CONFIG_HAVE_DTCM
|
||||||
|
u32 pool_size = (u32) (TCM_NR_PAGES * PAGE_SIZE)
|
||||||
|
- (u32) (&__tcm_end - &__tcm_start);
|
||||||
|
|
||||||
|
u32 tcm_pool_start = __fix_to_virt(FIX_TCM)
|
||||||
|
+ (u32) (&__tcm_end - &__tcm_start);
|
||||||
|
#else
|
||||||
|
u32 pool_size = (u32) (CONFIG_DTCM_NR_PAGES * PAGE_SIZE)
|
||||||
|
- (u32) (&__tcm_end - &__dtcm_start);
|
||||||
|
|
||||||
|
u32 tcm_pool_start = __fix_to_virt(FIX_TCM - CONFIG_ITCM_NR_PAGES)
|
||||||
|
+ (u32) (&__tcm_end - &__dtcm_start);
|
||||||
|
#endif
|
||||||
|
int ret;
|
||||||
|
|
||||||
|
tcm_pool = gen_pool_create(2, -1);
|
||||||
|
|
||||||
|
ret = gen_pool_add(tcm_pool, tcm_pool_start, pool_size, -1);
|
||||||
|
if (ret) {
|
||||||
|
pr_err("%s: gen_pool add failed!\n", __func__);
|
||||||
|
return ret;
|
||||||
|
}
|
||||||
|
|
||||||
|
pr_info("%s: Added %d bytes @ 0x%08x to memory pool\n",
|
||||||
|
__func__, pool_size, tcm_pool_start);
|
||||||
|
|
||||||
|
return 0;
|
||||||
|
}
|
||||||
|
|
||||||
|
static int __init tcm_init(void)
|
||||||
|
{
|
||||||
|
tcm_mapping_init();
|
||||||
|
|
||||||
|
tcm_setup_pool();
|
||||||
|
|
||||||
|
return 0;
|
||||||
|
}
|
||||||
|
arch_initcall(tcm_init);
|
|
@@ -1,5 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0
 #include <dt-bindings/clock/jz4740-cgu.h>
+#include <dt-bindings/clock/ingenic,tcu.h>

 / {
     #address-cells = <1>;
@@ -45,14 +46,6 @@ cgu: jz4740-cgu@10000000 {
         #clock-cells = <1>;
     };

-    watchdog: watchdog@10002000 {
-        compatible = "ingenic,jz4740-watchdog";
-        reg = <0x10002000 0x10>;
-
-        clocks = <&cgu JZ4740_CLK_RTC>;
-        clock-names = "rtc";
-    };
-
     tcu: timer@10002000 {
         compatible = "ingenic,jz4740-tcu", "simple-mfd";
         reg = <0x10002000 0x1000>;
@@ -73,6 +66,14 @@ &cgu JZ4740_CLK_PCLK

         interrupt-parent = <&intc>;
         interrupts = <23 22 21>;
+
+        watchdog: watchdog@0 {
+            compatible = "ingenic,jz4740-watchdog";
+            reg = <0x0 0xc>;
+
+            clocks = <&tcu TCU_CLK_WDT>;
+            clock-names = "wdt";
+        };
     };

     rtc_dev: rtc@10003000 {
@@ -1,5 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0
 #include <dt-bindings/clock/jz4780-cgu.h>
+#include <dt-bindings/clock/ingenic,tcu.h>
 #include <dt-bindings/dma/jz4780-dma.h>

 / {
@@ -67,6 +68,14 @@ &cgu JZ4780_CLK_EXCLK

         interrupt-parent = <&intc>;
         interrupts = <27 26 25>;
+
+        watchdog: watchdog@0 {
+            compatible = "ingenic,jz4780-watchdog";
+            reg = <0x0 0xc>;
+
+            clocks = <&tcu TCU_CLK_WDT>;
+            clock-names = "wdt";
+        };
     };

     rtc_dev: rtc@10003000 {
@@ -348,14 +357,6 @@ i2c4: i2c@10054000 {
         status = "disabled";
     };

-    watchdog: watchdog@10002000 {
-        compatible = "ingenic,jz4780-watchdog";
-        reg = <0x10002000 0x10>;
-
-        clocks = <&cgu JZ4780_CLK_RTCLK>;
-        clock-names = "rtc";
-    };
-
     nemc: nemc@13410000 {
         compatible = "ingenic,jz4780-nemc";
         reg = <0x13410000 0x10000>;
@@ -1,4 +1,5 @@
 // SPDX-License-Identifier: GPL-2.0
+#include <dt-bindings/clock/ingenic,tcu.h>
 #include <dt-bindings/clock/x1000-cgu.h>
 #include <dt-bindings/dma/x1000-dma.h>

@@ -72,7 +73,7 @@ wdt: watchdog@0 {
         compatible = "ingenic,x1000-watchdog", "ingenic,jz4780-watchdog";
         reg = <0x0 0x10>;

-        clocks = <&cgu X1000_CLK_RTCLK>;
+        clocks = <&tcu TCU_CLK_WDT>;
         clock-names = "wdt";
     };
 };
@@ -158,7 +159,6 @@ gpd: gpio@3 {
 i2c0: i2c-controller@10050000 {
     compatible = "ingenic,x1000-i2c";
     reg = <0x10050000 0x1000>;
-
     #address-cells = <1>;
     #size-cells = <0>;

@@ -173,7 +173,6 @@ i2c0: i2c-controller@10050000 {
 i2c1: i2c-controller@10051000 {
     compatible = "ingenic,x1000-i2c";
     reg = <0x10051000 0x1000>;
-
     #address-cells = <1>;
     #size-cells = <0>;

@@ -188,7 +187,6 @@ i2c1: i2c-controller@10051000 {
 i2c2: i2c-controller@10052000 {
     compatible = "ingenic,x1000-i2c";
     reg = <0x10052000 0x1000>;
-
     #address-cells = <1>;
     #size-cells = <0>;

@@ -155,9 +155,11 @@
  * effective barrier as noted by commit 6b07d38aaa52 ("MIPS: Octeon: Use
  * optimized memory barrier primitives."). Here we specify that the affected
  * sync instructions should be emitted twice.
+ * Note that this expression is evaluated by the assembler (not the compiler),
+ * and that the assembler evaluates '==' as 0 or -1, not 0 or 1.
  */
 #ifdef CONFIG_CPU_CAVIUM_OCTEON
-# define __SYNC_rpt(type)	(1 + (type == __SYNC_wmb))
+# define __SYNC_rpt(type)	(1 - (type == __SYNC_wmb))
 #else
 # define __SYNC_rpt(type)	1
 #endif
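Worked through with the assembler semantics the new comment describes: for the Octeon case, (type == __SYNC_wmb) evaluates to -1 when type is __SYNC_wmb, so __SYNC_rpt expands to 1 - (-1) = 2 repetitions of the sync instruction, and to 1 - 0 = 1 for every other barrier type. The previous '1 + (...)' form, under those same GAS rules, collapsed to 1 + (-1) = 0 for wmb and emitted no sync at all, which is the bug this one-character change fixes.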
@@ -134,7 +134,7 @@ void release_vpe(struct vpe *v)
 {
     list_del(&v->list);
     if (v->load_addr)
-        release_progmem(v);
+        release_progmem(v->load_addr);
     kfree(v);
 }

@@ -33,6 +33,7 @@ endif
 cflags-vdso := $(ccflags-vdso) \
     $(filter -W%,$(filter-out -Wa$(comma)%,$(KBUILD_CFLAGS))) \
     -O3 -g -fPIC -fno-strict-aliasing -fno-common -fno-builtin -G 0 \
+    -mrelax-pic-calls $(call cc-option, -mexplicit-relocs) \
     -fno-stack-protector -fno-jump-tables -DDISABLE_BRANCH_PROFILING \
     $(call cc-option, -fno-asynchronous-unwind-tables) \
     $(call cc-option, -fno-stack-protector)
@@ -51,6 +52,8 @@ endif

 CFLAGS_REMOVE_vgettimeofday.o = -pg

+DISABLE_VDSO := n
+
 #
 # For the pre-R6 code in arch/mips/vdso/vdso.h for locating
 # the base address of VDSO, the linker will emit a R_MIPS_PC32
@@ -64,11 +67,24 @@ CFLAGS_REMOVE_vgettimeofday.o = -pg
 ifndef CONFIG_CPU_MIPSR6
   ifeq ($(call ld-ifversion, -lt, 225000000, y),y)
     $(warning MIPS VDSO requires binutils >= 2.25)
-    obj-vdso-y := $(filter-out vgettimeofday.o, $(obj-vdso-y))
-    ccflags-vdso += -DDISABLE_MIPS_VDSO
+    DISABLE_VDSO := y
   endif
 endif

+#
+# GCC (at least up to version 9.2) appears to emit function calls that make use
+# of the GOT when targeting microMIPS, which we can't use in the VDSO due to
+# the lack of relocations. As such, we disable the VDSO for microMIPS builds.
+#
+ifdef CONFIG_CPU_MICROMIPS
+  DISABLE_VDSO := y
+endif
+
+ifeq ($(DISABLE_VDSO),y)
+  obj-vdso-y := $(filter-out vgettimeofday.o, $(obj-vdso-y))
+  ccflags-vdso += -DDISABLE_MIPS_VDSO
+endif
+
 # VDSO linker flags.
 VDSO_LDFLAGS := \
     -Wl,-Bsymbolic -Wl,--no-undefined -Wl,-soname=linux-vdso.so.1 \
@@ -81,12 +97,18 @@ GCOV_PROFILE := n
 UBSAN_SANITIZE := n
 KCOV_INSTRUMENT := n

+# Check that we don't have PIC 'jalr t9' calls left
+quiet_cmd_vdso_mips_check = VDSOCHK $@
+      cmd_vdso_mips_check = if $(OBJDUMP) --disassemble $@ | egrep -h "jalr.*t9" > /dev/null; \
+            then (echo >&2 "$@: PIC 'jalr t9' calls are not supported"; \
+                  rm -f $@; /bin/false); fi
+
 #
 # Shared build commands.
 #

 quiet_cmd_vdsold_and_vdso_check = LD      $@
-      cmd_vdsold_and_vdso_check = $(cmd_vdsold); $(cmd_vdso_check)
+      cmd_vdsold_and_vdso_check = $(cmd_vdsold); $(cmd_vdso_check); $(cmd_vdso_mips_check)

 quiet_cmd_vdsold = VDSO    $@
       cmd_vdsold = $(CC) $(c_flags) $(VDSO_LDFLAGS) \
@@ -295,8 +295,13 @@ static inline bool pfn_valid(unsigned long pfn)
 /*
  * Some number of bits at the level of the page table that points to
  * a hugepte are used to encode the size. This masks those bits.
+ * On 8xx, HW assistance requires 4k alignment for the hugepte.
  */
+#ifdef CONFIG_PPC_8xx
+#define HUGEPD_SHIFT_MASK 0xfff
+#else
 #define HUGEPD_SHIFT_MASK 0x3f
+#endif

 #ifndef __ASSEMBLY__
@@ -168,6 +168,10 @@ struct thread_struct {
     unsigned long srr1;
     unsigned long dar;
     unsigned long dsisr;
+#ifdef CONFIG_PPC_BOOK3S_32
+    unsigned long r0, r3, r4, r5, r6, r8, r9, r11;
+    unsigned long lr, ctr;
+#endif
 #endif
     /* Debug Registers */
     struct debug_reg debug;
@@ -132,6 +132,18 @@ int main(void)
     OFFSET(SRR1, thread_struct, srr1);
     OFFSET(DAR, thread_struct, dar);
     OFFSET(DSISR, thread_struct, dsisr);
+#ifdef CONFIG_PPC_BOOK3S_32
+    OFFSET(THR0, thread_struct, r0);
+    OFFSET(THR3, thread_struct, r3);
+    OFFSET(THR4, thread_struct, r4);
+    OFFSET(THR5, thread_struct, r5);
+    OFFSET(THR6, thread_struct, r6);
+    OFFSET(THR8, thread_struct, r8);
+    OFFSET(THR9, thread_struct, r9);
+    OFFSET(THR11, thread_struct, r11);
+    OFFSET(THLR, thread_struct, lr);
+    OFFSET(THCTR, thread_struct, ctr);
+#endif
 #endif
 #ifdef CONFIG_SPE
     OFFSET(THREAD_EVR0, thread_struct, evr[0]);
@@ -1184,6 +1184,17 @@ void eeh_handle_special_event(void)
         eeh_pe_state_mark(pe, EEH_PE_RECOVERING);
         eeh_handle_normal_event(pe);
     } else {
+        eeh_for_each_pe(pe, tmp_pe)
+            eeh_pe_for_each_dev(tmp_pe, edev, tmp_edev)
+                edev->mode &= ~EEH_DEV_NO_HANDLER;
+
+        /* Notify all devices to be down */
+        eeh_pe_state_clear(pe, EEH_PE_PRI_BUS, true);
+        eeh_set_channel_state(pe, pci_channel_io_perm_failure);
+        eeh_pe_report(
+            "error_detected(permanent failure)", pe,
+            eeh_report_failure, NULL);
+
         pci_lock_rescan_remove();
         list_for_each_entry(hose, &hose_list, list_node) {
             phb_pe = eeh_phb_pe_get(hose);
@@ -1192,16 +1203,6 @@ void eeh_handle_special_event(void)
                 (phb_pe->state & EEH_PE_RECOVERING))
                 continue;

-            eeh_for_each_pe(pe, tmp_pe)
-                eeh_pe_for_each_dev(tmp_pe, edev, tmp_edev)
-                    edev->mode &= ~EEH_DEV_NO_HANDLER;
-
-            /* Notify all devices to be down */
-            eeh_pe_state_clear(pe, EEH_PE_PRI_BUS, true);
-            eeh_set_channel_state(pe, pci_channel_io_perm_failure);
-            eeh_pe_report(
-                "error_detected(permanent failure)", pe,
-                eeh_report_failure, NULL);
             bus = eeh_pe_bus_get(phb_pe);
             if (!bus) {
                 pr_err("%s: Cannot find PCI bus for "
@@ -783,7 +783,7 @@ fast_exception_return:
 1:  lis r3,exc_exit_restart_end@ha
     addi r3,r3,exc_exit_restart_end@l
     cmplw r12,r3
-#if CONFIG_PPC_BOOK3S_601
+#ifdef CONFIG_PPC_BOOK3S_601
     bge 2b
 #else
     bge 3f
@@ -791,7 +791,7 @@ fast_exception_return:
     lis r4,exc_exit_restart@ha
     addi r4,r4,exc_exit_restart@l
     cmplw r12,r4
-#if CONFIG_PPC_BOOK3S_601
+#ifdef CONFIG_PPC_BOOK3S_601
     blt 2b
 #else
     blt 3f
@@ -1354,12 +1354,17 @@ _GLOBAL(enter_rtas)
     mtspr SPRN_SRR0,r8
     mtspr SPRN_SRR1,r9
     RFI
-1:  tophys(r9,r1)
+1:  tophys_novmstack r9, r1
+#ifdef CONFIG_VMAP_STACK
+    li r0, MSR_KERNEL & ~MSR_IR  /* can take DTLB miss */
+    mtmsr r0
+    isync
+#endif
     lwz r8,INT_FRAME_SIZE+4(r9)  /* get return address */
     lwz r9,8(r9)  /* original msr value */
     addi r1,r1,INT_FRAME_SIZE
     li r0,0
-    tophys(r7, r2)
+    tophys_novmstack r7, r2
     stw r0, THREAD + RTAS_SP(r7)
     mtspr SPRN_SRR0,r8
     mtspr SPRN_SRR1,r9
@ -290,17 +290,55 @@ MachineCheck:
|
||||||
7: EXCEPTION_PROLOG_2
|
7: EXCEPTION_PROLOG_2
|
||||||
addi r3,r1,STACK_FRAME_OVERHEAD
|
addi r3,r1,STACK_FRAME_OVERHEAD
|
||||||
#ifdef CONFIG_PPC_CHRP
|
#ifdef CONFIG_PPC_CHRP
|
||||||
bne cr1,1f
|
#ifdef CONFIG_VMAP_STACK
|
||||||
|
mfspr r4, SPRN_SPRG_THREAD
|
||||||
|
tovirt(r4, r4)
|
||||||
|
lwz r4, RTAS_SP(r4)
|
||||||
|
cmpwi cr1, r4, 0
|
||||||
#endif
|
#endif
|
||||||
EXC_XFER_STD(0x200, machine_check_exception)
|
beq cr1, machine_check_tramp
|
||||||
#ifdef CONFIG_PPC_CHRP
|
b machine_check_in_rtas
|
||||||
1: b machine_check_in_rtas
|
#else
|
||||||
|
b machine_check_tramp
|
||||||
#endif
|
#endif
|
||||||
|
|
||||||
/* Data access exception. */
|
/* Data access exception. */
|
||||||
. = 0x300
|
. = 0x300
|
||||||
DO_KVM 0x300
|
DO_KVM 0x300
|
||||||
DataAccess:
|
DataAccess:
|
||||||
|
#ifdef CONFIG_VMAP_STACK
|
||||||
|
mtspr SPRN_SPRG_SCRATCH0,r10
|
||||||
|
mfspr r10, SPRN_SPRG_THREAD
|
||||||
|
BEGIN_MMU_FTR_SECTION
|
||||||
|
stw r11, THR11(r10)
|
||||||
|
mfspr r10, SPRN_DSISR
|
||||||
|
mfcr r11
|
||||||
|
#ifdef CONFIG_PPC_KUAP
|
||||||
|
andis. r10, r10, (DSISR_BAD_FAULT_32S | DSISR_DABRMATCH | DSISR_PROTFAULT)@h
|
||||||
|
#else
|
||||||
|
andis. r10, r10, (DSISR_BAD_FAULT_32S | DSISR_DABRMATCH)@h
|
||||||
|
#endif
|
||||||
|
mfspr r10, SPRN_SPRG_THREAD
|
||||||
|
beq hash_page_dsi
|
||||||
|
.Lhash_page_dsi_cont:
|
||||||
|
mtcr r11
|
||||||
|
lwz r11, THR11(r10)
|
||||||
|
END_MMU_FTR_SECTION_IFSET(MMU_FTR_HPTE_TABLE)
|
||||||
|
mtspr SPRN_SPRG_SCRATCH1,r11
|
||||||
|
mfspr r11, SPRN_DAR
|
||||||
|
stw r11, DAR(r10)
|
||||||
|
mfspr r11, SPRN_DSISR
|
||||||
|
stw r11, DSISR(r10)
|
||||||
|
mfspr r11, SPRN_SRR0
|
||||||
|
stw r11, SRR0(r10)
|
||||||
|
mfspr r11, SPRN_SRR1 /* check whether user or kernel */
|
||||||
|
stw r11, SRR1(r10)
|
||||||
|
mfcr r10
|
||||||
|
andi. r11, r11, MSR_PR
|
||||||
|
|
||||||
|
EXCEPTION_PROLOG_1
|
||||||
|
b handle_page_fault_tramp_1
|
||||||
|
#else /* CONFIG_VMAP_STACK */
|
||||||
EXCEPTION_PROLOG handle_dar_dsisr=1
|
EXCEPTION_PROLOG handle_dar_dsisr=1
|
||||||
get_and_save_dar_dsisr_on_stack r4, r5, r11
|
get_and_save_dar_dsisr_on_stack r4, r5, r11
|
||||||
BEGIN_MMU_FTR_SECTION
|
BEGIN_MMU_FTR_SECTION
|
||||||
|
@ -316,11 +354,32 @@ BEGIN_MMU_FTR_SECTION
|
||||||
FTR_SECTION_ELSE
|
FTR_SECTION_ELSE
|
||||||
b handle_page_fault_tramp_2
|
b handle_page_fault_tramp_2
|
||||||
ALT_MMU_FTR_SECTION_END_IFSET(MMU_FTR_HPTE_TABLE)
|
ALT_MMU_FTR_SECTION_END_IFSET(MMU_FTR_HPTE_TABLE)
|
||||||
|
#endif /* CONFIG_VMAP_STACK */
|
||||||
|
|
||||||
/* Instruction access exception. */
|
/* Instruction access exception. */
|
||||||
. = 0x400
|
. = 0x400
|
||||||
DO_KVM 0x400
|
DO_KVM 0x400
|
||||||
InstructionAccess:
|
InstructionAccess:
|
||||||
|
#ifdef CONFIG_VMAP_STACK
|
||||||
|
mtspr SPRN_SPRG_SCRATCH0,r10
|
||||||
|
mtspr SPRN_SPRG_SCRATCH1,r11
|
||||||
|
mfspr r10, SPRN_SPRG_THREAD
|
||||||
|
mfspr r11, SPRN_SRR0
|
||||||
|
stw r11, SRR0(r10)
|
||||||
|
mfspr r11, SPRN_SRR1 /* check whether user or kernel */
|
||||||
|
stw r11, SRR1(r10)
|
||||||
|
mfcr r10
|
||||||
|
BEGIN_MMU_FTR_SECTION
|
||||||
|
andis. r11, r11, SRR1_ISI_NOPT@h /* no pte found? */
|
||||||
|
bne hash_page_isi
|
||||||
|
.Lhash_page_isi_cont:
|
||||||
|
mfspr r11, SPRN_SRR1 /* check whether user or kernel */
|
||||||
|
END_MMU_FTR_SECTION_IFSET(MMU_FTR_HPTE_TABLE)
|
||||||
|
andi. r11, r11, MSR_PR
|
||||||
|
|
||||||
|
EXCEPTION_PROLOG_1
|
||||||
|
EXCEPTION_PROLOG_2
|
||||||
|
#else /* CONFIG_VMAP_STACK */
|
||||||
EXCEPTION_PROLOG
|
EXCEPTION_PROLOG
|
||||||
andis. r0,r9,SRR1_ISI_NOPT@h /* no pte found? */
|
andis. r0,r9,SRR1_ISI_NOPT@h /* no pte found? */
|
||||||
beq 1f /* if so, try to put a PTE */
|
beq 1f /* if so, try to put a PTE */
|
||||||
|
@ -329,6 +388,7 @@ InstructionAccess:
|
||||||
BEGIN_MMU_FTR_SECTION
|
BEGIN_MMU_FTR_SECTION
|
||||||
bl hash_page
|
bl hash_page
|
||||||
END_MMU_FTR_SECTION_IFSET(MMU_FTR_HPTE_TABLE)
|
END_MMU_FTR_SECTION_IFSET(MMU_FTR_HPTE_TABLE)
|
||||||
|
#endif /* CONFIG_VMAP_STACK */
|
||||||
1: mr r4,r12
|
1: mr r4,r12
|
||||||
andis. r5,r9,DSISR_SRR1_MATCH_32S@h /* Filter relevant SRR1 bits */
|
andis. r5,r9,DSISR_SRR1_MATCH_32S@h /* Filter relevant SRR1 bits */
|
||||||
stw r4, _DAR(r11)
|
stw r4, _DAR(r11)
|
||||||
|
@ -344,7 +404,7 @@ Alignment:
|
||||||
EXCEPTION_PROLOG handle_dar_dsisr=1
|
EXCEPTION_PROLOG handle_dar_dsisr=1
|
||||||
save_dar_dsisr_on_stack r4, r5, r11
|
save_dar_dsisr_on_stack r4, r5, r11
|
||||||
addi r3,r1,STACK_FRAME_OVERHEAD
|
addi r3,r1,STACK_FRAME_OVERHEAD
|
||||||
EXC_XFER_STD(0x600, alignment_exception)
|
b alignment_exception_tramp
|
||||||
|
|
||||||
/* Program check exception */
|
/* Program check exception */
|
||||||
EXCEPTION(0x700, ProgramCheck, program_check_exception, EXC_XFER_STD)
|
EXCEPTION(0x700, ProgramCheck, program_check_exception, EXC_XFER_STD)
|
||||||
|
@ -645,15 +705,100 @@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_NEED_DTLB_SW_LRU)
|
||||||
|
|
||||||
. = 0x3000
|
. = 0x3000
|
||||||
|
|
||||||
|
machine_check_tramp:
|
||||||
|
EXC_XFER_STD(0x200, machine_check_exception)
|
||||||
|
|
||||||
|
alignment_exception_tramp:
|
||||||
|
EXC_XFER_STD(0x600, alignment_exception)
|
||||||
|
|
||||||
handle_page_fault_tramp_1:
|
handle_page_fault_tramp_1:
|
||||||
|
#ifdef CONFIG_VMAP_STACK
|
||||||
|
EXCEPTION_PROLOG_2 handle_dar_dsisr=1
|
||||||
|
#endif
|
||||||
lwz r4, _DAR(r11)
|
lwz r4, _DAR(r11)
|
||||||
lwz r5, _DSISR(r11)
|
lwz r5, _DSISR(r11)
|
||||||
/* fall through */
|
/* fall through */
|
||||||
handle_page_fault_tramp_2:
|
handle_page_fault_tramp_2:
|
||||||
EXC_XFER_LITE(0x300, handle_page_fault)
|
EXC_XFER_LITE(0x300, handle_page_fault)
|
||||||
|
|
||||||
|
#ifdef CONFIG_VMAP_STACK
|
||||||
|
.macro save_regs_thread thread
|
||||||
|
stw r0, THR0(\thread)
|
||||||
|
stw r3, THR3(\thread)
|
||||||
|
stw r4, THR4(\thread)
|
||||||
|
stw r5, THR5(\thread)
|
||||||
|
stw r6, THR6(\thread)
|
||||||
|
stw r8, THR8(\thread)
|
||||||
|
stw r9, THR9(\thread)
|
||||||
|
mflr r0
|
||||||
|
stw r0, THLR(\thread)
|
||||||
|
mfctr r0
|
||||||
|
stw r0, THCTR(\thread)
|
||||||
|
.endm
|
||||||
|
|
||||||
|
.macro restore_regs_thread thread
|
||||||
|
lwz r0, THLR(\thread)
|
||||||
|
mtlr r0
|
||||||
|
lwz r0, THCTR(\thread)
|
||||||
|
mtctr r0
|
||||||
|
lwz r0, THR0(\thread)
|
||||||
|
lwz r3, THR3(\thread)
|
||||||
|
lwz r4, THR4(\thread)
|
||||||
|
lwz r5, THR5(\thread)
|
||||||
|
lwz r6, THR6(\thread)
|
||||||
|
lwz r8, THR8(\thread)
|
||||||
|
lwz r9, THR9(\thread)
|
||||||
|
.endm
|
||||||
|
|
||||||
|
hash_page_dsi:
|
||||||
|
save_regs_thread r10
|
||||||
|
mfdsisr r3
|
||||||
|
mfdar r4
|
||||||
|
mfsrr0 r5
|
||||||
|
mfsrr1 r9
|
||||||
|
rlwinm r3, r3, 32 - 15, _PAGE_RW /* DSISR_STORE -> _PAGE_RW */
|
||||||
|
bl hash_page
|
||||||
|
mfspr r10, SPRN_SPRG_THREAD
|
||||||
|
restore_regs_thread r10
|
||||||
|
b .Lhash_page_dsi_cont
|
||||||
|
|
||||||
|
hash_page_isi:
|
||||||
|
mr r11, r10
|
||||||
|
mfspr r10, SPRN_SPRG_THREAD
|
||||||
|
save_regs_thread r10
|
||||||
|
li r3, 0
|
||||||
|
lwz r4, SRR0(r10)
|
||||||
|
lwz r9, SRR1(r10)
|
||||||
|
bl hash_page
|
||||||
|
mfspr r10, SPRN_SPRG_THREAD
|
||||||
|
restore_regs_thread r10
|
||||||
|
mr r10, r11
|
||||||
|
b .Lhash_page_isi_cont
|
||||||
|
|
||||||
|
.globl fast_hash_page_return
|
||||||
|
fast_hash_page_return:
|
||||||
|
andis. r10, r9, SRR1_ISI_NOPT@h /* Set on ISI, cleared on DSI */
|
||||||
|
mfspr r10, SPRN_SPRG_THREAD
|
||||||
|
restore_regs_thread r10
|
||||||
|
bne 1f
|
||||||
|
|
||||||
|
/* DSI */
|
||||||
|
mtcr r11
|
||||||
|
lwz r11, THR11(r10)
|
||||||
|
mfspr r10, SPRN_SPRG_SCRATCH0
|
||||||
|
SYNC
|
||||||
|
RFI
|
||||||
|
|
||||||
|
1: /* ISI */
|
||||||
|
mtcr r11
|
||||||
|
mfspr r11, SPRN_SPRG_SCRATCH1
|
||||||
|
mfspr r10, SPRN_SPRG_SCRATCH0
|
||||||
|
SYNC
|
||||||
|
RFI
|
||||||
|
|
||||||
stack_overflow:
|
stack_overflow:
|
||||||
vmap_stack_overflow_exception
|
vmap_stack_overflow_exception
|
||||||
|
#endif
|
||||||
|
|
||||||
AltiVecUnavailable:
|
AltiVecUnavailable:
|
||||||
EXCEPTION_PROLOG
|
EXCEPTION_PROLOG
|
||||||
|
|
|
@ -64,11 +64,25 @@
|
||||||
.endm
|
.endm
|
||||||
|
|
||||||
.macro EXCEPTION_PROLOG_2 handle_dar_dsisr=0
|
.macro EXCEPTION_PROLOG_2 handle_dar_dsisr=0
|
||||||
|
#if defined(CONFIG_VMAP_STACK) && defined(CONFIG_PPC_BOOK3S)
|
||||||
|
BEGIN_MMU_FTR_SECTION
|
||||||
|
mtcr r10
|
||||||
|
FTR_SECTION_ELSE
|
||||||
|
stw r10, _CCR(r11)
|
||||||
|
ALT_MMU_FTR_SECTION_END_IFSET(MMU_FTR_HPTE_TABLE)
|
||||||
|
#else
|
||||||
stw r10,_CCR(r11) /* save registers */
|
stw r10,_CCR(r11) /* save registers */
|
||||||
|
#endif
|
||||||
|
mfspr r10, SPRN_SPRG_SCRATCH0
|
||||||
stw r12,GPR12(r11)
|
stw r12,GPR12(r11)
|
||||||
stw r9,GPR9(r11)
|
stw r9,GPR9(r11)
|
||||||
mfspr r10,SPRN_SPRG_SCRATCH0
|
|
||||||
stw r10,GPR10(r11)
|
stw r10,GPR10(r11)
|
||||||
|
#if defined(CONFIG_VMAP_STACK) && defined(CONFIG_PPC_BOOK3S)
|
||||||
|
BEGIN_MMU_FTR_SECTION
|
||||||
|
mfcr r10
|
||||||
|
stw r10, _CCR(r11)
|
||||||
|
END_MMU_FTR_SECTION_IFSET(MMU_FTR_HPTE_TABLE)
|
||||||
|
#endif
|
||||||
mfspr r12,SPRN_SPRG_SCRATCH1
|
mfspr r12,SPRN_SPRG_SCRATCH1
|
||||||
stw r12,GPR11(r11)
|
stw r12,GPR11(r11)
|
||||||
mflr r10
|
mflr r10
|
||||||
|
@ -83,6 +97,11 @@
|
||||||
stw r10, _DSISR(r11)
|
stw r10, _DSISR(r11)
|
||||||
.endif
|
.endif
|
||||||
lwz r9, SRR1(r12)
|
lwz r9, SRR1(r12)
|
||||||
|
#if defined(CONFIG_VMAP_STACK) && defined(CONFIG_PPC_BOOK3S)
|
||||||
|
BEGIN_MMU_FTR_SECTION
|
||||||
|
andi. r10, r9, MSR_PR
|
||||||
|
END_MMU_FTR_SECTION_IFSET(MMU_FTR_HPTE_TABLE)
|
||||||
|
#endif
|
||||||
lwz r12, SRR0(r12)
|
lwz r12, SRR0(r12)
|
||||||
#else
|
#else
|
||||||
mfspr r12,SPRN_SRR0
|
mfspr r12,SPRN_SRR0
|
||||||
|
|
|
@@ -256,7 +256,7 @@ InstructionTLBMiss:
      * set. All other Linux PTE bits control the behavior
      * of the MMU.
      */
-    rlwimi r10, r10, 0, 0x0f00  /* Clear bits 20-23 */
+    rlwinm r10, r10, 0, ~0x0f00  /* Clear bits 20-23 */
     rlwimi r10, r10, 4, 0x0400  /* Copy _PAGE_EXEC into bit 21 */
     ori r10, r10, RPN_PATTERN | 0x200  /* Set 22 and 24-27 */
     mtspr SPRN_MI_RPN, r10  /* Update TLB entry */
@@ -166,7 +166,11 @@ BEGIN_FTR_SECTION
     mfspr r9,SPRN_HID0
     andis. r9,r9,HID0_NAP@h
     beq 1f
+#ifdef CONFIG_VMAP_STACK
+    addis r9, r11, nap_save_msscr0@ha
+#else
     addis r9,r11,(nap_save_msscr0-KERNELBASE)@ha
+#endif
     lwz r9,nap_save_msscr0@l(r9)
     mtspr SPRN_MSSCR0, r9
     sync
@@ -174,7 +178,11 @@ BEGIN_FTR_SECTION
 1:
 END_FTR_SECTION_IFSET(CPU_FTR_NAP_DISABLE_L2_PR)
 BEGIN_FTR_SECTION
+#ifdef CONFIG_VMAP_STACK
+    addis r9, r11, nap_save_hid1@ha
+#else
     addis r9,r11,(nap_save_hid1-KERNELBASE)@ha
+#endif
     lwz r9,nap_save_hid1@l(r9)
     mtspr SPRN_HID1, r9
 END_FTR_SECTION_IFSET(CPU_FTR_DUAL_PLL_750FX)
@@ -200,14 +200,27 @@ unsigned long get_tm_stackpointer(struct task_struct *tsk)
      * normal/non-checkpointed stack pointer.
      */

+    unsigned long ret = tsk->thread.regs->gpr[1];
+
 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
     BUG_ON(tsk != current);

     if (MSR_TM_ACTIVE(tsk->thread.regs->msr)) {
+        preempt_disable();
         tm_reclaim_current(TM_CAUSE_SIGNAL);
         if (MSR_TM_TRANSACTIONAL(tsk->thread.regs->msr))
-            return tsk->thread.ckpt_regs.gpr[1];
+            ret = tsk->thread.ckpt_regs.gpr[1];
+
+        /*
+         * If we treclaim, we must clear the current thread's TM bits
+         * before re-enabling preemption. Otherwise we might be
+         * preempted and have the live MSR[TS] changed behind our back
+         * (tm_recheckpoint_new_task() would recheckpoint). Besides, we
+         * enter the signal handler in non-transactional state.
+         */
+        tsk->thread.regs->msr &= ~MSR_TS_MASK;
+        preempt_enable();
     }
 #endif
-    return tsk->thread.regs->gpr[1];
+    return ret;
 }
@ -489,19 +489,11 @@ static int save_user_regs(struct pt_regs *regs, struct mcontext __user *frame,
|
||||||
*/
|
*/
|
||||||
static int save_tm_user_regs(struct pt_regs *regs,
|
static int save_tm_user_regs(struct pt_regs *regs,
|
||||||
struct mcontext __user *frame,
|
struct mcontext __user *frame,
|
||||||
struct mcontext __user *tm_frame, int sigret)
|
struct mcontext __user *tm_frame, int sigret,
|
||||||
|
unsigned long msr)
|
||||||
{
|
{
|
||||||
unsigned long msr = regs->msr;
|
|
||||||
|
|
||||||
WARN_ON(tm_suspend_disabled);
|
WARN_ON(tm_suspend_disabled);
|
||||||
|
|
||||||
/* Remove TM bits from thread's MSR. The MSR in the sigcontext
|
|
||||||
* just indicates to userland that we were doing a transaction, but we
|
|
||||||
* don't want to return in transactional state. This also ensures
|
|
||||||
* that flush_fp_to_thread won't set TIF_RESTORE_TM again.
|
|
||||||
*/
|
|
||||||
regs->msr &= ~MSR_TS_MASK;
|
|
||||||
|
|
||||||
/* Save both sets of general registers */
|
/* Save both sets of general registers */
|
||||||
if (save_general_regs(¤t->thread.ckpt_regs, frame)
|
if (save_general_regs(¤t->thread.ckpt_regs, frame)
|
||||||
|| save_general_regs(regs, tm_frame))
|
|| save_general_regs(regs, tm_frame))
|
||||||
|
@ -912,6 +904,10 @@ int handle_rt_signal32(struct ksignal *ksig, sigset_t *oldset,
|
||||||
int sigret;
|
int sigret;
|
||||||
unsigned long tramp;
|
unsigned long tramp;
|
||||||
struct pt_regs *regs = tsk->thread.regs;
|
struct pt_regs *regs = tsk->thread.regs;
|
||||||
|
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
|
||||||
|
/* Save the thread's msr before get_tm_stackpointer() changes it */
|
||||||
|
unsigned long msr = regs->msr;
|
||||||
|
#endif
|
||||||
|
|
||||||
BUG_ON(tsk != current);
|
BUG_ON(tsk != current);
|
||||||
|
|
||||||
|
@ -944,13 +940,13 @@ int handle_rt_signal32(struct ksignal *ksig, sigset_t *oldset,
|
||||||
|
|
||||||
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
|
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
|
||||||
tm_frame = &rt_sf->uc_transact.uc_mcontext;
|
tm_frame = &rt_sf->uc_transact.uc_mcontext;
|
||||||
if (MSR_TM_ACTIVE(regs->msr)) {
|
if (MSR_TM_ACTIVE(msr)) {
|
||||||
if (__put_user((unsigned long)&rt_sf->uc_transact,
|
if (__put_user((unsigned long)&rt_sf->uc_transact,
|
||||||
&rt_sf->uc.uc_link) ||
|
&rt_sf->uc.uc_link) ||
|
||||||
__put_user((unsigned long)tm_frame,
|
__put_user((unsigned long)tm_frame,
|
||||||
&rt_sf->uc_transact.uc_regs))
|
&rt_sf->uc_transact.uc_regs))
|
||||||
goto badframe;
|
goto badframe;
|
||||||
if (save_tm_user_regs(regs, frame, tm_frame, sigret))
|
if (save_tm_user_regs(regs, frame, tm_frame, sigret, msr))
|
||||||
goto badframe;
|
goto badframe;
|
||||||
}
|
}
|
||||||
else
|
else
|
||||||
|
@ -1369,6 +1365,10 @@ int handle_signal32(struct ksignal *ksig, sigset_t *oldset,
|
||||||
int sigret;
|
int sigret;
|
||||||
unsigned long tramp;
|
unsigned long tramp;
|
||||||
struct pt_regs *regs = tsk->thread.regs;
|
struct pt_regs *regs = tsk->thread.regs;
|
||||||
|
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
|
||||||
|
/* Save the thread's msr before get_tm_stackpointer() changes it */
|
||||||
|
unsigned long msr = regs->msr;
|
||||||
|
#endif
|
||||||
|
|
||||||
BUG_ON(tsk != current);
|
BUG_ON(tsk != current);
|
||||||
|
|
||||||
|
@ -1402,9 +1402,9 @@ int handle_signal32(struct ksignal *ksig, sigset_t *oldset,
|
||||||
|
|
||||||
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
|
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
|
||||||
tm_mctx = &frame->mctx_transact;
|
tm_mctx = &frame->mctx_transact;
|
||||||
if (MSR_TM_ACTIVE(regs->msr)) {
|
if (MSR_TM_ACTIVE(msr)) {
|
||||||
if (save_tm_user_regs(regs, &frame->mctx, &frame->mctx_transact,
|
if (save_tm_user_regs(regs, &frame->mctx, &frame->mctx_transact,
|
||||||
sigret))
|
sigret, msr))
|
||||||
goto badframe;
|
goto badframe;
|
||||||
}
|
}
|
||||||
else
|
else
|
||||||
|
|
|
@ -192,7 +192,8 @@ static long setup_sigcontext(struct sigcontext __user *sc,
|
||||||
static long setup_tm_sigcontexts(struct sigcontext __user *sc,
|
static long setup_tm_sigcontexts(struct sigcontext __user *sc,
|
||||||
struct sigcontext __user *tm_sc,
|
struct sigcontext __user *tm_sc,
|
||||||
struct task_struct *tsk,
|
struct task_struct *tsk,
|
||||||
int signr, sigset_t *set, unsigned long handler)
|
int signr, sigset_t *set, unsigned long handler,
|
||||||
|
unsigned long msr)
|
||||||
{
|
{
|
||||||
/* When CONFIG_ALTIVEC is set, we _always_ setup v_regs even if the
|
/* When CONFIG_ALTIVEC is set, we _always_ setup v_regs even if the
|
||||||
* process never used altivec yet (MSR_VEC is zero in pt_regs of
|
* process never used altivec yet (MSR_VEC is zero in pt_regs of
|
||||||
|
@ -207,12 +208,11 @@ static long setup_tm_sigcontexts(struct sigcontext __user *sc,
|
||||||
elf_vrreg_t __user *tm_v_regs = sigcontext_vmx_regs(tm_sc);
|
elf_vrreg_t __user *tm_v_regs = sigcontext_vmx_regs(tm_sc);
|
||||||
#endif
|
#endif
|
||||||
struct pt_regs *regs = tsk->thread.regs;
|
struct pt_regs *regs = tsk->thread.regs;
|
||||||
unsigned long msr = tsk->thread.regs->msr;
|
|
||||||
long err = 0;
|
long err = 0;
|
||||||
|
|
||||||
BUG_ON(tsk != current);
|
BUG_ON(tsk != current);
|
||||||
|
|
||||||
BUG_ON(!MSR_TM_ACTIVE(regs->msr));
|
BUG_ON(!MSR_TM_ACTIVE(msr));
|
||||||
|
|
||||||
WARN_ON(tm_suspend_disabled);
|
WARN_ON(tm_suspend_disabled);
|
||||||
|
|
||||||
|
@ -222,13 +222,6 @@ static long setup_tm_sigcontexts(struct sigcontext __user *sc,
|
||||||
*/
|
*/
|
||||||
msr |= tsk->thread.ckpt_regs.msr & (MSR_FP | MSR_VEC | MSR_VSX);
|
msr |= tsk->thread.ckpt_regs.msr & (MSR_FP | MSR_VEC | MSR_VSX);
|
||||||
|
|
||||||
/* Remove TM bits from thread's MSR. The MSR in the sigcontext
|
|
||||||
* just indicates to userland that we were doing a transaction, but we
|
|
||||||
* don't want to return in transactional state. This also ensures
|
|
||||||
* that flush_fp_to_thread won't set TIF_RESTORE_TM again.
|
|
||||||
*/
|
|
||||||
regs->msr &= ~MSR_TS_MASK;
|
|
||||||
|
|
||||||
#ifdef CONFIG_ALTIVEC
|
#ifdef CONFIG_ALTIVEC
|
||||||
err |= __put_user(v_regs, &sc->v_regs);
|
err |= __put_user(v_regs, &sc->v_regs);
|
||||||
err |= __put_user(tm_v_regs, &tm_sc->v_regs);
|
err |= __put_user(tm_v_regs, &tm_sc->v_regs);
|
||||||
|
@ -824,6 +817,10 @@ int handle_rt_signal64(struct ksignal *ksig, sigset_t *set,
|
||||||
unsigned long newsp = 0;
|
unsigned long newsp = 0;
|
||||||
long err = 0;
|
long err = 0;
|
||||||
struct pt_regs *regs = tsk->thread.regs;
|
struct pt_regs *regs = tsk->thread.regs;
|
||||||
|
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
|
||||||
|
/* Save the thread's msr before get_tm_stackpointer() changes it */
|
||||||
|
unsigned long msr = regs->msr;
|
||||||
|
#endif
|
||||||
|
|
||||||
BUG_ON(tsk != current);
|
BUG_ON(tsk != current);
|
||||||
|
|
||||||
|
@ -841,7 +838,7 @@ int handle_rt_signal64(struct ksignal *ksig, sigset_t *set,
|
||||||
err |= __put_user(0, &frame->uc.uc_flags);
|
err |= __put_user(0, &frame->uc.uc_flags);
|
||||||
err |= __save_altstack(&frame->uc.uc_stack, regs->gpr[1]);
|
err |= __save_altstack(&frame->uc.uc_stack, regs->gpr[1]);
|
||||||
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
|
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
|
||||||
if (MSR_TM_ACTIVE(regs->msr)) {
|
if (MSR_TM_ACTIVE(msr)) {
|
||||||
/* The ucontext_t passed to userland points to the second
|
/* The ucontext_t passed to userland points to the second
|
||||||
* ucontext_t (for transactional state) with its uc_link ptr.
|
* ucontext_t (for transactional state) with its uc_link ptr.
|
||||||
*/
|
*/
|
||||||
|
@ -849,7 +846,8 @@ int handle_rt_signal64(struct ksignal *ksig, sigset_t *set,
|
||||||
err |= setup_tm_sigcontexts(&frame->uc.uc_mcontext,
|
err |= setup_tm_sigcontexts(&frame->uc.uc_mcontext,
|
||||||
&frame->uc_transact.uc_mcontext,
|
&frame->uc_transact.uc_mcontext,
|
||||||
tsk, ksig->sig, NULL,
|
tsk, ksig->sig, NULL,
|
||||||
(unsigned long)ksig->ka.sa.sa_handler);
|
(unsigned long)ksig->ka.sa.sa_handler,
|
||||||
|
msr);
|
||||||
} else
|
} else
|
||||||
#endif
|
#endif
|
||||||
{
|
{
|
||||||
|
|
|
@ -25,12 +25,6 @@
|
||||||
#include <asm/feature-fixups.h>
|
#include <asm/feature-fixups.h>
|
||||||
#include <asm/code-patching-asm.h>
|
#include <asm/code-patching-asm.h>
|
||||||
|
|
||||||
#ifdef CONFIG_VMAP_STACK
|
|
||||||
#define ADDR_OFFSET 0
|
|
||||||
#else
|
|
||||||
#define ADDR_OFFSET PAGE_OFFSET
|
|
||||||
#endif
|
|
||||||
|
|
||||||
#ifdef CONFIG_SMP
|
#ifdef CONFIG_SMP
|
||||||
.section .bss
|
.section .bss
|
||||||
.align 2
|
.align 2
|
||||||
|
@ -53,8 +47,8 @@ mmu_hash_lock:
|
||||||
.text
|
.text
|
||||||
_GLOBAL(hash_page)
|
_GLOBAL(hash_page)
|
||||||
#ifdef CONFIG_SMP
|
#ifdef CONFIG_SMP
|
||||||
lis r8, (mmu_hash_lock - ADDR_OFFSET)@h
|
lis r8, (mmu_hash_lock - PAGE_OFFSET)@h
|
||||||
ori r8, r8, (mmu_hash_lock - ADDR_OFFSET)@l
|
ori r8, r8, (mmu_hash_lock - PAGE_OFFSET)@l
|
||||||
lis r0,0x0fff
|
lis r0,0x0fff
|
||||||
b 10f
|
b 10f
|
||||||
11: lwz r6,0(r8)
|
11: lwz r6,0(r8)
|
||||||
|
@ -72,12 +66,9 @@ _GLOBAL(hash_page)
|
||||||
cmplw 0,r4,r0
|
cmplw 0,r4,r0
|
||||||
ori r3,r3,_PAGE_USER|_PAGE_PRESENT /* test low addresses as user */
|
ori r3,r3,_PAGE_USER|_PAGE_PRESENT /* test low addresses as user */
|
||||||
mfspr r5, SPRN_SPRG_PGDIR /* phys page-table root */
|
mfspr r5, SPRN_SPRG_PGDIR /* phys page-table root */
|
||||||
#ifdef CONFIG_VMAP_STACK
|
|
||||||
tovirt(r5, r5)
|
|
||||||
#endif
|
|
||||||
blt+ 112f /* assume user more likely */
|
blt+ 112f /* assume user more likely */
|
||||||
lis r5, (swapper_pg_dir - ADDR_OFFSET)@ha /* if kernel address, use */
|
lis r5, (swapper_pg_dir - PAGE_OFFSET)@ha /* if kernel address, use */
|
||||||
addi r5 ,r5 ,(swapper_pg_dir - ADDR_OFFSET)@l /* kernel page table */
|
addi r5 ,r5 ,(swapper_pg_dir - PAGE_OFFSET)@l /* kernel page table */
|
||||||
rlwimi r3,r9,32-12,29,29 /* MSR_PR -> _PAGE_USER */
|
rlwimi r3,r9,32-12,29,29 /* MSR_PR -> _PAGE_USER */
|
||||||
112:
|
112:
|
||||||
#ifndef CONFIG_PTE_64BIT
|
#ifndef CONFIG_PTE_64BIT
|
||||||
|
@ -89,9 +80,6 @@ _GLOBAL(hash_page)
|
||||||
lwzx r8,r8,r5 /* Get L1 entry */
|
lwzx r8,r8,r5 /* Get L1 entry */
|
||||||
rlwinm. r8,r8,0,0,20 /* extract pt base address */
|
rlwinm. r8,r8,0,0,20 /* extract pt base address */
|
||||||
#endif
|
#endif
|
||||||
#ifdef CONFIG_VMAP_STACK
|
|
||||||
tovirt(r8, r8)
|
|
||||||
#endif
|
|
||||||
#ifdef CONFIG_SMP
|
#ifdef CONFIG_SMP
|
||||||
beq- hash_page_out /* return if no mapping */
|
beq- hash_page_out /* return if no mapping */
|
||||||
#else
|
#else
|
||||||
|
@ -143,30 +131,36 @@ retry:
|
||||||
bne- retry /* retry if someone got there first */
|
bne- retry /* retry if someone got there first */
|
||||||
|
|
||||||
mfsrin r3,r4 /* get segment reg for segment */
|
mfsrin r3,r4 /* get segment reg for segment */
|
||||||
|
#ifndef CONFIG_VMAP_STACK
|
||||||
mfctr r0
|
mfctr r0
|
||||||
stw r0,_CTR(r11)
|
stw r0,_CTR(r11)
|
||||||
|
#endif
|
||||||
bl create_hpte /* add the hash table entry */
|
bl create_hpte /* add the hash table entry */
|
||||||
|
|
||||||
#ifdef CONFIG_SMP
|
#ifdef CONFIG_SMP
|
||||||
eieio
|
eieio
|
||||||
lis r8, (mmu_hash_lock - ADDR_OFFSET)@ha
|
lis r8, (mmu_hash_lock - PAGE_OFFSET)@ha
|
||||||
li r0,0
|
li r0,0
|
||||||
stw r0, (mmu_hash_lock - ADDR_OFFSET)@l(r8)
|
stw r0, (mmu_hash_lock - PAGE_OFFSET)@l(r8)
|
||||||
#endif
|
#endif
|
||||||
|
|
||||||
|
#ifdef CONFIG_VMAP_STACK
|
||||||
|
b fast_hash_page_return
|
||||||
|
#else
|
||||||
/* Return from the exception */
|
/* Return from the exception */
|
||||||
lwz r5,_CTR(r11)
|
lwz r5,_CTR(r11)
|
||||||
mtctr r5
|
mtctr r5
|
||||||
lwz r0,GPR0(r11)
|
lwz r0,GPR0(r11)
|
||||||
lwz r8,GPR8(r11)
|
lwz r8,GPR8(r11)
|
||||||
b fast_exception_return
|
b fast_exception_return
|
||||||
|
#endif
|
||||||
|
|
||||||
#ifdef CONFIG_SMP
|
#ifdef CONFIG_SMP
|
||||||
hash_page_out:
|
hash_page_out:
|
||||||
eieio
|
eieio
|
||||||
lis r8, (mmu_hash_lock - ADDR_OFFSET)@ha
|
lis r8, (mmu_hash_lock - PAGE_OFFSET)@ha
|
||||||
li r0,0
|
li r0,0
|
||||||
stw r0, (mmu_hash_lock - ADDR_OFFSET)@l(r8)
|
stw r0, (mmu_hash_lock - PAGE_OFFSET)@l(r8)
|
||||||
blr
|
blr
|
||||||
#endif /* CONFIG_SMP */
|
#endif /* CONFIG_SMP */
|
||||||
|
|
||||||
|
@ -341,7 +335,7 @@ END_FTR_SECTION_IFCLR(CPU_FTR_NEED_COHERENT)
|
||||||
patch_site 1f, patch__hash_page_A1
|
patch_site 1f, patch__hash_page_A1
|
||||||
patch_site 2f, patch__hash_page_A2
|
patch_site 2f, patch__hash_page_A2
|
||||||
/* Get the address of the primary PTE group in the hash table (r3) */
|
/* Get the address of the primary PTE group in the hash table (r3) */
|
||||||
0: lis r0, (Hash_base - ADDR_OFFSET)@h /* base address of hash table */
|
0: lis r0, (Hash_base - PAGE_OFFSET)@h /* base address of hash table */
|
||||||
1: rlwimi r0,r3,LG_PTEG_SIZE,HASH_LEFT,HASH_RIGHT /* VSID -> hash */
|
1: rlwimi r0,r3,LG_PTEG_SIZE,HASH_LEFT,HASH_RIGHT /* VSID -> hash */
|
||||||
2: rlwinm r3,r4,20+LG_PTEG_SIZE,HASH_LEFT,HASH_RIGHT /* PI -> hash */
|
2: rlwinm r3,r4,20+LG_PTEG_SIZE,HASH_LEFT,HASH_RIGHT /* PI -> hash */
|
||||||
xor r3,r3,r0 /* make primary hash */
|
xor r3,r3,r0 /* make primary hash */
|
||||||
|
@ -355,10 +349,10 @@ END_FTR_SECTION_IFCLR(CPU_FTR_NEED_COHERENT)
|
||||||
beq+ 10f /* no PTE: go look for an empty slot */
|
beq+ 10f /* no PTE: go look for an empty slot */
|
||||||
tlbie r4
|
tlbie r4
|
||||||
|
|
||||||
lis r4, (htab_hash_searches - ADDR_OFFSET)@ha
|
lis r4, (htab_hash_searches - PAGE_OFFSET)@ha
|
||||||
lwz r6, (htab_hash_searches - ADDR_OFFSET)@l(r4)
|
lwz r6, (htab_hash_searches - PAGE_OFFSET)@l(r4)
|
||||||
addi r6,r6,1 /* count how many searches we do */
|
addi r6,r6,1 /* count how many searches we do */
|
||||||
stw r6, (htab_hash_searches - ADDR_OFFSET)@l(r4)
|
stw r6, (htab_hash_searches - PAGE_OFFSET)@l(r4)
|
||||||
|
|
||||||
/* Search the primary PTEG for a PTE whose 1st (d)word matches r5 */
|
/* Search the primary PTEG for a PTE whose 1st (d)word matches r5 */
|
||||||
mtctr r0
|
mtctr r0
|
||||||
|
@ -390,10 +384,10 @@ END_FTR_SECTION_IFCLR(CPU_FTR_NEED_COHERENT)
|
||||||
beq+ found_empty
|
beq+ found_empty
|
||||||
|
|
||||||
/* update counter of times that the primary PTEG is full */
|
/* update counter of times that the primary PTEG is full */
|
||||||
lis r4, (primary_pteg_full - ADDR_OFFSET)@ha
|
lis r4, (primary_pteg_full - PAGE_OFFSET)@ha
|
||||||
lwz r6, (primary_pteg_full - ADDR_OFFSET)@l(r4)
|
lwz r6, (primary_pteg_full - PAGE_OFFSET)@l(r4)
|
||||||
addi r6,r6,1
|
addi r6,r6,1
|
||||||
stw r6, (primary_pteg_full - ADDR_OFFSET)@l(r4)
|
stw r6, (primary_pteg_full - PAGE_OFFSET)@l(r4)
|
||||||
|
|
||||||
patch_site 0f, patch__hash_page_C
|
patch_site 0f, patch__hash_page_C
|
||||||
/* Search the secondary PTEG for an empty slot */
|
/* Search the secondary PTEG for an empty slot */
|
||||||
|
@ -427,8 +421,8 @@ END_FTR_SECTION_IFCLR(CPU_FTR_NEED_COHERENT)
|
||||||
* lockup here but that shouldn't happen
|
* lockup here but that shouldn't happen
|
||||||
*/
|
*/
|
||||||
|
|
||||||
1: lis r4, (next_slot - ADDR_OFFSET)@ha /* get next evict slot */
|
1: lis r4, (next_slot - PAGE_OFFSET)@ha /* get next evict slot */
|
||||||
lwz r6, (next_slot - ADDR_OFFSET)@l(r4)
|
lwz r6, (next_slot - PAGE_OFFSET)@l(r4)
|
||||||
addi r6,r6,HPTE_SIZE /* search for candidate */
|
addi r6,r6,HPTE_SIZE /* search for candidate */
|
||||||
andi. r6,r6,7*HPTE_SIZE
|
andi. r6,r6,7*HPTE_SIZE
|
||||||
stw r6,next_slot@l(r4)
|
stw r6,next_slot@l(r4)
|
||||||
|
|
|
@ -413,7 +413,7 @@ void __init MMU_init_hw(void)
|
||||||
void __init MMU_init_hw_patch(void)
|
void __init MMU_init_hw_patch(void)
|
||||||
{
|
{
|
||||||
unsigned int hmask = Hash_mask >> (16 - LG_HPTEG_SIZE);
|
unsigned int hmask = Hash_mask >> (16 - LG_HPTEG_SIZE);
|
||||||
unsigned int hash;
|
unsigned int hash = (unsigned int)Hash - PAGE_OFFSET;
|
||||||
|
|
||||||
if (ppc_md.progress)
|
if (ppc_md.progress)
|
||||||
ppc_md.progress("hash:patch", 0x345);
|
ppc_md.progress("hash:patch", 0x345);
|
||||||
|
@ -425,11 +425,6 @@ void __init MMU_init_hw_patch(void)
|
||||||
/*
|
/*
|
||||||
* Patch up the instructions in hashtable.S:create_hpte
|
* Patch up the instructions in hashtable.S:create_hpte
|
||||||
*/
|
*/
|
||||||
if (IS_ENABLED(CONFIG_VMAP_STACK))
|
|
||||||
hash = (unsigned int)Hash;
|
|
||||||
else
|
|
||||||
hash = (unsigned int)Hash - PAGE_OFFSET;
|
|
||||||
|
|
||||||
modify_instruction_site(&patch__hash_page_A0, 0xffff, hash >> 16);
|
modify_instruction_site(&patch__hash_page_A0, 0xffff, hash >> 16);
|
||||||
modify_instruction_site(&patch__hash_page_A1, 0x7c0, hash_mb << 6);
|
modify_instruction_site(&patch__hash_page_A1, 0x7c0, hash_mb << 6);
|
||||||
modify_instruction_site(&patch__hash_page_A2, 0x7c0, hash_mb2 << 6);
|
modify_instruction_site(&patch__hash_page_A2, 0x7c0, hash_mb2 << 6);
|
||||||
|
@ -439,8 +434,7 @@ void __init MMU_init_hw_patch(void)
|
||||||
/*
|
/*
|
||||||
* Patch up the instructions in hashtable.S:flush_hash_page
|
* Patch up the instructions in hashtable.S:flush_hash_page
|
||||||
*/
|
*/
|
||||||
modify_instruction_site(&patch__flush_hash_A0, 0xffff,
|
modify_instruction_site(&patch__flush_hash_A0, 0xffff, hash >> 16);
|
||||||
((unsigned int)Hash - PAGE_OFFSET) >> 16);
|
|
||||||
modify_instruction_site(&patch__flush_hash_A1, 0x7c0, hash_mb << 6);
|
modify_instruction_site(&patch__flush_hash_A1, 0x7c0, hash_mb << 6);
|
||||||
modify_instruction_site(&patch__flush_hash_A2, 0x7c0, hash_mb2 << 6);
|
modify_instruction_site(&patch__flush_hash_A2, 0x7c0, hash_mb2 << 6);
|
||||||
modify_instruction_site(&patch__flush_hash_B, 0xffff, hmask);
|
modify_instruction_site(&patch__flush_hash_B, 0xffff, hmask);
|
||||||
|
|
|
@ -53,20 +53,24 @@ static int __hugepte_alloc(struct mm_struct *mm, hugepd_t *hpdp,
|
||||||
if (pshift >= pdshift) {
|
if (pshift >= pdshift) {
|
||||||
cachep = PGT_CACHE(PTE_T_ORDER);
|
cachep = PGT_CACHE(PTE_T_ORDER);
|
||||||
num_hugepd = 1 << (pshift - pdshift);
|
num_hugepd = 1 << (pshift - pdshift);
|
||||||
|
new = NULL;
|
||||||
} else if (IS_ENABLED(CONFIG_PPC_8xx)) {
|
} else if (IS_ENABLED(CONFIG_PPC_8xx)) {
|
||||||
cachep = PGT_CACHE(PTE_INDEX_SIZE);
|
cachep = NULL;
|
||||||
num_hugepd = 1;
|
num_hugepd = 1;
|
||||||
|
new = pte_alloc_one(mm);
|
||||||
} else {
|
} else {
|
||||||
cachep = PGT_CACHE(pdshift - pshift);
|
cachep = PGT_CACHE(pdshift - pshift);
|
||||||
num_hugepd = 1;
|
num_hugepd = 1;
|
||||||
|
new = NULL;
|
||||||
}
|
}
|
||||||
|
|
||||||
if (!cachep) {
|
if (!cachep && !new) {
|
||||||
WARN_ONCE(1, "No page table cache created for hugetlb tables");
|
WARN_ONCE(1, "No page table cache created for hugetlb tables");
|
||||||
return -ENOMEM;
|
return -ENOMEM;
|
||||||
}
|
}
|
||||||
|
|
||||||
new = kmem_cache_alloc(cachep, pgtable_gfp_flags(mm, GFP_KERNEL));
|
if (cachep)
|
||||||
|
new = kmem_cache_alloc(cachep, pgtable_gfp_flags(mm, GFP_KERNEL));
|
||||||
|
|
||||||
BUG_ON(pshift > HUGEPD_SHIFT_MASK);
|
BUG_ON(pshift > HUGEPD_SHIFT_MASK);
|
||||||
BUG_ON((unsigned long)new & HUGEPD_SHIFT_MASK);
|
BUG_ON((unsigned long)new & HUGEPD_SHIFT_MASK);
|
||||||
|
@ -97,7 +101,10 @@ static int __hugepte_alloc(struct mm_struct *mm, hugepd_t *hpdp,
|
||||||
if (i < num_hugepd) {
|
if (i < num_hugepd) {
|
||||||
for (i = i - 1 ; i >= 0; i--, hpdp--)
|
for (i = i - 1 ; i >= 0; i--, hpdp--)
|
||||||
*hpdp = __hugepd(0);
|
*hpdp = __hugepd(0);
|
||||||
kmem_cache_free(cachep, new);
|
if (cachep)
|
||||||
|
kmem_cache_free(cachep, new);
|
||||||
|
else
|
||||||
|
pte_free(mm, new);
|
||||||
} else {
|
} else {
|
||||||
kmemleak_ignore(new);
|
kmemleak_ignore(new);
|
||||||
}
|
}
|
||||||
|
@ -324,8 +331,7 @@ static void free_hugepd_range(struct mmu_gather *tlb, hugepd_t *hpdp, int pdshif
|
||||||
if (shift >= pdshift)
|
if (shift >= pdshift)
|
||||||
hugepd_free(tlb, hugepte);
|
hugepd_free(tlb, hugepte);
|
||||||
else if (IS_ENABLED(CONFIG_PPC_8xx))
|
else if (IS_ENABLED(CONFIG_PPC_8xx))
|
||||||
pgtable_free_tlb(tlb, hugepte,
|
pgtable_free_tlb(tlb, hugepte, 0);
|
||||||
get_hugepd_cache_index(PTE_INDEX_SIZE));
|
|
||||||
else
|
else
|
||||||
pgtable_free_tlb(tlb, hugepte,
|
pgtable_free_tlb(tlb, hugepte,
|
||||||
get_hugepd_cache_index(pdshift - shift));
|
get_hugepd_cache_index(pdshift - shift));
|
||||||
|
@ -639,12 +645,13 @@ static int __init hugetlbpage_init(void)
|
||||||
* if we have pdshift and shift value same, we don't
|
* if we have pdshift and shift value same, we don't
|
||||||
* use pgt cache for hugepd.
|
* use pgt cache for hugepd.
|
||||||
*/
|
*/
|
||||||
if (pdshift > shift && IS_ENABLED(CONFIG_PPC_8xx))
|
if (pdshift > shift) {
|
||||||
pgtable_cache_add(PTE_INDEX_SIZE);
|
if (!IS_ENABLED(CONFIG_PPC_8xx))
|
||||||
else if (pdshift > shift)
|
pgtable_cache_add(pdshift - shift);
|
||||||
pgtable_cache_add(pdshift - shift);
|
} else if (IS_ENABLED(CONFIG_PPC_FSL_BOOK3E) ||
|
||||||
else if (IS_ENABLED(CONFIG_PPC_FSL_BOOK3E) || IS_ENABLED(CONFIG_PPC_8xx))
|
IS_ENABLED(CONFIG_PPC_8xx)) {
|
||||||
pgtable_cache_add(PTE_T_ORDER);
|
pgtable_cache_add(PTE_T_ORDER);
|
||||||
|
}
|
||||||
|
|
||||||
configured = true;
|
configured = true;
|
||||||
}
|
}
|
||||||
|
|
|
@ -185,8 +185,7 @@ u8 __initdata early_hash[256 << 10] __aligned(256 << 10) = {0};
|
||||||
|
|
||||||
static void __init kasan_early_hash_table(void)
|
static void __init kasan_early_hash_table(void)
|
||||||
{
|
{
|
||||||
unsigned int hash = IS_ENABLED(CONFIG_VMAP_STACK) ? (unsigned int)early_hash :
|
unsigned int hash = __pa(early_hash);
|
||||||
__pa(early_hash);
|
|
||||||
|
|
||||||
modify_instruction_site(&patch__hash_page_A0, 0xffff, hash >> 16);
|
modify_instruction_site(&patch__hash_page_A0, 0xffff, hash >> 16);
|
||||||
modify_instruction_site(&patch__flush_hash_A0, 0xffff, hash >> 16);
|
modify_instruction_site(&patch__flush_hash_A0, 0xffff, hash >> 16);
|
||||||
|
|
|
@ -3435,6 +3435,11 @@ getstring(char *s, int size)
|
||||||
int c;
|
int c;
|
||||||
|
|
||||||
c = skipbl();
|
c = skipbl();
|
||||||
|
if (c == '\n') {
|
||||||
|
*s = 0;
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
do {
|
do {
|
||||||
if( size > 1 ){
|
if( size > 1 ){
|
||||||
*s++ = c;
|
*s++ = c;
|
||||||
|
|
|
@ -1,2 +1,4 @@
|
||||||
Image
|
Image
|
||||||
Image.gz
|
Image.gz
|
||||||
|
loader
|
||||||
|
loader.lds
|
||||||
|
|
|
@ -72,6 +72,16 @@
|
||||||
#define EXC_LOAD_PAGE_FAULT 13
|
#define EXC_LOAD_PAGE_FAULT 13
|
||||||
#define EXC_STORE_PAGE_FAULT 15
|
#define EXC_STORE_PAGE_FAULT 15
|
||||||
|
|
||||||
|
/* PMP configuration */
|
||||||
|
#define PMP_R 0x01
|
||||||
|
#define PMP_W 0x02
|
||||||
|
#define PMP_X 0x04
|
||||||
|
#define PMP_A 0x18
|
||||||
|
#define PMP_A_TOR 0x08
|
||||||
|
#define PMP_A_NA4 0x10
|
||||||
|
#define PMP_A_NAPOT 0x18
|
||||||
|
#define PMP_L 0x80
|
||||||
|
|
||||||
/* symbolic CSR names: */
|
/* symbolic CSR names: */
|
||||||
#define CSR_CYCLE 0xc00
|
#define CSR_CYCLE 0xc00
|
||||||
#define CSR_TIME 0xc01
|
#define CSR_TIME 0xc01
|
||||||
|
@ -100,6 +110,8 @@
|
||||||
#define CSR_MCAUSE 0x342
|
#define CSR_MCAUSE 0x342
|
||||||
#define CSR_MTVAL 0x343
|
#define CSR_MTVAL 0x343
|
||||||
#define CSR_MIP 0x344
|
#define CSR_MIP 0x344
|
||||||
|
#define CSR_PMPCFG0 0x3a0
|
||||||
|
#define CSR_PMPADDR0 0x3b0
|
||||||
#define CSR_MHARTID 0xf14
|
#define CSR_MHARTID 0xf14
|
||||||
|
|
||||||
#ifdef CONFIG_RISCV_M_MODE
|
#ifdef CONFIG_RISCV_M_MODE
|
||||||
|
|
|
@ -58,6 +58,12 @@ _start_kernel:
|
||||||
/* Reset all registers except ra, a0, a1 */
|
/* Reset all registers except ra, a0, a1 */
|
||||||
call reset_regs
|
call reset_regs
|
||||||
|
|
||||||
|
/* Setup a PMP to permit access to all of memory. */
|
||||||
|
li a0, -1
|
||||||
|
csrw CSR_PMPADDR0, a0
|
||||||
|
li a0, (PMP_A_NAPOT | PMP_R | PMP_W | PMP_X)
|
||||||
|
csrw CSR_PMPCFG0, a0
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* The hartid in a0 is expected later on, and we have no firmware
|
* The hartid in a0 is expected later on, and we have no firmware
|
||||||
* to hand it to us.
|
* to hand it to us.
|
||||||
|
|
|
@ -156,6 +156,6 @@ void __init trap_init(void)
|
||||||
csr_write(CSR_SCRATCH, 0);
|
csr_write(CSR_SCRATCH, 0);
|
||||||
/* Set the exception vector address */
|
/* Set the exception vector address */
|
||||||
csr_write(CSR_TVEC, &handle_exception);
|
csr_write(CSR_TVEC, &handle_exception);
|
||||||
/* Enable all interrupts */
|
/* Enable interrupts */
|
||||||
csr_write(CSR_IE, -1);
|
csr_write(CSR_IE, IE_SIE | IE_EIE);
|
||||||
}
|
}
|
||||||
|
|
|
@ -19,18 +19,20 @@ asmlinkage void __init kasan_early_init(void)
|
||||||
for (i = 0; i < PTRS_PER_PTE; ++i)
|
for (i = 0; i < PTRS_PER_PTE; ++i)
|
||||||
set_pte(kasan_early_shadow_pte + i,
|
set_pte(kasan_early_shadow_pte + i,
|
||||||
mk_pte(virt_to_page(kasan_early_shadow_page),
|
mk_pte(virt_to_page(kasan_early_shadow_page),
|
||||||
PAGE_KERNEL));
|
PAGE_KERNEL));
|
||||||
|
|
||||||
for (i = 0; i < PTRS_PER_PMD; ++i)
|
for (i = 0; i < PTRS_PER_PMD; ++i)
|
||||||
set_pmd(kasan_early_shadow_pmd + i,
|
set_pmd(kasan_early_shadow_pmd + i,
|
||||||
pfn_pmd(PFN_DOWN(__pa((uintptr_t)kasan_early_shadow_pte)),
|
pfn_pmd(PFN_DOWN
|
||||||
__pgprot(_PAGE_TABLE)));
|
(__pa((uintptr_t) kasan_early_shadow_pte)),
|
||||||
|
__pgprot(_PAGE_TABLE)));
|
||||||
|
|
||||||
for (i = KASAN_SHADOW_START; i < KASAN_SHADOW_END;
|
for (i = KASAN_SHADOW_START; i < KASAN_SHADOW_END;
|
||||||
i += PGDIR_SIZE, ++pgd)
|
i += PGDIR_SIZE, ++pgd)
|
||||||
set_pgd(pgd,
|
set_pgd(pgd,
|
||||||
pfn_pgd(PFN_DOWN(__pa(((uintptr_t)kasan_early_shadow_pmd))),
|
pfn_pgd(PFN_DOWN
|
||||||
__pgprot(_PAGE_TABLE)));
|
(__pa(((uintptr_t) kasan_early_shadow_pmd))),
|
||||||
|
__pgprot(_PAGE_TABLE)));
|
||||||
|
|
||||||
/* init for swapper_pg_dir */
|
/* init for swapper_pg_dir */
|
||||||
pgd = pgd_offset_k(KASAN_SHADOW_START);
|
pgd = pgd_offset_k(KASAN_SHADOW_START);
|
||||||
|
@ -38,37 +40,43 @@ asmlinkage void __init kasan_early_init(void)
|
||||||
for (i = KASAN_SHADOW_START; i < KASAN_SHADOW_END;
|
for (i = KASAN_SHADOW_START; i < KASAN_SHADOW_END;
|
||||||
i += PGDIR_SIZE, ++pgd)
|
i += PGDIR_SIZE, ++pgd)
|
||||||
set_pgd(pgd,
|
set_pgd(pgd,
|
||||||
pfn_pgd(PFN_DOWN(__pa(((uintptr_t)kasan_early_shadow_pmd))),
|
pfn_pgd(PFN_DOWN
|
||||||
__pgprot(_PAGE_TABLE)));
|
(__pa(((uintptr_t) kasan_early_shadow_pmd))),
|
||||||
|
__pgprot(_PAGE_TABLE)));
|
||||||
|
|
||||||
flush_tlb_all();
|
flush_tlb_all();
|
||||||
}
|
}
|
||||||
|
|
||||||
static void __init populate(void *start, void *end)
|
static void __init populate(void *start, void *end)
|
||||||
{
|
{
|
||||||
unsigned long i;
|
unsigned long i, offset;
|
||||||
unsigned long vaddr = (unsigned long)start & PAGE_MASK;
|
unsigned long vaddr = (unsigned long)start & PAGE_MASK;
|
||||||
unsigned long vend = PAGE_ALIGN((unsigned long)end);
|
unsigned long vend = PAGE_ALIGN((unsigned long)end);
|
||||||
unsigned long n_pages = (vend - vaddr) / PAGE_SIZE;
|
unsigned long n_pages = (vend - vaddr) / PAGE_SIZE;
|
||||||
|
unsigned long n_ptes =
|
||||||
|
((n_pages + PTRS_PER_PTE) & -PTRS_PER_PTE) / PTRS_PER_PTE;
|
||||||
unsigned long n_pmds =
|
unsigned long n_pmds =
|
||||||
(n_pages % PTRS_PER_PTE) ? n_pages / PTRS_PER_PTE + 1 :
|
((n_ptes + PTRS_PER_PMD) & -PTRS_PER_PMD) / PTRS_PER_PMD;
|
||||||
n_pages / PTRS_PER_PTE;
|
|
||||||
|
pte_t *pte =
|
||||||
|
memblock_alloc(n_ptes * PTRS_PER_PTE * sizeof(pte_t), PAGE_SIZE);
|
||||||
|
pmd_t *pmd =
|
||||||
|
memblock_alloc(n_pmds * PTRS_PER_PMD * sizeof(pmd_t), PAGE_SIZE);
|
||||||
pgd_t *pgd = pgd_offset_k(vaddr);
|
pgd_t *pgd = pgd_offset_k(vaddr);
|
||||||
pmd_t *pmd = memblock_alloc(n_pmds * sizeof(pmd_t), PAGE_SIZE);
|
|
||||||
pte_t *pte = memblock_alloc(n_pages * sizeof(pte_t), PAGE_SIZE);
|
|
||||||
|
|
||||||
for (i = 0; i < n_pages; i++) {
|
for (i = 0; i < n_pages; i++) {
|
||||||
phys_addr_t phys = memblock_phys_alloc(PAGE_SIZE, PAGE_SIZE);
|
phys_addr_t phys = memblock_phys_alloc(PAGE_SIZE, PAGE_SIZE);
|
||||||
|
set_pte(&pte[i], pfn_pte(PHYS_PFN(phys), PAGE_KERNEL));
|
||||||
set_pte(pte + i, pfn_pte(PHYS_PFN(phys), PAGE_KERNEL));
|
|
||||||
}
|
}
|
||||||
|
|
||||||
for (i = 0; i < n_pmds; ++pgd, i += PTRS_PER_PMD)
|
for (i = 0, offset = 0; i < n_ptes; i++, offset += PTRS_PER_PTE)
|
||||||
set_pgd(pgd, pfn_pgd(PFN_DOWN(__pa(((uintptr_t)(pmd + i)))),
|
set_pmd(&pmd[i],
|
||||||
|
pfn_pmd(PFN_DOWN(__pa(&pte[offset])),
|
||||||
__pgprot(_PAGE_TABLE)));
|
__pgprot(_PAGE_TABLE)));
|
||||||
|
|
||||||
for (i = 0; i < n_pages; ++pmd, i += PTRS_PER_PTE)
|
for (i = 0, offset = 0; i < n_pmds; i++, offset += PTRS_PER_PMD)
|
||||||
set_pmd(pmd, pfn_pmd(PFN_DOWN(__pa((uintptr_t)(pte + i))),
|
set_pgd(&pgd[i],
|
||||||
|
pfn_pgd(PFN_DOWN(__pa(&pmd[offset])),
|
||||||
__pgprot(_PAGE_TABLE)));
|
__pgprot(_PAGE_TABLE)));
|
||||||
|
|
||||||
flush_tlb_all();
|
flush_tlb_all();
|
||||||
|
@ -81,7 +89,8 @@ void __init kasan_init(void)
|
||||||
unsigned long i;
|
unsigned long i;
|
||||||
|
|
||||||
kasan_populate_early_shadow((void *)KASAN_SHADOW_START,
|
kasan_populate_early_shadow((void *)KASAN_SHADOW_START,
|
||||||
(void *)kasan_mem_to_shadow((void *)VMALLOC_END));
|
(void *)kasan_mem_to_shadow((void *)
|
||||||
|
VMALLOC_END));
|
||||||
|
|
||||||
for_each_memblock(memory, reg) {
|
for_each_memblock(memory, reg) {
|
||||||
void *start = (void *)__va(reg->base);
|
void *start = (void *)__va(reg->base);
|
||||||
|
@ -90,14 +99,14 @@ void __init kasan_init(void)
|
||||||
if (start >= end)
|
if (start >= end)
|
||||||
break;
|
break;
|
||||||
|
|
||||||
populate(kasan_mem_to_shadow(start),
|
populate(kasan_mem_to_shadow(start), kasan_mem_to_shadow(end));
|
||||||
kasan_mem_to_shadow(end));
|
|
||||||
};
|
};
|
||||||
|
|
||||||
for (i = 0; i < PTRS_PER_PTE; i++)
|
for (i = 0; i < PTRS_PER_PTE; i++)
|
||||||
set_pte(&kasan_early_shadow_pte[i],
|
set_pte(&kasan_early_shadow_pte[i],
|
||||||
mk_pte(virt_to_page(kasan_early_shadow_page),
|
mk_pte(virt_to_page(kasan_early_shadow_page),
|
||||||
__pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_ACCESSED)));
|
__pgprot(_PAGE_PRESENT | _PAGE_READ |
|
||||||
|
_PAGE_ACCESSED)));
|
||||||
|
|
||||||
memset(kasan_early_shadow_page, 0, PAGE_SIZE);
|
memset(kasan_early_shadow_page, 0, PAGE_SIZE);
|
||||||
init_task.kasan_depth = 0;
|
init_task.kasan_depth = 0;
|
||||||
|
|
|
@ -146,7 +146,7 @@ all: bzImage
|
||||||
#KBUILD_IMAGE is necessary for packaging targets like rpm-pkg, deb-pkg...
|
#KBUILD_IMAGE is necessary for packaging targets like rpm-pkg, deb-pkg...
|
||||||
KBUILD_IMAGE := $(boot)/bzImage
|
KBUILD_IMAGE := $(boot)/bzImage
|
||||||
|
|
||||||
install: vmlinux
|
install:
|
||||||
$(Q)$(MAKE) $(build)=$(boot) $@
|
$(Q)$(MAKE) $(build)=$(boot) $@
|
||||||
|
|
||||||
bzImage: vmlinux
|
bzImage: vmlinux
|
||||||
|
|
|
@ -70,7 +70,7 @@ $(obj)/compressed/vmlinux: $(obj)/startup.a FORCE
|
||||||
$(obj)/startup.a: $(OBJECTS) FORCE
|
$(obj)/startup.a: $(OBJECTS) FORCE
|
||||||
$(call if_changed,ar)
|
$(call if_changed,ar)
|
||||||
|
|
||||||
install: $(CONFIGURE) $(obj)/bzImage
|
install:
|
||||||
sh -x $(srctree)/$(obj)/install.sh $(KERNELRELEASE) $(obj)/bzImage \
|
sh -x $(srctree)/$(obj)/install.sh $(KERNELRELEASE) $(obj)/bzImage \
|
||||||
System.map "$(INSTALL_PATH)"
|
System.map "$(INSTALL_PATH)"
|
||||||
|
|
||||||
|
|
|
@ -75,7 +75,7 @@ static unsigned long get_random(unsigned long limit)
|
||||||
*(unsigned long *) prng.parm_block ^= seed;
|
*(unsigned long *) prng.parm_block ^= seed;
|
||||||
for (i = 0; i < 16; i++) {
|
for (i = 0; i < 16; i++) {
|
||||||
cpacf_kmc(CPACF_KMC_PRNG, prng.parm_block,
|
cpacf_kmc(CPACF_KMC_PRNG, prng.parm_block,
|
||||||
(char *) entropy, (char *) entropy,
|
(u8 *) entropy, (u8 *) entropy,
|
||||||
sizeof(entropy));
|
sizeof(entropy));
|
||||||
memcpy(prng.parm_block, entropy, sizeof(entropy));
|
memcpy(prng.parm_block, entropy, sizeof(entropy));
|
||||||
}
|
}
|
||||||
|
|
|
@ -53,6 +53,7 @@ CONFIG_VFIO_AP=m
|
||||||
CONFIG_CRASH_DUMP=y
|
CONFIG_CRASH_DUMP=y
|
||||||
CONFIG_HIBERNATION=y
|
CONFIG_HIBERNATION=y
|
||||||
CONFIG_PM_DEBUG=y
|
CONFIG_PM_DEBUG=y
|
||||||
|
CONFIG_PROTECTED_VIRTUALIZATION_GUEST=y
|
||||||
CONFIG_CMM=m
|
CONFIG_CMM=m
|
||||||
CONFIG_APPLDATA_BASE=y
|
CONFIG_APPLDATA_BASE=y
|
||||||
CONFIG_KVM=m
|
CONFIG_KVM=m
|
||||||
|
@ -474,7 +475,6 @@ CONFIG_NLMON=m
|
||||||
# CONFIG_NET_VENDOR_EMULEX is not set
|
# CONFIG_NET_VENDOR_EMULEX is not set
|
||||||
# CONFIG_NET_VENDOR_EZCHIP is not set
|
# CONFIG_NET_VENDOR_EZCHIP is not set
|
||||||
# CONFIG_NET_VENDOR_GOOGLE is not set
|
# CONFIG_NET_VENDOR_GOOGLE is not set
|
||||||
# CONFIG_NET_VENDOR_HP is not set
|
|
||||||
# CONFIG_NET_VENDOR_HUAWEI is not set
|
# CONFIG_NET_VENDOR_HUAWEI is not set
|
||||||
# CONFIG_NET_VENDOR_INTEL is not set
|
# CONFIG_NET_VENDOR_INTEL is not set
|
||||||
# CONFIG_NET_VENDOR_MARVELL is not set
|
# CONFIG_NET_VENDOR_MARVELL is not set
|
||||||
|
@ -684,7 +684,6 @@ CONFIG_CRYPTO_ADIANTUM=m
|
||||||
CONFIG_CRYPTO_XCBC=m
|
CONFIG_CRYPTO_XCBC=m
|
||||||
CONFIG_CRYPTO_VMAC=m
|
CONFIG_CRYPTO_VMAC=m
|
||||||
CONFIG_CRYPTO_CRC32=m
|
CONFIG_CRYPTO_CRC32=m
|
||||||
CONFIG_CRYPTO_XXHASH=m
|
|
||||||
CONFIG_CRYPTO_MICHAEL_MIC=m
|
CONFIG_CRYPTO_MICHAEL_MIC=m
|
||||||
CONFIG_CRYPTO_RMD128=m
|
CONFIG_CRYPTO_RMD128=m
|
||||||
CONFIG_CRYPTO_RMD160=m
|
CONFIG_CRYPTO_RMD160=m
|
||||||
|
@ -748,7 +747,6 @@ CONFIG_DEBUG_INFO_DWARF4=y
|
||||||
CONFIG_GDB_SCRIPTS=y
|
CONFIG_GDB_SCRIPTS=y
|
||||||
CONFIG_FRAME_WARN=1024
|
CONFIG_FRAME_WARN=1024
|
||||||
CONFIG_HEADERS_INSTALL=y
|
CONFIG_HEADERS_INSTALL=y
|
||||||
CONFIG_HEADERS_CHECK=y
|
|
||||||
CONFIG_DEBUG_SECTION_MISMATCH=y
|
CONFIG_DEBUG_SECTION_MISMATCH=y
|
||||||
CONFIG_MAGIC_SYSRQ=y
|
CONFIG_MAGIC_SYSRQ=y
|
||||||
CONFIG_DEBUG_PAGEALLOC=y
|
CONFIG_DEBUG_PAGEALLOC=y
|
||||||
|
@ -772,9 +770,9 @@ CONFIG_DEBUG_MEMORY_INIT=y
|
||||||
CONFIG_MEMORY_NOTIFIER_ERROR_INJECT=m
|
CONFIG_MEMORY_NOTIFIER_ERROR_INJECT=m
|
||||||
CONFIG_DEBUG_PER_CPU_MAPS=y
|
CONFIG_DEBUG_PER_CPU_MAPS=y
|
||||||
CONFIG_DEBUG_SHIRQ=y
|
CONFIG_DEBUG_SHIRQ=y
|
||||||
|
CONFIG_PANIC_ON_OOPS=y
|
||||||
CONFIG_DETECT_HUNG_TASK=y
|
CONFIG_DETECT_HUNG_TASK=y
|
||||||
CONFIG_WQ_WATCHDOG=y
|
CONFIG_WQ_WATCHDOG=y
|
||||||
CONFIG_PANIC_ON_OOPS=y
|
|
||||||
CONFIG_DEBUG_TIMEKEEPING=y
|
CONFIG_DEBUG_TIMEKEEPING=y
|
||||||
CONFIG_PROVE_LOCKING=y
|
CONFIG_PROVE_LOCKING=y
|
||||||
CONFIG_LOCK_STAT=y
|
CONFIG_LOCK_STAT=y
|
||||||
|
@ -783,9 +781,20 @@ CONFIG_DEBUG_ATOMIC_SLEEP=y
|
||||||
CONFIG_DEBUG_LOCKING_API_SELFTESTS=y
|
CONFIG_DEBUG_LOCKING_API_SELFTESTS=y
|
||||||
CONFIG_DEBUG_SG=y
|
CONFIG_DEBUG_SG=y
|
||||||
CONFIG_DEBUG_NOTIFIERS=y
|
CONFIG_DEBUG_NOTIFIERS=y
|
||||||
|
CONFIG_BUG_ON_DATA_CORRUPTION=y
|
||||||
CONFIG_DEBUG_CREDENTIALS=y
|
CONFIG_DEBUG_CREDENTIALS=y
|
||||||
CONFIG_RCU_TORTURE_TEST=m
|
CONFIG_RCU_TORTURE_TEST=m
|
||||||
CONFIG_RCU_CPU_STALL_TIMEOUT=300
|
CONFIG_RCU_CPU_STALL_TIMEOUT=300
|
||||||
|
CONFIG_LATENCYTOP=y
|
||||||
|
CONFIG_FUNCTION_PROFILER=y
|
||||||
|
CONFIG_STACK_TRACER=y
|
||||||
|
CONFIG_IRQSOFF_TRACER=y
|
||||||
|
CONFIG_PREEMPT_TRACER=y
|
||||||
|
CONFIG_SCHED_TRACER=y
|
||||||
|
CONFIG_FTRACE_SYSCALLS=y
|
||||||
|
CONFIG_BLK_DEV_IO_TRACE=y
|
||||||
|
CONFIG_HIST_TRIGGERS=y
|
||||||
|
CONFIG_S390_PTDUMP=y
|
||||||
CONFIG_NOTIFIER_ERROR_INJECTION=m
|
CONFIG_NOTIFIER_ERROR_INJECTION=m
|
||||||
CONFIG_NETDEV_NOTIFIER_ERROR_INJECT=m
|
CONFIG_NETDEV_NOTIFIER_ERROR_INJECT=m
|
||||||
CONFIG_FAULT_INJECTION=y
|
CONFIG_FAULT_INJECTION=y
|
||||||
|
@ -796,15 +805,6 @@ CONFIG_FAIL_IO_TIMEOUT=y
|
||||||
CONFIG_FAIL_FUTEX=y
|
CONFIG_FAIL_FUTEX=y
|
||||||
CONFIG_FAULT_INJECTION_DEBUG_FS=y
|
CONFIG_FAULT_INJECTION_DEBUG_FS=y
|
||||||
CONFIG_FAULT_INJECTION_STACKTRACE_FILTER=y
|
CONFIG_FAULT_INJECTION_STACKTRACE_FILTER=y
|
||||||
CONFIG_LATENCYTOP=y
|
|
||||||
CONFIG_IRQSOFF_TRACER=y
|
|
||||||
CONFIG_PREEMPT_TRACER=y
|
|
||||||
CONFIG_SCHED_TRACER=y
|
|
||||||
CONFIG_FTRACE_SYSCALLS=y
|
|
||||||
CONFIG_STACK_TRACER=y
|
|
||||||
CONFIG_BLK_DEV_IO_TRACE=y
|
|
||||||
CONFIG_FUNCTION_PROFILER=y
|
|
||||||
CONFIG_HIST_TRIGGERS=y
|
|
||||||
CONFIG_LKDTM=m
|
CONFIG_LKDTM=m
|
||||||
CONFIG_TEST_LIST_SORT=y
|
CONFIG_TEST_LIST_SORT=y
|
||||||
CONFIG_TEST_SORT=y
|
CONFIG_TEST_SORT=y
|
||||||
|
@ -814,5 +814,3 @@ CONFIG_INTERVAL_TREE_TEST=m
|
||||||
CONFIG_PERCPU_TEST=m
|
CONFIG_PERCPU_TEST=m
|
||||||
CONFIG_ATOMIC64_SELFTEST=y
|
CONFIG_ATOMIC64_SELFTEST=y
|
||||||
CONFIG_TEST_BPF=m
|
CONFIG_TEST_BPF=m
|
||||||
CONFIG_BUG_ON_DATA_CORRUPTION=y
|
|
||||||
CONFIG_S390_PTDUMP=y
|
|
||||||
|
|
|
@ -53,6 +53,7 @@ CONFIG_VFIO_AP=m
|
||||||
CONFIG_CRASH_DUMP=y
|
CONFIG_CRASH_DUMP=y
|
||||||
CONFIG_HIBERNATION=y
|
CONFIG_HIBERNATION=y
|
||||||
CONFIG_PM_DEBUG=y
|
CONFIG_PM_DEBUG=y
|
||||||
|
CONFIG_PROTECTED_VIRTUALIZATION_GUEST=y
|
||||||
CONFIG_CMM=m
|
CONFIG_CMM=m
|
||||||
CONFIG_APPLDATA_BASE=y
|
CONFIG_APPLDATA_BASE=y
|
||||||
CONFIG_KVM=m
|
CONFIG_KVM=m
|
||||||
|
@ -470,7 +471,6 @@ CONFIG_NLMON=m
|
||||||
# CONFIG_NET_VENDOR_EMULEX is not set
|
# CONFIG_NET_VENDOR_EMULEX is not set
|
||||||
# CONFIG_NET_VENDOR_EZCHIP is not set
|
# CONFIG_NET_VENDOR_EZCHIP is not set
|
||||||
# CONFIG_NET_VENDOR_GOOGLE is not set
|
# CONFIG_NET_VENDOR_GOOGLE is not set
|
||||||
# CONFIG_NET_VENDOR_HP is not set
|
|
||||||
# CONFIG_NET_VENDOR_HUAWEI is not set
|
# CONFIG_NET_VENDOR_HUAWEI is not set
|
||||||
# CONFIG_NET_VENDOR_INTEL is not set
|
# CONFIG_NET_VENDOR_INTEL is not set
|
||||||
# CONFIG_NET_VENDOR_MARVELL is not set
|
# CONFIG_NET_VENDOR_MARVELL is not set
|
||||||
|
@ -677,7 +677,6 @@ CONFIG_CRYPTO_ADIANTUM=m
|
||||||
CONFIG_CRYPTO_XCBC=m
|
CONFIG_CRYPTO_XCBC=m
|
||||||
CONFIG_CRYPTO_VMAC=m
|
CONFIG_CRYPTO_VMAC=m
|
||||||
CONFIG_CRYPTO_CRC32=m
|
CONFIG_CRYPTO_CRC32=m
|
||||||
CONFIG_CRYPTO_XXHASH=m
|
|
||||||
CONFIG_CRYPTO_MICHAEL_MIC=m
|
CONFIG_CRYPTO_MICHAEL_MIC=m
|
||||||
CONFIG_CRYPTO_RMD128=m
|
CONFIG_CRYPTO_RMD128=m
|
||||||
CONFIG_CRYPTO_RMD160=m
|
CONFIG_CRYPTO_RMD160=m
|
||||||
|
@ -739,18 +738,18 @@ CONFIG_DEBUG_SECTION_MISMATCH=y
|
||||||
CONFIG_MAGIC_SYSRQ=y
|
CONFIG_MAGIC_SYSRQ=y
|
||||||
CONFIG_DEBUG_MEMORY_INIT=y
|
CONFIG_DEBUG_MEMORY_INIT=y
|
||||||
CONFIG_PANIC_ON_OOPS=y
|
CONFIG_PANIC_ON_OOPS=y
|
||||||
|
CONFIG_BUG_ON_DATA_CORRUPTION=y
|
||||||
CONFIG_RCU_TORTURE_TEST=m
|
CONFIG_RCU_TORTURE_TEST=m
|
||||||
CONFIG_RCU_CPU_STALL_TIMEOUT=60
|
CONFIG_RCU_CPU_STALL_TIMEOUT=60
|
||||||
CONFIG_LATENCYTOP=y
|
CONFIG_LATENCYTOP=y
|
||||||
|
CONFIG_FUNCTION_PROFILER=y
|
||||||
|
CONFIG_STACK_TRACER=y
|
||||||
CONFIG_SCHED_TRACER=y
|
CONFIG_SCHED_TRACER=y
|
||||||
CONFIG_FTRACE_SYSCALLS=y
|
CONFIG_FTRACE_SYSCALLS=y
|
||||||
CONFIG_STACK_TRACER=y
|
|
||||||
CONFIG_BLK_DEV_IO_TRACE=y
|
CONFIG_BLK_DEV_IO_TRACE=y
|
||||||
CONFIG_FUNCTION_PROFILER=y
|
|
||||||
CONFIG_HIST_TRIGGERS=y
|
CONFIG_HIST_TRIGGERS=y
|
||||||
|
CONFIG_S390_PTDUMP=y
|
||||||
CONFIG_LKDTM=m
|
CONFIG_LKDTM=m
|
||||||
CONFIG_PERCPU_TEST=m
|
CONFIG_PERCPU_TEST=m
|
||||||
CONFIG_ATOMIC64_SELFTEST=y
|
CONFIG_ATOMIC64_SELFTEST=y
|
||||||
CONFIG_TEST_BPF=m
|
CONFIG_TEST_BPF=m
|
||||||
CONFIG_BUG_ON_DATA_CORRUPTION=y
|
|
||||||
CONFIG_S390_PTDUMP=y
|
|
||||||
|
|
|
@ -42,7 +42,7 @@ void __storage_key_init_range(unsigned long start, unsigned long end);
|
||||||
|
|
||||||
static inline void storage_key_init_range(unsigned long start, unsigned long end)
|
static inline void storage_key_init_range(unsigned long start, unsigned long end)
|
||||||
{
|
{
|
||||||
if (PAGE_DEFAULT_KEY)
|
if (PAGE_DEFAULT_KEY != 0)
|
||||||
__storage_key_init_range(start, end);
|
__storage_key_init_range(start, end);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
@ -84,7 +84,6 @@ void s390_update_cpu_mhz(void);
|
||||||
void cpu_detect_mhz_feature(void);
|
void cpu_detect_mhz_feature(void);
|
||||||
|
|
||||||
extern const struct seq_operations cpuinfo_op;
|
extern const struct seq_operations cpuinfo_op;
|
||||||
extern int sysctl_ieee_emulation_warnings;
|
|
||||||
extern void execve_tail(void);
|
extern void execve_tail(void);
|
||||||
extern void __bpon(void);
|
extern void __bpon(void);
|
||||||
|
|
||||||
|
|
|
@ -201,7 +201,7 @@ struct slib {
|
||||||
* @scount: SBAL count
|
* @scount: SBAL count
|
||||||
* @sflags: whole SBAL flags
|
* @sflags: whole SBAL flags
|
||||||
* @length: length
|
* @length: length
|
||||||
* @addr: address
|
* @addr: absolute data address
|
||||||
*/
|
*/
|
||||||
struct qdio_buffer_element {
|
struct qdio_buffer_element {
|
||||||
u8 eflags;
|
u8 eflags;
|
||||||
|
@ -211,7 +211,7 @@ struct qdio_buffer_element {
|
||||||
u8 scount;
|
u8 scount;
|
||||||
u8 sflags;
|
u8 sflags;
|
||||||
u32 length;
|
u32 length;
|
||||||
void *addr;
|
u64 addr;
|
||||||
} __attribute__ ((packed, aligned(16)));
|
} __attribute__ ((packed, aligned(16)));
|
||||||
|
|
||||||
/**
|
/**
|
||||||
|
@ -227,7 +227,7 @@ struct qdio_buffer {
|
||||||
* @sbal: absolute SBAL address
|
* @sbal: absolute SBAL address
|
||||||
*/
|
*/
|
||||||
struct sl_element {
|
struct sl_element {
|
||||||
unsigned long sbal;
|
u64 sbal;
|
||||||
} __attribute__ ((packed));
|
} __attribute__ ((packed));
|
||||||
|
|
||||||
/**
|
/**
|
||||||
|
|
|
@ -29,9 +29,6 @@
|
||||||
#define __PAGE_OFFSET __PAGE_OFFSET_BASE
|
#define __PAGE_OFFSET __PAGE_OFFSET_BASE
|
||||||
#include "../../mm/ident_map.c"
|
#include "../../mm/ident_map.c"
|
||||||
|
|
||||||
/* Used by pgtable.h asm code to force instruction serialization. */
|
|
||||||
unsigned long __force_order;
|
|
||||||
|
|
||||||
/* Used to track our page table allocation area. */
|
/* Used to track our page table allocation area. */
|
||||||
struct alloc_pgt_data {
|
struct alloc_pgt_data {
|
||||||
unsigned char *pgt_buf;
|
unsigned char *pgt_buf;
|
||||||
|
|
|
@ -292,6 +292,14 @@ enum x86emul_mode {
|
||||||
#define X86EMUL_SMM_MASK (1 << 6)
|
#define X86EMUL_SMM_MASK (1 << 6)
|
||||||
#define X86EMUL_SMM_INSIDE_NMI_MASK (1 << 7)
|
#define X86EMUL_SMM_INSIDE_NMI_MASK (1 << 7)
|
||||||
|
|
||||||
|
/*
|
||||||
|
* fastop functions are declared as taking a never-defined fastop parameter,
|
||||||
|
* so they can't be called from C directly.
|
||||||
|
*/
|
||||||
|
struct fastop;
|
||||||
|
|
||||||
|
typedef void (*fastop_t)(struct fastop *);
|
||||||
|
|
||||||
struct x86_emulate_ctxt {
|
struct x86_emulate_ctxt {
|
||||||
const struct x86_emulate_ops *ops;
|
const struct x86_emulate_ops *ops;
|
||||||
|
|
||||||
|
@ -324,7 +332,10 @@ struct x86_emulate_ctxt {
|
||||||
struct operand src;
|
struct operand src;
|
||||||
struct operand src2;
|
struct operand src2;
|
||||||
struct operand dst;
|
struct operand dst;
|
||||||
int (*execute)(struct x86_emulate_ctxt *ctxt);
|
union {
|
||||||
|
int (*execute)(struct x86_emulate_ctxt *ctxt);
|
||||||
|
fastop_t fop;
|
||||||
|
};
|
||||||
int (*check_perm)(struct x86_emulate_ctxt *ctxt);
|
int (*check_perm)(struct x86_emulate_ctxt *ctxt);
|
||||||
/*
|
/*
|
||||||
* The following six fields are cleared together,
|
* The following six fields are cleared together,
|
||||||
|
|
|
@ -1122,6 +1122,7 @@ struct kvm_x86_ops {
|
||||||
int (*handle_exit)(struct kvm_vcpu *vcpu,
|
int (*handle_exit)(struct kvm_vcpu *vcpu,
|
||||||
enum exit_fastpath_completion exit_fastpath);
|
enum exit_fastpath_completion exit_fastpath);
|
||||||
int (*skip_emulated_instruction)(struct kvm_vcpu *vcpu);
|
int (*skip_emulated_instruction)(struct kvm_vcpu *vcpu);
|
||||||
|
void (*update_emulated_instruction)(struct kvm_vcpu *vcpu);
|
||||||
void (*set_interrupt_shadow)(struct kvm_vcpu *vcpu, int mask);
|
void (*set_interrupt_shadow)(struct kvm_vcpu *vcpu, int mask);
|
||||||
u32 (*get_interrupt_shadow)(struct kvm_vcpu *vcpu);
|
u32 (*get_interrupt_shadow)(struct kvm_vcpu *vcpu);
|
||||||
void (*patch_hypercall)(struct kvm_vcpu *vcpu,
|
void (*patch_hypercall)(struct kvm_vcpu *vcpu,
|
||||||
|
@ -1146,7 +1147,7 @@ struct kvm_x86_ops {
|
||||||
void (*load_eoi_exitmap)(struct kvm_vcpu *vcpu, u64 *eoi_exit_bitmap);
|
void (*load_eoi_exitmap)(struct kvm_vcpu *vcpu, u64 *eoi_exit_bitmap);
|
||||||
void (*set_virtual_apic_mode)(struct kvm_vcpu *vcpu);
|
void (*set_virtual_apic_mode)(struct kvm_vcpu *vcpu);
|
||||||
void (*set_apic_access_page_addr)(struct kvm_vcpu *vcpu, hpa_t hpa);
|
void (*set_apic_access_page_addr)(struct kvm_vcpu *vcpu, hpa_t hpa);
|
||||||
void (*deliver_posted_interrupt)(struct kvm_vcpu *vcpu, int vector);
|
int (*deliver_posted_interrupt)(struct kvm_vcpu *vcpu, int vector);
|
||||||
int (*sync_pir_to_irr)(struct kvm_vcpu *vcpu);
|
int (*sync_pir_to_irr)(struct kvm_vcpu *vcpu);
|
||||||
int (*set_tss_addr)(struct kvm *kvm, unsigned int addr);
|
int (*set_tss_addr)(struct kvm *kvm, unsigned int addr);
|
||||||
int (*set_identity_map_addr)(struct kvm *kvm, u64 ident_addr);
|
int (*set_identity_map_addr)(struct kvm *kvm, u64 ident_addr);
|
||||||
|
|
|
@ -512,6 +512,8 @@
|
||||||
#define MSR_K7_HWCR 0xc0010015
|
#define MSR_K7_HWCR 0xc0010015
|
||||||
#define MSR_K7_HWCR_SMMLOCK_BIT 0
|
#define MSR_K7_HWCR_SMMLOCK_BIT 0
|
||||||
#define MSR_K7_HWCR_SMMLOCK BIT_ULL(MSR_K7_HWCR_SMMLOCK_BIT)
|
#define MSR_K7_HWCR_SMMLOCK BIT_ULL(MSR_K7_HWCR_SMMLOCK_BIT)
|
||||||
|
#define MSR_K7_HWCR_IRPERF_EN_BIT 30
|
||||||
|
#define MSR_K7_HWCR_IRPERF_EN BIT_ULL(MSR_K7_HWCR_IRPERF_EN_BIT)
|
||||||
#define MSR_K7_FID_VID_CTL 0xc0010041
|
#define MSR_K7_FID_VID_CTL 0xc0010041
|
||||||
#define MSR_K7_FID_VID_STATUS 0xc0010042
|
#define MSR_K7_FID_VID_STATUS 0xc0010042
|
||||||
|
|
||||||
|
|
|
@ -72,7 +72,7 @@
|
||||||
#define SECONDARY_EXEC_MODE_BASED_EPT_EXEC VMCS_CONTROL_BIT(MODE_BASED_EPT_EXEC)
|
#define SECONDARY_EXEC_MODE_BASED_EPT_EXEC VMCS_CONTROL_BIT(MODE_BASED_EPT_EXEC)
|
||||||
#define SECONDARY_EXEC_PT_USE_GPA VMCS_CONTROL_BIT(PT_USE_GPA)
|
#define SECONDARY_EXEC_PT_USE_GPA VMCS_CONTROL_BIT(PT_USE_GPA)
|
||||||
#define SECONDARY_EXEC_TSC_SCALING VMCS_CONTROL_BIT(TSC_SCALING)
|
#define SECONDARY_EXEC_TSC_SCALING VMCS_CONTROL_BIT(TSC_SCALING)
|
||||||
#define SECONDARY_EXEC_ENABLE_USR_WAIT_PAUSE 0x04000000
|
#define SECONDARY_EXEC_ENABLE_USR_WAIT_PAUSE VMCS_CONTROL_BIT(USR_WAIT_PAUSE)
|
||||||
|
|
||||||
#define PIN_BASED_EXT_INTR_MASK VMCS_CONTROL_BIT(INTR_EXITING)
|
#define PIN_BASED_EXT_INTR_MASK VMCS_CONTROL_BIT(INTR_EXITING)
|
||||||
#define PIN_BASED_NMI_EXITING VMCS_CONTROL_BIT(NMI_EXITING)
|
#define PIN_BASED_NMI_EXITING VMCS_CONTROL_BIT(NMI_EXITING)
|
||||||
|
|
|
@ -81,6 +81,7 @@
|
||||||
#define VMX_FEATURE_MODE_BASED_EPT_EXEC ( 2*32+ 22) /* "ept_mode_based_exec" Enable separate EPT EXEC bits for supervisor vs. user */
|
#define VMX_FEATURE_MODE_BASED_EPT_EXEC ( 2*32+ 22) /* "ept_mode_based_exec" Enable separate EPT EXEC bits for supervisor vs. user */
|
||||||
#define VMX_FEATURE_PT_USE_GPA ( 2*32+ 24) /* "" Processor Trace logs GPAs */
|
#define VMX_FEATURE_PT_USE_GPA ( 2*32+ 24) /* "" Processor Trace logs GPAs */
|
||||||
#define VMX_FEATURE_TSC_SCALING ( 2*32+ 25) /* Scale hardware TSC when read in guest */
|
#define VMX_FEATURE_TSC_SCALING ( 2*32+ 25) /* Scale hardware TSC when read in guest */
|
||||||
|
#define VMX_FEATURE_USR_WAIT_PAUSE ( 2*32+ 26) /* Enable TPAUSE, UMONITOR, UMWAIT in guest */
|
||||||
#define VMX_FEATURE_ENCLV_EXITING ( 2*32+ 28) /* "" VM-Exit on ENCLV (leaf dependent) */
|
#define VMX_FEATURE_ENCLV_EXITING ( 2*32+ 28) /* "" VM-Exit on ENCLV (leaf dependent) */
|
||||||
|
|
||||||
#endif /* _ASM_X86_VMXFEATURES_H */
|
#endif /* _ASM_X86_VMXFEATURES_H */
|
||||||
|
|
|
@ -390,6 +390,7 @@ struct kvm_sync_regs {
|
||||||
#define KVM_STATE_NESTED_GUEST_MODE 0x00000001
|
#define KVM_STATE_NESTED_GUEST_MODE 0x00000001
|
||||||
#define KVM_STATE_NESTED_RUN_PENDING 0x00000002
|
#define KVM_STATE_NESTED_RUN_PENDING 0x00000002
|
||||||
#define KVM_STATE_NESTED_EVMCS 0x00000004
|
#define KVM_STATE_NESTED_EVMCS 0x00000004
|
||||||
|
#define KVM_STATE_NESTED_MTF_PENDING 0x00000008
|
||||||
|
|
||||||
#define KVM_STATE_NESTED_SMM_GUEST_MODE 0x00000001
|
#define KVM_STATE_NESTED_SMM_GUEST_MODE 0x00000001
|
||||||
#define KVM_STATE_NESTED_SMM_VMXON 0x00000002
|
#define KVM_STATE_NESTED_SMM_VMXON 0x00000002
|
||||||
|
|
|
@ -28,6 +28,7 @@
|
||||||
|
|
||||||
static const int amd_erratum_383[];
|
static const int amd_erratum_383[];
|
||||||
static const int amd_erratum_400[];
|
static const int amd_erratum_400[];
|
||||||
|
static const int amd_erratum_1054[];
|
||||||
static bool cpu_has_amd_erratum(struct cpuinfo_x86 *cpu, const int *erratum);
|
static bool cpu_has_amd_erratum(struct cpuinfo_x86 *cpu, const int *erratum);
|
||||||
|
|
||||||
/*
|
/*
|
||||||
|
@ -972,6 +973,15 @@ static void init_amd(struct cpuinfo_x86 *c)
|
||||||
/* AMD CPUs don't reset SS attributes on SYSRET, Xen does. */
|
/* AMD CPUs don't reset SS attributes on SYSRET, Xen does. */
|
||||||
if (!cpu_has(c, X86_FEATURE_XENPV))
|
if (!cpu_has(c, X86_FEATURE_XENPV))
|
||||||
set_cpu_bug(c, X86_BUG_SYSRET_SS_ATTRS);
|
set_cpu_bug(c, X86_BUG_SYSRET_SS_ATTRS);
|
||||||
|
|
||||||
|
/*
|
||||||
|
* Turn on the Instructions Retired free counter on machines not
|
||||||
|
* susceptible to erratum #1054 "Instructions Retired Performance
|
||||||
|
* Counter May Be Inaccurate".
|
||||||
|
*/
|
||||||
|
if (cpu_has(c, X86_FEATURE_IRPERF) &&
|
||||||
|
!cpu_has_amd_erratum(c, amd_erratum_1054))
|
||||||
|
msr_set_bit(MSR_K7_HWCR, MSR_K7_HWCR_IRPERF_EN_BIT);
|
||||||
}
|
}
|
||||||
|
|
||||||
#ifdef CONFIG_X86_32
|
#ifdef CONFIG_X86_32
|
||||||
|
@ -1099,6 +1109,10 @@ static const int amd_erratum_400[] =
|
||||||
static const int amd_erratum_383[] =
|
static const int amd_erratum_383[] =
|
||||||
AMD_OSVW_ERRATUM(3, AMD_MODEL_RANGE(0x10, 0, 0, 0xff, 0xf));
|
AMD_OSVW_ERRATUM(3, AMD_MODEL_RANGE(0x10, 0, 0, 0xff, 0xf));
|
||||||
|
|
||||||
|
/* #1054: Instructions Retired Performance Counter May Be Inaccurate */
|
||||||
|
static const int amd_erratum_1054[] =
|
||||||
|
AMD_OSVW_ERRATUM(0, AMD_MODEL_RANGE(0x17, 0, 0, 0x2f, 0xf));
|
||||||
|
|
||||||
|
|
||||||
static bool cpu_has_amd_erratum(struct cpuinfo_x86 *cpu, const int *erratum)
|
static bool cpu_has_amd_erratum(struct cpuinfo_x86 *cpu, const int *erratum)
|
||||||
{
|
{
|
||||||
|
|
|
@ -1163,9 +1163,12 @@ static const struct sysfs_ops threshold_ops = {
|
||||||
.store = store,
|
.store = store,
|
||||||
};
|
};
|
||||||
|
|
||||||
|
static void threshold_block_release(struct kobject *kobj);
|
||||||
|
|
||||||
static struct kobj_type threshold_ktype = {
|
static struct kobj_type threshold_ktype = {
|
||||||
.sysfs_ops = &threshold_ops,
|
.sysfs_ops = &threshold_ops,
|
||||||
.default_attrs = default_attrs,
|
.default_attrs = default_attrs,
|
||||||
|
.release = threshold_block_release,
|
||||||
};
|
};
|
||||||
|
|
||||||
static const char *get_name(unsigned int bank, struct threshold_block *b)
|
static const char *get_name(unsigned int bank, struct threshold_block *b)
|
||||||
|
@ -1198,8 +1201,9 @@ static const char *get_name(unsigned int bank, struct threshold_block *b)
|
||||||
return buf_mcatype;
|
return buf_mcatype;
|
||||||
}
|
}
|
||||||
|
|
||||||
static int allocate_threshold_blocks(unsigned int cpu, unsigned int bank,
|
static int allocate_threshold_blocks(unsigned int cpu, struct threshold_bank *tb,
|
||||||
unsigned int block, u32 address)
|
unsigned int bank, unsigned int block,
|
||||||
|
u32 address)
|
||||||
{
|
{
|
||||||
struct threshold_block *b = NULL;
|
struct threshold_block *b = NULL;
|
||||||
u32 low, high;
|
u32 low, high;
|
||||||
|
@ -1243,16 +1247,12 @@ static int allocate_threshold_blocks(unsigned int cpu, unsigned int bank,
|
||||||
|
|
||||||
INIT_LIST_HEAD(&b->miscj);
|
INIT_LIST_HEAD(&b->miscj);
|
||||||
|
|
||||||
if (per_cpu(threshold_banks, cpu)[bank]->blocks) {
|
if (tb->blocks)
|
||||||
list_add(&b->miscj,
|
list_add(&b->miscj, &tb->blocks->miscj);
|
||||||
&per_cpu(threshold_banks, cpu)[bank]->blocks->miscj);
|
else
|
||||||
} else {
|
tb->blocks = b;
|
||||||
per_cpu(threshold_banks, cpu)[bank]->blocks = b;
|
|
||||||
}
|
|
||||||
|
|
||||||
err = kobject_init_and_add(&b->kobj, &threshold_ktype,
|
err = kobject_init_and_add(&b->kobj, &threshold_ktype, tb->kobj, get_name(bank, b));
|
||||||
per_cpu(threshold_banks, cpu)[bank]->kobj,
|
|
||||||
get_name(bank, b));
|
|
||||||
if (err)
|
if (err)
|
||||||
goto out_free;
|
goto out_free;
|
||||||
recurse:
|
recurse:
|
||||||
|
@ -1260,7 +1260,7 @@ static int allocate_threshold_blocks(unsigned int cpu, unsigned int bank,
|
||||||
if (!address)
|
if (!address)
|
||||||
return 0;
|
return 0;
|
||||||
|
|
||||||
err = allocate_threshold_blocks(cpu, bank, block, address);
|
err = allocate_threshold_blocks(cpu, tb, bank, block, address);
|
||||||
if (err)
|
if (err)
|
||||||
goto out_free;
|
goto out_free;
|
||||||
|
|
||||||
|
@ -1345,8 +1345,6 @@ static int threshold_create_bank(unsigned int cpu, unsigned int bank)
|
||||||
goto out_free;
|
goto out_free;
|
||||||
}
|
}
|
||||||
|
|
||||||
per_cpu(threshold_banks, cpu)[bank] = b;
|
|
||||||
|
|
||||||
if (is_shared_bank(bank)) {
|
if (is_shared_bank(bank)) {
|
||||||
refcount_set(&b->cpus, 1);
|
refcount_set(&b->cpus, 1);
|
||||||
|
|
||||||
|
@ -1357,9 +1355,13 @@ static int threshold_create_bank(unsigned int cpu, unsigned int bank)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
err = allocate_threshold_blocks(cpu, bank, 0, msr_ops.misc(bank));
|
err = allocate_threshold_blocks(cpu, b, bank, 0, msr_ops.misc(bank));
|
||||||
if (!err)
|
if (err)
|
||||||
goto out;
|
goto out_free;
|
||||||
|
|
||||||
|
per_cpu(threshold_banks, cpu)[bank] = b;
|
||||||
|
|
||||||
|
return 0;
|
||||||
|
|
||||||
out_free:
|
out_free:
|
||||||
kfree(b);
|
kfree(b);
|
||||||
|
@ -1368,8 +1370,12 @@ static int threshold_create_bank(unsigned int cpu, unsigned int bank)
|
||||||
return err;
|
return err;
|
||||||
}
|
}
|
||||||
|
|
||||||
static void deallocate_threshold_block(unsigned int cpu,
|
static void threshold_block_release(struct kobject *kobj)
|
||||||
unsigned int bank)
|
{
|
||||||
|
kfree(to_block(kobj));
|
||||||
|
}
|
||||||
|
|
||||||
|
static void deallocate_threshold_block(unsigned int cpu, unsigned int bank)
|
||||||
{
|
{
|
||||||
struct threshold_block *pos = NULL;
|
struct threshold_block *pos = NULL;
|
||||||
struct threshold_block *tmp = NULL;
|
struct threshold_block *tmp = NULL;
|
||||||
|
@ -1379,13 +1385,11 @@ static void deallocate_threshold_block(unsigned int cpu,
|
||||||
return;
|
return;
|
||||||
|
|
||||||
list_for_each_entry_safe(pos, tmp, &head->blocks->miscj, miscj) {
|
list_for_each_entry_safe(pos, tmp, &head->blocks->miscj, miscj) {
|
||||||
kobject_put(&pos->kobj);
|
|
||||||
list_del(&pos->miscj);
|
list_del(&pos->miscj);
|
||||||
kfree(pos);
|
kobject_put(&pos->kobj);
|
||||||
}
|
}
|
||||||
|
|
||||||
kfree(per_cpu(threshold_banks, cpu)[bank]->blocks);
|
kobject_put(&head->blocks->kobj);
|
||||||
per_cpu(threshold_banks, cpu)[bank]->blocks = NULL;
|
|
||||||
}
|
}
|
||||||
|
|
||||||
static void __threshold_remove_blocks(struct threshold_bank *b)
|
static void __threshold_remove_blocks(struct threshold_bank *b)
|
||||||
|
|
|
@ -191,25 +191,6 @@
|
||||||
#define NR_FASTOP (ilog2(sizeof(ulong)) + 1)
|
#define NR_FASTOP (ilog2(sizeof(ulong)) + 1)
|
||||||
#define FASTOP_SIZE 8
|
#define FASTOP_SIZE 8
|
||||||
|
|
||||||
/*
|
|
||||||
* fastop functions have a special calling convention:
|
|
||||||
*
|
|
||||||
* dst: rax (in/out)
|
|
||||||
* src: rdx (in/out)
|
|
||||||
* src2: rcx (in)
|
|
||||||
* flags: rflags (in/out)
|
|
||||||
* ex: rsi (in:fastop pointer, out:zero if exception)
|
|
||||||
*
|
|
||||||
* Moreover, they are all exactly FASTOP_SIZE bytes long, so functions for
|
|
||||||
* different operand sizes can be reached by calculation, rather than a jump
|
|
||||||
* table (which would be bigger than the code).
|
|
||||||
*
|
|
||||||
* fastop functions are declared as taking a never-defined fastop parameter,
|
|
||||||
* so they can't be called from C directly.
|
|
||||||
*/
|
|
||||||
|
|
||||||
struct fastop;
|
|
||||||
|
|
||||||
struct opcode {
|
struct opcode {
|
||||||
u64 flags : 56;
|
u64 flags : 56;
|
||||||
u64 intercept : 8;
|
u64 intercept : 8;
|
||||||
|
@ -311,8 +292,19 @@ static void invalidate_registers(struct x86_emulate_ctxt *ctxt)
|
||||||
#define ON64(x)
|
#define ON64(x)
|
||||||
#endif
|
#endif
|
||||||
|
|
||||||
typedef void (*fastop_t)(struct fastop *);
|
/*
|
||||||
|
* fastop functions have a special calling convention:
|
||||||
|
*
|
||||||
|
* dst: rax (in/out)
|
||||||
|
* src: rdx (in/out)
|
||||||
|
* src2: rcx (in)
|
||||||
|
* flags: rflags (in/out)
|
||||||
|
* ex: rsi (in:fastop pointer, out:zero if exception)
|
||||||
|
*
|
||||||
|
* Moreover, they are all exactly FASTOP_SIZE bytes long, so functions for
|
||||||
|
* different operand sizes can be reached by calculation, rather than a jump
|
||||||
|
* table (which would be bigger than the code).
|
||||||
|
*/
|
||||||
static int fastop(struct x86_emulate_ctxt *ctxt, fastop_t fop);
|
static int fastop(struct x86_emulate_ctxt *ctxt, fastop_t fop);
|
||||||
|
|
||||||
#define __FOP_FUNC(name) \
|
#define __FOP_FUNC(name) \
|
||||||
|
@ -5683,7 +5675,7 @@ int x86_emulate_insn(struct x86_emulate_ctxt *ctxt)
|
||||||
|
|
||||||
if (ctxt->execute) {
|
if (ctxt->execute) {
|
||||||
if (ctxt->d & Fastop)
|
if (ctxt->d & Fastop)
|
||||||
rc = fastop(ctxt, (fastop_t)ctxt->execute);
|
rc = fastop(ctxt, ctxt->fop);
|
||||||
else
|
else
|
||||||
rc = ctxt->execute(ctxt);
|
rc = ctxt->execute(ctxt);
|
||||||
if (rc != X86EMUL_CONTINUE)
|
if (rc != X86EMUL_CONTINUE)
|
||||||
|
|
Some files were not shown because too many files have changed in this diff.