Merge tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm

Pull kvm updates from Paolo Bonzini:

 "ARM:
   - GICv4.1 support
   - 32bit host removal

  PPC:
   - secure (encrypted) guests under the Protected Execution Framework
     ultravisor

  s390:
   - allow disabling GISA (hardware interrupt injection) and protected
     VMs/ultravisor support

  x86:
   - New dirty bitmap flag that sets all bits in the bitmap when dirty
     page logging is enabled; this is faster because it doesn't require
     bulk modification of the page tables.

   - Initial work on making nested SVM event injection more similar to
     VMX, and less buggy.

   - Various cleanups to MMU code (though the big ones and related
     optimizations were delayed to 5.8).  Instead of using cr3 in
     function names, which occasionally means eptp, KVM too has
     standardized on "pgd".

   - A large refactoring of CPUID features, which now use an array that
     parallels the core x86_features.

   - Some removal of pointer chasing from kvm_x86_ops, which will also
     be switched to static calls as soon as they are available.

   - New Tigerlake CPUID features.

   - More bugfixes, optimizations and cleanups.
  Generic:
   - selftests: cleanups, new MMU notifier stress test, steal-time test
   - CSV output for kvm_stat

  KVM/MIPS has been broken since 5.5; it does not compile due to a patch
  committed by MIPS maintainers.  I had already prepared a fix, but the
  MIPS maintainers prefer to fix it in generic code rather than KVM, so
  they are taking care of it."

* tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm: (277 commits)
  x86/kvm: fix a missing-prototypes "vmread_error"
  KVM: x86: Fix BUILD_BUG() in __cpuid_entry_get_reg() w/ CONFIG_UBSAN=y
  KVM: VMX: Add a trampoline to fix VMREAD error handling
  KVM: SVM: Annotate svm_x86_ops as __initdata
  KVM: VMX: Annotate vmx_x86_ops as __initdata
  KVM: x86: Drop __exit from kvm_x86_ops' hardware_unsetup()
  KVM: x86: Copy kvm_x86_ops by value to eliminate layer of indirection
  KVM: x86: Set kvm_x86_ops only after ->hardware_setup() completes
  KVM: VMX: Configure runtime hooks using vmx_x86_ops
  KVM: VMX: Move hardware_setup() definition below vmx_x86_ops
  KVM: x86: Move init-only kvm_x86_ops to separate struct
  KVM: Pass kvm_init()'s opaque param to additional arch funcs
  s390/gmap: return proper error code on ksm unsharing
  KVM: selftests: Fix cosmetic copy-paste error in vm_mem_region_move()
  KVM: Fix out of range accesses to memslots
  KVM: X86: Micro-optimize IPI fastpath delay
  KVM: X86: Delay read msr data iff writes ICR MSR
  KVM: PPC: Book3S HV: Add a capability for enabling secure guests
  KVM: arm64: GICv4.1: Expose HW-based SGIs in debugfs
  KVM: arm64: GICv4.1: Allow non-trapping WFI when using HW SGIs
  ...
commit 8c1b724ddb
@ -3821,6 +3821,11 @@
|
|||
before loading.
|
||||
See Documentation/admin-guide/blockdev/ramdisk.rst.
|
||||
|
||||
prot_virt= [S390] enable hosting protected virtual machines
|
||||
isolated from the hypervisor (if hardware supports
|
||||
that).
|
||||
Format: <bool>
|
||||
|
||||
psi= [KNL] Enable or disable pressure stall information
|
||||
tracking.
|
||||
Format: <bool>
|
||||
|
|
|
@ -1574,8 +1574,8 @@ This ioctl would set vcpu's xcr to the value userspace specified.
|
|||
};
|
||||
|
||||
#define KVM_CPUID_FLAG_SIGNIFCANT_INDEX BIT(0)
|
||||
#define KVM_CPUID_FLAG_STATEFUL_FUNC BIT(1)
|
||||
#define KVM_CPUID_FLAG_STATE_READ_NEXT BIT(2)
|
||||
#define KVM_CPUID_FLAG_STATEFUL_FUNC BIT(1) /* deprecated */
|
||||
#define KVM_CPUID_FLAG_STATE_READ_NEXT BIT(2) /* deprecated */
|
||||
|
||||
struct kvm_cpuid_entry2 {
|
||||
__u32 function;
|
||||
|
@ -1626,13 +1626,6 @@ emulate them efficiently. The fields in each entry are defined as follows:
|
|||
|
||||
KVM_CPUID_FLAG_SIGNIFCANT_INDEX:
|
||||
if the index field is valid
|
||||
KVM_CPUID_FLAG_STATEFUL_FUNC:
|
||||
if cpuid for this function returns different values for successive
|
||||
invocations; there will be several entries with the same function,
|
||||
all with this flag set
|
||||
KVM_CPUID_FLAG_STATE_READ_NEXT:
|
||||
for KVM_CPUID_FLAG_STATEFUL_FUNC entries, set if this entry is
|
||||
the first entry to be read by a cpu
|
||||
|
||||
eax, ebx, ecx, edx:
|
||||
the values returned by the cpuid instruction for
|
||||
|
@ -2117,7 +2110,8 @@ Errors:
|
|||
|
||||
====== ============================================================
|
||||
ENOENT no such register
|
||||
EINVAL invalid register ID, or no such register
|
||||
EINVAL invalid register ID, or no such register or used with VMs in
|
||||
protected virtualization mode on s390
|
||||
EPERM (arm64) register access not allowed before vcpu finalization
|
||||
====== ============================================================
|
||||
|
||||
|
@ -2552,7 +2546,8 @@ Errors include:
|
|||
|
||||
======== ============================================================
|
||||
ENOENT no such register
|
||||
EINVAL invalid register ID, or no such register
|
||||
EINVAL invalid register ID, or no such register or used with VMs in
|
||||
protected virtualization mode on s390
|
||||
EPERM (arm64) register access not allowed before vcpu finalization
|
||||
======== ============================================================
|
||||
|
||||
|
@ -3347,8 +3342,8 @@ The member 'flags' is used for passing flags from userspace.
|
|||
::
|
||||
|
||||
#define KVM_CPUID_FLAG_SIGNIFCANT_INDEX BIT(0)
|
||||
#define KVM_CPUID_FLAG_STATEFUL_FUNC BIT(1)
|
||||
#define KVM_CPUID_FLAG_STATE_READ_NEXT BIT(2)
|
||||
#define KVM_CPUID_FLAG_STATEFUL_FUNC BIT(1) /* deprecated */
|
||||
#define KVM_CPUID_FLAG_STATE_READ_NEXT BIT(2) /* deprecated */
|
||||
|
||||
struct kvm_cpuid_entry2 {
|
||||
__u32 function;
|
||||
|
@ -3394,13 +3389,6 @@ The fields in each entry are defined as follows:
|
|||
|
||||
KVM_CPUID_FLAG_SIGNIFCANT_INDEX:
|
||||
if the index field is valid
|
||||
KVM_CPUID_FLAG_STATEFUL_FUNC:
|
||||
if cpuid for this function returns different values for successive
|
||||
invocations; there will be several entries with the same function,
|
||||
all with this flag set
|
||||
KVM_CPUID_FLAG_STATE_READ_NEXT:
|
||||
for KVM_CPUID_FLAG_STATEFUL_FUNC entries, set if this entry is
|
||||
the first entry to be read by a cpu
|
||||
|
||||
eax, ebx, ecx, edx:
|
||||
|
||||
|
@ -4649,6 +4637,60 @@ the clear cpu reset definition in the POP. However, the cpu is not put
|
|||
into ESA mode. This reset is a superset of the initial reset.
|
||||
|
||||
|
||||
4.125 KVM_S390_PV_COMMAND
|
||||
-------------------------
|
||||
|
||||
:Capability: KVM_CAP_S390_PROTECTED
|
||||
:Architectures: s390
|
||||
:Type: vm ioctl
|
||||
:Parameters: struct kvm_pv_cmd
|
||||
:Returns: 0 on success, < 0 on error
|
||||
|
||||
::
|
||||
|
||||
struct kvm_pv_cmd {
|
||||
__u32 cmd; /* Command to be executed */
|
||||
__u16 rc; /* Ultravisor return code */
|
||||
__u16 rrc; /* Ultravisor return reason code */
|
||||
__u64 data; /* Data or address */
|
||||
__u32 flags; /* flags for future extensions. Must be 0 for now */
|
||||
__u32 reserved[3];
|
||||
};
|
||||
|
||||
cmd values:
|
||||
|
||||
KVM_PV_ENABLE
|
||||
Allocate memory and register the VM with the Ultravisor, thereby
|
||||
donating memory to the Ultravisor that will become inaccessible to
|
||||
KVM. All existing CPUs are converted to protected ones. After this
|
||||
command has succeeded, any CPU added via hotplug will become
|
||||
protected during its creation as well.
|
||||
|
||||
Errors:
|
||||
|
||||
===== =============================
|
||||
EINTR an unmasked signal is pending
|
||||
===== =============================
|
||||
|
||||
KVM_PV_DISABLE
|
||||
|
||||
Deregister the VM from the Ultravisor and reclaim the memory that
|
||||
had been donated to the Ultravisor, making it usable by the kernel
|
||||
again. All registered VCPUs are converted back to non-protected
|
||||
ones.
|
||||
|
||||
KVM_PV_VM_SET_SEC_PARMS
|
||||
Pass the image header from VM memory to the Ultravisor in
|
||||
preparation of image unpacking and verification.
|
||||
|
||||
KVM_PV_VM_UNPACK
|
||||
Unpack (protect and decrypt) a page of the encrypted boot image.
|
||||
|
||||
KVM_PV_VM_VERIFY
|
||||
Verify the integrity of the unpacked image. Only if this succeeds,
|
||||
KVM is allowed to start protected VCPUs.
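A minimal userspace sketch of driving these commands in the documented
boot order (illustrative only: the command names are taken from this
document, ``vm_fd`` is an assumed, already created VM file descriptor,
``hdr_addr`` a placeholder for the location of the PV header, and error
handling is reduced to the ioctl return value)::

    #include <sys/ioctl.h>
    #include <linux/kvm.h>

    /* Illustrative only; unpacking of the individual components is elided. */
    static int pv_cmd(int vm_fd, __u32 cmd, __u64 data)
    {
            struct kvm_pv_cmd pv = {
                    .cmd   = cmd,
                    .data  = data,  /* command-specific data or address */
                    .flags = 0,     /* must be 0 for now */
            };

            /* On return, pv.rc/pv.rrc hold the Ultravisor return/reason codes. */
            return ioctl(vm_fd, KVM_S390_PV_COMMAND, &pv);
    }

    static int boot_protected_vm(int vm_fd, __u64 hdr_addr)
    {
            if (pv_cmd(vm_fd, KVM_PV_ENABLE, 0))            /* may fail with EINTR */
                    return -1;
            if (pv_cmd(vm_fd, KVM_PV_VM_SET_SEC_PARMS, hdr_addr))
                    return -1;
            /* ... one KVM_PV_VM_UNPACK call per encrypted component page ... */
            return pv_cmd(vm_fd, KVM_PV_VM_VERIFY, 0);      /* then start the VCPUs */
    }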
|
||||
|
||||
|
||||
5. The kvm_run structure
|
||||
========================
|
||||
|
||||
|
@ -5707,8 +5749,13 @@ and injected exceptions.
|
|||
:Architectures: x86, arm, arm64, mips
|
||||
:Parameters: args[0] whether feature should be enabled or not
|
||||
|
||||
With this capability enabled, KVM_GET_DIRTY_LOG will not automatically
|
||||
clear and write-protect all pages that are returned as dirty.
|
||||
Valid flags are::
|
||||
|
||||
#define KVM_DIRTY_LOG_MANUAL_PROTECT_ENABLE (1 << 0)
|
||||
#define KVM_DIRTY_LOG_INITIALLY_SET (1 << 1)
|
||||
|
||||
With KVM_DIRTY_LOG_MANUAL_PROTECT_ENABLE is set, KVM_GET_DIRTY_LOG will not
|
||||
automatically clear and write-protect all pages that are returned as dirty.
|
||||
Rather, userspace will have to do this operation separately using
|
||||
KVM_CLEAR_DIRTY_LOG.
|
||||
|
||||
|
@ -5719,18 +5766,42 @@ than requiring to sync a full memslot; this ensures that KVM does not
|
|||
take spinlocks for an extended period of time. Second, in some cases a
|
||||
large amount of time can pass between a call to KVM_GET_DIRTY_LOG and
|
||||
userspace actually using the data in the page. Pages can be modified
|
||||
during this time, which is inefficint for both the guest and userspace:
|
||||
during this time, which is inefficient for both the guest and userspace:
|
||||
the guest will incur a higher penalty due to write protection faults,
|
||||
while userspace can see false reports of dirty pages. Manual reprotection
|
||||
helps reducing this time, improving guest performance and reducing the
|
||||
number of dirty log false positives.
|
||||
|
||||
With KVM_DIRTY_LOG_INITIALLY_SET set, all the bits of the dirty bitmap
|
||||
will be initialized to 1 when created. This also improves performance because
|
||||
dirty logging can be enabled gradually in small chunks on the first call
|
||||
to KVM_CLEAR_DIRTY_LOG. KVM_DIRTY_LOG_INITIALLY_SET depends on
|
||||
KVM_DIRTY_LOG_MANUAL_PROTECT_ENABLE (it is also only available on
|
||||
x86 for now).
|
||||
|
||||
KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2 was previously available under the name
|
||||
KVM_CAP_MANUAL_DIRTY_LOG_PROTECT, but the implementation had bugs that make
|
||||
it hard or impossible to use it correctly. The availability of
|
||||
KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2 signals that those bugs are fixed.
|
||||
Userspace should not try to use KVM_CAP_MANUAL_DIRTY_LOG_PROTECT.
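As a rough sketch, userspace could enable both flags on the VM file
descriptor like this (``vm_fd`` is an assumed, already created VM fd;
availability should first be probed with KVM_CHECK_EXTENSION)::

    #include <sys/ioctl.h>
    #include <linux/kvm.h>

    static int enable_manual_dirty_log(int vm_fd)
    {
            struct kvm_enable_cap cap = {
                    .cap     = KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2,
                    .args[0] = KVM_DIRTY_LOG_MANUAL_PROTECT_ENABLE |
                               KVM_DIRTY_LOG_INITIALLY_SET,
            };

            /* After this, KVM_GET_DIRTY_LOG no longer write-protects pages;
             * userspace reprotects touched ranges with KVM_CLEAR_DIRTY_LOG. */
            return ioctl(vm_fd, KVM_ENABLE_CAP, &cap);
    }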
|
||||
|
||||
7.19 KVM_CAP_PPC_SECURE_GUEST
|
||||
------------------------------
|
||||
|
||||
:Architectures: ppc
|
||||
|
||||
This capability indicates that KVM is running on a host that has
|
||||
ultravisor firmware and thus can support a secure guest. On such a
|
||||
system, a guest can ask the ultravisor to make it a secure guest,
|
||||
one whose memory is inaccessible to the host except for pages which
|
||||
are explicitly requested to be shared with the host. The ultravisor
|
||||
notifies KVM when a guest requests to become a secure guest, and KVM
|
||||
has the opportunity to veto the transition.
|
||||
|
||||
If present, this capability can be enabled for a VM, meaning that KVM
|
||||
will allow the transition to secure guest mode. Otherwise KVM will
|
||||
veto the transition.
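A minimal sketch of opting a VM in (``vm_fd`` is an assumed VM file
descriptor; the exact arguments, if any, are not spelled out here, and
the transition still only happens when the guest asks the ultravisor
for it)::

    #include <sys/ioctl.h>
    #include <linux/kvm.h>

    static int allow_secure_guest(int vm_fd)
    {
            struct kvm_enable_cap cap = {
                    .cap = KVM_CAP_PPC_SECURE_GUEST,  /* probe with KVM_CHECK_EXTENSION first */
            };

            /* If this fails, KVM will veto a guest's transition to secure mode. */
            return ioctl(vm_fd, KVM_ENABLE_CAP, &cap);
    }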
|
||||
|
||||
8. Other capabilities.
|
||||
======================
|
||||
|
||||
|
@ -6027,3 +6098,14 @@ Architectures: s390
|
|||
|
||||
This capability indicates that the KVM_S390_NORMAL_RESET and
|
||||
KVM_S390_CLEAR_RESET ioctls are available.
|
||||
|
||||
8.23 KVM_CAP_S390_PROTECTED
|
||||
|
||||
Architecture: s390
|
||||
|
||||
|
||||
This capability indicates that the Ultravisor has been initialized and
|
||||
KVM can therefore start protected VMs.
|
||||
This capability governs the KVM_S390_PV_COMMAND ioctl and the
|
||||
KVM_MP_STATE_LOAD MP_STATE. KVM_SET_MP_STATE can fail for protected
|
||||
guests when the state change is invalid.
|
||||
|
|
|
@ -11,6 +11,11 @@ hypervisor when running as a guest (under Xen, KVM or any other
|
|||
hypervisor), or any hypervisor-specific interaction when the kernel is
|
||||
used as a host.
|
||||
|
||||
Note: KVM/arm has been removed from the kernel. The API described
|
||||
here is still valid though, as it allows the kernel to kexec when
|
||||
booted at HYP. It can also be used by a hypervisor other than KVM
|
||||
if necessary.
|
||||
|
||||
On arm and arm64 (without VHE), the kernel doesn't run in hypervisor
|
||||
mode, but still needs to interact with it, allowing a built-in
|
||||
hypervisor to be either installed or torn down.
|
||||
|
|
|
@ -108,16 +108,9 @@ Groups:
|
|||
mask or unmask the adapter, as specified in mask
|
||||
|
||||
KVM_S390_IO_ADAPTER_MAP
|
||||
perform a gmap translation for the guest address provided in addr,
|
||||
pin a userspace page for the translated address and add it to the
|
||||
list of mappings
|
||||
|
||||
.. note:: A new mapping will be created unconditionally; therefore,
|
||||
the calling code should avoid making duplicate mappings.
|
||||
|
||||
This is now a no-op. The mapping is purely done by the irq route.
|
||||
KVM_S390_IO_ADAPTER_UNMAP
|
||||
release a userspace page for the translated address specified in addr
|
||||
from the list of mappings
|
||||
This is now a no-op. The mapping is purely done by the irq route.
|
||||
|
||||
KVM_DEV_FLIC_AISM
|
||||
modify the adapter-interruption-suppression mode for a given isc if the
|
||||
|
|
|
@ -18,6 +18,8 @@ KVM
|
|||
nested-vmx
|
||||
ppc-pv
|
||||
s390-diag
|
||||
s390-pv
|
||||
s390-pv-boot
|
||||
timekeeping
|
||||
vcpu-requests
|
||||
|
||||
|
|
|
@ -96,19 +96,18 @@ will happen:
|
|||
We dirty-log for gfn1, that means gfn2 is lost in dirty-bitmap.
|
||||
|
||||
For direct sp, we can easily avoid it since the spte of direct sp is fixed
|
||||
to gfn. For indirect sp, before we do cmpxchg, we call gfn_to_pfn_atomic()
|
||||
to pin gfn to pfn, because after gfn_to_pfn_atomic():
|
||||
to gfn. For indirect sp, we disabled fast page fault for simplicity.
|
||||
|
||||
A solution for indirect sp could be to pin the gfn, for example via
|
||||
kvm_vcpu_gfn_to_pfn_atomic, before the cmpxchg. After the pinning:
|
||||
|
||||
- We have held the refcount of pfn that means the pfn can not be freed and
|
||||
be reused for another gfn.
|
||||
- The pfn is writable that means it can not be shared between different gfns
|
||||
- The pfn is writable and therefore it cannot be shared between different gfns
|
||||
by KSM.
|
||||
|
||||
Then, we can ensure the dirty bitmaps is correctly set for a gfn.
|
||||
|
||||
Currently, to simplify the whole things, we disable fast page fault for
|
||||
indirect shadow page.
|
||||
|
||||
2) Dirty bit tracking
|
||||
|
||||
In the origin code, the spte can be fast updated (non-atomically) if the
|
||||
|
|
|
@ -0,0 +1,84 @@
|
|||
.. SPDX-License-Identifier: GPL-2.0
|
||||
|
||||
======================================
|
||||
s390 (IBM Z) Boot/IPL of Protected VMs
|
||||
======================================
|
||||
|
||||
Summary
|
||||
-------
|
||||
The memory of Protected Virtual Machines (PVMs) is not accessible to
|
||||
I/O or the hypervisor. In those cases where the hypervisor needs to
|
||||
access the memory of a PVM, that memory must be made accessible.
|
||||
Memory made accessible to the hypervisor will be encrypted. See
|
||||
:doc:`s390-pv` for details.
|
||||
|
||||
On IPL (boot) a small plaintext bootloader is started, which provides
|
||||
information about the encrypted components and necessary metadata to
|
||||
KVM to decrypt the protected virtual machine.
|
||||
|
||||
Based on this data, KVM will make the protected virtual machine known
|
||||
to the Ultravisor (UV) and instruct it to secure the memory of the
|
||||
PVM, decrypt the components and verify the data and address list
|
||||
hashes, to ensure integrity. Afterwards KVM can run the PVM via the
|
||||
SIE instruction which the UV will intercept and execute on KVM's
|
||||
behalf.
|
||||
|
||||
As the guest image is just like an opaque kernel image that does the
|
||||
switch into PV mode itself, the user can load encrypted guest
|
||||
executables and data via every available method (network, dasd, scsi,
|
||||
direct kernel, ...) without the need to change the boot process.
|
||||
|
||||
|
||||
Diag308
|
||||
-------
|
||||
This diagnose instruction is the basic mechanism to handle IPL and
|
||||
related operations for virtual machines. The VM can set and retrieve
|
||||
IPL information blocks, that specify the IPL method/devices and
|
||||
request VM memory and subsystem resets, as well as IPLs.
|
||||
|
||||
For PVMs this concept has been extended with new subcodes:
|
||||
|
||||
Subcode 8: Set an IPL Information Block of type 5 (information block
|
||||
for PVMs)
|
||||
Subcode 9: Store the saved block in guest memory
|
||||
Subcode 10: Move into Protected Virtualization mode
|
||||
|
||||
The new PV load-device-specific-parameters field specifies all data
|
||||
that is necessary to move into PV mode.
|
||||
|
||||
* PV Header origin
|
||||
* PV Header length
|
||||
* List of Components composed of
|
||||
* AES-XTS Tweak prefix
|
||||
* Origin
|
||||
* Size
|
||||
|
||||
The PV header contains the keys and hashes, which the UV will use to
|
||||
decrypt and verify the PV, as well as control flags and a start PSW.
|
||||
|
||||
The components are for instance an encrypted kernel, kernel parameters
|
||||
and initrd. The components are decrypted by the UV.
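Purely as an illustration of the shape of that data (the field names and
widths below are hypothetical, not the architected layout), one entry of
the component list described above could be pictured as::

    /* Hypothetical sketch of one component-list entry; not the
     * architected format. */
    struct pv_component_entry_sketch {
            __u64 tweak_prefix;   /* AES-XTS tweak prefix for this component */
            __u64 origin;         /* guest address of the encrypted component */
            __u64 size;           /* length of the component in bytes */
    };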
|
||||
|
||||
After the initial import of the encrypted data, all defined pages will
|
||||
contain the guest content. All non-specified pages will start out as
|
||||
zero pages on first access.
|
||||
|
||||
|
||||
When running in protected virtualization mode, some subcodes will result in
|
||||
exceptions or return error codes.
|
||||
|
||||
Subcodes 4 and 7, which specify operations that do not clear the guest
|
||||
memory, will result in specification exceptions. This is because the
|
||||
UV will clear all memory when a secure VM is removed, and therefore
|
||||
non-clearing IPL subcodes are not allowed.
|
||||
|
||||
Subcodes 8, 9, 10 will result in specification exceptions.
|
||||
Re-IPL into a protected mode is only possible via a detour into non
|
||||
protected mode.
|
||||
|
||||
Keys
|
||||
----
|
||||
Every CEC will have a unique public key to enable tooling to build
|
||||
encrypted images.
|
||||
See `s390-tools <https://github.com/ibm-s390-tools/s390-tools/>`_
|
||||
for the tooling.
|
|
@ -0,0 +1,116 @@
|
|||
.. SPDX-License-Identifier: GPL-2.0
|
||||
|
||||
=========================================
|
||||
s390 (IBM Z) Ultravisor and Protected VMs
|
||||
=========================================
|
||||
|
||||
Summary
|
||||
-------
|
||||
Protected virtual machines (PVM) are KVM VMs that do not allow KVM to
|
||||
access VM state like guest memory or guest registers. Instead, the
|
||||
PVMs are mostly managed by a new entity called Ultravisor (UV). The UV
|
||||
provides an API that can be used by PVMs and KVM to request management
|
||||
actions.
|
||||
|
||||
Each guest starts in non-protected mode and then may make a request to
|
||||
transition into protected mode. On transition, KVM registers the guest
|
||||
and its VCPUs with the Ultravisor and prepares everything for running
|
||||
it.
|
||||
|
||||
The Ultravisor will secure and decrypt the guest's boot memory
|
||||
(i.e. kernel/initrd). It will safeguard state changes like VCPU
|
||||
starts/stops and injected interrupts while the guest is running.
|
||||
|
||||
As access to the guest's state, such as the SIE state description, is
|
||||
normally needed to be able to run a VM, some changes have been made in
|
||||
the behavior of the SIE instruction. A new format 4 state description
|
||||
has been introduced, where some fields have different meanings for a
|
||||
PVM. SIE exits are minimized as much as possible to improve speed and
|
||||
reduce exposed guest state.
|
||||
|
||||
|
||||
Interrupt injection
|
||||
-------------------
|
||||
Interrupt injection is safeguarded by the Ultravisor. As KVM doesn't
|
||||
have access to the VCPUs' lowcores, injection is handled via the
|
||||
format 4 state description.
|
||||
|
||||
Machine check, external, IO and restart interruptions each can be
|
||||
injected on SIE entry via a bit in the interrupt injection control
|
||||
field (offset 0x54). If the guest cpu is not enabled for the interrupt
|
||||
at the time of injection, a validity interception is recognized. The
|
||||
format 4 state description contains fields in the interception data
|
||||
block where data associated with the interrupt can be transported.
|
||||
|
||||
Program and Service Call exceptions have another layer of
|
||||
safeguarding; they can only be injected for instructions that have
|
||||
been intercepted into KVM. The exceptions need to be a valid outcome
|
||||
of an instruction emulation by KVM, e.g. we can never inject an
|
||||
addressing exception as they are reported by SIE since KVM has no
|
||||
access to the guest memory.
|
||||
|
||||
|
||||
Mask notification interceptions
|
||||
-------------------------------
|
||||
KVM cannot intercept lctl(g) and lpsw(e) anymore in order to be
|
||||
notified when a PVM enables a certain class of interrupt. As a
|
||||
replacement, two new interception codes have been introduced: One
|
||||
indicating that the contents of CRs 0, 6, or 14 have been changed,
|
||||
indicating different interruption subclasses; and one indicating that
|
||||
PSW bit 13 has been changed, indicating that a machine check
|
||||
intervention was requested and those are now enabled.
|
||||
|
||||
Instruction emulation
|
||||
---------------------
|
||||
With the format 4 state description for PVMs, the SIE instruction already
|
||||
interprets more instructions than it does with format 2. It is not able
|
||||
to interpret every instruction, but needs to hand some tasks to KVM;
|
||||
therefore, the SIE and the ultravisor safeguard emulation inputs and outputs.
|
||||
|
||||
The control structures associated with SIE provide the Secure
|
||||
Instruction Data Area (SIDA), the Interception Parameters (IP) and the
|
||||
Secure Interception General Register Save Area. Guest GRs and most of
|
||||
the instruction data, such as I/O data structures, are filtered.
|
||||
Instruction data is copied to and from the SIDA when needed. Guest
|
||||
GRs are put into / retrieved from the Secure Interception General
|
||||
Register Save Area.
|
||||
|
||||
Only GR values needed to emulate an instruction will be copied into this
|
||||
save area and the real register numbers will be hidden.
|
||||
|
||||
The Interception Parameters state description field still contains
|
||||
the bytes of the instruction text, but with pre-set register values
|
||||
instead of the actual ones. I.e. each instruction always uses the same
|
||||
instruction text, in order not to leak guest instruction text.
|
||||
This also implies that the register content that a guest had in r<n>
|
||||
may be in r<m> from the hypervisor's point of view.
|
||||
|
||||
The Secure Instruction Data Area contains instruction storage
|
||||
data. Instruction data, i.e. data being referenced by an instruction
|
||||
like the SCCB for sclp, is moved via the SIDA. When an instruction is
|
||||
intercepted, the SIE will only allow data and program interrupts for
|
||||
this instruction to be moved to the guest via the two data areas
|
||||
discussed before. Other data is either ignored or results in validity
|
||||
interceptions.
|
||||
|
||||
|
||||
Instruction emulation interceptions
|
||||
-----------------------------------
|
||||
There are two types of SIE secure instruction intercepts: the normal
|
||||
and the notification type. Normal secure instruction intercepts will
|
||||
make the guest pending for instruction completion of the intercepted
|
||||
instruction type, i.e. on SIE entry it is attempted to complete
|
||||
emulation of the instruction with the data provided by KVM. That might
|
||||
be a program exception or instruction completion.
|
||||
|
||||
The notification type intercepts inform KVM about guest environment
|
||||
changes due to guest instruction interpretation. Such an interception
|
||||
is recognized, for example, for the store prefix instruction to provide
|
||||
the new lowcore location. On SIE reentry, any KVM data in the data areas
|
||||
is ignored and execution continues as if the guest instruction had
|
||||
completed. For that reason KVM is not allowed to inject a program
|
||||
interrupt.
|
||||
|
||||
Links
|
||||
-----
|
||||
`KVM Forum 2019 presentation <https://static.sched.com/hosted_files/kvmforum2019/3b/ibm_protected_vms_s390x.pdf>`_
|
|
@ -9244,7 +9244,7 @@ F: virt/kvm/*
|
|||
F: tools/kvm/
|
||||
F: tools/testing/selftests/kvm/
|
||||
|
||||
KERNEL VIRTUAL MACHINE FOR ARM/ARM64 (KVM/arm, KVM/arm64)
|
||||
KERNEL VIRTUAL MACHINE FOR ARM64 (KVM/arm64)
|
||||
M: Marc Zyngier <maz@kernel.org>
|
||||
R: James Morse <james.morse@arm.com>
|
||||
R: Julien Thierry <julien.thierry.kdev@gmail.com>
|
||||
|
@ -9253,9 +9253,6 @@ L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
|
|||
L: kvmarm@lists.cs.columbia.edu
|
||||
T: git git://git.kernel.org/pub/scm/linux/kernel/git/kvmarm/kvmarm.git
|
||||
S: Maintained
|
||||
F: arch/arm/include/uapi/asm/kvm*
|
||||
F: arch/arm/include/asm/kvm*
|
||||
F: arch/arm/kvm/
|
||||
F: arch/arm64/include/uapi/asm/kvm*
|
||||
F: arch/arm64/include/asm/kvm*
|
||||
F: arch/arm64/kvm/
|
||||
|
@ -9290,6 +9287,7 @@ L: kvm@vger.kernel.org
|
|||
W: http://www.ibm.com/developerworks/linux/linux390/
|
||||
T: git git://git.kernel.org/pub/scm/linux/kernel/git/kvms390/linux.git
|
||||
S: Supported
|
||||
F: Documentation/virt/kvm/s390*
|
||||
F: arch/s390/include/uapi/asm/kvm*
|
||||
F: arch/s390/include/asm/gmap.h
|
||||
F: arch/s390/include/asm/kvm*
|
||||
|
|
|
@ -2090,5 +2090,3 @@ source "drivers/firmware/Kconfig"
|
|||
if CRYPTO
|
||||
source "arch/arm/crypto/Kconfig"
|
||||
endif
|
||||
|
||||
source "arch/arm/kvm/Kconfig"
|
||||
|
|
|
@ -278,7 +278,6 @@ core-$(CONFIG_FPE_NWFPE) += arch/arm/nwfpe/
|
|||
core-$(CONFIG_FPE_FASTFPE) += $(patsubst $(srctree)/%,%,$(wildcard $(srctree)/arch/arm/fastfpe/))
|
||||
core-$(CONFIG_VFP) += arch/arm/vfp/
|
||||
core-$(CONFIG_XEN) += arch/arm/xen/
|
||||
core-$(CONFIG_KVM_ARM_HOST) += arch/arm/kvm/
|
||||
core-$(CONFIG_VDSO) += arch/arm/vdso/
|
||||
|
||||
# If we have a machine-specific directory, then include it in the build.
|
||||
|
|
|
@ -236,5 +236,3 @@ CONFIG_CRYPTO_GCM=y
|
|||
CONFIG_CRYPTO_XCBC=y
|
||||
CONFIG_CRYPTO_SHA256=y
|
||||
# CONFIG_CRYPTO_ANSI_CPRNG is not set
|
||||
CONFIG_VIRTUALIZATION=y
|
||||
CONFIG_KVM=y
|
||||
|
|
|
@ -38,71 +38,6 @@
|
|||
#define ICC_AP1R2 __ICC_AP1Rx(2)
|
||||
#define ICC_AP1R3 __ICC_AP1Rx(3)
|
||||
|
||||
#define ICC_HSRE __ACCESS_CP15(c12, 4, c9, 5)
|
||||
|
||||
#define ICH_VSEIR __ACCESS_CP15(c12, 4, c9, 4)
|
||||
#define ICH_HCR __ACCESS_CP15(c12, 4, c11, 0)
|
||||
#define ICH_VTR __ACCESS_CP15(c12, 4, c11, 1)
|
||||
#define ICH_MISR __ACCESS_CP15(c12, 4, c11, 2)
|
||||
#define ICH_EISR __ACCESS_CP15(c12, 4, c11, 3)
|
||||
#define ICH_ELRSR __ACCESS_CP15(c12, 4, c11, 5)
|
||||
#define ICH_VMCR __ACCESS_CP15(c12, 4, c11, 7)
|
||||
|
||||
#define __LR0(x) __ACCESS_CP15(c12, 4, c12, x)
|
||||
#define __LR8(x) __ACCESS_CP15(c12, 4, c13, x)
|
||||
|
||||
#define ICH_LR0 __LR0(0)
|
||||
#define ICH_LR1 __LR0(1)
|
||||
#define ICH_LR2 __LR0(2)
|
||||
#define ICH_LR3 __LR0(3)
|
||||
#define ICH_LR4 __LR0(4)
|
||||
#define ICH_LR5 __LR0(5)
|
||||
#define ICH_LR6 __LR0(6)
|
||||
#define ICH_LR7 __LR0(7)
|
||||
#define ICH_LR8 __LR8(0)
|
||||
#define ICH_LR9 __LR8(1)
|
||||
#define ICH_LR10 __LR8(2)
|
||||
#define ICH_LR11 __LR8(3)
|
||||
#define ICH_LR12 __LR8(4)
|
||||
#define ICH_LR13 __LR8(5)
|
||||
#define ICH_LR14 __LR8(6)
|
||||
#define ICH_LR15 __LR8(7)
|
||||
|
||||
/* LR top half */
|
||||
#define __LRC0(x) __ACCESS_CP15(c12, 4, c14, x)
|
||||
#define __LRC8(x) __ACCESS_CP15(c12, 4, c15, x)
|
||||
|
||||
#define ICH_LRC0 __LRC0(0)
|
||||
#define ICH_LRC1 __LRC0(1)
|
||||
#define ICH_LRC2 __LRC0(2)
|
||||
#define ICH_LRC3 __LRC0(3)
|
||||
#define ICH_LRC4 __LRC0(4)
|
||||
#define ICH_LRC5 __LRC0(5)
|
||||
#define ICH_LRC6 __LRC0(6)
|
||||
#define ICH_LRC7 __LRC0(7)
|
||||
#define ICH_LRC8 __LRC8(0)
|
||||
#define ICH_LRC9 __LRC8(1)
|
||||
#define ICH_LRC10 __LRC8(2)
|
||||
#define ICH_LRC11 __LRC8(3)
|
||||
#define ICH_LRC12 __LRC8(4)
|
||||
#define ICH_LRC13 __LRC8(5)
|
||||
#define ICH_LRC14 __LRC8(6)
|
||||
#define ICH_LRC15 __LRC8(7)
|
||||
|
||||
#define __ICH_AP0Rx(x) __ACCESS_CP15(c12, 4, c8, x)
|
||||
#define ICH_AP0R0 __ICH_AP0Rx(0)
|
||||
#define ICH_AP0R1 __ICH_AP0Rx(1)
|
||||
#define ICH_AP0R2 __ICH_AP0Rx(2)
|
||||
#define ICH_AP0R3 __ICH_AP0Rx(3)
|
||||
|
||||
#define __ICH_AP1Rx(x) __ACCESS_CP15(c12, 4, c9, x)
|
||||
#define ICH_AP1R0 __ICH_AP1Rx(0)
|
||||
#define ICH_AP1R1 __ICH_AP1Rx(1)
|
||||
#define ICH_AP1R2 __ICH_AP1Rx(2)
|
||||
#define ICH_AP1R3 __ICH_AP1Rx(3)
|
||||
|
||||
/* A32-to-A64 mappings used by VGIC save/restore */
|
||||
|
||||
#define CPUIF_MAP(a32, a64) \
|
||||
static inline void write_ ## a64(u32 val) \
|
||||
{ \
|
||||
|
@ -113,21 +48,6 @@ static inline u32 read_ ## a64(void) \
|
|||
return read_sysreg(a32); \
|
||||
} \
|
||||
|
||||
#define CPUIF_MAP_LO_HI(a32lo, a32hi, a64) \
|
||||
static inline void write_ ## a64(u64 val) \
|
||||
{ \
|
||||
write_sysreg(lower_32_bits(val), a32lo);\
|
||||
write_sysreg(upper_32_bits(val), a32hi);\
|
||||
} \
|
||||
static inline u64 read_ ## a64(void) \
|
||||
{ \
|
||||
u64 val = read_sysreg(a32lo); \
|
||||
\
|
||||
val |= (u64)read_sysreg(a32hi) << 32; \
|
||||
\
|
||||
return val; \
|
||||
}
|
||||
|
||||
CPUIF_MAP(ICC_PMR, ICC_PMR_EL1)
|
||||
CPUIF_MAP(ICC_AP0R0, ICC_AP0R0_EL1)
|
||||
CPUIF_MAP(ICC_AP0R1, ICC_AP0R1_EL1)
|
||||
|
@ -138,40 +58,6 @@ CPUIF_MAP(ICC_AP1R1, ICC_AP1R1_EL1)
|
|||
CPUIF_MAP(ICC_AP1R2, ICC_AP1R2_EL1)
|
||||
CPUIF_MAP(ICC_AP1R3, ICC_AP1R3_EL1)
|
||||
|
||||
CPUIF_MAP(ICH_HCR, ICH_HCR_EL2)
|
||||
CPUIF_MAP(ICH_VTR, ICH_VTR_EL2)
|
||||
CPUIF_MAP(ICH_MISR, ICH_MISR_EL2)
|
||||
CPUIF_MAP(ICH_EISR, ICH_EISR_EL2)
|
||||
CPUIF_MAP(ICH_ELRSR, ICH_ELRSR_EL2)
|
||||
CPUIF_MAP(ICH_VMCR, ICH_VMCR_EL2)
|
||||
CPUIF_MAP(ICH_AP0R3, ICH_AP0R3_EL2)
|
||||
CPUIF_MAP(ICH_AP0R2, ICH_AP0R2_EL2)
|
||||
CPUIF_MAP(ICH_AP0R1, ICH_AP0R1_EL2)
|
||||
CPUIF_MAP(ICH_AP0R0, ICH_AP0R0_EL2)
|
||||
CPUIF_MAP(ICH_AP1R3, ICH_AP1R3_EL2)
|
||||
CPUIF_MAP(ICH_AP1R2, ICH_AP1R2_EL2)
|
||||
CPUIF_MAP(ICH_AP1R1, ICH_AP1R1_EL2)
|
||||
CPUIF_MAP(ICH_AP1R0, ICH_AP1R0_EL2)
|
||||
CPUIF_MAP(ICC_HSRE, ICC_SRE_EL2)
|
||||
CPUIF_MAP(ICC_SRE, ICC_SRE_EL1)
|
||||
|
||||
CPUIF_MAP_LO_HI(ICH_LR15, ICH_LRC15, ICH_LR15_EL2)
|
||||
CPUIF_MAP_LO_HI(ICH_LR14, ICH_LRC14, ICH_LR14_EL2)
|
||||
CPUIF_MAP_LO_HI(ICH_LR13, ICH_LRC13, ICH_LR13_EL2)
|
||||
CPUIF_MAP_LO_HI(ICH_LR12, ICH_LRC12, ICH_LR12_EL2)
|
||||
CPUIF_MAP_LO_HI(ICH_LR11, ICH_LRC11, ICH_LR11_EL2)
|
||||
CPUIF_MAP_LO_HI(ICH_LR10, ICH_LRC10, ICH_LR10_EL2)
|
||||
CPUIF_MAP_LO_HI(ICH_LR9, ICH_LRC9, ICH_LR9_EL2)
|
||||
CPUIF_MAP_LO_HI(ICH_LR8, ICH_LRC8, ICH_LR8_EL2)
|
||||
CPUIF_MAP_LO_HI(ICH_LR7, ICH_LRC7, ICH_LR7_EL2)
|
||||
CPUIF_MAP_LO_HI(ICH_LR6, ICH_LRC6, ICH_LR6_EL2)
|
||||
CPUIF_MAP_LO_HI(ICH_LR5, ICH_LRC5, ICH_LR5_EL2)
|
||||
CPUIF_MAP_LO_HI(ICH_LR4, ICH_LRC4, ICH_LR4_EL2)
|
||||
CPUIF_MAP_LO_HI(ICH_LR3, ICH_LRC3, ICH_LR3_EL2)
|
||||
CPUIF_MAP_LO_HI(ICH_LR2, ICH_LRC2, ICH_LR2_EL2)
|
||||
CPUIF_MAP_LO_HI(ICH_LR1, ICH_LRC1, ICH_LR1_EL2)
|
||||
CPUIF_MAP_LO_HI(ICH_LR0, ICH_LRC0, ICH_LR0_EL2)
|
||||
|
||||
#define read_gicreg(r) read_##r()
|
||||
#define write_gicreg(v, r) write_##r(v)
|
||||
|
||||
|
|
|
@ -1,239 +0,0 @@
|
|||
/* SPDX-License-Identifier: GPL-2.0-only */
|
||||
/*
|
||||
* Copyright (C) 2012 - Virtual Open Systems and Columbia University
|
||||
* Author: Christoffer Dall <c.dall@virtualopensystems.com>
|
||||
*/
|
||||
|
||||
#ifndef __ARM_KVM_ARM_H__
|
||||
#define __ARM_KVM_ARM_H__
|
||||
|
||||
#include <linux/const.h>
|
||||
#include <linux/types.h>
|
||||
|
||||
/* Hyp Configuration Register (HCR) bits */
|
||||
#define HCR_TGE (1 << 27)
|
||||
#define HCR_TVM (1 << 26)
|
||||
#define HCR_TTLB (1 << 25)
|
||||
#define HCR_TPU (1 << 24)
|
||||
#define HCR_TPC (1 << 23)
|
||||
#define HCR_TSW (1 << 22)
|
||||
#define HCR_TAC (1 << 21)
|
||||
#define HCR_TIDCP (1 << 20)
|
||||
#define HCR_TSC (1 << 19)
|
||||
#define HCR_TID3 (1 << 18)
|
||||
#define HCR_TID2 (1 << 17)
|
||||
#define HCR_TID1 (1 << 16)
|
||||
#define HCR_TID0 (1 << 15)
|
||||
#define HCR_TWE (1 << 14)
|
||||
#define HCR_TWI (1 << 13)
|
||||
#define HCR_DC (1 << 12)
|
||||
#define HCR_BSU (3 << 10)
|
||||
#define HCR_BSU_IS (1 << 10)
|
||||
#define HCR_FB (1 << 9)
|
||||
#define HCR_VA (1 << 8)
|
||||
#define HCR_VI (1 << 7)
|
||||
#define HCR_VF (1 << 6)
|
||||
#define HCR_AMO (1 << 5)
|
||||
#define HCR_IMO (1 << 4)
|
||||
#define HCR_FMO (1 << 3)
|
||||
#define HCR_PTW (1 << 2)
|
||||
#define HCR_SWIO (1 << 1)
|
||||
#define HCR_VM 1
|
||||
|
||||
/*
|
||||
* The bits we set in HCR:
|
||||
* TAC: Trap ACTLR
|
||||
* TSC: Trap SMC
|
||||
* TVM: Trap VM ops (until MMU and caches are on)
|
||||
* TSW: Trap cache operations by set/way
|
||||
* TWI: Trap WFI
|
||||
* TWE: Trap WFE
|
||||
* TIDCP: Trap L2CTLR/L2ECTLR
|
||||
* BSU_IS: Upgrade barriers to the inner shareable domain
|
||||
* FB: Force broadcast of all maintainance operations
|
||||
* AMO: Override CPSR.A and enable signaling with VA
|
||||
* IMO: Override CPSR.I and enable signaling with VI
|
||||
* FMO: Override CPSR.F and enable signaling with VF
|
||||
* SWIO: Turn set/way invalidates into set/way clean+invalidate
|
||||
*/
|
||||
#define HCR_GUEST_MASK (HCR_TSC | HCR_TSW | HCR_TWI | HCR_VM | HCR_BSU_IS | \
|
||||
HCR_FB | HCR_TAC | HCR_AMO | HCR_IMO | HCR_FMO | \
|
||||
HCR_TVM | HCR_TWE | HCR_SWIO | HCR_TIDCP)
|
||||
|
||||
/* System Control Register (SCTLR) bits */
|
||||
#define SCTLR_TE (1 << 30)
|
||||
#define SCTLR_EE (1 << 25)
|
||||
#define SCTLR_V (1 << 13)
|
||||
|
||||
/* Hyp System Control Register (HSCTLR) bits */
|
||||
#define HSCTLR_TE (1 << 30)
|
||||
#define HSCTLR_EE (1 << 25)
|
||||
#define HSCTLR_FI (1 << 21)
|
||||
#define HSCTLR_WXN (1 << 19)
|
||||
#define HSCTLR_I (1 << 12)
|
||||
#define HSCTLR_C (1 << 2)
|
||||
#define HSCTLR_A (1 << 1)
|
||||
#define HSCTLR_M 1
|
||||
#define HSCTLR_MASK (HSCTLR_M | HSCTLR_A | HSCTLR_C | HSCTLR_I | \
|
||||
HSCTLR_WXN | HSCTLR_FI | HSCTLR_EE | HSCTLR_TE)
|
||||
|
||||
/* TTBCR and HTCR Registers bits */
|
||||
#define TTBCR_EAE (1 << 31)
|
||||
#define TTBCR_IMP (1 << 30)
|
||||
#define TTBCR_SH1 (3 << 28)
|
||||
#define TTBCR_ORGN1 (3 << 26)
|
||||
#define TTBCR_IRGN1 (3 << 24)
|
||||
#define TTBCR_EPD1 (1 << 23)
|
||||
#define TTBCR_A1 (1 << 22)
|
||||
#define TTBCR_T1SZ (7 << 16)
|
||||
#define TTBCR_SH0 (3 << 12)
|
||||
#define TTBCR_ORGN0 (3 << 10)
|
||||
#define TTBCR_IRGN0 (3 << 8)
|
||||
#define TTBCR_EPD0 (1 << 7)
|
||||
#define TTBCR_T0SZ (7 << 0)
|
||||
#define HTCR_MASK (TTBCR_T0SZ | TTBCR_IRGN0 | TTBCR_ORGN0 | TTBCR_SH0)
|
||||
|
||||
/* Hyp System Trap Register */
|
||||
#define HSTR_T(x) (1 << x)
|
||||
#define HSTR_TTEE (1 << 16)
|
||||
#define HSTR_TJDBX (1 << 17)
|
||||
|
||||
/* Hyp Coprocessor Trap Register */
|
||||
#define HCPTR_TCP(x) (1 << x)
|
||||
#define HCPTR_TCP_MASK (0x3fff)
|
||||
#define HCPTR_TASE (1 << 15)
|
||||
#define HCPTR_TTA (1 << 20)
|
||||
#define HCPTR_TCPAC (1 << 31)
|
||||
|
||||
/* Hyp Debug Configuration Register bits */
|
||||
#define HDCR_TDRA (1 << 11)
|
||||
#define HDCR_TDOSA (1 << 10)
|
||||
#define HDCR_TDA (1 << 9)
|
||||
#define HDCR_TDE (1 << 8)
|
||||
#define HDCR_HPME (1 << 7)
|
||||
#define HDCR_TPM (1 << 6)
|
||||
#define HDCR_TPMCR (1 << 5)
|
||||
#define HDCR_HPMN_MASK (0x1F)
|
||||
|
||||
/*
|
||||
* The architecture supports 40-bit IPA as input to the 2nd stage translations
|
||||
* and PTRS_PER_S2_PGD becomes 1024, because each entry covers 1GB of address
|
||||
* space.
|
||||
*/
|
||||
#define KVM_PHYS_SHIFT (40)
|
||||
|
||||
#define PTRS_PER_S2_PGD (_AC(1, ULL) << (KVM_PHYS_SHIFT - 30))
|
||||
|
||||
/* Virtualization Translation Control Register (VTCR) bits */
|
||||
#define VTCR_SH0 (3 << 12)
|
||||
#define VTCR_ORGN0 (3 << 10)
|
||||
#define VTCR_IRGN0 (3 << 8)
|
||||
#define VTCR_SL0 (3 << 6)
|
||||
#define VTCR_S (1 << 4)
|
||||
#define VTCR_T0SZ (0xf)
|
||||
#define VTCR_MASK (VTCR_SH0 | VTCR_ORGN0 | VTCR_IRGN0 | VTCR_SL0 | \
|
||||
VTCR_S | VTCR_T0SZ)
|
||||
#define VTCR_HTCR_SH (VTCR_SH0 | VTCR_ORGN0 | VTCR_IRGN0)
|
||||
#define VTCR_SL_L2 (0 << 6) /* Starting-level: 2 */
|
||||
#define VTCR_SL_L1 (1 << 6) /* Starting-level: 1 */
|
||||
#define KVM_VTCR_SL0 VTCR_SL_L1
|
||||
/* stage-2 input address range defined as 2^(32-T0SZ) */
|
||||
#define KVM_T0SZ (32 - KVM_PHYS_SHIFT)
|
||||
#define KVM_VTCR_T0SZ (KVM_T0SZ & VTCR_T0SZ)
|
||||
#define KVM_VTCR_S ((KVM_VTCR_T0SZ << 1) & VTCR_S)
|
||||
|
||||
/* Virtualization Translation Table Base Register (VTTBR) bits */
|
||||
#if KVM_VTCR_SL0 == VTCR_SL_L2 /* see ARM DDI 0406C: B4-1720 */
|
||||
#define VTTBR_X (14 - KVM_T0SZ)
|
||||
#else
|
||||
#define VTTBR_X (5 - KVM_T0SZ)
|
||||
#endif
|
||||
#define VTTBR_CNP_BIT _AC(1, UL)
|
||||
#define VTTBR_BADDR_MASK (((_AC(1, ULL) << (40 - VTTBR_X)) - 1) << VTTBR_X)
|
||||
#define VTTBR_VMID_SHIFT _AC(48, ULL)
|
||||
#define VTTBR_VMID_MASK(size) (_AT(u64, (1 << size) - 1) << VTTBR_VMID_SHIFT)
|
||||
|
||||
/* Hyp Syndrome Register (HSR) bits */
|
||||
#define HSR_EC_SHIFT (26)
|
||||
#define HSR_EC (_AC(0x3f, UL) << HSR_EC_SHIFT)
|
||||
#define HSR_IL (_AC(1, UL) << 25)
|
||||
#define HSR_ISS (HSR_IL - 1)
|
||||
#define HSR_ISV_SHIFT (24)
|
||||
#define HSR_ISV (_AC(1, UL) << HSR_ISV_SHIFT)
|
||||
#define HSR_SRT_SHIFT (16)
|
||||
#define HSR_SRT_MASK (0xf << HSR_SRT_SHIFT)
|
||||
#define HSR_CM (1 << 8)
|
||||
#define HSR_FSC (0x3f)
|
||||
#define HSR_FSC_TYPE (0x3c)
|
||||
#define HSR_SSE (1 << 21)
|
||||
#define HSR_WNR (1 << 6)
|
||||
#define HSR_CV_SHIFT (24)
|
||||
#define HSR_CV (_AC(1, UL) << HSR_CV_SHIFT)
|
||||
#define HSR_COND_SHIFT (20)
|
||||
#define HSR_COND (_AC(0xf, UL) << HSR_COND_SHIFT)
|
||||
|
||||
#define FSC_FAULT (0x04)
|
||||
#define FSC_ACCESS (0x08)
|
||||
#define FSC_PERM (0x0c)
|
||||
#define FSC_SEA (0x10)
|
||||
#define FSC_SEA_TTW0 (0x14)
|
||||
#define FSC_SEA_TTW1 (0x15)
|
||||
#define FSC_SEA_TTW2 (0x16)
|
||||
#define FSC_SEA_TTW3 (0x17)
|
||||
#define FSC_SECC (0x18)
|
||||
#define FSC_SECC_TTW0 (0x1c)
|
||||
#define FSC_SECC_TTW1 (0x1d)
|
||||
#define FSC_SECC_TTW2 (0x1e)
|
||||
#define FSC_SECC_TTW3 (0x1f)
|
||||
|
||||
/* Hyp Prefetch Fault Address Register (HPFAR/HDFAR) */
|
||||
#define HPFAR_MASK (~0xf)
|
||||
|
||||
#define HSR_EC_UNKNOWN (0x00)
|
||||
#define HSR_EC_WFI (0x01)
|
||||
#define HSR_EC_CP15_32 (0x03)
|
||||
#define HSR_EC_CP15_64 (0x04)
|
||||
#define HSR_EC_CP14_MR (0x05)
|
||||
#define HSR_EC_CP14_LS (0x06)
|
||||
#define HSR_EC_CP_0_13 (0x07)
|
||||
#define HSR_EC_CP10_ID (0x08)
|
||||
#define HSR_EC_JAZELLE (0x09)
|
||||
#define HSR_EC_BXJ (0x0A)
|
||||
#define HSR_EC_CP14_64 (0x0C)
|
||||
#define HSR_EC_SVC_HYP (0x11)
|
||||
#define HSR_EC_HVC (0x12)
|
||||
#define HSR_EC_SMC (0x13)
|
||||
#define HSR_EC_IABT (0x20)
|
||||
#define HSR_EC_IABT_HYP (0x21)
|
||||
#define HSR_EC_DABT (0x24)
|
||||
#define HSR_EC_DABT_HYP (0x25)
|
||||
#define HSR_EC_MAX (0x3f)
|
||||
|
||||
#define HSR_WFI_IS_WFE (_AC(1, UL) << 0)
|
||||
|
||||
#define HSR_HVC_IMM_MASK ((_AC(1, UL) << 16) - 1)
|
||||
|
||||
#define HSR_DABT_S1PTW (_AC(1, UL) << 7)
|
||||
#define HSR_DABT_CM (_AC(1, UL) << 8)
|
||||
|
||||
#define kvm_arm_exception_type \
|
||||
{0, "RESET" }, \
|
||||
{1, "UNDEFINED" }, \
|
||||
{2, "SOFTWARE" }, \
|
||||
{3, "PREF_ABORT" }, \
|
||||
{4, "DATA_ABORT" }, \
|
||||
{5, "IRQ" }, \
|
||||
{6, "FIQ" }, \
|
||||
{7, "HVC" }
|
||||
|
||||
#define HSRECN(x) { HSR_EC_##x, #x }
|
||||
|
||||
#define kvm_arm_exception_class \
|
||||
HSRECN(UNKNOWN), HSRECN(WFI), HSRECN(CP15_32), HSRECN(CP15_64), \
|
||||
HSRECN(CP14_MR), HSRECN(CP14_LS), HSRECN(CP_0_13), HSRECN(CP10_ID), \
|
||||
HSRECN(JAZELLE), HSRECN(BXJ), HSRECN(CP14_64), HSRECN(SVC_HYP), \
|
||||
HSRECN(HVC), HSRECN(SMC), HSRECN(IABT), HSRECN(IABT_HYP), \
|
||||
HSRECN(DABT), HSRECN(DABT_HYP)
|
||||
|
||||
|
||||
#endif /* __ARM_KVM_ARM_H__ */
|
|
@ -1,77 +0,0 @@
|
|||
/* SPDX-License-Identifier: GPL-2.0-only */
|
||||
/*
|
||||
* Copyright (C) 2012 - Virtual Open Systems and Columbia University
|
||||
* Author: Christoffer Dall <c.dall@virtualopensystems.com>
|
||||
*/
|
||||
|
||||
#ifndef __ARM_KVM_ASM_H__
|
||||
#define __ARM_KVM_ASM_H__
|
||||
|
||||
#include <asm/virt.h>
|
||||
|
||||
#define ARM_EXIT_WITH_ABORT_BIT 31
|
||||
#define ARM_EXCEPTION_CODE(x) ((x) & ~(1U << ARM_EXIT_WITH_ABORT_BIT))
|
||||
#define ARM_EXCEPTION_IS_TRAP(x) \
|
||||
(ARM_EXCEPTION_CODE((x)) == ARM_EXCEPTION_PREF_ABORT || \
|
||||
ARM_EXCEPTION_CODE((x)) == ARM_EXCEPTION_DATA_ABORT || \
|
||||
ARM_EXCEPTION_CODE((x)) == ARM_EXCEPTION_HVC)
|
||||
#define ARM_ABORT_PENDING(x) !!((x) & (1U << ARM_EXIT_WITH_ABORT_BIT))
|
||||
|
||||
#define ARM_EXCEPTION_RESET 0
|
||||
#define ARM_EXCEPTION_UNDEFINED 1
|
||||
#define ARM_EXCEPTION_SOFTWARE 2
|
||||
#define ARM_EXCEPTION_PREF_ABORT 3
|
||||
#define ARM_EXCEPTION_DATA_ABORT 4
|
||||
#define ARM_EXCEPTION_IRQ 5
|
||||
#define ARM_EXCEPTION_FIQ 6
|
||||
#define ARM_EXCEPTION_HVC 7
|
||||
#define ARM_EXCEPTION_HYP_GONE HVC_STUB_ERR
|
||||
/*
|
||||
* The rr_lo_hi macro swaps a pair of registers depending on
|
||||
* current endianness. It is used in conjunction with ldrd and strd
|
||||
* instructions that load/store a 64-bit value from/to memory to/from
|
||||
* a pair of registers which are used with the mrrc and mcrr instructions.
|
||||
* If used with the ldrd/strd instructions, the a1 parameter is the first
|
||||
* source/destination register and the a2 parameter is the second
|
||||
* source/destination register. Note that the ldrd/strd instructions
|
||||
* already swap the bytes within the words correctly according to the
|
||||
* endianness setting, but the order of the registers need to be effectively
|
||||
* swapped when used with the mrrc/mcrr instructions.
|
||||
*/
|
||||
#ifdef CONFIG_CPU_ENDIAN_BE8
|
||||
#define rr_lo_hi(a1, a2) a2, a1
|
||||
#else
|
||||
#define rr_lo_hi(a1, a2) a1, a2
|
||||
#endif
|
||||
|
||||
#define kvm_ksym_ref(kva) (kva)
|
||||
|
||||
#ifndef __ASSEMBLY__
|
||||
struct kvm;
|
||||
struct kvm_vcpu;
|
||||
|
||||
extern char __kvm_hyp_init[];
|
||||
extern char __kvm_hyp_init_end[];
|
||||
|
||||
extern void __kvm_flush_vm_context(void);
|
||||
extern void __kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa);
|
||||
extern void __kvm_tlb_flush_vmid(struct kvm *kvm);
|
||||
extern void __kvm_tlb_flush_local_vmid(struct kvm_vcpu *vcpu);
|
||||
|
||||
extern void __kvm_timer_set_cntvoff(u32 cntvoff_low, u32 cntvoff_high);
|
||||
|
||||
/* no VHE on 32-bit :( */
|
||||
static inline int kvm_vcpu_run_vhe(struct kvm_vcpu *vcpu) { BUG(); return 0; }
|
||||
|
||||
extern int __kvm_vcpu_run_nvhe(struct kvm_vcpu *vcpu);
|
||||
|
||||
extern void __init_stage2_translation(void);
|
||||
|
||||
extern u64 __vgic_v3_get_ich_vtr_el2(void);
|
||||
extern u64 __vgic_v3_read_vmcr(void);
|
||||
extern void __vgic_v3_write_vmcr(u32 vmcr);
|
||||
extern void __vgic_v3_init_lrs(void);
|
||||
|
||||
#endif
|
||||
|
||||
#endif /* __ARM_KVM_ASM_H__ */
|
|
@ -1,36 +0,0 @@
|
|||
/* SPDX-License-Identifier: GPL-2.0-only */
|
||||
/*
|
||||
* Copyright (C) 2012 Rusty Russell IBM Corporation
|
||||
*/
|
||||
|
||||
#ifndef __ARM_KVM_COPROC_H__
|
||||
#define __ARM_KVM_COPROC_H__
|
||||
#include <linux/kvm_host.h>
|
||||
|
||||
void kvm_reset_coprocs(struct kvm_vcpu *vcpu);
|
||||
|
||||
struct kvm_coproc_target_table {
|
||||
unsigned target;
|
||||
const struct coproc_reg *table;
|
||||
size_t num;
|
||||
};
|
||||
void kvm_register_target_coproc_table(struct kvm_coproc_target_table *table);
|
||||
|
||||
int kvm_handle_cp10_id(struct kvm_vcpu *vcpu, struct kvm_run *run);
|
||||
int kvm_handle_cp_0_13_access(struct kvm_vcpu *vcpu, struct kvm_run *run);
|
||||
int kvm_handle_cp14_load_store(struct kvm_vcpu *vcpu, struct kvm_run *run);
|
||||
int kvm_handle_cp14_32(struct kvm_vcpu *vcpu, struct kvm_run *run);
|
||||
int kvm_handle_cp14_64(struct kvm_vcpu *vcpu, struct kvm_run *run);
|
||||
int kvm_handle_cp15_32(struct kvm_vcpu *vcpu, struct kvm_run *run);
|
||||
int kvm_handle_cp15_64(struct kvm_vcpu *vcpu, struct kvm_run *run);
|
||||
|
||||
unsigned long kvm_arm_num_guest_msrs(struct kvm_vcpu *vcpu);
|
||||
int kvm_arm_copy_msrindices(struct kvm_vcpu *vcpu, u64 __user *uindices);
|
||||
void kvm_coproc_table_init(void);
|
||||
|
||||
struct kvm_one_reg;
|
||||
int kvm_arm_copy_coproc_indices(struct kvm_vcpu *vcpu, u64 __user *uindices);
|
||||
int kvm_arm_coproc_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *);
|
||||
int kvm_arm_coproc_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *);
|
||||
unsigned long kvm_arm_num_coproc_regs(struct kvm_vcpu *vcpu);
|
||||
#endif /* __ARM_KVM_COPROC_H__ */
|
|
@ -1,372 +0,0 @@
|
|||
/* SPDX-License-Identifier: GPL-2.0-only */
|
||||
/*
|
||||
* Copyright (C) 2012 - Virtual Open Systems and Columbia University
|
||||
* Author: Christoffer Dall <c.dall@virtualopensystems.com>
|
||||
*/
|
||||
|
||||
#ifndef __ARM_KVM_EMULATE_H__
|
||||
#define __ARM_KVM_EMULATE_H__
|
||||
|
||||
#include <linux/kvm_host.h>
|
||||
#include <asm/kvm_asm.h>
|
||||
#include <asm/kvm_arm.h>
|
||||
#include <asm/cputype.h>
|
||||
|
||||
/* arm64 compatibility macros */
|
||||
#define PSR_AA32_MODE_FIQ FIQ_MODE
|
||||
#define PSR_AA32_MODE_SVC SVC_MODE
|
||||
#define PSR_AA32_MODE_ABT ABT_MODE
|
||||
#define PSR_AA32_MODE_UND UND_MODE
|
||||
#define PSR_AA32_T_BIT PSR_T_BIT
|
||||
#define PSR_AA32_F_BIT PSR_F_BIT
|
||||
#define PSR_AA32_I_BIT PSR_I_BIT
|
||||
#define PSR_AA32_A_BIT PSR_A_BIT
|
||||
#define PSR_AA32_E_BIT PSR_E_BIT
|
||||
#define PSR_AA32_IT_MASK PSR_IT_MASK
|
||||
#define PSR_AA32_GE_MASK 0x000f0000
|
||||
#define PSR_AA32_DIT_BIT 0x00200000
|
||||
#define PSR_AA32_PAN_BIT 0x00400000
|
||||
#define PSR_AA32_SSBS_BIT 0x00800000
|
||||
#define PSR_AA32_Q_BIT PSR_Q_BIT
|
||||
#define PSR_AA32_V_BIT PSR_V_BIT
|
||||
#define PSR_AA32_C_BIT PSR_C_BIT
|
||||
#define PSR_AA32_Z_BIT PSR_Z_BIT
|
||||
#define PSR_AA32_N_BIT PSR_N_BIT
|
||||
|
||||
unsigned long *vcpu_reg(struct kvm_vcpu *vcpu, u8 reg_num);
|
||||
|
||||
static inline unsigned long *vcpu_reg32(struct kvm_vcpu *vcpu, u8 reg_num)
|
||||
{
|
||||
return vcpu_reg(vcpu, reg_num);
|
||||
}
|
||||
|
||||
unsigned long *__vcpu_spsr(struct kvm_vcpu *vcpu);
|
||||
|
||||
static inline unsigned long vpcu_read_spsr(struct kvm_vcpu *vcpu)
|
||||
{
|
||||
return *__vcpu_spsr(vcpu);
|
||||
}
|
||||
|
||||
static inline void vcpu_write_spsr(struct kvm_vcpu *vcpu, unsigned long v)
|
||||
{
|
||||
*__vcpu_spsr(vcpu) = v;
|
||||
}
|
||||
|
||||
static inline unsigned long host_spsr_to_spsr32(unsigned long spsr)
|
||||
{
|
||||
return spsr;
|
||||
}
|
||||
|
||||
static inline unsigned long vcpu_get_reg(struct kvm_vcpu *vcpu,
|
||||
u8 reg_num)
|
||||
{
|
||||
return *vcpu_reg(vcpu, reg_num);
|
||||
}
|
||||
|
||||
static inline void vcpu_set_reg(struct kvm_vcpu *vcpu, u8 reg_num,
|
||||
unsigned long val)
|
||||
{
|
||||
*vcpu_reg(vcpu, reg_num) = val;
|
||||
}
|
||||
|
||||
bool kvm_condition_valid32(const struct kvm_vcpu *vcpu);
|
||||
void kvm_skip_instr32(struct kvm_vcpu *vcpu, bool is_wide_instr);
|
||||
void kvm_inject_undef32(struct kvm_vcpu *vcpu);
|
||||
void kvm_inject_dabt32(struct kvm_vcpu *vcpu, unsigned long addr);
|
||||
void kvm_inject_pabt32(struct kvm_vcpu *vcpu, unsigned long addr);
|
||||
void kvm_inject_vabt(struct kvm_vcpu *vcpu);
|
||||
|
||||
static inline void kvm_inject_undefined(struct kvm_vcpu *vcpu)
|
||||
{
|
||||
kvm_inject_undef32(vcpu);
|
||||
}
|
||||
|
||||
static inline void kvm_inject_dabt(struct kvm_vcpu *vcpu, unsigned long addr)
|
||||
{
|
||||
kvm_inject_dabt32(vcpu, addr);
|
||||
}
|
||||
|
||||
static inline void kvm_inject_pabt(struct kvm_vcpu *vcpu, unsigned long addr)
|
||||
{
|
||||
kvm_inject_pabt32(vcpu, addr);
|
||||
}
|
||||
|
||||
static inline bool kvm_condition_valid(const struct kvm_vcpu *vcpu)
|
||||
{
|
||||
return kvm_condition_valid32(vcpu);
|
||||
}
|
||||
|
||||
static inline void kvm_skip_instr(struct kvm_vcpu *vcpu, bool is_wide_instr)
|
||||
{
|
||||
kvm_skip_instr32(vcpu, is_wide_instr);
|
||||
}
|
||||
|
||||
static inline void vcpu_reset_hcr(struct kvm_vcpu *vcpu)
|
||||
{
|
||||
vcpu->arch.hcr = HCR_GUEST_MASK;
|
||||
}
|
||||
|
||||
static inline unsigned long *vcpu_hcr(const struct kvm_vcpu *vcpu)
|
||||
{
|
||||
return (unsigned long *)&vcpu->arch.hcr;
|
||||
}
|
||||
|
||||
static inline void vcpu_clear_wfx_traps(struct kvm_vcpu *vcpu)
|
||||
{
|
||||
vcpu->arch.hcr &= ~HCR_TWE;
|
||||
}
|
||||
|
||||
static inline void vcpu_set_wfx_traps(struct kvm_vcpu *vcpu)
|
||||
{
|
||||
vcpu->arch.hcr |= HCR_TWE;
|
||||
}
|
||||
|
||||
static inline bool vcpu_mode_is_32bit(const struct kvm_vcpu *vcpu)
|
||||
{
|
||||
return true;
|
||||
}
|
||||
|
||||
static inline unsigned long *vcpu_pc(struct kvm_vcpu *vcpu)
|
||||
{
|
||||
return &vcpu->arch.ctxt.gp_regs.usr_regs.ARM_pc;
|
||||
}
|
||||
|
||||
static inline unsigned long *vcpu_cpsr(const struct kvm_vcpu *vcpu)
|
||||
{
|
||||
return (unsigned long *)&vcpu->arch.ctxt.gp_regs.usr_regs.ARM_cpsr;
|
||||
}
|
||||
|
||||
static inline void vcpu_set_thumb(struct kvm_vcpu *vcpu)
|
||||
{
|
||||
*vcpu_cpsr(vcpu) |= PSR_T_BIT;
|
||||
}
|
||||
|
||||
static inline bool mode_has_spsr(struct kvm_vcpu *vcpu)
|
||||
{
|
||||
unsigned long cpsr_mode = vcpu->arch.ctxt.gp_regs.usr_regs.ARM_cpsr & MODE_MASK;
|
||||
return (cpsr_mode > USR_MODE && cpsr_mode < SYSTEM_MODE);
|
||||
}
|
||||
|
||||
static inline bool vcpu_mode_priv(struct kvm_vcpu *vcpu)
|
||||
{
|
||||
unsigned long cpsr_mode = vcpu->arch.ctxt.gp_regs.usr_regs.ARM_cpsr & MODE_MASK;
|
||||
return cpsr_mode > USR_MODE;
|
||||
}
|
||||
|
||||
static inline u32 kvm_vcpu_get_hsr(const struct kvm_vcpu *vcpu)
|
||||
{
|
||||
return vcpu->arch.fault.hsr;
|
||||
}
|
||||
|
||||
static inline int kvm_vcpu_get_condition(const struct kvm_vcpu *vcpu)
|
||||
{
|
||||
u32 hsr = kvm_vcpu_get_hsr(vcpu);
|
||||
|
||||
if (hsr & HSR_CV)
|
||||
return (hsr & HSR_COND) >> HSR_COND_SHIFT;
|
||||
|
||||
return -1;
|
||||
}
|
||||
|
||||
static inline unsigned long kvm_vcpu_get_hfar(struct kvm_vcpu *vcpu)
|
||||
{
|
||||
return vcpu->arch.fault.hxfar;
|
||||
}
|
||||
|
||||
static inline phys_addr_t kvm_vcpu_get_fault_ipa(struct kvm_vcpu *vcpu)
|
||||
{
|
||||
return ((phys_addr_t)vcpu->arch.fault.hpfar & HPFAR_MASK) << 8;
|
||||
}
|
||||
|
||||
static inline bool kvm_vcpu_dabt_isvalid(struct kvm_vcpu *vcpu)
|
||||
{
|
||||
return kvm_vcpu_get_hsr(vcpu) & HSR_ISV;
|
||||
}
|
||||
|
||||
static inline unsigned long kvm_vcpu_dabt_iss_nisv_sanitized(const struct kvm_vcpu *vcpu)
|
||||
{
|
||||
return kvm_vcpu_get_hsr(vcpu) & (HSR_CM | HSR_WNR | HSR_FSC);
|
||||
}
|
||||
|
||||
static inline bool kvm_vcpu_dabt_iswrite(struct kvm_vcpu *vcpu)
|
||||
{
|
||||
return kvm_vcpu_get_hsr(vcpu) & HSR_WNR;
|
||||
}
|
||||
|
||||
static inline bool kvm_vcpu_dabt_issext(struct kvm_vcpu *vcpu)
|
||||
{
|
||||
return kvm_vcpu_get_hsr(vcpu) & HSR_SSE;
|
||||
}
|
||||
|
||||
static inline bool kvm_vcpu_dabt_issf(const struct kvm_vcpu *vcpu)
|
||||
{
|
||||
return false;
|
||||
}
|
||||
|
||||
static inline int kvm_vcpu_dabt_get_rd(struct kvm_vcpu *vcpu)
|
||||
{
|
||||
return (kvm_vcpu_get_hsr(vcpu) & HSR_SRT_MASK) >> HSR_SRT_SHIFT;
|
||||
}
|
||||
|
||||
static inline bool kvm_vcpu_dabt_iss1tw(struct kvm_vcpu *vcpu)
|
||||
{
|
||||
return kvm_vcpu_get_hsr(vcpu) & HSR_DABT_S1PTW;
|
||||
}
|
||||
|
||||
static inline bool kvm_vcpu_dabt_is_cm(struct kvm_vcpu *vcpu)
|
||||
{
|
||||
return !!(kvm_vcpu_get_hsr(vcpu) & HSR_DABT_CM);
|
||||
}
|
||||
|
||||
/* Get Access Size from a data abort */
|
||||
static inline unsigned int kvm_vcpu_dabt_get_as(struct kvm_vcpu *vcpu)
|
||||
{
|
||||
switch ((kvm_vcpu_get_hsr(vcpu) >> 22) & 0x3) {
|
||||
case 0:
|
||||
return 1;
|
||||
case 1:
|
||||
return 2;
|
||||
case 2:
|
||||
return 4;
|
||||
default:
|
||||
kvm_err("Hardware is weird: SAS 0b11 is reserved\n");
|
||||
return 4;
|
||||
}
|
||||
}
|
||||
|
||||
/* This one is not specific to Data Abort */
|
||||
static inline bool kvm_vcpu_trap_il_is32bit(struct kvm_vcpu *vcpu)
|
||||
{
|
||||
return kvm_vcpu_get_hsr(vcpu) & HSR_IL;
|
||||
}
|
||||
|
||||
static inline u8 kvm_vcpu_trap_get_class(struct kvm_vcpu *vcpu)
|
||||
{
|
||||
return kvm_vcpu_get_hsr(vcpu) >> HSR_EC_SHIFT;
|
||||
}
|
||||
|
||||
static inline bool kvm_vcpu_trap_is_iabt(struct kvm_vcpu *vcpu)
|
||||
{
|
||||
return kvm_vcpu_trap_get_class(vcpu) == HSR_EC_IABT;
|
||||
}
|
||||
|
||||
static inline u8 kvm_vcpu_trap_get_fault(struct kvm_vcpu *vcpu)
|
||||
{
|
||||
return kvm_vcpu_get_hsr(vcpu) & HSR_FSC;
|
||||
}
|
||||
|
||||
static inline u8 kvm_vcpu_trap_get_fault_type(struct kvm_vcpu *vcpu)
|
||||
{
|
||||
return kvm_vcpu_get_hsr(vcpu) & HSR_FSC_TYPE;
|
||||
}
|
||||
|
||||
static inline bool kvm_vcpu_dabt_isextabt(struct kvm_vcpu *vcpu)
|
||||
{
|
||||
switch (kvm_vcpu_trap_get_fault(vcpu)) {
|
||||
case FSC_SEA:
|
||||
case FSC_SEA_TTW0:
|
||||
case FSC_SEA_TTW1:
|
||||
case FSC_SEA_TTW2:
|
||||
case FSC_SEA_TTW3:
|
||||
case FSC_SECC:
|
||||
case FSC_SECC_TTW0:
|
||||
case FSC_SECC_TTW1:
|
||||
case FSC_SECC_TTW2:
|
||||
case FSC_SECC_TTW3:
|
||||
return true;
|
||||
default:
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
static inline bool kvm_is_write_fault(struct kvm_vcpu *vcpu)
|
||||
{
|
||||
if (kvm_vcpu_trap_is_iabt(vcpu))
|
||||
return false;
|
||||
|
||||
return kvm_vcpu_dabt_iswrite(vcpu);
|
||||
}
|
||||
|
||||
static inline u32 kvm_vcpu_hvc_get_imm(struct kvm_vcpu *vcpu)
|
||||
{
|
||||
return kvm_vcpu_get_hsr(vcpu) & HSR_HVC_IMM_MASK;
|
||||
}
|
||||
|
||||
static inline unsigned long kvm_vcpu_get_mpidr_aff(struct kvm_vcpu *vcpu)
|
||||
{
|
||||
return vcpu_cp15(vcpu, c0_MPIDR) & MPIDR_HWID_BITMASK;
|
||||
}
|
||||
|
||||
static inline bool kvm_arm_get_vcpu_workaround_2_flag(struct kvm_vcpu *vcpu)
|
||||
{
|
||||
return false;
|
||||
}
|
||||
|
||||
static inline void kvm_arm_set_vcpu_workaround_2_flag(struct kvm_vcpu *vcpu,
|
||||
bool flag)
|
||||
{
|
||||
}
|
||||
|
||||
static inline void kvm_vcpu_set_be(struct kvm_vcpu *vcpu)
|
||||
{
|
||||
*vcpu_cpsr(vcpu) |= PSR_E_BIT;
|
||||
}
|
||||
|
||||
static inline bool kvm_vcpu_is_be(struct kvm_vcpu *vcpu)
|
||||
{
|
||||
return !!(*vcpu_cpsr(vcpu) & PSR_E_BIT);
|
||||
}
|
||||
|
||||
static inline unsigned long vcpu_data_guest_to_host(struct kvm_vcpu *vcpu,
|
||||
unsigned long data,
|
||||
unsigned int len)
|
||||
{
|
||||
if (kvm_vcpu_is_be(vcpu)) {
|
||||
switch (len) {
|
||||
case 1:
|
||||
return data & 0xff;
|
||||
case 2:
|
||||
return be16_to_cpu(data & 0xffff);
|
||||
default:
|
||||
return be32_to_cpu(data);
|
||||
}
|
||||
} else {
|
||||
switch (len) {
|
||||
case 1:
|
||||
return data & 0xff;
|
||||
case 2:
|
||||
return le16_to_cpu(data & 0xffff);
|
||||
default:
|
||||
return le32_to_cpu(data);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
static inline unsigned long vcpu_data_host_to_guest(struct kvm_vcpu *vcpu,
|
||||
unsigned long data,
|
||||
unsigned int len)
|
||||
{
|
||||
if (kvm_vcpu_is_be(vcpu)) {
|
||||
switch (len) {
|
||||
case 1:
|
||||
return data & 0xff;
|
||||
case 2:
|
||||
return cpu_to_be16(data & 0xffff);
|
||||
default:
|
||||
return cpu_to_be32(data);
|
||||
}
|
||||
} else {
|
||||
switch (len) {
|
||||
case 1:
|
||||
return data & 0xff;
|
||||
case 2:
|
||||
return cpu_to_le16(data & 0xffff);
|
||||
default:
|
||||
return cpu_to_le32(data);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
static inline void vcpu_ptrauth_setup_lazy(struct kvm_vcpu *vcpu) {}
|
||||
|
||||
#endif /* __ARM_KVM_EMULATE_H__ */
|
|
@ -1,456 +0,0 @@
|
|||
/* SPDX-License-Identifier: GPL-2.0-only */
|
||||
/*
|
||||
* Copyright (C) 2012 - Virtual Open Systems and Columbia University
|
||||
* Author: Christoffer Dall <c.dall@virtualopensystems.com>
|
||||
*/
|
||||
|
||||
#ifndef __ARM_KVM_HOST_H__
|
||||
#define __ARM_KVM_HOST_H__
|
||||
|
||||
#include <linux/arm-smccc.h>
|
||||
#include <linux/errno.h>
|
||||
#include <linux/types.h>
|
||||
#include <linux/kvm_types.h>
|
||||
#include <asm/cputype.h>
|
||||
#include <asm/kvm.h>
|
||||
#include <asm/kvm_asm.h>
|
||||
#include <asm/fpstate.h>
|
||||
#include <kvm/arm_arch_timer.h>
|
||||
|
||||
#define __KVM_HAVE_ARCH_INTC_INITIALIZED
|
||||
|
||||
#define KVM_USER_MEM_SLOTS 32
|
||||
#define KVM_HAVE_ONE_REG
|
||||
#define KVM_HALT_POLL_NS_DEFAULT 500000
|
||||
|
||||
#define KVM_VCPU_MAX_FEATURES 2
|
||||
|
||||
#include <kvm/arm_vgic.h>
|
||||
|
||||
|
||||
#ifdef CONFIG_ARM_GIC_V3
|
||||
#define KVM_MAX_VCPUS VGIC_V3_MAX_CPUS
|
||||
#else
|
||||
#define KVM_MAX_VCPUS VGIC_V2_MAX_CPUS
|
||||
#endif
|
||||
|
||||
#define KVM_REQ_SLEEP \
|
||||
KVM_ARCH_REQ_FLAGS(0, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
|
||||
#define KVM_REQ_IRQ_PENDING KVM_ARCH_REQ(1)
|
||||
#define KVM_REQ_VCPU_RESET KVM_ARCH_REQ(2)
|
||||
#define KVM_REQ_RECORD_STEAL KVM_ARCH_REQ(3)
|
||||
|
||||
DECLARE_STATIC_KEY_FALSE(userspace_irqchip_in_use);
|
||||
|
||||
static inline int kvm_arm_init_sve(void) { return 0; }
|
||||
|
||||
u32 *kvm_vcpu_reg(struct kvm_vcpu *vcpu, u8 reg_num, u32 mode);
|
||||
int __attribute_const__ kvm_target_cpu(void);
|
||||
int kvm_reset_vcpu(struct kvm_vcpu *vcpu);
|
||||
void kvm_reset_coprocs(struct kvm_vcpu *vcpu);
|
||||
|
||||
struct kvm_vmid {
|
||||
/* The VMID generation used for the virt. memory system */
|
||||
u64 vmid_gen;
|
||||
u32 vmid;
|
||||
};
|
||||
|
||||
struct kvm_arch {
|
||||
/* The last vcpu id that ran on each physical CPU */
|
||||
int __percpu *last_vcpu_ran;
|
||||
|
||||
/*
|
||||
* Anything that is not used directly from assembly code goes
|
||||
* here.
|
||||
*/
|
||||
|
||||
/* The VMID generation used for the virt. memory system */
|
||||
struct kvm_vmid vmid;
|
||||
|
||||
/* Stage-2 page table */
|
||||
pgd_t *pgd;
|
||||
phys_addr_t pgd_phys;
|
||||
|
||||
/* Interrupt controller */
|
||||
struct vgic_dist vgic;
|
||||
int max_vcpus;
|
||||
|
||||
/* Mandated version of PSCI */
|
||||
u32 psci_version;
|
||||
|
||||
/*
|
||||
* If we encounter a data abort without valid instruction syndrome
|
||||
* information, report this to user space. User space can (and
|
||||
* should) opt in to this feature if KVM_CAP_ARM_NISV_TO_USER is
|
||||
* supported.
|
||||
*/
|
||||
bool return_nisv_io_abort_to_user;
|
||||
};
|
||||
|
||||
#define KVM_NR_MEM_OBJS 40
|
||||
|
||||
/*
|
||||
* We don't want allocation failures within the mmu code, so we preallocate
|
||||
* enough memory for a single page fault in a cache.
|
||||
*/
|
||||
struct kvm_mmu_memory_cache {
|
||||
int nobjs;
|
||||
void *objects[KVM_NR_MEM_OBJS];
|
||||
};
|
||||
|
||||
struct kvm_vcpu_fault_info {
|
||||
u32 hsr; /* Hyp Syndrome Register */
|
||||
u32 hxfar; /* Hyp Data/Inst. Fault Address Register */
|
||||
u32 hpfar; /* Hyp IPA Fault Address Register */
|
||||
};
|
||||
|
||||
/*
|
||||
* 0 is reserved as an invalid value.
|
||||
* Order should be kept in sync with the save/restore code.
|
||||
*/
|
||||
enum vcpu_sysreg {
|
||||
__INVALID_SYSREG__,
|
||||
c0_MPIDR, /* MultiProcessor ID Register */
|
||||
c0_CSSELR, /* Cache Size Selection Register */
|
||||
c1_SCTLR, /* System Control Register */
|
||||
c1_ACTLR, /* Auxiliary Control Register */
|
||||
c1_CPACR, /* Coprocessor Access Control */
|
||||
c2_TTBR0, /* Translation Table Base Register 0 */
|
||||
c2_TTBR0_high, /* TTBR0 top 32 bits */
|
||||
c2_TTBR1, /* Translation Table Base Register 1 */
|
||||
c2_TTBR1_high, /* TTBR1 top 32 bits */
|
||||
c2_TTBCR, /* Translation Table Base Control R. */
|
||||
c3_DACR, /* Domain Access Control Register */
|
||||
c5_DFSR, /* Data Fault Status Register */
|
||||
c5_IFSR, /* Instruction Fault Status Register */
|
||||
c5_ADFSR, /* Auxilary Data Fault Status R */
|
||||
c5_AIFSR, /* Auxilary Instrunction Fault Status R */
|
||||
c6_DFAR, /* Data Fault Address Register */
|
||||
c6_IFAR, /* Instruction Fault Address Register */
|
||||
c7_PAR, /* Physical Address Register */
|
||||
c7_PAR_high, /* PAR top 32 bits */
|
||||
c9_L2CTLR, /* Cortex A15/A7 L2 Control Register */
|
||||
c10_PRRR, /* Primary Region Remap Register */
|
||||
c10_NMRR, /* Normal Memory Remap Register */
|
||||
c12_VBAR, /* Vector Base Address Register */
|
||||
c13_CID, /* Context ID Register */
|
||||
c13_TID_URW, /* Thread ID, User R/W */
|
||||
c13_TID_URO, /* Thread ID, User R/O */
|
||||
c13_TID_PRIV, /* Thread ID, Privileged */
|
||||
c14_CNTKCTL, /* Timer Control Register (PL1) */
|
||||
c10_AMAIR0, /* Auxilary Memory Attribute Indirection Reg0 */
|
||||
c10_AMAIR1, /* Auxilary Memory Attribute Indirection Reg1 */
|
||||
NR_CP15_REGS /* Number of regs (incl. invalid) */
|
||||
};
|
||||
|
||||
struct kvm_cpu_context {
|
||||
struct kvm_regs gp_regs;
|
||||
struct vfp_hard_struct vfp;
|
||||
u32 cp15[NR_CP15_REGS];
|
||||
};
|
||||
|
||||
struct kvm_host_data {
|
||||
struct kvm_cpu_context host_ctxt;
|
||||
};
|
||||
|
||||
typedef struct kvm_host_data kvm_host_data_t;
|
||||
|
||||
static inline void kvm_init_host_cpu_context(struct kvm_cpu_context *cpu_ctxt)
|
||||
{
|
||||
/* The host's MPIDR is immutable, so let's set it up at boot time */
|
||||
cpu_ctxt->cp15[c0_MPIDR] = read_cpuid_mpidr();
|
||||
}
|
||||
|
||||
struct vcpu_reset_state {
|
||||
unsigned long pc;
|
||||
unsigned long r0;
|
||||
bool be;
|
||||
bool reset;
|
||||
};
|
||||
|
||||
struct kvm_vcpu_arch {
|
||||
struct kvm_cpu_context ctxt;
|
||||
|
||||
int target; /* Processor target */
|
||||
DECLARE_BITMAP(features, KVM_VCPU_MAX_FEATURES);
|
||||
|
||||
/* The CPU type we expose to the VM */
|
||||
u32 midr;
|
||||
|
||||
/* HYP trapping configuration */
|
||||
u32 hcr;
|
||||
|
||||
/* Exception Information */
|
||||
struct kvm_vcpu_fault_info fault;
|
||||
|
||||
/* Host FP context */
|
||||
struct kvm_cpu_context *host_cpu_context;
|
||||
|
||||
/* VGIC state */
|
||||
struct vgic_cpu vgic_cpu;
|
||||
struct arch_timer_cpu timer_cpu;
|
||||
|
||||
/*
|
||||
* Anything that is not used directly from assembly code goes
|
||||
* here.
|
||||
*/
|
||||
|
||||
/* vcpu power-off state */
|
||||
bool power_off;
|
||||
|
||||
/* Don't run the guest (internal implementation need) */
|
||||
bool pause;
|
||||
|
||||
/* Cache some mmu pages needed inside spinlock regions */
|
||||
struct kvm_mmu_memory_cache mmu_page_cache;
|
||||
|
||||
struct vcpu_reset_state reset_state;
|
||||
|
||||
/* Detect first run of a vcpu */
|
||||
bool has_run_once;
|
||||
};
|
||||
|
||||
struct kvm_vm_stat {
|
||||
ulong remote_tlb_flush;
|
||||
};
|
||||
|
||||
struct kvm_vcpu_stat {
|
||||
u64 halt_successful_poll;
|
||||
u64 halt_attempted_poll;
|
||||
u64 halt_poll_invalid;
|
||||
u64 halt_wakeup;
|
||||
u64 hvc_exit_stat;
|
||||
u64 wfe_exit_stat;
|
||||
u64 wfi_exit_stat;
|
||||
u64 mmio_exit_user;
|
||||
u64 mmio_exit_kernel;
|
||||
u64 exits;
|
||||
};
|
||||
|
||||
#define vcpu_cp15(v,r) (v)->arch.ctxt.cp15[r]
|
||||
|
||||
int kvm_vcpu_preferred_target(struct kvm_vcpu_init *init);
|
||||
unsigned long kvm_arm_num_regs(struct kvm_vcpu *vcpu);
|
||||
int kvm_arm_copy_reg_indices(struct kvm_vcpu *vcpu, u64 __user *indices);
|
||||
int kvm_arm_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg);
|
||||
int kvm_arm_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg);
|
||||
|
||||
unsigned long __kvm_call_hyp(void *hypfn, ...);
|
||||
|
||||
/*
|
||||
* The has_vhe() part doesn't get emitted, but is used for type-checking.
|
||||
*/
|
||||
#define kvm_call_hyp(f, ...) \
|
||||
do { \
|
||||
if (has_vhe()) { \
|
||||
f(__VA_ARGS__); \
|
||||
} else { \
|
||||
__kvm_call_hyp(kvm_ksym_ref(f), ##__VA_ARGS__); \
|
||||
} \
|
||||
} while(0)
|
||||
|
||||
#define kvm_call_hyp_ret(f, ...) \
|
||||
({ \
|
||||
typeof(f(__VA_ARGS__)) ret; \
|
||||
\
|
||||
if (has_vhe()) { \
|
||||
ret = f(__VA_ARGS__); \
|
||||
} else { \
|
||||
ret = __kvm_call_hyp(kvm_ksym_ref(f), \
|
||||
##__VA_ARGS__); \
|
||||
} \
|
||||
\
|
||||
ret; \
|
||||
})
|
||||
|
||||
void force_vm_exit(const cpumask_t *mask);
|
||||
int __kvm_arm_vcpu_get_events(struct kvm_vcpu *vcpu,
|
||||
struct kvm_vcpu_events *events);
|
||||
|
||||
int __kvm_arm_vcpu_set_events(struct kvm_vcpu *vcpu,
|
||||
struct kvm_vcpu_events *events);
|
||||
|
||||
#define KVM_ARCH_WANT_MMU_NOTIFIER
|
||||
int kvm_unmap_hva_range(struct kvm *kvm,
|
||||
unsigned long start, unsigned long end);
|
||||
int kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte);
|
||||
|
||||
unsigned long kvm_arm_num_regs(struct kvm_vcpu *vcpu);
|
||||
int kvm_arm_copy_reg_indices(struct kvm_vcpu *vcpu, u64 __user *indices);
|
||||
int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end);
|
||||
int kvm_test_age_hva(struct kvm *kvm, unsigned long hva);
|
||||
|
||||
void kvm_arm_halt_guest(struct kvm *kvm);
|
||||
void kvm_arm_resume_guest(struct kvm *kvm);
|
||||
|
||||
int kvm_arm_copy_coproc_indices(struct kvm_vcpu *vcpu, u64 __user *uindices);
|
||||
unsigned long kvm_arm_num_coproc_regs(struct kvm_vcpu *vcpu);
|
||||
int kvm_arm_coproc_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *);
|
||||
int kvm_arm_coproc_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *);
|
||||
|
||||
int handle_exit(struct kvm_vcpu *vcpu, struct kvm_run *run,
|
||||
int exception_index);
|
||||
|
||||
static inline void handle_exit_early(struct kvm_vcpu *vcpu, struct kvm_run *run,
|
||||
int exception_index) {}
|
||||
|
||||
/* MMIO helpers */
|
||||
void kvm_mmio_write_buf(void *buf, unsigned int len, unsigned long data);
|
||||
unsigned long kvm_mmio_read_buf(const void *buf, unsigned int len);
|
||||
|
||||
int kvm_handle_mmio_return(struct kvm_vcpu *vcpu, struct kvm_run *run);
|
||||
int io_mem_abort(struct kvm_vcpu *vcpu, struct kvm_run *run,
|
||||
phys_addr_t fault_ipa);
|
||||
|
||||
static inline void __cpu_init_hyp_mode(phys_addr_t pgd_ptr,
|
||||
unsigned long hyp_stack_ptr,
|
||||
unsigned long vector_ptr)
|
||||
{
|
||||
/*
|
||||
* Call initialization code, and switch to the full blown HYP
|
||||
* code. The init code doesn't need to preserve these
|
||||
* registers as r0-r3 are already callee saved according to
|
||||
* the AAPCS.
|
||||
* Note that we slightly misuse the prototype by casting the
|
||||
* stack pointer to a void *.
|
||||
|
||||
* The PGDs are always passed as the third argument, in order
|
||||
* to be passed into r2-r3 to the init code (yes, this is
|
||||
* compliant with the PCS!).
|
||||
*/
|
||||
|
||||
__kvm_call_hyp((void*)hyp_stack_ptr, vector_ptr, pgd_ptr);
|
||||
}
|
||||
|
||||
static inline void __cpu_init_stage2(void)
|
||||
{
|
||||
kvm_call_hyp(__init_stage2_translation);
|
||||
}
|
||||
|
||||
static inline int kvm_arch_vm_ioctl_check_extension(struct kvm *kvm, long ext)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
|
||||
int kvm_perf_init(void);
|
||||
int kvm_perf_teardown(void);
|
||||
|
||||
static inline long kvm_hypercall_pv_features(struct kvm_vcpu *vcpu)
|
||||
{
|
||||
return SMCCC_RET_NOT_SUPPORTED;
|
||||
}
|
||||
|
||||
static inline gpa_t kvm_init_stolen_time(struct kvm_vcpu *vcpu)
|
||||
{
|
||||
return GPA_INVALID;
|
||||
}
|
||||
|
||||
static inline void kvm_update_stolen_time(struct kvm_vcpu *vcpu)
|
||||
{
|
||||
}
|
||||
|
||||
static inline void kvm_arm_pvtime_vcpu_init(struct kvm_vcpu_arch *vcpu_arch)
|
||||
{
|
||||
}
|
||||
|
||||
static inline bool kvm_arm_is_pvtime_enabled(struct kvm_vcpu_arch *vcpu_arch)
|
||||
{
|
||||
return false;
|
||||
}
|
||||
|
||||
void kvm_mmu_wp_memory_region(struct kvm *kvm, int slot);
|
||||
|
||||
struct kvm_vcpu *kvm_mpidr_to_vcpu(struct kvm *kvm, unsigned long mpidr);
|
||||
|
||||
static inline bool kvm_arch_requires_vhe(void) { return false; }
|
||||
static inline void kvm_arch_hardware_unsetup(void) {}
|
||||
static inline void kvm_arch_sync_events(struct kvm *kvm) {}
|
||||
static inline void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu) {}
|
||||
static inline void kvm_arch_vcpu_block_finish(struct kvm_vcpu *vcpu) {}
|
||||
static inline void kvm_arm_vcpu_destroy(struct kvm_vcpu *vcpu) {}
|
||||
|
||||
static inline void kvm_arm_init_debug(void) {}
|
||||
static inline void kvm_arm_setup_debug(struct kvm_vcpu *vcpu) {}
|
||||
static inline void kvm_arm_clear_debug(struct kvm_vcpu *vcpu) {}
|
||||
static inline void kvm_arm_reset_debug_ptr(struct kvm_vcpu *vcpu) {}
|
||||
|
||||
int kvm_arm_vcpu_arch_set_attr(struct kvm_vcpu *vcpu,
|
||||
struct kvm_device_attr *attr);
|
||||
int kvm_arm_vcpu_arch_get_attr(struct kvm_vcpu *vcpu,
|
||||
struct kvm_device_attr *attr);
|
||||
int kvm_arm_vcpu_arch_has_attr(struct kvm_vcpu *vcpu,
|
||||
struct kvm_device_attr *attr);
|
||||
|
||||
/*
|
||||
* VFP/NEON switching is all done by the hyp switch code, so no need to
|
||||
* coordinate with host context handling for this state:
|
||||
*/
|
||||
static inline void kvm_arch_vcpu_load_fp(struct kvm_vcpu *vcpu) {}
|
||||
static inline void kvm_arch_vcpu_ctxsync_fp(struct kvm_vcpu *vcpu) {}
|
||||
static inline void kvm_arch_vcpu_put_fp(struct kvm_vcpu *vcpu) {}
|
||||
|
||||
static inline void kvm_vcpu_pmu_restore_guest(struct kvm_vcpu *vcpu) {}
|
||||
static inline void kvm_vcpu_pmu_restore_host(struct kvm_vcpu *vcpu) {}
|
||||
|
||||
#define KVM_BP_HARDEN_UNKNOWN -1
|
||||
#define KVM_BP_HARDEN_WA_NEEDED 0
|
||||
#define KVM_BP_HARDEN_NOT_REQUIRED 1
|
||||
|
||||
static inline int kvm_arm_harden_branch_predictor(void)
|
||||
{
|
||||
switch(read_cpuid_part()) {
|
||||
#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
|
||||
case ARM_CPU_PART_BRAHMA_B15:
|
||||
case ARM_CPU_PART_CORTEX_A12:
|
||||
case ARM_CPU_PART_CORTEX_A15:
|
||||
case ARM_CPU_PART_CORTEX_A17:
|
||||
return KVM_BP_HARDEN_WA_NEEDED;
|
||||
#endif
|
||||
case ARM_CPU_PART_CORTEX_A7:
|
||||
return KVM_BP_HARDEN_NOT_REQUIRED;
|
||||
default:
|
||||
return KVM_BP_HARDEN_UNKNOWN;
|
||||
}
|
||||
}
|
||||
|
||||
#define KVM_SSBD_UNKNOWN -1
|
||||
#define KVM_SSBD_FORCE_DISABLE 0
|
||||
#define KVM_SSBD_KERNEL 1
|
||||
#define KVM_SSBD_FORCE_ENABLE 2
|
||||
#define KVM_SSBD_MITIGATED 3
|
||||
|
||||
static inline int kvm_arm_have_ssbd(void)
|
||||
{
|
||||
/* No way to detect it yet, pretend it is not there. */
|
||||
return KVM_SSBD_UNKNOWN;
|
||||
}
|
||||
|
||||
static inline void kvm_vcpu_load_sysregs(struct kvm_vcpu *vcpu) {}
|
||||
static inline void kvm_vcpu_put_sysregs(struct kvm_vcpu *vcpu) {}
|
||||
|
||||
#define __KVM_HAVE_ARCH_VM_ALLOC
|
||||
struct kvm *kvm_arch_alloc_vm(void);
|
||||
void kvm_arch_free_vm(struct kvm *kvm);
|
||||
|
||||
static inline int kvm_arm_setup_stage2(struct kvm *kvm, unsigned long type)
|
||||
{
|
||||
/*
|
||||
* On 32bit ARM, VMs get a static 40bit IPA stage2 setup,
|
||||
* so any non-zero value used as type is illegal.
|
||||
*/
|
||||
if (type)
|
||||
return -EINVAL;
|
||||
return 0;
|
||||
}
|
||||
|
||||
static inline int kvm_arm_vcpu_finalize(struct kvm_vcpu *vcpu, int feature)
|
||||
{
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
static inline bool kvm_arm_vcpu_is_finalized(struct kvm_vcpu *vcpu)
|
||||
{
|
||||
return true;
|
||||
}
|
||||
|
||||
#endif /* __ARM_KVM_HOST_H__ */
|
|
@ -1,127 +0,0 @@
|
|||
/* SPDX-License-Identifier: GPL-2.0-only */
|
||||
/*
|
||||
* Copyright (C) 2015 - ARM Ltd
|
||||
* Author: Marc Zyngier <marc.zyngier@arm.com>
|
||||
*/
|
||||
|
||||
#ifndef __ARM_KVM_HYP_H__
|
||||
#define __ARM_KVM_HYP_H__
|
||||
|
||||
#include <linux/compiler.h>
|
||||
#include <linux/kvm_host.h>
|
||||
#include <asm/cp15.h>
|
||||
#include <asm/kvm_arm.h>
|
||||
#include <asm/vfp.h>
|
||||
|
||||
#define __hyp_text __section(.hyp.text) notrace
|
||||
|
||||
#define __ACCESS_VFP(CRn) \
|
||||
"mrc", "mcr", __stringify(p10, 7, %0, CRn, cr0, 0), u32
|
||||
|
||||
#define write_special(v, r) \
|
||||
asm volatile("msr " __stringify(r) ", %0" : : "r" (v))
|
||||
#define read_special(r) ({ \
|
||||
u32 __val; \
|
||||
asm volatile("mrs %0, " __stringify(r) : "=r" (__val)); \
|
||||
__val; \
|
||||
})
|
||||
|
||||
#define TTBR0 __ACCESS_CP15_64(0, c2)
|
||||
#define TTBR1 __ACCESS_CP15_64(1, c2)
|
||||
#define VTTBR __ACCESS_CP15_64(6, c2)
|
||||
#define PAR __ACCESS_CP15_64(0, c7)
|
||||
#define CNTP_CVAL __ACCESS_CP15_64(2, c14)
|
||||
#define CNTV_CVAL __ACCESS_CP15_64(3, c14)
|
||||
#define CNTVOFF __ACCESS_CP15_64(4, c14)
|
||||
|
||||
#define MIDR __ACCESS_CP15(c0, 0, c0, 0)
|
||||
#define CSSELR __ACCESS_CP15(c0, 2, c0, 0)
|
||||
#define VPIDR __ACCESS_CP15(c0, 4, c0, 0)
|
||||
#define VMPIDR __ACCESS_CP15(c0, 4, c0, 5)
|
||||
#define SCTLR __ACCESS_CP15(c1, 0, c0, 0)
|
||||
#define CPACR __ACCESS_CP15(c1, 0, c0, 2)
|
||||
#define HCR __ACCESS_CP15(c1, 4, c1, 0)
|
||||
#define HDCR __ACCESS_CP15(c1, 4, c1, 1)
|
||||
#define HCPTR __ACCESS_CP15(c1, 4, c1, 2)
|
||||
#define HSTR __ACCESS_CP15(c1, 4, c1, 3)
|
||||
#define TTBCR __ACCESS_CP15(c2, 0, c0, 2)
|
||||
#define HTCR __ACCESS_CP15(c2, 4, c0, 2)
|
||||
#define VTCR __ACCESS_CP15(c2, 4, c1, 2)
|
||||
#define DACR __ACCESS_CP15(c3, 0, c0, 0)
|
||||
#define DFSR __ACCESS_CP15(c5, 0, c0, 0)
|
||||
#define IFSR __ACCESS_CP15(c5, 0, c0, 1)
|
||||
#define ADFSR __ACCESS_CP15(c5, 0, c1, 0)
|
||||
#define AIFSR __ACCESS_CP15(c5, 0, c1, 1)
|
||||
#define HSR __ACCESS_CP15(c5, 4, c2, 0)
|
||||
#define DFAR __ACCESS_CP15(c6, 0, c0, 0)
|
||||
#define IFAR __ACCESS_CP15(c6, 0, c0, 2)
|
||||
#define HDFAR __ACCESS_CP15(c6, 4, c0, 0)
|
||||
#define HIFAR __ACCESS_CP15(c6, 4, c0, 2)
|
||||
#define HPFAR __ACCESS_CP15(c6, 4, c0, 4)
|
||||
#define ICIALLUIS __ACCESS_CP15(c7, 0, c1, 0)
|
||||
#define BPIALLIS __ACCESS_CP15(c7, 0, c1, 6)
|
||||
#define ICIMVAU __ACCESS_CP15(c7, 0, c5, 1)
|
||||
#define ATS1CPR __ACCESS_CP15(c7, 0, c8, 0)
|
||||
#define TLBIALLIS __ACCESS_CP15(c8, 0, c3, 0)
|
||||
#define TLBIALL __ACCESS_CP15(c8, 0, c7, 0)
|
||||
#define TLBIALLNSNHIS __ACCESS_CP15(c8, 4, c3, 4)
|
||||
#define PRRR __ACCESS_CP15(c10, 0, c2, 0)
|
||||
#define NMRR __ACCESS_CP15(c10, 0, c2, 1)
|
||||
#define AMAIR0 __ACCESS_CP15(c10, 0, c3, 0)
|
||||
#define AMAIR1 __ACCESS_CP15(c10, 0, c3, 1)
|
||||
#define VBAR __ACCESS_CP15(c12, 0, c0, 0)
|
||||
#define CID __ACCESS_CP15(c13, 0, c0, 1)
|
||||
#define TID_URW __ACCESS_CP15(c13, 0, c0, 2)
|
||||
#define TID_URO __ACCESS_CP15(c13, 0, c0, 3)
|
||||
#define TID_PRIV __ACCESS_CP15(c13, 0, c0, 4)
|
||||
#define HTPIDR __ACCESS_CP15(c13, 4, c0, 2)
|
||||
#define CNTKCTL __ACCESS_CP15(c14, 0, c1, 0)
|
||||
#define CNTP_CTL __ACCESS_CP15(c14, 0, c2, 1)
|
||||
#define CNTV_CTL __ACCESS_CP15(c14, 0, c3, 1)
|
||||
#define CNTHCTL __ACCESS_CP15(c14, 4, c1, 0)
|
||||
|
||||
#define VFP_FPEXC __ACCESS_VFP(FPEXC)
|
||||
|
||||
/* AArch64 compatibility macros, only for the timer so far */
|
||||
#define read_sysreg_el0(r) read_sysreg(r##_EL0)
|
||||
#define write_sysreg_el0(v, r) write_sysreg(v, r##_EL0)
|
||||
|
||||
#define SYS_CNTP_CTL_EL0 CNTP_CTL
|
||||
#define SYS_CNTP_CVAL_EL0 CNTP_CVAL
|
||||
#define SYS_CNTV_CTL_EL0 CNTV_CTL
|
||||
#define SYS_CNTV_CVAL_EL0 CNTV_CVAL
|
||||
|
||||
#define cntvoff_el2 CNTVOFF
|
||||
#define cnthctl_el2 CNTHCTL
|
||||
|
||||
void __timer_enable_traps(struct kvm_vcpu *vcpu);
|
||||
void __timer_disable_traps(struct kvm_vcpu *vcpu);
|
||||
|
||||
void __vgic_v2_save_state(struct kvm_vcpu *vcpu);
|
||||
void __vgic_v2_restore_state(struct kvm_vcpu *vcpu);
|
||||
|
||||
void __sysreg_save_state(struct kvm_cpu_context *ctxt);
|
||||
void __sysreg_restore_state(struct kvm_cpu_context *ctxt);
|
||||
|
||||
void __vgic_v3_save_state(struct kvm_vcpu *vcpu);
|
||||
void __vgic_v3_restore_state(struct kvm_vcpu *vcpu);
|
||||
void __vgic_v3_activate_traps(struct kvm_vcpu *vcpu);
|
||||
void __vgic_v3_deactivate_traps(struct kvm_vcpu *vcpu);
|
||||
void __vgic_v3_save_aprs(struct kvm_vcpu *vcpu);
|
||||
void __vgic_v3_restore_aprs(struct kvm_vcpu *vcpu);
|
||||
|
||||
asmlinkage void __vfp_save_state(struct vfp_hard_struct *vfp);
|
||||
asmlinkage void __vfp_restore_state(struct vfp_hard_struct *vfp);
|
||||
static inline bool __vfp_enabled(void)
|
||||
{
|
||||
return !(read_sysreg(HCPTR) & (HCPTR_TCP(11) | HCPTR_TCP(10)));
|
||||
}
|
||||
|
||||
void __hyp_text __banked_save_state(struct kvm_cpu_context *ctxt);
|
||||
void __hyp_text __banked_restore_state(struct kvm_cpu_context *ctxt);
|
||||
|
||||
asmlinkage int __guest_enter(struct kvm_vcpu *vcpu,
|
||||
struct kvm_cpu_context *host);
|
||||
asmlinkage int __hyp_do_panic(const char *, int, u32);
|
||||
|
||||
#endif /* __ARM_KVM_HYP_H__ */
|
|
@ -1,435 +0,0 @@
|
|||
/* SPDX-License-Identifier: GPL-2.0-only */
|
||||
/*
|
||||
* Copyright (C) 2012 - Virtual Open Systems and Columbia University
|
||||
* Author: Christoffer Dall <c.dall@virtualopensystems.com>
|
||||
*/
|
||||
|
||||
#ifndef __ARM_KVM_MMU_H__
|
||||
#define __ARM_KVM_MMU_H__
|
||||
|
||||
#include <asm/memory.h>
|
||||
#include <asm/page.h>
|
||||
|
||||
/*
|
||||
* We directly use the kernel VA for the HYP, as we can directly share
|
||||
* the mapping (HTTBR "covers" TTBR1).
|
||||
*/
|
||||
#define kern_hyp_va(kva) (kva)
|
||||
|
||||
/* Contrary to arm64, there is no need to generate a PC-relative address */
|
||||
#define hyp_symbol_addr(s) \
|
||||
({ \
|
||||
typeof(s) *addr = &(s); \
|
||||
addr; \
|
||||
})
|
||||
|
||||
#ifndef __ASSEMBLY__
|
||||
|
||||
#include <linux/highmem.h>
|
||||
#include <asm/cacheflush.h>
|
||||
#include <asm/cputype.h>
|
||||
#include <asm/kvm_arm.h>
|
||||
#include <asm/kvm_hyp.h>
|
||||
#include <asm/pgalloc.h>
|
||||
#include <asm/stage2_pgtable.h>
|
||||
|
||||
/* Ensure compatibility with arm64 */
|
||||
#define VA_BITS 32
|
||||
|
||||
#define kvm_phys_shift(kvm) KVM_PHYS_SHIFT
|
||||
#define kvm_phys_size(kvm) (1ULL << kvm_phys_shift(kvm))
|
||||
#define kvm_phys_mask(kvm) (kvm_phys_size(kvm) - 1ULL)
|
||||
#define kvm_vttbr_baddr_mask(kvm) VTTBR_BADDR_MASK
|
||||
|
||||
#define stage2_pgd_size(kvm) (PTRS_PER_S2_PGD * sizeof(pgd_t))
|
||||
|
||||
int create_hyp_mappings(void *from, void *to, pgprot_t prot);
|
||||
int create_hyp_io_mappings(phys_addr_t phys_addr, size_t size,
|
||||
void __iomem **kaddr,
|
||||
void __iomem **haddr);
|
||||
int create_hyp_exec_mappings(phys_addr_t phys_addr, size_t size,
|
||||
void **haddr);
|
||||
void free_hyp_pgds(void);
|
||||
|
||||
void stage2_unmap_vm(struct kvm *kvm);
|
||||
int kvm_alloc_stage2_pgd(struct kvm *kvm);
|
||||
void kvm_free_stage2_pgd(struct kvm *kvm);
|
||||
int kvm_phys_addr_ioremap(struct kvm *kvm, phys_addr_t guest_ipa,
|
||||
phys_addr_t pa, unsigned long size, bool writable);
|
||||
|
||||
int kvm_handle_guest_abort(struct kvm_vcpu *vcpu, struct kvm_run *run);
|
||||
|
||||
void kvm_mmu_free_memory_caches(struct kvm_vcpu *vcpu);
|
||||
|
||||
phys_addr_t kvm_mmu_get_httbr(void);
|
||||
phys_addr_t kvm_get_idmap_vector(void);
|
||||
int kvm_mmu_init(void);
|
||||
void kvm_clear_hyp_idmap(void);
|
||||
|
||||
#define kvm_mk_pmd(ptep) __pmd(__pa(ptep) | PMD_TYPE_TABLE)
|
||||
#define kvm_mk_pud(pmdp) __pud(__pa(pmdp) | PMD_TYPE_TABLE)
|
||||
#define kvm_mk_pgd(pudp) ({ BUILD_BUG(); 0; })
|
||||
|
||||
#define kvm_pfn_pte(pfn, prot) pfn_pte(pfn, prot)
|
||||
#define kvm_pfn_pmd(pfn, prot) pfn_pmd(pfn, prot)
|
||||
#define kvm_pfn_pud(pfn, prot) (__pud(0))
|
||||
|
||||
#define kvm_pud_pfn(pud) ({ WARN_ON(1); 0; })
|
||||
|
||||
|
||||
#define kvm_pmd_mkhuge(pmd) pmd_mkhuge(pmd)
|
||||
/* No support for pud hugepages */
|
||||
#define kvm_pud_mkhuge(pud) ( {WARN_ON(1); pud; })
|
||||
|
||||
/*
|
||||
* The following kvm_*pud*() functions are provided strictly to allow
|
||||
* sharing code with arm64. They should never be called in practice.
|
||||
*/
|
||||
static inline void kvm_set_s2pud_readonly(pud_t *pud)
|
||||
{
|
||||
WARN_ON(1);
|
||||
}
|
||||
|
||||
static inline bool kvm_s2pud_readonly(pud_t *pud)
|
||||
{
|
||||
WARN_ON(1);
|
||||
return false;
|
||||
}
|
||||
|
||||
static inline void kvm_set_pud(pud_t *pud, pud_t new_pud)
|
||||
{
|
||||
WARN_ON(1);
|
||||
}
|
||||
|
||||
static inline pud_t kvm_s2pud_mkwrite(pud_t pud)
|
||||
{
|
||||
WARN_ON(1);
|
||||
return pud;
|
||||
}
|
||||
|
||||
static inline pud_t kvm_s2pud_mkexec(pud_t pud)
|
||||
{
|
||||
WARN_ON(1);
|
||||
return pud;
|
||||
}
|
||||
|
||||
static inline bool kvm_s2pud_exec(pud_t *pud)
|
||||
{
|
||||
WARN_ON(1);
|
||||
return false;
|
||||
}
|
||||
|
||||
static inline pud_t kvm_s2pud_mkyoung(pud_t pud)
|
||||
{
|
||||
BUG();
|
||||
return pud;
|
||||
}
|
||||
|
||||
static inline bool kvm_s2pud_young(pud_t pud)
|
||||
{
|
||||
WARN_ON(1);
|
||||
return false;
|
||||
}
|
||||
|
||||
static inline pte_t kvm_s2pte_mkwrite(pte_t pte)
|
||||
{
|
||||
pte_val(pte) |= L_PTE_S2_RDWR;
|
||||
return pte;
|
||||
}
|
||||
|
||||
static inline pmd_t kvm_s2pmd_mkwrite(pmd_t pmd)
|
||||
{
|
||||
pmd_val(pmd) |= L_PMD_S2_RDWR;
|
||||
return pmd;
|
||||
}
|
||||
|
||||
static inline pte_t kvm_s2pte_mkexec(pte_t pte)
|
||||
{
|
||||
pte_val(pte) &= ~L_PTE_XN;
|
||||
return pte;
|
||||
}
|
||||
|
||||
static inline pmd_t kvm_s2pmd_mkexec(pmd_t pmd)
|
||||
{
|
||||
pmd_val(pmd) &= ~PMD_SECT_XN;
|
||||
return pmd;
|
||||
}
|
||||
|
||||
static inline void kvm_set_s2pte_readonly(pte_t *pte)
|
||||
{
|
||||
pte_val(*pte) = (pte_val(*pte) & ~L_PTE_S2_RDWR) | L_PTE_S2_RDONLY;
|
||||
}
|
||||
|
||||
static inline bool kvm_s2pte_readonly(pte_t *pte)
|
||||
{
|
||||
return (pte_val(*pte) & L_PTE_S2_RDWR) == L_PTE_S2_RDONLY;
|
||||
}
|
||||
|
||||
static inline bool kvm_s2pte_exec(pte_t *pte)
|
||||
{
|
||||
return !(pte_val(*pte) & L_PTE_XN);
|
||||
}
|
||||
|
||||
static inline void kvm_set_s2pmd_readonly(pmd_t *pmd)
|
||||
{
|
||||
pmd_val(*pmd) = (pmd_val(*pmd) & ~L_PMD_S2_RDWR) | L_PMD_S2_RDONLY;
|
||||
}
|
||||
|
||||
static inline bool kvm_s2pmd_readonly(pmd_t *pmd)
|
||||
{
|
||||
return (pmd_val(*pmd) & L_PMD_S2_RDWR) == L_PMD_S2_RDONLY;
|
||||
}
|
||||
|
||||
static inline bool kvm_s2pmd_exec(pmd_t *pmd)
|
||||
{
|
||||
return !(pmd_val(*pmd) & PMD_SECT_XN);
|
||||
}
|
||||
|
||||
static inline bool kvm_page_empty(void *ptr)
|
||||
{
|
||||
struct page *ptr_page = virt_to_page(ptr);
|
||||
return page_count(ptr_page) == 1;
|
||||
}
|
||||
|
||||
#define kvm_pte_table_empty(kvm, ptep) kvm_page_empty(ptep)
|
||||
#define kvm_pmd_table_empty(kvm, pmdp) kvm_page_empty(pmdp)
|
||||
#define kvm_pud_table_empty(kvm, pudp) false
|
||||
|
||||
#define hyp_pte_table_empty(ptep) kvm_page_empty(ptep)
|
||||
#define hyp_pmd_table_empty(pmdp) kvm_page_empty(pmdp)
|
||||
#define hyp_pud_table_empty(pudp) false
|
||||
|
||||
struct kvm;
|
||||
|
||||
#define kvm_flush_dcache_to_poc(a,l) __cpuc_flush_dcache_area((a), (l))
|
||||
|
||||
static inline bool vcpu_has_cache_enabled(struct kvm_vcpu *vcpu)
|
||||
{
|
||||
return (vcpu_cp15(vcpu, c1_SCTLR) & 0b101) == 0b101;
|
||||
}
|
||||
|
||||
static inline void __clean_dcache_guest_page(kvm_pfn_t pfn, unsigned long size)
|
||||
{
|
||||
/*
|
||||
* Clean the dcache to the Point of Coherency.
|
||||
*
|
||||
* We need to do this through a kernel mapping (using the
|
||||
* user-space mapping has proved to be the wrong
|
||||
* solution). For that, we need to kmap one page at a time,
|
||||
* and iterate over the range.
|
||||
*/
|
||||
|
||||
VM_BUG_ON(size & ~PAGE_MASK);
|
||||
|
||||
while (size) {
|
||||
void *va = kmap_atomic_pfn(pfn);
|
||||
|
||||
kvm_flush_dcache_to_poc(va, PAGE_SIZE);
|
||||
|
||||
size -= PAGE_SIZE;
|
||||
pfn++;
|
||||
|
||||
kunmap_atomic(va);
|
||||
}
|
||||
}
|
||||
|
||||
static inline void __invalidate_icache_guest_page(kvm_pfn_t pfn,
|
||||
unsigned long size)
|
||||
{
|
||||
u32 iclsz;
|
||||
|
||||
/*
|
||||
* If we are going to insert an instruction page and the icache is
|
||||
* either VIPT or PIPT, there is a potential problem where the host
|
||||
* (or another VM) may have used the same page as this guest, and we
|
||||
* read incorrect data from the icache. If we're using a PIPT cache,
|
||||
* we can invalidate just that page, but if we are using a VIPT cache
|
||||
* we need to invalidate the entire icache - damn shame - as written
|
||||
* in the ARM ARM (DDI 0406C.b - Page B3-1393).
|
||||
*
|
||||
* VIVT caches are tagged using both the ASID and the VMID and doesn't
|
||||
* need any kind of flushing (DDI 0406C.b - Page B3-1392).
|
||||
*/
|
||||
|
||||
VM_BUG_ON(size & ~PAGE_MASK);
|
||||
|
||||
if (icache_is_vivt_asid_tagged())
|
||||
return;
|
||||
|
||||
if (!icache_is_pipt()) {
|
||||
/* any kind of VIPT cache */
|
||||
__flush_icache_all();
|
||||
return;
|
||||
}
|
||||
|
||||
/*
|
||||
* CTR IminLine contains Log2 of the number of words in the
|
||||
* cache line, so we can get the number of words as
|
||||
* 2 << (IminLine - 1). To get the number of bytes, we
|
||||
* multiply by 4 (the number of bytes in a 32-bit word), and
|
||||
* get 4 << (IminLine).
|
||||
*/
|
||||
iclsz = 4 << (read_cpuid(CPUID_CACHETYPE) & 0xf);
|
||||
|
||||
while (size) {
|
||||
void *va = kmap_atomic_pfn(pfn);
|
||||
void *end = va + PAGE_SIZE;
|
||||
void *addr = va;
|
||||
|
||||
do {
|
||||
write_sysreg(addr, ICIMVAU);
|
||||
addr += iclsz;
|
||||
} while (addr < end);
|
||||
|
||||
dsb(ishst);
|
||||
isb();
|
||||
|
||||
size -= PAGE_SIZE;
|
||||
pfn++;
|
||||
|
||||
kunmap_atomic(va);
|
||||
}
|
||||
|
||||
/* Check if we need to invalidate the BTB */
|
||||
if ((read_cpuid_ext(CPUID_EXT_MMFR1) >> 28) != 4) {
|
||||
write_sysreg(0, BPIALLIS);
|
||||
dsb(ishst);
|
||||
isb();
|
||||
}
|
||||
}
|
||||
|
||||
static inline void __kvm_flush_dcache_pte(pte_t pte)
|
||||
{
|
||||
void *va = kmap_atomic(pte_page(pte));
|
||||
|
||||
kvm_flush_dcache_to_poc(va, PAGE_SIZE);
|
||||
|
||||
kunmap_atomic(va);
|
||||
}
|
||||
|
||||
static inline void __kvm_flush_dcache_pmd(pmd_t pmd)
|
||||
{
|
||||
unsigned long size = PMD_SIZE;
|
||||
kvm_pfn_t pfn = pmd_pfn(pmd);
|
||||
|
||||
while (size) {
|
||||
void *va = kmap_atomic_pfn(pfn);
|
||||
|
||||
kvm_flush_dcache_to_poc(va, PAGE_SIZE);
|
||||
|
||||
pfn++;
|
||||
size -= PAGE_SIZE;
|
||||
|
||||
kunmap_atomic(va);
|
||||
}
|
||||
}
|
||||
|
||||
static inline void __kvm_flush_dcache_pud(pud_t pud)
|
||||
{
|
||||
}
|
||||
|
||||
#define kvm_virt_to_phys(x) virt_to_idmap((unsigned long)(x))
|
||||
|
||||
void kvm_set_way_flush(struct kvm_vcpu *vcpu);
|
||||
void kvm_toggle_cache(struct kvm_vcpu *vcpu, bool was_enabled);
|
||||
|
||||
static inline bool __kvm_cpu_uses_extended_idmap(void)
|
||||
{
|
||||
return false;
|
||||
}
|
||||
|
||||
static inline unsigned long __kvm_idmap_ptrs_per_pgd(void)
|
||||
{
|
||||
return PTRS_PER_PGD;
|
||||
}
|
||||
|
||||
static inline void __kvm_extend_hypmap(pgd_t *boot_hyp_pgd,
|
||||
pgd_t *hyp_pgd,
|
||||
pgd_t *merged_hyp_pgd,
|
||||
unsigned long hyp_idmap_start) { }
|
||||
|
||||
static inline unsigned int kvm_get_vmid_bits(void)
|
||||
{
|
||||
return 8;
|
||||
}
|
||||
|
||||
/*
|
||||
* We are not in the kvm->srcu critical section most of the time, so we take
|
||||
* the SRCU read lock here. Since we copy the data from the user page, we
|
||||
* can immediately drop the lock again.
|
||||
*/
|
||||
static inline int kvm_read_guest_lock(struct kvm *kvm,
|
||||
gpa_t gpa, void *data, unsigned long len)
|
||||
{
|
||||
int srcu_idx = srcu_read_lock(&kvm->srcu);
|
||||
int ret = kvm_read_guest(kvm, gpa, data, len);
|
||||
|
||||
srcu_read_unlock(&kvm->srcu, srcu_idx);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
static inline int kvm_write_guest_lock(struct kvm *kvm, gpa_t gpa,
|
||||
const void *data, unsigned long len)
|
||||
{
|
||||
int srcu_idx = srcu_read_lock(&kvm->srcu);
|
||||
int ret = kvm_write_guest(kvm, gpa, data, len);
|
||||
|
||||
srcu_read_unlock(&kvm->srcu, srcu_idx);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
static inline void *kvm_get_hyp_vector(void)
|
||||
{
|
||||
switch(read_cpuid_part()) {
|
||||
#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
|
||||
case ARM_CPU_PART_CORTEX_A12:
|
||||
case ARM_CPU_PART_CORTEX_A17:
|
||||
{
|
||||
extern char __kvm_hyp_vector_bp_inv[];
|
||||
return kvm_ksym_ref(__kvm_hyp_vector_bp_inv);
|
||||
}
|
||||
|
||||
case ARM_CPU_PART_BRAHMA_B15:
|
||||
case ARM_CPU_PART_CORTEX_A15:
|
||||
{
|
||||
extern char __kvm_hyp_vector_ic_inv[];
|
||||
return kvm_ksym_ref(__kvm_hyp_vector_ic_inv);
|
||||
}
|
||||
#endif
|
||||
default:
|
||||
{
|
||||
extern char __kvm_hyp_vector[];
|
||||
return kvm_ksym_ref(__kvm_hyp_vector);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
static inline int kvm_map_vectors(void)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
|
||||
static inline int hyp_map_aux_data(void)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
|
||||
#define kvm_phys_to_vttbr(addr) (addr)
|
||||
|
||||
static inline void kvm_set_ipa_limit(void) {}
|
||||
|
||||
static __always_inline u64 kvm_get_vttbr(struct kvm *kvm)
|
||||
{
|
||||
struct kvm_vmid *vmid = &kvm->arch.vmid;
|
||||
u64 vmid_field, baddr;
|
||||
|
||||
baddr = kvm->arch.pgd_phys;
|
||||
vmid_field = (u64)vmid->vmid << VTTBR_VMID_SHIFT;
|
||||
return kvm_phys_to_vttbr(baddr) | vmid_field;
|
||||
}
|
||||
|
||||
#endif /* !__ASSEMBLY__ */
|
||||
|
||||
#endif /* __ARM_KVM_MMU_H__ */
|
|
@ -1,14 +0,0 @@
|
|||
/* SPDX-License-Identifier: GPL-2.0 */
|
||||
/* Copyright (C) 2018 - Arm Ltd */
|
||||
|
||||
#ifndef __ARM_KVM_RAS_H__
|
||||
#define __ARM_KVM_RAS_H__
|
||||
|
||||
#include <linux/types.h>
|
||||
|
||||
static inline int kvm_handle_guest_sea(phys_addr_t addr, unsigned int esr)
|
||||
{
|
||||
return -1;
|
||||
}
|
||||
|
||||
#endif /* __ARM_KVM_RAS_H__ */
|
|
@ -104,26 +104,6 @@
|
|||
*/
|
||||
#define L_PGD_SWAPPER (_AT(pgdval_t, 1) << 55) /* swapper_pg_dir entry */
|
||||
|
||||
/*
|
||||
* 2nd stage PTE definitions for LPAE.
|
||||
*/
|
||||
#define L_PTE_S2_MT_UNCACHED (_AT(pteval_t, 0x0) << 2) /* strongly ordered */
|
||||
#define L_PTE_S2_MT_WRITETHROUGH (_AT(pteval_t, 0xa) << 2) /* normal inner write-through */
|
||||
#define L_PTE_S2_MT_WRITEBACK (_AT(pteval_t, 0xf) << 2) /* normal inner write-back */
|
||||
#define L_PTE_S2_MT_DEV_SHARED (_AT(pteval_t, 0x1) << 2) /* device */
|
||||
#define L_PTE_S2_MT_MASK (_AT(pteval_t, 0xf) << 2)
|
||||
|
||||
#define L_PTE_S2_RDONLY (_AT(pteval_t, 1) << 6) /* HAP[1] */
|
||||
#define L_PTE_S2_RDWR (_AT(pteval_t, 3) << 6) /* HAP[2:1] */
|
||||
|
||||
#define L_PMD_S2_RDONLY (_AT(pmdval_t, 1) << 6) /* HAP[1] */
|
||||
#define L_PMD_S2_RDWR (_AT(pmdval_t, 3) << 6) /* HAP[2:1] */
|
||||
|
||||
/*
|
||||
* Hyp-mode PL2 PTE definitions for LPAE.
|
||||
*/
|
||||
#define L_PTE_HYP L_PTE_USER
|
||||
|
||||
#ifndef __ASSEMBLY__
|
||||
|
||||
#define pud_none(pud) (!pud_val(pud))
|
||||
|
|
|
@ -80,9 +80,6 @@ extern void __pgd_error(const char *file, int line, pgd_t);
|
|||
|
||||
extern pgprot_t pgprot_user;
|
||||
extern pgprot_t pgprot_kernel;
|
||||
extern pgprot_t pgprot_hyp_device;
|
||||
extern pgprot_t pgprot_s2;
|
||||
extern pgprot_t pgprot_s2_device;
|
||||
|
||||
#define _MOD_PROT(p, b) __pgprot(pgprot_val(p) | (b))
|
||||
|
||||
|
@ -95,12 +92,6 @@ extern pgprot_t pgprot_s2_device;
|
|||
#define PAGE_READONLY_EXEC _MOD_PROT(pgprot_user, L_PTE_USER | L_PTE_RDONLY)
|
||||
#define PAGE_KERNEL _MOD_PROT(pgprot_kernel, L_PTE_XN)
|
||||
#define PAGE_KERNEL_EXEC pgprot_kernel
|
||||
#define PAGE_HYP _MOD_PROT(pgprot_kernel, L_PTE_HYP | L_PTE_XN)
|
||||
#define PAGE_HYP_EXEC _MOD_PROT(pgprot_kernel, L_PTE_HYP | L_PTE_RDONLY)
|
||||
#define PAGE_HYP_RO _MOD_PROT(pgprot_kernel, L_PTE_HYP | L_PTE_RDONLY | L_PTE_XN)
|
||||
#define PAGE_HYP_DEVICE _MOD_PROT(pgprot_hyp_device, L_PTE_HYP)
|
||||
#define PAGE_S2 _MOD_PROT(pgprot_s2, L_PTE_S2_RDONLY | L_PTE_XN)
|
||||
#define PAGE_S2_DEVICE _MOD_PROT(pgprot_s2_device, L_PTE_S2_RDONLY | L_PTE_XN)
|
||||
|
||||
#define __PAGE_NONE __pgprot(_L_PTE_DEFAULT | L_PTE_RDONLY | L_PTE_XN | L_PTE_NONE)
|
||||
#define __PAGE_SHARED __pgprot(_L_PTE_DEFAULT | L_PTE_USER | L_PTE_XN)
|
||||
|
|
|
@ -10,8 +10,6 @@ extern char __idmap_text_start[];
|
|||
extern char __idmap_text_end[];
|
||||
extern char __entry_text_start[];
|
||||
extern char __entry_text_end[];
|
||||
extern char __hyp_idmap_text_start[];
|
||||
extern char __hyp_idmap_text_end[];
|
||||
|
||||
static inline bool in_entry_text(unsigned long addr)
|
||||
{
|
||||
|
@ -22,9 +20,7 @@ static inline bool in_entry_text(unsigned long addr)
|
|||
static inline bool in_idmap_text(unsigned long addr)
|
||||
{
|
||||
void *a = (void *)addr;
|
||||
return memory_contains(__idmap_text_start, __idmap_text_end, a, 1) ||
|
||||
memory_contains(__hyp_idmap_text_start, __hyp_idmap_text_end,
|
||||
a, 1);
|
||||
return memory_contains(__idmap_text_start, __idmap_text_end, a, 1);
|
||||
}
|
||||
|
||||
#endif /* _ASM_ARM_SECTIONS_H */
|
||||
|
|
|
@ -1,75 +0,0 @@
|
|||
/* SPDX-License-Identifier: GPL-2.0-only */
|
||||
/*
|
||||
* Copyright (C) 2016 - ARM Ltd
|
||||
*
|
||||
* stage2 page table helpers
|
||||
*/
|
||||
|
||||
#ifndef __ARM_S2_PGTABLE_H_
|
||||
#define __ARM_S2_PGTABLE_H_
|
||||
|
||||
/*
|
||||
* kvm_mmu_cache_min_pages() is the number of pages required
|
||||
* to install a stage-2 translation. We pre-allocate the entry
|
||||
* level table at VM creation. Since we have a 3 level page-table,
|
||||
* we need only two pages to add a new mapping.
|
||||
*/
|
||||
#define kvm_mmu_cache_min_pages(kvm) 2
|
||||
|
||||
#define stage2_pgd_none(kvm, pgd) pgd_none(pgd)
|
||||
#define stage2_pgd_clear(kvm, pgd) pgd_clear(pgd)
|
||||
#define stage2_pgd_present(kvm, pgd) pgd_present(pgd)
|
||||
#define stage2_pgd_populate(kvm, pgd, pud) pgd_populate(NULL, pgd, pud)
|
||||
#define stage2_pud_offset(kvm, pgd, address) pud_offset(pgd, address)
|
||||
#define stage2_pud_free(kvm, pud) do { } while (0)
|
||||
|
||||
#define stage2_pud_none(kvm, pud) pud_none(pud)
|
||||
#define stage2_pud_clear(kvm, pud) pud_clear(pud)
|
||||
#define stage2_pud_present(kvm, pud) pud_present(pud)
|
||||
#define stage2_pud_populate(kvm, pud, pmd) pud_populate(NULL, pud, pmd)
|
||||
#define stage2_pmd_offset(kvm, pud, address) pmd_offset(pud, address)
|
||||
#define stage2_pmd_free(kvm, pmd) free_page((unsigned long)pmd)
|
||||
|
||||
#define stage2_pud_huge(kvm, pud) pud_huge(pud)
|
||||
|
||||
/* Open coded p*d_addr_end that can deal with 64bit addresses */
|
||||
static inline phys_addr_t
|
||||
stage2_pgd_addr_end(struct kvm *kvm, phys_addr_t addr, phys_addr_t end)
|
||||
{
|
||||
phys_addr_t boundary = (addr + PGDIR_SIZE) & PGDIR_MASK;
|
||||
|
||||
return (boundary - 1 < end - 1) ? boundary : end;
|
||||
}
|
||||
|
||||
#define stage2_pud_addr_end(kvm, addr, end) (end)
|
||||
|
||||
static inline phys_addr_t
|
||||
stage2_pmd_addr_end(struct kvm *kvm, phys_addr_t addr, phys_addr_t end)
|
||||
{
|
||||
phys_addr_t boundary = (addr + PMD_SIZE) & PMD_MASK;
|
||||
|
||||
return (boundary - 1 < end - 1) ? boundary : end;
|
||||
}
|
||||
|
||||
#define stage2_pgd_index(kvm, addr) pgd_index(addr)
|
||||
|
||||
#define stage2_pte_table_empty(kvm, ptep) kvm_page_empty(ptep)
|
||||
#define stage2_pmd_table_empty(kvm, pmdp) kvm_page_empty(pmdp)
|
||||
#define stage2_pud_table_empty(kvm, pudp) false
|
||||
|
||||
static inline bool kvm_stage2_has_pud(struct kvm *kvm)
|
||||
{
|
||||
return false;
|
||||
}
|
||||
|
||||
#define S2_PMD_MASK PMD_MASK
|
||||
#define S2_PMD_SIZE PMD_SIZE
|
||||
#define S2_PUD_MASK PUD_MASK
|
||||
#define S2_PUD_SIZE PUD_SIZE
|
||||
|
||||
static inline bool kvm_stage2_has_pmd(struct kvm *kvm)
|
||||
{
|
||||
return true;
|
||||
}
|
||||
|
||||
#endif /* __ARM_S2_PGTABLE_H_ */
|
|
@ -39,8 +39,6 @@ static inline void sync_boot_mode(void)
|
|||
sync_cache_r(&__boot_cpu_mode);
|
||||
}
|
||||
|
||||
void __hyp_set_vectors(unsigned long phys_vector_base);
|
||||
void __hyp_reset_vectors(void);
|
||||
#else
|
||||
#define __boot_cpu_mode (SVC_MODE)
|
||||
#define sync_boot_mode()
|
||||
|
@ -67,18 +65,6 @@ static inline bool is_kernel_in_hyp_mode(void)
|
|||
return false;
|
||||
}
|
||||
|
||||
static inline bool has_vhe(void)
|
||||
{
|
||||
return false;
|
||||
}
|
||||
|
||||
/* The section containing the hypervisor idmap text */
|
||||
extern char __hyp_idmap_text_start[];
|
||||
extern char __hyp_idmap_text_end[];
|
||||
|
||||
/* The section containing the hypervisor text */
|
||||
extern char __hyp_text_start[];
|
||||
extern char __hyp_text_end[];
|
||||
#endif
|
||||
|
||||
#else
|
||||
|
@ -87,9 +73,6 @@ extern char __hyp_text_end[];
|
|||
|
||||
#define HVC_SET_VECTORS 0
|
||||
#define HVC_SOFT_RESTART 1
|
||||
#define HVC_RESET_VECTORS 2
|
||||
|
||||
#define HVC_STUB_HCALL_NR 3
|
||||
|
||||
#endif /* __ASSEMBLY__ */
|
||||
|
||||
|
|
|
@ -1,314 +0,0 @@
|
|||
/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
|
||||
/*
|
||||
* Copyright (C) 2012 - Virtual Open Systems and Columbia University
|
||||
* Author: Christoffer Dall <c.dall@virtualopensystems.com>
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify
|
||||
* it under the terms of the GNU General Public License, version 2, as
|
||||
* published by the Free Software Foundation.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License
|
||||
* along with this program; if not, write to the Free Software
|
||||
* Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
|
||||
*/
|
||||
|
||||
#ifndef __ARM_KVM_H__
|
||||
#define __ARM_KVM_H__
|
||||
|
||||
#include <linux/types.h>
|
||||
#include <linux/psci.h>
|
||||
#include <asm/ptrace.h>
|
||||
|
||||
#define __KVM_HAVE_GUEST_DEBUG
|
||||
#define __KVM_HAVE_IRQ_LINE
|
||||
#define __KVM_HAVE_READONLY_MEM
|
||||
#define __KVM_HAVE_VCPU_EVENTS
|
||||
|
||||
#define KVM_COALESCED_MMIO_PAGE_OFFSET 1
|
||||
|
||||
#define KVM_REG_SIZE(id) \
|
||||
(1U << (((id) & KVM_REG_SIZE_MASK) >> KVM_REG_SIZE_SHIFT))
|
||||
|
||||
/* Valid for svc_regs, abt_regs, und_regs, irq_regs in struct kvm_regs */
|
||||
#define KVM_ARM_SVC_sp svc_regs[0]
|
||||
#define KVM_ARM_SVC_lr svc_regs[1]
|
||||
#define KVM_ARM_SVC_spsr svc_regs[2]
|
||||
#define KVM_ARM_ABT_sp abt_regs[0]
|
||||
#define KVM_ARM_ABT_lr abt_regs[1]
|
||||
#define KVM_ARM_ABT_spsr abt_regs[2]
|
||||
#define KVM_ARM_UND_sp und_regs[0]
|
||||
#define KVM_ARM_UND_lr und_regs[1]
|
||||
#define KVM_ARM_UND_spsr und_regs[2]
|
||||
#define KVM_ARM_IRQ_sp irq_regs[0]
|
||||
#define KVM_ARM_IRQ_lr irq_regs[1]
|
||||
#define KVM_ARM_IRQ_spsr irq_regs[2]
|
||||
|
||||
/* Valid only for fiq_regs in struct kvm_regs */
|
||||
#define KVM_ARM_FIQ_r8 fiq_regs[0]
|
||||
#define KVM_ARM_FIQ_r9 fiq_regs[1]
|
||||
#define KVM_ARM_FIQ_r10 fiq_regs[2]
|
||||
#define KVM_ARM_FIQ_fp fiq_regs[3]
|
||||
#define KVM_ARM_FIQ_ip fiq_regs[4]
|
||||
#define KVM_ARM_FIQ_sp fiq_regs[5]
|
||||
#define KVM_ARM_FIQ_lr fiq_regs[6]
|
||||
#define KVM_ARM_FIQ_spsr fiq_regs[7]
|
||||
|
||||
struct kvm_regs {
|
||||
struct pt_regs usr_regs; /* R0_usr - R14_usr, PC, CPSR */
|
||||
unsigned long svc_regs[3]; /* SP_svc, LR_svc, SPSR_svc */
|
||||
unsigned long abt_regs[3]; /* SP_abt, LR_abt, SPSR_abt */
|
||||
unsigned long und_regs[3]; /* SP_und, LR_und, SPSR_und */
|
||||
unsigned long irq_regs[3]; /* SP_irq, LR_irq, SPSR_irq */
|
||||
unsigned long fiq_regs[8]; /* R8_fiq - R14_fiq, SPSR_fiq */
|
||||
};
|
||||
|
||||
/* Supported Processor Types */
|
||||
#define KVM_ARM_TARGET_CORTEX_A15 0
|
||||
#define KVM_ARM_TARGET_CORTEX_A7 1
|
||||
#define KVM_ARM_NUM_TARGETS 2
|
||||
|
||||
/* KVM_ARM_SET_DEVICE_ADDR ioctl id encoding */
|
||||
#define KVM_ARM_DEVICE_TYPE_SHIFT 0
|
||||
#define KVM_ARM_DEVICE_TYPE_MASK (0xffff << KVM_ARM_DEVICE_TYPE_SHIFT)
|
||||
#define KVM_ARM_DEVICE_ID_SHIFT 16
|
||||
#define KVM_ARM_DEVICE_ID_MASK (0xffff << KVM_ARM_DEVICE_ID_SHIFT)
|
||||
|
||||
/* Supported device IDs */
|
||||
#define KVM_ARM_DEVICE_VGIC_V2 0
|
||||
|
||||
/* Supported VGIC address types */
|
||||
#define KVM_VGIC_V2_ADDR_TYPE_DIST 0
|
||||
#define KVM_VGIC_V2_ADDR_TYPE_CPU 1
|
||||
|
||||
#define KVM_VGIC_V2_DIST_SIZE 0x1000
|
||||
#define KVM_VGIC_V2_CPU_SIZE 0x2000
|
||||
|
||||
/* Supported VGICv3 address types */
|
||||
#define KVM_VGIC_V3_ADDR_TYPE_DIST 2
|
||||
#define KVM_VGIC_V3_ADDR_TYPE_REDIST 3
|
||||
#define KVM_VGIC_ITS_ADDR_TYPE 4
|
||||
#define KVM_VGIC_V3_ADDR_TYPE_REDIST_REGION 5
|
||||
|
||||
#define KVM_VGIC_V3_DIST_SIZE SZ_64K
|
||||
#define KVM_VGIC_V3_REDIST_SIZE (2 * SZ_64K)
|
||||
#define KVM_VGIC_V3_ITS_SIZE (2 * SZ_64K)
|
||||
|
||||
#define KVM_ARM_VCPU_POWER_OFF 0 /* CPU is started in OFF state */
|
||||
#define KVM_ARM_VCPU_PSCI_0_2 1 /* CPU uses PSCI v0.2 */
|
||||
|
||||
struct kvm_vcpu_init {
|
||||
__u32 target;
|
||||
__u32 features[7];
|
||||
};
|
||||
|
||||
struct kvm_sregs {
|
||||
};
|
||||
|
||||
struct kvm_fpu {
|
||||
};
|
||||
|
||||
struct kvm_guest_debug_arch {
|
||||
};
|
||||
|
||||
struct kvm_debug_exit_arch {
|
||||
};
|
||||
|
||||
struct kvm_sync_regs {
|
||||
/* Used with KVM_CAP_ARM_USER_IRQ */
|
||||
__u64 device_irq_level;
|
||||
};
|
||||
|
||||
struct kvm_arch_memory_slot {
|
||||
};
|
||||
|
||||
/* for KVM_GET/SET_VCPU_EVENTS */
|
||||
struct kvm_vcpu_events {
|
||||
struct {
|
||||
__u8 serror_pending;
|
||||
__u8 serror_has_esr;
|
||||
__u8 ext_dabt_pending;
|
||||
/* Align it to 8 bytes */
|
||||
__u8 pad[5];
|
||||
__u64 serror_esr;
|
||||
} exception;
|
||||
__u32 reserved[12];
|
||||
};
|
||||
|
||||
/* If you need to interpret the index values, here is the key: */
|
||||
#define KVM_REG_ARM_COPROC_MASK 0x000000000FFF0000
|
||||
#define KVM_REG_ARM_COPROC_SHIFT 16
|
||||
#define KVM_REG_ARM_32_OPC2_MASK 0x0000000000000007
|
||||
#define KVM_REG_ARM_32_OPC2_SHIFT 0
|
||||
#define KVM_REG_ARM_OPC1_MASK 0x0000000000000078
|
||||
#define KVM_REG_ARM_OPC1_SHIFT 3
|
||||
#define KVM_REG_ARM_CRM_MASK 0x0000000000000780
|
||||
#define KVM_REG_ARM_CRM_SHIFT 7
|
||||
#define KVM_REG_ARM_32_CRN_MASK 0x0000000000007800
|
||||
#define KVM_REG_ARM_32_CRN_SHIFT 11
|
||||
/*
|
||||
* For KVM currently all guest registers are nonsecure, but we reserve a bit
|
||||
* in the encoding to distinguish secure from nonsecure for AArch32 system
|
||||
* registers that are banked by security. This is 1 for the secure banked
|
||||
* register, and 0 for the nonsecure banked register or if the register is
|
||||
* not banked by security.
|
||||
*/
|
||||
#define KVM_REG_ARM_SECURE_MASK 0x0000000010000000
|
||||
#define KVM_REG_ARM_SECURE_SHIFT 28
|
||||
|
||||
#define ARM_CP15_REG_SHIFT_MASK(x,n) \
|
||||
(((x) << KVM_REG_ARM_ ## n ## _SHIFT) & KVM_REG_ARM_ ## n ## _MASK)
|
||||
|
||||
#define __ARM_CP15_REG(op1,crn,crm,op2) \
|
||||
(KVM_REG_ARM | (15 << KVM_REG_ARM_COPROC_SHIFT) | \
|
||||
ARM_CP15_REG_SHIFT_MASK(op1, OPC1) | \
|
||||
ARM_CP15_REG_SHIFT_MASK(crn, 32_CRN) | \
|
||||
ARM_CP15_REG_SHIFT_MASK(crm, CRM) | \
|
||||
ARM_CP15_REG_SHIFT_MASK(op2, 32_OPC2))
|
||||
|
||||
#define ARM_CP15_REG32(...) (__ARM_CP15_REG(__VA_ARGS__) | KVM_REG_SIZE_U32)
|
||||
|
||||
#define __ARM_CP15_REG64(op1,crm) \
|
||||
(__ARM_CP15_REG(op1, 0, crm, 0) | KVM_REG_SIZE_U64)
|
||||
#define ARM_CP15_REG64(...) __ARM_CP15_REG64(__VA_ARGS__)
|
||||
|
||||
/* PL1 Physical Timer Registers */
|
||||
#define KVM_REG_ARM_PTIMER_CTL ARM_CP15_REG32(0, 14, 2, 1)
|
||||
#define KVM_REG_ARM_PTIMER_CNT ARM_CP15_REG64(0, 14)
|
||||
#define KVM_REG_ARM_PTIMER_CVAL ARM_CP15_REG64(2, 14)
|
||||
|
||||
/* Virtual Timer Registers */
|
||||
#define KVM_REG_ARM_TIMER_CTL ARM_CP15_REG32(0, 14, 3, 1)
|
||||
#define KVM_REG_ARM_TIMER_CNT ARM_CP15_REG64(1, 14)
|
||||
#define KVM_REG_ARM_TIMER_CVAL ARM_CP15_REG64(3, 14)
|
||||
|
||||
/* Normal registers are mapped as coprocessor 16. */
|
||||
#define KVM_REG_ARM_CORE (0x0010 << KVM_REG_ARM_COPROC_SHIFT)
|
||||
#define KVM_REG_ARM_CORE_REG(name) (offsetof(struct kvm_regs, name) / 4)
|
||||
|
||||
/* Some registers need more space to represent values. */
|
||||
#define KVM_REG_ARM_DEMUX (0x0011 << KVM_REG_ARM_COPROC_SHIFT)
|
||||
#define KVM_REG_ARM_DEMUX_ID_MASK 0x000000000000FF00
|
||||
#define KVM_REG_ARM_DEMUX_ID_SHIFT 8
|
||||
#define KVM_REG_ARM_DEMUX_ID_CCSIDR (0x00 << KVM_REG_ARM_DEMUX_ID_SHIFT)
|
||||
#define KVM_REG_ARM_DEMUX_VAL_MASK 0x00000000000000FF
|
||||
#define KVM_REG_ARM_DEMUX_VAL_SHIFT 0
|
||||
|
||||
/* VFP registers: we could overload CP10 like ARM does, but that's ugly. */
|
||||
#define KVM_REG_ARM_VFP (0x0012 << KVM_REG_ARM_COPROC_SHIFT)
|
||||
#define KVM_REG_ARM_VFP_MASK 0x000000000000FFFF
|
||||
#define KVM_REG_ARM_VFP_BASE_REG 0x0
|
||||
#define KVM_REG_ARM_VFP_FPSID 0x1000
|
||||
#define KVM_REG_ARM_VFP_FPSCR 0x1001
|
||||
#define KVM_REG_ARM_VFP_MVFR1 0x1006
|
||||
#define KVM_REG_ARM_VFP_MVFR0 0x1007
|
||||
#define KVM_REG_ARM_VFP_FPEXC 0x1008
|
||||
#define KVM_REG_ARM_VFP_FPINST 0x1009
|
||||
#define KVM_REG_ARM_VFP_FPINST2 0x100A
|
||||
|
||||
/* KVM-as-firmware specific pseudo-registers */
|
||||
#define KVM_REG_ARM_FW (0x0014 << KVM_REG_ARM_COPROC_SHIFT)
|
||||
#define KVM_REG_ARM_FW_REG(r) (KVM_REG_ARM | KVM_REG_SIZE_U64 | \
|
||||
KVM_REG_ARM_FW | ((r) & 0xffff))
|
||||
#define KVM_REG_ARM_PSCI_VERSION KVM_REG_ARM_FW_REG(0)
|
||||
#define KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_1 KVM_REG_ARM_FW_REG(1)
|
||||
/* Higher values mean better protection. */
|
||||
#define KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_1_NOT_AVAIL 0
|
||||
#define KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_1_AVAIL 1
|
||||
#define KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_1_NOT_REQUIRED 2
|
||||
#define KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2 KVM_REG_ARM_FW_REG(2)
|
||||
/* Higher values mean better protection. */
|
||||
#define KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_NOT_AVAIL 0
|
||||
#define KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_UNKNOWN 1
|
||||
#define KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_AVAIL 2
|
||||
#define KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_NOT_REQUIRED 3
|
||||
#define KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_ENABLED (1U << 4)
|
||||
|
||||
/* Device Control API: ARM VGIC */
|
||||
#define KVM_DEV_ARM_VGIC_GRP_ADDR 0
|
||||
#define KVM_DEV_ARM_VGIC_GRP_DIST_REGS 1
|
||||
#define KVM_DEV_ARM_VGIC_GRP_CPU_REGS 2
|
||||
#define KVM_DEV_ARM_VGIC_CPUID_SHIFT 32
|
||||
#define KVM_DEV_ARM_VGIC_CPUID_MASK (0xffULL << KVM_DEV_ARM_VGIC_CPUID_SHIFT)
|
||||
#define KVM_DEV_ARM_VGIC_V3_MPIDR_SHIFT 32
|
||||
#define KVM_DEV_ARM_VGIC_V3_MPIDR_MASK \
|
||||
(0xffffffffULL << KVM_DEV_ARM_VGIC_V3_MPIDR_SHIFT)
|
||||
#define KVM_DEV_ARM_VGIC_OFFSET_SHIFT 0
|
||||
#define KVM_DEV_ARM_VGIC_OFFSET_MASK (0xffffffffULL << KVM_DEV_ARM_VGIC_OFFSET_SHIFT)
|
||||
#define KVM_DEV_ARM_VGIC_SYSREG_INSTR_MASK (0xffff)
|
||||
#define KVM_DEV_ARM_VGIC_GRP_NR_IRQS 3
|
||||
#define KVM_DEV_ARM_VGIC_GRP_CTRL 4
|
||||
#define KVM_DEV_ARM_VGIC_GRP_REDIST_REGS 5
|
||||
#define KVM_DEV_ARM_VGIC_GRP_CPU_SYSREGS 6
|
||||
#define KVM_DEV_ARM_VGIC_GRP_LEVEL_INFO 7
|
||||
#define KVM_DEV_ARM_VGIC_GRP_ITS_REGS 8
|
||||
#define KVM_DEV_ARM_VGIC_LINE_LEVEL_INFO_SHIFT 10
|
||||
#define KVM_DEV_ARM_VGIC_LINE_LEVEL_INFO_MASK \
|
||||
(0x3fffffULL << KVM_DEV_ARM_VGIC_LINE_LEVEL_INFO_SHIFT)
|
||||
#define KVM_DEV_ARM_VGIC_LINE_LEVEL_INTID_MASK 0x3ff
|
||||
#define VGIC_LEVEL_INFO_LINE_LEVEL 0
|
||||
|
||||
/* Device Control API on vcpu fd */
|
||||
#define KVM_ARM_VCPU_PMU_V3_CTRL 0
|
||||
#define KVM_ARM_VCPU_PMU_V3_IRQ 0
|
||||
#define KVM_ARM_VCPU_PMU_V3_INIT 1
|
||||
#define KVM_ARM_VCPU_TIMER_CTRL 1
|
||||
#define KVM_ARM_VCPU_TIMER_IRQ_VTIMER 0
|
||||
#define KVM_ARM_VCPU_TIMER_IRQ_PTIMER 1
|
||||
|
||||
#define KVM_DEV_ARM_VGIC_CTRL_INIT 0
|
||||
#define KVM_DEV_ARM_ITS_SAVE_TABLES 1
|
||||
#define KVM_DEV_ARM_ITS_RESTORE_TABLES 2
|
||||
#define KVM_DEV_ARM_VGIC_SAVE_PENDING_TABLES 3
|
||||
#define KVM_DEV_ARM_ITS_CTRL_RESET 4
|
||||
|
||||
/* KVM_IRQ_LINE irq field index values */
|
||||
#define KVM_ARM_IRQ_VCPU2_SHIFT 28
|
||||
#define KVM_ARM_IRQ_VCPU2_MASK 0xf
|
||||
#define KVM_ARM_IRQ_TYPE_SHIFT 24
|
||||
#define KVM_ARM_IRQ_TYPE_MASK 0xf
|
||||
#define KVM_ARM_IRQ_VCPU_SHIFT 16
|
||||
#define KVM_ARM_IRQ_VCPU_MASK 0xff
|
||||
#define KVM_ARM_IRQ_NUM_SHIFT 0
|
||||
#define KVM_ARM_IRQ_NUM_MASK 0xffff
|
||||
|
||||
/* irq_type field */
|
||||
#define KVM_ARM_IRQ_TYPE_CPU 0
|
||||
#define KVM_ARM_IRQ_TYPE_SPI 1
|
||||
#define KVM_ARM_IRQ_TYPE_PPI 2
|
||||
|
||||
/* out-of-kernel GIC cpu interrupt injection irq_number field */
|
||||
#define KVM_ARM_IRQ_CPU_IRQ 0
|
||||
#define KVM_ARM_IRQ_CPU_FIQ 1
|
||||
|
||||
/*
|
||||
* This used to hold the highest supported SPI, but it is now obsolete
|
||||
* and only here to provide source code level compatibility with older
|
||||
* userland. The highest SPI number can be set via KVM_DEV_ARM_VGIC_GRP_NR_IRQS.
|
||||
*/
|
||||
#ifndef __KERNEL__
|
||||
#define KVM_ARM_IRQ_GIC_MAX 127
|
||||
#endif
|
||||
|
||||
/* One single KVM irqchip, ie. the VGIC */
|
||||
#define KVM_NR_IRQCHIPS 1
|
||||
|
||||
/* PSCI interface */
|
||||
#define KVM_PSCI_FN_BASE 0x95c1ba5e
|
||||
#define KVM_PSCI_FN(n) (KVM_PSCI_FN_BASE + (n))
|
||||
|
||||
#define KVM_PSCI_FN_CPU_SUSPEND KVM_PSCI_FN(0)
|
||||
#define KVM_PSCI_FN_CPU_OFF KVM_PSCI_FN(1)
|
||||
#define KVM_PSCI_FN_CPU_ON KVM_PSCI_FN(2)
|
||||
#define KVM_PSCI_FN_MIGRATE KVM_PSCI_FN(3)
|
||||
|
||||
#define KVM_PSCI_RET_SUCCESS PSCI_RET_SUCCESS
|
||||
#define KVM_PSCI_RET_NI PSCI_RET_NOT_SUPPORTED
|
||||
#define KVM_PSCI_RET_INVAL PSCI_RET_INVALID_PARAMS
|
||||
#define KVM_PSCI_RET_DENIED PSCI_RET_DENIED
|
||||
|
||||
#endif /* __ARM_KVM_H__ */
|
|
@ -11,9 +11,6 @@
|
|||
#include <linux/sched.h>
|
||||
#include <linux/mm.h>
|
||||
#include <linux/dma-mapping.h>
|
||||
#ifdef CONFIG_KVM_ARM_HOST
|
||||
#include <linux/kvm_host.h>
|
||||
#endif
|
||||
#include <asm/cacheflush.h>
|
||||
#include <asm/glue-df.h>
|
||||
#include <asm/glue-pf.h>
|
||||
|
@ -167,14 +164,6 @@ int main(void)
|
|||
DEFINE(CACHE_WRITEBACK_ORDER, __CACHE_WRITEBACK_ORDER);
|
||||
DEFINE(CACHE_WRITEBACK_GRANULE, __CACHE_WRITEBACK_GRANULE);
|
||||
BLANK();
|
||||
#ifdef CONFIG_KVM_ARM_HOST
|
||||
DEFINE(VCPU_GUEST_CTXT, offsetof(struct kvm_vcpu, arch.ctxt));
|
||||
DEFINE(VCPU_HOST_CTXT, offsetof(struct kvm_vcpu, arch.host_cpu_context));
|
||||
DEFINE(CPU_CTXT_VFP, offsetof(struct kvm_cpu_context, vfp));
|
||||
DEFINE(CPU_CTXT_GP_REGS, offsetof(struct kvm_cpu_context, gp_regs));
|
||||
DEFINE(GP_REGS_USR, offsetof(struct kvm_regs, usr_regs));
|
||||
#endif
|
||||
BLANK();
|
||||
#ifdef CONFIG_VDSO
|
||||
DEFINE(VDSO_DATA_SIZE, sizeof(union vdso_data_store));
|
||||
#endif
|
||||
|
|
|
@@ -189,19 +189,19 @@ ARM_BE8(orr r7, r7, #(1 << 25)) @ HSCTLR.EE
ENDPROC(__hyp_stub_install_secondary)

__hyp_stub_do_trap:
#ifdef ZIMAGE
teq r0, #HVC_SET_VECTORS
bne 1f
/* Only the ZIMAGE stubs can change the HYP vectors */
mcr p15, 4, r1, c12, c0, 0 @ set HVBAR
b __hyp_stub_exit
#endif

1: teq r0, #HVC_SOFT_RESTART
bne 1f
bne 2f
bx r1

1: teq r0, #HVC_RESET_VECTORS
beq __hyp_stub_exit

ldr r0, =HVC_STUB_ERR
2: ldr r0, =HVC_STUB_ERR
__ERET

__hyp_stub_exit:

@@ -210,26 +210,9 @@ __hyp_stub_exit:
ENDPROC(__hyp_stub_do_trap)

/*
 * __hyp_set_vectors: Call this after boot to set the initial hypervisor
 * vectors as part of hypervisor installation. On an SMP system, this should
 * be called on each CPU.
 *
 * r0 must be the physical address of the new vector table (which must lie in
 * the bottom 4GB of physical address space.
 *
 * r0 must be 32-byte aligned.
 *
 * Before calling this, you must check that the stub hypervisor is installed
 * everywhere, by waiting for any secondary CPUs to be brought up and then
 * checking that BOOT_CPU_MODE_HAVE_HYP(__boot_cpu_mode) is true.
 *
 * If not, there is a pre-existing hypervisor, some CPUs failed to boot, or
 * something else went wrong... in such cases, trying to install a new
 * hypervisor is unlikely to work as desired.
 *
 * When you call into your shiny new hypervisor, sp_hyp will contain junk,
 * so you will need to set that to something sensible at the new hypervisor's
 * initialisation entry point.
 * __hyp_set_vectors is only used when ZIMAGE must bounce between HYP
 * and SVC. For the kernel itself, the vectors are set once and for
 * all by the stubs.
 */
ENTRY(__hyp_set_vectors)
mov r1, r0

@@ -245,12 +228,6 @@ ENTRY(__hyp_soft_restart)
ret lr
ENDPROC(__hyp_soft_restart)

ENTRY(__hyp_reset_vectors)
mov r0, #HVC_RESET_VECTORS
__HVC(0)
ret lr
ENDPROC(__hyp_reset_vectors)

#ifndef ZIMAGE
.align 2
.L__boot_cpu_mode_offset:

@@ -162,14 +162,6 @@ SECTIONS
ASSERT((__proc_info_end - __proc_info_begin), "missing CPU support")
ASSERT((__arch_info_end - __arch_info_begin), "no machine record defined")

/*
 * The HYP init code can't be more than a page long,
 * and should not cross a page boundary.
 * The above comment applies as well.
 */
ASSERT(__hyp_idmap_text_end - (__hyp_idmap_text_start & PAGE_MASK) <= PAGE_SIZE,
"HYP init code too big or misaligned")

#ifdef CONFIG_XIP_DEFLATED_DATA
/*
 * The .bss is used as a stack area for __inflate_kernel_data() whose stack

@@ -170,12 +170,4 @@ __start_rodata_section_aligned = ALIGN(__start_rodata, 1 << SECTION_SHIFT);
ASSERT((__proc_info_end - __proc_info_begin), "missing CPU support")
ASSERT((__arch_info_end - __arch_info_begin), "no machine record defined")

/*
 * The HYP init code can't be more than a page long,
 * and should not cross a page boundary.
 * The above comment applies as well.
 */
ASSERT(__hyp_idmap_text_end - (__hyp_idmap_text_start & PAGE_MASK) <= PAGE_SIZE,
"HYP init code too big or misaligned")

#endif /* CONFIG_XIP_KERNEL */

@@ -31,20 +31,11 @@
*(.proc.info.init) \
__proc_info_end = .;

#define HYPERVISOR_TEXT \
__hyp_text_start = .; \
*(.hyp.text) \
__hyp_text_end = .;

#define IDMAP_TEXT \
ALIGN_FUNCTION(); \
__idmap_text_start = .; \
*(.idmap.text) \
__idmap_text_end = .; \
. = ALIGN(PAGE_SIZE); \
__hyp_idmap_text_start = .; \
*(.hyp.idmap.text) \
__hyp_idmap_text_end = .;

#define ARM_DISCARD \
*(.ARM.exidx.exit.text) \

@@ -72,7 +63,6 @@
SCHED_TEXT \
CPUIDLE_TEXT \
LOCK_TEXT \
HYPERVISOR_TEXT \
KPROBES_TEXT \
*(.gnu.warning) \
*(.glue_7) \

@@ -1,59 +0,0 @@
# SPDX-License-Identifier: GPL-2.0
#
# KVM configuration
#

source "virt/kvm/Kconfig"
source "virt/lib/Kconfig"

menuconfig VIRTUALIZATION
bool "Virtualization"
---help---
Say Y here to get to see options for using your Linux host to run
other operating systems inside virtual machines (guests).
This option alone does not add any kernel code.

If you say N, all options in this submenu will be skipped and
disabled.

if VIRTUALIZATION

config KVM
bool "Kernel-based Virtual Machine (KVM) support"
depends on MMU && OF
select PREEMPT_NOTIFIERS
select ARM_GIC
select ARM_GIC_V3
select ARM_GIC_V3_ITS
select HAVE_KVM_CPU_RELAX_INTERCEPT
select HAVE_KVM_ARCH_TLB_FLUSH_ALL
select KVM_MMIO
select KVM_ARM_HOST
select KVM_GENERIC_DIRTYLOG_READ_PROTECT
select SRCU
select MMU_NOTIFIER
select KVM_VFIO
select HAVE_KVM_EVENTFD
select HAVE_KVM_IRQFD
select HAVE_KVM_IRQCHIP
select HAVE_KVM_IRQ_ROUTING
select HAVE_KVM_MSI
select IRQ_BYPASS_MANAGER
select HAVE_KVM_IRQ_BYPASS
depends on ARM_VIRT_EXT && ARM_LPAE && ARM_ARCH_TIMER
---help---
Support hosting virtualized guest machines.

This module provides access to the hardware capabilities through
a character device node named /dev/kvm.

If unsure, say N.

config KVM_ARM_HOST
bool
---help---
Provides host support for ARM processors.

source "drivers/vhost/Kconfig"

endif # VIRTUALIZATION

@@ -1,43 +0,0 @@
# SPDX-License-Identifier: GPL-2.0
#
# Makefile for Kernel-based Virtual Machine module
#

plus_virt := $(call as-instr,.arch_extension virt,+virt)
ifeq ($(plus_virt),+virt)
plus_virt_def := -DREQUIRES_VIRT=1
endif

KVM := ../../../virt/kvm

ccflags-y += -I $(srctree)/$(src) -I $(srctree)/virt/kvm/arm/vgic
CFLAGS_$(KVM)/arm/arm.o := $(plus_virt_def)

AFLAGS_init.o := -Wa,-march=armv7-a$(plus_virt)
AFLAGS_interrupts.o := -Wa,-march=armv7-a$(plus_virt)

kvm-arm-y = $(KVM)/kvm_main.o $(KVM)/coalesced_mmio.o $(KVM)/eventfd.o $(KVM)/vfio.o

obj-$(CONFIG_KVM_ARM_HOST) += hyp/

obj-y += kvm-arm.o init.o interrupts.o
obj-y += handle_exit.o guest.o emulate.o reset.o
obj-y += coproc.o coproc_a15.o coproc_a7.o vgic-v3-coproc.o
obj-y += $(KVM)/arm/arm.o $(KVM)/arm/mmu.o $(KVM)/arm/mmio.o
obj-y += $(KVM)/arm/psci.o $(KVM)/arm/perf.o $(KVM)/arm/hypercalls.o
obj-y += $(KVM)/arm/aarch32.o

obj-y += $(KVM)/arm/vgic/vgic.o
obj-y += $(KVM)/arm/vgic/vgic-init.o
obj-y += $(KVM)/arm/vgic/vgic-irqfd.o
obj-y += $(KVM)/arm/vgic/vgic-v2.o
obj-y += $(KVM)/arm/vgic/vgic-v3.o
obj-y += $(KVM)/arm/vgic/vgic-v4.o
obj-y += $(KVM)/arm/vgic/vgic-mmio.o
obj-y += $(KVM)/arm/vgic/vgic-mmio-v2.o
obj-y += $(KVM)/arm/vgic/vgic-mmio-v3.o
obj-y += $(KVM)/arm/vgic/vgic-kvm-device.o
obj-y += $(KVM)/arm/vgic/vgic-its.o
obj-y += $(KVM)/arm/vgic/vgic-debug.o
obj-y += $(KVM)/irqchip.o
obj-y += $(KVM)/arm/arch_timer.o

File diff suppressed because it is too large
@ -1,130 +0,0 @@
|
|||
/* SPDX-License-Identifier: GPL-2.0-only */
|
||||
/*
|
||||
* Copyright (C) 2012 - Virtual Open Systems and Columbia University
|
||||
* Authors: Christoffer Dall <c.dall@virtualopensystems.com>
|
||||
*/
|
||||
|
||||
#ifndef __ARM_KVM_COPROC_LOCAL_H__
|
||||
#define __ARM_KVM_COPROC_LOCAL_H__
|
||||
|
||||
struct coproc_params {
|
||||
unsigned long CRn;
|
||||
unsigned long CRm;
|
||||
unsigned long Op1;
|
||||
unsigned long Op2;
|
||||
unsigned long Rt1;
|
||||
unsigned long Rt2;
|
||||
bool is_64bit;
|
||||
bool is_write;
|
||||
};
|
||||
|
||||
struct coproc_reg {
|
||||
/* MRC/MCR/MRRC/MCRR instruction which accesses it. */
|
||||
unsigned long CRn;
|
||||
unsigned long CRm;
|
||||
unsigned long Op1;
|
||||
unsigned long Op2;
|
||||
|
||||
bool is_64bit;
|
||||
|
||||
/* Trapped access from guest, if non-NULL. */
|
||||
bool (*access)(struct kvm_vcpu *,
|
||||
const struct coproc_params *,
|
||||
const struct coproc_reg *);
|
||||
|
||||
/* Initialization for vcpu. */
|
||||
void (*reset)(struct kvm_vcpu *, const struct coproc_reg *);
|
||||
|
||||
/* Index into vcpu_cp15(vcpu, ...), or 0 if we don't need to save it. */
|
||||
unsigned long reg;
|
||||
|
||||
/* Value (usually reset value) */
|
||||
u64 val;
|
||||
};
|
||||
|
||||
static inline void print_cp_instr(const struct coproc_params *p)
|
||||
{
|
||||
/* Look, we even formatted it for you to paste into the table! */
|
||||
if (p->is_64bit) {
|
||||
kvm_pr_unimpl(" { CRm64(%2lu), Op1(%2lu), is64, func_%s },\n",
|
||||
p->CRn, p->Op1, p->is_write ? "write" : "read");
|
||||
} else {
|
||||
kvm_pr_unimpl(" { CRn(%2lu), CRm(%2lu), Op1(%2lu), Op2(%2lu), is32,"
|
||||
" func_%s },\n",
|
||||
p->CRn, p->CRm, p->Op1, p->Op2,
|
||||
p->is_write ? "write" : "read");
|
||||
}
|
||||
}
|
||||
|
||||
static inline bool ignore_write(struct kvm_vcpu *vcpu,
|
||||
const struct coproc_params *p)
|
||||
{
|
||||
return true;
|
||||
}
|
||||
|
||||
static inline bool read_zero(struct kvm_vcpu *vcpu,
|
||||
const struct coproc_params *p)
|
||||
{
|
||||
*vcpu_reg(vcpu, p->Rt1) = 0;
|
||||
return true;
|
||||
}
|
||||
|
||||
/* Reset functions */
|
||||
static inline void reset_unknown(struct kvm_vcpu *vcpu,
|
||||
const struct coproc_reg *r)
|
||||
{
|
||||
BUG_ON(!r->reg);
|
||||
BUG_ON(r->reg >= ARRAY_SIZE(vcpu->arch.ctxt.cp15));
|
||||
vcpu_cp15(vcpu, r->reg) = 0xdecafbad;
|
||||
}
|
||||
|
||||
static inline void reset_val(struct kvm_vcpu *vcpu, const struct coproc_reg *r)
|
||||
{
|
||||
BUG_ON(!r->reg);
|
||||
BUG_ON(r->reg >= ARRAY_SIZE(vcpu->arch.ctxt.cp15));
|
||||
vcpu_cp15(vcpu, r->reg) = r->val;
|
||||
}
|
||||
|
||||
static inline void reset_unknown64(struct kvm_vcpu *vcpu,
|
||||
const struct coproc_reg *r)
|
||||
{
|
||||
BUG_ON(!r->reg);
|
||||
BUG_ON(r->reg + 1 >= ARRAY_SIZE(vcpu->arch.ctxt.cp15));
|
||||
|
||||
vcpu_cp15(vcpu, r->reg) = 0xdecafbad;
|
||||
vcpu_cp15(vcpu, r->reg+1) = 0xd0c0ffee;
|
||||
}
|
||||
|
||||
static inline int cmp_reg(const struct coproc_reg *i1,
|
||||
const struct coproc_reg *i2)
|
||||
{
|
||||
BUG_ON(i1 == i2);
|
||||
if (!i1)
|
||||
return 1;
|
||||
else if (!i2)
|
||||
return -1;
|
||||
if (i1->CRn != i2->CRn)
|
||||
return i1->CRn - i2->CRn;
|
||||
if (i1->CRm != i2->CRm)
|
||||
return i1->CRm - i2->CRm;
|
||||
if (i1->Op1 != i2->Op1)
|
||||
return i1->Op1 - i2->Op1;
|
||||
if (i1->Op2 != i2->Op2)
|
||||
return i1->Op2 - i2->Op2;
|
||||
return i2->is_64bit - i1->is_64bit;
|
||||
}
|
||||
|
||||
|
||||
#define CRn(_x) .CRn = _x
|
||||
#define CRm(_x) .CRm = _x
|
||||
#define CRm64(_x) .CRn = _x, .CRm = 0
|
||||
#define Op1(_x) .Op1 = _x
|
||||
#define Op2(_x) .Op2 = _x
|
||||
#define is64 .is_64bit = true
|
||||
#define is32 .is_64bit = false
|
||||
|
||||
bool access_vm_reg(struct kvm_vcpu *vcpu,
|
||||
const struct coproc_params *p,
|
||||
const struct coproc_reg *r);
|
||||
|
||||
#endif /* __ARM_KVM_COPROC_LOCAL_H__ */
|
|
@@ -1,39 +0,0 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
 * Authors: Rusty Russell <rusty@rustcorp.au>
 *          Christoffer Dall <c.dall@virtualopensystems.com>
 */
#include <linux/kvm_host.h>
#include <asm/kvm_coproc.h>
#include <asm/kvm_emulate.h>
#include <linux/init.h>

#include "coproc.h"

/*
 * A15-specific CP15 registers.
 * CRn denotes the primary register number, but is copied to the CRm in the
 * user space API for 64-bit register access in line with the terminology used
 * in the ARM ARM.
 * Important: Must be sorted ascending by CRn, CRM, Op1, Op2 and with 64-bit
 * registers preceding 32-bit ones.
 */
static const struct coproc_reg a15_regs[] = {
/* SCTLR: swapped by interrupt.S. */
{ CRn( 1), CRm( 0), Op1( 0), Op2( 0), is32,
access_vm_reg, reset_val, c1_SCTLR, 0x00C50078 },
};

static struct kvm_coproc_target_table a15_target_table = {
.target = KVM_ARM_TARGET_CORTEX_A15,
.table = a15_regs,
.num = ARRAY_SIZE(a15_regs),
};

static int __init coproc_a15_init(void)
{
kvm_register_target_coproc_table(&a15_target_table);
return 0;
}
late_initcall(coproc_a15_init);

@@ -1,42 +0,0 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
 * Copyright (C) 2013 - ARM Ltd
 *
 * Authors: Rusty Russell <rusty@rustcorp.au>
 *          Christoffer Dall <c.dall@virtualopensystems.com>
 *          Jonathan Austin <jonathan.austin@arm.com>
 */
#include <linux/kvm_host.h>
#include <asm/kvm_coproc.h>
#include <asm/kvm_emulate.h>
#include <linux/init.h>

#include "coproc.h"

/*
 * Cortex-A7 specific CP15 registers.
 * CRn denotes the primary register number, but is copied to the CRm in the
 * user space API for 64-bit register access in line with the terminology used
 * in the ARM ARM.
 * Important: Must be sorted ascending by CRn, CRM, Op1, Op2 and with 64-bit
 * registers preceding 32-bit ones.
 */
static const struct coproc_reg a7_regs[] = {
/* SCTLR: swapped by interrupt.S. */
{ CRn( 1), CRm( 0), Op1( 0), Op2( 0), is32,
access_vm_reg, reset_val, c1_SCTLR, 0x00C50878 },
};

static struct kvm_coproc_target_table a7_target_table = {
.target = KVM_ARM_TARGET_CORTEX_A7,
.table = a7_regs,
.num = ARRAY_SIZE(a7_regs),
};

static int __init coproc_a7_init(void)
{
kvm_register_target_coproc_table(&a7_target_table);
return 0;
}
late_initcall(coproc_a7_init);

@ -1,166 +0,0 @@
|
|||
// SPDX-License-Identifier: GPL-2.0-only
|
||||
/*
|
||||
* Copyright (C) 2012 - Virtual Open Systems and Columbia University
|
||||
* Author: Christoffer Dall <c.dall@virtualopensystems.com>
|
||||
*/
|
||||
|
||||
#include <linux/mm.h>
|
||||
#include <linux/kvm_host.h>
|
||||
#include <asm/kvm_arm.h>
|
||||
#include <asm/kvm_emulate.h>
|
||||
#include <asm/opcodes.h>
|
||||
#include <trace/events/kvm.h>
|
||||
|
||||
#include "trace.h"
|
||||
|
||||
#define VCPU_NR_MODES 6
|
||||
#define VCPU_REG_OFFSET_USR 0
|
||||
#define VCPU_REG_OFFSET_FIQ 1
|
||||
#define VCPU_REG_OFFSET_IRQ 2
|
||||
#define VCPU_REG_OFFSET_SVC 3
|
||||
#define VCPU_REG_OFFSET_ABT 4
|
||||
#define VCPU_REG_OFFSET_UND 5
|
||||
#define REG_OFFSET(_reg) \
|
||||
(offsetof(struct kvm_regs, _reg) / sizeof(u32))
|
||||
|
||||
#define USR_REG_OFFSET(_num) REG_OFFSET(usr_regs.uregs[_num])
|
||||
|
||||
static const unsigned long vcpu_reg_offsets[VCPU_NR_MODES][15] = {
|
||||
/* USR/SYS Registers */
|
||||
[VCPU_REG_OFFSET_USR] = {
|
||||
USR_REG_OFFSET(0), USR_REG_OFFSET(1), USR_REG_OFFSET(2),
|
||||
USR_REG_OFFSET(3), USR_REG_OFFSET(4), USR_REG_OFFSET(5),
|
||||
USR_REG_OFFSET(6), USR_REG_OFFSET(7), USR_REG_OFFSET(8),
|
||||
USR_REG_OFFSET(9), USR_REG_OFFSET(10), USR_REG_OFFSET(11),
|
||||
USR_REG_OFFSET(12), USR_REG_OFFSET(13), USR_REG_OFFSET(14),
|
||||
},
|
||||
|
||||
/* FIQ Registers */
|
||||
[VCPU_REG_OFFSET_FIQ] = {
|
||||
USR_REG_OFFSET(0), USR_REG_OFFSET(1), USR_REG_OFFSET(2),
|
||||
USR_REG_OFFSET(3), USR_REG_OFFSET(4), USR_REG_OFFSET(5),
|
||||
USR_REG_OFFSET(6), USR_REG_OFFSET(7),
|
||||
REG_OFFSET(fiq_regs[0]), /* r8 */
|
||||
REG_OFFSET(fiq_regs[1]), /* r9 */
|
||||
REG_OFFSET(fiq_regs[2]), /* r10 */
|
||||
REG_OFFSET(fiq_regs[3]), /* r11 */
|
||||
REG_OFFSET(fiq_regs[4]), /* r12 */
|
||||
REG_OFFSET(fiq_regs[5]), /* r13 */
|
||||
REG_OFFSET(fiq_regs[6]), /* r14 */
|
||||
},
|
||||
|
||||
/* IRQ Registers */
|
||||
[VCPU_REG_OFFSET_IRQ] = {
|
||||
USR_REG_OFFSET(0), USR_REG_OFFSET(1), USR_REG_OFFSET(2),
|
||||
USR_REG_OFFSET(3), USR_REG_OFFSET(4), USR_REG_OFFSET(5),
|
||||
USR_REG_OFFSET(6), USR_REG_OFFSET(7), USR_REG_OFFSET(8),
|
||||
USR_REG_OFFSET(9), USR_REG_OFFSET(10), USR_REG_OFFSET(11),
|
||||
USR_REG_OFFSET(12),
|
||||
REG_OFFSET(irq_regs[0]), /* r13 */
|
||||
REG_OFFSET(irq_regs[1]), /* r14 */
|
||||
},
|
||||
|
||||
/* SVC Registers */
|
||||
[VCPU_REG_OFFSET_SVC] = {
|
||||
USR_REG_OFFSET(0), USR_REG_OFFSET(1), USR_REG_OFFSET(2),
|
||||
USR_REG_OFFSET(3), USR_REG_OFFSET(4), USR_REG_OFFSET(5),
|
||||
USR_REG_OFFSET(6), USR_REG_OFFSET(7), USR_REG_OFFSET(8),
|
||||
USR_REG_OFFSET(9), USR_REG_OFFSET(10), USR_REG_OFFSET(11),
|
||||
USR_REG_OFFSET(12),
|
||||
REG_OFFSET(svc_regs[0]), /* r13 */
|
||||
REG_OFFSET(svc_regs[1]), /* r14 */
|
||||
},
|
||||
|
||||
/* ABT Registers */
|
||||
[VCPU_REG_OFFSET_ABT] = {
|
||||
USR_REG_OFFSET(0), USR_REG_OFFSET(1), USR_REG_OFFSET(2),
|
||||
USR_REG_OFFSET(3), USR_REG_OFFSET(4), USR_REG_OFFSET(5),
|
||||
USR_REG_OFFSET(6), USR_REG_OFFSET(7), USR_REG_OFFSET(8),
|
||||
USR_REG_OFFSET(9), USR_REG_OFFSET(10), USR_REG_OFFSET(11),
|
||||
USR_REG_OFFSET(12),
|
||||
REG_OFFSET(abt_regs[0]), /* r13 */
|
||||
REG_OFFSET(abt_regs[1]), /* r14 */
|
||||
},
|
||||
|
||||
/* UND Registers */
|
||||
[VCPU_REG_OFFSET_UND] = {
|
||||
USR_REG_OFFSET(0), USR_REG_OFFSET(1), USR_REG_OFFSET(2),
|
||||
USR_REG_OFFSET(3), USR_REG_OFFSET(4), USR_REG_OFFSET(5),
|
||||
USR_REG_OFFSET(6), USR_REG_OFFSET(7), USR_REG_OFFSET(8),
|
||||
USR_REG_OFFSET(9), USR_REG_OFFSET(10), USR_REG_OFFSET(11),
|
||||
USR_REG_OFFSET(12),
|
||||
REG_OFFSET(und_regs[0]), /* r13 */
|
||||
REG_OFFSET(und_regs[1]), /* r14 */
|
||||
},
|
||||
};
|
||||
|
||||
/*
|
||||
* Return a pointer to the register number valid in the current mode of
|
||||
* the virtual CPU.
|
||||
*/
|
||||
unsigned long *vcpu_reg(struct kvm_vcpu *vcpu, u8 reg_num)
|
||||
{
|
||||
unsigned long *reg_array = (unsigned long *)&vcpu->arch.ctxt.gp_regs;
|
||||
unsigned long mode = *vcpu_cpsr(vcpu) & MODE_MASK;
|
||||
|
||||
switch (mode) {
|
||||
case USR_MODE...SVC_MODE:
|
||||
mode &= ~MODE32_BIT; /* 0 ... 3 */
|
||||
break;
|
||||
|
||||
case ABT_MODE:
|
||||
mode = VCPU_REG_OFFSET_ABT;
|
||||
break;
|
||||
|
||||
case UND_MODE:
|
||||
mode = VCPU_REG_OFFSET_UND;
|
||||
break;
|
||||
|
||||
case SYSTEM_MODE:
|
||||
mode = VCPU_REG_OFFSET_USR;
|
||||
break;
|
||||
|
||||
default:
|
||||
BUG();
|
||||
}
|
||||
|
||||
return reg_array + vcpu_reg_offsets[mode][reg_num];
|
||||
}
|
||||
|
||||
/*
|
||||
* Return the SPSR for the current mode of the virtual CPU.
|
||||
*/
|
||||
unsigned long *__vcpu_spsr(struct kvm_vcpu *vcpu)
|
||||
{
|
||||
unsigned long mode = *vcpu_cpsr(vcpu) & MODE_MASK;
|
||||
switch (mode) {
|
||||
case SVC_MODE:
|
||||
return &vcpu->arch.ctxt.gp_regs.KVM_ARM_SVC_spsr;
|
||||
case ABT_MODE:
|
||||
return &vcpu->arch.ctxt.gp_regs.KVM_ARM_ABT_spsr;
|
||||
case UND_MODE:
|
||||
return &vcpu->arch.ctxt.gp_regs.KVM_ARM_UND_spsr;
|
||||
case IRQ_MODE:
|
||||
return &vcpu->arch.ctxt.gp_regs.KVM_ARM_IRQ_spsr;
|
||||
case FIQ_MODE:
|
||||
return &vcpu->arch.ctxt.gp_regs.KVM_ARM_FIQ_spsr;
|
||||
default:
|
||||
BUG();
|
||||
}
|
||||
}
|
||||
|
||||
/******************************************************************************
|
||||
* Inject exceptions into the guest
|
||||
*/
|
||||
|
||||
/**
|
||||
* kvm_inject_vabt - inject an async abort / SError into the guest
|
||||
* @vcpu: The VCPU to receive the exception
|
||||
*
|
||||
* It is assumed that this code is called from the VCPU thread and that the
|
||||
* VCPU therefore is not currently executing guest code.
|
||||
*/
|
||||
void kvm_inject_vabt(struct kvm_vcpu *vcpu)
|
||||
{
|
||||
*vcpu_hcr(vcpu) |= HCR_VA;
|
||||
}
|
|
@ -1,387 +0,0 @@
|
|||
// SPDX-License-Identifier: GPL-2.0-only
|
||||
/*
|
||||
* Copyright (C) 2012 - Virtual Open Systems and Columbia University
|
||||
* Author: Christoffer Dall <c.dall@virtualopensystems.com>
|
||||
*/
|
||||
|
||||
#include <linux/errno.h>
|
||||
#include <linux/err.h>
|
||||
#include <linux/kvm_host.h>
|
||||
#include <linux/module.h>
|
||||
#include <linux/vmalloc.h>
|
||||
#include <linux/fs.h>
|
||||
#include <kvm/arm_psci.h>
|
||||
#include <asm/cputype.h>
|
||||
#include <linux/uaccess.h>
|
||||
#include <asm/kvm.h>
|
||||
#include <asm/kvm_emulate.h>
|
||||
#include <asm/kvm_coproc.h>
|
||||
|
||||
#define VM_STAT(x) { #x, offsetof(struct kvm, stat.x), KVM_STAT_VM }
|
||||
#define VCPU_STAT(x) { #x, offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU }
|
||||
|
||||
struct kvm_stats_debugfs_item debugfs_entries[] = {
|
||||
VCPU_STAT(halt_successful_poll),
|
||||
VCPU_STAT(halt_attempted_poll),
|
||||
VCPU_STAT(halt_poll_invalid),
|
||||
VCPU_STAT(halt_wakeup),
|
||||
VCPU_STAT(hvc_exit_stat),
|
||||
VCPU_STAT(wfe_exit_stat),
|
||||
VCPU_STAT(wfi_exit_stat),
|
||||
VCPU_STAT(mmio_exit_user),
|
||||
VCPU_STAT(mmio_exit_kernel),
|
||||
VCPU_STAT(exits),
|
||||
{ NULL }
|
||||
};
|
||||
|
||||
static u64 core_reg_offset_from_id(u64 id)
|
||||
{
|
||||
return id & ~(KVM_REG_ARCH_MASK | KVM_REG_SIZE_MASK | KVM_REG_ARM_CORE);
|
||||
}
|
||||
|
||||
static int get_core_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
|
||||
{
|
||||
u32 __user *uaddr = (u32 __user *)(long)reg->addr;
|
||||
struct kvm_regs *regs = &vcpu->arch.ctxt.gp_regs;
|
||||
u64 off;
|
||||
|
||||
if (KVM_REG_SIZE(reg->id) != 4)
|
||||
return -ENOENT;
|
||||
|
||||
/* Our ID is an index into the kvm_regs struct. */
|
||||
off = core_reg_offset_from_id(reg->id);
|
||||
if (off >= sizeof(*regs) / KVM_REG_SIZE(reg->id))
|
||||
return -ENOENT;
|
||||
|
||||
return put_user(((u32 *)regs)[off], uaddr);
|
||||
}
|
||||
|
||||
static int set_core_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
|
||||
{
|
||||
u32 __user *uaddr = (u32 __user *)(long)reg->addr;
|
||||
struct kvm_regs *regs = &vcpu->arch.ctxt.gp_regs;
|
||||
u64 off, val;
|
||||
|
||||
if (KVM_REG_SIZE(reg->id) != 4)
|
||||
return -ENOENT;
|
||||
|
||||
/* Our ID is an index into the kvm_regs struct. */
|
||||
off = core_reg_offset_from_id(reg->id);
|
||||
if (off >= sizeof(*regs) / KVM_REG_SIZE(reg->id))
|
||||
return -ENOENT;
|
||||
|
||||
if (get_user(val, uaddr) != 0)
|
||||
return -EFAULT;
|
||||
|
||||
if (off == KVM_REG_ARM_CORE_REG(usr_regs.ARM_cpsr)) {
|
||||
unsigned long mode = val & MODE_MASK;
|
||||
switch (mode) {
|
||||
case USR_MODE:
|
||||
case FIQ_MODE:
|
||||
case IRQ_MODE:
|
||||
case SVC_MODE:
|
||||
case ABT_MODE:
|
||||
case UND_MODE:
|
||||
break;
|
||||
default:
|
||||
return -EINVAL;
|
||||
}
|
||||
}
|
||||
|
||||
((u32 *)regs)[off] = val;
|
||||
return 0;
|
||||
}
|
||||
|
||||
int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
|
||||
{
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
|
||||
{
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
#define NUM_TIMER_REGS 3
|
||||
|
||||
static bool is_timer_reg(u64 index)
|
||||
{
|
||||
switch (index) {
|
||||
case KVM_REG_ARM_TIMER_CTL:
|
||||
case KVM_REG_ARM_TIMER_CNT:
|
||||
case KVM_REG_ARM_TIMER_CVAL:
|
||||
return true;
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
static int copy_timer_indices(struct kvm_vcpu *vcpu, u64 __user *uindices)
|
||||
{
|
||||
if (put_user(KVM_REG_ARM_TIMER_CTL, uindices))
|
||||
return -EFAULT;
|
||||
uindices++;
|
||||
if (put_user(KVM_REG_ARM_TIMER_CNT, uindices))
|
||||
return -EFAULT;
|
||||
uindices++;
|
||||
if (put_user(KVM_REG_ARM_TIMER_CVAL, uindices))
|
||||
return -EFAULT;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int set_timer_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
|
||||
{
|
||||
void __user *uaddr = (void __user *)(long)reg->addr;
|
||||
u64 val;
|
||||
int ret;
|
||||
|
||||
ret = copy_from_user(&val, uaddr, KVM_REG_SIZE(reg->id));
|
||||
if (ret != 0)
|
||||
return -EFAULT;
|
||||
|
||||
return kvm_arm_timer_set_reg(vcpu, reg->id, val);
|
||||
}
|
||||
|
||||
static int get_timer_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
|
||||
{
|
||||
void __user *uaddr = (void __user *)(long)reg->addr;
|
||||
u64 val;
|
||||
|
||||
val = kvm_arm_timer_get_reg(vcpu, reg->id);
|
||||
return copy_to_user(uaddr, &val, KVM_REG_SIZE(reg->id)) ? -EFAULT : 0;
|
||||
}
|
||||
|
||||
static unsigned long num_core_regs(void)
|
||||
{
|
||||
return sizeof(struct kvm_regs) / sizeof(u32);
|
||||
}
|
||||
|
||||
/**
|
||||
* kvm_arm_num_regs - how many registers do we present via KVM_GET_ONE_REG
|
||||
*
|
||||
* This is for all registers.
|
||||
*/
|
||||
unsigned long kvm_arm_num_regs(struct kvm_vcpu *vcpu)
|
||||
{
|
||||
return num_core_regs() + kvm_arm_num_coproc_regs(vcpu)
|
||||
+ kvm_arm_get_fw_num_regs(vcpu)
|
||||
+ NUM_TIMER_REGS;
|
||||
}
|
||||
|
||||
/**
|
||||
* kvm_arm_copy_reg_indices - get indices of all registers.
|
||||
*
|
||||
* We do core registers right here, then we append coproc regs.
|
||||
*/
|
||||
int kvm_arm_copy_reg_indices(struct kvm_vcpu *vcpu, u64 __user *uindices)
|
||||
{
|
||||
unsigned int i;
|
||||
const u64 core_reg = KVM_REG_ARM | KVM_REG_SIZE_U32 | KVM_REG_ARM_CORE;
|
||||
int ret;
|
||||
|
||||
for (i = 0; i < sizeof(struct kvm_regs)/sizeof(u32); i++) {
|
||||
if (put_user(core_reg | i, uindices))
|
||||
return -EFAULT;
|
||||
uindices++;
|
||||
}
|
||||
|
||||
ret = kvm_arm_copy_fw_reg_indices(vcpu, uindices);
|
||||
if (ret)
|
||||
return ret;
|
||||
uindices += kvm_arm_get_fw_num_regs(vcpu);
|
||||
|
||||
ret = copy_timer_indices(vcpu, uindices);
|
||||
if (ret)
|
||||
return ret;
|
||||
uindices += NUM_TIMER_REGS;
|
||||
|
||||
return kvm_arm_copy_coproc_indices(vcpu, uindices);
|
||||
}
|
||||
|
||||
int kvm_arm_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
|
||||
{
|
||||
/* We currently use nothing arch-specific in upper 32 bits */
|
||||
if ((reg->id & ~KVM_REG_SIZE_MASK) >> 32 != KVM_REG_ARM >> 32)
|
||||
return -EINVAL;
|
||||
|
||||
/* Register group 16 means we want a core register. */
|
||||
if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_CORE)
|
||||
return get_core_reg(vcpu, reg);
|
||||
|
||||
if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_FW)
|
||||
return kvm_arm_get_fw_reg(vcpu, reg);
|
||||
|
||||
if (is_timer_reg(reg->id))
|
||||
return get_timer_reg(vcpu, reg);
|
||||
|
||||
return kvm_arm_coproc_get_reg(vcpu, reg);
|
||||
}
|
||||
|
||||
int kvm_arm_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
|
||||
{
|
||||
/* We currently use nothing arch-specific in upper 32 bits */
|
||||
if ((reg->id & ~KVM_REG_SIZE_MASK) >> 32 != KVM_REG_ARM >> 32)
|
||||
return -EINVAL;
|
||||
|
||||
/* Register group 16 means we set a core register. */
|
||||
if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_CORE)
|
||||
return set_core_reg(vcpu, reg);
|
||||
|
||||
if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_FW)
|
||||
return kvm_arm_set_fw_reg(vcpu, reg);
|
||||
|
||||
if (is_timer_reg(reg->id))
|
||||
return set_timer_reg(vcpu, reg);
|
||||
|
||||
return kvm_arm_coproc_set_reg(vcpu, reg);
|
||||
}
|
||||
|
||||
int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
|
||||
struct kvm_sregs *sregs)
|
||||
{
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
|
||||
struct kvm_sregs *sregs)
|
||||
{
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
|
||||
int __kvm_arm_vcpu_get_events(struct kvm_vcpu *vcpu,
|
||||
struct kvm_vcpu_events *events)
|
||||
{
|
||||
events->exception.serror_pending = !!(*vcpu_hcr(vcpu) & HCR_VA);
|
||||
|
||||
/*
|
||||
* We never return a pending ext_dabt here because we deliver it to
|
||||
* the virtual CPU directly when setting the event and it's no longer
|
||||
* 'pending' at this point.
|
||||
*/
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
int __kvm_arm_vcpu_set_events(struct kvm_vcpu *vcpu,
|
||||
struct kvm_vcpu_events *events)
|
||||
{
|
||||
bool serror_pending = events->exception.serror_pending;
|
||||
bool has_esr = events->exception.serror_has_esr;
|
||||
bool ext_dabt_pending = events->exception.ext_dabt_pending;
|
||||
|
||||
if (serror_pending && has_esr)
|
||||
return -EINVAL;
|
||||
else if (serror_pending)
|
||||
kvm_inject_vabt(vcpu);
|
||||
|
||||
if (ext_dabt_pending)
|
||||
kvm_inject_dabt(vcpu, kvm_vcpu_get_hfar(vcpu));
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
int __attribute_const__ kvm_target_cpu(void)
|
||||
{
|
||||
switch (read_cpuid_part()) {
|
||||
case ARM_CPU_PART_CORTEX_A7:
|
||||
return KVM_ARM_TARGET_CORTEX_A7;
|
||||
case ARM_CPU_PART_CORTEX_A15:
|
||||
return KVM_ARM_TARGET_CORTEX_A15;
|
||||
default:
|
||||
return -EINVAL;
|
||||
}
|
||||
}
|
||||
|
||||
int kvm_vcpu_preferred_target(struct kvm_vcpu_init *init)
|
||||
{
|
||||
int target = kvm_target_cpu();
|
||||
|
||||
if (target < 0)
|
||||
return -ENODEV;
|
||||
|
||||
memset(init, 0, sizeof(*init));
|
||||
|
||||
/*
|
||||
* For now, we don't return any features.
|
||||
* In future, we might use features to return target
|
||||
* specific features available for the preferred
|
||||
* target type.
|
||||
*/
|
||||
init->target = (__u32)target;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
|
||||
{
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
|
||||
{
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
|
||||
struct kvm_translation *tr)
|
||||
{
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
|
||||
struct kvm_guest_debug *dbg)
|
||||
{
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
int kvm_arm_vcpu_arch_set_attr(struct kvm_vcpu *vcpu,
|
||||
struct kvm_device_attr *attr)
|
||||
{
|
||||
int ret;
|
||||
|
||||
switch (attr->group) {
|
||||
case KVM_ARM_VCPU_TIMER_CTRL:
|
||||
ret = kvm_arm_timer_set_attr(vcpu, attr);
|
||||
break;
|
||||
default:
|
||||
ret = -ENXIO;
|
||||
break;
|
||||
}
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
int kvm_arm_vcpu_arch_get_attr(struct kvm_vcpu *vcpu,
|
||||
struct kvm_device_attr *attr)
|
||||
{
|
||||
int ret;
|
||||
|
||||
switch (attr->group) {
|
||||
case KVM_ARM_VCPU_TIMER_CTRL:
|
||||
ret = kvm_arm_timer_get_attr(vcpu, attr);
|
||||
break;
|
||||
default:
|
||||
ret = -ENXIO;
|
||||
break;
|
||||
}
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
int kvm_arm_vcpu_arch_has_attr(struct kvm_vcpu *vcpu,
|
||||
struct kvm_device_attr *attr)
|
||||
{
|
||||
int ret;
|
||||
|
||||
switch (attr->group) {
|
||||
case KVM_ARM_VCPU_TIMER_CTRL:
|
||||
ret = kvm_arm_timer_has_attr(vcpu, attr);
|
||||
break;
|
||||
default:
|
||||
ret = -ENXIO;
|
||||
break;
|
||||
}
|
||||
|
||||
return ret;
|
||||
}
|
|
@ -1,175 +0,0 @@
|
|||
// SPDX-License-Identifier: GPL-2.0-only
|
||||
/*
|
||||
* Copyright (C) 2012 - Virtual Open Systems and Columbia University
|
||||
* Author: Christoffer Dall <c.dall@virtualopensystems.com>
|
||||
*/
|
||||
|
||||
#include <linux/kvm.h>
|
||||
#include <linux/kvm_host.h>
|
||||
#include <asm/kvm_emulate.h>
|
||||
#include <asm/kvm_coproc.h>
|
||||
#include <asm/kvm_mmu.h>
|
||||
#include <kvm/arm_hypercalls.h>
|
||||
#include <trace/events/kvm.h>
|
||||
|
||||
#include "trace.h"
|
||||
|
||||
typedef int (*exit_handle_fn)(struct kvm_vcpu *, struct kvm_run *);
|
||||
|
||||
static int handle_hvc(struct kvm_vcpu *vcpu, struct kvm_run *run)
|
||||
{
|
||||
int ret;
|
||||
|
||||
trace_kvm_hvc(*vcpu_pc(vcpu), *vcpu_reg(vcpu, 0),
|
||||
kvm_vcpu_hvc_get_imm(vcpu));
|
||||
vcpu->stat.hvc_exit_stat++;
|
||||
|
||||
ret = kvm_hvc_call_handler(vcpu);
|
||||
if (ret < 0) {
|
||||
vcpu_set_reg(vcpu, 0, ~0UL);
|
||||
return 1;
|
||||
}
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int handle_smc(struct kvm_vcpu *vcpu, struct kvm_run *run)
|
||||
{
|
||||
/*
|
||||
* "If an SMC instruction executed at Non-secure EL1 is
|
||||
* trapped to EL2 because HCR_EL2.TSC is 1, the exception is a
|
||||
* Trap exception, not a Secure Monitor Call exception [...]"
|
||||
*
|
||||
* We need to advance the PC after the trap, as it would
|
||||
* otherwise return to the same address...
|
||||
*/
|
||||
vcpu_set_reg(vcpu, 0, ~0UL);
|
||||
kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu));
|
||||
return 1;
|
||||
}
|
||||
|
||||
/**
|
||||
* kvm_handle_wfx - handle a WFI or WFE instructions trapped in guests
|
||||
* @vcpu: the vcpu pointer
|
||||
* @run: the kvm_run structure pointer
|
||||
*
|
||||
* WFE: Yield the CPU and come back to this vcpu when the scheduler
|
||||
* decides to.
|
||||
* WFI: Simply call kvm_vcpu_block(), which will halt execution of
|
||||
* world-switches and schedule other host processes until there is an
|
||||
* incoming IRQ or FIQ to the VM.
|
||||
*/
|
||||
static int kvm_handle_wfx(struct kvm_vcpu *vcpu, struct kvm_run *run)
|
||||
{
|
||||
if (kvm_vcpu_get_hsr(vcpu) & HSR_WFI_IS_WFE) {
|
||||
trace_kvm_wfx(*vcpu_pc(vcpu), true);
|
||||
vcpu->stat.wfe_exit_stat++;
|
||||
kvm_vcpu_on_spin(vcpu, vcpu_mode_priv(vcpu));
|
||||
} else {
|
||||
trace_kvm_wfx(*vcpu_pc(vcpu), false);
|
||||
vcpu->stat.wfi_exit_stat++;
|
||||
kvm_vcpu_block(vcpu);
|
||||
kvm_clear_request(KVM_REQ_UNHALT, vcpu);
|
||||
}
|
||||
|
||||
kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu));
|
||||
|
||||
return 1;
|
||||
}
|
||||
|
||||
static int kvm_handle_unknown_ec(struct kvm_vcpu *vcpu, struct kvm_run *run)
|
||||
{
|
||||
u32 hsr = kvm_vcpu_get_hsr(vcpu);
|
||||
|
||||
kvm_pr_unimpl("Unknown exception class: hsr: %#08x\n",
|
||||
hsr);
|
||||
|
||||
kvm_inject_undefined(vcpu);
|
||||
return 1;
|
||||
}
|
||||
|
||||
static exit_handle_fn arm_exit_handlers[] = {
|
||||
[0 ... HSR_EC_MAX] = kvm_handle_unknown_ec,
|
||||
[HSR_EC_WFI] = kvm_handle_wfx,
|
||||
[HSR_EC_CP15_32] = kvm_handle_cp15_32,
|
||||
[HSR_EC_CP15_64] = kvm_handle_cp15_64,
|
||||
[HSR_EC_CP14_MR] = kvm_handle_cp14_32,
|
||||
[HSR_EC_CP14_LS] = kvm_handle_cp14_load_store,
|
||||
[HSR_EC_CP14_64] = kvm_handle_cp14_64,
|
||||
[HSR_EC_CP_0_13] = kvm_handle_cp_0_13_access,
|
||||
[HSR_EC_CP10_ID] = kvm_handle_cp10_id,
|
||||
[HSR_EC_HVC] = handle_hvc,
|
||||
[HSR_EC_SMC] = handle_smc,
|
||||
[HSR_EC_IABT] = kvm_handle_guest_abort,
|
||||
[HSR_EC_DABT] = kvm_handle_guest_abort,
|
||||
};
|
||||
|
||||
static exit_handle_fn kvm_get_exit_handler(struct kvm_vcpu *vcpu)
|
||||
{
|
||||
u8 hsr_ec = kvm_vcpu_trap_get_class(vcpu);
|
||||
|
||||
return arm_exit_handlers[hsr_ec];
|
||||
}
|
||||
|
||||
/*
|
||||
* Return > 0 to return to guest, < 0 on error, 0 (and set exit_reason) on
|
||||
* proper exit to userspace.
|
||||
*/
|
||||
int handle_exit(struct kvm_vcpu *vcpu, struct kvm_run *run,
|
||||
int exception_index)
|
||||
{
|
||||
exit_handle_fn exit_handler;
|
||||
|
||||
if (ARM_ABORT_PENDING(exception_index)) {
|
||||
u8 hsr_ec = kvm_vcpu_trap_get_class(vcpu);
|
||||
|
||||
/*
|
||||
* HVC/SMC already have an adjusted PC, which we need
|
||||
* to correct in order to return to after having
|
||||
* injected the abort.
|
||||
*/
|
||||
if (hsr_ec == HSR_EC_HVC || hsr_ec == HSR_EC_SMC) {
|
||||
u32 adj = kvm_vcpu_trap_il_is32bit(vcpu) ? 4 : 2;
|
||||
*vcpu_pc(vcpu) -= adj;
|
||||
}
|
||||
|
||||
kvm_inject_vabt(vcpu);
|
||||
return 1;
|
||||
}
|
||||
|
||||
exception_index = ARM_EXCEPTION_CODE(exception_index);
|
||||
|
||||
switch (exception_index) {
|
||||
case ARM_EXCEPTION_IRQ:
|
||||
return 1;
|
||||
case ARM_EXCEPTION_HVC:
|
||||
/*
|
||||
* See ARM ARM B1.14.1: "Hyp traps on instructions
|
||||
* that fail their condition code check"
|
||||
*/
|
||||
if (!kvm_condition_valid(vcpu)) {
|
||||
kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu));
|
||||
return 1;
|
||||
}
|
||||
|
||||
exit_handler = kvm_get_exit_handler(vcpu);
|
||||
|
||||
return exit_handler(vcpu, run);
|
||||
case ARM_EXCEPTION_DATA_ABORT:
|
||||
kvm_inject_vabt(vcpu);
|
||||
return 1;
|
||||
case ARM_EXCEPTION_HYP_GONE:
|
||||
/*
|
||||
* HYP has been reset to the hyp-stub. This happens
|
||||
* when a guest is pre-empted by kvm_reboot()'s
|
||||
* shutdown call.
|
||||
*/
|
||||
run->exit_reason = KVM_EXIT_FAIL_ENTRY;
|
||||
return 0;
|
||||
default:
|
||||
kvm_pr_unimpl("Unsupported exception type: %d",
|
||||
exception_index);
|
||||
run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
|
||||
return 0;
|
||||
}
|
||||
}
|
|
@@ -1,34 +0,0 @@
# SPDX-License-Identifier: GPL-2.0
#
# Makefile for Kernel-based Virtual Machine module, HYP part
#

ccflags-y += -fno-stack-protector -DDISABLE_BRANCH_PROFILING

KVM=../../../../virt/kvm

CFLAGS_ARMV7VE :=$(call cc-option, -march=armv7ve)

obj-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/hyp/vgic-v3-sr.o
obj-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/hyp/timer-sr.o
obj-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/hyp/aarch32.o

obj-$(CONFIG_KVM_ARM_HOST) += tlb.o
obj-$(CONFIG_KVM_ARM_HOST) += cp15-sr.o
obj-$(CONFIG_KVM_ARM_HOST) += vfp.o
obj-$(CONFIG_KVM_ARM_HOST) += banked-sr.o
CFLAGS_banked-sr.o += $(CFLAGS_ARMV7VE)

obj-$(CONFIG_KVM_ARM_HOST) += entry.o
obj-$(CONFIG_KVM_ARM_HOST) += hyp-entry.o
obj-$(CONFIG_KVM_ARM_HOST) += switch.o
CFLAGS_switch.o += $(CFLAGS_ARMV7VE)
obj-$(CONFIG_KVM_ARM_HOST) += s2-setup.o

# KVM code is run at a different exception code with a different map, so
# compiler instrumentation that inserts callbacks or checks into the code may
# cause crashes. Just disable it.
GCOV_PROFILE := n
KASAN_SANITIZE := n
UBSAN_SANITIZE := n
KCOV_INSTRUMENT := n

@ -1,70 +0,0 @@
|
|||
// SPDX-License-Identifier: GPL-2.0-only
|
||||
/*
|
||||
* Original code:
|
||||
* Copyright (C) 2012 - Virtual Open Systems and Columbia University
|
||||
* Author: Christoffer Dall <c.dall@virtualopensystems.com>
|
||||
*
|
||||
* Mostly rewritten in C by Marc Zyngier <marc.zyngier@arm.com>
|
||||
*/
|
||||
|
||||
#include <asm/kvm_hyp.h>
|
||||
|
||||
/*
|
||||
* gcc before 4.9 doesn't understand -march=armv7ve, so we have to
|
||||
* trick the assembler.
|
||||
*/
|
||||
__asm__(".arch_extension virt");
|
||||
|
||||
void __hyp_text __banked_save_state(struct kvm_cpu_context *ctxt)
|
||||
{
|
||||
ctxt->gp_regs.usr_regs.ARM_sp = read_special(SP_usr);
|
||||
ctxt->gp_regs.usr_regs.ARM_pc = read_special(ELR_hyp);
|
||||
ctxt->gp_regs.usr_regs.ARM_cpsr = read_special(SPSR);
|
||||
ctxt->gp_regs.KVM_ARM_SVC_sp = read_special(SP_svc);
|
||||
ctxt->gp_regs.KVM_ARM_SVC_lr = read_special(LR_svc);
|
||||
ctxt->gp_regs.KVM_ARM_SVC_spsr = read_special(SPSR_svc);
|
||||
ctxt->gp_regs.KVM_ARM_ABT_sp = read_special(SP_abt);
|
||||
ctxt->gp_regs.KVM_ARM_ABT_lr = read_special(LR_abt);
|
||||
ctxt->gp_regs.KVM_ARM_ABT_spsr = read_special(SPSR_abt);
|
||||
ctxt->gp_regs.KVM_ARM_UND_sp = read_special(SP_und);
|
||||
ctxt->gp_regs.KVM_ARM_UND_lr = read_special(LR_und);
|
||||
ctxt->gp_regs.KVM_ARM_UND_spsr = read_special(SPSR_und);
|
||||
ctxt->gp_regs.KVM_ARM_IRQ_sp = read_special(SP_irq);
|
||||
ctxt->gp_regs.KVM_ARM_IRQ_lr = read_special(LR_irq);
|
||||
ctxt->gp_regs.KVM_ARM_IRQ_spsr = read_special(SPSR_irq);
|
||||
ctxt->gp_regs.KVM_ARM_FIQ_r8 = read_special(R8_fiq);
|
||||
ctxt->gp_regs.KVM_ARM_FIQ_r9 = read_special(R9_fiq);
|
||||
ctxt->gp_regs.KVM_ARM_FIQ_r10 = read_special(R10_fiq);
|
||||
ctxt->gp_regs.KVM_ARM_FIQ_fp = read_special(R11_fiq);
|
||||
ctxt->gp_regs.KVM_ARM_FIQ_ip = read_special(R12_fiq);
|
||||
ctxt->gp_regs.KVM_ARM_FIQ_sp = read_special(SP_fiq);
|
||||
ctxt->gp_regs.KVM_ARM_FIQ_lr = read_special(LR_fiq);
|
||||
ctxt->gp_regs.KVM_ARM_FIQ_spsr = read_special(SPSR_fiq);
|
||||
}
|
||||
|
||||
void __hyp_text __banked_restore_state(struct kvm_cpu_context *ctxt)
|
||||
{
|
||||
write_special(ctxt->gp_regs.usr_regs.ARM_sp, SP_usr);
|
||||
write_special(ctxt->gp_regs.usr_regs.ARM_pc, ELR_hyp);
|
||||
write_special(ctxt->gp_regs.usr_regs.ARM_cpsr, SPSR_cxsf);
|
||||
write_special(ctxt->gp_regs.KVM_ARM_SVC_sp, SP_svc);
|
||||
write_special(ctxt->gp_regs.KVM_ARM_SVC_lr, LR_svc);
|
||||
write_special(ctxt->gp_regs.KVM_ARM_SVC_spsr, SPSR_svc);
|
||||
write_special(ctxt->gp_regs.KVM_ARM_ABT_sp, SP_abt);
|
||||
write_special(ctxt->gp_regs.KVM_ARM_ABT_lr, LR_abt);
|
||||
write_special(ctxt->gp_regs.KVM_ARM_ABT_spsr, SPSR_abt);
|
||||
write_special(ctxt->gp_regs.KVM_ARM_UND_sp, SP_und);
|
||||
write_special(ctxt->gp_regs.KVM_ARM_UND_lr, LR_und);
|
||||
write_special(ctxt->gp_regs.KVM_ARM_UND_spsr, SPSR_und);
|
||||
write_special(ctxt->gp_regs.KVM_ARM_IRQ_sp, SP_irq);
|
||||
write_special(ctxt->gp_regs.KVM_ARM_IRQ_lr, LR_irq);
|
||||
write_special(ctxt->gp_regs.KVM_ARM_IRQ_spsr, SPSR_irq);
|
||||
write_special(ctxt->gp_regs.KVM_ARM_FIQ_r8, R8_fiq);
|
||||
write_special(ctxt->gp_regs.KVM_ARM_FIQ_r9, R9_fiq);
|
||||
write_special(ctxt->gp_regs.KVM_ARM_FIQ_r10, R10_fiq);
|
||||
write_special(ctxt->gp_regs.KVM_ARM_FIQ_fp, R11_fiq);
|
||||
write_special(ctxt->gp_regs.KVM_ARM_FIQ_ip, R12_fiq);
|
||||
write_special(ctxt->gp_regs.KVM_ARM_FIQ_sp, SP_fiq);
|
||||
write_special(ctxt->gp_regs.KVM_ARM_FIQ_lr, LR_fiq);
|
||||
write_special(ctxt->gp_regs.KVM_ARM_FIQ_spsr, SPSR_fiq);
|
||||
}
|
|
@ -1,72 +0,0 @@
|
|||
// SPDX-License-Identifier: GPL-2.0-only
|
||||
/*
|
||||
* Original code:
|
||||
* Copyright (C) 2012 - Virtual Open Systems and Columbia University
|
||||
* Author: Christoffer Dall <c.dall@virtualopensystems.com>
|
||||
*
|
||||
* Mostly rewritten in C by Marc Zyngier <marc.zyngier@arm.com>
|
||||
*/
|
||||
|
||||
#include <asm/kvm_hyp.h>
|
||||
|
||||
static u64 *cp15_64(struct kvm_cpu_context *ctxt, int idx)
|
||||
{
|
||||
return (u64 *)(ctxt->cp15 + idx);
|
||||
}
|
||||
|
||||
void __hyp_text __sysreg_save_state(struct kvm_cpu_context *ctxt)
|
||||
{
|
||||
ctxt->cp15[c0_CSSELR] = read_sysreg(CSSELR);
|
||||
ctxt->cp15[c1_SCTLR] = read_sysreg(SCTLR);
|
||||
ctxt->cp15[c1_CPACR] = read_sysreg(CPACR);
|
||||
*cp15_64(ctxt, c2_TTBR0) = read_sysreg(TTBR0);
|
||||
*cp15_64(ctxt, c2_TTBR1) = read_sysreg(TTBR1);
|
||||
ctxt->cp15[c2_TTBCR] = read_sysreg(TTBCR);
|
||||
ctxt->cp15[c3_DACR] = read_sysreg(DACR);
|
||||
ctxt->cp15[c5_DFSR] = read_sysreg(DFSR);
|
||||
ctxt->cp15[c5_IFSR] = read_sysreg(IFSR);
|
||||
ctxt->cp15[c5_ADFSR] = read_sysreg(ADFSR);
|
||||
ctxt->cp15[c5_AIFSR] = read_sysreg(AIFSR);
|
||||
ctxt->cp15[c6_DFAR] = read_sysreg(DFAR);
|
||||
ctxt->cp15[c6_IFAR] = read_sysreg(IFAR);
|
||||
*cp15_64(ctxt, c7_PAR) = read_sysreg(PAR);
|
||||
ctxt->cp15[c10_PRRR] = read_sysreg(PRRR);
|
||||
ctxt->cp15[c10_NMRR] = read_sysreg(NMRR);
|
||||
ctxt->cp15[c10_AMAIR0] = read_sysreg(AMAIR0);
|
||||
ctxt->cp15[c10_AMAIR1] = read_sysreg(AMAIR1);
|
||||
ctxt->cp15[c12_VBAR] = read_sysreg(VBAR);
|
||||
ctxt->cp15[c13_CID] = read_sysreg(CID);
|
||||
ctxt->cp15[c13_TID_URW] = read_sysreg(TID_URW);
|
||||
ctxt->cp15[c13_TID_URO] = read_sysreg(TID_URO);
|
||||
ctxt->cp15[c13_TID_PRIV] = read_sysreg(TID_PRIV);
|
||||
ctxt->cp15[c14_CNTKCTL] = read_sysreg(CNTKCTL);
|
||||
}
|
||||
|
||||
void __hyp_text __sysreg_restore_state(struct kvm_cpu_context *ctxt)
|
||||
{
|
||||
write_sysreg(ctxt->cp15[c0_MPIDR], VMPIDR);
|
||||
write_sysreg(ctxt->cp15[c0_CSSELR], CSSELR);
|
||||
write_sysreg(ctxt->cp15[c1_SCTLR], SCTLR);
|
||||
write_sysreg(ctxt->cp15[c1_CPACR], CPACR);
|
||||
write_sysreg(*cp15_64(ctxt, c2_TTBR0), TTBR0);
|
||||
write_sysreg(*cp15_64(ctxt, c2_TTBR1), TTBR1);
|
||||
write_sysreg(ctxt->cp15[c2_TTBCR], TTBCR);
|
||||
write_sysreg(ctxt->cp15[c3_DACR], DACR);
|
||||
write_sysreg(ctxt->cp15[c5_DFSR], DFSR);
|
||||
write_sysreg(ctxt->cp15[c5_IFSR], IFSR);
|
||||
write_sysreg(ctxt->cp15[c5_ADFSR], ADFSR);
|
||||
write_sysreg(ctxt->cp15[c5_AIFSR], AIFSR);
|
||||
write_sysreg(ctxt->cp15[c6_DFAR], DFAR);
|
||||
write_sysreg(ctxt->cp15[c6_IFAR], IFAR);
|
||||
write_sysreg(*cp15_64(ctxt, c7_PAR), PAR);
|
||||
write_sysreg(ctxt->cp15[c10_PRRR], PRRR);
|
||||
write_sysreg(ctxt->cp15[c10_NMRR], NMRR);
|
||||
write_sysreg(ctxt->cp15[c10_AMAIR0], AMAIR0);
|
||||
write_sysreg(ctxt->cp15[c10_AMAIR1], AMAIR1);
|
||||
write_sysreg(ctxt->cp15[c12_VBAR], VBAR);
|
||||
write_sysreg(ctxt->cp15[c13_CID], CID);
|
||||
write_sysreg(ctxt->cp15[c13_TID_URW], TID_URW);
|
||||
write_sysreg(ctxt->cp15[c13_TID_URO], TID_URO);
|
||||
write_sysreg(ctxt->cp15[c13_TID_PRIV], TID_PRIV);
|
||||
write_sysreg(ctxt->cp15[c14_CNTKCTL], CNTKCTL);
|
||||
}
|
|
@ -1,121 +0,0 @@
|
|||
/* SPDX-License-Identifier: GPL-2.0-only */
|
||||
/*
|
||||
* Copyright (C) 2016 - ARM Ltd
|
||||
* Author: Marc Zyngier <marc.zyngier@arm.com>
|
||||
*/
|
||||
|
||||
#include <linux/linkage.h>
|
||||
#include <asm/asm-offsets.h>
|
||||
#include <asm/kvm_arm.h>
|
||||
#include <asm/kvm_asm.h>
|
||||
|
||||
.arch_extension virt
|
||||
|
||||
.text
|
||||
.pushsection .hyp.text, "ax"
|
||||
|
||||
#define USR_REGS_OFFSET (CPU_CTXT_GP_REGS + GP_REGS_USR)
|
||||
|
||||
/* int __guest_enter(struct kvm_vcpu *vcpu, struct kvm_cpu_context *host) */
|
||||
ENTRY(__guest_enter)
|
||||
@ Save host registers
|
||||
add r1, r1, #(USR_REGS_OFFSET + S_R4)
|
||||
stm r1!, {r4-r12}
|
||||
str lr, [r1, #4] @ Skip SP_usr (already saved)
|
||||
|
||||
@ Restore guest registers
|
||||
add r0, r0, #(VCPU_GUEST_CTXT + USR_REGS_OFFSET + S_R0)
|
||||
ldr lr, [r0, #S_LR]
|
||||
ldm r0, {r0-r12}
|
||||
|
||||
clrex
|
||||
eret
|
||||
ENDPROC(__guest_enter)
|
||||
|
||||
ENTRY(__guest_exit)
|
||||
/*
|
||||
* return convention:
|
||||
* guest r0, r1, r2 saved on the stack
|
||||
* r0: vcpu pointer
|
||||
* r1: exception code
|
||||
*/
|
||||
|
||||
add r2, r0, #(VCPU_GUEST_CTXT + USR_REGS_OFFSET + S_R3)
|
||||
stm r2!, {r3-r12}
|
||||
str lr, [r2, #4]
|
||||
add r2, r0, #(VCPU_GUEST_CTXT + USR_REGS_OFFSET + S_R0)
|
||||
pop {r3, r4, r5} @ r0, r1, r2
|
||||
stm r2, {r3-r5}
|
||||
|
||||
ldr r0, [r0, #VCPU_HOST_CTXT]
|
||||
add r0, r0, #(USR_REGS_OFFSET + S_R4)
|
||||
ldm r0!, {r4-r12}
|
||||
ldr lr, [r0, #4]
|
||||
|
||||
mov r0, r1
|
||||
mrs r1, SPSR
|
||||
mrs r2, ELR_hyp
|
||||
mrc p15, 4, r3, c5, c2, 0 @ HSR
|
||||
|
||||
/*
|
||||
* Force loads and stores to complete before unmasking aborts
|
||||
* and forcing the delivery of the exception. This gives us a
|
||||
* single instruction window, which the handler will try to
|
||||
* match.
|
||||
*/
|
||||
dsb sy
|
||||
cpsie a
|
||||
|
||||
.global abort_guest_exit_start
|
||||
abort_guest_exit_start:
|
||||
|
||||
isb
|
||||
|
||||
.global abort_guest_exit_end
|
||||
abort_guest_exit_end:
|
||||
|
||||
/*
|
||||
* If we took an abort, r0[31] will be set, and cmp will set
|
||||
* the N bit in PSTATE.
|
||||
*/
|
||||
cmp r0, #0
|
||||
msrmi SPSR_cxsf, r1
|
||||
msrmi ELR_hyp, r2
|
||||
mcrmi p15, 4, r3, c5, c2, 0 @ HSR
|
||||
|
||||
bx lr
|
||||
ENDPROC(__guest_exit)
|
||||
|
||||
/*
|
||||
* If VFPv3 support is not available, then we will not switch the VFP
|
||||
* registers; however cp10 and cp11 accesses will still trap and fallback
|
||||
* to the regular coprocessor emulation code, which currently will
|
||||
* inject an undefined exception to the guest.
|
||||
*/
|
||||
#ifdef CONFIG_VFPv3
|
||||
ENTRY(__vfp_guest_restore)
|
||||
push {r3, r4, lr}
|
||||
|
||||
@ NEON/VFP used. Turn on VFP access.
|
||||
mrc p15, 4, r1, c1, c1, 2 @ HCPTR
|
||||
bic r1, r1, #(HCPTR_TCP(10) | HCPTR_TCP(11))
|
||||
mcr p15, 4, r1, c1, c1, 2 @ HCPTR
|
||||
isb
|
||||
|
||||
@ Switch VFP/NEON hardware state to the guest's
|
||||
mov r4, r0
|
||||
ldr r0, [r0, #VCPU_HOST_CTXT]
|
||||
add r0, r0, #CPU_CTXT_VFP
|
||||
bl __vfp_save_state
|
||||
add r0, r4, #(VCPU_GUEST_CTXT + CPU_CTXT_VFP)
|
||||
bl __vfp_restore_state
|
||||
|
||||
pop {r3, r4, lr}
|
||||
pop {r0, r1, r2}
|
||||
clrex
|
||||
eret
|
||||
ENDPROC(__vfp_guest_restore)
|
||||
#endif
|
||||
|
||||
.popsection
|
||||
|
|
@ -1,295 +0,0 @@
|
|||
/* SPDX-License-Identifier: GPL-2.0-only */
|
||||
/*
|
||||
* Copyright (C) 2012 - Virtual Open Systems and Columbia University
|
||||
* Author: Christoffer Dall <c.dall@virtualopensystems.com>
|
||||
*/
|
||||
|
||||
#include <linux/arm-smccc.h>
|
||||
#include <linux/linkage.h>
|
||||
#include <asm/kvm_arm.h>
|
||||
#include <asm/kvm_asm.h>
|
||||
|
||||
.arch_extension virt
|
||||
|
||||
.text
|
||||
.pushsection .hyp.text, "ax"
|
||||
|
||||
.macro load_vcpu reg
|
||||
mrc p15, 4, \reg, c13, c0, 2 @ HTPIDR
|
||||
.endm
|
||||
|
||||
/********************************************************************
|
||||
* Hypervisor exception vector and handlers
|
||||
*
|
||||
*
|
||||
* The KVM/ARM Hypervisor ABI is defined as follows:
|
||||
*
|
||||
* Entry to Hyp mode from the host kernel will happen _only_ when an HVC
|
||||
* instruction is issued since all traps are disabled when running the host
|
||||
* kernel as per the Hyp-mode initialization at boot time.
|
||||
*
|
||||
* HVC instructions cause a trap to the vector page + offset 0x14 (see hyp_hvc
|
||||
* below) when the HVC instruction is called from SVC mode (i.e. a guest or the
|
||||
* host kernel) and they cause a trap to the vector page + offset 0x8 when HVC
|
||||
* instructions are called from within Hyp-mode.
|
||||
*
|
||||
* Hyp-ABI: Calling HYP-mode functions from host (in SVC mode):
|
||||
* Switching to Hyp mode is done through a simple HVC #0 instruction. The
|
||||
* exception vector code will check that the HVC comes from VMID==0.
|
||||
* - r0 contains a pointer to a HYP function
|
||||
* - r1, r2, and r3 contain arguments to the above function.
|
||||
* - The HYP function will be called with its arguments in r0, r1 and r2.
|
||||
* On HYP function return, we return directly to SVC.
|
||||
*
|
||||
* Note that the above is used to execute code in Hyp-mode from a host-kernel
|
||||
* point of view, and is a different concept from performing a world-switch and
|
||||
* executing guest code SVC mode (with a VMID != 0).
|
||||
*/
|
||||
|
||||
.align 5
|
||||
__kvm_hyp_vector:
|
||||
.global __kvm_hyp_vector
|
||||
|
||||
@ Hyp-mode exception vector
|
||||
W(b) hyp_reset
|
||||
W(b) hyp_undef
|
||||
W(b) hyp_svc
|
||||
W(b) hyp_pabt
|
||||
W(b) hyp_dabt
|
||||
W(b) hyp_hvc
|
||||
W(b) hyp_irq
|
||||
W(b) hyp_fiq
|
||||
|
||||
#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
|
||||
.align 5
|
||||
__kvm_hyp_vector_ic_inv:
|
||||
.global __kvm_hyp_vector_ic_inv
|
||||
|
||||
/*
|
||||
* We encode the exception entry in the bottom 3 bits of
|
||||
* SP, and we have to guarantee to be 8 bytes aligned.
|
||||
*/
|
||||
W(add) sp, sp, #1 /* Reset 7 */
|
||||
W(add) sp, sp, #1 /* Undef 6 */
|
||||
W(add) sp, sp, #1 /* Syscall 5 */
|
||||
W(add) sp, sp, #1 /* Prefetch abort 4 */
|
||||
W(add) sp, sp, #1 /* Data abort 3 */
|
||||
W(add) sp, sp, #1 /* HVC 2 */
|
||||
W(add) sp, sp, #1 /* IRQ 1 */
|
||||
W(nop) /* FIQ 0 */
|
||||
|
||||
mcr p15, 0, r0, c7, c5, 0 /* ICIALLU */
|
||||
isb
|
||||
|
||||
b decode_vectors
|
||||
|
||||
.align 5
|
||||
__kvm_hyp_vector_bp_inv:
|
||||
.global __kvm_hyp_vector_bp_inv
|
||||
|
||||
/*
|
||||
* We encode the exception entry in the bottom 3 bits of
|
||||
* SP, and we have to guarantee to be 8 bytes aligned.
|
||||
*/
|
||||
W(add) sp, sp, #1 /* Reset 7 */
|
||||
W(add) sp, sp, #1 /* Undef 6 */
|
||||
W(add) sp, sp, #1 /* Syscall 5 */
|
||||
W(add) sp, sp, #1 /* Prefetch abort 4 */
|
||||
W(add) sp, sp, #1 /* Data abort 3 */
|
||||
W(add) sp, sp, #1 /* HVC 2 */
|
||||
W(add) sp, sp, #1 /* IRQ 1 */
|
||||
W(nop) /* FIQ 0 */
|
||||
|
||||
mcr p15, 0, r0, c7, c5, 6 /* BPIALL */
|
||||
isb
|
||||
|
||||
decode_vectors:
|
||||
|
||||
#ifdef CONFIG_THUMB2_KERNEL
|
||||
/*
|
||||
* Yet another silly hack: Use VPIDR as a temp register.
|
||||
* Thumb2 is really a pain, as SP cannot be used with most
|
||||
* of the bitwise instructions. The vect_br macro ensures
|
||||
* things gets cleaned-up.
|
||||
*/
|
||||
mcr p15, 4, r0, c0, c0, 0 /* VPIDR */
|
||||
mov r0, sp
|
||||
and r0, r0, #7
|
||||
sub sp, sp, r0
|
||||
push {r1, r2}
|
||||
mov r1, r0
|
||||
mrc p15, 4, r0, c0, c0, 0 /* VPIDR */
|
||||
mrc p15, 0, r2, c0, c0, 0 /* MIDR */
|
||||
mcr p15, 4, r2, c0, c0, 0 /* VPIDR */
|
||||
#endif
|
||||
|
||||
.macro vect_br val, targ
|
||||
ARM( eor sp, sp, #\val )
|
||||
ARM( tst sp, #7 )
|
||||
ARM( eorne sp, sp, #\val )
|
||||
|
||||
THUMB( cmp r1, #\val )
|
||||
THUMB( popeq {r1, r2} )
|
||||
|
||||
beq \targ
|
||||
.endm
|
||||
|
||||
vect_br 0, hyp_fiq
|
||||
vect_br 1, hyp_irq
|
||||
vect_br 2, hyp_hvc
|
||||
vect_br 3, hyp_dabt
|
||||
vect_br 4, hyp_pabt
|
||||
vect_br 5, hyp_svc
|
||||
vect_br 6, hyp_undef
|
||||
vect_br 7, hyp_reset
|
||||
#endif
|
||||
|
||||
.macro invalid_vector label, cause
|
||||
.align
|
||||
\label: mov r0, #\cause
|
||||
b __hyp_panic
|
||||
.endm
|
||||
|
||||
invalid_vector hyp_reset ARM_EXCEPTION_RESET
|
||||
invalid_vector hyp_undef ARM_EXCEPTION_UNDEFINED
|
||||
invalid_vector hyp_svc ARM_EXCEPTION_SOFTWARE
|
||||
invalid_vector hyp_pabt ARM_EXCEPTION_PREF_ABORT
|
||||
invalid_vector hyp_fiq ARM_EXCEPTION_FIQ
|
||||
|
||||
ENTRY(__hyp_do_panic)
|
||||
mrs lr, cpsr
|
||||
bic lr, lr, #MODE_MASK
|
||||
orr lr, lr, #SVC_MODE
|
||||
THUMB( orr lr, lr, #PSR_T_BIT )
|
||||
msr spsr_cxsf, lr
|
||||
ldr lr, =panic
|
||||
msr ELR_hyp, lr
|
||||
ldr lr, =__kvm_call_hyp
|
||||
clrex
|
||||
eret
|
||||
ENDPROC(__hyp_do_panic)
|
||||
|
||||
hyp_hvc:
|
||||
/*
|
||||
* Getting here is either because of a trap from a guest,
|
||||
* or from executing HVC from the host kernel, which means
|
||||
* "do something in Hyp mode".
|
||||
*/
|
||||
push {r0, r1, r2}
|
||||
|
||||
@ Check syndrome register
|
||||
mrc p15, 4, r1, c5, c2, 0 @ HSR
|
||||
lsr r0, r1, #HSR_EC_SHIFT
|
||||
cmp r0, #HSR_EC_HVC
|
||||
bne guest_trap @ Not HVC instr.
|
||||
|
||||
/*
|
||||
* Let's check if the HVC came from VMID 0 and allow simple
|
||||
* switch to Hyp mode
|
||||
*/
|
||||
mrrc p15, 6, r0, r2, c2
|
||||
lsr r2, r2, #16
|
||||
and r2, r2, #0xff
|
||||
cmp r2, #0
|
||||
bne guest_hvc_trap @ Guest called HVC
|
||||
|
||||
/*
|
||||
* Getting here means host called HVC, we shift parameters and branch
|
||||
* to Hyp function.
|
||||
*/
|
||||
pop {r0, r1, r2}
|
||||
|
||||
/*
|
||||
* Check if we have a kernel function, which is guaranteed to be
|
||||
* bigger than the maximum hyp stub hypercall
|
||||
*/
|
||||
cmp r0, #HVC_STUB_HCALL_NR
|
||||
bhs 1f
|
||||
|
||||
/*
|
||||
* Not a kernel function, treat it as a stub hypercall.
|
||||
* Compute the physical address for __kvm_handle_stub_hvc
|
||||
* (as the code lives in the idmaped page) and branch there.
|
||||
* We hijack ip (r12) as a tmp register.
|
||||
*/
|
||||
push {r1}
|
||||
ldr r1, =kimage_voffset
|
||||
ldr r1, [r1]
|
||||
ldr ip, =__kvm_handle_stub_hvc
|
||||
sub ip, ip, r1
|
||||
pop {r1}
|
||||
|
||||
bx ip
|
||||
|
||||
1:
|
||||
/*
|
||||
* Pushing r2 here is just a way of keeping the stack aligned to
|
||||
* 8 bytes on any path that can trigger a HYP exception. Here,
|
||||
* we may well be about to jump into the guest, and the guest
|
||||
* exit would otherwise be badly decoded by our fancy
|
||||
* "decode-exception-without-a-branch" code...
|
||||
*/
|
||||
push {r2, lr}
|
||||
|
||||
mov lr, r0
|
||||
mov r0, r1
|
||||
mov r1, r2
|
||||
mov r2, r3
|
||||
|
||||
THUMB( orr lr, #1)
|
||||
blx lr @ Call the HYP function
|
||||
|
||||
pop {r2, lr}
|
||||
eret
|
||||
|
||||
guest_hvc_trap:
|
||||
movw r2, #:lower16:ARM_SMCCC_ARCH_WORKAROUND_1
|
||||
movt r2, #:upper16:ARM_SMCCC_ARCH_WORKAROUND_1
|
||||
ldr r0, [sp] @ Guest's r0
|
||||
teq r0, r2
|
||||
bne guest_trap
|
||||
add sp, sp, #12
|
||||
@ Returns:
|
||||
@ r0 = 0
|
||||
@ r1 = HSR value (perfectly predictable)
|
||||
@ r2 = ARM_SMCCC_ARCH_WORKAROUND_1
|
||||
mov r0, #0
|
||||
eret
|
||||
|
||||
guest_trap:
|
||||
load_vcpu r0 @ Load VCPU pointer to r0
|
||||
|
||||
#ifdef CONFIG_VFPv3
|
||||
@ Check for a VFP access
|
||||
lsr r1, r1, #HSR_EC_SHIFT
|
||||
cmp r1, #HSR_EC_CP_0_13
|
||||
beq __vfp_guest_restore
|
||||
#endif
|
||||
|
||||
mov r1, #ARM_EXCEPTION_HVC
|
||||
b __guest_exit
|
||||
|
||||
hyp_irq:
|
||||
push {r0, r1, r2}
|
||||
mov r1, #ARM_EXCEPTION_IRQ
|
||||
load_vcpu r0 @ Load VCPU pointer to r0
|
||||
b __guest_exit
|
||||
|
||||
hyp_dabt:
|
||||
push {r0, r1}
|
||||
mrs r0, ELR_hyp
|
||||
ldr r1, =abort_guest_exit_start
|
||||
THUMB( add r1, r1, #1)
|
||||
cmp r0, r1
|
||||
ldrne r1, =abort_guest_exit_end
|
||||
THUMB( addne r1, r1, #1)
|
||||
cmpne r0, r1
|
||||
pop {r0, r1}
|
||||
bne __hyp_panic
|
||||
|
||||
orr r0, r0, #(1 << ARM_EXIT_WITH_ABORT_BIT)
|
||||
eret
|
||||
|
||||
.ltorg
|
||||
|
||||
.popsection
|
|
@@ -1,22 +0,0 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2016 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 */

#include <linux/types.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_hyp.h>

void __hyp_text __init_stage2_translation(void)
{
u64 val;

val = read_sysreg(VTCR) & ~VTCR_MASK;

val |= read_sysreg(HTCR) & VTCR_HTCR_SH;
val |= KVM_VTCR_SL0 | KVM_VTCR_T0SZ | KVM_VTCR_S;

write_sysreg(val, VTCR);
}

@ -1,242 +0,0 @@
|
|||
// SPDX-License-Identifier: GPL-2.0-only
|
||||
/*
|
||||
* Copyright (C) 2015 - ARM Ltd
|
||||
* Author: Marc Zyngier <marc.zyngier@arm.com>
|
||||
*/
|
||||
#include <linux/jump_label.h>
|
||||
|
||||
#include <asm/kvm_asm.h>
|
||||
#include <asm/kvm_hyp.h>
|
||||
#include <asm/kvm_mmu.h>
|
||||
|
||||
__asm__(".arch_extension virt");
|
||||
|
||||
/*
|
||||
* Activate the traps, saving the host's fpexc register before
|
||||
* overwriting it. We'll restore it on VM exit.
|
||||
*/
|
||||
static void __hyp_text __activate_traps(struct kvm_vcpu *vcpu, u32 *fpexc_host)
|
||||
{
|
||||
u32 val;
|
||||
|
||||
/*
|
||||
* We are about to set HCPTR.TCP10/11 to trap all floating point
|
||||
* register accesses to HYP, however, the ARM ARM clearly states that
|
||||
* traps are only taken to HYP if the operation would not otherwise
|
||||
* trap to SVC. Therefore, always make sure that for 32-bit guests,
|
||||
* we set FPEXC.EN to prevent traps to SVC, when setting the TCP bits.
|
||||
*/
|
||||
val = read_sysreg(VFP_FPEXC);
|
||||
*fpexc_host = val;
|
||||
if (!(val & FPEXC_EN)) {
|
||||
write_sysreg(val | FPEXC_EN, VFP_FPEXC);
|
||||
isb();
|
||||
}
|
||||
|
||||
write_sysreg(vcpu->arch.hcr, HCR);
|
||||
/* Trap on AArch32 cp15 c15 accesses (EL1 or EL0) */
|
||||
write_sysreg(HSTR_T(15), HSTR);
|
||||
write_sysreg(HCPTR_TTA | HCPTR_TCP(10) | HCPTR_TCP(11), HCPTR);
|
||||
val = read_sysreg(HDCR);
|
||||
val |= HDCR_TPM | HDCR_TPMCR; /* trap performance monitors */
|
||||
val |= HDCR_TDRA | HDCR_TDOSA | HDCR_TDA; /* trap debug regs */
|
||||
write_sysreg(val, HDCR);
|
||||
}
|
||||
|
||||
static void __hyp_text __deactivate_traps(struct kvm_vcpu *vcpu)
|
||||
{
|
||||
u32 val;
|
||||
|
||||
/*
|
||||
* If we pended a virtual abort, preserve it until it gets
|
||||
* cleared. See B1.9.9 (Virtual Abort exception) for details,
|
||||
* but the crucial bit is the zeroing of HCR.VA in the
|
||||
* pseudocode.
|
||||
*/
|
||||
if (vcpu->arch.hcr & HCR_VA)
|
||||
vcpu->arch.hcr = read_sysreg(HCR);
|
||||
|
||||
write_sysreg(0, HCR);
|
||||
write_sysreg(0, HSTR);
|
||||
val = read_sysreg(HDCR);
|
||||
write_sysreg(val & ~(HDCR_TPM | HDCR_TPMCR), HDCR);
|
||||
write_sysreg(0, HCPTR);
|
||||
}
|
||||
|
||||
static void __hyp_text __activate_vm(struct kvm_vcpu *vcpu)
|
||||
{
|
||||
struct kvm *kvm = kern_hyp_va(vcpu->kvm);
|
||||
write_sysreg(kvm_get_vttbr(kvm), VTTBR);
|
||||
write_sysreg(vcpu->arch.midr, VPIDR);
|
||||
}
|
||||
|
||||
static void __hyp_text __deactivate_vm(struct kvm_vcpu *vcpu)
|
||||
{
|
||||
write_sysreg(0, VTTBR);
|
||||
write_sysreg(read_sysreg(MIDR), VPIDR);
|
||||
}
|
||||
|
||||
|
||||
static void __hyp_text __vgic_save_state(struct kvm_vcpu *vcpu)
|
||||
{
|
||||
if (static_branch_unlikely(&kvm_vgic_global_state.gicv3_cpuif)) {
|
||||
__vgic_v3_save_state(vcpu);
|
||||
__vgic_v3_deactivate_traps(vcpu);
|
||||
}
|
||||
}
|
||||
|
||||
static void __hyp_text __vgic_restore_state(struct kvm_vcpu *vcpu)
|
||||
{
|
||||
if (static_branch_unlikely(&kvm_vgic_global_state.gicv3_cpuif)) {
|
||||
__vgic_v3_activate_traps(vcpu);
|
||||
__vgic_v3_restore_state(vcpu);
|
||||
}
|
||||
}
|
||||
|
||||
static bool __hyp_text __populate_fault_info(struct kvm_vcpu *vcpu)
|
||||
{
|
||||
u32 hsr = read_sysreg(HSR);
|
||||
u8 ec = hsr >> HSR_EC_SHIFT;
|
||||
u32 hpfar, far;
|
||||
|
||||
vcpu->arch.fault.hsr = hsr;
|
||||
|
||||
if (ec == HSR_EC_IABT)
|
||||
far = read_sysreg(HIFAR);
|
||||
else if (ec == HSR_EC_DABT)
|
||||
far = read_sysreg(HDFAR);
|
||||
else
|
||||
return true;
|
||||
|
||||
/*
|
||||
* B3.13.5 Reporting exceptions taken to the Non-secure PL2 mode:
|
||||
*
|
||||
* Abort on the stage 2 translation for a memory access from a
|
||||
* Non-secure PL1 or PL0 mode:
|
||||
*
|
||||
* For any Access flag fault or Translation fault, and also for any
|
||||
* Permission fault on the stage 2 translation of a memory access
|
||||
* made as part of a translation table walk for a stage 1 translation,
|
||||
* the HPFAR holds the IPA that caused the fault. Otherwise, the HPFAR
|
||||
* is UNKNOWN.
|
||||
*/
|
||||
if (!(hsr & HSR_DABT_S1PTW) && (hsr & HSR_FSC_TYPE) == FSC_PERM) {
|
||||
u64 par, tmp;
|
||||
|
||||
par = read_sysreg(PAR);
|
||||
write_sysreg(far, ATS1CPR);
|
||||
isb();
|
||||
|
||||
tmp = read_sysreg(PAR);
|
||||
write_sysreg(par, PAR);
|
||||
|
||||
if (unlikely(tmp & 1))
|
||||
return false; /* Translation failed, back to guest */
|
||||
|
||||
hpfar = ((tmp >> 12) & ((1UL << 28) - 1)) << 4;
|
||||
} else {
|
||||
hpfar = read_sysreg(HPFAR);
|
||||
}
|
||||
|
||||
vcpu->arch.fault.hxfar = far;
|
||||
vcpu->arch.fault.hpfar = hpfar;
|
||||
return true;
|
||||
}
|
||||
|
||||
int __hyp_text __kvm_vcpu_run_nvhe(struct kvm_vcpu *vcpu)
|
||||
{
|
||||
struct kvm_cpu_context *host_ctxt;
|
||||
struct kvm_cpu_context *guest_ctxt;
|
||||
bool fp_enabled;
|
||||
u64 exit_code;
|
||||
u32 fpexc;
|
||||
|
||||
vcpu = kern_hyp_va(vcpu);
|
||||
write_sysreg(vcpu, HTPIDR);
|
||||
|
||||
host_ctxt = kern_hyp_va(vcpu->arch.host_cpu_context);
|
||||
guest_ctxt = &vcpu->arch.ctxt;
|
||||
|
||||
__sysreg_save_state(host_ctxt);
|
||||
__banked_save_state(host_ctxt);
|
||||
|
||||
__activate_traps(vcpu, &fpexc);
|
||||
__activate_vm(vcpu);
|
||||
|
||||
__vgic_restore_state(vcpu);
|
||||
__timer_enable_traps(vcpu);
|
||||
|
||||
__sysreg_restore_state(guest_ctxt);
|
||||
__banked_restore_state(guest_ctxt);
|
||||
|
||||
/* Jump in the fire! */
|
||||
again:
|
||||
exit_code = __guest_enter(vcpu, host_ctxt);
|
||||
/* And we're baaack! */
|
||||
|
||||
if (exit_code == ARM_EXCEPTION_HVC && !__populate_fault_info(vcpu))
|
||||
goto again;
|
||||
|
||||
fp_enabled = __vfp_enabled();
|
||||
|
||||
__banked_save_state(guest_ctxt);
|
||||
__sysreg_save_state(guest_ctxt);
|
||||
__timer_disable_traps(vcpu);
|
||||
|
||||
__vgic_save_state(vcpu);
|
||||
|
||||
__deactivate_traps(vcpu);
|
||||
__deactivate_vm(vcpu);
|
||||
|
||||
__banked_restore_state(host_ctxt);
|
||||
__sysreg_restore_state(host_ctxt);
|
||||
|
||||
if (fp_enabled) {
|
||||
__vfp_save_state(&guest_ctxt->vfp);
|
||||
__vfp_restore_state(&host_ctxt->vfp);
|
||||
}
|
||||
|
||||
write_sysreg(fpexc, VFP_FPEXC);
|
||||
|
||||
return exit_code;
|
||||
}
|
||||
|
||||
static const char * const __hyp_panic_string[] = {
|
||||
[ARM_EXCEPTION_RESET] = "\nHYP panic: RST PC:%08x CPSR:%08x",
|
||||
[ARM_EXCEPTION_UNDEFINED] = "\nHYP panic: UNDEF PC:%08x CPSR:%08x",
|
||||
[ARM_EXCEPTION_SOFTWARE] = "\nHYP panic: SVC PC:%08x CPSR:%08x",
|
||||
[ARM_EXCEPTION_PREF_ABORT] = "\nHYP panic: PABRT PC:%08x CPSR:%08x",
|
||||
[ARM_EXCEPTION_DATA_ABORT] = "\nHYP panic: DABRT PC:%08x ADDR:%08x",
|
||||
[ARM_EXCEPTION_IRQ] = "\nHYP panic: IRQ PC:%08x CPSR:%08x",
|
||||
[ARM_EXCEPTION_FIQ] = "\nHYP panic: FIQ PC:%08x CPSR:%08x",
|
||||
[ARM_EXCEPTION_HVC] = "\nHYP panic: HVC PC:%08x CPSR:%08x",
|
||||
};
|
||||
|
||||
void __hyp_text __noreturn __hyp_panic(int cause)
|
||||
{
|
||||
u32 elr = read_special(ELR_hyp);
|
||||
u32 val;
|
||||
|
||||
if (cause == ARM_EXCEPTION_DATA_ABORT)
|
||||
val = read_sysreg(HDFAR);
|
||||
else
|
||||
val = read_special(SPSR);
|
||||
|
||||
if (read_sysreg(VTTBR)) {
|
||||
struct kvm_vcpu *vcpu;
|
||||
struct kvm_cpu_context *host_ctxt;
|
||||
|
||||
vcpu = (struct kvm_vcpu *)read_sysreg(HTPIDR);
|
||||
host_ctxt = kern_hyp_va(vcpu->arch.host_cpu_context);
|
||||
__timer_disable_traps(vcpu);
|
||||
__deactivate_traps(vcpu);
|
||||
__deactivate_vm(vcpu);
|
||||
__banked_restore_state(host_ctxt);
|
||||
__sysreg_restore_state(host_ctxt);
|
||||
}
|
||||
|
||||
/* Call panic for real */
|
||||
__hyp_do_panic(__hyp_panic_string[cause], elr, val);
|
||||
|
||||
unreachable();
|
||||
}
|
|
@@ -1,68 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Original code:
- * Copyright (C) 2012 - Virtual Open Systems and Columbia University
- * Author: Christoffer Dall <c.dall@virtualopensystems.com>
- *
- * Mostly rewritten in C by Marc Zyngier <marc.zyngier@arm.com>
- */
-
-#include <asm/kvm_hyp.h>
-#include <asm/kvm_mmu.h>
-
-/**
- * Flush per-VMID TLBs
- *
- * __kvm_tlb_flush_vmid(struct kvm *kvm);
- *
- * We rely on the hardware to broadcast the TLB invalidation to all CPUs
- * inside the inner-shareable domain (which is the case for all v7
- * implementations). If we come across a non-IS SMP implementation, we'll
- * have to use an IPI based mechanism. Until then, we stick to the simple
- * hardware assisted version.
- *
- * As v7 does not support flushing per IPA, just nuke the whole TLB
- * instead, ignoring the ipa value.
- */
-void __hyp_text __kvm_tlb_flush_vmid(struct kvm *kvm)
-{
-	dsb(ishst);
-
-	/* Switch to requested VMID */
-	kvm = kern_hyp_va(kvm);
-	write_sysreg(kvm_get_vttbr(kvm), VTTBR);
-	isb();
-
-	write_sysreg(0, TLBIALLIS);
-	dsb(ish);
-	isb();
-
-	write_sysreg(0, VTTBR);
-}
-
-void __hyp_text __kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa)
-{
-	__kvm_tlb_flush_vmid(kvm);
-}
-
-void __hyp_text __kvm_tlb_flush_local_vmid(struct kvm_vcpu *vcpu)
-{
-	struct kvm *kvm = kern_hyp_va(kern_hyp_va(vcpu)->kvm);
-
-	/* Switch to requested VMID */
-	write_sysreg(kvm_get_vttbr(kvm), VTTBR);
-	isb();
-
-	write_sysreg(0, TLBIALL);
-	dsb(nsh);
-	isb();
-
-	write_sysreg(0, VTTBR);
-}
-
-void __hyp_text __kvm_flush_vm_context(void)
-{
-	write_sysreg(0, TLBIALLNSNHIS);
-	write_sysreg(0, ICIALLUIS);
-	dsb(ish);
-}
@@ -1,57 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright (C) 2012 - Virtual Open Systems and Columbia University
- * Author: Christoffer Dall <c.dall@virtualopensystems.com>
- */
-
-#include <linux/linkage.h>
-#include <asm/vfpmacros.h>
-
-	.text
-	.pushsection	.hyp.text, "ax"
-
-/* void __vfp_save_state(struct vfp_hard_struct *vfp); */
-ENTRY(__vfp_save_state)
-	push	{r4, r5}
-	VFPFMRX	r1, FPEXC
-
-	@ Make sure *really* VFP is enabled so we can touch the registers.
-	orr	r5, r1, #FPEXC_EN
-	tst	r5, #FPEXC_EX		@ Check for VFP Subarchitecture
-	bic	r5, r5, #FPEXC_EX	@ FPEXC_EX disable
-	VFPFMXR	FPEXC, r5
-	isb
-
-	VFPFMRX	r2, FPSCR
-	beq	1f
-
-	@ If FPEXC_EX is 0, then FPINST/FPINST2 reads are upredictable, so
-	@ we only need to save them if FPEXC_EX is set.
-	VFPFMRX	r3, FPINST
-	tst	r5, #FPEXC_FP2V
-	VFPFMRX	r4, FPINST2, ne		@ vmrsne
-1:
-	VFPFSTMIA r0, r5		@ Save VFP registers
-	stm	r0, {r1-r4}		@ Save FPEXC, FPSCR, FPINST, FPINST2
-	pop	{r4, r5}
-	bx	lr
-ENDPROC(__vfp_save_state)
-
-/* void __vfp_restore_state(struct vfp_hard_struct *vfp);
- * Assume FPEXC_EN is on and FPEXC_EX is off */
-ENTRY(__vfp_restore_state)
-	VFPFLDMIA r0, r1		@ Load VFP registers
-	ldm	r0, {r0-r3}		@ Load FPEXC, FPSCR, FPINST, FPINST2
-
-	VFPFMXR	FPSCR, r1
-	tst	r0, #FPEXC_EX		@ Check for VFP Subarchitecture
-	beq	1f
-	VFPFMXR	FPINST, r2
-	tst	r0, #FPEXC_FP2V
-	VFPFMXR	FPINST2, r3, ne
-1:
-	VFPFMXR	FPEXC, r0		@ FPEXC	(last, in case !EN)
-	bx	lr
-ENDPROC(__vfp_restore_state)
-
-	.popsection
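For context, the (likewise removed) 32-bit world-switch code earlier in this diff drove these two helpers on guest exit roughly as follows; this is a condensed restatement of the switch.c hunk above, not new code:

	fp_enabled = __vfp_enabled();

	if (fp_enabled) {
		__vfp_save_state(&guest_ctxt->vfp);	/* stash the guest's VFP registers */
		__vfp_restore_state(&host_ctxt->vfp);	/* bring the host's back */
	}
	write_sysreg(fpexc, VFP_FPEXC);			/* restore the host FPEXC saved at entry */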
@ -1,157 +0,0 @@
|
|||
/* SPDX-License-Identifier: GPL-2.0-only */
|
||||
/*
|
||||
* Copyright (C) 2012 - Virtual Open Systems and Columbia University
|
||||
* Author: Christoffer Dall <c.dall@virtualopensystems.com>
|
||||
*/
|
||||
|
||||
#include <linux/linkage.h>
|
||||
#include <asm/assembler.h>
|
||||
#include <asm/unified.h>
|
||||
#include <asm/asm-offsets.h>
|
||||
#include <asm/kvm_asm.h>
|
||||
#include <asm/kvm_arm.h>
|
||||
#include <asm/kvm_mmu.h>
|
||||
#include <asm/virt.h>
|
||||
|
||||
/********************************************************************
|
||||
* Hypervisor initialization
|
||||
* - should be called with:
|
||||
* r0 = top of Hyp stack (kernel VA)
|
||||
* r1 = pointer to hyp vectors
|
||||
* r2,r3 = Hypervisor pgd pointer
|
||||
*
|
||||
* The init scenario is:
|
||||
* - We jump in HYP with 3 parameters: runtime HYP pgd, runtime stack,
|
||||
* runtime vectors
|
||||
* - Invalidate TLBs
|
||||
* - Set stack and vectors
|
||||
* - Setup the page tables
|
||||
* - Enable the MMU
|
||||
* - Profit! (or eret, if you only care about the code).
|
||||
*
|
||||
* Another possibility is to get a HYP stub hypercall.
|
||||
* We discriminate between the two by checking if r0 contains a value
|
||||
* that is less than HVC_STUB_HCALL_NR.
|
||||
*/
|
||||
|
||||
.text
|
||||
.pushsection .hyp.idmap.text,"ax"
|
||||
.align 5
|
||||
__kvm_hyp_init:
|
||||
.globl __kvm_hyp_init
|
||||
|
||||
@ Hyp-mode exception vector
|
||||
W(b) .
|
||||
W(b) .
|
||||
W(b) .
|
||||
W(b) .
|
||||
W(b) .
|
||||
W(b) __do_hyp_init
|
||||
W(b) .
|
||||
W(b) .
|
||||
|
||||
__do_hyp_init:
|
||||
@ Check for a stub hypercall
|
||||
cmp r0, #HVC_STUB_HCALL_NR
|
||||
blo __kvm_handle_stub_hvc
|
||||
|
||||
@ Set stack pointer
|
||||
mov sp, r0
|
||||
|
||||
@ Set HVBAR to point to the HYP vectors
|
||||
mcr p15, 4, r1, c12, c0, 0 @ HVBAR
|
||||
|
||||
@ Set the HTTBR to point to the hypervisor PGD pointer passed
|
||||
mcrr p15, 4, rr_lo_hi(r2, r3), c2
|
||||
|
||||
@ Set the HTCR and VTCR to the same shareability and cacheability
|
||||
@ settings as the non-secure TTBCR and with T0SZ == 0.
|
||||
mrc p15, 4, r0, c2, c0, 2 @ HTCR
|
||||
ldr r2, =HTCR_MASK
|
||||
bic r0, r0, r2
|
||||
mrc p15, 0, r1, c2, c0, 2 @ TTBCR
|
||||
and r1, r1, #(HTCR_MASK & ~TTBCR_T0SZ)
|
||||
orr r0, r0, r1
|
||||
mcr p15, 4, r0, c2, c0, 2 @ HTCR
|
||||
|
||||
@ Use the same memory attributes for hyp. accesses as the kernel
|
||||
@ (copy MAIRx ro HMAIRx).
|
||||
mrc p15, 0, r0, c10, c2, 0
|
||||
mcr p15, 4, r0, c10, c2, 0
|
||||
mrc p15, 0, r0, c10, c2, 1
|
||||
mcr p15, 4, r0, c10, c2, 1
|
||||
|
||||
@ Invalidate the stale TLBs from Bootloader
|
||||
mcr p15, 4, r0, c8, c7, 0 @ TLBIALLH
|
||||
dsb ish
|
||||
|
||||
@ Set the HSCTLR to:
|
||||
@ - ARM/THUMB exceptions: Kernel config (Thumb-2 kernel)
|
||||
@ - Endianness: Kernel config
|
||||
@ - Fast Interrupt Features: Kernel config
|
||||
@ - Write permission implies XN: disabled
|
||||
@ - Instruction cache: enabled
|
||||
@ - Data/Unified cache: enabled
|
||||
@ - MMU: enabled (this code must be run from an identity mapping)
|
||||
mrc p15, 4, r0, c1, c0, 0 @ HSCR
|
||||
ldr r2, =HSCTLR_MASK
|
||||
bic r0, r0, r2
|
||||
mrc p15, 0, r1, c1, c0, 0 @ SCTLR
|
||||
ldr r2, =(HSCTLR_EE | HSCTLR_FI | HSCTLR_I | HSCTLR_C)
|
||||
and r1, r1, r2
|
||||
ARM( ldr r2, =(HSCTLR_M) )
|
||||
THUMB( ldr r2, =(HSCTLR_M | HSCTLR_TE) )
|
||||
orr r1, r1, r2
|
||||
orr r0, r0, r1
|
||||
mcr p15, 4, r0, c1, c0, 0 @ HSCR
|
||||
isb
|
||||
|
||||
eret
|
||||
|
||||
ENTRY(__kvm_handle_stub_hvc)
|
||||
cmp r0, #HVC_SOFT_RESTART
|
||||
bne 1f
|
||||
|
||||
/* The target is expected in r1 */
|
||||
msr ELR_hyp, r1
|
||||
mrs r0, cpsr
|
||||
bic r0, r0, #MODE_MASK
|
||||
orr r0, r0, #HYP_MODE
|
||||
THUMB( orr r0, r0, #PSR_T_BIT )
|
||||
msr spsr_cxsf, r0
|
||||
b reset
|
||||
|
||||
1: cmp r0, #HVC_RESET_VECTORS
|
||||
bne 1f
|
||||
|
||||
reset:
|
||||
/* We're now in idmap, disable MMU */
|
||||
mrc p15, 4, r1, c1, c0, 0 @ HSCTLR
|
||||
ldr r0, =(HSCTLR_M | HSCTLR_A | HSCTLR_C | HSCTLR_I)
|
||||
bic r1, r1, r0
|
||||
mcr p15, 4, r1, c1, c0, 0 @ HSCTLR
|
||||
|
||||
/*
|
||||
* Install stub vectors, using ardb's VA->PA trick.
|
||||
*/
|
||||
0: adr r0, 0b @ PA(0)
|
||||
movw r1, #:lower16:__hyp_stub_vectors - 0b @ VA(stub) - VA(0)
|
||||
movt r1, #:upper16:__hyp_stub_vectors - 0b
|
||||
add r1, r1, r0 @ PA(stub)
|
||||
mcr p15, 4, r1, c12, c0, 0 @ HVBAR
|
||||
b exit
|
||||
|
||||
1: ldr r0, =HVC_STUB_ERR
|
||||
eret
|
||||
|
||||
exit:
|
||||
mov r0, #0
|
||||
eret
|
||||
ENDPROC(__kvm_handle_stub_hvc)
|
||||
|
||||
.ltorg
|
||||
|
||||
.globl __kvm_hyp_init_end
|
||||
__kvm_hyp_init_end:
|
||||
|
||||
.popsection
|
|
@@ -1,36 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright (C) 2012 - Virtual Open Systems and Columbia University
- * Author: Christoffer Dall <c.dall@virtualopensystems.com>
- */
-
-#include <linux/linkage.h>
-
-	.text
-
-/********************************************************************
- * Call function in Hyp mode
- *
- *
- * unsigned long kvm_call_hyp(void *hypfn, ...);
- *
- * This is not really a variadic function in the classic C-way and care must
- * be taken when calling this to ensure parameters are passed in registers
- * only, since the stack will change between the caller and the callee.
- *
- * Call the function with the first argument containing a pointer to the
- * function you wish to call in Hyp mode, and subsequent arguments will be
- * passed as r0, r1, and r2 (a maximum of 3 arguments in addition to the
- * function pointer can be passed).  The function being called must be mapped
- * in Hyp mode (see init_hyp_mode in arch/arm/kvm/arm.c).  Return values are
- * passed in r0 (strictly 32bit).
- *
- * The calling convention follows the standard AAPCS:
- *   r0 - r3: caller save
- *   r12:     caller save
- *   rest:    callee save
- */
-ENTRY(__kvm_call_hyp)
-	hvc	#0
-	bx	lr
-ENDPROC(__kvm_call_hyp)
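The calling convention spelled out above is easiest to see at a call site. A couple of illustrative invocations, using HYP functions that appear elsewhere in this diff (the host-side wrapper details are not reproduced here, so treat this as a sketch rather than the exact removed code):

	kvm_call_hyp(__kvm_flush_vm_context);		/* no arguments beyond the function pointer */
	kvm_call_hyp(__kvm_tlb_flush_vmid, kvm);	/* "kvm" travels to HYP in r0 */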
@@ -1,16 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * irq.h: in kernel interrupt controller related definitions
- * Copyright (c) 2016 Red Hat, Inc.
- *
- * This header is included by irqchip.c. However, on ARM, interrupt
- * controller declarations are located in include/kvm/arm_vgic.h since
- * they are mostly shared between arm and arm64.
- */
-
-#ifndef __IRQ_H
-#define __IRQ_H
-
-#include <kvm/arm_vgic.h>
-
-#endif
@ -1,86 +0,0 @@
|
|||
// SPDX-License-Identifier: GPL-2.0-only
|
||||
/*
|
||||
* Copyright (C) 2012 - Virtual Open Systems and Columbia University
|
||||
* Author: Christoffer Dall <c.dall@virtualopensystems.com>
|
||||
*/
|
||||
#include <linux/compiler.h>
|
||||
#include <linux/errno.h>
|
||||
#include <linux/sched.h>
|
||||
#include <linux/kvm_host.h>
|
||||
#include <linux/kvm.h>
|
||||
|
||||
#include <asm/unified.h>
|
||||
#include <asm/ptrace.h>
|
||||
#include <asm/cputype.h>
|
||||
#include <asm/kvm_arm.h>
|
||||
#include <asm/kvm_coproc.h>
|
||||
#include <asm/kvm_emulate.h>
|
||||
|
||||
#include <kvm/arm_arch_timer.h>
|
||||
|
||||
/******************************************************************************
|
||||
* Cortex-A15 and Cortex-A7 Reset Values
|
||||
*/
|
||||
|
||||
static struct kvm_regs cortexa_regs_reset = {
|
||||
.usr_regs.ARM_cpsr = SVC_MODE | PSR_A_BIT | PSR_I_BIT | PSR_F_BIT,
|
||||
};
|
||||
|
||||
|
||||
/*******************************************************************************
|
||||
* Exported reset function
|
||||
*/
|
||||
|
||||
/**
|
||||
* kvm_reset_vcpu - sets core registers and cp15 registers to reset value
|
||||
* @vcpu: The VCPU pointer
|
||||
*
|
||||
* This function finds the right table above and sets the registers on the
|
||||
* virtual CPU struct to their architecturally defined reset values.
|
||||
*/
|
||||
int kvm_reset_vcpu(struct kvm_vcpu *vcpu)
|
||||
{
|
||||
struct kvm_regs *reset_regs;
|
||||
|
||||
switch (vcpu->arch.target) {
|
||||
case KVM_ARM_TARGET_CORTEX_A7:
|
||||
case KVM_ARM_TARGET_CORTEX_A15:
|
||||
reset_regs = &cortexa_regs_reset;
|
||||
vcpu->arch.midr = read_cpuid_id();
|
||||
break;
|
||||
default:
|
||||
return -ENODEV;
|
||||
}
|
||||
|
||||
/* Reset core registers */
|
||||
memcpy(&vcpu->arch.ctxt.gp_regs, reset_regs, sizeof(vcpu->arch.ctxt.gp_regs));
|
||||
|
||||
/* Reset CP15 registers */
|
||||
kvm_reset_coprocs(vcpu);
|
||||
|
||||
/*
|
||||
* Additional reset state handling that PSCI may have imposed on us.
|
||||
* Must be done after all the sys_reg reset.
|
||||
*/
|
||||
if (READ_ONCE(vcpu->arch.reset_state.reset)) {
|
||||
unsigned long target_pc = vcpu->arch.reset_state.pc;
|
||||
|
||||
/* Gracefully handle Thumb2 entry point */
|
||||
if (target_pc & 1) {
|
||||
target_pc &= ~1UL;
|
||||
vcpu_set_thumb(vcpu);
|
||||
}
|
||||
|
||||
/* Propagate caller endianness */
|
||||
if (vcpu->arch.reset_state.be)
|
||||
kvm_vcpu_set_be(vcpu);
|
||||
|
||||
*vcpu_pc(vcpu) = target_pc;
|
||||
vcpu_set_reg(vcpu, 0, vcpu->arch.reset_state.r0);
|
||||
|
||||
vcpu->arch.reset_state.reset = false;
|
||||
}
|
||||
|
||||
/* Reset arch_timer context */
|
||||
return kvm_timer_vcpu_reset(vcpu);
|
||||
}
|
|
@ -1,86 +0,0 @@
|
|||
/* SPDX-License-Identifier: GPL-2.0 */
|
||||
#if !defined(_TRACE_ARM_KVM_H) || defined(TRACE_HEADER_MULTI_READ)
|
||||
#define _TRACE_ARM_KVM_H
|
||||
|
||||
#include <linux/tracepoint.h>
|
||||
|
||||
#undef TRACE_SYSTEM
|
||||
#define TRACE_SYSTEM kvm
|
||||
|
||||
/* Architecturally implementation defined CP15 register access */
|
||||
TRACE_EVENT(kvm_emulate_cp15_imp,
|
||||
TP_PROTO(unsigned long Op1, unsigned long Rt1, unsigned long CRn,
|
||||
unsigned long CRm, unsigned long Op2, bool is_write),
|
||||
TP_ARGS(Op1, Rt1, CRn, CRm, Op2, is_write),
|
||||
|
||||
TP_STRUCT__entry(
|
||||
__field( unsigned int, Op1 )
|
||||
__field( unsigned int, Rt1 )
|
||||
__field( unsigned int, CRn )
|
||||
__field( unsigned int, CRm )
|
||||
__field( unsigned int, Op2 )
|
||||
__field( bool, is_write )
|
||||
),
|
||||
|
||||
TP_fast_assign(
|
||||
__entry->is_write = is_write;
|
||||
__entry->Op1 = Op1;
|
||||
__entry->Rt1 = Rt1;
|
||||
__entry->CRn = CRn;
|
||||
__entry->CRm = CRm;
|
||||
__entry->Op2 = Op2;
|
||||
),
|
||||
|
||||
TP_printk("Implementation defined CP15: %s\tp15, %u, r%u, c%u, c%u, %u",
|
||||
(__entry->is_write) ? "mcr" : "mrc",
|
||||
__entry->Op1, __entry->Rt1, __entry->CRn,
|
||||
__entry->CRm, __entry->Op2)
|
||||
);
|
||||
|
||||
TRACE_EVENT(kvm_wfx,
|
||||
TP_PROTO(unsigned long vcpu_pc, bool is_wfe),
|
||||
TP_ARGS(vcpu_pc, is_wfe),
|
||||
|
||||
TP_STRUCT__entry(
|
||||
__field( unsigned long, vcpu_pc )
|
||||
__field( bool, is_wfe )
|
||||
),
|
||||
|
||||
TP_fast_assign(
|
||||
__entry->vcpu_pc = vcpu_pc;
|
||||
__entry->is_wfe = is_wfe;
|
||||
),
|
||||
|
||||
TP_printk("guest executed wf%c at: 0x%08lx",
|
||||
__entry->is_wfe ? 'e' : 'i', __entry->vcpu_pc)
|
||||
);
|
||||
|
||||
TRACE_EVENT(kvm_hvc,
|
||||
TP_PROTO(unsigned long vcpu_pc, unsigned long r0, unsigned long imm),
|
||||
TP_ARGS(vcpu_pc, r0, imm),
|
||||
|
||||
TP_STRUCT__entry(
|
||||
__field( unsigned long, vcpu_pc )
|
||||
__field( unsigned long, r0 )
|
||||
__field( unsigned long, imm )
|
||||
),
|
||||
|
||||
TP_fast_assign(
|
||||
__entry->vcpu_pc = vcpu_pc;
|
||||
__entry->r0 = r0;
|
||||
__entry->imm = imm;
|
||||
),
|
||||
|
||||
TP_printk("HVC at 0x%08lx (r0: 0x%08lx, imm: 0x%lx",
|
||||
__entry->vcpu_pc, __entry->r0, __entry->imm)
|
||||
);
|
||||
|
||||
#endif /* _TRACE_ARM_KVM_H */
|
||||
|
||||
#undef TRACE_INCLUDE_PATH
|
||||
#define TRACE_INCLUDE_PATH .
|
||||
#undef TRACE_INCLUDE_FILE
|
||||
#define TRACE_INCLUDE_FILE trace
|
||||
|
||||
/* This part must be outside protection */
|
||||
#include <trace/define_trace.h>
|
|
@@ -1,27 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * VGIC system registers handling functions for AArch32 mode
- */
-
-#include <linux/kvm.h>
-#include <linux/kvm_host.h>
-#include <asm/kvm_emulate.h>
-#include "vgic.h"
-
-int vgic_v3_has_cpu_sysregs_attr(struct kvm_vcpu *vcpu, bool is_write, u64 id,
-				 u64 *reg)
-{
-	/*
-	 * TODO: Implement for AArch32
-	 */
-	return -ENXIO;
-}
-
-int vgic_v3_cpu_sysregs_uaccess(struct kvm_vcpu *vcpu, bool is_write, u64 id,
-				u64 *reg)
-{
-	/*
-	 * TODO: Implement for AArch32
-	 */
-	return -ENXIO;
-}
@@ -21,7 +21,7 @@ menuconfig ARCH_EXYNOS
 	select EXYNOS_SROM
 	select EXYNOS_PM_DOMAINS if PM_GENERIC_DOMAINS
 	select GPIOLIB
-	select HAVE_ARM_ARCH_TIMER if ARCH_EXYNOS5 && VIRTUALIZATION
+	select HAVE_ARM_ARCH_TIMER if ARCH_EXYNOS5
 	select HAVE_ARM_SCU if SMP
 	select HAVE_S3C2410_I2C if I2C
 	select HAVE_S3C2410_WATCHDOG if WATCHDOG
@ -63,9 +63,6 @@ static unsigned int cachepolicy __initdata = CPOLICY_WRITEBACK;
|
|||
static unsigned int ecc_mask __initdata = 0;
|
||||
pgprot_t pgprot_user;
|
||||
pgprot_t pgprot_kernel;
|
||||
pgprot_t pgprot_hyp_device;
|
||||
pgprot_t pgprot_s2;
|
||||
pgprot_t pgprot_s2_device;
|
||||
|
||||
EXPORT_SYMBOL(pgprot_user);
|
||||
EXPORT_SYMBOL(pgprot_kernel);
|
||||
|
@ -75,15 +72,8 @@ struct cachepolicy {
|
|||
unsigned int cr_mask;
|
||||
pmdval_t pmd;
|
||||
pteval_t pte;
|
||||
pteval_t pte_s2;
|
||||
};
|
||||
|
||||
#ifdef CONFIG_ARM_LPAE
|
||||
#define s2_policy(policy) policy
|
||||
#else
|
||||
#define s2_policy(policy) 0
|
||||
#endif
|
||||
|
||||
unsigned long kimage_voffset __ro_after_init;
|
||||
|
||||
static struct cachepolicy cache_policies[] __initdata = {
|
||||
|
@ -92,31 +82,26 @@ static struct cachepolicy cache_policies[] __initdata = {
|
|||
.cr_mask = CR_W|CR_C,
|
||||
.pmd = PMD_SECT_UNCACHED,
|
||||
.pte = L_PTE_MT_UNCACHED,
|
||||
.pte_s2 = s2_policy(L_PTE_S2_MT_UNCACHED),
|
||||
}, {
|
||||
.policy = "buffered",
|
||||
.cr_mask = CR_C,
|
||||
.pmd = PMD_SECT_BUFFERED,
|
||||
.pte = L_PTE_MT_BUFFERABLE,
|
||||
.pte_s2 = s2_policy(L_PTE_S2_MT_UNCACHED),
|
||||
}, {
|
||||
.policy = "writethrough",
|
||||
.cr_mask = 0,
|
||||
.pmd = PMD_SECT_WT,
|
||||
.pte = L_PTE_MT_WRITETHROUGH,
|
||||
.pte_s2 = s2_policy(L_PTE_S2_MT_WRITETHROUGH),
|
||||
}, {
|
||||
.policy = "writeback",
|
||||
.cr_mask = 0,
|
||||
.pmd = PMD_SECT_WB,
|
||||
.pte = L_PTE_MT_WRITEBACK,
|
||||
.pte_s2 = s2_policy(L_PTE_S2_MT_WRITEBACK),
|
||||
}, {
|
||||
.policy = "writealloc",
|
||||
.cr_mask = 0,
|
||||
.pmd = PMD_SECT_WBWA,
|
||||
.pte = L_PTE_MT_WRITEALLOC,
|
||||
.pte_s2 = s2_policy(L_PTE_S2_MT_WRITEBACK),
|
||||
}
|
||||
};
|
||||
|
||||
|
@ -246,9 +231,6 @@ static struct mem_type mem_types[] __ro_after_init = {
|
|||
[MT_DEVICE] = { /* Strongly ordered / ARMv6 shared device */
|
||||
.prot_pte = PROT_PTE_DEVICE | L_PTE_MT_DEV_SHARED |
|
||||
L_PTE_SHARED,
|
||||
.prot_pte_s2 = s2_policy(PROT_PTE_S2_DEVICE) |
|
||||
s2_policy(L_PTE_S2_MT_DEV_SHARED) |
|
||||
L_PTE_SHARED,
|
||||
.prot_l1 = PMD_TYPE_TABLE,
|
||||
.prot_sect = PROT_SECT_DEVICE | PMD_SECT_S,
|
||||
.domain = DOMAIN_IO,
|
||||
|
@ -434,7 +416,6 @@ static void __init build_mem_type_table(void)
|
|||
struct cachepolicy *cp;
|
||||
unsigned int cr = get_cr();
|
||||
pteval_t user_pgprot, kern_pgprot, vecs_pgprot;
|
||||
pteval_t hyp_device_pgprot, s2_pgprot, s2_device_pgprot;
|
||||
int cpu_arch = cpu_architecture();
|
||||
int i;
|
||||
|
||||
|
@ -558,9 +539,6 @@ static void __init build_mem_type_table(void)
|
|||
*/
|
||||
cp = &cache_policies[cachepolicy];
|
||||
vecs_pgprot = kern_pgprot = user_pgprot = cp->pte;
|
||||
s2_pgprot = cp->pte_s2;
|
||||
hyp_device_pgprot = mem_types[MT_DEVICE].prot_pte;
|
||||
s2_device_pgprot = mem_types[MT_DEVICE].prot_pte_s2;
|
||||
|
||||
#ifndef CONFIG_ARM_LPAE
|
||||
/*
|
||||
|
@ -604,7 +582,6 @@ static void __init build_mem_type_table(void)
|
|||
user_pgprot |= L_PTE_SHARED;
|
||||
kern_pgprot |= L_PTE_SHARED;
|
||||
vecs_pgprot |= L_PTE_SHARED;
|
||||
s2_pgprot |= L_PTE_SHARED;
|
||||
mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_S;
|
||||
mem_types[MT_DEVICE_WC].prot_pte |= L_PTE_SHARED;
|
||||
mem_types[MT_DEVICE_CACHED].prot_sect |= PMD_SECT_S;
|
||||
|
@ -666,9 +643,6 @@ static void __init build_mem_type_table(void)
|
|||
pgprot_user = __pgprot(L_PTE_PRESENT | L_PTE_YOUNG | user_pgprot);
|
||||
pgprot_kernel = __pgprot(L_PTE_PRESENT | L_PTE_YOUNG |
|
||||
L_PTE_DIRTY | kern_pgprot);
|
||||
pgprot_s2 = __pgprot(L_PTE_PRESENT | L_PTE_YOUNG | s2_pgprot);
|
||||
pgprot_s2_device = __pgprot(s2_device_pgprot);
|
||||
pgprot_hyp_device = __pgprot(hyp_device_pgprot);
|
||||
|
||||
mem_types[MT_LOW_VECTORS].prot_l1 |= ecc_mask;
|
||||
mem_types[MT_HIGH_VECTORS].prot_l1 |= ecc_mask;
|
||||
|
|
|
@@ -89,7 +89,8 @@ static inline unsigned long *vcpu_hcr(struct kvm_vcpu *vcpu)
 static inline void vcpu_clear_wfx_traps(struct kvm_vcpu *vcpu)
 {
 	vcpu->arch.hcr_el2 &= ~HCR_TWE;
-	if (atomic_read(&vcpu->arch.vgic_cpu.vgic_v3.its_vpe.vlpi_count))
+	if (atomic_read(&vcpu->arch.vgic_cpu.vgic_v3.its_vpe.vlpi_count) ||
+	    vcpu->kvm->arch.vgic.nassgireq)
 		vcpu->arch.hcr_el2 &= ~HCR_TWI;
 	else
 		vcpu->arch.hcr_el2 |= HCR_TWI;
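The net effect of the hunk above is that WFI is no longer trapped whenever either direct-injection vLPIs or GICv4.1 HW-delivered SGIs may be pending for the vCPU. As a standalone restatement (the helper name is illustrative, not kernel code):

	/* Trap WFI only when no hardware-delivered interrupt could be pending. */
	static bool should_trap_wfi(int vlpi_count, bool nassgireq)
	{
		return !(vlpi_count || nassgireq);
	}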
@@ -44,6 +44,7 @@
 #define KVM_REQ_IRQ_PENDING	KVM_ARCH_REQ(1)
 #define KVM_REQ_VCPU_RESET	KVM_ARCH_REQ(2)
 #define KVM_REQ_RECORD_STEAL	KVM_ARCH_REQ(3)
+#define KVM_REQ_RELOAD_GICv4	KVM_ARCH_REQ(4)
 
 DECLARE_STATIC_KEY_FALSE(userspace_irqchip_in_use);
 
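The new request bit follows the usual KVM vCPU-request pattern; a minimal sketch of producer and consumer, assuming the generic kvm_make_request()/kvm_check_request() helpers (the consumer-side function name below is a placeholder, not taken from this series):

	/* producer: ask the vCPU to re-establish its GICv4 state before re-entry */
	kvm_make_request(KVM_REQ_RELOAD_GICv4, vcpu);
	kvm_vcpu_kick(vcpu);

	/* consumer: typically in the request-handling path before guest entry */
	if (kvm_check_request(KVM_REQ_RELOAD_GICv4, vcpu))
		reload_gicv4_state(vcpu);	/* placeholder for the arch-specific reload */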
@@ -11,7 +11,6 @@
 #include <linux/kvm_host.h>
 #include <asm/fpsimd.h>
 #include <asm/kvm_asm.h>
-#include <asm/kvm_host.h>
 #include <asm/kvm_mmu.h>
 #include <asm/sysreg.h>
 
@@ -25,7 +25,6 @@
 #include <asm/kvm.h>
 #include <asm/kvm_emulate.h>
 #include <asm/kvm_coproc.h>
-#include <asm/kvm_host.h>
 #include <asm/sigcontext.h>
 
 #include "trace.h"
@@ -17,7 +17,6 @@
 #include <asm/kprobes.h>
 #include <asm/kvm_asm.h>
 #include <asm/kvm_emulate.h>
-#include <asm/kvm_host.h>
 #include <asm/kvm_hyp.h>
 #include <asm/kvm_mmu.h>
 #include <asm/fpsimd.h>
@@ -22,7 +22,6 @@
 #include <asm/kvm_arm.h>
 #include <asm/kvm_coproc.h>
 #include <asm/kvm_emulate.h>
-#include <asm/kvm_host.h>
 #include <asm/kvm_hyp.h>
 #include <asm/kvm_mmu.h>
 #include <asm/perf_event.h>
@@ -12,7 +12,6 @@
 #include <asm/cputype.h>
 #include <asm/kvm_arm.h>
 #include <asm/kvm_asm.h>
-#include <asm/kvm_host.h>
 #include <asm/kvm_emulate.h>
 #include <asm/kvm_coproc.h>
 #include <asm/sysreg.h>
@ -1133,7 +1133,7 @@ extern unsigned long kvm_mips_get_ramsize(struct kvm *kvm);
|
|||
static inline void kvm_arch_hardware_unsetup(void) {}
|
||||
static inline void kvm_arch_sync_events(struct kvm *kvm) {}
|
||||
static inline void kvm_arch_free_memslot(struct kvm *kvm,
|
||||
struct kvm_memory_slot *free, struct kvm_memory_slot *dont) {}
|
||||
struct kvm_memory_slot *slot) {}
|
||||
static inline void kvm_arch_memslots_updated(struct kvm *kvm, u64 gen) {}
|
||||
static inline void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu) {}
|
||||
static inline void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu) {}
|
||||
|
|
|
@ -118,12 +118,12 @@ void kvm_arch_hardware_disable(void)
|
|||
kvm_mips_callbacks->hardware_disable();
|
||||
}
|
||||
|
||||
int kvm_arch_hardware_setup(void)
|
||||
int kvm_arch_hardware_setup(void *opaque)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
|
||||
int kvm_arch_check_processor_compat(void)
|
||||
int kvm_arch_check_processor_compat(void *opaque)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
|
@ -188,12 +188,6 @@ long kvm_arch_dev_ioctl(struct file *filp, unsigned int ioctl,
|
|||
return -ENOIOCTLCMD;
|
||||
}
|
||||
|
||||
int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
|
||||
unsigned long npages)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
|
||||
void kvm_arch_flush_shadow_all(struct kvm *kvm)
|
||||
{
|
||||
/* Flush whole GPA */
|
||||
|
@ -230,7 +224,7 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm,
|
|||
|
||||
void kvm_arch_commit_memory_region(struct kvm *kvm,
|
||||
const struct kvm_userspace_memory_region *mem,
|
||||
const struct kvm_memory_slot *old,
|
||||
struct kvm_memory_slot *old,
|
||||
const struct kvm_memory_slot *new,
|
||||
enum kvm_mr_change change)
|
||||
{
|
||||
|
@ -984,69 +978,16 @@ long kvm_arch_vcpu_ioctl(struct file *filp, unsigned int ioctl,
|
|||
return r;
|
||||
}
|
||||
|
||||
/**
|
||||
* kvm_vm_ioctl_get_dirty_log - get and clear the log of dirty pages in a slot
|
||||
* @kvm: kvm instance
|
||||
* @log: slot id and address to which we copy the log
|
||||
*
|
||||
* Steps 1-4 below provide general overview of dirty page logging. See
|
||||
* kvm_get_dirty_log_protect() function description for additional details.
|
||||
*
|
||||
* We call kvm_get_dirty_log_protect() to handle steps 1-3, upon return we
|
||||
* always flush the TLB (step 4) even if previous step failed and the dirty
|
||||
* bitmap may be corrupt. Regardless of previous outcome the KVM logging API
|
||||
* does not preclude user space subsequent dirty log read. Flushing TLB ensures
|
||||
* writes will be marked dirty for next log read.
|
||||
*
|
||||
* 1. Take a snapshot of the bit and clear it if needed.
|
||||
* 2. Write protect the corresponding page.
|
||||
* 3. Copy the snapshot to the userspace.
|
||||
* 4. Flush TLB's if needed.
|
||||
*/
|
||||
int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log)
|
||||
void kvm_arch_sync_dirty_log(struct kvm *kvm, struct kvm_memory_slot *memslot)
|
||||
{
|
||||
struct kvm_memslots *slots;
|
||||
struct kvm_memory_slot *memslot;
|
||||
bool flush = false;
|
||||
int r;
|
||||
|
||||
mutex_lock(&kvm->slots_lock);
|
||||
|
||||
r = kvm_get_dirty_log_protect(kvm, log, &flush);
|
||||
|
||||
if (flush) {
|
||||
slots = kvm_memslots(kvm);
|
||||
memslot = id_to_memslot(slots, log->slot);
|
||||
|
||||
/* Let implementation handle TLB/GVA invalidation */
|
||||
kvm_mips_callbacks->flush_shadow_memslot(kvm, memslot);
|
||||
}
|
||||
|
||||
mutex_unlock(&kvm->slots_lock);
|
||||
return r;
|
||||
}
|
||||
|
||||
int kvm_vm_ioctl_clear_dirty_log(struct kvm *kvm, struct kvm_clear_dirty_log *log)
|
||||
void kvm_arch_flush_remote_tlbs_memslot(struct kvm *kvm,
|
||||
struct kvm_memory_slot *memslot)
|
||||
{
|
||||
struct kvm_memslots *slots;
|
||||
struct kvm_memory_slot *memslot;
|
||||
bool flush = false;
|
||||
int r;
|
||||
|
||||
mutex_lock(&kvm->slots_lock);
|
||||
|
||||
r = kvm_clear_dirty_log_protect(kvm, log, &flush);
|
||||
|
||||
if (flush) {
|
||||
slots = kvm_memslots(kvm);
|
||||
memslot = id_to_memslot(slots, log->slot);
|
||||
|
||||
/* Let implementation handle TLB/GVA invalidation */
|
||||
kvm_mips_callbacks->flush_shadow_memslot(kvm, memslot);
|
||||
}
|
||||
|
||||
mutex_unlock(&kvm->slots_lock);
|
||||
return r;
|
||||
/* Let implementation handle TLB/GVA invalidation */
|
||||
kvm_mips_callbacks->flush_shadow_memslot(kvm, memslot);
|
||||
}
|
||||
|
||||
long kvm_arch_vm_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg)
|
||||
|
|
|
@ -150,4 +150,7 @@
|
|||
|
||||
#define KVM_INST_FETCH_FAILED -1
|
||||
|
||||
/* Extract PO and XOP opcode fields */
|
||||
#define PO_XOP_OPCODE_MASK 0xfc0007fe
|
||||
|
||||
#endif /* __POWERPC_KVM_ASM_H__ */
|
||||
|
|
|
@ -5,6 +5,7 @@
|
|||
#ifdef CONFIG_PPC_UV
|
||||
int kvmppc_uvmem_init(void);
|
||||
void kvmppc_uvmem_free(void);
|
||||
bool kvmppc_uvmem_available(void);
|
||||
int kvmppc_uvmem_slot_init(struct kvm *kvm, const struct kvm_memory_slot *slot);
|
||||
void kvmppc_uvmem_slot_free(struct kvm *kvm,
|
||||
const struct kvm_memory_slot *slot);
|
||||
|
@ -30,6 +31,11 @@ static inline int kvmppc_uvmem_init(void)
|
|||
|
||||
static inline void kvmppc_uvmem_free(void) { }
|
||||
|
||||
static inline bool kvmppc_uvmem_available(void)
|
||||
{
|
||||
return false;
|
||||
}
|
||||
|
||||
static inline int
|
||||
kvmppc_uvmem_slot_init(struct kvm *kvm, const struct kvm_memory_slot *slot)
|
||||
{
|
||||
|
|
|
@ -303,6 +303,7 @@ struct kvm_arch {
|
|||
u8 radix;
|
||||
u8 fwnmi_enabled;
|
||||
u8 secure_guest;
|
||||
u8 svm_enabled;
|
||||
bool threads_indep;
|
||||
bool nested_enable;
|
||||
pgd_t *pgtable;
|
||||
|
|
|
@ -107,8 +107,6 @@ extern void kvmppc_mmu_map(struct kvm_vcpu *vcpu, u64 gvaddr, gpa_t gpaddr,
|
|||
unsigned int gtlb_idx);
|
||||
extern void kvmppc_mmu_priv_switch(struct kvm_vcpu *vcpu, int usermode);
|
||||
extern void kvmppc_mmu_switch_pid(struct kvm_vcpu *vcpu, u32 pid);
|
||||
extern void kvmppc_mmu_destroy(struct kvm_vcpu *vcpu);
|
||||
extern int kvmppc_mmu_init(struct kvm_vcpu *vcpu);
|
||||
extern int kvmppc_mmu_dtlb_index(struct kvm_vcpu *vcpu, gva_t eaddr);
|
||||
extern int kvmppc_mmu_itlb_index(struct kvm_vcpu *vcpu, gva_t eaddr);
|
||||
extern gpa_t kvmppc_mmu_xlate(struct kvm_vcpu *vcpu, unsigned int gtlb_index,
|
||||
|
@ -200,14 +198,11 @@ extern void kvm_free_hpt_cma(struct page *page, unsigned long nr_pages);
|
|||
extern int kvmppc_core_init_vm(struct kvm *kvm);
|
||||
extern void kvmppc_core_destroy_vm(struct kvm *kvm);
|
||||
extern void kvmppc_core_free_memslot(struct kvm *kvm,
|
||||
struct kvm_memory_slot *free,
|
||||
struct kvm_memory_slot *dont);
|
||||
extern int kvmppc_core_create_memslot(struct kvm *kvm,
|
||||
struct kvm_memory_slot *slot,
|
||||
unsigned long npages);
|
||||
struct kvm_memory_slot *slot);
|
||||
extern int kvmppc_core_prepare_memory_region(struct kvm *kvm,
|
||||
struct kvm_memory_slot *memslot,
|
||||
const struct kvm_userspace_memory_region *mem);
|
||||
const struct kvm_userspace_memory_region *mem,
|
||||
enum kvm_mr_change change);
|
||||
extern void kvmppc_core_commit_memory_region(struct kvm *kvm,
|
||||
const struct kvm_userspace_memory_region *mem,
|
||||
const struct kvm_memory_slot *old,
|
||||
|
@ -280,7 +275,8 @@ struct kvmppc_ops {
|
|||
void (*flush_memslot)(struct kvm *kvm, struct kvm_memory_slot *memslot);
|
||||
int (*prepare_memory_region)(struct kvm *kvm,
|
||||
struct kvm_memory_slot *memslot,
|
||||
const struct kvm_userspace_memory_region *mem);
|
||||
const struct kvm_userspace_memory_region *mem,
|
||||
enum kvm_mr_change change);
|
||||
void (*commit_memory_region)(struct kvm *kvm,
|
||||
const struct kvm_userspace_memory_region *mem,
|
||||
const struct kvm_memory_slot *old,
|
||||
|
@ -291,11 +287,7 @@ struct kvmppc_ops {
|
|||
int (*age_hva)(struct kvm *kvm, unsigned long start, unsigned long end);
|
||||
int (*test_age_hva)(struct kvm *kvm, unsigned long hva);
|
||||
void (*set_spte_hva)(struct kvm *kvm, unsigned long hva, pte_t pte);
|
||||
void (*mmu_destroy)(struct kvm_vcpu *vcpu);
|
||||
void (*free_memslot)(struct kvm_memory_slot *free,
|
||||
struct kvm_memory_slot *dont);
|
||||
int (*create_memslot)(struct kvm_memory_slot *slot,
|
||||
unsigned long npages);
|
||||
void (*free_memslot)(struct kvm_memory_slot *slot);
|
||||
int (*init_vm)(struct kvm *kvm);
|
||||
void (*destroy_vm)(struct kvm *kvm);
|
||||
int (*get_smmu_info)(struct kvm *kvm, struct kvm_ppc_smmu_info *info);
|
||||
|
@ -321,6 +313,7 @@ struct kvmppc_ops {
|
|||
int size);
|
||||
int (*store_to_eaddr)(struct kvm_vcpu *vcpu, ulong *eaddr, void *ptr,
|
||||
int size);
|
||||
int (*enable_svm)(struct kvm *kvm);
|
||||
int (*svm_off)(struct kvm *kvm);
|
||||
};
|
||||
|
||||
|
|
|
@ -799,21 +799,19 @@ int kvmppc_core_check_requests(struct kvm_vcpu *vcpu)
|
|||
return vcpu->kvm->arch.kvm_ops->check_requests(vcpu);
|
||||
}
|
||||
|
||||
void kvm_arch_sync_dirty_log(struct kvm *kvm, struct kvm_memory_slot *memslot)
|
||||
{
|
||||
|
||||
}
|
||||
|
||||
int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log)
|
||||
{
|
||||
return kvm->arch.kvm_ops->get_dirty_log(kvm, log);
|
||||
}
|
||||
|
||||
void kvmppc_core_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free,
|
||||
struct kvm_memory_slot *dont)
|
||||
void kvmppc_core_free_memslot(struct kvm *kvm, struct kvm_memory_slot *slot)
|
||||
{
|
||||
kvm->arch.kvm_ops->free_memslot(free, dont);
|
||||
}
|
||||
|
||||
int kvmppc_core_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
|
||||
unsigned long npages)
|
||||
{
|
||||
return kvm->arch.kvm_ops->create_memslot(slot, npages);
|
||||
kvm->arch.kvm_ops->free_memslot(slot);
|
||||
}
|
||||
|
||||
void kvmppc_core_flush_memslot(struct kvm *kvm, struct kvm_memory_slot *memslot)
|
||||
|
@ -823,9 +821,11 @@ void kvmppc_core_flush_memslot(struct kvm *kvm, struct kvm_memory_slot *memslot)
|
|||
|
||||
int kvmppc_core_prepare_memory_region(struct kvm *kvm,
|
||||
struct kvm_memory_slot *memslot,
|
||||
const struct kvm_userspace_memory_region *mem)
|
||||
const struct kvm_userspace_memory_region *mem,
|
||||
enum kvm_mr_change change)
|
||||
{
|
||||
return kvm->arch.kvm_ops->prepare_memory_region(kvm, memslot, mem);
|
||||
return kvm->arch.kvm_ops->prepare_memory_region(kvm, memslot, mem,
|
||||
change);
|
||||
}
|
||||
|
||||
void kvmppc_core_commit_memory_region(struct kvm *kvm,
|
||||
|
@ -858,11 +858,6 @@ int kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte)
|
|||
return 0;
|
||||
}
|
||||
|
||||
void kvmppc_mmu_destroy(struct kvm_vcpu *vcpu)
|
||||
{
|
||||
vcpu->kvm->arch.kvm_ops->mmu_destroy(vcpu);
|
||||
}
|
||||
|
||||
int kvmppc_core_init_vm(struct kvm *kvm)
|
||||
{
|
||||
|
||||
|
|
|
@ -16,6 +16,7 @@ extern int kvm_age_hva_hv(struct kvm *kvm, unsigned long start,
|
|||
extern int kvm_test_age_hva_hv(struct kvm *kvm, unsigned long hva);
|
||||
extern void kvm_set_spte_hva_hv(struct kvm *kvm, unsigned long hva, pte_t pte);
|
||||
|
||||
extern int kvmppc_mmu_init_pr(struct kvm_vcpu *vcpu);
|
||||
extern void kvmppc_mmu_destroy_pr(struct kvm_vcpu *vcpu);
|
||||
extern int kvmppc_core_emulate_op_pr(struct kvm_run *run, struct kvm_vcpu *vcpu,
|
||||
unsigned int inst, int *advance);
|
||||
|
|
|
@ -234,7 +234,7 @@ static int kvmppc_mmu_book3s_32_xlate_pte(struct kvm_vcpu *vcpu, gva_t eaddr,
|
|||
case 2:
|
||||
case 6:
|
||||
pte->may_write = true;
|
||||
/* fall through */
|
||||
fallthrough;
|
||||
case 3:
|
||||
case 5:
|
||||
case 7:
|
||||
|
|
|
@ -356,7 +356,7 @@ void kvmppc_mmu_destroy_pr(struct kvm_vcpu *vcpu)
|
|||
/* From mm/mmu_context_hash32.c */
|
||||
#define CTX_TO_VSID(c, id) ((((c) * (897 * 16)) + (id * 0x111)) & 0xffffff)
|
||||
|
||||
int kvmppc_mmu_init(struct kvm_vcpu *vcpu)
|
||||
int kvmppc_mmu_init_pr(struct kvm_vcpu *vcpu)
|
||||
{
|
||||
struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
|
||||
int err;
|
||||
|
|
|
@ -311,7 +311,7 @@ static int kvmppc_mmu_book3s_64_xlate(struct kvm_vcpu *vcpu, gva_t eaddr,
|
|||
case 2:
|
||||
case 6:
|
||||
gpte->may_write = true;
|
||||
/* fall through */
|
||||
fallthrough;
|
||||
case 3:
|
||||
case 5:
|
||||
case 7:
|
||||
|
|
|
@ -384,7 +384,7 @@ void kvmppc_mmu_destroy_pr(struct kvm_vcpu *vcpu)
|
|||
__destroy_context(to_book3s(vcpu)->context_id[0]);
|
||||
}
|
||||
|
||||
int kvmppc_mmu_init(struct kvm_vcpu *vcpu)
|
||||
int kvmppc_mmu_init_pr(struct kvm_vcpu *vcpu)
|
||||
{
|
||||
struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
|
||||
int err;
|
||||
|
|
|
@ -485,18 +485,18 @@ int kvmppc_book3s_hv_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu,
|
|||
__be64 *hptep;
|
||||
unsigned long mmu_seq, psize, pte_size;
|
||||
unsigned long gpa_base, gfn_base;
|
||||
unsigned long gpa, gfn, hva, pfn;
|
||||
unsigned long gpa, gfn, hva, pfn, hpa;
|
||||
struct kvm_memory_slot *memslot;
|
||||
unsigned long *rmap;
|
||||
struct revmap_entry *rev;
|
||||
struct page *page, *pages[1];
|
||||
long index, ret, npages;
|
||||
struct page *page;
|
||||
long index, ret;
|
||||
bool is_ci;
|
||||
unsigned int writing, write_ok;
|
||||
struct vm_area_struct *vma;
|
||||
bool writing, write_ok;
|
||||
unsigned int shift;
|
||||
unsigned long rcbits;
|
||||
long mmio_update;
|
||||
struct mm_struct *mm;
|
||||
pte_t pte, *ptep;
|
||||
|
||||
if (kvm_is_radix(kvm))
|
||||
return kvmppc_book3s_radix_page_fault(run, vcpu, ea, dsisr);
|
||||
|
@ -570,59 +570,62 @@ int kvmppc_book3s_hv_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu,
|
|||
smp_rmb();
|
||||
|
||||
ret = -EFAULT;
|
||||
is_ci = false;
|
||||
pfn = 0;
|
||||
page = NULL;
|
||||
mm = kvm->mm;
|
||||
pte_size = PAGE_SIZE;
|
||||
writing = (dsisr & DSISR_ISSTORE) != 0;
|
||||
/* If writing != 0, then the HPTE must allow writing, if we get here */
|
||||
write_ok = writing;
|
||||
hva = gfn_to_hva_memslot(memslot, gfn);
|
||||
npages = get_user_pages_fast(hva, 1, writing ? FOLL_WRITE : 0, pages);
|
||||
if (npages < 1) {
|
||||
/* Check if it's an I/O mapping */
|
||||
down_read(&mm->mmap_sem);
|
||||
vma = find_vma(mm, hva);
|
||||
if (vma && vma->vm_start <= hva && hva + psize <= vma->vm_end &&
|
||||
(vma->vm_flags & VM_PFNMAP)) {
|
||||
pfn = vma->vm_pgoff +
|
||||
((hva - vma->vm_start) >> PAGE_SHIFT);
|
||||
pte_size = psize;
|
||||
is_ci = pte_ci(__pte((pgprot_val(vma->vm_page_prot))));
|
||||
write_ok = vma->vm_flags & VM_WRITE;
|
||||
}
|
||||
up_read(&mm->mmap_sem);
|
||||
if (!pfn)
|
||||
goto out_put;
|
||||
|
||||
/*
|
||||
* Do a fast check first, since __gfn_to_pfn_memslot doesn't
|
||||
* do it with !atomic && !async, which is how we call it.
|
||||
* We always ask for write permission since the common case
|
||||
* is that the page is writable.
|
||||
*/
|
||||
if (__get_user_pages_fast(hva, 1, 1, &page) == 1) {
|
||||
write_ok = true;
|
||||
} else {
|
||||
page = pages[0];
|
||||
pfn = page_to_pfn(page);
|
||||
if (PageHuge(page)) {
|
||||
page = compound_head(page);
|
||||
pte_size <<= compound_order(page);
|
||||
}
|
||||
/* if the guest wants write access, see if that is OK */
|
||||
if (!writing && hpte_is_writable(r)) {
|
||||
pte_t *ptep, pte;
|
||||
unsigned long flags;
|
||||
/*
|
||||
* We need to protect against page table destruction
|
||||
* hugepage split and collapse.
|
||||
*/
|
||||
local_irq_save(flags);
|
||||
ptep = find_current_mm_pte(mm->pgd, hva, NULL, NULL);
|
||||
if (ptep) {
|
||||
pte = kvmppc_read_update_linux_pte(ptep, 1);
|
||||
if (__pte_write(pte))
|
||||
write_ok = 1;
|
||||
}
|
||||
local_irq_restore(flags);
|
||||
/* Call KVM generic code to do the slow-path check */
|
||||
pfn = __gfn_to_pfn_memslot(memslot, gfn, false, NULL,
|
||||
writing, &write_ok);
|
||||
if (is_error_noslot_pfn(pfn))
|
||||
return -EFAULT;
|
||||
page = NULL;
|
||||
if (pfn_valid(pfn)) {
|
||||
page = pfn_to_page(pfn);
|
||||
if (PageReserved(page))
|
||||
page = NULL;
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
* Read the PTE from the process' radix tree and use that
|
||||
* so we get the shift and attribute bits.
|
||||
*/
|
||||
local_irq_disable();
|
||||
ptep = __find_linux_pte(vcpu->arch.pgdir, hva, NULL, &shift);
|
||||
/*
|
||||
* If the PTE disappeared temporarily due to a THP
|
||||
* collapse, just return and let the guest try again.
|
||||
*/
|
||||
if (!ptep) {
|
||||
local_irq_enable();
|
||||
if (page)
|
||||
put_page(page);
|
||||
return RESUME_GUEST;
|
||||
}
|
||||
pte = *ptep;
|
||||
local_irq_enable();
|
||||
hpa = pte_pfn(pte) << PAGE_SHIFT;
|
||||
pte_size = PAGE_SIZE;
|
||||
if (shift)
|
||||
pte_size = 1ul << shift;
|
||||
is_ci = pte_ci(pte);
|
||||
|
||||
if (psize > pte_size)
|
||||
goto out_put;
|
||||
if (pte_size > psize)
|
||||
hpa |= hva & (pte_size - psize);
|
||||
|
||||
/* Check WIMG vs. the actual page we're accessing */
|
||||
if (!hpte_cache_flags_ok(r, is_ci)) {
|
||||
|
@ -636,14 +639,13 @@ int kvmppc_book3s_hv_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu,
|
|||
}
|
||||
|
||||
/*
|
||||
* Set the HPTE to point to pfn.
|
||||
* Since the pfn is at PAGE_SIZE granularity, make sure we
|
||||
* Set the HPTE to point to hpa.
|
||||
* Since the hpa is at PAGE_SIZE granularity, make sure we
|
||||
* don't mask out lower-order bits if psize < PAGE_SIZE.
|
||||
*/
|
||||
if (psize < PAGE_SIZE)
|
||||
psize = PAGE_SIZE;
|
||||
r = (r & HPTE_R_KEY_HI) | (r & ~(HPTE_R_PP0 - psize)) |
|
||||
((pfn << PAGE_SHIFT) & ~(psize - 1));
|
||||
r = (r & HPTE_R_KEY_HI) | (r & ~(HPTE_R_PP0 - psize)) | hpa;
|
||||
if (hpte_is_writable(r) && !write_ok)
|
||||
r = hpte_make_readonly(r);
|
||||
ret = RESUME_GUEST;
|
||||
|
@ -708,20 +710,13 @@ int kvmppc_book3s_hv_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu,
|
|||
asm volatile("ptesync" : : : "memory");
|
||||
preempt_enable();
|
||||
if (page && hpte_is_writable(r))
|
||||
SetPageDirty(page);
|
||||
set_page_dirty_lock(page);
|
||||
|
||||
out_put:
|
||||
trace_kvm_page_fault_exit(vcpu, hpte, ret);
|
||||
|
||||
if (page) {
|
||||
/*
|
||||
* We drop pages[0] here, not page because page might
|
||||
* have been set to the head page of a compound, but
|
||||
* we have to drop the reference on the correct tail
|
||||
* page to match the get inside gup()
|
||||
*/
|
||||
put_page(pages[0]);
|
||||
}
|
||||
if (page)
|
||||
put_page(page);
|
||||
return ret;
|
||||
|
||||
out_unlock:
|
||||
|
|
|
@ -425,7 +425,7 @@ static void kvmppc_unmap_free_pte(struct kvm *kvm, pte_t *pte, bool full,
|
|||
unsigned int lpid)
|
||||
{
|
||||
if (full) {
|
||||
memset(pte, 0, sizeof(long) << PTE_INDEX_SIZE);
|
||||
memset(pte, 0, sizeof(long) << RADIX_PTE_INDEX_SIZE);
|
||||
} else {
|
||||
pte_t *p = pte;
|
||||
unsigned long it;
|
||||
|
|
|
@ -27,7 +27,6 @@
|
|||
#include <asm/hvcall.h>
|
||||
#include <asm/synch.h>
|
||||
#include <asm/ppc-opcode.h>
|
||||
#include <asm/kvm_host.h>
|
||||
#include <asm/udbg.h>
|
||||
#include <asm/iommu.h>
|
||||
#include <asm/tce.h>
|
||||
|
|
|
@ -24,7 +24,6 @@
|
|||
#include <asm/hvcall.h>
|
||||
#include <asm/synch.h>
|
||||
#include <asm/ppc-opcode.h>
|
||||
#include <asm/kvm_host.h>
|
||||
#include <asm/udbg.h>
|
||||
#include <asm/iommu.h>
|
||||
#include <asm/tce.h>
|
||||
|
|
|
@ -72,7 +72,6 @@
|
|||
#include <asm/xics.h>
|
||||
#include <asm/xive.h>
|
||||
#include <asm/hw_breakpoint.h>
|
||||
#include <asm/kvm_host.h>
|
||||
#include <asm/kvm_book3s_uvmem.h>
|
||||
#include <asm/ultravisor.h>
|
||||
|
||||
|
@ -1074,25 +1073,35 @@ int kvmppc_pseries_do_hcall(struct kvm_vcpu *vcpu)
|
|||
kvmppc_get_gpr(vcpu, 6));
|
||||
break;
|
||||
case H_SVM_PAGE_IN:
|
||||
ret = kvmppc_h_svm_page_in(vcpu->kvm,
|
||||
kvmppc_get_gpr(vcpu, 4),
|
||||
kvmppc_get_gpr(vcpu, 5),
|
||||
kvmppc_get_gpr(vcpu, 6));
|
||||
ret = H_UNSUPPORTED;
|
||||
if (kvmppc_get_srr1(vcpu) & MSR_S)
|
||||
ret = kvmppc_h_svm_page_in(vcpu->kvm,
|
||||
kvmppc_get_gpr(vcpu, 4),
|
||||
kvmppc_get_gpr(vcpu, 5),
|
||||
kvmppc_get_gpr(vcpu, 6));
|
||||
break;
|
||||
case H_SVM_PAGE_OUT:
|
||||
ret = kvmppc_h_svm_page_out(vcpu->kvm,
|
||||
kvmppc_get_gpr(vcpu, 4),
|
||||
kvmppc_get_gpr(vcpu, 5),
|
||||
kvmppc_get_gpr(vcpu, 6));
|
||||
ret = H_UNSUPPORTED;
|
||||
if (kvmppc_get_srr1(vcpu) & MSR_S)
|
||||
ret = kvmppc_h_svm_page_out(vcpu->kvm,
|
||||
kvmppc_get_gpr(vcpu, 4),
|
||||
kvmppc_get_gpr(vcpu, 5),
|
||||
kvmppc_get_gpr(vcpu, 6));
|
||||
break;
|
||||
case H_SVM_INIT_START:
|
||||
ret = kvmppc_h_svm_init_start(vcpu->kvm);
|
||||
ret = H_UNSUPPORTED;
|
||||
if (kvmppc_get_srr1(vcpu) & MSR_S)
|
||||
ret = kvmppc_h_svm_init_start(vcpu->kvm);
|
||||
break;
|
||||
case H_SVM_INIT_DONE:
|
||||
ret = kvmppc_h_svm_init_done(vcpu->kvm);
|
||||
ret = H_UNSUPPORTED;
|
||||
if (kvmppc_get_srr1(vcpu) & MSR_S)
|
||||
ret = kvmppc_h_svm_init_done(vcpu->kvm);
|
||||
break;
|
||||
case H_SVM_INIT_ABORT:
|
||||
ret = kvmppc_h_svm_init_abort(vcpu->kvm);
|
||||
ret = H_UNSUPPORTED;
|
||||
if (kvmppc_get_srr1(vcpu) & MSR_S)
|
||||
ret = kvmppc_h_svm_init_abort(vcpu->kvm);
|
||||
break;
|
||||
|
||||
default:
|
||||
|
@ -3616,6 +3625,7 @@ int kvmhv_p9_guest_entry(struct kvm_vcpu *vcpu, u64 time_limit,
|
|||
if (trap == BOOK3S_INTERRUPT_SYSCALL && !vcpu->arch.nested &&
|
||||
kvmppc_get_gpr(vcpu, 3) == H_CEDE) {
|
||||
kvmppc_nested_cede(vcpu);
|
||||
kvmppc_set_gpr(vcpu, 3, 0);
|
||||
trap = 0;
|
||||
}
|
||||
} else {
|
||||
|
@ -4400,7 +4410,7 @@ static int kvm_vm_ioctl_get_dirty_log_hv(struct kvm *kvm,
|
|||
slots = kvm_memslots(kvm);
|
||||
memslot = id_to_memslot(slots, log->slot);
|
||||
r = -ENOENT;
|
||||
if (!memslot->dirty_bitmap)
|
||||
if (!memslot || !memslot->dirty_bitmap)
|
||||
goto out;
|
||||
|
||||
/*
|
||||
|
@ -4447,29 +4457,26 @@ static int kvm_vm_ioctl_get_dirty_log_hv(struct kvm *kvm,
|
|||
return r;
|
||||
}
|
||||
|
||||
static void kvmppc_core_free_memslot_hv(struct kvm_memory_slot *free,
|
||||
struct kvm_memory_slot *dont)
|
||||
static void kvmppc_core_free_memslot_hv(struct kvm_memory_slot *slot)
|
||||
{
|
||||
if (!dont || free->arch.rmap != dont->arch.rmap) {
|
||||
vfree(free->arch.rmap);
|
||||
free->arch.rmap = NULL;
|
||||
}
|
||||
}
|
||||
|
||||
static int kvmppc_core_create_memslot_hv(struct kvm_memory_slot *slot,
|
||||
unsigned long npages)
|
||||
{
|
||||
slot->arch.rmap = vzalloc(array_size(npages, sizeof(*slot->arch.rmap)));
|
||||
if (!slot->arch.rmap)
|
||||
return -ENOMEM;
|
||||
|
||||
return 0;
|
||||
vfree(slot->arch.rmap);
|
||||
slot->arch.rmap = NULL;
|
||||
}
|
||||
|
||||
static int kvmppc_core_prepare_memory_region_hv(struct kvm *kvm,
|
||||
struct kvm_memory_slot *memslot,
|
||||
const struct kvm_userspace_memory_region *mem)
|
||||
struct kvm_memory_slot *slot,
|
||||
const struct kvm_userspace_memory_region *mem,
|
				enum kvm_mr_change change)
{
	unsigned long npages = mem->memory_size >> PAGE_SHIFT;

	if (change == KVM_MR_CREATE) {
		slot->arch.rmap = vzalloc(array_size(npages,
					  sizeof(*slot->arch.rmap)));
		if (!slot->arch.rmap)
			return -ENOMEM;
	}

	return 0;
}

@@ -4558,11 +4565,6 @@ void kvmppc_update_lpcr(struct kvm *kvm, unsigned long lpcr, unsigned long mask)
	}
}

static void kvmppc_mmu_destroy_hv(struct kvm_vcpu *vcpu)
{
	return;
}

void kvmppc_setup_partition_table(struct kvm *kvm)
{
	unsigned long dw0, dw1;

@@ -5426,6 +5428,21 @@ static void unpin_vpa_reset(struct kvm *kvm, struct kvmppc_vpa *vpa)
	vpa->update_pending = 0;
}

/*
 * Enable a guest to become a secure VM, or test whether
 * that could be enabled.
 * Called when the KVM_CAP_PPC_SECURE_GUEST capability is
 * tested (kvm == NULL) or enabled (kvm != NULL).
 */
static int kvmhv_enable_svm(struct kvm *kvm)
{
	if (!kvmppc_uvmem_available())
		return -EINVAL;
	if (kvm)
		kvm->arch.svm_enabled = 1;
	return 0;
}

/*
 * IOCTL handler to turn off secure mode of guest
 *

@@ -5526,9 +5543,7 @@ static struct kvmppc_ops kvm_ops_hv = {
	.age_hva = kvm_age_hva_hv,
	.test_age_hva = kvm_test_age_hva_hv,
	.set_spte_hva = kvm_set_spte_hva_hv,
	.mmu_destroy = kvmppc_mmu_destroy_hv,
	.free_memslot = kvmppc_core_free_memslot_hv,
	.create_memslot = kvmppc_core_create_memslot_hv,
	.init_vm = kvmppc_core_init_vm_hv,
	.destroy_vm = kvmppc_core_destroy_vm_hv,
	.get_smmu_info = kvm_vm_ioctl_get_smmu_info_hv,

@@ -5548,6 +5563,7 @@ static struct kvmppc_ops kvm_ops_hv = {
	.enable_nested = kvmhv_enable_nested,
	.load_from_eaddr = kvmhv_load_from_eaddr,
	.store_to_eaddr = kvmhv_store_to_eaddr,
	.enable_svm = kvmhv_enable_svm,
	.svm_off = kvmhv_svm_off,
};

@@ -3,6 +3,8 @@
 * Copyright 2017 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kvm_host.h>

#include <asm/kvm_ppc.h>

@@ -44,7 +46,18 @@ int kvmhv_p9_tm_emulation(struct kvm_vcpu *vcpu)
	u64 newmsr, bescr;
	int ra, rs;

	switch (instr & 0xfc0007ff) {
	/*
	 * rfid, rfebb, and mtmsrd encode bit 31 = 0 since it's a reserved bit
	 * in these instructions, so masking bit 31 out doesn't change these
	 * instructions. For treclaim., tsr., and trechkpt. instructions if bit
	 * 31 = 0 then they are per ISA invalid forms, however P9 UM, in section
	 * 4.6.10 Book II Invalid Forms, informs specifically that ignoring bit
	 * 31 is an acceptable way to handle these invalid forms that have
	 * bit 31 = 0. Moreover, for emulation purposes both forms (w/ and wo/
	 * bit 31 set) can generate a softpatch interrupt. Hence both forms
	 * are handled below for these instructions so they behave the same way.
	 */
	switch (instr & PO_XOP_OPCODE_MASK) {
	case PPC_INST_RFID:
		/* XXX do we need to check for PR=0 here? */
		newmsr = vcpu->arch.shregs.srr1;

@@ -105,7 +118,8 @@ int kvmhv_p9_tm_emulation(struct kvm_vcpu *vcpu)
		vcpu->arch.shregs.msr = newmsr;
		return RESUME_GUEST;

	case PPC_INST_TSR:
	/* ignore bit 31, see comment above */
	case (PPC_INST_TSR & PO_XOP_OPCODE_MASK):
		/* check for PR=1 and arch 2.06 bit set in PCR */
		if ((msr & MSR_PR) && (vcpu->arch.vcore->pcr & PCR_ARCH_206)) {
			/* generate an illegal instruction interrupt */

@@ -140,7 +154,8 @@ int kvmhv_p9_tm_emulation(struct kvm_vcpu *vcpu)
		vcpu->arch.shregs.msr = msr;
		return RESUME_GUEST;

	case PPC_INST_TRECLAIM:
	/* ignore bit 31, see comment above */
	case (PPC_INST_TRECLAIM & PO_XOP_OPCODE_MASK):
		/* check for TM disabled in the HFSCR or MSR */
		if (!(vcpu->arch.hfscr & HFSCR_TM)) {
			/* generate an illegal instruction interrupt */

@@ -176,7 +191,8 @@ int kvmhv_p9_tm_emulation(struct kvm_vcpu *vcpu)
		vcpu->arch.shregs.msr &= ~MSR_TS_MASK;
		return RESUME_GUEST;

	case PPC_INST_TRECHKPT:
	/* ignore bit 31, see comment above */
	case (PPC_INST_TRECHKPT & PO_XOP_OPCODE_MASK):
		/* XXX do we need to check for PR=0 here? */
		/* check for TM disabled in the HFSCR or MSR */
		if (!(vcpu->arch.hfscr & HFSCR_TM)) {

@@ -208,6 +224,8 @@ int kvmhv_p9_tm_emulation(struct kvm_vcpu *vcpu)
	}

	/* What should we do here? We didn't recognize the instruction */
	WARN_ON_ONCE(1);
	kvmppc_core_queue_program(vcpu, SRR1_PROGILL);
	pr_warn_ratelimited("Unrecognized TM-related instruction %#x for emulation", instr);

	return RESUME_GUEST;
}

@@ -23,7 +23,18 @@ int kvmhv_p9_tm_emulation_early(struct kvm_vcpu *vcpu)
	u64 newmsr, msr, bescr;
	int rs;

	switch (instr & 0xfc0007ff) {
	/*
	 * rfid, rfebb, and mtmsrd encode bit 31 = 0 since it's a reserved bit
	 * in these instructions, so masking bit 31 out doesn't change these
	 * instructions. For the tsr. instruction if bit 31 = 0 then it is per
	 * ISA an invalid form, however P9 UM, in section 4.6.10 Book II Invalid
	 * Forms, informs specifically that ignoring bit 31 is an acceptable way
	 * to handle TM-related invalid forms that have bit 31 = 0. Moreover,
	 * for emulation purposes both forms (w/ and wo/ bit 31 set) can
	 * generate a softpatch interrupt. Hence both forms are handled below
	 * for tsr. to make them behave the same way.
	 */
	switch (instr & PO_XOP_OPCODE_MASK) {
	case PPC_INST_RFID:
		/* XXX do we need to check for PR=0 here? */
		newmsr = vcpu->arch.shregs.srr1;

@@ -73,7 +84,8 @@ int kvmhv_p9_tm_emulation_early(struct kvm_vcpu *vcpu)
		vcpu->arch.shregs.msr = newmsr;
		return 1;

	case PPC_INST_TSR:
	/* ignore bit 31, see comment above */
	case (PPC_INST_TSR & PO_XOP_OPCODE_MASK):
		/* we know the MSR has the TS field = S (0b01) here */
		msr = vcpu->arch.shregs.msr;
		/* check for PR=1 and arch 2.06 bit set in PCR */

@@ -113,6 +113,15 @@ struct kvmppc_uvmem_page_pvt {
	bool skip_page_out;
};

bool kvmppc_uvmem_available(void)
{
	/*
	 * If kvmppc_uvmem_bitmap != NULL, then there is an ultravisor
	 * and our data structures have been initialized successfully.
	 */
	return !!kvmppc_uvmem_bitmap;
}

int kvmppc_uvmem_slot_init(struct kvm *kvm, const struct kvm_memory_slot *slot)
{
	struct kvmppc_uvmem_slot *p;

@@ -209,6 +218,8 @@ unsigned long kvmppc_h_svm_init_start(struct kvm *kvm)
	int ret = H_SUCCESS;
	int srcu_idx;

	kvm->arch.secure_guest = KVMPPC_SECURE_INIT_START;

	if (!kvmppc_uvmem_bitmap)
		return H_UNSUPPORTED;

@@ -216,6 +227,10 @@ unsigned long kvmppc_h_svm_init_start(struct kvm *kvm)
	if (!kvm_is_radix(kvm))
		return H_UNSUPPORTED;

	/* NAK the transition to secure if not enabled */
	if (!kvm->arch.svm_enabled)
		return H_AUTHORITY;

	srcu_idx = srcu_read_lock(&kvm->srcu);
	slots = kvm_memslots(kvm);
	kvm_for_each_memslot(memslot, slots) {

@@ -233,7 +248,6 @@ unsigned long kvmppc_h_svm_init_start(struct kvm *kvm)
			goto out;
		}
	}
	kvm->arch.secure_guest |= KVMPPC_SECURE_INIT_START;
out:
	srcu_read_unlock(&kvm->srcu, srcu_idx);
	return ret;

@@ -809,6 +823,9 @@ int kvmppc_uvmem_init(void)

void kvmppc_uvmem_free(void)
{
	if (!kvmppc_uvmem_bitmap)
		return;

	memunmap_pages(&kvmppc_uvmem_pgmap);
	release_mem_region(kvmppc_uvmem_pgmap.res.start,
			   resource_size(&kvmppc_uvmem_pgmap.res));

@@ -740,7 +740,7 @@ int kvmppc_handle_pagefault(struct kvm_run *run, struct kvm_vcpu *vcpu,
		    (vcpu->arch.hflags & BOOK3S_HFLAG_SPLIT_HACK) &&
		    ((pte.raddr & SPLIT_HACK_MASK) == SPLIT_HACK_OFFS))
			pte.raddr &= ~SPLIT_HACK_MASK;
		/* fall through */
		fallthrough;
	case MSR_IR:
		vcpu->arch.mmu.esid_to_vsid(vcpu, eaddr >> SID_SHIFT, &vsid);

@@ -1795,7 +1795,7 @@ static int kvmppc_core_vcpu_create_pr(struct kvm_vcpu *vcpu)

	vcpu->arch.shadow_msr = MSR_USER64 & ~MSR_LE;

	err = kvmppc_mmu_init(vcpu);
	err = kvmppc_mmu_init_pr(vcpu);
	if (err < 0)
		goto free_shared_page;

@@ -1885,7 +1885,6 @@ static int kvmppc_vcpu_run_pr(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
static int kvm_vm_ioctl_get_dirty_log_pr(struct kvm *kvm,
					 struct kvm_dirty_log *log)
{
	struct kvm_memslots *slots;
	struct kvm_memory_slot *memslot;
	struct kvm_vcpu *vcpu;
	ulong ga, ga_end;

@@ -1895,15 +1894,12 @@ static int kvm_vm_ioctl_get_dirty_log_pr(struct kvm *kvm,

	mutex_lock(&kvm->slots_lock);

	r = kvm_get_dirty_log(kvm, log, &is_dirty);
	r = kvm_get_dirty_log(kvm, log, &is_dirty, &memslot);
	if (r)
		goto out;

	/* If nothing is dirty, don't bother messing with page tables. */
	if (is_dirty) {
		slots = kvm_memslots(kvm);
		memslot = id_to_memslot(slots, log->slot);

		ga = memslot->base_gfn << PAGE_SHIFT;
		ga_end = ga + (memslot->npages << PAGE_SHIFT);

@@ -1928,7 +1924,8 @@ static void kvmppc_core_flush_memslot_pr(struct kvm *kvm,

static int kvmppc_core_prepare_memory_region_pr(struct kvm *kvm,
					struct kvm_memory_slot *memslot,
					const struct kvm_userspace_memory_region *mem)
					const struct kvm_userspace_memory_region *mem,
					enum kvm_mr_change change)
{
	return 0;
}

@@ -1942,19 +1939,11 @@ static void kvmppc_core_commit_memory_region_pr(struct kvm *kvm,
	return;
}

static void kvmppc_core_free_memslot_pr(struct kvm_memory_slot *free,
					struct kvm_memory_slot *dont)
static void kvmppc_core_free_memslot_pr(struct kvm_memory_slot *slot)
{
	return;
}

static int kvmppc_core_create_memslot_pr(struct kvm_memory_slot *slot,
					 unsigned long npages)
{
	return 0;
}

#ifdef CONFIG_PPC64
static int kvm_vm_ioctl_get_smmu_info_pr(struct kvm *kvm,
					 struct kvm_ppc_smmu_info *info)

@@ -2098,9 +2087,7 @@ static struct kvmppc_ops kvm_ops_pr = {
	.age_hva = kvm_age_hva_pr,
	.test_age_hva = kvm_test_age_hva_pr,
	.set_spte_hva = kvm_set_spte_hva_pr,
	.mmu_destroy = kvmppc_mmu_destroy_pr,
	.free_memslot = kvmppc_core_free_memslot_pr,
	.create_memslot = kvmppc_core_create_memslot_pr,
	.init_vm = kvmppc_core_init_vm_pr,
	.destroy_vm = kvmppc_core_destroy_vm_pr,
	.get_smmu_info = kvm_vm_ioctl_get_smmu_info_pr,

@@ -421,11 +421,11 @@ static int kvmppc_booke_irqprio_deliver(struct kvm_vcpu *vcpu,
	case BOOKE_IRQPRIO_DATA_STORAGE:
	case BOOKE_IRQPRIO_ALIGNMENT:
		update_dear = true;
		/* fall through */
		fallthrough;
	case BOOKE_IRQPRIO_INST_STORAGE:
	case BOOKE_IRQPRIO_PROGRAM:
		update_esr = true;
		/* fall through */
		fallthrough;
	case BOOKE_IRQPRIO_ITLB_MISS:
	case BOOKE_IRQPRIO_SYSCALL:
	case BOOKE_IRQPRIO_FP_UNAVAIL:

@@ -459,7 +459,7 @@ static int kvmppc_booke_irqprio_deliver(struct kvm_vcpu *vcpu,
	case BOOKE_IRQPRIO_DECREMENTER:
	case BOOKE_IRQPRIO_FIT:
		keep_irq = true;
		/* fall through */
		fallthrough;
	case BOOKE_IRQPRIO_EXTERNAL:
	case BOOKE_IRQPRIO_DBELL:
		allowed = vcpu->arch.shared->msr & MSR_EE;

@@ -1766,25 +1766,24 @@ int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
	return r;
}

void kvm_arch_sync_dirty_log(struct kvm *kvm, struct kvm_memory_slot *memslot)
{

}

int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log)
{
	return -ENOTSUPP;
}

void kvmppc_core_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free,
			      struct kvm_memory_slot *dont)
void kvmppc_core_free_memslot(struct kvm *kvm, struct kvm_memory_slot *slot)
{
}

int kvmppc_core_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
			       unsigned long npages)
{
	return 0;
}

int kvmppc_core_prepare_memory_region(struct kvm *kvm,
				      struct kvm_memory_slot *memslot,
				      const struct kvm_userspace_memory_region *mem)
				      const struct kvm_userspace_memory_region *mem,
				      enum kvm_mr_change change)
{
	return 0;
}

@@ -2074,11 +2073,6 @@ void kvmppc_booke_vcpu_put(struct kvm_vcpu *vcpu)
	kvmppc_clear_dbsr();
}

void kvmppc_mmu_destroy(struct kvm_vcpu *vcpu)
{
	vcpu->kvm->arch.kvm_ops->mmu_destroy(vcpu);
}

int kvmppc_core_init_vm(struct kvm *kvm)
{
	return kvm->arch.kvm_ops->init_vm(kvm);

@@ -94,7 +94,6 @@ enum int_class {

void kvmppc_set_pending_interrupt(struct kvm_vcpu *vcpu, enum int_class type);

extern void kvmppc_mmu_destroy_e500(struct kvm_vcpu *vcpu);
extern int kvmppc_core_emulate_op_e500(struct kvm_run *run,
				       struct kvm_vcpu *vcpu,
				       unsigned int inst, int *advance);

@@ -102,7 +101,6 @@ extern int kvmppc_core_emulate_mtspr_e500(struct kvm_vcpu *vcpu, int sprn,
					  ulong spr_val);
extern int kvmppc_core_emulate_mfspr_e500(struct kvm_vcpu *vcpu, int sprn,
					  ulong *spr_val);
extern void kvmppc_mmu_destroy_e500(struct kvm_vcpu *vcpu);
extern int kvmppc_core_emulate_op_e500(struct kvm_run *run,
				       struct kvm_vcpu *vcpu,
				       unsigned int inst, int *advance);

@@ -490,7 +490,6 @@ static struct kvmppc_ops kvm_ops_e500 = {
	.vcpu_put = kvmppc_core_vcpu_put_e500,
	.vcpu_create = kvmppc_core_vcpu_create_e500,
	.vcpu_free = kvmppc_core_vcpu_free_e500,
	.mmu_destroy = kvmppc_mmu_destroy_e500,
	.init_vm = kvmppc_core_init_vm_e500,
	.destroy_vm = kvmppc_core_destroy_vm_e500,
	.emulate_op = kvmppc_core_emulate_op_e500,

@@ -533,10 +533,6 @@ gpa_t kvmppc_mmu_xlate(struct kvm_vcpu *vcpu, unsigned int index,
	return get_tlb_raddr(gtlbe) | (eaddr & pgmask);
}

void kvmppc_mmu_destroy_e500(struct kvm_vcpu *vcpu)
{
}

/*****************************************/

static void free_gtlb(struct kvmppc_vcpu_e500 *vcpu_e500)

@@ -376,7 +376,6 @@ static struct kvmppc_ops kvm_ops_e500mc = {
	.vcpu_put = kvmppc_core_vcpu_put_e500mc,
	.vcpu_create = kvmppc_core_vcpu_create_e500mc,
	.vcpu_free = kvmppc_core_vcpu_free_e500mc,
	.mmu_destroy = kvmppc_mmu_destroy_e500,
	.init_vm = kvmppc_core_init_vm_e500mc,
	.destroy_vm = kvmppc_core_destroy_vm_e500mc,
	.emulate_op = kvmppc_core_emulate_op_e500,

@@ -32,7 +32,6 @@
#include <linux/uaccess.h>
#include <asm/mpic.h>
#include <asm/kvm_para.h>
#include <asm/kvm_host.h>
#include <asm/kvm_ppc.h>
#include <kvm/iodev.h>

@@ -32,7 +32,6 @@
#include <asm/plpar_wrappers.h>
#endif
#include <asm/ultravisor.h>
#include <asm/kvm_host.h>

#include "timing.h"
#include "irq.h"

@@ -416,12 +415,12 @@ int kvm_arch_hardware_enable(void)
	return 0;
}

int kvm_arch_hardware_setup(void)
int kvm_arch_hardware_setup(void *opaque)
{
	return 0;
}

int kvm_arch_check_processor_compat(void)
int kvm_arch_check_processor_compat(void *opaque)
{
	return kvmppc_core_check_processor_compat();
}

@@ -525,7 +524,6 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
		r = 1;
		break;
	case KVM_CAP_PPC_GUEST_DEBUG_SSTEP:
		/* fall through */
	case KVM_CAP_PPC_PAIRED_SINGLES:
	case KVM_CAP_PPC_OSI:
	case KVM_CAP_PPC_GET_PVINFO:

@@ -670,6 +668,12 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
		r = !!(cur_cpu_spec->cpu_user_features2 & PPC_FEATURE2_HTM) ||
		     (hv_enabled && cpu_has_feature(CPU_FTR_P9_TM_HV_ASSIST));
		break;
#endif
#if defined(CONFIG_KVM_BOOK3S_HV_POSSIBLE)
	case KVM_CAP_PPC_SECURE_GUEST:
		r = hv_enabled && kvmppc_hv_ops->enable_svm &&
		    !kvmppc_hv_ops->enable_svm(NULL);
		break;
#endif
	default:
		r = 0;

@@ -685,16 +689,9 @@ long kvm_arch_dev_ioctl(struct file *filp,
	return -EINVAL;
}

void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free,
			   struct kvm_memory_slot *dont)
void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *slot)
{
	kvmppc_core_free_memslot(kvm, free, dont);
}

int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
			    unsigned long npages)
{
	return kvmppc_core_create_memslot(kvm, slot, npages);
	kvmppc_core_free_memslot(kvm, slot);
}

int kvm_arch_prepare_memory_region(struct kvm *kvm,

@@ -702,12 +699,12 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm,
				   const struct kvm_userspace_memory_region *mem,
				   enum kvm_mr_change change)
{
	return kvmppc_core_prepare_memory_region(kvm, memslot, mem);
	return kvmppc_core_prepare_memory_region(kvm, memslot, mem, change);
}

void kvm_arch_commit_memory_region(struct kvm *kvm,
				   const struct kvm_userspace_memory_region *mem,
				   const struct kvm_memory_slot *old,
				   struct kvm_memory_slot *old,
				   const struct kvm_memory_slot *new,
				   enum kvm_mr_change change)
{

@@ -2175,6 +2172,14 @@ int kvm_vm_ioctl_enable_cap(struct kvm *kvm,
			break;
		r = kvm->arch.kvm_ops->enable_nested(kvm);
		break;
#endif
#if defined(CONFIG_KVM_BOOK3S_HV_POSSIBLE)
	case KVM_CAP_PPC_SECURE_GUEST:
		r = -EINVAL;
		if (!is_kvmppc_hv_enabled(kvm) || !kvm->arch.kvm_ops->enable_svm)
			break;
		r = kvm->arch.kvm_ops->enable_svm(kvm);
		break;
#endif
	default:
		r = -EINVAL;

@@ -10,7 +10,6 @@
#define __POWERPC_KVM_EXITTIMING_H__

#include <linux/kvm_host.h>
#include <asm/kvm_host.h>

#ifdef CONFIG_KVM_EXIT_TIMING
void kvmppc_init_timing_stats(struct kvm_vcpu *vcpu);

@@ -37,7 +37,7 @@ CFLAGS_sclp_early_core.o += -I$(srctree)/drivers/s390/char
obj-y := head.o als.o startup.o mem_detect.o ipl_parm.o ipl_report.o
obj-y += string.o ebcdic.o sclp_early_core.o mem.o ipl_vmparm.o cmdline.o
obj-y += version.o pgm_check_info.o ctype.o text_dma.o
obj-$(CONFIG_PROTECTED_VIRTUALIZATION_GUEST) += uv.o
obj-$(findstring y, $(CONFIG_PROTECTED_VIRTUALIZATION_GUEST) $(CONFIG_PGSTE)) += uv.o
obj-$(CONFIG_RELOCATABLE) += machine_kexec_reloc.o
obj-$(CONFIG_RANDOMIZE_BASE) += kaslr.o
targets := bzImage startup.a section_cmp.boot.data section_cmp.boot.preserved.data $(obj-y)

@@ -3,7 +3,13 @@
#include <asm/facility.h>
#include <asm/sections.h>

/* will be used in arch/s390/kernel/uv.c */
#ifdef CONFIG_PROTECTED_VIRTUALIZATION_GUEST
int __bootdata_preserved(prot_virt_guest);
#endif
#if IS_ENABLED(CONFIG_KVM)
struct uv_info __bootdata_preserved(uv_info);
#endif

void uv_query_info(void)
{

@@ -19,7 +25,21 @@ void uv_query_info(void)
	if (uv_call(0, (uint64_t)&uvcb) && uvcb.header.rc != 0x100)
		return;

	if (IS_ENABLED(CONFIG_KVM)) {
		memcpy(uv_info.inst_calls_list, uvcb.inst_calls_list, sizeof(uv_info.inst_calls_list));
		uv_info.uv_base_stor_len = uvcb.uv_base_stor_len;
		uv_info.guest_base_stor_len = uvcb.conf_base_phys_stor_len;
		uv_info.guest_virt_base_stor_len = uvcb.conf_base_virt_stor_len;
		uv_info.guest_virt_var_stor_len = uvcb.conf_virt_var_stor_len;
		uv_info.guest_cpu_stor_len = uvcb.cpu_stor_len;
		uv_info.max_sec_stor_addr = ALIGN(uvcb.max_guest_stor_addr, PAGE_SIZE);
		uv_info.max_num_sec_conf = uvcb.max_num_sec_conf;
		uv_info.max_guest_cpus = uvcb.max_guest_cpus;
	}

#ifdef CONFIG_PROTECTED_VIRTUALIZATION_GUEST
	if (test_bit_inv(BIT_UVC_CMD_SET_SHARED_ACCESS, (unsigned long *)uvcb.inst_calls_list) &&
	    test_bit_inv(BIT_UVC_CMD_REMOVE_SHARED_ACCESS, (unsigned long *)uvcb.inst_calls_list))
		prot_virt_guest = 1;
#endif
}

@@ -9,6 +9,7 @@
#ifndef _ASM_S390_GMAP_H
#define _ASM_S390_GMAP_H

#include <linux/radix-tree.h>
#include <linux/refcount.h>

/* Generic bits for GMAP notification on DAT table entry changes. */

@@ -31,6 +32,7 @@
 * @table: pointer to the page directory
 * @asce: address space control element for gmap page table
 * @pfault_enabled: defines if pfaults are applicable for the guest
 * @guest_handle: protected virtual machine handle for the ultravisor
 * @host_to_rmap: radix tree with gmap_rmap lists
 * @children: list of shadow gmap structures
 * @pt_list: list of all page tables used in the shadow guest address space

@@ -54,6 +56,8 @@ struct gmap {
	unsigned long asce_end;
	void *private;
	bool pfault_enabled;
	/* only set for protected virtual machines */
	unsigned long guest_handle;
	/* Additional data for shadow guest address spaces */
	struct radix_tree_root host_to_rmap;
	struct list_head children;

@@ -144,4 +148,6 @@ int gmap_mprotect_notify(struct gmap *, unsigned long start,

void gmap_sync_dirty_log_pmd(struct gmap *gmap, unsigned long dirty_bitmap[4],
			     unsigned long gaddr, unsigned long vmaddr);
int gmap_mark_unmergeable(void);
void s390_reset_acc(struct mm_struct *mm);
#endif /* _ASM_S390_GMAP_H */

@@ -127,6 +127,12 @@ struct mcck_volatile_info {
#define CR14_INITIAL_MASK (CR14_UNUSED_32 | CR14_UNUSED_33 | \
			   CR14_EXTERNAL_DAMAGE_SUBMASK)

#define SIDAD_SIZE_MASK 0xff
#define sida_origin(sie_block) \
	((sie_block)->sidad & PAGE_MASK)
#define sida_size(sie_block) \
	((((sie_block)->sidad & SIDAD_SIZE_MASK) + 1) * PAGE_SIZE)

#define CPUSTAT_STOPPED 0x80000000
#define CPUSTAT_WAIT 0x10000000
#define CPUSTAT_ECALL_PEND 0x08000000

@@ -160,7 +166,13 @@ struct kvm_s390_sie_block {
	__u8 reserved08[4]; /* 0x0008 */
#define PROG_IN_SIE (1<<0)
	__u32 prog0c; /* 0x000c */
	__u8 reserved10[16]; /* 0x0010 */
	union {
		__u8 reserved10[16]; /* 0x0010 */
		struct {
			__u64 pv_handle_cpu;
			__u64 pv_handle_config;
		};
	};
#define PROG_BLOCK_SIE (1<<0)
#define PROG_REQUEST (1<<1)
	atomic_t prog20; /* 0x0020 */

@@ -209,10 +221,23 @@ struct kvm_s390_sie_block {
#define ICPT_PARTEXEC 0x38
#define ICPT_IOINST 0x40
#define ICPT_KSS 0x5c
#define ICPT_MCHKREQ 0x60
#define ICPT_INT_ENABLE 0x64
#define ICPT_PV_INSTR 0x68
#define ICPT_PV_NOTIFY 0x6c
#define ICPT_PV_PREF 0x70
	__u8 icptcode; /* 0x0050 */
	__u8 icptstatus; /* 0x0051 */
	__u16 ihcpu; /* 0x0052 */
	__u8 reserved54[2]; /* 0x0054 */
	__u8 reserved54; /* 0x0054 */
#define IICTL_CODE_NONE 0x00
#define IICTL_CODE_MCHK 0x01
#define IICTL_CODE_EXT 0x02
#define IICTL_CODE_IO 0x03
#define IICTL_CODE_RESTART 0x04
#define IICTL_CODE_SPECIFICATION 0x10
#define IICTL_CODE_OPERAND 0x11
	__u8 iictl; /* 0x0055 */
	__u16 ipa; /* 0x0056 */
	__u32 ipb; /* 0x0058 */
	__u32 scaoh; /* 0x005c */

@@ -233,7 +258,7 @@ struct kvm_s390_sie_block {
#define ECB3_RI 0x01
	__u8 ecb3; /* 0x0063 */
	__u32 scaol; /* 0x0064 */
	__u8 reserved68; /* 0x0068 */
	__u8 sdf; /* 0x0068 */
	__u8 epdx; /* 0x0069 */
	__u8 reserved6a[2]; /* 0x006a */
	__u32 todpr; /* 0x006c */

@@ -249,31 +274,58 @@ struct kvm_s390_sie_block {
#define HPID_KVM 0x4
#define HPID_VSIE 0x5
	__u8 hpid; /* 0x00b8 */
	__u8 reservedb9[11]; /* 0x00b9 */
	__u16 extcpuaddr; /* 0x00c4 */
	__u16 eic; /* 0x00c6 */
	__u8 reservedb9[7]; /* 0x00b9 */
	union {
		struct {
			__u32 eiparams; /* 0x00c0 */
			__u16 extcpuaddr; /* 0x00c4 */
			__u16 eic; /* 0x00c6 */
		};
		__u64 mcic; /* 0x00c0 */
	} __packed;
	__u32 reservedc8; /* 0x00c8 */
	__u16 pgmilc; /* 0x00cc */
	__u16 iprcc; /* 0x00ce */
	__u32 dxc; /* 0x00d0 */
	__u16 mcn; /* 0x00d4 */
	__u8 perc; /* 0x00d6 */
	__u8 peratmid; /* 0x00d7 */
	union {
		struct {
			__u16 pgmilc; /* 0x00cc */
			__u16 iprcc; /* 0x00ce */
		};
		__u32 edc; /* 0x00cc */
	} __packed;
	union {
		struct {
			__u32 dxc; /* 0x00d0 */
			__u16 mcn; /* 0x00d4 */
			__u8 perc; /* 0x00d6 */
			__u8 peratmid; /* 0x00d7 */
		};
		__u64 faddr; /* 0x00d0 */
	} __packed;
	__u64 peraddr; /* 0x00d8 */
	__u8 eai; /* 0x00e0 */
	__u8 peraid; /* 0x00e1 */
	__u8 oai; /* 0x00e2 */
	__u8 armid; /* 0x00e3 */
	__u8 reservede4[4]; /* 0x00e4 */
	__u64 tecmc; /* 0x00e8 */
	__u8 reservedf0[12]; /* 0x00f0 */
	union {
		__u64 tecmc; /* 0x00e8 */
		struct {
			__u16 subchannel_id; /* 0x00e8 */
			__u16 subchannel_nr; /* 0x00ea */
			__u32 io_int_parm; /* 0x00ec */
			__u32 io_int_word; /* 0x00f0 */
		};
	} __packed;
	__u8 reservedf4[8]; /* 0x00f4 */
#define CRYCB_FORMAT_MASK 0x00000003
#define CRYCB_FORMAT0 0x00000000
#define CRYCB_FORMAT1 0x00000001
#define CRYCB_FORMAT2 0x00000003
	__u32 crycbd; /* 0x00fc */
	__u64 gcr[16]; /* 0x0100 */
	__u64 gbea; /* 0x0180 */
	union {
		__u64 gbea; /* 0x0180 */
		__u64 sidad;
	};
	__u8 reserved188[8]; /* 0x0188 */
	__u64 sdnxo; /* 0x0190 */
	__u8 reserved198[8]; /* 0x0198 */

@@ -292,7 +344,7 @@ struct kvm_s390_sie_block {
	__u64 itdba; /* 0x01e8 */
	__u64 riccbd; /* 0x01f0 */
	__u64 gvrd; /* 0x01f8 */
} __attribute__((packed));
} __packed __aligned(512);

struct kvm_s390_itdb {
	__u8 data[256];

@@ -301,7 +353,9 @@ struct kvm_s390_itdb {
struct sie_page {
	struct kvm_s390_sie_block sie_block;
	struct mcck_volatile_info mcck_info; /* 0x0200 */
	__u8 reserved218[1000]; /* 0x0218 */
	__u8 reserved218[360]; /* 0x0218 */
	__u64 pv_grregs[16]; /* 0x0380 */
	__u8 reserved400[512]; /* 0x0400 */
	struct kvm_s390_itdb itdb; /* 0x0600 */
	__u8 reserved700[2304]; /* 0x0700 */
};

@@ -476,6 +530,7 @@ enum irq_types {
	IRQ_PEND_PFAULT_INIT,
	IRQ_PEND_EXT_HOST,
	IRQ_PEND_EXT_SERVICE,
	IRQ_PEND_EXT_SERVICE_EV,
	IRQ_PEND_EXT_TIMING,
	IRQ_PEND_EXT_CPU_TIMER,
	IRQ_PEND_EXT_CLOCK_COMP,

@@ -520,6 +575,7 @@ enum irq_types {
			   (1UL << IRQ_PEND_EXT_TIMING) | \
			   (1UL << IRQ_PEND_EXT_HOST) | \
			   (1UL << IRQ_PEND_EXT_SERVICE) | \
			   (1UL << IRQ_PEND_EXT_SERVICE_EV) | \
			   (1UL << IRQ_PEND_VIRTIO) | \
			   (1UL << IRQ_PEND_PFAULT_INIT) | \
			   (1UL << IRQ_PEND_PFAULT_DONE))

@@ -536,6 +592,13 @@ enum irq_types {
#define IRQ_PEND_MCHK_MASK ((1UL << IRQ_PEND_MCHK_REP) | \
			    (1UL << IRQ_PEND_MCHK_EX))

#define IRQ_PEND_EXT_II_MASK ((1UL << IRQ_PEND_EXT_CPU_TIMER) | \
			      (1UL << IRQ_PEND_EXT_CLOCK_COMP) | \
			      (1UL << IRQ_PEND_EXT_EMERGENCY) | \
			      (1UL << IRQ_PEND_EXT_EXTERNAL) | \
			      (1UL << IRQ_PEND_EXT_SERVICE) | \
			      (1UL << IRQ_PEND_EXT_SERVICE_EV))

struct kvm_s390_interrupt_info {
	struct list_head list;
	u64 type;

@@ -594,6 +657,7 @@ struct kvm_s390_local_interrupt {

struct kvm_s390_float_interrupt {
	unsigned long pending_irqs;
	unsigned long masked_irqs;
	spinlock_t lock;
	struct list_head lists[FIRQ_LIST_COUNT];
	int counters[FIRQ_MAX_COUNT];

@@ -645,6 +709,11 @@ struct kvm_guestdbg_info_arch {
	unsigned long last_bp;
};

struct kvm_s390_pv_vcpu {
	u64 handle;
	unsigned long stor_base;
};

struct kvm_vcpu_arch {
	struct kvm_s390_sie_block *sie_block;
	/* if vsie is active, currently executed shadow sie control block */

@@ -673,6 +742,7 @@ struct kvm_vcpu_arch {
	__u64 cputm_start;
	bool gs_enabled;
	bool skey_enabled;
	struct kvm_s390_pv_vcpu pv;
};

struct kvm_vm_stat {

@@ -701,9 +771,6 @@ struct s390_io_adapter {
	bool masked;
	bool swap;
	bool suppressible;
	struct rw_semaphore maps_lock;
	struct list_head maps;
	atomic_t nr_maps;
};

#define MAX_S390_IO_ADAPTERS ((MAX_ISC + 1) * 8)

@@ -846,6 +913,13 @@ struct kvm_s390_gisa_interrupt {
	DECLARE_BITMAP(kicked_mask, KVM_MAX_VCPUS);
};

struct kvm_s390_pv {
	u64 handle;
	u64 guest_len;
	unsigned long stor_base;
	void *stor_var;
};

struct kvm_arch{
	void *sca;
	int use_esca;

@@ -881,6 +955,7 @@ struct kvm_arch{
	DECLARE_BITMAP(cpu_feat, KVM_S390_VM_CPU_FEAT_NR_BITS);
	DECLARE_BITMAP(idle_mask, KVM_MAX_VCPUS);
	struct kvm_s390_gisa_interrupt gisa_int;
	struct kvm_s390_pv pv;
};

#define KVM_HVA_ERR_BAD (-1UL)

@@ -921,7 +996,7 @@ static inline void kvm_arch_hardware_disable(void) {}
static inline void kvm_arch_sync_events(struct kvm *kvm) {}
static inline void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu) {}
static inline void kvm_arch_free_memslot(struct kvm *kvm,
					 struct kvm_memory_slot *free, struct kvm_memory_slot *dont) {}
					 struct kvm_memory_slot *slot) {}
static inline void kvm_arch_memslots_updated(struct kvm *kvm, u64 gen) {}
static inline void kvm_arch_flush_shadow_all(struct kvm *kvm) {}
static inline void kvm_arch_flush_shadow_memslot(struct kvm *kvm,

@@ -16,6 +16,8 @@ typedef struct {
	unsigned long asce;
	unsigned long asce_limit;
	unsigned long vdso_base;
	/* The mmu context belongs to a secure guest. */
	atomic_t is_protected;
	/*
	 * The following bitfields need a down_write on the mm
	 * semaphore when they are written to. As they are only

@@ -23,6 +23,7 @@ static inline int init_new_context(struct task_struct *tsk,
	INIT_LIST_HEAD(&mm->context.gmap_list);
	cpumask_clear(&mm->context.cpu_attach_mask);
	atomic_set(&mm->context.flush_count, 0);
	atomic_set(&mm->context.is_protected, 0);
	mm->context.gmap_asce = 0;
	mm->context.flush_mm = 0;
	mm->context.compat_mm = test_thread_flag(TIF_31BIT);
Some files were not shown because too many files have changed in this diff.