KVM/arm64 updates for Linux 5.12
- Make the nVHE EL2 object relocatable, resulting in much more maintainable code
- Handle concurrent translation faults hitting the same page in a more elegant way
- Support for the standard TRNG hypervisor call
- A bunch of small PMU/Debug fixes
- Allow the disabling of symbol export from assembly code
- Simplification of the early init hypercall handling

Merge tag 'kvmarm-5.12' of git://git.kernel.org/pub/scm/linux/kernel/git/kvmarm/kvmarm into HEAD
commit 8c6e67bec3
CREDITS | 24
@@ -710,6 +710,10 @@ S: Las Cuevas 2385 - Bo Guemes
 S: Las Heras, Mendoza CP 5539
 S: Argentina
 
+N: Jay Cliburn
+E: jcliburn@gmail.com
+D: ATLX Ethernet drivers
+
 N: Steven P. Cole
 E: scole@lanl.gov
 E: elenstev@mesatop.com

@@ -1284,6 +1288,10 @@ D: Major kbuild rework during the 2.5 cycle
 D: ISDN Maintainer
 S: USA
 
+N: Gerrit Renker
+E: gerrit@erg.abdn.ac.uk
+D: DCCP protocol support.
+
 N: Philip Gladstone
 E: philip@gladstonefamily.net
 D: Kernel / timekeeping stuff

@@ -2138,6 +2146,10 @@ E: seasons@falcon.sch.bme.hu
 E: seasons@makosteszta.sote.hu
 D: Original author of software suspend
 
+N: Alexey Kuznetsov
+E: kuznet@ms2.inr.ac.ru
+D: Author and maintainer of large parts of the networking stack
+
 N: Jaroslav Kysela
 E: perex@perex.cz
 W: https://www.perex.cz

@@ -2696,6 +2708,10 @@ N: Wolfgang Muees
 E: wolfgang@iksw-muees.de
 D: Auerswald USB driver
 
+N: Shrijeet Mukherjee
+E: shrijeet@gmail.com
+D: Network routing domains (VRF).
+
 N: Paul Mundt
 E: paul.mundt@gmail.com
 D: SuperH maintainer

@@ -4110,6 +4126,10 @@ S: B-1206 Jingmao Guojigongyu
 S: 16 Baliqiao Nanjie, Beijing 101100
 S: People's Repulic of China
 
+N: Aviad Yehezkel
+E: aviadye@nvidia.com
+D: Kernel TLS implementation and offload support.
+
 N: Victor Yodaiken
 E: yodaiken@fsmlabs.com
 D: RTLinux (RealTime Linux)

@@ -4167,6 +4187,10 @@ S: 1507 145th Place SE #B5
 S: Bellevue, Washington 98007
 S: USA
 
+N: Wensong Zhang
+E: wensong@linux-vs.org
+D: IP virtual server (IPVS).
+
 N: Haojian Zhuang
 E: haojian.zhuang@gmail.com
 D: MMP support
Documentation/admin-guide/kernel-parameters.txt

@@ -5972,6 +5972,10 @@
 			This option is obsoleted by the "nopv" option, which
 			has equivalent effect for XEN platform.
 
+	xen_no_vector_callback
+			[KNL,X86,XEN] Disable the vector callback for Xen
+			event channel interrupts.
+
 	xen_scrub_pages=	[XEN]
 			Boolean option to control scrubbing pages before giving them back
 			to Xen, for use by other domains. Can be also changed at runtime
Documentation/devicetree/bindings/net/renesas,etheravb.yaml

@@ -163,6 +163,7 @@ allOf:
               enum:
                 - renesas,etheravb-r8a774a1
                 - renesas,etheravb-r8a774b1
+                - renesas,etheravb-r8a774e1
                 - renesas,etheravb-r8a7795
                 - renesas,etheravb-r8a7796
                 - renesas,etheravb-r8a77961
Documentation/devicetree/bindings/net/snps,dwmac.yaml

@@ -161,7 +161,8 @@ properties:
         * snps,route-dcbcp, DCB Control Packets
         * snps,route-up, Untagged Packets
         * snps,route-multi-broad, Multicast & Broadcast Packets
-        * snps,priority, RX queue priority (Range 0x0 to 0xF)
+        * snps,priority, bitmask of the tagged frames priorities assigned to
+          the queue
 
   snps,mtl-tx-config:
     $ref: /schemas/types.yaml#/definitions/phandle

@@ -188,7 +189,10 @@ properties:
         * snps,idle_slope, unlock on WoL
         * snps,high_credit, max write outstanding req. limit
        * snps,low_credit, max read outstanding req. limit
-        * snps,priority, TX queue priority (Range 0x0 to 0xF)
+        * snps,priority, bitmask of the priorities assigned to the queue.
+          When a PFC frame is received with priorities matching the bitmask,
+          the queue is blocked from transmitting for the pause time specified
+          in the PFC frame.
 
   snps,reset-gpio:
     deprecated: true
Documentation/devicetree/bindings/sound/ti,j721e-cpb-audio.yaml

@@ -1,4 +1,6 @@
 # SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+# Copyright (C) 2020 Texas Instruments Incorporated
+# Author: Peter Ujfalusi <peter.ujfalusi@ti.com>
 %YAML 1.2
 ---
 $id: http://devicetree.org/schemas/sound/ti,j721e-cpb-audio.yaml#

@@ -7,7 +9,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
 title: Texas Instruments J721e Common Processor Board Audio Support
 
 maintainers:
-  - Peter Ujfalusi <peter.ujfalusi@ti.com>
+  - Peter Ujfalusi <peter.ujfalusi@gmail.com>
 
 description: |
   The audio support on the board is using pcm3168a codec connected to McASP10
Documentation/devicetree/bindings/sound/ti,j721e-cpb-ivi-audio.yaml

@@ -1,4 +1,6 @@
 # SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+# Copyright (C) 2020 Texas Instruments Incorporated
+# Author: Peter Ujfalusi <peter.ujfalusi@ti.com>
 %YAML 1.2
 ---
 $id: http://devicetree.org/schemas/sound/ti,j721e-cpb-ivi-audio.yaml#

@@ -7,7 +9,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
 title: Texas Instruments J721e Common Processor Board Audio Support
 
 maintainers:
-  - Peter Ujfalusi <peter.ujfalusi@ti.com>
+  - Peter Ujfalusi <peter.ujfalusi@gmail.com>
 
 description: |
   The Infotainment board plugs into the Common Processor Board, the support of the
Documentation/firmware-guide/acpi/apei/einj.rst

@@ -50,8 +50,8 @@ The following files belong to it:
   0x00000010        Memory Uncorrectable non-fatal
   0x00000020        Memory Uncorrectable fatal
   0x00000040        PCI Express Correctable
-  0x00000080        PCI Express Uncorrectable fatal
-  0x00000100        PCI Express Uncorrectable non-fatal
+  0x00000080        PCI Express Uncorrectable non-fatal
+  0x00000100        PCI Express Uncorrectable fatal
   0x00000200        Platform Correctable
   0x00000400        Platform Uncorrectable non-fatal
   0x00000800        Platform Uncorrectable fatal
Documentation/networking/netdevices.rst

@@ -10,18 +10,177 @@ Introduction
 The following is a random collection of documentation regarding
 network devices.
 
-struct net_device allocation rules
-==================================
+struct net_device lifetime rules
+================================
 Network device structures need to persist even after module is unloaded and
 must be allocated with alloc_netdev_mqs() and friends.
 If device has registered successfully, it will be freed on last use
-by free_netdev(). This is required to handle the pathologic case cleanly
-(example: rmmod mydriver </sys/class/net/myeth/mtu )
+by free_netdev(). This is required to handle the pathological case cleanly
+(example: ``rmmod mydriver </sys/class/net/myeth/mtu``)
 
-alloc_netdev_mqs()/alloc_netdev() reserve extra space for driver
+alloc_netdev_mqs() / alloc_netdev() reserve extra space for driver
 private data which gets freed when the network device is freed. If
 separately allocated data is attached to the network device
-(netdev_priv(dev)) then it is up to the module exit handler to free that.
+(netdev_priv()) then it is up to the module exit handler to free that.
+
+There are two groups of APIs for registering struct net_device.
+First group can be used in normal contexts where ``rtnl_lock`` is not already
+held: register_netdev(), unregister_netdev().
+Second group can be used when ``rtnl_lock`` is already held:
+register_netdevice(), unregister_netdevice(), free_netdevice().
+
+Simple drivers
+--------------
+
+Most drivers (especially device drivers) handle lifetime of struct net_device
+in context where ``rtnl_lock`` is not held (e.g. driver probe and remove paths).
+
+In that case the struct net_device registration is done using
+the register_netdev(), and unregister_netdev() functions:
+
+.. code-block:: c
+
+    int probe()
+    {
+        struct my_device_priv *priv;
+        int err;
+
+        dev = alloc_netdev_mqs(...);
+        if (!dev)
+            return -ENOMEM;
+        priv = netdev_priv(dev);
+
+        /* ... do all device setup before calling register_netdev() ...
+         */
+
+        err = register_netdev(dev);
+        if (err)
+            goto err_undo;
+
+        /* net_device is visible to the user! */
+
+      err_undo:
+        /* ... undo the device setup ... */
+        free_netdev(dev);
+        return err;
+    }
+
+    void remove()
+    {
+        unregister_netdev(dev);
+        free_netdev(dev);
+    }
+
+Note that after calling register_netdev() the device is visible in the system.
+Users can open it and start sending / receiving traffic immediately,
+or run any other callback, so all initialization must be done prior to
+registration.
+
+unregister_netdev() closes the device and waits for all users to be done
+with it. The memory of struct net_device itself may still be referenced
+by sysfs but all operations on that device will fail.
+
+free_netdev() can be called after unregister_netdev() returns or when
+register_netdev() failed.
+
+Device management under RTNL
+----------------------------
+
+Registering struct net_device while in context which already holds
+the ``rtnl_lock`` requires extra care. In those scenarios most drivers
+will want to make use of struct net_device's ``needs_free_netdev``
+and ``priv_destructor`` members for freeing of state.
+
+Example flow of netdev handling under ``rtnl_lock``:
+
+.. code-block:: c
+
+    static void my_setup(struct net_device *dev)
+    {
+        dev->needs_free_netdev = true;
+    }
+
+    static void my_destructor(struct net_device *dev)
+    {
+        some_obj_destroy(priv->obj);
+        some_uninit(priv);
+    }
+
+    int create_link()
+    {
+        struct my_device_priv *priv;
+        int err;
+
+        ASSERT_RTNL();
+
+        dev = alloc_netdev(sizeof(*priv), "net%d", NET_NAME_UNKNOWN, my_setup);
+        if (!dev)
+            return -ENOMEM;
+        priv = netdev_priv(dev);
+
+        /* Implicit constructor */
+        err = some_init(priv);
+        if (err)
+            goto err_free_dev;
+
+        priv->obj = some_obj_create();
+        if (!priv->obj) {
+            err = -ENOMEM;
+            goto err_some_uninit;
+        }
+        /* End of constructor, set the destructor: */
+        dev->priv_destructor = my_destructor;
+
+        err = register_netdevice(dev);
+        if (err)
+            /* register_netdevice() calls destructor on failure */
+            goto err_free_dev;
+
+        /* If anything fails now unregister_netdevice() (or unregister_netdev())
+         * will take care of calling my_destructor and free_netdev().
+         */
+
+        return 0;
+
+      err_some_uninit:
+        some_uninit(priv);
+      err_free_dev:
+        free_netdev(dev);
+        return err;
+    }
+
+If struct net_device.priv_destructor is set it will be called by the core
+some time after unregister_netdevice(), it will also be called if
+register_netdevice() fails. The callback may be invoked with or without
+``rtnl_lock`` held.
+
+There is no explicit constructor callback, driver "constructs" the private
+netdev state after allocating it and before registration.
+
+Setting struct net_device.needs_free_netdev makes core call free_netdevice()
+automatically after unregister_netdevice() when all references to the device
+are gone. It only takes effect after a successful call to register_netdevice()
+so if register_netdevice() fails driver is responsible for calling
+free_netdev().
+
+free_netdev() is safe to call on error paths right after unregister_netdevice()
+or when register_netdevice() fails. Parts of netdev (de)registration process
+happen after ``rtnl_lock`` is released, therefore in those cases free_netdev()
+will defer some of the processing until ``rtnl_lock`` is released.
+
+Devices spawned from struct rtnl_link_ops should never free the
+struct net_device directly.
+
+.ndo_init and .ndo_uninit
+~~~~~~~~~~~~~~~~~~~~~~~~~
+
+``.ndo_init`` and ``.ndo_uninit`` callbacks are called during net_device
+registration and de-registration, under ``rtnl_lock``. Drivers can use
+those e.g. when parts of their init process need to run under ``rtnl_lock``.
+
+``.ndo_init`` runs before device is visible in the system, ``.ndo_uninit``
+runs during de-registering after device is closed but other subsystems
+may still have outstanding references to the netdevice.
 
 MTU
 ===
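(Aside, not part of the patch: the ``.ndo_init``/``.ndo_uninit`` rules in the hunk above can be made concrete with a minimal sketch of a hypothetical driver — the names below are invented for illustration, only the ops struct and callback signatures are the real netdev API.)

    #include <linux/netdevice.h>

    /* Both callbacks run under rtnl_lock. */
    static int my_ndo_init(struct net_device *dev)
    {
            /* Runs from register_netdevice(), before the device is
             * visible in the system; an error here aborts registration. */
            return 0;
    }

    static void my_ndo_uninit(struct net_device *dev)
    {
            /* Runs during de-registration, after the device is closed;
             * other subsystems may still hold references to the netdev. */
    }

    static const struct net_device_ops my_netdev_ops = {
            .ndo_init   = my_ndo_init,
            .ndo_uninit = my_ndo_uninit,
    };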
Documentation/networking/tls-offload.rst

@@ -530,7 +530,7 @@ TLS device feature flags only control adding of new TLS connection
 offloads, old connections will remain active after flags are cleared.
 
 TLS encryption cannot be offloaded to devices without checksum calculation
-offload. Hence, TLS TX device feature flag requires NETIF_F_HW_CSUM being set.
+offload. Hence, TLS TX device feature flag requires TX csum offload being set.
 Disabling the latter implies clearing the former. Disabling TX checksum offload
 should not affect old connections, and drivers should make sure checksum
 calculation does not break for them.
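(Aside, not part of the patch: a sketch of how the dependency described above could be expressed in a hypothetical driver's ``ndo_fix_features`` callback. The NETIF_F_* flags and the callback signature are the real netdev feature API; the function itself is invented for illustration.)

    #include <linux/netdevice.h>

    /* Drop TLS TX offload unless some form of TX checksum offload
     * remains enabled (HW_CSUM, or both IP_CSUM and IPV6_CSUM). */
    static netdev_features_t my_fix_features(struct net_device *dev,
                                             netdev_features_t features)
    {
            if (!(features & NETIF_F_HW_CSUM) &&
                !((features & NETIF_F_IP_CSUM) &&
                  (features & NETIF_F_IPV6_CSUM)))
                    features &= ~NETIF_F_TLS_TX;
            return features;
    }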
Documentation/sound/alsa-configuration.rst

@@ -1501,7 +1501,7 @@ Module for Digigram miXart8 sound cards.
 
 This module supports multiple cards.
 Note: One miXart8 board will be represented as 4 alsa cards.
-See MIXART.txt for details.
+See Documentation/sound/cards/mixart.rst for details.
 
 When the driver is compiled as a module and the hotplug firmware
 is supported, the firmware data is loaded via hotplug automatically.
MAINTAINERS | 20
@@ -820,7 +820,6 @@ M: Netanel Belgazal <netanel@amazon.com>
 M: Arthur Kiyanovski <akiyano@amazon.com>
 R: Guy Tzalik <gtzalik@amazon.com>
 R: Saeed Bishara <saeedb@amazon.com>
-R: Zorik Machulsky <zorik@amazon.com>
 L: netdev@vger.kernel.org
 S: Supported
 F: Documentation/networking/device_drivers/ethernet/amazon/ena.rst

@@ -907,7 +906,7 @@ AMD KFD
 M: Felix Kuehling <Felix.Kuehling@amd.com>
 L: amd-gfx@lists.freedesktop.org
 S: Supported
-T: git git://people.freedesktop.org/~agd5f/linux
+T: git https://gitlab.freedesktop.org/agd5f/linux.git
 F: drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd*.[ch]
 F: drivers/gpu/drm/amd/amdkfd/
 F: drivers/gpu/drm/amd/include/cik_structs.h

@@ -2942,7 +2941,6 @@ S: Maintained
 F: drivers/hwmon/asus_atk0110.c
 
 ATLX ETHERNET DRIVERS
-M: Jay Cliburn <jcliburn@gmail.com>
 M: Chris Snook <chris.snook@gmail.com>
 L: netdev@vger.kernel.org
 S: Maintained

@@ -4922,9 +4920,8 @@ F: Documentation/scsi/dc395x.rst
 F: drivers/scsi/dc395x.*
 
 DCCP PROTOCOL
-M: Gerrit Renker <gerrit@erg.abdn.ac.uk>
 L: dccp@vger.kernel.org
-S: Maintained
+S: Orphan
 W: http://www.linuxfoundation.org/collaborate/workgroups/networking/dccp
 F: include/linux/dccp.h
 F: include/linux/tfrc.h

@@ -9326,7 +9323,6 @@ W: http://www.adaptec.com/
 F: drivers/scsi/ips*
 
 IPVS
-M: Wensong Zhang <wensong@linux-vs.org>
 M: Simon Horman <horms@verge.net.au>
 M: Julian Anastasov <ja@ssi.bg>
 L: netdev@vger.kernel.org

@@ -12416,7 +12412,6 @@ F: tools/testing/selftests/net/ipsec.c
 
 NETWORKING [IPv4/IPv6]
 M: "David S. Miller" <davem@davemloft.net>
-M: Alexey Kuznetsov <kuznet@ms2.inr.ac.ru>
 M: Hideaki YOSHIFUJI <yoshfuji@linux-ipv6.org>
 L: netdev@vger.kernel.org
 S: Maintained

@@ -12473,7 +12468,6 @@
 
 NETWORKING [TLS]
 M: Boris Pismenny <borisp@nvidia.com>
-M: Aviad Yehezkel <aviadye@nvidia.com>
 M: John Fastabend <john.fastabend@gmail.com>
 M: Daniel Borkmann <daniel@iogearbox.net>
 M: Jakub Kicinski <kuba@kernel.org>

@@ -12848,7 +12842,7 @@ F: include/misc/ocxl*
 F: include/uapi/misc/ocxl.h
 
 OMAP AUDIO SUPPORT
-M: Peter Ujfalusi <peter.ujfalusi@ti.com>
+M: Peter Ujfalusi <peter.ujfalusi@gmail.com>
 M: Jarkko Nikula <jarkko.nikula@bitmer.com>
 L: alsa-devel@alsa-project.org (moderated for non-subscribers)
 L: linux-omap@vger.kernel.org

@@ -14818,7 +14812,7 @@ M: Alex Deucher <alexander.deucher@amd.com>
 M: Christian König <christian.koenig@amd.com>
 L: amd-gfx@lists.freedesktop.org
 S: Supported
-T: git git://people.freedesktop.org/~agd5f/linux
+T: git https://gitlab.freedesktop.org/agd5f/linux.git
 F: drivers/gpu/drm/amd/
 F: drivers/gpu/drm/radeon/
 F: include/uapi/drm/amdgpu_drm.h

@@ -16319,6 +16313,7 @@ M: Pekka Enberg <penberg@kernel.org>
 M: David Rientjes <rientjes@google.com>
 M: Joonsoo Kim <iamjoonsoo.kim@lge.com>
 M: Andrew Morton <akpm@linux-foundation.org>
+M: Vlastimil Babka <vbabka@suse.cz>
 L: linux-mm@kvack.org
 S: Maintained
 F: include/linux/sl?b*.h

@@ -17541,7 +17536,7 @@ F: arch/xtensa/
 F: drivers/irqchip/irq-xtensa-*
 
 TEXAS INSTRUMENTS ASoC DRIVERS
-M: Peter Ujfalusi <peter.ujfalusi@ti.com>
+M: Peter Ujfalusi <peter.ujfalusi@gmail.com>
 L: alsa-devel@alsa-project.org (moderated for non-subscribers)
 S: Maintained
 F: sound/soc/ti/

@@ -17851,7 +17846,7 @@ F: Documentation/devicetree/bindings/net/nfc/trf7970a.txt
 F: drivers/nfc/trf7970a.c
 
 TI TWL4030 SERIES SOC CODEC DRIVER
-M: Peter Ujfalusi <peter.ujfalusi@ti.com>
+M: Peter Ujfalusi <peter.ujfalusi@gmail.com>
 L: alsa-devel@alsa-project.org (moderated for non-subscribers)
 S: Maintained
 F: sound/soc/codecs/twl4030*

@@ -19071,7 +19066,6 @@ K: regulator_get_optional
 
 VRF
 M: David Ahern <dsahern@kernel.org>
-M: Shrijeet Mukherjee <shrijeet@gmail.com>
 L: netdev@vger.kernel.org
 S: Maintained
 F: Documentation/networking/vrf.rst
Makefile | 2
@@ -2,7 +2,7 @@
 VERSION = 5
 PATCHLEVEL = 11
 SUBLEVEL = 0
-EXTRAVERSION = -rc3
+EXTRAVERSION = -rc4
 NAME = Kleptomaniac Octopus
 
 # *DOCUMENTATION*
arch/arm/xen/enlighten.c

@@ -371,7 +371,7 @@ static int __init xen_guest_init(void)
 	}
 	gnttab_init();
 	if (!xen_initial_domain())
-		xenbus_probe(NULL);
+		xenbus_probe();
 
 	/*
 	 * Making sure board specific code will not set up ops for
arch/arm64/Kconfig

@@ -174,8 +174,6 @@ config ARM64
 	select HAVE_NMI
 	select HAVE_PATA_PLATFORM
 	select HAVE_PERF_EVENTS
-	select HAVE_PERF_EVENTS_NMI if ARM64_PSEUDO_NMI && HW_PERF_EVENTS
-	select HAVE_HARDLOCKUP_DETECTOR_PERF if PERF_EVENTS && HAVE_PERF_EVENTS_NMI
 	select HAVE_PERF_REGS
 	select HAVE_PERF_USER_STACK_DUMP
 	select HAVE_REGS_AND_STACK_ACCESS_API
arch/arm64/include/asm/atomic.h

@@ -17,7 +17,7 @@
 #include <asm/lse.h>
 
 #define ATOMIC_OP(op)							\
-static inline void arch_##op(int i, atomic_t *v)			\
+static __always_inline void arch_##op(int i, atomic_t *v)		\
 {									\
 	__lse_ll_sc_body(op, i, v);					\
 }

@@ -32,7 +32,7 @@ ATOMIC_OP(atomic_sub)
 #undef ATOMIC_OP
 
 #define ATOMIC_FETCH_OP(name, op)					\
-static inline int arch_##op##name(int i, atomic_t *v)			\
+static __always_inline int arch_##op##name(int i, atomic_t *v)		\
 {									\
 	return __lse_ll_sc_body(op##name, i, v);			\
 }

@@ -56,7 +56,7 @@ ATOMIC_FETCH_OPS(atomic_sub_return)
 #undef ATOMIC_FETCH_OPS
 
 #define ATOMIC64_OP(op)							\
-static inline void arch_##op(long i, atomic64_t *v)			\
+static __always_inline void arch_##op(long i, atomic64_t *v)		\
 {									\
 	__lse_ll_sc_body(op, i, v);					\
 }

@@ -71,7 +71,7 @@ ATOMIC64_OP(atomic64_sub)
 #undef ATOMIC64_OP
 
 #define ATOMIC64_FETCH_OP(name, op)					\
-static inline long arch_##op##name(long i, atomic64_t *v)		\
+static __always_inline long arch_##op##name(long i, atomic64_t *v)	\
 {									\
 	return __lse_ll_sc_body(op##name, i, v);			\
 }

@@ -94,7 +94,7 @@ ATOMIC64_FETCH_OPS(atomic64_sub_return)
 #undef ATOMIC64_FETCH_OP
 #undef ATOMIC64_FETCH_OPS
 
-static inline long arch_atomic64_dec_if_positive(atomic64_t *v)
+static __always_inline long arch_atomic64_dec_if_positive(atomic64_t *v)
 {
 	return __lse_ll_sc_body(atomic64_dec_if_positive, v);
 }
arch/arm64/include/asm/hyp_image.h

@@ -7,6 +7,9 @@
 #ifndef __ARM64_HYP_IMAGE_H__
 #define __ARM64_HYP_IMAGE_H__
 
+#define __HYP_CONCAT(a, b)	a ## b
+#define HYP_CONCAT(a, b)	__HYP_CONCAT(a, b)
+
 /*
  * KVM nVHE code has its own symbol namespace prefixed with __kvm_nvhe_,
  * to separate it from the kernel proper.

@@ -21,9 +24,31 @@
  */
 #define HYP_SECTION_NAME(NAME)	.hyp##NAME
 
+/* Symbol defined at the beginning of each hyp section. */
+#define HYP_SECTION_SYMBOL_NAME(NAME) \
+	HYP_CONCAT(__hyp_section_, HYP_SECTION_NAME(NAME))
+
+/*
+ * Helper to generate linker script statements starting a hyp section.
+ *
+ * A symbol with a well-known name is defined at the first byte. This
+ * is used as a base for hyp relocations (see gen-hyprel.c). It must
+ * be defined inside the section so the linker of `vmlinux` cannot
+ * separate it from the section data.
+ */
+#define BEGIN_HYP_SECTION(NAME)				\
+	HYP_SECTION_NAME(NAME) : {			\
+		HYP_SECTION_SYMBOL_NAME(NAME) = .;
+
+/* Helper to generate linker script statements ending a hyp section. */
+#define END_HYP_SECTION					\
+	}
+
 /* Defines an ELF hyp section from input section @NAME and its subsections. */
-#define HYP_SECTION(NAME) \
-	HYP_SECTION_NAME(NAME) : { *(NAME NAME##.*) }
+#define HYP_SECTION(NAME)			\
+	BEGIN_HYP_SECTION(NAME)			\
+		*(NAME NAME##.*)		\
+	END_HYP_SECTION
 
 /*
  * Defines a linker script alias of a kernel-proper symbol referenced by
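(Aside, not part of the patch: expanding the reworked macro by hand shows its effect. HYP_SECTION(.text) now generates a linker-script fragment with the base symbol pinned inside the output section:

    .hyp.text : {
            __hyp_section_.hyp.text = .;
            *(.text .text.*)
    }

That __hyp_section_* symbol is the anchor that gen-hyprel.c, added later in this series, targets with its PREL32 relocations.)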
arch/arm64/include/asm/kvm_asm.h

@@ -199,32 +199,6 @@ extern void __vgic_v3_init_lrs(void);
 
 extern u32 __kvm_get_mdcr_el2(void);
 
-#if defined(GCC_VERSION) && GCC_VERSION < 50000
-#define SYM_CONSTRAINT "i"
-#else
-#define SYM_CONSTRAINT "S"
-#endif
-
-/*
- * Obtain the PC-relative address of a kernel symbol
- * s: symbol
- *
- * The goal of this macro is to return a symbol's address based on a
- * PC-relative computation, as opposed to a loading the VA from a
- * constant pool or something similar. This works well for HYP, as an
- * absolute VA is guaranteed to be wrong. Only use this if trying to
- * obtain the address of a symbol (i.e. not something you obtained by
- * following a pointer).
- */
-#define hyp_symbol_addr(s)						\
-	({								\
-		typeof(s) *addr;					\
-		asm("adrp	%0, %1\n"				\
-		    "add	%0, %0, :lo12:%1\n"			\
-		    : "=r" (addr) : SYM_CONSTRAINT (&s));		\
-		addr;							\
-	})
-
 #define __KVM_EXTABLE(from, to)						\
 	"	.pushsection	__kvm_ex_table, \"a\"\n"		\
 	"	.align		3\n"					\
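(The caller-visible effect of removing hyp_symbol_addr() shows up in the hyp/switch.h hunk further below: once hyp relocations fix up absolute addresses at boot, a plain address-of expression is enough.)

    /* before */
    entry = hyp_symbol_addr(__start___kvm_ex_table);
    /* after */
    entry = &__start___kvm_ex_table;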
arch/arm64/include/asm/kvm_host.h

@@ -770,4 +770,6 @@ bool kvm_arm_vcpu_is_finalized(struct kvm_vcpu *vcpu);
 #define kvm_vcpu_has_pmu(vcpu)				\
 	(test_bit(KVM_ARM_VCPU_PMU_V3, (vcpu)->arch.features))
 
+int kvm_trng_call(struct kvm_vcpu *vcpu);
+
 #endif /* __ARM64_KVM_HOST_H__ */
arch/arm64/include/asm/kvm_mmu.h

@@ -73,8 +73,18 @@ alternative_cb_end
 .endm
 
 /*
- * Convert a kernel image address to a PA
- * reg: kernel address to be converted in place
+ * Convert a hypervisor VA to a PA
+ * reg: hypervisor address to be converted in place
  * tmp: temporary register
+ */
+.macro hyp_pa reg, tmp
+	ldr_l	\tmp, hyp_physvirt_offset
+	add	\reg, \reg, \tmp
+.endm
+
+/*
+ * Convert a hypervisor VA to a kernel image address
+ * reg: hypervisor address to be converted in place
+ * tmp: temporary register
  *
  * The actual code generation takes place in kvm_get_kimage_voffset, and

@@ -82,7 +92,11 @@ alternative_cb_end
  * perform the register allocation (kvm_get_kimage_voffset uses the
  * specific registers encoded in the instructions).
  */
-.macro kimg_pa reg, tmp
+.macro hyp_kimg_va reg, tmp
+	/* Convert hyp VA -> PA. */
+	hyp_pa	\reg, \tmp
+
+	/* Load kimage_voffset. */
 alternative_cb kvm_get_kimage_voffset
 	movz	\tmp, #0
 	movk	\tmp, #0, lsl #16

@@ -90,32 +104,8 @@ alternative_cb kvm_get_kimage_voffset
 	movk	\tmp, #0, lsl #48
 alternative_cb_end
 
-	/* reg = __pa(reg) */
-	sub	\reg, \reg, \tmp
-.endm
-
-/*
- * Convert a kernel image address to a hyp VA
- * reg: kernel address to be converted in place
- * tmp: temporary register
- *
- * The actual code generation takes place in kvm_get_kimage_voffset, and
- * the instructions below are only there to reserve the space and
- * perform the register allocation (kvm_update_kimg_phys_offset uses the
- * specific registers encoded in the instructions).
- */
-.macro kimg_hyp_va reg, tmp
-alternative_cb kvm_update_kimg_phys_offset
-	movz	\tmp, #0
-	movk	\tmp, #0, lsl #16
-	movk	\tmp, #0, lsl #32
-	movk	\tmp, #0, lsl #48
-alternative_cb_end
-
-	sub	\reg, \reg, \tmp
-	mov_q	\tmp, PAGE_OFFSET
-	orr	\reg, \reg, \tmp
-	kern_hyp_va	\reg
+	/* Convert PA -> kimg VA. */
+	add	\reg, \reg, \tmp
 .endm
 
 #else

@@ -129,6 +119,7 @@ alternative_cb_end
 void kvm_update_va_mask(struct alt_instr *alt,
 			__le32 *origptr, __le32 *updptr, int nr_inst);
 void kvm_compute_layout(void);
+void kvm_apply_hyp_relocations(void);
 
 static __always_inline unsigned long __kern_hyp_va(unsigned long v)
 {

@@ -144,24 +135,6 @@ static __always_inline unsigned long __kern_hyp_va(unsigned long v)
 
 #define kern_hyp_va(v) 	((typeof(v))(__kern_hyp_va((unsigned long)(v))))
 
-static __always_inline unsigned long __kimg_hyp_va(unsigned long v)
-{
-	unsigned long offset;
-
-	asm volatile(ALTERNATIVE_CB("movz %0, #0\n"
-				    "movk %0, #0, lsl #16\n"
-				    "movk %0, #0, lsl #32\n"
-				    "movk %0, #0, lsl #48\n",
-				    kvm_update_kimg_phys_offset)
-		     : "=r" (offset));
-
-	return __kern_hyp_va((v - offset) | PAGE_OFFSET);
-}
-
-#define kimg_fn_hyp_va(v) 	((typeof(*v))(__kimg_hyp_va((unsigned long)(v))))
-
-#define kimg_fn_ptr(x)	(typeof(x) **)(x)
-
 /*
  * We currently support using a VM-specified IPA size. For backward
  * compatibility, the default IPA size is fixed to 40bits.
arch/arm64/include/asm/kvm_pgtable.h

@@ -157,6 +157,11 @@ void kvm_pgtable_stage2_destroy(struct kvm_pgtable *pgt);
  * If device attributes are not explicitly requested in @prot, then the
  * mapping will be normal, cacheable.
  *
+ * Note that the update of a valid leaf PTE in this function will be aborted,
+ * if it's trying to recreate the exact same mapping or only change the access
+ * permissions. Instead, the vCPU will exit one more time from guest if still
+ * needed and then go through the path of relaxing permissions.
+ *
  * Note that this function will both coalesce existing table entries and split
  * existing block mappings, relying on page-faults to fault back areas outside
  * of the new mapping lazily.
arch/arm64/include/asm/processor.h

@@ -94,8 +94,7 @@
 #endif /* CONFIG_ARM64_FORCE_52BIT */
 
 extern phys_addr_t arm64_dma_phys_limit;
-extern phys_addr_t arm64_dma32_phys_limit;
-#define ARCH_LOW_ADDRESS_LIMIT	((arm64_dma_phys_limit ? : arm64_dma32_phys_limit) - 1)
+#define ARCH_LOW_ADDRESS_LIMIT	(arm64_dma_phys_limit - 1)
 
 struct debug_info {
 #ifdef CONFIG_HAVE_HW_BREAKPOINT
arch/arm64/include/asm/sections.h

@@ -11,7 +11,8 @@ extern char __alt_instructions[], __alt_instructions_end[];
 extern char __hibernate_exit_text_start[], __hibernate_exit_text_end[];
 extern char __hyp_idmap_text_start[], __hyp_idmap_text_end[];
 extern char __hyp_text_start[], __hyp_text_end[];
-extern char __hyp_data_ro_after_init_start[], __hyp_data_ro_after_init_end[];
+extern char __hyp_rodata_start[], __hyp_rodata_end[];
+extern char __hyp_reloc_begin[], __hyp_reloc_end[];
 extern char __idmap_text_start[], __idmap_text_end[];
 extern char __initdata_begin[], __initdata_end[];
 extern char __inittext_begin[], __inittext_end[];
arch/arm64/include/asm/sysreg.h

@@ -846,7 +846,10 @@
 
 #define ID_DFR0_PERFMON_SHIFT		24
 
 #define ID_DFR0_PERFMON_8_0		0x3
+#define ID_DFR0_PERFMON_8_1		0x4
+#define ID_DFR0_PERFMON_8_4		0x5
+#define ID_DFR0_PERFMON_8_5		0x6
 
 #define ID_ISAR4_SWP_FRAC_SHIFT		28
 #define ID_ISAR4_PSR_M_SHIFT		24
arch/arm64/kernel/asm-offsets.c

@@ -75,7 +75,7 @@ int main(void)
   DEFINE(S_SDEI_TTBR1,		offsetof(struct pt_regs, sdei_ttbr1));
   DEFINE(S_PMR_SAVE,		offsetof(struct pt_regs, pmr_save));
   DEFINE(S_STACKFRAME,		offsetof(struct pt_regs, stackframe));
-  DEFINE(S_FRAME_SIZE,		sizeof(struct pt_regs));
+  DEFINE(PT_REGS_SIZE,		sizeof(struct pt_regs));
   BLANK();
 #ifdef CONFIG_COMPAT
   DEFINE(COMPAT_SIGFRAME_REGS_OFFSET,		offsetof(struct compat_sigframe, uc.uc_mcontext.arm_r0));
arch/arm64/kernel/entry-ftrace.S

@@ -35,7 +35,7 @@
  */
 	.macro	ftrace_regs_entry, allregs=0
 	/* Make room for pt_regs, plus a callee frame */
-	sub	sp, sp, #(S_FRAME_SIZE + 16)
+	sub	sp, sp, #(PT_REGS_SIZE + 16)
 
 	/* Save function arguments (and x9 for simplicity) */
 	stp	x0, x1, [sp, #S_X0]

@@ -61,15 +61,15 @@
 	.endif
 
 	/* Save the callsite's SP and LR */
-	add	x10, sp, #(S_FRAME_SIZE + 16)
+	add	x10, sp, #(PT_REGS_SIZE + 16)
 	stp	x9, x10, [sp, #S_LR]
 
 	/* Save the PC after the ftrace callsite */
 	str	x30, [sp, #S_PC]
 
 	/* Create a frame record for the callsite above pt_regs */
-	stp	x29, x9, [sp, #S_FRAME_SIZE]
-	add	x29, sp, #S_FRAME_SIZE
+	stp	x29, x9, [sp, #PT_REGS_SIZE]
+	add	x29, sp, #PT_REGS_SIZE
 
 	/* Create our frame record within pt_regs. */
 	stp	x29, x30, [sp, #S_STACKFRAME]

@@ -120,7 +120,7 @@ ftrace_common_return:
 	ldr	x9, [sp, #S_PC]
 
 	/* Restore the callsite's SP */
-	add	sp, sp, #S_FRAME_SIZE + 16
+	add	sp, sp, #PT_REGS_SIZE + 16
 
 	ret	x9
 SYM_CODE_END(ftrace_common)

@@ -130,7 +130,7 @@ SYM_CODE_START(ftrace_graph_caller)
 	ldr	x0, [sp, #S_PC]
 	sub	x0, x0, #AARCH64_INSN_SIZE	// ip (callsite's BL insn)
 	add	x1, sp, #S_LR			// parent_ip (callsite's LR)
-	ldr	x2, [sp, #S_FRAME_SIZE]		// parent fp (callsite's FP)
+	ldr	x2, [sp, #PT_REGS_SIZE]		// parent fp (callsite's FP)
 	bl	prepare_ftrace_return
 	b	ftrace_common_return
 SYM_CODE_END(ftrace_graph_caller)
arch/arm64/kernel/entry.S

@@ -75,7 +75,7 @@ alternative_else_nop_endif
 	.endif
 #endif
 
-	sub	sp, sp, #S_FRAME_SIZE
+	sub	sp, sp, #PT_REGS_SIZE
 #ifdef CONFIG_VMAP_STACK
 	/*
 	 * Test whether the SP has overflowed, without corrupting a GPR.

@@ -96,7 +96,7 @@ alternative_else_nop_endif
 	 * userspace, and can clobber EL0 registers to free up GPRs.
 	 */
 
-	/* Stash the original SP (minus S_FRAME_SIZE) in tpidr_el0. */
+	/* Stash the original SP (minus PT_REGS_SIZE) in tpidr_el0. */
 	msr	tpidr_el0, x0
 
 	/* Recover the original x0 value and stash it in tpidrro_el0 */

@@ -253,7 +253,7 @@ alternative_else_nop_endif
 
 	scs_load tsk, x20
 	.else
-	add	x21, sp, #S_FRAME_SIZE
+	add	x21, sp, #PT_REGS_SIZE
 	get_current_task tsk
 	.endif /* \el == 0 */
 	mrs	x22, elr_el1

@@ -377,7 +377,7 @@ alternative_else_nop_endif
 	ldp	x26, x27, [sp, #16 * 13]
 	ldp	x28, x29, [sp, #16 * 14]
 	ldr	lr, [sp, #S_LR]
-	add	sp, sp, #S_FRAME_SIZE		// restore sp
+	add	sp, sp, #PT_REGS_SIZE		// restore sp
 
 	.if	\el == 0
 alternative_insn eret, nop, ARM64_UNMAP_KERNEL_AT_EL0

@@ -580,12 +580,12 @@ __bad_stack:
 
 	/*
 	 * Store the original GPRs to the new stack. The orginal SP (minus
-	 * S_FRAME_SIZE) was stashed in tpidr_el0 by kernel_ventry.
+	 * PT_REGS_SIZE) was stashed in tpidr_el0 by kernel_ventry.
 	 */
-	sub	sp, sp, #S_FRAME_SIZE
+	sub	sp, sp, #PT_REGS_SIZE
 	kernel_entry 1
 	mrs	x0, tpidr_el0
-	add	x0, x0, #S_FRAME_SIZE
+	add	x0, x0, #PT_REGS_SIZE
 	str	x0, [sp, #S_SP]
 
 	/* Stash the regs for handle_bad_stack */
arch/arm64/kernel/image-vars.h

@@ -64,7 +64,6 @@ __efistub__ctype		= _ctype;
 /* Alternative callbacks for init-time patching of nVHE hyp code. */
 KVM_NVHE_ALIAS(kvm_patch_vector_branch);
 KVM_NVHE_ALIAS(kvm_update_va_mask);
-KVM_NVHE_ALIAS(kvm_update_kimg_phys_offset);
 KVM_NVHE_ALIAS(kvm_get_kimage_voffset);
 
 /* Global kernel state accessed by nVHE hyp code. */
arch/arm64/kernel/perf_event.c

@@ -23,8 +23,6 @@
 #include <linux/platform_device.h>
 #include <linux/sched_clock.h>
 #include <linux/smp.h>
-#include <linux/nmi.h>
-#include <linux/cpufreq.h>
 
 /* ARMv8 Cortex-A53 specific event types. */
 #define ARMV8_A53_PERFCTR_PREF_LINEFILL				0xC2

@@ -1250,21 +1248,10 @@ static struct platform_driver armv8_pmu_driver = {
 
 static int __init armv8_pmu_driver_init(void)
 {
-	int ret;
-
 	if (acpi_disabled)
-		ret = platform_driver_register(&armv8_pmu_driver);
+		return platform_driver_register(&armv8_pmu_driver);
 	else
-		ret = arm_pmu_acpi_probe(armv8_pmuv3_init);
-
-	/*
-	 * Try to re-initialize lockup detector after PMU init in
-	 * case PMU events are triggered via NMIs.
-	 */
-	if (ret == 0 && arm_pmu_irq_is_nmi())
-		lockup_detector_init();
-
-	return ret;
+		return arm_pmu_acpi_probe(armv8_pmuv3_init);
 }
 device_initcall(armv8_pmu_driver_init)
 

@@ -1322,27 +1309,3 @@ void arch_perf_update_userpage(struct perf_event *event,
 	userpg->cap_user_time_zero = 1;
 	userpg->cap_user_time_short = 1;
 }
-
-#ifdef CONFIG_HARDLOCKUP_DETECTOR_PERF
-/*
- * Safe maximum CPU frequency in case a particular platform doesn't implement
- * cpufreq driver. Although, architecture doesn't put any restrictions on
- * maximum frequency but 5 GHz seems to be safe maximum given the available
- * Arm CPUs in the market which are clocked much less than 5 GHz. On the other
- * hand, we can't make it much higher as it would lead to a large hard-lockup
- * detection timeout on parts which are running slower (eg. 1GHz on
- * Developerbox) and doesn't possess a cpufreq driver.
- */
-#define SAFE_MAX_CPU_FREQ	5000000000UL // 5 GHz
-u64 hw_nmi_get_sample_period(int watchdog_thresh)
-{
-	unsigned int cpu = smp_processor_id();
-	unsigned long max_cpu_freq;
-
-	max_cpu_freq = cpufreq_get_hw_max_freq(cpu) * 1000UL;
-	if (!max_cpu_freq)
-		max_cpu_freq = SAFE_MAX_CPU_FREQ;
-
-	return (u64)max_cpu_freq * watchdog_thresh;
-}
-#endif
arch/arm64/kernel/probes/kprobes_trampoline.S

@@ -25,7 +25,7 @@
 	stp x24, x25, [sp, #S_X24]
 	stp x26, x27, [sp, #S_X26]
 	stp x28, x29, [sp, #S_X28]
-	add x0, sp, #S_FRAME_SIZE
+	add x0, sp, #PT_REGS_SIZE
 	stp lr, x0, [sp, #S_LR]
 	/*
 	 * Construct a useful saved PSTATE

@@ -62,7 +62,7 @@
 	.endm
 
 SYM_CODE_START(kretprobe_trampoline)
-	sub sp, sp, #S_FRAME_SIZE
+	sub sp, sp, #PT_REGS_SIZE
 
 	save_all_base_regs
 

@@ -76,7 +76,7 @@ SYM_CODE_START(kretprobe_trampoline)
 
 	restore_all_base_regs
 
-	add sp, sp, #S_FRAME_SIZE
+	add sp, sp, #PT_REGS_SIZE
 	ret
 
 SYM_CODE_END(kretprobe_trampoline)
arch/arm64/kernel/signal.c

@@ -914,13 +914,6 @@ static void do_signal(struct pt_regs *regs)
 asmlinkage void do_notify_resume(struct pt_regs *regs,
 				 unsigned long thread_flags)
 {
-	/*
-	 * The assembly code enters us with IRQs off, but it hasn't
-	 * informed the tracing code of that for efficiency reasons.
-	 * Update the trace code with the current status.
-	 */
-	trace_hardirqs_off();
-
 	do {
 		if (thread_flags & _TIF_NEED_RESCHED) {
 			/* Unmask Debug and SError for the next task */
arch/arm64/kernel/smp.c

@@ -434,8 +434,10 @@ static void __init hyp_mode_check(void)
 			   "CPU: CPUs started in inconsistent modes");
 	else
 		pr_info("CPU: All CPU(s) started at EL1\n");
-	if (IS_ENABLED(CONFIG_KVM) && !is_kernel_in_hyp_mode())
+	if (IS_ENABLED(CONFIG_KVM) && !is_kernel_in_hyp_mode()) {
 		kvm_compute_layout();
+		kvm_apply_hyp_relocations();
+	}
 }
 
 void __init smp_cpus_done(unsigned int max_cpus)
arch/arm64/kernel/syscall.c

@@ -9,6 +9,7 @@
 
 #include <asm/daifflags.h>
 #include <asm/debug-monitors.h>
+#include <asm/exception.h>
 #include <asm/fpsimd.h>
 #include <asm/syscall.h>
 #include <asm/thread_info.h>

@@ -165,15 +166,8 @@ static void el0_svc_common(struct pt_regs *regs, int scno, int sc_nr,
 	if (!has_syscall_work(flags) && !IS_ENABLED(CONFIG_DEBUG_RSEQ)) {
 		local_daif_mask();
 		flags = current_thread_info()->flags;
-		if (!has_syscall_work(flags) && !(flags & _TIF_SINGLESTEP)) {
-			/*
-			 * We're off to userspace, where interrupts are
-			 * always enabled after we restore the flags from
-			 * the SPSR.
-			 */
-			trace_hardirqs_on();
+		if (!has_syscall_work(flags) && !(flags & _TIF_SINGLESTEP))
 			return;
-		}
 		local_daif_restore(DAIF_PROCCTX);
 	}
 
arch/arm64/kernel/vmlinux.lds.S

@@ -31,10 +31,11 @@ jiffies = jiffies_64;
 	__stop___kvm_ex_table = .;
 
 #define HYPERVISOR_DATA_SECTIONS				\
-	HYP_SECTION_NAME(.data..ro_after_init) : {		\
-		__hyp_data_ro_after_init_start = .;		\
+	HYP_SECTION_NAME(.rodata) : {				\
+		__hyp_rodata_start = .;				\
 		*(HYP_SECTION_NAME(.data..ro_after_init))	\
-		__hyp_data_ro_after_init_end = .;		\
+		*(HYP_SECTION_NAME(.rodata))			\
+		__hyp_rodata_end = .;				\
 	}
 
 #define HYPERVISOR_PERCPU_SECTION				\

@@ -42,10 +43,19 @@ jiffies = jiffies_64;
 	HYP_SECTION_NAME(.data..percpu) : {			\
 		*(HYP_SECTION_NAME(.data..percpu))		\
 	}
+
+#define HYPERVISOR_RELOC_SECTION				\
+	.hyp.reloc : ALIGN(4) {					\
+		__hyp_reloc_begin = .;				\
+		*(.hyp.reloc)					\
+		__hyp_reloc_end = .;				\
+	}
+
 #else /* CONFIG_KVM */
 #define HYPERVISOR_EXTABLE
 #define HYPERVISOR_DATA_SECTIONS
 #define HYPERVISOR_PERCPU_SECTION
+#define HYPERVISOR_RELOC_SECTION
 #endif
 
 #define HYPERVISOR_TEXT					\

@@ -216,6 +226,8 @@ SECTIONS
 	PERCPU_SECTION(L1_CACHE_BYTES)
 	HYPERVISOR_PERCPU_SECTION
 
+	HYPERVISOR_RELOC_SECTION
+
 	.rela.dyn : ALIGN(8) {
 		*(.rela .rela*)
 	}
arch/arm64/kvm/Makefile

@@ -16,7 +16,7 @@ kvm-y := $(KVM)/kvm_main.o $(KVM)/coalesced_mmio.o $(KVM)/eventfd.o \
	 inject_fault.o va_layout.o handle_exit.o \
	 guest.o debug.o reset.o sys_regs.o \
	 vgic-sys-reg-v3.o fpsimd.o pmu.o \
-	 arch_timer.o \
+	 arch_timer.o trng.o\
	 vgic/vgic.o vgic/vgic-init.o \
	 vgic/vgic-irqfd.o vgic/vgic-v2.o \
	 vgic/vgic-v3.o vgic/vgic-v4.o \
arch/arm64/kvm/arm.c

@@ -1750,11 +1750,10 @@ static int init_hyp_mode(void)
 		goto out_err;
 	}
 
-	err = create_hyp_mappings(kvm_ksym_ref(__hyp_data_ro_after_init_start),
-				  kvm_ksym_ref(__hyp_data_ro_after_init_end),
-				  PAGE_HYP_RO);
+	err = create_hyp_mappings(kvm_ksym_ref(__hyp_rodata_start),
+				  kvm_ksym_ref(__hyp_rodata_end), PAGE_HYP_RO);
 	if (err) {
-		kvm_err("Cannot map .hyp.data..ro_after_init section\n");
+		kvm_err("Cannot map .hyp.rodata section\n");
 		goto out_err;
 	}
 
arch/arm64/kvm/hyp/include/hyp/switch.h

@@ -505,8 +505,8 @@ static inline void __kvm_unexpected_el2_exception(void)
 	struct exception_table_entry *entry, *end;
 	unsigned long elr_el2 = read_sysreg(elr_el2);
 
-	entry = hyp_symbol_addr(__start___kvm_ex_table);
-	end = hyp_symbol_addr(__stop___kvm_ex_table);
+	entry = &__start___kvm_ex_table;
+	end = &__stop___kvm_ex_table;
 
 	while (entry < end) {
 		addr = (unsigned long)&entry->insn + entry->insn;
arch/arm64/kvm/hyp/nvhe/.gitignore

@@ -1,2 +1,4 @@
 # SPDX-License-Identifier: GPL-2.0-only
+gen-hyprel
 hyp.lds
+hyp-reloc.S
arch/arm64/kvm/hyp/nvhe/Makefile

@@ -3,8 +3,11 @@
 # Makefile for Kernel-based Virtual Machine module, HYP/nVHE part
 #
 
-asflags-y := -D__KVM_NVHE_HYPERVISOR__
-ccflags-y := -D__KVM_NVHE_HYPERVISOR__
+asflags-y := -D__KVM_NVHE_HYPERVISOR__ -D__DISABLE_EXPORTS
+ccflags-y := -D__KVM_NVHE_HYPERVISOR__ -D__DISABLE_EXPORTS
+
+hostprogs := gen-hyprel
+HOST_EXTRACFLAGS += -I$(objtree)/include
 
 obj-y := timer-sr.o sysreg-sr.o debug-sr.o switch.o tlb.o hyp-init.o host.o \
	 hyp-main.o hyp-smp.o psci-relay.o

@@ -19,7 +22,7 @@ obj-y += ../vgic-v3-sr.o ../aarch32.o ../vgic-v2-cpuif-proxy.o ../entry.o \
 
 hyp-obj := $(patsubst %.o,%.nvhe.o,$(obj-y))
 obj-y := kvm_nvhe.o
-extra-y := $(hyp-obj) kvm_nvhe.tmp.o hyp.lds
+extra-y := $(hyp-obj) kvm_nvhe.tmp.o kvm_nvhe.rel.o hyp.lds hyp-reloc.S hyp-reloc.o
 
 # 1) Compile all source files to `.nvhe.o` object files. The file extension
 #    avoids file name clashes for files shared with VHE.

@@ -42,11 +45,31 @@ LDFLAGS_kvm_nvhe.tmp.o := -r -T
 $(obj)/kvm_nvhe.tmp.o: $(obj)/hyp.lds $(addprefix $(obj)/,$(hyp-obj)) FORCE
	$(call if_changed,ld)
 
-# 4) Produce the final 'kvm_nvhe.o', ready to be linked into 'vmlinux'.
+# 4) Generate list of hyp code/data positions that need to be relocated at
+#    runtime. Because the hypervisor is part of the kernel binary, relocations
+#    produce a kernel VA. We enumerate relocations targeting hyp at build time
+#    and convert the kernel VAs at those positions to hyp VAs.
+$(obj)/hyp-reloc.S: $(obj)/kvm_nvhe.tmp.o $(obj)/gen-hyprel
+	$(call if_changed,hyprel)
+
+# 5) Compile hyp-reloc.S and link it into the existing partially linked object.
+#    The object file now contains a section with pointers to hyp positions that
+#    will contain kernel VAs at runtime. These pointers have relocations on them
+#    so that they get updated as the hyp object is linked into `vmlinux`.
+LDFLAGS_kvm_nvhe.rel.o := -r
+$(obj)/kvm_nvhe.rel.o: $(obj)/kvm_nvhe.tmp.o $(obj)/hyp-reloc.o FORCE
+	$(call if_changed,ld)
+
+# 6) Produce the final 'kvm_nvhe.o', ready to be linked into 'vmlinux'.
 #    Prefixes names of ELF symbols with '__kvm_nvhe_'.
-$(obj)/kvm_nvhe.o: $(obj)/kvm_nvhe.tmp.o FORCE
+$(obj)/kvm_nvhe.o: $(obj)/kvm_nvhe.rel.o FORCE
	$(call if_changed,hypcopy)
 
+# The HYPREL command calls `gen-hyprel` to generate an assembly file with
+# a list of relocations targeting hyp code/data.
+quiet_cmd_hyprel = HYPREL  $@
+      cmd_hyprel = $(obj)/gen-hyprel $< > $@
+
 # The HYPCOPY command uses `objcopy` to prefix all ELF symbol names
 # to avoid clashes with VHE code/data.
 quiet_cmd_hypcopy = HYPCOPY $@
@ -0,0 +1,438 @@
|
|||
// SPDX-License-Identifier: GPL-2.0-only
|
||||
/*
|
||||
* Copyright (C) 2020 - Google LLC
|
||||
* Author: David Brazdil <dbrazdil@google.com>
|
||||
*
|
||||
* Generates relocation information used by the kernel to convert
|
||||
* absolute addresses in hyp data from kernel VAs to hyp VAs.
|
||||
*
|
||||
* This is necessary because hyp code is linked into the same binary
|
||||
* as the kernel but executes under different memory mappings.
|
||||
* If the compiler used absolute addressing, those addresses need to
|
||||
* be converted before they are used by hyp code.
|
||||
*
|
||||
* The input of this program is the relocatable ELF object containing
|
||||
* all hyp code/data, not yet linked into vmlinux. Hyp section names
|
||||
* should have been prefixed with `.hyp` at this point.
|
||||
*
|
||||
* The output (printed to stdout) is an assembly file containing
|
||||
* an array of 32-bit integers and static relocations that instruct
|
||||
* the linker of `vmlinux` to populate the array entries with offsets
|
||||
* to positions in the kernel binary containing VAs used by hyp code.
|
||||
*
|
||||
* Note that dynamic relocations could be used for the same purpose.
|
||||
* However, those are only generated if CONFIG_RELOCATABLE=y.
|
||||
*/
|
||||
|
||||
#include <elf.h>
|
||||
#include <endian.h>
|
||||
#include <errno.h>
|
||||
#include <fcntl.h>
|
||||
#include <stdbool.h>
|
||||
#include <stdio.h>
|
||||
#include <stdlib.h>
|
||||
#include <string.h>
|
||||
#include <sys/mman.h>
|
||||
#include <sys/types.h>
|
||||
#include <sys/stat.h>
|
||||
#include <unistd.h>
|
||||
|
||||
#include <generated/autoconf.h>
|
||||
|
||||
#define HYP_SECTION_PREFIX ".hyp"
|
||||
#define HYP_RELOC_SECTION ".hyp.reloc"
|
||||
#define HYP_SECTION_SYMBOL_PREFIX "__hyp_section_"
|
||||
|
||||
/*
|
||||
* AArch64 relocation type constants.
|
||||
* Included in case these are not defined in the host toolchain.
|
||||
*/
|
||||
#ifndef R_AARCH64_ABS64
|
||||
#define R_AARCH64_ABS64 257
|
||||
#endif
|
||||
#ifndef R_AARCH64_LD_PREL_LO19
|
||||
#define R_AARCH64_LD_PREL_LO19 273
|
||||
#endif
|
||||
#ifndef R_AARCH64_ADR_PREL_LO21
|
||||
#define R_AARCH64_ADR_PREL_LO21 274
|
||||
#endif
|
||||
#ifndef R_AARCH64_ADR_PREL_PG_HI21
|
||||
#define R_AARCH64_ADR_PREL_PG_HI21 275
|
||||
#endif
|
||||
#ifndef R_AARCH64_ADR_PREL_PG_HI21_NC
|
||||
#define R_AARCH64_ADR_PREL_PG_HI21_NC 276
|
||||
#endif
|
||||
#ifndef R_AARCH64_ADD_ABS_LO12_NC
|
||||
#define R_AARCH64_ADD_ABS_LO12_NC 277
|
||||
#endif
|
||||
#ifndef R_AARCH64_LDST8_ABS_LO12_NC
|
||||
#define R_AARCH64_LDST8_ABS_LO12_NC 278
|
||||
#endif
|
||||
#ifndef R_AARCH64_TSTBR14
|
||||
#define R_AARCH64_TSTBR14 279
|
||||
#endif
|
||||
#ifndef R_AARCH64_CONDBR19
|
||||
#define R_AARCH64_CONDBR19 280
|
||||
#endif
|
||||
#ifndef R_AARCH64_JUMP26
|
||||
#define R_AARCH64_JUMP26 282
|
||||
#endif
|
||||
#ifndef R_AARCH64_CALL26
|
||||
#define R_AARCH64_CALL26 283
|
||||
#endif
|
||||
#ifndef R_AARCH64_LDST16_ABS_LO12_NC
|
||||
#define R_AARCH64_LDST16_ABS_LO12_NC 284
|
||||
#endif
|
||||
#ifndef R_AARCH64_LDST32_ABS_LO12_NC
|
||||
#define R_AARCH64_LDST32_ABS_LO12_NC 285
|
||||
#endif
|
||||
#ifndef R_AARCH64_LDST64_ABS_LO12_NC
|
||||
#define R_AARCH64_LDST64_ABS_LO12_NC 286
|
||||
#endif
|
||||
#ifndef R_AARCH64_MOVW_PREL_G0
|
||||
#define R_AARCH64_MOVW_PREL_G0 287
|
||||
#endif
|
||||
#ifndef R_AARCH64_MOVW_PREL_G0_NC
|
||||
#define R_AARCH64_MOVW_PREL_G0_NC 288
|
||||
#endif
|
||||
#ifndef R_AARCH64_MOVW_PREL_G1
|
||||
#define R_AARCH64_MOVW_PREL_G1 289
|
||||
#endif
|
||||
#ifndef R_AARCH64_MOVW_PREL_G1_NC
|
||||
#define R_AARCH64_MOVW_PREL_G1_NC 290
|
||||
#endif
|
||||
#ifndef R_AARCH64_MOVW_PREL_G2
|
||||
#define R_AARCH64_MOVW_PREL_G2 291
|
||||
#endif
|
||||
#ifndef R_AARCH64_MOVW_PREL_G2_NC
|
||||
#define R_AARCH64_MOVW_PREL_G2_NC 292
|
||||
#endif
|
||||
#ifndef R_AARCH64_MOVW_PREL_G3
|
||||
#define R_AARCH64_MOVW_PREL_G3 293
|
||||
#endif
|
||||
#ifndef R_AARCH64_LDST128_ABS_LO12_NC
|
||||
#define R_AARCH64_LDST128_ABS_LO12_NC 299
|
||||
#endif
|
||||
|
||||
/* Global state of the processed ELF. */
|
||||
static struct {
|
||||
const char *path;
|
||||
char *begin;
|
||||
size_t size;
|
||||
Elf64_Ehdr *ehdr;
|
||||
Elf64_Shdr *sh_table;
|
||||
const char *sh_string;
|
||||
} elf;
|
||||
|
||||
#if defined(CONFIG_CPU_LITTLE_ENDIAN)
|
||||
|
||||
#define elf16toh(x) le16toh(x)
|
||||
#define elf32toh(x) le32toh(x)
|
||||
#define elf64toh(x) le64toh(x)
|
||||
|
||||
#define ELFENDIAN ELFDATA2LSB
|
||||
|
||||
#elif defined(CONFIG_CPU_BIG_ENDIAN)
|
||||
|
||||
#define elf16toh(x) be16toh(x)
|
||||
#define elf32toh(x) be32toh(x)
|
||||
#define elf64toh(x) be64toh(x)
|
||||
|
||||
#define ELFENDIAN ELFDATA2MSB
|
||||
|
||||
#else
|
||||
|
||||
#error PDP-endian sadly unsupported...
|
||||
|
||||
#endif
|
||||
|
||||
#define fatal_error(fmt, ...) \
|
||||
({ \
|
||||
fprintf(stderr, "error: %s: " fmt "\n", \
|
||||
elf.path, ## __VA_ARGS__); \
|
||||
exit(EXIT_FAILURE); \
|
||||
__builtin_unreachable(); \
|
||||
})
|
||||
|
||||
#define fatal_perror(msg) \
|
||||
({ \
|
||||
fprintf(stderr, "error: %s: " msg ": %s\n", \
|
||||
elf.path, strerror(errno)); \
|
||||
exit(EXIT_FAILURE); \
|
||||
__builtin_unreachable(); \
|
||||
})
|
||||
|
||||
#define assert_op(lhs, rhs, fmt, op) \
|
||||
({ \
|
||||
typeof(lhs) _lhs = (lhs); \
|
||||
typeof(rhs) _rhs = (rhs); \
|
||||
\
|
||||
if (!(_lhs op _rhs)) { \
|
||||
fatal_error("assertion " #lhs " " #op " " #rhs \
|
||||
" failed (lhs=" fmt ", rhs=" fmt \
|
||||
", line=%d)", _lhs, _rhs, __LINE__); \
|
||||
} \
|
||||
})
|
||||
|
||||
#define assert_eq(lhs, rhs, fmt) assert_op(lhs, rhs, fmt, ==)
|
||||
#define assert_ne(lhs, rhs, fmt) assert_op(lhs, rhs, fmt, !=)
|
||||
#define assert_lt(lhs, rhs, fmt) assert_op(lhs, rhs, fmt, <)
|
||||
#define assert_ge(lhs, rhs, fmt) assert_op(lhs, rhs, fmt, >=)
|
||||
|
||||
/*
|
||||
* Return a pointer of a given type at a given offset from
|
||||
* the beginning of the ELF file.
|
||||
*/
|
||||
#define elf_ptr(type, off) ((type *)(elf.begin + (off)))
|
||||
|
||||
/* Iterate over all sections in the ELF. */
|
||||
#define for_each_section(var) \
|
||||
for (var = elf.sh_table; var < elf.sh_table + elf16toh(elf.ehdr->e_shnum); ++var)
|
||||
|
||||
/* Iterate over all Elf64_Rela relocations in a given section. */
|
||||
#define for_each_rela(shdr, var) \
|
||||
for (var = elf_ptr(Elf64_Rela, elf64toh(shdr->sh_offset)); \
|
||||
var < elf_ptr(Elf64_Rela, elf64toh(shdr->sh_offset) + elf64toh(shdr->sh_size)); var++)
|
||||
|
||||
/* True if a string starts with a given prefix. */
|
||||
static inline bool starts_with(const char *str, const char *prefix)
|
||||
{
|
||||
return memcmp(str, prefix, strlen(prefix)) == 0;
|
||||
}
|
||||
|
||||
/* Returns a string containing the name of a given section. */
|
||||
static inline const char *section_name(Elf64_Shdr *shdr)
|
||||
{
|
||||
return elf.sh_string + elf32toh(shdr->sh_name);
|
||||
}
|
||||
|
||||
/* Returns a pointer to the first byte of section data. */
|
||||
static inline const char *section_begin(Elf64_Shdr *shdr)
|
||||
{
|
||||
return elf_ptr(char, elf64toh(shdr->sh_offset));
|
||||
}
|
||||
|
||||
/* Find a section by its offset from the beginning of the file. */
|
||||
static inline Elf64_Shdr *section_by_off(Elf64_Off off)
{
	assert_ne(off, 0UL, "%lu");
	return elf_ptr(Elf64_Shdr, off);
}

/* Find a section by its index. */
static inline Elf64_Shdr *section_by_idx(uint16_t idx)
{
	assert_ne(idx, SHN_UNDEF, "%u");
	return &elf.sh_table[idx];
}

/*
 * Memory-map the given ELF file, perform sanity checks, and
 * populate global state.
 */
static void init_elf(const char *path)
{
	int fd, ret;
	struct stat stat;

	/* Store path in the global struct for error printing. */
	elf.path = path;

	/* Open the ELF file. */
	fd = open(path, O_RDONLY);
	if (fd < 0)
		fatal_perror("Could not open ELF file");

	/* Get status of ELF file to obtain its size. */
	ret = fstat(fd, &stat);
	if (ret < 0) {
		close(fd);
		fatal_perror("Could not get status of ELF file");
	}

	/* mmap() the entire ELF file read-only at an arbitrary address. */
	elf.begin = mmap(0, stat.st_size, PROT_READ, MAP_PRIVATE, fd, 0);
	if (elf.begin == MAP_FAILED) {
		close(fd);
		fatal_perror("Could not mmap ELF file");
	}

	/* mmap() was successful, close the FD. */
	close(fd);

	/* Get pointer to the ELF header. */
	assert_ge(stat.st_size, sizeof(*elf.ehdr), "%lu");
	elf.ehdr = elf_ptr(Elf64_Ehdr, 0);

	/* Check the ELF magic. */
	assert_eq(elf.ehdr->e_ident[EI_MAG0], ELFMAG0, "0x%x");
	assert_eq(elf.ehdr->e_ident[EI_MAG1], ELFMAG1, "0x%x");
	assert_eq(elf.ehdr->e_ident[EI_MAG2], ELFMAG2, "0x%x");
	assert_eq(elf.ehdr->e_ident[EI_MAG3], ELFMAG3, "0x%x");

	/* Sanity check that this is an ELF64 relocatable object for AArch64. */
	assert_eq(elf.ehdr->e_ident[EI_CLASS], ELFCLASS64, "%u");
	assert_eq(elf.ehdr->e_ident[EI_DATA], ELFENDIAN, "%u");
	assert_eq(elf16toh(elf.ehdr->e_type), ET_REL, "%u");
	assert_eq(elf16toh(elf.ehdr->e_machine), EM_AARCH64, "%u");

	/* Populate fields of the global struct. */
	elf.sh_table = section_by_off(elf64toh(elf.ehdr->e_shoff));
	elf.sh_string = section_begin(section_by_idx(elf16toh(elf.ehdr->e_shstrndx)));
}

/* Print the prologue of the output ASM file. */
static void emit_prologue(void)
{
	printf(".data\n"
	       ".pushsection " HYP_RELOC_SECTION ", \"a\"\n");
}

/* Print ASM statements needed as a prologue to a processed hyp section. */
static void emit_section_prologue(const char *sh_orig_name)
{
	/* Declare the hyp section symbol. */
	printf(".global %s%s\n", HYP_SECTION_SYMBOL_PREFIX, sh_orig_name);
}

/*
 * Print ASM statements to create a hyp relocation entry for a given
 * R_AARCH64_ABS64 relocation.
 *
 * The linker of vmlinux will populate the position given by `rela` with
 * an absolute 64-bit kernel VA. If the kernel is relocatable, it will
 * also generate a dynamic relocation entry so that the kernel can shift
 * the address at runtime for KASLR.
 *
 * Emit a 32-bit offset from the current address to the position given
 * by `rela`. This way the kernel can iterate over all kernel VAs used
 * by hyp at runtime and convert them to hyp VAs. However, that offset
 * will not be known until linking of `vmlinux`, so emit a PREL32
 * relocation referencing a symbol that the hyp linker script put at
 * the beginning of the relocated section + the offset from `rela`.
 */
static void emit_rela_abs64(Elf64_Rela *rela, const char *sh_orig_name)
{
	/* Offset of this reloc from the beginning of HYP_RELOC_SECTION. */
	static size_t reloc_offset;

	/* Create storage for the 32-bit offset. */
	printf(".word 0\n");

	/*
	 * Create a PREL32 relocation which instructs the linker of `vmlinux`
	 * to insert offset to position <base> + <offset>, where <base> is
	 * a symbol at the beginning of the relocated section, and <offset>
	 * is `rela->r_offset`.
	 */
	printf(".reloc %lu, R_AARCH64_PREL32, %s%s + 0x%lx\n",
	       reloc_offset, HYP_SECTION_SYMBOL_PREFIX, sh_orig_name,
	       elf64toh(rela->r_offset));

	reloc_offset += 4;
}
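
The PREL32 trick above is easier to see in miniature. The following standalone C sketch (illustrative names only, not kernel code) shows why a 32-bit self-relative offset survives the whole image being moved: both the table entry and its target shift by the same amount, so `target - &entry` is independent of the load address.

#include <stdint.h>
#include <stdio.h>

/* Hypothetical relocated object whose kernel VA the table records. */
static uint64_t hyp_object;

/* One .hyp.reloc-style slot; the linker-computed PREL32 value is
 * emulated here at runtime. */
static int32_t reloc_entry;

int main(void)
{
	/* What the vmlinux linker does: store target - &entry. */
	reloc_entry = (int32_t)((intptr_t)&hyp_object - (intptr_t)&reloc_entry);

	/* What the kernel does at boot: recover the target from the delta. */
	uint64_t *target = (uint64_t *)((char *)&reloc_entry + reloc_entry);

	printf("resolved %p, expected %p\n", (void *)target, (void *)&hyp_object);
	return 0;
}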

/* Print the epilogue of the output ASM file. */
static void emit_epilogue(void)
{
	printf(".popsection\n");
}

/*
 * Iterate over all RELA relocations in a given section and emit
 * hyp relocation data for all absolute addresses in hyp code/data.
 *
 * Static relocations that generate PC-relative addressing are ignored.
 * Failure is reported for unexpected relocation types.
 */
static void emit_rela_section(Elf64_Shdr *sh_rela)
{
	Elf64_Shdr *sh_orig = &elf.sh_table[elf32toh(sh_rela->sh_info)];
	const char *sh_orig_name = section_name(sh_orig);
	Elf64_Rela *rela;

	/* Skip all non-hyp sections. */
	if (!starts_with(sh_orig_name, HYP_SECTION_PREFIX))
		return;

	emit_section_prologue(sh_orig_name);

	for_each_rela(sh_rela, rela) {
		uint32_t type = (uint32_t)elf64toh(rela->r_info);

		/* Check that rela points inside the relocated section. */
		assert_lt(elf64toh(rela->r_offset), elf64toh(sh_orig->sh_size), "0x%lx");

		switch (type) {
		/*
		 * Data relocations to generate absolute addressing.
		 * Emit a hyp relocation.
		 */
		case R_AARCH64_ABS64:
			emit_rela_abs64(rela, sh_orig_name);
			break;
		/* Allow relocations to generate PC-relative addressing. */
		case R_AARCH64_LD_PREL_LO19:
		case R_AARCH64_ADR_PREL_LO21:
		case R_AARCH64_ADR_PREL_PG_HI21:
		case R_AARCH64_ADR_PREL_PG_HI21_NC:
		case R_AARCH64_ADD_ABS_LO12_NC:
		case R_AARCH64_LDST8_ABS_LO12_NC:
		case R_AARCH64_LDST16_ABS_LO12_NC:
		case R_AARCH64_LDST32_ABS_LO12_NC:
		case R_AARCH64_LDST64_ABS_LO12_NC:
		case R_AARCH64_LDST128_ABS_LO12_NC:
			break;
		/* Allow relative relocations for control-flow instructions. */
		case R_AARCH64_TSTBR14:
		case R_AARCH64_CONDBR19:
		case R_AARCH64_JUMP26:
		case R_AARCH64_CALL26:
			break;
		/* Allow group relocations to create PC-relative offset inline. */
		case R_AARCH64_MOVW_PREL_G0:
		case R_AARCH64_MOVW_PREL_G0_NC:
		case R_AARCH64_MOVW_PREL_G1:
		case R_AARCH64_MOVW_PREL_G1_NC:
		case R_AARCH64_MOVW_PREL_G2:
		case R_AARCH64_MOVW_PREL_G2_NC:
		case R_AARCH64_MOVW_PREL_G3:
			break;
		default:
			fatal_error("Unexpected RELA type %u", type);
		}
	}
}

/* Iterate over all sections and emit hyp relocation data for RELA sections. */
static void emit_all_relocs(void)
{
	Elf64_Shdr *shdr;

	for_each_section(shdr) {
		switch (elf32toh(shdr->sh_type)) {
		case SHT_REL:
			fatal_error("Unexpected SHT_REL section \"%s\"",
				    section_name(shdr));
		case SHT_RELA:
			emit_rela_section(shdr);
			break;
		}
	}
}

int main(int argc, const char **argv)
{
	if (argc != 2) {
		fprintf(stderr, "Usage: %s <elf_input>\n", argv[0]);
		return EXIT_FAILURE;
	}

	init_elf(argv[1]);

	emit_prologue();
	emit_all_relocs();
	emit_epilogue();

	return EXIT_SUCCESS;
}
@@ -74,27 +74,28 @@ SYM_FUNC_END(__host_enter)
 * void __noreturn __hyp_do_panic(bool restore_host, u64 spsr, u64 elr, u64 par);
 */
SYM_FUNC_START(__hyp_do_panic)
	/* Load the format arguments into x1-7 */
	mov	x6, x3
	get_vcpu_ptr x7, x3

	mrs	x3, esr_el2
	mrs	x4, far_el2
	mrs	x5, hpfar_el2

	/* Prepare and exit to the host's panic function. */
	mov	lr, #(PSR_F_BIT | PSR_I_BIT | PSR_A_BIT | PSR_D_BIT |\
		      PSR_MODE_EL1h)
	msr	spsr_el2, lr
	ldr	lr, =panic
	hyp_kimg_va lr, x6
	msr	elr_el2, lr

	/*
	 * Set the panic format string and enter the host, conditionally
	 * restoring the host context.
	 */
	/* Set the panic format string. Use the, now free, LR as scratch. */
	ldr	lr, =__hyp_panic_string
	hyp_kimg_va lr, x6

	/* Load the format arguments into x1-7. */
	mov	x6, x3
	get_vcpu_ptr x7, x3
	mrs	x3, esr_el2
	mrs	x4, far_el2
	mrs	x5, hpfar_el2

	/* Enter the host, conditionally restoring the host context. */
	cmp	x0, xzr
	ldr	x0, =__hyp_panic_string
	mov	x0, lr
	b.eq	__host_enter_without_restoring
	b	__host_enter_for_panic
SYM_FUNC_END(__hyp_do_panic)

@@ -124,7 +125,7 @@ SYM_FUNC_END(__hyp_do_panic)
 * Preserve x0-x4, which may contain stub parameters.
 */
	ldr	x5, =__kvm_handle_stub_hvc
	kimg_pa x5, x6
	hyp_pa	x5, x6
	br	x5
.L__vect_end\@:
.if ((.L__vect_end\@ - .L__vect_start\@) > 0x80)

@@ -18,7 +18,7 @@
#include <asm/virt.h>

	.text
	.pushsection	.hyp.idmap.text, "ax"
	.pushsection	.idmap.text, "ax"

	.align	11

@@ -57,17 +57,10 @@ __do_hyp_init:
	cmp	x0, #HVC_STUB_HCALL_NR
	b.lo	__kvm_handle_stub_hvc

	// We only actively check bits [24:31], and everything
	// else has to be zero, which we check at build time.
#if (KVM_HOST_SMCCC_FUNC(__kvm_hyp_init) & 0xFFFFFFFF00FFFFFF)
#error Unexpected __KVM_HOST_SMCCC_FUNC___kvm_hyp_init value
#endif
	mov	x3, #KVM_HOST_SMCCC_FUNC(__kvm_hyp_init)
	cmp	x0, x3
	b.eq	1f

	ror	x0, x0, #24
	eor	x0, x0, #((KVM_HOST_SMCCC_FUNC(__kvm_hyp_init) >> 24) & 0xF)
	ror	x0, x0, #4
	eor	x0, x0, #((KVM_HOST_SMCCC_FUNC(__kvm_hyp_init) >> 28) & 0xF)
	cbz	x0, 1f
	mov	x0, #SMCCC_RET_NOT_SUPPORTED
	eret
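
As a hedged aside, the ror/eor dance above reads more naturally in C. The constant below is an assumption for illustration (non-zero bits only in [31:24], matching the build-time #if check); the point is that two 4-bit immediates, applied between rotations, compare the whole 64-bit register against the expected ID without needing a scratch register.

#include <stdint.h>
#include <stdio.h>

/* Assumed function ID with non-zero bits confined to [31:24]. */
#define HYP_INIT_FUNC_ID 0xC6000000ULL

static uint64_t ror64(uint64_t v, unsigned int n)
{
	return (v >> n) | (v << (64 - n));
}

/* Mirrors the ror/eor sequence: x0 ends up zero iff the ID matched. */
static int is_hyp_init(uint64_t x0)
{
	x0 = ror64(x0, 24);			/* bits [31:24] -> [7:0] */
	x0 ^= (HYP_INIT_FUNC_ID >> 24) & 0xF;	/* clear the low nibble */
	x0 = ror64(x0, 4);			/* bits [7:4] -> [3:0] */
	x0 ^= (HYP_INIT_FUNC_ID >> 28) & 0xF;	/* clear the high nibble */
	return x0 == 0;
}

int main(void)
{
	printf("%d %d\n", is_hyp_init(HYP_INIT_FUNC_ID),
	       is_hyp_init(HYP_INIT_FUNC_ID | 1));	/* 1 0 */
	return 0;
}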

@@ -141,7 +134,6 @@ alternative_else_nop_endif

	/* Set the host vector */
	ldr	x0, =__kvm_hyp_host_vector
	kimg_hyp_va x0, x1
	msr	vbar_el2, x0

	ret

@@ -200,7 +192,6 @@ SYM_CODE_START_LOCAL(__kvm_hyp_init_cpu)
	/* Leave idmap. */
	mov	x0, x29
	ldr	x1, =kvm_host_psci_cpu_entry
	kimg_hyp_va x1, x2
	br	x1
SYM_CODE_END(__kvm_hyp_init_cpu)

@@ -108,9 +108,9 @@ static void handle___vgic_v3_restore_aprs(struct kvm_cpu_context *host_ctxt)

typedef void (*hcall_t)(struct kvm_cpu_context *);

#define HANDLE_FUNC(x)	[__KVM_HOST_SMCCC_FUNC_##x] = kimg_fn_ptr(handle_##x)
#define HANDLE_FUNC(x)	[__KVM_HOST_SMCCC_FUNC_##x] = (hcall_t)handle_##x

static const hcall_t *host_hcall[] = {
static const hcall_t host_hcall[] = {
	HANDLE_FUNC(__kvm_vcpu_run),
	HANDLE_FUNC(__kvm_flush_vm_context),
	HANDLE_FUNC(__kvm_tlb_flush_vmid_ipa),

@@ -130,7 +130,6 @@ static const hcall_t *host_hcall[] = {
static void handle_host_hcall(struct kvm_cpu_context *host_ctxt)
{
	DECLARE_REG(unsigned long, id, host_ctxt, 0);
	const hcall_t *kfn;
	hcall_t hfn;

	id -= KVM_HOST_SMCCC_ID(0);

@@ -138,13 +137,11 @@ static void handle_host_hcall(struct kvm_cpu_context *host_ctxt)
	if (unlikely(id >= ARRAY_SIZE(host_hcall)))
		goto inval;

	kfn = host_hcall[id];
	if (unlikely(!kfn))
	hfn = host_hcall[id];
	if (unlikely(!hfn))
		goto inval;

	cpu_reg(host_ctxt, 0) = SMCCC_RET_SUCCESS;

	hfn = kimg_fn_hyp_va(kfn);
	hfn(host_ctxt);

	return;

@@ -33,8 +33,8 @@ unsigned long __hyp_per_cpu_offset(unsigned int cpu)
	if (cpu >= ARRAY_SIZE(kvm_arm_hyp_percpu_base))
		hyp_panic();

	cpu_base_array = (unsigned long *)hyp_symbol_addr(kvm_arm_hyp_percpu_base);
	cpu_base_array = (unsigned long *)&kvm_arm_hyp_percpu_base;
	this_cpu_base = kern_hyp_va(cpu_base_array[cpu]);
	elf_base = (unsigned long)hyp_symbol_addr(__per_cpu_start);
	elf_base = (unsigned long)&__per_cpu_start;
	return this_cpu_base - elf_base;
}
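
A toy model of this calculation may help; the arrays below merely stand in for kvm_arm_hyp_percpu_base[] and __per_cpu_start, so only the subtraction pattern is meaningful. The offset is whatever must be added to a symbol's link-time (template) address to reach this CPU's private copy.

#include <stdint.h>
#include <stdio.h>

#define NR_CPUS	4
#define AREA	64

static char percpu_template[AREA];		/* stand-in for __per_cpu_start */
static char percpu_copies[NR_CPUS][AREA];	/* stand-in for the per-CPU bases */

static long percpu_offset(unsigned int cpu)
{
	/* base[cpu] - elf_base, the same shape as __hyp_per_cpu_offset() */
	return (long)((intptr_t)percpu_copies[cpu] - (intptr_t)percpu_template);
}

int main(void)
{
	for (unsigned int cpu = 0; cpu < NR_CPUS; cpu++)
		printf("cpu%u offset=%ld\n", cpu, percpu_offset(cpu));
	return 0;
}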

@@ -12,14 +12,17 @@
#include <asm/memory.h>

SECTIONS {
	HYP_SECTION(.idmap.text)
	HYP_SECTION(.text)
	HYP_SECTION(.data..ro_after_init)
	HYP_SECTION(.rodata)

	/*
	 * .hyp..data..percpu needs to be page aligned to maintain the same
	 * alignment for when linking into vmlinux.
	 */
	. = ALIGN(PAGE_SIZE);
	HYP_SECTION_NAME(.data..percpu) : {
	BEGIN_HYP_SECTION(.data..percpu)
		PERCPU_INPUT(L1_CACHE_BYTES)
	}
	HYP_SECTION(.data..ro_after_init)
	END_HYP_SECTION
}

@@ -128,8 +128,8 @@ static int psci_cpu_on(u64 func_id, struct kvm_cpu_context *host_ctxt)
	if (cpu_id == INVALID_CPU_ID)
		return PSCI_RET_INVALID_PARAMS;

	boot_args = per_cpu_ptr(hyp_symbol_addr(cpu_on_args), cpu_id);
	init_params = per_cpu_ptr(hyp_symbol_addr(kvm_init_params), cpu_id);
	boot_args = per_cpu_ptr(&cpu_on_args, cpu_id);
	init_params = per_cpu_ptr(&kvm_init_params, cpu_id);

	/* Check if the target CPU is already being booted. */
	if (!try_acquire_boot_args(boot_args))

@@ -140,7 +140,7 @@ static int psci_cpu_on(u64 func_id, struct kvm_cpu_context *host_ctxt)
	wmb();

	ret = psci_call(func_id, mpidr,
			__hyp_pa(hyp_symbol_addr(kvm_hyp_cpu_entry)),
			__hyp_pa(&kvm_hyp_cpu_entry),
			__hyp_pa(init_params));

	/* If successful, the lock will be released by the target CPU. */

@@ -159,8 +159,8 @@ static int psci_cpu_suspend(u64 func_id, struct kvm_cpu_context *host_ctxt)
	struct psci_boot_args *boot_args;
	struct kvm_nvhe_init_params *init_params;

	boot_args = this_cpu_ptr(hyp_symbol_addr(suspend_args));
	init_params = this_cpu_ptr(hyp_symbol_addr(kvm_init_params));
	boot_args = this_cpu_ptr(&suspend_args);
	init_params = this_cpu_ptr(&kvm_init_params);

	/*
	 * No need to acquire a lock before writing to boot_args because a core

@@ -174,7 +174,7 @@ static int psci_cpu_suspend(u64 func_id, struct kvm_cpu_context *host_ctxt)
	 * point if it is a deep sleep state.
	 */
	return psci_call(func_id, power_state,
			 __hyp_pa(hyp_symbol_addr(kvm_hyp_cpu_resume)),
			 __hyp_pa(&kvm_hyp_cpu_resume),
			 __hyp_pa(init_params));
}

@@ -186,8 +186,8 @@ static int psci_system_suspend(u64 func_id, struct kvm_cpu_context *host_ctxt)
	struct psci_boot_args *boot_args;
	struct kvm_nvhe_init_params *init_params;

	boot_args = this_cpu_ptr(hyp_symbol_addr(suspend_args));
	init_params = this_cpu_ptr(hyp_symbol_addr(kvm_init_params));
	boot_args = this_cpu_ptr(&suspend_args);
	init_params = this_cpu_ptr(&kvm_init_params);

	/*
	 * No need to acquire a lock before writing to boot_args because a core

@@ -198,7 +198,7 @@ static int psci_system_suspend(u64 func_id, struct kvm_cpu_context *host_ctxt)

	/* Will only return on error. */
	return psci_call(func_id,
			 __hyp_pa(hyp_symbol_addr(kvm_hyp_cpu_resume)),
			 __hyp_pa(&kvm_hyp_cpu_resume),
			 __hyp_pa(init_params), 0);
}

@@ -207,12 +207,12 @@ asmlinkage void __noreturn kvm_host_psci_cpu_entry(bool is_cpu_on)
	struct psci_boot_args *boot_args;
	struct kvm_cpu_context *host_ctxt;

	host_ctxt = &this_cpu_ptr(hyp_symbol_addr(kvm_host_data))->host_ctxt;
	host_ctxt = &this_cpu_ptr(&kvm_host_data)->host_ctxt;

	if (is_cpu_on)
		boot_args = this_cpu_ptr(hyp_symbol_addr(cpu_on_args));
		boot_args = this_cpu_ptr(&cpu_on_args);
	else
		boot_args = this_cpu_ptr(hyp_symbol_addr(suspend_args));
		boot_args = this_cpu_ptr(&suspend_args);

	cpu_reg(host_ctxt, 0) = boot_args->r0;
	write_sysreg_el2(boot_args->pc, SYS_ELR);

@@ -45,6 +45,10 @@

#define KVM_PTE_LEAF_ATTR_HI_S2_XN	BIT(54)

#define KVM_PTE_LEAF_ATTR_S2_PERMS	(KVM_PTE_LEAF_ATTR_LO_S2_S2AP_R | \
					 KVM_PTE_LEAF_ATTR_LO_S2_S2AP_W | \
					 KVM_PTE_LEAF_ATTR_HI_S2_XN)

struct kvm_pgtable_walk_data {
	struct kvm_pgtable		*pgt;
	struct kvm_pgtable_walker	*walker;

@@ -170,10 +174,9 @@ static void kvm_set_table_pte(kvm_pte_t *ptep, kvm_pte_t *childp)
	smp_store_release(ptep, pte);
}

static bool kvm_set_valid_leaf_pte(kvm_pte_t *ptep, u64 pa, kvm_pte_t attr,
				   u32 level)
static kvm_pte_t kvm_init_valid_leaf_pte(u64 pa, kvm_pte_t attr, u32 level)
{
	kvm_pte_t old = *ptep, pte = kvm_phys_to_pte(pa);
	kvm_pte_t pte = kvm_phys_to_pte(pa);
	u64 type = (level == KVM_PGTABLE_MAX_LEVELS - 1) ? KVM_PTE_TYPE_PAGE :
							   KVM_PTE_TYPE_BLOCK;

@@ -181,12 +184,7 @@ static bool kvm_set_valid_leaf_pte(kvm_pte_t *ptep, u64 pa, kvm_pte_t attr,
	pte |= FIELD_PREP(KVM_PTE_TYPE, type);
	pte |= KVM_PTE_VALID;

	/* Tolerate KVM recreating the exact same mapping. */
	if (kvm_pte_valid(old))
		return old == pte;

	smp_store_release(ptep, pte);
	return true;
	return pte;
}

static int kvm_pgtable_visitor_cb(struct kvm_pgtable_walk_data *data, u64 addr,

@@ -341,12 +339,17 @@ static int hyp_map_set_prot_attr(enum kvm_pgtable_prot prot,
static bool hyp_map_walker_try_leaf(u64 addr, u64 end, u32 level,
				    kvm_pte_t *ptep, struct hyp_map_data *data)
{
	kvm_pte_t new, old = *ptep;
	u64 granule = kvm_granule_size(level), phys = data->phys;

	if (!kvm_block_mapping_supported(addr, end, phys, level))
		return false;

	WARN_ON(!kvm_set_valid_leaf_pte(ptep, phys, data->attr, level));
	/* Tolerate KVM recreating the exact same mapping */
	new = kvm_init_valid_leaf_pte(phys, data->attr, level);
	if (old != new && !WARN_ON(kvm_pte_valid(old)))
		smp_store_release(ptep, new);

	data->phys += granule;
	return true;
}

@@ -461,34 +464,41 @@ static int stage2_map_set_prot_attr(enum kvm_pgtable_prot prot,
	return 0;
}

static bool stage2_map_walker_try_leaf(u64 addr, u64 end, u32 level,
				       kvm_pte_t *ptep,
				       struct stage2_map_data *data)
static int stage2_map_walker_try_leaf(u64 addr, u64 end, u32 level,
				      kvm_pte_t *ptep,
				      struct stage2_map_data *data)
{
	kvm_pte_t new, old = *ptep;
	u64 granule = kvm_granule_size(level), phys = data->phys;
	struct page *page = virt_to_page(ptep);

	if (!kvm_block_mapping_supported(addr, end, phys, level))
		return false;
		return -E2BIG;

	/*
	 * If the PTE was already valid, drop the refcount on the table
	 * early, as it will be bumped-up again in stage2_map_walk_leaf().
	 * This ensures that the refcount stays constant across a valid to
	 * valid PTE update.
	 */
	if (kvm_pte_valid(*ptep))
		put_page(virt_to_page(ptep));
	new = kvm_init_valid_leaf_pte(phys, data->attr, level);
	if (kvm_pte_valid(old)) {
		/*
		 * Skip updating the PTE if we are trying to recreate the exact
		 * same mapping or only change the access permissions. Instead,
		 * the vCPU will exit one more time from guest if still needed
		 * and then go through the path of relaxing permissions.
		 */
		if (!((old ^ new) & (~KVM_PTE_LEAF_ATTR_S2_PERMS)))
			return -EAGAIN;

	if (kvm_set_valid_leaf_pte(ptep, phys, data->attr, level))
		goto out;
		/*
		 * There's an existing different valid leaf entry, so perform
		 * break-before-make.
		 */
		kvm_set_invalid_pte(ptep);
		kvm_call_hyp(__kvm_tlb_flush_vmid_ipa, data->mmu, addr, level);
		put_page(page);
	}

	/* There's an existing valid leaf entry, so perform break-before-make */
	kvm_set_invalid_pte(ptep);
	kvm_call_hyp(__kvm_tlb_flush_vmid_ipa, data->mmu, addr, level);
	kvm_set_valid_leaf_pte(ptep, phys, data->attr, level);
out:
	smp_store_release(ptep, new);
	get_page(page);
	data->phys += granule;
	return true;
	return 0;
}
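
The old^new test above is compact enough to deserve a worked example. This sketch uses made-up bit positions (only the shape of the mask matters): XORing the two PTEs isolates every differing bit, and masking out the permission bits asks whether anything other than permissions changed.

#include <stdint.h>
#include <stdio.h>

typedef uint64_t kvm_pte_t;

/* Illustrative stand-ins for the S2AP/XN attribute bits (not real values). */
#define ATTR_S2AP_R	(1ULL << 6)
#define ATTR_S2AP_W	(1ULL << 7)
#define ATTR_XN		(1ULL << 54)
#define ATTR_S2_PERMS	(ATTR_S2AP_R | ATTR_S2AP_W | ATTR_XN)

/* True if old and new map the same output address with the same type,
 * i.e. any difference is confined to the permission bits. */
static int only_perms_differ(kvm_pte_t old, kvm_pte_t new)
{
	return !((old ^ new) & ~ATTR_S2_PERMS);
}

int main(void)
{
	kvm_pte_t old = 0x40000000ULL | ATTR_S2AP_R;

	printf("%d\n", only_perms_differ(old, old | ATTR_S2AP_W));	/* 1 */
	printf("%d\n", only_perms_differ(old, old + 0x1000));		/* 0 */
	return 0;
}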

static int stage2_map_walk_table_pre(u64 addr, u64 end, u32 level,

@@ -516,6 +526,7 @@ static int stage2_map_walk_table_pre(u64 addr, u64 end, u32 level,
static int stage2_map_walk_leaf(u64 addr, u64 end, u32 level, kvm_pte_t *ptep,
				struct stage2_map_data *data)
{
	int ret;
	kvm_pte_t *childp, pte = *ptep;
	struct page *page = virt_to_page(ptep);

@@ -526,8 +537,9 @@ static int stage2_map_walk_leaf(u64 addr, u64 end, u32 level, kvm_pte_t *ptep,
		return 0;
	}

	if (stage2_map_walker_try_leaf(addr, end, level, ptep, data))
		goto out_get_page;
	ret = stage2_map_walker_try_leaf(addr, end, level, ptep, data);
	if (ret != -E2BIG)
		return ret;

	if (WARN_ON(level == KVM_PGTABLE_MAX_LEVELS - 1))
		return -EINVAL;

@@ -551,9 +563,8 @@ static int stage2_map_walk_leaf(u64 addr, u64 end, u32 level, kvm_pte_t *ptep,
	}

	kvm_set_table_pte(ptep, childp);

out_get_page:
	get_page(page);

	return 0;
}

@@ -64,7 +64,7 @@ int __vgic_v2_perform_cpuif_access(struct kvm_vcpu *vcpu)
	}

	rd = kvm_vcpu_dabt_get_rd(vcpu);
	addr = hyp_symbol_addr(kvm_vgic_global_state)->vcpu_hyp_va;
	addr = kvm_vgic_global_state.vcpu_hyp_va;
	addr += fault_ipa - vgic->vgic_cpu_base;

	if (kvm_vcpu_dabt_iswrite(vcpu)) {

@@ -71,6 +71,12 @@ int kvm_hvc_call_handler(struct kvm_vcpu *vcpu)
		if (gpa != GPA_INVALID)
			val = gpa;
		break;
	case ARM_SMCCC_TRNG_VERSION:
	case ARM_SMCCC_TRNG_FEATURES:
	case ARM_SMCCC_TRNG_GET_UUID:
	case ARM_SMCCC_TRNG_RND32:
	case ARM_SMCCC_TRNG_RND64:
		return kvm_trng_call(vcpu);
	default:
		return kvm_psci_call(vcpu);
	}

@@ -879,11 +879,8 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
	if (vma_pagesize == PAGE_SIZE && !force_pte)
		vma_pagesize = transparent_hugepage_adjust(memslot, hva,
							   &pfn, &fault_ipa);
	if (writable) {
	if (writable)
		prot |= KVM_PGTABLE_PROT_W;
		kvm_set_pfn_dirty(pfn);
		mark_page_dirty(kvm, gfn);
	}

	if (fault_status != FSC_PERM && !device)
		clean_dcache_guest_page(pfn, vma_pagesize);

@@ -911,11 +908,17 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
					     memcache);
	}

	/* Mark the page dirty only if the fault is handled successfully */
	if (writable && !ret) {
		kvm_set_pfn_dirty(pfn);
		mark_page_dirty(kvm, gfn);
	}

out_unlock:
	spin_unlock(&kvm->mmu_lock);
	kvm_set_pfn_accessed(pfn);
	kvm_release_pfn_clean(pfn);
	return ret;
	return ret != -EAGAIN ? ret : 0;
}

/* Resolve the access fault by making the page young again. */

@@ -23,11 +23,11 @@ static void kvm_pmu_stop_counter(struct kvm_vcpu *vcpu, struct kvm_pmc *pmc);
static u32 kvm_pmu_event_mask(struct kvm *kvm)
{
	switch (kvm->arch.pmuver) {
	case 1:			/* ARMv8.0 */
	case ID_AA64DFR0_PMUVER_8_0:
		return GENMASK(9, 0);
	case 4:			/* ARMv8.1 */
	case 5:			/* ARMv8.4 */
	case 6:			/* ARMv8.5 */
	case ID_AA64DFR0_PMUVER_8_1:
	case ID_AA64DFR0_PMUVER_8_4:
	case ID_AA64DFR0_PMUVER_8_5:
		return GENMASK(15, 0);
	default:		/* Shouldn't be here, just for sanity */
		WARN_ONCE(1, "Unknown PMU version %d\n", kvm->arch.pmuver);

@@ -795,6 +795,12 @@ u64 kvm_pmu_get_pmceid(struct kvm_vcpu *vcpu, bool pmceid1)
		base = 0;
	} else {
		val = read_sysreg(pmceid1_el0);
		/*
		 * Don't advertise STALL_SLOT, as PMMIR_EL0 is handled
		 * as RAZ
		 */
		if (vcpu->kvm->arch.pmuver >= ID_AA64DFR0_PMUVER_8_4)
			val &= ~BIT_ULL(ARMV8_PMUV3_PERFCTR_STALL_SLOT - 32);
		base = 32;
	}

@@ -9,6 +9,7 @@
 * Christoffer Dall <c.dall@virtualopensystems.com>
 */

#include <linux/bitfield.h>
#include <linux/bsearch.h>
#include <linux/kvm_host.h>
#include <linux/mm.h>

@@ -700,14 +701,18 @@ static bool access_pmselr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
static bool access_pmceid(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			  const struct sys_reg_desc *r)
{
	u64 pmceid;
	u64 pmceid, mask, shift;

	BUG_ON(p->is_write);

	if (pmu_access_el0_disabled(vcpu))
		return false;

	get_access_mask(r, &mask, &shift);

	pmceid = kvm_pmu_get_pmceid(vcpu, (p->Op2 & 1));
	pmceid &= mask;
	pmceid >>= shift;

	p->regval = pmceid;

@@ -1021,6 +1026,8 @@ static bool access_arch_timer(struct kvm_vcpu *vcpu,
	return true;
}

#define FEATURE(x)	(GENMASK_ULL(x##_SHIFT + 3, x##_SHIFT))

/* Read a sanitised cpufeature ID register by sys_reg_desc */
static u64 read_id_reg(const struct kvm_vcpu *vcpu,
		       struct sys_reg_desc const *r, bool raz)

@@ -1028,36 +1035,41 @@ static u64 read_id_reg(const struct kvm_vcpu *vcpu,
	u32 id = reg_to_encoding(r);
	u64 val = raz ? 0 : read_sanitised_ftr_reg(id);

	if (id == SYS_ID_AA64PFR0_EL1) {
	switch (id) {
	case SYS_ID_AA64PFR0_EL1:
		if (!vcpu_has_sve(vcpu))
			val &= ~(0xfUL << ID_AA64PFR0_SVE_SHIFT);
		val &= ~(0xfUL << ID_AA64PFR0_AMU_SHIFT);
		val &= ~(0xfUL << ID_AA64PFR0_CSV2_SHIFT);
		val |= ((u64)vcpu->kvm->arch.pfr0_csv2 << ID_AA64PFR0_CSV2_SHIFT);
		val &= ~(0xfUL << ID_AA64PFR0_CSV3_SHIFT);
		val |= ((u64)vcpu->kvm->arch.pfr0_csv3 << ID_AA64PFR0_CSV3_SHIFT);
	} else if (id == SYS_ID_AA64PFR1_EL1) {
		val &= ~(0xfUL << ID_AA64PFR1_MTE_SHIFT);
	} else if (id == SYS_ID_AA64ISAR1_EL1 && !vcpu_has_ptrauth(vcpu)) {
		val &= ~((0xfUL << ID_AA64ISAR1_APA_SHIFT) |
			 (0xfUL << ID_AA64ISAR1_API_SHIFT) |
			 (0xfUL << ID_AA64ISAR1_GPA_SHIFT) |
			 (0xfUL << ID_AA64ISAR1_GPI_SHIFT));
	} else if (id == SYS_ID_AA64DFR0_EL1) {
		u64 cap = 0;

		/* Limit guests to PMUv3 for ARMv8.1 */
		if (kvm_vcpu_has_pmu(vcpu))
			cap = ID_AA64DFR0_PMUVER_8_1;

		val &= ~FEATURE(ID_AA64PFR0_SVE);
		val &= ~FEATURE(ID_AA64PFR0_AMU);
		val &= ~FEATURE(ID_AA64PFR0_CSV2);
		val |= FIELD_PREP(FEATURE(ID_AA64PFR0_CSV2), (u64)vcpu->kvm->arch.pfr0_csv2);
		val &= ~FEATURE(ID_AA64PFR0_CSV3);
		val |= FIELD_PREP(FEATURE(ID_AA64PFR0_CSV3), (u64)vcpu->kvm->arch.pfr0_csv3);
		break;
	case SYS_ID_AA64PFR1_EL1:
		val &= ~FEATURE(ID_AA64PFR1_MTE);
		break;
	case SYS_ID_AA64ISAR1_EL1:
		if (!vcpu_has_ptrauth(vcpu))
			val &= ~(FEATURE(ID_AA64ISAR1_APA) |
				 FEATURE(ID_AA64ISAR1_API) |
				 FEATURE(ID_AA64ISAR1_GPA) |
				 FEATURE(ID_AA64ISAR1_GPI));
		break;
	case SYS_ID_AA64DFR0_EL1:
		/* Limit debug to ARMv8.0 */
		val &= ~FEATURE(ID_AA64DFR0_DEBUGVER);
		val |= FIELD_PREP(FEATURE(ID_AA64DFR0_DEBUGVER), 6);
		/* Limit guests to PMUv3 for ARMv8.4 */
		val = cpuid_feature_cap_perfmon_field(val,
						      ID_AA64DFR0_PMUVER_SHIFT,
						      cap);
	} else if (id == SYS_ID_DFR0_EL1) {
		/* Limit guests to PMUv3 for ARMv8.1 */
						      ID_AA64DFR0_PMUVER_SHIFT,
						      kvm_vcpu_has_pmu(vcpu) ? ID_AA64DFR0_PMUVER_8_4 : 0);
		break;
	case SYS_ID_DFR0_EL1:
		/* Limit guests to PMUv3 for ARMv8.4 */
		val = cpuid_feature_cap_perfmon_field(val,
						      ID_DFR0_PERFMON_SHIFT,
						      ID_DFR0_PERFMON_8_1);
						      ID_DFR0_PERFMON_SHIFT,
						      kvm_vcpu_has_pmu(vcpu) ? ID_DFR0_PERFMON_8_4 : 0);
		break;
	}

	return val;
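
For readers unfamiliar with the helpers, here is a small userspace rendering of the FEATURE()/FIELD_PREP() pattern used above, with local stand-ins for the kernel macros and an assumed field position; each ID register field is 4 bits wide, so the mask always spans shift..shift+3.

#include <stdint.h>
#include <stdio.h>

/* Local stand-ins for GENMASK_ULL()/FIELD_PREP() (illustrative only). */
#define GENMASK_ULL(h, l)  ((~0ULL >> (63 - (h))) & (~0ULL << (l)))
#define FEATURE(shift)     GENMASK_ULL((shift) + 3, (shift))
#define FIELD_PREP(m, v)   (((uint64_t)(v) << __builtin_ctzll(m)) & (m))

#define ID_CSV2_SHIFT 56	/* assumed field position */

int main(void)
{
	uint64_t val = ~0ULL;

	val &= ~FEATURE(ID_CSV2_SHIFT);			/* clear the 4-bit field */
	val |= FIELD_PREP(FEATURE(ID_CSV2_SHIFT), 1);	/* re-insert a value */

	printf("csv2 = %llu\n",
	       (unsigned long long)((val >> ID_CSV2_SHIFT) & 0xF));	/* 1 */
	return 0;
}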

@@ -1493,6 +1505,7 @@ static const struct sys_reg_desc sys_reg_descs[] = {
	  .access = access_pminten, .reg = PMINTENSET_EL1 },
	{ PMU_SYS_REG(SYS_PMINTENCLR_EL1),
	  .access = access_pminten, .reg = PMINTENSET_EL1 },
	{ SYS_DESC(SYS_PMMIR_EL1), trap_raz_wi },

	{ SYS_DESC(SYS_MAIR_EL1), access_vm_reg, reset_unknown, MAIR_EL1 },
	{ SYS_DESC(SYS_AMAIR_EL1), access_vm_reg, reset_amair_el1, AMAIR_EL1 },

@@ -1720,7 +1733,7 @@ static const struct sys_reg_desc sys_reg_descs[] = {
	{ SYS_DESC(SYS_FPEXC32_EL2), NULL, reset_val, FPEXC32_EL2, 0x700 },
};

static bool trap_dbgidr(struct kvm_vcpu *vcpu,
static bool trap_dbgdidr(struct kvm_vcpu *vcpu,
			struct sys_reg_params *p,
			const struct sys_reg_desc *r)
{

@@ -1734,7 +1747,7 @@ static bool trap_dbgidr(struct kvm_vcpu *vcpu,
		p->regval = ((((dfr >> ID_AA64DFR0_WRPS_SHIFT) & 0xf) << 28) |
			     (((dfr >> ID_AA64DFR0_BRPS_SHIFT) & 0xf) << 24) |
			     (((dfr >> ID_AA64DFR0_CTX_CMPS_SHIFT) & 0xf) << 20)
			     | (6 << 16) | (el3 << 14) | (el3 << 12));
			     | (6 << 16) | (1 << 15) | (el3 << 14) | (el3 << 12));
		return true;
	}
}

@@ -1767,8 +1780,8 @@ static bool trap_dbgidr(struct kvm_vcpu *vcpu,
 * guest. Revisit this one day, would this principle change.
 */
static const struct sys_reg_desc cp14_regs[] = {
	/* DBGIDR */
	{ Op1( 0), CRn( 0), CRm( 0), Op2( 0), trap_dbgidr },
	/* DBGDIDR */
	{ Op1( 0), CRn( 0), CRm( 0), Op2( 0), trap_dbgdidr },
	/* DBGDTRRXext */
	{ Op1( 0), CRn( 0), CRm( 0), Op2( 2), trap_raz_wi },

@@ -1918,8 +1931,8 @@ static const struct sys_reg_desc cp15_regs[] = {
	{ Op1( 0), CRn( 9), CRm(12), Op2( 3), access_pmovs },
	{ Op1( 0), CRn( 9), CRm(12), Op2( 4), access_pmswinc },
	{ Op1( 0), CRn( 9), CRm(12), Op2( 5), access_pmselr },
	{ Op1( 0), CRn( 9), CRm(12), Op2( 6), access_pmceid },
	{ Op1( 0), CRn( 9), CRm(12), Op2( 7), access_pmceid },
	{ AA32(LO), Op1( 0), CRn( 9), CRm(12), Op2( 6), access_pmceid },
	{ AA32(LO), Op1( 0), CRn( 9), CRm(12), Op2( 7), access_pmceid },
	{ Op1( 0), CRn( 9), CRm(13), Op2( 0), access_pmu_evcntr },
	{ Op1( 0), CRn( 9), CRm(13), Op2( 1), access_pmu_evtyper },
	{ Op1( 0), CRn( 9), CRm(13), Op2( 2), access_pmu_evcntr },

@@ -1927,6 +1940,10 @@ static const struct sys_reg_desc cp15_regs[] = {
	{ Op1( 0), CRn( 9), CRm(14), Op2( 1), access_pminten },
	{ Op1( 0), CRn( 9), CRm(14), Op2( 2), access_pminten },
	{ Op1( 0), CRn( 9), CRm(14), Op2( 3), access_pmovs },
	{ AA32(HI), Op1( 0), CRn( 9), CRm(14), Op2( 4), access_pmceid },
	{ AA32(HI), Op1( 0), CRn( 9), CRm(14), Op2( 5), access_pmceid },
	/* PMMIR */
	{ Op1( 0), CRn( 9), CRm(14), Op2( 6), trap_raz_wi },

	/* PRRR/MAIR0 */
	{ AA32(LO), Op1( 0), CRn(10), CRm( 2), Op2( 0), access_vm_reg, NULL, MAIR_EL1 },

@@ -0,0 +1,85 @@
// SPDX-License-Identifier: GPL-2.0
// Copyright (C) 2020 Arm Ltd.

#include <linux/arm-smccc.h>
#include <linux/kvm_host.h>

#include <asm/kvm_emulate.h>

#include <kvm/arm_hypercalls.h>

#define ARM_SMCCC_TRNG_VERSION_1_0	0x10000UL

/* Those values are deliberately separate from the generic SMCCC definitions. */
#define TRNG_SUCCESS			0UL
#define TRNG_NOT_SUPPORTED		((unsigned long)-1)
#define TRNG_INVALID_PARAMETER		((unsigned long)-2)
#define TRNG_NO_ENTROPY			((unsigned long)-3)

#define TRNG_MAX_BITS64			192

static const uuid_t arm_smc_trng_uuid __aligned(4) = UUID_INIT(
	0x0d21e000, 0x4384, 0x11eb, 0x80, 0x70, 0x52, 0x44, 0x55, 0x4e, 0x5a, 0x4c);

static int kvm_trng_do_rnd(struct kvm_vcpu *vcpu, int size)
{
	DECLARE_BITMAP(bits, TRNG_MAX_BITS64);
	u32 num_bits = smccc_get_arg1(vcpu);
	int i;

	if (num_bits > 3 * size) {
		smccc_set_retval(vcpu, TRNG_INVALID_PARAMETER, 0, 0, 0);
		return 1;
	}

	/* get as many bits as we need to fulfil the request */
	for (i = 0; i < DIV_ROUND_UP(num_bits, BITS_PER_LONG); i++)
		bits[i] = get_random_long();

	bitmap_clear(bits, num_bits, TRNG_MAX_BITS64 - num_bits);

	if (size == 32)
		smccc_set_retval(vcpu, TRNG_SUCCESS, lower_32_bits(bits[1]),
				 upper_32_bits(bits[0]), lower_32_bits(bits[0]));
	else
		smccc_set_retval(vcpu, TRNG_SUCCESS, bits[2], bits[1], bits[0]);

	memzero_explicit(bits, sizeof(bits));
	return 1;
}
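
The register packing above is worth pinning down with a sketch. Assuming a 96-bit request on the 32-bit path (the maximum), the three result registers carry the entropy most-significant bits first; the helpers below are local stand-ins for the kernel's lower_32_bits()/upper_32_bits().

#include <stdint.h>
#include <stdio.h>

#define lower_32_bits(x) ((uint32_t)(x))
#define upper_32_bits(x) ((uint32_t)((x) >> 32))

/* Pack a 96-bit entropy buffer (two 64-bit words, bits [95:64] in w[1])
 * into three 32-bit results, most significant bits first, as the RND32
 * path does. */
static void pack_rnd32(const uint64_t w[2], uint32_t ret[3])
{
	ret[0] = lower_32_bits(w[1]);	/* bits [95:64] */
	ret[1] = upper_32_bits(w[0]);	/* bits [63:32] */
	ret[2] = lower_32_bits(w[0]);	/* bits [31:0] */
}

int main(void)
{
	uint64_t w[2] = { 0x1122334455667788ULL, 0x99aabbccULL };
	uint32_t r[3];

	pack_rnd32(w, r);
	printf("%08x %08x %08x\n", r[0], r[1], r[2]);
	return 0;
}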

int kvm_trng_call(struct kvm_vcpu *vcpu)
{
	const __le32 *u = (__le32 *)arm_smc_trng_uuid.b;
	u32 func_id = smccc_get_function(vcpu);
	unsigned long val = TRNG_NOT_SUPPORTED;
	int size = 64;

	switch (func_id) {
	case ARM_SMCCC_TRNG_VERSION:
		val = ARM_SMCCC_TRNG_VERSION_1_0;
		break;
	case ARM_SMCCC_TRNG_FEATURES:
		switch (smccc_get_arg1(vcpu)) {
		case ARM_SMCCC_TRNG_VERSION:
		case ARM_SMCCC_TRNG_FEATURES:
		case ARM_SMCCC_TRNG_GET_UUID:
		case ARM_SMCCC_TRNG_RND32:
		case ARM_SMCCC_TRNG_RND64:
			val = TRNG_SUCCESS;
		}
		break;
	case ARM_SMCCC_TRNG_GET_UUID:
		smccc_set_retval(vcpu, le32_to_cpu(u[0]), le32_to_cpu(u[1]),
				 le32_to_cpu(u[2]), le32_to_cpu(u[3]));
		return 1;
	case ARM_SMCCC_TRNG_RND32:
		size = 32;
		fallthrough;
	case ARM_SMCCC_TRNG_RND64:
		return kvm_trng_do_rnd(vcpu, size);
	}

	smccc_set_retval(vcpu, val, 0, 0, 0);
	return 1;
}
@@ -81,6 +81,34 @@ __init void kvm_compute_layout(void)
	init_hyp_physvirt_offset();
}

/*
 * The .hyp.reloc ELF section contains a list of kimg positions that
 * contain kimg VAs but will be accessed only in hyp execution context.
 * Convert them to hyp VAs. See gen-hyprel.c for more details.
 */
__init void kvm_apply_hyp_relocations(void)
{
	int32_t *rel;
	int32_t *begin = (int32_t *)__hyp_reloc_begin;
	int32_t *end = (int32_t *)__hyp_reloc_end;

	for (rel = begin; rel < end; ++rel) {
		uintptr_t *ptr, kimg_va;

		/*
		 * Each entry contains a 32-bit relative offset from itself
		 * to a kimg VA position.
		 */
		ptr = (uintptr_t *)lm_alias((char *)rel + *rel);

		/* Read the kimg VA value at the relocation address. */
		kimg_va = *ptr;

		/* Convert to hyp VA and store back to the relocation address. */
		*ptr = __early_kern_hyp_va((uintptr_t)lm_alias(kimg_va));
	}
}

static u32 compute_instruction(int n, u32 rd, u32 rn)
{
	u32 insn = AARCH64_BREAK_FAULT;

@@ -255,12 +283,6 @@ static void generate_mov_q(u64 val, __le32 *origptr, __le32 *updptr, int nr_inst
	*updptr++ = cpu_to_le32(insn);
}

void kvm_update_kimg_phys_offset(struct alt_instr *alt,
				 __le32 *origptr, __le32 *updptr, int nr_inst)
{
	generate_mov_q(kimage_voffset + PHYS_OFFSET, origptr, updptr, nr_inst);
}

void kvm_get_kimage_voffset(struct alt_instr *alt,
			    __le32 *origptr, __le32 *updptr, int nr_inst)
{

@@ -53,13 +53,13 @@ s64 memstart_addr __ro_after_init = -1;
EXPORT_SYMBOL(memstart_addr);

/*
 * We create both ZONE_DMA and ZONE_DMA32. ZONE_DMA covers the first 1G of
 * memory as some devices, namely the Raspberry Pi 4, have peripherals with
 * this limited view of the memory. ZONE_DMA32 will cover the rest of the 32
 * bit addressable memory area.
 * If the corresponding config options are enabled, we create both ZONE_DMA
 * and ZONE_DMA32. By default ZONE_DMA covers the 32-bit addressable memory
 * unless restricted on specific platforms (e.g. 30-bit on Raspberry Pi 4).
 * In such case, ZONE_DMA32 covers the rest of the 32-bit addressable memory,
 * otherwise it is empty.
 */
phys_addr_t arm64_dma_phys_limit __ro_after_init;
phys_addr_t arm64_dma32_phys_limit __ro_after_init;

#ifdef CONFIG_KEXEC_CORE
/*

@@ -84,7 +84,7 @@ static void __init reserve_crashkernel(void)

	if (crash_base == 0) {
		/* Current arm64 boot protocol requires 2MB alignment */
		crash_base = memblock_find_in_range(0, arm64_dma32_phys_limit,
		crash_base = memblock_find_in_range(0, arm64_dma_phys_limit,
						    crash_size, SZ_2M);
		if (crash_base == 0) {
			pr_warn("cannot allocate crashkernel (size:0x%llx)\n",

@@ -196,6 +196,7 @@ static void __init zone_sizes_init(unsigned long min, unsigned long max)
	unsigned long max_zone_pfns[MAX_NR_ZONES] = {0};
	unsigned int __maybe_unused acpi_zone_dma_bits;
	unsigned int __maybe_unused dt_zone_dma_bits;
	phys_addr_t __maybe_unused dma32_phys_limit = max_zone_phys(32);

#ifdef CONFIG_ZONE_DMA
	acpi_zone_dma_bits = fls64(acpi_iort_dma_get_max_cpu_address());

@@ -205,8 +206,12 @@ static void __init zone_sizes_init(unsigned long min, unsigned long max)
	max_zone_pfns[ZONE_DMA] = PFN_DOWN(arm64_dma_phys_limit);
#endif
#ifdef CONFIG_ZONE_DMA32
	max_zone_pfns[ZONE_DMA32] = PFN_DOWN(arm64_dma32_phys_limit);
	max_zone_pfns[ZONE_DMA32] = PFN_DOWN(dma32_phys_limit);
	if (!arm64_dma_phys_limit)
		arm64_dma_phys_limit = dma32_phys_limit;
#endif
	if (!arm64_dma_phys_limit)
		arm64_dma_phys_limit = PHYS_MASK + 1;
	max_zone_pfns[ZONE_NORMAL] = max;

	free_area_init(max_zone_pfns);

@@ -394,16 +399,9 @@ void __init arm64_memblock_init(void)

	early_init_fdt_scan_reserved_mem();

	if (IS_ENABLED(CONFIG_ZONE_DMA32))
		arm64_dma32_phys_limit = max_zone_phys(32);
	else
		arm64_dma32_phys_limit = PHYS_MASK + 1;

	reserve_elfcorehdr();

	high_memory = __va(memblock_end_of_DRAM() - 1) + 1;

	dma_contiguous_reserve(arm64_dma32_phys_limit);
}

void __init bootmem_init(void)

@@ -438,6 +436,11 @@ void __init bootmem_init(void)
	sparse_init();
	zone_sizes_init(min, max);

	/*
	 * Reserve the CMA area after arm64_dma_phys_limit was initialised.
	 */
	dma_contiguous_reserve(arm64_dma_phys_limit);

	/*
	 * request_standard_resources() depends on crashkernel's memory being
	 * reserved, so do it here.

@@ -455,7 +458,7 @@ void __init bootmem_init(void)
void __init mem_init(void)
{
	if (swiotlb_force == SWIOTLB_FORCE ||
	    max_pfn > PFN_DOWN(arm64_dma_phys_limit ? : arm64_dma32_phys_limit))
	    max_pfn > PFN_DOWN(arm64_dma_phys_limit))
		swiotlb_init(1);
	else
		swiotlb_force = SWIOTLB_NO_FORCE;

@@ -13,6 +13,7 @@
#include <linux/libfdt.h>

#include <asm/addrspace.h>
#include <asm/unaligned.h>

/*
 * These two variables specify the free mem region

@@ -117,7 +118,7 @@ void decompress_kernel(unsigned long boot_heap_start)
		dtb_size = fdt_totalsize((void *)&__appended_dtb);

		/* last four bytes are always the image size in little endian */
		image_size = le32_to_cpup((void *)&__image_end - 4);
		image_size = get_unaligned_le32((void *)&__image_end - 4);

		/* copy dtb to where the booted kernel will expect it */
		memcpy((void *)VMLINUX_LOAD_ADDRESS_ULL + image_size,

@@ -1444,7 +1444,7 @@ static void octeon_irq_setup_secondary_ciu2(void)
static int __init octeon_irq_init_ciu(
	struct device_node *ciu_node, struct device_node *parent)
{
	unsigned int i, r;
	int i, r;
	struct irq_chip *chip;
	struct irq_chip *chip_edge;
	struct irq_chip *chip_mbox;

@@ -103,4 +103,11 @@ jiffies_to_old_timeval32(unsigned long jiffies, struct old_timeval32 *value)
#undef ns_to_kernel_old_timeval
#define ns_to_kernel_old_timeval ns_to_old_timeval32

/*
 * Some data types as stored in coredump.
 */
#define user_long_t		compat_long_t
#define user_siginfo_t		compat_siginfo_t
#define copy_siginfo_to_external	copy_siginfo_to_external32

#include "../../../fs/binfmt_elf.c"

@@ -106,4 +106,11 @@ jiffies_to_old_timeval32(unsigned long jiffies, struct old_timeval32 *value)
#undef ns_to_kernel_old_timeval
#define ns_to_kernel_old_timeval ns_to_old_timeval32

/*
 * Some data types as stored in coredump.
 */
#define user_long_t		compat_long_t
#define user_siginfo_t		compat_siginfo_t
#define copy_siginfo_to_external	copy_siginfo_to_external32

#include "../../../fs/binfmt_elf.c"

@@ -187,8 +187,14 @@ static int __init relocate_exception_table(long offset)
static inline __init unsigned long rotate_xor(unsigned long hash,
					      const void *area, size_t size)
{
	size_t i;
	unsigned long *ptr = (unsigned long *)area;
	const typeof(hash) *ptr = PTR_ALIGN(area, sizeof(hash));
	size_t diff, i;

	diff = (void *)ptr - area;
	if (unlikely(size < diff + sizeof(hash)))
		return hash;

	size = ALIGN_DOWN(size - diff, sizeof(hash));

	for (i = 0; i < size / sizeof(hash); i++) {
		/* Rotate by odd number of bits and XOR. */
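
The hunk is cut short here, so a self-contained approximation of the fixed function may be useful; the rotation amount and helper macros are assumptions, but the alignment logic mirrors the lines above: skip the unaligned head, then round the remaining size down to whole words so no read strays past the buffer.

#include <stdint.h>
#include <stdio.h>

/* Userspace stand-ins for the kernel helpers (illustrative). */
#define ALIGN_DOWN(x, a)  ((x) & ~((size_t)(a) - 1))
#define PTR_ALIGN(p, a)   ((void *)(((uintptr_t)(p) + (a) - 1) & ~((uintptr_t)(a) - 1)))

static unsigned long rotate_xor(unsigned long hash, const void *area,
				size_t size)
{
	const unsigned long *ptr = PTR_ALIGN(area, sizeof(hash));
	size_t diff = (const char *)ptr - (const char *)area, i;

	if (size < diff + sizeof(hash))
		return hash;

	size = ALIGN_DOWN(size - diff, sizeof(hash));

	for (i = 0; i < size / sizeof(hash); i++) {
		/* Rotate by an odd number of bits and XOR (amount assumed). */
		hash = (hash << (8 * sizeof(hash) - 7)) | (hash >> 7);
		hash ^= ptr[i];
	}
	return hash;
}

int main(void)
{
	char buf[40] = "some seed material, deliberately offset";

	printf("%lx\n", rotate_xor(0, buf + 1, sizeof(buf) - 1));
	return 0;
}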
@@ -103,6 +103,8 @@ int gettimeofday_fallback(struct __kernel_old_timeval *_tv, struct timezone *_tz
	return do_syscall_2(__NR_gettimeofday, (unsigned long)_tv, (unsigned long)_tz);
}

#ifdef __powerpc64__

static __always_inline
int clock_gettime_fallback(clockid_t _clkid, struct __kernel_timespec *_ts)
{

@@ -115,10 +117,22 @@ int clock_getres_fallback(clockid_t _clkid, struct __kernel_timespec *_ts)
	return do_syscall_2(__NR_clock_getres, _clkid, (unsigned long)_ts);
}

#ifdef CONFIG_VDSO32
#else

#define BUILD_VDSO32		1

static __always_inline
int clock_gettime_fallback(clockid_t _clkid, struct __kernel_timespec *_ts)
{
	return do_syscall_2(__NR_clock_gettime64, _clkid, (unsigned long)_ts);
}

static __always_inline
int clock_getres_fallback(clockid_t _clkid, struct __kernel_timespec *_ts)
{
	return do_syscall_2(__NR_clock_getres_time64, _clkid, (unsigned long)_ts);
}

static __always_inline
int clock_gettime32_fallback(clockid_t _clkid, struct old_timespec32 *_ts)
{

@@ -187,6 +187,12 @@ SECTIONS
	.init.text : AT(ADDR(.init.text) - LOAD_OFFSET) {
		_sinittext = .;
		INIT_TEXT

		/*
		 * .init.text might be RO so we must ensure this section ends on
		 * a page boundary.
		 */
		. = ALIGN(PAGE_SIZE);
		_einittext = .;
#ifdef CONFIG_PPC64
		*(.tramp.ftrace.init);

@@ -200,6 +206,8 @@ SECTIONS
		EXIT_TEXT
	}

	. = ALIGN(PAGE_SIZE);

	INIT_DATA_SECTION(16)

	. = ALIGN(8);

@@ -137,7 +137,7 @@ config PA_BITS

config PAGE_OFFSET
	hex
	default 0xC0000000 if 32BIT && MAXPHYSMEM_2GB
	default 0xC0000000 if 32BIT && MAXPHYSMEM_1GB
	default 0x80000000 if 64BIT && !MMU
	default 0xffffffff80000000 if 64BIT && MAXPHYSMEM_2GB
	default 0xffffffe000000000 if 64BIT && MAXPHYSMEM_128GB

@@ -247,10 +247,12 @@ config MODULE_SECTIONS

choice
	prompt "Maximum Physical Memory"
	default MAXPHYSMEM_2GB if 32BIT
	default MAXPHYSMEM_1GB if 32BIT
	default MAXPHYSMEM_2GB if 64BIT && CMODEL_MEDLOW
	default MAXPHYSMEM_128GB if 64BIT && CMODEL_MEDANY

	config MAXPHYSMEM_1GB
		bool "1GiB"
	config MAXPHYSMEM_2GB
		bool "2GiB"
	config MAXPHYSMEM_128GB

@@ -88,7 +88,9 @@ &eth0 {
	phy-mode = "gmii";
	phy-handle = <&phy0>;
	phy0: ethernet-phy@0 {
		compatible = "ethernet-phy-id0007.0771";
		reg = <0>;
		reset-gpios = <&gpio 12 GPIO_ACTIVE_LOW>;
	};
};

@@ -64,6 +64,8 @@ CONFIG_HW_RANDOM=y
CONFIG_HW_RANDOM_VIRTIO=y
CONFIG_SPI=y
CONFIG_SPI_SIFIVE=y
CONFIG_GPIOLIB=y
CONFIG_GPIO_SIFIVE=y
# CONFIG_PTP_1588_CLOCK is not set
CONFIG_POWER_RESET=y
CONFIG_DRM=y

@@ -99,7 +99,6 @@
				 | _PAGE_DIRTY)

#define PAGE_KERNEL		__pgprot(_PAGE_KERNEL)
#define PAGE_KERNEL_EXEC	__pgprot(_PAGE_KERNEL | _PAGE_EXEC)
#define PAGE_KERNEL_READ	__pgprot(_PAGE_KERNEL & ~_PAGE_WRITE)
#define PAGE_KERNEL_EXEC	__pgprot(_PAGE_KERNEL | _PAGE_EXEC)
#define PAGE_KERNEL_READ_EXEC	__pgprot((_PAGE_KERNEL & ~_PAGE_WRITE) \

@@ -10,7 +10,7 @@

#include <linux/types.h>

#ifndef GENERIC_TIME_VSYSCALL
#ifndef CONFIG_GENERIC_TIME_VSYSCALL
struct vdso_data {
};
#endif

@@ -26,7 +26,16 @@ cache_get_priv_group(struct cacheinfo *this_leaf)

static struct cacheinfo *get_cacheinfo(u32 level, enum cache_type type)
{
	struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(smp_processor_id());
	/*
	 * Using raw_smp_processor_id() elides a preemptability check, but this
	 * is really indicative of a larger problem: the cacheinfo UABI assumes
	 * that cores have a homogeneous view of the cache hierarchy. That
	 * happens to be the case for the current set of RISC-V systems, but
	 * likely won't be true in general. Since there's no way to provide
	 * correct information for these systems via the current UABI we're
	 * just eliding the check for now.
	 */
	struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(raw_smp_processor_id());
	struct cacheinfo *this_leaf;
	int index;

@@ -124,15 +124,15 @@ skip_context_tracking:
	REG_L a1, (a1)
	jr a1
1:
#ifdef CONFIG_TRACE_IRQFLAGS
	call trace_hardirqs_on
#endif
	/*
	 * Exceptions run with interrupts enabled or disabled depending on the
	 * state of SR_PIE in m/sstatus.
	 */
	andi t0, s1, SR_PIE
	beqz t0, 1f
#ifdef CONFIG_TRACE_IRQFLAGS
	call trace_hardirqs_on
#endif
	csrs CSR_STATUS, SR_IE

1:

@@ -155,6 +155,15 @@ skip_context_tracking:
	tail do_trap_unknown

handle_syscall:
#ifdef CONFIG_RISCV_M_MODE
	/*
	 * When running in M-Mode (no MMU config), MPIE does not get set.
	 * As a result, we need to force enable interrupts here because
	 * handle_exception did not set SR_IE as it always sees SR_PIE
	 * being cleared.
	 */
	csrs CSR_STATUS, SR_IE
#endif
#if defined(CONFIG_TRACE_IRQFLAGS) || defined(CONFIG_CONTEXT_TRACKING)
	/* Recover a0 - a7 for system calls */
	REG_L a0, PT_A0(sp)

@@ -186,14 +195,7 @@ check_syscall_nr:
	 * Syscall number held in a7.
	 * If syscall number is above allowed value, redirect to ni_syscall.
	 */
	bge a7, t0, 1f
	/*
	 * Check if syscall is rejected by tracer, i.e., a7 == -1.
	 * If yes, we pretend it was executed.
	 */
	li t1, -1
	beq a7, t1, ret_from_syscall_rejected
	blt a7, t1, 1f
	bgeu a7, t0, 1f
	/* Call syscall */
	la s0, sys_call_table
	slli t0, a7, RISCV_LGPTR
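
The two-branch check above leans on a signed/unsigned trick that reads more naturally in C. A sketch with an assumed table size: the single unsigned comparison rejects every out-of-range value past the end of the table, while negative numbers below -1 are caught by the signed test (or, equivalently, become huge unsigned values).

#include <stdio.h>

#define NR_SYSCALLS 300	/* assumed table size */

/* -1 means "rejected by tracer": pretend success; anything else outside
 * [0, NR_SYSCALLS) goes to ni_syscall. */
static const char *classify(long nr)
{
	if (nr == -1)
		return "rejected";
	if ((unsigned long)nr >= NR_SYSCALLS)	/* catches nr < -1 too */
		return "ni_syscall";
	return "dispatch";
}

int main(void)
{
	long t[] = { 17, -1, -5, 300 };

	for (int i = 0; i < 4; i++)
		printf("%ld -> %s\n", t[i], classify(t[i]));
	return 0;
}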

@@ -127,7 +127,9 @@ static void __init init_resources(void)
{
	struct memblock_region *region = NULL;
	struct resource *res = NULL;
	int ret = 0;
	struct resource *mem_res = NULL;
	size_t mem_res_sz = 0;
	int ret = 0, i = 0;

	code_res.start = __pa_symbol(_text);
	code_res.end = __pa_symbol(_etext) - 1;

@@ -145,16 +147,17 @@ static void __init init_resources(void)
	bss_res.end = __pa_symbol(__bss_stop) - 1;
	bss_res.flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;

	mem_res_sz = (memblock.memory.cnt + memblock.reserved.cnt) * sizeof(*mem_res);
	mem_res = memblock_alloc(mem_res_sz, SMP_CACHE_BYTES);
	if (!mem_res)
		panic("%s: Failed to allocate %zu bytes\n", __func__, mem_res_sz);
	/*
	 * Start by adding the reserved regions, if they overlap
	 * with /memory regions, insert_resource later on will take
	 * care of it.
	 */
	for_each_reserved_mem_region(region) {
		res = memblock_alloc(sizeof(struct resource), SMP_CACHE_BYTES);
		if (!res)
			panic("%s: Failed to allocate %zu bytes\n", __func__,
			      sizeof(struct resource));
		res = &mem_res[i++];

		res->name = "Reserved";
		res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;

@@ -171,8 +174,10 @@ static void __init init_resources(void)
		 * Ignore any other reserved regions within
		 * system memory.
		 */
		if (memblock_is_memory(res->start))
		if (memblock_is_memory(res->start)) {
			memblock_free((phys_addr_t) res, sizeof(struct resource));
			continue;
		}

		ret = add_resource(&iomem_resource, res);
		if (ret < 0)

@@ -181,10 +186,7 @@ static void __init init_resources(void)

	/* Add /memory regions to the resource tree */
	for_each_mem_region(region) {
		res = memblock_alloc(sizeof(struct resource), SMP_CACHE_BYTES);
		if (!res)
			panic("%s: Failed to allocate %zu bytes\n", __func__,
			      sizeof(struct resource));
		res = &mem_res[i++];

		if (unlikely(memblock_is_nomap(region))) {
			res->name = "Reserved";

@@ -205,9 +207,9 @@ static void __init init_resources(void)
	return;

 error:
	memblock_free((phys_addr_t) res, sizeof(struct resource));
	/* Better an empty resource tree than an inconsistent one */
	release_child_resources(&iomem_resource);
	memblock_free((phys_addr_t) mem_res, mem_res_sz);
}

@@ -14,7 +14,7 @@

#include <asm/stacktrace.h>

register unsigned long sp_in_global __asm__("sp");
register const unsigned long sp_in_global __asm__("sp");

#ifdef CONFIG_FRAME_POINTER

@@ -28,9 +28,8 @@ void notrace walk_stackframe(struct task_struct *task, struct pt_regs *regs,
		sp = user_stack_pointer(regs);
		pc = instruction_pointer(regs);
	} else if (task == NULL || task == current) {
		const register unsigned long current_sp = sp_in_global;
		fp = (unsigned long)__builtin_frame_address(0);
		sp = current_sp;
		sp = sp_in_global;
		pc = (unsigned long)walk_stackframe;
	} else {
		/* task blocked in __switch_to */

@@ -4,6 +4,7 @@
 * Copyright (C) 2017 SiFive
 */

#include <linux/of_clk.h>
#include <linux/clocksource.h>
#include <linux/delay.h>
#include <asm/sbi.h>

@@ -24,6 +25,8 @@ void __init time_init(void)
	riscv_timebase = prop;

	lpj_fine = riscv_timebase / HZ;

	of_clk_init(NULL);
	timer_probe();
}

@@ -12,7 +12,7 @@
#include <linux/binfmts.h>
#include <linux/err.h>
#include <asm/page.h>
#ifdef GENERIC_TIME_VSYSCALL
#ifdef CONFIG_GENERIC_TIME_VSYSCALL
#include <vdso/datapage.h>
#else
#include <asm/vdso.h>

@@ -157,9 +157,10 @@ static void __init setup_initrd(void)
void __init setup_bootmem(void)
{
	phys_addr_t mem_start = 0;
	phys_addr_t start, end = 0;
	phys_addr_t start, dram_end, end = 0;
	phys_addr_t vmlinux_end = __pa_symbol(&_end);
	phys_addr_t vmlinux_start = __pa_symbol(&_start);
	phys_addr_t max_mapped_addr = __pa(~(ulong)0);
	u64 i;

	/* Find the memory region containing the kernel */

@@ -181,7 +182,18 @@ void __init setup_bootmem(void)
	/* Reserve from the start of the kernel to the end of the kernel */
	memblock_reserve(vmlinux_start, vmlinux_end - vmlinux_start);

	max_pfn = PFN_DOWN(memblock_end_of_DRAM());
	dram_end = memblock_end_of_DRAM();

	/*
	 * The memblock allocator is not aware that the last 4K bytes of
	 * addressable memory cannot be mapped because of the IS_ERR_VALUE
	 * macro. Make sure that the last 4K bytes are not usable by memblock
	 * if the end of DRAM is equal to the maximum addressable memory.
	 */
	if (max_mapped_addr == (dram_end - 1))
		memblock_set_current_limit(max_mapped_addr - 4096);

	max_pfn = PFN_DOWN(dram_end);
	max_low_pfn = max_pfn;
	dma32_phys_limit = min(4UL * SZ_1G, (unsigned long)PFN_PHYS(max_low_pfn));
	set_max_mapnr(max_low_pfn);
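
A short userspace rendering of why that last page is poisonous; MAX_ERRNO and the macro shape follow the kernel's IS_ERR_VALUE(), so any pointer landing in the top 4K of the address space would be indistinguishable from an encoded error.

#include <stdint.h>
#include <stdio.h>

#define MAX_ERRNO 4095
#define IS_ERR_VALUE(x) ((uintptr_t)(x) >= (uintptr_t)-MAX_ERRNO)

int main(void)
{
	uintptr_t last_page = (uintptr_t)-4096 + 42;	/* inside the top 4K */
	uintptr_t normal = 0x80000000UL;

	/* 1 0: the last-page address looks exactly like an error code. */
	printf("%d %d\n", !!IS_ERR_VALUE(last_page), !!IS_ERR_VALUE(normal));
	return 0;
}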

@@ -93,8 +93,8 @@ void __init kasan_init(void)
				    VMALLOC_END));

	for_each_mem_range(i, &_start, &_end) {
		void *start = (void *)_start;
		void *end = (void *)_end;
		void *start = (void *)__va(_start);
		void *end = (void *)__va(_end);

		if (start >= end)
			break;

@@ -16,6 +16,7 @@
#include <asm/hyperv-tlfs.h>
#include <asm/mshyperv.h>
#include <asm/idtentry.h>
#include <linux/kexec.h>
#include <linux/version.h>
#include <linux/vmalloc.h>
#include <linux/mm.h>

@@ -26,6 +27,8 @@
#include <linux/syscore_ops.h>
#include <clocksource/hyperv_timer.h>

int hyperv_init_cpuhp;

void *hv_hypercall_pg;
EXPORT_SYMBOL_GPL(hv_hypercall_pg);

@@ -401,6 +404,7 @@ void __init hyperv_init(void)

	register_syscore_ops(&hv_syscore_ops);

	hyperv_init_cpuhp = cpuhp;
	return;

remove_cpuhp_state:

@@ -66,11 +66,17 @@ static void hyperv_flush_tlb_others(const struct cpumask *cpus,
	if (!hv_hypercall_pg)
		goto do_native;

	if (cpumask_empty(cpus))
		return;

	local_irq_save(flags);

	/*
	 * Only check the mask _after_ interrupts have been disabled to avoid
	 * the mask changing under our feet.
	 */
	if (cpumask_empty(cpus)) {
		local_irq_restore(flags);
		return;
	}

	flush_pcpu = (struct hv_tlb_flush **)
		     this_cpu_ptr(hyperv_pcpu_input_arg);

@@ -74,6 +74,8 @@ static inline void hv_disable_stimer0_percpu_irq(int irq) {}

#if IS_ENABLED(CONFIG_HYPERV)
extern int hyperv_init_cpuhp;

extern void *hv_hypercall_pg;
extern void __percpu **hyperv_pcpu_input_arg;

@@ -135,14 +135,32 @@ static void hv_machine_shutdown(void)
{
	if (kexec_in_progress && hv_kexec_handler)
		hv_kexec_handler();

	/*
	 * Call hv_cpu_die() on all the CPUs, otherwise later the hypervisor
	 * corrupts the old VP Assist Pages and can crash the kexec kernel.
	 */
	if (kexec_in_progress && hyperv_init_cpuhp > 0)
		cpuhp_remove_state(hyperv_init_cpuhp);

	/* The function calls stop_other_cpus(). */
	native_machine_shutdown();

	/* Disable the hypercall page when there is only 1 active CPU. */
	if (kexec_in_progress)
		hyperv_cleanup();
}

static void hv_machine_crash_shutdown(struct pt_regs *regs)
{
	if (hv_crash_handler)
		hv_crash_handler(regs);

	/* The function calls crash_smp_send_stop(). */
	native_machine_crash_shutdown(regs);

	/* Disable the hypercall page when there is only 1 active CPU. */
	hyperv_cleanup();
}
#endif /* CONFIG_KEXEC_CORE */
#endif /* CONFIG_HYPERV */

@@ -164,10 +164,10 @@ static int xen_cpu_up_prepare_hvm(unsigned int cpu)
	else
		per_cpu(xen_vcpu_id, cpu) = cpu;
	rc = xen_vcpu_setup(cpu);
	if (rc)
	if (rc || !xen_have_vector_callback)
		return rc;

	if (xen_have_vector_callback && xen_feature(XENFEAT_hvm_safe_pvclock))
	if (xen_feature(XENFEAT_hvm_safe_pvclock))
		xen_setup_timer(cpu);

	rc = xen_smp_intr_init(cpu);

@@ -188,6 +188,8 @@ static int xen_cpu_dead_hvm(unsigned int cpu)
	return 0;
}

static bool no_vector_callback __initdata;

static void __init xen_hvm_guest_init(void)
{
	if (xen_pv_domain())

@@ -207,7 +209,7 @@ static void __init xen_hvm_guest_init(void)

	xen_panic_handler_init();

	if (xen_feature(XENFEAT_hvm_callback_vector))
	if (!no_vector_callback && xen_feature(XENFEAT_hvm_callback_vector))
		xen_have_vector_callback = 1;

	xen_hvm_smp_init();

@@ -233,6 +235,13 @@ static __init int xen_parse_nopv(char *arg)
}
early_param("xen_nopv", xen_parse_nopv);

static __init int xen_parse_no_vector_callback(char *arg)
{
	no_vector_callback = true;
	return 0;
}
early_param("xen_no_vector_callback", xen_parse_no_vector_callback);

bool __init xen_hvm_need_lapic(void)
{
	if (xen_pv_domain())

@@ -33,9 +33,11 @@ static void __init xen_hvm_smp_prepare_cpus(unsigned int max_cpus)
	int cpu;

	native_smp_prepare_cpus(max_cpus);
	WARN_ON(xen_smp_intr_init(0));

	xen_init_lock_cpu(0);
	if (xen_have_vector_callback) {
		WARN_ON(xen_smp_intr_init(0));
		xen_init_lock_cpu(0);
	}

	for_each_possible_cpu(cpu) {
		if (cpu == 0)

@@ -50,9 +52,11 @@ static void __init xen_hvm_smp_prepare_cpus(unsigned int max_cpus)
static void xen_hvm_cpu_die(unsigned int cpu)
{
	if (common_cpu_die(cpu) == 0) {
		xen_smp_intr_free(cpu);
		xen_uninit_lock_cpu(cpu);
		xen_teardown_timer(cpu);
		if (xen_have_vector_callback) {
			xen_smp_intr_free(cpu);
			xen_uninit_lock_cpu(cpu);
			xen_teardown_timer(cpu);
		}
	}
}
#else

@@ -64,14 +68,17 @@ static void xen_hvm_cpu_die(unsigned int cpu)

void __init xen_hvm_smp_init(void)
{
	if (!xen_have_vector_callback)
		return;

	smp_ops.smp_prepare_boot_cpu = xen_hvm_smp_prepare_boot_cpu;
	smp_ops.smp_prepare_cpus = xen_hvm_smp_prepare_cpus;
	smp_ops.smp_send_reschedule = xen_smp_send_reschedule;
	smp_ops.smp_cpus_done = xen_smp_cpus_done;
	smp_ops.cpu_die = xen_hvm_cpu_die;

	if (!xen_have_vector_callback) {
		nopvspin = true;
		return;
	}

	smp_ops.smp_send_reschedule = xen_smp_send_reschedule;
	smp_ops.send_call_func_ipi = xen_smp_send_call_function_ipi;
	smp_ops.send_call_func_single_ipi = xen_smp_send_call_function_single_ipi;
	smp_ops.smp_prepare_boot_cpu = xen_hvm_smp_prepare_boot_cpu;
	smp_ops.smp_cpus_done = xen_smp_cpus_done;
}

@@ -97,7 +97,7 @@ void acpi_scan_table_handler(u32 event, void *table, void *context);
 extern struct list_head acpi_bus_id_list;

 struct acpi_device_bus_id {
-	char bus_id[15];
+	const char *bus_id;
 	unsigned int instance_no;
 	struct list_head node;
 };
@@ -486,6 +486,7 @@ static void acpi_device_del(struct acpi_device *device)
 				acpi_device_bus_id->instance_no--;
 			else {
 				list_del(&acpi_device_bus_id->node);
+				kfree_const(acpi_device_bus_id->bus_id);
 				kfree(acpi_device_bus_id);
 			}
 			break;
@@ -674,7 +675,14 @@ int acpi_device_add(struct acpi_device *device,
 	}
 	if (!found) {
 		acpi_device_bus_id = new_bus_id;
-		strcpy(acpi_device_bus_id->bus_id, acpi_device_hid(device));
+		acpi_device_bus_id->bus_id =
+			kstrdup_const(acpi_device_hid(device), GFP_KERNEL);
+		if (!acpi_device_bus_id->bus_id) {
+			pr_err(PREFIX "Memory allocation error for bus id\n");
+			result = -ENOMEM;
+			goto err_free_new_bus_id;
+		}
+
 		acpi_device_bus_id->instance_no = 0;
 		list_add_tail(&acpi_device_bus_id->node, &acpi_bus_id_list);
 	}
@@ -709,6 +717,11 @@ int acpi_device_add(struct acpi_device *device,
 	if (device->parent)
 		list_del(&device->node);
 	list_del(&device->wakeup_list);
+
+ err_free_new_bus_id:
+	if (!found)
+		kfree(new_bus_id);
+
 	mutex_unlock(&acpi_device_lock);

 err_detach:
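The three ACPI hunks above replace a fixed 15-byte bus_id buffer with a pointer duplicated via kstrdup_const() and released with kfree_const(); in the kernel, the _const variants avoid copying strings that already live in .rodata and free only real heap copies. A user-space sketch of the same owned-or-borrowed string idea (struct and helper names are made up for illustration):

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    struct bus_id {
        const char *name;
        int owned;              /* 1 if name is a heap copy we must free */
    };

    static int bus_id_set(struct bus_id *b, const char *src, int copy)
    {
        if (copy) {
            b->name = strdup(src);  /* kstrdup_const() copies only non-.rodata */
            b->owned = 1;
            return b->name ? 0 : -1;
        }
        b->name = src;              /* borrowed, like a .rodata string */
        b->owned = 0;
        return 0;
    }

    static void bus_id_release(struct bus_id *b)
    {
        if (b->owned)               /* kfree_const() makes this check itself */
            free((char *)b->name);
        b->name = NULL;
    }

    int main(void)
    {
        struct bus_id b;
        if (bus_id_set(&b, "PNP0A08", 1) == 0) {
            printf("%s\n", b.name);
            bus_id_release(&b);
        }
        return 0;
    }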
@@ -1256,6 +1256,8 @@ static struct tegra_clk_init_table init_table[] __initdata = {
 	{ TEGRA30_CLK_I2S3_SYNC, TEGRA30_CLK_CLK_MAX, 24000000, 0 },
 	{ TEGRA30_CLK_I2S4_SYNC, TEGRA30_CLK_CLK_MAX, 24000000, 0 },
 	{ TEGRA30_CLK_VIMCLK_SYNC, TEGRA30_CLK_CLK_MAX, 24000000, 0 },
+	{ TEGRA30_CLK_HDA, TEGRA30_CLK_PLL_P, 102000000, 0 },
+	{ TEGRA30_CLK_HDA2CODEC_2X, TEGRA30_CLK_PLL_P, 48000000, 0 },
 	/* must be the last entry */
 	{ TEGRA30_CLK_CLK_MAX, TEGRA30_CLK_CLK_MAX, 0, 0 },
 };
@@ -251,6 +251,9 @@ static void cma_heap_dma_buf_release(struct dma_buf *dmabuf)
 		buffer->vaddr = NULL;
 	}

+	/* free page list */
+	kfree(buffer->pages);
+	/* release memory */
 	cma_release(cma_heap->cma, buffer->cma_pages, buffer->pagecount);
 	kfree(buffer);
 }
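The cma_heap hunk adds the missing kfree(buffer->pages): the release path has to unwind every allocation made at buffer creation, not only the CMA region itself. A runnable user-space analog of that symmetry (all names illustrative):

    #include <stdlib.h>

    struct demo_buffer {
        void **pages;       /* per-page bookkeeping (kmalloc'd array) */
        void *backing;      /* stands in for the CMA allocation */
        size_t pagecount;
    };

    static void demo_buffer_release(struct demo_buffer *b)
    {
        free(b->pages);     /* the line the fix adds: kfree(buffer->pages) */
        free(b->backing);   /* cma_release() in the driver */
        free(b);
    }

    int main(void)
    {
        struct demo_buffer *b = calloc(1, sizeof(*b));
        if (!b)
            return 1;
        b->pagecount = 4;
        b->pages = calloc(b->pagecount, sizeof(*b->pages));
        b->backing = malloc(b->pagecount * 4096);
        demo_buffer_release(b); /* frees pages, backing, then the struct */
        return 0;
    }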
@@ -112,6 +112,7 @@ int amdgpu_atomfirmware_allocate_fb_scratch(struct amdgpu_device *adev)
 union igp_info {
 	struct atom_integrated_system_info_v1_11 v11;
 	struct atom_integrated_system_info_v1_12 v12;
+	struct atom_integrated_system_info_v2_1 v21;
 };

 union umc_info {
@@ -209,24 +210,42 @@ amdgpu_atomfirmware_get_vram_info(struct amdgpu_device *adev,
 	if (adev->flags & AMD_IS_APU) {
 		igp_info = (union igp_info *)
 			(mode_info->atom_context->bios + data_offset);
-		switch (crev) {
-		case 11:
-			mem_channel_number = igp_info->v11.umachannelnumber;
-			/* channel width is 64 */
-			if (vram_width)
-				*vram_width = mem_channel_number * 64;
-			mem_type = igp_info->v11.memorytype;
-			if (vram_type)
-				*vram_type = convert_atom_mem_type_to_vram_type(adev, mem_type);
+		switch (frev) {
+		case 1:
+			switch (crev) {
+			case 11:
+			case 12:
+				mem_channel_number = igp_info->v11.umachannelnumber;
+				if (!mem_channel_number)
+					mem_channel_number = 1;
+				/* channel width is 64 */
+				if (vram_width)
+					*vram_width = mem_channel_number * 64;
+				mem_type = igp_info->v11.memorytype;
+				if (vram_type)
+					*vram_type = convert_atom_mem_type_to_vram_type(adev, mem_type);
+				break;
+			default:
+				return -EINVAL;
+			}
 			break;
-		case 12:
-			mem_channel_number = igp_info->v12.umachannelnumber;
-			/* channel width is 64 */
-			if (vram_width)
-				*vram_width = mem_channel_number * 64;
-			mem_type = igp_info->v12.memorytype;
-			if (vram_type)
-				*vram_type = convert_atom_mem_type_to_vram_type(adev, mem_type);
+		case 2:
+			switch (crev) {
+			case 1:
+			case 2:
+				mem_channel_number = igp_info->v21.umachannelnumber;
+				if (!mem_channel_number)
+					mem_channel_number = 1;
+				/* channel width is 64 */
+				if (vram_width)
+					*vram_width = mem_channel_number * 64;
+				mem_type = igp_info->v21.memorytype;
+				if (vram_type)
+					*vram_type = convert_atom_mem_type_to_vram_type(adev, mem_type);
+				break;
+			default:
+				return -EINVAL;
+			}
 			break;
 		default:
 			return -EINVAL;
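The restructured hunk dispatches on the table's format revision (frev) first and only then on the content revision (crev), so a v2.x integrated-system-info table is no longer parsed with v1.x layouts; it also clamps a zero channel count to 1. A compilable sketch of that nested-dispatch shape (values and names are illustrative):

    #include <stdio.h>

    /* frev selects the layout family, crev the variant within it. */
    static int vram_width_bits(int frev, int crev, int channels)
    {
        switch (frev) {
        case 1:
            switch (crev) {
            case 11:
            case 12:
                if (!channels)          /* clamp like the fix does */
                    channels = 1;
                return channels * 64;   /* channel width is 64 */
            default:
                return -1;
            }
        case 2:
            switch (crev) {
            case 1:
            case 2:
                if (!channels)
                    channels = 1;
                return channels * 64;
            default:
                return -1;
            }
        default:
            return -1;
        }
    }

    int main(void)
    {
        printf("v1.11: %d bits\n", vram_width_bits(1, 11, 2));
        printf("v2.1:  %d bits\n", vram_width_bits(2, 1, 0));
        printf("bad:   %d\n", vram_width_bits(3, 0, 2));
        return 0;
    }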
@@ -3034,7 +3034,7 @@ bool amdgpu_device_asic_has_dc_support(enum amd_asic_type asic_type)
 #endif
 	default:
 		if (amdgpu_dc > 0)
-			DRM_INFO("Display Core has been requested via kernel parameter "
+			DRM_INFO_ONCE("Display Core has been requested via kernel parameter "
 				 "but isn't supported by ASIC, ignoring\n");
 		return false;
 	}
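DRM_INFO_ONCE demotes the message to print a single time instead of once per query. The usual implementation idea behind such _ONCE macros is a static guard flag, roughly like this user-space sketch (not the actual DRM macro):

    #include <stdio.h>

    #define pr_info_once(...)                       \
        do {                                        \
            static int printed__;                   \
            if (!printed__) {                       \
                printed__ = 1;                      \
                printf(__VA_ARGS__);                \
            }                                       \
        } while (0)

    int main(void)
    {
        for (int i = 0; i < 3; i++)
            pr_info_once("requested but unsupported, ignoring\n");
        return 0;
    }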
@@ -1085,6 +1085,8 @@ static const struct pci_device_id pciidlist[] = {

 	/* Renoir */
 	{0x1002, 0x1636, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RENOIR|AMD_IS_APU},
+	{0x1002, 0x1638, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RENOIR|AMD_IS_APU},
+	{0x1002, 0x164C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RENOIR|AMD_IS_APU},

 	/* Navi12 */
 	{0x1002, 0x7360, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVI12},
@@ -99,6 +99,10 @@
 #define mmGCR_GENERAL_CNTL_Sienna_Cichlid		0x1580
 #define mmGCR_GENERAL_CNTL_Sienna_Cichlid_BASE_IDX	0

+#define mmGOLDEN_TSC_COUNT_UPPER_Vangogh		0x0025
+#define mmGOLDEN_TSC_COUNT_UPPER_Vangogh_BASE_IDX	1
+#define mmGOLDEN_TSC_COUNT_LOWER_Vangogh		0x0026
+#define mmGOLDEN_TSC_COUNT_LOWER_Vangogh_BASE_IDX	1
 #define mmSPI_CONFIG_CNTL_1_Vangogh			0x2441
 #define mmSPI_CONFIG_CNTL_1_Vangogh_BASE_IDX		1
 #define mmVGT_TF_MEMORY_BASE_HI_Vangogh			0x2261
@@ -160,6 +164,9 @@
 #define mmGCVM_L2_CGTT_CLK_CTRL_Sienna_Cichlid		0x15db
 #define mmGCVM_L2_CGTT_CLK_CTRL_Sienna_Cichlid_BASE_IDX	0

+#define mmGC_THROTTLE_CTRL_Sienna_Cichlid		0x2030
+#define mmGC_THROTTLE_CTRL_Sienna_Cichlid_BASE_IDX	0
+
 MODULE_FIRMWARE("amdgpu/navi10_ce.bin");
 MODULE_FIRMWARE("amdgpu/navi10_pfp.bin");
 MODULE_FIRMWARE("amdgpu/navi10_me.bin");
@@ -3324,6 +3331,7 @@ static void gfx_v10_0_ring_emit_de_meta(struct amdgpu_ring *ring, bool resume);
 static void gfx_v10_0_ring_emit_frame_cntl(struct amdgpu_ring *ring, bool start, bool secure);
 static u32 gfx_v10_3_get_disabled_sa(struct amdgpu_device *adev);
 static void gfx_v10_3_program_pbb_mode(struct amdgpu_device *adev);
+static void gfx_v10_3_set_power_brake_sequence(struct amdgpu_device *adev);

 static void gfx10_kiq_set_resources(struct amdgpu_ring *kiq_ring, uint64_t queue_mask)
 {
@@ -7192,6 +7200,9 @@ static int gfx_v10_0_hw_init(void *handle)
 	if (adev->asic_type == CHIP_SIENNA_CICHLID)
 		gfx_v10_3_program_pbb_mode(adev);

+	if (adev->asic_type >= CHIP_SIENNA_CICHLID)
+		gfx_v10_3_set_power_brake_sequence(adev);
+
 	return r;
 }
@@ -7377,8 +7388,16 @@ static uint64_t gfx_v10_0_get_gpu_clock_counter(struct amdgpu_device *adev)

 	amdgpu_gfx_off_ctrl(adev, false);
 	mutex_lock(&adev->gfx.gpu_clock_mutex);
-	clock = (uint64_t)RREG32_SOC15(SMUIO, 0, mmGOLDEN_TSC_COUNT_LOWER) |
-		((uint64_t)RREG32_SOC15(SMUIO, 0, mmGOLDEN_TSC_COUNT_UPPER) << 32ULL);
+	switch (adev->asic_type) {
+	case CHIP_VANGOGH:
+		clock = (uint64_t)RREG32_SOC15(SMUIO, 0, mmGOLDEN_TSC_COUNT_LOWER_Vangogh) |
+			((uint64_t)RREG32_SOC15(SMUIO, 0, mmGOLDEN_TSC_COUNT_UPPER_Vangogh) << 32ULL);
+		break;
+	default:
+		clock = (uint64_t)RREG32_SOC15(SMUIO, 0, mmGOLDEN_TSC_COUNT_LOWER) |
+			((uint64_t)RREG32_SOC15(SMUIO, 0, mmGOLDEN_TSC_COUNT_UPPER) << 32ULL);
+		break;
+	}
 	mutex_unlock(&adev->gfx.gpu_clock_mutex);
 	amdgpu_gfx_off_ctrl(adev, true);
 	return clock;
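Both branches assemble one 64-bit timestamp from a LOWER/UPPER register pair; the surrounding gpu_clock_mutex and the GFXOFF disable keep the two 32-bit reads coherent. The combination step itself, as a runnable sketch:

    #include <stdint.h>
    #include <stdio.h>

    /* Merge a 32-bit LOWER/UPPER register pair into one 64-bit value. */
    static uint64_t combine_tsc(uint32_t lower, uint32_t upper)
    {
        return (uint64_t)lower | ((uint64_t)upper << 32);
    }

    int main(void)
    {
        /* e.g. LOWER = 0xdeadbeef, UPPER = 0x1 -> 0x1deadbeef */
        printf("0x%llx\n",
               (unsigned long long)combine_tsc(0xdeadbeefu, 0x1u));
        return 0;
    }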
@@ -9169,6 +9188,31 @@ static void gfx_v10_3_program_pbb_mode(struct amdgpu_device *adev)
 	}
 }

+static void gfx_v10_3_set_power_brake_sequence(struct amdgpu_device *adev)
+{
+	WREG32_SOC15(GC, 0, mmGRBM_GFX_INDEX,
+		     (0x1 << GRBM_GFX_INDEX__SA_BROADCAST_WRITES__SHIFT) |
+		     (0x1 << GRBM_GFX_INDEX__INSTANCE_BROADCAST_WRITES__SHIFT) |
+		     (0x1 << GRBM_GFX_INDEX__SE_BROADCAST_WRITES__SHIFT));
+
+	WREG32_SOC15(GC, 0, mmGC_CAC_IND_INDEX, ixPWRBRK_STALL_PATTERN_CTRL);
+	WREG32_SOC15(GC, 0, mmGC_CAC_IND_DATA,
+		     (0x1 << PWRBRK_STALL_PATTERN_CTRL__PWRBRK_STEP_INTERVAL__SHIFT) |
+		     (0x12 << PWRBRK_STALL_PATTERN_CTRL__PWRBRK_BEGIN_STEP__SHIFT) |
+		     (0x13 << PWRBRK_STALL_PATTERN_CTRL__PWRBRK_END_STEP__SHIFT) |
+		     (0xf << PWRBRK_STALL_PATTERN_CTRL__PWRBRK_THROTTLE_PATTERN_BIT_NUMS__SHIFT));
+
+	WREG32_SOC15(GC, 0, mmGC_THROTTLE_CTRL_Sienna_Cichlid,
+		     (0x1 << GC_THROTTLE_CTRL__PWRBRK_STALL_EN__SHIFT) |
+		     (0x1 << GC_THROTTLE_CTRL__PATTERN_MODE__SHIFT) |
+		     (0x5 << GC_THROTTLE_CTRL__RELEASE_STEP_INTERVAL__SHIFT));
+
+	WREG32_SOC15(GC, 0, mmDIDT_IND_INDEX, ixDIDT_SQ_THROTTLE_CTRL);
+
+	WREG32_SOC15(GC, 0, mmDIDT_IND_DATA,
+		     (0x1 << DIDT_SQ_THROTTLE_CTRL__PWRBRK_STALL_EN__SHIFT));
+}
+
 const struct amdgpu_ip_block_version gfx_v10_0_ip_block =
 {
 	.type = AMD_IP_BLOCK_TYPE_GFX,
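The power-brake sequence programs indirect registers through INDEX/DATA pairs: mmGC_CAC_IND_INDEX selects a register inside the CAC block and mmGC_CAC_IND_DATA writes it, and likewise for the DIDT pair. A user-space model of that access pattern (the register file and names here are invented for illustration):

    #include <stdint.h>
    #include <stdio.h>

    static uint32_t regfile[256];   /* the indirect register space */
    static uint32_t index_reg;      /* the INDEX register */

    static void wreg_index(uint32_t idx) { index_reg = idx; }
    static void wreg_data(uint32_t val) { regfile[index_reg % 256] = val; }

    int main(void)
    {
        wreg_index(0x42);   /* like WREG32(mm..._IND_INDEX, ix...) */
        wreg_data(1u << 3); /* like WREG32(mm..._IND_DATA, fields) */
        printf("reg[0x42] = 0x%x\n", regfile[0x42]);
        return 0;
    }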
@@ -47,7 +47,7 @@ enum psp_gfx_crtl_cmd_id
 	GFX_CTRL_CMD_ID_DISABLE_INT     = 0x00060000,   /* disable PSP-to-Gfx interrupt */
 	GFX_CTRL_CMD_ID_MODE1_RST       = 0x00070000,   /* trigger the Mode 1 reset */
 	GFX_CTRL_CMD_ID_GBR_IH_SET      = 0x00080000,   /* set Gbr IH_RB_CNTL registers */
-	GFX_CTRL_CMD_ID_CONSUME_CMD     = 0x000A0000,   /* send interrupt to psp for updating write pointer of vf */
+	GFX_CTRL_CMD_ID_CONSUME_CMD     = 0x00090000,   /* send interrupt to psp for updating write pointer of vf */
 	GFX_CTRL_CMD_ID_DESTROY_GPCOM_RING = 0x000C0000, /* destroy GPCOM ring */

 	GFX_CTRL_CMD_ID_MAX             = 0x000F0000,   /* max command ID */
@@ -1239,7 +1239,8 @@ static int soc15_common_early_init(void *handle)
 		break;
 	case CHIP_RENOIR:
 		adev->asic_funcs = &soc15_asic_funcs;
-		if (adev->pdev->device == 0x1636)
+		if ((adev->pdev->device == 0x1636) ||
+		    (adev->pdev->device == 0x164c))
 			adev->apu_flags |= AMD_APU_IS_RENOIR;
 		else
 			adev->apu_flags |= AMD_APU_IS_GREEN_SARDINE;
@@ -1040,11 +1040,14 @@ static int kfd_create_vcrat_image_cpu(void *pcrat_image, size_t *size)
 				(struct crat_subtype_iolink *)sub_type_hdr);
 		if (ret < 0)
 			return ret;
-		crat_table->length += (sub_type_hdr->length * entries);
-		crat_table->total_entries += entries;

-		sub_type_hdr = (typeof(sub_type_hdr))((char *)sub_type_hdr +
-				sub_type_hdr->length * entries);
+		if (entries) {
+			crat_table->length += (sub_type_hdr->length * entries);
+			crat_table->total_entries += entries;
+
+			sub_type_hdr = (typeof(sub_type_hdr))((char *)sub_type_hdr +
+					sub_type_hdr->length * entries);
+		}
 #else
 		pr_info("IO link not available for non x86 platforms\n");
 #endif
@@ -939,41 +939,6 @@ static void mmhub_read_system_context(struct amdgpu_device *adev, struct dc_phy_
 }
 #endif

-#ifdef CONFIG_DEBUG_FS
-static int create_crtc_crc_properties(struct amdgpu_display_manager *dm)
-{
-	dm->crc_win_x_start_property =
-		drm_property_create_range(adev_to_drm(dm->adev),
-					  DRM_MODE_PROP_ATOMIC,
-					  "AMD_CRC_WIN_X_START", 0, U16_MAX);
-	if (!dm->crc_win_x_start_property)
-		return -ENOMEM;
-
-	dm->crc_win_y_start_property =
-		drm_property_create_range(adev_to_drm(dm->adev),
-					  DRM_MODE_PROP_ATOMIC,
-					  "AMD_CRC_WIN_Y_START", 0, U16_MAX);
-	if (!dm->crc_win_y_start_property)
-		return -ENOMEM;
-
-	dm->crc_win_x_end_property =
-		drm_property_create_range(adev_to_drm(dm->adev),
-					  DRM_MODE_PROP_ATOMIC,
-					  "AMD_CRC_WIN_X_END", 0, U16_MAX);
-	if (!dm->crc_win_x_end_property)
-		return -ENOMEM;
-
-	dm->crc_win_y_end_property =
-		drm_property_create_range(adev_to_drm(dm->adev),
-					  DRM_MODE_PROP_ATOMIC,
-					  "AMD_CRC_WIN_Y_END", 0, U16_MAX);
-	if (!dm->crc_win_y_end_property)
-		return -ENOMEM;
-
-	return 0;
-}
-#endif
-
 static int amdgpu_dm_init(struct amdgpu_device *adev)
 {
 	struct dc_init_data init_data;
@@ -1120,10 +1085,6 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)

 		dc_init_callbacks(adev->dm.dc, &init_params);
 	}
 #endif
-#ifdef CONFIG_DEBUG_FS
-	if (create_crtc_crc_properties(&adev->dm))
-		DRM_ERROR("amdgpu: failed to create crc property.\n");
-#endif
 	if (amdgpu_dm_initialize_drm_device(adev)) {
 		DRM_ERROR(
@@ -5333,64 +5294,12 @@ dm_crtc_duplicate_state(struct drm_crtc *crtc)
 	state->crc_src = cur->crc_src;
 	state->cm_has_degamma = cur->cm_has_degamma;
 	state->cm_is_degamma_srgb = cur->cm_is_degamma_srgb;
-#ifdef CONFIG_DEBUG_FS
-	state->crc_window = cur->crc_window;
-#endif

 	/* TODO Duplicate dc_stream after objects are stream object is flattened */

 	return &state->base;
 }

-#ifdef CONFIG_DEBUG_FS
-static int amdgpu_dm_crtc_atomic_set_property(struct drm_crtc *crtc,
-					      struct drm_crtc_state *crtc_state,
-					      struct drm_property *property,
-					      uint64_t val)
-{
-	struct drm_device *dev = crtc->dev;
-	struct amdgpu_device *adev = drm_to_adev(dev);
-	struct dm_crtc_state *dm_new_state =
-		to_dm_crtc_state(crtc_state);
-
-	if (property == adev->dm.crc_win_x_start_property)
-		dm_new_state->crc_window.x_start = val;
-	else if (property == adev->dm.crc_win_y_start_property)
-		dm_new_state->crc_window.y_start = val;
-	else if (property == adev->dm.crc_win_x_end_property)
-		dm_new_state->crc_window.x_end = val;
-	else if (property == adev->dm.crc_win_y_end_property)
-		dm_new_state->crc_window.y_end = val;
-	else
-		return -EINVAL;
-
-	return 0;
-}
-
-static int amdgpu_dm_crtc_atomic_get_property(struct drm_crtc *crtc,
-					      const struct drm_crtc_state *state,
-					      struct drm_property *property,
-					      uint64_t *val)
-{
-	struct drm_device *dev = crtc->dev;
-	struct amdgpu_device *adev = drm_to_adev(dev);
-	struct dm_crtc_state *dm_state =
-		to_dm_crtc_state(state);
-
-	if (property == adev->dm.crc_win_x_start_property)
-		*val = dm_state->crc_window.x_start;
-	else if (property == adev->dm.crc_win_y_start_property)
-		*val = dm_state->crc_window.y_start;
-	else if (property == adev->dm.crc_win_x_end_property)
-		*val = dm_state->crc_window.x_end;
-	else if (property == adev->dm.crc_win_y_end_property)
-		*val = dm_state->crc_window.y_end;
-	else
-		return -EINVAL;
-
-	return 0;
-}
-#endif

 static inline int dm_set_vupdate_irq(struct drm_crtc *crtc, bool enable)
 {
 	enum dc_irq_source irq_source;
@@ -5457,10 +5366,6 @@ static const struct drm_crtc_funcs amdgpu_dm_crtc_funcs = {
 	.enable_vblank = dm_enable_vblank,
 	.disable_vblank = dm_disable_vblank,
 	.get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp,
-#ifdef CONFIG_DEBUG_FS
-	.atomic_set_property = amdgpu_dm_crtc_atomic_set_property,
-	.atomic_get_property = amdgpu_dm_crtc_atomic_get_property,
-#endif
 };

 static enum drm_connector_status
@@ -6662,25 +6567,6 @@ static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
 	return 0;
 }

-#ifdef CONFIG_DEBUG_FS
-static void attach_crtc_crc_properties(struct amdgpu_display_manager *dm,
-				       struct amdgpu_crtc *acrtc)
-{
-	drm_object_attach_property(&acrtc->base.base,
-				   dm->crc_win_x_start_property,
-				   0);
-	drm_object_attach_property(&acrtc->base.base,
-				   dm->crc_win_y_start_property,
-				   0);
-	drm_object_attach_property(&acrtc->base.base,
-				   dm->crc_win_x_end_property,
-				   0);
-	drm_object_attach_property(&acrtc->base.base,
-				   dm->crc_win_y_end_property,
-				   0);
-}
-#endif
-
 static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
 			       struct drm_plane *plane,
 			       uint32_t crtc_index)
@@ -6728,9 +6614,7 @@ static int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm,
 	drm_crtc_enable_color_mgmt(&acrtc->base, MAX_COLOR_LUT_ENTRIES,
 				   true, MAX_COLOR_LUT_ENTRIES);
 	drm_mode_crtc_set_gamma_size(&acrtc->base, MAX_COLOR_LEGACY_LUT_ENTRIES);
-#ifdef CONFIG_DEBUG_FS
-	attach_crtc_crc_properties(dm, acrtc);
-#endif

 	return 0;

 fail:
@@ -8367,7 +8251,6 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
 	 */
 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
 		struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
-		bool configure_crc = false;

 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
@@ -8377,27 +8260,20 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
 			dc_stream_retain(dm_new_crtc_state->stream);
 			acrtc->dm_irq_params.stream = dm_new_crtc_state->stream;
 			manage_dm_interrupts(adev, acrtc, true);
 		}
-		if (IS_ENABLED(CONFIG_DEBUG_FS) && new_crtc_state->active &&
-		    amdgpu_dm_is_valid_crc_source(dm_new_crtc_state->crc_src)) {

+#ifdef CONFIG_DEBUG_FS
 		/**
 		 * Frontend may have changed so reapply the CRC capture
 		 * settings for the stream.
 		 */
 		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
-		dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
-
-		if (amdgpu_dm_crc_window_is_default(dm_new_crtc_state)) {
-			if (!old_crtc_state->active || drm_atomic_crtc_needs_modeset(new_crtc_state))
-				configure_crc = true;
-		} else {
-			if (amdgpu_dm_crc_window_changed(dm_new_crtc_state, dm_old_crtc_state))
-				configure_crc = true;
-		}
-
-		if (configure_crc)
+		if (amdgpu_dm_is_valid_crc_source(dm_new_crtc_state->crc_src)) {
 			amdgpu_dm_crtc_configure_crc_source(
-				crtc, dm_new_crtc_state, dm_new_crtc_state->crc_src);
+				crtc, dm_new_crtc_state,
+				dm_new_crtc_state->crc_src);
+		}
+#endif
 	}
 }
@@ -336,32 +336,6 @@ struct amdgpu_display_manager {
 	 */
 	const struct gpu_info_soc_bounding_box_v1_0 *soc_bounding_box;

-#ifdef CONFIG_DEBUG_FS
-	/**
-	 * @crc_win_x_start_property:
-	 *
-	 * X start of the crc calculation window
-	 */
-	struct drm_property *crc_win_x_start_property;
-	/**
-	 * @crc_win_y_start_property:
-	 *
-	 * Y start of the crc calculation window
-	 */
-	struct drm_property *crc_win_y_start_property;
-	/**
-	 * @crc_win_x_end_property:
-	 *
-	 * X end of the crc calculation window
-	 */
-	struct drm_property *crc_win_x_end_property;
-	/**
-	 * @crc_win_y_end_property:
-	 *
-	 * Y end of the crc calculation window
-	 */
-	struct drm_property *crc_win_y_end_property;
-#endif
 	/**
 	 * @mst_encoders:
 	 *
@@ -448,15 +422,6 @@ struct dm_plane_state {
 	struct dc_plane_state *dc_state;
 };

-#ifdef CONFIG_DEBUG_FS
-struct crc_rec {
-	uint16_t x_start;
-	uint16_t y_start;
-	uint16_t x_end;
-	uint16_t y_end;
-};
-#endif
-
 struct dm_crtc_state {
 	struct drm_crtc_state base;
 	struct dc_stream_state *stream;
@@ -479,9 +444,6 @@ struct dm_crtc_state {
 	struct dc_info_packet vrr_infopacket;

 	int abm_level;
-#ifdef CONFIG_DEBUG_FS
-	struct crc_rec crc_window;
-#endif
 };

 #define to_dm_crtc_state(x) container_of(x, struct dm_crtc_state, base)
@@ -81,41 +81,6 @@ const char *const *amdgpu_dm_crtc_get_crc_sources(struct drm_crtc *crtc,
 	return pipe_crc_sources;
 }

-static void amdgpu_dm_set_crc_window_default(struct dm_crtc_state *dm_crtc_state)
-{
-	dm_crtc_state->crc_window.x_start = 0;
-	dm_crtc_state->crc_window.y_start = 0;
-	dm_crtc_state->crc_window.x_end = 0;
-	dm_crtc_state->crc_window.y_end = 0;
-}
-
-bool amdgpu_dm_crc_window_is_default(struct dm_crtc_state *dm_crtc_state)
-{
-	bool ret = true;
-
-	if ((dm_crtc_state->crc_window.x_start != 0) ||
-	    (dm_crtc_state->crc_window.y_start != 0) ||
-	    (dm_crtc_state->crc_window.x_end != 0) ||
-	    (dm_crtc_state->crc_window.y_end != 0))
-		ret = false;
-
-	return ret;
-}
-
-bool amdgpu_dm_crc_window_changed(struct dm_crtc_state *dm_new_crtc_state,
-				  struct dm_crtc_state *dm_old_crtc_state)
-{
-	bool ret = false;
-
-	if ((dm_new_crtc_state->crc_window.x_start != dm_old_crtc_state->crc_window.x_start) ||
-	    (dm_new_crtc_state->crc_window.y_start != dm_old_crtc_state->crc_window.y_start) ||
-	    (dm_new_crtc_state->crc_window.x_end != dm_old_crtc_state->crc_window.x_end) ||
-	    (dm_new_crtc_state->crc_window.y_end != dm_old_crtc_state->crc_window.y_end))
-		ret = true;
-
-	return ret;
-}
-
 int
 amdgpu_dm_crtc_verify_crc_source(struct drm_crtc *crtc, const char *src_name,
 				 size_t *values_cnt)
@@ -140,7 +105,6 @@ int amdgpu_dm_crtc_configure_crc_source(struct drm_crtc *crtc,
 	struct dc_stream_state *stream_state = dm_crtc_state->stream;
 	bool enable = amdgpu_dm_is_valid_crc_source(source);
 	int ret = 0;
-	struct crc_params *crc_window = NULL, tmp_window;

 	/* Configuration will be deferred to stream enable. */
 	if (!stream_state)
@@ -150,24 +114,8 @@ int amdgpu_dm_crtc_configure_crc_source(struct drm_crtc *crtc,

 	/* Enable CRTC CRC generation if necessary. */
 	if (dm_is_crc_source_crtc(source) || source == AMDGPU_DM_PIPE_CRC_SOURCE_NONE) {
-		if (!enable)
-			amdgpu_dm_set_crc_window_default(dm_crtc_state);
-
-		if (!amdgpu_dm_crc_window_is_default(dm_crtc_state)) {
-			crc_window = &tmp_window;
-
-			tmp_window.windowa_x_start = dm_crtc_state->crc_window.x_start;
-			tmp_window.windowa_y_start = dm_crtc_state->crc_window.y_start;
-			tmp_window.windowa_x_end = dm_crtc_state->crc_window.x_end;
-			tmp_window.windowa_y_end = dm_crtc_state->crc_window.y_end;
-			tmp_window.windowb_x_start = dm_crtc_state->crc_window.x_start;
-			tmp_window.windowb_y_start = dm_crtc_state->crc_window.y_start;
-			tmp_window.windowb_x_end = dm_crtc_state->crc_window.x_end;
-			tmp_window.windowb_y_end = dm_crtc_state->crc_window.y_end;
-		}
-
 		if (!dc_stream_configure_crc(stream_state->ctx->dc,
-				stream_state, crc_window, enable, enable)) {
+				stream_state, NULL, enable, enable)) {
 			ret = -EINVAL;
 			goto unlock;
 		}
@@ -46,13 +46,10 @@ static inline bool amdgpu_dm_is_valid_crc_source(enum amdgpu_dm_pipe_crc_source
 }

 /* amdgpu_dm_crc.c */
-bool amdgpu_dm_crc_window_is_default(struct dm_crtc_state *dm_crtc_state);
-bool amdgpu_dm_crc_window_changed(struct dm_crtc_state *dm_new_crtc_state,
-				  struct dm_crtc_state *dm_old_crtc_state);
-#ifdef CONFIG_DEBUG_FS
 int amdgpu_dm_crtc_configure_crc_source(struct drm_crtc *crtc,
 					struct dm_crtc_state *dm_crtc_state,
 					enum amdgpu_dm_pipe_crc_source source);
+#ifdef CONFIG_DEBUG_FS
 int amdgpu_dm_crtc_set_crc_source(struct drm_crtc *crtc, const char *src_name);
 int amdgpu_dm_crtc_verify_crc_source(struct drm_crtc *crtc,
 				     const char *src_name,
@@ -3992,7 +3992,7 @@ bool dc_link_dp_set_test_pattern(
 	unsigned int cust_pattern_size)
 {
 	struct pipe_ctx *pipes = link->dc->current_state->res_ctx.pipe_ctx;
-	struct pipe_ctx *pipe_ctx = &pipes[0];
+	struct pipe_ctx *pipe_ctx = NULL;
 	unsigned int lane;
 	unsigned int i;
 	unsigned char link_qual_pattern[LANE_COUNT_DP_MAX] = {0};
@@ -4002,12 +4002,18 @@ bool dc_link_dp_set_test_pattern(
 	memset(&training_pattern, 0, sizeof(training_pattern));

 	for (i = 0; i < MAX_PIPES; i++) {
+		if (pipes[i].stream == NULL)
+			continue;
+
 		if (pipes[i].stream->link == link && !pipes[i].top_pipe && !pipes[i].prev_odm_pipe) {
 			pipe_ctx = &pipes[i];
 			break;
 		}
 	}

+	if (pipe_ctx == NULL)
+		return false;
+
 	/* Reset CRTC Test Pattern if it is currently running and request is VideoMode */
 	if (link->test_pattern_enabled && test_pattern ==
 			DP_TEST_PATTERN_VIDEO_MODE) {
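The two dc_link hunks stop assuming pipes[0] is usable: pipe_ctx now starts as NULL, the loop skips pipe contexts without a stream, and the function bails out when nothing matches. The same find-or-bail shape in a runnable sketch (types are illustrative):

    #include <stddef.h>
    #include <stdio.h>

    struct pipe { int has_stream; int link_id; };

    static struct pipe *find_pipe(struct pipe *pipes, size_t n, int link_id)
    {
        for (size_t i = 0; i < n; i++) {
            if (!pipes[i].has_stream)
                continue;           /* like the added NULL-stream skip */
            if (pipes[i].link_id == link_id)
                return &pipes[i];
        }
        return NULL;                /* caller must check, like the fix */
    }

    int main(void)
    {
        struct pipe pipes[3] = { {0, 0}, {1, 7}, {1, 9} };
        struct pipe *p = find_pipe(pipes, 3, 9);
        if (p)
            printf("found link %d\n", p->link_id);
        else
            printf("no usable pipe\n");
        return 0;
    }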
@@ -470,7 +470,7 @@ void mpc1_cursor_lock(struct mpc *mpc, int opp_id, bool lock)
 unsigned int mpc1_get_mpc_out_mux(struct mpc *mpc, int opp_id)
 {
 	struct dcn10_mpc *mpc10 = TO_DCN10_MPC(mpc);
-	uint32_t val = 0;
+	uint32_t val = 0xf;

 	if (opp_id < MAX_OPP && REG(MUX[opp_id]))
 		REG_GET(MUX[opp_id], MPC_OUT_MUX, &val);
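Initializing val to 0xf matters because REG_GET() is skipped when the OPP has no mux register, leaving val untouched; 0 would alias a valid mux selection, while 0xf acts as an out-of-range sentinel. A compilable illustration of the idea (names are made up):

    #include <stdint.h>
    #include <stdio.h>

    /* Writes *out only when the register exists, like REG_GET above. */
    static int maybe_read_mux(uint32_t *out, int reg_present)
    {
        if (reg_present)
            *out = 2;   /* pretend hardware value */
        return reg_present;
    }

    int main(void)
    {
        uint32_t val = 0xf; /* sentinel: not a valid mux selection */
        if (!maybe_read_mux(&val, 0))
            printf("no register, val stays 0x%x\n", val);
        return 0;
    }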
@@ -608,8 +608,8 @@ static const struct dc_debug_options debug_defaults_drv = {
 		.disable_pplib_clock_request = false,
 		.disable_pplib_wm_range = false,
 		.pplib_wm_report_mode = WM_REPORT_DEFAULT,
-		.pipe_split_policy = MPC_SPLIT_DYNAMIC,
-		.force_single_disp_pipe_split = true,
+		.pipe_split_policy = MPC_SPLIT_AVOID,
+		.force_single_disp_pipe_split = false,
 		.disable_dcc = DCC_ENABLE,
 		.voltage_align_fclk = true,
 		.disable_stereo_support = true,
@@ -1731,6 +1731,7 @@ static struct resource_funcs dcn301_res_pool_funcs = {
 	.populate_dml_pipes = dcn30_populate_dml_pipes_from_context,
 	.acquire_idle_pipe_for_layer = dcn20_acquire_idle_pipe_for_layer,
 	.add_stream_to_ctx = dcn30_add_stream_to_ctx,
+	.add_dsc_to_stream_resource = dcn20_add_dsc_to_stream_resource,
 	.remove_stream_from_ctx = dcn20_remove_stream_from_ctx,
 	.populate_dml_writeback_from_context = dcn30_populate_dml_writeback_from_context,
 	.set_mcif_arb_params = dcn30_set_mcif_arb_params,
@@ -2635,14 +2635,15 @@ static void dml20v2_DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndP
 	}

 	if (mode_lib->vba.DRAMClockChangeSupportsVActive &&
-			mode_lib->vba.MinActiveDRAMClockChangeMargin > 60 &&
-			mode_lib->vba.PrefetchMode[mode_lib->vba.VoltageLevel][mode_lib->vba.maxMpcComb] == 0) {
+			mode_lib->vba.MinActiveDRAMClockChangeMargin > 60) {
 		mode_lib->vba.DRAMClockChangeWatermark += 25;

 		for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
-			if (mode_lib->vba.DRAMClockChangeWatermark >
-			dml_max(mode_lib->vba.StutterEnterPlusExitWatermark, mode_lib->vba.UrgentWatermark))
-				mode_lib->vba.MinTTUVBlank[k] += 25;
+			if (mode_lib->vba.PrefetchMode[mode_lib->vba.VoltageLevel][mode_lib->vba.maxMpcComb] == 0) {
+				if (mode_lib->vba.DRAMClockChangeWatermark >
+				dml_max(mode_lib->vba.StutterEnterPlusExitWatermark, mode_lib->vba.UrgentWatermark))
+					mode_lib->vba.MinTTUVBlank[k] += 25;
+			}
 		}

 		mode_lib->vba.DRAMClockChangeSupport[0][0] = dm_dram_clock_change_vactive;