mirror of https://gitee.com/openkylin/linux.git
Merge tag 'for-hubcap-v4.9-readahead' of git://github.com/martinbrandenburg/linux

orangefs: integrate readahead cache changes from out-of-tree

The readahead cache has long been present as a compile time option to OrangeFS. As it had not been tested in some time, it was disabled by default. Recently, Walt Ligon started work reviving it, which eventually culminated in the commit below to OrangeFS SVN.

r12519 | walt | 2016-07-13 14:32:42 -0400 (Wed, 13 Jul 2016) | 1 line
reintegrating readahead cache code

This cache is implemented almost entirely in userspace. There are a few parameters exposed via sysfs, and the cache must be flushed after an inode is released.
This commit is contained in commit a21aae3bb1.
@ -0,0 +1,3 @@
[spatch]
	options = --timeout 200
	options = --use-gitgrep
@ -37,6 +37,7 @@ modules.builtin
Module.symvers
*.dwo
*.su
*.c.[012]*.*

#
# Top-level generic files
@ -66,6 +67,7 @@ Module.symvers
#
!.gitignore
!.mailmap
!.cocciconfig

#
# Generated include files
@ -78,422 +78,111 @@ CONFIG_PCI_MSI option.

4.2 Using MSI

Most of the hard work is done for the driver in the PCI layer. It simply
has to request that the PCI layer set up the MSI capability for this
Most of the hard work is done for the driver in the PCI layer. The driver
simply has to request that the PCI layer set up the MSI capability for this
device.

4.2.1 pci_enable_msi
To automatically use MSI or MSI-X interrupt vectors, use the following
function:

  int pci_enable_msi(struct pci_dev *dev)
  int pci_alloc_irq_vectors(struct pci_dev *dev, unsigned int min_vecs,
		unsigned int max_vecs, unsigned int flags);

A successful call allocates ONE interrupt to the device, regardless
of how many MSIs the device supports. The device is switched from
pin-based interrupt mode to MSI mode. The dev->irq number is changed
to a new number which represents the message signaled interrupt;
consequently, this function should be called before the driver calls
request_irq(), because an MSI is delivered via a vector that is
different from the vector of a pin-based interrupt.
which allocates up to max_vecs interrupt vectors for a PCI device. It
returns the number of vectors allocated or a negative error. If the device
has a requirements for a minimum number of vectors the driver can pass a
min_vecs argument set to this limit, and the PCI core will return -ENOSPC
if it can't meet the minimum number of vectors.

4.2.2 pci_enable_msi_range
The flags argument should normally be set to 0, but can be used to pass the
PCI_IRQ_NOMSI and PCI_IRQ_NOMSIX flag in case a device claims to support
MSI or MSI-X, but the support is broken, or to pass PCI_IRQ_NOLEGACY in
case the device does not support legacy interrupt lines.

  int pci_enable_msi_range(struct pci_dev *dev, int minvec, int maxvec)
By default this function will spread the interrupts around the available
CPUs, but this feature can be disabled by passing the PCI_IRQ_NOAFFINITY
flag.

This function allows a device driver to request any number of MSI
interrupts within specified range from 'minvec' to 'maxvec'.
To get the Linux IRQ numbers passed to request_irq() and free_irq() and the
vectors, use the following function:

If this function returns a positive number it indicates the number of
MSI interrupts that have been successfully allocated. In this case
the device is switched from pin-based interrupt mode to MSI mode and
updates dev->irq to be the lowest of the new interrupts assigned to it.
The other interrupts assigned to the device are in the range dev->irq
to dev->irq + returned value - 1. Device driver can use the returned
number of successfully allocated MSI interrupts to further allocate
and initialize device resources.
  int pci_irq_vector(struct pci_dev *dev, unsigned int nr);

If this function returns a negative number, it indicates an error and
the driver should not attempt to request any more MSI interrupts for
this device.
Any allocated resources should be freed before removing the device using
the following function:

This function should be called before the driver calls request_irq(),
because MSI interrupts are delivered via vectors that are different
from the vector of a pin-based interrupt.
  void pci_free_irq_vectors(struct pci_dev *dev);
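
For readers of this diff, a minimal sketch (not part of the patch; struct
foo_dev, foo_irq_handler and the other foo_* names are made up for
illustration) of how pci_alloc_irq_vectors(), pci_irq_vector() and
pci_free_irq_vectors() fit together might look like this:

  static int foo_setup_irqs(struct pci_dev *pdev, struct foo_dev *foo)
  {
	int i, err, nvec;

	/* Ask for up to 8 vectors, but accept as few as 1. */
	nvec = pci_alloc_irq_vectors(pdev, 1, 8, 0);
	if (nvec < 0)
		return nvec;

	for (i = 0; i < nvec; i++) {
		/* pci_irq_vector() maps a vector index to a Linux IRQ number. */
		err = request_irq(pci_irq_vector(pdev, i), foo_irq_handler,
				  0, "foo", foo);
		if (err)
			goto err_free;
	}
	return 0;

  err_free:
	while (--i >= 0)
		free_irq(pci_irq_vector(pdev, i), foo);
	pci_free_irq_vectors(pdev);
	return err;
  }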
|
||||
It is ideal if drivers can cope with a variable number of MSI interrupts;
|
||||
there are many reasons why the platform may not be able to provide the
|
||||
exact number that a driver asks for.
|
||||
If a device supports both MSI-X and MSI capabilities, this API will use the
|
||||
MSI-X facilities in preference to the MSI facilities. MSI-X supports any
|
||||
number of interrupts between 1 and 2048. In contrast, MSI is restricted to
|
||||
a maximum of 32 interrupts (and must be a power of two). In addition, the
|
||||
MSI interrupt vectors must be allocated consecutively, so the system might
|
||||
not be able to allocate as many vectors for MSI as it could for MSI-X. On
|
||||
some platforms, MSI interrupts must all be targeted at the same set of CPUs
|
||||
whereas MSI-X interrupts can all be targeted at different CPUs.
|
||||
|
||||
There could be devices that can not operate with just any number of MSI
|
||||
interrupts within a range. See chapter 4.3.1.3 to get the idea how to
|
||||
handle such devices for MSI-X - the same logic applies to MSI.
|
||||
If a device supports neither MSI-X or MSI it will fall back to a single
|
||||
legacy IRQ vector.
|
||||
|
||||
4.2.1.1 Maximum possible number of MSI interrupts
|
||||
The typical usage of MSI or MSI-X interrupts is to allocate as many vectors
|
||||
as possible, likely up to the limit supported by the device. If nvec is
|
||||
larger than the number supported by the device it will automatically be
|
||||
capped to the supported limit, so there is no need to query the number of
|
||||
vectors supported beforehand:
|
||||
|
||||
The typical usage of MSI interrupts is to allocate as many vectors as
|
||||
possible, likely up to the limit returned by pci_msi_vec_count() function:
|
||||
|
||||
static int foo_driver_enable_msi(struct pci_dev *pdev, int nvec)
|
||||
{
|
||||
return pci_enable_msi_range(pdev, 1, nvec);
|
||||
}
|
||||
|
||||
Note the value of 'minvec' parameter is 1. As 'minvec' is inclusive,
|
||||
the value of 0 would be meaningless and could result in error.
|
||||
|
||||
Some devices have a minimal limit on number of MSI interrupts.
|
||||
In this case the function could look like this:
|
||||
|
||||
static int foo_driver_enable_msi(struct pci_dev *pdev, int nvec)
|
||||
{
|
||||
return pci_enable_msi_range(pdev, FOO_DRIVER_MINIMUM_NVEC, nvec);
|
||||
}
|
||||
|
||||
4.2.1.2 Exact number of MSI interrupts
|
||||
nvec = pci_alloc_irq_vectors(pdev, 1, nvec, 0);
|
||||
if (nvec < 0)
|
||||
goto out_err;
|
||||
|
||||
If a driver is unable or unwilling to deal with a variable number of MSI
|
||||
interrupts it could request a particular number of interrupts by passing
|
||||
that number to pci_enable_msi_range() function as both 'minvec' and 'maxvec'
|
||||
parameters:
|
||||
interrupts it can request a particular number of interrupts by passing that
|
||||
number to pci_alloc_irq_vectors() function as both 'min_vecs' and
|
||||
'max_vecs' parameters:
|
||||
|
||||
static int foo_driver_enable_msi(struct pci_dev *pdev, int nvec)
|
||||
{
|
||||
return pci_enable_msi_range(pdev, nvec, nvec);
|
||||
}
|
||||
ret = pci_alloc_irq_vectors(pdev, nvec, nvec, 0);
|
||||
if (ret < 0)
|
||||
goto out_err;
|
||||
|
||||
Note, unlike pci_enable_msi_exact() function, which could be also used to
|
||||
enable a particular number of MSI-X interrupts, pci_enable_msi_range()
|
||||
returns either a negative errno or 'nvec' (not negative errno or 0 - as
|
||||
pci_enable_msi_exact() does).
|
||||
The most notorious example of the request type described above is enabling
|
||||
the single MSI mode for a device. It could be done by passing two 1s as
|
||||
'min_vecs' and 'max_vecs':
|
||||
|
||||
4.2.1.3 Single MSI mode
|
||||
ret = pci_alloc_irq_vectors(pdev, 1, 1, 0);
|
||||
if (ret < 0)
|
||||
goto out_err;
|
||||
|
||||
The most notorious example of the request type described above is
|
||||
enabling the single MSI mode for a device. It could be done by passing
|
||||
two 1s as 'minvec' and 'maxvec':
|
||||
Some devices might not support using legacy line interrupts, in which case
|
||||
the PCI_IRQ_NOLEGACY flag can be used to fail the request if the platform
|
||||
can't provide MSI or MSI-X interrupts:
|
||||
|
||||
static int foo_driver_enable_single_msi(struct pci_dev *pdev)
|
||||
{
|
||||
return pci_enable_msi_range(pdev, 1, 1);
|
||||
}
|
||||
nvec = pci_alloc_irq_vectors(pdev, 1, nvec, PCI_IRQ_NOLEGACY);
|
||||
if (nvec < 0)
|
||||
goto out_err;
|
||||
|
||||
Note, unlike pci_enable_msi() function, which could be also used to
|
||||
enable the single MSI mode, pci_enable_msi_range() returns either a
|
||||
negative errno or 1 (not negative errno or 0 - as pci_enable_msi()
|
||||
does).
|
||||
4.3 Legacy APIs
|
||||
|
||||
4.2.3 pci_enable_msi_exact
|
||||
The following old APIs to enable and disable MSI or MSI-X interrupts should
|
||||
not be used in new code:
|
||||
|
||||
int pci_enable_msi_exact(struct pci_dev *dev, int nvec)
|
||||
pci_enable_msi() /* deprecated */
|
||||
pci_enable_msi_range() /* deprecated */
|
||||
pci_enable_msi_exact() /* deprecated */
|
||||
pci_disable_msi() /* deprecated */
|
||||
pci_enable_msix_range() /* deprecated */
|
||||
pci_enable_msix_exact() /* deprecated */
|
||||
pci_disable_msix() /* deprecated */
|
||||
|
||||
This variation on pci_enable_msi_range() call allows a device driver to
|
||||
request exactly 'nvec' MSIs.
|
||||
Additionally there are APIs to provide the number of supported MSI or MSI-X
|
||||
vectors: pci_msi_vec_count() and pci_msix_vec_count(). In general these
|
||||
should be avoided in favor of letting pci_alloc_irq_vectors() cap the
|
||||
number of vectors. If you have a legitimate special use case for the count
|
||||
of vectors we might have to revisit that decision and add a
|
||||
pci_nr_irq_vectors() helper that handles MSI and MSI-X transparently.
|
||||
|
||||
If this function returns a negative number, it indicates an error and
|
||||
the driver should not attempt to request any more MSI interrupts for
|
||||
this device.
|
||||
4.4 Considerations when using MSIs
|
||||
|
||||
By contrast with pci_enable_msi_range() function, pci_enable_msi_exact()
|
||||
returns zero in case of success, which indicates MSI interrupts have been
|
||||
successfully allocated.
|
||||
|
||||
4.2.4 pci_disable_msi
|
||||
|
||||
void pci_disable_msi(struct pci_dev *dev)
|
||||
|
||||
This function should be used to undo the effect of pci_enable_msi_range().
|
||||
Calling it restores dev->irq to the pin-based interrupt number and frees
|
||||
the previously allocated MSIs. The interrupts may subsequently be assigned
|
||||
to another device, so drivers should not cache the value of dev->irq.
|
||||
|
||||
Before calling this function, a device driver must always call free_irq()
|
||||
on any interrupt for which it previously called request_irq().
|
||||
Failure to do so results in a BUG_ON(), leaving the device with
|
||||
MSI enabled and thus leaking its vector.
|
||||
|
||||
4.2.4 pci_msi_vec_count
|
||||
|
||||
int pci_msi_vec_count(struct pci_dev *dev)
|
||||
|
||||
This function could be used to retrieve the number of MSI vectors the
|
||||
device requested (via the Multiple Message Capable register). The MSI
|
||||
specification only allows the returned value to be a power of two,
|
||||
up to a maximum of 2^5 (32).
|
||||
|
||||
If this function returns a negative number, it indicates the device is
|
||||
not capable of sending MSIs.
|
||||
|
||||
If this function returns a positive number, it indicates the maximum
|
||||
number of MSI interrupt vectors that could be allocated.
|
||||
|
||||
4.3 Using MSI-X
|
||||
|
||||
The MSI-X capability is much more flexible than the MSI capability.
|
||||
It supports up to 2048 interrupts, each of which can be controlled
|
||||
independently. To support this flexibility, drivers must use an array of
|
||||
`struct msix_entry':
|
||||
|
||||
struct msix_entry {
|
||||
u16 vector; /* kernel uses to write alloc vector */
|
||||
u16 entry; /* driver uses to specify entry */
|
||||
};
|
||||
|
||||
This allows for the device to use these interrupts in a sparse fashion;
|
||||
for example, it could use interrupts 3 and 1027 and yet allocate only a
|
||||
two-element array. The driver is expected to fill in the 'entry' value
|
||||
in each element of the array to indicate for which entries the kernel
|
||||
should assign interrupts; it is invalid to fill in two entries with the
|
||||
same number.
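
A short illustrative fragment (not part of the patch; the foo_* names and
local variables are assumed to exist in the driver) showing how the 'entry'
and 'vector' members work together with the sparse allocation described
above:

	/* Ask only for MSI-X table entries 3 and 1027, as in the example. */
	adapter->msix_entries[0].entry = 3;
	adapter->msix_entries[1].entry = 1027;

	nvec = pci_enable_msix_range(adapter->pdev, adapter->msix_entries, 2, 2);
	if (nvec < 0)
		return nvec;

	/* On success the kernel has filled in the 'vector' members. */
	for (i = 0; i < nvec; i++) {
		err = request_irq(adapter->msix_entries[i].vector,
				  foo_irq_handler, 0, "foo", adapter);
		if (err)
			return err;
	}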
|
||||
|
||||
4.3.1 pci_enable_msix_range
|
||||
|
||||
int pci_enable_msix_range(struct pci_dev *dev, struct msix_entry *entries,
|
||||
int minvec, int maxvec)
|
||||
|
||||
Calling this function asks the PCI subsystem to allocate any number of
|
||||
MSI-X interrupts within specified range from 'minvec' to 'maxvec'.
|
||||
The 'entries' argument is a pointer to an array of msix_entry structs
|
||||
which should be at least 'maxvec' entries in size.
|
||||
|
||||
On success, the device is switched into MSI-X mode and the function
|
||||
returns the number of MSI-X interrupts that have been successfully
|
||||
allocated. In this case the 'vector' member in entries numbered from
|
||||
0 to the returned value - 1 is populated with the interrupt number;
|
||||
the driver should then call request_irq() for each 'vector' that it
|
||||
decides to use. The device driver is responsible for keeping track of the
|
||||
interrupts assigned to the MSI-X vectors so it can free them again later.
|
||||
Device driver can use the returned number of successfully allocated MSI-X
|
||||
interrupts to further allocate and initialize device resources.
|
||||
|
||||
If this function returns a negative number, it indicates an error and
|
||||
the driver should not attempt to allocate any more MSI-X interrupts for
|
||||
this device.
|
||||
|
||||
This function, in contrast with pci_enable_msi_range(), does not adjust
|
||||
dev->irq. The device will not generate interrupts for this interrupt
|
||||
number once MSI-X is enabled.
|
||||
|
||||
Device drivers should normally call this function once per device
|
||||
during the initialization phase.
|
||||
|
||||
It is ideal if drivers can cope with a variable number of MSI-X interrupts;
|
||||
there are many reasons why the platform may not be able to provide the
|
||||
exact number that a driver asks for.
|
||||
|
||||
There could be devices that can not operate with just any number of MSI-X
|
||||
interrupts within a range. E.g., an network adapter might need let's say
|
||||
four vectors per each queue it provides. Therefore, a number of MSI-X
|
||||
interrupts allocated should be a multiple of four. In this case interface
|
||||
pci_enable_msix_range() can not be used alone to request MSI-X interrupts
|
||||
(since it can allocate any number within the range, without any notion of
|
||||
the multiple of four) and the device driver should master a custom logic
|
||||
to request the required number of MSI-X interrupts.
|
||||
|
||||
4.3.1.1 Maximum possible number of MSI-X interrupts
|
||||
|
||||
The typical usage of MSI-X interrupts is to allocate as many vectors as
|
||||
possible, likely up to the limit returned by pci_msix_vec_count() function:
|
||||
|
||||
static int foo_driver_enable_msix(struct foo_adapter *adapter, int nvec)
|
||||
{
|
||||
return pci_enable_msix_range(adapter->pdev, adapter->msix_entries,
|
||||
1, nvec);
|
||||
}
|
||||
|
||||
Note the value of 'minvec' parameter is 1. As 'minvec' is inclusive,
|
||||
the value of 0 would be meaningless and could result in error.
|
||||
|
||||
Some devices have a minimal limit on number of MSI-X interrupts.
|
||||
In this case the function could look like this:
|
||||
|
||||
static int foo_driver_enable_msix(struct foo_adapter *adapter, int nvec)
|
||||
{
|
||||
return pci_enable_msix_range(adapter->pdev, adapter->msix_entries,
|
||||
FOO_DRIVER_MINIMUM_NVEC, nvec);
|
||||
}
|
||||
|
||||
4.3.1.2 Exact number of MSI-X interrupts
|
||||
|
||||
If a driver is unable or unwilling to deal with a variable number of MSI-X
|
||||
interrupts it could request a particular number of interrupts by passing
|
||||
that number to pci_enable_msix_range() function as both 'minvec' and 'maxvec'
|
||||
parameters:
|
||||
|
||||
static int foo_driver_enable_msix(struct foo_adapter *adapter, int nvec)
|
||||
{
|
||||
return pci_enable_msix_range(adapter->pdev, adapter->msix_entries,
|
||||
nvec, nvec);
|
||||
}
|
||||
|
||||
Note, unlike pci_enable_msix_exact() function, which could be also used to
|
||||
enable a particular number of MSI-X interrupts, pci_enable_msix_range()
|
||||
returns either a negative errno or 'nvec' (not negative errno or 0 - as
|
||||
pci_enable_msix_exact() does).
|
||||
|
||||
4.3.1.3 Specific requirements to the number of MSI-X interrupts
|
||||
|
||||
As noted above, there could be devices that can not operate with just any
|
||||
number of MSI-X interrupts within a range. E.g., let's assume a device that
|
||||
is only capable sending the number of MSI-X interrupts which is a power of
|
||||
two. A routine that enables MSI-X mode for such device might look like this:
|
||||
|
||||
/*
|
||||
* Assume 'minvec' and 'maxvec' are non-zero
|
||||
*/
|
||||
static int foo_driver_enable_msix(struct foo_adapter *adapter,
|
||||
int minvec, int maxvec)
|
||||
{
|
||||
int rc;
|
||||
|
||||
minvec = roundup_pow_of_two(minvec);
|
||||
maxvec = rounddown_pow_of_two(maxvec);
|
||||
|
||||
if (minvec > maxvec)
|
||||
return -ERANGE;
|
||||
|
||||
retry:
|
||||
rc = pci_enable_msix_range(adapter->pdev, adapter->msix_entries,
|
||||
maxvec, maxvec);
|
||||
/*
|
||||
* -ENOSPC is the only error code allowed to be analyzed
|
||||
*/
|
||||
if (rc == -ENOSPC) {
|
||||
if (maxvec == 1)
|
||||
return -ENOSPC;
|
||||
|
||||
maxvec /= 2;
|
||||
|
||||
if (minvec > maxvec)
|
||||
return -ENOSPC;
|
||||
|
||||
goto retry;
|
||||
}
|
||||
|
||||
return rc;
|
||||
}
|
||||
|
||||
Note how pci_enable_msix_range() return value is analyzed for a fallback -
|
||||
any error code other than -ENOSPC indicates a fatal error and should not
|
||||
be retried.
|
||||
|
||||
4.3.2 pci_enable_msix_exact
|
||||
|
||||
int pci_enable_msix_exact(struct pci_dev *dev,
|
||||
struct msix_entry *entries, int nvec)
|
||||
|
||||
This variation on pci_enable_msix_range() call allows a device driver to
|
||||
request exactly 'nvec' MSI-Xs.
|
||||
|
||||
If this function returns a negative number, it indicates an error and
|
||||
the driver should not attempt to allocate any more MSI-X interrupts for
|
||||
this device.
|
||||
|
||||
By contrast with pci_enable_msix_range() function, pci_enable_msix_exact()
|
||||
returns zero in case of success, which indicates MSI-X interrupts have been
|
||||
successfully allocated.
|
||||
|
||||
Another version of a routine that enables MSI-X mode for a device with
|
||||
specific requirements described in chapter 4.3.1.3 might look like this:
|
||||
|
||||
/*
|
||||
* Assume 'minvec' and 'maxvec' are non-zero
|
||||
*/
|
||||
static int foo_driver_enable_msix(struct foo_adapter *adapter,
|
||||
int minvec, int maxvec)
|
||||
{
|
||||
int rc;
|
||||
|
||||
minvec = roundup_pow_of_two(minvec);
|
||||
maxvec = rounddown_pow_of_two(maxvec);
|
||||
|
||||
if (minvec > maxvec)
|
||||
return -ERANGE;
|
||||
|
||||
retry:
|
||||
rc = pci_enable_msix_exact(adapter->pdev,
|
||||
adapter->msix_entries, maxvec);
|
||||
|
||||
/*
|
||||
* -ENOSPC is the only error code allowed to be analyzed
|
||||
*/
|
||||
if (rc == -ENOSPC) {
|
||||
if (maxvec == 1)
|
||||
return -ENOSPC;
|
||||
|
||||
maxvec /= 2;
|
||||
|
||||
if (minvec > maxvec)
|
||||
return -ENOSPC;
|
||||
|
||||
goto retry;
|
||||
} else if (rc < 0) {
|
||||
return rc;
|
||||
}
|
||||
|
||||
return maxvec;
|
||||
}
|
||||
|
||||
4.3.3 pci_disable_msix
|
||||
|
||||
void pci_disable_msix(struct pci_dev *dev)
|
||||
|
||||
This function should be used to undo the effect of pci_enable_msix_range().
|
||||
It frees the previously allocated MSI-X interrupts. The interrupts may
|
||||
subsequently be assigned to another device, so drivers should not cache
|
||||
the value of the 'vector' elements over a call to pci_disable_msix().
|
||||
|
||||
Before calling this function, a device driver must always call free_irq()
|
||||
on any interrupt for which it previously called request_irq().
|
||||
Failure to do so results in a BUG_ON(), leaving the device with
|
||||
MSI-X enabled and thus leaking its vector.
|
||||
|
||||
4.3.3 The MSI-X Table
|
||||
|
||||
The MSI-X capability specifies a BAR and offset within that BAR for the
|
||||
MSI-X Table. This address is mapped by the PCI subsystem, and should not
|
||||
be accessed directly by the device driver. If the driver wishes to
|
||||
mask or unmask an interrupt, it should call disable_irq() / enable_irq().
|
||||
|
||||
4.3.4 pci_msix_vec_count
|
||||
|
||||
int pci_msix_vec_count(struct pci_dev *dev)
|
||||
|
||||
This function could be used to retrieve number of entries in the device
|
||||
MSI-X table.
|
||||
|
||||
If this function returns a negative number, it indicates the device is
|
||||
not capable of sending MSI-Xs.
|
||||
|
||||
If this function returns a positive number, it indicates the maximum
|
||||
number of MSI-X interrupt vectors that could be allocated.
|
||||
|
||||
4.4 Handling devices implementing both MSI and MSI-X capabilities
|
||||
|
||||
If a device implements both MSI and MSI-X capabilities, it can
|
||||
run in either MSI mode or MSI-X mode, but not both simultaneously.
|
||||
This is a requirement of the PCI spec, and it is enforced by the
|
||||
PCI layer. Calling pci_enable_msi_range() when MSI-X is already
|
||||
enabled or pci_enable_msix_range() when MSI is already enabled
|
||||
results in an error. If a device driver wishes to switch between MSI
|
||||
and MSI-X at runtime, it must first quiesce the device, then switch
|
||||
it back to pin-interrupt mode, before calling pci_enable_msi_range()
|
||||
or pci_enable_msix_range() and resuming operation. This is not expected
|
||||
to be a common operation but may be useful for debugging or testing
|
||||
during development.
|
||||
|
||||
4.5 Considerations when using MSIs
|
||||
|
||||
4.5.1 Choosing between MSI-X and MSI
|
||||
|
||||
If your device supports both MSI-X and MSI capabilities, you should use
|
||||
the MSI-X facilities in preference to the MSI facilities. As mentioned
|
||||
above, MSI-X supports any number of interrupts between 1 and 2048.
|
||||
In contrast, MSI is restricted to a maximum of 32 interrupts (and
|
||||
must be a power of two). In addition, the MSI interrupt vectors must
|
||||
be allocated consecutively, so the system might not be able to allocate
|
||||
as many vectors for MSI as it could for MSI-X. On some platforms, MSI
|
||||
interrupts must all be targeted at the same set of CPUs whereas MSI-X
|
||||
interrupts can all be targeted at different CPUs.
|
||||
|
||||
4.5.2 Spinlocks
|
||||
4.4.1 Spinlocks
|
||||
|
||||
Most device drivers have a per-device spinlock which is taken in the
|
||||
interrupt handler. With pin-based interrupts or a single MSI, it is not
|
||||
|
@ -505,7 +194,7 @@ acquire the spinlock. Such deadlocks can be avoided by using
|
|||
spin_lock_irqsave() or spin_lock_irq() which disable local interrupts
|
||||
and acquire the lock (see Documentation/DocBook/kernel-locking).
|
||||
|
||||
4.6 How to tell whether MSI/MSI-X is enabled on a device
|
||||
4.5 How to tell whether MSI/MSI-X is enabled on a device
|
||||
|
||||
Using 'lspci -v' (as root) may show some devices with "MSI", "Message
|
||||
Signalled Interrupts" or "MSI-X" capabilities. Each of these capabilities
|
||||
|
|
|
@ -38,6 +38,15 @@ as a regular user, and install it with

        sudo make install

Supplemental documentation
~~~~~~~~~~~~~~~~~~~~~~~~~~~~

For supplemental documentation refer to the wiki:

    https://bottest.wiki.kernel.org/coccicheck

The wiki documentation always refers to the linux-next version of the script.

Using Coccinelle on the Linux kernel
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
@ -94,11 +103,26 @@ To enable verbose messages set the V= variable, for example:

    make coccicheck MODE=report V=1

Coccinelle parallelization
~~~~~~~~~~~~~~~~~~~~~~~~~~~~

By default, coccicheck tries to run as parallel as possible. To change
the parallelism, set the J= variable. For example, to run across 4 CPUs:

    make coccicheck MODE=report J=4

As of Coccinelle 1.0.2 Coccinelle uses Ocaml parmap for parallelization,
if support for this is detected you will benefit from parmap parallelization.

When parmap is enabled coccicheck will enable dynamic load balancing by using
'--chunksize 1' argument, this ensures we keep feeding threads with work
one by one, so that we avoid the situation where most work gets done by only
a few threads. With dynamic load balancing, if a thread finishes early we keep
feeding it more work.

When parmap is enabled, if an error occurs in Coccinelle, this error
value is propagated back, the return value of the 'make coccicheck'
captures this return value.

Using Coccinelle with a single semantic patch
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
@ -142,15 +166,118 @@ semantic patch as shown in the previous section.
|
|||
The "report" mode is the default. You can select another one with the
|
||||
MODE variable explained above.
|
||||
|
||||
Debugging Coccinelle SmPL patches
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
Using coccicheck is best as it provides in the spatch command line
|
||||
include options matching the options used when we compile the kernel.
|
||||
You can learn what these options are by using V=1, you could then
|
||||
manually run Coccinelle with debug options added.
|
||||
|
||||
Alternatively you can debug running Coccinelle against SmPL patches
|
||||
by asking for stderr to be redirected to stderr, by default stderr
|
||||
is redirected to /dev/null, if you'd like to capture stderr you
|
||||
can specify the DEBUG_FILE="file.txt" option to coccicheck. For
|
||||
instance:
|
||||
|
||||
rm -f cocci.err
|
||||
make coccicheck COCCI=scripts/coccinelle/free/kfree.cocci MODE=report DEBUG_FILE=cocci.err
|
||||
cat cocci.err
|
||||
|
||||
You can use SPFLAGS to add debugging flags, for instance you may want to
|
||||
add both --profile --show-trying to SPFLAGS when debugging. For instance
|
||||
you may want to use:
|
||||
|
||||
rm -f err.log
|
||||
export COCCI=scripts/coccinelle/misc/irqf_oneshot.cocci
|
||||
make coccicheck DEBUG_FILE="err.log" MODE=report SPFLAGS="--profile --show-trying" M=./drivers/mfd/arizona-irq.c
|
||||
|
||||
err.log will now have the profiling information, while stdout will
|
||||
provide some progress information as Coccinelle moves forward with
|
||||
work.
|
||||
|
||||
DEBUG_FILE support is only supported when using coccinelle >= 1.2.
|
||||
|
||||
.cocciconfig support
|
||||
~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
Coccinelle supports reading .cocciconfig for default Coccinelle options that
|
||||
should be used every time spatch is spawned, the order of precedence for
|
||||
variables for .cocciconfig is as follows:
|
||||
|
||||
o Your current user's home directory is processed first
|
||||
o Your directory from which spatch is called is processed next
|
||||
o The directory provided with the --dir option is processed last, if used
|
||||
|
||||
Since coccicheck runs through make, it naturally runs from the kernel
|
||||
proper dir, as such the second rule above would be implied for picking up a
|
||||
.cocciconfig when using 'make coccicheck'.
|
||||
|
||||
'make coccicheck' also supports using M= targets.If you do not supply
|
||||
any M= target, it is assumed you want to target the entire kernel.
|
||||
The kernel coccicheck script has:
|
||||
|
||||
if [ "$KBUILD_EXTMOD" = "" ] ; then
|
||||
OPTIONS="--dir $srctree $COCCIINCLUDE"
|
||||
else
|
||||
OPTIONS="--dir $KBUILD_EXTMOD $COCCIINCLUDE"
|
||||
fi
|
||||
|
||||
KBUILD_EXTMOD is set when an explicit target with M= is used. For both cases
|
||||
the spatch --dir argument is used, as such third rule applies when whether M=
|
||||
is used or not, and when M= is used the target directory can have its own
|
||||
.cocciconfig file. When M= is not passed as an argument to coccicheck the
|
||||
target directory is the same as the directory from where spatch was called.
|
||||
|
||||
If not using the kernel's coccicheck target, keep the above precedence
|
||||
order logic of .cocciconfig reading. If using the kernel's coccicheck target,
|
||||
override any of the kernel's .coccicheck's settings using SPFLAGS.
|
||||
|
||||
We help Coccinelle when used against Linux with a set of sensible defaults
|
||||
options for Linux with our own Linux .cocciconfig. This hints to coccinelle
|
||||
git can be used for 'git grep' queries over coccigrep. A timeout of 200
|
||||
seconds should suffice for now.
|
||||
|
||||
The options picked up by coccinelle when reading a .cocciconfig do not appear
|
||||
as arguments to spatch processes running on your system, to confirm what
|
||||
options will be used by Coccinelle run:
|
||||
|
||||
spatch --print-options-only
|
||||
|
||||
You can override with your own preferred index option by using SPFLAGS. Take
|
||||
note that when there are conflicting options Coccinelle takes precedence for
|
||||
the last options passed. Using .cocciconfig is possible to use idutils, however
|
||||
given the order of precedence followed by Coccinelle, since the kernel now
|
||||
carries its own .cocciconfig, you will need to use SPFLAGS to use idutils if
|
||||
desired. See below section "Additional flags" for more details on how to use
|
||||
idutils.
|
||||
|
||||
Additional flags
|
||||
~~~~~~~~~~~~~~~~~~
|
||||
|
||||
Additional flags can be passed to spatch through the SPFLAGS
|
||||
variable.
|
||||
variable. This works as Coccinelle respects the last flags
|
||||
given to it when options are in conflict.
|
||||
|
||||
make SPFLAGS=--use-glimpse coccicheck
|
||||
|
||||
Coccinelle supports idutils as well but requires coccinelle >= 1.0.6.
|
||||
When no ID file is specified coccinelle assumes your ID database file
|
||||
is in the file .id-utils.index on the top level of the kernel, coccinelle
|
||||
carries a script scripts/idutils_index.sh which creates the database with
|
||||
|
||||
mkid -i C --output .id-utils.index
|
||||
|
||||
If you have another database filename you can also just symlink with this
|
||||
name.
|
||||
|
||||
make SPFLAGS=--use-idutils coccicheck
|
||||
|
||||
Alternatively you can specify the database filename explicitly, for
|
||||
instance:
|
||||
|
||||
make SPFLAGS="--use-idutils /full-path/to/ID" coccicheck
|
||||
|
||||
See spatch --help to learn more about spatch options.
|
||||
|
||||
Note that the '--use-glimpse' and '--use-idutils' options
|
||||
|
@ -159,6 +286,25 @@ thus active by default. However, by indexing the code with
|
|||
one of these tools, and according to the cocci file used,
|
||||
spatch could proceed the entire code base more quickly.
|
||||
|
||||
SmPL patch specific options
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
SmPL patches can have their own requirements for options passed
|
||||
to Coccinelle. SmPL patch specific options can be provided by
|
||||
providing them at the top of the SmPL patch, for instance:
|
||||
|
||||
// Options: --no-includes --include-headers
|
||||
|
||||
SmPL patch Coccinelle requirements
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
As Coccinelle features get added some more advanced SmPL patches
|
||||
may require newer versions of Coccinelle. If an SmPL patch requires
|
||||
at least a version of Coccinelle, this can be specified as follows,
|
||||
as an example if requiring at least Coccinelle >= 1.0.5:
|
||||
|
||||
// Requires: 1.0.5
|
||||
|
||||
Proposing new semantic patches
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
|
|
|
@ -46,6 +46,10 @@ Required properties:
		   0 maps to GPMC_WAIT0 pin.
- gpio-cells: Must be set to 2

Required properties when using NAND prefetch dma:
- dmas			GPMC NAND prefetch dma channel
- dma-names		Must be set to "rxtx"

Timing properties for child nodes. All are optional and default to 0.

- gpmc,sync-clk-ps:	Minimum clock period for synchronous mode, in picoseconds
|
@ -137,7 +141,8 @@ Example for an AM33xx board:
|
|||
ti,hwmods = "gpmc";
|
||||
reg = <0x50000000 0x2000>;
|
||||
interrupts = <100>;
|
||||
|
||||
dmas = <&edma 52 0>;
|
||||
dma-names = "rxtx";
|
||||
gpmc,num-cs = <8>;
|
||||
gpmc,num-waitpins = <2>;
|
||||
#address-cells = <2>;
|
||||
|
|
|
@ -0,0 +1,32 @@
|
|||
* Atmel Quad Serial Peripheral Interface (QSPI)
|
||||
|
||||
Required properties:
|
||||
- compatible: Should be "atmel,sama5d2-qspi".
|
||||
- reg: Should contain the locations and lengths of the base registers
|
||||
and the mapped memory.
|
||||
- reg-names: Should contain the resource reg names:
|
||||
- qspi_base: configuration register address space
|
||||
- qspi_mmap: memory mapped address space
|
||||
- interrupts: Should contain the interrupt for the device.
|
||||
- clocks: The phandle of the clock needed by the QSPI controller.
|
||||
- #address-cells: Should be <1>.
|
||||
- #size-cells: Should be <0>.
|
||||
|
||||
Example:
|
||||
|
||||
spi@f0020000 {
|
||||
compatible = "atmel,sama5d2-qspi";
|
||||
reg = <0xf0020000 0x100>, <0xd0000000 0x8000000>;
|
||||
reg-names = "qspi_base", "qspi_mmap";
|
||||
interrupts = <52 IRQ_TYPE_LEVEL_HIGH 7>;
|
||||
clocks = <&spi0_clk>;
|
||||
#address-cells = <1>;
|
||||
#size-cells = <0>;
|
||||
pinctrl-names = "default";
|
||||
pinctrl-0 = <&pinctrl_spi0_default>;
|
||||
status = "okay";
|
||||
|
||||
m25p80@0 {
|
||||
...
|
||||
};
|
||||
};
|
|
@ -27,6 +27,7 @@ Required properties:
|
|||
brcm,brcmnand-v6.2
|
||||
brcm,brcmnand-v7.0
|
||||
brcm,brcmnand-v7.1
|
||||
brcm,brcmnand-v7.2
|
||||
brcm,brcmnand
|
||||
- reg : the register start and length for NAND register region.
|
||||
(optional) Flash DMA register range (if present)
|
||||
|
|
|
@ -0,0 +1,56 @@
|
|||
* Cadence Quad SPI controller
|
||||
|
||||
Required properties:
|
||||
- compatible : Should be "cdns,qspi-nor".
|
||||
- reg : Contains two entries, each of which is a tuple consisting of a
|
||||
physical address and length. The first entry is the address and
|
||||
length of the controller register set. The second entry is the
|
||||
address and length of the QSPI Controller data area.
|
||||
- interrupts : Unit interrupt specifier for the controller interrupt.
|
||||
- clocks : phandle to the Quad SPI clock.
|
||||
- cdns,fifo-depth : Size of the data FIFO in words.
|
||||
- cdns,fifo-width : Bus width of the data FIFO in bytes.
|
||||
- cdns,trigger-address : 32-bit indirect AHB trigger address.
|
||||
|
||||
Optional properties:
|
||||
- cdns,is-decoded-cs : Flag to indicate whether decoder is used or not.
|
||||
|
||||
Optional subnodes:
|
||||
Subnodes of the Cadence Quad SPI controller are spi slave nodes with additional
|
||||
custom properties:
|
||||
- cdns,read-delay : Delay for read capture logic, in clock cycles
|
||||
- cdns,tshsl-ns : Delay in nanoseconds for the length that the master
|
||||
mode chip select outputs are de-asserted between
|
||||
transactions.
|
||||
- cdns,tsd2d-ns : Delay in nanoseconds between one chip select being
|
||||
de-activated and the activation of another.
|
||||
- cdns,tchsh-ns : Delay in nanoseconds between last bit of current
|
||||
transaction and deasserting the device chip select
|
||||
(qspi_n_ss_out).
|
||||
- cdns,tslch-ns : Delay in nanoseconds between setting qspi_n_ss_out low
|
||||
and first bit transfer.
|
||||
|
||||
Example:
|
||||
|
||||
qspi: spi@ff705000 {
|
||||
compatible = "cdns,qspi-nor";
|
||||
#address-cells = <1>;
|
||||
#size-cells = <0>;
|
||||
reg = <0xff705000 0x1000>,
|
||||
<0xffa00000 0x1000>;
|
||||
interrupts = <0 151 4>;
|
||||
clocks = <&qspi_clk>;
|
||||
cdns,is-decoded-cs;
|
||||
cdns,fifo-depth = <128>;
|
||||
cdns,fifo-width = <4>;
|
||||
cdns,trigger-address = <0x00000000>;
|
||||
|
||||
flash0: n25q00@0 {
|
||||
...
|
||||
cdns,read-delay = <4>;
|
||||
cdns,tshsl-ns = <50>;
|
||||
cdns,tsd2d-ns = <50>;
|
||||
cdns,tchsh-ns = <4>;
|
||||
cdns,tslch-ns = <4>;
|
||||
};
|
||||
};
|
|
@ -39,7 +39,7 @@ Optional properties:

	"prefetch-polled"	Prefetch polled mode (default)
	"polled"		Polled mode, without prefetch
	"prefetch-dma"		Prefetch enabled sDMA mode
	"prefetch-dma"		Prefetch enabled DMA mode
	"prefetch-irq"		Prefetch enabled irq mode

 - elm_id: <deprecated> use "ti,elm-id" instead
|
@ -0,0 +1,24 @@
|
|||
HiSilicon SPI-NOR Flash Controller
|
||||
|
||||
Required properties:
|
||||
- compatible : Should be "hisilicon,fmc-spi-nor" and one of the following strings:
|
||||
"hisilicon,hi3519-spi-nor"
|
||||
- address-cells : Should be 1.
|
||||
- size-cells : Should be 0.
|
||||
- reg : Offset and length of the register set for the controller device.
|
||||
- reg-names : Must include the following two entries: "control", "memory".
|
||||
- clocks : handle to spi-nor flash controller clock.
|
||||
|
||||
Example:
|
||||
spi-nor-controller@10000000 {
|
||||
compatible = "hisilicon,hi3519-spi-nor", "hisilicon,fmc-spi-nor";
|
||||
#address-cells = <1>;
|
||||
#size-cells = <0>;
|
||||
reg = <0x10000000 0x1000>, <0x14000000 0x1000000>;
|
||||
reg-names = "control", "memory";
|
||||
clocks = <&clock HI3519_FMC_CLK>;
|
||||
spi-nor@0 {
|
||||
compatible = "jedec,spi-nor";
|
||||
reg = <0>;
|
||||
};
|
||||
};
|
|
@ -0,0 +1,160 @@
|
|||
MTK SoCs NAND FLASH controller (NFC) DT binding
|
||||
|
||||
This file documents the device tree bindings for MTK SoCs NAND controllers.
|
||||
The functional split of the controller requires two drivers to operate:
|
||||
the nand controller interface driver and the ECC engine driver.
|
||||
|
||||
The hardware description for both devices must be captured as device
|
||||
tree nodes.
|
||||
|
||||
1) NFC NAND Controller Interface (NFI):
|
||||
=======================================
|
||||
|
||||
The first part of NFC is NAND Controller Interface (NFI) HW.
|
||||
Required NFI properties:
|
||||
- compatible: Should be "mediatek,mtxxxx-nfc".
|
||||
- reg: Base physical address and size of NFI.
|
||||
- interrupts: Interrupts of NFI.
|
||||
- clocks: NFI required clocks.
|
||||
- clock-names: NFI clocks internal name.
|
||||
- status: Disabled default. Then set "okay" by platform.
|
||||
- ecc-engine: Required ECC Engine node.
|
||||
- #address-cells: NAND chip index, should be 1.
|
||||
- #size-cells: Should be 0.
|
||||
|
||||
Example:
|
||||
|
||||
nandc: nfi@1100d000 {
|
||||
compatible = "mediatek,mt2701-nfc";
|
||||
reg = <0 0x1100d000 0 0x1000>;
|
||||
interrupts = <GIC_SPI 56 IRQ_TYPE_LEVEL_LOW>;
|
||||
clocks = <&pericfg CLK_PERI_NFI>,
|
||||
<&pericfg CLK_PERI_NFI_PAD>;
|
||||
clock-names = "nfi_clk", "pad_clk";
|
||||
status = "disabled";
|
||||
ecc-engine = <&bch>;
|
||||
#address-cells = <1>;
|
||||
#size-cells = <0>;
|
||||
};
|
||||
|
||||
Platform related properties, should be set in {platform_name}.dts:
|
||||
- children nodes: NAND chips.
|
||||
|
||||
Children nodes properties:
|
||||
- reg: Chip Select Signal, default 0.
|
||||
Set as reg = <0>, <1> when need 2 CS.
|
||||
Optional:
|
||||
- nand-on-flash-bbt: Store BBT on NAND Flash.
|
||||
- nand-ecc-mode: the NAND ecc mode (check driver for supported modes)
|
||||
- nand-ecc-step-size: Number of data bytes covered by a single ECC step.
|
||||
valid values: 512 and 1024.
|
||||
1024 is recommended for large page NANDs.
|
||||
- nand-ecc-strength: Number of bits to correct per ECC step.
|
||||
The valid values that the controller supports are: 4, 6,
|
||||
8, 10, 12, 14, 16, 18, 20, 22, 24, 28, 32, 36, 40, 44,
|
||||
48, 52, 56, 60.
|
||||
The strength should be calculated as follows:
|
||||
E = (S - F) * 8 / 14
|
||||
S = O / (P / Q)
|
||||
E : nand-ecc-strength.
|
||||
S : spare size per sector.
|
||||
F : FDM size, should be in the range [1,8].
|
||||
It is used to store free oob data.
|
||||
O : oob size.
|
||||
P : page size.
|
||||
Q : nand-ecc-step-size.
|
||||
If the result does not match any one of the listed
|
||||
choices above, please select the smaller valid value from
|
||||
the list.
|
||||
(otherwise the driver will do the adjustment at runtime)
|
||||
- pinctrl-names: Default NAND pin GPIO setting name.
|
||||
- pinctrl-0: GPIO setting node.
|
||||
|
||||
Example:
|
||||
&pio {
|
||||
nand_pins_default: nanddefault {
|
||||
pins_dat {
|
||||
pinmux = <MT2701_PIN_111_MSDC0_DAT7__FUNC_NLD7>,
|
||||
<MT2701_PIN_112_MSDC0_DAT6__FUNC_NLD6>,
|
||||
<MT2701_PIN_114_MSDC0_DAT4__FUNC_NLD4>,
|
||||
<MT2701_PIN_118_MSDC0_DAT3__FUNC_NLD3>,
|
||||
<MT2701_PIN_121_MSDC0_DAT0__FUNC_NLD0>,
|
||||
<MT2701_PIN_120_MSDC0_DAT1__FUNC_NLD1>,
|
||||
<MT2701_PIN_113_MSDC0_DAT5__FUNC_NLD5>,
|
||||
<MT2701_PIN_115_MSDC0_RSTB__FUNC_NLD8>,
|
||||
<MT2701_PIN_119_MSDC0_DAT2__FUNC_NLD2>;
|
||||
input-enable;
|
||||
drive-strength = <MTK_DRIVE_8mA>;
|
||||
bias-pull-up;
|
||||
};
|
||||
|
||||
pins_we {
|
||||
pinmux = <MT2701_PIN_117_MSDC0_CLK__FUNC_NWEB>;
|
||||
drive-strength = <MTK_DRIVE_8mA>;
|
||||
bias-pull-up = <MTK_PUPD_SET_R1R0_10>;
|
||||
};
|
||||
|
||||
pins_ale {
|
||||
pinmux = <MT2701_PIN_116_MSDC0_CMD__FUNC_NALE>;
|
||||
drive-strength = <MTK_DRIVE_8mA>;
|
||||
bias-pull-down = <MTK_PUPD_SET_R1R0_10>;
|
||||
};
|
||||
};
|
||||
};
|
||||
|
||||
&nandc {
|
||||
status = "okay";
|
||||
pinctrl-names = "default";
|
||||
pinctrl-0 = <&nand_pins_default>;
|
||||
nand@0 {
|
||||
reg = <0>;
|
||||
nand-on-flash-bbt;
|
||||
nand-ecc-mode = "hw";
|
||||
nand-ecc-strength = <24>;
|
||||
nand-ecc-step-size = <1024>;
|
||||
};
|
||||
};
|
||||
|
||||
NAND chip optional subnodes:
|
||||
- Partitions, see Documentation/devicetree/bindings/mtd/partition.txt
|
||||
|
||||
Example:
|
||||
nand@0 {
|
||||
partitions {
|
||||
compatible = "fixed-partitions";
|
||||
#address-cells = <1>;
|
||||
#size-cells = <1>;
|
||||
|
||||
preloader@0 {
|
||||
label = "pl";
|
||||
read-only;
|
||||
reg = <0x00000000 0x00400000>;
|
||||
};
|
||||
android@0x00400000 {
|
||||
label = "android";
|
||||
reg = <0x00400000 0x12c00000>;
|
||||
};
|
||||
};
|
||||
};
|
||||
|
||||
2) ECC Engine:
|
||||
==============
|
||||
|
||||
Required BCH properties:
|
||||
- compatible: Should be "mediatek,mtxxxx-ecc".
|
||||
- reg: Base physical address and size of ECC.
|
||||
- interrupts: Interrupts of ECC.
|
||||
- clocks: ECC required clocks.
|
||||
- clock-names: ECC clocks internal name.
|
||||
- status: Disabled default. Then set "okay" by platform.
|
||||
|
||||
Example:
|
||||
|
||||
bch: ecc@1100e000 {
|
||||
compatible = "mediatek,mt2701-ecc";
|
||||
reg = <0 0x1100e000 0 0x1000>;
|
||||
interrupts = <GIC_SPI 55 IRQ_TYPE_LEVEL_LOW>;
|
||||
clocks = <&pericfg CLK_PERI_NFI_ECC>;
|
||||
clock-names = "nfiecc_clk";
|
||||
status = "disabled";
|
||||
};
|
|
@ -11,10 +11,16 @@ Required properties:
|
|||
* "ahb" : AHB gating clock
|
||||
* "mod" : nand controller clock
|
||||
|
||||
Optional properties:
|
||||
- dmas : shall reference DMA channel associated to the NAND controller.
|
||||
- dma-names : shall be "rxtx".
|
||||
|
||||
Optional children nodes:
|
||||
Children nodes represent the available nand chips.
|
||||
|
||||
Optional properties:
|
||||
- reset : phandle + reset specifier pair
|
||||
- reset-names : must contain "ahb"
|
||||
- allwinner,rb : shall contain the native Ready/Busy ids.
|
||||
or
|
||||
- rb-gpios : shall contain the gpios used as R/B pins.
|
||||
|
|
|
@ -0,0 +1,56 @@
|
|||
Aardvark PCIe controller
|
||||
|
||||
This PCIe controller is used on the Marvell Armada 3700 ARM64 SoC.
|
||||
|
||||
The Device Tree node describing an Aardvark PCIe controller must
|
||||
contain the following properties:
|
||||
|
||||
- compatible: Should be "marvell,armada-3700-pcie"
|
||||
- reg: range of registers for the PCIe controller
|
||||
- interrupts: the interrupt line of the PCIe controller
|
||||
- #address-cells: set to <3>
|
||||
- #size-cells: set to <2>
|
||||
- device_type: set to "pci"
|
||||
- ranges: ranges for the PCI memory and I/O regions
|
||||
- #interrupt-cells: set to <1>
|
||||
- msi-controller: indicates that the PCIe controller can itself
|
||||
handle MSI interrupts
|
||||
- msi-parent: pointer to the MSI controller to be used
|
||||
- interrupt-map-mask and interrupt-map: standard PCI properties to
|
||||
define the mapping of the PCIe interface to interrupt numbers.
|
||||
- bus-range: PCI bus numbers covered
|
||||
|
||||
In addition, the Device Tree describing an Aardvark PCIe controller
|
||||
must include a sub-node that describes the legacy interrupt controller
|
||||
built into the PCIe controller. This sub-node must have the following
|
||||
properties:
|
||||
|
||||
- interrupt-controller
|
||||
- #interrupt-cells: set to <1>
|
||||
|
||||
Example:
|
||||
|
||||
pcie0: pcie@d0070000 {
|
||||
compatible = "marvell,armada-3700-pcie";
|
||||
device_type = "pci";
|
||||
status = "disabled";
|
||||
reg = <0 0xd0070000 0 0x20000>;
|
||||
#address-cells = <3>;
|
||||
#size-cells = <2>;
|
||||
bus-range = <0x00 0xff>;
|
||||
interrupts = <GIC_SPI 29 IRQ_TYPE_LEVEL_HIGH>;
|
||||
#interrupt-cells = <1>;
|
||||
msi-controller;
|
||||
msi-parent = <&pcie0>;
|
||||
ranges = <0x82000000 0 0xe8000000 0 0xe8000000 0 0x1000000 /* Port 0 MEM */
|
||||
0x81000000 0 0xe9000000 0 0xe9000000 0 0x10000>; /* Port 0 IO*/
|
||||
interrupt-map-mask = <0 0 0 7>;
|
||||
interrupt-map = <0 0 0 1 &pcie_intc 0>,
|
||||
<0 0 0 2 &pcie_intc 1>,
|
||||
<0 0 0 3 &pcie_intc 2>,
|
||||
<0 0 0 4 &pcie_intc 3>;
|
||||
pcie_intc: interrupt-controller {
|
||||
interrupt-controller;
|
||||
#interrupt-cells = <1>;
|
||||
};
|
||||
};
|
|
@ -0,0 +1,46 @@
|
|||
* Axis ARTPEC-6 PCIe interface
|
||||
|
||||
This PCIe host controller is based on the Synopsys DesignWare PCIe IP
|
||||
and thus inherits all the common properties defined in designware-pcie.txt.
|
||||
|
||||
Required properties:
|
||||
- compatible: "axis,artpec6-pcie", "snps,dw-pcie"
|
||||
- reg: base addresses and lengths of the PCIe controller (DBI),
|
||||
the phy controller, and configuration address space.
|
||||
- reg-names: Must include the following entries:
|
||||
- "dbi"
|
||||
- "phy"
|
||||
- "config"
|
||||
- interrupts: A list of interrupt outputs of the controller. Must contain an
|
||||
entry for each entry in the interrupt-names property.
|
||||
- interrupt-names: Must include the following entries:
|
||||
- "msi": The interrupt that is asserted when an MSI is received
|
||||
- axis,syscon-pcie: A phandle pointing to the ARTPEC-6 system controller,
|
||||
used to enable and control the Synopsys IP.
|
||||
|
||||
Example:
|
||||
|
||||
pcie@f8050000 {
|
||||
compatible = "axis,artpec6-pcie", "snps,dw-pcie";
|
||||
reg = <0xf8050000 0x2000
|
||||
0xf8040000 0x1000
|
||||
0xc0000000 0x1000>;
|
||||
reg-names = "dbi", "phy", "config";
|
||||
#address-cells = <3>;
|
||||
#size-cells = <2>;
|
||||
device_type = "pci";
|
||||
/* downstream I/O */
|
||||
ranges = <0x81000000 0 0x00010000 0xc0010000 0 0x00010000
|
||||
/* non-prefetchable memory */
|
||||
0x82000000 0 0xc0020000 0xc0020000 0 0x1ffe0000>;
|
||||
num-lanes = <2>;
|
||||
interrupts = <GIC_SPI 148 IRQ_TYPE_LEVEL_HIGH>;
|
||||
interrupt-names = "msi";
|
||||
#interrupt-cells = <1>;
|
||||
interrupt-map-mask = <0 0 0 0x7>;
|
||||
interrupt-map = <0 0 0 1 &intc GIC_SPI 144 IRQ_TYPE_LEVEL_HIGH>,
|
||||
<0 0 0 2 &intc GIC_SPI 145 IRQ_TYPE_LEVEL_HIGH>,
|
||||
<0 0 0 3 &intc GIC_SPI 146 IRQ_TYPE_LEVEL_HIGH>,
|
||||
<0 0 0 4 &intc GIC_SPI 147 IRQ_TYPE_LEVEL_HIGH>;
|
||||
axis,syscon-pcie = <&syscon>;
|
||||
};
|
|
@ -3,6 +3,7 @@
*.bc
*.bin
*.bz2
*.c.[012]*.*
*.cis
*.cpio
*.csp
|
|
@ -0,0 +1,87 @@
|
|||
GCC plugin infrastructure
|
||||
=========================
|
||||
|
||||
|
||||
1. Introduction
|
||||
===============
|
||||
|
||||
GCC plugins are loadable modules that provide extra features to the
|
||||
compiler [1]. They are useful for runtime instrumentation and static analysis.
|
||||
We can analyse, change and add further code during compilation via
|
||||
callbacks [2], GIMPLE [3], IPA [4] and RTL passes [5].
|
||||
|
||||
The GCC plugin infrastructure of the kernel supports all gcc versions from
|
||||
4.5 to 6.0, building out-of-tree modules, cross-compilation and building in a
|
||||
separate directory.
|
||||
Plugin source files have to be compilable by both a C and a C++ compiler as well
|
||||
because gcc versions 4.5 and 4.6 are compiled by a C compiler,
|
||||
gcc-4.7 can be compiled by a C or a C++ compiler,
|
||||
and versions 4.8+ can only be compiled by a C++ compiler.
|
||||
|
||||
Currently the GCC plugin infrastructure supports only the x86, arm and arm64
|
||||
architectures.
|
||||
|
||||
This infrastructure was ported from grsecurity [6] and PaX [7].
|
||||
|
||||
--
|
||||
[1] https://gcc.gnu.org/onlinedocs/gccint/Plugins.html
|
||||
[2] https://gcc.gnu.org/onlinedocs/gccint/Plugin-API.html#Plugin-API
|
||||
[3] https://gcc.gnu.org/onlinedocs/gccint/GIMPLE.html
|
||||
[4] https://gcc.gnu.org/onlinedocs/gccint/IPA.html
|
||||
[5] https://gcc.gnu.org/onlinedocs/gccint/RTL.html
|
||||
[6] https://grsecurity.net/
|
||||
[7] https://pax.grsecurity.net/
|
||||
|
||||
|
||||
2. Files
|
||||
========
|
||||
|
||||
$(src)/scripts/gcc-plugins
|
||||
This is the directory of the GCC plugins.
|
||||
|
||||
$(src)/scripts/gcc-plugins/gcc-common.h
|
||||
This is a compatibility header for GCC plugins.
|
||||
It should be always included instead of individual gcc headers.
|
||||
|
||||
$(src)/scripts/gcc-plugin.sh
|
||||
This script checks the availability of the included headers in
|
||||
gcc-common.h and chooses the proper host compiler to build the plugins
|
||||
(gcc-4.7 can be built by either gcc or g++).
|
||||
|
||||
$(src)/scripts/gcc-plugins/gcc-generate-gimple-pass.h
|
||||
$(src)/scripts/gcc-plugins/gcc-generate-ipa-pass.h
|
||||
$(src)/scripts/gcc-plugins/gcc-generate-simple_ipa-pass.h
|
||||
$(src)/scripts/gcc-plugins/gcc-generate-rtl-pass.h
|
||||
These headers automatically generate the registration structures for
|
||||
GIMPLE, SIMPLE_IPA, IPA and RTL passes. They support all gcc versions
|
||||
from 4.5 to 6.0.
|
||||
They should be preferred to creating the structures by hand.
|
||||
|
||||
|
||||
3. Usage
|
||||
========
|
||||
|
||||
You must install the gcc plugin headers for your gcc version,
|
||||
e.g., on Ubuntu for gcc-4.9:
|
||||
|
||||
apt-get install gcc-4.9-plugin-dev
|
||||
|
||||
Enable a GCC plugin based feature in the kernel config:
|
||||
|
||||
CONFIG_GCC_PLUGIN_CYC_COMPLEXITY=y
|
||||
|
||||
To compile only the plugin(s):
|
||||
|
||||
make gcc-plugins
|
||||
|
||||
or just run the kernel make and compile the whole kernel with
|
||||
the cyclomatic complexity GCC plugin.
|
||||
|
||||
|
||||
4. How to add a new GCC plugin
|
||||
==============================
|
||||
|
||||
The GCC plugins are in $(src)/scripts/gcc-plugins/. You can use a file or a directory
|
||||
here. It must be added to $(src)/scripts/gcc-plugins/Makefile,
|
||||
$(src)/scripts/Makefile.gcc-plugins and $(src)/arch/Kconfig.
|
||||
See the cyc_complexity_plugin.c (CONFIG_GCC_PLUGIN_CYC_COMPLEXITY) GCC plugin.
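
As an illustration only (not part of this patch; the foo_* names are made
up), a new plugin added under $(src)/scripts/gcc-plugins/ typically boils
down to a small file along these lines:

  #include "gcc-common.h"

  __visible int plugin_is_GPL_compatible;

  static struct plugin_info foo_plugin_info = {
	.version	= "1",
	.help		= "example pass\n",
  };

  __visible int plugin_init(struct plugin_name_args *plugin_info,
			    struct plugin_gcc_version *version)
  {
	if (!plugin_default_version_check(version, &gcc_version))
		return 1;

	register_callback(plugin_info->base_name, PLUGIN_INFO, NULL,
			  &foo_plugin_info);
	/* Real plugins also register a pass via the gcc-generate-*-pass.h headers. */
	return 0;
  }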
|
|
@ -3021,6 +3021,8 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
|
|||
resource_alignment=
|
||||
Format:
|
||||
[<order of align>@][<domain>:]<bus>:<slot>.<func>[; ...]
|
||||
[<order of align>@]pci:<vendor>:<device>\
|
||||
[:<subvendor>:<subdevice>][; ...]
|
||||
Specifies alignment and device to reassign
|
||||
aligned memory resources.
|
||||
If <order of align> is not specified,
|
||||
|
@ -3039,6 +3041,9 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
|
|||
hpmemsize=nn[KMG] The fixed amount of bus space which is
|
||||
reserved for hotplug bridge's memory window.
|
||||
Default size is 2 megabytes.
|
||||
hpbussize=nn The minimum amount of additional bus numbers
|
||||
reserved for buses below a hotplug bridge.
|
||||
Default is 1.
|
||||
realloc= Enable/disable reallocating PCI bridge resources
|
||||
if allocations done by BIOS are too small to
|
||||
accommodate resources required by all child
|
||||
|
@ -3070,6 +3075,10 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
|
|||
compat Treat PCIe ports as PCI-to-PCI bridges, disable the PCIe
|
||||
ports driver.
|
||||
|
||||
pcie_port_pm= [PCIE] PCIe port power management handling:
|
||||
off Disable power management of all PCIe ports
|
||||
force Forcibly enable power management of all PCIe ports
|
||||
|
||||
pcie_pme= [PCIE,PM] Native PCIe PME signaling options:
|
||||
nomsi Do not use MSI for native PCIe PME signaling (this makes
|
||||
all PCIe root ports use INTx for all services).
|
||||
|
|
|
@ -1482,6 +1482,11 @@ struct kvm_irq_routing_msi {
|
|||
__u32 pad;
|
||||
};
|
||||
|
||||
On x86, address_hi is ignored unless the KVM_X2APIC_API_USE_32BIT_IDS
|
||||
feature of KVM_CAP_X2APIC_API capability is enabled. If it is enabled,
|
||||
address_hi bits 31-8 provide bits 31-8 of the destination id. Bits 7-0 of
|
||||
address_hi must be zero.
|
||||
|
||||
struct kvm_irq_routing_s390_adapter {
|
||||
__u64 ind_addr;
|
||||
__u64 summary_addr;
|
||||
|
@ -1583,6 +1588,17 @@ struct kvm_lapic_state {
Reads the Local APIC registers and copies them into the input argument. The
data format and layout are the same as documented in the architecture manual.

If KVM_X2APIC_API_USE_32BIT_IDS feature of KVM_CAP_X2APIC_API is
enabled, then the format of APIC_ID register depends on the APIC mode
(reported by MSR_IA32_APICBASE) of its VCPU. x2APIC stores APIC ID in
the APIC_ID register (bytes 32-35). xAPIC only allows an 8-bit APIC ID
which is stored in bits 31-24 of the APIC register, or equivalently in
byte 35 of struct kvm_lapic_state's regs field. KVM_GET_LAPIC must then
be called after MSR_IA32_APICBASE has been set with KVM_SET_MSR.

If KVM_X2APIC_API_USE_32BIT_IDS feature is disabled, struct kvm_lapic_state
always uses xAPIC format.
|
||||
|
||||
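As a hedged userspace sketch (not part of the patch; assumes an x86 host and that KVM_X2APIC_API_USE_32BIT_IDS has been enabled), the APIC ID can then be read straight out of the returned regs array:

#include <linux/kvm.h>
#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>

/* Illustrative helper: fetch the 32-bit APIC ID after KVM_GET_LAPIC. */
static uint32_t read_apic_id(int vcpu_fd)
{
	struct kvm_lapic_state lapic;
	uint32_t id = 0;

	if (ioctl(vcpu_fd, KVM_GET_LAPIC, &lapic) == 0)
		memcpy(&id, &lapic.regs[32], sizeof(id)); /* APIC_ID, bytes 32-35 */
	return id;
}
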

4.58 KVM_SET_LAPIC
@@ -1600,6 +1616,10 @@ struct kvm_lapic_state {
Copies the input argument into the Local APIC registers. The data format
and layout are the same as documented in the architecture manual.

The format of the APIC ID register (bytes 32-35 of struct kvm_lapic_state's
regs field) depends on the state of the KVM_CAP_X2APIC_API capability.
See the note in KVM_GET_LAPIC.


4.59 KVM_IOEVENTFD
@@ -2032,6 +2052,12 @@ registers, find a list below:
  MIPS  | KVM_REG_MIPS_CP0_CONFIG5      | 32
  MIPS  | KVM_REG_MIPS_CP0_CONFIG7      | 32
  MIPS  | KVM_REG_MIPS_CP0_ERROREPC     | 64
  MIPS  | KVM_REG_MIPS_CP0_KSCRATCH1    | 64
  MIPS  | KVM_REG_MIPS_CP0_KSCRATCH2    | 64
  MIPS  | KVM_REG_MIPS_CP0_KSCRATCH3    | 64
  MIPS  | KVM_REG_MIPS_CP0_KSCRATCH4    | 64
  MIPS  | KVM_REG_MIPS_CP0_KSCRATCH5    | 64
  MIPS  | KVM_REG_MIPS_CP0_KSCRATCH6    | 64
  MIPS  | KVM_REG_MIPS_COUNT_CTL        | 64
  MIPS  | KVM_REG_MIPS_COUNT_RESUME     | 64
  MIPS  | KVM_REG_MIPS_COUNT_HZ         | 64
@@ -2156,7 +2182,7 @@ after pausing the vcpu, but before it is resumed.

4.71 KVM_SIGNAL_MSI

Capability: KVM_CAP_SIGNAL_MSI
Architectures: x86
Architectures: x86 arm64
Type: vm ioctl
Parameters: struct kvm_msi (in)
Returns: >0 on delivery, 0 if guest blocked the MSI, and -1 on error

@@ -2169,10 +2195,22 @@ struct kvm_msi {
	__u32 address_hi;
	__u32 data;
	__u32 flags;
	__u8  pad[16];
	__u32 devid;
	__u8  pad[12];
};

No flags are defined so far. The corresponding field must be 0.
flags: KVM_MSI_VALID_DEVID: devid contains a valid value
devid: If KVM_MSI_VALID_DEVID is set, contains a unique device identifier
       for the device that wrote the MSI message.
       For PCI, this is usually a BFD identifier in the lower 16 bits.

The per-VM KVM_CAP_MSI_DEVID capability advertises the need to provide
the device ID. If this capability is not set, userland cannot rely on
the kernel to allow the KVM_MSI_VALID_DEVID flag being set.

On x86, address_hi is ignored unless the KVM_CAP_X2APIC_API capability is
enabled. If it is enabled, address_hi bits 31-8 provide bits 31-8 of the
destination id. Bits 7-0 of address_hi must be zero.

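A minimal sketch of driving this ioctl from userspace, assuming a VM file descriptor obtained elsewhere (illustrative only, not taken from the patch):

#include <linux/kvm.h>
#include <string.h>
#include <sys/ioctl.h>

/* Illustrative helper: inject one MSI into the guest. */
static int inject_msi(int vm_fd, __u32 addr_lo, __u32 addr_hi, __u32 data)
{
	struct kvm_msi msi;

	memset(&msi, 0, sizeof(msi));
	msi.address_lo = addr_lo;
	msi.address_hi = addr_hi;  /* only used on x86 with KVM_CAP_X2APIC_API */
	msi.data = data;
	/* flags/devid stay 0 unless KVM_CAP_MSI_DEVID requires a device id */

	return ioctl(vm_fd, KVM_SIGNAL_MSI, &msi); /* >0 delivered, 0 blocked, <0 error */
}
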
4.71 KVM_CREATE_PIT2

@@ -2520,6 +2558,7 @@ Parameters: struct kvm_device_attr
Returns: 0 on success, -1 on error
Errors:
  ENXIO:  The group or attribute is unknown/unsupported for this device
          or hardware support is missing.
  EPERM:  The attribute cannot (currently) be accessed this way
          (e.g. read-only attribute, or attribute that only makes
          sense when the device is in a different state)

@@ -2547,6 +2586,7 @@ Parameters: struct kvm_device_attr
Returns: 0 on success, -1 on error
Errors:
  ENXIO:  The group or attribute is unknown/unsupported for this device
          or hardware support is missing.

Tests whether a device supports a particular attribute. A successful
return indicates the attribute is implemented. It does not necessarily
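A hedged sketch of probing an attribute with KVM_HAS_DEVICE_ATTR on a device fd returned by KVM_CREATE_DEVICE; the group/attr values are caller-supplied placeholders, not taken from the patch:

#include <linux/kvm.h>
#include <string.h>
#include <sys/ioctl.h>

/* Illustrative helper: returns 0 if the device implements group/attr. */
static int has_device_attr(int device_fd, __u32 group, __u64 attr_id)
{
	struct kvm_device_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.group = group;
	attr.attr = attr_id;

	return ioctl(device_fd, KVM_HAS_DEVICE_ATTR, &attr); /* -1/ENXIO if unsupported */
}
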
@@ -3803,6 +3843,42 @@ Allows use of runtime-instrumentation introduced with zEC12 processor.
Will return -EINVAL if the machine does not support runtime-instrumentation.
Will return -EBUSY if a VCPU has already been created.

7.7 KVM_CAP_X2APIC_API

Architectures: x86
Parameters: args[0] - features that should be enabled
Returns: 0 on success, -EINVAL when args[0] contains invalid features

Valid feature flags in args[0] are

#define KVM_X2APIC_API_USE_32BIT_IDS            (1ULL << 0)
#define KVM_X2APIC_API_DISABLE_BROADCAST_QUIRK  (1ULL << 1)

Enabling KVM_X2APIC_API_USE_32BIT_IDS changes the behavior of
KVM_SET_GSI_ROUTING, KVM_SIGNAL_MSI, KVM_SET_LAPIC, and KVM_GET_LAPIC,
allowing the use of 32-bit APIC IDs. See KVM_CAP_X2APIC_API in their
respective sections.

KVM_X2APIC_API_DISABLE_BROADCAST_QUIRK must be enabled for x2APIC to work
in logical mode or with more than 255 VCPUs. Otherwise, KVM treats 0xff
as a broadcast even in x2APIC mode in order to support physical x2APIC
without interrupt remapping. This is undesirable in logical mode,
where 0xff represents CPUs 0-7 in cluster 0.

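A hedged sketch of how userspace might turn both features on; the enable path via KVM_ENABLE_CAP on the VM fd is an assumption for illustration, not spelled out in the patch:

#include <linux/kvm.h>
#include <string.h>
#include <sys/ioctl.h>

/* Illustrative helper: enable 32-bit APIC IDs and drop the 0xff broadcast quirk. */
static int enable_x2apic_api(int vm_fd)
{
	struct kvm_enable_cap cap;

	memset(&cap, 0, sizeof(cap));
	cap.cap = KVM_CAP_X2APIC_API;
	cap.args[0] = KVM_X2APIC_API_USE_32BIT_IDS |
		      KVM_X2APIC_API_DISABLE_BROADCAST_QUIRK;

	return ioctl(vm_fd, KVM_ENABLE_CAP, &cap); /* fails with EINVAL on unknown bits */
}
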
7.8 KVM_CAP_S390_USER_INSTR0

Architectures: s390
Parameters: none

With this capability enabled, all illegal instructions 0x0000 (2 bytes) will
be intercepted and forwarded to user space. User space can use this
mechanism e.g. to realize 2-byte software breakpoints. The kernel will
not inject an operating exception for these instructions, user space has
to take care of that.

This capability can be enabled dynamically even if VCPUs were already
created and are running.

8. Other capabilities.
----------------------

@@ -4,16 +4,22 @@ ARM Virtual Generic Interrupt Controller (VGIC)

Device types supported:
  KVM_DEV_TYPE_ARM_VGIC_V2     ARM Generic Interrupt Controller v2.0
  KVM_DEV_TYPE_ARM_VGIC_V3     ARM Generic Interrupt Controller v3.0
  KVM_DEV_TYPE_ARM_VGIC_ITS    ARM Interrupt Translation Service Controller

Only one VGIC instance may be instantiated through either this API or the
legacy KVM_CREATE_IRQCHIP api. The created VGIC will act as the VM interrupt
controller, requiring emulated user-space devices to inject interrupts to the
VGIC instead of directly to CPUs.
Only one VGIC instance of the V2/V3 types above may be instantiated through
either this API or the legacy KVM_CREATE_IRQCHIP api. The created VGIC will
act as the VM interrupt controller, requiring emulated user-space devices to
inject interrupts to the VGIC instead of directly to CPUs.

Creating a guest GICv3 device requires a host GICv3 as well.
GICv3 implementations with hardware compatibility support allow a guest GICv2
as well.

Creating a virtual ITS controller requires a host GICv3 (but does not depend
on having physical ITS controllers).
There can be multiple ITS controllers per guest, each of them has to have
a separate, non-overlapping MMIO region.

Groups:
  KVM_DEV_ARM_VGIC_GRP_ADDR
   Attributes:

@@ -39,6 +45,13 @@ Groups:
      Only valid for KVM_DEV_TYPE_ARM_VGIC_V3.
      This address needs to be 64K aligned.

    KVM_VGIC_V3_ADDR_TYPE_ITS (rw, 64-bit)
      Base address in the guest physical address space of the GICv3 ITS
      control register frame. The ITS allows MSI(-X) interrupts to be
      injected into guests. This extension is optional. If the kernel
      does not support the ITS, the call returns -ENODEV.
      Only valid for KVM_DEV_TYPE_ARM_VGIC_ITS.
      This address needs to be 64K aligned and the region covers 128K.

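A hedged userspace sketch of creating an ITS and placing its control frame, assuming an arm64 host (so <linux/kvm.h> pulls in the arch definitions) and a made-up guest physical base address:

#include <linux/kvm.h>
#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>

#define ITS_GPA_BASE	0x08080000ULL	/* hypothetical, 64K aligned */

/* Illustrative helper: create the ITS device and set its base address. */
static int create_its(int vm_fd)
{
	struct kvm_create_device cd;
	struct kvm_device_attr attr;
	uint64_t base = ITS_GPA_BASE;

	memset(&cd, 0, sizeof(cd));
	cd.type = KVM_DEV_TYPE_ARM_VGIC_ITS;
	if (ioctl(vm_fd, KVM_CREATE_DEVICE, &cd) < 0)
		return -1;

	memset(&attr, 0, sizeof(attr));
	attr.group = KVM_DEV_ARM_VGIC_GRP_ADDR;
	attr.attr  = KVM_VGIC_V3_ADDR_TYPE_ITS;
	attr.addr  = (uint64_t)&base;	/* pointer to the 64-bit address */
	if (ioctl(cd.fd, KVM_SET_DEVICE_ATTR, &attr) < 0)
		return -1;

	return cd.fd;	/* initialize later via KVM_DEV_ARM_VGIC_CTRL_INIT */
}
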
  KVM_DEV_ARM_VGIC_GRP_DIST_REGS
   Attributes:

@@ -109,8 +122,8 @@ Groups:
  KVM_DEV_ARM_VGIC_GRP_CTRL
   Attributes:
    KVM_DEV_ARM_VGIC_CTRL_INIT
      request the initialization of the VGIC, no additional parameter in
      kvm_device_attr.addr.
      request the initialization of the VGIC or ITS, no additional parameter
      in kvm_device_attr.addr.
  Errors:
    -ENXIO: VGIC not properly configured as required prior to calling
     this attribute

@ -20,7 +20,8 @@ Enables Collaborative Memory Management Assist (CMMA) for the virtual machine.
|
|||
|
||||
1.2. ATTRIBUTE: KVM_S390_VM_MEM_CLR_CMMA
|
||||
Parameters: none
|
||||
Returns: 0
|
||||
Returns: -EINVAL if CMMA was not enabled
|
||||
0 otherwise
|
||||
|
||||
Clear the CMMA status for all guest pages, so any pages the guest marked
|
||||
as unused are again used any may not be reclaimed by the host.
|
||||
|
@ -85,6 +86,90 @@ Returns: -EBUSY in case 1 or more vcpus are already activated (only in write
|
|||
-ENOMEM if not enough memory is available to process the ioctl
|
||||
0 in case of success
|
||||
|
||||
2.3. ATTRIBUTE: KVM_S390_VM_CPU_MACHINE_FEAT (r/o)
|
||||
|
||||
Allows user space to retrieve available cpu features. A feature is available if
|
||||
provided by the hardware and supported by kvm. In theory, cpu features could
|
||||
even be completely emulated by kvm.
|
||||
|
||||
struct kvm_s390_vm_cpu_feat {
|
||||
__u64 feat[16]; # Bitmap (1 = feature available), MSB 0 bit numbering
|
||||
};
|
||||
|
||||
Parameters: address of a buffer to load the feature list from.
|
||||
Returns: -EFAULT if the given address is not accessible from kernel space.
|
||||
0 in case of success.
|
||||
|
||||
2.4. ATTRIBUTE: KVM_S390_VM_CPU_PROCESSOR_FEAT (r/w)
|
||||
|
||||
Allows user space to retrieve or change enabled cpu features for all VCPUs of a
|
||||
VM. Features that are not available cannot be enabled.
|
||||
|
||||
See 2.3. for a description of the parameter struct.
|
||||
|
||||
Parameters: address of a buffer to store/load the feature list from.
|
||||
Returns: -EFAULT if the given address is not accessible from kernel space.
|
||||
-EINVAL if a cpu feature that is not available is to be enabled.
|
||||
-EBUSY if at least one VCPU has already been defined.
|
||||
0 in case of success.
|
||||
|
||||
2.5. ATTRIBUTE: KVM_S390_VM_CPU_MACHINE_SUBFUNC (r/o)
|
||||
|
||||
Allows user space to retrieve available cpu subfunctions without any filtering
|
||||
done by a set IBC. These subfunctions are indicated to the guest VCPU via
|
||||
query or "test bit" subfunctions and used e.g. by cpacf functions, plo and ptff.
|
||||
|
||||
A subfunction block is only valid if KVM_S390_VM_CPU_MACHINE contains the
|
||||
STFL(E) bit introducing the affected instruction. If the affected instruction
|
||||
indicates subfunctions via a "query subfunction", the response block is
|
||||
contained in the returned struct. If the affected instruction
|
||||
indicates subfunctions via a "test bit" mechanism, the subfunction codes are
|
||||
contained in the returned struct in MSB 0 bit numbering.
|
||||
|
||||
struct kvm_s390_vm_cpu_subfunc {
|
||||
u8 plo[32]; # always valid (ESA/390 feature)
|
||||
u8 ptff[16]; # valid with TOD-clock steering
|
||||
u8 kmac[16]; # valid with Message-Security-Assist
|
||||
u8 kmc[16]; # valid with Message-Security-Assist
|
||||
u8 km[16]; # valid with Message-Security-Assist
|
||||
u8 kimd[16]; # valid with Message-Security-Assist
|
||||
u8 klmd[16]; # valid with Message-Security-Assist
|
||||
u8 pckmo[16]; # valid with Message-Security-Assist-Extension 3
|
||||
u8 kmctr[16]; # valid with Message-Security-Assist-Extension 4
|
||||
u8 kmf[16]; # valid with Message-Security-Assist-Extension 4
|
||||
u8 kmo[16]; # valid with Message-Security-Assist-Extension 4
|
||||
u8 pcc[16]; # valid with Message-Security-Assist-Extension 4
|
||||
u8 ppno[16]; # valid with Message-Security-Assist-Extension 5
|
||||
u8 reserved[1824]; # reserved for future instructions
|
||||
};
|
||||
|
||||
Parameters: address of a buffer to load the subfunction blocks from.
|
||||
Returns: -EFAULT if the given address is not accessible from kernel space.
|
||||
0 in case of success.
|
||||
|
||||
2.6. ATTRIBUTE: KVM_S390_VM_CPU_PROCESSOR_SUBFUNC (r/w)
|
||||
|
||||
Allows user space to retrieve or change cpu subfunctions to be indicated for
|
||||
all VCPUs of a VM. This attribute will only be available if kernel and
|
||||
hardware support are in place.
|
||||
|
||||
The kernel uses the configured subfunction blocks for indication to
|
||||
the guest. A subfunction block will only be used if the associated STFL(E) bit
|
||||
has not been disabled by user space (so the instruction to be queried is
|
||||
actually available for the guest).
|
||||
|
||||
As long as no data has been written, a read will fail. The IBC will be used
|
||||
to determine available subfunctions in this case, this will guarantee backward
|
||||
compatibility.
|
||||
|
||||
See 2.5. for a description of the parameter struct.
|
||||
|
||||
Parameters: address of a buffer to store/load the subfunction blocks from.
|
||||
Returns: -EFAULT if the given address is not accessible from kernel space.
|
||||
-EINVAL when reading, if there was no write yet.
|
||||
-EBUSY if at least one VCPU has already been defined.
|
||||
0 in case of success.
|
||||
|
||||
3. GROUP: KVM_S390_VM_TOD
|
||||
Architectures: s390
|
||||
|
||||
|
|
|
@@ -89,7 +89,7 @@ In mmu_spte_clear_track_bits():
   old_spte = *spte;

   /* 'if' condition is satisfied. */
   if (old_spte.Accssed == 1 &&
   if (old_spte.Accessed == 1 &&
        old_spte.W == 0)
      spte = 0ull;
   on fast page fault path:

@@ -102,7 +102,7 @@ In mmu_spte_clear_track_bits():
   old_spte = xchg(spte, 0ull)

   if (old_spte.Accssed == 1)
   if (old_spte.Accessed == 1)
      kvm_set_pfn_accessed(spte.pfn);
   if (old_spte.Dirty == 1)
      kvm_set_pfn_dirty(spte.pfn);

26	MAINTAINERS
|
@ -5094,6 +5094,15 @@ L: linux-scsi@vger.kernel.org
|
|||
S: Odd Fixes (e.g., new signatures)
|
||||
F: drivers/scsi/fdomain.*
|
||||
|
||||
GCC PLUGINS
|
||||
M: Kees Cook <keescook@chromium.org>
|
||||
R: Emese Revfy <re.emese@gmail.com>
|
||||
L: kernel-hardening@lists.openwall.com
|
||||
S: Maintained
|
||||
F: scripts/gcc-plugins/
|
||||
F: scripts/gcc-plugin.sh
|
||||
F: Documentation/gcc-plugins.txt
|
||||
|
||||
GCOV BASED KERNEL PROFILING
|
||||
M: Peter Oberparleiter <oberpar@linux.vnet.ibm.com>
|
||||
S: Maintained
|
||||
|
@ -8874,6 +8883,7 @@ L: linux-pci@vger.kernel.org
|
|||
Q: http://patchwork.ozlabs.org/project/linux-pci/list/
|
||||
T: git git://git.kernel.org/pub/scm/linux/kernel/git/helgaas/pci.git
|
||||
S: Supported
|
||||
F: Documentation/devicetree/bindings/pci/
|
||||
F: Documentation/PCI/
|
||||
F: drivers/pci/
|
||||
F: include/linux/pci*
|
||||
|
@ -8937,6 +8947,13 @@ L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
|
|||
S: Maintained
|
||||
F: drivers/pci/host/*mvebu*
|
||||
|
||||
PCI DRIVER FOR AARDVARK (Marvell Armada 3700)
|
||||
M: Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
|
||||
L: linux-pci@vger.kernel.org
|
||||
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
|
||||
S: Maintained
|
||||
F: drivers/pci/host/pci-aardvark.c
|
||||
|
||||
PCI DRIVER FOR NVIDIA TEGRA
|
||||
M: Thierry Reding <thierry.reding@gmail.com>
|
||||
L: linux-tegra@vger.kernel.org
|
||||
|
@ -9019,6 +9036,15 @@ S: Maintained
|
|||
F: Documentation/devicetree/bindings/pci/xgene-pci-msi.txt
|
||||
F: drivers/pci/host/pci-xgene-msi.c
|
||||
|
||||
PCIE DRIVER FOR AXIS ARTPEC
|
||||
M: Niklas Cassel <niklas.cassel@axis.com>
|
||||
M: Jesper Nilsson <jesper.nilsson@axis.com>
|
||||
L: linux-arm-kernel@axis.com
|
||||
L: linux-pci@vger.kernel.org
|
||||
S: Maintained
|
||||
F: Documentation/devicetree/bindings/pci/axis,artpec*
|
||||
F: drivers/pci/host/*artpec*
|
||||
|
||||
PCIE DRIVER FOR HISILICON
|
||||
M: Zhou Wang <wangzhou1@hisilicon.com>
|
||||
M: Gabriele Paoloni <gabriele.paoloni@huawei.com>
|
||||
|
|
45	Makefile
|
@ -371,26 +371,27 @@ CFLAGS_KERNEL =
|
|||
AFLAGS_KERNEL =
|
||||
LDFLAGS_vmlinux =
|
||||
CFLAGS_GCOV = -fprofile-arcs -ftest-coverage -fno-tree-loop-im
|
||||
CFLAGS_KCOV = -fsanitize-coverage=trace-pc
|
||||
CFLAGS_KCOV := $(call cc-option,-fsanitize-coverage=trace-pc,)
|
||||
|
||||
|
||||
# Use USERINCLUDE when you must reference the UAPI directories only.
|
||||
USERINCLUDE := \
|
||||
-I$(srctree)/arch/$(hdr-arch)/include/uapi \
|
||||
-Iarch/$(hdr-arch)/include/generated/uapi \
|
||||
-I$(objtree)/arch/$(hdr-arch)/include/generated/uapi \
|
||||
-I$(srctree)/include/uapi \
|
||||
-Iinclude/generated/uapi \
|
||||
-I$(objtree)/include/generated/uapi \
|
||||
-include $(srctree)/include/linux/kconfig.h
|
||||
|
||||
# Use LINUXINCLUDE when you must reference the include/ directory.
|
||||
# Needed to be compatible with the O= option
|
||||
LINUXINCLUDE := \
|
||||
-I$(srctree)/arch/$(hdr-arch)/include \
|
||||
-Iarch/$(hdr-arch)/include/generated/uapi \
|
||||
-Iarch/$(hdr-arch)/include/generated \
|
||||
-I$(objtree)/arch/$(hdr-arch)/include/generated/uapi \
|
||||
-I$(objtree)/arch/$(hdr-arch)/include/generated \
|
||||
$(if $(KBUILD_SRC), -I$(srctree)/include) \
|
||||
-Iinclude \
|
||||
$(USERINCLUDE)
|
||||
-I$(objtree)/include
|
||||
|
||||
LINUXINCLUDE += $(filter-out $(LINUXINCLUDE),$(USERINCLUDE))
|
||||
|
||||
KBUILD_CPPFLAGS := -D__KERNEL__
|
||||
|
||||
|
@ -554,7 +555,7 @@ ifeq ($(KBUILD_EXTMOD),)
|
|||
# in parallel
|
||||
PHONY += scripts
|
||||
scripts: scripts_basic include/config/auto.conf include/config/tristate.conf \
|
||||
asm-generic
|
||||
asm-generic gcc-plugins
|
||||
$(Q)$(MAKE) $(build)=$(@)
|
||||
|
||||
# Objects we will link into vmlinux / subdirs we need to visit
|
||||
|
@ -635,6 +636,15 @@ endif
|
|||
# Tell gcc to never replace conditional load with a non-conditional one
|
||||
KBUILD_CFLAGS += $(call cc-option,--param=allow-store-data-races=0)
|
||||
|
||||
PHONY += gcc-plugins
|
||||
gcc-plugins: scripts_basic
|
||||
ifdef CONFIG_GCC_PLUGINS
|
||||
$(Q)$(MAKE) $(build)=scripts/gcc-plugins
|
||||
endif
|
||||
@:
|
||||
|
||||
include scripts/Makefile.gcc-plugins
|
||||
|
||||
ifdef CONFIG_READABLE_ASM
|
||||
# Disable optimizations that make assembler listings hard to read.
|
||||
# reorder blocks reorders the control in the function
|
||||
|
@ -666,21 +676,11 @@ endif
|
|||
endif
|
||||
# Find arch-specific stack protector compiler sanity-checking script.
|
||||
ifdef CONFIG_CC_STACKPROTECTOR
|
||||
stackp-path := $(srctree)/scripts/gcc-$(ARCH)_$(BITS)-has-stack-protector.sh
|
||||
ifneq ($(wildcard $(stackp-path)),)
|
||||
stackp-check := $(stackp-path)
|
||||
endif
|
||||
stackp-path := $(srctree)/scripts/gcc-$(SRCARCH)_$(BITS)-has-stack-protector.sh
|
||||
stackp-check := $(wildcard $(stackp-path))
|
||||
endif
|
||||
KBUILD_CFLAGS += $(stackp-flag)
|
||||
|
||||
ifdef CONFIG_KCOV
|
||||
ifeq ($(call cc-option, $(CFLAGS_KCOV)),)
|
||||
$(warning Cannot use CONFIG_KCOV: \
|
||||
-fsanitize-coverage=trace-pc is not supported by compiler)
|
||||
CFLAGS_KCOV =
|
||||
endif
|
||||
endif
|
||||
|
||||
ifeq ($(cc-name),clang)
|
||||
KBUILD_CPPFLAGS += $(call cc-option,-Qunused-arguments,)
|
||||
KBUILD_CPPFLAGS += $(call cc-option,-Wno-unknown-warning-option,)
|
||||
|
@ -1019,7 +1019,7 @@ prepare1: prepare2 $(version_h) include/generated/utsrelease.h \
|
|||
|
||||
archprepare: archheaders archscripts prepare1 scripts_basic
|
||||
|
||||
prepare0: archprepare
|
||||
prepare0: archprepare gcc-plugins
|
||||
$(Q)$(MAKE) $(build)=.
|
||||
|
||||
# All the preparing..
|
||||
|
@ -1531,6 +1531,7 @@ clean: $(clean-dirs)
|
|||
-o -name '.*.d' -o -name '.*.tmp' -o -name '*.mod.c' \
|
||||
-o -name '*.symtypes' -o -name 'modules.order' \
|
||||
-o -name modules.builtin -o -name '.tmp_*.o.*' \
|
||||
-o -name '*.c.[012]*.*' \
|
||||
-o -name '*.gcno' \) -type f -print | xargs rm -f
|
||||
|
||||
# Generate tags for editors
|
||||
|
@ -1641,7 +1642,7 @@ endif
|
|||
$(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \
|
||||
$(build)=$(build-dir)
|
||||
# Make sure the latest headers are built for Documentation
|
||||
Documentation/: headers_install
|
||||
Documentation/ samples/: headers_install
|
||||
%/: prepare scripts FORCE
|
||||
$(cmd_crmodverdir)
|
||||
$(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \
|
||||
|
|
37	arch/Kconfig
|
@ -357,6 +357,43 @@ config SECCOMP_FILTER
|
|||
|
||||
See Documentation/prctl/seccomp_filter.txt for details.
|
||||
|
||||
config HAVE_GCC_PLUGINS
|
||||
bool
|
||||
help
|
||||
An arch should select this symbol if it supports building with
|
||||
GCC plugins.
|
||||
|
||||
menuconfig GCC_PLUGINS
|
||||
bool "GCC plugins"
|
||||
depends on HAVE_GCC_PLUGINS
|
||||
depends on !COMPILE_TEST
|
||||
help
|
||||
GCC plugins are loadable modules that provide extra features to the
|
||||
compiler. They are useful for runtime instrumentation and static analysis.
|
||||
|
||||
See Documentation/gcc-plugins.txt for details.
|
||||
|
||||
config GCC_PLUGIN_CYC_COMPLEXITY
|
||||
bool "Compute the cyclomatic complexity of a function"
|
||||
depends on GCC_PLUGINS
|
||||
help
|
||||
The complexity M of a function's control flow graph is defined as:
|
||||
M = E - N + 2P
|
||||
where
|
||||
|
||||
E = the number of edges
|
||||
N = the number of nodes
|
||||
P = the number of connected components (exit nodes).
|
||||
|
||||
config GCC_PLUGIN_SANCOV
|
||||
bool
|
||||
depends on GCC_PLUGINS
|
||||
help
|
||||
This plugin inserts a __sanitizer_cov_trace_pc() call at the start of
|
||||
basic blocks. It supports all gcc versions with plugin support (from
|
||||
gcc-4.5 on). It is based on the commit "Add fuzzing coverage support"
|
||||
by Dmitry Vyukov <dvyukov@google.com>.
|
||||
|
||||
config HAVE_CC_STACKPROTECTOR
|
||||
bool
|
||||
help
|
||||
|
|
|
@ -15,7 +15,7 @@ targets := vmlinux.gz vmlinux \
|
|||
OBJSTRIP := $(obj)/tools/objstrip
|
||||
|
||||
HOSTCFLAGS := -Wall -I$(objtree)/usr/include
|
||||
BOOTCFLAGS += -I$(obj) -I$(srctree)/$(obj)
|
||||
BOOTCFLAGS += -I$(objtree)/$(obj) -I$(srctree)/$(obj)
|
||||
|
||||
# SRM bootable image. Copy to offset 512 of a partition.
|
||||
$(obj)/bootimage: $(addprefix $(obj)/tools/,mkbb lxboot bootlx) $(obj)/vmlinux.nh
|
||||
|
|
|
@ -54,6 +54,7 @@ config ARM
|
|||
select HAVE_FTRACE_MCOUNT_RECORD if (!XIP_KERNEL)
|
||||
select HAVE_FUNCTION_GRAPH_TRACER if (!THUMB2_KERNEL)
|
||||
select HAVE_FUNCTION_TRACER if (!XIP_KERNEL)
|
||||
select HAVE_GCC_PLUGINS
|
||||
select HAVE_GENERIC_DMA_COHERENT
|
||||
select HAVE_HW_BREAKPOINT if (PERF_EVENTS && (CPU_V6 || CPU_V6K || CPU_V7))
|
||||
select HAVE_IDE if PCI || ISA || PCMCIA
|
||||
|
@ -699,7 +700,7 @@ config ARCH_VIRT
|
|||
depends on ARCH_MULTI_V7
|
||||
select ARM_AMBA
|
||||
select ARM_GIC
|
||||
select ARM_GIC_V2M if PCI_MSI
|
||||
select ARM_GIC_V2M if PCI
|
||||
select ARM_GIC_V3
|
||||
select ARM_PSCI
|
||||
select HAVE_ARM_ARCH_TIMER
|
||||
|
|
|
@ -66,6 +66,8 @@ extern void __kvm_tlb_flush_vmid(struct kvm *kvm);
|
|||
extern int __kvm_vcpu_run(struct kvm_vcpu *vcpu);
|
||||
|
||||
extern void __init_stage2_translation(void);
|
||||
|
||||
extern void __kvm_hyp_reset(unsigned long);
|
||||
#endif
|
||||
|
||||
#endif /* __ARM_KVM_ASM_H__ */
|
||||
|
|
|
@ -241,8 +241,7 @@ int kvm_arm_coproc_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *);
|
|||
int handle_exit(struct kvm_vcpu *vcpu, struct kvm_run *run,
|
||||
int exception_index);
|
||||
|
||||
static inline void __cpu_init_hyp_mode(phys_addr_t boot_pgd_ptr,
|
||||
phys_addr_t pgd_ptr,
|
||||
static inline void __cpu_init_hyp_mode(phys_addr_t pgd_ptr,
|
||||
unsigned long hyp_stack_ptr,
|
||||
unsigned long vector_ptr)
|
||||
{
|
||||
|
@ -251,18 +250,13 @@ static inline void __cpu_init_hyp_mode(phys_addr_t boot_pgd_ptr,
|
|||
* code. The init code doesn't need to preserve these
|
||||
* registers as r0-r3 are already callee saved according to
|
||||
* the AAPCS.
|
||||
* Note that we slightly misuse the prototype by casing the
|
||||
* Note that we slightly misuse the prototype by casting the
|
||||
* stack pointer to a void *.
|
||||
*
|
||||
* We don't have enough registers to perform the full init in
|
||||
* one go. Install the boot PGD first, and then install the
|
||||
* runtime PGD, stack pointer and vectors. The PGDs are always
|
||||
* passed as the third argument, in order to be passed into
|
||||
* r2-r3 to the init code (yes, this is compliant with the
|
||||
* PCS!).
|
||||
*/
|
||||
|
||||
kvm_call_hyp(NULL, 0, boot_pgd_ptr);
|
||||
* The PGDs are always passed as the third argument, in order
|
||||
* to be passed into r2-r3 to the init code (yes, this is
|
||||
* compliant with the PCS!).
|
||||
*/
|
||||
|
||||
kvm_call_hyp((void*)hyp_stack_ptr, vector_ptr, pgd_ptr);
|
||||
}
|
||||
|
@ -272,16 +266,13 @@ static inline void __cpu_init_stage2(void)
|
|||
kvm_call_hyp(__init_stage2_translation);
|
||||
}
|
||||
|
||||
static inline void __cpu_reset_hyp_mode(phys_addr_t boot_pgd_ptr,
|
||||
static inline void __cpu_reset_hyp_mode(unsigned long vector_ptr,
|
||||
phys_addr_t phys_idmap_start)
|
||||
{
|
||||
/*
|
||||
* TODO
|
||||
* kvm_call_reset(boot_pgd_ptr, phys_idmap_start);
|
||||
*/
|
||||
kvm_call_hyp((void *)virt_to_idmap(__kvm_hyp_reset), vector_ptr);
|
||||
}
|
||||
|
||||
static inline int kvm_arch_dev_ioctl_check_extension(long ext)
|
||||
static inline int kvm_arch_dev_ioctl_check_extension(struct kvm *kvm, long ext)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
|
|
|
@ -25,9 +25,6 @@
|
|||
|
||||
#define __hyp_text __section(.hyp.text) notrace
|
||||
|
||||
#define kern_hyp_va(v) (v)
|
||||
#define hyp_kern_va(v) (v)
|
||||
|
||||
#define __ACCESS_CP15(CRn, Op1, CRm, Op2) \
|
||||
"mrc", "mcr", __stringify(p15, Op1, %0, CRn, CRm, Op2), u32
|
||||
#define __ACCESS_CP15_64(Op1, CRm) \
|
||||
|
|
|
@ -26,16 +26,7 @@
|
|||
* We directly use the kernel VA for the HYP, as we can directly share
|
||||
* the mapping (HTTBR "covers" TTBR1).
|
||||
*/
|
||||
#define HYP_PAGE_OFFSET_MASK UL(~0)
|
||||
#define HYP_PAGE_OFFSET PAGE_OFFSET
|
||||
#define KERN_TO_HYP(kva) (kva)
|
||||
|
||||
/*
|
||||
* Our virtual mapping for the boot-time MMU-enable code. Must be
|
||||
* shared across all the page-tables. Conveniently, we use the vectors
|
||||
* page, where no kernel data will ever be shared with HYP.
|
||||
*/
|
||||
#define TRAMPOLINE_VA UL(CONFIG_VECTORS_BASE)
|
||||
#define kern_hyp_va(kva) (kva)
|
||||
|
||||
/*
|
||||
* KVM_MMU_CACHE_MIN_PAGES is the number of stage2 page table translation levels.
|
||||
|
@ -49,9 +40,8 @@
|
|||
#include <asm/pgalloc.h>
|
||||
#include <asm/stage2_pgtable.h>
|
||||
|
||||
int create_hyp_mappings(void *from, void *to);
|
||||
int create_hyp_mappings(void *from, void *to, pgprot_t prot);
|
||||
int create_hyp_io_mappings(void *from, void *to, phys_addr_t);
|
||||
void free_boot_hyp_pgd(void);
|
||||
void free_hyp_pgds(void);
|
||||
|
||||
void stage2_unmap_vm(struct kvm *kvm);
|
||||
|
@ -65,7 +55,6 @@ int kvm_handle_guest_abort(struct kvm_vcpu *vcpu, struct kvm_run *run);
|
|||
void kvm_mmu_free_memory_caches(struct kvm_vcpu *vcpu);
|
||||
|
||||
phys_addr_t kvm_mmu_get_httbr(void);
|
||||
phys_addr_t kvm_mmu_get_boot_httbr(void);
|
||||
phys_addr_t kvm_get_idmap_vector(void);
|
||||
phys_addr_t kvm_get_idmap_start(void);
|
||||
int kvm_mmu_init(void);
|
||||
|
|
|
@ -22,6 +22,7 @@ struct hw_pci {
|
|||
struct msi_controller *msi_ctrl;
|
||||
struct pci_ops *ops;
|
||||
int nr_controllers;
|
||||
unsigned int io_optional:1;
|
||||
void **private_data;
|
||||
int (*setup)(int nr, struct pci_sys_data *);
|
||||
struct pci_bus *(*scan)(int nr, struct pci_sys_data *);
|
||||
|
|
|
@ -97,7 +97,9 @@ extern pgprot_t pgprot_s2_device;
|
|||
#define PAGE_READONLY_EXEC _MOD_PROT(pgprot_user, L_PTE_USER | L_PTE_RDONLY)
|
||||
#define PAGE_KERNEL _MOD_PROT(pgprot_kernel, L_PTE_XN)
|
||||
#define PAGE_KERNEL_EXEC pgprot_kernel
|
||||
#define PAGE_HYP _MOD_PROT(pgprot_kernel, L_PTE_HYP)
|
||||
#define PAGE_HYP _MOD_PROT(pgprot_kernel, L_PTE_HYP | L_PTE_XN)
|
||||
#define PAGE_HYP_EXEC _MOD_PROT(pgprot_kernel, L_PTE_HYP | L_PTE_RDONLY)
|
||||
#define PAGE_HYP_RO _MOD_PROT(pgprot_kernel, L_PTE_HYP | L_PTE_RDONLY | L_PTE_XN)
|
||||
#define PAGE_HYP_DEVICE _MOD_PROT(pgprot_hyp_device, L_PTE_HYP)
|
||||
#define PAGE_S2 _MOD_PROT(pgprot_s2, L_PTE_S2_RDONLY)
|
||||
#define PAGE_S2_DEVICE _MOD_PROT(pgprot_s2_device, L_PTE_S2_RDONLY)
|
||||
|
|
|
@ -80,6 +80,10 @@ static inline bool is_kernel_in_hyp_mode(void)
|
|||
return false;
|
||||
}
|
||||
|
||||
/* The section containing the hypervisor idmap text */
|
||||
extern char __hyp_idmap_text_start[];
|
||||
extern char __hyp_idmap_text_end[];
|
||||
|
||||
/* The section containing the hypervisor text */
|
||||
extern char __hyp_text_start[];
|
||||
extern char __hyp_text_end[];
|
||||
|
|
|
@ -410,7 +410,8 @@ static int pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
|
|||
return irq;
|
||||
}
|
||||
|
||||
static int pcibios_init_resources(int busnr, struct pci_sys_data *sys)
|
||||
static int pcibios_init_resource(int busnr, struct pci_sys_data *sys,
|
||||
int io_optional)
|
||||
{
|
||||
int ret;
|
||||
struct resource_entry *window;
|
||||
|
@ -420,6 +421,14 @@ static int pcibios_init_resources(int busnr, struct pci_sys_data *sys)
|
|||
&iomem_resource, sys->mem_offset);
|
||||
}
|
||||
|
||||
/*
|
||||
* If a platform says I/O port support is optional, we don't add
|
||||
* the default I/O space. The platform is responsible for adding
|
||||
* any I/O space it needs.
|
||||
*/
|
||||
if (io_optional)
|
||||
return 0;
|
||||
|
||||
resource_list_for_each_entry(window, &sys->resources)
|
||||
if (resource_type(window->res) == IORESOURCE_IO)
|
||||
return 0;
|
||||
|
@ -466,7 +475,7 @@ static void pcibios_init_hw(struct device *parent, struct hw_pci *hw,
|
|||
if (ret > 0) {
|
||||
struct pci_host_bridge *host_bridge;
|
||||
|
||||
ret = pcibios_init_resources(nr, sys);
|
||||
ret = pcibios_init_resource(nr, sys, hw->io_optional);
|
||||
if (ret) {
|
||||
kfree(sys);
|
||||
break;
|
||||
|
@ -515,25 +524,23 @@ void pci_common_init_dev(struct device *parent, struct hw_pci *hw)
|
|||
list_for_each_entry(sys, &head, node) {
|
||||
struct pci_bus *bus = sys->bus;
|
||||
|
||||
if (!pci_has_flag(PCI_PROBE_ONLY)) {
|
||||
/*
|
||||
* We insert PCI resources into the iomem_resource and
|
||||
* ioport_resource trees in either pci_bus_claim_resources()
|
||||
* or pci_bus_assign_resources().
|
||||
*/
|
||||
if (pci_has_flag(PCI_PROBE_ONLY)) {
|
||||
pci_bus_claim_resources(bus);
|
||||
} else {
|
||||
struct pci_bus *child;
|
||||
|
||||
/*
|
||||
* Size the bridge windows.
|
||||
*/
|
||||
pci_bus_size_bridges(bus);
|
||||
|
||||
/*
|
||||
* Assign resources.
|
||||
*/
|
||||
pci_bus_assign_resources(bus);
|
||||
|
||||
list_for_each_entry(child, &bus->children, node)
|
||||
pcie_bus_configure_settings(child);
|
||||
}
|
||||
/*
|
||||
* Tell drivers about devices found.
|
||||
*/
|
||||
|
||||
pci_bus_add_devices(bus);
|
||||
}
|
||||
}
|
||||
|
@ -590,18 +597,6 @@ resource_size_t pcibios_align_resource(void *data, const struct resource *res,
|
|||
return start;
|
||||
}
|
||||
|
||||
/**
|
||||
* pcibios_enable_device - Enable I/O and memory.
|
||||
* @dev: PCI device to be enabled
|
||||
*/
|
||||
int pcibios_enable_device(struct pci_dev *dev, int mask)
|
||||
{
|
||||
if (pci_has_flag(PCI_PROBE_ONLY))
|
||||
return 0;
|
||||
|
||||
return pci_enable_resources(dev, mask);
|
||||
}
|
||||
|
||||
int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
|
||||
enum pci_mmap_state mmap_state, int write_combine)
|
||||
{
|
||||
|
|
|
@ -46,13 +46,6 @@ config KVM_ARM_HOST
|
|||
---help---
|
||||
Provides host support for ARM processors.
|
||||
|
||||
config KVM_NEW_VGIC
|
||||
bool "New VGIC implementation"
|
||||
depends on KVM
|
||||
default y
|
||||
---help---
|
||||
uses the new VGIC implementation
|
||||
|
||||
source drivers/vhost/Kconfig
|
||||
|
||||
endif # VIRTUALIZATION
|
||||
|
|
|
@ -22,7 +22,6 @@ obj-y += kvm-arm.o init.o interrupts.o
|
|||
obj-y += arm.o handle_exit.o guest.o mmu.o emulate.o reset.o
|
||||
obj-y += coproc.o coproc_a15.o coproc_a7.o mmio.o psci.o perf.o
|
||||
|
||||
ifeq ($(CONFIG_KVM_NEW_VGIC),y)
|
||||
obj-y += $(KVM)/arm/vgic/vgic.o
|
||||
obj-y += $(KVM)/arm/vgic/vgic-init.o
|
||||
obj-y += $(KVM)/arm/vgic/vgic-irqfd.o
|
||||
|
@ -30,9 +29,4 @@ obj-y += $(KVM)/arm/vgic/vgic-v2.o
|
|||
obj-y += $(KVM)/arm/vgic/vgic-mmio.o
|
||||
obj-y += $(KVM)/arm/vgic/vgic-mmio-v2.o
|
||||
obj-y += $(KVM)/arm/vgic/vgic-kvm-device.o
|
||||
else
|
||||
obj-y += $(KVM)/arm/vgic.o
|
||||
obj-y += $(KVM)/arm/vgic-v2.o
|
||||
obj-y += $(KVM)/arm/vgic-v2-emul.o
|
||||
endif
|
||||
obj-y += $(KVM)/arm/arch_timer.o
|
||||
|
|
|
@ -20,6 +20,7 @@
|
|||
#include <linux/errno.h>
|
||||
#include <linux/err.h>
|
||||
#include <linux/kvm_host.h>
|
||||
#include <linux/list.h>
|
||||
#include <linux/module.h>
|
||||
#include <linux/vmalloc.h>
|
||||
#include <linux/fs.h>
|
||||
|
@ -122,7 +123,7 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
|
|||
if (ret)
|
||||
goto out_fail_alloc;
|
||||
|
||||
ret = create_hyp_mappings(kvm, kvm + 1);
|
||||
ret = create_hyp_mappings(kvm, kvm + 1, PAGE_HYP);
|
||||
if (ret)
|
||||
goto out_free_stage2_pgd;
|
||||
|
||||
|
@ -201,7 +202,7 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
|
|||
r = KVM_MAX_VCPUS;
|
||||
break;
|
||||
default:
|
||||
r = kvm_arch_dev_ioctl_check_extension(ext);
|
||||
r = kvm_arch_dev_ioctl_check_extension(kvm, ext);
|
||||
break;
|
||||
}
|
||||
return r;
|
||||
|
@ -239,7 +240,7 @@ struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id)
|
|||
if (err)
|
||||
goto free_vcpu;
|
||||
|
||||
err = create_hyp_mappings(vcpu, vcpu + 1);
|
||||
err = create_hyp_mappings(vcpu, vcpu + 1, PAGE_HYP);
|
||||
if (err)
|
||||
goto vcpu_uninit;
|
||||
|
||||
|
@ -377,7 +378,7 @@ void force_vm_exit(const cpumask_t *mask)
|
|||
|
||||
/**
|
||||
* need_new_vmid_gen - check that the VMID is still valid
|
||||
* @kvm: The VM's VMID to checkt
|
||||
* @kvm: The VM's VMID to check
|
||||
*
|
||||
* return true if there is a new generation of VMIDs being used
|
||||
*
|
||||
|
@ -616,7 +617,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
|
|||
* Enter the guest
|
||||
*/
|
||||
trace_kvm_entry(*vcpu_pc(vcpu));
|
||||
__kvm_guest_enter();
|
||||
guest_enter_irqoff();
|
||||
vcpu->mode = IN_GUEST_MODE;
|
||||
|
||||
ret = kvm_call_hyp(__kvm_vcpu_run, vcpu);
|
||||
|
@ -642,14 +643,14 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
|
|||
local_irq_enable();
|
||||
|
||||
/*
|
||||
* We do local_irq_enable() before calling kvm_guest_exit() so
|
||||
* We do local_irq_enable() before calling guest_exit() so
|
||||
* that if a timer interrupt hits while running the guest we
|
||||
* account that tick as being spent in the guest. We enable
|
||||
* preemption after calling kvm_guest_exit() so that if we get
|
||||
* preemption after calling guest_exit() so that if we get
|
||||
* preempted we make sure ticks after that is not counted as
|
||||
* guest time.
|
||||
*/
|
||||
kvm_guest_exit();
|
||||
guest_exit();
|
||||
trace_kvm_exit(ret, kvm_vcpu_trap_get_class(vcpu), *vcpu_pc(vcpu));
|
||||
|
||||
/*
|
||||
|
@ -1039,7 +1040,6 @@ long kvm_arch_vm_ioctl(struct file *filp,
|
|||
|
||||
static void cpu_init_hyp_mode(void *dummy)
|
||||
{
|
||||
phys_addr_t boot_pgd_ptr;
|
||||
phys_addr_t pgd_ptr;
|
||||
unsigned long hyp_stack_ptr;
|
||||
unsigned long stack_page;
|
||||
|
@ -1048,13 +1048,12 @@ static void cpu_init_hyp_mode(void *dummy)
|
|||
/* Switch from the HYP stub to our own HYP init vector */
|
||||
__hyp_set_vectors(kvm_get_idmap_vector());
|
||||
|
||||
boot_pgd_ptr = kvm_mmu_get_boot_httbr();
|
||||
pgd_ptr = kvm_mmu_get_httbr();
|
||||
stack_page = __this_cpu_read(kvm_arm_hyp_stack_page);
|
||||
hyp_stack_ptr = stack_page + PAGE_SIZE;
|
||||
vector_ptr = (unsigned long)kvm_ksym_ref(__kvm_hyp_vector);
|
||||
|
||||
__cpu_init_hyp_mode(boot_pgd_ptr, pgd_ptr, hyp_stack_ptr, vector_ptr);
|
||||
__cpu_init_hyp_mode(pgd_ptr, hyp_stack_ptr, vector_ptr);
|
||||
__cpu_init_stage2();
|
||||
|
||||
kvm_arm_init_debug();
|
||||
|
@ -1076,15 +1075,9 @@ static void cpu_hyp_reinit(void)
|
|||
|
||||
static void cpu_hyp_reset(void)
|
||||
{
|
||||
phys_addr_t boot_pgd_ptr;
|
||||
phys_addr_t phys_idmap_start;
|
||||
|
||||
if (!is_kernel_in_hyp_mode()) {
|
||||
boot_pgd_ptr = kvm_mmu_get_boot_httbr();
|
||||
phys_idmap_start = kvm_get_idmap_start();
|
||||
|
||||
__cpu_reset_hyp_mode(boot_pgd_ptr, phys_idmap_start);
|
||||
}
|
||||
if (!is_kernel_in_hyp_mode())
|
||||
__cpu_reset_hyp_mode(hyp_default_vectors,
|
||||
kvm_get_idmap_start());
|
||||
}
|
||||
|
||||
static void _kvm_arch_hardware_enable(void *discard)
|
||||
|
@ -1294,14 +1287,14 @@ static int init_hyp_mode(void)
|
|||
* Map the Hyp-code called directly from the host
|
||||
*/
|
||||
err = create_hyp_mappings(kvm_ksym_ref(__hyp_text_start),
|
||||
kvm_ksym_ref(__hyp_text_end));
|
||||
kvm_ksym_ref(__hyp_text_end), PAGE_HYP_EXEC);
|
||||
if (err) {
|
||||
kvm_err("Cannot map world-switch code\n");
|
||||
goto out_err;
|
||||
}
|
||||
|
||||
err = create_hyp_mappings(kvm_ksym_ref(__start_rodata),
|
||||
kvm_ksym_ref(__end_rodata));
|
||||
kvm_ksym_ref(__end_rodata), PAGE_HYP_RO);
|
||||
if (err) {
|
||||
kvm_err("Cannot map rodata section\n");
|
||||
goto out_err;
|
||||
|
@ -1312,7 +1305,8 @@ static int init_hyp_mode(void)
|
|||
*/
|
||||
for_each_possible_cpu(cpu) {
|
||||
char *stack_page = (char *)per_cpu(kvm_arm_hyp_stack_page, cpu);
|
||||
err = create_hyp_mappings(stack_page, stack_page + PAGE_SIZE);
|
||||
err = create_hyp_mappings(stack_page, stack_page + PAGE_SIZE,
|
||||
PAGE_HYP);
|
||||
|
||||
if (err) {
|
||||
kvm_err("Cannot map hyp stack\n");
|
||||
|
@ -1324,7 +1318,7 @@ static int init_hyp_mode(void)
|
|||
kvm_cpu_context_t *cpu_ctxt;
|
||||
|
||||
cpu_ctxt = per_cpu_ptr(kvm_host_cpu_state, cpu);
|
||||
err = create_hyp_mappings(cpu_ctxt, cpu_ctxt + 1);
|
||||
err = create_hyp_mappings(cpu_ctxt, cpu_ctxt + 1, PAGE_HYP);
|
||||
|
||||
if (err) {
|
||||
kvm_err("Cannot map host CPU state: %d\n", err);
|
||||
|
@ -1332,10 +1326,6 @@ static int init_hyp_mode(void)
|
|||
}
|
||||
}
|
||||
|
||||
#ifndef CONFIG_HOTPLUG_CPU
|
||||
free_boot_hyp_pgd();
|
||||
#endif
|
||||
|
||||
/* set size of VMID supported by CPU */
|
||||
kvm_vmid_bits = kvm_get_vmid_bits();
|
||||
kvm_info("%d-bit VMID\n", kvm_vmid_bits);
|
||||
|
|
|
@ -210,7 +210,7 @@ bool kvm_condition_valid(struct kvm_vcpu *vcpu)
|
|||
* @vcpu: The VCPU pointer
|
||||
*
|
||||
* When exceptions occur while instructions are executed in Thumb IF-THEN
|
||||
* blocks, the ITSTATE field of the CPSR is not advanved (updated), so we have
|
||||
* blocks, the ITSTATE field of the CPSR is not advanced (updated), so we have
|
||||
* to do this little bit of work manually. The fields map like this:
|
||||
*
|
||||
* IT[7:0] -> CPSR[26:25],CPSR[15:10]
|
||||
|
|
|
@ -182,7 +182,7 @@ unsigned long kvm_arm_num_regs(struct kvm_vcpu *vcpu)
|
|||
/**
|
||||
* kvm_arm_copy_reg_indices - get indices of all registers.
|
||||
*
|
||||
* We do core registers right here, then we apppend coproc regs.
|
||||
* We do core registers right here, then we append coproc regs.
|
||||
*/
|
||||
int kvm_arm_copy_reg_indices(struct kvm_vcpu *vcpu, u64 __user *uindices)
|
||||
{
|
||||
|
|
|
@ -32,23 +32,13 @@
|
|||
* r2,r3 = Hypervisor pgd pointer
|
||||
*
|
||||
* The init scenario is:
|
||||
* - We jump in HYP with four parameters: boot HYP pgd, runtime HYP pgd,
|
||||
* runtime stack, runtime vectors
|
||||
* - Enable the MMU with the boot pgd
|
||||
* - Jump to a target into the trampoline page (remember, this is the same
|
||||
* physical page!)
|
||||
* - Now switch to the runtime pgd (same VA, and still the same physical
|
||||
* page!)
|
||||
* - We jump in HYP with 3 parameters: runtime HYP pgd, runtime stack,
|
||||
* runtime vectors
|
||||
* - Invalidate TLBs
|
||||
* - Set stack and vectors
|
||||
* - Setup the page tables
|
||||
* - Enable the MMU
|
||||
* - Profit! (or eret, if you only care about the code).
|
||||
*
|
||||
* As we only have four registers available to pass parameters (and we
|
||||
* need six), we split the init in two phases:
|
||||
* - Phase 1: r0 = 0, r1 = 0, r2,r3 contain the boot PGD.
|
||||
* Provides the basic HYP init, and enable the MMU.
|
||||
* - Phase 2: r0 = ToS, r1 = vectors, r2,r3 contain the runtime PGD.
|
||||
* Switches to the runtime PGD, set stack and vectors.
|
||||
*/
|
||||
|
||||
.text
|
||||
|
@ -68,8 +58,11 @@ __kvm_hyp_init:
|
|||
W(b) .
|
||||
|
||||
__do_hyp_init:
|
||||
cmp r0, #0 @ We have a SP?
|
||||
bne phase2 @ Yes, second stage init
|
||||
@ Set stack pointer
|
||||
mov sp, r0
|
||||
|
||||
@ Set HVBAR to point to the HYP vectors
|
||||
mcr p15, 4, r1, c12, c0, 0 @ HVBAR
|
||||
|
||||
@ Set the HTTBR to point to the hypervisor PGD pointer passed
|
||||
mcrr p15, 4, rr_lo_hi(r2, r3), c2
|
||||
|
@ -114,34 +107,25 @@ __do_hyp_init:
|
|||
THUMB( ldr r2, =(HSCTLR_M | HSCTLR_A | HSCTLR_TE) )
|
||||
orr r1, r1, r2
|
||||
orr r0, r0, r1
|
||||
isb
|
||||
mcr p15, 4, r0, c1, c0, 0 @ HSCR
|
||||
|
||||
@ End of init phase-1
|
||||
eret
|
||||
|
||||
phase2:
|
||||
@ Set stack pointer
|
||||
mov sp, r0
|
||||
|
||||
@ Set HVBAR to point to the HYP vectors
|
||||
mcr p15, 4, r1, c12, c0, 0 @ HVBAR
|
||||
|
||||
@ Jump to the trampoline page
|
||||
ldr r0, =TRAMPOLINE_VA
|
||||
adr r1, target
|
||||
bfi r0, r1, #0, #PAGE_SHIFT
|
||||
ret r0
|
||||
|
||||
target: @ We're now in the trampoline code, switch page tables
|
||||
mcrr p15, 4, rr_lo_hi(r2, r3), c2
|
||||
isb
|
||||
|
||||
@ Invalidate the old TLBs
|
||||
mcr p15, 4, r0, c8, c7, 0 @ TLBIALLH
|
||||
dsb ish
|
||||
eret
|
||||
|
||||
@ r0 : stub vectors address
|
||||
ENTRY(__kvm_hyp_reset)
|
||||
/* We're now in idmap, disable MMU */
|
||||
mrc p15, 4, r1, c1, c0, 0 @ HSCTLR
|
||||
ldr r2, =(HSCTLR_M | HSCTLR_A | HSCTLR_C | HSCTLR_I)
|
||||
bic r1, r1, r2
|
||||
mcr p15, 4, r1, c1, c0, 0 @ HSCTLR
|
||||
|
||||
/* Install stub vectors */
|
||||
mcr p15, 4, r0, c12, c0, 0 @ HVBAR
|
||||
isb
|
||||
|
||||
eret
|
||||
ENDPROC(__kvm_hyp_reset)
|
||||
|
||||
.ltorg
|
||||
|
||||
|
|
|
@ -32,8 +32,6 @@
|
|||
|
||||
#include "trace.h"
|
||||
|
||||
extern char __hyp_idmap_text_start[], __hyp_idmap_text_end[];
|
||||
|
||||
static pgd_t *boot_hyp_pgd;
|
||||
static pgd_t *hyp_pgd;
|
||||
static pgd_t *merged_hyp_pgd;
|
||||
|
@ -483,28 +481,6 @@ static void unmap_hyp_range(pgd_t *pgdp, phys_addr_t start, u64 size)
|
|||
} while (pgd++, addr = next, addr != end);
|
||||
}
|
||||
|
||||
/**
|
||||
* free_boot_hyp_pgd - free HYP boot page tables
|
||||
*
|
||||
* Free the HYP boot page tables. The bounce page is also freed.
|
||||
*/
|
||||
void free_boot_hyp_pgd(void)
|
||||
{
|
||||
mutex_lock(&kvm_hyp_pgd_mutex);
|
||||
|
||||
if (boot_hyp_pgd) {
|
||||
unmap_hyp_range(boot_hyp_pgd, hyp_idmap_start, PAGE_SIZE);
|
||||
unmap_hyp_range(boot_hyp_pgd, TRAMPOLINE_VA, PAGE_SIZE);
|
||||
free_pages((unsigned long)boot_hyp_pgd, hyp_pgd_order);
|
||||
boot_hyp_pgd = NULL;
|
||||
}
|
||||
|
||||
if (hyp_pgd)
|
||||
unmap_hyp_range(hyp_pgd, TRAMPOLINE_VA, PAGE_SIZE);
|
||||
|
||||
mutex_unlock(&kvm_hyp_pgd_mutex);
|
||||
}
|
||||
|
||||
/**
|
||||
* free_hyp_pgds - free Hyp-mode page tables
|
||||
*
|
||||
|
@ -519,15 +495,20 @@ void free_hyp_pgds(void)
|
|||
{
|
||||
unsigned long addr;
|
||||
|
||||
free_boot_hyp_pgd();
|
||||
|
||||
mutex_lock(&kvm_hyp_pgd_mutex);
|
||||
|
||||
if (boot_hyp_pgd) {
|
||||
unmap_hyp_range(boot_hyp_pgd, hyp_idmap_start, PAGE_SIZE);
|
||||
free_pages((unsigned long)boot_hyp_pgd, hyp_pgd_order);
|
||||
boot_hyp_pgd = NULL;
|
||||
}
|
||||
|
||||
if (hyp_pgd) {
|
||||
unmap_hyp_range(hyp_pgd, hyp_idmap_start, PAGE_SIZE);
|
||||
for (addr = PAGE_OFFSET; virt_addr_valid(addr); addr += PGDIR_SIZE)
|
||||
unmap_hyp_range(hyp_pgd, KERN_TO_HYP(addr), PGDIR_SIZE);
|
||||
unmap_hyp_range(hyp_pgd, kern_hyp_va(addr), PGDIR_SIZE);
|
||||
for (addr = VMALLOC_START; is_vmalloc_addr((void*)addr); addr += PGDIR_SIZE)
|
||||
unmap_hyp_range(hyp_pgd, KERN_TO_HYP(addr), PGDIR_SIZE);
|
||||
unmap_hyp_range(hyp_pgd, kern_hyp_va(addr), PGDIR_SIZE);
|
||||
|
||||
free_pages((unsigned long)hyp_pgd, hyp_pgd_order);
|
||||
hyp_pgd = NULL;
|
||||
|
@ -679,17 +660,18 @@ static phys_addr_t kvm_kaddr_to_phys(void *kaddr)
|
|||
* create_hyp_mappings - duplicate a kernel virtual address range in Hyp mode
|
||||
* @from: The virtual kernel start address of the range
|
||||
* @to: The virtual kernel end address of the range (exclusive)
|
||||
* @prot: The protection to be applied to this range
|
||||
*
|
||||
* The same virtual address as the kernel virtual address is also used
|
||||
* in Hyp-mode mapping (modulo HYP_PAGE_OFFSET) to the same underlying
|
||||
* physical pages.
|
||||
*/
|
||||
int create_hyp_mappings(void *from, void *to)
|
||||
int create_hyp_mappings(void *from, void *to, pgprot_t prot)
|
||||
{
|
||||
phys_addr_t phys_addr;
|
||||
unsigned long virt_addr;
|
||||
unsigned long start = KERN_TO_HYP((unsigned long)from);
|
||||
unsigned long end = KERN_TO_HYP((unsigned long)to);
|
||||
unsigned long start = kern_hyp_va((unsigned long)from);
|
||||
unsigned long end = kern_hyp_va((unsigned long)to);
|
||||
|
||||
if (is_kernel_in_hyp_mode())
|
||||
return 0;
|
||||
|
@ -704,7 +686,7 @@ int create_hyp_mappings(void *from, void *to)
|
|||
err = __create_hyp_mappings(hyp_pgd, virt_addr,
|
||||
virt_addr + PAGE_SIZE,
|
||||
__phys_to_pfn(phys_addr),
|
||||
PAGE_HYP);
|
||||
prot);
|
||||
if (err)
|
||||
return err;
|
||||
}
|
||||
|
@ -723,8 +705,8 @@ int create_hyp_mappings(void *from, void *to)
|
|||
*/
|
||||
int create_hyp_io_mappings(void *from, void *to, phys_addr_t phys_addr)
|
||||
{
|
||||
unsigned long start = KERN_TO_HYP((unsigned long)from);
|
||||
unsigned long end = KERN_TO_HYP((unsigned long)to);
|
||||
unsigned long start = kern_hyp_va((unsigned long)from);
|
||||
unsigned long end = kern_hyp_va((unsigned long)to);
|
||||
|
||||
if (is_kernel_in_hyp_mode())
|
||||
return 0;
|
||||
|
@ -1687,14 +1669,6 @@ phys_addr_t kvm_mmu_get_httbr(void)
|
|||
return virt_to_phys(hyp_pgd);
|
||||
}
|
||||
|
||||
phys_addr_t kvm_mmu_get_boot_httbr(void)
|
||||
{
|
||||
if (__kvm_cpu_uses_extended_idmap())
|
||||
return virt_to_phys(merged_hyp_pgd);
|
||||
else
|
||||
return virt_to_phys(boot_hyp_pgd);
|
||||
}
|
||||
|
||||
phys_addr_t kvm_get_idmap_vector(void)
|
||||
{
|
||||
return hyp_idmap_vector;
|
||||
|
@ -1705,6 +1679,22 @@ phys_addr_t kvm_get_idmap_start(void)
|
|||
return hyp_idmap_start;
|
||||
}
|
||||
|
||||
static int kvm_map_idmap_text(pgd_t *pgd)
|
||||
{
|
||||
int err;
|
||||
|
||||
/* Create the idmap in the boot page tables */
|
||||
err = __create_hyp_mappings(pgd,
|
||||
hyp_idmap_start, hyp_idmap_end,
|
||||
__phys_to_pfn(hyp_idmap_start),
|
||||
PAGE_HYP_EXEC);
|
||||
if (err)
|
||||
kvm_err("Failed to idmap %lx-%lx\n",
|
||||
hyp_idmap_start, hyp_idmap_end);
|
||||
|
||||
return err;
|
||||
}
|
||||
|
||||
int kvm_mmu_init(void)
|
||||
{
|
||||
int err;
|
||||
|
@ -1719,28 +1709,41 @@ int kvm_mmu_init(void)
|
|||
*/
|
||||
BUG_ON((hyp_idmap_start ^ (hyp_idmap_end - 1)) & PAGE_MASK);
|
||||
|
||||
hyp_pgd = (pgd_t *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, hyp_pgd_order);
|
||||
boot_hyp_pgd = (pgd_t *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, hyp_pgd_order);
|
||||
kvm_info("IDMAP page: %lx\n", hyp_idmap_start);
|
||||
kvm_info("HYP VA range: %lx:%lx\n",
|
||||
kern_hyp_va(PAGE_OFFSET), kern_hyp_va(~0UL));
|
||||
|
||||
if (!hyp_pgd || !boot_hyp_pgd) {
|
||||
if (hyp_idmap_start >= kern_hyp_va(PAGE_OFFSET) &&
|
||||
hyp_idmap_start < kern_hyp_va(~0UL)) {
|
||||
/*
|
||||
* The idmap page is intersecting with the VA space,
|
||||
* it is not safe to continue further.
|
||||
*/
|
||||
kvm_err("IDMAP intersecting with HYP VA, unable to continue\n");
|
||||
err = -EINVAL;
|
||||
goto out;
|
||||
}
|
||||
|
||||
hyp_pgd = (pgd_t *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, hyp_pgd_order);
|
||||
if (!hyp_pgd) {
|
||||
kvm_err("Hyp mode PGD not allocated\n");
|
||||
err = -ENOMEM;
|
||||
goto out;
|
||||
}
|
||||
|
||||
/* Create the idmap in the boot page tables */
|
||||
err = __create_hyp_mappings(boot_hyp_pgd,
|
||||
hyp_idmap_start, hyp_idmap_end,
|
||||
__phys_to_pfn(hyp_idmap_start),
|
||||
PAGE_HYP);
|
||||
|
||||
if (err) {
|
||||
kvm_err("Failed to idmap %lx-%lx\n",
|
||||
hyp_idmap_start, hyp_idmap_end);
|
||||
goto out;
|
||||
}
|
||||
|
||||
if (__kvm_cpu_uses_extended_idmap()) {
|
||||
boot_hyp_pgd = (pgd_t *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
|
||||
hyp_pgd_order);
|
||||
if (!boot_hyp_pgd) {
|
||||
kvm_err("Hyp boot PGD not allocated\n");
|
||||
err = -ENOMEM;
|
||||
goto out;
|
||||
}
|
||||
|
||||
err = kvm_map_idmap_text(boot_hyp_pgd);
|
||||
if (err)
|
||||
goto out;
|
||||
|
||||
merged_hyp_pgd = (pgd_t *)__get_free_page(GFP_KERNEL | __GFP_ZERO);
|
||||
if (!merged_hyp_pgd) {
|
||||
kvm_err("Failed to allocate extra HYP pgd\n");
|
||||
|
@ -1748,29 +1751,10 @@ int kvm_mmu_init(void)
|
|||
}
|
||||
__kvm_extend_hypmap(boot_hyp_pgd, hyp_pgd, merged_hyp_pgd,
|
||||
hyp_idmap_start);
|
||||
return 0;
|
||||
}
|
||||
|
||||
/* Map the very same page at the trampoline VA */
|
||||
err = __create_hyp_mappings(boot_hyp_pgd,
|
||||
TRAMPOLINE_VA, TRAMPOLINE_VA + PAGE_SIZE,
|
||||
__phys_to_pfn(hyp_idmap_start),
|
||||
PAGE_HYP);
|
||||
if (err) {
|
||||
kvm_err("Failed to map trampoline @%lx into boot HYP pgd\n",
|
||||
TRAMPOLINE_VA);
|
||||
goto out;
|
||||
}
|
||||
|
||||
/* Map the same page again into the runtime page tables */
|
||||
err = __create_hyp_mappings(hyp_pgd,
|
||||
TRAMPOLINE_VA, TRAMPOLINE_VA + PAGE_SIZE,
|
||||
__phys_to_pfn(hyp_idmap_start),
|
||||
PAGE_HYP);
|
||||
if (err) {
|
||||
kvm_err("Failed to map trampoline @%lx into runtime HYP pgd\n",
|
||||
TRAMPOLINE_VA);
|
||||
goto out;
|
||||
} else {
|
||||
err = kvm_map_idmap_text(hyp_pgd);
|
||||
if (err)
|
||||
goto out;
|
||||
}
|
||||
|
||||
return 0;
|
||||
|
|
|
@ -52,7 +52,7 @@ static const struct kvm_irq_level cortexa_vtimer_irq = {
|
|||
* @vcpu: The VCPU pointer
|
||||
*
|
||||
* This function finds the right table above and sets the registers on the
|
||||
* virtual CPU struct to their architectually defined reset values.
|
||||
* virtual CPU struct to their architecturally defined reset values.
|
||||
*/
|
||||
int kvm_reset_vcpu(struct kvm_vcpu *vcpu)
|
||||
{
|
||||
|
|
|
@ -3,6 +3,7 @@ config ARM64
|
|||
select ACPI_CCA_REQUIRED if ACPI
|
||||
select ACPI_GENERIC_GSI if ACPI
|
||||
select ACPI_REDUCED_HARDWARE_ONLY if ACPI
|
||||
select ACPI_MCFG if ACPI
|
||||
select ARCH_HAS_DEVMEM_IS_ALLOWED
|
||||
select ARCH_HAS_ACPI_TABLE_UPGRADE if ACPI
|
||||
select ARCH_HAS_ATOMIC64_DEC_IF_POSITIVE
|
||||
|
@ -22,9 +23,9 @@ config ARM64
|
|||
select ARM_ARCH_TIMER
|
||||
select ARM_GIC
|
||||
select AUDIT_ARCH_COMPAT_GENERIC
|
||||
select ARM_GIC_V2M if PCI_MSI
|
||||
select ARM_GIC_V2M if PCI
|
||||
select ARM_GIC_V3
|
||||
select ARM_GIC_V3_ITS if PCI_MSI
|
||||
select ARM_GIC_V3_ITS if PCI
|
||||
select ARM_PSCI_FW
|
||||
select BUILDTIME_EXTABLE_SORT
|
||||
select CLONE_BACKWARDS
|
||||
|
@ -78,6 +79,7 @@ config ARM64
|
|||
select HAVE_FTRACE_MCOUNT_RECORD
|
||||
select HAVE_FUNCTION_TRACER
|
||||
select HAVE_FUNCTION_GRAPH_TRACER
|
||||
select HAVE_GCC_PLUGINS
|
||||
select HAVE_GENERIC_DMA_COHERENT
|
||||
select HAVE_HW_BREAKPOINT if PERF_EVENTS
|
||||
select HAVE_IRQ_TIME_ACCOUNTING
|
||||
|
@ -101,6 +103,7 @@ config ARM64
|
|||
select OF_EARLY_FLATTREE
|
||||
select OF_NUMA if NUMA && OF
|
||||
select OF_RESERVED_MEM
|
||||
select PCI_ECAM if ACPI
|
||||
select PERF_USE_VMALLOC
|
||||
select POWER_RESET
|
||||
select POWER_SUPPLY
|
||||
|
|
|
@ -76,3 +76,8 @@ &uart0 {
|
|||
&usb3 {
|
||||
status = "okay";
|
||||
};
|
||||
|
||||
/* CON17 (PCIe) / CON12 (mini-PCIe) */
|
||||
&pcie0 {
|
||||
status = "okay";
|
||||
};
|
||||
|
|
|
@ -176,5 +176,30 @@ gic: interrupt-controller@1d00000 {
|
|||
<0x1d40000 0x40000>; /* GICR */
|
||||
};
|
||||
};
|
||||
|
||||
pcie0: pcie@d0070000 {
|
||||
compatible = "marvell,armada-3700-pcie";
|
||||
device_type = "pci";
|
||||
status = "disabled";
|
||||
reg = <0 0xd0070000 0 0x20000>;
|
||||
#address-cells = <3>;
|
||||
#size-cells = <2>;
|
||||
bus-range = <0x00 0xff>;
|
||||
interrupts = <GIC_SPI 29 IRQ_TYPE_LEVEL_HIGH>;
|
||||
#interrupt-cells = <1>;
|
||||
msi-parent = <&pcie0>;
|
||||
msi-controller;
|
||||
ranges = <0x82000000 0 0xe8000000 0 0xe8000000 0 0x1000000 /* Port 0 MEM */
|
||||
0x81000000 0 0xe9000000 0 0xe9000000 0 0x10000>; /* Port 0 IO*/
|
||||
interrupt-map-mask = <0 0 0 7>;
|
||||
interrupt-map = <0 0 0 1 &pcie_intc 0>,
|
||||
<0 0 0 2 &pcie_intc 1>,
|
||||
<0 0 0 3 &pcie_intc 2>,
|
||||
<0 0 0 4 &pcie_intc 3>;
|
||||
pcie_intc: interrupt-controller {
|
||||
interrupt-controller;
|
||||
#interrupt-cells = <1>;
|
||||
};
|
||||
};
|
||||
};
|
||||
};
|
||||
|
|
|
@ -36,8 +36,9 @@
|
|||
#define ARM64_HAS_VIRT_HOST_EXTN 11
|
||||
#define ARM64_WORKAROUND_CAVIUM_27456 12
|
||||
#define ARM64_HAS_32BIT_EL0 13
|
||||
#define ARM64_HYP_OFFSET_LOW 14
|
||||
|
||||
#define ARM64_NCAPS 14
|
||||
#define ARM64_NCAPS 15
|
||||
|
||||
#ifndef __ASSEMBLY__
|
||||
|
||||
|
|
|
@ -178,7 +178,7 @@
|
|||
/* Hyp System Trap Register */
|
||||
#define HSTR_EL2_T(x) (1 << x)
|
||||
|
||||
/* Hyp Coproccessor Trap Register Shifts */
|
||||
/* Hyp Coprocessor Trap Register Shifts */
|
||||
#define CPTR_EL2_TFP_SHIFT 10
|
||||
|
||||
/* Hyp Coprocessor Trap Register */
|
||||
|
|
|
@ -47,8 +47,7 @@
|
|||
|
||||
int __attribute_const__ kvm_target_cpu(void);
|
||||
int kvm_reset_vcpu(struct kvm_vcpu *vcpu);
|
||||
int kvm_arch_dev_ioctl_check_extension(long ext);
|
||||
unsigned long kvm_hyp_reset_entry(void);
|
||||
int kvm_arch_dev_ioctl_check_extension(struct kvm *kvm, long ext);
|
||||
void __extended_idmap_trampoline(phys_addr_t boot_pgd, phys_addr_t idmap_start);
|
||||
|
||||
struct kvm_arch {
|
||||
|
@ -348,8 +347,7 @@ int kvm_perf_teardown(void);
|
|||
|
||||
struct kvm_vcpu *kvm_mpidr_to_vcpu(struct kvm *kvm, unsigned long mpidr);
|
||||
|
||||
static inline void __cpu_init_hyp_mode(phys_addr_t boot_pgd_ptr,
|
||||
phys_addr_t pgd_ptr,
|
||||
static inline void __cpu_init_hyp_mode(phys_addr_t pgd_ptr,
|
||||
unsigned long hyp_stack_ptr,
|
||||
unsigned long vector_ptr)
|
||||
{
|
||||
|
@ -357,19 +355,14 @@ static inline void __cpu_init_hyp_mode(phys_addr_t boot_pgd_ptr,
|
|||
* Call initialization code, and switch to the full blown
|
||||
* HYP code.
|
||||
*/
|
||||
__kvm_call_hyp((void *)boot_pgd_ptr, pgd_ptr,
|
||||
hyp_stack_ptr, vector_ptr);
|
||||
__kvm_call_hyp((void *)pgd_ptr, hyp_stack_ptr, vector_ptr);
|
||||
}
|
||||
|
||||
static inline void __cpu_reset_hyp_mode(phys_addr_t boot_pgd_ptr,
|
||||
void __kvm_hyp_teardown(void);
|
||||
static inline void __cpu_reset_hyp_mode(unsigned long vector_ptr,
|
||||
phys_addr_t phys_idmap_start)
|
||||
{
|
||||
/*
|
||||
* Call reset code, and switch back to stub hyp vectors.
|
||||
* Uses __kvm_call_hyp() to avoid kaslr's kvm_ksym_ref() translation.
|
||||
*/
|
||||
__kvm_call_hyp((void *)kvm_hyp_reset_entry(),
|
||||
boot_pgd_ptr, phys_idmap_start);
|
||||
kvm_call_hyp(__kvm_hyp_teardown, phys_idmap_start);
|
||||
}
|
||||
|
||||
static inline void kvm_arch_hardware_unsetup(void) {}
|
||||
|
|
|
@ -25,29 +25,6 @@
|
|||
|
||||
#define __hyp_text __section(.hyp.text) notrace
|
||||
|
||||
static inline unsigned long __kern_hyp_va(unsigned long v)
|
||||
{
|
||||
asm volatile(ALTERNATIVE("and %0, %0, %1",
|
||||
"nop",
|
||||
ARM64_HAS_VIRT_HOST_EXTN)
|
||||
: "+r" (v) : "i" (HYP_PAGE_OFFSET_MASK));
|
||||
return v;
|
||||
}
|
||||
|
||||
#define kern_hyp_va(v) (typeof(v))(__kern_hyp_va((unsigned long)(v)))
|
||||
|
||||
static inline unsigned long __hyp_kern_va(unsigned long v)
|
||||
{
|
||||
u64 offset = PAGE_OFFSET - HYP_PAGE_OFFSET;
|
||||
asm volatile(ALTERNATIVE("add %0, %0, %1",
|
||||
"nop",
|
||||
ARM64_HAS_VIRT_HOST_EXTN)
|
||||
: "+r" (v) : "r" (offset));
|
||||
return v;
|
||||
}
|
||||
|
||||
#define hyp_kern_va(v) (typeof(v))(__hyp_kern_va((unsigned long)(v)))
|
||||
|
||||
#define read_sysreg_elx(r,nvh,vh) \
|
||||
({ \
|
||||
u64 reg; \
|
||||
|
|
|
@ -29,21 +29,48 @@
|
|||
*
|
||||
* Instead, give the HYP mode its own VA region at a fixed offset from
|
||||
* the kernel by just masking the top bits (which are all ones for a
|
||||
* kernel address).
|
||||
* kernel address). We need to find out how many bits to mask.
|
||||
*
|
||||
* ARMv8.1 (using VHE) does have a TTBR1_EL2, and doesn't use these
|
||||
* macros (the entire kernel runs at EL2).
|
||||
* We want to build a set of page tables that cover both parts of the
|
||||
* idmap (the trampoline page used to initialize EL2), and our normal
|
||||
* runtime VA space, at the same time.
|
||||
*
|
||||
* Given that the kernel uses VA_BITS for its entire address space,
|
||||
* and that half of that space (VA_BITS - 1) is used for the linear
|
||||
* mapping, we can also limit the EL2 space to (VA_BITS - 1).
|
||||
*
|
||||
* The main question is "Within the VA_BITS space, does EL2 use the
|
||||
* top or the bottom half of that space to shadow the kernel's linear
|
||||
* mapping?". As we need to idmap the trampoline page, this is
|
||||
* determined by the range in which this page lives.
|
||||
*
|
||||
* If the page is in the bottom half, we have to use the top half. If
|
||||
* the page is in the top half, we have to use the bottom half:
|
||||
*
|
||||
* T = __virt_to_phys(__hyp_idmap_text_start)
|
||||
* if (T & BIT(VA_BITS - 1))
|
||||
* HYP_VA_MIN = 0 //idmap in upper half
|
||||
* else
|
||||
* HYP_VA_MIN = 1 << (VA_BITS - 1)
|
||||
* HYP_VA_MAX = HYP_VA_MIN + (1 << (VA_BITS - 1)) - 1
|
||||
*
|
||||
* This of course assumes that the trampoline page exists within the
|
||||
* VA_BITS range. If it doesn't, then it means we're in the odd case
|
||||
* where the kernel idmap (as well as HYP) uses more levels than the
|
||||
* kernel runtime page tables (as seen when the kernel is configured
|
||||
* for 4k pages, 39bits VA, and yet memory lives just above that
|
||||
* limit, forcing the idmap to use 4 levels of page tables while the
|
||||
* kernel itself only uses 3). In this particular case, it doesn't
|
||||
* matter which side of VA_BITS we use, as we're guaranteed not to
|
||||
* conflict with anything.
|
||||
*
|
||||
* When using VHE, there are no separate hyp mappings and all KVM
|
||||
* functionality is already mapped as part of the main kernel
|
||||
* mappings, and none of this applies in that case.
|
||||
*/
|
||||
#define HYP_PAGE_OFFSET_SHIFT VA_BITS
|
||||
#define HYP_PAGE_OFFSET_MASK ((UL(1) << HYP_PAGE_OFFSET_SHIFT) - 1)
|
||||
#define HYP_PAGE_OFFSET (PAGE_OFFSET & HYP_PAGE_OFFSET_MASK)
|
||||
|
||||
/*
|
||||
* Our virtual mapping for the idmap-ed MMU-enable code. Must be
|
||||
* shared across all the page-tables. Conveniently, we use the last
|
||||
* possible page, where no kernel mapping will ever exist.
|
||||
*/
|
||||
#define TRAMPOLINE_VA (HYP_PAGE_OFFSET_MASK & PAGE_MASK)
|
||||
#define HYP_PAGE_OFFSET_HIGH_MASK ((UL(1) << VA_BITS) - 1)
|
||||
#define HYP_PAGE_OFFSET_LOW_MASK ((UL(1) << (VA_BITS - 1)) - 1)
|
||||
|
||||
#ifdef __ASSEMBLY__
|
||||
|
||||
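The comment in the hunk above describes how the reworked HYP VA scheme picks whichever half of the VA_BITS space does not collide with the idmap, and defines the corresponding high/low masks. As a rough standalone sketch of that selection (illustrative only: VA_BITS = 39 and the sample __hyp_idmap_text_start physical address are assumed values, not taken from this diff):

#include <stdint.h>
#include <stdio.h>

#define VA_BITS 39	/* assumed kernel configuration */

int main(void)
{
	/* assumed physical address of __hyp_idmap_text_start */
	uint64_t idmap = 0x40a0000000ULL;
	uint64_t high_mask = (1ULL << VA_BITS) - 1;		/* HYP_PAGE_OFFSET_HIGH_MASK */
	uint64_t low_mask  = (1ULL << (VA_BITS - 1)) - 1;	/* HYP_PAGE_OFFSET_LOW_MASK */
	uint64_t hyp_va_min, hyp_va_max;

	if (idmap & (1ULL << (VA_BITS - 1)))
		hyp_va_min = 0;				/* idmap in upper half */
	else
		hyp_va_min = 1ULL << (VA_BITS - 1);	/* idmap in lower half */
	hyp_va_max = hyp_va_min + (1ULL << (VA_BITS - 1)) - 1;

	printf("high mask 0x%jx, low mask 0x%jx\n",
	       (uintmax_t)high_mask, (uintmax_t)low_mask);
	printf("HYP VA range 0x%jx - 0x%jx\n",
	       (uintmax_t)hyp_va_min, (uintmax_t)hyp_va_max);
	return 0;
}

Masking a kernel VA with the high mask, and additionally with the low mask when the lower half is chosen, is exactly the and/and, and/nop, nop/nop sequence that the kern_hyp_va helpers in the following hunks emit.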
|
@ -53,13 +80,33 @@
|
|||
/*
|
||||
* Convert a kernel VA into a HYP VA.
|
||||
* reg: VA to be converted.
|
||||
*
|
||||
* This generates the following sequences:
|
||||
* - High mask:
|
||||
* and x0, x0, #HYP_PAGE_OFFSET_HIGH_MASK
|
||||
* nop
|
||||
* - Low mask:
|
||||
* and x0, x0, #HYP_PAGE_OFFSET_HIGH_MASK
|
||||
* and x0, x0, #HYP_PAGE_OFFSET_LOW_MASK
|
||||
* - VHE:
|
||||
* nop
|
||||
* nop
|
||||
*
|
||||
* The "low mask" version works because the mask is a strict subset of
|
||||
* the "high mask", hence performing the first mask for nothing.
|
||||
* Should be completely invisible on any viable CPU.
|
||||
*/
|
||||
.macro kern_hyp_va reg
|
||||
alternative_if_not ARM64_HAS_VIRT_HOST_EXTN
|
||||
and \reg, \reg, #HYP_PAGE_OFFSET_MASK
|
||||
and \reg, \reg, #HYP_PAGE_OFFSET_HIGH_MASK
|
||||
alternative_else
|
||||
nop
|
||||
alternative_endif
|
||||
alternative_if_not ARM64_HYP_OFFSET_LOW
|
||||
nop
|
||||
alternative_else
|
||||
and \reg, \reg, #HYP_PAGE_OFFSET_LOW_MASK
|
||||
alternative_endif
|
||||
.endm
|
||||
|
||||
#else
|
||||
|
@ -70,7 +117,22 @@ alternative_endif
|
|||
#include <asm/mmu_context.h>
|
||||
#include <asm/pgtable.h>
|
||||
|
||||
#define KERN_TO_HYP(kva) ((unsigned long)kva - PAGE_OFFSET + HYP_PAGE_OFFSET)
|
||||
static inline unsigned long __kern_hyp_va(unsigned long v)
|
||||
{
|
||||
asm volatile(ALTERNATIVE("and %0, %0, %1",
|
||||
"nop",
|
||||
ARM64_HAS_VIRT_HOST_EXTN)
|
||||
: "+r" (v)
|
||||
: "i" (HYP_PAGE_OFFSET_HIGH_MASK));
|
||||
asm volatile(ALTERNATIVE("nop",
|
||||
"and %0, %0, %1",
|
||||
ARM64_HYP_OFFSET_LOW)
|
||||
: "+r" (v)
|
||||
: "i" (HYP_PAGE_OFFSET_LOW_MASK));
|
||||
return v;
|
||||
}
|
||||
|
||||
#define kern_hyp_va(v) (typeof(v))(__kern_hyp_va((unsigned long)(v)))
|
||||
|
||||
/*
|
||||
* We currently only support a 40bit IPA.
|
||||
|
@ -81,9 +143,8 @@ alternative_endif
|
|||
|
||||
#include <asm/stage2_pgtable.h>
|
||||
|
||||
int create_hyp_mappings(void *from, void *to);
|
||||
int create_hyp_mappings(void *from, void *to, pgprot_t prot);
|
||||
int create_hyp_io_mappings(void *from, void *to, phys_addr_t);
|
||||
void free_boot_hyp_pgd(void);
|
||||
void free_hyp_pgds(void);
|
||||
|
||||
void stage2_unmap_vm(struct kvm *kvm);
|
||||
|
@ -97,7 +158,6 @@ int kvm_handle_guest_abort(struct kvm_vcpu *vcpu, struct kvm_run *run);
|
|||
void kvm_mmu_free_memory_caches(struct kvm_vcpu *vcpu);
|
||||
|
||||
phys_addr_t kvm_mmu_get_httbr(void);
|
||||
phys_addr_t kvm_mmu_get_boot_httbr(void);
|
||||
phys_addr_t kvm_get_idmap_vector(void);
|
||||
phys_addr_t kvm_get_idmap_start(void);
|
||||
int kvm_mmu_init(void);
|
||||
|
|
|
@ -164,6 +164,7 @@
|
|||
#define PTE_CONT (_AT(pteval_t, 1) << 52) /* Contiguous range */
|
||||
#define PTE_PXN (_AT(pteval_t, 1) << 53) /* Privileged XN */
|
||||
#define PTE_UXN (_AT(pteval_t, 1) << 54) /* User XN */
|
||||
#define PTE_HYP_XN (_AT(pteval_t, 1) << 54) /* HYP XN */
|
||||
|
||||
/*
|
||||
* AttrIndx[2:0] encoding (mapping attributes defined in the MAIR* registers).
|
||||
|
|
|
@ -55,7 +55,9 @@
|
|||
#define PAGE_KERNEL_EXEC __pgprot(_PAGE_DEFAULT | PTE_UXN | PTE_DIRTY | PTE_WRITE)
|
||||
#define PAGE_KERNEL_EXEC_CONT __pgprot(_PAGE_DEFAULT | PTE_UXN | PTE_DIRTY | PTE_WRITE | PTE_CONT)
|
||||
|
||||
#define PAGE_HYP __pgprot(_PAGE_DEFAULT | PTE_HYP)
|
||||
#define PAGE_HYP __pgprot(_PAGE_DEFAULT | PTE_HYP | PTE_HYP_XN)
|
||||
#define PAGE_HYP_EXEC __pgprot(_PAGE_DEFAULT | PTE_HYP | PTE_RDONLY)
|
||||
#define PAGE_HYP_RO __pgprot(_PAGE_DEFAULT | PTE_HYP | PTE_RDONLY | PTE_HYP_XN)
|
||||
#define PAGE_HYP_DEVICE __pgprot(PROT_DEVICE_nGnRE | PTE_HYP)
|
||||
|
||||
#define PAGE_S2 __pgprot(PROT_DEFAULT | PTE_S2_MEMATTR(MT_S2_NORMAL) | PTE_S2_RDONLY)
|
||||
|
|
|
@ -87,6 +87,10 @@ extern void verify_cpu_run_el(void);
|
|||
static inline void verify_cpu_run_el(void) {}
|
||||
#endif
|
||||
|
||||
/* The section containing the hypervisor idmap text */
|
||||
extern char __hyp_idmap_text_start[];
|
||||
extern char __hyp_idmap_text_end[];
|
||||
|
||||
/* The section containing the hypervisor text */
|
||||
extern char __hyp_text_start[];
|
||||
extern char __hyp_text_end[];
|
||||
|
|
|
@ -87,9 +87,11 @@ struct kvm_regs {
|
|||
/* Supported VGICv3 address types */
|
||||
#define KVM_VGIC_V3_ADDR_TYPE_DIST 2
|
||||
#define KVM_VGIC_V3_ADDR_TYPE_REDIST 3
|
||||
#define KVM_VGIC_ITS_ADDR_TYPE 4
|
||||
|
||||
#define KVM_VGIC_V3_DIST_SIZE SZ_64K
|
||||
#define KVM_VGIC_V3_REDIST_SIZE (2 * SZ_64K)
|
||||
#define KVM_VGIC_V3_ITS_SIZE (2 * SZ_64K)
|
||||
|
||||
#define KVM_ARM_VCPU_POWER_OFF 0 /* CPU is started in OFF state */
|
||||
#define KVM_ARM_VCPU_EL1_32BIT 1 /* CPU running a 32bit VM */
|
||||
|
|
|
@ -726,6 +726,19 @@ static bool runs_at_el2(const struct arm64_cpu_capabilities *entry, int __unused
|
|||
return is_kernel_in_hyp_mode();
|
||||
}
|
||||
|
||||
static bool hyp_offset_low(const struct arm64_cpu_capabilities *entry,
|
||||
int __unused)
|
||||
{
|
||||
phys_addr_t idmap_addr = virt_to_phys(__hyp_idmap_text_start);
|
||||
|
||||
/*
|
||||
* Activate the lower HYP offset only if:
|
||||
* - the idmap doesn't clash with it,
|
||||
* - the kernel is not running at EL2.
|
||||
*/
|
||||
return idmap_addr > GENMASK(VA_BITS - 2, 0) && !is_kernel_in_hyp_mode();
|
||||
}
|
||||
|
||||
static const struct arm64_cpu_capabilities arm64_features[] = {
|
||||
{
|
||||
.desc = "GIC system register CPU interface",
|
||||
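An illustrative reading of the hyp_offset_low() check above (assuming a 39-bit VA configuration, which this hunk does not state): GENMASK(VA_BITS - 2, 0) evaluates to GENMASK(37, 0) = 0x3fffffffff, so the lower HYP offset is selected only when __hyp_idmap_text_start sits above the first 256GB of physical address space and the kernel is not already running at EL2.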
|
@ -803,6 +816,12 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
|
|||
.field_pos = ID_AA64PFR0_EL0_SHIFT,
|
||||
.min_field_value = ID_AA64PFR0_EL0_32BIT_64BIT,
|
||||
},
|
||||
{
|
||||
.desc = "Reduced HYP mapping offset",
|
||||
.capability = ARM64_HYP_OFFSET_LOW,
|
||||
.def_scope = SCOPE_SYSTEM,
|
||||
.matches = hyp_offset_low,
|
||||
},
|
||||
{},
|
||||
};
|
||||
|
||||
|
|
|
@ -17,6 +17,9 @@
|
|||
#include <linux/mm.h>
|
||||
#include <linux/of_pci.h>
|
||||
#include <linux/of_platform.h>
|
||||
#include <linux/pci.h>
|
||||
#include <linux/pci-acpi.h>
|
||||
#include <linux/pci-ecam.h>
|
||||
#include <linux/slab.h>
|
||||
|
||||
/*
|
||||
|
@ -36,25 +39,17 @@ resource_size_t pcibios_align_resource(void *data, const struct resource *res,
|
|||
return res->start;
|
||||
}
|
||||
|
||||
/**
|
||||
* pcibios_enable_device - Enable I/O and memory.
|
||||
* @dev: PCI device to be enabled
|
||||
* @mask: bitmask of BARs to enable
|
||||
*/
|
||||
int pcibios_enable_device(struct pci_dev *dev, int mask)
|
||||
{
|
||||
if (pci_has_flag(PCI_PROBE_ONLY))
|
||||
return 0;
|
||||
|
||||
return pci_enable_resources(dev, mask);
|
||||
}
|
||||
|
||||
/*
|
||||
* Try to assign the IRQ number from DT when adding a new device
|
||||
* Try to assign the IRQ number when probing a new device
|
||||
*/
|
||||
int pcibios_add_device(struct pci_dev *dev)
|
||||
int pcibios_alloc_irq(struct pci_dev *dev)
|
||||
{
|
||||
dev->irq = of_irq_parse_and_map_pci(dev, 0, 0);
|
||||
if (acpi_disabled)
|
||||
dev->irq = of_irq_parse_and_map_pci(dev, 0, 0);
|
||||
#ifdef CONFIG_ACPI
|
||||
else
|
||||
return acpi_pci_irq_enable(dev);
|
||||
#endif
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
@ -65,13 +60,21 @@ int pcibios_add_device(struct pci_dev *dev)
|
|||
int raw_pci_read(unsigned int domain, unsigned int bus,
|
||||
unsigned int devfn, int reg, int len, u32 *val)
|
||||
{
|
||||
return -ENXIO;
|
||||
struct pci_bus *b = pci_find_bus(domain, bus);
|
||||
|
||||
if (!b)
|
||||
return PCIBIOS_DEVICE_NOT_FOUND;
|
||||
return b->ops->read(b, devfn, reg, len, val);
|
||||
}
|
||||
|
||||
int raw_pci_write(unsigned int domain, unsigned int bus,
|
||||
unsigned int devfn, int reg, int len, u32 val)
|
||||
{
|
||||
return -ENXIO;
|
||||
struct pci_bus *b = pci_find_bus(domain, bus);
|
||||
|
||||
if (!b)
|
||||
return PCIBIOS_DEVICE_NOT_FOUND;
|
||||
return b->ops->write(b, devfn, reg, len, val);
|
||||
}
|
||||
|
||||
#ifdef CONFIG_NUMA
|
||||
|
@ -85,10 +88,124 @@ EXPORT_SYMBOL(pcibus_to_node);
|
|||
#endif
|
||||
|
||||
#ifdef CONFIG_ACPI
|
||||
/* Root bridge scanning */
|
||||
|
||||
struct acpi_pci_generic_root_info {
|
||||
struct acpi_pci_root_info common;
|
||||
struct pci_config_window *cfg; /* config space mapping */
|
||||
};
|
||||
|
||||
int acpi_pci_bus_find_domain_nr(struct pci_bus *bus)
|
||||
{
|
||||
struct pci_config_window *cfg = bus->sysdata;
|
||||
struct acpi_device *adev = to_acpi_device(cfg->parent);
|
||||
struct acpi_pci_root *root = acpi_driver_data(adev);
|
||||
|
||||
return root->segment;
|
||||
}
|
||||
|
||||
int pcibios_root_bridge_prepare(struct pci_host_bridge *bridge)
|
||||
{
|
||||
if (!acpi_disabled) {
|
||||
struct pci_config_window *cfg = bridge->bus->sysdata;
|
||||
struct acpi_device *adev = to_acpi_device(cfg->parent);
|
||||
ACPI_COMPANION_SET(&bridge->dev, adev);
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/*
|
||||
* Lookup the bus range for the domain in MCFG, and set up config space
|
||||
* mapping.
|
||||
*/
|
||||
static struct pci_config_window *
|
||||
pci_acpi_setup_ecam_mapping(struct acpi_pci_root *root)
|
||||
{
|
||||
struct resource *bus_res = &root->secondary;
|
||||
u16 seg = root->segment;
|
||||
struct pci_config_window *cfg;
|
||||
struct resource cfgres;
|
||||
unsigned int bsz;
|
||||
|
||||
/* Use address from _CBA if present, otherwise lookup MCFG */
|
||||
if (!root->mcfg_addr)
|
||||
root->mcfg_addr = pci_mcfg_lookup(seg, bus_res);
|
||||
|
||||
if (!root->mcfg_addr) {
|
||||
dev_err(&root->device->dev, "%04x:%pR ECAM region not found\n",
|
||||
seg, bus_res);
|
||||
return NULL;
|
||||
}
|
||||
|
||||
bsz = 1 << pci_generic_ecam_ops.bus_shift;
|
||||
cfgres.start = root->mcfg_addr + bus_res->start * bsz;
|
||||
cfgres.end = cfgres.start + resource_size(bus_res) * bsz - 1;
|
||||
cfgres.flags = IORESOURCE_MEM;
|
||||
cfg = pci_ecam_create(&root->device->dev, &cfgres, bus_res,
|
||||
&pci_generic_ecam_ops);
|
||||
if (IS_ERR(cfg)) {
|
||||
dev_err(&root->device->dev, "%04x:%pR error %ld mapping ECAM\n",
|
||||
seg, bus_res, PTR_ERR(cfg));
|
||||
return NULL;
|
||||
}
|
||||
|
||||
return cfg;
|
||||
}
|
||||
|
||||
/* release_info: free resources allocated by init_info */
|
||||
static void pci_acpi_generic_release_info(struct acpi_pci_root_info *ci)
|
||||
{
|
||||
struct acpi_pci_generic_root_info *ri;
|
||||
|
||||
ri = container_of(ci, struct acpi_pci_generic_root_info, common);
|
||||
pci_ecam_free(ri->cfg);
|
||||
kfree(ri);
|
||||
}
|
||||
|
||||
static struct acpi_pci_root_ops acpi_pci_root_ops = {
|
||||
.release_info = pci_acpi_generic_release_info,
|
||||
};
|
||||
|
||||
/* Interface called from ACPI code to setup PCI host controller */
|
||||
struct pci_bus *pci_acpi_scan_root(struct acpi_pci_root *root)
|
||||
{
|
||||
/* TODO: Should be revisited when implementing PCI on ACPI */
|
||||
return NULL;
|
||||
int node = acpi_get_node(root->device->handle);
|
||||
struct acpi_pci_generic_root_info *ri;
|
||||
struct pci_bus *bus, *child;
|
||||
|
||||
ri = kzalloc_node(sizeof(*ri), GFP_KERNEL, node);
|
||||
if (!ri)
|
||||
return NULL;
|
||||
|
||||
ri->cfg = pci_acpi_setup_ecam_mapping(root);
|
||||
if (!ri->cfg) {
|
||||
kfree(ri);
|
||||
return NULL;
|
||||
}
|
||||
|
||||
acpi_pci_root_ops.pci_ops = &ri->cfg->ops->pci_ops;
|
||||
bus = acpi_pci_root_create(root, &acpi_pci_root_ops, &ri->common,
|
||||
ri->cfg);
|
||||
if (!bus)
|
||||
return NULL;
|
||||
|
||||
pci_bus_size_bridges(bus);
|
||||
pci_bus_assign_resources(bus);
|
||||
|
||||
list_for_each_entry(child, &bus->children, node)
|
||||
pcie_bus_configure_settings(child);
|
||||
|
||||
return bus;
|
||||
}
|
||||
|
||||
void pcibios_add_bus(struct pci_bus *bus)
|
||||
{
|
||||
acpi_pci_add_bus(bus);
|
||||
}
|
||||
|
||||
void pcibios_remove_bus(struct pci_bus *bus)
|
||||
{
|
||||
acpi_pci_remove_bus(bus);
|
||||
}
|
||||
|
||||
#endif
|
||||
|
|
|
@ -36,6 +36,7 @@ config KVM
|
|||
select HAVE_KVM_IRQFD
|
||||
select KVM_ARM_VGIC_V3
|
||||
select KVM_ARM_PMU if HW_PERF_EVENTS
|
||||
select HAVE_KVM_MSI
|
||||
---help---
|
||||
Support hosting virtualized guest machines.
|
||||
We don't support KVM with 16K page tables yet, due to the multiple
|
||||
|
@ -54,13 +55,6 @@ config KVM_ARM_PMU
|
|||
Adds support for a virtual Performance Monitoring Unit (PMU) in
|
||||
virtual machines.
|
||||
|
||||
config KVM_NEW_VGIC
|
||||
bool "New VGIC implementation"
|
||||
depends on KVM
|
||||
default y
|
||||
---help---
|
||||
uses the new VGIC implementation
|
||||
|
||||
source drivers/vhost/Kconfig
|
||||
|
||||
endif # VIRTUALIZATION
|
||||
|
|
|
@ -20,7 +20,6 @@ kvm-$(CONFIG_KVM_ARM_HOST) += emulate.o inject_fault.o regmap.o
|
|||
kvm-$(CONFIG_KVM_ARM_HOST) += hyp.o hyp-init.o handle_exit.o
|
||||
kvm-$(CONFIG_KVM_ARM_HOST) += guest.o debug.o reset.o sys_regs.o sys_regs_generic_v8.o
|
||||
|
||||
ifeq ($(CONFIG_KVM_NEW_VGIC),y)
|
||||
kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/vgic/vgic.o
|
||||
kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/vgic/vgic-init.o
|
||||
kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/vgic/vgic-irqfd.o
|
||||
|
@ -30,12 +29,6 @@ kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/vgic/vgic-mmio.o
|
|||
kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/vgic/vgic-mmio-v2.o
|
||||
kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/vgic/vgic-mmio-v3.o
|
||||
kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/vgic/vgic-kvm-device.o
|
||||
else
|
||||
kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/vgic.o
|
||||
kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/vgic-v2.o
|
||||
kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/vgic-v2-emul.o
|
||||
kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/vgic-v3.o
|
||||
kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/vgic-v3-emul.o
|
||||
endif
|
||||
kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/vgic/vgic-its.o
|
||||
kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/arch_timer.o
|
||||
kvm-$(CONFIG_KVM_ARM_PMU) += $(KVM)/arm/pmu.o
|
||||
|
|
|
@ -211,7 +211,7 @@ unsigned long kvm_arm_num_regs(struct kvm_vcpu *vcpu)
|
|||
/**
|
||||
* kvm_arm_copy_reg_indices - get indices of all registers.
|
||||
*
|
||||
* We do core registers right here, then we apppend system regs.
|
||||
* We do core registers right here, then we append system regs.
|
||||
*/
|
||||
int kvm_arm_copy_reg_indices(struct kvm_vcpu *vcpu, u64 __user *uindices)
|
||||
{
|
||||
|
|
|
@ -53,10 +53,9 @@ __invalid:
|
|||
b .
|
||||
|
||||
/*
|
||||
* x0: HYP boot pgd
|
||||
* x1: HYP pgd
|
||||
* x2: HYP stack
|
||||
* x3: HYP vectors
|
||||
* x0: HYP pgd
|
||||
* x1: HYP stack
|
||||
* x2: HYP vectors
|
||||
*/
|
||||
__do_hyp_init:
|
||||
|
||||
|
@ -110,71 +109,27 @@ __do_hyp_init:
|
|||
msr sctlr_el2, x4
|
||||
isb
|
||||
|
||||
/* Skip the trampoline dance if we merged the boot and runtime PGDs */
|
||||
cmp x0, x1
|
||||
b.eq merged
|
||||
|
||||
/* MMU is now enabled. Get ready for the trampoline dance */
|
||||
ldr x4, =TRAMPOLINE_VA
|
||||
adr x5, target
|
||||
bfi x4, x5, #0, #PAGE_SHIFT
|
||||
br x4
|
||||
|
||||
target: /* We're now in the trampoline code, switch page tables */
|
||||
msr ttbr0_el2, x1
|
||||
isb
|
||||
|
||||
/* Invalidate the old TLBs */
|
||||
tlbi alle2
|
||||
dsb sy
|
||||
|
||||
merged:
|
||||
/* Set the stack and new vectors */
|
||||
kern_hyp_va x1
|
||||
mov sp, x1
|
||||
kern_hyp_va x2
|
||||
mov sp, x2
|
||||
kern_hyp_va x3
|
||||
msr vbar_el2, x3
|
||||
msr vbar_el2, x2
|
||||
|
||||
/* Hello, World! */
|
||||
eret
|
||||
ENDPROC(__kvm_hyp_init)
|
||||
|
||||
/*
|
||||
* Reset kvm back to the hyp stub. This is the trampoline dance in
|
||||
* reverse. If kvm used an extended idmap, __extended_idmap_trampoline
|
||||
* calls this code directly in the idmap. In this case switching to the
|
||||
* boot tables is a no-op.
|
||||
*
|
||||
* x0: HYP boot pgd
|
||||
* x1: HYP phys_idmap_start
|
||||
* Reset kvm back to the hyp stub.
|
||||
*/
|
||||
ENTRY(__kvm_hyp_reset)
|
||||
/* We're in trampoline code in VA, switch back to boot page tables */
|
||||
msr ttbr0_el2, x0
|
||||
isb
|
||||
|
||||
/* Ensure the PA branch doesn't find a stale tlb entry or stale code. */
|
||||
ic iallu
|
||||
tlbi alle2
|
||||
dsb sy
|
||||
isb
|
||||
|
||||
/* Branch into PA space */
|
||||
adr x0, 1f
|
||||
bfi x1, x0, #0, #PAGE_SHIFT
|
||||
br x1
|
||||
|
||||
/* We're now in idmap, disable MMU */
|
||||
1: mrs x0, sctlr_el2
|
||||
mrs x0, sctlr_el2
|
||||
ldr x1, =SCTLR_ELx_FLAGS
|
||||
bic x0, x0, x1 // Clear SCTL_M and etc
|
||||
msr sctlr_el2, x0
|
||||
isb
|
||||
|
||||
/* Invalidate the old TLBs */
|
||||
tlbi alle2
|
||||
dsb sy
|
||||
|
||||
/* Install stub vectors */
|
||||
adr_l x0, __hyp_stub_vectors
|
||||
msr vbar_el2, x0
|
||||
|
|
|
@ -164,22 +164,3 @@ alternative_endif
|
|||
|
||||
eret
|
||||
ENDPROC(__fpsimd_guest_restore)
|
||||
|
||||
/*
|
||||
* When using the extended idmap, we don't have a trampoline page we can use
|
||||
* while we switch pages tables during __kvm_hyp_reset. Accessing the idmap
|
||||
* directly would be ideal, but if we're using the extended idmap then the
|
||||
* idmap is located above HYP_PAGE_OFFSET, and the address will be masked by
|
||||
* kvm_call_hyp using kern_hyp_va.
|
||||
*
|
||||
* x0: HYP boot pgd
|
||||
* x1: HYP phys_idmap_start
|
||||
*/
|
||||
ENTRY(__extended_idmap_trampoline)
|
||||
mov x4, x1
|
||||
adr_l x3, __kvm_hyp_reset
|
||||
|
||||
/* insert __kvm_hyp_reset()s offset into phys_idmap_start */
|
||||
bfi x4, x3, #0, #PAGE_SHIFT
|
||||
br x4
|
||||
ENDPROC(__extended_idmap_trampoline)
|
||||
|
|
|
@ -63,6 +63,21 @@ ENTRY(__vhe_hyp_call)
|
|||
ret
|
||||
ENDPROC(__vhe_hyp_call)
|
||||
|
||||
/*
|
||||
* Compute the idmap address of __kvm_hyp_reset based on the idmap
|
||||
* start passed as a parameter, and jump there.
|
||||
*
|
||||
* x0: HYP phys_idmap_start
|
||||
*/
|
||||
ENTRY(__kvm_hyp_teardown)
|
||||
mov x4, x0
|
||||
adr_l x3, __kvm_hyp_reset
|
||||
|
||||
/* insert __kvm_hyp_reset()s offset into phys_idmap_start */
|
||||
bfi x4, x3, #0, #PAGE_SHIFT
|
||||
br x4
|
||||
ENDPROC(__kvm_hyp_teardown)
|
||||
|
||||
el1_sync: // Guest trapped into EL2
|
||||
save_x0_to_x3
|
||||
|
||||
|
|
|
@ -299,9 +299,16 @@ static const char __hyp_panic_string[] = "HYP panic:\nPS:%08llx PC:%016llx ESR:%
|
|||
|
||||
static void __hyp_text __hyp_call_panic_nvhe(u64 spsr, u64 elr, u64 par)
|
||||
{
|
||||
unsigned long str_va = (unsigned long)__hyp_panic_string;
|
||||
unsigned long str_va;
|
||||
|
||||
__hyp_do_panic(hyp_kern_va(str_va),
|
||||
/*
|
||||
* Force the panic string to be loaded from the literal pool,
|
||||
* making sure it is a kernel address and not a PC-relative
|
||||
* reference.
|
||||
*/
|
||||
asm volatile("ldr %0, =__hyp_panic_string" : "=r" (str_va));
|
||||
|
||||
__hyp_do_panic(str_va,
|
||||
spsr, elr,
|
||||
read_sysreg(esr_el2), read_sysreg_el2(far),
|
||||
read_sysreg(hpfar_el2), par,
|
||||
|
|
|
@ -65,7 +65,7 @@ static bool cpu_has_32bit_el1(void)
|
|||
* We currently assume that the number of HW registers is uniform
|
||||
* across all CPUs (see cpuinfo_sanity_check).
|
||||
*/
|
||||
int kvm_arch_dev_ioctl_check_extension(long ext)
|
||||
int kvm_arch_dev_ioctl_check_extension(struct kvm *kvm, long ext)
|
||||
{
|
||||
int r;
|
||||
|
||||
|
@ -86,6 +86,12 @@ int kvm_arch_dev_ioctl_check_extension(long ext)
|
|||
case KVM_CAP_VCPU_ATTRIBUTES:
|
||||
r = 1;
|
||||
break;
|
||||
case KVM_CAP_MSI_DEVID:
|
||||
if (!kvm)
|
||||
r = -EINVAL;
|
||||
else
|
||||
r = kvm->arch.vgic.msis_require_devid;
|
||||
break;
|
||||
default:
|
||||
r = 0;
|
||||
}
|
||||
|
@ -98,7 +104,7 @@ int kvm_arch_dev_ioctl_check_extension(long ext)
|
|||
* @vcpu: The VCPU pointer
|
||||
*
|
||||
* This function finds the right table above and sets the registers on
|
||||
* the virtual CPU struct to their architectually defined reset
|
||||
* the virtual CPU struct to their architecturally defined reset
|
||||
* values.
|
||||
*/
|
||||
int kvm_reset_vcpu(struct kvm_vcpu *vcpu)
|
||||
|
@ -132,31 +138,3 @@ int kvm_reset_vcpu(struct kvm_vcpu *vcpu)
|
|||
/* Reset timer */
|
||||
return kvm_timer_vcpu_reset(vcpu, cpu_vtimer_irq);
|
||||
}
|
||||
|
||||
extern char __hyp_idmap_text_start[];
|
||||
|
||||
unsigned long kvm_hyp_reset_entry(void)
|
||||
{
|
||||
if (!__kvm_cpu_uses_extended_idmap()) {
|
||||
unsigned long offset;
|
||||
|
||||
/*
|
||||
* Find the address of __kvm_hyp_reset() in the trampoline page.
|
||||
* This is present in the running page tables, and the boot page
|
||||
* tables, so we call the code here to start the trampoline
|
||||
* dance in reverse.
|
||||
*/
|
||||
offset = (unsigned long)__kvm_hyp_reset
|
||||
- ((unsigned long)__hyp_idmap_text_start & PAGE_MASK);
|
||||
|
||||
return TRAMPOLINE_VA + offset;
|
||||
} else {
|
||||
/*
|
||||
* KVM is running with merged page tables, which don't have the
|
||||
* trampoline page mapped. We know the idmap is still mapped,
|
||||
* but can't be called into directly. Use
|
||||
* __extended_idmap_trampoline to do the call.
|
||||
*/
|
||||
return (unsigned long)kvm_ksym_ref(__extended_idmap_trampoline);
|
||||
}
|
||||
}
|
||||
|
|
|
@ -1546,7 +1546,7 @@ static void unhandled_cp_access(struct kvm_vcpu *vcpu,
|
|||
struct sys_reg_params *params)
|
||||
{
|
||||
u8 hsr_ec = kvm_vcpu_trap_get_class(vcpu);
|
||||
int cp;
|
||||
int cp = -1;
|
||||
|
||||
switch(hsr_ec) {
|
||||
case ESR_ELx_EC_CP15_32:
|
||||
|
@ -1558,7 +1558,7 @@ static void unhandled_cp_access(struct kvm_vcpu *vcpu,
|
|||
cp = 14;
|
||||
break;
|
||||
default:
|
||||
WARN_ON((cp = -1));
|
||||
WARN_ON(1);
|
||||
}
|
||||
|
||||
kvm_err("Unsupported guest CP%d access at: %08lx\n",
|
||||
|
|
|
@ -397,7 +397,7 @@ static int __init init_axis_flash(void)
|
|||
if (!romfs_in_flash) {
|
||||
/* Create an RAM device for the root partition (romfs). */
|
||||
|
||||
#if !defined(CONFIG_MTD_MTDRAM) || (CONFIG_MTDRAM_TOTAL_SIZE != 0) || (CONFIG_MTDRAM_ABS_POS != 0)
|
||||
#if !defined(CONFIG_MTD_MTDRAM) || (CONFIG_MTDRAM_TOTAL_SIZE != 0)
|
||||
/* No use trying to boot this kernel from RAM. Panic! */
|
||||
printk(KERN_EMERG "axisflashmap: Cannot create an MTD RAM "
|
||||
"device due to kernel (mis)configuration!\n");
|
||||
|
|
|
@ -320,7 +320,7 @@ static int __init init_axis_flash(void)
|
|||
* but its size must be configured as 0 so as not to conflict
|
||||
* with our usage.
|
||||
*/
|
||||
#if !defined(CONFIG_MTD_MTDRAM) || (CONFIG_MTDRAM_TOTAL_SIZE != 0) || (CONFIG_MTDRAM_ABS_POS != 0)
|
||||
#if !defined(CONFIG_MTD_MTDRAM) || (CONFIG_MTDRAM_TOTAL_SIZE != 0)
|
||||
if (!romfs_in_flash && !nand_boot) {
|
||||
printk(KERN_EMERG "axisflashmap: Cannot create an MTD RAM "
|
||||
"device; configure CONFIG_MTD_MTDRAM with size = 0!\n");
|
||||
|
|
|
@ -82,9 +82,6 @@ extern pgprot_t pci_phys_mem_access_prot(struct file *file,
|
|||
pgprot_t prot);
|
||||
|
||||
#define HAVE_ARCH_PCI_RESOURCE_TO_USER
|
||||
extern void pci_resource_to_user(const struct pci_dev *dev, int bar,
|
||||
const struct resource *rsrc,
|
||||
resource_size_t *start, resource_size_t *end);
|
||||
|
||||
extern void pcibios_setup_bus_devices(struct pci_bus *bus);
|
||||
extern void pcibios_setup_bus_self(struct pci_bus *bus);
|
||||
|
|
|
@ -218,33 +218,6 @@ static struct resource *__pci_mmap_make_offset(struct pci_dev *dev,
|
|||
return NULL;
|
||||
}
|
||||
|
||||
/*
|
||||
* Set vm_page_prot of VMA, as appropriate for this architecture, for a pci
|
||||
* device mapping.
|
||||
*/
|
||||
static pgprot_t __pci_mmap_set_pgprot(struct pci_dev *dev, struct resource *rp,
|
||||
pgprot_t protection,
|
||||
enum pci_mmap_state mmap_state,
|
||||
int write_combine)
|
||||
{
|
||||
pgprot_t prot = protection;
|
||||
|
||||
/* Write combine is always 0 on non-memory space mappings. On
|
||||
* memory space, if the user didn't pass 1, we check for a
|
||||
* "prefetchable" resource. This is a bit hackish, but we use
|
||||
* this to workaround the inability of /sysfs to provide a write
|
||||
* combine bit
|
||||
*/
|
||||
if (mmap_state != pci_mmap_mem)
|
||||
write_combine = 0;
|
||||
else if (write_combine == 0) {
|
||||
if (rp->flags & IORESOURCE_PREFETCH)
|
||||
write_combine = 1;
|
||||
}
|
||||
|
||||
return pgprot_noncached(prot);
|
||||
}
|
||||
|
||||
/*
|
||||
* This one is used by /dev/mem and fbdev who have no clue about the
|
||||
* PCI device, it tries to find the PCI device first and calls the
|
||||
|
@ -317,9 +290,7 @@ int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
|
|||
return -EINVAL;
|
||||
|
||||
vma->vm_pgoff = offset >> PAGE_SHIFT;
|
||||
vma->vm_page_prot = __pci_mmap_set_pgprot(dev, rp,
|
||||
vma->vm_page_prot,
|
||||
mmap_state, write_combine);
|
||||
vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
|
||||
|
||||
ret = remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
|
||||
vma->vm_end - vma->vm_start, vma->vm_page_prot);
|
||||
|
@ -473,39 +444,25 @@ void pci_resource_to_user(const struct pci_dev *dev, int bar,
|
|||
const struct resource *rsrc,
|
||||
resource_size_t *start, resource_size_t *end)
|
||||
{
|
||||
struct pci_controller *hose = pci_bus_to_host(dev->bus);
|
||||
resource_size_t offset = 0;
|
||||
struct pci_bus_region region;
|
||||
|
||||
if (hose == NULL)
|
||||
if (rsrc->flags & IORESOURCE_IO) {
|
||||
pcibios_resource_to_bus(dev->bus, &region,

|
||||
(struct resource *) rsrc);
|
||||
*start = region.start;
|
||||
*end = region.end;
|
||||
return;
|
||||
}
|
||||
|
||||
if (rsrc->flags & IORESOURCE_IO)
|
||||
offset = (unsigned long)hose->io_base_virt - _IO_BASE;
|
||||
|
||||
/* We pass a fully fixed up address to userland for MMIO instead of
|
||||
* a BAR value because X is lame and expects to be able to use that
|
||||
* to pass to /dev/mem !
|
||||
/* We pass a CPU physical address to userland for MMIO instead of a
|
||||
* BAR value because X is lame and expects to be able to use that
|
||||
* to pass to /dev/mem!
|
||||
*
|
||||
* That means that we'll have potentially 64 bits values where some
|
||||
* userland apps only expect 32 (like X itself since it thinks only
|
||||
* Sparc has 64 bits MMIO) but if we don't do that, we break it on
|
||||
* 32 bits CHRPs :-(
|
||||
*
|
||||
* Hopefully, the sysfs insterface is immune to that gunk. Once X
|
||||
* has been fixed (and the fix spread enough), we can re-enable the
|
||||
* 2 lines below and pass down a BAR value to userland. In that case
|
||||
* we'll also have to re-enable the matching code in
|
||||
* __pci_mmap_make_offset().
|
||||
*
|
||||
* BenH.
|
||||
* That means we may have 64-bit values where some apps only expect
|
||||
* 32 (like X itself since it thinks only Sparc has 64-bit MMIO).
|
||||
*/
|
||||
#if 0
|
||||
else if (rsrc->flags & IORESOURCE_MEM)
|
||||
offset = hose->pci_mem_offset;
|
||||
#endif
|
||||
|
||||
*start = rsrc->start - offset;
|
||||
*end = rsrc->end - offset;
|
||||
*start = rsrc->start;
|
||||
*end = rsrc->end;
|
||||
}
|
||||
|
||||
/**
|
||||
|
|
|
@ -1488,6 +1488,7 @@ config CPU_MIPS64_R2
|
|||
select CPU_SUPPORTS_HIGHMEM
|
||||
select CPU_SUPPORTS_HUGEPAGES
|
||||
select CPU_SUPPORTS_MSA
|
||||
select HAVE_KVM
|
||||
help
|
||||
Choose this option to build a kernel for release 2 or later of the
|
||||
MIPS64 architecture. Many modern embedded systems with a 64-bit
|
||||
|
@ -1505,6 +1506,7 @@ config CPU_MIPS64_R6
|
|||
select CPU_SUPPORTS_MSA
|
||||
select GENERIC_CSUM
|
||||
select MIPS_O32_FP64_SUPPORT if MIPS32_O32
|
||||
select HAVE_KVM
|
||||
help
|
||||
Choose this option to build a kernel for release 6 or later of the
|
||||
MIPS64 architecture. New MIPS processors, starting with the Warrior
|
||||
|
|
|
@ -45,7 +45,7 @@
|
|||
/*
|
||||
* Returns the kernel segment base of a given address
|
||||
*/
|
||||
#define KSEGX(a) ((_ACAST32_ (a)) & 0xe0000000)
|
||||
#define KSEGX(a) ((_ACAST32_(a)) & _ACAST32_(0xe0000000))
|
||||
|
||||
/*
|
||||
* Returns the physical address of a CKSEGx / XKPHYS address
|
||||
|
|
|
@ -19,6 +19,9 @@
|
|||
#include <linux/threads.h>
|
||||
#include <linux/spinlock.h>
|
||||
|
||||
#include <asm/inst.h>
|
||||
#include <asm/mipsregs.h>
|
||||
|
||||
/* MIPS KVM register ids */
|
||||
#define MIPS_CP0_32(_R, _S) \
|
||||
(KVM_REG_MIPS_CP0 | KVM_REG_SIZE_U32 | (8 * (_R) + (_S)))
|
||||
|
@ -53,6 +56,12 @@
|
|||
#define KVM_REG_MIPS_CP0_CONFIG7 MIPS_CP0_32(16, 7)
|
||||
#define KVM_REG_MIPS_CP0_XCONTEXT MIPS_CP0_64(20, 0)
|
||||
#define KVM_REG_MIPS_CP0_ERROREPC MIPS_CP0_64(30, 0)
|
||||
#define KVM_REG_MIPS_CP0_KSCRATCH1 MIPS_CP0_64(31, 2)
|
||||
#define KVM_REG_MIPS_CP0_KSCRATCH2 MIPS_CP0_64(31, 3)
|
||||
#define KVM_REG_MIPS_CP0_KSCRATCH3 MIPS_CP0_64(31, 4)
|
||||
#define KVM_REG_MIPS_CP0_KSCRATCH4 MIPS_CP0_64(31, 5)
|
||||
#define KVM_REG_MIPS_CP0_KSCRATCH5 MIPS_CP0_64(31, 6)
|
||||
#define KVM_REG_MIPS_CP0_KSCRATCH6 MIPS_CP0_64(31, 7)
|
||||
|
||||
|
||||
#define KVM_MAX_VCPUS 1
|
||||
|
@ -65,8 +74,14 @@
|
|||
|
||||
|
||||
|
||||
/* Special address that contains the comm page, used for reducing # of traps */
|
||||
#define KVM_GUEST_COMMPAGE_ADDR 0x0
|
||||
/*
|
||||
* Special address that contains the comm page, used for reducing # of traps
|
||||
* This needs to be within 32Kb of 0x0 (so the zero register can be used), but
|
||||
* preferably not at 0x0 so that most kernel NULL pointer dereferences can be
|
||||
* caught.
|
||||
*/
|
||||
#define KVM_GUEST_COMMPAGE_ADDR ((PAGE_SIZE > 0x8000) ? 0 : \
|
||||
(0x8000 - PAGE_SIZE))
|
||||
|
||||
#define KVM_GUEST_KERNEL_MODE(vcpu) ((kvm_read_c0_guest_status(vcpu->arch.cop0) & (ST0_EXL | ST0_ERL)) || \
|
||||
((kvm_read_c0_guest_status(vcpu->arch.cop0) & KSU_USER) == 0))
|
||||
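As an illustrative aside on the new KVM_GUEST_COMMPAGE_ADDR expression (assuming the usual MIPS page sizes): with 4K pages the comm page lands at 0x8000 - 0x1000 = 0x7000, with 16K pages at 0x4000, and with 64K pages PAGE_SIZE exceeds 0x8000 so the address falls back to 0. In each case it stays within the 32KB reach of a zero-register-relative access, and avoids address 0 whenever the page size allows, as the comment above intends.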
|
@ -93,9 +108,6 @@
|
|||
#define KVM_INVALID_ADDR 0xdeadbeef
|
||||
|
||||
extern atomic_t kvm_mips_instance;
|
||||
extern kvm_pfn_t (*kvm_mips_gfn_to_pfn)(struct kvm *kvm, gfn_t gfn);
|
||||
extern void (*kvm_mips_release_pfn_clean)(kvm_pfn_t pfn);
|
||||
extern bool (*kvm_mips_is_error_pfn)(kvm_pfn_t pfn);
|
||||
|
||||
struct kvm_vm_stat {
|
||||
u32 remote_tlb_flush;
|
||||
|
@ -126,28 +138,6 @@ struct kvm_vcpu_stat {
|
|||
u32 halt_wakeup;
|
||||
};
|
||||
|
||||
enum kvm_mips_exit_types {
|
||||
WAIT_EXITS,
|
||||
CACHE_EXITS,
|
||||
SIGNAL_EXITS,
|
||||
INT_EXITS,
|
||||
COP_UNUSABLE_EXITS,
|
||||
TLBMOD_EXITS,
|
||||
TLBMISS_LD_EXITS,
|
||||
TLBMISS_ST_EXITS,
|
||||
ADDRERR_ST_EXITS,
|
||||
ADDRERR_LD_EXITS,
|
||||
SYSCALL_EXITS,
|
||||
RESVD_INST_EXITS,
|
||||
BREAK_INST_EXITS,
|
||||
TRAP_INST_EXITS,
|
||||
MSA_FPE_EXITS,
|
||||
FPE_EXITS,
|
||||
MSA_DISABLED_EXITS,
|
||||
FLUSH_DCACHE_EXITS,
|
||||
MAX_KVM_MIPS_EXIT_TYPES
|
||||
};
|
||||
|
||||
struct kvm_arch_memory_slot {
|
||||
};
|
||||
|
||||
|
@ -215,73 +205,6 @@ struct mips_coproc {
|
|||
#define MIPS_CP0_CONFIG4_SEL 4
|
||||
#define MIPS_CP0_CONFIG5_SEL 5
|
||||
|
||||
/* Config0 register bits */
|
||||
#define CP0C0_M 31
|
||||
#define CP0C0_K23 28
|
||||
#define CP0C0_KU 25
|
||||
#define CP0C0_MDU 20
|
||||
#define CP0C0_MM 17
|
||||
#define CP0C0_BM 16
|
||||
#define CP0C0_BE 15
|
||||
#define CP0C0_AT 13
|
||||
#define CP0C0_AR 10
|
||||
#define CP0C0_MT 7
|
||||
#define CP0C0_VI 3
|
||||
#define CP0C0_K0 0
|
||||
|
||||
/* Config1 register bits */
|
||||
#define CP0C1_M 31
|
||||
#define CP0C1_MMU 25
|
||||
#define CP0C1_IS 22
|
||||
#define CP0C1_IL 19
|
||||
#define CP0C1_IA 16
|
||||
#define CP0C1_DS 13
|
||||
#define CP0C1_DL 10
|
||||
#define CP0C1_DA 7
|
||||
#define CP0C1_C2 6
|
||||
#define CP0C1_MD 5
|
||||
#define CP0C1_PC 4
|
||||
#define CP0C1_WR 3
|
||||
#define CP0C1_CA 2
|
||||
#define CP0C1_EP 1
|
||||
#define CP0C1_FP 0
|
||||
|
||||
/* Config2 Register bits */
|
||||
#define CP0C2_M 31
|
||||
#define CP0C2_TU 28
|
||||
#define CP0C2_TS 24
|
||||
#define CP0C2_TL 20
|
||||
#define CP0C2_TA 16
|
||||
#define CP0C2_SU 12
|
||||
#define CP0C2_SS 8
|
||||
#define CP0C2_SL 4
|
||||
#define CP0C2_SA 0
|
||||
|
||||
/* Config3 Register bits */
|
||||
#define CP0C3_M 31
|
||||
#define CP0C3_ISA_ON_EXC 16
|
||||
#define CP0C3_ULRI 13
|
||||
#define CP0C3_DSPP 10
|
||||
#define CP0C3_LPA 7
|
||||
#define CP0C3_VEIC 6
|
||||
#define CP0C3_VInt 5
|
||||
#define CP0C3_SP 4
|
||||
#define CP0C3_MT 2
|
||||
#define CP0C3_SM 1
|
||||
#define CP0C3_TL 0
|
||||
|
||||
/* MMU types, the first four entries have the same layout as the
|
||||
CP0C0_MT field. */
|
||||
enum mips_mmu_types {
|
||||
MMU_TYPE_NONE,
|
||||
MMU_TYPE_R4000,
|
||||
MMU_TYPE_RESERVED,
|
||||
MMU_TYPE_FMT,
|
||||
MMU_TYPE_R3000,
|
||||
MMU_TYPE_R6000,
|
||||
MMU_TYPE_R8000
|
||||
};
|
||||
|
||||
/* Resume Flags */
|
||||
#define RESUME_FLAG_DR (1<<0) /* Reload guest nonvolatile state? */
|
||||
#define RESUME_FLAG_HOST (1<<1) /* Resume host? */
|
||||
|
@ -298,11 +221,6 @@ enum emulation_result {
|
|||
EMULATE_PRIV_FAIL,
|
||||
};
|
||||
|
||||
#define MIPS3_PG_G 0x00000001 /* Global; ignore ASID if in lo0 & lo1 */
|
||||
#define MIPS3_PG_V 0x00000002 /* Valid */
|
||||
#define MIPS3_PG_NV 0x00000000
|
||||
#define MIPS3_PG_D 0x00000004 /* Dirty */
|
||||
|
||||
#define mips3_paddr_to_tlbpfn(x) \
|
||||
(((unsigned long)(x) >> MIPS3_PG_SHIFT) & MIPS3_PG_FRAME)
|
||||
#define mips3_tlbpfn_to_paddr(x) \
|
||||
|
@ -313,13 +231,11 @@ enum emulation_result {
|
|||
|
||||
#define VPN2_MASK 0xffffe000
|
||||
#define KVM_ENTRYHI_ASID MIPS_ENTRYHI_ASID
|
||||
#define TLB_IS_GLOBAL(x) (((x).tlb_lo0 & MIPS3_PG_G) && \
|
||||
((x).tlb_lo1 & MIPS3_PG_G))
|
||||
#define TLB_IS_GLOBAL(x) ((x).tlb_lo[0] & (x).tlb_lo[1] & ENTRYLO_G)
|
||||
#define TLB_VPN2(x) ((x).tlb_hi & VPN2_MASK)
|
||||
#define TLB_ASID(x) ((x).tlb_hi & KVM_ENTRYHI_ASID)
|
||||
#define TLB_IS_VALID(x, va) (((va) & (1 << PAGE_SHIFT)) \
|
||||
? ((x).tlb_lo1 & MIPS3_PG_V) \
|
||||
: ((x).tlb_lo0 & MIPS3_PG_V))
|
||||
#define TLB_LO_IDX(x, va) (((va) >> PAGE_SHIFT) & 1)
|
||||
#define TLB_IS_VALID(x, va) ((x).tlb_lo[TLB_LO_IDX(x, va)] & ENTRYLO_V)
|
||||
#define TLB_HI_VPN2_HIT(x, y) ((TLB_VPN2(x) & ~(x).tlb_mask) == \
|
||||
((y) & VPN2_MASK & ~(x).tlb_mask))
|
||||
#define TLB_HI_ASID_HIT(x, y) (TLB_IS_GLOBAL(x) || \
|
||||
|
@ -328,26 +244,23 @@ enum emulation_result {
|
|||
struct kvm_mips_tlb {
|
||||
long tlb_mask;
|
||||
long tlb_hi;
|
||||
long tlb_lo0;
|
||||
long tlb_lo1;
|
||||
long tlb_lo[2];
|
||||
};
|
||||
|
||||
#define KVM_MIPS_FPU_FPU 0x1
|
||||
#define KVM_MIPS_FPU_MSA 0x2
|
||||
#define KVM_MIPS_AUX_FPU 0x1
|
||||
#define KVM_MIPS_AUX_MSA 0x2
|
||||
|
||||
#define KVM_MIPS_GUEST_TLB_SIZE 64
|
||||
struct kvm_vcpu_arch {
|
||||
void *host_ebase, *guest_ebase;
|
||||
void *guest_ebase;
|
||||
int (*vcpu_run)(struct kvm_run *run, struct kvm_vcpu *vcpu);
|
||||
unsigned long host_stack;
|
||||
unsigned long host_gp;
|
||||
|
||||
/* Host CP0 registers used when handling exits from guest */
|
||||
unsigned long host_cp0_badvaddr;
|
||||
unsigned long host_cp0_cause;
|
||||
unsigned long host_cp0_epc;
|
||||
unsigned long host_cp0_entryhi;
|
||||
uint32_t guest_inst;
|
||||
u32 host_cp0_cause;
|
||||
|
||||
/* GPRS */
|
||||
unsigned long gprs[32];
|
||||
|
@ -357,8 +270,8 @@ struct kvm_vcpu_arch {
|
|||
|
||||
/* FPU State */
|
||||
struct mips_fpu_struct fpu;
|
||||
/* Which FPU state is loaded (KVM_MIPS_FPU_*) */
|
||||
unsigned int fpu_inuse;
|
||||
/* Which auxiliary state is loaded (KVM_MIPS_AUX_*) */
|
||||
unsigned int aux_inuse;
|
||||
|
||||
/* COP0 State */
|
||||
struct mips_coproc *cop0;
|
||||
|
@ -370,11 +283,11 @@ struct kvm_vcpu_arch {
|
|||
|
||||
struct hrtimer comparecount_timer;
|
||||
/* Count timer control KVM register */
|
||||
uint32_t count_ctl;
|
||||
u32 count_ctl;
|
||||
/* Count bias from the raw time */
|
||||
uint32_t count_bias;
|
||||
u32 count_bias;
|
||||
/* Frequency of timer in Hz */
|
||||
uint32_t count_hz;
|
||||
u32 count_hz;
|
||||
/* Dynamic nanosecond bias (multiple of count_period) to avoid overflow */
|
||||
s64 count_dyn_bias;
|
||||
/* Resume time */
|
||||
|
@ -388,7 +301,7 @@ struct kvm_vcpu_arch {
|
|||
/* Bitmask of pending exceptions to be cleared */
|
||||
unsigned long pending_exceptions_clr;
|
||||
|
||||
unsigned long pending_load_cause;
|
||||
u32 pending_load_cause;
|
||||
|
||||
/* Save/Restore the entryhi register when are are preempted/scheduled back in */
|
||||
unsigned long preempt_entryhi;
|
||||
|
@ -397,8 +310,8 @@ struct kvm_vcpu_arch {
|
|||
struct kvm_mips_tlb guest_tlb[KVM_MIPS_GUEST_TLB_SIZE];
|
||||
|
||||
/* Cached guest kernel/user ASIDs */
|
||||
uint32_t guest_user_asid[NR_CPUS];
|
||||
uint32_t guest_kernel_asid[NR_CPUS];
|
||||
u32 guest_user_asid[NR_CPUS];
|
||||
u32 guest_kernel_asid[NR_CPUS];
|
||||
struct mm_struct guest_kernel_mm, guest_user_mm;
|
||||
|
||||
int last_sched_cpu;
|
||||
|
@ -408,6 +321,7 @@ struct kvm_vcpu_arch {
|
|||
|
||||
u8 fpu_enabled;
|
||||
u8 msa_enabled;
|
||||
u8 kscratch_enabled;
|
||||
};
|
||||
|
||||
|
||||
|
@ -461,6 +375,18 @@ struct kvm_vcpu_arch {
|
|||
#define kvm_write_c0_guest_config7(cop0, val) (cop0->reg[MIPS_CP0_CONFIG][7] = (val))
|
||||
#define kvm_read_c0_guest_errorepc(cop0) (cop0->reg[MIPS_CP0_ERROR_PC][0])
|
||||
#define kvm_write_c0_guest_errorepc(cop0, val) (cop0->reg[MIPS_CP0_ERROR_PC][0] = (val))
|
||||
#define kvm_read_c0_guest_kscratch1(cop0) (cop0->reg[MIPS_CP0_DESAVE][2])
|
||||
#define kvm_read_c0_guest_kscratch2(cop0) (cop0->reg[MIPS_CP0_DESAVE][3])
|
||||
#define kvm_read_c0_guest_kscratch3(cop0) (cop0->reg[MIPS_CP0_DESAVE][4])
|
||||
#define kvm_read_c0_guest_kscratch4(cop0) (cop0->reg[MIPS_CP0_DESAVE][5])
|
||||
#define kvm_read_c0_guest_kscratch5(cop0) (cop0->reg[MIPS_CP0_DESAVE][6])
|
||||
#define kvm_read_c0_guest_kscratch6(cop0) (cop0->reg[MIPS_CP0_DESAVE][7])
|
||||
#define kvm_write_c0_guest_kscratch1(cop0, val) (cop0->reg[MIPS_CP0_DESAVE][2] = (val))
|
||||
#define kvm_write_c0_guest_kscratch2(cop0, val) (cop0->reg[MIPS_CP0_DESAVE][3] = (val))
|
||||
#define kvm_write_c0_guest_kscratch3(cop0, val) (cop0->reg[MIPS_CP0_DESAVE][4] = (val))
|
||||
#define kvm_write_c0_guest_kscratch4(cop0, val) (cop0->reg[MIPS_CP0_DESAVE][5] = (val))
|
||||
#define kvm_write_c0_guest_kscratch5(cop0, val) (cop0->reg[MIPS_CP0_DESAVE][6] = (val))
|
||||
#define kvm_write_c0_guest_kscratch6(cop0, val) (cop0->reg[MIPS_CP0_DESAVE][7] = (val))
|
||||
|
||||
/*
|
||||
* Some of the guest registers may be modified asynchronously (e.g. from a
|
||||
|
@ -474,7 +400,7 @@ static inline void _kvm_atomic_set_c0_guest_reg(unsigned long *reg,
|
|||
unsigned long temp;
|
||||
do {
|
||||
__asm__ __volatile__(
|
||||
" .set mips3 \n"
|
||||
" .set "MIPS_ISA_ARCH_LEVEL" \n"
|
||||
" " __LL "%0, %1 \n"
|
||||
" or %0, %2 \n"
|
||||
" " __SC "%0, %1 \n"
|
||||
|
@ -490,7 +416,7 @@ static inline void _kvm_atomic_clear_c0_guest_reg(unsigned long *reg,
|
|||
unsigned long temp;
|
||||
do {
|
||||
__asm__ __volatile__(
|
||||
" .set mips3 \n"
|
||||
" .set "MIPS_ISA_ARCH_LEVEL" \n"
|
||||
" " __LL "%0, %1 \n"
|
||||
" and %0, %2 \n"
|
||||
" " __SC "%0, %1 \n"
|
||||
|
@ -507,7 +433,7 @@ static inline void _kvm_atomic_change_c0_guest_reg(unsigned long *reg,
|
|||
unsigned long temp;
|
||||
do {
|
||||
__asm__ __volatile__(
|
||||
" .set mips3 \n"
|
||||
" .set "MIPS_ISA_ARCH_LEVEL" \n"
|
||||
" " __LL "%0, %1 \n"
|
||||
" and %0, %2 \n"
|
||||
" or %0, %3 \n"
|
||||
|
@ -542,7 +468,7 @@ static inline void _kvm_atomic_change_c0_guest_reg(unsigned long *reg,
|
|||
|
||||
static inline bool kvm_mips_guest_can_have_fpu(struct kvm_vcpu_arch *vcpu)
|
||||
{
|
||||
return (!__builtin_constant_p(cpu_has_fpu) || cpu_has_fpu) &&
|
||||
return (!__builtin_constant_p(raw_cpu_has_fpu) || raw_cpu_has_fpu) &&
|
||||
vcpu->fpu_enabled;
|
||||
}
|
||||
|
||||
|
@ -589,9 +515,11 @@ struct kvm_mips_callbacks {
|
|||
void (*dequeue_io_int)(struct kvm_vcpu *vcpu,
|
||||
struct kvm_mips_interrupt *irq);
|
||||
int (*irq_deliver)(struct kvm_vcpu *vcpu, unsigned int priority,
|
||||
uint32_t cause);
|
||||
u32 cause);
|
||||
int (*irq_clear)(struct kvm_vcpu *vcpu, unsigned int priority,
|
||||
uint32_t cause);
|
||||
u32 cause);
|
||||
unsigned long (*num_regs)(struct kvm_vcpu *vcpu);
|
||||
int (*copy_reg_indices)(struct kvm_vcpu *vcpu, u64 __user *indices);
|
||||
int (*get_one_reg)(struct kvm_vcpu *vcpu,
|
||||
const struct kvm_one_reg *reg, s64 *v);
|
||||
int (*set_one_reg)(struct kvm_vcpu *vcpu,
|
||||
|
@ -605,8 +533,13 @@ int kvm_mips_emulation_init(struct kvm_mips_callbacks **install_callbacks);
|
|||
/* Debug: dump vcpu state */
|
||||
int kvm_arch_vcpu_dump_regs(struct kvm_vcpu *vcpu);
|
||||
|
||||
/* Trampoline ASM routine to start running in "Guest" context */
|
||||
extern int __kvm_mips_vcpu_run(struct kvm_run *run, struct kvm_vcpu *vcpu);
|
||||
extern int kvm_mips_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu);
|
||||
|
||||
/* Building of entry/exception code */
|
||||
int kvm_mips_entry_setup(void);
|
||||
void *kvm_mips_build_vcpu_run(void *addr);
|
||||
void *kvm_mips_build_exception(void *addr, void *handler);
|
||||
void *kvm_mips_build_exit(void *addr);
|
||||
|
||||
/* FPU/MSA context management */
|
||||
void __kvm_save_fpu(struct kvm_vcpu_arch *vcpu);
|
||||
|
@ -622,11 +555,11 @@ void kvm_drop_fpu(struct kvm_vcpu *vcpu);
|
|||
void kvm_lose_fpu(struct kvm_vcpu *vcpu);
|
||||
|
||||
/* TLB handling */
|
||||
uint32_t kvm_get_kernel_asid(struct kvm_vcpu *vcpu);
|
||||
u32 kvm_get_kernel_asid(struct kvm_vcpu *vcpu);
|
||||
|
||||
uint32_t kvm_get_user_asid(struct kvm_vcpu *vcpu);
|
||||
u32 kvm_get_user_asid(struct kvm_vcpu *vcpu);
|
||||
|
||||
uint32_t kvm_get_commpage_asid (struct kvm_vcpu *vcpu);
|
||||
u32 kvm_get_commpage_asid (struct kvm_vcpu *vcpu);
|
||||
|
||||
extern int kvm_mips_handle_kseg0_tlb_fault(unsigned long badbaddr,
|
||||
struct kvm_vcpu *vcpu);
|
||||
|
@ -635,22 +568,24 @@ extern int kvm_mips_handle_commpage_tlb_fault(unsigned long badvaddr,
|
|||
struct kvm_vcpu *vcpu);
|
||||
|
||||
extern int kvm_mips_handle_mapped_seg_tlb_fault(struct kvm_vcpu *vcpu,
|
||||
struct kvm_mips_tlb *tlb,
|
||||
unsigned long *hpa0,
|
||||
unsigned long *hpa1);
|
||||
struct kvm_mips_tlb *tlb);
|
||||
|
||||
extern enum emulation_result kvm_mips_handle_tlbmiss(unsigned long cause,
|
||||
uint32_t *opc,
|
||||
extern enum emulation_result kvm_mips_handle_tlbmiss(u32 cause,
|
||||
u32 *opc,
|
||||
struct kvm_run *run,
|
||||
struct kvm_vcpu *vcpu);
|
||||
|
||||
extern enum emulation_result kvm_mips_handle_tlbmod(unsigned long cause,
|
||||
uint32_t *opc,
|
||||
extern enum emulation_result kvm_mips_handle_tlbmod(u32 cause,
|
||||
u32 *opc,
|
||||
struct kvm_run *run,
|
||||
struct kvm_vcpu *vcpu);
|
||||
|
||||
extern void kvm_mips_dump_host_tlbs(void);
|
||||
extern void kvm_mips_dump_guest_tlbs(struct kvm_vcpu *vcpu);
|
||||
extern int kvm_mips_host_tlb_write(struct kvm_vcpu *vcpu, unsigned long entryhi,
|
||||
unsigned long entrylo0,
|
||||
unsigned long entrylo1,
|
||||
int flush_dcache_mask);
|
||||
extern void kvm_mips_flush_host_tlb(int skip_kseg0);
|
||||
extern int kvm_mips_host_tlb_inv(struct kvm_vcpu *vcpu, unsigned long entryhi);
|
||||
|
||||
|
@ -667,90 +602,90 @@ extern void kvm_mips_vcpu_load(struct kvm_vcpu *vcpu, int cpu);
|
|||
extern void kvm_mips_vcpu_put(struct kvm_vcpu *vcpu);
|
||||
|
||||
/* Emulation */
|
||||
uint32_t kvm_get_inst(uint32_t *opc, struct kvm_vcpu *vcpu);
|
||||
enum emulation_result update_pc(struct kvm_vcpu *vcpu, uint32_t cause);
|
||||
u32 kvm_get_inst(u32 *opc, struct kvm_vcpu *vcpu);
|
||||
enum emulation_result update_pc(struct kvm_vcpu *vcpu, u32 cause);
|
||||
|
||||
extern enum emulation_result kvm_mips_emulate_inst(unsigned long cause,
|
||||
uint32_t *opc,
|
||||
extern enum emulation_result kvm_mips_emulate_inst(u32 cause,
|
||||
u32 *opc,
|
||||
struct kvm_run *run,
|
||||
struct kvm_vcpu *vcpu);
|
||||
|
||||
extern enum emulation_result kvm_mips_emulate_syscall(unsigned long cause,
|
||||
uint32_t *opc,
|
||||
extern enum emulation_result kvm_mips_emulate_syscall(u32 cause,
|
||||
u32 *opc,
|
||||
struct kvm_run *run,
|
||||
struct kvm_vcpu *vcpu);
|
||||
|
||||
extern enum emulation_result kvm_mips_emulate_tlbmiss_ld(unsigned long cause,
|
||||
uint32_t *opc,
|
||||
extern enum emulation_result kvm_mips_emulate_tlbmiss_ld(u32 cause,
|
||||
u32 *opc,
|
||||
struct kvm_run *run,
|
||||
struct kvm_vcpu *vcpu);
|
||||
|
||||
extern enum emulation_result kvm_mips_emulate_tlbinv_ld(unsigned long cause,
|
||||
uint32_t *opc,
|
||||
extern enum emulation_result kvm_mips_emulate_tlbinv_ld(u32 cause,
|
||||
u32 *opc,
|
||||
struct kvm_run *run,
|
||||
struct kvm_vcpu *vcpu);
|
||||
|
||||
extern enum emulation_result kvm_mips_emulate_tlbmiss_st(unsigned long cause,
|
||||
uint32_t *opc,
|
||||
extern enum emulation_result kvm_mips_emulate_tlbmiss_st(u32 cause,
|
||||
u32 *opc,
|
||||
struct kvm_run *run,
|
||||
struct kvm_vcpu *vcpu);
|
||||
|
||||
extern enum emulation_result kvm_mips_emulate_tlbinv_st(unsigned long cause,
|
||||
uint32_t *opc,
|
||||
extern enum emulation_result kvm_mips_emulate_tlbinv_st(u32 cause,
|
||||
u32 *opc,
|
||||
struct kvm_run *run,
|
||||
struct kvm_vcpu *vcpu);
|
||||
|
||||
extern enum emulation_result kvm_mips_emulate_tlbmod(unsigned long cause,
|
||||
uint32_t *opc,
|
||||
extern enum emulation_result kvm_mips_emulate_tlbmod(u32 cause,
|
||||
u32 *opc,
|
||||
struct kvm_run *run,
|
||||
struct kvm_vcpu *vcpu);
|
||||
|
||||
extern enum emulation_result kvm_mips_emulate_fpu_exc(unsigned long cause,
|
||||
uint32_t *opc,
|
||||
extern enum emulation_result kvm_mips_emulate_fpu_exc(u32 cause,
|
||||
u32 *opc,
|
||||
struct kvm_run *run,
|
||||
struct kvm_vcpu *vcpu);
|
||||
|
||||
extern enum emulation_result kvm_mips_handle_ri(unsigned long cause,
|
||||
uint32_t *opc,
|
||||
extern enum emulation_result kvm_mips_handle_ri(u32 cause,
|
||||
u32 *opc,
|
||||
struct kvm_run *run,
|
||||
struct kvm_vcpu *vcpu);
|
||||
|
||||
extern enum emulation_result kvm_mips_emulate_ri_exc(unsigned long cause,
|
||||
uint32_t *opc,
|
||||
extern enum emulation_result kvm_mips_emulate_ri_exc(u32 cause,
|
||||
u32 *opc,
|
||||
struct kvm_run *run,
|
||||
struct kvm_vcpu *vcpu);
|
||||
|
||||
extern enum emulation_result kvm_mips_emulate_bp_exc(unsigned long cause,
|
||||
uint32_t *opc,
|
||||
extern enum emulation_result kvm_mips_emulate_bp_exc(u32 cause,
|
||||
u32 *opc,
|
||||
struct kvm_run *run,
|
||||
struct kvm_vcpu *vcpu);
|
||||
|
||||
extern enum emulation_result kvm_mips_emulate_trap_exc(unsigned long cause,
|
||||
uint32_t *opc,
|
||||
extern enum emulation_result kvm_mips_emulate_trap_exc(u32 cause,
|
||||
u32 *opc,
|
||||
struct kvm_run *run,
|
||||
struct kvm_vcpu *vcpu);
|
||||
|
||||
extern enum emulation_result kvm_mips_emulate_msafpe_exc(unsigned long cause,
|
||||
uint32_t *opc,
|
||||
extern enum emulation_result kvm_mips_emulate_msafpe_exc(u32 cause,
|
||||
u32 *opc,
|
||||
struct kvm_run *run,
|
||||
struct kvm_vcpu *vcpu);
|
||||
|
||||
extern enum emulation_result kvm_mips_emulate_fpe_exc(unsigned long cause,
|
||||
uint32_t *opc,
|
||||
extern enum emulation_result kvm_mips_emulate_fpe_exc(u32 cause,
|
||||
u32 *opc,
|
||||
struct kvm_run *run,
|
||||
struct kvm_vcpu *vcpu);
|
||||
|
||||
extern enum emulation_result kvm_mips_emulate_msadis_exc(unsigned long cause,
|
||||
uint32_t *opc,
|
||||
extern enum emulation_result kvm_mips_emulate_msadis_exc(u32 cause,
|
||||
u32 *opc,
|
||||
struct kvm_run *run,
|
||||
struct kvm_vcpu *vcpu);
|
||||
|
||||
extern enum emulation_result kvm_mips_complete_mmio_load(struct kvm_vcpu *vcpu,
|
||||
struct kvm_run *run);
|
||||
|
||||
uint32_t kvm_mips_read_count(struct kvm_vcpu *vcpu);
|
||||
void kvm_mips_write_count(struct kvm_vcpu *vcpu, uint32_t count);
|
||||
void kvm_mips_write_compare(struct kvm_vcpu *vcpu, uint32_t compare, bool ack);
|
||||
u32 kvm_mips_read_count(struct kvm_vcpu *vcpu);
|
||||
void kvm_mips_write_count(struct kvm_vcpu *vcpu, u32 count);
|
||||
void kvm_mips_write_compare(struct kvm_vcpu *vcpu, u32 compare, bool ack);
|
||||
void kvm_mips_init_count(struct kvm_vcpu *vcpu);
|
||||
int kvm_mips_set_count_ctl(struct kvm_vcpu *vcpu, s64 count_ctl);
|
||||
int kvm_mips_set_count_resume(struct kvm_vcpu *vcpu, s64 count_resume);
|
||||
|
@ -759,27 +694,27 @@ void kvm_mips_count_enable_cause(struct kvm_vcpu *vcpu);
|
|||
void kvm_mips_count_disable_cause(struct kvm_vcpu *vcpu);
|
||||
enum hrtimer_restart kvm_mips_count_timeout(struct kvm_vcpu *vcpu);
|
||||
|
||||
enum emulation_result kvm_mips_check_privilege(unsigned long cause,
|
||||
uint32_t *opc,
|
||||
enum emulation_result kvm_mips_check_privilege(u32 cause,
|
||||
u32 *opc,
|
||||
struct kvm_run *run,
|
||||
struct kvm_vcpu *vcpu);
|
||||
|
||||
enum emulation_result kvm_mips_emulate_cache(uint32_t inst,
|
||||
uint32_t *opc,
|
||||
uint32_t cause,
|
||||
enum emulation_result kvm_mips_emulate_cache(union mips_instruction inst,
|
||||
u32 *opc,
|
||||
u32 cause,
|
||||
struct kvm_run *run,
|
||||
struct kvm_vcpu *vcpu);
|
||||
enum emulation_result kvm_mips_emulate_CP0(uint32_t inst,
|
||||
uint32_t *opc,
|
||||
uint32_t cause,
|
||||
enum emulation_result kvm_mips_emulate_CP0(union mips_instruction inst,
|
||||
u32 *opc,
|
||||
u32 cause,
|
||||
struct kvm_run *run,
|
||||
struct kvm_vcpu *vcpu);
|
||||
enum emulation_result kvm_mips_emulate_store(uint32_t inst,
|
||||
uint32_t cause,
|
||||
enum emulation_result kvm_mips_emulate_store(union mips_instruction inst,
|
||||
u32 cause,
|
||||
struct kvm_run *run,
|
||||
struct kvm_vcpu *vcpu);
|
||||
enum emulation_result kvm_mips_emulate_load(uint32_t inst,
|
||||
uint32_t cause,
|
||||
enum emulation_result kvm_mips_emulate_load(union mips_instruction inst,
|
||||
u32 cause,
|
||||
struct kvm_run *run,
|
||||
struct kvm_vcpu *vcpu);
|
||||
|
||||
|
@ -789,13 +724,13 @@ unsigned int kvm_mips_config4_wrmask(struct kvm_vcpu *vcpu);
|
|||
unsigned int kvm_mips_config5_wrmask(struct kvm_vcpu *vcpu);
|
||||
|
||||
/* Dynamic binary translation */
|
||||
extern int kvm_mips_trans_cache_index(uint32_t inst, uint32_t *opc,
|
||||
struct kvm_vcpu *vcpu);
|
||||
extern int kvm_mips_trans_cache_va(uint32_t inst, uint32_t *opc,
|
||||
extern int kvm_mips_trans_cache_index(union mips_instruction inst,
|
||||
u32 *opc, struct kvm_vcpu *vcpu);
|
||||
extern int kvm_mips_trans_cache_va(union mips_instruction inst, u32 *opc,
|
||||
struct kvm_vcpu *vcpu);
|
||||
extern int kvm_mips_trans_mfc0(uint32_t inst, uint32_t *opc,
|
||||
extern int kvm_mips_trans_mfc0(union mips_instruction inst, u32 *opc,
|
||||
struct kvm_vcpu *vcpu);
|
||||
extern int kvm_mips_trans_mtc0(uint32_t inst, uint32_t *opc,
|
||||
extern int kvm_mips_trans_mtc0(union mips_instruction inst, u32 *opc,
|
||||
struct kvm_vcpu *vcpu);
|
||||
|
||||
/* Misc */
|
||||
|
|
|
@ -55,7 +55,7 @@
|
|||
#define cpu_has_mipsmt 0
|
||||
#define cpu_has_vint 0
|
||||
#define cpu_has_veic 0
|
||||
#define cpu_hwrena_impl_bits 0xc0000000
|
||||
#define cpu_hwrena_impl_bits (MIPS_HWRENA_IMPL1 | MIPS_HWRENA_IMPL2)
|
||||
#define cpu_has_wsbh 1
|
||||
|
||||
#define cpu_has_rixi (cpu_data[0].cputype != CPU_CAVIUM_OCTEON)
|
||||
|
|
|
@ -53,7 +53,7 @@
|
|||
#define CP0_SEGCTL2 $5, 4
|
||||
#define CP0_WIRED $6
|
||||
#define CP0_INFO $7
|
||||
#define CP0_HWRENA $7, 0
|
||||
#define CP0_HWRENA $7
|
||||
#define CP0_BADVADDR $8
|
||||
#define CP0_BADINSTR $8, 1
|
||||
#define CP0_COUNT $9
|
||||
|
@ -533,6 +533,7 @@
|
|||
#define TX49_CONF_CWFON (_ULCAST_(1) << 27)
|
||||
|
||||
/* Bits specific to the MIPS32/64 PRA. */
|
||||
#define MIPS_CONF_VI (_ULCAST_(1) << 3)
|
||||
#define MIPS_CONF_MT (_ULCAST_(7) << 7)
|
||||
#define MIPS_CONF_MT_TLB (_ULCAST_(1) << 7)
|
||||
#define MIPS_CONF_MT_FTLB (_ULCAST_(4) << 7)
|
||||
|
@ -853,6 +854,24 @@
|
|||
#define MIPS_CDMMBASE_ADDR_SHIFT 11
|
||||
#define MIPS_CDMMBASE_ADDR_START 15
|
||||
|
||||
/* RDHWR register numbers */
|
||||
#define MIPS_HWR_CPUNUM 0 /* CPU number */
|
||||
#define MIPS_HWR_SYNCISTEP 1 /* SYNCI step size */
|
||||
#define MIPS_HWR_CC 2 /* Cycle counter */
|
||||
#define MIPS_HWR_CCRES 3 /* Cycle counter resolution */
|
||||
#define MIPS_HWR_ULR 29 /* UserLocal */
|
||||
#define MIPS_HWR_IMPL1 30 /* Implementation dependent */
|
||||
#define MIPS_HWR_IMPL2 31 /* Implementation dependent */
|
||||
|
||||
/* Bits in HWREna register */
|
||||
#define MIPS_HWRENA_CPUNUM (_ULCAST_(1) << MIPS_HWR_CPUNUM)
|
||||
#define MIPS_HWRENA_SYNCISTEP (_ULCAST_(1) << MIPS_HWR_SYNCISTEP)
|
||||
#define MIPS_HWRENA_CC (_ULCAST_(1) << MIPS_HWR_CC)
|
||||
#define MIPS_HWRENA_CCRES (_ULCAST_(1) << MIPS_HWR_CCRES)
|
||||
#define MIPS_HWRENA_ULR (_ULCAST_(1) << MIPS_HWR_ULR)
|
||||
#define MIPS_HWRENA_IMPL1 (_ULCAST_(1) << MIPS_HWR_IMPL1)
|
||||
#define MIPS_HWRENA_IMPL2 (_ULCAST_(1) << MIPS_HWR_IMPL2)
|
||||
|
||||
/*
|
||||
* Bitfields in the TX39 family CP0 Configuration Register 3
|
||||
*/
|
||||
|
|
|
@ -80,16 +80,6 @@ extern int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
|
|||
|
||||
#define HAVE_ARCH_PCI_RESOURCE_TO_USER
|
||||
|
||||
static inline void pci_resource_to_user(const struct pci_dev *dev, int bar,
|
||||
const struct resource *rsrc, resource_size_t *start,
|
||||
resource_size_t *end)
|
||||
{
|
||||
phys_addr_t size = resource_size(rsrc);
|
||||
|
||||
*start = fixup_bigphys_addr(rsrc->start, size);
|
||||
*end = rsrc->start + size;
|
||||
}
|
||||
|
||||
/*
|
||||
* Dynamic DMA mapping stuff.
|
||||
* MIPS has everything mapped statically.
|
||||
|
|
|
@ -21,6 +21,7 @@ extern void *set_vi_handler(int n, vi_handler_t addr);
|
|||
|
||||
extern void *set_except_vector(int n, void *addr);
|
||||
extern unsigned long ebase;
|
||||
extern unsigned int hwrena;
|
||||
extern void per_cpu_trap_init(bool);
|
||||
extern void cpu_cache_init(void);
|
||||
|
||||
|
|
|
@ -104,8 +104,13 @@ Ip_u1s2(_bltz);
|
|||
Ip_u1s2(_bltzl);
|
||||
Ip_u1u2s3(_bne);
|
||||
Ip_u2s3u1(_cache);
|
||||
Ip_u1u2(_cfc1);
|
||||
Ip_u2u1(_cfcmsa);
|
||||
Ip_u1u2(_ctc1);
|
||||
Ip_u2u1(_ctcmsa);
|
||||
Ip_u2u1s3(_daddiu);
|
||||
Ip_u3u1u2(_daddu);
|
||||
Ip_u1(_di);
|
||||
Ip_u2u1msbu3(_dins);
|
||||
Ip_u2u1msbu3(_dinsm);
|
||||
Ip_u1u2(_divu);
|
||||
|
@ -141,6 +146,8 @@ Ip_u1(_mfhi);
|
|||
Ip_u1(_mflo);
|
||||
Ip_u1u2u3(_mtc0);
|
||||
Ip_u1u2u3(_mthc0);
|
||||
Ip_u1(_mthi);
|
||||
Ip_u1(_mtlo);
|
||||
Ip_u3u1u2(_mul);
|
||||
Ip_u3u1u2(_or);
|
||||
Ip_u2u1u3(_ori);
|
||||
|
|
|
@ -21,20 +21,20 @@
enum major_op {
	spec_op, bcond_op, j_op, jal_op,
	beq_op, bne_op, blez_op, bgtz_op,
	addi_op, cbcond0_op = addi_op, addiu_op, slti_op, sltiu_op,
	addi_op, pop10_op = addi_op, addiu_op, slti_op, sltiu_op,
	andi_op, ori_op, xori_op, lui_op,
	cop0_op, cop1_op, cop2_op, cop1x_op,
	beql_op, bnel_op, blezl_op, bgtzl_op,
	daddi_op, cbcond1_op = daddi_op, daddiu_op, ldl_op, ldr_op,
	daddi_op, pop30_op = daddi_op, daddiu_op, ldl_op, ldr_op,
	spec2_op, jalx_op, mdmx_op, msa_op = mdmx_op, spec3_op,
	lb_op, lh_op, lwl_op, lw_op,
	lbu_op, lhu_op, lwr_op, lwu_op,
	sb_op, sh_op, swl_op, sw_op,
	sdl_op, sdr_op, swr_op, cache_op,
	ll_op, lwc1_op, lwc2_op, bc6_op = lwc2_op, pref_op,
	lld_op, ldc1_op, ldc2_op, beqzcjic_op = ldc2_op, ld_op,
	lld_op, ldc1_op, ldc2_op, pop66_op = ldc2_op, ld_op,
	sc_op, swc1_op, swc2_op, balc6_op = swc2_op, major_3b_op,
	scd_op, sdc1_op, sdc2_op, bnezcjialc_op = sdc2_op, sd_op
	scd_op, sdc1_op, sdc2_op, pop76_op = sdc2_op, sd_op
};

/*

@ -92,6 +92,50 @@ enum spec3_op {
	rdhwr_op = 0x3b
};

/*
 * Bits 10-6 minor opcode for r6 spec mult/div encodings
 */
enum mult_op {
	mult_mult_op = 0x0,
	mult_mul_op = 0x2,
	mult_muh_op = 0x3,
};
enum multu_op {
	multu_multu_op = 0x0,
	multu_mulu_op = 0x2,
	multu_muhu_op = 0x3,
};
enum div_op {
	div_div_op = 0x0,
	div_div6_op = 0x2,
	div_mod_op = 0x3,
};
enum divu_op {
	divu_divu_op = 0x0,
	divu_divu6_op = 0x2,
	divu_modu_op = 0x3,
};
enum dmult_op {
	dmult_dmult_op = 0x0,
	dmult_dmul_op = 0x2,
	dmult_dmuh_op = 0x3,
};
enum dmultu_op {
	dmultu_dmultu_op = 0x0,
	dmultu_dmulu_op = 0x2,
	dmultu_dmuhu_op = 0x3,
};
enum ddiv_op {
	ddiv_ddiv_op = 0x0,
	ddiv_ddiv6_op = 0x2,
	ddiv_dmod_op = 0x3,
};
enum ddivu_op {
	ddivu_ddivu_op = 0x0,
	ddivu_ddivu6_op = 0x2,
	ddivu_dmodu_op = 0x3,
};

/*
 * rt field of bcond opcodes.
 */

@ -103,7 +147,7 @@ enum rt_op {
	bltzal_op, bgezal_op, bltzall_op, bgezall_op,
	rt_op_0x14, rt_op_0x15, rt_op_0x16, rt_op_0x17,
	rt_op_0x18, rt_op_0x19, rt_op_0x1a, rt_op_0x1b,
	bposge32_op, rt_op_0x1d, rt_op_0x1e, rt_op_0x1f
	bposge32_op, rt_op_0x1d, rt_op_0x1e, synci_op
};

/*

@ -237,6 +281,21 @@ enum bshfl_func {
	seh_op = 0x18,
};

/*
 * MSA minor opcodes.
 */
enum msa_func {
	msa_elm_op = 0x19,
};

/*
 * MSA ELM opcodes.
 */
enum msa_elm {
	msa_ctc_op = 0x3e,
	msa_cfc_op = 0x7e,
};

/*
 * func field for MSA MI10 format.
 */

@ -264,7 +323,7 @@ enum mm_major_op {
	mm_pool32b_op, mm_pool16b_op, mm_lhu16_op, mm_andi16_op,
	mm_addiu32_op, mm_lhu32_op, mm_sh32_op, mm_lh32_op,
	mm_pool32i_op, mm_pool16c_op, mm_lwsp16_op, mm_pool16d_op,
	mm_ori32_op, mm_pool32f_op, mm_reserved1_op, mm_reserved2_op,
	mm_ori32_op, mm_pool32f_op, mm_pool32s_op, mm_reserved2_op,
	mm_pool32c_op, mm_lwgp16_op, mm_lw16_op, mm_pool16e_op,
	mm_xori32_op, mm_jals32_op, mm_addiupc_op, mm_reserved3_op,
	mm_reserved4_op, mm_pool16f_op, mm_sb16_op, mm_beqz16_op,

@ -360,7 +419,10 @@ enum mm_32axf_minor_op {
	mm_mflo32_op = 0x075,
	mm_jalrhb_op = 0x07c,
	mm_tlbwi_op = 0x08d,
	mm_mthi32_op = 0x0b5,
	mm_tlbwr_op = 0x0cd,
	mm_mtlo32_op = 0x0f5,
	mm_di_op = 0x11d,
	mm_jalrs_op = 0x13c,
	mm_jalrshb_op = 0x17c,
	mm_sync_op = 0x1ad,

@ -478,6 +540,13 @@ enum mm_32f_73_minor_op {
	mm_fcvts1_op = 0xed,
};

/*
 * (microMIPS) POOL32S minor opcodes.
 */
enum mm_32s_minor_op {
	mm_32s_elm_op = 0x16,
};

/*
 * (microMIPS) POOL16C minor opcodes.
 */

@ -586,6 +655,36 @@ struct r_format { /* Register format */
	;))))))
};

struct c0r_format { /* C0 register format */
	__BITFIELD_FIELD(unsigned int opcode : 6,
	__BITFIELD_FIELD(unsigned int rs : 5,
	__BITFIELD_FIELD(unsigned int rt : 5,
	__BITFIELD_FIELD(unsigned int rd : 5,
	__BITFIELD_FIELD(unsigned int z: 8,
	__BITFIELD_FIELD(unsigned int sel : 3,
	;))))))
};

struct mfmc0_format { /* MFMC0 register format */
	__BITFIELD_FIELD(unsigned int opcode : 6,
	__BITFIELD_FIELD(unsigned int rs : 5,
	__BITFIELD_FIELD(unsigned int rt : 5,
	__BITFIELD_FIELD(unsigned int rd : 5,
	__BITFIELD_FIELD(unsigned int re : 5,
	__BITFIELD_FIELD(unsigned int sc : 1,
	__BITFIELD_FIELD(unsigned int : 2,
	__BITFIELD_FIELD(unsigned int sel : 3,
	;))))))))
};

struct co_format { /* C0 CO format */
	__BITFIELD_FIELD(unsigned int opcode : 6,
	__BITFIELD_FIELD(unsigned int co : 1,
	__BITFIELD_FIELD(unsigned int code : 19,
	__BITFIELD_FIELD(unsigned int func : 6,
	;))))
};

struct p_format { /* Performance counter format (R10000) */
	__BITFIELD_FIELD(unsigned int opcode : 6,
	__BITFIELD_FIELD(unsigned int rs : 5,

@ -937,6 +1036,9 @@ union mips_instruction {
	struct u_format u_format;
	struct c_format c_format;
	struct r_format r_format;
	struct c0r_format c0r_format;
	struct mfmc0_format mfmc0_format;
	struct co_format co_format;
	struct p_format p_format;
	struct f_format f_format;
	struct ma_format ma_format;

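As an aside, the instruction-format structs added above are consumed by loading a raw 32-bit word into union mips_instruction and reading the named bitfields. Below is a minimal user-space sketch of the same field extraction, done with plain shifts and masks so it does not depend on __BITFIELD_FIELD's endianness handling; the field widths (6/5/5/5/8/3) follow c0r_format, while the example word and the program itself are purely illustrative and not part of this diff:

/* Illustrative only: decode the c0r_format fields of an MFC0/MTC0 word. */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t inst = 0x40026000;	/* example encoding: mfc0 $2, $12, 0 */
	unsigned int opcode = (inst >> 26) & 0x3f;	/* 6-bit major opcode */
	unsigned int rs     = (inst >> 21) & 0x1f;	/* 5 bits */
	unsigned int rt     = (inst >> 16) & 0x1f;	/* 5 bits, GPR */
	unsigned int rd     = (inst >> 11) & 0x1f;	/* 5 bits, CP0 register */
	unsigned int sel    = inst & 0x7;		/* low 3 bits, CP0 select */

	printf("opcode=%u rs=%u rt=%u rd=%u sel=%u\n", opcode, rs, rt, rd, sel);
	return 0;
}
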
@ -339,71 +339,9 @@ void output_pm_defines(void)
|
|||
}
|
||||
#endif
|
||||
|
||||
void output_cpuinfo_defines(void)
|
||||
{
|
||||
COMMENT(" MIPS cpuinfo offsets. ");
|
||||
DEFINE(CPUINFO_SIZE, sizeof(struct cpuinfo_mips));
|
||||
#ifdef CONFIG_MIPS_ASID_BITS_VARIABLE
|
||||
OFFSET(CPUINFO_ASID_MASK, cpuinfo_mips, asid_mask);
|
||||
#endif
|
||||
}
|
||||
|
||||
void output_kvm_defines(void)
|
||||
{
|
||||
COMMENT(" KVM/MIPS Specfic offsets. ");
|
||||
DEFINE(VCPU_ARCH_SIZE, sizeof(struct kvm_vcpu_arch));
|
||||
OFFSET(VCPU_RUN, kvm_vcpu, run);
|
||||
OFFSET(VCPU_HOST_ARCH, kvm_vcpu, arch);
|
||||
|
||||
OFFSET(VCPU_HOST_EBASE, kvm_vcpu_arch, host_ebase);
|
||||
OFFSET(VCPU_GUEST_EBASE, kvm_vcpu_arch, guest_ebase);
|
||||
|
||||
OFFSET(VCPU_HOST_STACK, kvm_vcpu_arch, host_stack);
|
||||
OFFSET(VCPU_HOST_GP, kvm_vcpu_arch, host_gp);
|
||||
|
||||
OFFSET(VCPU_HOST_CP0_BADVADDR, kvm_vcpu_arch, host_cp0_badvaddr);
|
||||
OFFSET(VCPU_HOST_CP0_CAUSE, kvm_vcpu_arch, host_cp0_cause);
|
||||
OFFSET(VCPU_HOST_EPC, kvm_vcpu_arch, host_cp0_epc);
|
||||
OFFSET(VCPU_HOST_ENTRYHI, kvm_vcpu_arch, host_cp0_entryhi);
|
||||
|
||||
OFFSET(VCPU_GUEST_INST, kvm_vcpu_arch, guest_inst);
|
||||
|
||||
OFFSET(VCPU_R0, kvm_vcpu_arch, gprs[0]);
|
||||
OFFSET(VCPU_R1, kvm_vcpu_arch, gprs[1]);
|
||||
OFFSET(VCPU_R2, kvm_vcpu_arch, gprs[2]);
|
||||
OFFSET(VCPU_R3, kvm_vcpu_arch, gprs[3]);
|
||||
OFFSET(VCPU_R4, kvm_vcpu_arch, gprs[4]);
|
||||
OFFSET(VCPU_R5, kvm_vcpu_arch, gprs[5]);
|
||||
OFFSET(VCPU_R6, kvm_vcpu_arch, gprs[6]);
|
||||
OFFSET(VCPU_R7, kvm_vcpu_arch, gprs[7]);
|
||||
OFFSET(VCPU_R8, kvm_vcpu_arch, gprs[8]);
|
||||
OFFSET(VCPU_R9, kvm_vcpu_arch, gprs[9]);
|
||||
OFFSET(VCPU_R10, kvm_vcpu_arch, gprs[10]);
|
||||
OFFSET(VCPU_R11, kvm_vcpu_arch, gprs[11]);
|
||||
OFFSET(VCPU_R12, kvm_vcpu_arch, gprs[12]);
|
||||
OFFSET(VCPU_R13, kvm_vcpu_arch, gprs[13]);
|
||||
OFFSET(VCPU_R14, kvm_vcpu_arch, gprs[14]);
|
||||
OFFSET(VCPU_R15, kvm_vcpu_arch, gprs[15]);
|
||||
OFFSET(VCPU_R16, kvm_vcpu_arch, gprs[16]);
|
||||
OFFSET(VCPU_R17, kvm_vcpu_arch, gprs[17]);
|
||||
OFFSET(VCPU_R18, kvm_vcpu_arch, gprs[18]);
|
||||
OFFSET(VCPU_R19, kvm_vcpu_arch, gprs[19]);
|
||||
OFFSET(VCPU_R20, kvm_vcpu_arch, gprs[20]);
|
||||
OFFSET(VCPU_R21, kvm_vcpu_arch, gprs[21]);
|
||||
OFFSET(VCPU_R22, kvm_vcpu_arch, gprs[22]);
|
||||
OFFSET(VCPU_R23, kvm_vcpu_arch, gprs[23]);
|
||||
OFFSET(VCPU_R24, kvm_vcpu_arch, gprs[24]);
|
||||
OFFSET(VCPU_R25, kvm_vcpu_arch, gprs[25]);
|
||||
OFFSET(VCPU_R26, kvm_vcpu_arch, gprs[26]);
|
||||
OFFSET(VCPU_R27, kvm_vcpu_arch, gprs[27]);
|
||||
OFFSET(VCPU_R28, kvm_vcpu_arch, gprs[28]);
|
||||
OFFSET(VCPU_R29, kvm_vcpu_arch, gprs[29]);
|
||||
OFFSET(VCPU_R30, kvm_vcpu_arch, gprs[30]);
|
||||
OFFSET(VCPU_R31, kvm_vcpu_arch, gprs[31]);
|
||||
OFFSET(VCPU_LO, kvm_vcpu_arch, lo);
|
||||
OFFSET(VCPU_HI, kvm_vcpu_arch, hi);
|
||||
OFFSET(VCPU_PC, kvm_vcpu_arch, pc);
|
||||
BLANK();
|
||||
|
||||
OFFSET(VCPU_FPR0, kvm_vcpu_arch, fpu.fpr[0]);
|
||||
OFFSET(VCPU_FPR1, kvm_vcpu_arch, fpu.fpr[1]);
|
||||
|
@ -441,14 +379,6 @@ void output_kvm_defines(void)
|
|||
OFFSET(VCPU_FCR31, kvm_vcpu_arch, fpu.fcr31);
|
||||
OFFSET(VCPU_MSA_CSR, kvm_vcpu_arch, fpu.msacsr);
|
||||
BLANK();
|
||||
|
||||
OFFSET(VCPU_COP0, kvm_vcpu_arch, cop0);
|
||||
OFFSET(VCPU_GUEST_KERNEL_ASID, kvm_vcpu_arch, guest_kernel_asid);
|
||||
OFFSET(VCPU_GUEST_USER_ASID, kvm_vcpu_arch, guest_user_asid);
|
||||
|
||||
OFFSET(COP0_TLB_HI, mips_coproc, reg[MIPS_CP0_TLB_HI][0]);
|
||||
OFFSET(COP0_STATUS, mips_coproc, reg[MIPS_CP0_STATUS][0]);
|
||||
BLANK();
|
||||
}
|
||||
|
||||
#ifdef CONFIG_MIPS_CPS
|
||||
|
|
|
@ -790,7 +790,7 @@ int __compute_return_epc_for_insn(struct pt_regs *regs,
|
|||
epc += 4 + (insn.i_format.simmediate << 2);
|
||||
regs->cp0_epc = epc;
|
||||
break;
|
||||
case beqzcjic_op:
|
||||
case pop66_op:
|
||||
if (!cpu_has_mips_r6) {
|
||||
ret = -SIGILL;
|
||||
break;
|
||||
|
@ -798,7 +798,7 @@ int __compute_return_epc_for_insn(struct pt_regs *regs,
|
|||
/* Compact branch: BEQZC || JIC */
|
||||
regs->cp0_epc += 8;
|
||||
break;
|
||||
case bnezcjialc_op:
|
||||
case pop76_op:
|
||||
if (!cpu_has_mips_r6) {
|
||||
ret = -SIGILL;
|
||||
break;
|
||||
|
@ -809,8 +809,8 @@ int __compute_return_epc_for_insn(struct pt_regs *regs,
|
|||
regs->cp0_epc += 8;
|
||||
break;
|
||||
#endif
|
||||
case cbcond0_op:
|
||||
case cbcond1_op:
|
||||
case pop10_op:
|
||||
case pop30_op:
|
||||
/* Only valid for MIPS R6 */
|
||||
if (!cpu_has_mips_r6) {
|
||||
ret = -SIGILL;
|
||||
|
|
|
@ -619,17 +619,17 @@ static int simulate_rdhwr(struct pt_regs *regs, int rd, int rt)
|
|||
perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS,
|
||||
1, regs, 0);
|
||||
switch (rd) {
|
||||
case 0: /* CPU number */
|
||||
case MIPS_HWR_CPUNUM: /* CPU number */
|
||||
regs->regs[rt] = smp_processor_id();
|
||||
return 0;
|
||||
case 1: /* SYNCI length */
|
||||
case MIPS_HWR_SYNCISTEP: /* SYNCI length */
|
||||
regs->regs[rt] = min(current_cpu_data.dcache.linesz,
|
||||
current_cpu_data.icache.linesz);
|
||||
return 0;
|
||||
case 2: /* Read count register */
|
||||
case MIPS_HWR_CC: /* Read count register */
|
||||
regs->regs[rt] = read_c0_count();
|
||||
return 0;
|
||||
case 3: /* Count register resolution */
|
||||
case MIPS_HWR_CCRES: /* Count register resolution */
|
||||
switch (current_cpu_type()) {
|
||||
case CPU_20KC:
|
||||
case CPU_25KF:
|
||||
|
@ -639,7 +639,7 @@ static int simulate_rdhwr(struct pt_regs *regs, int rd, int rt)
|
|||
regs->regs[rt] = 2;
|
||||
}
|
||||
return 0;
|
||||
case 29:
|
||||
case MIPS_HWR_ULR: /* Read UserLocal register */
|
||||
regs->regs[rt] = ti->tp_value;
|
||||
return 0;
|
||||
default:
|
||||
|
@ -1859,6 +1859,7 @@ void __noreturn nmi_exception_handler(struct pt_regs *regs)
|
|||
#define VECTORSPACING 0x100 /* for EI/VI mode */
|
||||
|
||||
unsigned long ebase;
|
||||
EXPORT_SYMBOL_GPL(ebase);
|
||||
unsigned long exception_handlers[32];
|
||||
unsigned long vi_handlers[64];
|
||||
|
||||
|
@ -2063,16 +2064,22 @@ static void configure_status(void)
|
|||
status_set);
|
||||
}
|
||||
|
||||
unsigned int hwrena;
|
||||
EXPORT_SYMBOL_GPL(hwrena);
|
||||
|
||||
/* configure HWRENA register */
|
||||
static void configure_hwrena(void)
|
||||
{
|
||||
unsigned int hwrena = cpu_hwrena_impl_bits;
|
||||
hwrena = cpu_hwrena_impl_bits;
|
||||
|
||||
if (cpu_has_mips_r2_r6)
|
||||
hwrena |= 0x0000000f;
|
||||
hwrena |= MIPS_HWRENA_CPUNUM |
|
||||
MIPS_HWRENA_SYNCISTEP |
|
||||
MIPS_HWRENA_CC |
|
||||
MIPS_HWRENA_CCRES;
|
||||
|
||||
if (!noulri && cpu_has_userlocal)
|
||||
hwrena |= (1 << 29);
|
||||
hwrena |= MIPS_HWRENA_ULR;
|
||||
|
||||
if (hwrena)
|
||||
write_c0_hwrena(hwrena);
|
||||
|
|
|
@ -17,6 +17,7 @@ if VIRTUALIZATION
config KVM
	tristate "Kernel-based Virtual Machine (KVM) support"
	depends on HAVE_KVM
	select EXPORT_UASM
	select PREEMPT_NOTIFIERS
	select ANON_INODES
	select KVM_MMIO

@ -7,9 +7,10 @@ EXTRA_CFLAGS += -Ivirt/kvm -Iarch/mips/kvm

common-objs-$(CONFIG_CPU_HAS_MSA) += msa.o

kvm-objs := $(common-objs-y) mips.o emulate.o locore.o \
kvm-objs := $(common-objs-y) mips.o emulate.o entry.o \
	    interrupt.o stats.o commpage.o \
	    dyntrans.o trap_emul.o fpu.o
kvm-objs += mmu.o

obj-$(CONFIG_KVM) += kvm.o
obj-y += callback.o tlb.o

@ -4,7 +4,7 @@
 * for more details.
 *
 * commpage, currently used for Virtual COP0 registers.
 * Mapped into the guest kernel @ 0x0.
 * Mapped into the guest kernel @ KVM_GUEST_COMMPAGE_ADDR.
 *
 * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
 * Authors: Sanjay Lal <sanjayl@kymasys.com>

@ -11,6 +11,7 @@

#include <linux/errno.h>
#include <linux/err.h>
#include <linux/highmem.h>
#include <linux/kvm_host.h>
#include <linux/module.h>
#include <linux/vmalloc.h>

@ -20,125 +21,114 @@
|
|||
|
||||
#include "commpage.h"
|
||||
|
||||
#define SYNCI_TEMPLATE 0x041f0000
|
||||
#define SYNCI_BASE(x) (((x) >> 21) & 0x1f)
|
||||
#define SYNCI_OFFSET ((x) & 0xffff)
|
||||
/**
|
||||
* kvm_mips_trans_replace() - Replace trapping instruction in guest memory.
|
||||
* @vcpu: Virtual CPU.
|
||||
* @opc: PC of instruction to replace.
|
||||
* @replace: Instruction to write
|
||||
*/
|
||||
static int kvm_mips_trans_replace(struct kvm_vcpu *vcpu, u32 *opc,
|
||||
union mips_instruction replace)
|
||||
{
|
||||
unsigned long paddr, flags;
|
||||
void *vaddr;
|
||||
|
||||
#define LW_TEMPLATE 0x8c000000
|
||||
#define CLEAR_TEMPLATE 0x00000020
|
||||
#define SW_TEMPLATE 0xac000000
|
||||
if (KVM_GUEST_KSEGX((unsigned long)opc) == KVM_GUEST_KSEG0) {
|
||||
paddr = kvm_mips_translate_guest_kseg0_to_hpa(vcpu,
|
||||
(unsigned long)opc);
|
||||
vaddr = kmap_atomic(pfn_to_page(PHYS_PFN(paddr)));
|
||||
vaddr += paddr & ~PAGE_MASK;
|
||||
memcpy(vaddr, (void *)&replace, sizeof(u32));
|
||||
local_flush_icache_range((unsigned long)vaddr,
|
||||
(unsigned long)vaddr + 32);
|
||||
kunmap_atomic(vaddr);
|
||||
} else if (KVM_GUEST_KSEGX((unsigned long) opc) == KVM_GUEST_KSEG23) {
|
||||
local_irq_save(flags);
|
||||
memcpy((void *)opc, (void *)&replace, sizeof(u32));
|
||||
local_flush_icache_range((unsigned long)opc,
|
||||
(unsigned long)opc + 32);
|
||||
local_irq_restore(flags);
|
||||
} else {
|
||||
kvm_err("%s: Invalid address: %p\n", __func__, opc);
|
||||
return -EFAULT;
|
||||
}
|
||||
|
||||
int kvm_mips_trans_cache_index(uint32_t inst, uint32_t *opc,
|
||||
return 0;
|
||||
}
|
||||
|
||||
int kvm_mips_trans_cache_index(union mips_instruction inst, u32 *opc,
|
||||
struct kvm_vcpu *vcpu)
|
||||
{
|
||||
int result = 0;
|
||||
unsigned long kseg0_opc;
|
||||
uint32_t synci_inst = 0x0;
|
||||
union mips_instruction nop_inst = { 0 };
|
||||
|
||||
/* Replace the CACHE instruction, with a NOP */
|
||||
kseg0_opc =
|
||||
CKSEG0ADDR(kvm_mips_translate_guest_kseg0_to_hpa
|
||||
(vcpu, (unsigned long) opc));
|
||||
memcpy((void *)kseg0_opc, (void *)&synci_inst, sizeof(uint32_t));
|
||||
local_flush_icache_range(kseg0_opc, kseg0_opc + 32);
|
||||
|
||||
return result;
|
||||
return kvm_mips_trans_replace(vcpu, opc, nop_inst);
|
||||
}
|
||||
|
||||
/*
|
||||
* Address based CACHE instructions are transformed into synci(s). A little
|
||||
* heavy for just D-cache invalidates, but avoids an expensive trap
|
||||
*/
|
||||
int kvm_mips_trans_cache_va(uint32_t inst, uint32_t *opc,
|
||||
int kvm_mips_trans_cache_va(union mips_instruction inst, u32 *opc,
|
||||
struct kvm_vcpu *vcpu)
|
||||
{
|
||||
int result = 0;
|
||||
unsigned long kseg0_opc;
|
||||
uint32_t synci_inst = SYNCI_TEMPLATE, base, offset;
|
||||
union mips_instruction synci_inst = { 0 };
|
||||
|
||||
base = (inst >> 21) & 0x1f;
|
||||
offset = inst & 0xffff;
|
||||
synci_inst |= (base << 21);
|
||||
synci_inst |= offset;
|
||||
synci_inst.i_format.opcode = bcond_op;
|
||||
synci_inst.i_format.rs = inst.i_format.rs;
|
||||
synci_inst.i_format.rt = synci_op;
|
||||
if (cpu_has_mips_r6)
|
||||
synci_inst.i_format.simmediate = inst.spec3_format.simmediate;
|
||||
else
|
||||
synci_inst.i_format.simmediate = inst.i_format.simmediate;
|
||||
|
||||
kseg0_opc =
|
||||
CKSEG0ADDR(kvm_mips_translate_guest_kseg0_to_hpa
|
||||
(vcpu, (unsigned long) opc));
|
||||
memcpy((void *)kseg0_opc, (void *)&synci_inst, sizeof(uint32_t));
|
||||
local_flush_icache_range(kseg0_opc, kseg0_opc + 32);
|
||||
|
||||
return result;
|
||||
return kvm_mips_trans_replace(vcpu, opc, synci_inst);
|
||||
}
|
||||
|
||||
int kvm_mips_trans_mfc0(uint32_t inst, uint32_t *opc, struct kvm_vcpu *vcpu)
|
||||
int kvm_mips_trans_mfc0(union mips_instruction inst, u32 *opc,
|
||||
struct kvm_vcpu *vcpu)
|
||||
{
|
||||
int32_t rt, rd, sel;
|
||||
uint32_t mfc0_inst;
|
||||
unsigned long kseg0_opc, flags;
|
||||
union mips_instruction mfc0_inst = { 0 };
|
||||
u32 rd, sel;
|
||||
|
||||
rt = (inst >> 16) & 0x1f;
|
||||
rd = (inst >> 11) & 0x1f;
|
||||
sel = inst & 0x7;
|
||||
rd = inst.c0r_format.rd;
|
||||
sel = inst.c0r_format.sel;
|
||||
|
||||
if ((rd == MIPS_CP0_ERRCTL) && (sel == 0)) {
|
||||
mfc0_inst = CLEAR_TEMPLATE;
|
||||
mfc0_inst |= ((rt & 0x1f) << 16);
|
||||
if (rd == MIPS_CP0_ERRCTL && sel == 0) {
|
||||
mfc0_inst.r_format.opcode = spec_op;
|
||||
mfc0_inst.r_format.rd = inst.c0r_format.rt;
|
||||
mfc0_inst.r_format.func = add_op;
|
||||
} else {
|
||||
mfc0_inst = LW_TEMPLATE;
|
||||
mfc0_inst |= ((rt & 0x1f) << 16);
|
||||
mfc0_inst |= offsetof(struct kvm_mips_commpage,
|
||||
cop0.reg[rd][sel]);
|
||||
mfc0_inst.i_format.opcode = lw_op;
|
||||
mfc0_inst.i_format.rt = inst.c0r_format.rt;
|
||||
mfc0_inst.i_format.simmediate = KVM_GUEST_COMMPAGE_ADDR |
|
||||
offsetof(struct kvm_mips_commpage, cop0.reg[rd][sel]);
|
||||
#ifdef CONFIG_CPU_BIG_ENDIAN
|
||||
if (sizeof(vcpu->arch.cop0->reg[0][0]) == 8)
|
||||
mfc0_inst.i_format.simmediate |= 4;
|
||||
#endif
|
||||
}
|
||||
|
||||
if (KVM_GUEST_KSEGX(opc) == KVM_GUEST_KSEG0) {
|
||||
kseg0_opc =
|
||||
CKSEG0ADDR(kvm_mips_translate_guest_kseg0_to_hpa
|
||||
(vcpu, (unsigned long) opc));
|
||||
memcpy((void *)kseg0_opc, (void *)&mfc0_inst, sizeof(uint32_t));
|
||||
local_flush_icache_range(kseg0_opc, kseg0_opc + 32);
|
||||
} else if (KVM_GUEST_KSEGX((unsigned long) opc) == KVM_GUEST_KSEG23) {
|
||||
local_irq_save(flags);
|
||||
memcpy((void *)opc, (void *)&mfc0_inst, sizeof(uint32_t));
|
||||
local_flush_icache_range((unsigned long)opc,
|
||||
(unsigned long)opc + 32);
|
||||
local_irq_restore(flags);
|
||||
} else {
|
||||
kvm_err("%s: Invalid address: %p\n", __func__, opc);
|
||||
return -EFAULT;
|
||||
}
|
||||
|
||||
return 0;
|
||||
return kvm_mips_trans_replace(vcpu, opc, mfc0_inst);
|
||||
}
|
||||
|
||||
int kvm_mips_trans_mtc0(uint32_t inst, uint32_t *opc, struct kvm_vcpu *vcpu)
|
||||
int kvm_mips_trans_mtc0(union mips_instruction inst, u32 *opc,
|
||||
struct kvm_vcpu *vcpu)
|
||||
{
|
||||
int32_t rt, rd, sel;
|
||||
uint32_t mtc0_inst = SW_TEMPLATE;
|
||||
unsigned long kseg0_opc, flags;
|
||||
union mips_instruction mtc0_inst = { 0 };
|
||||
u32 rd, sel;
|
||||
|
||||
rt = (inst >> 16) & 0x1f;
|
||||
rd = (inst >> 11) & 0x1f;
|
||||
sel = inst & 0x7;
|
||||
rd = inst.c0r_format.rd;
|
||||
sel = inst.c0r_format.sel;
|
||||
|
||||
mtc0_inst |= ((rt & 0x1f) << 16);
|
||||
mtc0_inst |= offsetof(struct kvm_mips_commpage, cop0.reg[rd][sel]);
|
||||
mtc0_inst.i_format.opcode = sw_op;
|
||||
mtc0_inst.i_format.rt = inst.c0r_format.rt;
|
||||
mtc0_inst.i_format.simmediate = KVM_GUEST_COMMPAGE_ADDR |
|
||||
offsetof(struct kvm_mips_commpage, cop0.reg[rd][sel]);
|
||||
#ifdef CONFIG_CPU_BIG_ENDIAN
|
||||
if (sizeof(vcpu->arch.cop0->reg[0][0]) == 8)
|
||||
mtc0_inst.i_format.simmediate |= 4;
|
||||
#endif
|
||||
|
||||
if (KVM_GUEST_KSEGX(opc) == KVM_GUEST_KSEG0) {
|
||||
kseg0_opc =
|
||||
CKSEG0ADDR(kvm_mips_translate_guest_kseg0_to_hpa
|
||||
(vcpu, (unsigned long) opc));
|
||||
memcpy((void *)kseg0_opc, (void *)&mtc0_inst, sizeof(uint32_t));
|
||||
local_flush_icache_range(kseg0_opc, kseg0_opc + 32);
|
||||
} else if (KVM_GUEST_KSEGX((unsigned long) opc) == KVM_GUEST_KSEG23) {
|
||||
local_irq_save(flags);
|
||||
memcpy((void *)opc, (void *)&mtc0_inst, sizeof(uint32_t));
|
||||
local_flush_icache_range((unsigned long)opc,
|
||||
(unsigned long)opc + 32);
|
||||
local_irq_restore(flags);
|
||||
} else {
|
||||
kvm_err("%s: Invalid address: %p\n", __func__, opc);
|
||||
return -EFAULT;
|
||||
}
|
||||
|
||||
return 0;
|
||||
return kvm_mips_trans_replace(vcpu, opc, mtc0_inst);
|
||||
}
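For reference, the simmediate written into the translated LW/SW above is just the guest commpage base OR'd with the offset of the virtual CP0 register inside the commpage, with bit 2 set on 64-bit big-endian kernels so the 32-bit guest access lands on the low half of the saved register. A stand-alone sketch of that arithmetic follows; KVM_GUEST_COMMPAGE_ADDR, the register size and the per-register layout below are assumptions made for illustration, not values taken from this diff:

/* Illustrative only: offset math for commpage-backed MFC0/MTC0 emulation. */
#include <stdio.h>

#define KVM_GUEST_COMMPAGE_ADDR	0x0	/* assumed guest mapping address */
#define REG_BYTES		8	/* assumed sizeof(cop0->reg[0][0]) */

int main(void)
{
	unsigned int rd = 12, sel = 0;			/* e.g. CP0 Status */
	unsigned int reg_off = (rd * 8 + sel) * REG_BYTES; /* assumed reg[32][8] layout */
	unsigned int simm = KVM_GUEST_COMMPAGE_ADDR | reg_off;
	int big_endian_64bit = 1;			/* pretend CONFIG_CPU_BIG_ENDIAN */

	if (big_endian_64bit && REG_BYTES == 8)
		simm |= 4;	/* 32-bit LW/SW must hit the low word */

	printf("lw/sw immediate: 0x%x\n", simm);
	return 0;
}
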
File diff suppressed because it is too large
@ -0,0 +1,701 @@
|
|||
/*
|
||||
* This file is subject to the terms and conditions of the GNU General Public
|
||||
* License. See the file "COPYING" in the main directory of this archive
|
||||
* for more details.
|
||||
*
|
||||
* Generation of main entry point for the guest, exception handling.
|
||||
*
|
||||
* Copyright (C) 2012 MIPS Technologies, Inc.
|
||||
* Authors: Sanjay Lal <sanjayl@kymasys.com>
|
||||
*
|
||||
* Copyright (C) 2016 Imagination Technologies Ltd.
|
||||
*/
|
||||
|
||||
#include <linux/kvm_host.h>
|
||||
#include <asm/msa.h>
|
||||
#include <asm/setup.h>
|
||||
#include <asm/uasm.h>
|
||||
|
||||
/* Register names */
|
||||
#define ZERO 0
|
||||
#define AT 1
|
||||
#define V0 2
|
||||
#define V1 3
|
||||
#define A0 4
|
||||
#define A1 5
|
||||
|
||||
#if _MIPS_SIM == _MIPS_SIM_ABI32
|
||||
#define T0 8
|
||||
#define T1 9
|
||||
#define T2 10
|
||||
#define T3 11
|
||||
#endif /* _MIPS_SIM == _MIPS_SIM_ABI32 */
|
||||
|
||||
#if _MIPS_SIM == _MIPS_SIM_ABI64 || _MIPS_SIM == _MIPS_SIM_NABI32
|
||||
#define T0 12
|
||||
#define T1 13
|
||||
#define T2 14
|
||||
#define T3 15
|
||||
#endif /* _MIPS_SIM == _MIPS_SIM_ABI64 || _MIPS_SIM == _MIPS_SIM_NABI32 */
|
||||
|
||||
#define S0 16
|
||||
#define S1 17
|
||||
#define T9 25
|
||||
#define K0 26
|
||||
#define K1 27
|
||||
#define GP 28
|
||||
#define SP 29
|
||||
#define RA 31
|
||||
|
||||
/* Some CP0 registers */
|
||||
#define C0_HWRENA 7, 0
|
||||
#define C0_BADVADDR 8, 0
|
||||
#define C0_ENTRYHI 10, 0
|
||||
#define C0_STATUS 12, 0
|
||||
#define C0_CAUSE 13, 0
|
||||
#define C0_EPC 14, 0
|
||||
#define C0_EBASE 15, 1
|
||||
#define C0_CONFIG5 16, 5
|
||||
#define C0_DDATA_LO 28, 3
|
||||
#define C0_ERROREPC 30, 0
|
||||
|
||||
#define CALLFRAME_SIZ 32
|
||||
|
||||
#ifdef CONFIG_64BIT
|
||||
#define ST0_KX_IF_64 ST0_KX
|
||||
#else
|
||||
#define ST0_KX_IF_64 0
|
||||
#endif
|
||||
|
||||
static unsigned int scratch_vcpu[2] = { C0_DDATA_LO };
|
||||
static unsigned int scratch_tmp[2] = { C0_ERROREPC };
|
||||
|
||||
enum label_id {
|
||||
label_fpu_1 = 1,
|
||||
label_msa_1,
|
||||
label_return_to_host,
|
||||
label_kernel_asid,
|
||||
label_exit_common,
|
||||
};
|
||||
|
||||
UASM_L_LA(_fpu_1)
|
||||
UASM_L_LA(_msa_1)
|
||||
UASM_L_LA(_return_to_host)
|
||||
UASM_L_LA(_kernel_asid)
|
||||
UASM_L_LA(_exit_common)
|
||||
|
||||
static void *kvm_mips_build_enter_guest(void *addr);
|
||||
static void *kvm_mips_build_ret_from_exit(void *addr);
|
||||
static void *kvm_mips_build_ret_to_guest(void *addr);
|
||||
static void *kvm_mips_build_ret_to_host(void *addr);
|
||||
|
||||
/**
|
||||
* kvm_mips_entry_setup() - Perform global setup for entry code.
|
||||
*
|
||||
* Perform global setup for entry code, such as choosing a scratch register.
|
||||
*
|
||||
* Returns: 0 on success.
|
||||
* -errno on failure.
|
||||
*/
|
||||
int kvm_mips_entry_setup(void)
|
||||
{
|
||||
/*
|
||||
* We prefer to use KScratchN registers if they are available over the
|
||||
* defaults above, which may not work on all cores.
|
||||
*/
|
||||
unsigned int kscratch_mask = cpu_data[0].kscratch_mask & 0xfc;
|
||||
|
||||
/* Pick a scratch register for storing VCPU */
|
||||
if (kscratch_mask) {
|
||||
scratch_vcpu[0] = 31;
|
||||
scratch_vcpu[1] = ffs(kscratch_mask) - 1;
|
||||
kscratch_mask &= ~BIT(scratch_vcpu[1]);
|
||||
}
|
||||
|
||||
/* Pick a scratch register to use as a temp for saving state */
|
||||
if (kscratch_mask) {
|
||||
scratch_tmp[0] = 31;
|
||||
scratch_tmp[1] = ffs(kscratch_mask) - 1;
|
||||
kscratch_mask &= ~BIT(scratch_tmp[1]);
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void kvm_mips_build_save_scratch(u32 **p, unsigned int tmp,
|
||||
unsigned int frame)
|
||||
{
|
||||
/* Save the VCPU scratch register value in cp0_epc of the stack frame */
|
||||
UASM_i_MFC0(p, tmp, scratch_vcpu[0], scratch_vcpu[1]);
|
||||
UASM_i_SW(p, tmp, offsetof(struct pt_regs, cp0_epc), frame);
|
||||
|
||||
/* Save the temp scratch register value in cp0_cause of stack frame */
|
||||
if (scratch_tmp[0] == 31) {
|
||||
UASM_i_MFC0(p, tmp, scratch_tmp[0], scratch_tmp[1]);
|
||||
UASM_i_SW(p, tmp, offsetof(struct pt_regs, cp0_cause), frame);
|
||||
}
|
||||
}
|
||||
|
||||
static void kvm_mips_build_restore_scratch(u32 **p, unsigned int tmp,
|
||||
unsigned int frame)
|
||||
{
|
||||
/*
|
||||
* Restore host scratch register values saved by
|
||||
* kvm_mips_build_save_scratch().
|
||||
*/
|
||||
UASM_i_LW(p, tmp, offsetof(struct pt_regs, cp0_epc), frame);
|
||||
UASM_i_MTC0(p, tmp, scratch_vcpu[0], scratch_vcpu[1]);
|
||||
|
||||
if (scratch_tmp[0] == 31) {
|
||||
UASM_i_LW(p, tmp, offsetof(struct pt_regs, cp0_cause), frame);
|
||||
UASM_i_MTC0(p, tmp, scratch_tmp[0], scratch_tmp[1]);
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* build_set_exc_base() - Assemble code to write exception base address.
|
||||
* @p: Code buffer pointer.
|
||||
* @reg: Source register (generated code may set WG bit in @reg).
|
||||
*
|
||||
* Assemble code to modify the exception base address in the EBase register,
|
||||
* using the appropriately sized access and setting the WG bit if necessary.
|
||||
*/
|
||||
static inline void build_set_exc_base(u32 **p, unsigned int reg)
|
||||
{
|
||||
if (cpu_has_ebase_wg) {
|
||||
/* Set WG so that all the bits get written */
|
||||
uasm_i_ori(p, reg, reg, MIPS_EBASE_WG);
|
||||
UASM_i_MTC0(p, reg, C0_EBASE);
|
||||
} else {
|
||||
uasm_i_mtc0(p, reg, C0_EBASE);
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* kvm_mips_build_vcpu_run() - Assemble function to start running a guest VCPU.
|
||||
* @addr: Address to start writing code.
|
||||
*
|
||||
* Assemble the start of the vcpu_run function to run a guest VCPU. The function
|
||||
* conforms to the following prototype:
|
||||
*
|
||||
* int vcpu_run(struct kvm_run *run, struct kvm_vcpu *vcpu);
|
||||
*
|
||||
* The exit from the guest and return to the caller is handled by the code
|
||||
* generated by kvm_mips_build_ret_to_host().
|
||||
*
|
||||
* Returns: Next address after end of written function.
|
||||
*/
|
||||
void *kvm_mips_build_vcpu_run(void *addr)
|
||||
{
|
||||
u32 *p = addr;
|
||||
unsigned int i;
|
||||
|
||||
/*
|
||||
* A0: run
|
||||
* A1: vcpu
|
||||
*/
|
||||
|
||||
/* k0/k1 not being used in host kernel context */
|
||||
UASM_i_ADDIU(&p, K1, SP, -(int)sizeof(struct pt_regs));
|
||||
for (i = 16; i < 32; ++i) {
|
||||
if (i == 24)
|
||||
i = 28;
|
||||
UASM_i_SW(&p, i, offsetof(struct pt_regs, regs[i]), K1);
|
||||
}
|
||||
|
||||
/* Save host status */
|
||||
uasm_i_mfc0(&p, V0, C0_STATUS);
|
||||
UASM_i_SW(&p, V0, offsetof(struct pt_regs, cp0_status), K1);
|
||||
|
||||
/* Save scratch registers, will be used to store pointer to vcpu etc */
|
||||
kvm_mips_build_save_scratch(&p, V1, K1);
|
||||
|
||||
/* VCPU scratch register has pointer to vcpu */
|
||||
UASM_i_MTC0(&p, A1, scratch_vcpu[0], scratch_vcpu[1]);
|
||||
|
||||
/* Offset into vcpu->arch */
|
||||
UASM_i_ADDIU(&p, K1, A1, offsetof(struct kvm_vcpu, arch));
|
||||
|
||||
/*
|
||||
* Save the host stack to VCPU, used for exception processing
|
||||
* when we exit from the Guest
|
||||
*/
|
||||
UASM_i_SW(&p, SP, offsetof(struct kvm_vcpu_arch, host_stack), K1);
|
||||
|
||||
/* Save the kernel gp as well */
|
||||
UASM_i_SW(&p, GP, offsetof(struct kvm_vcpu_arch, host_gp), K1);
|
||||
|
||||
/*
|
||||
* Setup status register for running the guest in UM, interrupts
|
||||
* are disabled
|
||||
*/
|
||||
UASM_i_LA(&p, K0, ST0_EXL | KSU_USER | ST0_BEV | ST0_KX_IF_64);
|
||||
uasm_i_mtc0(&p, K0, C0_STATUS);
|
||||
uasm_i_ehb(&p);
|
||||
|
||||
/* load up the new EBASE */
|
||||
UASM_i_LW(&p, K0, offsetof(struct kvm_vcpu_arch, guest_ebase), K1);
|
||||
build_set_exc_base(&p, K0);
|
||||
|
||||
/*
|
||||
* Now that the new EBASE has been loaded, unset BEV, set
|
||||
* interrupt mask as it was but make sure that timer interrupts
|
||||
* are enabled
|
||||
*/
|
||||
uasm_i_addiu(&p, K0, ZERO, ST0_EXL | KSU_USER | ST0_IE | ST0_KX_IF_64);
|
||||
uasm_i_andi(&p, V0, V0, ST0_IM);
|
||||
uasm_i_or(&p, K0, K0, V0);
|
||||
uasm_i_mtc0(&p, K0, C0_STATUS);
|
||||
uasm_i_ehb(&p);
|
||||
|
||||
p = kvm_mips_build_enter_guest(p);
|
||||
|
||||
return p;
|
||||
}
|
||||
|
||||
/**
|
||||
* kvm_mips_build_enter_guest() - Assemble code to resume guest execution.
|
||||
* @addr: Address to start writing code.
|
||||
*
|
||||
* Assemble the code to resume guest execution. This code is common between the
|
||||
* initial entry into the guest from the host, and returning from the exit
|
||||
* handler back to the guest.
|
||||
*
|
||||
* Returns: Next address after end of written function.
|
||||
*/
|
||||
static void *kvm_mips_build_enter_guest(void *addr)
|
||||
{
|
||||
u32 *p = addr;
|
||||
unsigned int i;
|
||||
struct uasm_label labels[2];
|
||||
struct uasm_reloc relocs[2];
|
||||
struct uasm_label *l = labels;
|
||||
struct uasm_reloc *r = relocs;
|
||||
|
||||
memset(labels, 0, sizeof(labels));
|
||||
memset(relocs, 0, sizeof(relocs));
|
||||
|
||||
/* Set Guest EPC */
|
||||
UASM_i_LW(&p, T0, offsetof(struct kvm_vcpu_arch, pc), K1);
|
||||
UASM_i_MTC0(&p, T0, C0_EPC);
|
||||
|
||||
/* Set the ASID for the Guest Kernel */
|
||||
UASM_i_LW(&p, T0, offsetof(struct kvm_vcpu_arch, cop0), K1);
|
||||
UASM_i_LW(&p, T0, offsetof(struct mips_coproc, reg[MIPS_CP0_STATUS][0]),
|
||||
T0);
|
||||
uasm_i_andi(&p, T0, T0, KSU_USER | ST0_ERL | ST0_EXL);
|
||||
uasm_i_xori(&p, T0, T0, KSU_USER);
|
||||
uasm_il_bnez(&p, &r, T0, label_kernel_asid);
|
||||
UASM_i_ADDIU(&p, T1, K1,
|
||||
offsetof(struct kvm_vcpu_arch, guest_kernel_asid));
|
||||
/* else user */
|
||||
UASM_i_ADDIU(&p, T1, K1,
|
||||
offsetof(struct kvm_vcpu_arch, guest_user_asid));
|
||||
uasm_l_kernel_asid(&l, p);
|
||||
|
||||
/* t1: contains the base of the ASID array, need to get the cpu id */
|
||||
/* smp_processor_id */
|
||||
uasm_i_lw(&p, T2, offsetof(struct thread_info, cpu), GP);
|
||||
/* x4 */
|
||||
uasm_i_sll(&p, T2, T2, 2);
|
||||
UASM_i_ADDU(&p, T3, T1, T2);
|
||||
uasm_i_lw(&p, K0, 0, T3);
|
||||
#ifdef CONFIG_MIPS_ASID_BITS_VARIABLE
|
||||
/* x sizeof(struct cpuinfo_mips)/4 */
|
||||
uasm_i_addiu(&p, T3, ZERO, sizeof(struct cpuinfo_mips)/4);
|
||||
uasm_i_mul(&p, T2, T2, T3);
|
||||
|
||||
UASM_i_LA_mostly(&p, AT, (long)&cpu_data[0].asid_mask);
|
||||
UASM_i_ADDU(&p, AT, AT, T2);
|
||||
UASM_i_LW(&p, T2, uasm_rel_lo((long)&cpu_data[0].asid_mask), AT);
|
||||
uasm_i_and(&p, K0, K0, T2);
|
||||
#else
|
||||
uasm_i_andi(&p, K0, K0, MIPS_ENTRYHI_ASID);
|
||||
#endif
|
||||
uasm_i_mtc0(&p, K0, C0_ENTRYHI);
|
||||
uasm_i_ehb(&p);
|
||||
|
||||
/* Disable RDHWR access */
|
||||
uasm_i_mtc0(&p, ZERO, C0_HWRENA);
|
||||
|
||||
/* load the guest context from VCPU and return */
|
||||
for (i = 1; i < 32; ++i) {
|
||||
/* Guest k0/k1 loaded later */
|
||||
if (i == K0 || i == K1)
|
||||
continue;
|
||||
UASM_i_LW(&p, i, offsetof(struct kvm_vcpu_arch, gprs[i]), K1);
|
||||
}
|
||||
|
||||
#ifndef CONFIG_CPU_MIPSR6
|
||||
/* Restore hi/lo */
|
||||
UASM_i_LW(&p, K0, offsetof(struct kvm_vcpu_arch, hi), K1);
|
||||
uasm_i_mthi(&p, K0);
|
||||
|
||||
UASM_i_LW(&p, K0, offsetof(struct kvm_vcpu_arch, lo), K1);
|
||||
uasm_i_mtlo(&p, K0);
|
||||
#endif
|
||||
|
||||
/* Restore the guest's k0/k1 registers */
|
||||
UASM_i_LW(&p, K0, offsetof(struct kvm_vcpu_arch, gprs[K0]), K1);
|
||||
UASM_i_LW(&p, K1, offsetof(struct kvm_vcpu_arch, gprs[K1]), K1);
|
||||
|
||||
/* Jump to guest */
|
||||
uasm_i_eret(&p);
|
||||
|
||||
uasm_resolve_relocs(relocs, labels);
|
||||
|
||||
return p;
|
||||
}
|
||||
|
||||
/**
|
||||
* kvm_mips_build_exception() - Assemble first level guest exception handler.
|
||||
* @addr: Address to start writing code.
|
||||
* @handler: Address of common handler (within range of @addr).
|
||||
*
|
||||
* Assemble exception vector code for guest execution. The generated vector will
|
||||
* branch to the common exception handler generated by kvm_mips_build_exit().
|
||||
*
|
||||
* Returns: Next address after end of written function.
|
||||
*/
|
||||
void *kvm_mips_build_exception(void *addr, void *handler)
|
||||
{
|
||||
u32 *p = addr;
|
||||
struct uasm_label labels[2];
|
||||
struct uasm_reloc relocs[2];
|
||||
struct uasm_label *l = labels;
|
||||
struct uasm_reloc *r = relocs;
|
||||
|
||||
memset(labels, 0, sizeof(labels));
|
||||
memset(relocs, 0, sizeof(relocs));
|
||||
|
||||
/* Save guest k1 into scratch register */
|
||||
UASM_i_MTC0(&p, K1, scratch_tmp[0], scratch_tmp[1]);
|
||||
|
||||
/* Get the VCPU pointer from the VCPU scratch register */
|
||||
UASM_i_MFC0(&p, K1, scratch_vcpu[0], scratch_vcpu[1]);
|
||||
UASM_i_ADDIU(&p, K1, K1, offsetof(struct kvm_vcpu, arch));
|
||||
|
||||
/* Save guest k0 into VCPU structure */
|
||||
UASM_i_SW(&p, K0, offsetof(struct kvm_vcpu_arch, gprs[K0]), K1);
|
||||
|
||||
/* Branch to the common handler */
|
||||
uasm_il_b(&p, &r, label_exit_common);
|
||||
uasm_i_nop(&p);
|
||||
|
||||
uasm_l_exit_common(&l, handler);
|
||||
uasm_resolve_relocs(relocs, labels);
|
||||
|
||||
return p;
|
||||
}
|
||||
|
||||
/**
|
||||
* kvm_mips_build_exit() - Assemble common guest exit handler.
|
||||
* @addr: Address to start writing code.
|
||||
*
|
||||
* Assemble the generic guest exit handling code. This is called by the
|
||||
* exception vectors (generated by kvm_mips_build_exception()), and calls
|
||||
* kvm_mips_handle_exit(), then either resumes the guest or returns to the host
|
||||
* depending on the return value.
|
||||
*
|
||||
* Returns: Next address after end of written function.
|
||||
*/
|
||||
void *kvm_mips_build_exit(void *addr)
|
||||
{
|
||||
u32 *p = addr;
|
||||
unsigned int i;
|
||||
struct uasm_label labels[3];
|
||||
struct uasm_reloc relocs[3];
|
||||
struct uasm_label *l = labels;
|
||||
struct uasm_reloc *r = relocs;
|
||||
|
||||
memset(labels, 0, sizeof(labels));
|
||||
memset(relocs, 0, sizeof(relocs));
|
||||
|
||||
/*
|
||||
* Generic Guest exception handler. We end up here when the guest
|
||||
* does something that causes a trap to kernel mode.
|
||||
*
|
||||
* Both k0/k1 registers will have already been saved (k0 into the vcpu
|
||||
* structure, and k1 into the scratch_tmp register).
|
||||
*
|
||||
* The k1 register will already contain the kvm_vcpu_arch pointer.
|
||||
*/
|
||||
|
||||
/* Start saving Guest context to VCPU */
|
||||
for (i = 0; i < 32; ++i) {
|
||||
/* Guest k0/k1 saved later */
|
||||
if (i == K0 || i == K1)
|
||||
continue;
|
||||
UASM_i_SW(&p, i, offsetof(struct kvm_vcpu_arch, gprs[i]), K1);
|
||||
}
|
||||
|
||||
#ifndef CONFIG_CPU_MIPSR6
|
||||
/* We need to save hi/lo and restore them on the way out */
|
||||
uasm_i_mfhi(&p, T0);
|
||||
UASM_i_SW(&p, T0, offsetof(struct kvm_vcpu_arch, hi), K1);
|
||||
|
||||
uasm_i_mflo(&p, T0);
|
||||
UASM_i_SW(&p, T0, offsetof(struct kvm_vcpu_arch, lo), K1);
|
||||
#endif
|
||||
|
||||
/* Finally save guest k1 to VCPU */
|
||||
uasm_i_ehb(&p);
|
||||
UASM_i_MFC0(&p, T0, scratch_tmp[0], scratch_tmp[1]);
|
||||
UASM_i_SW(&p, T0, offsetof(struct kvm_vcpu_arch, gprs[K1]), K1);
|
||||
|
||||
/* Now that context has been saved, we can use other registers */
|
||||
|
||||
/* Restore vcpu */
|
||||
UASM_i_MFC0(&p, A1, scratch_vcpu[0], scratch_vcpu[1]);
|
||||
uasm_i_move(&p, S1, A1);
|
||||
|
||||
/* Restore run (vcpu->run) */
|
||||
UASM_i_LW(&p, A0, offsetof(struct kvm_vcpu, run), A1);
|
||||
/* Save pointer to run in s0, will be saved by the compiler */
|
||||
uasm_i_move(&p, S0, A0);
|
||||
|
||||
/*
|
||||
* Save Host level EPC, BadVaddr and Cause to VCPU, useful to process
|
||||
* the exception
|
||||
*/
|
||||
UASM_i_MFC0(&p, K0, C0_EPC);
|
||||
UASM_i_SW(&p, K0, offsetof(struct kvm_vcpu_arch, pc), K1);
|
||||
|
||||
UASM_i_MFC0(&p, K0, C0_BADVADDR);
|
||||
UASM_i_SW(&p, K0, offsetof(struct kvm_vcpu_arch, host_cp0_badvaddr),
|
||||
K1);
|
||||
|
||||
uasm_i_mfc0(&p, K0, C0_CAUSE);
|
||||
uasm_i_sw(&p, K0, offsetof(struct kvm_vcpu_arch, host_cp0_cause), K1);
|
||||
|
||||
/* Now restore the host state just enough to run the handlers */
|
||||
|
||||
/* Switch EBASE to the one used by Linux */
|
||||
/* load up the host EBASE */
|
||||
uasm_i_mfc0(&p, V0, C0_STATUS);
|
||||
|
||||
uasm_i_lui(&p, AT, ST0_BEV >> 16);
|
||||
uasm_i_or(&p, K0, V0, AT);
|
||||
|
||||
uasm_i_mtc0(&p, K0, C0_STATUS);
|
||||
uasm_i_ehb(&p);
|
||||
|
||||
UASM_i_LA_mostly(&p, K0, (long)&ebase);
|
||||
UASM_i_LW(&p, K0, uasm_rel_lo((long)&ebase), K0);
|
||||
build_set_exc_base(&p, K0);
|
||||
|
||||
if (raw_cpu_has_fpu) {
|
||||
/*
|
||||
* If FPU is enabled, save FCR31 and clear it so that later
|
||||
* ctc1's don't trigger FPE for pending exceptions.
|
||||
*/
|
||||
uasm_i_lui(&p, AT, ST0_CU1 >> 16);
|
||||
uasm_i_and(&p, V1, V0, AT);
|
||||
uasm_il_beqz(&p, &r, V1, label_fpu_1);
|
||||
uasm_i_nop(&p);
|
||||
uasm_i_cfc1(&p, T0, 31);
|
||||
uasm_i_sw(&p, T0, offsetof(struct kvm_vcpu_arch, fpu.fcr31),
|
||||
K1);
|
||||
uasm_i_ctc1(&p, ZERO, 31);
|
||||
uasm_l_fpu_1(&l, p);
|
||||
}
|
||||
|
||||
if (cpu_has_msa) {
|
||||
/*
|
||||
* If MSA is enabled, save MSACSR and clear it so that later
|
||||
* instructions don't trigger MSAFPE for pending exceptions.
|
||||
*/
|
||||
uasm_i_mfc0(&p, T0, C0_CONFIG5);
|
||||
uasm_i_ext(&p, T0, T0, 27, 1); /* MIPS_CONF5_MSAEN */
|
||||
uasm_il_beqz(&p, &r, T0, label_msa_1);
|
||||
uasm_i_nop(&p);
|
||||
uasm_i_cfcmsa(&p, T0, MSA_CSR);
|
||||
uasm_i_sw(&p, T0, offsetof(struct kvm_vcpu_arch, fpu.msacsr),
|
||||
K1);
|
||||
uasm_i_ctcmsa(&p, MSA_CSR, ZERO);
|
||||
uasm_l_msa_1(&l, p);
|
||||
}
|
||||
|
||||
/* Now that the new EBASE has been loaded, unset BEV and KSU_USER */
|
||||
uasm_i_addiu(&p, AT, ZERO, ~(ST0_EXL | KSU_USER | ST0_IE));
|
||||
uasm_i_and(&p, V0, V0, AT);
|
||||
uasm_i_lui(&p, AT, ST0_CU0 >> 16);
|
||||
uasm_i_or(&p, V0, V0, AT);
|
||||
uasm_i_mtc0(&p, V0, C0_STATUS);
|
||||
uasm_i_ehb(&p);
|
||||
|
||||
/* Load up host GP */
|
||||
UASM_i_LW(&p, GP, offsetof(struct kvm_vcpu_arch, host_gp), K1);
|
||||
|
||||
/* Need a stack before we can jump to "C" */
|
||||
UASM_i_LW(&p, SP, offsetof(struct kvm_vcpu_arch, host_stack), K1);
|
||||
|
||||
/* Saved host state */
|
||||
UASM_i_ADDIU(&p, SP, SP, -(int)sizeof(struct pt_regs));
|
||||
|
||||
/*
|
||||
* XXXKYMA do we need to load the host ASID, maybe not because the
|
||||
* kernel entries are marked GLOBAL, need to verify
|
||||
*/
|
||||
|
||||
/* Restore host scratch registers, as we'll have clobbered them */
|
||||
kvm_mips_build_restore_scratch(&p, K0, SP);
|
||||
|
||||
/* Restore RDHWR access */
|
||||
UASM_i_LA_mostly(&p, K0, (long)&hwrena);
|
||||
uasm_i_lw(&p, K0, uasm_rel_lo((long)&hwrena), K0);
|
||||
uasm_i_mtc0(&p, K0, C0_HWRENA);
|
||||
|
||||
/* Jump to handler */
|
||||
/*
|
||||
* XXXKYMA: not sure if this is safe, how large is the stack??
|
||||
* Now jump to the kvm_mips_handle_exit() to see if we can deal
|
||||
* with this in the kernel
|
||||
*/
|
||||
UASM_i_LA(&p, T9, (unsigned long)kvm_mips_handle_exit);
|
||||
uasm_i_jalr(&p, RA, T9);
|
||||
UASM_i_ADDIU(&p, SP, SP, -CALLFRAME_SIZ);
|
||||
|
||||
uasm_resolve_relocs(relocs, labels);
|
||||
|
||||
p = kvm_mips_build_ret_from_exit(p);
|
||||
|
||||
return p;
|
||||
}
|
||||
|
||||
/**
|
||||
* kvm_mips_build_ret_from_exit() - Assemble guest exit return handler.
|
||||
* @addr: Address to start writing code.
|
||||
*
|
||||
* Assemble the code to handle the return from kvm_mips_handle_exit(), either
|
||||
* resuming the guest or returning to the host depending on the return value.
|
||||
*
|
||||
* Returns: Next address after end of written function.
|
||||
*/
|
||||
static void *kvm_mips_build_ret_from_exit(void *addr)
|
||||
{
|
||||
u32 *p = addr;
|
||||
struct uasm_label labels[2];
|
||||
struct uasm_reloc relocs[2];
|
||||
struct uasm_label *l = labels;
|
||||
struct uasm_reloc *r = relocs;
|
||||
|
||||
memset(labels, 0, sizeof(labels));
|
||||
memset(relocs, 0, sizeof(relocs));
|
||||
|
||||
/* Return from handler Make sure interrupts are disabled */
|
||||
uasm_i_di(&p, ZERO);
|
||||
uasm_i_ehb(&p);
|
||||
|
||||
/*
|
||||
* XXXKYMA: k0/k1 could have been blown away if we processed
|
||||
* an exception while we were handling the exception from the
|
||||
* guest, reload k1
|
||||
*/
|
||||
|
||||
uasm_i_move(&p, K1, S1);
|
||||
UASM_i_ADDIU(&p, K1, K1, offsetof(struct kvm_vcpu, arch));
|
||||
|
||||
/*
|
||||
* Check return value, should tell us if we are returning to the
|
||||
* host (handle I/O etc)or resuming the guest
|
||||
*/
|
||||
uasm_i_andi(&p, T0, V0, RESUME_HOST);
|
||||
uasm_il_bnez(&p, &r, T0, label_return_to_host);
|
||||
uasm_i_nop(&p);
|
||||
|
||||
p = kvm_mips_build_ret_to_guest(p);
|
||||
|
||||
uasm_l_return_to_host(&l, p);
|
||||
p = kvm_mips_build_ret_to_host(p);
|
||||
|
||||
uasm_resolve_relocs(relocs, labels);
|
||||
|
||||
return p;
|
||||
}
|
||||
|
||||
/**
|
||||
* kvm_mips_build_ret_to_guest() - Assemble code to return to the guest.
|
||||
* @addr: Address to start writing code.
|
||||
*
|
||||
* Assemble the code to handle return from the guest exit handler
|
||||
* (kvm_mips_handle_exit()) back to the guest.
|
||||
*
|
||||
* Returns: Next address after end of written function.
|
||||
*/
|
||||
static void *kvm_mips_build_ret_to_guest(void *addr)
|
||||
{
|
||||
u32 *p = addr;
|
||||
|
||||
/* Put the saved pointer to vcpu (s1) back into the scratch register */
|
||||
UASM_i_MTC0(&p, S1, scratch_vcpu[0], scratch_vcpu[1]);
|
||||
|
||||
/* Load up the Guest EBASE to minimize the window where BEV is set */
|
||||
UASM_i_LW(&p, T0, offsetof(struct kvm_vcpu_arch, guest_ebase), K1);
|
||||
|
||||
/* Switch EBASE back to the one used by KVM */
|
||||
uasm_i_mfc0(&p, V1, C0_STATUS);
|
||||
uasm_i_lui(&p, AT, ST0_BEV >> 16);
|
||||
uasm_i_or(&p, K0, V1, AT);
|
||||
uasm_i_mtc0(&p, K0, C0_STATUS);
|
||||
uasm_i_ehb(&p);
|
||||
build_set_exc_base(&p, T0);
|
||||
|
||||
/* Setup status register for running guest in UM */
|
||||
uasm_i_ori(&p, V1, V1, ST0_EXL | KSU_USER | ST0_IE);
|
||||
UASM_i_LA(&p, AT, ~(ST0_CU0 | ST0_MX));
|
||||
uasm_i_and(&p, V1, V1, AT);
|
||||
uasm_i_mtc0(&p, V1, C0_STATUS);
|
||||
uasm_i_ehb(&p);
|
||||
|
||||
p = kvm_mips_build_enter_guest(p);
|
||||
|
||||
return p;
|
||||
}
|
||||
|
||||
/**
|
||||
* kvm_mips_build_ret_to_host() - Assemble code to return to the host.
|
||||
* @addr: Address to start writing code.
|
||||
*
|
||||
* Assemble the code to handle return from the guest exit handler
|
||||
* (kvm_mips_handle_exit()) back to the host, i.e. to the caller of the vcpu_run
|
||||
* function generated by kvm_mips_build_vcpu_run().
|
||||
*
|
||||
* Returns: Next address after end of written function.
|
||||
*/
|
||||
static void *kvm_mips_build_ret_to_host(void *addr)
|
||||
{
|
||||
u32 *p = addr;
|
||||
unsigned int i;
|
||||
|
||||
/* EBASE is already pointing to Linux */
|
||||
UASM_i_LW(&p, K1, offsetof(struct kvm_vcpu_arch, host_stack), K1);
|
||||
UASM_i_ADDIU(&p, K1, K1, -(int)sizeof(struct pt_regs));
|
||||
|
||||
/*
|
||||
* r2/v0 is the return code, shift it down by 2 (arithmetic)
|
||||
* to recover the err code
|
||||
*/
|
||||
uasm_i_sra(&p, K0, V0, 2);
|
||||
uasm_i_move(&p, V0, K0);
|
||||
|
||||
/* Load context saved on the host stack */
|
||||
for (i = 16; i < 31; ++i) {
|
||||
if (i == 24)
|
||||
i = 28;
|
||||
UASM_i_LW(&p, i, offsetof(struct pt_regs, regs[i]), K1);
|
||||
}
|
||||
|
||||
/* Restore RDHWR access */
|
||||
UASM_i_LA_mostly(&p, K0, (long)&hwrena);
|
||||
uasm_i_lw(&p, K0, uasm_rel_lo((long)&hwrena), K0);
|
||||
uasm_i_mtc0(&p, K0, C0_HWRENA);
|
||||
|
||||
/* Restore RA, which is the address we will return to */
|
||||
UASM_i_LW(&p, RA, offsetof(struct pt_regs, regs[RA]), K1);
|
||||
uasm_i_jr(&p, RA);
|
||||
uasm_i_nop(&p);
|
||||
|
||||
return p;
|
||||
}
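To make the KScratch handling in kvm_mips_entry_setup() above concrete: the setup simply claims the two lowest-numbered implemented KScratch registers from the Config4 mask, falling back to the defaults (DDATA_LO/ErrorEPC) when none are available. A small stand-alone sketch of that selection; the 0x3c example mask is an assumption, on real hardware it comes from cpu_data[0].kscratch_mask:

/* Illustrative only: pick two scratch selects from a KScratch mask. */
#include <stdio.h>
#include <strings.h>	/* ffs() */

int main(void)
{
	unsigned int kscratch_mask = 0x3c & 0xfc;	/* example Config4 mask */
	int scratch_vcpu_sel = -1, scratch_tmp_sel = -1;

	if (kscratch_mask) {
		scratch_vcpu_sel = ffs(kscratch_mask) - 1;	/* lowest set bit */
		kscratch_mask &= ~(1u << scratch_vcpu_sel);
	}
	if (kscratch_mask) {
		scratch_tmp_sel = ffs(kscratch_mask) - 1;	/* next lowest */
		kscratch_mask &= ~(1u << scratch_tmp_sel);
	}

	/* With mask 0x3c this selects $31 sel 2 for the VCPU pointer
	 * and $31 sel 3 for the temporary, matching the setup code above. */
	printf("vcpu scratch sel %d, tmp scratch sel %d\n",
	       scratch_vcpu_sel, scratch_tmp_sel);
	return 0;
}
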
|
||||
|
|
@ -14,13 +14,16 @@
|
|||
#include <asm/mipsregs.h>
|
||||
#include <asm/regdef.h>
|
||||
|
||||
/* preprocessor replaces the fp in ".set fp=64" with $30 otherwise */
|
||||
#undef fp
|
||||
|
||||
.set noreorder
|
||||
.set noat
|
||||
|
||||
LEAF(__kvm_save_fpu)
|
||||
.set push
|
||||
.set mips64r2
|
||||
SET_HARDFLOAT
|
||||
.set fp=64
|
||||
mfc0 t0, CP0_STATUS
|
||||
sll t0, t0, 5 # is Status.FR set?
|
||||
bgez t0, 1f # no: skip odd doubles
|
||||
|
@ -63,8 +66,8 @@ LEAF(__kvm_save_fpu)
|
|||
|
||||
LEAF(__kvm_restore_fpu)
|
||||
.set push
|
||||
.set mips64r2
|
||||
SET_HARDFLOAT
|
||||
.set fp=64
|
||||
mfc0 t0, CP0_STATUS
|
||||
sll t0, t0, 5 # is Status.FR set?
|
||||
bgez t0, 1f # no: skip odd doubles
|
||||
|
|
|
@ -22,12 +22,12 @@
|
|||
|
||||
#include "interrupt.h"
|
||||
|
||||
void kvm_mips_queue_irq(struct kvm_vcpu *vcpu, uint32_t priority)
|
||||
void kvm_mips_queue_irq(struct kvm_vcpu *vcpu, unsigned int priority)
|
||||
{
|
||||
set_bit(priority, &vcpu->arch.pending_exceptions);
|
||||
}
|
||||
|
||||
void kvm_mips_dequeue_irq(struct kvm_vcpu *vcpu, uint32_t priority)
|
||||
void kvm_mips_dequeue_irq(struct kvm_vcpu *vcpu, unsigned int priority)
|
||||
{
|
||||
clear_bit(priority, &vcpu->arch.pending_exceptions);
|
||||
}
|
||||
|
@ -114,10 +114,10 @@ void kvm_mips_dequeue_io_int_cb(struct kvm_vcpu *vcpu,
|
|||
|
||||
/* Deliver the interrupt of the corresponding priority, if possible. */
|
||||
int kvm_mips_irq_deliver_cb(struct kvm_vcpu *vcpu, unsigned int priority,
|
||||
uint32_t cause)
|
||||
u32 cause)
|
||||
{
|
||||
int allowed = 0;
|
||||
uint32_t exccode;
|
||||
u32 exccode;
|
||||
|
||||
struct kvm_vcpu_arch *arch = &vcpu->arch;
|
||||
struct mips_coproc *cop0 = vcpu->arch.cop0;
|
||||
|
@ -196,12 +196,12 @@ int kvm_mips_irq_deliver_cb(struct kvm_vcpu *vcpu, unsigned int priority,
|
|||
}
|
||||
|
||||
int kvm_mips_irq_clear_cb(struct kvm_vcpu *vcpu, unsigned int priority,
|
||||
uint32_t cause)
|
||||
u32 cause)
|
||||
{
|
||||
return 1;
|
||||
}
|
||||
|
||||
void kvm_mips_deliver_interrupts(struct kvm_vcpu *vcpu, uint32_t cause)
|
||||
void kvm_mips_deliver_interrupts(struct kvm_vcpu *vcpu, u32 cause)
|
||||
{
|
||||
unsigned long *pending = &vcpu->arch.pending_exceptions;
|
||||
unsigned long *pending_clr = &vcpu->arch.pending_exceptions_clr;
|
||||
|
|
|
@ -28,17 +28,13 @@
|
|||
#define MIPS_EXC_MAX 12
|
||||
/* XXXSL More to follow */
|
||||
|
||||
extern char __kvm_mips_vcpu_run_end[];
|
||||
extern char mips32_exception[], mips32_exceptionEnd[];
|
||||
extern char mips32_GuestException[], mips32_GuestExceptionEnd[];
|
||||
|
||||
#define C_TI (_ULCAST_(1) << 30)
|
||||
|
||||
#define KVM_MIPS_IRQ_DELIVER_ALL_AT_ONCE (0)
|
||||
#define KVM_MIPS_IRQ_CLEAR_ALL_AT_ONCE (0)
|
||||
|
||||
void kvm_mips_queue_irq(struct kvm_vcpu *vcpu, uint32_t priority);
|
||||
void kvm_mips_dequeue_irq(struct kvm_vcpu *vcpu, uint32_t priority);
|
||||
void kvm_mips_queue_irq(struct kvm_vcpu *vcpu, unsigned int priority);
|
||||
void kvm_mips_dequeue_irq(struct kvm_vcpu *vcpu, unsigned int priority);
|
||||
int kvm_mips_pending_timer(struct kvm_vcpu *vcpu);
|
||||
|
||||
void kvm_mips_queue_timer_int_cb(struct kvm_vcpu *vcpu);
|
||||
|
@ -48,7 +44,7 @@ void kvm_mips_queue_io_int_cb(struct kvm_vcpu *vcpu,
|
|||
void kvm_mips_dequeue_io_int_cb(struct kvm_vcpu *vcpu,
|
||||
struct kvm_mips_interrupt *irq);
|
||||
int kvm_mips_irq_deliver_cb(struct kvm_vcpu *vcpu, unsigned int priority,
|
||||
uint32_t cause);
|
||||
u32 cause);
|
||||
int kvm_mips_irq_clear_cb(struct kvm_vcpu *vcpu, unsigned int priority,
|
||||
uint32_t cause);
|
||||
void kvm_mips_deliver_interrupts(struct kvm_vcpu *vcpu, uint32_t cause);
|
||||
u32 cause);
|
||||
void kvm_mips_deliver_interrupts(struct kvm_vcpu *vcpu, u32 cause);
|
||||
|
|
|
@ -1,605 +0,0 @@
|
|||
/*
|
||||
* This file is subject to the terms and conditions of the GNU General Public
|
||||
* License. See the file "COPYING" in the main directory of this archive
|
||||
* for more details.
|
||||
*
|
||||
* Main entry point for the guest, exception handling.
|
||||
*
|
||||
* Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
|
||||
* Authors: Sanjay Lal <sanjayl@kymasys.com>
|
||||
*/
|
||||
|
||||
#include <asm/asm.h>
|
||||
#include <asm/asmmacro.h>
|
||||
#include <asm/regdef.h>
|
||||
#include <asm/mipsregs.h>
|
||||
#include <asm/stackframe.h>
|
||||
#include <asm/asm-offsets.h>
|
||||
|
||||
#define _C_LABEL(x) x
|
||||
#define MIPSX(name) mips32_ ## name
|
||||
#define CALLFRAME_SIZ 32
|
||||
|
||||
/*
|
||||
* VECTOR
|
||||
* exception vector entrypoint
|
||||
*/
|
||||
#define VECTOR(x, regmask) \
|
||||
.ent _C_LABEL(x),0; \
|
||||
EXPORT(x);
|
||||
|
||||
#define VECTOR_END(x) \
|
||||
EXPORT(x);
|
||||
|
||||
/* Overload, Danger Will Robinson!! */
|
||||
#define PT_HOST_USERLOCAL PT_EPC
|
||||
|
||||
#define CP0_DDATA_LO $28,3
|
||||
|
||||
/* Resume Flags */
|
||||
#define RESUME_FLAG_HOST (1<<1) /* Resume host? */
|
||||
|
||||
#define RESUME_GUEST 0
|
||||
#define RESUME_HOST RESUME_FLAG_HOST
|
||||
|
||||
/*
|
||||
* __kvm_mips_vcpu_run: entry point to the guest
|
||||
* a0: run
|
||||
* a1: vcpu
|
||||
*/
|
||||
.set noreorder
|
||||
|
||||
FEXPORT(__kvm_mips_vcpu_run)
|
||||
/* k0/k1 not being used in host kernel context */
|
||||
INT_ADDIU k1, sp, -PT_SIZE
|
||||
LONG_S $16, PT_R16(k1)
|
||||
LONG_S $17, PT_R17(k1)
|
||||
LONG_S $18, PT_R18(k1)
|
||||
LONG_S $19, PT_R19(k1)
|
||||
LONG_S $20, PT_R20(k1)
|
||||
LONG_S $21, PT_R21(k1)
|
||||
LONG_S $22, PT_R22(k1)
|
||||
LONG_S $23, PT_R23(k1)
|
||||
|
||||
LONG_S $28, PT_R28(k1)
|
||||
LONG_S $29, PT_R29(k1)
|
||||
LONG_S $30, PT_R30(k1)
|
||||
LONG_S $31, PT_R31(k1)
|
||||
|
||||
/* Save hi/lo */
|
||||
mflo v0
|
||||
LONG_S v0, PT_LO(k1)
|
||||
mfhi v1
|
||||
LONG_S v1, PT_HI(k1)
|
||||
|
||||
/* Save host status */
|
||||
mfc0 v0, CP0_STATUS
|
||||
LONG_S v0, PT_STATUS(k1)
|
||||
|
||||
/* Save DDATA_LO, will be used to store pointer to vcpu */
|
||||
mfc0 v1, CP0_DDATA_LO
|
||||
LONG_S v1, PT_HOST_USERLOCAL(k1)
|
||||
|
||||
/* DDATA_LO has pointer to vcpu */
|
||||
mtc0 a1, CP0_DDATA_LO
|
||||
|
||||
/* Offset into vcpu->arch */
|
||||
INT_ADDIU k1, a1, VCPU_HOST_ARCH
|
||||
|
||||
/*
|
||||
* Save the host stack to VCPU, used for exception processing
|
||||
* when we exit from the Guest
|
||||
*/
|
||||
LONG_S sp, VCPU_HOST_STACK(k1)
|
||||
|
||||
/* Save the kernel gp as well */
|
||||
LONG_S gp, VCPU_HOST_GP(k1)
|
||||
|
||||
/*
|
||||
* Setup status register for running the guest in UM, interrupts
|
||||
* are disabled
|
||||
*/
|
||||
li k0, (ST0_EXL | KSU_USER | ST0_BEV)
|
||||
mtc0 k0, CP0_STATUS
|
||||
ehb
|
||||
|
||||
/* load up the new EBASE */
|
||||
LONG_L k0, VCPU_GUEST_EBASE(k1)
|
||||
mtc0 k0, CP0_EBASE
|
||||
|
||||
/*
|
||||
* Now that the new EBASE has been loaded, unset BEV, set
|
||||
* interrupt mask as it was but make sure that timer interrupts
|
||||
* are enabled
|
||||
*/
|
||||
li k0, (ST0_EXL | KSU_USER | ST0_IE)
|
||||
andi v0, v0, ST0_IM
|
||||
or k0, k0, v0
|
||||
mtc0 k0, CP0_STATUS
|
||||
ehb
|
||||
|
||||
/* Set Guest EPC */
|
||||
LONG_L t0, VCPU_PC(k1)
|
||||
mtc0 t0, CP0_EPC
|
||||
|
||||
FEXPORT(__kvm_mips_load_asid)
|
||||
/* Set the ASID for the Guest Kernel */
|
||||
PTR_L t0, VCPU_COP0(k1)
|
||||
LONG_L t0, COP0_STATUS(t0)
|
||||
andi t0, KSU_USER | ST0_ERL | ST0_EXL
|
||||
xori t0, KSU_USER
|
||||
bnez t0, 1f /* If kernel */
|
||||
INT_ADDIU t1, k1, VCPU_GUEST_KERNEL_ASID /* (BD) */
|
||||
INT_ADDIU t1, k1, VCPU_GUEST_USER_ASID /* else user */
|
||||
1:
|
||||
/* t1: contains the base of the ASID array, need to get the cpu id */
|
||||
LONG_L t2, TI_CPU($28) /* smp_processor_id */
|
||||
INT_SLL t2, t2, 2 /* x4 */
|
||||
REG_ADDU t3, t1, t2
|
||||
LONG_L k0, (t3)
|
||||
#ifdef CONFIG_MIPS_ASID_BITS_VARIABLE
|
||||
li t3, CPUINFO_SIZE/4
|
||||
mul t2, t2, t3 /* x sizeof(struct cpuinfo_mips)/4 */
|
||||
LONG_L t2, (cpu_data + CPUINFO_ASID_MASK)(t2)
|
||||
and k0, k0, t2
|
||||
#else
|
||||
andi k0, k0, MIPS_ENTRYHI_ASID
|
||||
#endif
|
||||
mtc0 k0, CP0_ENTRYHI
|
||||
ehb
|
||||
|
||||
/* Disable RDHWR access */
|
||||
mtc0 zero, CP0_HWRENA
|
||||
|
||||
.set noat
|
||||
/* Now load up the Guest Context from VCPU */
|
||||
LONG_L $1, VCPU_R1(k1)
|
||||
LONG_L $2, VCPU_R2(k1)
|
||||
LONG_L $3, VCPU_R3(k1)
|
||||
|
||||
LONG_L $4, VCPU_R4(k1)
|
||||
LONG_L $5, VCPU_R5(k1)
|
||||
LONG_L $6, VCPU_R6(k1)
|
||||
LONG_L $7, VCPU_R7(k1)
|
||||
|
||||
LONG_L $8, VCPU_R8(k1)
|
||||
LONG_L $9, VCPU_R9(k1)
|
||||
LONG_L $10, VCPU_R10(k1)
|
||||
LONG_L $11, VCPU_R11(k1)
|
||||
LONG_L $12, VCPU_R12(k1)
|
||||
LONG_L $13, VCPU_R13(k1)
|
||||
LONG_L $14, VCPU_R14(k1)
|
||||
LONG_L $15, VCPU_R15(k1)
|
||||
LONG_L $16, VCPU_R16(k1)
|
||||
LONG_L $17, VCPU_R17(k1)
|
||||
LONG_L $18, VCPU_R18(k1)
|
||||
LONG_L $19, VCPU_R19(k1)
|
||||
LONG_L $20, VCPU_R20(k1)
|
||||
LONG_L $21, VCPU_R21(k1)
|
||||
LONG_L $22, VCPU_R22(k1)
|
||||
LONG_L $23, VCPU_R23(k1)
|
||||
LONG_L $24, VCPU_R24(k1)
|
||||
LONG_L $25, VCPU_R25(k1)
|
||||
|
||||
/* k0/k1 loaded up later */
|
||||
|
||||
LONG_L $28, VCPU_R28(k1)
|
||||
LONG_L $29, VCPU_R29(k1)
|
||||
LONG_L $30, VCPU_R30(k1)
|
||||
LONG_L $31, VCPU_R31(k1)
|
||||
|
||||
/* Restore hi/lo */
|
||||
LONG_L k0, VCPU_LO(k1)
|
||||
mtlo k0
|
||||
|
||||
LONG_L k0, VCPU_HI(k1)
|
||||
mthi k0
|
||||
|
||||
FEXPORT(__kvm_mips_load_k0k1)
|
||||
/* Restore the guest's k0/k1 registers */
|
||||
LONG_L k0, VCPU_R26(k1)
|
||||
LONG_L k1, VCPU_R27(k1)
|
||||
|
||||
/* Jump to guest */
|
||||
eret
|
||||
EXPORT(__kvm_mips_vcpu_run_end)
|
||||
|
||||
VECTOR(MIPSX(exception), unknown)
|
||||
/* Find out what mode we came from and jump to the proper handler. */
|
||||
mtc0 k0, CP0_ERROREPC #01: Save guest k0
|
||||
ehb #02:
|
||||
|
||||
mfc0 k0, CP0_EBASE #02: Get EBASE
|
||||
INT_SRL k0, k0, 10 #03: Get rid of CPUNum
|
||||
INT_SLL k0, k0, 10 #04
|
||||
LONG_S k1, 0x3000(k0) #05: Save k1 @ offset 0x3000
|
||||
INT_ADDIU k0, k0, 0x2000 #06: Exception handler is
|
||||
# installed @ offset 0x2000
|
||||
j k0 #07: jump to the function
|
||||
nop #08: branch delay slot
|
||||
VECTOR_END(MIPSX(exceptionEnd))
|
||||
.end MIPSX(exception)
|
||||
|
||||
/*
|
||||
* Generic Guest exception handler. We end up here when the guest
|
||||
* does something that causes a trap to kernel mode.
|
||||
*/
|
||||
NESTED (MIPSX(GuestException), CALLFRAME_SIZ, ra)
|
||||
/* Get the VCPU pointer from DDTATA_LO */
|
||||
mfc0 k1, CP0_DDATA_LO
|
||||
INT_ADDIU k1, k1, VCPU_HOST_ARCH
|
||||
|
||||
/* Start saving Guest context to VCPU */
|
||||
LONG_S $0, VCPU_R0(k1)
|
||||
LONG_S $1, VCPU_R1(k1)
|
||||
LONG_S $2, VCPU_R2(k1)
|
||||
LONG_S $3, VCPU_R3(k1)
|
||||
LONG_S $4, VCPU_R4(k1)
|
||||
LONG_S $5, VCPU_R5(k1)
|
||||
LONG_S $6, VCPU_R6(k1)
|
||||
LONG_S $7, VCPU_R7(k1)
|
||||
LONG_S $8, VCPU_R8(k1)
|
||||
LONG_S $9, VCPU_R9(k1)
|
||||
LONG_S $10, VCPU_R10(k1)
|
||||
LONG_S $11, VCPU_R11(k1)
|
||||
LONG_S $12, VCPU_R12(k1)
|
||||
LONG_S $13, VCPU_R13(k1)
|
||||
LONG_S $14, VCPU_R14(k1)
|
||||
LONG_S $15, VCPU_R15(k1)
|
||||
LONG_S $16, VCPU_R16(k1)
|
||||
LONG_S $17, VCPU_R17(k1)
|
||||
LONG_S $18, VCPU_R18(k1)
|
||||
LONG_S $19, VCPU_R19(k1)
|
||||
LONG_S $20, VCPU_R20(k1)
|
||||
LONG_S $21, VCPU_R21(k1)
|
||||
LONG_S $22, VCPU_R22(k1)
|
||||
LONG_S $23, VCPU_R23(k1)
|
||||
LONG_S $24, VCPU_R24(k1)
|
||||
LONG_S $25, VCPU_R25(k1)
|
||||
|
||||
/* Guest k0/k1 saved later */
|
||||
|
||||
LONG_S $28, VCPU_R28(k1)
|
||||
LONG_S $29, VCPU_R29(k1)
|
||||
LONG_S $30, VCPU_R30(k1)
|
||||
LONG_S $31, VCPU_R31(k1)
|
||||
|
||||
.set at
|
||||
|
||||
/* We need to save hi/lo and restore them on the way out */
|
||||
mfhi t0
|
||||
LONG_S t0, VCPU_HI(k1)
|
||||
|
||||
mflo t0
|
||||
LONG_S t0, VCPU_LO(k1)
|
||||
|
||||
/* Finally save guest k0/k1 to VCPU */
|
||||
mfc0 t0, CP0_ERROREPC
|
||||
LONG_S t0, VCPU_R26(k1)
|
||||
|
||||
/* Get GUEST k1 and save it in VCPU */
|
||||
PTR_LI t1, ~0x2ff
|
||||
mfc0 t0, CP0_EBASE
|
||||
and t0, t0, t1
|
||||
LONG_L t0, 0x3000(t0)
|
||||
LONG_S t0, VCPU_R27(k1)
|
||||
|
||||
/* Now that context has been saved, we can use other registers */
|
||||
|
||||
/* Restore vcpu */
|
||||
mfc0 a1, CP0_DDATA_LO
|
||||
move s1, a1
|
||||
|
||||
/* Restore run (vcpu->run) */
|
||||
LONG_L a0, VCPU_RUN(a1)
|
||||
/* Save pointer to run in s0, will be saved by the compiler */
|
||||
move s0, a0
|
||||
|
||||
/*
|
||||
* Save Host level EPC, BadVaddr and Cause to VCPU, useful to
|
||||
* process the exception
|
||||
*/
|
||||
mfc0 k0,CP0_EPC
|
||||
LONG_S k0, VCPU_PC(k1)
|
||||
|
||||
mfc0 k0, CP0_BADVADDR
|
||||
LONG_S k0, VCPU_HOST_CP0_BADVADDR(k1)
|
||||
|
||||
mfc0 k0, CP0_CAUSE
|
||||
LONG_S k0, VCPU_HOST_CP0_CAUSE(k1)
|
||||
|
||||
mfc0 k0, CP0_ENTRYHI
|
||||
LONG_S k0, VCPU_HOST_ENTRYHI(k1)
|
||||
|
||||
/* Now restore the host state just enough to run the handlers */
|
||||
|
||||
/* Switch EBASE to the one used by Linux */
|
||||
/* load up the host EBASE */
|
||||
mfc0 v0, CP0_STATUS
|
||||
|
||||
or k0, v0, ST0_BEV
|
||||
|
||||
mtc0 k0, CP0_STATUS
|
||||
ehb
|
||||
|
||||
LONG_L k0, VCPU_HOST_EBASE(k1)
|
||||
mtc0 k0,CP0_EBASE
|
||||
|
||||
/*
|
||||
* If FPU is enabled, save FCR31 and clear it so that later ctc1's don't
|
||||
* trigger FPE for pending exceptions.
|
||||
*/
|
||||
and v1, v0, ST0_CU1
|
||||
beqz v1, 1f
|
||||
nop
|
||||
.set push
|
||||
SET_HARDFLOAT
|
||||
cfc1 t0, fcr31
|
||||
sw t0, VCPU_FCR31(k1)
|
||||
ctc1 zero,fcr31
|
||||
.set pop
|
||||
1:
|
||||
|
||||
#ifdef CONFIG_CPU_HAS_MSA
|
||||
/*
|
||||
* If MSA is enabled, save MSACSR and clear it so that later
|
||||
* instructions don't trigger MSAFPE for pending exceptions.
|
||||
*/
|
||||
mfc0 t0, CP0_CONFIG3
|
||||
ext t0, t0, 28, 1 /* MIPS_CONF3_MSAP */
|
||||
beqz t0, 1f
|
||||
nop
|
||||
mfc0 t0, CP0_CONFIG5
|
||||
ext t0, t0, 27, 1 /* MIPS_CONF5_MSAEN */
|
||||
beqz t0, 1f
|
||||
nop
|
||||
_cfcmsa t0, MSA_CSR
|
||||
sw t0, VCPU_MSA_CSR(k1)
|
||||
_ctcmsa MSA_CSR, zero
|
||||
1:
|
||||
#endif
|
||||
|
||||
/* Now that the new EBASE has been loaded, unset BEV and KSU_USER */
|
||||
and v0, v0, ~(ST0_EXL | KSU_USER | ST0_IE)
|
||||
or v0, v0, ST0_CU0
|
||||
mtc0 v0, CP0_STATUS
|
||||
ehb
|
||||
|
||||
/* Load up host GP */
|
||||
LONG_L gp, VCPU_HOST_GP(k1)
|
||||
|
||||
/* Need a stack before we can jump to "C" */
|
||||
LONG_L sp, VCPU_HOST_STACK(k1)
|
||||
|
||||
/* Saved host state */
|
||||
INT_ADDIU sp, sp, -PT_SIZE
|
||||
|
||||
/*
|
||||
* XXXKYMA do we need to load the host ASID, maybe not because the
|
||||
* kernel entries are marked GLOBAL, need to verify
|
||||
*/
|
||||
|
||||
/* Restore host DDATA_LO */
|
||||
LONG_L k0, PT_HOST_USERLOCAL(sp)
|
||||
mtc0 k0, CP0_DDATA_LO
|
||||
|
||||
/* Restore RDHWR access */
|
||||
PTR_LI k0, 0x2000000F
|
||||
mtc0 k0, CP0_HWRENA
|
||||
|
||||
/* Jump to handler */
|
||||
FEXPORT(__kvm_mips_jump_to_handler)
|
||||
/*
|
||||
* XXXKYMA: not sure if this is safe, how large is the stack??
|
||||
* Now jump to the kvm_mips_handle_exit() to see if we can deal
|
||||
* with this in the kernel
|
||||
*/
|
||||
PTR_LA t9, kvm_mips_handle_exit
|
||||
jalr.hb t9
|
||||
INT_ADDIU sp, sp, -CALLFRAME_SIZ /* BD Slot */
|
||||
|
||||
/* Return from handler Make sure interrupts are disabled */
|
||||
di
|
||||
ehb
|
||||
|
||||
/*
|
||||
* XXXKYMA: k0/k1 could have been blown away if we processed
|
||||
* an exception while we were handling the exception from the
|
||||
* guest, reload k1
|
||||
*/
|
||||
|
||||
move k1, s1
|
||||
INT_ADDIU k1, k1, VCPU_HOST_ARCH
|
||||
|
||||
/*
|
||||
* Check return value, should tell us if we are returning to the
|
||||
* host (handle I/O etc)or resuming the guest
|
||||
*/
|
||||
andi t0, v0, RESUME_HOST
|
||||
bnez t0, __kvm_mips_return_to_host
|
||||
nop
|
||||
|
||||
__kvm_mips_return_to_guest:
|
||||
/* Put the saved pointer to vcpu (s1) back into the DDATA_LO Register */
|
||||
mtc0 s1, CP0_DDATA_LO
|
||||
|
||||
/* Load up the Guest EBASE to minimize the window where BEV is set */
|
||||
LONG_L t0, VCPU_GUEST_EBASE(k1)
|
||||
|
||||
/* Switch EBASE back to the one used by KVM */
|
||||
mfc0 v1, CP0_STATUS
|
||||
or k0, v1, ST0_BEV
|
||||
mtc0 k0, CP0_STATUS
|
||||
ehb
|
||||
mtc0 t0, CP0_EBASE
|
||||
|
||||
/* Setup status register for running guest in UM */
|
||||
or v1, v1, (ST0_EXL | KSU_USER | ST0_IE)
|
||||
and v1, v1, ~(ST0_CU0 | ST0_MX)
|
||||
mtc0 v1, CP0_STATUS
|
||||
ehb
|
||||
|
||||
/* Set Guest EPC */
|
||||
LONG_L t0, VCPU_PC(k1)
|
||||
mtc0 t0, CP0_EPC
|
||||
|
||||
/* Set the ASID for the Guest Kernel */
|
||||
PTR_L t0, VCPU_COP0(k1)
|
||||
LONG_L t0, COP0_STATUS(t0)
|
||||
andi t0, KSU_USER | ST0_ERL | ST0_EXL
|
||||
xori t0, KSU_USER
|
||||
bnez t0, 1f /* If kernel */
|
||||
INT_ADDIU t1, k1, VCPU_GUEST_KERNEL_ASID /* (BD) */
|
||||
INT_ADDIU t1, k1, VCPU_GUEST_USER_ASID /* else user */
|
||||
1:
|
||||
/* t1: contains the base of the ASID array, need to get the cpu id */
|
||||
LONG_L t2, TI_CPU($28) /* smp_processor_id */
|
||||
INT_SLL t2, t2, 2 /* x4 */
|
||||
REG_ADDU t3, t1, t2
|
||||
LONG_L k0, (t3)
|
||||
#ifdef CONFIG_MIPS_ASID_BITS_VARIABLE
|
||||
li t3, CPUINFO_SIZE/4
|
||||
mul t2, t2, t3 /* x sizeof(struct cpuinfo_mips)/4 */
|
||||
LONG_L t2, (cpu_data + CPUINFO_ASID_MASK)(t2)
|
||||
and k0, k0, t2
|
||||
#else
|
||||
andi k0, k0, MIPS_ENTRYHI_ASID
|
||||
#endif
|
||||
mtc0 k0, CP0_ENTRYHI
|
||||
ehb
|
||||
|
||||
/* Disable RDHWR access */
|
||||
mtc0 zero, CP0_HWRENA
|
||||
|
||||
.set noat
|
||||
/* load the guest context from VCPU and return */
|
||||
LONG_L $0, VCPU_R0(k1)
|
||||
LONG_L $1, VCPU_R1(k1)
|
||||
LONG_L $2, VCPU_R2(k1)
|
||||
LONG_L $3, VCPU_R3(k1)
|
||||
LONG_L $4, VCPU_R4(k1)
|
||||
LONG_L $5, VCPU_R5(k1)
|
||||
LONG_L $6, VCPU_R6(k1)
|
||||
LONG_L $7, VCPU_R7(k1)
|
||||
LONG_L $8, VCPU_R8(k1)
|
||||
LONG_L $9, VCPU_R9(k1)
|
||||
LONG_L $10, VCPU_R10(k1)
|
||||
LONG_L $11, VCPU_R11(k1)
|
||||
LONG_L $12, VCPU_R12(k1)
|
||||
LONG_L $13, VCPU_R13(k1)
|
||||
LONG_L $14, VCPU_R14(k1)
|
||||
LONG_L $15, VCPU_R15(k1)
|
||||
LONG_L $16, VCPU_R16(k1)
|
||||
LONG_L $17, VCPU_R17(k1)
|
||||
LONG_L $18, VCPU_R18(k1)
|
||||
LONG_L $19, VCPU_R19(k1)
|
||||
LONG_L $20, VCPU_R20(k1)
|
||||
LONG_L $21, VCPU_R21(k1)
|
||||
LONG_L $22, VCPU_R22(k1)
|
||||
LONG_L $23, VCPU_R23(k1)
|
||||
LONG_L $24, VCPU_R24(k1)
|
||||
LONG_L $25, VCPU_R25(k1)
|
||||
|
||||
/* $/k1 loaded later */
|
||||
LONG_L $28, VCPU_R28(k1)
|
||||
LONG_L $29, VCPU_R29(k1)
|
||||
LONG_L $30, VCPU_R30(k1)
|
||||
LONG_L $31, VCPU_R31(k1)
|
||||
|
||||
FEXPORT(__kvm_mips_skip_guest_restore)
|
||||
LONG_L k0, VCPU_HI(k1)
|
||||
mthi k0
|
||||
|
||||
LONG_L k0, VCPU_LO(k1)
|
||||
mtlo k0
|
||||
|
||||
LONG_L k0, VCPU_R26(k1)
|
||||
LONG_L k1, VCPU_R27(k1)
|
||||
|
||||
eret
|
||||
.set at
|
||||
|
||||
__kvm_mips_return_to_host:
|
||||
/* EBASE is already pointing to Linux */
|
||||
LONG_L k1, VCPU_HOST_STACK(k1)
|
||||
INT_ADDIU k1,k1, -PT_SIZE
|
||||
|
||||
/* Restore host DDATA_LO */
|
||||
LONG_L k0, PT_HOST_USERLOCAL(k1)
|
||||
mtc0 k0, CP0_DDATA_LO
|
||||
|
||||
/*
|
||||
* r2/v0 is the return code, shift it down by 2 (arithmetic)
|
||||
* to recover the err code
|
||||
*/
|
||||
INT_SRA k0, v0, 2
|
||||
move $2, k0
|
||||
|
||||
/* Load context saved on the host stack */
|
||||
LONG_L $16, PT_R16(k1)
|
||||
LONG_L $17, PT_R17(k1)
|
||||
LONG_L $18, PT_R18(k1)
|
||||
LONG_L $19, PT_R19(k1)
|
||||
LONG_L $20, PT_R20(k1)
|
||||
LONG_L $21, PT_R21(k1)
|
||||
LONG_L $22, PT_R22(k1)
|
||||
LONG_L $23, PT_R23(k1)
|
||||
|
||||
LONG_L $28, PT_R28(k1)
|
||||
LONG_L $29, PT_R29(k1)
|
||||
LONG_L $30, PT_R30(k1)
|
||||
|
||||
LONG_L k0, PT_HI(k1)
|
||||
mthi k0
|
||||
|
||||
LONG_L k0, PT_LO(k1)
|
||||
mtlo k0
|
||||
|
||||
/* Restore RDHWR access */
|
||||
PTR_LI k0, 0x2000000F
|
||||
mtc0 k0, CP0_HWRENA
|
||||
|
||||
/* Restore RA, which is the address we will return to */
|
||||
LONG_L ra, PT_R31(k1)
|
||||
j ra
|
||||
nop
|
||||
|
||||
VECTOR_END(MIPSX(GuestExceptionEnd))
|
||||
.end MIPSX(GuestException)
|
||||
|
||||
MIPSX(exceptions):
|
||||
####
|
||||
##### The exception handlers.
|
||||
#####
|
||||
.word _C_LABEL(MIPSX(GuestException)) # 0
|
||||
.word _C_LABEL(MIPSX(GuestException)) # 1
|
||||
.word _C_LABEL(MIPSX(GuestException)) # 2
|
||||
.word _C_LABEL(MIPSX(GuestException)) # 3
|
||||
.word _C_LABEL(MIPSX(GuestException)) # 4
|
||||
.word _C_LABEL(MIPSX(GuestException)) # 5
|
||||
.word _C_LABEL(MIPSX(GuestException)) # 6
|
||||
.word _C_LABEL(MIPSX(GuestException)) # 7
|
||||
.word _C_LABEL(MIPSX(GuestException)) # 8
|
||||
.word _C_LABEL(MIPSX(GuestException)) # 9
|
||||
.word _C_LABEL(MIPSX(GuestException)) # 10
|
||||
.word _C_LABEL(MIPSX(GuestException)) # 11
|
||||
.word _C_LABEL(MIPSX(GuestException)) # 12
|
||||
.word _C_LABEL(MIPSX(GuestException)) # 13
|
||||
.word _C_LABEL(MIPSX(GuestException)) # 14
|
||||
.word _C_LABEL(MIPSX(GuestException)) # 15
|
||||
.word _C_LABEL(MIPSX(GuestException)) # 16
|
||||
.word _C_LABEL(MIPSX(GuestException)) # 17
|
||||
.word _C_LABEL(MIPSX(GuestException)) # 18
|
||||
.word _C_LABEL(MIPSX(GuestException)) # 19
|
||||
.word _C_LABEL(MIPSX(GuestException)) # 20
|
||||
.word _C_LABEL(MIPSX(GuestException)) # 21
|
||||
.word _C_LABEL(MIPSX(GuestException)) # 22
|
||||
.word _C_LABEL(MIPSX(GuestException)) # 23
|
||||
.word _C_LABEL(MIPSX(GuestException)) # 24
|
||||
.word _C_LABEL(MIPSX(GuestException)) # 25
|
||||
.word _C_LABEL(MIPSX(GuestException)) # 26
|
||||
.word _C_LABEL(MIPSX(GuestException)) # 27
|
||||
.word _C_LABEL(MIPSX(GuestException)) # 28
|
||||
.word _C_LABEL(MIPSX(GuestException)) # 29
|
||||
.word _C_LABEL(MIPSX(GuestException)) # 30
|
||||
.word _C_LABEL(MIPSX(GuestException)) # 31
@ -9,6 +9,7 @@
 * Authors: Sanjay Lal <sanjayl@kymasys.com>
 */

#include <linux/bitops.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/kdebug.h>
@ -147,7 +148,7 @@ void kvm_mips_free_vcpus(struct kvm *kvm)
	/* Put the pages we reserved for the guest pmap */
	for (i = 0; i < kvm->arch.guest_pmap_npages; i++) {
		if (kvm->arch.guest_pmap[i] != KVM_INVALID_PAGE)
			kvm_mips_release_pfn_clean(kvm->arch.guest_pmap[i]);
			kvm_release_pfn_clean(kvm->arch.guest_pmap[i]);
	}
	kfree(kvm->arch.guest_pmap);

@ -244,10 +245,27 @@ void kvm_arch_commit_memory_region(struct kvm *kvm,
	}
}

static inline void dump_handler(const char *symbol, void *start, void *end)
{
	u32 *p;

	pr_debug("LEAF(%s)\n", symbol);

	pr_debug("\t.set push\n");
	pr_debug("\t.set noreorder\n");

	for (p = start; p < (u32 *)end; ++p)
		pr_debug("\t.word\t0x%08x\t\t# %p\n", *p, p);

	pr_debug("\t.set\tpop\n");

	pr_debug("\tEND(%s)\n", symbol);
}

struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id)
{
	int err, size, offset;
	void *gebase;
	int err, size;
	void *gebase, *p, *handler;
	int i;

	struct kvm_vcpu *vcpu = kzalloc(sizeof(struct kvm_vcpu), GFP_KERNEL);
@ -273,9 +291,6 @@ struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id)
	else
		size = 0x4000;

	/* Save Linux EBASE */
	vcpu->arch.host_ebase = (void *)read_c0_ebase();

	gebase = kzalloc(ALIGN(size, PAGE_SIZE), GFP_KERNEL);

	if (!gebase) {
@ -285,44 +300,53 @@ struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id)
|
|||
kvm_debug("Allocated %d bytes for KVM Exception Handlers @ %p\n",
|
||||
ALIGN(size, PAGE_SIZE), gebase);
|
||||
|
||||
/*
|
||||
* Check new ebase actually fits in CP0_EBase. The lack of a write gate
|
||||
* limits us to the low 512MB of physical address space. If the memory
|
||||
* we allocate is out of range, just give up now.
|
||||
*/
|
||||
if (!cpu_has_ebase_wg && virt_to_phys(gebase) >= 0x20000000) {
|
||||
kvm_err("CP0_EBase.WG required for guest exception base %pK\n",
|
||||
gebase);
|
||||
err = -ENOMEM;
|
||||
goto out_free_gebase;
|
||||
}
|
||||
|
||||
/* Save new ebase */
|
||||
vcpu->arch.guest_ebase = gebase;
|
||||
|
||||
/* Copy L1 Guest Exception handler to correct offset */
|
||||
/* Build guest exception vectors dynamically in unmapped memory */
|
||||
handler = gebase + 0x2000;
|
||||
|
||||
/* TLB Refill, EXL = 0 */
|
||||
memcpy(gebase, mips32_exception,
|
||||
mips32_exceptionEnd - mips32_exception);
|
||||
kvm_mips_build_exception(gebase, handler);
|
||||
|
||||
/* General Exception Entry point */
|
||||
memcpy(gebase + 0x180, mips32_exception,
|
||||
mips32_exceptionEnd - mips32_exception);
|
||||
kvm_mips_build_exception(gebase + 0x180, handler);
|
||||
|
||||
/* For vectored interrupts poke the exception code @ all offsets 0-7 */
|
||||
for (i = 0; i < 8; i++) {
|
||||
kvm_debug("L1 Vectored handler @ %p\n",
|
||||
gebase + 0x200 + (i * VECTORSPACING));
|
||||
memcpy(gebase + 0x200 + (i * VECTORSPACING), mips32_exception,
|
||||
mips32_exceptionEnd - mips32_exception);
|
||||
kvm_mips_build_exception(gebase + 0x200 + i * VECTORSPACING,
|
||||
handler);
|
||||
}
|
||||
|
||||
/* General handler, relocate to unmapped space for sanity's sake */
|
||||
offset = 0x2000;
|
||||
kvm_debug("Installing KVM Exception handlers @ %p, %#x bytes\n",
|
||||
gebase + offset,
|
||||
mips32_GuestExceptionEnd - mips32_GuestException);
|
||||
/* General exit handler */
|
||||
p = handler;
|
||||
p = kvm_mips_build_exit(p);
|
||||
|
||||
memcpy(gebase + offset, mips32_GuestException,
|
||||
mips32_GuestExceptionEnd - mips32_GuestException);
|
||||
/* Guest entry routine */
|
||||
vcpu->arch.vcpu_run = p;
|
||||
p = kvm_mips_build_vcpu_run(p);
|
||||
|
||||
#ifdef MODULE
|
||||
offset += mips32_GuestExceptionEnd - mips32_GuestException;
|
||||
memcpy(gebase + offset, (char *)__kvm_mips_vcpu_run,
|
||||
__kvm_mips_vcpu_run_end - (char *)__kvm_mips_vcpu_run);
|
||||
vcpu->arch.vcpu_run = gebase + offset;
|
||||
#else
|
||||
vcpu->arch.vcpu_run = __kvm_mips_vcpu_run;
|
||||
#endif
|
||||
/* Dump the generated code */
|
||||
pr_debug("#include <asm/asm.h>\n");
|
||||
pr_debug("#include <asm/regdef.h>\n");
|
||||
pr_debug("\n");
|
||||
dump_handler("kvm_vcpu_run", vcpu->arch.vcpu_run, p);
|
||||
dump_handler("kvm_gen_exc", gebase + 0x180, gebase + 0x200);
|
||||
dump_handler("kvm_exit", gebase + 0x2000, vcpu->arch.vcpu_run);
|
||||
|
||||
/* Invalidate the icache for these ranges */
|
||||
local_flush_icache_range((unsigned long)gebase,
|
||||
|
@ -408,17 +432,19 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
|
|||
kvm_mips_deliver_interrupts(vcpu,
|
||||
kvm_read_c0_guest_cause(vcpu->arch.cop0));
|
||||
|
||||
__kvm_guest_enter();
|
||||
guest_enter_irqoff();
|
||||
|
||||
/* Disable hardware page table walking while in guest */
|
||||
htw_stop();
|
||||
|
||||
trace_kvm_enter(vcpu);
|
||||
r = vcpu->arch.vcpu_run(run, vcpu);
|
||||
trace_kvm_out(vcpu);
|
||||
|
||||
/* Re-enable HTW before enabling interrupts */
|
||||
htw_start();
|
||||
|
||||
__kvm_guest_exit();
|
||||
guest_exit_irqoff();
|
||||
local_irq_enable();
|
||||
|
||||
if (vcpu->sigset_active)
|
||||
|
@ -507,8 +533,10 @@ static u64 kvm_mips_get_one_regs[] = {
|
|||
KVM_REG_MIPS_R30,
|
||||
KVM_REG_MIPS_R31,
|
||||
|
||||
#ifndef CONFIG_CPU_MIPSR6
|
||||
KVM_REG_MIPS_HI,
|
||||
KVM_REG_MIPS_LO,
|
||||
#endif
|
||||
KVM_REG_MIPS_PC,
|
||||
|
||||
KVM_REG_MIPS_CP0_INDEX,
|
||||
|
@ -539,6 +567,104 @@ static u64 kvm_mips_get_one_regs[] = {
|
|||
KVM_REG_MIPS_COUNT_HZ,
|
||||
};
|
||||
|
||||
static u64 kvm_mips_get_one_regs_fpu[] = {
|
||||
KVM_REG_MIPS_FCR_IR,
|
||||
KVM_REG_MIPS_FCR_CSR,
|
||||
};
|
||||
|
||||
static u64 kvm_mips_get_one_regs_msa[] = {
|
||||
KVM_REG_MIPS_MSA_IR,
|
||||
KVM_REG_MIPS_MSA_CSR,
|
||||
};
|
||||
|
||||
static u64 kvm_mips_get_one_regs_kscratch[] = {
|
||||
KVM_REG_MIPS_CP0_KSCRATCH1,
|
||||
KVM_REG_MIPS_CP0_KSCRATCH2,
|
||||
KVM_REG_MIPS_CP0_KSCRATCH3,
|
||||
KVM_REG_MIPS_CP0_KSCRATCH4,
|
||||
KVM_REG_MIPS_CP0_KSCRATCH5,
|
||||
KVM_REG_MIPS_CP0_KSCRATCH6,
|
||||
};
|
||||
|
||||
static unsigned long kvm_mips_num_regs(struct kvm_vcpu *vcpu)
|
||||
{
|
||||
unsigned long ret;
|
||||
|
||||
ret = ARRAY_SIZE(kvm_mips_get_one_regs);
|
||||
if (kvm_mips_guest_can_have_fpu(&vcpu->arch)) {
|
||||
ret += ARRAY_SIZE(kvm_mips_get_one_regs_fpu) + 48;
|
||||
/* odd doubles */
|
||||
if (boot_cpu_data.fpu_id & MIPS_FPIR_F64)
|
||||
ret += 16;
|
||||
}
|
||||
if (kvm_mips_guest_can_have_msa(&vcpu->arch))
|
||||
ret += ARRAY_SIZE(kvm_mips_get_one_regs_msa) + 32;
|
||||
ret += __arch_hweight8(vcpu->arch.kscratch_enabled);
|
||||
ret += kvm_mips_callbacks->num_regs(vcpu);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int kvm_mips_copy_reg_indices(struct kvm_vcpu *vcpu, u64 __user *indices)
|
||||
{
|
||||
u64 index;
|
||||
unsigned int i;
|
||||
|
||||
if (copy_to_user(indices, kvm_mips_get_one_regs,
|
||||
sizeof(kvm_mips_get_one_regs)))
|
||||
return -EFAULT;
|
||||
indices += ARRAY_SIZE(kvm_mips_get_one_regs);
|
||||
|
||||
if (kvm_mips_guest_can_have_fpu(&vcpu->arch)) {
|
||||
if (copy_to_user(indices, kvm_mips_get_one_regs_fpu,
|
||||
sizeof(kvm_mips_get_one_regs_fpu)))
|
||||
return -EFAULT;
|
||||
indices += ARRAY_SIZE(kvm_mips_get_one_regs_fpu);
|
||||
|
||||
for (i = 0; i < 32; ++i) {
|
||||
index = KVM_REG_MIPS_FPR_32(i);
|
||||
if (copy_to_user(indices, &index, sizeof(index)))
|
||||
return -EFAULT;
|
||||
++indices;
|
||||
|
||||
/* skip odd doubles if no F64 */
|
||||
if (i & 1 && !(boot_cpu_data.fpu_id & MIPS_FPIR_F64))
|
||||
continue;
|
||||
|
||||
index = KVM_REG_MIPS_FPR_64(i);
|
||||
if (copy_to_user(indices, &index, sizeof(index)))
|
||||
return -EFAULT;
|
||||
++indices;
|
||||
}
|
||||
}
|
||||
|
||||
if (kvm_mips_guest_can_have_msa(&vcpu->arch)) {
|
||||
if (copy_to_user(indices, kvm_mips_get_one_regs_msa,
|
||||
sizeof(kvm_mips_get_one_regs_msa)))
|
||||
return -EFAULT;
|
||||
indices += ARRAY_SIZE(kvm_mips_get_one_regs_msa);
|
||||
|
||||
for (i = 0; i < 32; ++i) {
|
||||
index = KVM_REG_MIPS_VEC_128(i);
|
||||
if (copy_to_user(indices, &index, sizeof(index)))
|
||||
return -EFAULT;
|
||||
++indices;
|
||||
}
|
||||
}
|
||||
|
||||
for (i = 0; i < 6; ++i) {
|
||||
if (!(vcpu->arch.kscratch_enabled & BIT(i + 2)))
|
||||
continue;
|
||||
|
||||
if (copy_to_user(indices, &kvm_mips_get_one_regs_kscratch[i],
|
||||
sizeof(kvm_mips_get_one_regs_kscratch[i])))
|
||||
return -EFAULT;
|
||||
++indices;
|
||||
}
|
||||
|
||||
return kvm_mips_callbacks->copy_reg_indices(vcpu, indices);
|
||||
}
|
||||
|
||||
static int kvm_mips_get_reg(struct kvm_vcpu *vcpu,
|
||||
const struct kvm_one_reg *reg)
|
||||
{
|
||||
|
@ -554,12 +680,14 @@ static int kvm_mips_get_reg(struct kvm_vcpu *vcpu,
|
|||
case KVM_REG_MIPS_R0 ... KVM_REG_MIPS_R31:
|
||||
v = (long)vcpu->arch.gprs[reg->id - KVM_REG_MIPS_R0];
|
||||
break;
|
||||
#ifndef CONFIG_CPU_MIPSR6
|
||||
case KVM_REG_MIPS_HI:
|
||||
v = (long)vcpu->arch.hi;
|
||||
break;
|
||||
case KVM_REG_MIPS_LO:
|
||||
v = (long)vcpu->arch.lo;
|
||||
break;
|
||||
#endif
|
||||
case KVM_REG_MIPS_PC:
|
||||
v = (long)vcpu->arch.pc;
|
||||
break;
|
||||
|
@ -688,17 +816,37 @@ static int kvm_mips_get_reg(struct kvm_vcpu *vcpu,
|
|||
case KVM_REG_MIPS_CP0_ERROREPC:
|
||||
v = (long)kvm_read_c0_guest_errorepc(cop0);
|
||||
break;
|
||||
case KVM_REG_MIPS_CP0_KSCRATCH1 ... KVM_REG_MIPS_CP0_KSCRATCH6:
|
||||
idx = reg->id - KVM_REG_MIPS_CP0_KSCRATCH1 + 2;
|
||||
if (!(vcpu->arch.kscratch_enabled & BIT(idx)))
|
||||
return -EINVAL;
|
||||
switch (idx) {
|
||||
case 2:
|
||||
v = (long)kvm_read_c0_guest_kscratch1(cop0);
|
||||
break;
|
||||
case 3:
|
||||
v = (long)kvm_read_c0_guest_kscratch2(cop0);
|
||||
break;
|
||||
case 4:
|
||||
v = (long)kvm_read_c0_guest_kscratch3(cop0);
|
||||
break;
|
||||
case 5:
|
||||
v = (long)kvm_read_c0_guest_kscratch4(cop0);
|
||||
break;
|
||||
case 6:
|
||||
v = (long)kvm_read_c0_guest_kscratch5(cop0);
|
||||
break;
|
||||
case 7:
|
||||
v = (long)kvm_read_c0_guest_kscratch6(cop0);
|
||||
break;
|
||||
}
|
||||
break;
|
||||
/* registers to be handled specially */
|
||||
case KVM_REG_MIPS_CP0_COUNT:
|
||||
case KVM_REG_MIPS_COUNT_CTL:
|
||||
case KVM_REG_MIPS_COUNT_RESUME:
|
||||
case KVM_REG_MIPS_COUNT_HZ:
|
||||
default:
|
||||
ret = kvm_mips_callbacks->get_one_reg(vcpu, reg, &v);
|
||||
if (ret)
|
||||
return ret;
|
||||
break;
|
||||
default:
|
||||
return -EINVAL;
|
||||
}
|
||||
if ((reg->id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U64) {
|
||||
u64 __user *uaddr64 = (u64 __user *)(long)reg->addr;
|
||||
|
@ -755,12 +903,14 @@ static int kvm_mips_set_reg(struct kvm_vcpu *vcpu,
|
|||
case KVM_REG_MIPS_R1 ... KVM_REG_MIPS_R31:
|
||||
vcpu->arch.gprs[reg->id - KVM_REG_MIPS_R0] = v;
|
||||
break;
|
||||
#ifndef CONFIG_CPU_MIPSR6
|
||||
case KVM_REG_MIPS_HI:
|
||||
vcpu->arch.hi = v;
|
||||
break;
|
||||
case KVM_REG_MIPS_LO:
|
||||
vcpu->arch.lo = v;
|
||||
break;
|
||||
#endif
|
||||
case KVM_REG_MIPS_PC:
|
||||
vcpu->arch.pc = v;
|
||||
break;
|
||||
|
@ -859,22 +1009,34 @@ static int kvm_mips_set_reg(struct kvm_vcpu *vcpu,
|
|||
case KVM_REG_MIPS_CP0_ERROREPC:
|
||||
kvm_write_c0_guest_errorepc(cop0, v);
|
||||
break;
|
||||
case KVM_REG_MIPS_CP0_KSCRATCH1 ... KVM_REG_MIPS_CP0_KSCRATCH6:
|
||||
idx = reg->id - KVM_REG_MIPS_CP0_KSCRATCH1 + 2;
|
||||
if (!(vcpu->arch.kscratch_enabled & BIT(idx)))
|
||||
return -EINVAL;
|
||||
switch (idx) {
|
||||
case 2:
|
||||
kvm_write_c0_guest_kscratch1(cop0, v);
|
||||
break;
|
||||
case 3:
|
||||
kvm_write_c0_guest_kscratch2(cop0, v);
|
||||
break;
|
||||
case 4:
|
||||
kvm_write_c0_guest_kscratch3(cop0, v);
|
||||
break;
|
||||
case 5:
|
||||
kvm_write_c0_guest_kscratch4(cop0, v);
|
||||
break;
|
||||
case 6:
|
||||
kvm_write_c0_guest_kscratch5(cop0, v);
|
||||
break;
|
||||
case 7:
|
||||
kvm_write_c0_guest_kscratch6(cop0, v);
|
||||
break;
|
||||
}
|
||||
break;
|
||||
/* registers to be handled specially */
|
||||
case KVM_REG_MIPS_CP0_COUNT:
|
||||
case KVM_REG_MIPS_CP0_COMPARE:
|
||||
case KVM_REG_MIPS_CP0_CAUSE:
|
||||
case KVM_REG_MIPS_CP0_CONFIG:
|
||||
case KVM_REG_MIPS_CP0_CONFIG1:
|
||||
case KVM_REG_MIPS_CP0_CONFIG2:
|
||||
case KVM_REG_MIPS_CP0_CONFIG3:
|
||||
case KVM_REG_MIPS_CP0_CONFIG4:
|
||||
case KVM_REG_MIPS_CP0_CONFIG5:
|
||||
case KVM_REG_MIPS_COUNT_CTL:
|
||||
case KVM_REG_MIPS_COUNT_RESUME:
|
||||
case KVM_REG_MIPS_COUNT_HZ:
|
||||
return kvm_mips_callbacks->set_one_reg(vcpu, reg, v);
|
||||
default:
|
||||
return -EINVAL;
|
||||
return kvm_mips_callbacks->set_one_reg(vcpu, reg, v);
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
@ -927,23 +1089,18 @@ long kvm_arch_vcpu_ioctl(struct file *filp, unsigned int ioctl,
|
|||
}
|
||||
case KVM_GET_REG_LIST: {
|
||||
struct kvm_reg_list __user *user_list = argp;
|
||||
u64 __user *reg_dest;
|
||||
struct kvm_reg_list reg_list;
|
||||
unsigned n;
|
||||
|
||||
if (copy_from_user(®_list, user_list, sizeof(reg_list)))
|
||||
return -EFAULT;
|
||||
n = reg_list.n;
|
||||
reg_list.n = ARRAY_SIZE(kvm_mips_get_one_regs);
|
||||
reg_list.n = kvm_mips_num_regs(vcpu);
|
||||
if (copy_to_user(user_list, ®_list, sizeof(reg_list)))
|
||||
return -EFAULT;
|
||||
if (n < reg_list.n)
|
||||
return -E2BIG;
|
||||
reg_dest = user_list->reg;
|
||||
if (copy_to_user(reg_dest, kvm_mips_get_one_regs,
|
||||
sizeof(kvm_mips_get_one_regs)))
|
||||
return -EFAULT;
|
||||
return 0;
|
||||
return kvm_mips_copy_reg_indices(vcpu, user_list->reg);
|
||||
}
|
||||
case KVM_NMI:
|
||||
/* Treat the NMI as a CPU reset */
|
||||
|
@ -1222,7 +1379,7 @@ int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
|
|||
|
||||
static void kvm_mips_set_c0_status(void)
|
||||
{
|
||||
uint32_t status = read_c0_status();
|
||||
u32 status = read_c0_status();
|
||||
|
||||
if (cpu_has_dsp)
|
||||
status |= (ST0_MX);
|
||||
|
@ -1236,9 +1393,9 @@ static void kvm_mips_set_c0_status(void)
|
|||
*/
|
||||
int kvm_mips_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu)
|
||||
{
|
||||
uint32_t cause = vcpu->arch.host_cp0_cause;
|
||||
uint32_t exccode = (cause >> CAUSEB_EXCCODE) & 0x1f;
|
||||
uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc;
|
||||
u32 cause = vcpu->arch.host_cp0_cause;
|
||||
u32 exccode = (cause >> CAUSEB_EXCCODE) & 0x1f;
|
||||
u32 __user *opc = (u32 __user *) vcpu->arch.pc;
|
||||
unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;
|
||||
enum emulation_result er = EMULATE_DONE;
|
||||
int ret = RESUME_GUEST;
|
||||
|
@ -1260,6 +1417,7 @@ int kvm_mips_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu)
|
|||
|
||||
kvm_debug("kvm_mips_handle_exit: cause: %#x, PC: %p, kvm_run: %p, kvm_vcpu: %p\n",
|
||||
cause, opc, run, vcpu);
|
||||
trace_kvm_exit(vcpu, exccode);
|
||||
|
||||
/*
|
||||
* Do a privilege check, if in UM most of these exit conditions end up
|
||||
|
@ -1279,7 +1437,6 @@ int kvm_mips_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu)
|
|||
kvm_debug("[%d]EXCCODE_INT @ %p\n", vcpu->vcpu_id, opc);
|
||||
|
||||
++vcpu->stat.int_exits;
|
||||
trace_kvm_exit(vcpu, INT_EXITS);
|
||||
|
||||
if (need_resched())
|
||||
cond_resched();
|
||||
|
@ -1291,7 +1448,6 @@ int kvm_mips_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu)
|
|||
kvm_debug("EXCCODE_CPU: @ PC: %p\n", opc);
|
||||
|
||||
++vcpu->stat.cop_unusable_exits;
|
||||
trace_kvm_exit(vcpu, COP_UNUSABLE_EXITS);
|
||||
ret = kvm_mips_callbacks->handle_cop_unusable(vcpu);
|
||||
/* XXXKYMA: Might need to return to user space */
|
||||
if (run->exit_reason == KVM_EXIT_IRQ_WINDOW_OPEN)
|
||||
|
@ -1300,7 +1456,6 @@ int kvm_mips_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu)
|
|||
|
||||
case EXCCODE_MOD:
|
||||
++vcpu->stat.tlbmod_exits;
|
||||
trace_kvm_exit(vcpu, TLBMOD_EXITS);
|
||||
ret = kvm_mips_callbacks->handle_tlb_mod(vcpu);
|
||||
break;
|
||||
|
||||
|
@ -1310,7 +1465,6 @@ int kvm_mips_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu)
|
|||
badvaddr);
|
||||
|
||||
++vcpu->stat.tlbmiss_st_exits;
|
||||
trace_kvm_exit(vcpu, TLBMISS_ST_EXITS);
|
||||
ret = kvm_mips_callbacks->handle_tlb_st_miss(vcpu);
|
||||
break;
|
||||
|
||||
|
@ -1319,61 +1473,51 @@ int kvm_mips_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu)
|
|||
cause, opc, badvaddr);
|
||||
|
||||
++vcpu->stat.tlbmiss_ld_exits;
|
||||
trace_kvm_exit(vcpu, TLBMISS_LD_EXITS);
|
||||
ret = kvm_mips_callbacks->handle_tlb_ld_miss(vcpu);
|
||||
break;
|
||||
|
||||
case EXCCODE_ADES:
|
||||
++vcpu->stat.addrerr_st_exits;
|
||||
trace_kvm_exit(vcpu, ADDRERR_ST_EXITS);
|
||||
ret = kvm_mips_callbacks->handle_addr_err_st(vcpu);
|
||||
break;
|
||||
|
||||
case EXCCODE_ADEL:
|
||||
++vcpu->stat.addrerr_ld_exits;
|
||||
trace_kvm_exit(vcpu, ADDRERR_LD_EXITS);
|
||||
ret = kvm_mips_callbacks->handle_addr_err_ld(vcpu);
|
||||
break;
|
||||
|
||||
case EXCCODE_SYS:
|
||||
++vcpu->stat.syscall_exits;
|
||||
trace_kvm_exit(vcpu, SYSCALL_EXITS);
|
||||
ret = kvm_mips_callbacks->handle_syscall(vcpu);
|
||||
break;
|
||||
|
||||
case EXCCODE_RI:
|
||||
++vcpu->stat.resvd_inst_exits;
|
||||
trace_kvm_exit(vcpu, RESVD_INST_EXITS);
|
||||
ret = kvm_mips_callbacks->handle_res_inst(vcpu);
|
||||
break;
|
||||
|
||||
case EXCCODE_BP:
|
||||
++vcpu->stat.break_inst_exits;
|
||||
trace_kvm_exit(vcpu, BREAK_INST_EXITS);
|
||||
ret = kvm_mips_callbacks->handle_break(vcpu);
|
||||
break;
|
||||
|
||||
case EXCCODE_TR:
|
||||
++vcpu->stat.trap_inst_exits;
|
||||
trace_kvm_exit(vcpu, TRAP_INST_EXITS);
|
||||
ret = kvm_mips_callbacks->handle_trap(vcpu);
|
||||
break;
|
||||
|
||||
case EXCCODE_MSAFPE:
|
||||
++vcpu->stat.msa_fpe_exits;
|
||||
trace_kvm_exit(vcpu, MSA_FPE_EXITS);
|
||||
ret = kvm_mips_callbacks->handle_msa_fpe(vcpu);
|
||||
break;
|
||||
|
||||
case EXCCODE_FPE:
|
||||
++vcpu->stat.fpe_exits;
|
||||
trace_kvm_exit(vcpu, FPE_EXITS);
|
||||
ret = kvm_mips_callbacks->handle_fpe(vcpu);
|
||||
break;
|
||||
|
||||
case EXCCODE_MSADIS:
|
||||
++vcpu->stat.msa_disabled_exits;
|
||||
trace_kvm_exit(vcpu, MSA_DISABLED_EXITS);
|
||||
ret = kvm_mips_callbacks->handle_msa_disabled(vcpu);
|
||||
break;
|
||||
|
||||
|
@ -1400,11 +1544,13 @@ int kvm_mips_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu)
|
|||
run->exit_reason = KVM_EXIT_INTR;
|
||||
ret = (-EINTR << 2) | RESUME_HOST;
|
||||
++vcpu->stat.signal_exits;
|
||||
trace_kvm_exit(vcpu, SIGNAL_EXITS);
|
||||
trace_kvm_exit(vcpu, KVM_TRACE_EXIT_SIGNAL);
|
||||
}
|
||||
}
|
||||
|
||||
if (ret == RESUME_GUEST) {
|
||||
trace_kvm_reenter(vcpu);
|
||||
|
||||
/*
|
||||
* If FPU / MSA are enabled (i.e. the guest's FPU / MSA context
|
||||
* is live), restore FCR31 / MSACSR.
|
||||
|
@ -1450,7 +1596,7 @@ void kvm_own_fpu(struct kvm_vcpu *vcpu)
|
|||
* not to clobber the status register directly via the commpage.
|
||||
*/
|
||||
if (cpu_has_msa && sr & ST0_CU1 && !(sr & ST0_FR) &&
|
||||
vcpu->arch.fpu_inuse & KVM_MIPS_FPU_MSA)
|
||||
vcpu->arch.aux_inuse & KVM_MIPS_AUX_MSA)
|
||||
kvm_lose_fpu(vcpu);
|
||||
|
||||
/*
|
||||
|
@ -1465,9 +1611,12 @@ void kvm_own_fpu(struct kvm_vcpu *vcpu)
|
|||
enable_fpu_hazard();
|
||||
|
||||
/* If guest FPU state not active, restore it now */
|
||||
if (!(vcpu->arch.fpu_inuse & KVM_MIPS_FPU_FPU)) {
|
||||
if (!(vcpu->arch.aux_inuse & KVM_MIPS_AUX_FPU)) {
|
||||
__kvm_restore_fpu(&vcpu->arch);
|
||||
vcpu->arch.fpu_inuse |= KVM_MIPS_FPU_FPU;
|
||||
vcpu->arch.aux_inuse |= KVM_MIPS_AUX_FPU;
|
||||
trace_kvm_aux(vcpu, KVM_TRACE_AUX_RESTORE, KVM_TRACE_AUX_FPU);
|
||||
} else {
|
||||
trace_kvm_aux(vcpu, KVM_TRACE_AUX_ENABLE, KVM_TRACE_AUX_FPU);
|
||||
}
|
||||
|
||||
preempt_enable();
|
||||
|
@ -1494,8 +1643,8 @@ void kvm_own_msa(struct kvm_vcpu *vcpu)
|
|||
* interacts with MSA state, so play it safe and save it first.
|
||||
*/
|
||||
if (!(sr & ST0_FR) &&
|
||||
(vcpu->arch.fpu_inuse & (KVM_MIPS_FPU_FPU |
|
||||
KVM_MIPS_FPU_MSA)) == KVM_MIPS_FPU_FPU)
|
||||
(vcpu->arch.aux_inuse & (KVM_MIPS_AUX_FPU |
|
||||
KVM_MIPS_AUX_MSA)) == KVM_MIPS_AUX_FPU)
|
||||
kvm_lose_fpu(vcpu);
|
||||
|
||||
change_c0_status(ST0_CU1 | ST0_FR, sr);
|
||||
|
@ -1509,22 +1658,26 @@ void kvm_own_msa(struct kvm_vcpu *vcpu)
|
|||
set_c0_config5(MIPS_CONF5_MSAEN);
|
||||
enable_fpu_hazard();
|
||||
|
||||
switch (vcpu->arch.fpu_inuse & (KVM_MIPS_FPU_FPU | KVM_MIPS_FPU_MSA)) {
|
||||
case KVM_MIPS_FPU_FPU:
|
||||
switch (vcpu->arch.aux_inuse & (KVM_MIPS_AUX_FPU | KVM_MIPS_AUX_MSA)) {
|
||||
case KVM_MIPS_AUX_FPU:
|
||||
/*
|
||||
* Guest FPU state already loaded, only restore upper MSA state
|
||||
*/
|
||||
__kvm_restore_msa_upper(&vcpu->arch);
|
||||
vcpu->arch.fpu_inuse |= KVM_MIPS_FPU_MSA;
|
||||
vcpu->arch.aux_inuse |= KVM_MIPS_AUX_MSA;
|
||||
trace_kvm_aux(vcpu, KVM_TRACE_AUX_RESTORE, KVM_TRACE_AUX_MSA);
|
||||
break;
|
||||
case 0:
|
||||
/* Neither FPU or MSA already active, restore full MSA state */
|
||||
__kvm_restore_msa(&vcpu->arch);
|
||||
vcpu->arch.fpu_inuse |= KVM_MIPS_FPU_MSA;
|
||||
vcpu->arch.aux_inuse |= KVM_MIPS_AUX_MSA;
|
||||
if (kvm_mips_guest_has_fpu(&vcpu->arch))
|
||||
vcpu->arch.fpu_inuse |= KVM_MIPS_FPU_FPU;
|
||||
vcpu->arch.aux_inuse |= KVM_MIPS_AUX_FPU;
|
||||
trace_kvm_aux(vcpu, KVM_TRACE_AUX_RESTORE,
|
||||
KVM_TRACE_AUX_FPU_MSA);
|
||||
break;
|
||||
default:
|
||||
trace_kvm_aux(vcpu, KVM_TRACE_AUX_ENABLE, KVM_TRACE_AUX_MSA);
|
||||
break;
|
||||
}
|
||||
|
||||
|
@ -1536,13 +1689,15 @@ void kvm_own_msa(struct kvm_vcpu *vcpu)
|
|||
void kvm_drop_fpu(struct kvm_vcpu *vcpu)
|
||||
{
|
||||
preempt_disable();
|
||||
if (cpu_has_msa && vcpu->arch.fpu_inuse & KVM_MIPS_FPU_MSA) {
|
||||
if (cpu_has_msa && vcpu->arch.aux_inuse & KVM_MIPS_AUX_MSA) {
|
||||
disable_msa();
|
||||
vcpu->arch.fpu_inuse &= ~KVM_MIPS_FPU_MSA;
|
||||
trace_kvm_aux(vcpu, KVM_TRACE_AUX_DISCARD, KVM_TRACE_AUX_MSA);
|
||||
vcpu->arch.aux_inuse &= ~KVM_MIPS_AUX_MSA;
|
||||
}
|
||||
if (vcpu->arch.fpu_inuse & KVM_MIPS_FPU_FPU) {
|
||||
if (vcpu->arch.aux_inuse & KVM_MIPS_AUX_FPU) {
|
||||
clear_c0_status(ST0_CU1 | ST0_FR);
|
||||
vcpu->arch.fpu_inuse &= ~KVM_MIPS_FPU_FPU;
|
||||
trace_kvm_aux(vcpu, KVM_TRACE_AUX_DISCARD, KVM_TRACE_AUX_FPU);
|
||||
vcpu->arch.aux_inuse &= ~KVM_MIPS_AUX_FPU;
|
||||
}
|
||||
preempt_enable();
|
||||
}
|
||||
|
@ -1558,25 +1713,27 @@ void kvm_lose_fpu(struct kvm_vcpu *vcpu)
|
|||
*/
|
||||
|
||||
preempt_disable();
|
||||
if (cpu_has_msa && vcpu->arch.fpu_inuse & KVM_MIPS_FPU_MSA) {
|
||||
if (cpu_has_msa && vcpu->arch.aux_inuse & KVM_MIPS_AUX_MSA) {
|
||||
set_c0_config5(MIPS_CONF5_MSAEN);
|
||||
enable_fpu_hazard();
|
||||
|
||||
__kvm_save_msa(&vcpu->arch);
|
||||
trace_kvm_aux(vcpu, KVM_TRACE_AUX_SAVE, KVM_TRACE_AUX_FPU_MSA);
|
||||
|
||||
/* Disable MSA & FPU */
|
||||
disable_msa();
|
||||
if (vcpu->arch.fpu_inuse & KVM_MIPS_FPU_FPU) {
|
||||
if (vcpu->arch.aux_inuse & KVM_MIPS_AUX_FPU) {
|
||||
clear_c0_status(ST0_CU1 | ST0_FR);
|
||||
disable_fpu_hazard();
|
||||
}
|
||||
vcpu->arch.fpu_inuse &= ~(KVM_MIPS_FPU_FPU | KVM_MIPS_FPU_MSA);
|
||||
} else if (vcpu->arch.fpu_inuse & KVM_MIPS_FPU_FPU) {
|
||||
vcpu->arch.aux_inuse &= ~(KVM_MIPS_AUX_FPU | KVM_MIPS_AUX_MSA);
|
||||
} else if (vcpu->arch.aux_inuse & KVM_MIPS_AUX_FPU) {
|
||||
set_c0_status(ST0_CU1);
|
||||
enable_fpu_hazard();
|
||||
|
||||
__kvm_save_fpu(&vcpu->arch);
|
||||
vcpu->arch.fpu_inuse &= ~KVM_MIPS_FPU_FPU;
|
||||
vcpu->arch.aux_inuse &= ~KVM_MIPS_AUX_FPU;
|
||||
trace_kvm_aux(vcpu, KVM_TRACE_AUX_SAVE, KVM_TRACE_AUX_FPU);
|
||||
|
||||
/* Disable FPU */
|
||||
clear_c0_status(ST0_CU1 | ST0_FR);
|
||||
|
@ -1638,6 +1795,10 @@ static int __init kvm_mips_init(void)
{
	int ret;

	ret = kvm_mips_entry_setup();
	if (ret)
		return ret;

	ret = kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);

	if (ret)
@ -1645,18 +1806,6 @@ static int __init kvm_mips_init(void)

	register_die_notifier(&kvm_mips_csr_die_notifier);

	/*
	 * On MIPS, kernel modules are executed from "mapped space", which
	 * requires TLBs. The TLB handling code is statically linked with
	 * the rest of the kernel (tlb.c) to avoid the possibility of
	 * double faulting. The issue is that the TLB code references
	 * routines that are part of the the KVM module, which are only
	 * available once the module is loaded.
	 */
	kvm_mips_gfn_to_pfn = gfn_to_pfn;
	kvm_mips_release_pfn_clean = kvm_release_pfn_clean;
	kvm_mips_is_error_pfn = is_error_pfn;

	return 0;
}

@ -1664,10 +1813,6 @@ static void __exit kvm_mips_exit(void)
{
	kvm_exit();

	kvm_mips_gfn_to_pfn = NULL;
	kvm_mips_release_pfn_clean = NULL;
	kvm_mips_is_error_pfn = NULL;

	unregister_die_notifier(&kvm_mips_csr_die_notifier);
}
@ -0,0 +1,375 @@
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * KVM/MIPS MMU handling in the KVM module.
 *
 * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
 * Authors: Sanjay Lal <sanjayl@kymasys.com>
 */

#include <linux/highmem.h>
#include <linux/kvm_host.h>
#include <asm/mmu_context.h>

static u32 kvm_mips_get_kernel_asid(struct kvm_vcpu *vcpu)
{
	int cpu = smp_processor_id();

	return vcpu->arch.guest_kernel_asid[cpu] &
			cpu_asid_mask(&cpu_data[cpu]);
}

static u32 kvm_mips_get_user_asid(struct kvm_vcpu *vcpu)
{
	int cpu = smp_processor_id();

	return vcpu->arch.guest_user_asid[cpu] &
			cpu_asid_mask(&cpu_data[cpu]);
}

static int kvm_mips_map_page(struct kvm *kvm, gfn_t gfn)
{
	int srcu_idx, err = 0;
	kvm_pfn_t pfn;

	if (kvm->arch.guest_pmap[gfn] != KVM_INVALID_PAGE)
		return 0;

	srcu_idx = srcu_read_lock(&kvm->srcu);
	pfn = gfn_to_pfn(kvm, gfn);

	if (is_error_pfn(pfn)) {
		kvm_err("Couldn't get pfn for gfn %#llx!\n", gfn);
		err = -EFAULT;
		goto out;
	}

	kvm->arch.guest_pmap[gfn] = pfn;
out:
	srcu_read_unlock(&kvm->srcu, srcu_idx);
	return err;
}

/* Translate guest KSEG0 addresses to Host PA */
|
||||
unsigned long kvm_mips_translate_guest_kseg0_to_hpa(struct kvm_vcpu *vcpu,
|
||||
unsigned long gva)
|
||||
{
|
||||
gfn_t gfn;
|
||||
unsigned long offset = gva & ~PAGE_MASK;
|
||||
struct kvm *kvm = vcpu->kvm;
|
||||
|
||||
if (KVM_GUEST_KSEGX(gva) != KVM_GUEST_KSEG0) {
|
||||
kvm_err("%s/%p: Invalid gva: %#lx\n", __func__,
|
||||
__builtin_return_address(0), gva);
|
||||
return KVM_INVALID_PAGE;
|
||||
}
|
||||
|
||||
gfn = (KVM_GUEST_CPHYSADDR(gva) >> PAGE_SHIFT);
|
||||
|
||||
if (gfn >= kvm->arch.guest_pmap_npages) {
|
||||
kvm_err("%s: Invalid gfn: %#llx, GVA: %#lx\n", __func__, gfn,
|
||||
gva);
|
||||
return KVM_INVALID_PAGE;
|
||||
}
|
||||
|
||||
if (kvm_mips_map_page(vcpu->kvm, gfn) < 0)
|
||||
return KVM_INVALID_ADDR;
|
||||
|
||||
return (kvm->arch.guest_pmap[gfn] << PAGE_SHIFT) + offset;
|
||||
}
|
||||
|
||||
/* XXXKYMA: Must be called with interrupts disabled */
|
||||
int kvm_mips_handle_kseg0_tlb_fault(unsigned long badvaddr,
|
||||
struct kvm_vcpu *vcpu)
|
||||
{
|
||||
gfn_t gfn;
|
||||
kvm_pfn_t pfn0, pfn1;
|
||||
unsigned long vaddr = 0;
|
||||
unsigned long entryhi = 0, entrylo0 = 0, entrylo1 = 0;
|
||||
struct kvm *kvm = vcpu->kvm;
|
||||
const int flush_dcache_mask = 0;
|
||||
int ret;
|
||||
|
||||
if (KVM_GUEST_KSEGX(badvaddr) != KVM_GUEST_KSEG0) {
|
||||
kvm_err("%s: Invalid BadVaddr: %#lx\n", __func__, badvaddr);
|
||||
kvm_mips_dump_host_tlbs();
|
||||
return -1;
|
||||
}
|
||||
|
||||
gfn = (KVM_GUEST_CPHYSADDR(badvaddr) >> PAGE_SHIFT);
|
||||
if (gfn >= kvm->arch.guest_pmap_npages) {
|
||||
kvm_err("%s: Invalid gfn: %#llx, BadVaddr: %#lx\n", __func__,
|
||||
gfn, badvaddr);
|
||||
kvm_mips_dump_host_tlbs();
|
||||
return -1;
|
||||
}
|
||||
vaddr = badvaddr & (PAGE_MASK << 1);
|
||||
|
||||
if (kvm_mips_map_page(vcpu->kvm, gfn) < 0)
|
||||
return -1;
|
||||
|
||||
if (kvm_mips_map_page(vcpu->kvm, gfn ^ 0x1) < 0)
|
||||
return -1;
|
||||
|
||||
pfn0 = kvm->arch.guest_pmap[gfn & ~0x1];
|
||||
pfn1 = kvm->arch.guest_pmap[gfn | 0x1];
|
||||
|
||||
entrylo0 = mips3_paddr_to_tlbpfn(pfn0 << PAGE_SHIFT) |
|
||||
((_page_cachable_default >> _CACHE_SHIFT) << ENTRYLO_C_SHIFT) |
|
||||
ENTRYLO_D | ENTRYLO_V;
|
||||
entrylo1 = mips3_paddr_to_tlbpfn(pfn1 << PAGE_SHIFT) |
|
||||
((_page_cachable_default >> _CACHE_SHIFT) << ENTRYLO_C_SHIFT) |
|
||||
ENTRYLO_D | ENTRYLO_V;
|
||||
|
||||
preempt_disable();
|
||||
entryhi = (vaddr | kvm_mips_get_kernel_asid(vcpu));
|
||||
ret = kvm_mips_host_tlb_write(vcpu, entryhi, entrylo0, entrylo1,
|
||||
flush_dcache_mask);
|
||||
preempt_enable();
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
int kvm_mips_handle_mapped_seg_tlb_fault(struct kvm_vcpu *vcpu,
|
||||
struct kvm_mips_tlb *tlb)
|
||||
{
|
||||
unsigned long entryhi = 0, entrylo0 = 0, entrylo1 = 0;
|
||||
struct kvm *kvm = vcpu->kvm;
|
||||
kvm_pfn_t pfn0, pfn1;
|
||||
int ret;
|
||||
|
||||
if ((tlb->tlb_hi & VPN2_MASK) == 0) {
|
||||
pfn0 = 0;
|
||||
pfn1 = 0;
|
||||
} else {
|
||||
if (kvm_mips_map_page(kvm, mips3_tlbpfn_to_paddr(tlb->tlb_lo[0])
|
||||
>> PAGE_SHIFT) < 0)
|
||||
return -1;
|
||||
|
||||
if (kvm_mips_map_page(kvm, mips3_tlbpfn_to_paddr(tlb->tlb_lo[1])
|
||||
>> PAGE_SHIFT) < 0)
|
||||
return -1;
|
||||
|
||||
pfn0 = kvm->arch.guest_pmap[
|
||||
mips3_tlbpfn_to_paddr(tlb->tlb_lo[0]) >> PAGE_SHIFT];
|
||||
pfn1 = kvm->arch.guest_pmap[
|
||||
mips3_tlbpfn_to_paddr(tlb->tlb_lo[1]) >> PAGE_SHIFT];
|
||||
}
|
||||
|
||||
/* Get attributes from the Guest TLB */
|
||||
entrylo0 = mips3_paddr_to_tlbpfn(pfn0 << PAGE_SHIFT) |
|
||||
((_page_cachable_default >> _CACHE_SHIFT) << ENTRYLO_C_SHIFT) |
|
||||
(tlb->tlb_lo[0] & ENTRYLO_D) |
|
||||
(tlb->tlb_lo[0] & ENTRYLO_V);
|
||||
entrylo1 = mips3_paddr_to_tlbpfn(pfn1 << PAGE_SHIFT) |
|
||||
((_page_cachable_default >> _CACHE_SHIFT) << ENTRYLO_C_SHIFT) |
|
||||
(tlb->tlb_lo[1] & ENTRYLO_D) |
|
||||
(tlb->tlb_lo[1] & ENTRYLO_V);
|
||||
|
||||
kvm_debug("@ %#lx tlb_lo0: 0x%08lx tlb_lo1: 0x%08lx\n", vcpu->arch.pc,
|
||||
tlb->tlb_lo[0], tlb->tlb_lo[1]);
|
||||
|
||||
preempt_disable();
|
||||
entryhi = (tlb->tlb_hi & VPN2_MASK) | (KVM_GUEST_KERNEL_MODE(vcpu) ?
|
||||
kvm_mips_get_kernel_asid(vcpu) :
|
||||
kvm_mips_get_user_asid(vcpu));
|
||||
ret = kvm_mips_host_tlb_write(vcpu, entryhi, entrylo0, entrylo1,
|
||||
tlb->tlb_mask);
|
||||
preempt_enable();
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
void kvm_get_new_mmu_context(struct mm_struct *mm, unsigned long cpu,
|
||||
struct kvm_vcpu *vcpu)
|
||||
{
|
||||
unsigned long asid = asid_cache(cpu);
|
||||
|
||||
asid += cpu_asid_inc();
|
||||
if (!(asid & cpu_asid_mask(&cpu_data[cpu]))) {
|
||||
if (cpu_has_vtag_icache)
|
||||
flush_icache_all();
|
||||
|
||||
kvm_local_flush_tlb_all(); /* start new asid cycle */
|
||||
|
||||
if (!asid) /* fix version if needed */
|
||||
asid = asid_first_version(cpu);
|
||||
}
|
||||
|
||||
cpu_context(cpu, mm) = asid_cache(cpu) = asid;
|
||||
}
|
||||
|
||||
/**
|
||||
* kvm_mips_migrate_count() - Migrate timer.
|
||||
* @vcpu: Virtual CPU.
|
||||
*
|
||||
* Migrate CP0_Count hrtimer to the current CPU by cancelling and restarting it
|
||||
* if it was running prior to being cancelled.
|
||||
*
|
||||
* Must be called when the VCPU is migrated to a different CPU to ensure that
|
||||
* timer expiry during guest execution interrupts the guest and causes the
|
||||
* interrupt to be delivered in a timely manner.
|
||||
*/
|
||||
static void kvm_mips_migrate_count(struct kvm_vcpu *vcpu)
|
||||
{
|
||||
if (hrtimer_cancel(&vcpu->arch.comparecount_timer))
|
||||
hrtimer_restart(&vcpu->arch.comparecount_timer);
|
||||
}
|
||||
|
||||
/* Restore ASID once we are scheduled back after preemption */
|
||||
void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
|
||||
{
|
||||
unsigned long asid_mask = cpu_asid_mask(&cpu_data[cpu]);
|
||||
unsigned long flags;
|
||||
int newasid = 0;
|
||||
|
||||
kvm_debug("%s: vcpu %p, cpu: %d\n", __func__, vcpu, cpu);
|
||||
|
||||
/* Allocate new kernel and user ASIDs if needed */
|
||||
|
||||
local_irq_save(flags);
|
||||
|
||||
if ((vcpu->arch.guest_kernel_asid[cpu] ^ asid_cache(cpu)) &
|
||||
asid_version_mask(cpu)) {
|
||||
kvm_get_new_mmu_context(&vcpu->arch.guest_kernel_mm, cpu, vcpu);
|
||||
vcpu->arch.guest_kernel_asid[cpu] =
|
||||
vcpu->arch.guest_kernel_mm.context.asid[cpu];
|
||||
kvm_get_new_mmu_context(&vcpu->arch.guest_user_mm, cpu, vcpu);
|
||||
vcpu->arch.guest_user_asid[cpu] =
|
||||
vcpu->arch.guest_user_mm.context.asid[cpu];
|
||||
newasid++;
|
||||
|
||||
kvm_debug("[%d]: cpu_context: %#lx\n", cpu,
|
||||
cpu_context(cpu, current->mm));
|
||||
kvm_debug("[%d]: Allocated new ASID for Guest Kernel: %#x\n",
|
||||
cpu, vcpu->arch.guest_kernel_asid[cpu]);
|
||||
kvm_debug("[%d]: Allocated new ASID for Guest User: %#x\n", cpu,
|
||||
vcpu->arch.guest_user_asid[cpu]);
|
||||
}
|
||||
|
||||
if (vcpu->arch.last_sched_cpu != cpu) {
|
||||
kvm_debug("[%d->%d]KVM VCPU[%d] switch\n",
|
||||
vcpu->arch.last_sched_cpu, cpu, vcpu->vcpu_id);
|
||||
/*
|
||||
* Migrate the timer interrupt to the current CPU so that it
|
||||
* always interrupts the guest and synchronously triggers a
|
||||
* guest timer interrupt.
|
||||
*/
|
||||
kvm_mips_migrate_count(vcpu);
|
||||
}
|
||||
|
||||
if (!newasid) {
|
||||
/*
|
||||
* If we preempted while the guest was executing, then reload
|
||||
* the pre-empted ASID
|
||||
*/
|
||||
if (current->flags & PF_VCPU) {
|
||||
write_c0_entryhi(vcpu->arch.
|
||||
preempt_entryhi & asid_mask);
|
||||
ehb();
|
||||
}
|
||||
} else {
|
||||
/* New ASIDs were allocated for the VM */
|
||||
|
||||
/*
|
||||
* Were we in guest context? If so then the pre-empted ASID is
|
||||
* no longer valid, we need to set it to what it should be based
|
||||
* on the mode of the Guest (Kernel/User)
|
||||
*/
|
||||
if (current->flags & PF_VCPU) {
|
||||
if (KVM_GUEST_KERNEL_MODE(vcpu))
|
||||
write_c0_entryhi(vcpu->arch.
|
||||
guest_kernel_asid[cpu] &
|
||||
asid_mask);
|
||||
else
|
||||
write_c0_entryhi(vcpu->arch.
|
||||
guest_user_asid[cpu] &
|
||||
asid_mask);
|
||||
ehb();
|
||||
}
|
||||
}
|
||||
|
||||
/* restore guest state to registers */
|
||||
kvm_mips_callbacks->vcpu_set_regs(vcpu);
|
||||
|
||||
local_irq_restore(flags);
|
||||
|
||||
}
|
||||
|
||||
/* ASID can change if another task is scheduled during preemption */
|
||||
void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
|
||||
{
|
||||
unsigned long flags;
|
||||
int cpu;
|
||||
|
||||
local_irq_save(flags);
|
||||
|
||||
cpu = smp_processor_id();
|
||||
|
||||
vcpu->arch.preempt_entryhi = read_c0_entryhi();
|
||||
vcpu->arch.last_sched_cpu = cpu;
|
||||
|
||||
/* save guest state in registers */
|
||||
kvm_mips_callbacks->vcpu_get_regs(vcpu);
|
||||
|
||||
if (((cpu_context(cpu, current->mm) ^ asid_cache(cpu)) &
|
||||
asid_version_mask(cpu))) {
|
||||
kvm_debug("%s: Dropping MMU Context: %#lx\n", __func__,
|
||||
cpu_context(cpu, current->mm));
|
||||
drop_mmu_context(current->mm, cpu);
|
||||
}
|
||||
write_c0_entryhi(cpu_asid(cpu, current->mm));
|
||||
ehb();
|
||||
|
||||
local_irq_restore(flags);
|
||||
}
|
||||
|
||||
u32 kvm_get_inst(u32 *opc, struct kvm_vcpu *vcpu)
|
||||
{
|
||||
struct mips_coproc *cop0 = vcpu->arch.cop0;
|
||||
unsigned long paddr, flags, vpn2, asid;
|
||||
unsigned long va = (unsigned long)opc;
|
||||
void *vaddr;
|
||||
u32 inst;
|
||||
int index;
|
||||
|
||||
if (KVM_GUEST_KSEGX(va) < KVM_GUEST_KSEG0 ||
|
||||
KVM_GUEST_KSEGX(va) == KVM_GUEST_KSEG23) {
|
||||
local_irq_save(flags);
|
||||
index = kvm_mips_host_tlb_lookup(vcpu, va);
|
||||
if (index >= 0) {
|
||||
inst = *(opc);
|
||||
} else {
|
||||
vpn2 = va & VPN2_MASK;
|
||||
asid = kvm_read_c0_guest_entryhi(cop0) &
|
||||
KVM_ENTRYHI_ASID;
|
||||
index = kvm_mips_guest_tlb_lookup(vcpu, vpn2 | asid);
|
||||
if (index < 0) {
|
||||
kvm_err("%s: get_user_failed for %p, vcpu: %p, ASID: %#lx\n",
|
||||
__func__, opc, vcpu, read_c0_entryhi());
|
||||
kvm_mips_dump_host_tlbs();
|
||||
kvm_mips_dump_guest_tlbs(vcpu);
|
||||
local_irq_restore(flags);
|
||||
return KVM_INVALID_INST;
|
||||
}
|
||||
kvm_mips_handle_mapped_seg_tlb_fault(vcpu,
|
||||
&vcpu->arch.
|
||||
guest_tlb[index]);
|
||||
inst = *(opc);
|
||||
}
|
||||
local_irq_restore(flags);
|
||||
} else if (KVM_GUEST_KSEGX(va) == KVM_GUEST_KSEG0) {
|
||||
paddr = kvm_mips_translate_guest_kseg0_to_hpa(vcpu, va);
|
||||
vaddr = kmap_atomic(pfn_to_page(PHYS_PFN(paddr)));
|
||||
vaddr += paddr & ~PAGE_MASK;
|
||||
inst = *(u32 *)vaddr;
|
||||
kunmap_atomic(vaddr);
|
||||
} else {
|
||||
kvm_err("%s: illegal address: %p\n", __func__, opc);
|
||||
return KVM_INVALID_INST;
|
||||
}
|
||||
|
||||
return inst;
|
||||
}
@ -11,27 +11,6 @@

#include <linux/kvm_host.h>

char *kvm_mips_exit_types_str[MAX_KVM_MIPS_EXIT_TYPES] = {
	"WAIT",
	"CACHE",
	"Signal",
	"Interrupt",
	"COP0/1 Unusable",
	"TLB Mod",
	"TLB Miss (LD)",
	"TLB Miss (ST)",
	"Address Err (ST)",
	"Address Error (LD)",
	"System Call",
	"Reserved Inst",
	"Break Inst",
	"Trap Inst",
	"MSA FPE",
	"FPE",
	"MSA Disabled",
	"D-Cache Flushes",
};

char *kvm_cop0_str[N_MIPS_COPROC_REGS] = {
	"Index",
	"Random",
@ -14,7 +14,7 @@
|
|||
#include <linux/smp.h>
|
||||
#include <linux/mm.h>
|
||||
#include <linux/delay.h>
|
||||
#include <linux/module.h>
|
||||
#include <linux/export.h>
|
||||
#include <linux/kvm_host.h>
|
||||
#include <linux/srcu.h>
|
||||
|
||||
|
@ -24,6 +24,7 @@
|
|||
#include <asm/pgtable.h>
|
||||
#include <asm/cacheflush.h>
|
||||
#include <asm/tlb.h>
|
||||
#include <asm/tlbdebug.h>
|
||||
|
||||
#undef CONFIG_MIPS_MT
|
||||
#include <asm/r4kcache.h>
|
||||
|
@ -32,22 +33,10 @@
|
|||
#define KVM_GUEST_PC_TLB 0
|
||||
#define KVM_GUEST_SP_TLB 1
|
||||
|
||||
#define PRIx64 "llx"
|
||||
|
||||
atomic_t kvm_mips_instance;
|
||||
EXPORT_SYMBOL_GPL(kvm_mips_instance);
|
||||
|
||||
/* These function pointers are initialized once the KVM module is loaded */
|
||||
kvm_pfn_t (*kvm_mips_gfn_to_pfn)(struct kvm *kvm, gfn_t gfn);
|
||||
EXPORT_SYMBOL_GPL(kvm_mips_gfn_to_pfn);
|
||||
|
||||
void (*kvm_mips_release_pfn_clean)(kvm_pfn_t pfn);
|
||||
EXPORT_SYMBOL_GPL(kvm_mips_release_pfn_clean);
|
||||
|
||||
bool (*kvm_mips_is_error_pfn)(kvm_pfn_t pfn);
|
||||
EXPORT_SYMBOL_GPL(kvm_mips_is_error_pfn);
|
||||
|
||||
uint32_t kvm_mips_get_kernel_asid(struct kvm_vcpu *vcpu)
|
||||
static u32 kvm_mips_get_kernel_asid(struct kvm_vcpu *vcpu)
|
||||
{
|
||||
int cpu = smp_processor_id();
|
||||
|
||||
|
@ -55,7 +44,7 @@ uint32_t kvm_mips_get_kernel_asid(struct kvm_vcpu *vcpu)
|
|||
cpu_asid_mask(&cpu_data[cpu]);
|
||||
}
|
||||
|
||||
uint32_t kvm_mips_get_user_asid(struct kvm_vcpu *vcpu)
|
||||
static u32 kvm_mips_get_user_asid(struct kvm_vcpu *vcpu)
|
||||
{
|
||||
int cpu = smp_processor_id();
|
||||
|
||||
|
@ -63,7 +52,7 @@ uint32_t kvm_mips_get_user_asid(struct kvm_vcpu *vcpu)
|
|||
cpu_asid_mask(&cpu_data[cpu]);
|
||||
}
|
||||
|
||||
inline uint32_t kvm_mips_get_commpage_asid(struct kvm_vcpu *vcpu)
|
||||
inline u32 kvm_mips_get_commpage_asid(struct kvm_vcpu *vcpu)
|
||||
{
|
||||
return vcpu->kvm->arch.commpage_tlb;
|
||||
}
|
||||
|
@ -72,50 +61,15 @@ inline uint32_t kvm_mips_get_commpage_asid(struct kvm_vcpu *vcpu)
|
|||
|
||||
void kvm_mips_dump_host_tlbs(void)
|
||||
{
|
||||
unsigned long old_entryhi;
|
||||
unsigned long old_pagemask;
|
||||
struct kvm_mips_tlb tlb;
|
||||
unsigned long flags;
|
||||
int i;
|
||||
|
||||
local_irq_save(flags);
|
||||
|
||||
old_entryhi = read_c0_entryhi();
|
||||
old_pagemask = read_c0_pagemask();
|
||||
|
||||
kvm_info("HOST TLBs:\n");
|
||||
kvm_info("ASID: %#lx\n", read_c0_entryhi() &
|
||||
cpu_asid_mask(¤t_cpu_data));
|
||||
dump_tlb_regs();
|
||||
pr_info("\n");
|
||||
dump_tlb_all();
|
||||
|
||||
for (i = 0; i < current_cpu_data.tlbsize; i++) {
|
||||
write_c0_index(i);
|
||||
mtc0_tlbw_hazard();
|
||||
|
||||
tlb_read();
|
||||
tlbw_use_hazard();
|
||||
|
||||
tlb.tlb_hi = read_c0_entryhi();
|
||||
tlb.tlb_lo0 = read_c0_entrylo0();
|
||||
tlb.tlb_lo1 = read_c0_entrylo1();
|
||||
tlb.tlb_mask = read_c0_pagemask();
|
||||
|
||||
kvm_info("TLB%c%3d Hi 0x%08lx ",
|
||||
(tlb.tlb_lo0 | tlb.tlb_lo1) & MIPS3_PG_V ? ' ' : '*',
|
||||
i, tlb.tlb_hi);
|
||||
kvm_info("Lo0=0x%09" PRIx64 " %c%c attr %lx ",
|
||||
(uint64_t) mips3_tlbpfn_to_paddr(tlb.tlb_lo0),
|
||||
(tlb.tlb_lo0 & MIPS3_PG_D) ? 'D' : ' ',
|
||||
(tlb.tlb_lo0 & MIPS3_PG_G) ? 'G' : ' ',
|
||||
(tlb.tlb_lo0 >> 3) & 7);
|
||||
kvm_info("Lo1=0x%09" PRIx64 " %c%c attr %lx sz=%lx\n",
|
||||
(uint64_t) mips3_tlbpfn_to_paddr(tlb.tlb_lo1),
|
||||
(tlb.tlb_lo1 & MIPS3_PG_D) ? 'D' : ' ',
|
||||
(tlb.tlb_lo1 & MIPS3_PG_G) ? 'G' : ' ',
|
||||
(tlb.tlb_lo1 >> 3) & 7, tlb.tlb_mask);
|
||||
}
|
||||
write_c0_entryhi(old_entryhi);
|
||||
write_c0_pagemask(old_pagemask);
|
||||
mtc0_tlbw_hazard();
|
||||
local_irq_restore(flags);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(kvm_mips_dump_host_tlbs);
|
||||
|
@ -132,74 +86,24 @@ void kvm_mips_dump_guest_tlbs(struct kvm_vcpu *vcpu)
|
|||
for (i = 0; i < KVM_MIPS_GUEST_TLB_SIZE; i++) {
|
||||
tlb = vcpu->arch.guest_tlb[i];
|
||||
kvm_info("TLB%c%3d Hi 0x%08lx ",
|
||||
(tlb.tlb_lo0 | tlb.tlb_lo1) & MIPS3_PG_V ? ' ' : '*',
|
||||
(tlb.tlb_lo[0] | tlb.tlb_lo[1]) & ENTRYLO_V
|
||||
? ' ' : '*',
|
||||
i, tlb.tlb_hi);
|
||||
kvm_info("Lo0=0x%09" PRIx64 " %c%c attr %lx ",
|
||||
(uint64_t) mips3_tlbpfn_to_paddr(tlb.tlb_lo0),
|
||||
(tlb.tlb_lo0 & MIPS3_PG_D) ? 'D' : ' ',
|
||||
(tlb.tlb_lo0 & MIPS3_PG_G) ? 'G' : ' ',
|
||||
(tlb.tlb_lo0 >> 3) & 7);
|
||||
kvm_info("Lo1=0x%09" PRIx64 " %c%c attr %lx sz=%lx\n",
|
||||
(uint64_t) mips3_tlbpfn_to_paddr(tlb.tlb_lo1),
|
||||
(tlb.tlb_lo1 & MIPS3_PG_D) ? 'D' : ' ',
|
||||
(tlb.tlb_lo1 & MIPS3_PG_G) ? 'G' : ' ',
|
||||
(tlb.tlb_lo1 >> 3) & 7, tlb.tlb_mask);
|
||||
kvm_info("Lo0=0x%09llx %c%c attr %lx ",
|
||||
(u64) mips3_tlbpfn_to_paddr(tlb.tlb_lo[0]),
|
||||
(tlb.tlb_lo[0] & ENTRYLO_D) ? 'D' : ' ',
|
||||
(tlb.tlb_lo[0] & ENTRYLO_G) ? 'G' : ' ',
|
||||
(tlb.tlb_lo[0] & ENTRYLO_C) >> ENTRYLO_C_SHIFT);
|
||||
kvm_info("Lo1=0x%09llx %c%c attr %lx sz=%lx\n",
|
||||
(u64) mips3_tlbpfn_to_paddr(tlb.tlb_lo[1]),
|
||||
(tlb.tlb_lo[1] & ENTRYLO_D) ? 'D' : ' ',
|
||||
(tlb.tlb_lo[1] & ENTRYLO_G) ? 'G' : ' ',
|
||||
(tlb.tlb_lo[1] & ENTRYLO_C) >> ENTRYLO_C_SHIFT,
|
||||
tlb.tlb_mask);
|
||||
}
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(kvm_mips_dump_guest_tlbs);
|
||||
|
||||
static int kvm_mips_map_page(struct kvm *kvm, gfn_t gfn)
|
||||
{
|
||||
int srcu_idx, err = 0;
|
||||
kvm_pfn_t pfn;
|
||||
|
||||
if (kvm->arch.guest_pmap[gfn] != KVM_INVALID_PAGE)
|
||||
return 0;
|
||||
|
||||
srcu_idx = srcu_read_lock(&kvm->srcu);
|
||||
pfn = kvm_mips_gfn_to_pfn(kvm, gfn);
|
||||
|
||||
if (kvm_mips_is_error_pfn(pfn)) {
|
||||
kvm_err("Couldn't get pfn for gfn %#" PRIx64 "!\n", gfn);
|
||||
err = -EFAULT;
|
||||
goto out;
|
||||
}
|
||||
|
||||
kvm->arch.guest_pmap[gfn] = pfn;
|
||||
out:
|
||||
srcu_read_unlock(&kvm->srcu, srcu_idx);
|
||||
return err;
|
||||
}
|
||||
|
||||
/* Translate guest KSEG0 addresses to Host PA */
|
||||
unsigned long kvm_mips_translate_guest_kseg0_to_hpa(struct kvm_vcpu *vcpu,
|
||||
unsigned long gva)
|
||||
{
|
||||
gfn_t gfn;
|
||||
uint32_t offset = gva & ~PAGE_MASK;
|
||||
struct kvm *kvm = vcpu->kvm;
|
||||
|
||||
if (KVM_GUEST_KSEGX(gva) != KVM_GUEST_KSEG0) {
|
||||
kvm_err("%s/%p: Invalid gva: %#lx\n", __func__,
|
||||
__builtin_return_address(0), gva);
|
||||
return KVM_INVALID_PAGE;
|
||||
}
|
||||
|
||||
gfn = (KVM_GUEST_CPHYSADDR(gva) >> PAGE_SHIFT);
|
||||
|
||||
if (gfn >= kvm->arch.guest_pmap_npages) {
|
||||
kvm_err("%s: Invalid gfn: %#llx, GVA: %#lx\n", __func__, gfn,
|
||||
gva);
|
||||
return KVM_INVALID_PAGE;
|
||||
}
|
||||
|
||||
if (kvm_mips_map_page(vcpu->kvm, gfn) < 0)
|
||||
return KVM_INVALID_ADDR;
|
||||
|
||||
return (kvm->arch.guest_pmap[gfn] << PAGE_SHIFT) + offset;
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(kvm_mips_translate_guest_kseg0_to_hpa);
|
||||
|
||||
/* XXXKYMA: Must be called with interrupts disabled */
|
||||
/* set flush_dcache_mask == 0 if no dcache flush required */
|
||||
int kvm_mips_host_tlb_write(struct kvm_vcpu *vcpu, unsigned long entryhi,
|
||||
|
@ -243,12 +147,12 @@ int kvm_mips_host_tlb_write(struct kvm_vcpu *vcpu, unsigned long entryhi,
|
|||
|
||||
/* Flush D-cache */
|
||||
if (flush_dcache_mask) {
|
||||
if (entrylo0 & MIPS3_PG_V) {
|
||||
if (entrylo0 & ENTRYLO_V) {
|
||||
++vcpu->stat.flush_dcache_exits;
|
||||
flush_data_cache_page((entryhi & VPN2_MASK) &
|
||||
~flush_dcache_mask);
|
||||
}
|
||||
if (entrylo1 & MIPS3_PG_V) {
|
||||
if (entrylo1 & ENTRYLO_V) {
|
||||
++vcpu->stat.flush_dcache_exits;
|
||||
flush_data_cache_page(((entryhi & VPN2_MASK) &
|
||||
~flush_dcache_mask) |
|
||||
|
@@ -259,96 +163,35 @@ int kvm_mips_host_tlb_write(struct kvm_vcpu *vcpu, unsigned long entryhi,
    /* Restore old ASID */
    write_c0_entryhi(old_entryhi);
    mtc0_tlbw_hazard();
    tlbw_use_hazard();
    local_irq_restore(flags);
    return 0;
}

/* XXXKYMA: Must be called with interrupts disabled */
int kvm_mips_handle_kseg0_tlb_fault(unsigned long badvaddr,
    struct kvm_vcpu *vcpu)
{
    gfn_t gfn;
    kvm_pfn_t pfn0, pfn1;
    unsigned long vaddr = 0;
    unsigned long entryhi = 0, entrylo0 = 0, entrylo1 = 0;
    int even;
    struct kvm *kvm = vcpu->kvm;
    const int flush_dcache_mask = 0;
    int ret;

    if (KVM_GUEST_KSEGX(badvaddr) != KVM_GUEST_KSEG0) {
        kvm_err("%s: Invalid BadVaddr: %#lx\n", __func__, badvaddr);
        kvm_mips_dump_host_tlbs();
        return -1;
    }

    gfn = (KVM_GUEST_CPHYSADDR(badvaddr) >> PAGE_SHIFT);
    if (gfn >= kvm->arch.guest_pmap_npages) {
        kvm_err("%s: Invalid gfn: %#llx, BadVaddr: %#lx\n", __func__,
            gfn, badvaddr);
        kvm_mips_dump_host_tlbs();
        return -1;
    }
    even = !(gfn & 0x1);
    vaddr = badvaddr & (PAGE_MASK << 1);

    if (kvm_mips_map_page(vcpu->kvm, gfn) < 0)
        return -1;

    if (kvm_mips_map_page(vcpu->kvm, gfn ^ 0x1) < 0)
        return -1;

    if (even) {
        pfn0 = kvm->arch.guest_pmap[gfn];
        pfn1 = kvm->arch.guest_pmap[gfn ^ 0x1];
    } else {
        pfn0 = kvm->arch.guest_pmap[gfn ^ 0x1];
        pfn1 = kvm->arch.guest_pmap[gfn];
    }

    entrylo0 = mips3_paddr_to_tlbpfn(pfn0 << PAGE_SHIFT) | (0x3 << 3) |
        (1 << 2) | (0x1 << 1);
    entrylo1 = mips3_paddr_to_tlbpfn(pfn1 << PAGE_SHIFT) | (0x3 << 3) |
        (1 << 2) | (0x1 << 1);

    preempt_disable();
    entryhi = (vaddr | kvm_mips_get_kernel_asid(vcpu));
    ret = kvm_mips_host_tlb_write(vcpu, entryhi, entrylo0, entrylo1,
        flush_dcache_mask);
    preempt_enable();

    return ret;
}
EXPORT_SYMBOL_GPL(kvm_mips_handle_kseg0_tlb_fault);
EXPORT_SYMBOL_GPL(kvm_mips_host_tlb_write);

int kvm_mips_handle_commpage_tlb_fault(unsigned long badvaddr,
    struct kvm_vcpu *vcpu)
{
    kvm_pfn_t pfn0, pfn1;
    kvm_pfn_t pfn;
    unsigned long flags, old_entryhi = 0, vaddr = 0;
    unsigned long entrylo0 = 0, entrylo1 = 0;
    unsigned long entrylo[2] = { 0, 0 };
    unsigned int pair_idx;

    pfn0 = CPHYSADDR(vcpu->arch.kseg0_commpage) >> PAGE_SHIFT;
    pfn1 = 0;
    entrylo0 = mips3_paddr_to_tlbpfn(pfn0 << PAGE_SHIFT) | (0x3 << 3) |
        (1 << 2) | (0x1 << 1);
    entrylo1 = 0;
    pfn = PFN_DOWN(virt_to_phys(vcpu->arch.kseg0_commpage));
    pair_idx = (badvaddr >> PAGE_SHIFT) & 1;
    entrylo[pair_idx] = mips3_paddr_to_tlbpfn(pfn << PAGE_SHIFT) |
        ((_page_cachable_default >> _CACHE_SHIFT) << ENTRYLO_C_SHIFT) |
        ENTRYLO_D | ENTRYLO_V;

    local_irq_save(flags);

    old_entryhi = read_c0_entryhi();
    vaddr = badvaddr & (PAGE_MASK << 1);
    write_c0_entryhi(vaddr | kvm_mips_get_kernel_asid(vcpu));
    mtc0_tlbw_hazard();
    write_c0_entrylo0(entrylo0);
    mtc0_tlbw_hazard();
    write_c0_entrylo1(entrylo1);
    mtc0_tlbw_hazard();
    write_c0_entrylo0(entrylo[0]);
    write_c0_entrylo1(entrylo[1]);
    write_c0_index(kvm_mips_get_commpage_asid(vcpu));
    mtc0_tlbw_hazard();
    tlb_write_indexed();
    mtc0_tlbw_hazard();
    tlbw_use_hazard();

    kvm_debug("@ %#lx idx: %2d [entryhi(R): %#lx] entrylo0 (R): 0x%08lx, entrylo1(R): 0x%08lx\n",
@@ -358,68 +201,12 @@ int kvm_mips_handle_commpage_tlb_fault(unsigned long badvaddr,
    /* Restore old ASID */
    write_c0_entryhi(old_entryhi);
    mtc0_tlbw_hazard();
    tlbw_use_hazard();
    local_irq_restore(flags);

    return 0;
}
EXPORT_SYMBOL_GPL(kvm_mips_handle_commpage_tlb_fault);

int kvm_mips_handle_mapped_seg_tlb_fault(struct kvm_vcpu *vcpu,
    struct kvm_mips_tlb *tlb,
    unsigned long *hpa0,
    unsigned long *hpa1)
{
    unsigned long entryhi = 0, entrylo0 = 0, entrylo1 = 0;
    struct kvm *kvm = vcpu->kvm;
    kvm_pfn_t pfn0, pfn1;
    int ret;

    if ((tlb->tlb_hi & VPN2_MASK) == 0) {
        pfn0 = 0;
        pfn1 = 0;
    } else {
        if (kvm_mips_map_page(kvm, mips3_tlbpfn_to_paddr(tlb->tlb_lo0)
                >> PAGE_SHIFT) < 0)
            return -1;

        if (kvm_mips_map_page(kvm, mips3_tlbpfn_to_paddr(tlb->tlb_lo1)
                >> PAGE_SHIFT) < 0)
            return -1;

        pfn0 = kvm->arch.guest_pmap[mips3_tlbpfn_to_paddr(tlb->tlb_lo0)
            >> PAGE_SHIFT];
        pfn1 = kvm->arch.guest_pmap[mips3_tlbpfn_to_paddr(tlb->tlb_lo1)
            >> PAGE_SHIFT];
    }

    if (hpa0)
        *hpa0 = pfn0 << PAGE_SHIFT;

    if (hpa1)
        *hpa1 = pfn1 << PAGE_SHIFT;

    /* Get attributes from the Guest TLB */
    entrylo0 = mips3_paddr_to_tlbpfn(pfn0 << PAGE_SHIFT) | (0x3 << 3) |
        (tlb->tlb_lo0 & MIPS3_PG_D) | (tlb->tlb_lo0 & MIPS3_PG_V);
    entrylo1 = mips3_paddr_to_tlbpfn(pfn1 << PAGE_SHIFT) | (0x3 << 3) |
        (tlb->tlb_lo1 & MIPS3_PG_D) | (tlb->tlb_lo1 & MIPS3_PG_V);

    kvm_debug("@ %#lx tlb_lo0: 0x%08lx tlb_lo1: 0x%08lx\n", vcpu->arch.pc,
        tlb->tlb_lo0, tlb->tlb_lo1);

    preempt_disable();
    entryhi = (tlb->tlb_hi & VPN2_MASK) | (KVM_GUEST_KERNEL_MODE(vcpu) ?
        kvm_mips_get_kernel_asid(vcpu) :
        kvm_mips_get_user_asid(vcpu));
    ret = kvm_mips_host_tlb_write(vcpu, entryhi, entrylo0, entrylo1,
        tlb->tlb_mask);
    preempt_enable();

    return ret;
}
EXPORT_SYMBOL_GPL(kvm_mips_handle_mapped_seg_tlb_fault);

int kvm_mips_guest_tlb_lookup(struct kvm_vcpu *vcpu, unsigned long entryhi)
{
    int i;
@@ -435,7 +222,7 @@ int kvm_mips_guest_tlb_lookup(struct kvm_vcpu *vcpu, unsigned long entryhi)
    }

    kvm_debug("%s: entryhi: %#lx, index: %d lo0: %#lx, lo1: %#lx\n",
        __func__, entryhi, index, tlb[i].tlb_lo0, tlb[i].tlb_lo1);
        __func__, entryhi, index, tlb[i].tlb_lo[0], tlb[i].tlb_lo[1]);

    return index;
}
@@ -467,7 +254,6 @@ int kvm_mips_host_tlb_lookup(struct kvm_vcpu *vcpu, unsigned long vaddr)
    /* Restore old ASID */
    write_c0_entryhi(old_entryhi);
    mtc0_tlbw_hazard();
    tlbw_use_hazard();

    local_irq_restore(flags);
@@ -498,21 +284,16 @@ int kvm_mips_host_tlb_inv(struct kvm_vcpu *vcpu, unsigned long va)

    if (idx > 0) {
        write_c0_entryhi(UNIQUE_ENTRYHI(idx));
        mtc0_tlbw_hazard();

        write_c0_entrylo0(0);
        mtc0_tlbw_hazard();

        write_c0_entrylo1(0);
        mtc0_tlbw_hazard();

        tlb_write_indexed();
        mtc0_tlbw_hazard();
        tlbw_use_hazard();
    }

    write_c0_entryhi(old_entryhi);
    mtc0_tlbw_hazard();
    tlbw_use_hazard();

    local_irq_restore(flags);
@@ -540,61 +321,39 @@ void kvm_mips_flush_host_tlb(int skip_kseg0)
    /* Blast 'em all away. */
    for (entry = 0; entry < maxentry; entry++) {
        write_c0_index(entry);
        mtc0_tlbw_hazard();

        if (skip_kseg0) {
            mtc0_tlbr_hazard();
            tlb_read();
            tlbw_use_hazard();
            tlb_read_hazard();

            entryhi = read_c0_entryhi();

            /* Don't blow away guest kernel entries */
            if (KVM_GUEST_KSEGX(entryhi) == KVM_GUEST_KSEG0)
                continue;

            write_c0_pagemask(old_pagemask);
        }

        /* Make sure all entries differ. */
        write_c0_entryhi(UNIQUE_ENTRYHI(entry));
        mtc0_tlbw_hazard();
        write_c0_entrylo0(0);
        mtc0_tlbw_hazard();
        write_c0_entrylo1(0);
        mtc0_tlbw_hazard();

        tlb_write_indexed();
        mtc0_tlbw_hazard();
        tlbw_use_hazard();
    }

    tlbw_use_hazard();

    write_c0_entryhi(old_entryhi);
    write_c0_pagemask(old_pagemask);
    mtc0_tlbw_hazard();
    tlbw_use_hazard();

    local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(kvm_mips_flush_host_tlb);

void kvm_get_new_mmu_context(struct mm_struct *mm, unsigned long cpu,
    struct kvm_vcpu *vcpu)
{
    unsigned long asid = asid_cache(cpu);

    asid += cpu_asid_inc();
    if (!(asid & cpu_asid_mask(&cpu_data[cpu]))) {
        if (cpu_has_vtag_icache)
            flush_icache_all();

        kvm_local_flush_tlb_all(); /* start new asid cycle */

        if (!asid) /* fix version if needed */
            asid = asid_first_version(cpu);
    }

    cpu_context(cpu, mm) = asid_cache(cpu) = asid;
}

void kvm_local_flush_tlb_all(void)
{
    unsigned long flags;
@@ -614,185 +373,12 @@ void kvm_local_flush_tlb_all(void)
        write_c0_index(entry);
        mtc0_tlbw_hazard();
        tlb_write_indexed();
        tlbw_use_hazard();
        entry++;
    }
    tlbw_use_hazard();
    write_c0_entryhi(old_ctx);
    mtc0_tlbw_hazard();

    local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(kvm_local_flush_tlb_all);

/**
 * kvm_mips_migrate_count() - Migrate timer.
 * @vcpu: Virtual CPU.
 *
 * Migrate CP0_Count hrtimer to the current CPU by cancelling and restarting it
 * if it was running prior to being cancelled.
 *
 * Must be called when the VCPU is migrated to a different CPU to ensure that
 * timer expiry during guest execution interrupts the guest and causes the
 * interrupt to be delivered in a timely manner.
 */
static void kvm_mips_migrate_count(struct kvm_vcpu *vcpu)
{
    if (hrtimer_cancel(&vcpu->arch.comparecount_timer))
        hrtimer_restart(&vcpu->arch.comparecount_timer);
}

/* Restore ASID once we are scheduled back after preemption */
void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
    unsigned long asid_mask = cpu_asid_mask(&cpu_data[cpu]);
    unsigned long flags;
    int newasid = 0;

    kvm_debug("%s: vcpu %p, cpu: %d\n", __func__, vcpu, cpu);

    /* Allocate new kernel and user ASIDs if needed */

    local_irq_save(flags);

    if ((vcpu->arch.guest_kernel_asid[cpu] ^ asid_cache(cpu)) &
            asid_version_mask(cpu)) {
        kvm_get_new_mmu_context(&vcpu->arch.guest_kernel_mm, cpu, vcpu);
        vcpu->arch.guest_kernel_asid[cpu] =
            vcpu->arch.guest_kernel_mm.context.asid[cpu];
        kvm_get_new_mmu_context(&vcpu->arch.guest_user_mm, cpu, vcpu);
        vcpu->arch.guest_user_asid[cpu] =
            vcpu->arch.guest_user_mm.context.asid[cpu];
        newasid++;

        kvm_debug("[%d]: cpu_context: %#lx\n", cpu,
            cpu_context(cpu, current->mm));
        kvm_debug("[%d]: Allocated new ASID for Guest Kernel: %#x\n",
            cpu, vcpu->arch.guest_kernel_asid[cpu]);
        kvm_debug("[%d]: Allocated new ASID for Guest User: %#x\n", cpu,
            vcpu->arch.guest_user_asid[cpu]);
    }

    if (vcpu->arch.last_sched_cpu != cpu) {
        kvm_debug("[%d->%d]KVM VCPU[%d] switch\n",
            vcpu->arch.last_sched_cpu, cpu, vcpu->vcpu_id);
        /*
         * Migrate the timer interrupt to the current CPU so that it
         * always interrupts the guest and synchronously triggers a
         * guest timer interrupt.
         */
        kvm_mips_migrate_count(vcpu);
    }

    if (!newasid) {
        /*
         * If we preempted while the guest was executing, then reload
         * the pre-empted ASID
         */
        if (current->flags & PF_VCPU) {
            write_c0_entryhi(vcpu->arch.
                preempt_entryhi & asid_mask);
            ehb();
        }
    } else {
        /* New ASIDs were allocated for the VM */

        /*
         * Were we in guest context? If so then the pre-empted ASID is
         * no longer valid, we need to set it to what it should be based
         * on the mode of the Guest (Kernel/User)
         */
        if (current->flags & PF_VCPU) {
            if (KVM_GUEST_KERNEL_MODE(vcpu))
                write_c0_entryhi(vcpu->arch.
                    guest_kernel_asid[cpu] &
                    asid_mask);
            else
                write_c0_entryhi(vcpu->arch.
                    guest_user_asid[cpu] &
                    asid_mask);
            ehb();
        }
    }

    /* restore guest state to registers */
    kvm_mips_callbacks->vcpu_set_regs(vcpu);

    local_irq_restore(flags);

}
EXPORT_SYMBOL_GPL(kvm_arch_vcpu_load);
/* ASID can change if another task is scheduled during preemption */
void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
    unsigned long flags;
    uint32_t cpu;

    local_irq_save(flags);

    cpu = smp_processor_id();

    vcpu->arch.preempt_entryhi = read_c0_entryhi();
    vcpu->arch.last_sched_cpu = cpu;

    /* save guest state in registers */
    kvm_mips_callbacks->vcpu_get_regs(vcpu);

    if (((cpu_context(cpu, current->mm) ^ asid_cache(cpu)) &
            asid_version_mask(cpu))) {
        kvm_debug("%s: Dropping MMU Context: %#lx\n", __func__,
            cpu_context(cpu, current->mm));
        drop_mmu_context(current->mm, cpu);
    }
    write_c0_entryhi(cpu_asid(cpu, current->mm));
    ehb();

    local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(kvm_arch_vcpu_put);

uint32_t kvm_get_inst(uint32_t *opc, struct kvm_vcpu *vcpu)
{
    struct mips_coproc *cop0 = vcpu->arch.cop0;
    unsigned long paddr, flags, vpn2, asid;
    uint32_t inst;
    int index;

    if (KVM_GUEST_KSEGX((unsigned long) opc) < KVM_GUEST_KSEG0 ||
        KVM_GUEST_KSEGX((unsigned long) opc) == KVM_GUEST_KSEG23) {
        local_irq_save(flags);
        index = kvm_mips_host_tlb_lookup(vcpu, (unsigned long) opc);
        if (index >= 0) {
            inst = *(opc);
        } else {
            vpn2 = (unsigned long) opc & VPN2_MASK;
            asid = kvm_read_c0_guest_entryhi(cop0) &
                KVM_ENTRYHI_ASID;
            index = kvm_mips_guest_tlb_lookup(vcpu, vpn2 | asid);
            if (index < 0) {
                kvm_err("%s: get_user_failed for %p, vcpu: %p, ASID: %#lx\n",
                    __func__, opc, vcpu, read_c0_entryhi());
                kvm_mips_dump_host_tlbs();
                local_irq_restore(flags);
                return KVM_INVALID_INST;
            }
            kvm_mips_handle_mapped_seg_tlb_fault(vcpu,
                &vcpu->arch.
                guest_tlb[index],
                NULL, NULL);
            inst = *(opc);
        }
        local_irq_restore(flags);
    } else if (KVM_GUEST_KSEGX(opc) == KVM_GUEST_KSEG0) {
        paddr =
            kvm_mips_translate_guest_kseg0_to_hpa(vcpu,
                (unsigned long) opc);
        inst = *(uint32_t *) CKSEG0ADDR(paddr);
    } else {
        kvm_err("%s: illegal address: %p\n", __func__, opc);
        return KVM_INVALID_INST;
    }

    return inst;
}
EXPORT_SYMBOL_GPL(kvm_get_inst);
@@ -17,8 +17,75 @@
#define TRACE_INCLUDE_PATH .
#define TRACE_INCLUDE_FILE trace

/* Tracepoints for VM eists */
extern char *kvm_mips_exit_types_str[MAX_KVM_MIPS_EXIT_TYPES];
/*
 * Tracepoints for VM enters
 */
DECLARE_EVENT_CLASS(kvm_transition,
    TP_PROTO(struct kvm_vcpu *vcpu),
    TP_ARGS(vcpu),
    TP_STRUCT__entry(
        __field(unsigned long, pc)
    ),

    TP_fast_assign(
        __entry->pc = vcpu->arch.pc;
    ),

    TP_printk("PC: 0x%08lx",
        __entry->pc)
);

DEFINE_EVENT(kvm_transition, kvm_enter,
    TP_PROTO(struct kvm_vcpu *vcpu),
    TP_ARGS(vcpu));

DEFINE_EVENT(kvm_transition, kvm_reenter,
    TP_PROTO(struct kvm_vcpu *vcpu),
    TP_ARGS(vcpu));

DEFINE_EVENT(kvm_transition, kvm_out,
    TP_PROTO(struct kvm_vcpu *vcpu),
    TP_ARGS(vcpu));

/* The first 32 exit reasons correspond to Cause.ExcCode */
#define KVM_TRACE_EXIT_INT 0
#define KVM_TRACE_EXIT_TLBMOD 1
#define KVM_TRACE_EXIT_TLBMISS_LD 2
#define KVM_TRACE_EXIT_TLBMISS_ST 3
#define KVM_TRACE_EXIT_ADDRERR_LD 4
#define KVM_TRACE_EXIT_ADDRERR_ST 5
#define KVM_TRACE_EXIT_SYSCALL 8
#define KVM_TRACE_EXIT_BREAK_INST 9
#define KVM_TRACE_EXIT_RESVD_INST 10
#define KVM_TRACE_EXIT_COP_UNUSABLE 11
#define KVM_TRACE_EXIT_TRAP_INST 13
#define KVM_TRACE_EXIT_MSA_FPE 14
#define KVM_TRACE_EXIT_FPE 15
#define KVM_TRACE_EXIT_MSA_DISABLED 21
/* Further exit reasons */
#define KVM_TRACE_EXIT_WAIT 32
#define KVM_TRACE_EXIT_CACHE 33
#define KVM_TRACE_EXIT_SIGNAL 34

/* Tracepoints for VM exits */
#define kvm_trace_symbol_exit_types \
    { KVM_TRACE_EXIT_INT, "Interrupt" }, \
    { KVM_TRACE_EXIT_TLBMOD, "TLB Mod" }, \
    { KVM_TRACE_EXIT_TLBMISS_LD, "TLB Miss (LD)" }, \
    { KVM_TRACE_EXIT_TLBMISS_ST, "TLB Miss (ST)" }, \
    { KVM_TRACE_EXIT_ADDRERR_LD, "Address Error (LD)" }, \
    { KVM_TRACE_EXIT_ADDRERR_ST, "Address Err (ST)" }, \
    { KVM_TRACE_EXIT_SYSCALL, "System Call" }, \
    { KVM_TRACE_EXIT_BREAK_INST, "Break Inst" }, \
    { KVM_TRACE_EXIT_RESVD_INST, "Reserved Inst" }, \
    { KVM_TRACE_EXIT_COP_UNUSABLE, "COP0/1 Unusable" }, \
    { KVM_TRACE_EXIT_TRAP_INST, "Trap Inst" }, \
    { KVM_TRACE_EXIT_MSA_FPE, "MSA FPE" }, \
    { KVM_TRACE_EXIT_FPE, "FPE" }, \
    { KVM_TRACE_EXIT_MSA_DISABLED, "MSA Disabled" }, \
    { KVM_TRACE_EXIT_WAIT, "WAIT" }, \
    { KVM_TRACE_EXIT_CACHE, "CACHE" }, \
    { KVM_TRACE_EXIT_SIGNAL, "Signal" }

TRACE_EVENT(kvm_exit,
    TP_PROTO(struct kvm_vcpu *vcpu, unsigned int reason),
@@ -34,10 +101,173 @@ TRACE_EVENT(kvm_exit,
    ),

    TP_printk("[%s]PC: 0x%08lx",
        kvm_mips_exit_types_str[__entry->reason],
        __print_symbolic(__entry->reason,
            kvm_trace_symbol_exit_types),
        __entry->pc)
);

#define KVM_TRACE_MFC0 0
#define KVM_TRACE_MTC0 1
#define KVM_TRACE_DMFC0 2
#define KVM_TRACE_DMTC0 3
#define KVM_TRACE_RDHWR 4

#define KVM_TRACE_HWR_COP0 0
#define KVM_TRACE_HWR_HWR 1

#define KVM_TRACE_COP0(REG, SEL) ((KVM_TRACE_HWR_COP0 << 8) | \
    ((REG) << 3) | (SEL))
#define KVM_TRACE_HWR(REG, SEL) ((KVM_TRACE_HWR_HWR << 8) | \
    ((REG) << 3) | (SEL))

#define kvm_trace_symbol_hwr_ops \
    { KVM_TRACE_MFC0, "MFC0" }, \
    { KVM_TRACE_MTC0, "MTC0" }, \
    { KVM_TRACE_DMFC0, "DMFC0" }, \
    { KVM_TRACE_DMTC0, "DMTC0" }, \
    { KVM_TRACE_RDHWR, "RDHWR" }

#define kvm_trace_symbol_hwr_cop \
    { KVM_TRACE_HWR_COP0, "COP0" }, \
    { KVM_TRACE_HWR_HWR, "HWR" }

#define kvm_trace_symbol_hwr_regs \
    { KVM_TRACE_COP0( 0, 0), "Index" }, \
    { KVM_TRACE_COP0( 2, 0), "EntryLo0" }, \
    { KVM_TRACE_COP0( 3, 0), "EntryLo1" }, \
    { KVM_TRACE_COP0( 4, 0), "Context" }, \
    { KVM_TRACE_COP0( 4, 2), "UserLocal" }, \
    { KVM_TRACE_COP0( 5, 0), "PageMask" }, \
    { KVM_TRACE_COP0( 6, 0), "Wired" }, \
    { KVM_TRACE_COP0( 7, 0), "HWREna" }, \
    { KVM_TRACE_COP0( 8, 0), "BadVAddr" }, \
    { KVM_TRACE_COP0( 9, 0), "Count" }, \
    { KVM_TRACE_COP0(10, 0), "EntryHi" }, \
    { KVM_TRACE_COP0(11, 0), "Compare" }, \
    { KVM_TRACE_COP0(12, 0), "Status" }, \
    { KVM_TRACE_COP0(12, 1), "IntCtl" }, \
    { KVM_TRACE_COP0(12, 2), "SRSCtl" }, \
    { KVM_TRACE_COP0(13, 0), "Cause" }, \
    { KVM_TRACE_COP0(14, 0), "EPC" }, \
    { KVM_TRACE_COP0(15, 0), "PRId" }, \
    { KVM_TRACE_COP0(15, 1), "EBase" }, \
    { KVM_TRACE_COP0(16, 0), "Config" }, \
    { KVM_TRACE_COP0(16, 1), "Config1" }, \
    { KVM_TRACE_COP0(16, 2), "Config2" }, \
    { KVM_TRACE_COP0(16, 3), "Config3" }, \
    { KVM_TRACE_COP0(16, 4), "Config4" }, \
    { KVM_TRACE_COP0(16, 5), "Config5" }, \
    { KVM_TRACE_COP0(16, 7), "Config7" }, \
    { KVM_TRACE_COP0(26, 0), "ECC" }, \
    { KVM_TRACE_COP0(30, 0), "ErrorEPC" }, \
    { KVM_TRACE_COP0(31, 2), "KScratch1" }, \
    { KVM_TRACE_COP0(31, 3), "KScratch2" }, \
    { KVM_TRACE_COP0(31, 4), "KScratch3" }, \
    { KVM_TRACE_COP0(31, 5), "KScratch4" }, \
    { KVM_TRACE_COP0(31, 6), "KScratch5" }, \
    { KVM_TRACE_COP0(31, 7), "KScratch6" }, \
    { KVM_TRACE_HWR( 0, 0), "CPUNum" }, \
    { KVM_TRACE_HWR( 1, 0), "SYNCI_Step" }, \
    { KVM_TRACE_HWR( 2, 0), "CC" }, \
    { KVM_TRACE_HWR( 3, 0), "CCRes" }, \
    { KVM_TRACE_HWR(29, 0), "ULR" }
TRACE_EVENT(kvm_hwr,
    TP_PROTO(struct kvm_vcpu *vcpu, unsigned int op, unsigned int reg,
        unsigned long val),
    TP_ARGS(vcpu, op, reg, val),
    TP_STRUCT__entry(
        __field(unsigned long, val)
        __field(u16, reg)
        __field(u8, op)
    ),

    TP_fast_assign(
        __entry->val = val;
        __entry->reg = reg;
        __entry->op = op;
    ),

    TP_printk("%s %s (%s:%u:%u) 0x%08lx",
        __print_symbolic(__entry->op,
            kvm_trace_symbol_hwr_ops),
        __print_symbolic(__entry->reg,
            kvm_trace_symbol_hwr_regs),
        __print_symbolic(__entry->reg >> 8,
            kvm_trace_symbol_hwr_cop),
        (__entry->reg >> 3) & 0x1f,
        __entry->reg & 0x7,
        __entry->val)
);

#define KVM_TRACE_AUX_RESTORE 0
#define KVM_TRACE_AUX_SAVE 1
#define KVM_TRACE_AUX_ENABLE 2
#define KVM_TRACE_AUX_DISABLE 3
#define KVM_TRACE_AUX_DISCARD 4

#define KVM_TRACE_AUX_FPU 1
#define KVM_TRACE_AUX_MSA 2
#define KVM_TRACE_AUX_FPU_MSA 3

#define kvm_trace_symbol_aux_op \
    { KVM_TRACE_AUX_RESTORE, "restore" }, \
    { KVM_TRACE_AUX_SAVE, "save" }, \
    { KVM_TRACE_AUX_ENABLE, "enable" }, \
    { KVM_TRACE_AUX_DISABLE, "disable" }, \
    { KVM_TRACE_AUX_DISCARD, "discard" }

#define kvm_trace_symbol_aux_state \
    { KVM_TRACE_AUX_FPU, "FPU" }, \
    { KVM_TRACE_AUX_MSA, "MSA" }, \
    { KVM_TRACE_AUX_FPU_MSA, "FPU & MSA" }

TRACE_EVENT(kvm_aux,
    TP_PROTO(struct kvm_vcpu *vcpu, unsigned int op,
        unsigned int state),
    TP_ARGS(vcpu, op, state),
    TP_STRUCT__entry(
        __field(unsigned long, pc)
        __field(u8, op)
        __field(u8, state)
    ),

    TP_fast_assign(
        __entry->pc = vcpu->arch.pc;
        __entry->op = op;
        __entry->state = state;
    ),

    TP_printk("%s %s PC: 0x%08lx",
        __print_symbolic(__entry->op,
            kvm_trace_symbol_aux_op),
        __print_symbolic(__entry->state,
            kvm_trace_symbol_aux_state),
        __entry->pc)
);

TRACE_EVENT(kvm_asid_change,
    TP_PROTO(struct kvm_vcpu *vcpu, unsigned int old_asid,
        unsigned int new_asid),
    TP_ARGS(vcpu, old_asid, new_asid),
    TP_STRUCT__entry(
        __field(unsigned long, pc)
        __field(u8, old_asid)
        __field(u8, new_asid)
    ),

    TP_fast_assign(
        __entry->pc = vcpu->arch.pc;
        __entry->old_asid = old_asid;
        __entry->new_asid = new_asid;
    ),

    TP_printk("PC: 0x%08lx old: 0x%02x new: 0x%02x",
        __entry->pc,
        __entry->old_asid,
        __entry->new_asid)
);

#endif /* _TRACE_KVM_H */

/* This part must be outside protection */
@@ -21,7 +21,7 @@
static gpa_t kvm_trap_emul_gva_to_gpa_cb(gva_t gva)
{
    gpa_t gpa;
    uint32_t kseg = KSEGX(gva);
    gva_t kseg = KSEGX(gva);

    if ((kseg == CKSEG0) || (kseg == CKSEG1))
        gpa = CPHYSADDR(gva);
@@ -40,8 +40,8 @@ static int kvm_trap_emul_handle_cop_unusable(struct kvm_vcpu *vcpu)
{
    struct mips_coproc *cop0 = vcpu->arch.cop0;
    struct kvm_run *run = vcpu->run;
    uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc;
    unsigned long cause = vcpu->arch.host_cp0_cause;
    u32 __user *opc = (u32 __user *) vcpu->arch.pc;
    u32 cause = vcpu->arch.host_cp0_cause;
    enum emulation_result er = EMULATE_DONE;
    int ret = RESUME_GUEST;
@@ -87,15 +87,15 @@ static int kvm_trap_emul_handle_cop_unusable(struct kvm_vcpu *vcpu)
static int kvm_trap_emul_handle_tlb_mod(struct kvm_vcpu *vcpu)
{
    struct kvm_run *run = vcpu->run;
    uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc;
    u32 __user *opc = (u32 __user *) vcpu->arch.pc;
    unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;
    unsigned long cause = vcpu->arch.host_cp0_cause;
    u32 cause = vcpu->arch.host_cp0_cause;
    enum emulation_result er = EMULATE_DONE;
    int ret = RESUME_GUEST;

    if (KVM_GUEST_KSEGX(badvaddr) < KVM_GUEST_KSEG0
            || KVM_GUEST_KSEGX(badvaddr) == KVM_GUEST_KSEG23) {
        kvm_debug("USER/KSEG23 ADDR TLB MOD fault: cause %#lx, PC: %p, BadVaddr: %#lx\n",
        kvm_debug("USER/KSEG23 ADDR TLB MOD fault: cause %#x, PC: %p, BadVaddr: %#lx\n",
            cause, opc, badvaddr);
        er = kvm_mips_handle_tlbmod(cause, opc, run, vcpu);
@@ -111,14 +111,14 @@ static int kvm_trap_emul_handle_tlb_mod(struct kvm_vcpu *vcpu)
         * when we are not using HIGHMEM. Need to address this in a
         * HIGHMEM kernel
         */
        kvm_err("TLB MOD fault not handled, cause %#lx, PC: %p, BadVaddr: %#lx\n",
        kvm_err("TLB MOD fault not handled, cause %#x, PC: %p, BadVaddr: %#lx\n",
            cause, opc, badvaddr);
        kvm_mips_dump_host_tlbs();
        kvm_arch_vcpu_dump_regs(vcpu);
        run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
        ret = RESUME_HOST;
    } else {
        kvm_err("Illegal TLB Mod fault address , cause %#lx, PC: %p, BadVaddr: %#lx\n",
        kvm_err("Illegal TLB Mod fault address , cause %#x, PC: %p, BadVaddr: %#lx\n",
            cause, opc, badvaddr);
        kvm_mips_dump_host_tlbs();
        kvm_arch_vcpu_dump_regs(vcpu);
@@ -128,12 +128,12 @@ static int kvm_trap_emul_handle_tlb_mod(struct kvm_vcpu *vcpu)
    return ret;
}

static int kvm_trap_emul_handle_tlb_st_miss(struct kvm_vcpu *vcpu)
static int kvm_trap_emul_handle_tlb_miss(struct kvm_vcpu *vcpu, bool store)
{
    struct kvm_run *run = vcpu->run;
    uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc;
    u32 __user *opc = (u32 __user *) vcpu->arch.pc;
    unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;
    unsigned long cause = vcpu->arch.host_cp0_cause;
    u32 cause = vcpu->arch.host_cp0_cause;
    enum emulation_result er = EMULATE_DONE;
    int ret = RESUME_GUEST;
@@ -145,55 +145,8 @@ static int kvm_trap_emul_handle_tlb_st_miss(struct kvm_vcpu *vcpu)
        }
    } else if (KVM_GUEST_KSEGX(badvaddr) < KVM_GUEST_KSEG0
            || KVM_GUEST_KSEGX(badvaddr) == KVM_GUEST_KSEG23) {
        kvm_debug("USER ADDR TLB LD fault: cause %#lx, PC: %p, BadVaddr: %#lx\n",
            cause, opc, badvaddr);
        er = kvm_mips_handle_tlbmiss(cause, opc, run, vcpu);
        if (er == EMULATE_DONE)
            ret = RESUME_GUEST;
        else {
            run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
            ret = RESUME_HOST;
        }
    } else if (KVM_GUEST_KSEGX(badvaddr) == KVM_GUEST_KSEG0) {
        /*
         * All KSEG0 faults are handled by KVM, as the guest kernel does
         * not expect to ever get them
         */
        if (kvm_mips_handle_kseg0_tlb_fault
            (vcpu->arch.host_cp0_badvaddr, vcpu) < 0) {
            run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
            ret = RESUME_HOST;
        }
    } else {
        kvm_err("Illegal TLB LD fault address , cause %#lx, PC: %p, BadVaddr: %#lx\n",
            cause, opc, badvaddr);
        kvm_mips_dump_host_tlbs();
        kvm_arch_vcpu_dump_regs(vcpu);
        run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
        ret = RESUME_HOST;
    }
    return ret;
}

static int kvm_trap_emul_handle_tlb_ld_miss(struct kvm_vcpu *vcpu)
{
    struct kvm_run *run = vcpu->run;
    uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc;
    unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;
    unsigned long cause = vcpu->arch.host_cp0_cause;
    enum emulation_result er = EMULATE_DONE;
    int ret = RESUME_GUEST;

    if (((badvaddr & PAGE_MASK) == KVM_GUEST_COMMPAGE_ADDR)
            && KVM_GUEST_KERNEL_MODE(vcpu)) {
        if (kvm_mips_handle_commpage_tlb_fault(badvaddr, vcpu) < 0) {
            run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
            ret = RESUME_HOST;
        }
    } else if (KVM_GUEST_KSEGX(badvaddr) < KVM_GUEST_KSEG0
            || KVM_GUEST_KSEGX(badvaddr) == KVM_GUEST_KSEG23) {
        kvm_debug("USER ADDR TLB ST fault: PC: %#lx, BadVaddr: %#lx\n",
            vcpu->arch.pc, badvaddr);
        kvm_debug("USER ADDR TLB %s fault: cause %#x, PC: %p, BadVaddr: %#lx\n",
            store ? "ST" : "LD", cause, opc, badvaddr);

        /*
         * User Address (UA) fault, this could happen if
@@ -213,14 +166,18 @@ static int kvm_trap_emul_handle_tlb_ld_miss(struct kvm_vcpu *vcpu)
            ret = RESUME_HOST;
        }
    } else if (KVM_GUEST_KSEGX(badvaddr) == KVM_GUEST_KSEG0) {
        /*
         * All KSEG0 faults are handled by KVM, as the guest kernel does
         * not expect to ever get them
         */
        if (kvm_mips_handle_kseg0_tlb_fault
            (vcpu->arch.host_cp0_badvaddr, vcpu) < 0) {
            run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
            ret = RESUME_HOST;
        }
    } else {
        kvm_err("Illegal TLB ST fault address , cause %#lx, PC: %p, BadVaddr: %#lx\n",
            cause, opc, badvaddr);
        kvm_err("Illegal TLB %s fault address , cause %#x, PC: %p, BadVaddr: %#lx\n",
            store ? "ST" : "LD", cause, opc, badvaddr);
        kvm_mips_dump_host_tlbs();
        kvm_arch_vcpu_dump_regs(vcpu);
        run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
@@ -229,12 +186,22 @@ static int kvm_trap_emul_handle_tlb_ld_miss(struct kvm_vcpu *vcpu)
    return ret;
}

static int kvm_trap_emul_handle_tlb_st_miss(struct kvm_vcpu *vcpu)
{
    return kvm_trap_emul_handle_tlb_miss(vcpu, true);
}

static int kvm_trap_emul_handle_tlb_ld_miss(struct kvm_vcpu *vcpu)
{
    return kvm_trap_emul_handle_tlb_miss(vcpu, false);
}

static int kvm_trap_emul_handle_addr_err_st(struct kvm_vcpu *vcpu)
{
    struct kvm_run *run = vcpu->run;
    uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc;
    u32 __user *opc = (u32 __user *) vcpu->arch.pc;
    unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;
    unsigned long cause = vcpu->arch.host_cp0_cause;
    u32 cause = vcpu->arch.host_cp0_cause;
    enum emulation_result er = EMULATE_DONE;
    int ret = RESUME_GUEST;
@@ -251,7 +218,7 @@ static int kvm_trap_emul_handle_addr_err_st(struct kvm_vcpu *vcpu)
            ret = RESUME_HOST;
        }
    } else {
        kvm_err("Address Error (STORE): cause %#lx, PC: %p, BadVaddr: %#lx\n",
        kvm_err("Address Error (STORE): cause %#x, PC: %p, BadVaddr: %#lx\n",
            cause, opc, badvaddr);
        run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
        ret = RESUME_HOST;
@@ -262,9 +229,9 @@
static int kvm_trap_emul_handle_addr_err_ld(struct kvm_vcpu *vcpu)
{
    struct kvm_run *run = vcpu->run;
    uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc;
    u32 __user *opc = (u32 __user *) vcpu->arch.pc;
    unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;
    unsigned long cause = vcpu->arch.host_cp0_cause;
    u32 cause = vcpu->arch.host_cp0_cause;
    enum emulation_result er = EMULATE_DONE;
    int ret = RESUME_GUEST;
@@ -280,7 +247,7 @@ static int kvm_trap_emul_handle_addr_err_ld(struct kvm_vcpu *vcpu)
            ret = RESUME_HOST;
        }
    } else {
        kvm_err("Address Error (LOAD): cause %#lx, PC: %p, BadVaddr: %#lx\n",
        kvm_err("Address Error (LOAD): cause %#x, PC: %p, BadVaddr: %#lx\n",
            cause, opc, badvaddr);
        run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
        ret = RESUME_HOST;
@@ -292,8 +259,8 @@
static int kvm_trap_emul_handle_syscall(struct kvm_vcpu *vcpu)
{
    struct kvm_run *run = vcpu->run;
    uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc;
    unsigned long cause = vcpu->arch.host_cp0_cause;
    u32 __user *opc = (u32 __user *) vcpu->arch.pc;
    u32 cause = vcpu->arch.host_cp0_cause;
    enum emulation_result er = EMULATE_DONE;
    int ret = RESUME_GUEST;
@@ -310,8 +277,8 @@
static int kvm_trap_emul_handle_res_inst(struct kvm_vcpu *vcpu)
{
    struct kvm_run *run = vcpu->run;
    uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc;
    unsigned long cause = vcpu->arch.host_cp0_cause;
    u32 __user *opc = (u32 __user *) vcpu->arch.pc;
    u32 cause = vcpu->arch.host_cp0_cause;
    enum emulation_result er = EMULATE_DONE;
    int ret = RESUME_GUEST;
@@ -328,8 +295,8 @@
static int kvm_trap_emul_handle_break(struct kvm_vcpu *vcpu)
{
    struct kvm_run *run = vcpu->run;
    uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc;
    unsigned long cause = vcpu->arch.host_cp0_cause;
    u32 __user *opc = (u32 __user *) vcpu->arch.pc;
    u32 cause = vcpu->arch.host_cp0_cause;
    enum emulation_result er = EMULATE_DONE;
    int ret = RESUME_GUEST;
@@ -346,8 +313,8 @@
static int kvm_trap_emul_handle_trap(struct kvm_vcpu *vcpu)
{
    struct kvm_run *run = vcpu->run;
    uint32_t __user *opc = (uint32_t __user *)vcpu->arch.pc;
    unsigned long cause = vcpu->arch.host_cp0_cause;
    u32 __user *opc = (u32 __user *)vcpu->arch.pc;
    u32 cause = vcpu->arch.host_cp0_cause;
    enum emulation_result er = EMULATE_DONE;
    int ret = RESUME_GUEST;
@@ -364,8 +331,8 @@
static int kvm_trap_emul_handle_msa_fpe(struct kvm_vcpu *vcpu)
{
    struct kvm_run *run = vcpu->run;
    uint32_t __user *opc = (uint32_t __user *)vcpu->arch.pc;
    unsigned long cause = vcpu->arch.host_cp0_cause;
    u32 __user *opc = (u32 __user *)vcpu->arch.pc;
    u32 cause = vcpu->arch.host_cp0_cause;
    enum emulation_result er = EMULATE_DONE;
    int ret = RESUME_GUEST;
@@ -382,8 +349,8 @@
static int kvm_trap_emul_handle_fpe(struct kvm_vcpu *vcpu)
{
    struct kvm_run *run = vcpu->run;
    uint32_t __user *opc = (uint32_t __user *)vcpu->arch.pc;
    unsigned long cause = vcpu->arch.host_cp0_cause;
    u32 __user *opc = (u32 __user *)vcpu->arch.pc;
    u32 cause = vcpu->arch.host_cp0_cause;
    enum emulation_result er = EMULATE_DONE;
    int ret = RESUME_GUEST;
@@ -407,8 +374,8 @@ static int kvm_trap_emul_handle_msa_disabled(struct kvm_vcpu *vcpu)
{
    struct mips_coproc *cop0 = vcpu->arch.cop0;
    struct kvm_run *run = vcpu->run;
    uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc;
    unsigned long cause = vcpu->arch.host_cp0_cause;
    u32 __user *opc = (u32 __user *) vcpu->arch.pc;
    u32 cause = vcpu->arch.host_cp0_cause;
    enum emulation_result er = EMULATE_DONE;
    int ret = RESUME_GUEST;
@@ -451,24 +418,41 @@ static int kvm_trap_emul_vm_init(struct kvm *kvm)

static int kvm_trap_emul_vcpu_init(struct kvm_vcpu *vcpu)
{
    vcpu->arch.kscratch_enabled = 0xfc;

    return 0;
}

static int kvm_trap_emul_vcpu_setup(struct kvm_vcpu *vcpu)
{
    struct mips_coproc *cop0 = vcpu->arch.cop0;
    uint32_t config1;
    u32 config, config1;
    int vcpu_id = vcpu->vcpu_id;

    /*
     * Arch specific stuff, set up config registers properly so that the
     * guest will come up as expected, for now we simulate a MIPS 24kc
     * guest will come up as expected
     */
#ifndef CONFIG_CPU_MIPSR6
    /* r2-r5, simulate a MIPS 24kc */
    kvm_write_c0_guest_prid(cop0, 0x00019300);
    /* Have config1, Cacheable, noncoherent, write-back, write allocate */
    kvm_write_c0_guest_config(cop0, MIPS_CONF_M | (0x3 << CP0C0_K0) |
        (0x1 << CP0C0_AR) |
        (MMU_TYPE_R4000 << CP0C0_MT));
#else
    /* r6+, simulate a generic QEMU machine */
    kvm_write_c0_guest_prid(cop0, 0x00010000);
#endif
    /*
     * Have config1, Cacheable, noncoherent, write-back, write allocate.
     * Endianness, arch revision & virtually tagged icache should match
     * host.
     */
    config = read_c0_config() & MIPS_CONF_AR;
    config |= MIPS_CONF_M | CONF_CM_CACHABLE_NONCOHERENT | MIPS_CONF_MT_TLB;
#ifdef CONFIG_CPU_BIG_ENDIAN
    config |= CONF_BE;
#endif
    if (cpu_has_vtag_icache)
        config |= MIPS_CONF_VI;
    kvm_write_c0_guest_config(cop0, config);

    /* Read the cache characteristics from the host Config1 Register */
    config1 = (read_c0_config1() & ~0x7f);
@@ -478,9 +462,8 @@ static int kvm_trap_emul_vcpu_setup(struct kvm_vcpu *vcpu)
    config1 |= ((KVM_MIPS_GUEST_TLB_SIZE - 1) << 25);

    /* We unset some bits that we aren't emulating */
    config1 &=
        ~((1 << CP0C1_C2) | (1 << CP0C1_MD) | (1 << CP0C1_PC) |
        (1 << CP0C1_WR) | (1 << CP0C1_CA));
    config1 &= ~(MIPS_CONF1_C2 | MIPS_CONF1_MD | MIPS_CONF1_PC |
        MIPS_CONF1_WR | MIPS_CONF1_CA);
    kvm_write_c0_guest_config1(cop0, config1);

    /* Have config3, no tertiary/secondary caches implemented */
@@ -511,6 +494,17 @@ static int kvm_trap_emul_vcpu_setup(struct kvm_vcpu *vcpu)
    return 0;
}

static unsigned long kvm_trap_emul_num_regs(struct kvm_vcpu *vcpu)
{
    return 0;
}

static int kvm_trap_emul_copy_reg_indices(struct kvm_vcpu *vcpu,
    u64 __user *indices)
{
    return 0;
}

static int kvm_trap_emul_get_one_reg(struct kvm_vcpu *vcpu,
    const struct kvm_one_reg *reg,
    s64 *v)
@@ -660,6 +654,8 @@ static struct kvm_mips_callbacks kvm_trap_emul_callbacks = {
    .dequeue_io_int = kvm_mips_dequeue_io_int_cb,
    .irq_deliver = kvm_mips_irq_deliver_cb,
    .irq_clear = kvm_mips_irq_clear_cb,
    .num_regs = kvm_trap_emul_num_regs,
    .copy_reg_indices = kvm_trap_emul_copy_reg_indices,
    .get_one_reg = kvm_trap_emul_get_one_reg,
    .set_one_reg = kvm_trap_emul_set_one_reg,
    .vcpu_get_regs = kvm_trap_emul_vcpu_get_regs,
@@ -627,8 +627,8 @@ static int isBranchInstr(struct pt_regs *regs, struct mm_decoded_insn dec_insn,
            dec_insn.pc_inc +
            dec_insn.next_pc_inc;
        return 1;
    case cbcond0_op:
    case cbcond1_op:
    case pop10_op:
    case pop30_op:
        if (!cpu_has_mips_r6)
            break;
        if (insn.i_format.rt && !insn.i_format.rs)
@@ -683,14 +683,14 @@ static int isBranchInstr(struct pt_regs *regs, struct mm_decoded_insn dec_insn,
            dec_insn.next_pc_inc;

        return 1;
    case beqzcjic_op:
    case pop66_op:
        if (!cpu_has_mips_r6)
            break;
        *contpc = regs->cp0_epc + dec_insn.pc_inc +
            dec_insn.next_pc_inc;

        return 1;
    case bnezcjialc_op:
    case pop76_op:
        if (!cpu_has_mips_r6)
            break;
        if (!insn.i_format.rs)
@@ -1206,7 +1206,7 @@ static void probe_pcache(void)
        c->icache.linesz;
    c->icache.waybit = __ffs(icache_size/c->icache.ways);

    if (config & 0x8) /* VI bit */
    if (config & MIPS_CONF_VI)
        c->icache.flags |= MIPS_CACHE_VTAG;

    /*
@@ -53,8 +53,13 @@ static struct insn insn_table_MM[] = {
    { insn_bltzl, 0, 0 },
    { insn_bne, M(mm_bne32_op, 0, 0, 0, 0, 0), RT | RS | BIMM },
    { insn_cache, M(mm_pool32b_op, 0, 0, mm_cache_func, 0, 0), RT | RS | SIMM },
    { insn_cfc1, M(mm_pool32f_op, 0, 0, 0, mm_cfc1_op, mm_32f_73_op), RT | RS },
    { insn_cfcmsa, M(mm_pool32s_op, 0, msa_cfc_op, 0, 0, mm_32s_elm_op), RD | RE },
    { insn_ctc1, M(mm_pool32f_op, 0, 0, 0, mm_ctc1_op, mm_32f_73_op), RT | RS },
    { insn_ctcmsa, M(mm_pool32s_op, 0, msa_ctc_op, 0, 0, mm_32s_elm_op), RD | RE },
    { insn_daddu, 0, 0 },
    { insn_daddiu, 0, 0 },
    { insn_di, M(mm_pool32a_op, 0, 0, 0, mm_di_op, mm_pool32axf_op), RS },
    { insn_divu, M(mm_pool32a_op, 0, 0, 0, mm_divu_op, mm_pool32axf_op), RT | RS },
    { insn_dmfc0, 0, 0 },
    { insn_dmtc0, 0, 0 },
@@ -84,6 +89,8 @@ static struct insn insn_table_MM[] = {
    { insn_mfhi, M(mm_pool32a_op, 0, 0, 0, mm_mfhi32_op, mm_pool32axf_op), RS },
    { insn_mflo, M(mm_pool32a_op, 0, 0, 0, mm_mflo32_op, mm_pool32axf_op), RS },
    { insn_mtc0, M(mm_pool32a_op, 0, 0, 0, mm_mtc0_op, mm_pool32axf_op), RT | RS | RD },
    { insn_mthi, M(mm_pool32a_op, 0, 0, 0, mm_mthi32_op, mm_pool32axf_op), RS },
    { insn_mtlo, M(mm_pool32a_op, 0, 0, 0, mm_mtlo32_op, mm_pool32axf_op), RS },
    { insn_mul, M(mm_pool32a_op, 0, 0, 0, 0, mm_mul_op), RT | RS | RD },
    { insn_or, M(mm_pool32a_op, 0, 0, 0, 0, mm_or32_op), RT | RS | RD },
    { insn_ori, M(mm_ori32_op, 0, 0, 0, 0, 0), RT | RS | UIMM },
@@ -166,13 +173,15 @@ static void build_insn(u32 **buf, enum opcode opc, ...)
    op = ip->match;
    va_start(ap, opc);
    if (ip->fields & RS) {
        if (opc == insn_mfc0 || opc == insn_mtc0)
        if (opc == insn_mfc0 || opc == insn_mtc0 ||
            opc == insn_cfc1 || opc == insn_ctc1)
            op |= build_rt(va_arg(ap, u32));
        else
            op |= build_rs(va_arg(ap, u32));
    }
    if (ip->fields & RT) {
        if (opc == insn_mfc0 || opc == insn_mtc0)
        if (opc == insn_mfc0 || opc == insn_mtc0 ||
            opc == insn_cfc1 || opc == insn_ctc1)
            op |= build_rs(va_arg(ap, u32));
        else
            op |= build_rt(va_arg(ap, u32));
Some files were not shown because too many files have changed in this diff.