2016-06-29 19:47:03 +08:00
|
|
|
#ifndef MMU_HASH64_H
|
|
|
|
#define MMU_HASH64_H
|
2013-03-12 08:31:06 +08:00
|
|
|
|
|
|
|
#ifndef CONFIG_USER_ONLY
|
|
|
|
|
|
|
|
#ifdef TARGET_PPC64
|
2016-01-14 12:33:27 +08:00
|
|
|
void dump_slb(FILE *f, fprintf_function cpu_fprintf, PowerPCCPU *cpu);
|
2016-01-27 08:07:29 +08:00
|
|
|
int ppc_store_slb(PowerPCCPU *cpu, target_ulong slot,
|
|
|
|
target_ulong esid, target_ulong vsid);
|
2016-01-14 12:33:27 +08:00
|
|
|
hwaddr ppc_hash64_get_phys_page_debug(PowerPCCPU *cpu, target_ulong addr);
|
2016-03-15 22:12:16 +08:00
|
|
|
int ppc_hash64_handle_mmu_fault(PowerPCCPU *cpu, vaddr address, int rw,
|
2013-03-12 08:31:12 +08:00
|
|
|
int mmu_idx);
|
target/ppc: Cleanup HPTE accessors for 64-bit hash MMU
Accesses to the hashed page table (HPT) are complicated by the fact that
the HPT could be in one of three places:
1) Within guest memory - when we're emulating a full guest CPU at the
hardware level (e.g. powernv, mac99, g3beige)
2) Within qemu, but outside guest memory - when we're emulating user and
supervisor instructions within TCG, but instead of emulating
the CPU's hypervisor mode, we just emulate a hypervisor's behaviour
(pseries in TCG or KVM-PR)
3) Within the host kernel - a pseries machine using KVM-HV
acceleration. Mostly accesses to the HPT are handled by KVM,
but there are a few cases where qemu needs to access it via a
special fd for the purpose.
In order to batch accesses to the fd in case (3), we use a somewhat awkward
ppc_hash64_start_access() / ppc_hash64_stop_access() pair, which for case
(3) reads / releases several HPTEs from the kernel as a batch (usually a
whole PTEG). For cases (1) & (2) it just returns an address value. The
actual HPTE load helpers then need to interpret the returned token
differently in the 3 cases.
This patch keeps the same basic structure, but simplifies the details.
First start_access() / stop_access() are renamed to map_hptes() and
unmap_hptes() to make their operation more obvious. Second, map_hptes()
now always returns a qemu pointer, which can always be used in the same way
by the load_hpte() helpers. In case (1) it comes from address_space_map()
in case (2) directly from qemu's HPT buffer and in case (3) from a
temporary buffer read from the KVM fd.
While we're at it, make things a bit more consistent in terms of types and
variable names: avoid variables named 'index' (it shadows index(3) which
can lead to confusing results), use 'hwaddr ptex' for HPTE indices and
uint64_t for each of the HPTE words, use ptex throughout the call stack
instead of pte_offset in some places (we still need that at the bottom
layer, but nowhere else).
Signed-off-by: David Gibson <david@gibson.dropbear.id.au>
2017-02-27 13:03:41 +08:00
|
|
|
void ppc_hash64_store_hpte(PowerPCCPU *cpu, hwaddr ptex,
|
|
|
|
uint64_t pte0, uint64_t pte1);
|
2016-01-15 13:12:09 +08:00
|
|
|
void ppc_hash64_tlb_flush_hpte(PowerPCCPU *cpu,
|
|
|
|
target_ulong pte_index,
|
|
|
|
target_ulong pte0, target_ulong pte1);
|
2016-01-27 09:01:20 +08:00
|
|
|
unsigned ppc_hash64_hpte_page_shift_noslb(PowerPCCPU *cpu,
|
2016-07-01 15:10:10 +08:00
|
|
|
uint64_t pte0, uint64_t pte1);
|
2016-07-05 05:37:08 +08:00
|
|
|
void ppc_hash64_update_vrma(CPUPPCState *env);
|
|
|
|
void ppc_hash64_update_rmls(CPUPPCState *env);
|
2013-03-12 08:31:06 +08:00
|
|
|
#endif
|
|
|
|
|
2013-03-12 08:31:18 +08:00
|
|
|
/*
|
|
|
|
* SLB definitions
|
|
|
|
*/
|
|
|
|
|
|
|
|
/* Bits in the SLB ESID word */
|
|
|
|
#define SLB_ESID_ESID 0xFFFFFFFFF0000000ULL
|
|
|
|
#define SLB_ESID_V 0x0000000008000000ULL /* valid */
|
|
|
|
|
|
|
|
/* Bits in the SLB VSID word */
|
|
|
|
#define SLB_VSID_SHIFT 12
|
|
|
|
#define SLB_VSID_SHIFT_1T 24
|
|
|
|
#define SLB_VSID_SSIZE_SHIFT 62
|
|
|
|
#define SLB_VSID_B 0xc000000000000000ULL
|
|
|
|
#define SLB_VSID_B_256M 0x0000000000000000ULL
|
|
|
|
#define SLB_VSID_B_1T 0x4000000000000000ULL
|
|
|
|
#define SLB_VSID_VSID 0x3FFFFFFFFFFFF000ULL
|
2016-07-05 05:37:08 +08:00
|
|
|
#define SLB_VSID_VRMA (0x0001FFFFFF000000ULL | SLB_VSID_B_1T)
|
2013-03-12 08:31:18 +08:00
|
|
|
#define SLB_VSID_PTEM (SLB_VSID_B | SLB_VSID_VSID)
|
|
|
|
#define SLB_VSID_KS 0x0000000000000800ULL
|
|
|
|
#define SLB_VSID_KP 0x0000000000000400ULL
|
|
|
|
#define SLB_VSID_N 0x0000000000000200ULL /* no-execute */
|
|
|
|
#define SLB_VSID_L 0x0000000000000100ULL
|
|
|
|
#define SLB_VSID_C 0x0000000000000080ULL /* class */
|
|
|
|
#define SLB_VSID_LP 0x0000000000000030ULL
|
|
|
|
#define SLB_VSID_ATTR 0x0000000000000FFFULL
|
2015-01-26 22:21:58 +08:00
|
|
|
#define SLB_VSID_LLP_MASK (SLB_VSID_L | SLB_VSID_LP)
|
|
|
|
#define SLB_VSID_4K 0x0000000000000000ULL
|
|
|
|
#define SLB_VSID_64K 0x0000000000000110ULL
|
2016-01-15 14:54:42 +08:00
|
|
|
#define SLB_VSID_16M 0x0000000000000100ULL
|
|
|
|
#define SLB_VSID_16G 0x0000000000000120ULL
|
2013-03-12 08:31:18 +08:00
|
|
|
|
|
|
|
/*
|
|
|
|
* Hash page table definitions
|
|
|
|
*/
|
|
|
|
|
|
|
|
#define HPTES_PER_GROUP 8
|
|
|
|
#define HASH_PTE_SIZE_64 16
|
|
|
|
#define HASH_PTEG_SIZE_64 (HASH_PTE_SIZE_64 * HPTES_PER_GROUP)
|
|
|
|
|
|
|
|
#define HPTE64_V_SSIZE_SHIFT 62
|
|
|
|
#define HPTE64_V_AVPN_SHIFT 7
|
|
|
|
#define HPTE64_V_AVPN 0x3fffffffffffff80ULL
|
|
|
|
#define HPTE64_V_AVPN_VAL(x) (((x) & HPTE64_V_AVPN) >> HPTE64_V_AVPN_SHIFT)
|
2016-07-05 10:31:48 +08:00
|
|
|
#define HPTE64_V_COMPARE(x, y) (!(((x) ^ (y)) & 0xffffffffffffff83ULL))
|
2013-03-12 08:31:18 +08:00
|
|
|
#define HPTE64_V_LARGE 0x0000000000000004ULL
|
|
|
|
#define HPTE64_V_SECONDARY 0x0000000000000002ULL
|
|
|
|
#define HPTE64_V_VALID 0x0000000000000001ULL
|
|
|
|
|
|
|
|
#define HPTE64_R_PP0 0x8000000000000000ULL
|
|
|
|
#define HPTE64_R_TS 0x4000000000000000ULL
|
|
|
|
#define HPTE64_R_KEY_HI 0x3000000000000000ULL
|
|
|
|
#define HPTE64_R_RPN_SHIFT 12
|
|
|
|
#define HPTE64_R_RPN 0x0ffffffffffff000ULL
|
|
|
|
#define HPTE64_R_FLAGS 0x00000000000003ffULL
|
|
|
|
#define HPTE64_R_PP 0x0000000000000003ULL
|
|
|
|
#define HPTE64_R_N 0x0000000000000004ULL
|
|
|
|
#define HPTE64_R_G 0x0000000000000008ULL
|
|
|
|
#define HPTE64_R_M 0x0000000000000010ULL
|
|
|
|
#define HPTE64_R_I 0x0000000000000020ULL
|
|
|
|
#define HPTE64_R_W 0x0000000000000040ULL
|
|
|
|
#define HPTE64_R_WIMG 0x0000000000000078ULL
|
|
|
|
#define HPTE64_R_C 0x0000000000000080ULL
|
|
|
|
#define HPTE64_R_R 0x0000000000000100ULL
|
|
|
|
#define HPTE64_R_KEY_LO 0x0000000000000e00ULL
|
2017-01-13 14:28:23 +08:00
|
|
|
#define HPTE64_R_KEY(x) ((((x) & HPTE64_R_KEY_HI) >> 57) | \
|
2013-03-12 08:31:47 +08:00
|
|
|
(((x) & HPTE64_R_KEY_LO) >> 9))
|
2013-03-12 08:31:18 +08:00
|
|
|
|
|
|
|
#define HPTE64_V_1TB_SEG 0x4000000000000000ULL
|
|
|
|
#define HPTE64_V_VRMA_MASK 0x4001ffffff000000ULL
|
|
|
|
|
2016-03-08 08:33:46 +08:00
|
|
|
void ppc_hash64_set_sdr1(PowerPCCPU *cpu, target_ulong value,
|
|
|
|
Error **errp);
|
|
|
|
void ppc_hash64_set_external_hpt(PowerPCCPU *cpu, void *hpt, int shift,
|
|
|
|
Error **errp);
|
|
|
|
|
target/ppc: Cleanup HPTE accessors for 64-bit hash MMU
Accesses to the hashed page table (HPT) are complicated by the fact that
the HPT could be in one of three places:
1) Within guest memory - when we're emulating a full guest CPU at the
hardware level (e.g. powernv, mac99, g3beige)
2) Within qemu, but outside guest memory - when we're emulating user and
supervisor instructions within TCG, but instead of emulating
the CPU's hypervisor mode, we just emulate a hypervisor's behaviour
(pseries in TCG or KVM-PR)
3) Within the host kernel - a pseries machine using KVM-HV
acceleration. Mostly accesses to the HPT are handled by KVM,
but there are a few cases where qemu needs to access it via a
special fd for the purpose.
In order to batch accesses to the fd in case (3), we use a somewhat awkward
ppc_hash64_start_access() / ppc_hash64_stop_access() pair, which for case
(3) reads / releases several HPTEs from the kernel as a batch (usually a
whole PTEG). For cases (1) & (2) it just returns an address value. The
actual HPTE load helpers then need to interpret the returned token
differently in the 3 cases.
This patch keeps the same basic structure, but simplifies the details.
First start_access() / stop_access() are renamed to map_hptes() and
unmap_hptes() to make their operation more obvious. Second, map_hptes()
now always returns a qemu pointer, which can always be used in the same way
by the load_hpte() helpers. In case (1) it comes from address_space_map()
in case (2) directly from qemu's HPT buffer and in case (3) from a
temporary buffer read from the KVM fd.
While we're at it, make things a bit more consistent in terms of types and
variable names: avoid variables named 'index' (it shadows index(3) which
can lead to confusing results), use 'hwaddr ptex' for HPTE indices and
uint64_t for each of the HPTE words, use ptex throughout the call stack
instead of pte_offset in some places (we still need that at the bottom
layer, but nowhere else).
Signed-off-by: David Gibson <david@gibson.dropbear.id.au>
2017-02-27 13:03:41 +08:00
|
|
|
struct ppc_hash_pte64 {
|
|
|
|
uint64_t pte0, pte1;
|
|
|
|
};
|
|
|
|
|
|
|
|
const ppc_hash_pte64_t *ppc_hash64_map_hptes(PowerPCCPU *cpu,
|
|
|
|
hwaddr ptex, int n);
|
|
|
|
void ppc_hash64_unmap_hptes(PowerPCCPU *cpu, const ppc_hash_pte64_t *hptes,
|
|
|
|
hwaddr ptex, int n);
|
2014-02-21 01:52:24 +08:00
|
|
|
|
target/ppc: Cleanup HPTE accessors for 64-bit hash MMU
Accesses to the hashed page table (HPT) are complicated by the fact that
the HPT could be in one of three places:
1) Within guest memory - when we're emulating a full guest CPU at the
hardware level (e.g. powernv, mac99, g3beige)
2) Within qemu, but outside guest memory - when we're emulating user and
supervisor instructions within TCG, but instead of emulating
the CPU's hypervisor mode, we just emulate a hypervisor's behaviour
(pseries in TCG or KVM-PR)
3) Within the host kernel - a pseries machine using KVM-HV
acceleration. Mostly accesses to the HPT are handled by KVM,
but there are a few cases where qemu needs to access it via a
special fd for the purpose.
In order to batch accesses to the fd in case (3), we use a somewhat awkward
ppc_hash64_start_access() / ppc_hash64_stop_access() pair, which for case
(3) reads / releases several HPTEs from the kernel as a batch (usually a
whole PTEG). For cases (1) & (2) it just returns an address value. The
actual HPTE load helpers then need to interpret the returned token
differently in the 3 cases.
This patch keeps the same basic structure, but simplifies the details.
First start_access() / stop_access() are renamed to map_hptes() and
unmap_hptes() to make their operation more obvious. Second, map_hptes()
now always returns a qemu pointer, which can always be used in the same way
by the load_hpte() helpers. In case (1) it comes from address_space_map()
in case (2) directly from qemu's HPT buffer and in case (3) from a
temporary buffer read from the KVM fd.
While we're at it, make things a bit more consistent in terms of types and
variable names: avoid variables named 'index' (it shadows index(3) which
can lead to confusing results), use 'hwaddr ptex' for HPTE indices and
uint64_t for each of the HPTE words, use ptex throughout the call stack
instead of pte_offset in some places (we still need that at the bottom
layer, but nowhere else).
Signed-off-by: David Gibson <david@gibson.dropbear.id.au>
2017-02-27 13:03:41 +08:00
|
|
|
static inline uint64_t ppc_hash64_hpte0(PowerPCCPU *cpu,
|
|
|
|
const ppc_hash_pte64_t *hptes, int i)
|
2013-03-12 08:31:19 +08:00
|
|
|
{
|
target/ppc: Cleanup HPTE accessors for 64-bit hash MMU
Accesses to the hashed page table (HPT) are complicated by the fact that
the HPT could be in one of three places:
1) Within guest memory - when we're emulating a full guest CPU at the
hardware level (e.g. powernv, mac99, g3beige)
2) Within qemu, but outside guest memory - when we're emulating user and
supervisor instructions within TCG, but instead of emulating
the CPU's hypervisor mode, we just emulate a hypervisor's behaviour
(pseries in TCG or KVM-PR)
3) Within the host kernel - a pseries machine using KVM-HV
acceleration. Mostly accesses to the HPT are handled by KVM,
but there are a few cases where qemu needs to access it via a
special fd for the purpose.
In order to batch accesses to the fd in case (3), we use a somewhat awkward
ppc_hash64_start_access() / ppc_hash64_stop_access() pair, which for case
(3) reads / releases several HPTEs from the kernel as a batch (usually a
whole PTEG). For cases (1) & (2) it just returns an address value. The
actual HPTE load helpers then need to interpret the returned token
differently in the 3 cases.
This patch keeps the same basic structure, but simplfiies the details.
First start_access() / stop_access() are renamed to map_hptes() and
unmap_hptes() to make their operation more obvious. Second, map_hptes()
now always returns a qemu pointer, which can always be used in the same way
by the load_hpte() helpers. In case (1) it comes from address_space_map()
in case (2) directly from qemu's HPT buffer and in case (3) from a
temporary buffer read from the KVM fd.
While we're at it, make things a bit more consistent in terms of types and
variable names: avoid variables named 'index' (it shadows index(3) which
can lead to confusing results), use 'hwaddr ptex' for HPTE indices and
uint64_t for each of the HPTE words, use ptex throughout the call stack
instead of pte_offset in some places (we still need that at the bottom
layer, but nowhere else).
Signed-off-by: David Gibson <david@gibson.dropbear.id.au>
2017-02-27 13:03:41 +08:00
|
|
|
return ldq_p(&(hptes[i].pte0));
|
2013-03-12 08:31:19 +08:00
|
|
|
}
|
|
|
|
|
target/ppc: Cleanup HPTE accessors for 64-bit hash MMU
Accesses to the hashed page table (HPT) are complicated by the fact that
the HPT could be in one of three places:
1) Within guest memory - when we're emulating a full guest CPU at the
hardware level (e.g. powernv, mac99, g3beige)
2) Within qemu, but outside guest memory - when we're emulating user and
supervisor instructions within TCG, but instead of emulating
the CPU's hypervisor mode, we just emulate a hypervisor's behaviour
(pseries in TCG or KVM-PR)
3) Within the host kernel - a pseries machine using KVM-HV
acceleration. Mostly accesses to the HPT are handled by KVM,
but there are a few cases where qemu needs to access it via a
special fd for the purpose.
In order to batch accesses to the fd in case (3), we use a somewhat awkward
ppc_hash64_start_access() / ppc_hash64_stop_access() pair, which for case
(3) reads / releases several HPTEs from the kernel as a batch (usually a
whole PTEG). For cases (1) & (2) it just returns an address value. The
actual HPTE load helpers then need to interpret the returned token
differently in the 3 cases.
This patch keeps the same basic structure, but simplifies the details.
First start_access() / stop_access() are renamed to map_hptes() and
unmap_hptes() to make their operation more obvious. Second, map_hptes()
now always returns a qemu pointer, which can always be used in the same way
by the load_hpte() helpers. In case (1) it comes from address_space_map()
in case (2) directly from qemu's HPT buffer and in case (3) from a
temporary buffer read from the KVM fd.
While we're at it, make things a bit more consistent in terms of types and
variable names: avoid variables named 'index' (it shadows index(3) which
can lead to confusing results), use 'hwaddr ptex' for HPTE indices and
uint64_t for each of the HPTE words, use ptex throughout the call stack
instead of pte_offset in some places (we still need that at the bottom
layer, but nowhere else).
Signed-off-by: David Gibson <david@gibson.dropbear.id.au>
2017-02-27 13:03:41 +08:00
|
|
|
static inline uint64_t ppc_hash64_hpte1(PowerPCCPU *cpu,
|
|
|
|
const ppc_hash_pte64_t *hptes, int i)
|
2013-03-12 08:31:19 +08:00
|
|
|
{
|
target/ppc: Cleanup HPTE accessors for 64-bit hash MMU
Accesses to the hashed page table (HPT) are complicated by the fact that
the HPT could be in one of three places:
1) Within guest memory - when we're emulating a full guest CPU at the
hardware level (e.g. powernv, mac99, g3beige)
2) Within qemu, but outside guest memory - when we're emulating user and
supervisor instructions within TCG, but instead of emulating
the CPU's hypervisor mode, we just emulate a hypervisor's behaviour
(pseries in TCG or KVM-PR)
3) Within the host kernel - a pseries machine using KVM-HV
acceleration. Mostly accesses to the HPT are handled by KVM,
but there are a few cases where qemu needs to access it via a
special fd for the purpose.
In order to batch accesses to the fd in case (3), we use a somewhat awkward
ppc_hash64_start_access() / ppc_hash64_stop_access() pair, which for case
(3) reads / releases several HPTEs from the kernel as a batch (usually a
whole PTEG). For cases (1) & (2) it just returns an address value. The
actual HPTE load helpers then need to interpret the returned token
differently in the 3 cases.
This patch keeps the same basic structure, but simplfiies the details.
First start_access() / stop_access() are renamed to map_hptes() and
unmap_hptes() to make their operation more obvious. Second, map_hptes()
now always returns a qemu pointer, which can always be used in the same way
by the load_hpte() helpers. In case (1) it comes from address_space_map()
in case (2) directly from qemu's HPT buffer and in case (3) from a
temporary buffer read from the KVM fd.
While we're at it, make things a bit more consistent in terms of types and
variable names: avoid variables named 'index' (it shadows index(3) which
can lead to confusing results), use 'hwaddr ptex' for HPTE indices and
uint64_t for each of the HPTE words, use ptex throughout the call stack
instead of pte_offset in some places (we still need that at the bottom
layer, but nowhere else).
Signed-off-by: David Gibson <david@gibson.dropbear.id.au>
2017-02-27 13:03:41 +08:00
|
|
|
return ldq_p(&(hptes[i].pte1));
|
2013-03-12 08:31:19 +08:00
|
|
|
}
|
|
|
|
|
2013-03-12 08:31:06 +08:00
|
|
|
#endif /* CONFIG_USER_ONLY */
|
|
|
|
|
2016-06-29 19:47:03 +08:00
|
|
|
#endif /* MMU_HASH64_H */
|