Merge remote-tracking branch 'remotes/powerpc/topic/xive' into kvm-ppc-next

This merges in the powerpc topic/xive branch to bring in the code for the
in-kernel XICS interrupt controller emulation to use the new XIVE (eXternal
Interrupt Virtualization Engine) hardware in the POWER9 chip directly, rather
than via a XICS emulation in firmware.

Signed-off-by: Paul Mackerras <paulus@ozlabs.org>

commit fb7dcf723d
@@ -55,6 +55,14 @@
 #define PPC_BITEXTRACT(bits, ppc_bit, dst_bit) \
         ((((bits) >> PPC_BITLSHIFT(ppc_bit)) & 1) << (dst_bit))

+#define PPC_BITLSHIFT32(be)    (32 - 1 - (be))
+#define PPC_BIT32(bit)         (1UL << PPC_BITLSHIFT32(bit))
+#define PPC_BITMASK32(bs, be)  ((PPC_BIT32(bs) - PPC_BIT32(be))|PPC_BIT32(bs))
+
+#define PPC_BITLSHIFT8(be)     (8 - 1 - (be))
+#define PPC_BIT8(bit)          (1UL << PPC_BITLSHIFT8(bit))
+#define PPC_BITMASK8(bs, be)   ((PPC_BIT8(bs) - PPC_BIT8(be))|PPC_BIT8(bs))
+
 #include <asm/barrier.h>

 /* Macro for generating the ***_bits() functions */
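The new 32-bit and 8-bit helpers follow IBM's MSB-0 bit numbering, where bit 0 is the most significant bit. A standalone sanity check, re-deriving the macros outside the kernel (illustrative only, not part of the patch):

#include <assert.h>

#define PPC_BITLSHIFT32(be)    (32 - 1 - (be))
#define PPC_BIT32(bit)         (1UL << PPC_BITLSHIFT32(bit))
#define PPC_BITMASK32(bs, be)  ((PPC_BIT32(bs) - PPC_BIT32(be)) | PPC_BIT32(bs))

int main(void)
{
        /* MSB-0: bit 0 is the top bit, bit 31 the bottom bit. */
        assert(PPC_BIT32(0)  == 0x80000000UL);
        assert(PPC_BIT32(31) == 0x00000001UL);
        /* A mask spans from the start bit down to the end bit, inclusive. */
        assert(PPC_BITMASK32(8, 31) == 0x00ffffffUL);
        return 0;
}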
@@ -192,24 +192,8 @@ DEF_MMIO_OUT_D(out_le32, 32, stw);

 #endif /* __BIG_ENDIAN */

-/*
- * Cache inhibitied accessors for use in real mode, you don't want to use these
- * unless you know what you're doing.
- *
- * NB. These use the cpu byte ordering.
- */
-DEF_MMIO_OUT_X(out_rm8,   8, stbcix);
-DEF_MMIO_OUT_X(out_rm16, 16, sthcix);
-DEF_MMIO_OUT_X(out_rm32, 32, stwcix);
-DEF_MMIO_IN_X(in_rm8,   8, lbzcix);
-DEF_MMIO_IN_X(in_rm16, 16, lhzcix);
-DEF_MMIO_IN_X(in_rm32, 32, lwzcix);
-
 #ifdef __powerpc64__

-DEF_MMIO_OUT_X(out_rm64, 64, stdcix);
-DEF_MMIO_IN_X(in_rm64, 64, ldcix);
-
 #ifdef __BIG_ENDIAN__
 DEF_MMIO_OUT_D(out_be64, 64, std);
 DEF_MMIO_IN_D(in_be64, 64, ld);
@@ -242,35 +226,6 @@ static inline void out_be64(volatile u64 __iomem *addr, u64 val)
 #endif
 #endif /* __powerpc64__ */

-/*
- * Simple Cache inhibited accessors
- * Unlike the DEF_MMIO_* macros, these don't include any h/w memory
- * barriers, callers need to manage memory barriers on their own.
- * These can only be used in hypervisor real mode.
- */
-
-static inline u32 _lwzcix(unsigned long addr)
-{
-        u32 ret;
-
-        __asm__ __volatile__("lwzcix %0,0, %1"
-                             : "=r" (ret) : "r" (addr) : "memory");
-        return ret;
-}
-
-static inline void _stbcix(u64 addr, u8 val)
-{
-        __asm__ __volatile__("stbcix %0,0,%1"
-                : : "r" (val), "r" (addr) : "memory");
-}
-
-static inline void _stwcix(u64 addr, u32 val)
-{
-        __asm__ __volatile__("stwcix %0,0,%1"
-                : : "r" (val), "r" (addr) : "memory");
-}
-
 /*
  * Low level IO stream instructions are defined out of line for now
  */
@@ -417,15 +372,64 @@ static inline void __raw_writeq(unsigned long v, volatile void __iomem *addr)
 }

 /*
- * Real mode version of the above. stdcix is only supposed to be used
- * in hypervisor real mode as per the architecture spec.
+ * Real mode versions of the above. Those instructions are only supposed
+ * to be used in hypervisor real mode as per the architecture spec.
  */
+static inline void __raw_rm_writeb(u8 val, volatile void __iomem *paddr)
+{
+        __asm__ __volatile__("stbcix %0,0,%1"
+                : : "r" (val), "r" (paddr) : "memory");
+}
+
+static inline void __raw_rm_writew(u16 val, volatile void __iomem *paddr)
+{
+        __asm__ __volatile__("sthcix %0,0,%1"
+                : : "r" (val), "r" (paddr) : "memory");
+}
+
+static inline void __raw_rm_writel(u32 val, volatile void __iomem *paddr)
+{
+        __asm__ __volatile__("stwcix %0,0,%1"
+                : : "r" (val), "r" (paddr) : "memory");
+}
+
 static inline void __raw_rm_writeq(u64 val, volatile void __iomem *paddr)
 {
         __asm__ __volatile__("stdcix %0,0,%1"
                 : : "r" (val), "r" (paddr) : "memory");
 }
+
+static inline u8 __raw_rm_readb(volatile void __iomem *paddr)
+{
+        u8 ret;
+        __asm__ __volatile__("lbzcix %0,0, %1"
+                             : "=r" (ret) : "r" (paddr) : "memory");
+        return ret;
+}
+
+static inline u16 __raw_rm_readw(volatile void __iomem *paddr)
+{
+        u16 ret;
+        __asm__ __volatile__("lhzcix %0,0, %1"
+                             : "=r" (ret) : "r" (paddr) : "memory");
+        return ret;
+}
+
+static inline u32 __raw_rm_readl(volatile void __iomem *paddr)
+{
+        u32 ret;
+        __asm__ __volatile__("lwzcix %0,0, %1"
+                             : "=r" (ret) : "r" (paddr) : "memory");
+        return ret;
+}
+
+static inline u64 __raw_rm_readq(volatile void __iomem *paddr)
+{
+        u64 ret;
+        __asm__ __volatile__("ldcix %0,0, %1"
+                             : "=r" (ret) : "r" (paddr) : "memory");
+        return ret;
+}
 #endif /* __powerpc64__ */

 /*
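Usage note: the new accessors take a `void __iomem *` like the regular readX()/writeX() families, rather than the raw u64 addresses the deleted _stbcix()/_lwzcix() helpers used. A minimal, hypothetical call site (the helper name and the XIRR offset of 4 are illustrative, not from this patch):

/* Sketch: ack an interrupt through a cache-inhibited XICS mapping while
 * in hypervisor real mode. 'xics_phys' is assumed to be the __iomem
 * pointer stored in the host state (see the next hunk). */
static void icp_rm_eoi_sketch(void __iomem *xics_phys, u32 xirr)
{
        __raw_rm_writel(xirr, xics_phys + 4 /* XIRR, illustrative offset */);
}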
@@ -110,7 +110,9 @@ struct kvmppc_host_state {
         u8 ptid;
         struct kvm_vcpu *kvm_vcpu;
         struct kvmppc_vcore *kvm_vcore;
-        unsigned long xics_phys;
+        void __iomem *xics_phys;
+        void __iomem *xive_tima_phys;
+        void __iomem *xive_tima_virt;
         u32 saved_xirr;
         u64 dabr;
         u64 host_mmcr[7];       /* MMCR 0,1,A, SIAR, SDAR, MMCR2, SIER */
@@ -210,6 +210,12 @@ struct kvmppc_spapr_tce_table {
 /* XICS components, defined in book3s_xics.c */
 struct kvmppc_xics;
 struct kvmppc_icp;
+extern struct kvm_device_ops kvm_xics_ops;
+
+/* XIVE components, defined in book3s_xive.c */
+struct kvmppc_xive;
+struct kvmppc_xive_vcpu;
+extern struct kvm_device_ops kvm_xive_ops;

 struct kvmppc_passthru_irqmap;

@@ -298,6 +304,7 @@ struct kvm_arch {
 #endif
 #ifdef CONFIG_KVM_XICS
         struct kvmppc_xics *xics;
+        struct kvmppc_xive *xive;
         struct kvmppc_passthru_irqmap *pimap;
 #endif
         struct kvmppc_ops *kvm_ops;
@@ -427,7 +434,7 @@ struct kvmppc_passthru_irqmap {

 #define KVMPPC_IRQ_DEFAULT      0
 #define KVMPPC_IRQ_MPIC         1
-#define KVMPPC_IRQ_XICS         2
+#define KVMPPC_IRQ_XICS         2 /* Includes a XIVE option */

 #define MMIO_HPTE_CACHE_SIZE    4
@@ -454,6 +461,21 @@ struct mmio_hpte_cache {

 struct openpic;

+/* W0 and W1 of a XIVE thread management context */
+union xive_tma_w01 {
+        struct {
+                u8      nsr;
+                u8      cppr;
+                u8      ipb;
+                u8      lsmfb;
+                u8      ack;
+                u8      inc;
+                u8      age;
+                u8      pipr;
+        };
+        __be64 w01;
+};
+
 struct kvm_vcpu_arch {
         ulong host_stack;
         u32 host_pid;
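The union gives assembly a single 8-byte big-endian view (w01) of the same state that C code reads byte-by-byte. A standalone layout check, with the kernel types re-declared locally for illustration:

#include <stddef.h>
#include <stdint.h>

typedef uint8_t u8;
typedef uint64_t be64;          /* stand-in for the kernel's __be64 */

union xive_tma_w01 {
        struct {
                u8 nsr, cppr, ipb, lsmfb, ack, inc, age, pipr;
        };
        be64 w01;
};

/* The byte members mirror the TM_NSR..TM_PIPR offsets (0x0..0x7)
 * defined in the new xive-regs.h later in this merge. */
_Static_assert(offsetof(union xive_tma_w01, pipr) == 0x7, "layout");
_Static_assert(sizeof(union xive_tma_w01) == 8, "one doubleword");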
@@ -714,6 +736,10 @@ struct kvm_vcpu_arch {
         struct openpic *mpic;   /* KVM_IRQ_MPIC */
 #ifdef CONFIG_KVM_XICS
         struct kvmppc_icp *icp; /* XICS presentation controller */
+        struct kvmppc_xive_vcpu *xive_vcpu; /* XIVE virtual CPU data */
+        __be32 xive_cam_word;    /* Cooked W2 in proper endian with valid bit */
+        u32 xive_pushed;         /* Is the VP pushed on the physical CPU ? */
+        union xive_tma_w01 xive_saved_state; /* W0..1 of XIVE thread state */
 #endif

 #ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
@@ -240,6 +240,7 @@ int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu, struct kvm_interrupt *irq);
 extern int kvm_vm_ioctl_rtas_define_token(struct kvm *kvm, void __user *argp);
 extern int kvmppc_rtas_hcall(struct kvm_vcpu *vcpu);
 extern void kvmppc_rtas_tokens_free(struct kvm *kvm);
+
 extern int kvmppc_xics_set_xive(struct kvm *kvm, u32 irq, u32 server,
                                 u32 priority);
 extern int kvmppc_xics_get_xive(struct kvm *kvm, u32 irq, u32 *server,
@@ -425,7 +426,15 @@ struct openpic;
 extern void kvm_cma_reserve(void) __init;
 static inline void kvmppc_set_xics_phys(int cpu, unsigned long addr)
 {
-        paca[cpu].kvm_hstate.xics_phys = addr;
+        paca[cpu].kvm_hstate.xics_phys = (void __iomem *)addr;
+}
+
+static inline void kvmppc_set_xive_tima(int cpu,
+                                        unsigned long phys_addr,
+                                        void __iomem *virt_addr)
+{
+        paca[cpu].kvm_hstate.xive_tima_phys = (void __iomem *)phys_addr;
+        paca[cpu].kvm_hstate.xive_tima_virt = virt_addr;
 }

 static inline u32 kvmppc_get_xics_latch(void)
@@ -458,6 +467,11 @@ static inline void __init kvm_cma_reserve(void)
 static inline void kvmppc_set_xics_phys(int cpu, unsigned long addr)
 {}

+static inline void kvmppc_set_xive_tima(int cpu,
+                                        unsigned long phys_addr,
+                                        void __iomem *virt_addr)
+{}
+
 static inline u32 kvmppc_get_xics_latch(void)
 {
         return 0;
@@ -494,8 +508,6 @@ extern void kvmppc_free_host_rm_ops(void);
 extern void kvmppc_free_pimap(struct kvm *kvm);
 extern int kvmppc_xics_rm_complete(struct kvm_vcpu *vcpu, u32 hcall);
 extern void kvmppc_xics_free_icp(struct kvm_vcpu *vcpu);
-extern int kvmppc_xics_create_icp(struct kvm_vcpu *vcpu, unsigned long server);
-extern int kvm_vm_ioctl_xics_irq(struct kvm *kvm, struct kvm_irq_level *args);
 extern int kvmppc_xics_hcall(struct kvm_vcpu *vcpu, u32 cmd);
 extern u64 kvmppc_xics_get_icp(struct kvm_vcpu *vcpu);
 extern int kvmppc_xics_set_icp(struct kvm_vcpu *vcpu, u64 icpval);
@@ -510,6 +522,10 @@ extern long kvmppc_deliver_irq_passthru(struct kvm_vcpu *vcpu, __be32 xirr,
                                         struct kvmppc_irq_map *irq_map,
                                         struct kvmppc_passthru_irqmap *pimap,
                                         bool *again);
+
+extern int kvmppc_xics_set_irq(struct kvm *kvm, int irq_source_id, u32 irq,
+                               int level, bool line_status);
+
 extern int h_ipi_redirect;
 #else
 static inline struct kvmppc_passthru_irqmap *kvmppc_get_passthru_irqmap(
@@ -523,16 +539,64 @@ static inline int kvmppc_xics_rm_complete(struct kvm_vcpu *vcpu, u32 hcall)
 static inline int kvmppc_xics_enabled(struct kvm_vcpu *vcpu)
         { return 0; }
 static inline void kvmppc_xics_free_icp(struct kvm_vcpu *vcpu) { }
-static inline int kvmppc_xics_create_icp(struct kvm_vcpu *vcpu,
-                                         unsigned long server)
-        { return -EINVAL; }
-static inline int kvm_vm_ioctl_xics_irq(struct kvm *kvm,
-                                        struct kvm_irq_level *args)
-        { return -ENOTTY; }
 static inline int kvmppc_xics_hcall(struct kvm_vcpu *vcpu, u32 cmd)
         { return 0; }
 #endif

+#ifdef CONFIG_KVM_XIVE
+/*
+ * Below the first "xive" is the "eXternal Interrupt Virtualization Engine"
+ * ie. P9 new interrupt controller, while the second "xive" is the legacy
+ * "eXternal Interrupt Vector Entry" which is the configuration of an
+ * interrupt on the "xics" interrupt controller on P8 and earlier. Those
+ * two function consume or produce a legacy "XIVE" state from the
+ * new "XIVE" interrupt controller.
+ */
+extern int kvmppc_xive_set_xive(struct kvm *kvm, u32 irq, u32 server,
+                                u32 priority);
+extern int kvmppc_xive_get_xive(struct kvm *kvm, u32 irq, u32 *server,
+                                u32 *priority);
+extern int kvmppc_xive_int_on(struct kvm *kvm, u32 irq);
+extern int kvmppc_xive_int_off(struct kvm *kvm, u32 irq);
+extern void kvmppc_xive_init_module(void);
+extern void kvmppc_xive_exit_module(void);
+
+extern int kvmppc_xive_connect_vcpu(struct kvm_device *dev,
+                                    struct kvm_vcpu *vcpu, u32 cpu);
+extern void kvmppc_xive_cleanup_vcpu(struct kvm_vcpu *vcpu);
+extern int kvmppc_xive_set_mapped(struct kvm *kvm, unsigned long guest_irq,
+                                  struct irq_desc *host_desc);
+extern int kvmppc_xive_clr_mapped(struct kvm *kvm, unsigned long guest_irq,
+                                  struct irq_desc *host_desc);
+extern u64 kvmppc_xive_get_icp(struct kvm_vcpu *vcpu);
+extern int kvmppc_xive_set_icp(struct kvm_vcpu *vcpu, u64 icpval);
+
+extern int kvmppc_xive_set_irq(struct kvm *kvm, int irq_source_id, u32 irq,
+                               int level, bool line_status);
+#else
+static inline int kvmppc_xive_set_xive(struct kvm *kvm, u32 irq, u32 server,
+                                       u32 priority) { return -1; }
+static inline int kvmppc_xive_get_xive(struct kvm *kvm, u32 irq, u32 *server,
+                                       u32 *priority) { return -1; }
+static inline int kvmppc_xive_int_on(struct kvm *kvm, u32 irq) { return -1; }
+static inline int kvmppc_xive_int_off(struct kvm *kvm, u32 irq) { return -1; }
+static inline void kvmppc_xive_init_module(void) { }
+static inline void kvmppc_xive_exit_module(void) { }
+
+static inline int kvmppc_xive_connect_vcpu(struct kvm_device *dev,
+                                           struct kvm_vcpu *vcpu, u32 cpu) { return -EBUSY; }
+static inline void kvmppc_xive_cleanup_vcpu(struct kvm_vcpu *vcpu) { }
+static inline int kvmppc_xive_set_mapped(struct kvm *kvm, unsigned long guest_irq,
+                                         struct irq_desc *host_desc) { return -ENODEV; }
+static inline int kvmppc_xive_clr_mapped(struct kvm *kvm, unsigned long guest_irq,
+                                         struct irq_desc *host_desc) { return -ENODEV; }
+static inline u64 kvmppc_xive_get_icp(struct kvm_vcpu *vcpu) { return 0; }
+static inline int kvmppc_xive_set_icp(struct kvm_vcpu *vcpu, u64 icpval) { return -ENOENT; }
+
+static inline int kvmppc_xive_set_irq(struct kvm *kvm, int irq_source_id, u32 irq,
+                                      int level, bool line_status) { return -ENODEV; }
+#endif /* CONFIG_KVM_XIVE */
+
 /*
  * Prototypes for functions called only from assembler code.
  * Having prototypes reduces sparse errors.
@@ -570,6 +634,8 @@ long kvmppc_h_clear_mod(struct kvm_vcpu *vcpu, unsigned long flags,
 long kvmppc_hpte_hv_fault(struct kvm_vcpu *vcpu, unsigned long addr,
                           unsigned long slb_v, unsigned int status, bool data);
 unsigned long kvmppc_rm_h_xirr(struct kvm_vcpu *vcpu);
+unsigned long kvmppc_rm_h_xirr_x(struct kvm_vcpu *vcpu);
+unsigned long kvmppc_rm_h_ipoll(struct kvm_vcpu *vcpu, unsigned long server);
 int kvmppc_rm_h_ipi(struct kvm_vcpu *vcpu, unsigned long server,
                     unsigned long mfrr);
 int kvmppc_rm_h_cppr(struct kvm_vcpu *vcpu, unsigned long cppr);
@@ -40,6 +40,8 @@
 #define OPAL_I2C_ARBT_LOST      -22
 #define OPAL_I2C_NACK_RCVD      -23
 #define OPAL_I2C_STOP_ERR       -24
+#define OPAL_XIVE_PROVISIONING  -31
+#define OPAL_XIVE_FREE_ACTIVE   -32

 /* API Tokens (in r0) */
 #define OPAL_INVALID_CALL       -1
@@ -168,7 +170,24 @@
 #define OPAL_INT_SET_MFRR                       125
 #define OPAL_PCI_TCE_KILL                       126
 #define OPAL_NMMU_SET_PTCR                      127
-#define OPAL_LAST                               127
+#define OPAL_XIVE_RESET                         128
+#define OPAL_XIVE_GET_IRQ_INFO                  129
+#define OPAL_XIVE_GET_IRQ_CONFIG                130
+#define OPAL_XIVE_SET_IRQ_CONFIG                131
+#define OPAL_XIVE_GET_QUEUE_INFO                132
+#define OPAL_XIVE_SET_QUEUE_INFO                133
+#define OPAL_XIVE_DONATE_PAGE                   134
+#define OPAL_XIVE_ALLOCATE_VP_BLOCK             135
+#define OPAL_XIVE_FREE_VP_BLOCK                 136
+#define OPAL_XIVE_GET_VP_INFO                   137
+#define OPAL_XIVE_SET_VP_INFO                   138
+#define OPAL_XIVE_ALLOCATE_IRQ                  139
+#define OPAL_XIVE_FREE_IRQ                      140
+#define OPAL_XIVE_SYNC                          141
+#define OPAL_XIVE_DUMP                          142
+#define OPAL_XIVE_RESERVED3                     143
+#define OPAL_XIVE_RESERVED4                     144
+#define OPAL_LAST                               144

 /* Device tree flags */

@@ -928,6 +947,59 @@ enum {
         OPAL_PCI_TCE_KILL_ALL,
 };

+/* The xive operation mode indicates the active "API" and
+ * corresponds to the "mode" parameter of the opal_xive_reset()
+ * call
+ */
+enum {
+        OPAL_XIVE_MODE_EMU      = 0,
+        OPAL_XIVE_MODE_EXPL     = 1,
+};
+
+/* Flags for OPAL_XIVE_GET_IRQ_INFO */
+enum {
+        OPAL_XIVE_IRQ_TRIGGER_PAGE      = 0x00000001,
+        OPAL_XIVE_IRQ_STORE_EOI         = 0x00000002,
+        OPAL_XIVE_IRQ_LSI               = 0x00000004,
+        OPAL_XIVE_IRQ_SHIFT_BUG         = 0x00000008,
+        OPAL_XIVE_IRQ_MASK_VIA_FW       = 0x00000010,
+        OPAL_XIVE_IRQ_EOI_VIA_FW        = 0x00000020,
+};
+
+/* Flags for OPAL_XIVE_GET/SET_QUEUE_INFO */
+enum {
+        OPAL_XIVE_EQ_ENABLED            = 0x00000001,
+        OPAL_XIVE_EQ_ALWAYS_NOTIFY      = 0x00000002,
+        OPAL_XIVE_EQ_ESCALATE           = 0x00000004,
+};
+
+/* Flags for OPAL_XIVE_GET/SET_VP_INFO */
+enum {
+        OPAL_XIVE_VP_ENABLED            = 0x00000001,
+};
+
+/* "Any chip" replacement for chip ID for allocation functions */
+enum {
+        OPAL_XIVE_ANY_CHIP              = 0xffffffff,
+};
+
+/* Xive sync options */
+enum {
+        /* This bits are cumulative, arg is a girq */
+        XIVE_SYNC_EAS                   = 0x00000001, /* Sync irq source */
+        XIVE_SYNC_QUEUE                 = 0x00000002, /* Sync irq target */
+};
+
+/* Dump options */
+enum {
+        XIVE_DUMP_TM_HYP        = 0,
+        XIVE_DUMP_TM_POOL       = 1,
+        XIVE_DUMP_TM_OS         = 2,
+        XIVE_DUMP_TM_USER       = 3,
+        XIVE_DUMP_VP            = 4,
+        XIVE_DUMP_EMU_STATE     = 5,
+};
+
 #endif /* __ASSEMBLY__ */

 #endif /* __OPAL_API_H */
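To make the IRQ-info flag word concrete, here is a hedged sketch of decoding `out_flags` from OPAL_XIVE_GET_IRQ_INFO; the numeric values mirror the OPAL_XIVE_IRQ_* enum above, and the helper name and print strings are illustrative:

#include <stdint.h>
#include <stdio.h>

static void dump_xive_irq_flags(uint64_t flags)
{
        if (flags & 0x01) printf("  has trigger page\n");
        if (flags & 0x02) printf("  store EOI supported\n");
        if (flags & 0x04) printf("  level sensitive (LSI)\n");
        if (flags & 0x08) printf("  ESB shift bug workaround\n");
        if (flags & 0x10) printf("  masking must go via firmware\n");
        if (flags & 0x20) printf("  EOI must go via firmware\n");
}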
@@ -226,6 +226,42 @@ int64_t opal_pci_tce_kill(uint64_t phb_id, uint32_t kill_type,
                           uint32_t pe_num, uint32_t tce_size,
                           uint64_t dma_addr, uint32_t npages);
 int64_t opal_nmmu_set_ptcr(uint64_t chip_id, uint64_t ptcr);
+int64_t opal_xive_reset(uint64_t version);
+int64_t opal_xive_get_irq_info(uint32_t girq,
+                               __be64 *out_flags,
+                               __be64 *out_eoi_page,
+                               __be64 *out_trig_page,
+                               __be32 *out_esb_shift,
+                               __be32 *out_src_chip);
+int64_t opal_xive_get_irq_config(uint32_t girq, __be64 *out_vp,
+                                 uint8_t *out_prio, __be32 *out_lirq);
+int64_t opal_xive_set_irq_config(uint32_t girq, uint64_t vp, uint8_t prio,
+                                 uint32_t lirq);
+int64_t opal_xive_get_queue_info(uint64_t vp, uint32_t prio,
+                                 __be64 *out_qpage,
+                                 __be64 *out_qsize,
+                                 __be64 *out_qeoi_page,
+                                 __be32 *out_escalate_irq,
+                                 __be64 *out_qflags);
+int64_t opal_xive_set_queue_info(uint64_t vp, uint32_t prio,
+                                 uint64_t qpage,
+                                 uint64_t qsize,
+                                 uint64_t qflags);
+int64_t opal_xive_donate_page(uint32_t chip_id, uint64_t addr);
+int64_t opal_xive_alloc_vp_block(uint32_t alloc_order);
+int64_t opal_xive_free_vp_block(uint64_t vp);
+int64_t opal_xive_get_vp_info(uint64_t vp,
+                              __be64 *out_flags,
+                              __be64 *out_cam_value,
+                              __be64 *out_report_cl_pair,
+                              __be32 *out_chip_id);
+int64_t opal_xive_set_vp_info(uint64_t vp,
+                              uint64_t flags,
+                              uint64_t report_cl_pair);
+int64_t opal_xive_allocate_irq(uint32_t chip_id);
+int64_t opal_xive_free_irq(uint32_t girq);
+int64_t opal_xive_sync(uint32_t type, uint32_t id);
+int64_t opal_xive_dump(uint32_t type, uint32_t id);

 /* Internal functions */
 extern int early_init_dt_scan_opal(unsigned long node, const char *uname,
@@ -365,6 +365,7 @@
 #define   LPCR_MER_SH           11
 #define   LPCR_GTSE             ASM_CONST(0x0000000000000400)   /* Guest Translation Shootdown Enable */
 #define   LPCR_TC               ASM_CONST(0x0000000000000200)   /* Translation control */
+#define   LPCR_HEIC             ASM_CONST(0x0000000000000010)   /* Hypervisor External Interrupt Control */
 #define   LPCR_LPES             0x0000000c
 #define   LPCR_LPES0            ASM_CONST(0x0000000000000008)   /* LPAR Env selector 0 */
 #define   LPCR_LPES1            ASM_CONST(0x0000000000000004)   /* LPAR Env selector 1 */
@@ -44,6 +44,7 @@ struct smp_ops_t {
 #endif
         void  (*probe)(void);
         int   (*kick_cpu)(int nr);
+        int   (*prepare_cpu)(int nr);
         void  (*setup_cpu)(int nr);
         void  (*bringup_done)(void);
         void  (*take_timebase)(void);
@@ -61,7 +62,6 @@ extern void smp_generic_take_timebase(void);
 DECLARE_PER_CPU(unsigned int, cpu_pvr);

 #ifdef CONFIG_HOTPLUG_CPU
-extern void migrate_irqs(void);
 int generic_cpu_disable(void);
 void generic_cpu_die(unsigned int cpu);
 void generic_set_cpu_dead(unsigned int cpu);
@@ -0,0 +1,97 @@
+/*
+ * Copyright 2016,2017 IBM Corporation.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+#ifndef _ASM_POWERPC_XIVE_REGS_H
+#define _ASM_POWERPC_XIVE_REGS_H
+
+/*
+ * Thread Management (aka "TM") registers
+ */
+
+/* TM register offsets */
+#define TM_QW0_USER             0x000 /* All rings */
+#define TM_QW1_OS               0x010 /* Ring 0..2 */
+#define TM_QW2_HV_POOL          0x020 /* Ring 0..1 */
+#define TM_QW3_HV_PHYS          0x030 /* Ring 0..1 */
+
+/* Byte offsets inside a QW             QW0 QW1 QW2 QW3 */
+#define TM_NSR                  0x0  /* +   +   -   +  */
+#define TM_CPPR                 0x1  /* -   +   -   +  */
+#define TM_IPB                  0x2  /* -   +   +   +  */
+#define TM_LSMFB                0x3  /* -   +   +   +  */
+#define TM_ACK_CNT              0x4  /* -   +   -   -  */
+#define TM_INC                  0x5  /* -   +   -   +  */
+#define TM_AGE                  0x6  /* -   +   -   +  */
+#define TM_PIPR                 0x7  /* -   +   -   +  */
+
+#define TM_WORD0                0x0
+#define TM_WORD1                0x4
+
+/*
+ * QW word 2 contains the valid bit at the top and other fields
+ * depending on the QW.
+ */
+#define TM_WORD2                0x8
+#define   TM_QW0W2_VU           PPC_BIT32(0)
+#define   TM_QW0W2_LOGIC_SERV   PPC_BITMASK32(1,31) // XX 2,31 ?
+#define   TM_QW1W2_VO           PPC_BIT32(0)
+#define   TM_QW1W2_OS_CAM       PPC_BITMASK32(8,31)
+#define   TM_QW2W2_VP           PPC_BIT32(0)
+#define   TM_QW2W2_POOL_CAM     PPC_BITMASK32(8,31)
+#define   TM_QW3W2_VT           PPC_BIT32(0)
+#define   TM_QW3W2_LP           PPC_BIT32(6)
+#define   TM_QW3W2_LE           PPC_BIT32(7)
+#define   TM_QW3W2_T            PPC_BIT32(31)
+
+/*
+ * In addition to normal loads to "peek" and writes (only when invalid)
+ * using 4 and 8 bytes accesses, the above registers support these
+ * "special" byte operations:
+ *
+ *   - Byte load from QW0[NSR] - User level NSR (EBB)
+ *   - Byte store to QW0[NSR] - User level NSR (EBB)
+ *   - Byte load/store to QW1[CPPR] and QW3[CPPR] - CPPR access
+ *   - Byte load from QW3[TM_WORD2] - Read VT||00000||LP||LE on thrd 0
+ *                                    otherwise VT||0000000
+ *   - Byte store to QW3[TM_WORD2] - Set VT bit (and LP/LE if present)
+ *
+ * Then we have all these "special" CI ops at these offset that trigger
+ * all sorts of side effects:
+ */
+#define TM_SPC_ACK_EBB          0x800   /* Load8 ack EBB to reg*/
+#define TM_SPC_ACK_OS_REG       0x810   /* Load16 ack OS irq to reg */
+#define TM_SPC_PUSH_USR_CTX     0x808   /* Store32 Push/Validate user context */
+#define TM_SPC_PULL_USR_CTX     0x808   /* Load32 Pull/Invalidate user context */
+#define TM_SPC_SET_OS_PENDING   0x812   /* Store8 Set OS irq pending bit */
+#define TM_SPC_PULL_OS_CTX      0x818   /* Load32/Load64 Pull/Invalidate OS context to reg */
+#define TM_SPC_PULL_POOL_CTX    0x828   /* Load32/Load64 Pull/Invalidate Pool context to reg*/
+#define TM_SPC_ACK_HV_REG       0x830   /* Load16 ack HV irq to reg */
+#define TM_SPC_PULL_USR_CTX_OL  0xc08   /* Store8 Pull/Inval usr ctx to odd line */
+#define TM_SPC_ACK_OS_EL        0xc10   /* Store8 ack OS irq to even line */
+#define TM_SPC_ACK_HV_POOL_EL   0xc20   /* Store8 ack HV evt pool to even line */
+#define TM_SPC_ACK_HV_EL        0xc30   /* Store8 ack HV irq to even line */
+/* XXX more... */
+
+/* NSR fields for the various QW ack types */
+#define TM_QW0_NSR_EB           PPC_BIT8(0)
+#define TM_QW1_NSR_EO           PPC_BIT8(0)
+#define TM_QW3_NSR_HE           PPC_BITMASK8(0,1)
+#define  TM_QW3_NSR_HE_NONE     0
+#define  TM_QW3_NSR_HE_POOL     1
+#define  TM_QW3_NSR_HE_PHYS     2
+#define  TM_QW3_NSR_HE_LSI      3
+#define TM_QW3_NSR_I            PPC_BIT8(2)
+#define TM_QW3_NSR_GRP_LVL      PPC_BIT8(3,7)
+
+/* Utilities to manipulate these (originaly from OPAL) */
+#define MASK_TO_LSH(m)          (__builtin_ffsl(m) - 1)
+#define GETFIELD(m, v)          (((v) & (m)) >> MASK_TO_LSH(m))
+#define SETFIELD(m, v, val)                             \
+        (((v) & ~(m)) | ((((typeof(v))(val)) << MASK_TO_LSH(m)) & (m)))
+
+#endif /* _ASM_POWERPC_XIVE_REGS_H */
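GETFIELD/SETFIELD treat any contiguous mask as a field descriptor: MASK_TO_LSH locates the mask's lowest set bit with __builtin_ffsl, so no separate shift constant is needed. A standalone worked example (0x00ffffff is what PPC_BITMASK32(8,31), e.g. TM_QW1W2_OS_CAM, expands to):

#include <assert.h>

#define MASK_TO_LSH(m)          (__builtin_ffsl(m) - 1)
#define GETFIELD(m, v)          (((v) & (m)) >> MASK_TO_LSH(m))
#define SETFIELD(m, v, val)     \
        (((v) & ~(m)) | ((((typeof(v))(val)) << MASK_TO_LSH(m)) & (m)))

int main(void)
{
        unsigned long w2 = 0;

        w2 = SETFIELD(0x00ffffffUL, w2, 0x1234UL);      /* insert a CAM value */
        assert(GETFIELD(0x00ffffffUL, w2) == 0x1234UL); /* read it back */
        return 0;
}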
@@ -0,0 +1,162 @@
+/*
+ * Copyright 2016,2017 IBM Corporation.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+#ifndef _ASM_POWERPC_XIVE_H
+#define _ASM_POWERPC_XIVE_H
+
+#define XIVE_INVALID_VP 0xffffffff
+
+#ifdef CONFIG_PPC_XIVE
+
+/*
+ * Thread Interrupt Management Area (TIMA)
+ *
+ * This is a global MMIO region divided in 4 pages of varying access
+ * permissions, providing access to per-cpu interrupt management
+ * functions. It always identifies the CPU doing the access based
+ * on the PowerBus initiator ID, thus we always access via the
+ * same offset regardless of where the code is executing
+ */
+extern void __iomem *xive_tima;
+
+/*
+ * Offset in the TM area of our current execution level (provided by
+ * the backend)
+ */
+extern u32 xive_tima_offset;
+
+/*
+ * Per-irq data (irq_get_handler_data for normal IRQs), IPIs
+ * have it stored in the xive_cpu structure. We also cache
+ * for normal interrupts the current target CPU.
+ *
+ * This structure is setup by the backend for each interrupt.
+ */
+struct xive_irq_data {
+        u64 flags;
+        u64 eoi_page;
+        void __iomem *eoi_mmio;
+        u64 trig_page;
+        void __iomem *trig_mmio;
+        u32 esb_shift;
+        int src_chip;
+
+        /* Setup/used by frontend */
+        int target;
+        bool saved_p;
+};
+#define XIVE_IRQ_FLAG_STORE_EOI 0x01
+#define XIVE_IRQ_FLAG_LSI       0x02
+#define XIVE_IRQ_FLAG_SHIFT_BUG 0x04
+#define XIVE_IRQ_FLAG_MASK_FW   0x08
+#define XIVE_IRQ_FLAG_EOI_FW    0x10
+
+#define XIVE_INVALID_CHIP_ID    -1
+
+/* A queue tracking structure in a CPU */
+struct xive_q {
+        __be32                  *qpage;
+        u32                     msk;
+        u32                     idx;
+        u32                     toggle;
+        u64                     eoi_phys;
+        u32                     esc_irq;
+        atomic_t                count;
+        atomic_t                pending_count;
+};
+
+/*
+ * "magic" Event State Buffer (ESB) MMIO offsets.
+ *
+ * Each interrupt source has a 2-bit state machine called ESB
+ * which can be controlled by MMIO. It's made of 2 bits, P and
+ * Q. P indicates that an interrupt is pending (has been sent
+ * to a queue and is waiting for an EOI). Q indicates that the
+ * interrupt has been triggered while pending.
+ *
+ * This acts as a coalescing mechanism in order to guarantee
+ * that a given interrupt only occurs at most once in a queue.
+ *
+ * When doing an EOI, the Q bit will indicate if the interrupt
+ * needs to be re-triggered.
+ *
+ * The following offsets into the ESB MMIO allow to read or
+ * manipulate the PQ bits. They must be used with an 8-bytes
+ * load instruction. They all return the previous state of the
+ * interrupt (atomically).
+ *
+ * Additionally, some ESB pages support doing an EOI via a
+ * store at 0 and some ESBs support doing a trigger via a
+ * separate trigger page.
+ */
+#define XIVE_ESB_GET            0x800
+#define XIVE_ESB_SET_PQ_00      0xc00
+#define XIVE_ESB_SET_PQ_01      0xd00
+#define XIVE_ESB_SET_PQ_10      0xe00
+#define XIVE_ESB_SET_PQ_11      0xf00
+
+#define XIVE_ESB_VAL_P          0x2
+#define XIVE_ESB_VAL_Q          0x1
+
+/* Global enable flags for the XIVE support */
+extern bool __xive_enabled;
+
+static inline bool xive_enabled(void) { return __xive_enabled; }
+
+extern bool xive_native_init(void);
+extern void xive_smp_probe(void);
+extern int  xive_smp_prepare_cpu(unsigned int cpu);
+extern void xive_smp_setup_cpu(void);
+extern void xive_smp_disable_cpu(void);
+extern void xive_kexec_teardown_cpu(int secondary);
+extern void xive_shutdown(void);
+extern void xive_flush_interrupt(void);
+
+/* xmon hook */
+extern void xmon_xive_do_dump(int cpu);
+
+/* APIs used by KVM */
+extern u32 xive_native_default_eq_shift(void);
+extern u32 xive_native_alloc_vp_block(u32 max_vcpus);
+extern void xive_native_free_vp_block(u32 vp_base);
+extern int xive_native_populate_irq_data(u32 hw_irq,
+                                         struct xive_irq_data *data);
+extern void xive_cleanup_irq_data(struct xive_irq_data *xd);
+extern u32 xive_native_alloc_irq(void);
+extern void xive_native_free_irq(u32 irq);
+extern int xive_native_configure_irq(u32 hw_irq, u32 target, u8 prio, u32 sw_irq);
+
+extern int xive_native_configure_queue(u32 vp_id, struct xive_q *q, u8 prio,
+                                       __be32 *qpage, u32 order, bool can_escalate);
+extern void xive_native_disable_queue(u32 vp_id, struct xive_q *q, u8 prio);
+
+extern void xive_native_sync_source(u32 hw_irq);
+extern bool is_xive_irq(struct irq_chip *chip);
+extern int xive_native_enable_vp(u32 vp_id);
+extern int xive_native_disable_vp(u32 vp_id);
+extern int xive_native_get_vp_info(u32 vp_id, u32 *out_cam_id, u32 *out_chip_id);
+
+#else
+
+static inline bool xive_enabled(void) { return false; }
+
+static inline bool xive_native_init(void) { return false; }
+static inline void xive_smp_probe(void) { }
+static inline int  xive_smp_prepare_cpu(unsigned int cpu) { return -EINVAL; }
+static inline void xive_smp_setup_cpu(void) { }
+static inline void xive_smp_disable_cpu(void) { }
+static inline void xive_kexec_teardown_cpu(int secondary) { }
+static inline void xive_shutdown(void) { }
+static inline void xive_flush_interrupt(void) { }
+
+static inline u32 xive_native_alloc_vp_block(u32 max_vcpus) { return XIVE_INVALID_VP; }
+static inline void xive_native_free_vp_block(u32 vp_base) { }
+
+#endif
+
+#endif /* _ASM_POWERPC_XIVE_H */
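To illustrate the PQ state machine described in the header: an 8-byte load at XIVE_ESB_GET returns the previous P/Q bits without changing them. A hedged sketch of a consumer (the helper name is hypothetical; in_be64() and the declarations are the ones appearing in this merge):

/* Sketch: peek at a source's ESB state. Q set means the interrupt
 * fired again while pending, so an EOI will need to re-trigger it.
 * Assumes xd->eoi_mmio was mapped from the eoi_page OPAL reported. */
static bool xive_irq_needs_retrigger(struct xive_irq_data *xd)
{
        u64 pq = in_be64(xd->eoi_mmio + XIVE_ESB_GET);

        return (pq & XIVE_ESB_VAL_Q) != 0;
}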
@@ -29,5 +29,7 @@ static inline void xmon_register_spus(struct list_head *list) { };
 extern int cpus_are_in_xmon(void);
 #endif

+extern void xmon_printf(const char *format, ...);
+
 #endif /* __KERNEL __ */
 #endif /* __ASM_POWERPC_XMON_H */
@@ -630,6 +630,8 @@ int main(void)
         HSTATE_FIELD(HSTATE_KVM_VCPU, kvm_vcpu);
         HSTATE_FIELD(HSTATE_KVM_VCORE, kvm_vcore);
         HSTATE_FIELD(HSTATE_XICS_PHYS, xics_phys);
+        HSTATE_FIELD(HSTATE_XIVE_TIMA_PHYS, xive_tima_phys);
+        HSTATE_FIELD(HSTATE_XIVE_TIMA_VIRT, xive_tima_virt);
         HSTATE_FIELD(HSTATE_SAVED_XIRR, saved_xirr);
         HSTATE_FIELD(HSTATE_HOST_IPI, host_ipi);
         HSTATE_FIELD(HSTATE_PTID, ptid);
@@ -715,6 +717,14 @@ int main(void)
         OFFSET(VCPU_HOST_MAS6, kvm_vcpu, arch.host_mas6);
 #endif

+#ifdef CONFIG_KVM_XICS
+        DEFINE(VCPU_XIVE_SAVED_STATE, offsetof(struct kvm_vcpu,
+                                               arch.xive_saved_state));
+        DEFINE(VCPU_XIVE_CAM_WORD, offsetof(struct kvm_vcpu,
+                                            arch.xive_cam_word));
+        DEFINE(VCPU_XIVE_PUSHED, offsetof(struct kvm_vcpu, arch.xive_pushed));
+#endif
+
 #ifdef CONFIG_KVM_EXIT_TIMING
         OFFSET(VCPU_TIMING_EXIT_TBU, kvm_vcpu, arch.timing_exit.tv32.tbu);
         OFFSET(VCPU_TIMING_EXIT_TBL, kvm_vcpu, arch.timing_exit.tv32.tbl);
@@ -29,6 +29,7 @@ _GLOBAL(__setup_cpu_power7)
        li      r0,0
        mtspr   SPRN_LPID,r0
        mfspr   r3,SPRN_LPCR
+       li      r4,(LPCR_LPES1 >> LPCR_LPES_SH)
        bl      __init_LPCR
        bl      __init_tlb_power7
        mtlr    r11
@@ -42,6 +43,7 @@ _GLOBAL(__restore_cpu_power7)
        li      r0,0
        mtspr   SPRN_LPID,r0
        mfspr   r3,SPRN_LPCR
+       li      r4,(LPCR_LPES1 >> LPCR_LPES_SH)
        bl      __init_LPCR
        bl      __init_tlb_power7
        mtlr    r11
@@ -59,6 +61,7 @@ _GLOBAL(__setup_cpu_power8)
        mtspr   SPRN_LPID,r0
        mfspr   r3,SPRN_LPCR
        ori     r3, r3, LPCR_PECEDH
+       li      r4,0 /* LPES = 0 */
        bl      __init_LPCR
        bl      __init_HFSCR
        bl      __init_tlb_power8
@@ -80,6 +83,7 @@ _GLOBAL(__restore_cpu_power8)
        mtspr   SPRN_LPID,r0
        mfspr   r3,SPRN_LPCR
        ori     r3, r3, LPCR_PECEDH
+       li      r4,0 /* LPES = 0 */
        bl      __init_LPCR
        bl      __init_HFSCR
        bl      __init_tlb_power8
@@ -99,10 +103,11 @@ _GLOBAL(__setup_cpu_power9)
        mtspr   SPRN_PSSCR,r0
        mtspr   SPRN_LPID,r0
        mfspr   r3,SPRN_LPCR
-       LOAD_REG_IMMEDIATE(r4, LPCR_PECEDH | LPCR_PECE_HVEE | LPCR_HVICE)
+       LOAD_REG_IMMEDIATE(r4, LPCR_PECEDH | LPCR_PECE_HVEE | LPCR_HVICE | LPCR_HEIC)
        or      r3, r3, r4
        LOAD_REG_IMMEDIATE(r4, LPCR_UPRT | LPCR_HR)
        andc    r3, r3, r4
+       li      r4,0 /* LPES = 0 */
        bl      __init_LPCR
        bl      __init_HFSCR
        bl      __init_tlb_power9
@@ -122,10 +127,11 @@ _GLOBAL(__restore_cpu_power9)
        mtspr   SPRN_PSSCR,r0
        mtspr   SPRN_LPID,r0
        mfspr   r3,SPRN_LPCR
-       LOAD_REG_IMMEDIATE(r4, LPCR_PECEDH | LPCR_PECE_HVEE | LPCR_HVICE)
+       LOAD_REG_IMMEDIATE(r4, LPCR_PECEDH | LPCR_PECE_HVEE | LPCR_HVICE | LPCR_HEIC)
        or      r3, r3, r4
        LOAD_REG_IMMEDIATE(r4, LPCR_UPRT | LPCR_HR)
        andc    r3, r3, r4
+       li      r4,0 /* LPES = 0 */
        bl      __init_LPCR
        bl      __init_HFSCR
        bl      __init_tlb_power9
@@ -146,7 +152,7 @@ __init_hvmode_206:

 __init_LPCR:
        /* Setup a sane LPCR:
-        *   Called with initial LPCR in R3
+        *   Called with initial LPCR in R3 and desired LPES 2-bit value in R4
         *
         *   LPES = 0b01 (HSRR0/1 used for 0x500)
         *   PECE = 0b111
@@ -157,8 +163,7 @@ __init_LPCR:
         *
         *   Other bits untouched for now
         */
-       li      r5,1
-       rldimi  r3,r5, LPCR_LPES_SH, 64-LPCR_LPES_SH-2
+       rldimi  r3,r4, LPCR_LPES_SH, 64-LPCR_LPES_SH-2
        ori     r3,r3,(LPCR_PECE0|LPCR_PECE1|LPCR_PECE2)
        li      r5,4
        rldimi  r3,r5, LPCR_DPFD_SH, 64-LPCR_DPFD_SH-3
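The `rldimi r3,r4,LPCR_LPES_SH,64-LPCR_LPES_SH-2` above is a rotate-then-insert: it deposits the 2-bit LPES value from r4 into the LPCR image in r3 without touching any other bits, which is why callers now pass their LPES choice in r4. An equivalent C sketch using the reg.h constants:

/* Equivalent of: rldimi r3, r4, LPCR_LPES_SH, 64-LPCR_LPES_SH-2 */
static unsigned long lpcr_set_lpes(unsigned long lpcr, unsigned long lpes)
{
        unsigned long mask = LPCR_LPES;         /* 0x0000000c */

        return (lpcr & ~mask) | ((lpes << LPCR_LPES_SH) & mask);
}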
@@ -442,46 +442,6 @@ u64 arch_irq_stat_cpu(unsigned int cpu)
         return sum;
 }

-#ifdef CONFIG_HOTPLUG_CPU
-void migrate_irqs(void)
-{
-        struct irq_desc *desc;
-        unsigned int irq;
-        static int warned;
-        cpumask_var_t mask;
-        const struct cpumask *map = cpu_online_mask;
-
-        alloc_cpumask_var(&mask, GFP_KERNEL);
-
-        for_each_irq_desc(irq, desc) {
-                struct irq_data *data;
-                struct irq_chip *chip;
-
-                data = irq_desc_get_irq_data(desc);
-                if (irqd_is_per_cpu(data))
-                        continue;
-
-                chip = irq_data_get_irq_chip(data);
-
-                cpumask_and(mask, irq_data_get_affinity_mask(data), map);
-                if (cpumask_any(mask) >= nr_cpu_ids) {
-                        pr_warn("Breaking affinity for irq %i\n", irq);
-                        cpumask_copy(mask, map);
-                }
-                if (chip->irq_set_affinity)
-                        chip->irq_set_affinity(data, mask, true);
-                else if (desc->action && !(warned++))
-                        pr_err("Cannot set affinity for irq %i\n", irq);
-        }
-
-        free_cpumask_var(mask);
-
-        local_irq_enable();
-        mdelay(1);
-        local_irq_disable();
-}
-#endif
-
 static inline void check_stack_overflow(void)
 {
 #ifdef CONFIG_DEBUG_STACKOVERFLOW
@@ -439,7 +439,14 @@ int generic_cpu_disable(void)
 #ifdef CONFIG_PPC64
         vdso_data->processorCount--;
 #endif
-        migrate_irqs();
+        /* Update affinity of all IRQs previously aimed at this CPU */
+        irq_migrate_all_off_this_cpu();
+
+        /* Give the CPU time to drain in-flight ones */
+        local_irq_enable();
+        mdelay(1);
+        local_irq_disable();
+
         return 0;
 }

@@ -521,6 +528,16 @@ int __cpu_up(unsigned int cpu, struct task_struct *tidle)

         cpu_idle_thread_init(cpu, tidle);

+        /*
+         * The platform might need to allocate resources prior to bringing
+         * up the CPU
+         */
+        if (smp_ops->prepare_cpu) {
+                rc = smp_ops->prepare_cpu(cpu);
+                if (rc)
+                        return rc;
+        }
+
         /* Make sure callin-map entry is 0 (can be leftover a CPU
          * hotplug
          */
@@ -197,6 +197,11 @@ config KVM_XICS
           Specification) interrupt controller architecture used on
           IBM POWER (pSeries) servers.

+config KVM_XIVE
+        bool
+        default y
+        depends on KVM_XICS && PPC_XIVE_NATIVE && KVM_BOOK3S_HV_POSSIBLE
+
 source drivers/vhost/Kconfig

 endif # VIRTUALIZATION
@@ -74,7 +74,7 @@ kvm-hv-y += \
         book3s_64_mmu_radix.o

 kvm-book3s_64-builtin-xics-objs-$(CONFIG_KVM_XICS) := \
-        book3s_hv_rm_xics.o
+        book3s_hv_rm_xics.o book3s_hv_rm_xive.o

 ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
 kvm-book3s_64-builtin-objs-$(CONFIG_KVM_BOOK3S_64_HANDLER) += \
@@ -89,6 +89,8 @@ endif
 kvm-book3s_64-objs-$(CONFIG_KVM_XICS) += \
         book3s_xics.o

+kvm-book3s_64-objs-$(CONFIG_KVM_XIVE) += book3s_xive.o
+
 kvm-book3s_64-module-objs := \
         $(common-objs-y) \
         book3s.o \
@@ -20,6 +20,10 @@
 #include <linux/slab.h>
 #include <linux/module.h>
 #include <linux/miscdevice.h>
+#include <linux/gfp.h>
+#include <linux/sched.h>
+#include <linux/vmalloc.h>
+#include <linux/highmem.h>

 #include <asm/reg.h>
 #include <asm/cputable.h>
@@ -31,10 +35,7 @@
 #include <asm/kvm_book3s.h>
 #include <asm/mmu_context.h>
 #include <asm/page.h>
-#include <linux/gfp.h>
-#include <linux/sched.h>
-#include <linux/vmalloc.h>
-#include <linux/highmem.h>
+#include <asm/xive.h>

 #include "book3s.h"
 #include "trace.h"
@@ -596,11 +597,14 @@ int kvmppc_get_one_reg(struct kvm_vcpu *vcpu, u64 id,
                 break;
 #ifdef CONFIG_KVM_XICS
         case KVM_REG_PPC_ICP_STATE:
-                if (!vcpu->arch.icp) {
+                if (!vcpu->arch.icp && !vcpu->arch.xive_vcpu) {
                         r = -ENXIO;
                         break;
                 }
-                *val = get_reg_val(id, kvmppc_xics_get_icp(vcpu));
+                if (xive_enabled())
+                        *val = get_reg_val(id, kvmppc_xive_get_icp(vcpu));
+                else
+                        *val = get_reg_val(id, kvmppc_xics_get_icp(vcpu));
                 break;
 #endif /* CONFIG_KVM_XICS */
         case KVM_REG_PPC_FSCR:
@@ -666,12 +670,14 @@ int kvmppc_set_one_reg(struct kvm_vcpu *vcpu, u64 id,
 #endif /* CONFIG_VSX */
 #ifdef CONFIG_KVM_XICS
         case KVM_REG_PPC_ICP_STATE:
-                if (!vcpu->arch.icp) {
+                if (!vcpu->arch.icp && !vcpu->arch.xive_vcpu) {
                         r = -ENXIO;
                         break;
                 }
-                r = kvmppc_xics_set_icp(vcpu,
-                                        set_reg_val(id, *val));
+                if (xive_enabled())
+                        r = kvmppc_xive_set_icp(vcpu, set_reg_val(id, *val));
+                else
+                        r = kvmppc_xics_set_icp(vcpu, set_reg_val(id, *val));
                 break;
 #endif /* CONFIG_KVM_XICS */
         case KVM_REG_PPC_FSCR:
@@ -942,6 +948,50 @@ int kvmppc_book3s_hcall_implemented(struct kvm *kvm, unsigned long hcall)
         return kvm->arch.kvm_ops->hcall_implemented(hcall);
 }

+#ifdef CONFIG_KVM_XICS
+int kvm_set_irq(struct kvm *kvm, int irq_source_id, u32 irq, int level,
+                bool line_status)
+{
+        if (xive_enabled())
+                return kvmppc_xive_set_irq(kvm, irq_source_id, irq, level,
+                                           line_status);
+        else
+                return kvmppc_xics_set_irq(kvm, irq_source_id, irq, level,
+                                           line_status);
+}
+
+int kvm_arch_set_irq_inatomic(struct kvm_kernel_irq_routing_entry *irq_entry,
+                              struct kvm *kvm, int irq_source_id,
+                              int level, bool line_status)
+{
+        return kvm_set_irq(kvm, irq_source_id, irq_entry->gsi,
+                           level, line_status);
+}
+static int kvmppc_book3s_set_irq(struct kvm_kernel_irq_routing_entry *e,
+                                 struct kvm *kvm, int irq_source_id, int level,
+                                 bool line_status)
+{
+        return kvm_set_irq(kvm, irq_source_id, e->gsi, level, line_status);
+}
+
+int kvm_irq_map_gsi(struct kvm *kvm,
+                    struct kvm_kernel_irq_routing_entry *entries, int gsi)
+{
+        entries->gsi = gsi;
+        entries->type = KVM_IRQ_ROUTING_IRQCHIP;
+        entries->set = kvmppc_book3s_set_irq;
+        entries->irqchip.irqchip = 0;
+        entries->irqchip.pin = gsi;
+        return 1;
+}
+
+int kvm_irq_map_chip_pin(struct kvm *kvm, unsigned irqchip, unsigned pin)
+{
+        return pin;
+}
+
+#endif /* CONFIG_KVM_XICS */
+
 static int kvmppc_book3s_init(void)
 {
         int r;
@@ -952,12 +1002,25 @@ static int kvmppc_book3s_init(void)
 #ifdef CONFIG_KVM_BOOK3S_32_HANDLER
         r = kvmppc_book3s_init_pr();
 #endif
-        return r;

+#ifdef CONFIG_KVM_XICS
+#ifdef CONFIG_KVM_XIVE
+        if (xive_enabled()) {
+                kvmppc_xive_init_module();
+                kvm_register_device_ops(&kvm_xive_ops, KVM_DEV_TYPE_XICS);
+        } else
+#endif
+                kvm_register_device_ops(&kvm_xics_ops, KVM_DEV_TYPE_XICS);
+#endif
+        return r;
 }

 static void kvmppc_book3s_exit(void)
 {
+#ifdef CONFIG_KVM_XICS
+        if (xive_enabled())
+                kvmppc_xive_exit_module();
+#endif
 #ifdef CONFIG_KVM_BOOK3S_32_HANDLER
         kvmppc_book3s_exit_pr();
 #endif
@@ -35,6 +35,15 @@
 #include <linux/srcu.h>
 #include <linux/miscdevice.h>
 #include <linux/debugfs.h>
+#include <linux/gfp.h>
+#include <linux/vmalloc.h>
+#include <linux/highmem.h>
+#include <linux/hugetlb.h>
+#include <linux/kvm_irqfd.h>
+#include <linux/irqbypass.h>
+#include <linux/module.h>
+#include <linux/compiler.h>
+#include <linux/of.h>

 #include <asm/reg.h>
 #include <asm/cputable.h>
@@ -58,15 +67,7 @@
 #include <asm/mmu.h>
 #include <asm/opal.h>
 #include <asm/xics.h>
-#include <linux/gfp.h>
-#include <linux/vmalloc.h>
-#include <linux/highmem.h>
-#include <linux/hugetlb.h>
-#include <linux/kvm_irqfd.h>
-#include <linux/irqbypass.h>
-#include <linux/module.h>
-#include <linux/compiler.h>
-#include <linux/of.h>
+#include <asm/xive.h>

 #include "book3s.h"

@@ -837,6 +838,10 @@ int kvmppc_pseries_do_hcall(struct kvm_vcpu *vcpu)
         case H_IPOLL:
         case H_XIRR_X:
                 if (kvmppc_xics_enabled(vcpu)) {
+                        if (xive_enabled()) {
+                                ret = H_NOT_AVAILABLE;
+                                return RESUME_GUEST;
+                        }
                         ret = kvmppc_xics_hcall(vcpu, req);
                         break;
                 }
@@ -2947,8 +2952,12 @@ static int kvmppc_vcpu_run_hv(struct kvm_run *run, struct kvm_vcpu *vcpu)
                         r = kvmppc_book3s_hv_page_fault(run, vcpu,
                                 vcpu->arch.fault_dar, vcpu->arch.fault_dsisr);
                         srcu_read_unlock(&vcpu->kvm->srcu, srcu_idx);
-                } else if (r == RESUME_PASSTHROUGH)
-                        r = kvmppc_xics_rm_complete(vcpu, 0);
+                } else if (r == RESUME_PASSTHROUGH) {
+                        if (WARN_ON(xive_enabled()))
+                                r = H_SUCCESS;
+                        else
+                                r = kvmppc_xics_rm_complete(vcpu, 0);
+                }
         } while (is_kvmppc_resume_guest(r));

  out:
@@ -3400,10 +3409,20 @@ static int kvmppc_core_init_vm_hv(struct kvm *kvm)
         /*
          * On POWER9, VPM0 bit is reserved (VPM0=1 behaviour is assumed)
          * Set HVICE bit to enable hypervisor virtualization interrupts.
+         * Set HEIC to prevent OS interrupts to go to hypervisor (should
+         * be unnecessary but better safe than sorry in case we re-enable
+         * EE in HV mode with this LPCR still set)
          */
         if (cpu_has_feature(CPU_FTR_ARCH_300)) {
                 lpcr &= ~LPCR_VPM0;
-                lpcr |= LPCR_HVICE;
+                lpcr |= LPCR_HVICE | LPCR_HEIC;
+
+                /*
+                 * If xive is enabled, we route 0x500 interrupts directly
+                 * to the guest.
+                 */
+                if (xive_enabled())
+                        lpcr |= LPCR_LPES;
         }

         /*
|
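The LPCR hunk above is plain read-modify-write on a 64-bit register image: clear VPM0, set HVICE and HEIC, and conditionally set LPES. A tiny self-contained sketch of the same flag arithmetic (the bit positions below are hypothetical stand-ins, not the real LPCR layout):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical bit assignments, stand-ins for the real LPCR layout. */
#define LPCR_VPM0  (1ULL << 63)
#define LPCR_HVICE (1ULL << 1)
#define LPCR_HEIC  (1ULL << 4)
#define LPCR_LPES  (1ULL << 3)

static uint64_t setup_lpcr(uint64_t lpcr, bool xive)
{
	lpcr &= ~LPCR_VPM0;		/* reserved on POWER9 */
	lpcr |= LPCR_HVICE | LPCR_HEIC;	/* HV virt. interrupts, no OS irqs to HV */
	if (xive)
		lpcr |= LPCR_LPES;	/* route 0x500 directly to the guest */
	return lpcr;
}

int main(void)
{
	printf("%#llx\n", (unsigned long long)setup_lpcr(LPCR_VPM0, true));
	return 0;
}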
@@ -3533,7 +3552,7 @@ static int kvmppc_set_passthru_irq(struct kvm *kvm, int host_irq, int guest_gsi)
 	struct kvmppc_irq_map *irq_map;
 	struct kvmppc_passthru_irqmap *pimap;
 	struct irq_chip *chip;
-	int i;
+	int i, rc = 0;
 
 	if (!kvm_irq_bypass)
 		return 1;
@@ -3558,10 +3577,10 @@ static int kvmppc_set_passthru_irq(struct kvm *kvm, int host_irq, int guest_gsi)
 	/*
 	 * For now, we only support interrupts for which the EOI operation
 	 * is an OPAL call followed by a write to XIRR, since that's
-	 * what our real-mode EOI code does.
+	 * what our real-mode EOI code does, or a XIVE interrupt
 	 */
 	chip = irq_data_get_irq_chip(&desc->irq_data);
-	if (!chip || !is_pnv_opal_msi(chip)) {
+	if (!chip || !(is_pnv_opal_msi(chip) || is_xive_irq(chip))) {
 		pr_warn("kvmppc_set_passthru_irq_hv: Could not assign IRQ map for (%d,%d)\n",
 			host_irq, guest_gsi);
 		mutex_unlock(&kvm->lock);
@@ -3603,7 +3622,12 @@ static int kvmppc_set_passthru_irq(struct kvm *kvm, int host_irq, int guest_gsi)
 	if (i == pimap->n_mapped)
 		pimap->n_mapped++;
 
-	kvmppc_xics_set_mapped(kvm, guest_gsi, desc->irq_data.hwirq);
+	if (xive_enabled())
+		rc = kvmppc_xive_set_mapped(kvm, guest_gsi, desc);
+	else
+		kvmppc_xics_set_mapped(kvm, guest_gsi, desc->irq_data.hwirq);
+	if (rc)
+		irq_map->r_hwirq = 0;
 
 	mutex_unlock(&kvm->lock);
@@ -3614,7 +3638,7 @@ static int kvmppc_clr_passthru_irq(struct kvm *kvm, int host_irq, int guest_gsi)
 {
 	struct irq_desc *desc;
 	struct kvmppc_passthru_irqmap *pimap;
-	int i;
+	int i, rc = 0;
 
 	if (!kvm_irq_bypass)
 		return 0;
|
@ -3639,9 +3663,12 @@ static int kvmppc_clr_passthru_irq(struct kvm *kvm, int host_irq, int guest_gsi)
|
||||||
return -ENODEV;
|
return -ENODEV;
|
||||||
}
|
}
|
||||||
|
|
||||||
kvmppc_xics_clr_mapped(kvm, guest_gsi, pimap->mapped[i].r_hwirq);
|
if (xive_enabled())
|
||||||
|
rc = kvmppc_xive_clr_mapped(kvm, guest_gsi, pimap->mapped[i].desc);
|
||||||
|
else
|
||||||
|
kvmppc_xics_clr_mapped(kvm, guest_gsi, pimap->mapped[i].r_hwirq);
|
||||||
|
|
||||||
/* invalidate the entry */
|
/* invalidate the entry (what do do on error from the above ?) */
|
||||||
pimap->mapped[i].r_hwirq = 0;
|
pimap->mapped[i].r_hwirq = 0;
|
||||||
|
|
||||||
/*
|
/*
|
||||||
|
@ -3650,7 +3677,7 @@ static int kvmppc_clr_passthru_irq(struct kvm *kvm, int host_irq, int guest_gsi)
|
||||||
*/
|
*/
|
||||||
unlock:
|
unlock:
|
||||||
mutex_unlock(&kvm->lock);
|
mutex_unlock(&kvm->lock);
|
||||||
return 0;
|
return rc;
|
||||||
}
|
}
|
||||||
|
|
||||||
static int kvmppc_irq_bypass_add_producer_hv(struct irq_bypass_consumer *cons,
|
static int kvmppc_irq_bypass_add_producer_hv(struct irq_bypass_consumer *cons,
|
||||||
|
@@ -3928,7 +3955,7 @@ static int kvmppc_book3s_init_hv(void)
 	 * indirectly, via OPAL.
 	 */
 #ifdef CONFIG_SMP
-	if (!local_paca->kvm_hstate.xics_phys) {
+	if (!xive_enabled() && !local_paca->kvm_hstate.xics_phys) {
 		struct device_node *np;
 
 		np = of_find_compatible_node(NULL, NULL, "ibm,opal-intc");
@@ -23,6 +23,7 @@
 #include <asm/kvm_book3s.h>
 #include <asm/archrandom.h>
 #include <asm/xics.h>
+#include <asm/xive.h>
 #include <asm/dbell.h>
 #include <asm/cputhreads.h>
 #include <asm/io.h>
@@ -31,6 +32,24 @@
 
 #define KVM_CMA_CHUNK_ORDER	18
 
+#include "book3s_xics.h"
+#include "book3s_xive.h"
+
+/*
+ * The XIVE module will populate these when it loads
+ */
+unsigned long (*__xive_vm_h_xirr)(struct kvm_vcpu *vcpu);
+unsigned long (*__xive_vm_h_ipoll)(struct kvm_vcpu *vcpu, unsigned long server);
+int (*__xive_vm_h_ipi)(struct kvm_vcpu *vcpu, unsigned long server,
+		       unsigned long mfrr);
+int (*__xive_vm_h_cppr)(struct kvm_vcpu *vcpu, unsigned long cppr);
+int (*__xive_vm_h_eoi)(struct kvm_vcpu *vcpu, unsigned long xirr);
+EXPORT_SYMBOL_GPL(__xive_vm_h_xirr);
+EXPORT_SYMBOL_GPL(__xive_vm_h_ipoll);
+EXPORT_SYMBOL_GPL(__xive_vm_h_ipi);
+EXPORT_SYMBOL_GPL(__xive_vm_h_cppr);
+EXPORT_SYMBOL_GPL(__xive_vm_h_eoi);
+
 /*
  * Hash page table alignment on newer cpus(CPU_FTR_ARCH_206)
  * should be power of 2.
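The hunk above adds function pointers that a loadable module fills in at init time; until then they stay NULL and callers fall back gracefully. A toy C reduction of that "module populates the hooks" pattern (h_xirr_hook, backend_init and the return values are hypothetical stand-ins):

#include <stdio.h>

/* Hook the loadable backend fills in at init; NULL until then. */
static long (*h_xirr_hook)(void);

static long h_xirr_dispatch(void)
{
	if (!h_xirr_hook)		/* backend not loaded yet */
		return -1;		/* stand-in for H_NOT_AVAILABLE */
	return h_xirr_hook();
}

/* What the backend module would do in its init function. */
static long backend_h_xirr(void) { return 0x500; }
static void backend_init(void) { h_xirr_hook = backend_h_xirr; }

int main(void)
{
	printf("before: %ld\n", h_xirr_dispatch());	/* -1 */
	backend_init();
	printf("after:  %ld\n", h_xirr_dispatch());	/* 0x500 */
	return 0;
}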
@@ -193,12 +212,6 @@ long kvmppc_h_random(struct kvm_vcpu *vcpu)
 	return H_HARDWARE;
 }
 
-static inline void rm_writeb(unsigned long paddr, u8 val)
-{
-	__asm__ __volatile__("stbcix %0,0,%1"
-		: : "r" (val), "r" (paddr) : "memory");
-}
-
 /*
  * Send an interrupt or message to another CPU.
  * The caller needs to include any barrier needed to order writes
@@ -206,7 +219,7 @@ static inline void rm_writeb(unsigned long paddr, u8 val)
  */
 void kvmhv_rm_send_ipi(int cpu)
 {
-	unsigned long xics_phys;
+	void __iomem *xics_phys;
 	unsigned long msg = PPC_DBELL_TYPE(PPC_DBELL_SERVER);
 
 	/* On POWER9 we can use msgsnd for any destination cpu. */
@@ -215,6 +228,7 @@ void kvmhv_rm_send_ipi(int cpu)
 		__asm__ __volatile__ (PPC_MSGSND(%0) : : "r" (msg));
 		return;
 	}
+
 	/* On POWER8 for IPIs to threads in the same core, use msgsnd. */
 	if (cpu_has_feature(CPU_FTR_ARCH_207S) &&
 	    cpu_first_thread_sibling(cpu) ==
@@ -224,10 +238,14 @@ void kvmhv_rm_send_ipi(int cpu)
 		return;
 	}
 
+	/* We should never reach this */
+	if (WARN_ON_ONCE(xive_enabled()))
+		return;
+
 	/* Else poke the target with an IPI */
 	xics_phys = paca[cpu].kvm_hstate.xics_phys;
 	if (xics_phys)
-		rm_writeb(xics_phys + XICS_MFRR, IPI_PRIORITY);
+		__raw_rm_writeb(IPI_PRIORITY, xics_phys + XICS_MFRR);
 	else
 		opal_int_set_mfrr(get_hard_smp_processor_id(cpu), IPI_PRIORITY);
 }
@@ -386,6 +404,9 @@ long kvmppc_read_intr(void)
 	long rc;
 	bool again;
 
+	if (xive_enabled())
+		return 1;
+
 	do {
 		again = false;
 		rc = kvmppc_read_one_intr(&again);
@@ -397,13 +418,16 @@ long kvmppc_read_intr(void)
 
 static long kvmppc_read_one_intr(bool *again)
 {
-	unsigned long xics_phys;
+	void __iomem *xics_phys;
 	u32 h_xirr;
 	__be32 xirr;
 	u32 xisr;
 	u8 host_ipi;
 	int64_t rc;
 
+	if (xive_enabled())
+		return 1;
+
 	/* see if a host IPI is pending */
 	host_ipi = local_paca->kvm_hstate.host_ipi;
 	if (host_ipi)
@@ -415,7 +439,7 @@ static long kvmppc_read_one_intr(bool *again)
 	if (!xics_phys)
 		rc = opal_int_get_xirr(&xirr, false);
 	else
-		xirr = _lwzcix(xics_phys + XICS_XIRR);
+		xirr = __raw_rm_readl(xics_phys + XICS_XIRR);
 	if (rc < 0)
 		return 1;
 
@@ -445,8 +469,8 @@ static long kvmppc_read_one_intr(bool *again)
 	if (xisr == XICS_IPI) {
 		rc = 0;
 		if (xics_phys) {
-			_stbcix(xics_phys + XICS_MFRR, 0xff);
-			_stwcix(xics_phys + XICS_XIRR, xirr);
+			__raw_rm_writeb(0xff, xics_phys + XICS_MFRR);
+			__raw_rm_writel(xirr, xics_phys + XICS_XIRR);
 		} else {
 			opal_int_set_mfrr(hard_smp_processor_id(), 0xff);
 			rc = opal_int_eoi(h_xirr);
@@ -471,7 +495,8 @@ static long kvmppc_read_one_intr(bool *again)
 		 * we need to resend that IPI, bummer
 		 */
 		if (xics_phys)
-			_stbcix(xics_phys + XICS_MFRR, IPI_PRIORITY);
+			__raw_rm_writeb(IPI_PRIORITY,
+					xics_phys + XICS_MFRR);
 		else
 			opal_int_set_mfrr(hard_smp_processor_id(),
 					  IPI_PRIORITY);
@@ -487,3 +512,84 @@ static long kvmppc_read_one_intr(bool *again)
 
 	return kvmppc_check_passthru(xisr, xirr, again);
 }
+
+#ifdef CONFIG_KVM_XICS
+static inline bool is_rm(void)
+{
+	return !(mfmsr() & MSR_DR);
+}
+
+unsigned long kvmppc_rm_h_xirr(struct kvm_vcpu *vcpu)
+{
+	if (xive_enabled()) {
+		if (is_rm())
+			return xive_rm_h_xirr(vcpu);
+		if (unlikely(!__xive_vm_h_xirr))
+			return H_NOT_AVAILABLE;
+		return __xive_vm_h_xirr(vcpu);
+	} else
+		return xics_rm_h_xirr(vcpu);
+}
+
+unsigned long kvmppc_rm_h_xirr_x(struct kvm_vcpu *vcpu)
+{
+	vcpu->arch.gpr[5] = get_tb();
+	if (xive_enabled()) {
+		if (is_rm())
+			return xive_rm_h_xirr(vcpu);
+		if (unlikely(!__xive_vm_h_xirr))
+			return H_NOT_AVAILABLE;
+		return __xive_vm_h_xirr(vcpu);
+	} else
+		return xics_rm_h_xirr(vcpu);
+}
+
+unsigned long kvmppc_rm_h_ipoll(struct kvm_vcpu *vcpu, unsigned long server)
+{
+	if (xive_enabled()) {
+		if (is_rm())
+			return xive_rm_h_ipoll(vcpu, server);
+		if (unlikely(!__xive_vm_h_ipoll))
+			return H_NOT_AVAILABLE;
+		return __xive_vm_h_ipoll(vcpu, server);
+	} else
+		return H_TOO_HARD;
+}
+
+int kvmppc_rm_h_ipi(struct kvm_vcpu *vcpu, unsigned long server,
+		    unsigned long mfrr)
+{
+	if (xive_enabled()) {
+		if (is_rm())
+			return xive_rm_h_ipi(vcpu, server, mfrr);
+		if (unlikely(!__xive_vm_h_ipi))
+			return H_NOT_AVAILABLE;
+		return __xive_vm_h_ipi(vcpu, server, mfrr);
+	} else
+		return xics_rm_h_ipi(vcpu, server, mfrr);
+}
+
+int kvmppc_rm_h_cppr(struct kvm_vcpu *vcpu, unsigned long cppr)
+{
+	if (xive_enabled()) {
+		if (is_rm())
+			return xive_rm_h_cppr(vcpu, cppr);
+		if (unlikely(!__xive_vm_h_cppr))
+			return H_NOT_AVAILABLE;
+		return __xive_vm_h_cppr(vcpu, cppr);
+	} else
+		return xics_rm_h_cppr(vcpu, cppr);
+}
+
+int kvmppc_rm_h_eoi(struct kvm_vcpu *vcpu, unsigned long xirr)
+{
+	if (xive_enabled()) {
+		if (is_rm())
+			return xive_rm_h_eoi(vcpu, xirr);
+		if (unlikely(!__xive_vm_h_eoi))
+			return H_NOT_AVAILABLE;
+		return __xive_vm_h_eoi(vcpu, xirr);
+	} else
+		return xics_rm_h_eoi(vcpu, xirr);
+}
+#endif /* CONFIG_KVM_XICS */
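The wrappers above pick between three paths (real-mode XIVE, virtual-mode XIVE via the module hooks, or XICS emulation) by testing whether MSR[DR] (data relocation) is on. A toy version of just the mode-check dispatch shape (the msr value is faked here; in the kernel it comes from mfmsr(), and the return values are placeholders):

#include <stdbool.h>
#include <stdio.h>

#define MSR_DR (1ul << 4)	/* bit position chosen for this sketch */

/* Mirrors is_rm(): data relocation off means we run in real mode. */
static bool is_rm(unsigned long msr)
{
	return !(msr & MSR_DR);
}

static long h_xirr_real(void)    { return 1; }	/* cache-inhibited path */
static long h_xirr_virtual(void) { return 2; }	/* normal MMIO path */

static long h_xirr(unsigned long msr)
{
	return is_rm(msr) ? h_xirr_real() : h_xirr_virtual();
}

int main(void)
{
	printf("rm: %ld, vm: %ld\n", h_xirr(0), h_xirr(MSR_DR));
	return 0;
}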
@@ -485,7 +485,7 @@ static void icp_rm_down_cppr(struct kvmppc_xics *xics, struct kvmppc_icp *icp,
 }
 
 
-unsigned long kvmppc_rm_h_xirr(struct kvm_vcpu *vcpu)
+unsigned long xics_rm_h_xirr(struct kvm_vcpu *vcpu)
 {
 	union kvmppc_icp_state old_state, new_state;
 	struct kvmppc_xics *xics = vcpu->kvm->arch.xics;
@@ -523,8 +523,8 @@ unsigned long kvmppc_rm_h_xirr(struct kvm_vcpu *vcpu)
 	return check_too_hard(xics, icp);
 }
 
-int kvmppc_rm_h_ipi(struct kvm_vcpu *vcpu, unsigned long server,
-		    unsigned long mfrr)
+int xics_rm_h_ipi(struct kvm_vcpu *vcpu, unsigned long server,
+		  unsigned long mfrr)
 {
 	union kvmppc_icp_state old_state, new_state;
 	struct kvmppc_xics *xics = vcpu->kvm->arch.xics;
@@ -610,7 +610,7 @@ int kvmppc_rm_h_ipi(struct kvm_vcpu *vcpu, unsigned long server,
 	return check_too_hard(xics, this_icp);
 }
 
-int kvmppc_rm_h_cppr(struct kvm_vcpu *vcpu, unsigned long cppr)
+int xics_rm_h_cppr(struct kvm_vcpu *vcpu, unsigned long cppr)
 {
 	union kvmppc_icp_state old_state, new_state;
 	struct kvmppc_xics *xics = vcpu->kvm->arch.xics;
@@ -730,7 +730,7 @@ static int ics_rm_eoi(struct kvm_vcpu *vcpu, u32 irq)
 	return check_too_hard(xics, icp);
 }
 
-int kvmppc_rm_h_eoi(struct kvm_vcpu *vcpu, unsigned long xirr)
+int xics_rm_h_eoi(struct kvm_vcpu *vcpu, unsigned long xirr)
 {
 	struct kvmppc_xics *xics = vcpu->kvm->arch.xics;
 	struct kvmppc_icp *icp = vcpu->arch.icp;
@@ -766,7 +766,7 @@ unsigned long eoi_rc;
 
 static void icp_eoi(struct irq_chip *c, u32 hwirq, __be32 xirr, bool *again)
 {
-	unsigned long xics_phys;
+	void __iomem *xics_phys;
 	int64_t rc;
 
 	rc = pnv_opal_pci_msi_eoi(c, hwirq);
@@ -779,7 +779,7 @@ static void icp_eoi(struct irq_chip *c, u32 hwirq, __be32 xirr, bool *again)
 	/* EOI it */
 	xics_phys = local_paca->kvm_hstate.xics_phys;
 	if (xics_phys) {
-		_stwcix(xics_phys + XICS_XIRR, xirr);
+		__raw_rm_writel(xirr, xics_phys + XICS_XIRR);
 	} else {
 		rc = opal_int_eoi(be32_to_cpu(xirr));
 		*again = rc > 0;
@@ -0,0 +1,47 @@
+#include <linux/kernel.h>
+#include <linux/kvm_host.h>
+#include <linux/err.h>
+#include <linux/kernel_stat.h>
+
+#include <asm/kvm_book3s.h>
+#include <asm/kvm_ppc.h>
+#include <asm/hvcall.h>
+#include <asm/xics.h>
+#include <asm/debug.h>
+#include <asm/synch.h>
+#include <asm/cputhreads.h>
+#include <asm/pgtable.h>
+#include <asm/ppc-opcode.h>
+#include <asm/pnv-pci.h>
+#include <asm/opal.h>
+#include <asm/smp.h>
+#include <asm/asm-prototypes.h>
+#include <asm/xive.h>
+#include <asm/xive-regs.h>
+
+#include "book3s_xive.h"
+
+/* XXX */
+#include <asm/udbg.h>
+//#define DBG(fmt...) udbg_printf(fmt)
+#define DBG(fmt...) do { } while(0)
+
+static inline void __iomem *get_tima_phys(void)
+{
+	return local_paca->kvm_hstate.xive_tima_phys;
+}
+
+#undef XIVE_RUNTIME_CHECKS
+#define X_PFX xive_rm_
+#define X_STATIC
+#define X_STAT_PFX stat_rm_
+#define __x_tima		get_tima_phys()
+#define __x_eoi_page(xd)	((void __iomem *)((xd)->eoi_page))
+#define __x_trig_page(xd)	((void __iomem *)((xd)->trig_page))
+#define __x_readb	__raw_rm_readb
+#define __x_writeb	__raw_rm_writeb
+#define __x_readw	__raw_rm_readw
+#define __x_readq	__raw_rm_readq
+#define __x_writeq	__raw_rm_writeq
+
+#include "book3s_xive_template.c"
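The new file above builds the real-mode variant of the XIVE hcall handlers by defining accessor macros and then textually including a shared template; a sibling file can define virtual-mode accessors and include the same template again. A toy reduction of that pattern, collapsed into one translation unit for brevity (all names hypothetical):

#include <stdio.h>

/* ---- this part would live in the shared template header ---- */
#define XGLUE(a, b) a##b
#define GLUE(a, b) XGLUE(a, b)

/* First instantiation: "rm" variant reads via one accessor. */
#define X_PFX rm_
#define X_READ(p) (*(p) + 1000)	/* stand-in for __raw_rm_readl */
static int GLUE(X_PFX, read_reg)(int *reg) { return X_READ(reg); }
#undef X_PFX
#undef X_READ

/* Second instantiation: "vm" variant uses a different accessor. */
#define X_PFX vm_
#define X_READ(p) (*(p))		/* stand-in for a cacheable read */
static int GLUE(X_PFX, read_reg)(int *reg) { return X_READ(reg); }
#undef X_PFX
#undef X_READ

int main(void)
{
	int reg = 42;
	/* Same template source produced two differently-named functions. */
	printf("rm: %d, vm: %d\n", rm_read_reg(&reg), vm_read_reg(&reg));
	return 0;
}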
@@ -30,6 +30,7 @@
 #include <asm/book3s/64/mmu-hash.h>
 #include <asm/tm.h>
 #include <asm/opal.h>
+#include <asm/xive-regs.h>
 
 #define VCPU_GPRS_TM(reg) (((reg) * ULONG_SIZE) + VCPU_GPR_TM)
 
@@ -970,6 +971,23 @@ ALT_FTR_SECTION_END_IFCLR(CPU_FTR_ARCH_300)
 	cmpwi	r3, 512		/* 1 microsecond */
 	blt	hdec_soon
 
+#ifdef CONFIG_KVM_XICS
+	/* We are entering the guest on that thread, push VCPU to XIVE */
+	ld	r10, HSTATE_XIVE_TIMA_PHYS(r13)
+	cmpldi	cr0, r10, 0
+	beq	no_xive
+	ld	r11, VCPU_XIVE_SAVED_STATE(r4)
+	li	r9, TM_QW1_OS
+	stdcix	r11,r9,r10
+	eieio
+	lwz	r11, VCPU_XIVE_CAM_WORD(r4)
+	li	r9, TM_QW1_OS + TM_WORD2
+	stwcix	r11,r9,r10
+	li	r9, 1
+	stw	r9, VCPU_XIVE_PUSHED(r4)
+no_xive:
+#endif /* CONFIG_KVM_XICS */
+
 deliver_guest_interrupt:
 	ld	r6, VCPU_CTR(r4)
 	ld	r7, VCPU_XER(r4)
 
@@ -1307,6 +1325,42 @@ END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
 	blt	deliver_guest_interrupt
 
 guest_exit_cont:		/* r9 = vcpu, r12 = trap, r13 = paca */
+#ifdef CONFIG_KVM_XICS
+	/* We are exiting, pull the VP from the XIVE */
+	lwz	r0, VCPU_XIVE_PUSHED(r9)
+	cmpwi	cr0, r0, 0
+	beq	1f
+	li	r7, TM_SPC_PULL_OS_CTX
+	li	r6, TM_QW1_OS
+	mfmsr	r0
+	andi.	r0, r0, MSR_IR		/* in real mode? */
+	beq	2f
+	ld	r10, HSTATE_XIVE_TIMA_VIRT(r13)
+	cmpldi	cr0, r10, 0
+	beq	1f
+	/* First load to pull the context, we ignore the value */
+	lwzx	r11, r7, r10
+	eieio
+	/* Second load to recover the context state (Words 0 and 1) */
+	ldx	r11, r6, r10
+	b	3f
+2:	ld	r10, HSTATE_XIVE_TIMA_PHYS(r13)
+	cmpldi	cr0, r10, 0
+	beq	1f
+	/* First load to pull the context, we ignore the value */
+	lwzcix	r11, r7, r10
+	eieio
+	/* Second load to recover the context state (Words 0 and 1) */
+	ldcix	r11, r6, r10
+3:	std	r11, VCPU_XIVE_SAVED_STATE(r9)
+	/* Fixup some of the state for the next load */
+	li	r10, 0
+	li	r0, 0xff
+	stw	r10, VCPU_XIVE_PUSHED(r9)
+	stb	r10, (VCPU_XIVE_SAVED_STATE+3)(r9)
+	stb	r0, (VCPU_XIVE_SAVED_STATE+4)(r9)
+1:
+#endif /* CONFIG_KVM_XICS */
 	/* Save more register state */
 	mfdar	r6
 	mfdsisr	r7
 
@@ -2011,7 +2065,7 @@ hcall_real_table:
 	.long	DOTSYM(kvmppc_rm_h_eoi) - hcall_real_table
 	.long	DOTSYM(kvmppc_rm_h_cppr) - hcall_real_table
 	.long	DOTSYM(kvmppc_rm_h_ipi) - hcall_real_table
-	.long	0		/* 0x70 - H_IPOLL */
+	.long	DOTSYM(kvmppc_rm_h_ipoll) - hcall_real_table
 	.long	DOTSYM(kvmppc_rm_h_xirr) - hcall_real_table
 #else
 	.long	0		/* 0x64 - H_EOI */
 
@@ -2181,7 +2235,11 @@ hcall_real_table:
 	.long	0		/* 0x2f0 */
 	.long	0		/* 0x2f4 */
 	.long	0		/* 0x2f8 */
-	.long	0		/* 0x2fc */
+#ifdef CONFIG_KVM_XICS
+	.long	DOTSYM(kvmppc_rm_h_xirr_x) - hcall_real_table
+#else
+	.long	0		/* 0x2fc - H_XIRR_X*/
+#endif
 	.long	DOTSYM(kvmppc_h_random) - hcall_real_table
 	.globl	hcall_real_table_end
 hcall_real_table_end:
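The hcall_real_table hunks above extend a dense dispatch table indexed by hcall number / 4, where a zero entry means "not handled in real mode, exit to the slow path". A small C analogue of that table-dispatch idea (H_CPPR 0x68 and H_IPOLL 0x70 are the real PAPR numbers; everything else is a stand-in):

#include <stdio.h>

#define H_CPPR  0x68
#define H_IPOLL 0x70

typedef long (*hcall_fn)(void);

static long h_cppr_impl(void)  { return 0; }
static long h_ipoll_impl(void) { return 0; }

/* Dense table indexed by hcall number / 4; NULL = fall back to slow path. */
static hcall_fn hcall_table[] = {
	[H_CPPR / 4]  = h_cppr_impl,
	[H_IPOLL / 4] = h_ipoll_impl,
};

static long do_hcall(unsigned int nr)
{
	hcall_fn fn;

	if (nr / 4 >= sizeof(hcall_table) / sizeof(hcall_table[0]))
		return -1;
	fn = hcall_table[nr / 4];
	return fn ? fn() : -1;	/* -1 stands in for "exit to virtual mode" */
}

int main(void)
{
	printf("H_IPOLL -> %ld, 0x6c -> %ld\n", do_hcall(H_IPOLL), do_hcall(0x6c));
	return 0;
}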
@@ -16,6 +16,7 @@
 #include <asm/kvm_ppc.h>
 #include <asm/hvcall.h>
 #include <asm/rtas.h>
+#include <asm/xive.h>
 
 #ifdef CONFIG_KVM_XICS
 static void kvm_rtas_set_xive(struct kvm_vcpu *vcpu, struct rtas_args *args)
 
@@ -32,7 +33,10 @@ static void kvm_rtas_set_xive(struct kvm_vcpu *vcpu, struct rtas_args *args)
 	server = be32_to_cpu(args->args[1]);
 	priority = be32_to_cpu(args->args[2]);
 
-	rc = kvmppc_xics_set_xive(vcpu->kvm, irq, server, priority);
+	if (xive_enabled())
+		rc = kvmppc_xive_set_xive(vcpu->kvm, irq, server, priority);
+	else
+		rc = kvmppc_xics_set_xive(vcpu->kvm, irq, server, priority);
 	if (rc)
 		rc = -3;
 out:
 
@@ -52,7 +56,10 @@ static void kvm_rtas_get_xive(struct kvm_vcpu *vcpu, struct rtas_args *args)
 	irq = be32_to_cpu(args->args[0]);
 
 	server = priority = 0;
-	rc = kvmppc_xics_get_xive(vcpu->kvm, irq, &server, &priority);
+	if (xive_enabled())
+		rc = kvmppc_xive_get_xive(vcpu->kvm, irq, &server, &priority);
+	else
+		rc = kvmppc_xics_get_xive(vcpu->kvm, irq, &server, &priority);
 	if (rc) {
 		rc = -3;
 		goto out;
 
@@ -76,7 +83,10 @@ static void kvm_rtas_int_off(struct kvm_vcpu *vcpu, struct rtas_args *args)
 
 	irq = be32_to_cpu(args->args[0]);
 
-	rc = kvmppc_xics_int_off(vcpu->kvm, irq);
+	if (xive_enabled())
+		rc = kvmppc_xive_int_off(vcpu->kvm, irq);
+	else
+		rc = kvmppc_xics_int_off(vcpu->kvm, irq);
 	if (rc)
 		rc = -3;
 out:
 
@@ -95,7 +105,10 @@ static void kvm_rtas_int_on(struct kvm_vcpu *vcpu, struct rtas_args *args)
 
 	irq = be32_to_cpu(args->args[0]);
 
-	rc = kvmppc_xics_int_on(vcpu->kvm, irq);
+	if (xive_enabled())
+		rc = kvmppc_xive_int_on(vcpu->kvm, irq);
+	else
+		rc = kvmppc_xics_int_on(vcpu->kvm, irq);
 	if (rc)
 		rc = -3;
 out:
@@ -1084,7 +1084,7 @@ static struct kvmppc_ics *kvmppc_xics_create_ics(struct kvm *kvm,
 	return xics->ics[icsid];
 }
 
-int kvmppc_xics_create_icp(struct kvm_vcpu *vcpu, unsigned long server_num)
+static int kvmppc_xics_create_icp(struct kvm_vcpu *vcpu, unsigned long server_num)
 {
 	struct kvmppc_icp *icp;
 
@@ -1307,8 +1307,8 @@ static int xics_set_source(struct kvmppc_xics *xics, long irq, u64 addr)
 	return 0;
 }
 
-int kvm_set_irq(struct kvm *kvm, int irq_source_id, u32 irq, int level,
-		bool line_status)
+int kvmppc_xics_set_irq(struct kvm *kvm, int irq_source_id, u32 irq, int level,
+			bool line_status)
 {
 	struct kvmppc_xics *xics = kvm->arch.xics;
 
@@ -1317,14 +1317,6 @@ int kvm_set_irq(struct kvm *kvm, int irq_source_id, u32 irq, int level,
 	return ics_deliver_irq(xics, irq, level);
 }
 
-int kvm_arch_set_irq_inatomic(struct kvm_kernel_irq_routing_entry *irq_entry,
-			      struct kvm *kvm, int irq_source_id,
-			      int level, bool line_status)
-{
-	return kvm_set_irq(kvm, irq_source_id, irq_entry->gsi,
-			   level, line_status);
-}
-
 static int xics_set_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
 {
 	struct kvmppc_xics *xics = dev->private;
 
@@ -1458,29 +1450,6 @@ void kvmppc_xics_free_icp(struct kvm_vcpu *vcpu)
 	vcpu->arch.irq_type = KVMPPC_IRQ_DEFAULT;
 }
 
-static int xics_set_irq(struct kvm_kernel_irq_routing_entry *e,
-			struct kvm *kvm, int irq_source_id, int level,
-			bool line_status)
-{
-	return kvm_set_irq(kvm, irq_source_id, e->gsi, level, line_status);
-}
-
-int kvm_irq_map_gsi(struct kvm *kvm,
-		    struct kvm_kernel_irq_routing_entry *entries, int gsi)
-{
-	entries->gsi = gsi;
-	entries->type = KVM_IRQ_ROUTING_IRQCHIP;
-	entries->set = xics_set_irq;
-	entries->irqchip.irqchip = 0;
-	entries->irqchip.pin = gsi;
-	return 1;
-}
-
-int kvm_irq_map_chip_pin(struct kvm *kvm, unsigned irqchip, unsigned pin)
-{
-	return pin;
-}
-
 void kvmppc_xics_set_mapped(struct kvm *kvm, unsigned long irq,
 			    unsigned long host_irq)
 {
@@ -10,6 +10,7 @@
 #ifndef _KVM_PPC_BOOK3S_XICS_H
 #define _KVM_PPC_BOOK3S_XICS_H
 
+#ifdef CONFIG_KVM_XICS
 /*
  * We use a two-level tree to store interrupt source information.
  * There are up to 1024 ICS nodes, each of which can represent
 
@@ -144,5 +145,11 @@ static inline struct kvmppc_ics *kvmppc_xics_find_ics(struct kvmppc_xics *xics,
 	return ics;
 }
 
+extern unsigned long xics_rm_h_xirr(struct kvm_vcpu *vcpu);
+extern int xics_rm_h_ipi(struct kvm_vcpu *vcpu, unsigned long server,
+			 unsigned long mfrr);
+extern int xics_rm_h_cppr(struct kvm_vcpu *vcpu, unsigned long cppr);
+extern int xics_rm_h_eoi(struct kvm_vcpu *vcpu, unsigned long xirr);
+
+#endif /* CONFIG_KVM_XICS */
 #endif /* _KVM_PPC_BOOK3S_XICS_H */
File diff suppressed because it is too large
@@ -0,0 +1,256 @@
+/*
+ * Copyright 2017 Benjamin Herrenschmidt, IBM Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _KVM_PPC_BOOK3S_XIVE_H
+#define _KVM_PPC_BOOK3S_XIVE_H
+
+#ifdef CONFIG_KVM_XICS
+#include "book3s_xics.h"
+
+/*
+ * State for one guest irq source.
+ *
+ * For each guest source we allocate a HW interrupt in the XIVE
+ * which we use for all SW triggers. It will be unused for
+ * pass-through but it's easier to keep around as the same
+ * guest interrupt can alternatively be emulated or pass-through
+ * if a physical device is hot unplugged and replaced with an
+ * emulated one.
+ *
+ * This state structure is very similar to the XICS one with
+ * additional XIVE specific tracking.
+ */
+struct kvmppc_xive_irq_state {
+	bool valid;			/* Interrupt entry is valid */
+
+	u32 number;			/* Guest IRQ number */
+	u32 ipi_number;			/* XIVE IPI HW number */
+	struct xive_irq_data ipi_data;	/* XIVE IPI associated data */
+	u32 pt_number;			/* XIVE Pass-through number if any */
+	struct xive_irq_data *pt_data;	/* XIVE Pass-through associated data */
+
+	/* Targetting as set by guest */
+	u32 guest_server;		/* Current guest selected target */
+	u8 guest_priority;		/* Guest set priority */
+	u8 saved_priority;		/* Saved priority when masking */
+
+	/* Actual targetting */
+	u32 act_server;			/* Actual server */
+	u8 act_priority;		/* Actual priority */
+
+	/* Various state bits */
+	bool in_eoi;			/* Synchronize with H_EOI */
+	bool old_p;			/* P bit state when masking */
+	bool old_q;			/* Q bit state when masking */
+	bool lsi;			/* level-sensitive interrupt */
+	bool asserted;			/* Only for emulated LSI: current state */
+
+	/* Saved for migration state */
+	bool in_queue;
+	bool saved_p;
+	bool saved_q;
+	u8 saved_scan_prio;
+};
+
+/* Select the "right" interrupt (IPI vs. passthrough) */
+static inline void kvmppc_xive_select_irq(struct kvmppc_xive_irq_state *state,
+					  u32 *out_hw_irq,
+					  struct xive_irq_data **out_xd)
+{
+	if (state->pt_number) {
+		if (out_hw_irq)
+			*out_hw_irq = state->pt_number;
+		if (out_xd)
+			*out_xd = state->pt_data;
+	} else {
+		if (out_hw_irq)
+			*out_hw_irq = state->ipi_number;
+		if (out_xd)
+			*out_xd = &state->ipi_data;
+	}
+}
+
+/*
+ * This corresponds to an "ICS" in XICS terminology, we use it
+ * as a mean to break up source information into multiple structures.
+ */
+struct kvmppc_xive_src_block {
+	arch_spinlock_t lock;
+	u16 id;
+	struct kvmppc_xive_irq_state irq_state[KVMPPC_XICS_IRQ_PER_ICS];
+};
+
+
+struct kvmppc_xive {
+	struct kvm *kvm;
+	struct kvm_device *dev;
+	struct dentry *dentry;
+
+	/* VP block associated with the VM */
+	u32 vp_base;
+
+	/* Blocks of sources */
+	struct kvmppc_xive_src_block *src_blocks[KVMPPC_XICS_MAX_ICS_ID + 1];
+	u32 max_sbid;
+
+	/*
+	 * For state save, we lazily scan the queues on the first interrupt
+	 * being migrated. We don't have a clean way to reset that flags
+	 * so we keep track of the number of valid sources and how many of
+	 * them were migrated so we can reset when all of them have been
+	 * processed.
+	 */
+	u32 src_count;
+	u32 saved_src_count;
+
+	/*
+	 * Some irqs are delayed on restore until the source is created,
+	 * keep track here of how many of them
+	 */
+	u32 delayed_irqs;
+
+	/* Which queues (priorities) are in use by the guest */
+	u8 qmap;
+
+	/* Queue orders */
+	u32 q_order;
+	u32 q_page_order;
+
+};
+
+#define KVMPPC_XIVE_Q_COUNT	8
+
+struct kvmppc_xive_vcpu {
+	struct kvmppc_xive *xive;
+	struct kvm_vcpu *vcpu;
+	bool valid;
+
+	/* Server number. This is the HW CPU ID from a guest perspective */
+	u32 server_num;
+
+	/*
+	 * HW VP corresponding to this VCPU. This is the base of the VP
+	 * block plus the server number.
+	 */
+	u32 vp_id;
+	u32 vp_chip_id;
+	u32 vp_cam;
+
+	/* IPI used for sending ... IPIs */
+	u32 vp_ipi;
+	struct xive_irq_data vp_ipi_data;
+
+	/* Local emulation state */
+	uint8_t cppr;	/* guest CPPR */
+	uint8_t hw_cppr;/* Hardware CPPR */
+	uint8_t mfrr;
+	uint8_t pending;
+
+	/* Each VP has 8 queues though we only provision some */
+	struct xive_q queues[KVMPPC_XIVE_Q_COUNT];
+	u32 esc_virq[KVMPPC_XIVE_Q_COUNT];
+	char *esc_virq_names[KVMPPC_XIVE_Q_COUNT];
+
+	/* Stash a delayed irq on restore from migration (see set_icp) */
+	u32 delayed_irq;
+
+	/* Stats */
+	u64 stat_rm_h_xirr;
+	u64 stat_rm_h_ipoll;
+	u64 stat_rm_h_cppr;
+	u64 stat_rm_h_eoi;
+	u64 stat_rm_h_ipi;
+	u64 stat_vm_h_xirr;
+	u64 stat_vm_h_ipoll;
+	u64 stat_vm_h_cppr;
+	u64 stat_vm_h_eoi;
+	u64 stat_vm_h_ipi;
+};
+
+static inline struct kvm_vcpu *kvmppc_xive_find_server(struct kvm *kvm, u32 nr)
+{
+	struct kvm_vcpu *vcpu = NULL;
+	int i;
+
+	kvm_for_each_vcpu(i, vcpu, kvm) {
+		if (vcpu->arch.xive_vcpu && nr == vcpu->arch.xive_vcpu->server_num)
+			return vcpu;
+	}
+	return NULL;
+}
+
+static inline struct kvmppc_xive_src_block *kvmppc_xive_find_source(struct kvmppc_xive *xive,
+		u32 irq, u16 *source)
+{
+	u32 bid = irq >> KVMPPC_XICS_ICS_SHIFT;
+	u16 src = irq & KVMPPC_XICS_SRC_MASK;
+
+	if (source)
+		*source = src;
+	if (bid > KVMPPC_XICS_MAX_ICS_ID)
+		return NULL;
+	return xive->src_blocks[bid];
+}
+
+/*
+ * Mapping between guest priorities and host priorities
+ * is as follow.
+ *
+ * Guest request for 0...6 are honored. Guest request for anything
+ * higher results in a priority of 7 being applied.
+ *
+ * However, when XIRR is returned via H_XIRR, 7 is translated to 0xb
+ * in order to match AIX expectations
+ *
+ * Similar mapping is done for CPPR values
+ */
+static inline u8 xive_prio_from_guest(u8 prio)
+{
+	if (prio == 0xff || prio < 8)
+		return prio;
+	return 7;
+}
+
+static inline u8 xive_prio_to_guest(u8 prio)
+{
+	if (prio == 0xff || prio < 7)
+		return prio;
+	return 0xb;
+}
+
+static inline u32 __xive_read_eq(__be32 *qpage, u32 msk, u32 *idx, u32 *toggle)
+{
+	u32 cur;
+
+	if (!qpage)
+		return 0;
+	cur = be32_to_cpup(qpage + *idx);
+	if ((cur >> 31) == *toggle)
+		return 0;
+	*idx = (*idx + 1) & msk;
+	if (*idx == 0)
+		(*toggle) ^= 1;
+	return cur & 0x7fffffff;
+}
+
+extern unsigned long xive_rm_h_xirr(struct kvm_vcpu *vcpu);
+extern unsigned long xive_rm_h_ipoll(struct kvm_vcpu *vcpu, unsigned long server);
+extern int xive_rm_h_ipi(struct kvm_vcpu *vcpu, unsigned long server,
+			 unsigned long mfrr);
+extern int xive_rm_h_cppr(struct kvm_vcpu *vcpu, unsigned long cppr);
+extern int xive_rm_h_eoi(struct kvm_vcpu *vcpu, unsigned long xirr);
+
+extern unsigned long (*__xive_vm_h_xirr)(struct kvm_vcpu *vcpu);
+extern unsigned long (*__xive_vm_h_ipoll)(struct kvm_vcpu *vcpu, unsigned long server);
+extern int (*__xive_vm_h_ipi)(struct kvm_vcpu *vcpu, unsigned long server,
+			      unsigned long mfrr);
+extern int (*__xive_vm_h_cppr)(struct kvm_vcpu *vcpu, unsigned long cppr);
+extern int (*__xive_vm_h_eoi)(struct kvm_vcpu *vcpu, unsigned long xirr);
+
+#endif /* CONFIG_KVM_XICS */
+#endif /* _KVM_PPC_BOOK3S_XICS_H */
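The header above defines the guest/host priority clamp and the toggle-bit event-queue reader. A standalone harness exercising copies of those two helpers (host-endian stand-in for be32_to_cpup, so this sketch skips the byte-swap):

#include <stdint.h>
#include <stdio.h>

/* Copies of the two helpers from the header, simplified to host endian. */
static uint8_t prio_from_guest(uint8_t prio)
{
	return (prio == 0xff || prio < 8) ? prio : 7;
}

static uint8_t prio_to_guest(uint8_t prio)
{
	return (prio == 0xff || prio < 7) ? prio : 0xb;
}

/*
 * Event-queue read: bit 31 of each slot is a generation ("toggle") bit.
 * A slot is valid only when its toggle differs from ours; wrapping the
 * index flips our toggle, so stale entries from the previous lap are
 * ignored without ever clearing the queue memory.
 */
static uint32_t read_eq(uint32_t *qpage, uint32_t msk, uint32_t *idx,
			uint32_t *toggle)
{
	uint32_t cur = qpage[*idx];

	if ((cur >> 31) == *toggle)
		return 0;			/* empty: producer not past us */
	*idx = (*idx + 1) & msk;
	if (*idx == 0)
		*toggle ^= 1;			/* wrapped: flip generation */
	return cur & 0x7fffffff;
}

int main(void)
{
	uint32_t q[4] = { (1u << 31) | 0x500, (1u << 31) | 0x501, 0, 0 };
	uint32_t idx = 0, toggle = 0;

	printf("prio 5 -> %u, prio 9 -> %u, 7 -> guest %#x\n",
	       prio_from_guest(5), prio_from_guest(9), prio_to_guest(7));
	printf("eq: %#x %#x %#x\n",
	       read_eq(q, 3, &idx, &toggle),	/* 0x500 */
	       read_eq(q, 3, &idx, &toggle),	/* 0x501 */
	       read_eq(q, 3, &idx, &toggle));	/* 0 (empty) */
	return 0;
}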
@ -0,0 +1,503 @@
|
||||||
|
/*
|
||||||
|
* Copyright 2017 Benjamin Herrenschmidt, IBM Corporation
|
||||||
|
*
|
||||||
|
* This program is free software; you can redistribute it and/or modify
|
||||||
|
* it under the terms of the GNU General Public License, version 2, as
|
||||||
|
* published by the Free Software Foundation.
|
||||||
|
*/
|
||||||
|
|
||||||
|
/* File to be included by other .c files */
|
||||||
|
|
||||||
|
#define XGLUE(a,b) a##b
|
||||||
|
#define GLUE(a,b) XGLUE(a,b)
|
||||||
|
|
||||||
|
static void GLUE(X_PFX,ack_pending)(struct kvmppc_xive_vcpu *xc)
|
||||||
|
{
|
||||||
|
u8 cppr;
|
||||||
|
u16 ack;
|
||||||
|
|
||||||
|
/* XXX DD1 bug workaround: Check PIPR vs. CPPR first ! */
|
||||||
|
|
||||||
|
/* Perform the acknowledge OS to register cycle. */
|
||||||
|
ack = be16_to_cpu(__x_readw(__x_tima + TM_SPC_ACK_OS_REG));
|
||||||
|
|
||||||
|
/* Synchronize subsequent queue accesses */
|
||||||
|
mb();
|
||||||
|
|
||||||
|
/* XXX Check grouping level */
|
||||||
|
|
||||||
|
/* Anything ? */
|
||||||
|
if (!((ack >> 8) & TM_QW1_NSR_EO))
|
||||||
|
return;
|
||||||
|
|
||||||
|
/* Grab CPPR of the most favored pending interrupt */
|
||||||
|
cppr = ack & 0xff;
|
||||||
|
if (cppr < 8)
|
||||||
|
xc->pending |= 1 << cppr;
|
||||||
|
|
||||||
|
#ifdef XIVE_RUNTIME_CHECKS
|
||||||
|
/* Check consistency */
|
||||||
|
if (cppr >= xc->hw_cppr)
|
||||||
|
pr_warn("KVM-XIVE: CPU %d odd ack CPPR, got %d at %d\n",
|
||||||
|
smp_processor_id(), cppr, xc->hw_cppr);
|
||||||
|
#endif
|
||||||
|
|
||||||
|
/*
|
||||||
|
* Update our image of the HW CPPR. We don't yet modify
|
||||||
|
* xc->cppr, this will be done as we scan for interrupts
|
||||||
|
* in the queues.
|
||||||
|
*/
|
||||||
|
xc->hw_cppr = cppr;
|
||||||
|
}
|
||||||
|
|
||||||
|
static u8 GLUE(X_PFX,esb_load)(struct xive_irq_data *xd, u32 offset)
|
||||||
|
{
|
||||||
|
u64 val;
|
||||||
|
|
||||||
|
if (xd->flags & XIVE_IRQ_FLAG_SHIFT_BUG)
|
||||||
|
offset |= offset << 4;
|
||||||
|
|
||||||
|
val =__x_readq(__x_eoi_page(xd) + offset);
|
||||||
|
#ifdef __LITTLE_ENDIAN__
|
||||||
|
val >>= 64-8;
|
||||||
|
#endif
|
||||||
|
return (u8)val;
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
static void GLUE(X_PFX,source_eoi)(u32 hw_irq, struct xive_irq_data *xd)
|
||||||
|
{
|
||||||
|
/* If the XIVE supports the new "store EOI facility, use it */
|
||||||
|
if (xd->flags & XIVE_IRQ_FLAG_STORE_EOI)
|
||||||
|
__x_writeq(0, __x_eoi_page(xd));
|
||||||
|
else if (hw_irq && xd->flags & XIVE_IRQ_FLAG_EOI_FW) {
|
||||||
|
opal_int_eoi(hw_irq);
|
||||||
|
} else {
|
||||||
|
uint64_t eoi_val;
|
||||||
|
|
||||||
|
/*
|
||||||
|
* Otherwise for EOI, we use the special MMIO that does
|
||||||
|
* a clear of both P and Q and returns the old Q,
|
||||||
|
* except for LSIs where we use the "EOI cycle" special
|
||||||
|
* load.
|
||||||
|
*
|
||||||
|
* This allows us to then do a re-trigger if Q was set
|
||||||
|
* rather than synthetizing an interrupt in software
|
||||||
|
*
|
||||||
|
* For LSIs, using the HW EOI cycle works around a problem
|
||||||
|
* on P9 DD1 PHBs where the other ESB accesses don't work
|
||||||
|
* properly.
|
||||||
|
*/
|
||||||
|
if (xd->flags & XIVE_IRQ_FLAG_LSI)
|
||||||
|
__x_readq(__x_eoi_page(xd));
|
||||||
|
else {
|
||||||
|
eoi_val = GLUE(X_PFX,esb_load)(xd, XIVE_ESB_SET_PQ_00);
|
||||||
|
|
||||||
|
/* Re-trigger if needed */
|
||||||
|
if ((eoi_val & 1) && __x_trig_page(xd))
|
||||||
|
__x_writeq(0, __x_trig_page(xd));
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
enum {
|
||||||
|
scan_fetch,
|
||||||
|
scan_poll,
|
||||||
|
scan_eoi,
|
||||||
|
};
|
||||||
|
|
||||||
|
static u32 GLUE(X_PFX,scan_interrupts)(struct kvmppc_xive_vcpu *xc,
|
||||||
|
u8 pending, int scan_type)
|
||||||
|
{
|
||||||
|
u32 hirq = 0;
|
||||||
|
u8 prio = 0xff;
|
||||||
|
|
||||||
|
/* Find highest pending priority */
|
||||||
|
while ((xc->mfrr != 0xff || pending != 0) && hirq == 0) {
|
||||||
|
struct xive_q *q;
|
||||||
|
u32 idx, toggle;
|
||||||
|
__be32 *qpage;
|
||||||
|
|
||||||
|
/*
|
||||||
|
* If pending is 0 this will return 0xff which is what
|
||||||
|
* we want
|
||||||
|
*/
|
||||||
|
prio = ffs(pending) - 1;
|
||||||
|
|
||||||
|
/*
|
||||||
|
* If the most favoured prio we found pending is less
|
||||||
|
* favored (or equal) than a pending IPI, we return
|
||||||
|
* the IPI instead.
|
||||||
|
*
|
||||||
|
* Note: If pending was 0 and mfrr is 0xff, we will
|
||||||
|
* not spurriously take an IPI because mfrr cannot
|
||||||
|
* then be smaller than cppr.
|
||||||
|
*/
|
||||||
|
if (prio >= xc->mfrr && xc->mfrr < xc->cppr) {
|
||||||
|
prio = xc->mfrr;
|
||||||
|
hirq = XICS_IPI;
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
|
||||||
|
/* Don't scan past the guest cppr */
|
||||||
|
if (prio >= xc->cppr || prio > 7)
|
||||||
|
break;
|
||||||
|
|
||||||
|
/* Grab queue and pointers */
|
||||||
|
q = &xc->queues[prio];
|
||||||
|
idx = q->idx;
|
||||||
|
toggle = q->toggle;
|
||||||
|
|
||||||
|
/*
|
||||||
|
* Snapshot the queue page. The test further down for EOI
|
||||||
|
* must use the same "copy" that was used by __xive_read_eq
|
||||||
|
* since qpage can be set concurrently and we don't want
|
||||||
|
* to miss an EOI.
|
||||||
|
*/
|
||||||
|
qpage = READ_ONCE(q->qpage);
|
||||||
|
|
||||||
|
skip_ipi:
|
||||||
|
/*
|
||||||
|
* Try to fetch from the queue. Will return 0 for a
|
||||||
|
* non-queueing priority (ie, qpage = 0).
|
||||||
|
*/
|
||||||
|
hirq = __xive_read_eq(qpage, q->msk, &idx, &toggle);
|
||||||
|
|
||||||
|
/*
|
||||||
|
* If this was a signal for an MFFR change done by
|
||||||
|
* H_IPI we skip it. Additionally, if we were fetching
|
||||||
|
* we EOI it now, thus re-enabling reception of a new
|
||||||
|
* such signal.
|
||||||
|
*
|
||||||
|
* We also need to do that if prio is 0 and we had no
|
||||||
|
* page for the queue. In this case, we have non-queued
|
||||||
|
* IPI that needs to be EOId.
|
||||||
|
*
|
||||||
|
* This is safe because if we have another pending MFRR
|
||||||
|
* change that wasn't observed above, the Q bit will have
|
||||||
|
* been set and another occurrence of the IPI will trigger.
|
||||||
|
*/
|
||||||
|
if (hirq == XICS_IPI || (prio == 0 && !qpage)) {
|
||||||
|
if (scan_type == scan_fetch)
|
||||||
|
GLUE(X_PFX,source_eoi)(xc->vp_ipi,
|
||||||
|
&xc->vp_ipi_data);
|
||||||
|
/* Loop back on same queue with updated idx/toggle */
|
||||||
|
#ifdef XIVE_RUNTIME_CHECKS
|
||||||
|
WARN_ON(hirq && hirq != XICS_IPI);
|
||||||
|
#endif
|
||||||
|
if (hirq)
|
||||||
|
goto skip_ipi;
|
||||||
|
}
|
||||||
|
|
||||||
|
/* If fetching, update queue pointers */
|
||||||
|
if (scan_type == scan_fetch) {
|
||||||
|
q->idx = idx;
|
||||||
|
q->toggle = toggle;
|
||||||
|
}
|
||||||
|
|
||||||
|
/* Something found, stop searching */
|
||||||
|
if (hirq)
|
||||||
|
break;
|
||||||
|
|
||||||
|
/* Clear the pending bit on the now empty queue */
|
||||||
|
pending &= ~(1 << prio);
|
||||||
|
|
||||||
|
/*
|
||||||
|
* Check if the queue count needs adjusting due to
|
||||||
|
* interrupts being moved away.
|
||||||
|
*/
|
||||||
|
if (atomic_read(&q->pending_count)) {
|
||||||
|
int p = atomic_xchg(&q->pending_count, 0);
|
||||||
|
if (p) {
|
||||||
|
#ifdef XIVE_RUNTIME_CHECKS
|
||||||
|
WARN_ON(p > atomic_read(&q->count));
|
||||||
|
#endif
|
||||||
|
atomic_sub(p, &q->count);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/* If we are just taking a "peek", do nothing else */
|
||||||
|
if (scan_type == scan_poll)
|
||||||
|
return hirq;
|
||||||
|
|
||||||
|
/* Update the pending bits */
|
||||||
|
xc->pending = pending;
|
||||||
|
|
||||||
|
/*
|
||||||
|
* If this is an EOI that's it, no CPPR adjustment done here,
|
||||||
|
* all we needed was cleanup the stale pending bits and check
|
||||||
|
* if there's anything left.
|
||||||
|
*/
|
||||||
|
if (scan_type == scan_eoi)
|
||||||
|
return hirq;
|
||||||
|
|
||||||
|
/*
|
||||||
|
* If we found an interrupt, adjust what the guest CPPR should
|
||||||
|
* be as if we had just fetched that interrupt from HW.
|
||||||
|
*/
|
||||||
|
if (hirq)
|
||||||
|
xc->cppr = prio;
|
||||||
|
/*
|
||||||
|
* If it was an IPI the HW CPPR might have been lowered too much
|
||||||
|
* as the HW interrupt we use for IPIs is routed to priority 0.
|
||||||
|
*
|
||||||
|
* We re-sync it here.
|
||||||
|
*/
|
||||||
|
if (xc->cppr != xc->hw_cppr) {
|
||||||
|
xc->hw_cppr = xc->cppr;
|
||||||
|
__x_writeb(xc->cppr, __x_tima + TM_QW1_OS + TM_CPPR);
|
||||||
|
}
|
||||||
|
|
||||||
|
return hirq;
|
||||||
|
}
|
||||||
|
|
||||||
|
X_STATIC unsigned long GLUE(X_PFX,h_xirr)(struct kvm_vcpu *vcpu)
|
||||||
|
{
|
||||||
|
struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
|
||||||
|
u8 old_cppr;
|
||||||
|
u32 hirq;
|
||||||
|
|
||||||
|
pr_devel("H_XIRR\n");
|
||||||
|
|
||||||
|
xc->GLUE(X_STAT_PFX,h_xirr)++;
|
||||||
|
|
||||||
|
/* First collect pending bits from HW */
|
||||||
|
GLUE(X_PFX,ack_pending)(xc);
|
||||||
|
|
||||||
|
/*
|
||||||
|
* Cleanup the old-style bits if needed (they may have been
|
||||||
|
* set by pull or an escalation interrupts).
|
||||||
|
*/
|
||||||
|
if (test_bit(BOOK3S_IRQPRIO_EXTERNAL, &vcpu->arch.pending_exceptions))
|
||||||
|
clear_bit(BOOK3S_IRQPRIO_EXTERNAL_LEVEL,
|
||||||
|
&vcpu->arch.pending_exceptions);
|
||||||
|
|
||||||
|
pr_devel(" new pending=0x%02x hw_cppr=%d cppr=%d\n",
|
||||||
|
xc->pending, xc->hw_cppr, xc->cppr);
|
||||||
|
|
||||||
|
/* Grab previous CPPR and reverse map it */
|
||||||
|
old_cppr = xive_prio_to_guest(xc->cppr);
|
||||||
|
|
||||||
|
/* Scan for actual interrupts */
|
||||||
|
hirq = GLUE(X_PFX,scan_interrupts)(xc, xc->pending, scan_fetch);
|
||||||
|
|
||||||
|
pr_devel(" got hirq=0x%x hw_cppr=%d cppr=%d\n",
|
||||||
|
hirq, xc->hw_cppr, xc->cppr);
|
||||||
|
|
||||||
|
#ifdef XIVE_RUNTIME_CHECKS
|
||||||
|
/* That should never hit */
|
||||||
|
if (hirq & 0xff000000)
|
||||||
|
pr_warn("XIVE: Weird guest interrupt number 0x%08x\n", hirq);
|
||||||
|
#endif
|
||||||
|
|
||||||
|
/*
|
||||||
|
* XXX We could check if the interrupt is masked here and
|
||||||
|
* filter it. If we chose to do so, we would need to do:
|
||||||
|
*
|
||||||
|
* if (masked) {
|
||||||
|
* lock();
|
||||||
|
* if (masked) {
|
||||||
|
* old_Q = true;
|
||||||
|
* hirq = 0;
|
||||||
|
* }
|
||||||
|
* unlock();
|
||||||
|
* }
|
||||||
|
*/
|
||||||
|
|
||||||
|
/* Return interrupt and old CPPR in GPR4 */
|
||||||
|
vcpu->arch.gpr[4] = hirq | (old_cppr << 24);
|
||||||
|
|
||||||
|
return H_SUCCESS;
|
||||||
|
}
|
||||||
|
|
||||||
|
X_STATIC unsigned long GLUE(X_PFX,h_ipoll)(struct kvm_vcpu *vcpu, unsigned long server)
|
||||||
|
{
|
||||||
|
struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
|
||||||
|
u8 pending = xc->pending;
|
||||||
|
u32 hirq;
|
||||||
|
u8 pipr;
|
||||||
|
|
||||||
|
pr_devel("H_IPOLL(server=%ld)\n", server);
|
||||||
|
|
||||||
|
xc->GLUE(X_STAT_PFX,h_ipoll)++;
|
||||||
|
|
||||||
|
/* Grab the target VCPU if not the current one */
|
||||||
|
if (xc->server_num != server) {
|
||||||
|
vcpu = kvmppc_xive_find_server(vcpu->kvm, server);
|
||||||
|
if (!vcpu)
|
||||||
|
return H_PARAMETER;
|
||||||
|
xc = vcpu->arch.xive_vcpu;
|
||||||
|
|
||||||
|
/* Scan all priorities */
|
||||||
|
pending = 0xff;
|
||||||
|
} else {
|
||||||
|
/* Grab pending interrupt if any */
|
||||||
|
pipr = __x_readb(__x_tima + TM_QW1_OS + TM_PIPR);
|
||||||
|
if (pipr < 8)
|
||||||
|
pending |= 1 << pipr;
|
||||||
|
}
|
||||||
|
|
||||||
|
hirq = GLUE(X_PFX,scan_interrupts)(xc, pending, scan_poll);
|
||||||
|
|
||||||
|
/* Return interrupt and old CPPR in GPR4 */
|
||||||
|
vcpu->arch.gpr[4] = hirq | (xc->cppr << 24);
|
||||||
|
|
||||||
|
return H_SUCCESS;
|
||||||
|
}
|
||||||
|
|
||||||
|
static void GLUE(X_PFX,push_pending_to_hw)(struct kvmppc_xive_vcpu *xc)
|
||||||
|
{
|
||||||
|
u8 pending, prio;
|
||||||
|
|
||||||
|
pending = xc->pending;
|
||||||
|
if (xc->mfrr != 0xff) {
|
||||||
|
if (xc->mfrr < 8)
|
||||||
|
pending |= 1 << xc->mfrr;
|
||||||
|
else
|
||||||
|
pending |= 0x80;
|
||||||
|
}
|
||||||
|
if (!pending)
|
||||||
|
return;
|
||||||
|
prio = ffs(pending) - 1;
|
||||||
|
|
||||||
|
__x_writeb(prio, __x_tima + TM_SPC_SET_OS_PENDING);
|
||||||
|
}
|
||||||
|
|
||||||
|
X_STATIC int GLUE(X_PFX,h_cppr)(struct kvm_vcpu *vcpu, unsigned long cppr)
|
||||||
|
{
|
||||||
|
struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
|
||||||
|
u8 old_cppr;
|
||||||
|
|
||||||
|
pr_devel("H_CPPR(cppr=%ld)\n", cppr);
|
||||||
|
|
||||||
|
xc->GLUE(X_STAT_PFX,h_cppr)++;
|
||||||
|
|
||||||
|
/* Map CPPR */
|
||||||
|
cppr = xive_prio_from_guest(cppr);
|
||||||
|
|
||||||
|
/* Remember old and update SW state */
|
||||||
|
old_cppr = xc->cppr;
|
||||||
|
xc->cppr = cppr;
|
||||||
|
|
||||||
|
/*
|
||||||
|
* We are masking less, we need to look for pending things
|
||||||
|
* to deliver and set VP pending bits accordingly to trigger
|
||||||
|
* a new interrupt otherwise we might miss MFRR changes for
|
||||||
|
* which we have optimized out sending an IPI signal.
|
||||||
|
*/
|
||||||
|
if (cppr > old_cppr)
|
||||||
|
GLUE(X_PFX,push_pending_to_hw)(xc);
|
||||||
|
|
||||||
|
/* Apply new CPPR */
|
||||||
|
xc->hw_cppr = cppr;
|
||||||
|
__x_writeb(cppr, __x_tima + TM_QW1_OS + TM_CPPR);
|
||||||
|
|
||||||
|
	return H_SUCCESS;
}

X_STATIC int GLUE(X_PFX,h_eoi)(struct kvm_vcpu *vcpu, unsigned long xirr)
{
	struct kvmppc_xive *xive = vcpu->kvm->arch.xive;
	struct kvmppc_xive_src_block *sb;
	struct kvmppc_xive_irq_state *state;
	struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
	struct xive_irq_data *xd;
	u8 new_cppr = xirr >> 24;
	u32 irq = xirr & 0x00ffffff, hw_num;
	u16 src;
	int rc = 0;

	pr_devel("H_EOI(xirr=%08lx)\n", xirr);

	xc->GLUE(X_STAT_PFX,h_eoi)++;

	xc->cppr = xive_prio_from_guest(new_cppr);

	/*
	 * IPIs are synthesized from MFRR and thus don't need
	 * any special EOI handling. The underlying interrupt
	 * used to signal MFRR changes is EOId when fetched from
	 * the queue.
	 */
	if (irq == XICS_IPI || irq == 0)
		goto bail;

	/* Find interrupt source */
	sb = kvmppc_xive_find_source(xive, irq, &src);
	if (!sb) {
		pr_devel(" source not found !\n");
		rc = H_PARAMETER;
		goto bail;
	}
	state = &sb->irq_state[src];
	kvmppc_xive_select_irq(state, &hw_num, &xd);

	state->in_eoi = true;
	mb();

again:
	if (state->guest_priority == MASKED) {
		arch_spin_lock(&sb->lock);
		if (state->guest_priority != MASKED) {
			arch_spin_unlock(&sb->lock);
			goto again;
		}
		pr_devel(" EOI on saved P...\n");

		/* Clear old_p, that will cause unmask to perform an EOI */
		state->old_p = false;

		arch_spin_unlock(&sb->lock);
	} else {
		pr_devel(" EOI on source...\n");

		/* Perform EOI on the source */
		GLUE(X_PFX,source_eoi)(hw_num, xd);

		/* If it's an emulated LSI, check level and resend */
		if (state->lsi && state->asserted)
			__x_writeq(0, __x_trig_page(xd));
	}

	mb();
	state->in_eoi = false;
bail:

	/* Re-evaluate pending IRQs and update HW */
	GLUE(X_PFX,scan_interrupts)(xc, xc->pending, scan_eoi);
	GLUE(X_PFX,push_pending_to_hw)(xc);
	pr_devel(" after scan pending=%02x\n", xc->pending);

	/* Apply new CPPR */
	xc->hw_cppr = xc->cppr;
	__x_writeb(xc->cppr, __x_tima + TM_QW1_OS + TM_CPPR);

	return rc;
}

X_STATIC int GLUE(X_PFX,h_ipi)(struct kvm_vcpu *vcpu, unsigned long server,
			       unsigned long mfrr)
{
	struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;

	pr_devel("H_IPI(server=%08lx,mfrr=%ld)\n", server, mfrr);

	xc->GLUE(X_STAT_PFX,h_ipi)++;

	/* Find target */
	vcpu = kvmppc_xive_find_server(vcpu->kvm, server);
	if (!vcpu)
		return H_PARAMETER;
	xc = vcpu->arch.xive_vcpu;

	/* Locklessly write over MFRR */
	xc->mfrr = mfrr;

	/* Shoot the IPI if more favored than the target CPPR */
	if (mfrr < xc->cppr)
		__x_writeq(0, __x_trig_page(&xc->vp_ipi_data));

	return H_SUCCESS;
}
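Both hcalls above rely on the XICS XIRR convention: the new CPPR rides in the top byte of the 32-bit XIRR value and the interrupt source number in the low 24 bits. A standalone sketch of that split, with invented helper names (the kernel open-codes the shifts):

```c
#include <linux/types.h>

/* Illustrative only: the XIRR decode used by h_eoi() above. */
static inline u8 example_xirr_cppr(unsigned long xirr)
{
	return xirr >> 24;		/* new CPPR in the top byte */
}

static inline u32 example_xirr_source(unsigned long xirr)
{
	return xirr & 0x00ffffff;	/* source number in the low 24 bits */
}
```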
@@ -12,6 +12,7 @@ static inline int irqchip_in_kernel(struct kvm *kvm)
 #endif
 #endif
 #ifdef CONFIG_KVM_XICS
 	ret = ret || (kvm->arch.xics != NULL);
+	ret = ret || (kvm->arch.xive != NULL);
 #endif
 	smp_rmb();
 	return ret;
@@ -38,6 +38,8 @@
 #include <asm/irqflags.h>
 #include <asm/iommu.h>
 #include <asm/switch_to.h>
+#include <asm/xive.h>
+
 #include "timing.h"
 #include "irq.h"
 #include "../mm/mmu_decl.h"
@@ -697,7 +699,10 @@ void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu)
 		kvmppc_mpic_disconnect_vcpu(vcpu->arch.mpic, vcpu);
 		break;
 	case KVMPPC_IRQ_XICS:
-		kvmppc_xics_free_icp(vcpu);
+		if (xive_enabled())
+			kvmppc_xive_cleanup_vcpu(vcpu);
+		else
+			kvmppc_xics_free_icp(vcpu);
 		break;
 	}
@@ -1522,8 +1527,12 @@ static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
 		r = -EPERM;
 		dev = kvm_device_from_filp(f.file);
-		if (dev)
-			r = kvmppc_xics_connect_vcpu(dev, vcpu, cap->args[1]);
+		if (dev) {
+			if (xive_enabled())
+				r = kvmppc_xive_connect_vcpu(dev, vcpu, cap->args[1]);
+			else
+				r = kvmppc_xics_connect_vcpu(dev, vcpu, cap->args[1]);
+		}

 		fdput(f);
 		break;
@@ -1547,7 +1556,7 @@ bool kvm_arch_intc_initialized(struct kvm *kvm)
 		return true;
 #endif
 #ifdef CONFIG_KVM_XICS
-	if (kvm->arch.xics)
+	if (kvm->arch.xics || kvm->arch.xive)
 		return true;
 #endif
 	return false;
@@ -373,6 +373,7 @@ config PPC_PERF_CTRS

 config SMP
 	depends on PPC_BOOK3S || PPC_BOOK3E || FSL_BOOKE || PPC_47x
+	select GENERIC_IRQ_MIGRATION
 	bool "Symmetric multi-processing support"
 	---help---
 	  This enables support for systems with more than one CPU. If you have
@@ -4,6 +4,7 @@ config PPC_POWERNV
 	select PPC_NATIVE
 	select PPC_XICS
 	select PPC_ICP_NATIVE
+	select PPC_XIVE_NATIVE
 	select PPC_P7_NAP
 	select PCI
 	select PCI_MSI
@@ -301,3 +301,18 @@ OPAL_CALL(opal_int_eoi, OPAL_INT_EOI);
 OPAL_CALL(opal_int_set_mfrr, OPAL_INT_SET_MFRR);
 OPAL_CALL(opal_pci_tce_kill, OPAL_PCI_TCE_KILL);
 OPAL_CALL(opal_nmmu_set_ptcr, OPAL_NMMU_SET_PTCR);
+OPAL_CALL(opal_xive_reset, OPAL_XIVE_RESET);
+OPAL_CALL(opal_xive_get_irq_info, OPAL_XIVE_GET_IRQ_INFO);
+OPAL_CALL(opal_xive_get_irq_config, OPAL_XIVE_GET_IRQ_CONFIG);
+OPAL_CALL(opal_xive_set_irq_config, OPAL_XIVE_SET_IRQ_CONFIG);
+OPAL_CALL(opal_xive_get_queue_info, OPAL_XIVE_GET_QUEUE_INFO);
+OPAL_CALL(opal_xive_set_queue_info, OPAL_XIVE_SET_QUEUE_INFO);
+OPAL_CALL(opal_xive_donate_page, OPAL_XIVE_DONATE_PAGE);
+OPAL_CALL(opal_xive_alloc_vp_block, OPAL_XIVE_ALLOCATE_VP_BLOCK);
+OPAL_CALL(opal_xive_free_vp_block, OPAL_XIVE_FREE_VP_BLOCK);
+OPAL_CALL(opal_xive_allocate_irq, OPAL_XIVE_ALLOCATE_IRQ);
+OPAL_CALL(opal_xive_free_irq, OPAL_XIVE_FREE_IRQ);
+OPAL_CALL(opal_xive_get_vp_info, OPAL_XIVE_GET_VP_INFO);
+OPAL_CALL(opal_xive_set_vp_info, OPAL_XIVE_SET_VP_INFO);
+OPAL_CALL(opal_xive_sync, OPAL_XIVE_SYNC);
+OPAL_CALL(opal_xive_dump, OPAL_XIVE_DUMP);
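Each OPAL_CALL() line above expands, in the wrapper assembly file, to a stub that enters firmware with the given token, so C code can then call the function by name. A minimal usage sketch — the error handling here is illustrative, not taken from this diff; the real callers live in the new xive native backend below:

```c
#include <asm/opal.h>

/* Illustrative only: invoke one of the newly wrapped OPAL calls to put
 * the XIVE into OS "exploitation" mode and map the result to errno. */
static int example_xive_expl_mode(void)
{
	s64 rc = opal_xive_reset(OPAL_XIVE_MODE_EXPL);

	return rc ? -EIO : 0;
}
```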
@@ -890,3 +890,4 @@ EXPORT_SYMBOL_GPL(opal_leds_set_ind);
 EXPORT_SYMBOL_GPL(opal_write_oppanel_async);
 /* Export this for KVM */
 EXPORT_SYMBOL_GPL(opal_int_set_mfrr);
+EXPORT_SYMBOL_GPL(opal_int_eoi);
@@ -62,7 +62,7 @@ int powernv_get_random_real_mode(unsigned long *v)

 	rng = raw_cpu_read(powernv_rng);

-	*v = rng_whiten(rng, in_rm64(rng->regs_real));
+	*v = rng_whiten(rng, __raw_rm_readq(rng->regs_real));

 	return 1;
 }
@@ -32,6 +32,7 @@
 #include <asm/machdep.h>
 #include <asm/firmware.h>
 #include <asm/xics.h>
+#include <asm/xive.h>
 #include <asm/opal.h>
 #include <asm/kexec.h>
 #include <asm/smp.h>
@@ -76,7 +77,9 @@ static void __init pnv_init(void)

 static void __init pnv_init_IRQ(void)
 {
-	xics_init();
+	/* Try using a XIVE if available, otherwise use a XICS */
+	if (!xive_native_init())
+		xics_init();

 	WARN_ON(!ppc_md.get_irq);
 }
@@ -218,10 +221,12 @@ static void pnv_kexec_wait_secondaries_down(void)

 static void pnv_kexec_cpu_down(int crash_shutdown, int secondary)
 {
-	xics_kexec_teardown_cpu(secondary);
+	if (xive_enabled())
+		xive_kexec_teardown_cpu(secondary);
+	else
+		xics_kexec_teardown_cpu(secondary);

 	/* On OPAL, we return all CPUs to firmware */

 	if (!firmware_has_feature(FW_FEATURE_OPAL))
 		return;
@@ -237,6 +242,10 @@ static void pnv_kexec_cpu_down(int crash_shutdown, int secondary)
 	/* Primary waits for the secondaries to have reached OPAL */
 	pnv_kexec_wait_secondaries_down();

+	/* Switch XIVE back to emulation mode */
+	if (xive_enabled())
+		xive_shutdown();
+
 	/*
 	 * We might be running as little-endian - now that interrupts
 	 * are disabled, reset the HILE bit to big-endian so we don't
@@ -29,6 +29,7 @@
 #include <asm/vdso_datapage.h>
 #include <asm/cputhreads.h>
 #include <asm/xics.h>
+#include <asm/xive.h>
 #include <asm/opal.h>
 #include <asm/runlatch.h>
 #include <asm/code-patching.h>
@@ -47,7 +48,9 @@

 static void pnv_smp_setup_cpu(int cpu)
 {
-	if (cpu != boot_cpuid)
+	if (xive_enabled())
+		xive_smp_setup_cpu();
+	else if (cpu != boot_cpuid)
 		xics_setup_cpu();

 #ifdef CONFIG_PPC_DOORBELL
@@ -132,7 +135,10 @@ static int pnv_smp_cpu_disable(void)
 	vdso_data->processorCount--;
 	if (cpu == boot_cpuid)
 		boot_cpuid = cpumask_any(cpu_online_mask);
-	xics_migrate_irqs_away();
+	if (xive_enabled())
+		xive_smp_disable_cpu();
+	else
+		xics_migrate_irqs_away();
 	return 0;
 }

@@ -213,9 +219,12 @@ static void pnv_smp_cpu_kill_self(void)
 		if (((srr1 & wmask) == SRR1_WAKEEE) ||
 		    ((srr1 & wmask) == SRR1_WAKEHVI) ||
 		    (local_paca->irq_happened & PACA_IRQ_EE)) {
-			if (cpu_has_feature(CPU_FTR_ARCH_300))
-				icp_opal_flush_interrupt();
-			else
+			if (cpu_has_feature(CPU_FTR_ARCH_300)) {
+				if (xive_enabled())
+					xive_flush_interrupt();
+				else
+					icp_opal_flush_interrupt();
+			} else
 				icp_native_flush_interrupt();
 		} else if ((srr1 & wmask) == SRR1_WAKEHDBELL) {
 			unsigned long msg = PPC_DBELL_TYPE(PPC_DBELL_SERVER);
@@ -252,10 +261,26 @@ static int pnv_cpu_bootable(unsigned int nr)
 	return smp_generic_cpu_bootable(nr);
 }

+static int pnv_smp_prepare_cpu(int cpu)
+{
+	if (xive_enabled())
+		return xive_smp_prepare_cpu(cpu);
+	return 0;
+}
+
+static void __init pnv_smp_probe(void)
+{
+	if (xive_enabled())
+		xive_smp_probe();
+	else
+		xics_smp_probe();
+}
+
 static struct smp_ops_t pnv_smp_ops = {
 	.message_pass	= smp_muxed_ipi_message_pass,
-	.cause_ipi	= NULL,	/* Filled at runtime by xics_smp_probe() */
+	.cause_ipi	= NULL,	/* Filled at runtime by xi{cs,ve}_smp_probe() */
-	.probe		= xics_smp_probe,
+	.probe		= pnv_smp_probe,
+	.prepare_cpu	= pnv_smp_prepare_cpu,
 	.kick_cpu	= pnv_smp_kick_cpu,
 	.setup_cpu	= pnv_smp_setup_cpu,
 	.cpu_bootable	= pnv_cpu_bootable,
@@ -28,6 +28,7 @@ config PPC_MSI_BITMAP
 	default y if PPC_POWERNV

 source "arch/powerpc/sysdev/xics/Kconfig"
+source "arch/powerpc/sysdev/xive/Kconfig"

 config PPC_SCOM
 	bool
@@ -71,5 +71,6 @@ obj-$(CONFIG_PPC_EARLY_DEBUG_MEMCONS) += udbg_memcons.o
 subdir-ccflags-$(CONFIG_PPC_WERROR) := -Werror

 obj-$(CONFIG_PPC_XICS)	+= xics/
+obj-$(CONFIG_PPC_XIVE)	+= xive/

 obj-$(CONFIG_GE_FPGA)	+= ge/
@@ -168,15 +168,15 @@ void icp_native_cause_ipi_rm(int cpu)
 	 * Need the physical address of the XICS to be
 	 * previously saved in kvm_hstate in the paca.
 	 */
-	unsigned long xics_phys;
+	void __iomem *xics_phys;

 	/*
 	 * Just like the cause_ipi functions, it is required to
-	 * include a full barrier (out8 includes a sync) before
-	 * causing the IPI.
+	 * include a full barrier before causing the IPI.
 	 */
 	xics_phys = paca[cpu].kvm_hstate.xics_phys;
-	out_rm8((u8 *)(xics_phys + XICS_MFRR), IPI_PRIORITY);
+	mb();
+	__raw_rm_writeb(IPI_PRIORITY, xics_phys + XICS_MFRR);
 }
 #endif
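As the rewritten comment notes, the old out_rm8() accessor carried an implicit sync, while the bare __raw_rm_writeb() it is replaced with carries no barrier at all, so the ordering now has to be spelled out. A minimal sketch of the idiom, assuming a saved cache-inhibited XICS address as in the hunk above:

```c
/* Illustrative only: raw real-mode MMIO stores have no implicit
 * barriers, so order all prior stores before poking MFRR. */
static void example_cause_ipi_rm(void __iomem *xics_phys)
{
	mb();	/* full barrier, replacing the sync out_rm8() provided */
	__raw_rm_writeb(IPI_PRIORITY, xics_phys + XICS_MFRR);
}
```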
@@ -0,0 +1,11 @@
config PPC_XIVE
	bool
	default n
	select PPC_SMP_MUXED_IPI
	select HARDIRQS_SW_RESEND

config PPC_XIVE_NATIVE
	bool
	default n
	select PPC_XIVE
	depends on PPC_POWERNV
@@ -0,0 +1,4 @@
subdir-ccflags-$(CONFIG_PPC_WERROR) := -Werror

obj-y				+= common.o
obj-$(CONFIG_PPC_XIVE_NATIVE)	+= native.o
(File diff suppressed because it is too large.)
@@ -0,0 +1,715 @@
/*
 * Copyright 2016,2017 IBM Corporation.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#define pr_fmt(fmt) "xive: " fmt

#include <linux/types.h>
#include <linux/irq.h>
#include <linux/debugfs.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/seq_file.h>
#include <linux/init.h>
#include <linux/of.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/delay.h>
#include <linux/cpumask.h>
#include <linux/mm.h>

#include <asm/prom.h>
#include <asm/io.h>
#include <asm/smp.h>
#include <asm/irq.h>
#include <asm/errno.h>
#include <asm/xive.h>
#include <asm/xive-regs.h>
#include <asm/opal.h>
#include <asm/kvm_ppc.h>

#include "xive-internal.h"

static u32 xive_provision_size;
static u32 *xive_provision_chips;
static u32 xive_provision_chip_count;
static u32 xive_queue_shift;
static u32 xive_pool_vps = XIVE_INVALID_VP;
static struct kmem_cache *xive_provision_cache;

int xive_native_populate_irq_data(u32 hw_irq, struct xive_irq_data *data)
{
	__be64 flags, eoi_page, trig_page;
	__be32 esb_shift, src_chip;
	u64 opal_flags;
	s64 rc;

	memset(data, 0, sizeof(*data));

	rc = opal_xive_get_irq_info(hw_irq, &flags, &eoi_page, &trig_page,
				    &esb_shift, &src_chip);
	if (rc) {
		pr_err("opal_xive_get_irq_info(0x%x) returned %lld\n",
		       hw_irq, rc);
		return -EINVAL;
	}

	opal_flags = be64_to_cpu(flags);
	if (opal_flags & OPAL_XIVE_IRQ_STORE_EOI)
		data->flags |= XIVE_IRQ_FLAG_STORE_EOI;
	if (opal_flags & OPAL_XIVE_IRQ_LSI)
		data->flags |= XIVE_IRQ_FLAG_LSI;
	if (opal_flags & OPAL_XIVE_IRQ_SHIFT_BUG)
		data->flags |= XIVE_IRQ_FLAG_SHIFT_BUG;
	if (opal_flags & OPAL_XIVE_IRQ_MASK_VIA_FW)
		data->flags |= XIVE_IRQ_FLAG_MASK_FW;
	if (opal_flags & OPAL_XIVE_IRQ_EOI_VIA_FW)
		data->flags |= XIVE_IRQ_FLAG_EOI_FW;
	data->eoi_page = be64_to_cpu(eoi_page);
	data->trig_page = be64_to_cpu(trig_page);
	data->esb_shift = be32_to_cpu(esb_shift);
	data->src_chip = be32_to_cpu(src_chip);

	data->eoi_mmio = ioremap(data->eoi_page, 1u << data->esb_shift);
	if (!data->eoi_mmio) {
		pr_err("Failed to map EOI page for irq 0x%x\n", hw_irq);
		return -ENOMEM;
	}

	if (!data->trig_page)
		return 0;
	if (data->trig_page == data->eoi_page) {
		data->trig_mmio = data->eoi_mmio;
		return 0;
	}

	data->trig_mmio = ioremap(data->trig_page, 1u << data->esb_shift);
	if (!data->trig_mmio) {
		pr_err("Failed to map trigger page for irq 0x%x\n", hw_irq);
		return -ENOMEM;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(xive_native_populate_irq_data);
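A minimal usage sketch of the function above (the caller and the printout are example scaffolding; error handling is elided):

```c
/* Illustrative only: fetch the ESB geometry for one hardware interrupt
 * and show how the two ESB pages relate. */
static void example_dump_esb(u32 hw_irq)
{
	struct xive_irq_data xd;

	if (xive_native_populate_irq_data(hw_irq, &xd))
		return;

	/* Each ESB page is 1 << esb_shift bytes; the trigger page may be
	 * absent, or may be the same MMIO page as the EOI page. */
	pr_info("irq 0x%x: ESB page size %u, trig %s\n", hw_irq,
		1u << xd.esb_shift,
		xd.trig_mmio == xd.eoi_mmio ? "shared with EOI" :
		xd.trig_mmio ? "separate" : "none");
}
```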
int xive_native_configure_irq(u32 hw_irq, u32 target, u8 prio, u32 sw_irq)
{
	s64 rc;

	for (;;) {
		rc = opal_xive_set_irq_config(hw_irq, target, prio, sw_irq);
		if (rc != OPAL_BUSY)
			break;
		msleep(1);
	}
	return rc == 0 ? 0 : -ENXIO;
}
EXPORT_SYMBOL_GPL(xive_native_configure_irq);
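The for (;;) / msleep(1) loop above is the recurring idiom in this file for OPAL calls that can return OPAL_BUSY while firmware is still working. A hypothetical helper capturing the pattern — not part of this series, which deliberately open-codes the loop at each call site:

```c
/* Hypothetical convenience wrapper, for illustration only: retry an
 * argument-less OPAL call while firmware reports OPAL_BUSY, sleeping
 * 1ms between attempts. */
static s64 example_opal_retry(s64 (*call)(void))
{
	s64 rc;

	for (;;) {
		rc = call();
		if (rc != OPAL_BUSY)
			return rc;
		msleep(1);	/* give firmware time to make progress */
	}
}
```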
/* This can be called multiple times to change a queue configuration */
int xive_native_configure_queue(u32 vp_id, struct xive_q *q, u8 prio,
				__be32 *qpage, u32 order, bool can_escalate)
{
	s64 rc = 0;
	__be64 qeoi_page_be;
	__be32 esc_irq_be;
	u64 flags, qpage_phys;

	/* If there's an actual queue page, clean it */
	if (order) {
		if (WARN_ON(!qpage))
			return -EINVAL;
		qpage_phys = __pa(qpage);
	} else
		qpage_phys = 0;

	/* Initialize the rest of the fields */
	q->msk = order ? ((1u << (order - 2)) - 1) : 0;
	q->idx = 0;
	q->toggle = 0;

	rc = opal_xive_get_queue_info(vp_id, prio, NULL, NULL,
				      &qeoi_page_be,
				      &esc_irq_be,
				      NULL);
	if (rc) {
		pr_err("Error %lld getting queue info prio %d\n", rc, prio);
		rc = -EIO;
		goto fail;
	}
	q->eoi_phys = be64_to_cpu(qeoi_page_be);

	/* Default flags */
	flags = OPAL_XIVE_EQ_ALWAYS_NOTIFY | OPAL_XIVE_EQ_ENABLED;

	/* Escalation needed? */
	if (can_escalate) {
		q->esc_irq = be32_to_cpu(esc_irq_be);
		flags |= OPAL_XIVE_EQ_ESCALATE;
	}

	/* Configure and enable the queue in HW */
	for (;;) {
		rc = opal_xive_set_queue_info(vp_id, prio, qpage_phys, order, flags);
		if (rc != OPAL_BUSY)
			break;
		msleep(1);
	}
	if (rc) {
		pr_err("Error %lld setting queue for prio %d\n", rc, prio);
		rc = -EIO;
	} else {
		/*
		 * KVM code requires all of the above to be visible before
		 * q->qpage is set due to how it manages IPI EOIs
		 */
		wmb();
		q->qpage = qpage;
	}
fail:
	return rc;
}
EXPORT_SYMBOL_GPL(xive_native_configure_queue);
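Since each event-queue entry is a 4-byte (__be32) word, a queue of 1 << order bytes holds 1 << (order - 2) entries, which is where q->msk above comes from. A worked example under that reading:

```c
#include <linux/types.h>

/* Illustrative only: event-queue geometry for a 64kB queue (order 16).
 * The queue holds 1 << (16 - 2) = 16384 four-byte entries, so the index
 * mask is 0x3fff and the index advances ring-buffer style. */
static u32 example_eq_next_index(u32 idx)
{
	const u32 order = 16;
	const u32 msk = (1u << (order - 2)) - 1;

	return (idx + 1) & msk;	/* wrap around at the end of the queue */
}
```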
static void __xive_native_disable_queue(u32 vp_id, struct xive_q *q, u8 prio)
{
	s64 rc;

	/* Disable the queue in HW */
	for (;;) {
		rc = opal_xive_set_queue_info(vp_id, prio, 0, 0, 0);
		if (rc != OPAL_BUSY)
			break;
		msleep(1);
	}
	if (rc)
		pr_err("Error %lld disabling queue for prio %d\n", rc, prio);
}

void xive_native_disable_queue(u32 vp_id, struct xive_q *q, u8 prio)
{
	__xive_native_disable_queue(vp_id, q, prio);
}
EXPORT_SYMBOL_GPL(xive_native_disable_queue);
static int xive_native_setup_queue(unsigned int cpu, struct xive_cpu *xc, u8 prio)
{
	struct xive_q *q = &xc->queue[prio];
	unsigned int alloc_order;
	struct page *pages;
	__be32 *qpage;

	alloc_order = (xive_queue_shift > PAGE_SHIFT) ?
		(xive_queue_shift - PAGE_SHIFT) : 0;
	pages = alloc_pages_node(cpu_to_node(cpu), GFP_KERNEL, alloc_order);
	if (!pages)
		return -ENOMEM;
	qpage = (__be32 *)page_address(pages);
	memset(qpage, 0, 1 << xive_queue_shift);
	return xive_native_configure_queue(get_hard_smp_processor_id(cpu),
					   q, prio, qpage, xive_queue_shift, false);
}

static void xive_native_cleanup_queue(unsigned int cpu, struct xive_cpu *xc, u8 prio)
{
	struct xive_q *q = &xc->queue[prio];
	unsigned int alloc_order;

	/*
	 * We use the variant with no iounmap as this is called on exec
	 * from an IPI and iounmap isn't safe
	 */
	__xive_native_disable_queue(get_hard_smp_processor_id(cpu), q, prio);
	alloc_order = (xive_queue_shift > PAGE_SHIFT) ?
		(xive_queue_shift - PAGE_SHIFT) : 0;
	free_pages((unsigned long)q->qpage, alloc_order);
	q->qpage = NULL;
}

static bool xive_native_match(struct device_node *node)
{
	return of_device_is_compatible(node, "ibm,opal-xive-vc");
}

#ifdef CONFIG_SMP
static int xive_native_get_ipi(unsigned int cpu, struct xive_cpu *xc)
{
	struct device_node *np;
	unsigned int chip_id = 0;	/* default when no device-node is found */
	s64 irq;

	/* Find the chip ID */
	np = of_get_cpu_node(cpu, NULL);
	if (np) {
		if (of_property_read_u32(np, "ibm,chip-id", &chip_id) < 0)
			chip_id = 0;
	}

	/* Allocate an IPI and populate info about it */
	for (;;) {
		irq = opal_xive_allocate_irq(chip_id);
		if (irq == OPAL_BUSY) {
			msleep(1);
			continue;
		}
		if (irq < 0) {
			pr_err("Failed to allocate IPI on CPU %d\n", cpu);
			return -ENXIO;
		}
		xc->hw_ipi = irq;
		break;
	}
	return 0;
}
#endif /* CONFIG_SMP */
u32 xive_native_alloc_irq(void)
{
	s64 rc;

	for (;;) {
		rc = opal_xive_allocate_irq(OPAL_XIVE_ANY_CHIP);
		if (rc != OPAL_BUSY)
			break;
		msleep(1);
	}
	if (rc < 0)
		return 0;
	return rc;
}
EXPORT_SYMBOL_GPL(xive_native_alloc_irq);

void xive_native_free_irq(u32 irq)
{
	for (;;) {
		s64 rc = opal_xive_free_irq(irq);
		if (rc != OPAL_BUSY)
			break;
		msleep(1);
	}
}
EXPORT_SYMBOL_GPL(xive_native_free_irq);

#ifdef CONFIG_SMP
static void xive_native_put_ipi(unsigned int cpu, struct xive_cpu *xc)
{
	s64 rc;

	/* Free the IPI */
	if (!xc->hw_ipi)
		return;
	for (;;) {
		rc = opal_xive_free_irq(xc->hw_ipi);
		if (rc == OPAL_BUSY) {
			msleep(1);
			continue;
		}
		xc->hw_ipi = 0;
		break;
	}
}
#endif /* CONFIG_SMP */

static void xive_native_shutdown(void)
{
	/* Switch the XIVE to emulation mode */
	opal_xive_reset(OPAL_XIVE_MODE_EMU);
}
/*
 * Perform an "ack" cycle on the current thread, thus
 * grabbing the pending active priorities and updating
 * the CPPR to the most favored one.
 */
static void xive_native_update_pending(struct xive_cpu *xc)
{
	u8 he, cppr;
	u16 ack;

	/* Perform the acknowledge hypervisor to register cycle */
	ack = be16_to_cpu(__raw_readw(xive_tima + TM_SPC_ACK_HV_REG));

	/* Synchronize subsequent queue accesses */
	mb();

	/*
	 * Grab the CPPR and the "HE" field which indicates the source
	 * of the hypervisor interrupt (if any)
	 */
	cppr = ack & 0xff;
	he = GETFIELD(TM_QW3_NSR_HE, (ack >> 8));
	switch (he) {
	case TM_QW3_NSR_HE_NONE: /* Nothing to see here */
		break;
	case TM_QW3_NSR_HE_PHYS: /* Physical thread interrupt */
		if (cppr == 0xff)
			return;
		/* Mark the priority pending */
		xc->pending_prio |= 1 << cppr;

		/*
		 * A new interrupt should never have a CPPR less favored
		 * than our current one.
		 */
		if (cppr >= xc->cppr)
			pr_err("CPU %d odd ack CPPR, got %d at %d\n",
			       smp_processor_id(), cppr, xc->cppr);

		/* Update our idea of what the CPPR is */
		xc->cppr = cppr;
		break;
	case TM_QW3_NSR_HE_POOL: /* HV Pool interrupt (unused) */
	case TM_QW3_NSR_HE_LSI:  /* Legacy FW LSI (unused) */
		pr_err("CPU %d got unexpected interrupt type HE=%d\n",
		       smp_processor_id(), he);
		return;
	}
}
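From the code above, the 16-bit ACK word read from the TIMA packs the NSR byte (which carries the HE source field) in its high byte and the CPPR in its low byte. A standalone sketch of that decode, assuming the same layout and the GETFIELD/TM_QW3_NSR_HE definitions from asm/xive-regs.h:

```c
#include <linux/types.h>
#include <asm/xive-regs.h>

/* Illustrative only: split a TIMA hypervisor ACK word the same way
 * xive_native_update_pending() does; the struct is example scaffolding. */
struct example_ack {
	u8 he;		/* source of the HV interrupt, if any */
	u8 cppr;	/* current processor priority */
};

static struct example_ack example_decode_ack(u16 ack)
{
	struct example_ack out = {
		.cppr = ack & 0xff,
		.he = GETFIELD(TM_QW3_NSR_HE, ack >> 8),
	};

	return out;
}
```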
static void xive_native_eoi(u32 hw_irq)
{
	/*
	 * Not normally used except if specific interrupts need
	 * a workaround on EOI.
	 */
	opal_int_eoi(hw_irq);
}

static void xive_native_setup_cpu(unsigned int cpu, struct xive_cpu *xc)
{
	s64 rc;
	u32 vp;
	__be64 vp_cam_be;
	u64 vp_cam;

	if (xive_pool_vps == XIVE_INVALID_VP)
		return;

	/* Enable the pool VP */
	vp = xive_pool_vps + cpu;
	pr_debug("CPU %d setting up pool VP 0x%x\n", cpu, vp);
	for (;;) {
		rc = opal_xive_set_vp_info(vp, OPAL_XIVE_VP_ENABLED, 0);
		if (rc != OPAL_BUSY)
			break;
		msleep(1);
	}
	if (rc) {
		pr_err("Failed to enable pool VP on CPU %d\n", cpu);
		return;
	}

	/* Grab its CAM value */
	rc = opal_xive_get_vp_info(vp, NULL, &vp_cam_be, NULL, NULL);
	if (rc) {
		pr_err("Failed to get pool VP info CPU %d\n", cpu);
		return;
	}
	vp_cam = be64_to_cpu(vp_cam_be);

	pr_debug("VP CAM = %llx\n", vp_cam);

	/* Push it on the CPU (set LSMFB to 0xff to skip backlog scan) */
	pr_debug("(Old HW value: %08x)\n",
		 in_be32(xive_tima + TM_QW2_HV_POOL + TM_WORD2));
	out_be32(xive_tima + TM_QW2_HV_POOL + TM_WORD0, 0xff);
	out_be32(xive_tima + TM_QW2_HV_POOL + TM_WORD2,
		 TM_QW2W2_VP | vp_cam);
	pr_debug("(New HW value: %08x)\n",
		 in_be32(xive_tima + TM_QW2_HV_POOL + TM_WORD2));
}

static void xive_native_teardown_cpu(unsigned int cpu, struct xive_cpu *xc)
{
	s64 rc;
	u32 vp;

	if (xive_pool_vps == XIVE_INVALID_VP)
		return;

	/* Pull the pool VP from the CPU */
	in_be64(xive_tima + TM_SPC_PULL_POOL_CTX);

	/* Disable it */
	vp = xive_pool_vps + cpu;
	for (;;) {
		rc = opal_xive_set_vp_info(vp, 0, 0);
		if (rc != OPAL_BUSY)
			break;
		msleep(1);
	}
}

void xive_native_sync_source(u32 hw_irq)
{
	opal_xive_sync(XIVE_SYNC_EAS, hw_irq);
}
EXPORT_SYMBOL_GPL(xive_native_sync_source);
static const struct xive_ops xive_native_ops = {
	.populate_irq_data	= xive_native_populate_irq_data,
	.configure_irq		= xive_native_configure_irq,
	.setup_queue		= xive_native_setup_queue,
	.cleanup_queue		= xive_native_cleanup_queue,
	.match			= xive_native_match,
	.shutdown		= xive_native_shutdown,
	.update_pending		= xive_native_update_pending,
	.eoi			= xive_native_eoi,
	.setup_cpu		= xive_native_setup_cpu,
	.teardown_cpu		= xive_native_teardown_cpu,
	.sync_source		= xive_native_sync_source,
#ifdef CONFIG_SMP
	.get_ipi		= xive_native_get_ipi,
	.put_ipi		= xive_native_put_ipi,
#endif /* CONFIG_SMP */
	.name			= "native",
};

static bool xive_parse_provisioning(struct device_node *np)
{
	int rc;

	if (of_property_read_u32(np, "ibm,xive-provision-page-size",
				 &xive_provision_size) < 0)
		return true;
	rc = of_property_count_elems_of_size(np, "ibm,xive-provision-chips", 4);
	if (rc < 0) {
		pr_err("Error %d getting provision chips array\n", rc);
		return false;
	}
	xive_provision_chip_count = rc;
	if (rc == 0)
		return true;

	xive_provision_chips = kzalloc(4 * xive_provision_chip_count,
				       GFP_KERNEL);
	if (WARN_ON(!xive_provision_chips))
		return false;

	rc = of_property_read_u32_array(np, "ibm,xive-provision-chips",
					xive_provision_chips,
					xive_provision_chip_count);
	if (rc < 0) {
		pr_err("Error %d reading provision chips array\n", rc);
		return false;
	}

	xive_provision_cache = kmem_cache_create("xive-provision",
						 xive_provision_size,
						 xive_provision_size,
						 0, NULL);
	if (!xive_provision_cache) {
		pr_err("Failed to allocate provision cache\n");
		return false;
	}
	return true;
}
static void xive_native_setup_pools(void)
{
	/* Allocate a pool big enough */
	pr_debug("XIVE: Allocating VP block for pool size %d\n", nr_cpu_ids);

	xive_pool_vps = xive_native_alloc_vp_block(nr_cpu_ids);
	if (WARN_ON(xive_pool_vps == XIVE_INVALID_VP))
		pr_err("XIVE: Failed to allocate pool VP, KVM might not function\n");

	pr_debug("XIVE: Pool VPs allocated at 0x%x for %d max CPUs\n",
		 xive_pool_vps, nr_cpu_ids);
}

u32 xive_native_default_eq_shift(void)
{
	return xive_queue_shift;
}
EXPORT_SYMBOL_GPL(xive_native_default_eq_shift);

bool xive_native_init(void)
{
	struct device_node *np;
	struct resource r;
	void __iomem *tima;
	struct property *prop;
	u8 max_prio = 7;
	const __be32 *p;
	u32 val, cpu;
	s64 rc;

	if (xive_cmdline_disabled)
		return false;

	pr_devel("xive_native_init()\n");
	np = of_find_compatible_node(NULL, NULL, "ibm,opal-xive-pe");
	if (!np) {
		pr_devel("not found !\n");
		return false;
	}
	pr_devel("Found %s\n", np->full_name);

	/* Resource 1 is HV window */
	if (of_address_to_resource(np, 1, &r)) {
		pr_err("Failed to get thread mgmnt area resource\n");
		return false;
	}
	tima = ioremap(r.start, resource_size(&r));
	if (!tima) {
		pr_err("Failed to map thread mgmnt area\n");
		return false;
	}

	/* Read number of priorities */
	if (of_property_read_u32(np, "ibm,xive-#priorities", &val) == 0)
		max_prio = val - 1;

	/* Iterate the EQ sizes and pick one */
	of_property_for_each_u32(np, "ibm,xive-eq-sizes", prop, p, val) {
		xive_queue_shift = val;
		if (val == PAGE_SHIFT)
			break;
	}

	/* Configure Thread Management areas for KVM */
	for_each_possible_cpu(cpu)
		kvmppc_set_xive_tima(cpu, r.start, tima);

	/* Grab size of provisioning pages */
	xive_parse_provisioning(np);

	/* Switch the XIVE to exploitation mode */
	rc = opal_xive_reset(OPAL_XIVE_MODE_EXPL);
	if (rc) {
		pr_err("Switch to exploitation mode failed with error %lld\n", rc);
		return false;
	}

	/* Setup some dummy HV pool VPs */
	xive_native_setup_pools();

	/* Initialize XIVE core with our backend */
	if (!xive_core_init(&xive_native_ops, tima, TM_QW3_HV_PHYS,
			    max_prio)) {
		opal_xive_reset(OPAL_XIVE_MODE_EMU);
		return false;
	}
	pr_info("Using %dkB queues\n", 1 << (xive_queue_shift - 10));
	return true;
}
static bool xive_native_provision_pages(void)
{
	u32 i;
	void *p;

	for (i = 0; i < xive_provision_chip_count; i++) {
		u32 chip = xive_provision_chips[i];

		/*
		 * XXX TODO: Try to make the allocation local to the node where
		 * the chip resides.
		 */
		p = kmem_cache_alloc(xive_provision_cache, GFP_KERNEL);
		if (!p) {
			pr_err("Failed to allocate provisioning page\n");
			return false;
		}
		opal_xive_donate_page(chip, __pa(p));
	}
	return true;
}

u32 xive_native_alloc_vp_block(u32 max_vcpus)
{
	s64 rc;
	u32 order;

	order = fls(max_vcpus) - 1;
	if (max_vcpus > (1 << order))
		order++;

	pr_info("VP block alloc, for max VCPUs %d use order %d\n",
		max_vcpus, order);

	for (;;) {
		rc = opal_xive_alloc_vp_block(order);
		switch (rc) {
		case OPAL_BUSY:
			msleep(1);
			break;
		case OPAL_XIVE_PROVISIONING:
			if (!xive_native_provision_pages())
				return XIVE_INVALID_VP;
			break;
		default:
			if (rc < 0) {
				pr_err("OPAL failed to allocate VCPUs order %d, err %lld\n",
				       order, rc);
				return XIVE_INVALID_VP;
			}
			return rc;
		}
	}
}
EXPORT_SYMBOL_GPL(xive_native_alloc_vp_block);
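The fls()-based computation above rounds max_vcpus up to the next power of two and returns its log2, since OPAL hands out VP blocks by order. A worked example of just that arithmetic:

```c
#include <linux/bitops.h>

/* Illustrative only: the order computation from
 * xive_native_alloc_vp_block(). For max_vcpus = 48: fls(48) - 1 = 5,
 * and 48 > (1 << 5) = 32, so order becomes 6 and the block holds 64 VPs. */
static u32 example_vp_block_order(u32 max_vcpus)
{
	u32 order = fls(max_vcpus) - 1;

	if (max_vcpus > (1u << order))
		order++;	/* round up to the next power of two */
	return order;
}
```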
void xive_native_free_vp_block(u32 vp_base)
{
	s64 rc;

	if (vp_base == XIVE_INVALID_VP)
		return;

	rc = opal_xive_free_vp_block(vp_base);
	if (rc < 0)
		pr_warn("OPAL error %lld freeing VP block\n", rc);
}
EXPORT_SYMBOL_GPL(xive_native_free_vp_block);

int xive_native_enable_vp(u32 vp_id)
{
	s64 rc;

	for (;;) {
		rc = opal_xive_set_vp_info(vp_id, OPAL_XIVE_VP_ENABLED, 0);
		if (rc != OPAL_BUSY)
			break;
		msleep(1);
	}
	return rc ? -EIO : 0;
}
EXPORT_SYMBOL_GPL(xive_native_enable_vp);

int xive_native_disable_vp(u32 vp_id)
{
	s64 rc;

	for (;;) {
		rc = opal_xive_set_vp_info(vp_id, 0, 0);
		if (rc != OPAL_BUSY)
			break;
		msleep(1);
	}
	return rc ? -EIO : 0;
}
EXPORT_SYMBOL_GPL(xive_native_disable_vp);

int xive_native_get_vp_info(u32 vp_id, u32 *out_cam_id, u32 *out_chip_id)
{
	__be64 vp_cam_be;
	__be32 vp_chip_id_be;
	s64 rc;

	rc = opal_xive_get_vp_info(vp_id, NULL, &vp_cam_be, NULL, &vp_chip_id_be);
	if (rc)
		return -EIO;
	*out_cam_id = be64_to_cpu(vp_cam_be) & 0xffffffffu;
	*out_chip_id = be32_to_cpu(vp_chip_id_be);

	return 0;
}
EXPORT_SYMBOL_GPL(xive_native_get_vp_info);
@@ -0,0 +1,62 @@
/*
 * Copyright 2016,2017 IBM Corporation.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#ifndef __XIVE_INTERNAL_H
#define __XIVE_INTERNAL_H

/* Each CPU carries one of these with various per-CPU state */
struct xive_cpu {
#ifdef CONFIG_SMP
	/* HW irq number and data of IPI */
	u32 hw_ipi;
	struct xive_irq_data ipi_data;
#endif /* CONFIG_SMP */

	int chip_id;

	/* Queue data. Only one queue is populated */
#define XIVE_MAX_QUEUES	8
	struct xive_q queue[XIVE_MAX_QUEUES];

	/*
	 * Pending mask. Each bit corresponds to a priority that
	 * potentially has pending interrupts.
	 */
	u8 pending_prio;

	/* Cache of HW CPPR */
	u8 cppr;
};

/* Backend ops */
struct xive_ops {
	int	(*populate_irq_data)(u32 hw_irq, struct xive_irq_data *data);
	int	(*configure_irq)(u32 hw_irq, u32 target, u8 prio, u32 sw_irq);
	int	(*setup_queue)(unsigned int cpu, struct xive_cpu *xc, u8 prio);
	void	(*cleanup_queue)(unsigned int cpu, struct xive_cpu *xc, u8 prio);
	void	(*setup_cpu)(unsigned int cpu, struct xive_cpu *xc);
	void	(*teardown_cpu)(unsigned int cpu, struct xive_cpu *xc);
	bool	(*match)(struct device_node *np);
	void	(*shutdown)(void);

	void	(*update_pending)(struct xive_cpu *xc);
	void	(*eoi)(u32 hw_irq);
	void	(*sync_source)(u32 hw_irq);
#ifdef CONFIG_SMP
	int	(*get_ipi)(unsigned int cpu, struct xive_cpu *xc);
	void	(*put_ipi)(unsigned int cpu, struct xive_cpu *xc);
#endif
	const char *name;
};

bool xive_core_init(const struct xive_ops *ops, void __iomem *area, u32 offset,
		    u8 max_prio);

extern bool xive_cmdline_disabled;

#endif /* __XIVE_INTERNAL_H */
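struct xive_ops is the seam between the (suppressed) common XIVE core and a backend such as native.c above. A deliberately minimal, hypothetical backend registration — the stub names and no-op bodies are invented for illustration, and a real backend would have to fill in every hook the core calls:

```c
#include <linux/errno.h>
#include <asm/xive.h>
#include "xive-internal.h"

/* Hypothetical skeleton backend, for illustration only. */
static int stub_populate_irq_data(u32 hw_irq, struct xive_irq_data *data)
{
	return -ENXIO;	/* a real backend queries its firmware here */
}

static const struct xive_ops example_stub_ops = {
	.populate_irq_data	= stub_populate_irq_data,
	/* ... every other hook filled in the same way ... */
	.name			= "stub",
};

static bool __init example_stub_init(void __iomem *tima)
{
	/* Hand the core our ops, the thread management area mapping,
	 * the TIMA ring offset and the highest usable priority,
	 * mirroring the xive_core_init() call in native.c. */
	return xive_core_init(&example_stub_ops, tima, TM_QW3_HV_PHYS, 7);
}
```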
@@ -30,6 +30,7 @@
 #include <linux/ctype.h>

 #include <asm/ptrace.h>
+#include <asm/smp.h>
 #include <asm/string.h>
 #include <asm/prom.h>
 #include <asm/machdep.h>
@@ -48,7 +49,7 @@
 #include <asm/reg.h>
 #include <asm/debug.h>
 #include <asm/hw_breakpoint.h>
+#include <asm/xive.h>
 #include <asm/opal.h>
 #include <asm/firmware.h>
@@ -232,7 +233,13 @@ Commands:\n\
 "\
   dr	dump stream of raw bytes\n\
   dt	dump the tracing buffers (uses printk)\n\
-  e	print exception information\n\
+"
+#ifdef CONFIG_PPC_POWERNV
+"  dx#   dump xive on CPU #\n\
+  dxi#  dump xive irq state #\n\
+  dxa   dump xive on all CPUs\n"
+#endif
+"  e	print exception information\n\
   f	flush cache\n\
   la	lookup symbol+offset of specified address\n\
   ls	lookup address of specified symbol\n\
@@ -2338,6 +2345,81 @@ static void dump_pacas(void)
 }
 #endif

+#ifdef CONFIG_PPC_POWERNV
+static void dump_one_xive(int cpu)
+{
+	unsigned int hwid = get_hard_smp_processor_id(cpu);
+
+	opal_xive_dump(XIVE_DUMP_TM_HYP, hwid);
+	opal_xive_dump(XIVE_DUMP_TM_POOL, hwid);
+	opal_xive_dump(XIVE_DUMP_TM_OS, hwid);
+	opal_xive_dump(XIVE_DUMP_TM_USER, hwid);
+	opal_xive_dump(XIVE_DUMP_VP, hwid);
+	opal_xive_dump(XIVE_DUMP_EMU_STATE, hwid);
+
+	if (setjmp(bus_error_jmp) != 0) {
+		catch_memory_errors = 0;
+		printf("*** Error dumping xive on cpu %d\n", cpu);
+		return;
+	}
+
+	catch_memory_errors = 1;
+	sync();
+	xmon_xive_do_dump(cpu);
+	sync();
+	__delay(200);
+	catch_memory_errors = 0;
+}
+
+static void dump_all_xives(void)
+{
+	int cpu;
+
+	if (num_possible_cpus() == 0) {
+		printf("No possible cpus, use 'dx #' to dump individual cpus\n");
+		return;
+	}
+
+	for_each_possible_cpu(cpu)
+		dump_one_xive(cpu);
+}
+
+static void dump_one_xive_irq(u32 num)
+{
+	s64 rc;
+	__be64 vp;
+	u8 prio;
+	__be32 lirq;
+
+	rc = opal_xive_get_irq_config(num, &vp, &prio, &lirq);
+	xmon_printf("IRQ 0x%x config: vp=0x%llx prio=%d lirq=0x%x (rc=%lld)\n",
+		    num, be64_to_cpu(vp), prio, be32_to_cpu(lirq), rc);
+}
+
+static void dump_xives(void)
+{
+	unsigned long num;
+	int c;
+
+	c = inchar();
+	if (c == 'a') {
+		dump_all_xives();
+		return;
+	} else if (c == 'i') {
+		if (scanhex(&num))
+			dump_one_xive_irq(num);
+		return;
+	}
+
+	termch = c;	/* Put c back, it wasn't 'a' */
+
+	if (scanhex(&num))
+		dump_one_xive(num);
+	else
+		dump_one_xive(xmon_owner);
+}
+#endif /* CONFIG_PPC_POWERNV */
+
 static void dump_by_size(unsigned long addr, long count, int size)
 {
 	unsigned char temp[16];
@@ -2386,6 +2468,14 @@ dump(void)
 		return;
 	}
 #endif
+#ifdef CONFIG_PPC_POWERNV
+	if (c == 'x') {
+		xmon_start_pagination();
+		dump_xives();
+		xmon_end_pagination();
+		return;
+	}
+#endif

 	if (c == '\n')
 		termch = c;
@@ -1149,7 +1149,6 @@ int kvm_register_device_ops(struct kvm_device_ops *ops, u32 type);
 void kvm_unregister_device_ops(u32 type);

 extern struct kvm_device_ops kvm_mpic_ops;
-extern struct kvm_device_ops kvm_xics_ops;
 extern struct kvm_device_ops kvm_arm_vgic_v2_ops;
 extern struct kvm_device_ops kvm_arm_vgic_v3_ops;

@@ -2825,10 +2825,6 @@ static struct kvm_device_ops *kvm_device_ops_table[KVM_DEV_TYPE_MAX] = {
 	[KVM_DEV_TYPE_FSL_MPIC_20]	= &kvm_mpic_ops,
 	[KVM_DEV_TYPE_FSL_MPIC_42]	= &kvm_mpic_ops,
 #endif

-#ifdef CONFIG_KVM_XICS
-	[KVM_DEV_TYPE_XICS]		= &kvm_xics_ops,
-#endif
 };

 int kvm_register_device_ops(struct kvm_device_ops *ops, u32 type)
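With the static table entry gone, the XICS device type has to be registered at runtime instead, through the kvm_register_device_ops() declared in the hunk above. A sketch of what such a registration looks like — the function name is invented and the call site is assumed to be in the PowerPC KVM init code, where kvm_xics_ops is still visible:

```c
/* Illustrative only: dynamic device-ops registration replacing the
 * static kvm_device_ops_table[] entry removed above. */
static int example_register_xics(void)
{
	return kvm_register_device_ops(&kvm_xics_ops, KVM_DEV_TYPE_XICS);
}
```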