s390/mm: protection exceptions are correctly shadowed

As gmap shadows contain the correct protection permissions, protection
exceptions can be forwarded directly to guest 3. If we encounter a
protection exception while faulting, the next guest 3 run will
automatically handle it for us.

Keep the dat_protection logic in place, as it will be helpful later.
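
For context, a caller can simply reflect the returned pgm exception code
into the guest. The sketch below is illustrative only and not part of this
patch: handle_shadow_fault() is a hypothetical wrapper, and it assumes the
existing kvm_s390_inject_program_int() helper from arch/s390/kvm.

/* Illustrative sketch, not part of this patch: how a caller might
 * consume kvm_s390_shadow_fault() now that protection exceptions are
 * no longer raised during fault resolution but taken by guest 3 on
 * its next run. handle_shadow_fault() is a hypothetical wrapper.
 */
static int handle_shadow_fault(struct kvm_vcpu *vcpu, struct gmap *sg,
			       unsigned long saddr)
{
	int rc = kvm_s390_shadow_fault(sg, saddr);

	if (rc > 0)	/* pgm exception code: reflect it to the guest */
		return kvm_s390_inject_program_int(vcpu, rc);
	return rc;	/* 0 on success, -EFAULT/-ENOMEM otherwise */
}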

Acked-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
Signed-off-by: David Hildenbrand <dahi@linux.vnet.ibm.com>
Signed-off-by: Christian Borntraeger <borntraeger@de.ibm.com>

arch/s390/kvm/gaccess.c

@@ -1075,7 +1075,6 @@ static int kvm_s390_shadow_tables(struct gmap *sg, unsigned long saddr,
  * kvm_s390_shadow_fault - handle fault on a shadow page table
  * @sg: pointer to the shadow guest address space structure
  * @saddr: faulting address in the shadow gmap
- * @write: =1 map r/w, =0 map r/o
  *
  * Returns: - 0 if the shadow fault was successfully resolved
  *          - > 0 (pgm exception code) on exceptions while faulting
@@ -1083,7 +1082,7 @@ static int kvm_s390_shadow_tables(struct gmap *sg, unsigned long saddr,
  *          - -EFAULT when accessing invalid guest addresses
  *          - -ENOMEM if out of memory
  */
-int kvm_s390_shadow_fault(struct gmap *sg, unsigned long saddr, int write)
+int kvm_s390_shadow_fault(struct gmap *sg, unsigned long saddr)
 {
 	union vaddress vaddr;
 	union page_table_entry pte;
@@ -1104,9 +1103,6 @@ int kvm_s390_shadow_fault(struct gmap *sg, unsigned long saddr, int write)
 		rc = PGM_PAGE_TRANSLATION;
 	if (!rc && (pte.z || pte.co))
 		rc = PGM_TRANSLATION_SPEC;
-	dat_protection |= pte.p;
-	if (!rc && write && dat_protection)
-		rc = PGM_PROTECTION;
 	if (!rc)
 		rc = gmap_shadow_page(sg, saddr, __pte(pte.val));
 	up_read(&sg->mm->mmap_sem);
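
The hunk above drops only the synchronous write/protection check; the
dat_protection value accumulated by kvm_s390_shadow_tables() is still
tracked, as the commit message notes. One plausible later use (a sketch
under that assumption, not code from this commit) is to fold it into the
shadow PTE so the hardware itself raises PGM_PROTECTION when guest 3
writes to a protected page:

	/* sketch only: merge region/segment-level protection into the
	 * pte before shadowing, so guest 3 takes the protection
	 * exception in hardware on its next run rather than during
	 * fault resolution */
	pte.p |= dat_protection;
	if (!rc)
		rc = gmap_shadow_page(sg, saddr, __pte(pte.val));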

arch/s390/kvm/gaccess.h

@@ -361,6 +361,6 @@ void ipte_unlock(struct kvm_vcpu *vcpu);
 int ipte_lock_held(struct kvm_vcpu *vcpu);
 int kvm_s390_check_low_addr_prot_real(struct kvm_vcpu *vcpu, unsigned long gra);
-int kvm_s390_shadow_fault(struct gmap *shadow, unsigned long saddr, int write);
+int kvm_s390_shadow_fault(struct gmap *shadow, unsigned long saddr);
 #endif /* __KVM_S390_GACCESS_H */