diff --git a/arch/s390/include/asm/gmap.h b/arch/s390/include/asm/gmap.h
index 58e65ee5b2d2..4a47055f58d7 100644
--- a/arch/s390/include/asm/gmap.h
+++ b/arch/s390/include/asm/gmap.h
@@ -110,8 +110,7 @@ int gmap_shadow_sgt(struct gmap *sg, unsigned long saddr, unsigned long sgt);
 int gmap_shadow_pgt(struct gmap *sg, unsigned long saddr, unsigned long pgt);
 int gmap_shadow_pgt_lookup(struct gmap *sg, unsigned long saddr,
 			   unsigned long *pgt, int *dat_protection);
-int gmap_shadow_page(struct gmap *sg, unsigned long saddr,
-		     unsigned long paddr, int write);
+int gmap_shadow_page(struct gmap *sg, unsigned long saddr, pte_t pte);
 
 void gmap_register_pte_notifier(struct gmap_notifier *);
 void gmap_unregister_pte_notifier(struct gmap_notifier *);
diff --git a/arch/s390/include/asm/pgtable.h b/arch/s390/include/asm/pgtable.h
index a6e7fc8f5b49..c7ebba483f09 100644
--- a/arch/s390/include/asm/pgtable.h
+++ b/arch/s390/include/asm/pgtable.h
@@ -895,7 +895,7 @@ void ptep_zap_unused(struct mm_struct *mm, unsigned long addr,
 		     pte_t *ptep , int reset);
 void ptep_zap_key(struct mm_struct *mm, unsigned long addr, pte_t *ptep);
 int ptep_shadow_pte(struct mm_struct *mm, unsigned long saddr,
-		    pte_t *sptep, pte_t *tptep, int write);
+		    pte_t *sptep, pte_t *tptep, pte_t pte);
 void ptep_unshadow_pte(struct mm_struct *mm, unsigned long saddr, pte_t *ptep);
 
 bool test_and_clear_guest_dirty(struct mm_struct *mm, unsigned long address);
diff --git a/arch/s390/kvm/gaccess.c b/arch/s390/kvm/gaccess.c
index ba4985262bce..c5f79c1205cf 100644
--- a/arch/s390/kvm/gaccess.c
+++ b/arch/s390/kvm/gaccess.c
@@ -1109,7 +1109,7 @@ int kvm_s390_shadow_fault(struct gmap *sg, unsigned long saddr, int write)
 	dat_protection |= pte.p;
 	if (write && dat_protection)
 		return PGM_PROTECTION;
-	rc = gmap_shadow_page(sg, saddr, pte.pfra * 4096, write);
+	rc = gmap_shadow_page(sg, saddr, __pte(pte.val));
 	if (rc)
 		return rc;
 	return 0;
diff --git a/arch/s390/mm/gmap.c b/arch/s390/mm/gmap.c
index b02d0d0cc641..a57a87bfeb27 100644
--- a/arch/s390/mm/gmap.c
+++ b/arch/s390/mm/gmap.c
@@ -1743,8 +1743,7 @@ EXPORT_SYMBOL_GPL(gmap_shadow_pgt);
  * gmap_shadow_page - create a shadow page mapping
  * @sg: pointer to the shadow guest address space structure
  * @saddr: faulting address in the shadow gmap
- * @paddr: parent gmap address to get mapped at @saddr
- * @write: =1 map r/w, =0 map r/o
+ * @pte: pte in parent gmap address space to get shadowed
  *
  * Returns 0 if successfully shadowed or already shadowed, -EAGAIN if the
  * shadow table structure is incomplete, -ENOMEM if out of memory and
@@ -1752,12 +1751,11 @@ EXPORT_SYMBOL_GPL(gmap_shadow_pgt);
  *
  * Called with sg->mm->mmap_sem in read.
  */
-int gmap_shadow_page(struct gmap *sg, unsigned long saddr,
-		     unsigned long paddr, int write)
+int gmap_shadow_page(struct gmap *sg, unsigned long saddr, pte_t pte)
 {
 	struct gmap *parent;
 	struct gmap_rmap *rmap;
-	unsigned long vmaddr;
+	unsigned long vmaddr, paddr;
 	spinlock_t *ptl;
 	pte_t *sptep, *tptep;
 	int rc;
@@ -1771,6 +1769,7 @@ int gmap_shadow_page(struct gmap *sg, unsigned long saddr,
 	rmap->raddr = (saddr & PAGE_MASK) | _SHADOW_RMAP_PGTABLE;
 
 	while (1) {
+		paddr = pte_val(pte) & PAGE_MASK;
 		vmaddr = __gmap_translate(parent, paddr);
 		if (IS_ERR_VALUE(vmaddr)) {
 			rc = vmaddr;
@@ -1791,8 +1790,7 @@ int gmap_shadow_page(struct gmap *sg, unsigned long saddr,
 				radix_tree_preload_end();
 				break;
 			}
-			rc = ptep_shadow_pte(sg->mm, saddr,
-					     sptep, tptep, write);
+			rc = ptep_shadow_pte(sg->mm, saddr, sptep, tptep, pte);
 			if (rc > 0) {
 				/* Success and a new mapping */
 				gmap_insert_rmap(sg, vmaddr, rmap);
diff --git a/arch/s390/mm/pgtable.c b/arch/s390/mm/pgtable.c
index 5b02583fbf4c..293130b5aee7 100644
--- a/arch/s390/mm/pgtable.c
+++ b/arch/s390/mm/pgtable.c
@@ -463,29 +463,27 @@ int ptep_force_prot(struct mm_struct *mm, unsigned long addr,
 }
 
 int ptep_shadow_pte(struct mm_struct *mm, unsigned long saddr,
-		    pte_t *sptep, pte_t *tptep, int write)
+		    pte_t *sptep, pte_t *tptep, pte_t pte)
 {
 	pgste_t spgste, tpgste;
 	pte_t spte, tpte;
 	int rc = -EAGAIN;
 
+	if (!(pte_val(*tptep) & _PAGE_INVALID))
+		return 0;	/* already shadowed */
 	spgste = pgste_get_lock(sptep);
 	spte = *sptep;
 	if (!(pte_val(spte) & _PAGE_INVALID) &&
-	    !(pte_val(spte) & _PAGE_PROTECT)) {
-		rc = 0;
-		if (!(pte_val(*tptep) & _PAGE_INVALID))
-			/* Update existing mapping */
-			ptep_flush_direct(mm, saddr, tptep);
-		else
-			rc = 1;
+	    !((pte_val(spte) & _PAGE_PROTECT) &&
+	      !(pte_val(pte) & _PAGE_PROTECT))) {
 		pgste_val(spgste) |= PGSTE_VSIE_BIT;
 		tpgste = pgste_get_lock(tptep);
 		pte_val(tpte) = (pte_val(spte) & PAGE_MASK) |
-				(write ? 0 : _PAGE_PROTECT);
+				(pte_val(pte) & _PAGE_PROTECT);
 		/* don't touch the storage key - it belongs to parent pgste */
 		tpgste = pgste_set_pte(tptep, tpgste, tpte);
 		pgste_set_unlock(tptep, tpgste);
+		rc = 1;
 	}
 	pgste_set_unlock(sptep, spgste);
 	return rc;
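
The core semantic change above: the shadow pte's protection is now derived from the guest-provided pte instead of a caller-supplied write flag. Shadowing is refused only if the parent pte is protected while the guest asks for a writable mapping, and the resulting shadow pte simply ORs the guest's _PAGE_PROTECT bit onto the parent frame. Below is a minimal user-space sketch of that merging logic, not part of the patch and not kernel code; the bit values and the can_shadow()/shadow_pte() helpers are illustrative stand-ins (the real definitions live in arch/s390/include/asm/pgtable.h).

/* Illustrative sketch only - simplified stand-ins, not the s390 definitions. */
#include <stdbool.h>
#include <stdio.h>

#define PAGE_MASK	(~0xfffUL)
#define _PAGE_INVALID	0x400UL		/* stand-in value */
#define _PAGE_PROTECT	0x200UL		/* stand-in value */

/* Mirrors the new condition in ptep_shadow_pte(): the parent pte must be
 * valid, and it may only be protected if the guest also asked for r/o. */
static bool can_shadow(unsigned long spte, unsigned long pte)
{
	return !(spte & _PAGE_INVALID) &&
	       !((spte & _PAGE_PROTECT) && !(pte & _PAGE_PROTECT));
}

/* Shadow pte = parent frame plus guest-requested protection. */
static unsigned long shadow_pte(unsigned long spte, unsigned long pte)
{
	return (spte & PAGE_MASK) | (pte & _PAGE_PROTECT);
}

int main(void)
{
	unsigned long spte = 0x12345000UL;		 /* parent: valid, writable */
	unsigned long pte_ro = 0x9000UL | _PAGE_PROTECT; /* guest wants r/o */
	unsigned long pte_rw = 0x9000UL;		 /* guest wants r/w */

	if (can_shadow(spte, pte_ro))
		printf("r/o request -> shadow pte %#lx (protected)\n",
		       shadow_pte(spte, pte_ro));
	if (can_shadow(spte, pte_rw))
		printf("r/w request -> shadow pte %#lx (writable)\n",
		       shadow_pte(spte, pte_rw));
	return 0;
}

With the early "already shadowed" check added to ptep_shadow_pte(), a return of 1 now strictly means a new mapping was created, which is exactly what gmap_shadow_page() keys the rmap insertion on (rc > 0).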