Message ID | 20181221162650.11515-9-julien.grall@arm.com
---|---
State | New
Series | xen/arm: Add xentrace support
On 21/12/2018 16:26, Julien Grall wrote:
> No functional change intended.
>
> Only reasonable clean-ups are done in this patch. The rest will use _gfn
> for the time being.
>
> Signed-off-by: Julien Grall <julien.grall@arm.com>
> Acked-by: Jan Beulich <jbeulich@suse.com>

Please ignore this patch. I need to rebase it on Andrew's patch.

Cheers,

>
> ---
> Changes in v3:
> - Add Jan's acked-by
>
> Changes in v2:
> - Remove >> PAGE_SHIFT in svm code
> - Fix typo in the e-mail address
> - Small NITs
> ---
> xen/arch/arm/guestcopy.c | 2 +-
> xen/arch/arm/mm.c | 2 +-
> xen/arch/x86/cpu/vpmu.c | 2 +-
> xen/arch/x86/domain.c | 12 ++++++------
> xen/arch/x86/domctl.c | 6 +++---
> xen/arch/x86/hvm/dm.c | 2 +-
> xen/arch/x86/hvm/domain.c | 2 +-
> xen/arch/x86/hvm/hvm.c | 9 +++++----
> xen/arch/x86/hvm/svm/svm.c | 8 ++++----
> xen/arch/x86/hvm/viridian/time.c | 8 ++++----
> xen/arch/x86/hvm/viridian/viridian.c | 16 ++++++++--------
> xen/arch/x86/hvm/vmx/vmx.c | 4 ++--
> xen/arch/x86/hvm/vmx/vvmx.c | 12 ++++++------
> xen/arch/x86/mm.c | 24 ++++++++++++++----------
> xen/arch/x86/mm/p2m.c | 2 +-
> xen/arch/x86/mm/shadow/hvm.c | 6 +++---
> xen/arch/x86/physdev.c | 3 ++-
> xen/arch/x86/pv/descriptor-tables.c | 4 ++--
> xen/arch/x86/pv/emul-priv-op.c | 6 +++---
> xen/arch/x86/pv/mm.c | 2 +-
> xen/arch/x86/traps.c | 11 ++++++-----
> xen/common/domain.c | 2 +-
> xen/common/event_fifo.c | 12 ++++++------
> xen/common/memory.c | 4 ++--
> xen/common/tmem_xen.c | 2 +-
> xen/include/asm-arm/p2m.h | 6 +++---
> xen/include/asm-x86/p2m.h | 11 +++++++----
> 27 files changed, 95 insertions(+), 85 deletions(-)
> [...]
> From: Julien Grall
> Sent: Saturday, December 22, 2018 12:27 AM
>
> No functional change intended.
>
> Only reasonable clean-ups are done in this patch. The rest will use _gfn
> for the time being.
>
> Signed-off-by: Julien Grall <julien.grall@arm.com>
> Acked-by: Jan Beulich <jbeulich@suse.com>
>
Reviewed-by: Kevin Tian <kevin.tian@intel.com>
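The whole change hinges on Xen's typesafe frame-number wrappers: gfn_t is a one-member struct, so passing a raw unsigned long (an MFN, a byte address, an index) where a GFN is expected becomes a compile error instead of a silent bug. The sketch below is a minimal, self-contained illustration of that pattern, assuming 4K pages; it is not Xen's actual header (Xen generates gfn_t with its TYPE_SAFE macro), and the lookup() helper and the CR3 value are made up for the example.

```c
#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12   /* assumption: 4K pages */

/* Stand-in for what Xen's TYPE_SAFE(unsigned long, gfn) provides. */
typedef struct { unsigned long gfn; } gfn_t;

static inline gfn_t _gfn(unsigned long g)  { return (gfn_t){ g }; }
static inline unsigned long gfn_x(gfn_t g) { return g.gfn; }

/* Guest physical address -> guest frame number, the conversion the patch
 * uses in place of open-coded ">> PAGE_SHIFT". */
static inline gfn_t gaddr_to_gfn(uint64_t gaddr)
{
    return _gfn(gaddr >> PAGE_SHIFT);
}

/* Hypothetical consumer: because the parameter is a struct, handing it a
 * raw integer no longer compiles. */
static void lookup(gfn_t gfn)
{
    printf("looking up guest frame %#lx\n", gfn_x(gfn));
}

int main(void)
{
    uint64_t guest_cr3 = 0x7ff000;          /* made-up guest CR3 value */

    lookup(gaddr_to_gfn(guest_cr3));        /* address converted to a frame */
    lookup(_gfn(guest_cr3 >> PAGE_SHIFT));  /* explicit wrap of a raw frame */
    /* lookup(guest_cr3); */                /* rejected: uint64_t is not gfn_t */

    return 0;
}
```

The same wrapping/unwrapping discipline is what the patch below applies at each call site: gaddr_to_gfn() where the caller holds a guest physical address, _gfn() where an untyped value crosses the guest ABI, and gfn_x() only where the callee still takes a plain integer.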
diff --git a/xen/arch/arm/guestcopy.c b/xen/arch/arm/guestcopy.c index 7a0f3e9d5f..55892062bb 100644 --- a/xen/arch/arm/guestcopy.c +++ b/xen/arch/arm/guestcopy.c @@ -37,7 +37,7 @@ static struct page_info *translate_get_page(copy_info_t info, uint64_t addr, return get_page_from_gva(info.gva.v, addr, write ? GV2M_WRITE : GV2M_READ); - page = get_page_from_gfn(info.gpa.d, paddr_to_pfn(addr), &p2mt, P2M_ALLOC); + page = get_page_from_gfn(info.gpa.d, gaddr_to_gfn(addr), &p2mt, P2M_ALLOC); if ( !page ) return NULL; diff --git a/xen/arch/arm/mm.c b/xen/arch/arm/mm.c index 01ae2cccc0..340a1d1548 100644 --- a/xen/arch/arm/mm.c +++ b/xen/arch/arm/mm.c @@ -1252,7 +1252,7 @@ int xenmem_add_to_physmap_one( /* Take reference to the foreign domain page. * Reference will be released in XENMEM_remove_from_physmap */ - page = get_page_from_gfn(od, idx, &p2mt, P2M_ALLOC); + page = get_page_from_gfn(od, _gfn(idx), &p2mt, P2M_ALLOC); if ( !page ) { put_pg_owner(od); diff --git a/xen/arch/x86/cpu/vpmu.c b/xen/arch/x86/cpu/vpmu.c index 8a4f753eae..4d8f153031 100644 --- a/xen/arch/x86/cpu/vpmu.c +++ b/xen/arch/x86/cpu/vpmu.c @@ -607,7 +607,7 @@ static int pvpmu_init(struct domain *d, xen_pmu_params_t *params) struct vcpu *v; struct vpmu_struct *vpmu; struct page_info *page; - uint64_t gfn = params->val; + gfn_t gfn = _gfn(params->val); if ( (params->vcpu >= d->max_vcpus) || (d->vcpu[params->vcpu] == NULL) ) return -EINVAL; diff --git a/xen/arch/x86/domain.c b/xen/arch/x86/domain.c index 32dc4253ff..b462a8513b 100644 --- a/xen/arch/x86/domain.c +++ b/xen/arch/x86/domain.c @@ -827,7 +827,7 @@ int arch_set_info_guest( unsigned long flags; bool compat; #ifdef CONFIG_PV - unsigned long cr3_gfn; + gfn_t cr3_gfn; struct page_info *cr3_page; unsigned long cr4; int rc = 0; @@ -1091,9 +1091,9 @@ int arch_set_info_guest( set_bit(_VPF_in_reset, &v->pause_flags); if ( !compat ) - cr3_gfn = xen_cr3_to_pfn(c.nat->ctrlreg[3]); + cr3_gfn = _gfn(xen_cr3_to_pfn(c.nat->ctrlreg[3])); else - cr3_gfn = compat_cr3_to_pfn(c.cmp->ctrlreg[3]); + cr3_gfn = _gfn(compat_cr3_to_pfn(c.cmp->ctrlreg[3])); cr3_page = get_page_from_gfn(d, cr3_gfn, NULL, P2M_ALLOC); if ( !cr3_page ) @@ -1122,7 +1122,7 @@ int arch_set_info_guest( case 0: if ( !compat && !VM_ASSIST(d, m2p_strict) && !paging_mode_refcounts(d) ) - fill_ro_mpt(_mfn(cr3_gfn)); + fill_ro_mpt(_mfn(gfn_x(cr3_gfn))); break; default: if ( cr3_page == current->arch.old_guest_table ) @@ -1137,7 +1137,7 @@ int arch_set_info_guest( v->arch.guest_table = pagetable_from_page(cr3_page); if ( c.nat->ctrlreg[1] ) { - cr3_gfn = xen_cr3_to_pfn(c.nat->ctrlreg[1]); + cr3_gfn = _gfn(xen_cr3_to_pfn(c.nat->ctrlreg[1])); cr3_page = get_page_from_gfn(d, cr3_gfn, NULL, P2M_ALLOC); if ( !cr3_page ) @@ -1162,7 +1162,7 @@ int arch_set_info_guest( break; case 0: if ( VM_ASSIST(d, m2p_strict) ) - zap_ro_mpt(_mfn(cr3_gfn)); + zap_ro_mpt(_mfn(gfn_x(cr3_gfn))); break; } } diff --git a/xen/arch/x86/domctl.c b/xen/arch/x86/domctl.c index 9bf2d0820f..812a435069 100644 --- a/xen/arch/x86/domctl.c +++ b/xen/arch/x86/domctl.c @@ -448,7 +448,7 @@ long arch_do_domctl( break; } - page = get_page_from_gfn(d, gfn, &t, P2M_ALLOC); + page = get_page_from_gfn(d, _gfn(gfn), &t, P2M_ALLOC); if ( unlikely(!page) || unlikely(is_xen_heap_page(page)) ) @@ -498,11 +498,11 @@ long arch_do_domctl( case XEN_DOMCTL_hypercall_init: { - unsigned long gmfn = domctl->u.hypercall_init.gmfn; + gfn_t gfn = _gfn(domctl->u.hypercall_init.gmfn); struct page_info *page; void *hypercall_page; - page = get_page_from_gfn(d, gmfn, NULL, P2M_ALLOC); + page = 
get_page_from_gfn(d, gfn, NULL, P2M_ALLOC); if ( !page || !get_page_type(page, PGT_writable_page) ) { diff --git a/xen/arch/x86/hvm/dm.c b/xen/arch/x86/hvm/dm.c index d6d0e8be89..3b3ad27938 100644 --- a/xen/arch/x86/hvm/dm.c +++ b/xen/arch/x86/hvm/dm.c @@ -186,7 +186,7 @@ static int modified_memory(struct domain *d, { struct page_info *page; - page = get_page_from_gfn(d, pfn, NULL, P2M_UNSHARE); + page = get_page_from_gfn(d, _gfn(pfn), NULL, P2M_UNSHARE); if ( page ) { paging_mark_pfn_dirty(d, _pfn(pfn)); diff --git a/xen/arch/x86/hvm/domain.c b/xen/arch/x86/hvm/domain.c index 5d5a746a25..73d2da8441 100644 --- a/xen/arch/x86/hvm/domain.c +++ b/xen/arch/x86/hvm/domain.c @@ -297,7 +297,7 @@ int arch_set_info_hvm_guest(struct vcpu *v, const vcpu_hvm_context_t *ctx) { /* Shadow-mode CR3 change. Check PDBR and update refcounts. */ struct page_info *page = get_page_from_gfn(v->domain, - v->arch.hvm.guest_cr[3] >> PAGE_SHIFT, + gaddr_to_gfn(v->arch.hvm.guest_cr[3]), NULL, P2M_ALLOC); if ( !page ) { diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c index d14ddcb527..0109bf6a75 100644 --- a/xen/arch/x86/hvm/hvm.c +++ b/xen/arch/x86/hvm/hvm.c @@ -2168,7 +2168,7 @@ int hvm_set_cr0(unsigned long value, bool may_defer) { struct vcpu *v = current; struct domain *d = v->domain; - unsigned long gfn, old_value = v->arch.hvm.guest_cr[0]; + unsigned long old_value = v->arch.hvm.guest_cr[0]; struct page_info *page; HVM_DBG_LOG(DBG_LEVEL_VMMU, "Update CR0 value = %lx", value); @@ -2223,7 +2223,8 @@ int hvm_set_cr0(unsigned long value, bool may_defer) if ( !paging_mode_hap(d) ) { /* The guest CR3 must be pointing to the guest physical. */ - gfn = v->arch.hvm.guest_cr[3] >> PAGE_SHIFT; + gfn_t gfn = gaddr_to_gfn(v->arch.hvm.guest_cr[3]); + page = get_page_from_gfn(d, gfn, NULL, P2M_ALLOC); if ( !page ) { @@ -2315,7 +2316,7 @@ int hvm_set_cr3(unsigned long value, bool may_defer) { /* Shadow-mode CR3 change. Check PDBR and update refcounts. 
*/ HVM_DBG_LOG(DBG_LEVEL_VMMU, "CR3 value = %lx", value); - page = get_page_from_gfn(v->domain, value >> PAGE_SHIFT, + page = get_page_from_gfn(v->domain, gaddr_to_gfn(value), NULL, P2M_ALLOC); if ( !page ) goto bad_cr3; @@ -3143,7 +3144,7 @@ enum hvm_translation_result hvm_translate_get_page( && hvm_mmio_internal(gfn_to_gaddr(gfn)) ) return HVMTRANS_bad_gfn_to_mfn; - page = get_page_from_gfn(v->domain, gfn_x(gfn), &p2mt, P2M_UNSHARE); + page = get_page_from_gfn(v->domain, gfn, &p2mt, P2M_UNSHARE); if ( !page ) return HVMTRANS_bad_gfn_to_mfn; diff --git a/xen/arch/x86/hvm/svm/svm.c b/xen/arch/x86/hvm/svm/svm.c index 954822c960..29777cd1b8 100644 --- a/xen/arch/x86/hvm/svm/svm.c +++ b/xen/arch/x86/hvm/svm/svm.c @@ -317,7 +317,7 @@ static int svm_vmcb_restore(struct vcpu *v, struct hvm_hw_cpu *c) { if ( c->cr0 & X86_CR0_PG ) { - page = get_page_from_gfn(v->domain, c->cr3 >> PAGE_SHIFT, + page = get_page_from_gfn(v->domain, gaddr_to_gfn(c->cr3), NULL, P2M_ALLOC); if ( !page ) { @@ -2351,9 +2351,9 @@ nsvm_get_nvmcb_page(struct vcpu *v, uint64_t vmcbaddr) return NULL; /* Need to translate L1-GPA to MPA */ - page = get_page_from_gfn(v->domain, - nv->nv_vvmcxaddr >> PAGE_SHIFT, - &p2mt, P2M_ALLOC | P2M_UNSHARE); + page = get_page_from_gfn(v->domain, + gaddr_to_gfn(nv->nv_vvmcxaddr), + &p2mt, P2M_ALLOC | P2M_UNSHARE); if ( !page ) return NULL; diff --git a/xen/arch/x86/hvm/viridian/time.c b/xen/arch/x86/hvm/viridian/time.c index 840a82b457..a718434456 100644 --- a/xen/arch/x86/hvm/viridian/time.c +++ b/xen/arch/x86/hvm/viridian/time.c @@ -38,16 +38,16 @@ static void dump_reference_tsc(const struct domain *d) static void update_reference_tsc(struct domain *d, bool initialize) { - unsigned long gmfn = d->arch.hvm.viridian.reference_tsc.fields.pfn; - struct page_info *page = get_page_from_gfn(d, gmfn, NULL, P2M_ALLOC); + gfn_t gfn = _gfn(d->arch.hvm.viridian.reference_tsc.fields.pfn); + struct page_info *page = get_page_from_gfn(d, gfn, NULL, P2M_ALLOC); HV_REFERENCE_TSC_PAGE *p; if ( !page || !get_page_type(page, PGT_writable_page) ) { if ( page ) put_page(page); - gdprintk(XENLOG_WARNING, "Bad GMFN %#"PRI_gfn" (MFN %#"PRI_mfn")\n", - gmfn, mfn_x(page ? page_to_mfn(page) : INVALID_MFN)); + gdprintk(XENLOG_WARNING, "Bad GFN %#"PRI_gfn" (MFN %#"PRI_mfn")\n", + gfn_x(gfn), mfn_x(page ? page_to_mfn(page) : INVALID_MFN)); return; } diff --git a/xen/arch/x86/hvm/viridian/viridian.c b/xen/arch/x86/hvm/viridian/viridian.c index c78b2918d9..2c4e8bdcc6 100644 --- a/xen/arch/x86/hvm/viridian/viridian.c +++ b/xen/arch/x86/hvm/viridian/viridian.c @@ -247,16 +247,16 @@ static void dump_hypercall(const struct domain *d) static void enable_hypercall_page(struct domain *d) { - unsigned long gmfn = d->arch.hvm.viridian.hypercall_gpa.fields.pfn; - struct page_info *page = get_page_from_gfn(d, gmfn, NULL, P2M_ALLOC); + gfn_t gfn = _gfn(d->arch.hvm.viridian.hypercall_gpa.fields.pfn); + struct page_info *page = get_page_from_gfn(d, gfn, NULL, P2M_ALLOC); uint8_t *p; if ( !page || !get_page_type(page, PGT_writable_page) ) { if ( page ) put_page(page); - gdprintk(XENLOG_WARNING, "Bad GMFN %#"PRI_gfn" (MFN %#"PRI_mfn")\n", - gmfn, mfn_x(page ? page_to_mfn(page) : INVALID_MFN)); + gdprintk(XENLOG_WARNING, "Bad GFN %#"PRI_gfn" (MFN %#"PRI_mfn")\n", + gfn_x(gfn), mfn_x(page ? 
page_to_mfn(page) : INVALID_MFN)); return; } @@ -601,13 +601,13 @@ void viridian_dump_guest_page(const struct vcpu *v, const char *name, void viridian_map_guest_page(struct vcpu *v, struct viridian_page *vp) { struct domain *d = v->domain; - unsigned long gmfn = vp->msr.fields.pfn; + gfn_t gfn = _gfn(vp->msr.fields.pfn); struct page_info *page; if ( vp->ptr ) return; - page = get_page_from_gfn(d, gmfn, NULL, P2M_ALLOC); + page = get_page_from_gfn(d, gfn, NULL, P2M_ALLOC); if ( !page ) goto fail; @@ -628,8 +628,8 @@ void viridian_map_guest_page(struct vcpu *v, struct viridian_page *vp) return; fail: - gdprintk(XENLOG_WARNING, "Bad GMFN %#"PRI_gfn" (MFN %#"PRI_mfn")\n", - gmfn, mfn_x(page ? page_to_mfn(page) : INVALID_MFN)); + gdprintk(XENLOG_WARNING, "Bad GFN %#"PRI_gfn" (MFN %#"PRI_mfn")\n", + gfn_x(gfn), mfn_x(page ? page_to_mfn(page) : INVALID_MFN)); } void viridian_unmap_guest_page(struct viridian_page *vp) diff --git a/xen/arch/x86/hvm/vmx/vmx.c b/xen/arch/x86/hvm/vmx/vmx.c index 64af8bf943..088b708d3c 100644 --- a/xen/arch/x86/hvm/vmx/vmx.c +++ b/xen/arch/x86/hvm/vmx/vmx.c @@ -674,7 +674,7 @@ static int vmx_restore_cr0_cr3( { if ( cr0 & X86_CR0_PG ) { - page = get_page_from_gfn(v->domain, cr3 >> PAGE_SHIFT, + page = get_page_from_gfn(v->domain, gaddr_to_gfn(cr3), NULL, P2M_ALLOC); if ( !page ) { @@ -1373,7 +1373,7 @@ static void vmx_load_pdptrs(struct vcpu *v) if ( (cr3 & 0x1fUL) && !hvm_pcid_enabled(v) ) goto crash; - page = get_page_from_gfn(v->domain, cr3 >> PAGE_SHIFT, &p2mt, P2M_UNSHARE); + page = get_page_from_gfn(v->domain, gaddr_to_gfn(cr3), &p2mt, P2M_UNSHARE); if ( !page ) { /* Ideally you don't want to crash but rather go into a wait diff --git a/xen/arch/x86/hvm/vmx/vvmx.c b/xen/arch/x86/hvm/vmx/vvmx.c index 9f6ea5c1f7..bae8aa2360 100644 --- a/xen/arch/x86/hvm/vmx/vvmx.c +++ b/xen/arch/x86/hvm/vmx/vvmx.c @@ -642,11 +642,11 @@ static void nvmx_update_apic_access_address(struct vcpu *v) if ( ctrl & SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES ) { p2m_type_t p2mt; - unsigned long apic_gpfn; + gfn_t apic_gfn; struct page_info *apic_pg; - apic_gpfn = get_vvmcs(v, APIC_ACCESS_ADDR) >> PAGE_SHIFT; - apic_pg = get_page_from_gfn(v->domain, apic_gpfn, &p2mt, P2M_ALLOC); + apic_gfn = gaddr_to_gfn(get_vvmcs(v, APIC_ACCESS_ADDR)); + apic_pg = get_page_from_gfn(v->domain, apic_gfn, &p2mt, P2M_ALLOC); ASSERT(apic_pg && !p2m_is_paging(p2mt)); __vmwrite(APIC_ACCESS_ADDR, page_to_maddr(apic_pg)); put_page(apic_pg); @@ -663,11 +663,11 @@ static void nvmx_update_virtual_apic_address(struct vcpu *v) if ( ctrl & CPU_BASED_TPR_SHADOW ) { p2m_type_t p2mt; - unsigned long vapic_gpfn; + gfn_t vapic_gfn; struct page_info *vapic_pg; - vapic_gpfn = get_vvmcs(v, VIRTUAL_APIC_PAGE_ADDR) >> PAGE_SHIFT; - vapic_pg = get_page_from_gfn(v->domain, vapic_gpfn, &p2mt, P2M_ALLOC); + vapic_gfn = gaddr_to_gfn(get_vvmcs(v, VIRTUAL_APIC_PAGE_ADDR)); + vapic_pg = get_page_from_gfn(v->domain, vapic_gfn, &p2mt, P2M_ALLOC); ASSERT(vapic_pg && !p2m_is_paging(p2mt)); __vmwrite(VIRTUAL_APIC_PAGE_ADDR, page_to_maddr(vapic_pg)); put_page(vapic_pg); diff --git a/xen/arch/x86/mm.c b/xen/arch/x86/mm.c index 08f34722c2..6d4c3a9e35 100644 --- a/xen/arch/x86/mm.c +++ b/xen/arch/x86/mm.c @@ -2049,7 +2049,7 @@ static int mod_l1_entry(l1_pgentry_t *pl1e, l1_pgentry_t nl1e, p2m_query_t q = l1e_get_flags(nl1e) & _PAGE_RW ? 
P2M_ALLOC | P2M_UNSHARE : P2M_ALLOC; - page = get_page_from_gfn(pg_dom, l1e_get_pfn(nl1e), &p2mt, q); + page = get_page_from_gfn(pg_dom, _gfn(l1e_get_pfn(nl1e)), &p2mt, q); if ( p2m_is_paged(p2mt) ) { @@ -3212,7 +3212,8 @@ long do_mmuext_op( if ( paging_mode_refcounts(pg_owner) ) break; - page = get_page_from_gfn(pg_owner, op.arg1.mfn, NULL, P2M_ALLOC); + page = get_page_from_gfn(pg_owner, _gfn(op.arg1.mfn), NULL, + P2M_ALLOC); if ( unlikely(!page) ) { rc = -EINVAL; @@ -3277,7 +3278,8 @@ long do_mmuext_op( if ( paging_mode_refcounts(pg_owner) ) break; - page = get_page_from_gfn(pg_owner, op.arg1.mfn, NULL, P2M_ALLOC); + page = get_page_from_gfn(pg_owner, _gfn(op.arg1.mfn), NULL, + P2M_ALLOC); if ( unlikely(!page) ) { gdprintk(XENLOG_WARNING, @@ -3493,7 +3495,8 @@ long do_mmuext_op( } case MMUEXT_CLEAR_PAGE: - page = get_page_from_gfn(pg_owner, op.arg1.mfn, &p2mt, P2M_ALLOC); + page = get_page_from_gfn(pg_owner, _gfn(op.arg1.mfn), &p2mt, + P2M_ALLOC); if ( unlikely(p2mt != p2m_ram_rw) && page ) { put_page(page); @@ -3521,7 +3524,7 @@ long do_mmuext_op( { struct page_info *src_page, *dst_page; - src_page = get_page_from_gfn(pg_owner, op.arg2.src_mfn, &p2mt, + src_page = get_page_from_gfn(pg_owner, _gfn(op.arg2.src_mfn), &p2mt, P2M_ALLOC); if ( unlikely(p2mt != p2m_ram_rw) && src_page ) { @@ -3537,7 +3540,7 @@ long do_mmuext_op( break; } - dst_page = get_page_from_gfn(pg_owner, op.arg1.mfn, &p2mt, + dst_page = get_page_from_gfn(pg_owner, _gfn(op.arg1.mfn), &p2mt, P2M_ALLOC); if ( unlikely(p2mt != p2m_ram_rw) && dst_page ) { @@ -3625,7 +3628,8 @@ long do_mmu_update( { struct mmu_update req; void *va = NULL; - unsigned long gpfn, gmfn, mfn; + unsigned long gpfn, mfn; + gfn_t gfn; struct page_info *page; unsigned int cmd, i = 0, done = 0, pt_dom; struct vcpu *curr = current, *v = curr; @@ -3738,8 +3742,8 @@ long do_mmu_update( rc = -EINVAL; req.ptr -= cmd; - gmfn = req.ptr >> PAGE_SHIFT; - page = get_page_from_gfn(pt_owner, gmfn, &p2mt, P2M_ALLOC); + gfn = gaddr_to_gfn(req.ptr); + page = get_page_from_gfn(pt_owner, gfn, &p2mt, P2M_ALLOC); if ( unlikely(!page) || p2mt != p2m_ram_rw ) { @@ -3747,7 +3751,7 @@ long do_mmu_update( put_page(page); if ( p2m_is_paged(p2mt) ) { - p2m_mem_paging_populate(pt_owner, gmfn); + p2m_mem_paging_populate(pt_owner, gfn_x(gfn)); rc = -ENOENT; } else diff --git a/xen/arch/x86/mm/p2m.c b/xen/arch/x86/mm/p2m.c index fea4497910..9d4c4cb27b 100644 --- a/xen/arch/x86/mm/p2m.c +++ b/xen/arch/x86/mm/p2m.c @@ -2696,7 +2696,7 @@ int p2m_add_foreign(struct domain *tdom, unsigned long fgfn, * Take a refcnt on the mfn. NB: following supported for foreign mapping: * ram_rw | ram_logdirty | ram_ro | paging_out. */ - page = get_page_from_gfn(fdom, fgfn, &p2mt, P2M_ALLOC); + page = get_page_from_gfn(fdom, _gfn(fgfn), &p2mt, P2M_ALLOC); if ( !page || !p2m_is_ram(p2mt) || p2m_is_shared(p2mt) || p2m_is_hole(p2mt) ) { diff --git a/xen/arch/x86/mm/shadow/hvm.c b/xen/arch/x86/mm/shadow/hvm.c index 8994cb9f87..196c00d63d 100644 --- a/xen/arch/x86/mm/shadow/hvm.c +++ b/xen/arch/x86/mm/shadow/hvm.c @@ -313,15 +313,15 @@ const struct x86_emulate_ops hvm_shadow_emulator_ops = { static mfn_t emulate_gva_to_mfn(struct vcpu *v, unsigned long vaddr, struct sh_emulate_ctxt *sh_ctxt) { - unsigned long gfn; + gfn_t gfn; struct page_info *page; mfn_t mfn; p2m_type_t p2mt; uint32_t pfec = PFEC_page_present | PFEC_write_access; /* Translate the VA to a GFN. 
*/ - gfn = paging_get_hostmode(v)->gva_to_gfn(v, NULL, vaddr, &pfec); - if ( gfn == gfn_x(INVALID_GFN) ) + gfn = _gfn(paging_get_hostmode(v)->gva_to_gfn(v, NULL, vaddr, &pfec)); + if ( gfn_eq(gfn, INVALID_GFN) ) { x86_emul_pagefault(pfec, vaddr, &sh_ctxt->ctxt); diff --git a/xen/arch/x86/physdev.c b/xen/arch/x86/physdev.c index 3a3c15890b..4f3f438614 100644 --- a/xen/arch/x86/physdev.c +++ b/xen/arch/x86/physdev.c @@ -229,7 +229,8 @@ ret_t do_physdev_op(int cmd, XEN_GUEST_HANDLE_PARAM(void) arg) break; ret = -EINVAL; - page = get_page_from_gfn(current->domain, info.gmfn, NULL, P2M_ALLOC); + page = get_page_from_gfn(current->domain, _gfn(info.gmfn), + NULL, P2M_ALLOC); if ( !page ) break; if ( !get_page_type(page, PGT_writable_page) ) diff --git a/xen/arch/x86/pv/descriptor-tables.c b/xen/arch/x86/pv/descriptor-tables.c index 940804b18a..7b3fb2806a 100644 --- a/xen/arch/x86/pv/descriptor-tables.c +++ b/xen/arch/x86/pv/descriptor-tables.c @@ -112,7 +112,7 @@ long pv_set_gdt(struct vcpu *v, unsigned long *frames, unsigned int entries) { struct page_info *page; - page = get_page_from_gfn(d, frames[i], NULL, P2M_ALLOC); + page = get_page_from_gfn(d, _gfn(frames[i]), NULL, P2M_ALLOC); if ( !page ) goto fail; if ( !get_page_type(page, PGT_seg_desc_page) ) @@ -219,7 +219,7 @@ long do_update_descriptor(uint64_t gaddr, seg_desc_t d) if ( !IS_ALIGNED(gaddr, sizeof(d)) || !check_descriptor(currd, &d) ) return -EINVAL; - page = get_page_from_gfn(currd, gfn_x(gfn), NULL, P2M_ALLOC); + page = get_page_from_gfn(currd, gfn, NULL, P2M_ALLOC); if ( !page ) return -EINVAL; diff --git a/xen/arch/x86/pv/emul-priv-op.c b/xen/arch/x86/pv/emul-priv-op.c index 942ece2ca0..13b13bdc40 100644 --- a/xen/arch/x86/pv/emul-priv-op.c +++ b/xen/arch/x86/pv/emul-priv-op.c @@ -756,12 +756,12 @@ static int write_cr(unsigned int reg, unsigned long val, case 3: /* Write CR3 */ { struct domain *currd = curr->domain; - unsigned long gfn; + gfn_t gfn; struct page_info *page; int rc; - gfn = !is_pv_32bit_domain(currd) - ? xen_cr3_to_pfn(val) : compat_cr3_to_pfn(val); + gfn = _gfn(!is_pv_32bit_domain(currd) + ? 
xen_cr3_to_pfn(val) : compat_cr3_to_pfn(val)); page = get_page_from_gfn(currd, gfn, NULL, P2M_ALLOC); if ( !page ) break; diff --git a/xen/arch/x86/pv/mm.c b/xen/arch/x86/pv/mm.c index f5ea00ca4e..c9ad1152b4 100644 --- a/xen/arch/x86/pv/mm.c +++ b/xen/arch/x86/pv/mm.c @@ -106,7 +106,7 @@ bool pv_map_ldt_shadow_page(unsigned int offset) if ( unlikely(!(l1e_get_flags(gl1e) & _PAGE_PRESENT)) ) return false; - page = get_page_from_gfn(currd, l1e_get_pfn(gl1e), NULL, P2M_ALLOC); + page = get_page_from_gfn(currd, _gfn(l1e_get_pfn(gl1e)), NULL, P2M_ALLOC); if ( unlikely(!page) ) return false; diff --git a/xen/arch/x86/traps.c b/xen/arch/x86/traps.c index 05ddc39bfe..ac2516a709 100644 --- a/xen/arch/x86/traps.c +++ b/xen/arch/x86/traps.c @@ -795,7 +795,7 @@ int guest_wrmsr_xen(struct vcpu *v, uint32_t idx, uint64_t val) case 0: /* Write hypercall page */ { void *hypercall_page; - unsigned long gmfn = val >> PAGE_SHIFT; + gfn_t gfn = gaddr_to_gfn(val); unsigned int page_index = val & (PAGE_SIZE - 1); struct page_info *page; p2m_type_t t; @@ -808,7 +808,7 @@ int guest_wrmsr_xen(struct vcpu *v, uint32_t idx, uint64_t val) return X86EMUL_EXCEPTION; } - page = get_page_from_gfn(d, gmfn, &t, P2M_ALLOC); + page = get_page_from_gfn(d, gfn, &t, P2M_ALLOC); if ( !page || !get_page_type(page, PGT_writable_page) ) { @@ -817,13 +817,14 @@ int guest_wrmsr_xen(struct vcpu *v, uint32_t idx, uint64_t val) if ( p2m_is_paging(t) ) { - p2m_mem_paging_populate(d, gmfn); + p2m_mem_paging_populate(d, gfn_x(gfn)); return X86EMUL_RETRY; } gdprintk(XENLOG_WARNING, - "Bad GMFN %lx (MFN %#"PRI_mfn") to MSR %08x\n", - gmfn, mfn_x(page ? page_to_mfn(page) : INVALID_MFN), base); + "Bad GFN %#"PRI_gfn" (MFN %#"PRI_mfn") to MSR %08x\n", + gfn_x(gfn), mfn_x(page ? page_to_mfn(page) : INVALID_MFN), + base); return X86EMUL_EXCEPTION; } diff --git a/xen/common/domain.c b/xen/common/domain.c index c623daec56..9d9731db17 100644 --- a/xen/common/domain.c +++ b/xen/common/domain.c @@ -1250,7 +1250,7 @@ int map_vcpu_info(struct vcpu *v, unsigned long gfn, unsigned offset) if ( (v != current) && !(v->pause_flags & VPF_down) ) return -EINVAL; - page = get_page_from_gfn(d, gfn, NULL, P2M_ALLOC); + page = get_page_from_gfn(d, _gfn(gfn), NULL, P2M_ALLOC); if ( !page ) return -EINVAL; diff --git a/xen/common/event_fifo.c b/xen/common/event_fifo.c index c49f446754..71a6f673b2 100644 --- a/xen/common/event_fifo.c +++ b/xen/common/event_fifo.c @@ -358,7 +358,7 @@ static const struct evtchn_port_ops evtchn_port_ops_fifo = .print_state = evtchn_fifo_print_state, }; -static int map_guest_page(struct domain *d, uint64_t gfn, void **virt) +static int map_guest_page(struct domain *d, gfn_t gfn, void **virt) { struct page_info *p; @@ -419,7 +419,7 @@ static int setup_control_block(struct vcpu *v) return 0; } -static int map_control_block(struct vcpu *v, uint64_t gfn, uint32_t offset) +static int map_control_block(struct vcpu *v, gfn_t gfn, uint32_t offset) { void *virt; unsigned int i; @@ -505,7 +505,7 @@ int evtchn_fifo_init_control(struct evtchn_init_control *init_control) { struct domain *d = current->domain; uint32_t vcpu_id; - uint64_t gfn; + gfn_t gfn; uint32_t offset; struct vcpu *v; int rc; @@ -513,7 +513,7 @@ int evtchn_fifo_init_control(struct evtchn_init_control *init_control) init_control->link_bits = EVTCHN_FIFO_LINK_BITS; vcpu_id = init_control->vcpu; - gfn = init_control->control_gfn; + gfn = _gfn(init_control->control_gfn); offset = init_control->offset; if ( vcpu_id >= d->max_vcpus || !d->vcpu[vcpu_id] ) @@ -569,7 +569,7 @@ int 
evtchn_fifo_init_control(struct evtchn_init_control *init_control) return rc; } -static int add_page_to_event_array(struct domain *d, unsigned long gfn) +static int add_page_to_event_array(struct domain *d, gfn_t gfn) { void *virt; unsigned int slot; @@ -619,7 +619,7 @@ int evtchn_fifo_expand_array(const struct evtchn_expand_array *expand_array) return -EOPNOTSUPP; spin_lock(&d->event_lock); - rc = add_page_to_event_array(d, expand_array->array_gfn); + rc = add_page_to_event_array(d, _gfn(expand_array->array_gfn)); spin_unlock(&d->event_lock); return rc; diff --git a/xen/common/memory.c b/xen/common/memory.c index 5f7d081c61..5be8b8b68d 100644 --- a/xen/common/memory.c +++ b/xen/common/memory.c @@ -1386,7 +1386,7 @@ long do_memory_op(unsigned long cmd, XEN_GUEST_HANDLE_PARAM(void) arg) return rc; } - page = get_page_from_gfn(d, xrfp.gpfn, NULL, P2M_ALLOC); + page = get_page_from_gfn(d, _gfn(xrfp.gpfn), NULL, P2M_ALLOC); if ( page ) { rc = guest_physmap_remove_page(d, _gfn(xrfp.gpfn), @@ -1657,7 +1657,7 @@ int check_get_page_from_gfn(struct domain *d, gfn_t gfn, bool readonly, p2m_type_t p2mt; struct page_info *page; - page = get_page_from_gfn(d, gfn_x(gfn), &p2mt, q); + page = get_page_from_gfn(d, gfn, &p2mt, q); #ifdef CONFIG_HAS_MEM_PAGING if ( p2m_is_paging(p2mt) ) diff --git a/xen/common/tmem_xen.c b/xen/common/tmem_xen.c index bf7b14f79a..72cba7f10c 100644 --- a/xen/common/tmem_xen.c +++ b/xen/common/tmem_xen.c @@ -52,7 +52,7 @@ static inline void *cli_get_page(xen_pfn_t cmfn, mfn_t *pcli_mfn, p2m_type_t t; struct page_info *page; - page = get_page_from_gfn(current->domain, cmfn, &t, P2M_ALLOC); + page = get_page_from_gfn(current->domain, _gfn(cmfn), &t, P2M_ALLOC); if ( !page || t != p2m_ram_rw ) { if ( page ) diff --git a/xen/include/asm-arm/p2m.h b/xen/include/asm-arm/p2m.h index 041dea827c..c0aa7fa167 100644 --- a/xen/include/asm-arm/p2m.h +++ b/xen/include/asm-arm/p2m.h @@ -298,7 +298,7 @@ struct page_info *p2m_get_page_from_gfn(struct domain *d, gfn_t gfn, p2m_type_t *t); static inline struct page_info *get_page_from_gfn( - struct domain *d, unsigned long gfn, p2m_type_t *t, p2m_query_t q) + struct domain *d, gfn_t gfn, p2m_type_t *t, p2m_query_t q) { mfn_t mfn; p2m_type_t _t; @@ -309,7 +309,7 @@ static inline struct page_info *get_page_from_gfn( * not auto-translated. */ if ( likely(d != dom_xen) ) - return p2m_get_page_from_gfn(d, _gfn(gfn), t); + return p2m_get_page_from_gfn(d, gfn, t); if ( !t ) t = &_t; @@ -320,7 +320,7 @@ static inline struct page_info *get_page_from_gfn( * DOMID_XEN sees 1-1 RAM. The p2m_type is based on the type of the * page. */ - mfn = _mfn(gfn); + mfn = _mfn(gfn_x(gfn)); page = mfn_to_page(mfn); if ( !mfn_valid(mfn) || !get_page(page, d) ) diff --git a/xen/include/asm-x86/p2m.h b/xen/include/asm-x86/p2m.h index 3304921991..1efbc071c5 100644 --- a/xen/include/asm-x86/p2m.h +++ b/xen/include/asm-x86/p2m.h @@ -491,18 +491,21 @@ struct page_info *p2m_get_page_from_gfn(struct p2m_domain *p2m, gfn_t gfn, p2m_query_t q); static inline struct page_info *get_page_from_gfn( - struct domain *d, unsigned long gfn, p2m_type_t *t, p2m_query_t q) + struct domain *d, gfn_t gfn, p2m_type_t *t, p2m_query_t q) { struct page_info *page; + mfn_t mfn; if ( paging_mode_translate(d) ) - return p2m_get_page_from_gfn(p2m_get_hostp2m(d), _gfn(gfn), t, NULL, q); + return p2m_get_page_from_gfn(p2m_get_hostp2m(d), gfn, t, NULL, q); /* Non-translated guests see 1-1 RAM / MMIO mappings everywhere */ if ( t ) *t = likely(d != dom_io) ? 
p2m_ram_rw : p2m_mmio_direct; - page = mfn_to_page(_mfn(gfn)); - return mfn_valid(_mfn(gfn)) && get_page(page, d) ? page : NULL; + + mfn = _mfn(gfn_x(gfn)); + page = mfn_to_page(mfn); + return mfn_valid(mfn) && get_page(page, d) ? page : NULL; } /* General conversion function from mfn to gfn */
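To see how callers read after the conversion, here is a compilable mock of the new interface shape, with struct domain, the p2m types, and get_page_from_gfn() itself reduced to hypothetical stubs (the real function performs a p2m lookup and takes a page reference; the stub only mirrors the signature). Callers that hold a guest physical address convert with gaddr_to_gfn(); callers that still receive a raw frame number from the guest ABI wrap it with _gfn() at the point of use, as the hunks above do.

```c
#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12

/* Typesafe GFN stand-ins (see the earlier sketch). */
typedef struct { unsigned long gfn; } gfn_t;
static inline gfn_t _gfn(unsigned long g)  { return (gfn_t){ g }; }
static inline unsigned long gfn_x(gfn_t g) { return g.gfn; }
static inline gfn_t gaddr_to_gfn(uint64_t ga) { return _gfn(ga >> PAGE_SHIFT); }

/* Hypothetical stubs so the example is self-contained. */
struct domain { int id; };
struct page_info { unsigned long mfn; };
typedef enum { p2m_ram_rw, p2m_mmio_direct } p2m_type_t;
typedef enum { P2M_ALLOC = 1, P2M_UNSHARE = 2 } p2m_query_t;

static struct page_info *get_page_from_gfn(struct domain *d, gfn_t gfn,
                                            p2m_type_t *t, p2m_query_t q)
{
    static struct page_info pg;

    (void)d; (void)q;
    if ( t )
        *t = p2m_ram_rw;
    pg.mfn = gfn_x(gfn);    /* pretend 1:1, as the non-translated path does */
    return &pg;
}

int main(void)
{
    struct domain d = { 0 };
    uint64_t guest_cr3 = 0x1234000;  /* a guest physical address */
    unsigned long abi_gfn = 0x5678;  /* a raw frame number from a hypercall */
    p2m_type_t t;

    /* Address-holding caller: convert once, at the boundary. */
    struct page_info *p1 = get_page_from_gfn(&d, gaddr_to_gfn(guest_cr3),
                                             &t, P2M_ALLOC);

    /* ABI-holding caller: wrap the untyped value at the call site. */
    struct page_info *p2 = get_page_from_gfn(&d, _gfn(abi_gfn), NULL, P2M_ALLOC);

    printf("mfn for cr3 page: %#lx, mfn for abi gfn: %#lx\n", p1->mfn, p2->mfn);
    return 0;
}
```

The gfn_x() unwraps that remain in the patch sit where the callee (for example fill_ro_mpt() or p2m_mem_paging_populate()) still takes an untyped value; converting those interfaces is left for later, per the commit message.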