@@ -117,7 +117,7 @@ static bool_t insert_11_bank(struct domain *d,
goto fail;
}
- res = guest_physmap_add_page(d, spfn, spfn, order);
+ res = guest_physmap_add_page(d, _gfn(spfn), _mfn(spfn), order);
if ( res )
panic("Failed map pages to DOM0: %d", res);
@@ -1153,7 +1153,7 @@ int xenmem_add_to_physmap_one(
}
/* Map at new location. */
- rc = guest_physmap_add_entry(d, gpfn, mfn, 0, t);
+ rc = guest_physmap_add_entry(d, _gfn(gpfn), _mfn(mfn), 0, t);
/* If we fail to add the mapping, we need to drop the reference we
* took earlier on foreign pages */
@@ -1282,8 +1282,8 @@ int create_grant_host_mapping(unsigned long addr, unsigned long frame,
if ( flags & GNTMAP_readonly )
t = p2m_grant_map_ro;
- rc = guest_physmap_add_entry(current->domain, addr >> PAGE_SHIFT,
- frame, 0, t);
+ rc = guest_physmap_add_entry(current->domain, _gfn(addr >> PAGE_SHIFT),
+ _mfn(frame), 0, t);
if ( rc )
return GNTST_general_error;
@@ -1294,13 +1294,13 @@ int create_grant_host_mapping(unsigned long addr, unsigned long frame,
int replace_grant_host_mapping(unsigned long addr, unsigned long mfn,
unsigned long new_addr, unsigned int flags)
{
- unsigned long gfn = (unsigned long)(addr >> PAGE_SHIFT);
+ gfn_t gfn = _gfn(addr >> PAGE_SHIFT);
struct domain *d = current->domain;
if ( new_addr != 0 || (flags & GNTMAP_contains_pte) )
return GNTST_general_error;
- guest_physmap_remove_page(d, gfn, mfn, 0);
+ guest_physmap_remove_page(d, gfn, _mfn(mfn), 0);
return GNTST_okay;
}
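Giving the local gfn in replace_grant_host_mapping() the typed gfn_t means the wrap happens once, at initialisation, instead of at every use. The payoff across all of these conversions is that argument mix-ups stop compiling; a deliberately wrong, hypothetical call to illustrate:

    /* Swapped arguments: accepted silently with unsigned longs,
     * rejected outright by the typed API. */
    guest_physmap_remove_page(d, _mfn(mfn), _gfn(gfn), 0);
    /* gcc reports, roughly: incompatible type for argument 2 of
     * 'guest_physmap_remove_page' */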
@@ -1292,26 +1292,26 @@ int map_dev_mmio_region(struct domain *d,
}
int guest_physmap_add_entry(struct domain *d,
- unsigned long gpfn,
- unsigned long mfn,
+ gfn_t gfn,
+ mfn_t mfn,
unsigned long page_order,
p2m_type_t t)
{
return apply_p2m_changes(d, INSERT,
- pfn_to_paddr(gpfn),
- pfn_to_paddr(gpfn + (1 << page_order)),
- pfn_to_paddr(mfn), MATTR_MEM, 0, t,
+ pfn_to_paddr(gfn_x(gfn)),
+ pfn_to_paddr(gfn_x(gfn) + (1 << page_order)),
+ pfn_to_paddr(mfn_x(mfn)), MATTR_MEM, 0, t,
d->arch.p2m.default_access);
}
void guest_physmap_remove_page(struct domain *d,
- unsigned long gpfn,
- unsigned long mfn, unsigned int page_order)
+ gfn_t gfn,
+ mfn_t mfn, unsigned int page_order)
{
apply_p2m_changes(d, REMOVE,
- pfn_to_paddr(gpfn),
- pfn_to_paddr(gpfn + (1<<page_order)),
- pfn_to_paddr(mfn), MATTR_MEM, 0, p2m_invalid,
+ pfn_to_paddr(gfn_x(gfn)),
+ pfn_to_paddr(gfn_x(gfn) + (1<<page_order)),
+ pfn_to_paddr(mfn_x(mfn)), MATTR_MEM, 0, p2m_invalid,
d->arch.p2m.default_access);
}
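On Arm both helpers bottom out in apply_p2m_changes(), which works on physical addresses, so the typed values are unwrapped with gfn_x()/mfn_x() right at that internal boundary and shifted up to byte addresses. A sketch of the conversion, assuming the usual Arm definition of pfn_to_paddr() (upstream it is a macro in asm-arm/mm.h):

    /* Effectively just a shift by PAGE_SHIFT (12 for 4KB pages): */
    static inline paddr_t pfn_to_paddr(unsigned long pfn)
    {
        return (paddr_t)pfn << PAGE_SHIFT;
    }

So an order-0 insert at gfn covers the byte range [gfn << PAGE_SHIFT, (gfn + 1) << PAGE_SHIFT).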
@@ -802,9 +802,10 @@ int arch_domain_soft_reset(struct domain *d)
ret = -ENOMEM;
goto exit_put_gfn;
}
- guest_physmap_remove_page(d, gfn, mfn, PAGE_ORDER_4K);
+ guest_physmap_remove_page(d, _gfn(gfn), _mfn(mfn), PAGE_ORDER_4K);
- ret = guest_physmap_add_page(d, gfn, page_to_mfn(new_page), PAGE_ORDER_4K);
+ ret = guest_physmap_add_page(d, _gfn(gfn), _mfn(page_to_mfn(new_page)),
+ PAGE_ORDER_4K);
if ( ret )
{
printk(XENLOG_G_ERR "Failed to add a page to replace"
@@ -427,7 +427,7 @@ static __init void pvh_add_mem_mapping(struct domain *d, unsigned long gfn,
if ( !iomem_access_permitted(d, mfn + i, mfn + i) )
{
omfn = get_gfn_query_unlocked(d, gfn + i, &t);
- guest_physmap_remove_page(d, gfn + i, mfn_x(omfn), PAGE_ORDER_4K);
+ guest_physmap_remove_page(d, _gfn(gfn + i), omfn, PAGE_ORDER_4K);
continue;
}
@@ -530,7 +530,7 @@ static __init void pvh_map_all_iomem(struct domain *d, unsigned long nr_pages)
if ( get_gpfn_from_mfn(mfn) != INVALID_M2P_ENTRY )
continue;
- rc = guest_physmap_add_page(d, start_pfn, mfn, 0);
+ rc = guest_physmap_add_page(d, _gfn(start_pfn), _mfn(mfn), 0);
if ( rc != 0 )
panic("Unable to add gpfn %#lx mfn %#lx to Dom0 physmap: %d",
start_pfn, mfn, rc);
@@ -605,7 +605,7 @@ static __init void dom0_update_physmap(struct domain *d, unsigned long pfn,
{
if ( is_pvh_domain(d) )
{
- int rc = guest_physmap_add_page(d, pfn, mfn, 0);
+ int rc = guest_physmap_add_page(d, _gfn(pfn), _mfn(mfn), 0);
BUG_ON(rc);
return;
}
@@ -267,8 +267,8 @@ bool_t is_ioreq_server_page(struct domain *d, const struct page_info *page)
static void hvm_remove_ioreq_gmfn(
struct domain *d, struct hvm_ioreq_page *iorp)
{
- guest_physmap_remove_page(d, iorp->gmfn,
- page_to_mfn(iorp->page), 0);
+ guest_physmap_remove_page(d, _gfn(iorp->gmfn),
+ _mfn(page_to_mfn(iorp->page)), 0);
clear_page(iorp->va);
}
@@ -279,8 +279,8 @@ static int hvm_add_ioreq_gmfn(
clear_page(iorp->va);
- rc = guest_physmap_add_page(d, iorp->gmfn,
- page_to_mfn(iorp->page), 0);
+ rc = guest_physmap_add_page(d, _gfn(iorp->gmfn),
+ _mfn(page_to_mfn(iorp->page)), 0);
if ( rc == 0 )
paging_mark_dirty(d, page_to_mfn(iorp->page));
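At this point in the tree page_to_mfn() still returns a raw unsigned long, hence the _mfn(page_to_mfn(...)) wrapping in the ioreq helpers above. A typed convenience wrapper in the spirit of later tree-wide cleanups would look like this (hypothetical here, and named so as not to clash with the real macro):

    /* Hypothetical helper: wrap page_to_mfn()'s raw result exactly once. */
    static inline mfn_t page_to_mfn_t(const struct page_info *pg)
    {
        return _mfn(page_to_mfn(pg));
    }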
@@ -4211,7 +4211,8 @@ static int create_grant_p2m_mapping(uint64_t addr, unsigned long frame,
else
p2mt = p2m_grant_map_rw;
rc = guest_physmap_add_entry(current->domain,
- addr >> PAGE_SHIFT, frame, PAGE_ORDER_4K, p2mt);
+ _gfn(addr >> PAGE_SHIFT),
+ _mfn(frame), PAGE_ORDER_4K, p2mt);
if ( rc )
return GNTST_general_error;
else
@@ -4268,7 +4269,7 @@ static int replace_grant_p2m_mapping(
type, mfn_x(old_mfn), frame);
return GNTST_general_error;
}
- guest_physmap_remove_page(d, gfn, frame, PAGE_ORDER_4K);
+ guest_physmap_remove_page(d, _gfn(gfn), _mfn(frame), PAGE_ORDER_4K);
put_gfn(d, gfn);
return GNTST_okay;
@@ -4853,7 +4854,8 @@ int xenmem_add_to_physmap_one(
{
if ( is_xen_heap_mfn(prev_mfn) )
/* Xen heap frames are simply unhooked from this phys slot. */
- guest_physmap_remove_page(d, gpfn, prev_mfn, PAGE_ORDER_4K);
+ guest_physmap_remove_page(d, _gfn(gpfn), _mfn(prev_mfn),
+ PAGE_ORDER_4K);
else
/* Normal domain memory is freed, to avoid leaking memory. */
guest_remove_page(d, gpfn);
@@ -4867,10 +4869,10 @@ int xenmem_add_to_physmap_one(
if ( space == XENMAPSPACE_gmfn || space == XENMAPSPACE_gmfn_range )
ASSERT( old_gpfn == gfn );
if ( old_gpfn != INVALID_M2P_ENTRY )
- guest_physmap_remove_page(d, old_gpfn, mfn, PAGE_ORDER_4K);
+ guest_physmap_remove_page(d, _gfn(old_gpfn), _mfn(mfn), PAGE_ORDER_4K);
/* Map at new location. */
- rc = guest_physmap_add_page(d, gpfn, mfn, PAGE_ORDER_4K);
+ rc = guest_physmap_add_page(d, _gfn(gpfn), _mfn(mfn), PAGE_ORDER_4K);
/* In the XENMAPSPACE_gmfn, we took a ref of the gfn at the top */
if ( space == XENMAPSPACE_gmfn || space == XENMAPSPACE_gmfn_range )
@@ -675,21 +675,20 @@ p2m_remove_page(struct p2m_domain *p2m, unsigned long gfn, unsigned long mfn,
}
int
-guest_physmap_remove_page(struct domain *d, unsigned long gfn,
- unsigned long mfn, unsigned int page_order)
+guest_physmap_remove_page(struct domain *d, gfn_t gfn,
+ mfn_t mfn, unsigned int page_order)
{
struct p2m_domain *p2m = p2m_get_hostp2m(d);
int rc;
gfn_lock(p2m, gfn, page_order);
- rc = p2m_remove_page(p2m, gfn, mfn, page_order);
+ rc = p2m_remove_page(p2m, gfn_x(gfn), mfn_x(mfn), page_order);
gfn_unlock(p2m, gfn, page_order);
return rc;
}
int
-guest_physmap_add_entry(struct domain *d, unsigned long gfn,
- unsigned long mfn, unsigned int page_order,
- p2m_type_t t)
+guest_physmap_add_entry(struct domain *d, gfn_t gfn, mfn_t mfn,
+ unsigned int page_order, p2m_type_t t)
{
struct p2m_domain *p2m = p2m_get_hostp2m(d);
unsigned long i, ogfn;
@@ -705,13 +704,14 @@ guest_physmap_add_entry(struct domain *d, unsigned long gfn,
{
for ( i = 0; i < (1 << page_order); i++ )
{
- rc = iommu_map_page(
- d, mfn + i, mfn + i, IOMMUF_readable|IOMMUF_writable);
+ rc = iommu_map_page(d, mfn_x(mfn_add(mfn, i)),
+ mfn_x(mfn_add(mfn, i)),
+ IOMMUF_readable|IOMMUF_writable);
if ( rc != 0 )
{
while ( i-- > 0 )
/* If statement to satisfy __must_check. */
- if ( iommu_unmap_page(d, mfn + i) )
+ if ( iommu_unmap_page(d, mfn_x(mfn_add(mfn, i))) )
continue;
return rc;
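The IOMMU loop above shows the arithmetic helpers that travel with the typed types: rather than unwrapping, adding, and rewrapping by hand at each step, callers use mfn_add()/gfn_add() and compare with mfn_eq(). Illustrative definitions matching how they behave (upstream keeps them next to the typedefs in xen/include/xen/mm.h):

    static inline mfn_t mfn_add(mfn_t mfn, unsigned long i)
    {
        return _mfn(mfn_x(mfn) + i);
    }

    static inline gfn_t gfn_add(gfn_t gfn, unsigned long i)
    {
        return _gfn(gfn_x(gfn) + i);
    }

    static inline bool_t mfn_eq(mfn_t x, mfn_t y)
    {
        return mfn_x(x) == mfn_x(y);
    }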
@@ -727,18 +727,20 @@ guest_physmap_add_entry(struct domain *d, unsigned long gfn,
p2m_lock(p2m);
- P2M_DEBUG("adding gfn=%#lx mfn=%#lx\n", gfn, mfn);
+ P2M_DEBUG("adding gfn=%#lx mfn=%#lx\n", gfn_x(gfn), mfn_x(mfn));
/* First, remove m->p mappings for existing p->m mappings */
for ( i = 0; i < (1UL << page_order); i++ )
{
- omfn = p2m->get_entry(p2m, gfn + i, &ot, &a, 0, NULL, NULL);
+ omfn = p2m->get_entry(p2m, gfn_x(gfn_add(gfn, i)), &ot,
+ &a, 0, NULL, NULL);
if ( p2m_is_shared(ot) )
{
/* Do an unshare to cleanly take care of all corner
* cases. */
int rc;
- rc = mem_sharing_unshare_page(p2m->domain, gfn + i, 0);
+ rc = mem_sharing_unshare_page(p2m->domain,
+ gfn_x(gfn_add(gfn, i)), 0);
if ( rc )
{
p2m_unlock(p2m);
@@ -753,10 +755,13 @@ guest_physmap_add_entry(struct domain *d, unsigned long gfn,
*
* Foreign domains are okay to place an event as they
* won't go to sleep. */
- (void)mem_sharing_notify_enomem(p2m->domain, gfn + i, 0);
+ (void)mem_sharing_notify_enomem(p2m->domain,
+ gfn_x(gfn_add(gfn, i)),
+ 0);
return rc;
}
- omfn = p2m->get_entry(p2m, gfn + i, &ot, &a, 0, NULL, NULL);
+ omfn = p2m->get_entry(p2m, gfn_x(gfn_add(gfn, i)),
+ &ot, &a, 0, NULL, NULL);
ASSERT(!p2m_is_shared(ot));
}
if ( p2m_is_grant(ot) || p2m_is_foreign(ot) )
@@ -787,39 +792,39 @@ guest_physmap_add_entry(struct domain *d, unsigned long gfn,
/* Then, look for m->p mappings for this range and deal with them */
for ( i = 0; i < (1UL << page_order); i++ )
{
- if ( page_get_owner(mfn_to_page(_mfn(mfn + i))) == dom_cow )
+ if ( page_get_owner(mfn_to_page(mfn_add(mfn, i))) == dom_cow )
{
/* This is no way to add a shared page to your physmap! */
- gdprintk(XENLOG_ERR, "Adding shared mfn %lx directly to dom %hu "
- "physmap not allowed.\n", mfn+i, d->domain_id);
+ gdprintk(XENLOG_ERR, "Adding shared mfn %lx directly to dom%d physmap not allowed.\n",
+ mfn_x(mfn_add(mfn, i)), d->domain_id);
p2m_unlock(p2m);
return -EINVAL;
}
- if ( page_get_owner(mfn_to_page(_mfn(mfn + i))) != d )
+ if ( page_get_owner(mfn_to_page(mfn_add(mfn, i))) != d )
continue;
- ogfn = mfn_to_gfn(d, _mfn(mfn+i));
- if ( (ogfn != INVALID_M2P_ENTRY) && (ogfn != gfn + i) )
+ ogfn = mfn_to_gfn(d, mfn_add(mfn, i));
+ if ( (ogfn != INVALID_M2P_ENTRY) && (ogfn != gfn_x(gfn_add(gfn, i))) )
{
/* This machine frame is already mapped at another physical
* address */
P2M_DEBUG("aliased! mfn=%#lx, old gfn=%#lx, new gfn=%#lx\n",
- mfn + i, ogfn, gfn + i);
+ mfn_x(mfn_add(mfn, i)), ogfn, gfn_x(gfn_add(gfn, i)));
omfn = p2m->get_entry(p2m, ogfn, &ot, &a, 0, NULL, NULL);
if ( p2m_is_ram(ot) && !p2m_is_paged(ot) )
{
ASSERT(mfn_valid(omfn));
P2M_DEBUG("old gfn=%#lx -> mfn %#lx\n",
ogfn , mfn_x(omfn));
- if ( mfn_x(omfn) == (mfn + i) )
- p2m_remove_page(p2m, ogfn, mfn + i, 0);
+ if ( mfn_eq(omfn, mfn_add(mfn, i)) )
+ p2m_remove_page(p2m, ogfn, mfn_x(mfn_add(mfn, i)), 0);
}
}
}
/* Now, actually do the two-way mapping */
- if ( mfn_valid(_mfn(mfn)) )
+ if ( mfn_valid(mfn) )
{
- rc = p2m_set_entry(p2m, gfn, _mfn(mfn), page_order, t,
+ rc = p2m_set_entry(p2m, gfn_x(gfn), mfn, page_order, t,
p2m->default_access);
if ( rc )
goto out; /* Failed to update p2m, bail without updating m2p. */
@@ -827,14 +832,15 @@ guest_physmap_add_entry(struct domain *d, unsigned long gfn,
if ( !p2m_is_grant(t) )
{
for ( i = 0; i < (1UL << page_order); i++ )
- set_gpfn_from_mfn(mfn+i, gfn+i);
+ set_gpfn_from_mfn(mfn_x(mfn_add(mfn, i)),
+ gfn_x(gfn_add(gfn, i)));
}
}
else
{
gdprintk(XENLOG_WARNING, "Adding bad mfn to p2m map (%#lx -> %#lx)\n",
- gfn, mfn);
- rc = p2m_set_entry(p2m, gfn, _mfn(INVALID_MFN), page_order,
+ gfn_x(gfn), mfn_x(mfn));
+ rc = p2m_set_entry(p2m, gfn_x(gfn), _mfn(INVALID_MFN), page_order,
p2m_invalid, p2m->default_access);
if ( rc == 0 )
{
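Zooming out on the x86 guest_physmap_add_entry(): it keeps both directions of the translation in sync, the per-domain p2m (gfn to mfn) via p2m_set_entry() and the global M2P table (mfn to gfn) via set_gpfn_from_mfn(). Stripped to its success path, the two-way update above amounts to:

    /* Condensed sketch for a valid, non-grant page; unsharing, aliasing
     * checks, locking and error paths all elided. */
    rc = p2m_set_entry(p2m, gfn_x(gfn), mfn, page_order, t,
                       p2m->default_access);
    if ( rc == 0 && !p2m_is_grant(t) )
        for ( i = 0; i < (1UL << page_order); i++ )
            set_gpfn_from_mfn(mfn_x(mfn) + i, gfn_x(gfn) + i); /* M2P */

p2m_set_entry() itself still takes a raw gfn at this stage, which is why gfn_x() reappears there: each series in this cleanup moves the typed frontier outward one interface at a time.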
@@ -2798,7 +2804,7 @@ int p2m_add_foreign(struct domain *tdom, unsigned long fgfn,
unsigned long gpfn, domid_t foreigndom)
{
p2m_type_t p2mt, p2mt_prev;
- unsigned long prev_mfn, mfn;
+ mfn_t prev_mfn, mfn;
struct page_info *page;
int rc;
struct domain *fdom;
@@ -2841,15 +2847,15 @@ int p2m_add_foreign(struct domain *tdom, unsigned long fgfn,
rc = -EINVAL;
goto out;
}
- mfn = mfn_x(page_to_mfn(page));
+ mfn = page_to_mfn(page);
/* Remove previously mapped page if it is present. */
- prev_mfn = mfn_x(get_gfn(tdom, gpfn, &p2mt_prev));
- if ( mfn_valid(_mfn(prev_mfn)) )
+ prev_mfn = get_gfn(tdom, gpfn, &p2mt_prev);
+ if ( mfn_valid(prev_mfn) )
{
- if ( is_xen_heap_mfn(prev_mfn) )
+ if ( is_xen_heap_mfn(mfn_x(prev_mfn)) )
/* Xen heap frames are simply unhooked from this phys slot */
- guest_physmap_remove_page(tdom, gpfn, prev_mfn, 0);
+ guest_physmap_remove_page(tdom, _gfn(gpfn), prev_mfn, 0);
else
/* Normal domain memory is freed, to avoid leaking memory. */
guest_remove_page(tdom, gpfn);
@@ -2859,11 +2865,11 @@ int p2m_add_foreign(struct domain *tdom, unsigned long fgfn,
* will update the m2p table which will result in mfn -> gpfn of dom0
* and not fgfn of domU.
*/
- rc = set_foreign_p2m_entry(tdom, gpfn, _mfn(mfn));
+ rc = set_foreign_p2m_entry(tdom, gpfn, mfn);
if ( rc )
gdprintk(XENLOG_WARNING, "set_foreign_p2m_entry failed. "
"gpfn:%lx mfn:%lx fgfn:%lx td:%d fd:%d\n",
- gpfn, mfn, fgfn, tdom->domain_id, fdom->domain_id);
+ gpfn, mfn_x(mfn), fgfn, tdom->domain_id, fdom->domain_id);
put_page(page);
@@ -1818,7 +1818,7 @@ gnttab_transfer(
goto copyback;
}
- guest_physmap_remove_page(d, gop.mfn, mfn, 0);
+ guest_physmap_remove_page(d, _gfn(gop.mfn), _mfn(mfn), 0);
gnttab_flush_tlb(d);
/* Find the target domain. */
@@ -1946,7 +1946,7 @@ gnttab_transfer(
{
grant_entry_v1_t *sha = &shared_entry_v1(e->grant_table, gop.ref);
- guest_physmap_add_page(e, sha->frame, mfn, 0);
+ guest_physmap_add_page(e, _gfn(sha->frame), _mfn(mfn), 0);
if ( !paging_mode_translate(e) )
sha->frame = mfn;
}
@@ -1954,7 +1954,8 @@ gnttab_transfer(
{
grant_entry_v2_t *sha = &shared_entry_v2(e->grant_table, gop.ref);
- guest_physmap_add_page(e, sha->full_page.frame, mfn, 0);
+ guest_physmap_add_page(e, _gfn(sha->full_page.frame),
+ _mfn(mfn), 0);
if ( !paging_mode_translate(e) )
sha->full_page.frame = mfn;
}
@@ -213,7 +213,7 @@ static void populate_physmap(struct memop_args *a)
mfn = page_to_mfn(page);
}
- guest_physmap_add_page(d, gpfn, mfn, a->extent_order);
+ guest_physmap_add_page(d, _gfn(gpfn), _mfn(mfn), a->extent_order);
if ( !paging_mode_translate(d) )
{
@@ -237,20 +237,20 @@ int guest_remove_page(struct domain *d, unsigned long gmfn)
#ifdef CONFIG_X86
p2m_type_t p2mt;
#endif
- unsigned long mfn;
+ mfn_t mfn;
#ifdef CONFIG_X86
- mfn = mfn_x(get_gfn_query(d, gmfn, &p2mt));
+ mfn = get_gfn_query(d, gmfn, &p2mt);
if ( unlikely(p2m_is_paging(p2mt)) )
{
- guest_physmap_remove_page(d, gmfn, mfn, 0);
+ guest_physmap_remove_page(d, _gfn(gmfn), mfn, 0);
put_gfn(d, gmfn);
/* If the page hasn't yet been paged out, there is an
* actual page that needs to be released. */
if ( p2mt == p2m_ram_paging_out )
{
- ASSERT(mfn_valid(mfn));
- page = mfn_to_page(mfn);
+ ASSERT(mfn_valid(mfn_x(mfn)));
+ page = mfn_to_page(mfn_x(mfn));
if ( test_and_clear_bit(_PGC_allocated, &page->count_info) )
put_page(page);
}
@@ -259,14 +259,14 @@ int guest_remove_page(struct domain *d, unsigned long gmfn)
}
if ( p2mt == p2m_mmio_direct )
{
- clear_mmio_p2m_entry(d, gmfn, _mfn(mfn), 0);
+ clear_mmio_p2m_entry(d, gmfn, mfn, 0);
put_gfn(d, gmfn);
return 1;
}
#else
- mfn = mfn_x(gfn_to_mfn(d, _gfn(gmfn)));
+ mfn = gfn_to_mfn(d, _gfn(gmfn));
#endif
- if ( unlikely(!mfn_valid(mfn)) )
+ if ( unlikely(!mfn_valid(mfn_x(mfn))) )
{
put_gfn(d, gmfn);
gdprintk(XENLOG_INFO, "Domain %u page number %lx invalid\n",
@@ -288,12 +288,12 @@ int guest_remove_page(struct domain *d, unsigned long gmfn)
return 0;
}
/* Maybe the mfn changed */
- mfn = mfn_x(get_gfn_query_unlocked(d, gmfn, &p2mt));
+ mfn = get_gfn_query_unlocked(d, gmfn, &p2mt);
ASSERT(!p2m_is_shared(p2mt));
}
#endif /* CONFIG_X86 */
- page = mfn_to_page(mfn);
+ page = mfn_to_page(mfn_x(mfn));
if ( unlikely(!get_page(page, d)) )
{
put_gfn(d, gmfn);
@@ -316,7 +316,7 @@ int guest_remove_page(struct domain *d, unsigned long gmfn)
test_and_clear_bit(_PGC_allocated, &page->count_info) )
put_page(page);
- guest_physmap_remove_page(d, gmfn, mfn, 0);
+ guest_physmap_remove_page(d, _gfn(gmfn), mfn, 0);
put_page(page);
put_gfn(d, gmfn);
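guest_remove_page() illustrates that incremental strategy well: its own gfn parameter stays a raw unsigned long, the local mfn becomes typed, and the two worlds meet through a single _gfn(gmfn) wrap at the call above. The recurring boundary idiom, as a standalone sketch (hypothetical helper, not part of the patch):

    /* Raw frame numbers in, wrapped exactly once, typed from there on. */
    static void remove_raw_mapping(struct domain *d, unsigned long raw_gfn,
                                   unsigned long raw_mfn)
    {
        gfn_t gfn = _gfn(raw_gfn);
        mfn_t mfn = _mfn(raw_mfn);

        guest_physmap_remove_page(d, gfn, mfn, 0);
    }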
@@ -540,7 +540,7 @@ static long memory_exchange(XEN_GUEST_HANDLE_PARAM(xen_memory_exchange_t) arg)
gfn = mfn_to_gmfn(d, mfn);
/* Pages were unshared above */
BUG_ON(SHARED_M2P(gfn));
- guest_physmap_remove_page(d, gfn, mfn, 0);
+ guest_physmap_remove_page(d, _gfn(gfn), _mfn(mfn), 0);
put_page(page);
}
@@ -584,7 +584,8 @@ static long memory_exchange(XEN_GUEST_HANDLE_PARAM(xen_memory_exchange_t) arg)
}
mfn = page_to_mfn(page);
- guest_physmap_add_page(d, gpfn, mfn, exch.out.extent_order);
+ guest_physmap_add_page(d, _gfn(gpfn), _mfn(mfn),
+ exch.out.extent_order);
if ( !paging_mode_translate(d) )
{
@@ -1095,7 +1096,8 @@ long do_memory_op(unsigned long cmd, XEN_GUEST_HANDLE_PARAM(void) arg)
page = get_page_from_gfn(d, xrfp.gpfn, NULL, P2M_ALLOC);
if ( page )
{
- guest_physmap_remove_page(d, xrfp.gpfn, page_to_mfn(page), 0);
+ guest_physmap_remove_page(d, _gfn(xrfp.gpfn),
+ _mfn(page_to_mfn(page)), 0);
put_page(page);
}
else
@@ -2774,7 +2774,7 @@ static int __must_check arm_smmu_map_page(struct domain *d, unsigned long gfn,
* The function guest_physmap_add_entry replaces the current mapping
* if there is already one...
*/
- return guest_physmap_add_entry(d, gfn, mfn, 0, t);
+ return guest_physmap_add_entry(d, _gfn(gfn), _mfn(mfn), 0, t);
}
static int __must_check arm_smmu_unmap_page(struct domain *d, unsigned long gfn)
@@ -2786,7 +2786,7 @@ static int __must_check arm_smmu_unmap_page(struct domain *d, unsigned long gfn)
if ( !is_domain_direct_mapped(d) )
return -EINVAL;
- guest_physmap_remove_page(d, gfn, gfn, 0);
+ guest_physmap_remove_page(d, _gfn(gfn), _mfn(gfn), 0);
return 0;
}
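A caller-side subtlety in the SMMU hunks: the driver only serves direct-mapped domains (the is_domain_direct_mapped() check above), whose p2m is an identity map, so gfn == mfn by construction. That is the only reason manufacturing an mfn_t out of a gfn is legitimate here; anywhere else, _mfn(gfn) is precisely the bug the typed API exists to flag, so the call deserves a comment at the site:

    /* Direct-mapped domain: the p2m is an identity map, so gfn == mfn. */
    guest_physmap_remove_page(d, _gfn(gfn), _mfn(gfn), 0);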
@@ -160,23 +160,23 @@ int map_dev_mmio_region(struct domain *d,
unsigned long mfn);
int guest_physmap_add_entry(struct domain *d,
- unsigned long gfn,
- unsigned long mfn,
+ gfn_t gfn,
+ mfn_t mfn,
unsigned long page_order,
p2m_type_t t);
/* Untyped version for RAM only, for compatibility */
static inline int guest_physmap_add_page(struct domain *d,
- unsigned long gfn,
- unsigned long mfn,
+ gfn_t gfn,
+ mfn_t mfn,
unsigned int page_order)
{
return guest_physmap_add_entry(d, gfn, mfn, page_order, p2m_ram_rw);
}
void guest_physmap_remove_page(struct domain *d,
- unsigned long gpfn,
- unsigned long mfn, unsigned int page_order);
+ gfn_t gfn,
+ mfn_t mfn, unsigned int page_order);
mfn_t gfn_to_mfn(struct domain *d, gfn_t gfn);
@@ -545,14 +545,14 @@ void p2m_teardown(struct p2m_domain *p2m);
void p2m_final_teardown(struct domain *d);
/* Add a page to a domain's p2m table */
-int guest_physmap_add_entry(struct domain *d, unsigned long gfn,
- unsigned long mfn, unsigned int page_order,
+int guest_physmap_add_entry(struct domain *d, gfn_t gfn,
+ mfn_t mfn, unsigned int page_order,
p2m_type_t t);
/* Untyped version for RAM only, for compatibility */
static inline int guest_physmap_add_page(struct domain *d,
- unsigned long gfn,
- unsigned long mfn,
+ gfn_t gfn,
+ mfn_t mfn,
unsigned int page_order)
{
return guest_physmap_add_entry(d, gfn, mfn, page_order, p2m_ram_rw);
@@ -560,8 +560,7 @@ static inline int guest_physmap_add_page(struct domain *d,
/* Remove a page from a domain's p2m table */
int guest_physmap_remove_page(struct domain *d,
- unsigned long gfn,
- unsigned long mfn, unsigned int page_order);
+ gfn_t gfn, mfn_t mfn, unsigned int page_order);
/* Set a p2m range as populate-on-demand */
int guest_physmap_mark_populate_on_demand(struct domain *d, unsigned long gfn,
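One asymmetry visible between the two headers: Arm declares guest_physmap_remove_page() void while x86 declares it int. The common callers converted in this series ignore the result, so nothing breaks, but common code that wants the status would have to special-case the architectures until the signatures converge, along these (hypothetical) lines:

    #ifdef CONFIG_X86
        if ( guest_physmap_remove_page(d, gfn, mfn, 0) )
            gdprintk(XENLOG_WARNING, "p2m removal failed\n");
    #else
        guest_physmap_remove_page(d, gfn, mfn, 0);
    #endif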
@@ -552,7 +552,7 @@ int xenmem_add_to_physmap_one(struct domain *d, unsigned int space,
/* Returns 1 on success, 0 on error, negative if the ring
* for event propagation is full in the presence of paging */
-int guest_remove_page(struct domain *d, unsigned long gmfn);
+int guest_remove_page(struct domain *d, unsigned long gfn);
#define RAM_TYPE_CONVENTIONAL 0x00000001
#define RAM_TYPE_RESERVED 0x00000002