Message ID: <1387211803-24933-1-git-send-email-julien.grall@linaro.org>
State: Superseded, archived
>>> On 16.12.13 at 17:36, Julien Grall <julien.grall@linaro.org> wrote:
> --- a/xen/arch/arm/domain_build.c
> +++ b/xen/arch/arm/domain_build.c
> @@ -65,6 +65,11 @@ struct vcpu *__init alloc_dom0_vcpu0(void)
>      return alloc_vcpu(dom0, 0, 0);
>  }
>
> +int is_domain_direct_mapped(struct domain *d)

bool_t please, and const struct domain *.

> --- a/xen/include/asm-arm/domain.h
> +++ b/xen/include/asm-arm/domain.h
> @@ -86,6 +86,8 @@ enum domain_type {
>  #define is_pv64_domain(d) (0)
>  #endif
>
> +int is_domain_direct_mapped(struct domain *d);

Furthermore, if this could be made a macro, ...

> --- a/xen/include/asm-x86/domain.h
> +++ b/xen/include/asm-x86/domain.h
> @@ -16,6 +16,8 @@
>  #define is_pv_32on64_domain(d) (is_pv_32bit_domain(d))
>  #define is_pv_32on64_vcpu(v) (is_pv_32on64_domain((v)->domain))
>
> +#define is_domain_direct_mapped(d) (0)

... you wouldn't need this here, but rather put it inside an
"#ifndef is_domain_direct_mapped" in xen/common/memory.c itself.

And you'll want to make sure (even if unlikely to be needed in
practice) that you evaluate the macro argument exactly once in both
flavors, i.e. in the case here

#define is_domain_direct_mapped(d) ((void)(d), 0)

or, assuming that d being NULL is a mistake anyway (and would crash
elsewhere)

#define is_domain_direct_mapped(d) (!(d))

Jan
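For illustration, a minimal sketch of the shape suggested above (not the
posted patch; it assumes dom0_11_mapping, currently local to
domain_build.c, would be made visible wherever the ARM macro lives):

/* xen/include/asm-arm/domain.h: macro flavour, argument evaluated once. */
#define is_domain_direct_mapped(d) (dom0_11_mapping && (d) == dom0)

/* xen/common/memory.c: fallback for architectures that do not define the
 * macro, also evaluating the argument exactly once, so asm-x86/domain.h
 * would need no stub at all. */
#ifndef is_domain_direct_mapped
#define is_domain_direct_mapped(d) ((void)(d), 0)
#endif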
diff --git a/xen/arch/arm/domain_build.c b/xen/arch/arm/domain_build.c
index faff88e..2bbee36 100644
--- a/xen/arch/arm/domain_build.c
+++ b/xen/arch/arm/domain_build.c
@@ -65,6 +65,11 @@ struct vcpu *__init alloc_dom0_vcpu0(void)
     return alloc_vcpu(dom0, 0, 0);
 }
 
+int is_domain_direct_mapped(struct domain *d)
+{
+    return (dom0_11_mapping && d == dom0);
+}
+
 static void allocate_memory_11(struct domain *d, struct kernel_info *kinfo)
 {
     paddr_t start;
diff --git a/xen/common/memory.c b/xen/common/memory.c
index eb7b72b..edbbdc4 100644
--- a/xen/common/memory.c
+++ b/xen/common/memory.c
@@ -122,7 +122,29 @@ static void populate_physmap(struct memop_args *a)
         }
         else
         {
-            page = alloc_domheap_pages(d, a->extent_order, a->memflags);
+            if ( is_domain_direct_mapped(d) )
+            {
+                mfn = gpfn;
+                if ( !mfn_valid(mfn) )
+                {
+                    gdprintk(XENLOG_INFO, "Invalid mfn 0x%"PRI_xen_pfn"\n",
+                             mfn);
+                    goto out;
+                }
+
+                page = mfn_to_page(mfn);
+                if ( !get_page(page, d) )
+                {
+                    gdprintk(XENLOG_INFO,
+                             "mfn 0x%"PRI_xen_pfn" doesn't belong to the"
+                             " domain\n", mfn);
+                    goto out;
+                }
+                put_page(page);
+            }
+            else
+                page = alloc_domheap_pages(d, a->extent_order, a->memflags);
+
             if ( unlikely(page == NULL) )
             {
                 if ( !opt_tmem || (a->extent_order != 0) )
@@ -270,6 +292,13 @@ static void decrease_reservation(struct memop_args *a)
             && p2m_pod_decrease_reservation(a->domain, gmfn, a->extent_order) )
             continue;
 
+        /* With the lack for iommu on some ARM platform, domain with DMA-capable
+         * device must retrieve the same pfn when the hypercall
+         * populate_physmap is called.
+         */
+        if ( is_domain_direct_mapped(a->domain) )
+            continue;
+
         for ( j = 0; j < (1 << a->extent_order); j++ )
             if ( !guest_remove_page(a->domain, gmfn + j) )
                 goto out;
diff --git a/xen/include/asm-arm/domain.h b/xen/include/asm-arm/domain.h
index 28d39a0..dbc1389 100644
--- a/xen/include/asm-arm/domain.h
+++ b/xen/include/asm-arm/domain.h
@@ -86,6 +86,8 @@ enum domain_type {
 #define is_pv64_domain(d) (0)
 #endif
 
+int is_domain_direct_mapped(struct domain *d);
+
 struct vtimer {
     struct vcpu *v;
     int irq;
diff --git a/xen/include/asm-x86/domain.h b/xen/include/asm-x86/domain.h
index 9d39061..2c7f809 100644
--- a/xen/include/asm-x86/domain.h
+++ b/xen/include/asm-x86/domain.h
@@ -16,6 +16,8 @@
 #define is_pv_32on64_domain(d) (is_pv_32bit_domain(d))
 #define is_pv_32on64_vcpu(v) (is_pv_32on64_domain((v)->domain))
 
+#define is_domain_direct_mapped(d) (0)
+
 #define is_hvm_pv_evtchn_domain(d) (has_hvm_container_domain(d) && \
         d->arch.hvm_domain.irq.callback_via_type == HVMIRQ_callback_vector)
 #define is_hvm_pv_evtchn_vcpu(v) (is_hvm_pv_evtchn_domain(v->domain))
Without an IOMMU, dom0 must have a 1:1 memory mapping for all of its
guest physical addresses. When the balloon driver decides to give a page
back to the kernel, this page must have the same machine address as
before. Otherwise, we will lose the 1:1 mapping and break DMA-capable
devices.

Signed-off-by: Julien Grall <julien.grall@linaro.org>
Cc: Keir Fraser <keir@xen.org>
Cc: Jan Beulich <jbeulich@suse.com>

---
Release: this is a bug that prevents DMA-capable devices from working
after the guest has started.

Changes in v4:
    - Fix typos
    - Update comment in the code

Changes in v3:
    - Remove spurious page = NULL
    - Rename is_dom0_mapped_11 to is_domain_direct_mapped
    - Coding style

Changes in v2:
    - Drop CONFIG_ARM and add is_dom0_mapped_11
---
 xen/arch/arm/domain_build.c  |  5 +++++
 xen/common/memory.c          | 31 ++++++++++++++++++++++++++++++-
 xen/include/asm-arm/domain.h |  2 ++
 xen/include/asm-x86/domain.h |  2 ++
 4 files changed, 39 insertions(+), 1 deletion(-)
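To illustrate the scenario the description refers to, here is a rough
guest-side sketch of a balloon round-trip (give a page back, then
repopulate the same gpfn). HYPERVISOR_memory_op, the XENMEM_* commands
and struct xen_memory_reservation are the standard Xen interface; the
helper name balloon_roundtrip() and the exact include paths are
illustrative assumptions, not code from this patch.

/* Illustrative only -- Linux-style guest includes assumed. */
#include <xen/interface/memory.h>   /* struct xen_memory_reservation, XENMEM_* */
#include <asm/xen/hypercall.h>      /* HYPERVISOR_memory_op() */

static int balloon_roundtrip(xen_pfn_t gpfn)
{
    xen_pfn_t frame = gpfn;
    struct xen_memory_reservation r = {
        .nr_extents   = 1,
        .extent_order = 0,
        .domid        = DOMID_SELF,
    };

    set_xen_guest_handle(r.extent_start, &frame);

    /* Hand the page at gpfn back to Xen (balloon inflation)... */
    if ( HYPERVISOR_memory_op(XENMEM_decrease_reservation, &r) != 1 )
        return -1;

    /* ...then ask to repopulate the very same guest address (deflation). */
    frame = gpfn;
    if ( HYPERVISOR_memory_op(XENMEM_populate_physmap, &r) != 1 )
        return -1;

    /*
     * With this patch a direct-mapped dom0 gets mfn == gpfn back, so DMA
     * mappings programmed with the old machine address remain valid.
     */
    return 0;
}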