@@ -767,6 +767,20 @@ int arch_domain_soft_reset(struct domain *d)
return -ENOSYS;
}
+void arch_domain_creation_finished(struct domain *d)
+{
+ /*
+ * To avoid flushing the whole guest RAM on the first Set/Way, we
+ * invalidate the P2M to track what has been accessed.
+ *
+ * This is only done when the IOMMU is not used or the page-tables
+ * are not shared, because an unset bit[0] (i.e. the valid bit)
+ * would result in IOMMU faults that could not be fixed up.
+ */
+ if ( !iommu_use_hap_pt(d) )
+ p2m_invalidate_root(p2m_get_hostp2m(d));
+}
+
static int is_guest_pv32_psr(uint32_t psr)
{
switch (psr & PSR_MODE_MASK)
@@ -1079,6 +1079,22 @@ static void p2m_invalidate_table(struct p2m_domain *p2m, mfn_t mfn)
}
/*
+ * Invalidate all entries in the root page-tables. This is useful
+ * to force a fault on the next access so an action can be taken.
+ */
+void p2m_invalidate_root(struct p2m_domain *p2m)
+{
+ unsigned int i;
+
+ p2m_write_lock(p2m);
+
+ for ( i = 0; i < P2M_ROOT_LEVEL; i++ )
+ p2m_invalidate_table(p2m, page_to_mfn(p2m->root + i));
+
+ p2m_write_unlock(p2m);
+}
+
+/*
* Resolve any translation fault due to change in the p2m. This
* includes break-before-make and valid bit cleared.
*/
@@ -1587,10 +1603,12 @@ int p2m_cache_flush_range(struct domain *d, gfn_t *pstart, gfn_t end)
*/
if ( gfn_eq(start, next_block_gfn) )
{
- mfn = p2m_get_entry(p2m, start, &t, NULL, &order, NULL);
+ bool valid;
+
+ mfn = p2m_get_entry(p2m, start, &t, NULL, &order, &valid);
next_block_gfn = gfn_next_boundary(start, order);
- if ( mfn_eq(mfn, INVALID_MFN) || !p2m_is_any_ram(t) )
+ if ( mfn_eq(mfn, INVALID_MFN) || !p2m_is_any_ram(t) || !valid )
{
count++;
start = next_block_gfn;
@@ -1624,6 +1642,7 @@ int p2m_cache_flush_range(struct domain *d, gfn_t *pstart, gfn_t end)
*/
void p2m_flush_vm(struct vcpu *v)
{
+ struct p2m_domain *p2m = p2m_get_hostp2m(v->domain);
int rc;
gfn_t start = _gfn(0);
@@ -1643,6 +1662,12 @@ void p2m_flush_vm(struct vcpu *v)
"P2M has not been correctly cleaned (rc = %d)\n",
rc);
+ /*
+ * Invalidate the p2m to track which pages were modified by the
+ * guest between calls of p2m_flush_vm().
+ */
+ p2m_invalidate_root(p2m);
+
v->arch.need_flush_to_ram = false;
}
@@ -765,6 +765,10 @@ int arch_domain_soft_reset(struct domain *d)
return ret;
}
+void arch_domain_creation_finished(struct domain *d)
+{
+}
+
/*
* These are the masks of CR4 bits (subject to hardware availability) which a
* PV guest may not legitimiately attempt to modify.
@@ -1116,8 +1116,11 @@ int domain_unpause_by_systemcontroller(struct domain *d)
* Creation is considered finished when the controller reference count
* first drops to 0.
*/
- if ( new == 0 )
+ if ( new == 0 && !d->creation_finished )
+ {
d->creation_finished = true;
+ arch_domain_creation_finished(d);
+ }
domain_unpause(d);
@@ -231,6 +231,8 @@ int p2m_set_entry(struct p2m_domain *p2m,
bool p2m_resolve_translation_fault(struct domain *d, gfn_t gfn);
+void p2m_invalidate_root(struct p2m_domain *p2m);
+
/*
* Clean & invalidate caches corresponding to a region [start,end) of guest
* address space.
@@ -70,6 +70,8 @@ void arch_domain_unpause(struct domain *d);
int arch_domain_soft_reset(struct domain *d);
+void arch_domain_creation_finished(struct domain *d);
+
void arch_p2m_set_access_required(struct domain *d, bool access_required);
int arch_set_info_guest(struct vcpu *, vcpu_guest_context_u);