@@ -98,6 +98,11 @@ static void kvm_flush_dcache_pud(pud_t pud)
 	__kvm_flush_dcache_pud(pud);
 }
 
+static bool kvm_is_device_pfn(unsigned long pfn)
+{
+	return !pfn_valid(pfn);
+}
+
 /**
  * stage2_dissolve_pmd() - clear and flush huge PMD entry
  * @kvm:	pointer to kvm structure.
@@ -213,7 +218,7 @@ static void unmap_ptes(struct kvm *kvm, pmd_t *pmd,
 			kvm_tlb_flush_vmid_ipa(kvm, addr);
 
 			/* No need to invalidate the cache for device mappings */
-			if ((pte_val(old_pte) & PAGE_S2_DEVICE) != PAGE_S2_DEVICE)
+			if (!kvm_is_device_pfn(pte_pfn(old_pte)))
 				kvm_flush_dcache_pte(old_pte);
 
 			put_page(virt_to_page(pte));
@@ -305,8 +310,7 @@ static void stage2_flush_ptes(struct kvm *kvm, pmd_t *pmd,
 
 	pte = pte_offset_kernel(pmd, addr);
 	do {
-		if (!pte_none(*pte) &&
-		    (pte_val(*pte) & PAGE_S2_DEVICE) != PAGE_S2_DEVICE)
+		if (!pte_none(*pte) && !kvm_is_device_pfn(pte_pfn(*pte)))
 			kvm_flush_dcache_pte(*pte);
 	} while (pte++, addr += PAGE_SIZE, addr != end);
 }
@@ -1037,11 +1041,6 @@ static bool kvm_is_write_fault(struct kvm_vcpu *vcpu)
 	return kvm_vcpu_dabt_iswrite(vcpu);
 }
 
-static bool kvm_is_device_pfn(unsigned long pfn)
-{
-	return !pfn_valid(pfn);
-}
-
 /**
  * stage2_wp_ptes - write protect PMD range
  * @pmd:	pointer to pmd entry
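
For illustration only, a stand-alone user-space sketch of the check the patch moves to: whether a mapping is device memory is decided from the page frame the PTE points at (no memmap entry behind the PFN) rather than from a stage-2 attribute bit in the descriptor. The pte layout, example_pfn_valid() and the RAM window below are invented for the example; in the kernel, pfn_valid() consults the real memory map.

/*
 * Minimal sketch of "decide device-ness from the output PFN, then skip
 * dcache maintenance for device memory".  All constants are made up.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT		12
#define EXAMPLE_RAM_START_PFN	0x80000UL	/* hypothetical RAM window */
#define EXAMPLE_RAM_END_PFN	0xa0000UL

typedef uint64_t pte_t;

/* The output address sits in bits [47:12] of the descriptor in this sketch. */
static unsigned long pte_pfn(pte_t pte)
{
	return (unsigned long)((pte & 0x0000fffffffff000ULL) >> PAGE_SHIFT);
}

/* Stand-in for pfn_valid(): true only for PFNs backed by "RAM" here. */
static bool example_pfn_valid(unsigned long pfn)
{
	return pfn >= EXAMPLE_RAM_START_PFN && pfn < EXAMPLE_RAM_END_PFN;
}

/* Same shape as the helper the patch adds: no memmap entry => device. */
static bool kvm_is_device_pfn(unsigned long pfn)
{
	return !example_pfn_valid(pfn);
}

int main(void)
{
	pte_t ram_pte  = ((pte_t)0x81234 << PAGE_SHIFT) | 0x3;	/* normal RAM page */
	pte_t mmio_pte = ((pte_t)0x09000 << PAGE_SHIFT) | 0x3;	/* MMIO-like page  */

	/* Mirrors "if (!kvm_is_device_pfn(pte_pfn(pte))) flush the dcache". */
	printf("ram pte:  needs dcache flush? %s\n",
	       !kvm_is_device_pfn(pte_pfn(ram_pte)) ? "yes" : "no");
	printf("mmio pte: needs dcache flush? %s\n",
	       !kvm_is_device_pfn(pte_pfn(mmio_pte)) ? "yes" : "no");
	return 0;
}

Since the memory type of a stage-2 mapping is an encoded index rather than a set of mutually exclusive bits, a "(pte_val(x) & CONST) != CONST" test is not guaranteed to identify device mappings, which is what the pfn_valid()-based helper avoids.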