@@ -189,8 +189,15 @@ static inline void *phys_to_virt(phys_addr_t x)
  */
 #define ARCH_PFN_OFFSET		((unsigned long)PHYS_PFN_OFFSET)
 
+#ifndef CONFIG_SPARSEMEM_VMEMMAP
 #define virt_to_page(kaddr)	pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
-#define	virt_addr_valid(kaddr)	pfn_valid(__pa(kaddr) >> PAGE_SHIFT)
+#define virt_addr_valid(kaddr)	pfn_valid(__pa(kaddr) >> PAGE_SHIFT)
+#else
+#define virt_to_page(kaddr)	((struct page *)VMEMMAP_START + \
+				 (((u64)(kaddr) & ~PAGE_OFFSET) >> PAGE_SHIFT))
+#define virt_addr_valid(kaddr)	pfn_valid((((u64)(kaddr) & ~PAGE_OFFSET) \
+					   + PHYS_OFFSET) >> PAGE_SHIFT)
+#endif
 #endif
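
For illustration, the following stand-alone user-space sketch performs
the same arithmetic as the new CONFIG_SPARSEMEM_VMEMMAP definitions.
All constants below (PAGE_OFFSET, VMEMMAP_START, PHYS_OFFSET, and the
struct page size) are hypothetical stand-ins for one possible
configuration, not values taken from the patch; the point is only that
the struct page address is now derived purely arithmetically from the
virtual address:

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT	12
#define PAGE_OFFSET	0xffff800000000000ULL	/* base of linear region (assumed) */
#define VMEMMAP_START	0xffff7e0000000000ULL	/* base of struct page array (assumed) */
#define PHYS_OFFSET	0x80000000ULL		/* start of physical RAM (assumed) */
#define PAGE_STRUCT_SZ	64ULL			/* stands in for sizeof(struct page) */

/*
 * New-style virt_to_page(): mask off the PAGE_OFFSET bits to get the
 * offset into the linear region, shift down to a page index, and index
 * the statically placed vmemmap array. No translation through the
 * runtime PHYS_OFFSET variable, and hence no memory access, is needed.
 * Pointer arithmetic on (struct page *) is modelled here by an
 * explicit multiply.
 */
static uint64_t virt_to_page_addr(uint64_t kaddr)
{
	return VMEMMAP_START +
	       ((kaddr & ~PAGE_OFFSET) >> PAGE_SHIFT) * PAGE_STRUCT_SZ;
}

/*
 * New-style virt_addr_valid(): reconstruct the pfn by adding
 * PHYS_OFFSET back onto the linear-region offset before shifting;
 * the kernel then hands the result to pfn_valid().
 */
static uint64_t virt_to_pfn(uint64_t kaddr)
{
	return ((kaddr & ~PAGE_OFFSET) + PHYS_OFFSET) >> PAGE_SHIFT;
}

int main(void)
{
	uint64_t kaddr = PAGE_OFFSET + 0x123000ULL;	/* a linear-map address */

	printf("virt %#llx -> struct page at %#llx, pfn %#llx\n",
	       (unsigned long long)kaddr,
	       (unsigned long long)virt_to_page_addr(kaddr),
	       (unsigned long long)virt_to_pfn(kaddr));
	return 0;
}

Building and running this prints the derived struct page address and
pfn for a sample linear-map address; the real macros do the same
computation with the kernel's actual layout, with pfn_valid()
providing the final check.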
The mm layer makes heavy use of virt_to_page(), which translates from
virtual addresses to offsets in the struct page array using an
intermediate translation to physical addresses. However, these
physical translations are based on the actual placement of physical
memory, which can only be discovered at runtime. This means
virt_to_page() translations involve a global PHYS_OFFSET variable,
and hence a memory access.

Now that the vmemmap region has been redefined to cover the linear
region rather than the entire physical address space, we no longer
need to perform a virtual-to-physical translation in the
implementation of virt_to_page(), which means we can get rid of the
memory access. This restricts virt_to_page() translations to the
linear region, so redefine virt_addr_valid() as well.

Signed-off-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
---
 arch/arm64/include/asm/memory.h | 9 ++++++++-
 1 file changed, 8 insertions(+), 1 deletion(-)

-- 
2.5.0