This patch adds new memory to ZONE_MOVABLE on x86_64 if the movable
zone is set up and is lower than the newly added memory.

Signed-off-by: Wang Nan <wangnan0@huawei.com>
---
 arch/x86/mm/init_64.c | 10 ++++++++--
 1 file changed, 8 insertions(+), 2 deletions(-)

--- a/arch/x86/mm/init_64.c
+++ b/arch/x86/mm/init_64.c
@@ -685,17 +685,23 @@ static void update_end_of_memory_vars(u64 start, u64 size)
 }
 
 /*
- * Memory is added always to NORMAL zone. This means you will never get
- * additional DMA/DMA32 memory.
+ * Memory is added always to NORMAL or MOVABLE zone. This means you
+ * will never get additional DMA/DMA32 memory.
  */
 int arch_add_memory(int nid, u64 start, u64 size)
 {
 	struct pglist_data *pgdat = NODE_DATA(nid);
 	struct zone *zone = pgdat->node_zones + ZONE_NORMAL;
+	struct zone *movable_zone = pgdat->node_zones + ZONE_MOVABLE;
 	unsigned long start_pfn = start >> PAGE_SHIFT;
 	unsigned long nr_pages = size >> PAGE_SHIFT;
 	int ret;
 
+	if (!zone_is_empty(movable_zone))
+		if (zone_spans_pfn(movable_zone, start_pfn) ||
+				(zone_end_pfn(movable_zone) <= start_pfn))
+			zone = movable_zone;
+
 	init_memory_mapping(start, start + size);
 
 	ret = __add_pages(nid, zone, start_pfn, nr_pages);
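For readers who want to check the zone-selection condition outside the
kernel, here is a minimal standalone C model of the predicate. This is
an illustration only, not kernel code: zone_model and pick_zone are
made-up names, and the helpers merely mimic the semantics of the
kernel's zone_is_empty(), zone_spans_pfn() and zone_end_pfn().

/*
 * Standalone model of the zone-selection predicate in the patch:
 * new memory lands in the movable zone when that zone is non-empty
 * and either already spans start_pfn or ends at/below it (i.e. the
 * new range sits above the existing movable zone).
 */
#include <stdbool.h>
#include <stdio.h>

struct zone_model {
	unsigned long start_pfn;	/* first PFN of the zone */
	unsigned long nr_pages;		/* zone length; 0 => empty zone */
};

static bool zone_is_empty(const struct zone_model *z)
{
	return z->nr_pages == 0;
}

static unsigned long zone_end_pfn(const struct zone_model *z)
{
	return z->start_pfn + z->nr_pages;
}

static bool zone_spans_pfn(const struct zone_model *z, unsigned long pfn)
{
	return pfn >= z->start_pfn && pfn < zone_end_pfn(z);
}

/* Mirrors the zone choice made by the patched arch_add_memory(). */
static const char *pick_zone(const struct zone_model *movable,
			     unsigned long start_pfn)
{
	if (!zone_is_empty(movable) &&
	    (zone_spans_pfn(movable, start_pfn) ||
	     zone_end_pfn(movable) <= start_pfn))
		return "ZONE_MOVABLE";
	return "ZONE_NORMAL";
}

int main(void)
{
	struct zone_model movable = { .start_pfn = 0x100000,
				      .nr_pages = 0x80000 };
	unsigned long tests[] = { 0x80000, 0x140000, 0x200000 };
	size_t i;

	/* Below, inside, and above the movable span [0x100000, 0x180000). */
	for (i = 0; i < sizeof(tests) / sizeof(tests[0]); i++)
		printf("start_pfn 0x%lx -> %s\n", tests[i],
		       pick_zone(&movable, tests[i]));
	return 0;
}

Running this prints ZONE_NORMAL for the PFN below the movable span and
ZONE_MOVABLE for the PFNs inside and above it, which matches the intent
of the patch: once a movable zone exists, hotplugged memory at or above
it is kept movable rather than being forced into ZONE_NORMAL.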