--- a/include/linux/migrate.h
+++ b/include/linux/migrate.h
@@ -27,6 +27,7 @@ enum migrate_reason {
MR_MEMPOLICY_MBIND,
MR_NUMA_MISPLACED,
MR_CONTIG_RANGE,
+ MR_LONGTERM_PIN,
MR_TYPES
};
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -407,8 +407,13 @@ enum zone_type {
* to increase the number of THP/huge pages. Notable special cases are:
*
* 1. Pinned pages: (long-term) pinning of movable pages might
- * essentially turn such pages unmovable. Memory offlining might
- * retry a long time.
+ * essentially turn such pages unmovable. Therefore, we do not allow
+ * pinning long-term pages in ZONE_MOVABLE. When pages are pinned and
+ * faulted, they come from the right zone right away. However, it is
+ * still possible that the address space already has pages in
+ * ZONE_MOVABLE at the time they are pinned (i.e. the user has
+ * touched that memory before pinning). In that case we migrate them
+ * to a different zone. If migration fails, pinning fails.
* 2. memblock allocations: kernelcore/movablecore setups might create
* situations where ZONE_MOVABLE contains unmovable allocations
* after boot. Memory offlining and allocations fail early.
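The predicate that enforces this policy is not part of this hunk; the series adds it as a helper outside this diff. A minimal sketch of its logic, built only from existing helpers (zone_idx(), page_zone(), is_migrate_cma_page()); the exact form in the tree may differ:

static inline bool is_pinnable_page(struct page *page)
{
	/* Pages in ZONE_MOVABLE or on CMA pageblocks must remain movable. */
	return zone_idx(page_zone(page)) != ZONE_MOVABLE &&
	       !is_migrate_cma_page(page);
}

The shared zero page gets a separate special case in the mm/gup.c hunk below: it is skipped rather than isolated, since it never needs to be migrated.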
--- a/include/trace/events/migrate.h
+++ b/include/trace/events/migrate.h
@@ -20,7 +20,8 @@
EM( MR_SYSCALL, "syscall_or_cpuset") \
EM( MR_MEMPOLICY_MBIND, "mempolicy_mbind") \
EM( MR_NUMA_MISPLACED, "numa_misplaced") \
- EMe(MR_CONTIG_RANGE, "contig_range")
+ EM( MR_CONTIG_RANGE, "contig_range") \
+ EMe(MR_LONGTERM_PIN, "longterm_pin")
/*
* First define the enums in the above macros to be exported to userspace
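With this mapping in place, migrations triggered by long-term pinning show up in the mm_migrate_pages tracepoint output with reason "longterm_pin", instead of being folded into "contig_range" as they were when the old code passed MR_CONTIG_RANGE (see the migrate_pages() hunk below).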
--- a/mm/gup.c
+++ b/mm/gup.c
@@ -89,11 +89,12 @@ static __maybe_unused struct page *try_grab_compound_head(struct page *page,
int orig_refs = refs;
/*
- * Can't do FOLL_LONGTERM + FOLL_PIN with CMA in the gup fast
- * path, so fail and let the caller fall back to the slow path.
+ * FOLL_LONGTERM + FOLL_PIN cannot be satisfied in the gup fast
+ * path if the page is not in a pinnable zone, so fail and let the
+ * caller fall back to the slow path.
*/
- if (unlikely(flags & FOLL_LONGTERM) &&
- is_migrate_cma_page(page))
+ if (unlikely((flags & FOLL_LONGTERM) &&
+ !is_pinnable_page(page)))
return NULL;
/*
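Returning NULL here simply makes the lockless walk miss: internal_get_user_pages_fast() then retries the remaining pages via the slow path, where the migration logic below may sleep, migrate the page out of the movable zone, and pin the replacement.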
@@ -1547,17 +1548,16 @@ struct page *get_dump_page(unsigned long addr)
}
#endif /* CONFIG_ELF_CORE */
-#ifdef CONFIG_CMA
-static long check_and_migrate_cma_pages(struct mm_struct *mm,
- unsigned long start,
- unsigned long nr_pages,
- struct page **pages,
- struct vm_area_struct **vmas,
- unsigned int gup_flags)
+static long check_and_migrate_movable_pages(struct mm_struct *mm,
+ unsigned long start,
+ unsigned long nr_pages,
+ struct page **pages,
+ struct vm_area_struct **vmas,
+ unsigned int gup_flags)
{
unsigned long i, isolation_error_count;
bool drain_allow;
- LIST_HEAD(cma_page_list);
+ LIST_HEAD(movable_page_list);
long ret = nr_pages;
struct page *prev_head, *head;
struct migration_target_control mtc = {
@@ -1575,15 +1575,14 @@ static long check_and_migrate_cma_pages(struct mm_struct *mm,
continue;
prev_head = head;
/*
- * If we get a page from the CMA zone, since we are going to
- * be pinning these entries, we might as well move them out
- * of the CMA zone if possible.
+ * Since we are about to pin these pages, first try to migrate
+ * any page that is not pinnable out of its current placement.
*/
- if (is_migrate_cma_page(head)) {
+ if (!is_pinnable_page(head)) {
if (is_zero_pfn(page_to_pfn(head)))
continue;
if (PageHuge(head)) {
- if (!isolate_huge_page(head, &cma_page_list))
+ if (!isolate_huge_page(head, &movable_page_list))
isolation_error_count++;
} else {
if (!PageLRU(head) && drain_allow) {
@@ -1595,7 +1594,7 @@ static long check_and_migrate_cma_pages(struct mm_struct *mm,
isolation_error_count++;
continue;
}
- list_add_tail(&head->lru, &cma_page_list);
+ list_add_tail(&head->lru, &movable_page_list);
mod_node_page_state(page_pgdat(head),
NR_ISOLATED_ANON +
page_is_file_lru(head),
@@ -1608,10 +1607,10 @@ static long check_and_migrate_cma_pages(struct mm_struct *mm,
* If list is empty, and no isolation errors, means that all pages are
* in the correct zone.
*/
- if (list_empty(&cma_page_list) && !isolation_error_count)
+ if (list_empty(&movable_page_list) && !isolation_error_count)
return ret;
- if (!list_empty(&cma_page_list)) {
+ if (!list_empty(&movable_page_list)) {
/*
* drop the above get_user_pages reference.
*/
@@ -1621,12 +1620,12 @@ static long check_and_migrate_cma_pages(struct mm_struct *mm,
for (i = 0; i < nr_pages; i++)
put_page(pages[i]);
- ret = migrate_pages(&cma_page_list, alloc_migration_target,
+ ret = migrate_pages(&movable_page_list, alloc_migration_target,
NULL, (unsigned long)&mtc, MIGRATE_SYNC,
- MR_CONTIG_RANGE);
+ MR_LONGTERM_PIN);
if (ret) {
- if (!list_empty(&cma_page_list))
- putback_movable_pages(&cma_page_list);
+ if (!list_empty(&movable_page_list))
+ putback_movable_pages(&movable_page_list);
return ret > 0 ? -ENOMEM : ret;
}
@@ -1644,17 +1643,6 @@ static long check_and_migrate_cma_pages(struct mm_struct *mm,
*/
goto check_again;
}
-#else
-static long check_and_migrate_cma_pages(struct mm_struct *mm,
- unsigned long start,
- unsigned long nr_pages,
- struct page **pages,
- struct vm_area_struct **vmas,
- unsigned int gup_flags)
-{
- return nr_pages;
-}
-#endif /* CONFIG_CMA */
/*
* __gup_longterm_locked() is a wrapper for __get_user_pages_locked which
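The !CONFIG_CMA stub above is removed because the check is no longer CMA-specific: ZONE_MOVABLE exists independently of CONFIG_CMA, so check_and_migrate_movable_pages() must now be compiled in unconditionally.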
@@ -1678,8 +1666,9 @@ static long __gup_longterm_locked(struct mm_struct *mm,
if (gup_flags & FOLL_LONGTERM) {
if (rc > 0)
- rc = check_and_migrate_cma_pages(mm, start, rc, pages,
- vmas, gup_flags);
+ rc = check_and_migrate_movable_pages(mm, start, rc,
+ pages, vmas,
+ gup_flags);
memalloc_pin_restore(flags);
}
return rc;
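For reference, a hypothetical caller-side sketch (not part of this patch) of the behavior the series guarantees: a FOLL_LONGTERM pin either returns pages outside ZONE_MOVABLE/CMA or fails; it never leaves a long-term pin in the movable zone. The helper name is made up for illustration; pin_user_pages() and the locking around it are the real API at this point in the tree:

#include <linux/mm.h>
#include <linux/sched/mm.h>

/* Hypothetical helper: long-term pin a user range, e.g. for DMA. */
static long longterm_pin_range(unsigned long start, unsigned long nr_pages,
			       struct page **pages)
{
	long pinned;

	mmap_read_lock(current->mm);
	/*
	 * With this patch, FOLL_LONGTERM guarantees the returned pages
	 * are in a pinnable zone: any ZONE_MOVABLE/CMA pages are
	 * migrated first, and the pin fails if that migration fails.
	 */
	pinned = pin_user_pages(start, nr_pages,
				FOLL_WRITE | FOLL_LONGTERM, pages, NULL);
	mmap_read_unlock(current->mm);

	return pinned;
}

When the long-term user is done, the pages are released with unpin_user_pages(pages, pinned).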