@@ -35,6 +35,8 @@ enum migrate_reason {
#ifdef CONFIG_MIGRATION
+/* Migrate the contents of @oldpage into @newpage; see mm/migrate.c. */
+extern int migrate_replace_page(struct page *oldpage, struct page *newpage);
+
extern void putback_lru_pages(struct list_head *l);
extern void putback_movable_pages(struct list_head *l);
extern int migrate_page(struct address_space *,
@@ -57,6 +59,9 @@ extern int migrate_huge_page_move_mapping(struct address_space *mapping,
struct page *newpage, struct page *page);
#else
+/* CONFIG_MIGRATION disabled: single-page replacement is unsupported. */
+static inline int migrate_replace_page(struct page *oldpage,
+		struct page *newpage) { return -ENOSYS; }
+
static inline void putback_lru_pages(struct list_head *l) {}
static inline void putback_movable_pages(struct list_head *l) {}
static inline int migrate_pages(struct list_head *l, new_page_t x,
@@ -1067,6 +1067,65 @@ out:
return rc;
}
+/**
+ * migrate_replace_page - migrate the contents of one page into a given page
+ * @page: source page.  The caller must pin it with one additional
+ *        reference taken via get_page(); that reference is dropped here
+ *        once the page has been isolated from its LRU list.  The source
+ *        page is released if migration succeeds.
+ * @newpage: target page.  On success its ownership is taken here (it is
+ *        placed on an LRU list); on failure the caller must release it.
+ *
+ * Unlike migrate_pages(), this migrates exactly one page and lets the
+ * caller choose the exact target page.
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+int migrate_replace_page(struct page *page, struct page *newpage)
+{
+	struct zone *zone = page_zone(page);
+	unsigned long flags;
+	int ret = -EAGAIN;
+	int pass;
+
+	/* presumably flushes pending per-cpu LRU additions so the
+	 * isolation below can succeed — see migrate_prep() */
+	migrate_prep();
+
+	spin_lock_irqsave(&zone->lru_lock, flags);
+
+	/*
+	 * Take the page off its LRU list under zone->lru_lock.  If it is
+	 * not an LRU page or cannot be isolated, give up right away.
+	 */
+	if (PageLRU(page) &&
+	    __isolate_lru_page(page, ISOLATE_UNEVICTABLE) == 0) {
+		struct lruvec *lruvec = mem_cgroup_page_lruvec(page, zone);
+		del_page_from_lru_list(page, lruvec, page_lru(page));
+		spin_unlock_irqrestore(&zone->lru_lock, flags);
+	} else {
+		spin_unlock_irqrestore(&zone->lru_lock, flags);
+		return -EAGAIN;
+	}
+
+	/* page is now isolated, so release additional reference */
+	put_page(page);
+
+	/* Retry synchronous migration for a bounded number of passes. */
+	for (pass = 0; pass < 10 && ret != 0; pass++) {
+		cond_resched();
+
+		if (page_count(page) == 1) {
+			/* page was freed from under us, so we are done */
+			ret = 0;
+			break;
+		}
+		ret = __unmap_and_move(page, newpage, 1, MIGRATE_SYNC);
+	}
+
+	if (ret == 0) {
+		/* take ownership of newpage and add it to lru */
+		putback_lru_page(newpage);
+	} else {
+		/* restore additional reference to the oldpage */
+		get_page(page);
+	}
+
+	/* NOTE(review): relies on putback_lru_page() also dropping the
+	 * isolation reference taken above — confirm against its contract. */
+	putback_lru_page(page);
+	return ret;
+}
+
+
#ifdef CONFIG_NUMA
/*
* Move a list of individual pages