diff --git a/mm/memory-failure.c b/mm/memory-failure.c
index 91a5193f9f9fc4..61955b56b90472 100644
--- a/mm/memory-failure.c
+++ b/mm/memory-failure.c
@@ -1626,20 +1626,7 @@ static int __soft_offline_page(struct page *page, int flags)
 			if (ret > 0)
 				ret = -EIO;
 		} else {
-			/*
-			 * After page migration succeeds, the source page can
-			 * be trapped in pagevec and actual freeing is delayed.
-			 * Freeing code works differently based on PG_hwpoison,
-			 * so there's a race. We need to make sure that the
-			 * source page should be freed back to buddy before
-			 * setting PG_hwpoison.
-			 */
-			if (!is_free_buddy_page(page))
-				drain_all_pages(page_zone(page));
 			SetPageHWPoison(page);
-			if (!is_free_buddy_page(page))
-				pr_info("soft offline: %#lx: page leaked\n",
-					pfn);
 			atomic_long_inc(&num_poisoned_pages);
 		}
 	} else {
@@ -1691,14 +1678,6 @@ int soft_offline_page(struct page *page, int flags)
 
 	get_online_mems();
 
-	/*
-	 * Isolate the page, so that it doesn't get reallocated if it
-	 * was free. This flag should be kept set until the source page
-	 * is freed and PG_hwpoison on it is set.
-	 */
-	if (get_pageblock_migratetype(page) != MIGRATE_ISOLATE)
-		set_migratetype_isolate(page, true);
-
 	ret = get_any_page(page, pfn, flags);
 	put_online_mems();
 	if (ret > 0) { /* for in-use pages */
@@ -1717,6 +1696,5 @@ int soft_offline_page(struct page *page, int flags)
 			atomic_long_inc(&num_poisoned_pages);
 		}
 	}
-	unset_migratetype_isolate(page, MIGRATE_MOVABLE);
 	return ret;
 }
diff --git a/mm/migrate.c b/mm/migrate.c
index 85e04268603143..46b9ab2f4a38c9 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -906,7 +906,7 @@ static int __unmap_and_move(struct page *page, struct page *newpage,
  */
 static int unmap_and_move(new_page_t get_new_page, free_page_t put_new_page,
 			unsigned long private, struct page *page, int force,
-			enum migrate_mode mode)
+			enum migrate_mode mode, enum migrate_reason reason)
 {
 	int rc = 0;
 	int *result = NULL;
@@ -937,7 +937,8 @@ static int unmap_and_move(new_page_t get_new_page, free_page_t put_new_page,
 		list_del(&page->lru);
 		dec_zone_page_state(page, NR_ISOLATED_ANON +
 				page_is_file_cache(page));
-		putback_lru_page(page);
+		if (reason != MR_MEMORY_FAILURE)
+			putback_lru_page(page);
 	}
 
 	/*
@@ -1110,7 +1111,8 @@ int migrate_pages(struct list_head *from, new_page_t get_new_page,
 						pass > 2, mode);
 		else
 			rc = unmap_and_move(get_new_page, put_new_page,
-					private, page, pass > 2, mode);
+					private, page, pass > 2, mode,
+					reason);
 
 		switch(rc) {
 		case -ENOMEM:
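
For context, a rough sketch of how the soft-offline success path looks once migrate_reason is plumbed through: with reason == MR_MEMORY_FAILURE, unmap_and_move() no longer calls putback_lru_page() on the migrated source page, so the caller can set PG_hwpoison directly without the drain_all_pages()/MIGRATE_ISOLATE dance removed above. This is an illustrative sketch, not code from the patch; the helper name and the new_page callback parameter are placeholders, and the migrate_pages() prototype of this era (get_new_page, put_new_page, private, mode, reason) is assumed.

/* Illustrative sketch only, not part of the patch. */
static int soft_offline_migrate_sketch(struct page *page,
				       struct list_head *pagelist,
				       new_page_t new_page)
{
	/*
	 * reason == MR_MEMORY_FAILURE asks unmap_and_move() to skip
	 * putback_lru_page() on success, so the source page cannot get
	 * trapped in a pagevec with its freeing deferred.
	 */
	int ret = migrate_pages(pagelist, new_page, NULL, 0,
				MIGRATE_SYNC, MR_MEMORY_FAILURE);

	if (!ret) {
		/*
		 * The source page is still held here and is not on any LRU,
		 * so PG_hwpoison can be set right away.
		 */
		SetPageHWPoison(page);
		atomic_long_inc(&num_poisoned_pages);
	}
	return ret;
}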