At this point MIGRATEPAGE_SUCCESS is misnamed for all folio users, and
now that we removed MIGRATEPAGE_UNMAP, it's really the only "success"
return value that the code uses and expects.
Let's just get rid of MIGRATEPAGE_SUCCESS completely and just use "0"
for success.

Signed-off-by: David Hildenbrand <da...@redhat.com>
---
 arch/powerpc/platforms/pseries/cmm.c |  2 +-
 drivers/misc/vmw_balloon.c           |  4 +--
 drivers/virtio/virtio_balloon.c      |  2 +-
 fs/aio.c                             |  2 +-
 fs/btrfs/inode.c                     |  4 +--
 fs/hugetlbfs/inode.c                 |  4 +--
 fs/jfs/jfs_metapage.c                |  8 +++---
 include/linux/migrate.h              | 10 +------
 mm/migrate.c                         | 40 +++++++++++++---------------
 mm/migrate_device.c                  |  2 +-
 mm/zsmalloc.c                        |  4 +--
 11 files changed, 36 insertions(+), 46 deletions(-)

diff --git a/arch/powerpc/platforms/pseries/cmm.c b/arch/powerpc/platforms/pseries/cmm.c
index 5e0a718d1be7b..0823fa2da1516 100644
--- a/arch/powerpc/platforms/pseries/cmm.c
+++ b/arch/powerpc/platforms/pseries/cmm.c
@@ -545,7 +545,7 @@ static int cmm_migratepage(struct balloon_dev_info *b_dev_info,
 	/* balloon page list reference */
 	put_page(page);

-	return MIGRATEPAGE_SUCCESS;
+	return 0;
 }

 static void cmm_balloon_compaction_init(void)
diff --git a/drivers/misc/vmw_balloon.c b/drivers/misc/vmw_balloon.c
index 6653fc53c951c..6df51ee8db621 100644
--- a/drivers/misc/vmw_balloon.c
+++ b/drivers/misc/vmw_balloon.c
@@ -1806,7 +1806,7 @@ static int vmballoon_migratepage(struct balloon_dev_info *b_dev_info,
 		 * the list after acquiring the lock.
 		 */
 		get_page(newpage);
-		ret = MIGRATEPAGE_SUCCESS;
+		ret = 0;
 	}

 	/* Update the balloon list under the @pages_lock */
@@ -1817,7 +1817,7 @@ static int vmballoon_migratepage(struct balloon_dev_info *b_dev_info,
 	 * If we succeed just insert it to the list and update the statistics
 	 * under the lock.
 	 */
-	if (ret == MIGRATEPAGE_SUCCESS) {
+	if (!ret) {
 		balloon_page_insert(&b->b_dev_info, newpage);
 		__count_vm_event(BALLOON_MIGRATE);
 	}
diff --git a/drivers/virtio/virtio_balloon.c b/drivers/virtio/virtio_balloon.c
index e299e18346a30..eae65136cdfb5 100644
--- a/drivers/virtio/virtio_balloon.c
+++ b/drivers/virtio/virtio_balloon.c
@@ -875,7 +875,7 @@ static int virtballoon_migratepage(struct balloon_dev_info *vb_dev_info,
 	balloon_page_finalize(page);
 	put_page(page); /* balloon reference */

-	return MIGRATEPAGE_SUCCESS;
+	return 0;
 }
 #endif /* CONFIG_BALLOON_COMPACTION */

diff --git a/fs/aio.c b/fs/aio.c
index 7fc7b6221312c..059e03cfa088c 100644
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -445,7 +445,7 @@ static int aio_migrate_folio(struct address_space *mapping, struct folio *dst,
 	folio_get(dst);

 	rc = folio_migrate_mapping(mapping, dst, src, 1);
-	if (rc != MIGRATEPAGE_SUCCESS) {
+	if (rc) {
 		folio_put(dst);
 		goto out_unlock;
 	}
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index b77dd22b8cdbe..1d64fee6f59e6 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -7411,7 +7411,7 @@ static int btrfs_migrate_folio(struct address_space *mapping,
 {
 	int ret = filemap_migrate_folio(mapping, dst, src, mode);

-	if (ret != MIGRATEPAGE_SUCCESS)
+	if (ret)
 		return ret;

 	if (folio_test_ordered(src)) {
@@ -7419,7 +7419,7 @@ static int btrfs_migrate_folio(struct address_space *mapping,
 		folio_set_ordered(dst);
 	}

-	return MIGRATEPAGE_SUCCESS;
+	return 0;
 }
 #else
 #define btrfs_migrate_folio NULL
diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
index 09d4baef29cf9..34d496a2b7de6 100644
--- a/fs/hugetlbfs/inode.c
+++ b/fs/hugetlbfs/inode.c
@@ -1052,7 +1052,7 @@ static int hugetlbfs_migrate_folio(struct address_space *mapping,
 	int rc;

 	rc = migrate_huge_page_move_mapping(mapping, dst, src);
-	if (rc != MIGRATEPAGE_SUCCESS)
+	if (rc)
 		return rc;

 	if (hugetlb_folio_subpool(src)) {
@@ -1063,7 +1063,7 @@ static int hugetlbfs_migrate_folio(struct address_space *mapping,

 	folio_migrate_flags(dst, src);

-	return MIGRATEPAGE_SUCCESS;
+	return 0;
 }
 #else
 #define hugetlbfs_migrate_folio NULL
diff --git a/fs/jfs/jfs_metapage.c b/fs/jfs/jfs_metapage.c
index b98cf3bb6c1fe..871cf4fb36366 100644
--- a/fs/jfs/jfs_metapage.c
+++ b/fs/jfs/jfs_metapage.c
@@ -169,7 +169,7 @@ static int __metapage_migrate_folio(struct address_space *mapping,
 	}

 	rc = filemap_migrate_folio(mapping, dst, src, mode);
-	if (rc != MIGRATEPAGE_SUCCESS)
+	if (rc)
 		return rc;

 	for (i = 0; i < MPS_PER_PAGE; i++) {
@@ -199,7 +199,7 @@ static int __metapage_migrate_folio(struct address_space *mapping,
 		}
 	}

-	return MIGRATEPAGE_SUCCESS;
+	return 0;
 }
 #endif /* CONFIG_MIGRATION */

@@ -242,7 +242,7 @@ static int __metapage_migrate_folio(struct address_space *mapping,
 		return -EAGAIN;

 	rc = filemap_migrate_folio(mapping, dst, src, mode);
-	if (rc != MIGRATEPAGE_SUCCESS)
+	if (rc)
 		return rc;

 	if (unlikely(insert_metapage(dst, mp)))
@@ -253,7 +253,7 @@ static int __metapage_migrate_folio(struct address_space *mapping,
 	mp->folio = dst;
 	remove_metapage(src, mp);

-	return MIGRATEPAGE_SUCCESS;
+	return 0;
 }
 #endif /* CONFIG_MIGRATION */

diff --git a/include/linux/migrate.h b/include/linux/migrate.h
index 40f2b5a37efbb..02f11704fb686 100644
--- a/include/linux/migrate.h
+++ b/include/linux/migrate.h
@@ -12,13 +12,6 @@ typedef void free_folio_t(struct folio *folio, unsigned long private);

 struct migration_target_control;

-/*
- * Return values from addresss_space_operations.migratepage():
- *  - negative errno on page migration failure;
- *  - zero on page migration success;
- */
-#define MIGRATEPAGE_SUCCESS		0
-
 /**
  * struct movable_operations - Driver page migration
  * @isolate_page:
@@ -34,8 +27,7 @@ struct migration_target_control;
  * @src page. The driver should copy the contents of the
  * @src page to the @dst page and set up the fields of @dst page.
  * Both pages are locked.
- * If page migration is successful, the driver should
- * return MIGRATEPAGE_SUCCESS.
+ * If page migration is successful, the driver should return 0.
  * If the driver cannot migrate the page at the moment, it can return
  * -EAGAIN. The VM interprets this as a temporary migration failure and
  * will retry it later. Any other error value is a permanent migration
diff --git a/mm/migrate.c b/mm/migrate.c
index e9dacf1028dc7..2db4974178e6a 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -209,18 +209,17 @@ static void putback_movable_ops_page(struct page *page)
  * src and dst are also released by migration core. These pages will not be
  * folios in the future, so that must be reworked.
  *
- * Returns MIGRATEPAGE_SUCCESS on success, otherwise a negative error
- * code.
+ * Returns 0 on success, otherwise a negative error code.
  */
 static int migrate_movable_ops_page(struct page *dst, struct page *src,
 		enum migrate_mode mode)
 {
-	int rc = MIGRATEPAGE_SUCCESS;
+	int rc;

 	VM_WARN_ON_ONCE_PAGE(!page_has_movable_ops(src), src);
 	VM_WARN_ON_ONCE_PAGE(!PageMovableOpsIsolated(src), src);
 	rc = page_movable_ops(src)->migrate_page(dst, src, mode);
-	if (rc == MIGRATEPAGE_SUCCESS)
+	if (!rc)
 		ClearPageMovableOpsIsolated(src);
 	return rc;
 }
@@ -565,7 +564,7 @@ static int __folio_migrate_mapping(struct address_space *mapping,
 		if (folio_test_swapbacked(folio))
 			__folio_set_swapbacked(newfolio);

-		return MIGRATEPAGE_SUCCESS;
+		return 0;
 	}

 	oldzone = folio_zone(folio);
@@ -666,7 +665,7 @@ static int __folio_migrate_mapping(struct address_space *mapping,
 	}
 	local_irq_enable();

-	return MIGRATEPAGE_SUCCESS;
+	return 0;
 }

 int folio_migrate_mapping(struct address_space *mapping,
@@ -715,7 +714,7 @@ int migrate_huge_page_move_mapping(struct address_space *mapping,

 	xas_unlock_irq(&xas);

-	return MIGRATEPAGE_SUCCESS;
+	return 0;
 }

 /*
@@ -831,14 +830,14 @@ static int __migrate_folio(struct address_space *mapping, struct folio *dst,
 		return rc;

 	rc = __folio_migrate_mapping(mapping, dst, src, expected_count);
-	if (rc != MIGRATEPAGE_SUCCESS)
+	if (rc)
 		return rc;

 	if (src_private)
 		folio_attach_private(dst, folio_detach_private(src));

 	folio_migrate_flags(dst, src);
-	return MIGRATEPAGE_SUCCESS;
+	return 0;
 }

 /**
@@ -945,7 +944,7 @@ static int __buffer_migrate_folio(struct address_space *mapping,
 	}

 	rc = filemap_migrate_folio(mapping, dst, src, mode);
-	if (rc != MIGRATEPAGE_SUCCESS)
+	if (rc)
 		goto unlock_buffers;

 	bh = head;
@@ -1049,7 +1048,7 @@ static int fallback_migrate_folio(struct address_space *mapping,
  *
  * Return value:
  *   < 0 - error code
- *  MIGRATEPAGE_SUCCESS - success
+ *   0 - success
  */
 static int move_to_new_folio(struct folio *dst, struct folio *src,
 				enum migrate_mode mode)
@@ -1077,7 +1076,7 @@ static int move_to_new_folio(struct folio *dst, struct folio *src,
 	else
 		rc = fallback_migrate_folio(mapping, dst, src, mode);

-	if (rc == MIGRATEPAGE_SUCCESS) {
+	if (!rc) {
 		/*
 		 * For pagecache folios, src->mapping must be cleared before src
 		 * is freed. Anonymous folios must stay anonymous until freed.
@@ -1427,7 +1426,7 @@ static int unmap_and_move_huge_page(new_folio_t get_new_folio,
 	if (folio_ref_count(src) == 1) {
 		/* page was freed from under us. So we are done. */
 		folio_putback_hugetlb(src);
-		return MIGRATEPAGE_SUCCESS;
+		return 0;
 	}

 	dst = get_new_folio(src, private);
@@ -1490,8 +1489,7 @@ static int unmap_and_move_huge_page(new_folio_t get_new_folio,
 		rc = move_to_new_folio(dst, src, mode);

 	if (page_was_mapped)
-		remove_migration_ptes(src,
-			rc == MIGRATEPAGE_SUCCESS ? dst : src, 0);
+		remove_migration_ptes(src, !rc ? dst : src, 0);

unlock_put_anon:
 	folio_unlock(dst);
@@ -1500,7 +1498,7 @@ static int unmap_and_move_huge_page(new_folio_t get_new_folio,
 	if (anon_vma)
 		put_anon_vma(anon_vma);

-	if (rc == MIGRATEPAGE_SUCCESS) {
+	if (!rc) {
 		move_hugetlb_state(src, dst, reason);
 		put_new_folio = NULL;
 	}
@@ -1508,7 +1506,7 @@ static int unmap_and_move_huge_page(new_folio_t get_new_folio,
out_unlock:
 	folio_unlock(src);
out:
-	if (rc == MIGRATEPAGE_SUCCESS)
+	if (!rc)
 		folio_putback_hugetlb(src);
 	else if (rc != -EAGAIN)
 		list_move_tail(&src->lru, ret);
@@ -1618,7 +1616,7 @@ static int migrate_hugetlbs(struct list_head *from, new_folio_t get_new_folio,
 					reason, ret_folios);
 		/*
 		 * The rules are:
-		 *	Success: hugetlb folio will be put back
+		 *	0: hugetlb folio will be put back
 		 *	-EAGAIN: stay on the from list
 		 *	-ENOMEM: stay on the from list
 		 *	Other errno: put on ret_folios list
@@ -1635,7 +1633,7 @@ static int migrate_hugetlbs(struct list_head *from, new_folio_t get_new_folio,
 			retry++;
 			nr_retry_pages += nr_pages;
 			break;
-		case MIGRATEPAGE_SUCCESS:
+		case 0:
 			stats->nr_succeeded += nr_pages;
 			break;
 		default:
@@ -1689,7 +1687,7 @@ static void migrate_folios_move(struct list_head *src_folios,
 				reason, ret_folios);
 		/*
 		 * The rules are:
-		 *	Success: folio will be freed
+		 *	0: folio will be freed
 		 *	-EAGAIN: stay on the unmap_folios list
 		 *	Other errno: put on ret_folios list
 		 */
@@ -1699,7 +1697,7 @@ static void migrate_folios_move(struct list_head *src_folios,
 			*thp_retry += is_thp;
 			*nr_retry_pages += nr_pages;
 			break;
-		case MIGRATEPAGE_SUCCESS:
+		case 0:
 			stats->nr_succeeded += nr_pages;
 			stats->nr_thp_succeeded += is_thp;
 			break;
diff --git a/mm/migrate_device.c b/mm/migrate_device.c
index e05e14d6eacdb..abd9f6850db65 100644
--- a/mm/migrate_device.c
+++ b/mm/migrate_device.c
@@ -778,7 +778,7 @@ static void __migrate_device_pages(unsigned long *src_pfns,
 		if (migrate && migrate->fault_page == page)
 			extra_cnt = 1;
 		r = folio_migrate_mapping(mapping, newfolio, folio, extra_cnt);
-		if (r != MIGRATEPAGE_SUCCESS)
+		if (r)
 			src_pfns[i] &= ~MIGRATE_PFN_MIGRATE;
 		else
 			folio_migrate_flags(newfolio, folio);
diff --git a/mm/zsmalloc.c b/mm/zsmalloc.c
index 2c5e56a653544..84eb91d47a226 100644
--- a/mm/zsmalloc.c
+++ b/mm/zsmalloc.c
@@ -1746,7 +1746,7 @@ static int zs_page_migrate(struct page *newpage, struct page *page,
 	 * instead.
 	 */
 	if (!zpdesc->zspage)
-		return MIGRATEPAGE_SUCCESS;
+		return 0;

 	/* The page is locked, so this pointer must remain valid */
 	zspage = get_zspage(zpdesc);
@@ -1813,7 +1813,7 @@ static int zs_page_migrate(struct page *newpage, struct page *page,
 	reset_zpdesc(zpdesc);
 	zpdesc_put(zpdesc);

-	return MIGRATEPAGE_SUCCESS;
+	return 0;
 }

 static void zs_page_putback(struct page *page)
-- 
2.50.1
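
For reference only, not part of the patch: a minimal sketch of what a
migrate_folio callback looks like once 0 is the only success value, i.e.
the usual 0-on-success / negative-errno convention that callers now test
with a plain "if (rc)". The name examplefs_migrate_folio is made up for
illustration; filemap_migrate_folio() is the existing helper that btrfs
and jfs call in the hunks above.

static int examplefs_migrate_folio(struct address_space *mapping,
				   struct folio *dst, struct folio *src,
				   enum migrate_mode mode)
{
	int ret = filemap_migrate_folio(mapping, dst, src, mode);

	if (ret)	/* negative errno, e.g. -EAGAIN to retry later */
		return ret;

	/* ... carry over any fs-private folio state from src to dst ... */

	return 0;	/* success; previously MIGRATEPAGE_SUCCESS */
}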