On Wed, 24 Apr 2024 13:55:32 +0200 David Hildenbrand <[email protected]> wrote:
> On 24.04.24 00:55, Matthew Wilcox (Oracle) wrote:
> > All callers are now converted, delete this compatibility wrapper.
> >
For some reason, this breaks the build:
mm/hugetlb.c: In function 'hugetlb_page_mapping_lock_write':
mm/hugetlb.c:2164:41: error: implicit declaration of function 'page_mapping'; did you mean 'page_mapped'? [-Werror=implicit-function-declaration]
 2164 |         struct address_space *mapping = page_mapping(hpage);
      |                                         ^~~~~~~~~~~~
      |                                         page_mapped
mm/hugetlb.c:2164:41: error: initialization of 'struct address_space *' from 'int' makes pointer from integer without a cast [-Werror=int-conversion]
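
(The second error is fallout from the first: with no prototype for
page_mapping() in scope, the compiler's implicit-declaration rule assumes
the function returns int, so initializing the pointer from its result
becomes an int-to-pointer conversion.  A minimal standalone reproducer,
not kernel code:

	struct address_space;

	struct address_space *lookup_mapping(void *hpage)
	{
		/* no prototype in scope: assumed to be "int page_mapping()" */
		struct address_space *mapping = page_mapping(hpage);

		return mapping;
	}

Compiling this with gcc -c -Werror=implicit-function-declaration
-Werror=int-conversion produces the same pair of diagnostics.)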
I'll disable "mm: Remove page_mapping()" pending review of the below,
please.
From: Andrew Morton <[email protected]>
Subject: mm: convert hugetlb_page_mapping_lock_write() to hugetlb_folio_mapping_lock_write
Date: Wed Apr 24 04:20:30 PM PDT 2024
Convert this to use folios, so we can remove page_mapping().
Cc: David Hildenbrand <[email protected]>
Cc: "Matthew Wilcox (Oracle)" <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
---
 include/linux/hugetlb.h |    6 +++---
 mm/hugetlb.c            |    6 +++---
 mm/memory-failure.c     |    4 ++--
 mm/migrate.c            |    2 +-
 4 files changed, 9 insertions(+), 9 deletions(-)
--- a/mm/hugetlb.c~mm-convert-hugetlb_page_mapping_lock_write-to-hugetlb_folio_mapping_lock_write
+++ a/mm/hugetlb.c
@@ -2155,13 +2155,13 @@ static bool prep_compound_gigantic_folio
 /*
  * Find and lock address space (mapping) in write mode.
  *
- * Upon entry, the page is locked which means that page_mapping() is
+ * Upon entry, the folio is locked which means that folio_mapping() is
  * stable.  Due to locking order, we can only trylock_write.  If we can
  * not get the lock, simply return NULL to caller.
  */
-struct address_space *hugetlb_page_mapping_lock_write(struct page *hpage)
+struct address_space *hugetlb_folio_mapping_lock_write(struct folio *folio)
 {
-	struct address_space *mapping = page_mapping(hpage);
+	struct address_space *mapping = folio_mapping(folio);
 
 	if (!mapping)
 		return mapping;
--- a/mm/memory-failure.c~mm-convert-hugetlb_page_mapping_lock_write-to-hugetlb_folio_mapping_lock_write
+++ a/mm/memory-failure.c
@@ -1595,7 +1595,7 @@ static bool hwpoison_user_mappings(struc
 	 * XXX: the dirty test could be racy: set_page_dirty() may not always
 	 * be called inside page lock (it's recommended but not enforced).
 	 */
-	mapping = page_mapping(hpage);
+	mapping = folio_mapping(folio);
 	if (!(flags & MF_MUST_KILL) && !PageDirty(hpage) && mapping &&
 	    mapping_can_writeback(mapping)) {
 		if (page_mkclean(hpage)) {
@@ -1622,7 +1622,7 @@ static bool hwpoison_user_mappings(struc
 		 * TTU_RMAP_LOCKED to indicate we have taken the lock
 		 * at this higher level.
 		 */
-		mapping = hugetlb_page_mapping_lock_write(hpage);
+		mapping = hugetlb_folio_mapping_lock_write(folio);
 		if (mapping) {
 			try_to_unmap(folio, ttu|TTU_RMAP_LOCKED);
 			i_mmap_unlock_write(mapping);
--- a/include/linux/hugetlb.h~mm-convert-hugetlb_page_mapping_lock_write-to-hugetlb_folio_mapping_lock_write
+++ a/include/linux/hugetlb.h
@@ -178,7 +178,7 @@ bool hugetlbfs_pagecache_present(struct
 				struct vm_area_struct *vma,
 				unsigned long address);
 
-struct address_space *hugetlb_page_mapping_lock_write(struct page *hpage);
+struct address_space *hugetlb_folio_mapping_lock_write(struct folio *folio);
 
 extern int sysctl_hugetlb_shm_group;
 extern struct list_head huge_boot_pages[MAX_NUMNODES];
@@ -297,8 +297,8 @@ static inline unsigned long hugetlb_tota
 	return 0;
 }
 
-static inline struct address_space *hugetlb_page_mapping_lock_write(
-					struct page *hpage)
+static inline struct address_space *hugetlb_folio_mapping_lock_write(
+					struct folio *folio)
 {
 	return NULL;
 }
--- a/mm/migrate.c~mm-convert-hugetlb_page_mapping_lock_write-to-hugetlb_folio_mapping_lock_write
+++ a/mm/migrate.c
@@ -1425,7 +1425,7 @@ static int unmap_and_move_huge_page(new_
 		 * semaphore in write mode here and set TTU_RMAP_LOCKED
 		 * to let lower levels know we have taken the lock.
 		 */
-		mapping = hugetlb_page_mapping_lock_write(&src->page);
+		mapping = hugetlb_folio_mapping_lock_write(src);
 		if (unlikely(!mapping))
 			goto unlock_put_anon;
_
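
For reference, a sketch of the shape the converted function takes, per the
comment in the mm/hugetlb.c hunk (illustrative only; the trylock tail is
not shown in the hunk above, and the i_mmap_trylock_write() call is
assumed from the locking-order comment, not from the diff):

	struct address_space *hugetlb_folio_mapping_lock_write(struct folio *folio)
	{
		/* Caller holds the folio lock, so folio_mapping() is stable. */
		struct address_space *mapping = folio_mapping(folio);

		if (!mapping)
			return mapping;

		/* Lock order forbids a blocking acquire here: trylock or give up. */
		if (i_mmap_trylock_write(mapping))
			return mapping;

		return NULL;
	}

A successful return leaves i_mmap_rwsem held for write, which is why the
callers in the mm/memory-failure.c and mm/migrate.c hunks pass
TTU_RMAP_LOCKED down to try_to_unmap() and later drop the lock themselves
with i_mmap_unlock_write().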