You're playing games with separating interrupt disabling from spinlocks.
Have you got any feedback from -rt people on this?
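
To spell out what's changing (a minimal before/after sketch, not the
patch itself):

        /* before: irq state saved and restored by the lock op itself */
        spin_lock_irqsave(&pgdat->lru_lock, flags);
        ...
        spin_unlock_irqrestore(&pgdat->lru_lock, flags);

        /* after: interrupts disabled separately, lock taken bare */
        local_irq_disable();
        spin_lock(&pgdat->lru_lock);
        ...
        spin_unlock(&pgdat->lru_lock);
        local_irq_enable();

On PREEMPT_RT, spinlock_t is a sleeping lock and spin_lock_irqsave()
does not actually disable interrupts, so a bare local_irq_disable()
followed by spin_lock() is exactly the kind of construct -rt cannot
tolerate.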

On Sat, Aug 01, 2020 at 12:09:17PM +0800, Alex Shi wrote:
> lru_lock and page cache xa_lock have no ordering dependency on each
> other in the current sequence, so taking them together isn't necessary.
> Let's narrow the lru locking, but keep the local_irq_disable() to block
> interrupt re-entry and protect the statistics updates.
> 
> Hugh Dickins' point: split_huge_page_to_list() was already silly to be
> using the _irqsave variant: it has just been taking sleeping locks, so
> it would already be broken if entered with interrupts disabled. So we
> can save passing the flags argument down to __split_huge_page().
> 
> Signed-off-by: Alex Shi <[email protected]>
> Signed-off-by: Wei Yang <[email protected]>
> Reviewed-by: Kirill A. Shutemov <[email protected]>
> Cc: Hugh Dickins <[email protected]>
> Cc: Kirill A. Shutemov <[email protected]>
> Cc: Andrea Arcangeli <[email protected]>
> Cc: Johannes Weiner <[email protected]>
> Cc: Matthew Wilcox <[email protected]>
> Cc: Andrew Morton <[email protected]>
> Cc: [email protected]
> Cc: [email protected]
> ---
>  mm/huge_memory.c | 25 +++++++++++++------------
>  1 file changed, 13 insertions(+), 12 deletions(-)
> 
> diff --git a/mm/huge_memory.c b/mm/huge_memory.c
> index dfe7378ab4be..070480e66bf4 100644
> --- a/mm/huge_memory.c
> +++ b/mm/huge_memory.c
> @@ -2398,7 +2398,7 @@ static void __split_huge_page_tail(struct page *head, int tail,
>  }
>  
>  static void __split_huge_page(struct page *page, struct list_head *list,
> -             pgoff_t end, unsigned long flags)
> +                           pgoff_t end)
>  {
>       struct page *head = compound_head(page);
>       pg_data_t *pgdat = page_pgdat(head);
> @@ -2407,8 +2407,6 @@ static void __split_huge_page(struct page *page, struct list_head *list,
>       unsigned long offset = 0;
>       int i;
>  
> -     lruvec = mem_cgroup_page_lruvec(head, pgdat);
> -
>       /* complete memcg works before add pages to LRU */
>       mem_cgroup_split_huge_fixup(head);
>  
> @@ -2420,6 +2418,11 @@ static void __split_huge_page(struct page *page, struct list_head *list,
>               xa_lock(&swap_cache->i_pages);
>       }
>  
> +     /* prevent PageLRU to go away from under us, and freeze lru stats */
> +     spin_lock(&pgdat->lru_lock);
> +
> +     lruvec = mem_cgroup_page_lruvec(head, pgdat);
> +
>       for (i = HPAGE_PMD_NR - 1; i >= 1; i--) {
>               __split_huge_page_tail(head, i, lruvec, list);
>               /* Some pages can be beyond i_size: drop them from page cache */
> @@ -2439,6 +2442,8 @@ static void __split_huge_page(struct page *page, struct list_head *list,
>       }
>  
>       ClearPageCompound(head);
> +     spin_unlock(&pgdat->lru_lock);
> +     /* Caller disabled irqs, so they are still disabled here */
>  
>       split_page_owner(head, HPAGE_PMD_ORDER);
>  
> @@ -2456,8 +2461,7 @@ static void __split_huge_page(struct page *page, struct list_head *list,
>               page_ref_add(head, 2);
>               xa_unlock(&head->mapping->i_pages);
>       }
> -
> -     spin_unlock_irqrestore(&pgdat->lru_lock, flags);
> +     local_irq_enable();
>  
>       remap_page(head);
>  
> @@ -2596,12 +2600,10 @@ bool can_split_huge_page(struct page *page, int *pextra_pins)
>  int split_huge_page_to_list(struct page *page, struct list_head *list)
>  {
>       struct page *head = compound_head(page);
> -     struct pglist_data *pgdata = NODE_DATA(page_to_nid(head));
>       struct deferred_split *ds_queue = get_deferred_split_queue(head);
>       struct anon_vma *anon_vma = NULL;
>       struct address_space *mapping = NULL;
>       int count, mapcount, extra_pins, ret;
> -     unsigned long flags;
>       pgoff_t end;
>  
>       VM_BUG_ON_PAGE(is_huge_zero_page(head), head);
> @@ -2662,9 +2664,8 @@ int split_huge_page_to_list(struct page *page, struct list_head *list)
>       unmap_page(head);
>       VM_BUG_ON_PAGE(compound_mapcount(head), head);
>  
> -     /* prevent PageLRU to go away from under us, and freeze lru stats */
> -     spin_lock_irqsave(&pgdata->lru_lock, flags);
> -
> +     /* block interrupt reentry in xa_lock and spinlock */
> +     local_irq_disable();
>       if (mapping) {
>               XA_STATE(xas, &mapping->i_pages, page_index(head));
>  
> @@ -2694,7 +2695,7 @@ int split_huge_page_to_list(struct page *page, struct list_head *list)
>                               __dec_node_page_state(head, NR_FILE_THPS);
>               }
>  
> -             __split_huge_page(page, list, end, flags);
> +             __split_huge_page(page, list, end);
>               if (PageSwapCache(head)) {
>                       swp_entry_t entry = { .val = page_private(head) };
>  
> @@ -2713,7 +2714,7 @@ int split_huge_page_to_list(struct page *page, struct list_head *list)
>               spin_unlock(&ds_queue->split_queue_lock);
>  fail:                if (mapping)
>                       xa_unlock(&mapping->i_pages);
> -             spin_unlock_irqrestore(&pgdata->lru_lock, flags);
> +             local_irq_enable();
>               remap_page(head);
>               ret = -EBUSY;
>       }
> -- 
> 1.8.3.1
> 
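
For reference, the lock nesting after this patch becomes, roughly (a
sketch read off the diff above, not verbatim kernel code):

        local_irq_disable();            /* split_huge_page_to_list() */
        xa_lock(&mapping->i_pages);     /* or swap_cache->i_pages */
        spin_lock(&pgdat->lru_lock);    /* __split_huge_page() */
        /* ... split tail pages, fix up the page cache ... */
        spin_unlock(&pgdat->lru_lock);
        xa_unlock(&mapping->i_pages);
        local_irq_enable();

so lru_lock now nests inside the page cache lock instead of surrounding
it, and interrupts stay off across the whole sequence.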
