On 04/06/2016 12:53 AM, Hugh Dickins wrote:


> +static void shmem_recovery_work(struct work_struct *work)
> +{
> +     struct recovery *recovery;
> +     struct shmem_inode_info *info;
> +     struct address_space *mapping;
> +     struct page *page;
> +     struct page *head = NULL;
> +     int error = -ENOENT;
> +
> +     recovery = container_of(work, struct recovery, work);
> +     info = SHMEM_I(recovery->inode);
> +     if (!shmem_work_still_useful(recovery)) {
> +             shr_stats(work_too_late);
> +             goto out;
> +     }
> +
> +     /* Are we resuming from an earlier partially successful attempt? */
> +     mapping = recovery->inode->i_mapping;
> +     spin_lock_irq(&mapping->tree_lock);
> +     page = shmem_clear_tag_hugehole(mapping, recovery->head_index);
> +     if (page)
> +             head = team_head(page);
> +     spin_unlock_irq(&mapping->tree_lock);
> +     if (head) {
> +             /* Serialize with shrinker so it won't mess with our range */
> +             spin_lock(&shmem_shrinklist_lock);
> +             spin_unlock(&shmem_shrinklist_lock);
> +     }
> +
> +     /* If team is now complete, no tag and head would be found above */
> +     page = recovery->page;
> +     if (PageTeam(page))
> +             head = team_head(page);
> +
> +     /* Get a reference to the head of the team already being assembled */
> +     if (head) {
> +             if (!get_page_unless_zero(head))
> +                     head = NULL;
> +             else if (!PageTeam(head) || head->mapping != mapping ||
> +                             head->index != recovery->head_index) {
> +                     put_page(head);
> +                     head = NULL;
> +             }
> +     }
> +
> +     if (head) {
> +             /* We are resuming work from a previous partial recovery */
> +             if (PageTeam(page))
> +                     shr_stats(resume_teamed);
> +             else
> +                     shr_stats(resume_tagged);
> +     } else {
> +             gfp_t gfp = mapping_gfp_mask(mapping);
> +             /*
> +              * XXX: Note that with swapin readahead, page_to_nid(page) will
> +              * often choose an unsuitable NUMA node: something to fix soon,
> +              * but not an immediate blocker.
> +              */
> +             head = __alloc_pages_node(page_to_nid(page),
> +                     gfp | __GFP_NOWARN | __GFP_THISNODE, HPAGE_PMD_ORDER);
> +             if (!head) {
> +                     shr_stats(huge_failed);
> +                     error = -ENOMEM;
> +                     goto out;
> +             }

Should this head be marked PageTeam? Because in patch 27/31, when it is given
as a hint to shmem_getpage_gfp():

                hugehint = NULL;
+               if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) &&
+                   sgp == SGP_TEAM && *pagep) {
+                       struct page *head;
+
+                       if (!get_page_unless_zero(*pagep)) {
+                               error = -ENOENT;
+                               goto decused;
+                       }
+                       page = *pagep;
+                       lock_page(page);
+                       head = page - (index & (HPAGE_PMD_NR-1));     

we always fail, because:
+                       if (!PageTeam(head)) {
+                               error = -ENOENT;
+                               goto decused;
+                       }
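
To spell out the failure I have in mind, condensed into one hypothetical
paraphrase of the two patches above (not the verbatim code):

		/* shmem_recovery_work(): the block comes fresh from the buddy
		 * allocator, so PG_team is clear on every one of its pages */
		head = __alloc_pages_node(page_to_nid(page),
			gfp | __GFP_NOWARN | __GFP_THISNODE, HPAGE_PMD_ORDER);

		/* shmem_getpage_gfp(), sgp == SGP_TEAM, *pagep in that block */
		if (!PageTeam(head))
			error = -ENOENT;	/* taken on every attempt? */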


> +             if (!shmem_work_still_useful(recovery)) {
> +                     __free_pages(head, HPAGE_PMD_ORDER);
> +                     shr_stats(huge_too_late);
> +                     goto out;
> +             }
> +             split_page(head, HPAGE_PMD_ORDER);
> +             get_page(head);
> +             shr_stats(huge_alloced);
> +     }
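
If that reading is right, would it be enough to mark the head here, right
after the split? Untested sketch, assuming the SetPageTeam() helper from
earlier in this series, and that head->mapping and head->index get filled
in when the head is inserted into the page cache:

		split_page(head, HPAGE_PMD_ORDER);
		get_page(head);
		SetPageTeam(head);	/* so the SGP_TEAM hint check accepts it */
		shr_stats(huge_alloced);

Or is PageTeam meant to be set later in the recovery path, before the head
is offered as a hint?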


Thanks,
Mika
