> @@ -676,35 +686,63 @@ static void hv_mem_hot_add(unsigned long start, unsigned long size,
> 
>  static void hv_online_page(struct page *pg)
>  {
> -     struct list_head *cur;
>       struct hv_hotadd_state *has;
> +     struct hv_hotadd_gap *gap;
>       unsigned long cur_start_pgp;
>       unsigned long cur_end_pgp;
> +     bool is_gap = false;
> 
> -     list_for_each(cur, &dm_device.ha_region_list) {
> -             has = list_entry(cur, struct hv_hotadd_state, list);
> +     list_for_each_entry(has, &dm_device.ha_region_list, list) {
>               cur_start_pgp = (unsigned long)
> +                     pfn_to_page(has->start_pfn);
> +             cur_end_pgp = (unsigned long)pfn_to_page(has->end_pfn);
> +
> +             /* The page belongs to a different HAS. */
> +             if (((unsigned long)pg < cur_start_pgp) ||
> +                 ((unsigned long)pg >= cur_end_pgp))
> +                     continue;
> +
> +             cur_start_pgp = (unsigned long)
>                       pfn_to_page(has->covered_start_pfn);
>               cur_end_pgp = (unsigned long)pfn_to_page(has->covered_end_pfn);
> 
> -             if (((unsigned long)pg >= cur_start_pgp) &&
> -                     ((unsigned long)pg < cur_end_pgp)) {
> -                     /*
> -                      * This frame is currently backed; online the
> -                      * page.
> -                      */
> -                     __online_page_set_limits(pg);
> -                     __online_page_increment_counters(pg);
> -                     __online_page_free(pg);
> +             /* The page is not backed. */
> +             if (((unsigned long)pg < cur_start_pgp) ||
> +                 ((unsigned long)pg >= cur_end_pgp))
> +                     continue;
> +
> +             /* Check for gaps. */
> +             list_for_each_entry(gap, &has->gap_list, list) {
> +                     cur_start_pgp = (unsigned long)
> +                             pfn_to_page(gap->start_pfn);
> +                     cur_end_pgp = (unsigned long)
> +                             pfn_to_page(gap->end_pfn);
> +                     if (((unsigned long)pg >= cur_start_pgp) &&
> +                         ((unsigned long)pg < cur_end_pgp)) {
> +                             is_gap = true;
> +                             break;
> +                     }
>               }
> +
> +             if (is_gap)
> +                     break;
> +
> +             /* This frame is currently backed; online the page. */
> +             __online_page_set_limits(pg);
> +             __online_page_increment_counters(pg);
> +             __online_page_free(pg);
> +             break;
>       }
>  }
> 

We may need to add similar logic to check for gaps in hv_bring_pgs_online().
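To make that easier, the two range checks could be factored out into a small
helper shared by both paths. A rough sketch of what I mean (the helper name
has_pfn_is_backed() is made up here for illustration; it is not in the patch):

static bool has_pfn_is_backed(struct hv_hotadd_state *has, unsigned long pfn)
{
	struct hv_hotadd_gap *gap;

	/* The pfn lies outside the covered range; it is not backed. */
	if ((pfn < has->covered_start_pfn) || (pfn >= has->covered_end_pfn))
		return false;

	/* The pfn falls inside a recorded gap; it is not backed either. */
	list_for_each_entry(gap, &has->gap_list, list) {
		if ((pfn >= gap->start_pfn) && (pfn < gap->end_pfn))
			return false;
	}

	return true;
}

hv_online_page() and hv_bring_pgs_online() would then only online a page when
has_pfn_is_backed() returns true, instead of open-coding the checks in both
places.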

[...]
> @@ -834,13 +881,19 @@ static unsigned long process_hot_add(unsigned long pg_start,
>                                       unsigned long rg_size)
>  {
>       struct hv_hotadd_state *ha_region = NULL;
> +     int covered;
> 
>       if (pfn_cnt == 0)
>               return 0;
> 
> -     if (!dm_device.host_specified_ha_region)
> -             if (pfn_covered(pg_start, pfn_cnt))
> +     if (!dm_device.host_specified_ha_region) {
> +             covered = pfn_covered(pg_start, pfn_cnt);
> +             if (covered < 0)
> +                     return 0;

If the hot-add pages aren't covered by any region, then shouldn't it fall 
through instead of returning?
That way the new ha_region can be added to the list and we hot-add the pages 
accordingly.
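
Something like this is what I have in mind (just a sketch, assuming
pfn_covered() keeps signaling "covered" with a positive return value):

	if (!dm_device.host_specified_ha_region) {
		covered = pfn_covered(pg_start, pfn_cnt);
		if (covered > 0)
			goto do_pg_range;

		/*
		 * Not covered: fall through so that a new ha_region is
		 * added to ha_region_list and the pages are hot-added
		 * through it, instead of returning 0 here.
		 */
	}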
