Re: [PATCH 1/2] xen: add helpers for caching grant mapping pages

2020-12-07 Thread boris . ostrovsky


On 12/7/20 8:30 AM, Juergen Gross wrote:
> Instead of having similar helpers in multiple backend drivers use
> common helpers for caching pages allocated via gnttab_alloc_pages().
>
> Make use of those helpers in blkback and scsiback.
>
> Signed-off-by: Juergen Gross <jgross@suse.com>


Reviewed-by: Boris Ostrovsky <boris.ostrovsky@oracle.com>


> +
> +void gnttab_page_cache_shrink(struct gnttab_page_cache *cache, unsigned int 
> num)
> +{
> + struct page *page[10];
> + unsigned int i = 0;
> + unsigned long flags;
> +
> + spin_lock_irqsave(&cache->lock, flags);
> +
> + while (cache->num_pages > num) {
> + page[i] = list_first_entry(&cache->pages, struct page, lru);
> + list_del(&page[i]->lru);
> + cache->num_pages--;
> + if (++i == ARRAY_SIZE(page)) {
> + spin_unlock_irqrestore(&cache->lock, flags);
> + gnttab_free_pages(i, page);
> + i = 0;
> + spin_lock_irqsave(&cache->lock, flags);
> + }
> + }
> +
> + spin_unlock_irqrestore(&cache->lock, flags);
> +
> + if (i != 0)
> + gnttab_free_pages(i, page);
> +}


How about splitting cache->pages list into two lists (one @num long and the 
other holding the rest) and then batching gnttab_free_pages() from that first 
list? Then you won't have to deal with locking.


In fact, I am not even sure batching gains us much, I'd consider going 
one-by-one.


-boris




[PATCH 1/2] xen: add helpers for caching grant mapping pages

2020-12-07 Thread Juergen Gross
Instead of having similar helpers in multiple backend drivers use
common helpers for caching pages allocated via gnttab_alloc_pages().

Make use of those helpers in blkback and scsiback.

Signed-off-by: Juergen Gross <jgross@suse.com>
---
 drivers/block/xen-blkback/blkback.c | 89 ++---
 drivers/block/xen-blkback/common.h  |  4 +-
 drivers/block/xen-blkback/xenbus.c  |  6 +-
 drivers/xen/grant-table.c   | 72 +++
 drivers/xen/xen-scsiback.c  | 60 ---
 include/xen/grant_table.h   | 13 +
 6 files changed, 116 insertions(+), 128 deletions(-)

diff --git a/drivers/block/xen-blkback/blkback.c 
b/drivers/block/xen-blkback/blkback.c
index 501e9dacfff9..9ebf53903d7b 100644
--- a/drivers/block/xen-blkback/blkback.c
+++ b/drivers/block/xen-blkback/blkback.c
@@ -132,73 +132,12 @@ module_param(log_stats, int, 0644);
 
 #define BLKBACK_INVALID_HANDLE (~0)
 
-/* Number of free pages to remove on each call to gnttab_free_pages */
-#define NUM_BATCH_FREE_PAGES 10
-
 static inline bool persistent_gnt_timeout(struct persistent_gnt 
*persistent_gnt)
 {
return pgrant_timeout && (jiffies - persistent_gnt->last_used >=
HZ * pgrant_timeout);
 }
 
-static inline int get_free_page(struct xen_blkif_ring *ring, struct page 
**page)
-{
-   unsigned long flags;
-
-   spin_lock_irqsave(&ring->free_pages_lock, flags);
-   if (list_empty(&ring->free_pages)) {
-   BUG_ON(ring->free_pages_num != 0);
-   spin_unlock_irqrestore(&ring->free_pages_lock, flags);
-   return gnttab_alloc_pages(1, page);
-   }
-   BUG_ON(ring->free_pages_num == 0);
-   page[0] = list_first_entry(&ring->free_pages, struct page, lru);
-   list_del(&page[0]->lru);
-   ring->free_pages_num--;
-   spin_unlock_irqrestore(&ring->free_pages_lock, flags);
-
-   return 0;
-}
-
-static inline void put_free_pages(struct xen_blkif_ring *ring, struct page 
**page,
-  int num)
-{
-   unsigned long flags;
-   int i;
-
-   spin_lock_irqsave(&ring->free_pages_lock, flags);
-   for (i = 0; i < num; i++)
-   list_add(&page[i]->lru, &ring->free_pages);
-   ring->free_pages_num += num;
-   spin_unlock_irqrestore(&ring->free_pages_lock, flags);
-}
-
-static inline void shrink_free_pagepool(struct xen_blkif_ring *ring, int num)
-{
-   /* Remove requested pages in batches of NUM_BATCH_FREE_PAGES */
-   struct page *page[NUM_BATCH_FREE_PAGES];
-   unsigned int num_pages = 0;
-   unsigned long flags;
-
-   spin_lock_irqsave(&ring->free_pages_lock, flags);
-   while (ring->free_pages_num > num) {
-   BUG_ON(list_empty(&ring->free_pages));
-   page[num_pages] = list_first_entry(&ring->free_pages,
-  struct page, lru);
-   list_del(&page[num_pages]->lru);
-   ring->free_pages_num--;
-   if (++num_pages == NUM_BATCH_FREE_PAGES) {
-   spin_unlock_irqrestore(&ring->free_pages_lock, flags);
-   gnttab_free_pages(num_pages, page);
-   spin_lock_irqsave(&ring->free_pages_lock, flags);
-   num_pages = 0;
-   }
-   }
-   spin_unlock_irqrestore(&ring->free_pages_lock, flags);
-   if (num_pages != 0)
-   gnttab_free_pages(num_pages, page);
-}
-
 #define vaddr(page) ((unsigned long)pfn_to_kaddr(page_to_pfn(page)))
 
 static int do_block_io_op(struct xen_blkif_ring *ring, unsigned int 
*eoi_flags);
@@ -331,7 +270,8 @@ static void free_persistent_gnts(struct xen_blkif_ring 
*ring, struct rb_root *ro
unmap_data.count = segs_to_unmap;
BUG_ON(gnttab_unmap_refs_sync(&unmap_data));
 
-   put_free_pages(ring, pages, segs_to_unmap);
+   gnttab_page_cache_put(&ring->free_pages, pages,
+ segs_to_unmap);
segs_to_unmap = 0;
}
 
@@ -371,7 +311,8 @@ void xen_blkbk_unmap_purged_grants(struct work_struct *work)
if (++segs_to_unmap == BLKIF_MAX_SEGMENTS_PER_REQUEST) {
unmap_data.count = segs_to_unmap;
BUG_ON(gnttab_unmap_refs_sync(&unmap_data));
-   put_free_pages(ring, pages, segs_to_unmap);
+   gnttab_page_cache_put(&ring->free_pages, pages,
+ segs_to_unmap);
segs_to_unmap = 0;
}
kfree(persistent_gnt);
@@ -379,7 +320,7 @@ void xen_blkbk_unmap_purged_grants(struct work_struct *work)
if (segs_to_unmap > 0) {
unmap_data.count = segs_to_unmap;
BUG_ON(gnttab_unmap_refs_sync(&unmap_data));
-   put_free_pages(ring, pages, segs_to_unmap);
+   gnttab_page_cache_put(&ring->free_pages, pages, segs_to_unmap);
}
 }
 
@@ -664,9 +605,10 @@ int