Re: [PATCH v2 4/4] xen: add helpers to allocate unpopulated memory
On Fri, Jul 24, 2020 at 12:36:33PM -0400, Boris Ostrovsky wrote: > On 7/24/20 10:34 AM, David Hildenbrand wrote: > > CCing Dan > > > > On 24.07.20 14:42, Roger Pau Monne wrote: > >> + > >> +#include > >> +#include > >> +#include > >> +#include > >> +#include > >> +#include > >> + > >> +#include > >> + > >> +#include > >> +#include > >> + > >> +static DEFINE_MUTEX(lock); > >> +static LIST_HEAD(list); > >> +static unsigned int count; > >> + > >> +static int fill(unsigned int nr_pages) > > > Less generic names? How about list_lock, pg_list, pg_count, > fill_pglist()? (But these are bad too, so maybe you can come up with > something better) OK, I have to admit I like using such short names when the code allows to, for example this code is so simple that it didn't seem to warrant using longer names. Will rename on next version. > >> +{ > >> + struct dev_pagemap *pgmap; > >> + void *vaddr; > >> + unsigned int i, alloc_pages = round_up(nr_pages, PAGES_PER_SECTION); > >> + int nid, ret; > >> + > >> + pgmap = kzalloc(sizeof(*pgmap), GFP_KERNEL); > >> + if (!pgmap) > >> + return -ENOMEM; > >> + > >> + pgmap->type = MEMORY_DEVICE_DEVDAX; > >> + pgmap->res.name = "XEN SCRATCH"; > > > Typically iomem resources only capitalize first letters. > > > >> + pgmap->res.flags = IORESOURCE_MEM | IORESOURCE_BUSY; > >> + > >> + ret = allocate_resource(_resource, >res, > >> + alloc_pages * PAGE_SIZE, 0, -1, > >> + PAGES_PER_SECTION * PAGE_SIZE, NULL, NULL); > > > Are we not going to end up with a whole bunch of "Xen scratch" resource > ranges for each miss in the page list? Or do we expect them to get merged? PAGES_PER_SECTION is IMO big enough to prevent ending up with a lot of separated ranges. I think the value is 32 or 64MiB on x86, so while we are likely to end up with more than one resource added, I don't think it's going to be massive. 
> > >> + if (ret < 0) { > >> + pr_err("Cannot allocate new IOMEM resource\n"); > >> + kfree(pgmap); > >> + return ret; > >> + } > >> + > >> + nid = memory_add_physaddr_to_nid(pgmap->res.start); > > > Should we consider page range crossing node boundaries? I'm not sure whether this is possible (I would think allocate_resource should return a range from a single node), but then it would greatly complicate the code to perform the memremap_pages, as we would have to split the region into multiple dev_pagemap structs. FWIW the current code in the balloon driver does exactly the same (which doesn't mean it's correct, but that's where I got the logic from). > >> + > >> +#ifdef CONFIG_XEN_HAVE_PVMMU > >> + /* > >> + * We don't support PV MMU when Linux and Xen is using > >> + * different page granularity. > >> + */ > >> + BUILD_BUG_ON(XEN_PAGE_SIZE != PAGE_SIZE); > >> + > >> +/* > >> + * memremap will build page tables for the new memory so > >> + * the p2m must contain invalid entries so the correct > >> + * non-present PTEs will be written. > >> + * > >> + * If a failure occurs, the original (identity) p2m entries > >> + * are not restored since this region is now known not to > >> + * conflict with any devices. 
> >> + */ > >> + if (!xen_feature(XENFEAT_auto_translated_physmap)) { > >> + xen_pfn_t pfn = PFN_DOWN(pgmap->res.start); > >> + > >> + for (i = 0; i < alloc_pages; i++) { > >> + if (!set_phys_to_machine(pfn + i, INVALID_P2M_ENTRY)) { > >> + pr_warn("set_phys_to_machine() failed, no > >> memory added\n"); > >> + release_resource(>res); > >> + kfree(pgmap); > >> + return -ENOMEM; > >> + } > >> +} > >> + } > >> +#endif > >> + > >> + vaddr = memremap_pages(pgmap, nid); > >> + if (IS_ERR(vaddr)) { > >> + pr_err("Cannot remap memory range\n"); > >> + release_resource(>res); > >> + kfree(pgmap); > >> + return PTR_ERR(vaddr); > >> + } > >> + > >> + for (i = 0; i < alloc_pages; i++) { > >> + struct page *pg = virt_to_page(vaddr + PAGE_SIZE * i); > >> + > >> + BUG_ON(!virt_addr_valid(vaddr + PAGE_SIZE * i)); > >> + list_add(>lru, ); > >> + count++; > >> + } > >> + > >> + return 0; > >> +} > >> + > >> +/** > >> + * xen_alloc_unpopulated_pages - alloc unpopulated pages > >> + * @nr_pages: Number of pages > >> + * @pages: pages returned > >> + * @return 0 on success, error otherwise > >> + */ > >> +int xen_alloc_unpopulated_pages(unsigned int nr_pages, struct page > >> **pages) > >> +{ > >> + unsigned int i; > >> + int ret = 0; > >> + > >> + mutex_lock(); > >> + if (count < nr_pages) { > >> + ret = fill(nr_pages); > > > (nr_pages - count) ? Yup, already fixed as Juergen also pointed it out. > >> + > >> +#ifdef CONFIG_XEN_PV > >> +static
Re: [PATCH v2 4/4] xen: add helpers to allocate unpopulated memory
On 24.07.20 18:36, Boris Ostrovsky wrote: > On 7/24/20 10:34 AM, David Hildenbrand wrote: >> CCing Dan >> >> On 24.07.20 14:42, Roger Pau Monne wrote: >>> diff --git a/drivers/xen/unpopulated-alloc.c >>> b/drivers/xen/unpopulated-alloc.c >>> new file mode 100644 >>> index ..aaa91cefbbf9 >>> --- /dev/null >>> +++ b/drivers/xen/unpopulated-alloc.c >>> @@ -0,0 +1,222 @@ > > > >>> + */ >>> + >>> +#include >>> +#include >>> +#include >>> +#include >>> +#include >>> +#include >>> + >>> +#include >>> + >>> +#include >>> +#include >>> + >>> +static DEFINE_MUTEX(lock); >>> +static LIST_HEAD(list); >>> +static unsigned int count; >>> + >>> +static int fill(unsigned int nr_pages) > > > Less generic names? How about list_lock, pg_list, pg_count, > fill_pglist()? (But these are bad too, so maybe you can come up with > something better) > > >>> +{ >>> + struct dev_pagemap *pgmap; >>> + void *vaddr; >>> + unsigned int i, alloc_pages = round_up(nr_pages, PAGES_PER_SECTION); >>> + int nid, ret; >>> + >>> + pgmap = kzalloc(sizeof(*pgmap), GFP_KERNEL); >>> + if (!pgmap) >>> + return -ENOMEM; >>> + >>> + pgmap->type = MEMORY_DEVICE_DEVDAX; >>> + pgmap->res.name = "XEN SCRATCH"; > > > Typically iomem resources only capitalize first letters. > > >>> + pgmap->res.flags = IORESOURCE_MEM | IORESOURCE_BUSY; >>> + >>> + ret = allocate_resource(_resource, >res, >>> + alloc_pages * PAGE_SIZE, 0, -1, >>> + PAGES_PER_SECTION * PAGE_SIZE, NULL, NULL); > > > Are we not going to end up with a whole bunch of "Xen scratch" resource > ranges for each miss in the page list? Or do we expect them to get merged? > AFAIK, no resources will get merged (and it's in the general case not safe to do). The old approach (add_memory_resource()) will end up with the same situation ("Xen Scratch" vs. "System RAM") one new resource per added memory block/section. 
FWIW, I am looking into merging selected resources in the context of virtio-mem _after_ adding succeeded (not directly when adding the resource to the tree). Interface might look something like void merge_child_mem_resources(struct resource *parent, const char *name); So I can, for example, trigger merging of all "System RAM (virtio_mem)" resources, that are located under a device node (e.g., "virtio0"). I also thought about tagging each mergeable resource via something like "IORESOURCE_MERGEABLE" - whereby the user agrees that it does not hold any pointers to such a resource. But I don't see yet a compelling reason to sacrifice space for a new flag. So with this in place, this code could call once adding succeeded merge_child_mem_resources(_resource, "Xen Scratch"); -- Thanks, David / dhildenb
Re: [PATCH v2 4/4] xen: add helpers to allocate unpopulated memory
Hi Roger, Thank you for the patch! Yet something to improve: [auto build test ERROR on xen-tip/linux-next] [also build test ERROR on linus/master v5.8-rc6 next-20200724] [cannot apply to linux/master] [If your patch is applied to the wrong git tree, kindly drop us a note. And when submitting patch, we suggest to use '--base' as documented in https://git-scm.com/docs/git-format-patch] url: https://github.com/0day-ci/linux/commits/Roger-Pau-Monne/xen-balloon-fixes-for-memory-hotplug/20200724-204452 base: https://git.kernel.org/pub/scm/linux/kernel/git/xen/tip.git linux-next config: x86_64-allyesconfig (attached as .config) compiler: clang version 12.0.0 (https://github.com/llvm/llvm-project 1d09ecf36175f7910ffedd6d497c07b5c74c22fb) reproduce (this is a W=1 build): wget https://raw.githubusercontent.com/intel/lkp-tests/master/sbin/make.cross -O ~/bin/make.cross chmod +x ~/bin/make.cross # install x86_64 cross compiling tool for clang build # apt-get install binutils-x86-64-linux-gnu # save the attached .config to linux build tree COMPILER_INSTALL_PATH=$HOME/0day COMPILER=clang make.cross ARCH=x86_64 If you fix the issue, kindly add following tag as appropriate Reported-by: kernel test robot All errors (new ones prefixed by >>): >> drivers/gpu/drm/xen/xen_drm_front_gem.c:102:9: error: implicit declaration >> of function 'xen_alloc_unpopulated_pages' >> [-Werror,-Wimplicit-function-declaration] ret = xen_alloc_unpopulated_pages(xen_obj->num_pages, ^ >> drivers/gpu/drm/xen/xen_drm_front_gem.c:155:5: error: implicit declaration >> of function 'xen_free_unpopulated_pages' >> [-Werror,-Wimplicit-function-declaration] xen_free_unpopulated_pages(xen_obj->num_pages, ^ 2 errors generated. 
vim +/xen_alloc_unpopulated_pages +102 drivers/gpu/drm/xen/xen_drm_front_gem.c 77 78 static struct xen_gem_object *gem_create(struct drm_device *dev, size_t size) 79 { 80 struct xen_drm_front_drm_info *drm_info = dev->dev_private; 81 struct xen_gem_object *xen_obj; 82 int ret; 83 84 size = round_up(size, PAGE_SIZE); 85 xen_obj = gem_create_obj(dev, size); 86 if (IS_ERR_OR_NULL(xen_obj)) 87 return xen_obj; 88 89 if (drm_info->front_info->cfg.be_alloc) { 90 /* 91 * backend will allocate space for this buffer, so 92 * only allocate array of pointers to pages 93 */ 94 ret = gem_alloc_pages_array(xen_obj, size); 95 if (ret < 0) 96 goto fail; 97 98 /* 99 * allocate ballooned pages which will be used to map 100 * grant references provided by the backend 101 */ > 102 ret = xen_alloc_unpopulated_pages(xen_obj->num_pages, 103xen_obj->pages); 104 if (ret < 0) { 105 DRM_ERROR("Cannot allocate %zu ballooned pages: %d\n", 106xen_obj->num_pages, ret); 107 gem_free_pages_array(xen_obj); 108 goto fail; 109 } 110 111 xen_obj->be_alloc = true; 112 return xen_obj; 113 } 114 /* 115 * need to allocate backing pages now, so we can share those 116 * with the backend 117 */ 118 xen_obj->num_pages = DIV_ROUND_UP(size, PAGE_SIZE); 119 xen_obj->pages = drm_gem_get_pages(_obj->base); 120 if (IS_ERR_OR_NULL(xen_obj->pages)) { 121 ret = PTR_ERR(xen_obj->pages); 122 xen_obj->pages = NULL; 123 goto fail; 124 } 125 126 return xen_obj; 127 128 fail: 129 DRM_ERROR("Failed to allocate buffer with size %zu\n", size); 130 return ERR_PTR(ret); 131 } 132 133 struct drm_gem_object *xen_drm_front_gem_create(struct drm_device *dev, 134 size_t size) 135 { 136 struct xen_gem_object *xen_obj; 137 138 xen_obj = gem_create(dev, size); 139 if (IS_ERR_OR_NULL(xen_obj)) 140 return ERR_CAST(xen_obj); 141 142 return _obj->base; 143 } 144 145 void xen_drm_front_gem_free_object_unlocked(struct drm_gem_object *gem_obj) 146 { 147 struct xen_gem_object *xen_obj = to_xen_gem_obj(gem_obj); 148 149 if 
(xen_obj->base.import_attach) { 150
Re: [PATCH v2 4/4] xen: add helpers to allocate unpopulated memory
On 7/24/20 10:34 AM, David Hildenbrand wrote: > CCing Dan > > On 24.07.20 14:42, Roger Pau Monne wrote: >> diff --git a/drivers/xen/unpopulated-alloc.c >> b/drivers/xen/unpopulated-alloc.c >> new file mode 100644 >> index ..aaa91cefbbf9 >> --- /dev/null >> +++ b/drivers/xen/unpopulated-alloc.c >> @@ -0,0 +1,222 @@ >> + */ >> + >> +#include >> +#include >> +#include >> +#include >> +#include >> +#include >> + >> +#include >> + >> +#include >> +#include >> + >> +static DEFINE_MUTEX(lock); >> +static LIST_HEAD(list); >> +static unsigned int count; >> + >> +static int fill(unsigned int nr_pages) Less generic names? How about list_lock, pg_list, pg_count, fill_pglist()? (But these are bad too, so maybe you can come up with something better) >> +{ >> +struct dev_pagemap *pgmap; >> +void *vaddr; >> +unsigned int i, alloc_pages = round_up(nr_pages, PAGES_PER_SECTION); >> +int nid, ret; >> + >> +pgmap = kzalloc(sizeof(*pgmap), GFP_KERNEL); >> +if (!pgmap) >> +return -ENOMEM; >> + >> +pgmap->type = MEMORY_DEVICE_DEVDAX; >> +pgmap->res.name = "XEN SCRATCH"; Typically iomem resources only capitalize first letters. >> +pgmap->res.flags = IORESOURCE_MEM | IORESOURCE_BUSY; >> + >> +ret = allocate_resource(_resource, >res, >> +alloc_pages * PAGE_SIZE, 0, -1, >> +PAGES_PER_SECTION * PAGE_SIZE, NULL, NULL); Are we not going to end up with a whole bunch of "Xen scratch" resource ranges for each miss in the page list? Or do we expect them to get merged? >> +if (ret < 0) { >> +pr_err("Cannot allocate new IOMEM resource\n"); >> +kfree(pgmap); >> +return ret; >> +} >> + >> +nid = memory_add_physaddr_to_nid(pgmap->res.start); Should we consider page range crossing node boundaries? >> + >> +#ifdef CONFIG_XEN_HAVE_PVMMU >> +/* >> + * We don't support PV MMU when Linux and Xen is using >> + * different page granularity. 
>> + */ >> +BUILD_BUG_ON(XEN_PAGE_SIZE != PAGE_SIZE); >> + >> +/* >> + * memremap will build page tables for the new memory so >> + * the p2m must contain invalid entries so the correct >> + * non-present PTEs will be written. >> + * >> + * If a failure occurs, the original (identity) p2m entries >> + * are not restored since this region is now known not to >> + * conflict with any devices. >> + */ >> +if (!xen_feature(XENFEAT_auto_translated_physmap)) { >> +xen_pfn_t pfn = PFN_DOWN(pgmap->res.start); >> + >> +for (i = 0; i < alloc_pages; i++) { >> +if (!set_phys_to_machine(pfn + i, INVALID_P2M_ENTRY)) { >> +pr_warn("set_phys_to_machine() failed, no >> memory added\n"); >> +release_resource(>res); >> +kfree(pgmap); >> +return -ENOMEM; >> +} >> +} >> +} >> +#endif >> + >> +vaddr = memremap_pages(pgmap, nid); >> +if (IS_ERR(vaddr)) { >> +pr_err("Cannot remap memory range\n"); >> +release_resource(>res); >> +kfree(pgmap); >> +return PTR_ERR(vaddr); >> +} >> + >> +for (i = 0; i < alloc_pages; i++) { >> +struct page *pg = virt_to_page(vaddr + PAGE_SIZE * i); >> + >> +BUG_ON(!virt_addr_valid(vaddr + PAGE_SIZE * i)); >> +list_add(>lru, ); >> +count++; >> +} >> + >> +return 0; >> +} >> + >> +/** >> + * xen_alloc_unpopulated_pages - alloc unpopulated pages >> + * @nr_pages: Number of pages >> + * @pages: pages returned >> + * @return 0 on success, error otherwise >> + */ >> +int xen_alloc_unpopulated_pages(unsigned int nr_pages, struct page **pages) >> +{ >> +unsigned int i; >> +int ret = 0; >> + >> +mutex_lock(); >> +if (count < nr_pages) { >> +ret = fill(nr_pages); (nr_pages - count) ? >> +if (ret) >> +goto out; >> +} >> + >> +for (i = 0; i < nr_pages; i++) { >> +struct page *pg = list_first_entry_or_null(, struct page, >> + lru); >> + >> +BUG_ON(!pg); >> +list_del(>lru); >> +count--; >> +pages[i] = pg; >> + >> +#ifdef CONFIG_XEN_HAVE_PVMMU >> +/* >> + * We don't support PV MMU when Linux and Xen is using >> + * different page granularity. 
>> + */ >> +BUILD_BUG_ON(XEN_PAGE_SIZE != PAGE_SIZE); >> + >> +if (!xen_feature(XENFEAT_auto_translated_physmap)) { >> +ret = xen_alloc_p2m_entry(page_to_pfn(pg)); >> +if (ret < 0) { >> +unsigned int j; >> + >> +for (j = 0; j <= i; j++) { >>
Re: [PATCH v2 4/4] xen: add helpers to allocate unpopulated memory
Hi Roger, Thank you for the patch! Yet something to improve: [auto build test ERROR on xen-tip/linux-next] [also build test ERROR on linus/master v5.8-rc6 next-20200724] [cannot apply to linux/master] [If your patch is applied to the wrong git tree, kindly drop us a note. And when submitting patch, we suggest to use '--base' as documented in https://git-scm.com/docs/git-format-patch] url: https://github.com/0day-ci/linux/commits/Roger-Pau-Monne/xen-balloon-fixes-for-memory-hotplug/20200724-204452 base: https://git.kernel.org/pub/scm/linux/kernel/git/xen/tip.git linux-next config: i386-debian-10.3 (attached as .config) compiler: gcc-9 (Debian 9.3.0-14) 9.3.0 reproduce (this is a W=1 build): # save the attached .config to linux build tree make W=1 ARCH=i386 If you fix the issue, kindly add following tag as appropriate Reported-by: kernel test robot All errors (new ones prefixed by >>): drivers/gpu/drm/xen/xen_drm_front_gem.c: In function 'gem_create': >> drivers/gpu/drm/xen/xen_drm_front_gem.c:102:9: error: implicit declaration >> of function 'xen_alloc_unpopulated_pages' >> [-Werror=implicit-function-declaration] 102 | ret = xen_alloc_unpopulated_pages(xen_obj->num_pages, | ^~~ drivers/gpu/drm/xen/xen_drm_front_gem.c: In function 'xen_drm_front_gem_free_object_unlocked': >> drivers/gpu/drm/xen/xen_drm_front_gem.c:155:5: error: implicit declaration >> of function 'xen_free_unpopulated_pages' >> [-Werror=implicit-function-declaration] 155 | xen_free_unpopulated_pages(xen_obj->num_pages, | ^~ cc1: some warnings being treated as errors vim +/xen_alloc_unpopulated_pages +102 drivers/gpu/drm/xen/xen_drm_front_gem.c 77 78 static struct xen_gem_object *gem_create(struct drm_device *dev, size_t size) 79 { 80 struct xen_drm_front_drm_info *drm_info = dev->dev_private; 81 struct xen_gem_object *xen_obj; 82 int ret; 83 84 size = round_up(size, PAGE_SIZE); 85 xen_obj = gem_create_obj(dev, size); 86 if (IS_ERR_OR_NULL(xen_obj)) 87 return xen_obj; 88 89 if 
(drm_info->front_info->cfg.be_alloc) { 90 /* 91 * backend will allocate space for this buffer, so 92 * only allocate array of pointers to pages 93 */ 94 ret = gem_alloc_pages_array(xen_obj, size); 95 if (ret < 0) 96 goto fail; 97 98 /* 99 * allocate ballooned pages which will be used to map 100 * grant references provided by the backend 101 */ > 102 ret = xen_alloc_unpopulated_pages(xen_obj->num_pages, 103xen_obj->pages); 104 if (ret < 0) { 105 DRM_ERROR("Cannot allocate %zu ballooned pages: %d\n", 106xen_obj->num_pages, ret); 107 gem_free_pages_array(xen_obj); 108 goto fail; 109 } 110 111 xen_obj->be_alloc = true; 112 return xen_obj; 113 } 114 /* 115 * need to allocate backing pages now, so we can share those 116 * with the backend 117 */ 118 xen_obj->num_pages = DIV_ROUND_UP(size, PAGE_SIZE); 119 xen_obj->pages = drm_gem_get_pages(_obj->base); 120 if (IS_ERR_OR_NULL(xen_obj->pages)) { 121 ret = PTR_ERR(xen_obj->pages); 122 xen_obj->pages = NULL; 123 goto fail; 124 } 125 126 return xen_obj; 127 128 fail: 129 DRM_ERROR("Failed to allocate buffer with size %zu\n", size); 130 return ERR_PTR(ret); 131 } 132 133 struct drm_gem_object *xen_drm_front_gem_create(struct drm_device *dev, 134 size_t size) 135 { 136 struct xen_gem_object *xen_obj; 137 138 xen_obj = gem_create(dev, size); 139 if (IS_ERR_OR_NULL(xen_obj)) 140 return ERR_CAST(xen_obj); 141 142 return _obj->base; 143 } 144 145 void xen_drm_front_gem_free_object_unlocked(struct drm_gem_object *gem_obj) 146 { 147 struct xen_gem_object *xen_obj = to_xen_gem_obj(gem_obj); 148 149 if (xen_obj->base.import_attach) { 150 drm_prime_gem_destroy(_obj->base, xen_obj->sgt_imported); 151 gem_free_pages_array(xen_obj); 152 } else { 153 if (xen_obj->pages)
Re: [PATCH v2 4/4] xen: add helpers to allocate unpopulated memory
CCing Dan On 24.07.20 14:42, Roger Pau Monne wrote: > To be used in order to create foreign mappings. This is based on the > ZONE_DEVICE facility which is used by persistent memory devices in > order to create struct pages and kernel virtual mappings for the IOMEM > areas of such devices. Note that on kernels without support for > ZONE_DEVICE Xen will fallback to use ballooned pages in order to > create foreign mappings. > > The newly added helpers use the same parameters as the existing > {alloc/free}_xenballooned_pages functions, which allows for in-place > replacement of the callers. Once a memory region has been added to be > used as scratch mapping space it will no longer be released, and pages > returned are kept in a linked list. This allows to have a buffer of > pages and prevents resorting to frequent additions and removals of > regions. > > If enabled (because ZONE_DEVICE is supported) the usage of the new > functionality untangles Xen balloon and RAM hotplug from the usage of > unpopulated physical memory ranges to map foreign pages, which is the > correct thing to do in order to avoid mappings of foreign pages depend > on memory hotplug. > > Signed-off-by: Roger Pau Monné > --- > I've not added a new memory_type type and just used > MEMORY_DEVICE_DEVDAX which seems to be what we want for such memory > regions. I'm unsure whether abusing this type is fine, or if I should > instead add a specific type, maybe MEMORY_DEVICE_GENERIC? I don't > think we should be using a specific Xen type at all. 
> --- > Cc: Oleksandr Andrushchenko > Cc: David Airlie > Cc: Daniel Vetter > Cc: Boris Ostrovsky > Cc: Juergen Gross > Cc: Stefano Stabellini > Cc: Dan Carpenter > Cc: Roger Pau Monne > Cc: Wei Liu > Cc: Yan Yankovskyi > Cc: dri-de...@lists.freedesktop.org > Cc: xen-de...@lists.xenproject.org > Cc: linux...@kvack.org > Cc: David Hildenbrand > Cc: Michal Hocko > --- > drivers/gpu/drm/xen/xen_drm_front_gem.c | 8 +- > drivers/xen/Makefile| 1 + > drivers/xen/balloon.c | 4 +- > drivers/xen/grant-table.c | 4 +- > drivers/xen/privcmd.c | 4 +- > drivers/xen/unpopulated-alloc.c | 222 > drivers/xen/xenbus/xenbus_client.c | 6 +- > drivers/xen/xlate_mmu.c | 4 +- > include/xen/xen.h | 8 + > 9 files changed, 246 insertions(+), 15 deletions(-) > create mode 100644 drivers/xen/unpopulated-alloc.c > > diff --git a/drivers/gpu/drm/xen/xen_drm_front_gem.c > b/drivers/gpu/drm/xen/xen_drm_front_gem.c > index f0b85e094111..9dd06eae767a 100644 > --- a/drivers/gpu/drm/xen/xen_drm_front_gem.c > +++ b/drivers/gpu/drm/xen/xen_drm_front_gem.c > @@ -99,8 +99,8 @@ static struct xen_gem_object *gem_create(struct drm_device > *dev, size_t size) >* allocate ballooned pages which will be used to map >* grant references provided by the backend >*/ > - ret = alloc_xenballooned_pages(xen_obj->num_pages, > -xen_obj->pages); > + ret = xen_alloc_unpopulated_pages(xen_obj->num_pages, > + xen_obj->pages); > if (ret < 0) { > DRM_ERROR("Cannot allocate %zu ballooned pages: %d\n", > xen_obj->num_pages, ret); > @@ -152,8 +152,8 @@ void xen_drm_front_gem_free_object_unlocked(struct > drm_gem_object *gem_obj) > } else { > if (xen_obj->pages) { > if (xen_obj->be_alloc) { > - free_xenballooned_pages(xen_obj->num_pages, > - xen_obj->pages); > + xen_free_unpopulated_pages(xen_obj->num_pages, > +xen_obj->pages); > gem_free_pages_array(xen_obj); > } else { > drm_gem_put_pages(_obj->base, > diff --git a/drivers/xen/Makefile b/drivers/xen/Makefile > index 0d322f3d90cd..788a5d9c8ef0 100644 > --- a/drivers/xen/Makefile > 
+++ b/drivers/xen/Makefile > @@ -42,3 +42,4 @@ xen-gntdev-$(CONFIG_XEN_GNTDEV_DMABUF) += > gntdev-dmabuf.o > xen-gntalloc-y := gntalloc.o > xen-privcmd-y:= privcmd.o privcmd-buf.o > obj-$(CONFIG_XEN_FRONT_PGDIR_SHBUF) += xen-front-pgdir-shbuf.o > +obj-$(CONFIG_ZONE_DEVICE)+= unpopulated-alloc.o > diff --git a/drivers/xen/balloon.c b/drivers/xen/balloon.c > index b1d8b028bf80..815ef10eb2ff 100644 > --- a/drivers/xen/balloon.c > +++ b/drivers/xen/balloon.c > @@ -654,7 +654,7 @@ void free_xenballooned_pages(int nr_pages, struct page > **pages) > } > EXPORT_SYMBOL(free_xenballooned_pages); > > -#ifdef CONFIG_XEN_PV > +#if defined(CONFIG_XEN_PV) &&
Re: [PATCH v2 4/4] xen: add helpers to allocate unpopulated memory
On 24.07.20 14:42, Roger Pau Monne wrote: To be used in order to create foreign mappings. This is based on the ZONE_DEVICE facility which is used by persistent memory devices in order to create struct pages and kernel virtual mappings for the IOMEM areas of such devices. Note that on kernels without support for ZONE_DEVICE Xen will fallback to use ballooned pages in order to create foreign mappings. The newly added helpers use the same parameters as the existing {alloc/free}_xenballooned_pages functions, which allows for in-place replacement of the callers. Once a memory region has been added to be used as scratch mapping space it will no longer be released, and pages returned are kept in a linked list. This allows to have a buffer of pages and prevents resorting to frequent additions and removals of regions. If enabled (because ZONE_DEVICE is supported) the usage of the new functionality untangles Xen balloon and RAM hotplug from the usage of unpopulated physical memory ranges to map foreign pages, which is the correct thing to do in order to avoid mappings of foreign pages depend on memory hotplug. Signed-off-by: Roger Pau Monné --- I've not added a new memory_type type and just used MEMORY_DEVICE_DEVDAX which seems to be what we want for such memory regions. I'm unsure whether abusing this type is fine, or if I should instead add a specific type, maybe MEMORY_DEVICE_GENERIC? I don't think we should be using a specific Xen type at all. 
--- Cc: Oleksandr Andrushchenko Cc: David Airlie Cc: Daniel Vetter Cc: Boris Ostrovsky Cc: Juergen Gross Cc: Stefano Stabellini Cc: Dan Carpenter Cc: Roger Pau Monne Cc: Wei Liu Cc: Yan Yankovskyi Cc: dri-de...@lists.freedesktop.org Cc: xen-de...@lists.xenproject.org Cc: linux...@kvack.org Cc: David Hildenbrand Cc: Michal Hocko --- drivers/gpu/drm/xen/xen_drm_front_gem.c | 8 +- drivers/xen/Makefile| 1 + drivers/xen/balloon.c | 4 +- drivers/xen/grant-table.c | 4 +- drivers/xen/privcmd.c | 4 +- drivers/xen/unpopulated-alloc.c | 222 drivers/xen/xenbus/xenbus_client.c | 6 +- drivers/xen/xlate_mmu.c | 4 +- include/xen/xen.h | 8 + 9 files changed, 246 insertions(+), 15 deletions(-) create mode 100644 drivers/xen/unpopulated-alloc.c diff --git a/drivers/gpu/drm/xen/xen_drm_front_gem.c b/drivers/gpu/drm/xen/xen_drm_front_gem.c index f0b85e094111..9dd06eae767a 100644 --- a/drivers/gpu/drm/xen/xen_drm_front_gem.c +++ b/drivers/gpu/drm/xen/xen_drm_front_gem.c @@ -99,8 +99,8 @@ static struct xen_gem_object *gem_create(struct drm_device *dev, size_t size) * allocate ballooned pages which will be used to map * grant references provided by the backend */ - ret = alloc_xenballooned_pages(xen_obj->num_pages, - xen_obj->pages); + ret = xen_alloc_unpopulated_pages(xen_obj->num_pages, + xen_obj->pages); if (ret < 0) { DRM_ERROR("Cannot allocate %zu ballooned pages: %d\n", xen_obj->num_pages, ret); @@ -152,8 +152,8 @@ void xen_drm_front_gem_free_object_unlocked(struct drm_gem_object *gem_obj) } else { if (xen_obj->pages) { if (xen_obj->be_alloc) { - free_xenballooned_pages(xen_obj->num_pages, - xen_obj->pages); + xen_free_unpopulated_pages(xen_obj->num_pages, + xen_obj->pages); gem_free_pages_array(xen_obj); } else { drm_gem_put_pages(_obj->base, diff --git a/drivers/xen/Makefile b/drivers/xen/Makefile index 0d322f3d90cd..788a5d9c8ef0 100644 --- a/drivers/xen/Makefile +++ b/drivers/xen/Makefile @@ -42,3 +42,4 @@ xen-gntdev-$(CONFIG_XEN_GNTDEV_DMABUF)+= gntdev-dmabuf.o xen-gntalloc-y:= 
gntalloc.o xen-privcmd-y := privcmd.o privcmd-buf.o obj-$(CONFIG_XEN_FRONT_PGDIR_SHBUF) += xen-front-pgdir-shbuf.o +obj-$(CONFIG_ZONE_DEVICE) += unpopulated-alloc.o diff --git a/drivers/xen/balloon.c b/drivers/xen/balloon.c index b1d8b028bf80..815ef10eb2ff 100644 --- a/drivers/xen/balloon.c +++ b/drivers/xen/balloon.c @@ -654,7 +654,7 @@ void free_xenballooned_pages(int nr_pages, struct page **pages) } EXPORT_SYMBOL(free_xenballooned_pages); -#ifdef CONFIG_XEN_PV +#if defined(CONFIG_XEN_PV) && !defined(CONFIG_ZONE_DEVICE) static void __init balloon_add_region(unsigned long start_pfn, unsigned long pages) { @@ -708,7 +708,7 @@ static
[PATCH v2 4/4] xen: add helpers to allocate unpopulated memory
To be used in order to create foreign mappings. This is based on the ZONE_DEVICE facility which is used by persistent memory devices in order to create struct pages and kernel virtual mappings for the IOMEM areas of such devices. Note that on kernels without support for ZONE_DEVICE Xen will fallback to use ballooned pages in order to create foreign mappings. The newly added helpers use the same parameters as the existing {alloc/free}_xenballooned_pages functions, which allows for in-place replacement of the callers. Once a memory region has been added to be used as scratch mapping space it will no longer be released, and pages returned are kept in a linked list. This allows to have a buffer of pages and prevents resorting to frequent additions and removals of regions. If enabled (because ZONE_DEVICE is supported) the usage of the new functionality untangles Xen balloon and RAM hotplug from the usage of unpopulated physical memory ranges to map foreign pages, which is the correct thing to do in order to avoid mappings of foreign pages depend on memory hotplug. Signed-off-by: Roger Pau Monné --- I've not added a new memory_type type and just used MEMORY_DEVICE_DEVDAX which seems to be what we want for such memory regions. I'm unsure whether abusing this type is fine, or if I should instead add a specific type, maybe MEMORY_DEVICE_GENERIC? I don't think we should be using a specific Xen type at all. 
--- Cc: Oleksandr Andrushchenko Cc: David Airlie Cc: Daniel Vetter Cc: Boris Ostrovsky Cc: Juergen Gross Cc: Stefano Stabellini Cc: Dan Carpenter Cc: Roger Pau Monne Cc: Wei Liu Cc: Yan Yankovskyi Cc: dri-de...@lists.freedesktop.org Cc: xen-de...@lists.xenproject.org Cc: linux...@kvack.org Cc: David Hildenbrand Cc: Michal Hocko --- drivers/gpu/drm/xen/xen_drm_front_gem.c | 8 +- drivers/xen/Makefile| 1 + drivers/xen/balloon.c | 4 +- drivers/xen/grant-table.c | 4 +- drivers/xen/privcmd.c | 4 +- drivers/xen/unpopulated-alloc.c | 222 drivers/xen/xenbus/xenbus_client.c | 6 +- drivers/xen/xlate_mmu.c | 4 +- include/xen/xen.h | 8 + 9 files changed, 246 insertions(+), 15 deletions(-) create mode 100644 drivers/xen/unpopulated-alloc.c diff --git a/drivers/gpu/drm/xen/xen_drm_front_gem.c b/drivers/gpu/drm/xen/xen_drm_front_gem.c index f0b85e094111..9dd06eae767a 100644 --- a/drivers/gpu/drm/xen/xen_drm_front_gem.c +++ b/drivers/gpu/drm/xen/xen_drm_front_gem.c @@ -99,8 +99,8 @@ static struct xen_gem_object *gem_create(struct drm_device *dev, size_t size) * allocate ballooned pages which will be used to map * grant references provided by the backend */ - ret = alloc_xenballooned_pages(xen_obj->num_pages, - xen_obj->pages); + ret = xen_alloc_unpopulated_pages(xen_obj->num_pages, + xen_obj->pages); if (ret < 0) { DRM_ERROR("Cannot allocate %zu ballooned pages: %d\n", xen_obj->num_pages, ret); @@ -152,8 +152,8 @@ void xen_drm_front_gem_free_object_unlocked(struct drm_gem_object *gem_obj) } else { if (xen_obj->pages) { if (xen_obj->be_alloc) { - free_xenballooned_pages(xen_obj->num_pages, - xen_obj->pages); + xen_free_unpopulated_pages(xen_obj->num_pages, + xen_obj->pages); gem_free_pages_array(xen_obj); } else { drm_gem_put_pages(_obj->base, diff --git a/drivers/xen/Makefile b/drivers/xen/Makefile index 0d322f3d90cd..788a5d9c8ef0 100644 --- a/drivers/xen/Makefile +++ b/drivers/xen/Makefile @@ -42,3 +42,4 @@ xen-gntdev-$(CONFIG_XEN_GNTDEV_DMABUF)+= gntdev-dmabuf.o xen-gntalloc-y := 
gntalloc.o xen-privcmd-y := privcmd.o privcmd-buf.o obj-$(CONFIG_XEN_FRONT_PGDIR_SHBUF)+= xen-front-pgdir-shbuf.o +obj-$(CONFIG_ZONE_DEVICE) += unpopulated-alloc.o diff --git a/drivers/xen/balloon.c b/drivers/xen/balloon.c index b1d8b028bf80..815ef10eb2ff 100644 --- a/drivers/xen/balloon.c +++ b/drivers/xen/balloon.c @@ -654,7 +654,7 @@ void free_xenballooned_pages(int nr_pages, struct page **pages) } EXPORT_SYMBOL(free_xenballooned_pages); -#ifdef CONFIG_XEN_PV +#if defined(CONFIG_XEN_PV) && !defined(CONFIG_ZONE_DEVICE) static void __init balloon_add_region(unsigned long start_pfn, unsigned long pages) { @@ -708,7 +708,7 @@ static int __init balloon_init(void)