On Thursday, March 5, 2020 11:29:40 AM -03 David Hildenbrand wrote:
> When shrinking an mmap, we want to re-reserve the already activated area.
> When growing a memory region, we want to activate memory starting at a
> given fd_offset. Prepare by allowing these parameters to be passed.
>
> Also, let's make sure we always process full pages, to avoid
> unmapping/remapping pages that are already in use when
> growing/shrinking. Add some asserts.
>
> Reviewed-by: Richard Henderson <richard.hender...@linaro.org>
> Reviewed-by: Peter Xu <pet...@redhat.com>
> Cc: Igor Kotrasinski <i.kotrasi...@partner.samsung.com>
> Cc: Murilo Opsfelder Araujo <muri...@linux.ibm.com>
> Cc: "Michael S. Tsirkin" <m...@redhat.com>
> Cc: Greg Kurz <gr...@kaod.org>
> Cc: Eduardo Habkost <ehabk...@redhat.com>
> Cc: "Dr. David Alan Gilbert" <dgilb...@redhat.com>
> Cc: Igor Mammedov <imamm...@redhat.com>
> Signed-off-by: David Hildenbrand <da...@redhat.com>
> ---

Acked-by: Murilo Opsfelder Araujo <muri...@linux.ibm.com>
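
Just to confirm I'm reading the intent correctly: with these two
parameters, a resize path can re-reserve the tail on shrink and activate
the tail on grow.  Roughly like the sketch below -- the helper is only my
reading of the intended flow, not part of this patch; it assumes both
sizes are multiples of qemu_fd_getpagesize(fd), that the grown tail is
still inside the originally reserved region, and that it would sit next
to the static helpers in util/mmap-alloc.c:

    static void *resize_mmap_sketch(void *ptr, size_t old_size,
                                    size_t new_size, int fd,
                                    bool shared, bool is_pmem)
    {
        if (new_size < old_size) {
            /* Shrink: turn the unused tail back into a PROT_NONE reservation. */
            if (mmap_reserve(ptr + new_size, old_size - new_size, fd) ==
                MAP_FAILED) {
                return MAP_FAILED;
            }
        } else if (new_size > old_size) {
            /* Grow: activate the tail, mapping the fd from offset old_size. */
            if (mmap_activate(ptr + old_size, new_size - old_size, fd,
                              old_size, shared, is_pmem) == MAP_FAILED) {
                return MAP_FAILED;
            }
        }
        return ptr;
    }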

>  util/mmap-alloc.c | 34 +++++++++++++++++++++++-----------
>  1 file changed, 23 insertions(+), 11 deletions(-)
>
> diff --git a/util/mmap-alloc.c b/util/mmap-alloc.c
> index 8f40ef4fed..2767caa33b 100644
> --- a/util/mmap-alloc.c
> +++ b/util/mmap-alloc.c
> @@ -83,12 +83,12 @@ size_t qemu_mempath_getpagesize(const char *mem_path)
>  }
>
>  /*
> - * Reserve a new memory region of the requested size to be used for mapping
> - * from the given fd (if any).
> + * Reserve a new memory region of the requested size or re-reserve parts
> + * of an activated region to be used for mapping from the given fd (if any).
>   */
> -static void *mmap_reserve(size_t size, int fd)
> +static void *mmap_reserve(void *ptr, size_t size, int fd)
>  {
> -    int flags = MAP_PRIVATE;
> +    int flags = MAP_PRIVATE | (ptr ? MAP_FIXED : 0);
>
>  #if defined(__powerpc64__) && defined(__linux__)
>      /*
> @@ -111,20 +111,24 @@ static void *mmap_reserve(size_t size, int fd)
>      flags |= MAP_ANONYMOUS;
>  #endif
>
> -    return mmap(0, size, PROT_NONE, flags, fd, 0);
> +    return mmap(ptr, size, PROT_NONE, flags, fd, 0);
>  }
>
>  /*
>   * Activate memory in a reserved region from the given fd (if any), to make
>   * it accessible.
>   */
> -static void *mmap_activate(void *ptr, size_t size, int fd, bool shared,
> -                           bool is_pmem)
> +static void *mmap_activate(void *ptr, size_t size, int fd, size_t fd_offset,
> +                           bool shared, bool is_pmem)
>  {
>      int map_sync_flags = 0;
>      int flags = MAP_FIXED;
>      void *activated_ptr;
>
> +    if (fd == -1) {
> +        fd_offset = 0;
> +    }
> +
>      flags |= fd == -1 ? MAP_ANONYMOUS : 0;
>      flags |= shared ? MAP_SHARED : MAP_PRIVATE;
>      if (shared && is_pmem) {
> @@ -132,7 +136,7 @@ static void *mmap_activate(void *ptr, size_t size, int fd, bool shared,
>      }
>
>      activated_ptr = mmap(ptr, size, PROT_READ | PROT_WRITE,
> -                         flags | map_sync_flags, fd, 0);
> +                         flags | map_sync_flags, fd, fd_offset);
>      if (activated_ptr == MAP_FAILED && map_sync_flags) {
>          if (errno == ENOTSUP) {
>              char *proc_link = g_strdup_printf("/proc/self/fd/%d", fd);
> @@ -154,7 +158,8 @@ static void *mmap_activate(void *ptr, size_t size, int fd, bool shared,
>           * If mmap failed with MAP_SHARED_VALIDATE | MAP_SYNC, we will try
>           * again without these flags to handle backwards compatibility.
>           */
> -        activated_ptr = mmap(ptr, size, PROT_READ | PROT_WRITE, flags, fd, 0);
> +        activated_ptr = mmap(ptr, size, PROT_READ | PROT_WRITE, flags, fd,
> +                             fd_offset);
>      }
>      return activated_ptr;
>  }
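
One note on the new fd_offset parameter: once callers start passing a
non-zero value, mmap() will require it to be aligned to the page size
backing the fd (the huge page size for hugetlbfs).  A defensive assert
along these lines might be worth adding at that point -- just a
suggestion, not something this patch needs:

    g_assert(QEMU_IS_ALIGNED(fd_offset, qemu_fd_getpagesize(fd)));

With fd == -1 the patch already forces fd_offset to 0, so that case is
covered.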
> @@ -176,16 +181,19 @@ void *qemu_ram_mmap(int fd,
>                      bool is_pmem)
>  {
>      const size_t guard_pagesize = mmap_guard_pagesize(fd);
> +    const size_t pagesize = qemu_fd_getpagesize(fd);
>      size_t offset, total;
>      void *ptr, *guardptr;
>
> +    g_assert(QEMU_IS_ALIGNED(size, pagesize));
> +
>      /*
>       * Note: this always allocates at least one extra page of virtual address
>       * space, even if size is already aligned.
>       */
>      total = size + align;
>
> -    guardptr = mmap_reserve(total, fd);
> +    guardptr = mmap_reserve(NULL, total, fd);
>      if (guardptr == MAP_FAILED) {
>          return MAP_FAILED;
>      }
> @@ -196,7 +204,7 @@ void *qemu_ram_mmap(int fd,
>
>      offset = QEMU_ALIGN_UP((uintptr_t)guardptr, align) - (uintptr_t)guardptr;
>
> -    ptr = mmap_activate(guardptr + offset, size, fd, shared, is_pmem);
> +    ptr = mmap_activate(guardptr + offset, size, fd, 0, shared, is_pmem);
>      if (ptr == MAP_FAILED) {
>          munmap(guardptr, total);
>          return MAP_FAILED;
> @@ -220,6 +228,10 @@ void *qemu_ram_mmap(int fd,
>
>  void qemu_ram_munmap(int fd, void *ptr, size_t size)
>  {
> +    const size_t pagesize = qemu_fd_getpagesize(fd);
> +
> +    g_assert(QEMU_IS_ALIGNED(size, pagesize));
> +
>      if (ptr) {
>          /* Unmap both the RAM block and the guard page */
>          munmap(ptr, size + mmap_guard_pagesize(fd));
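
The new asserts look good to me.  For anyone following along,
QEMU_IS_ALIGNED() is a plain modulo check, so with an illustrative 2 MiB
hugetlbfs backend:

    const size_t pagesize = 2 * MiB;    /* illustrative value only */

    QEMU_IS_ALIGNED(4 * MiB, pagesize); /* true  -> OK               */
    QEMU_IS_ALIGNED(3 * MiB, pagesize); /* false -> g_assert() fires */

which is exactly the partial-page resize we want to catch before it turns
into unmapping half a hugepage.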


--
Murilo
