On 16/4/25 10:14, Kohei Tokunaga wrote:
Signed-off-by: Kohei Tokunaga <ktokunaga.m...@gmail.com>
---
  util/mmap-alloc.c | 18 ++++++++++++++++++
  1 file changed, 18 insertions(+)

diff --git a/util/mmap-alloc.c b/util/mmap-alloc.c
index ed14f9c64d..91f33682e8 100644
--- a/util/mmap-alloc.c
+++ b/util/mmap-alloc.c
@@ -145,6 +145,7 @@ static bool map_noreserve_effective(int fd, uint32_t qemu_map_flags)
      return false;
  }
+#ifndef EMSCRIPTEN
  /*
   * Reserve a new memory region of the requested size to be used for mapping
   * from the given fd (if any).
@@ -176,6 +177,7 @@ static void *mmap_reserve(size_t size, int fd)
      return mmap(0, size, PROT_NONE, flags, fd, 0);
  }
+#endif
  /*
   * Activate memory in a reserved region from the given fd (if any), to make
@@ -244,6 +246,21 @@ static inline size_t mmap_guard_pagesize(int fd)
  #endif
  }
+#ifdef EMSCRIPTEN
+void *qemu_ram_mmap(int fd,
+                    size_t size,
+                    size_t align,
+                    uint32_t qemu_map_flags,
+                    off_t map_offset)
+{
+    /*
+     * emscripten doesn't support a non-zero first argument for mmap, so
+     * mmap a larger region without the hint and return an aligned pointer.
+     */
+    void *ptr = mmap_activate(0, size + align, fd, qemu_map_flags, map_offset);
+    return (void *)QEMU_ALIGN_UP((uintptr_t)ptr, align);
+}
+#else
  void *qemu_ram_mmap(int fd,
                      size_t size,
                      size_t align,
@@ -293,6 +310,7 @@ void *qemu_ram_mmap(int fd,
      return ptr;
  }
+#endif /* EMSCRIPTEN */
  void qemu_ram_munmap(int fd, void *ptr, size_t size)
  {
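
To spell out the trick the comment describes: since the address hint is
ignored, the patch maps size + align bytes at an arbitrary address and
rounds the returned pointer up to the next multiple of align, which still
leaves size usable bytes inside the mapping. A standalone sketch of just
that idea (illustrative only, using an anonymous private mapping and a
made-up helper name; the real code maps from the given fd):

/*
 * Mapping size + align bytes with a NULL hint and rounding the result up
 * to the next multiple of align always leaves at least size usable bytes
 * starting at the aligned address.
 */
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <sys/mman.h>

static void *mmap_aligned_sketch(size_t size, size_t align)
{
    /* align must be a non-zero power of two */
    assert(align && !(align & (align - 1)));

    void *raw = mmap(NULL, size + align, PROT_READ | PROT_WRITE,
                     MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    if (raw == MAP_FAILED) {
        return MAP_FAILED;
    }

    /* Round up to the next multiple of align. */
    uintptr_t aligned = ((uintptr_t)raw + align - 1) & ~((uintptr_t)align - 1);

    /* The aligned block of size bytes still fits inside the mapping. */
    assert(aligned + size <= (uintptr_t)raw + size + align);
    return (void *)aligned;
}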

Can we keep this code generic? I.e. with something along the lines
of (only build-tested):

-- >8 --
diff --git a/util/mmap-alloc.c b/util/mmap-alloc.c
index ed14f9c64de..0e52cce5b29 100644
--- a/util/mmap-alloc.c
+++ b/util/mmap-alloc.c
@@ -238,3 +238,10 @@ static inline size_t mmap_guard_pagesize(int fd)
 {
-#if defined(__powerpc64__) && defined(__linux__)
+#if defined(EMSCRIPTEN)
+    /*
+     * emscripten doesn't support a non-zero first argument for mmap, so we
+     * don't use any guard; returning 0 lets qemu_ram_mmap() mmap a larger
+     * region without the hint and return an aligned pointer.
+     */
+    return 0;
+#elif defined(__powerpc64__) && defined(__linux__)
     /* Mappings in the same segment must share the same page size */
@@ -246,2 +253,3 @@ static inline size_t mmap_guard_pagesize(int fd)

+
 void *qemu_ram_mmap(int fd,
@@ -253,4 +261,8 @@ void *qemu_ram_mmap(int fd,
     const size_t guard_pagesize = mmap_guard_pagesize(fd);
-    size_t offset, total;
-    void *ptr, *guardptr;
+    size_t offset = 0, total;
+    void *ptr, *guardptr = NULL;
+
+    assert(is_power_of_2(align));
+    /* Always align to host page size */
+    assert(align >= guard_pagesize);

@@ -262,13 +274,11 @@ void *qemu_ram_mmap(int fd,

-    guardptr = mmap_reserve(total, fd);
-    if (guardptr == MAP_FAILED) {
-        return MAP_FAILED;
+    if (guard_pagesize) {
+        guardptr = mmap_reserve(total, fd);
+        if (guardptr == MAP_FAILED) {
+            return MAP_FAILED;
+        }
+
+        offset = QEMU_ALIGN_UP((uintptr_t)guardptr, align) - (uintptr_t)guardptr;
     }

-    assert(is_power_of_2(align));
-    /* Always align to host page size */
-    assert(align >= guard_pagesize);
-
-    offset = QEMU_ALIGN_UP((uintptr_t)guardptr, align) - (uintptr_t)guardptr;
-
     ptr = mmap_activate(guardptr + offset, size, fd, qemu_map_flags,
---
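
Piecing the hunks together, the flow would then read roughly as below
(untested condensation of the proposal; the error handling and the
trimming of the unused reservation after mmap_activate() are left out):

void *qemu_ram_mmap(int fd, size_t size, size_t align,
                    uint32_t qemu_map_flags, off_t map_offset)
{
    const size_t guard_pagesize = mmap_guard_pagesize(fd);
    size_t offset = 0, total;
    void *ptr, *guardptr = NULL;

    assert(is_power_of_2(align));
    /* Always align to host page size */
    assert(align >= guard_pagesize);

    total = size + align;

    if (guard_pagesize) {
        /* Hosts honouring the hint: reserve a larger region first... */
        guardptr = mmap_reserve(total, fd);
        if (guardptr == MAP_FAILED) {
            return MAP_FAILED;
        }
        /* ...and pick the first align-multiple inside it. */
        offset = QEMU_ALIGN_UP((uintptr_t)guardptr, align)
                 - (uintptr_t)guardptr;
    }

    /* With guardptr == NULL and offset == 0 this is mmap(NULL, ...). */
    ptr = mmap_activate(guardptr + offset, size, fd, qemu_map_flags,
                        map_offset);

    /* (failure handling and reservation trimming as in the current code) */
    return ptr;
}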
