The branch main has been updated by markj:

URL: https://cgit.FreeBSD.org/src/commit/?id=20a38e847251076b12c173d7aa0b37eef261fd32

commit 20a38e847251076b12c173d7aa0b37eef261fd32
Author:     Mark Johnston <[email protected]>
AuthorDate: 2025-10-28 20:15:13 +0000
Commit:     Mark Johnston <[email protected]>
CommitDate: 2025-10-28 20:15:13 +0000

    vmm: Fix routines which create maps of the guest physical address space
    
    In vm_mmap_memseg(), use vm_map_insert() instead of vm_map_find().
    Existing callers expect to map the GPA that they passed, whereas
    vm_map_find() merely treats the GPA as a hint.  Also check for overflow
    and remove a test for first < 0 since "first" is unsigned.
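
    As a standalone illustration of the two points above (not kernel code:
    uint64_t and the hypothetical range_ok() helper stand in for the real
    types and checks), a "first < 0" test on an unsigned value is always
    false, while the wrap-around test "first + len < first" does catch
    overflow:

        #include <stdint.h>
        #include <stdio.h>

        /* Validate [first, first + len) against a segment of size seg_len. */
        static int
        range_ok(uint64_t first, uint64_t len, uint64_t seg_len)
        {
                uint64_t last;

                /* "first < 0" can never be true for an unsigned type. */
                if (first + len < first)        /* addition wrapped around */
                        return (0);
                last = first + len;
                if (first >= last || last > seg_len)
                        return (0);
                return (1);
        }

        int
        main(void)
        {
                /* A wrapping request is rejected up front. */
                printf("%d\n", range_ok(UINT64_MAX - 4096, 8192, 1 << 20)); /* 0 */
                printf("%d\n", range_ok(0, 4096, 1 << 20));                 /* 1 */
                return (0);
        }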
    
    In vmm_mmio_alloc(), return an error number instead of an object
    pointer, since the sole caller doesn't need the pointer.  As in
    vm_mmap_memseg(), use vm_map_insert() instead of vm_map_find() and
    validate parameters.  This function is not directly reachable via
    ioctl(), but we ought to be careful anyway.
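
    A standalone sketch of the sort of validation added here (PAGE_SIZE,
    PAGE_MASK, and the hypothetical mmio_args_ok() helper are stand-ins
    for the kernel definitions): reject ranges that wrap and arguments
    that are not page-aligned before touching the VM map:

        #include <errno.h>
        #include <stdint.h>
        #include <stdio.h>

        #define PAGE_SIZE       4096UL
        #define PAGE_MASK       (PAGE_SIZE - 1)

        /* Return 0 if the MMIO mapping arguments are sane, else EINVAL. */
        static int
        mmio_args_ok(uint64_t gpa, uint64_t len, uint64_t hpa)
        {
                if (gpa + len < gpa || hpa + len < hpa ||
                    (gpa & PAGE_MASK) != 0 || (hpa & PAGE_MASK) != 0 ||
                    (len & PAGE_MASK) != 0)
                        return (EINVAL);
                return (0);
        }

        int
        main(void)
        {
                printf("%d\n", mmio_args_ok(0x1000, 0x2000, 0xfee00000)); /* 0 */
                printf("%d\n", mmio_args_ok(0x1001, 0x2000, 0xfee00000)); /* EINVAL */
                return (0);
        }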
    
    Reviewed by:    corvink, kib
    MFC after:      2 weeks
    Sponsored by:   The FreeBSD Foundation
    Sponsored by:   Klara, Inc.
    Differential Revision:  https://reviews.freebsd.org/D53246
---
 sys/amd64/vmm/vmm.c             |  7 +----
 sys/amd64/vmm/vmm_mem.h         |  5 ++--
 sys/amd64/vmm/vmm_mem_machdep.c | 61 +++++++++++++++++++++++------------------
 sys/dev/vmm/vmm_mem.c           | 13 +++++----
 4 files changed, 46 insertions(+), 40 deletions(-)

diff --git a/sys/amd64/vmm/vmm.c b/sys/amd64/vmm/vmm.c
index 473887240b9b..f2bea0d82b5c 100644
--- a/sys/amd64/vmm/vmm.c
+++ b/sys/amd64/vmm/vmm.c
@@ -724,12 +724,7 @@ vm_name(struct vm *vm)
 int
 vm_map_mmio(struct vm *vm, vm_paddr_t gpa, size_t len, vm_paddr_t hpa)
 {
-       vm_object_t obj;
-
-       if ((obj = vmm_mmio_alloc(vm_vmspace(vm), gpa, len, hpa)) == NULL)
-               return (ENOMEM);
-       else
-               return (0);
+       return (vmm_mmio_alloc(vm_vmspace(vm), gpa, len, hpa));
 }
 
 int
diff --git a/sys/amd64/vmm/vmm_mem.h b/sys/amd64/vmm/vmm_mem.h
index 41b9bf07c4fc..d905fd37001d 100644
--- a/sys/amd64/vmm/vmm_mem.h
+++ b/sys/amd64/vmm/vmm_mem.h
@@ -30,10 +30,9 @@
 #define        _VMM_MEM_H_
 
 struct vmspace;
-struct vm_object;
 
-struct vm_object *vmm_mmio_alloc(struct vmspace *, vm_paddr_t gpa, size_t len,
-                                vm_paddr_t hpa);
+int            vmm_mmio_alloc(struct vmspace *, vm_paddr_t gpa, size_t len,
+                   vm_paddr_t hpa);
 void           vmm_mmio_free(struct vmspace *, vm_paddr_t gpa, size_t size);
 vm_paddr_t     vmm_mem_maxaddr(void);
 
diff --git a/sys/amd64/vmm/vmm_mem_machdep.c b/sys/amd64/vmm/vmm_mem_machdep.c
index e96c9e4bdc66..afb3a0274e2a 100644
--- a/sys/amd64/vmm/vmm_mem_machdep.c
+++ b/sys/amd64/vmm/vmm_mem_machdep.c
@@ -36,6 +36,7 @@
 #include <vm/vm.h>
 #include <vm/vm_param.h>
 #include <vm/pmap.h>
+#include <vm/vm_extern.h>
 #include <vm/vm_map.h>
 #include <vm/vm_object.h>
 #include <vm/vm_page.h>
@@ -45,40 +46,48 @@
 
 #include "vmm_mem.h"
 
-vm_object_t
+int
 vmm_mmio_alloc(struct vmspace *vmspace, vm_paddr_t gpa, size_t len,
-              vm_paddr_t hpa)
+    vm_paddr_t hpa)
 {
-       int error;
-       vm_object_t obj;
        struct sglist *sg;
+       vm_object_t obj;
+       int error;
+
+       if (gpa + len < gpa || hpa + len < hpa || (gpa & PAGE_MASK) != 0 ||
+           (hpa & PAGE_MASK) != 0 || (len & PAGE_MASK) != 0)
+               return (EINVAL);
 
        sg = sglist_alloc(1, M_WAITOK);
        error = sglist_append_phys(sg, hpa, len);
        KASSERT(error == 0, ("error %d appending physaddr to sglist", error));
 
        obj = vm_pager_allocate(OBJT_SG, sg, len, VM_PROT_RW, 0, NULL);
-       if (obj != NULL) {
-               /*
-                * VT-x ignores the MTRR settings when figuring out the
-                * memory type for translations obtained through EPT.
-                *
-                * Therefore we explicitly force the pages provided by
-                * this object to be mapped as uncacheable.
-                */
-               VM_OBJECT_WLOCK(obj);
-               error = vm_object_set_memattr(obj, VM_MEMATTR_UNCACHEABLE);
-               VM_OBJECT_WUNLOCK(obj);
-               if (error != KERN_SUCCESS) {
-                       panic("vmm_mmio_alloc: vm_object_set_memattr error %d",
-                               error);
-               }
-               error = vm_map_find(&vmspace->vm_map, obj, 0, &gpa, len, 0,
-                                   VMFS_NO_SPACE, VM_PROT_RW, VM_PROT_RW, 0);
-               if (error != KERN_SUCCESS) {
-                       vm_object_deallocate(obj);
-                       obj = NULL;
-               }
+       if (obj == NULL)
+               return (ENOMEM);
+
+       /*
+        * VT-x ignores the MTRR settings when figuring out the memory type for
+        * translations obtained through EPT.
+        *
+        * Therefore we explicitly force the pages provided by this object to be
+        * mapped as uncacheable.
+        */
+       VM_OBJECT_WLOCK(obj);
+       error = vm_object_set_memattr(obj, VM_MEMATTR_UNCACHEABLE);
+       VM_OBJECT_WUNLOCK(obj);
+       if (error != KERN_SUCCESS)
+               panic("vmm_mmio_alloc: vm_object_set_memattr error %d", error);
+
+       vm_map_lock(&vmspace->vm_map);
+       error = vm_map_insert(&vmspace->vm_map, obj, 0, gpa, gpa + len,
+           VM_PROT_RW, VM_PROT_RW, 0);
+       vm_map_unlock(&vmspace->vm_map);
+       if (error != KERN_SUCCESS) {
+               error = vm_mmap_to_errno(error);
+               vm_object_deallocate(obj);
+       } else {
+               error = 0;
        }
 
        /*
@@ -94,7 +103,7 @@ vmm_mmio_alloc(struct vmspace *vmspace, vm_paddr_t gpa, size_t len,
         */
        sglist_free(sg);
 
-       return (obj);
+       return (error);
 }
 
 void
diff --git a/sys/dev/vmm/vmm_mem.c b/sys/dev/vmm/vmm_mem.c
index 9df31c9ba133..5ae944713c81 100644
--- a/sys/dev/vmm/vmm_mem.c
+++ b/sys/dev/vmm/vmm_mem.c
@@ -279,8 +279,10 @@ vm_mmap_memseg(struct vm *vm, vm_paddr_t gpa, int segid, vm_ooffset_t first,
        if (seg->object == NULL)
                return (EINVAL);
 
+       if (first + len < first || gpa + len < gpa)
+               return (EINVAL);
        last = first + len;
-       if (first < 0 || first >= last || last > seg->len)
+       if (first >= last || last > seg->len)
                return (EINVAL);
 
        if ((gpa | first | last) & PAGE_MASK)
@@ -298,11 +300,12 @@ vm_mmap_memseg(struct vm *vm, vm_paddr_t gpa, int segid, vm_ooffset_t first,
                return (ENOSPC);
 
        vmmap = &mem->mem_vmspace->vm_map;
-       error = vm_map_find(vmmap, seg->object, first, &gpa, len, 0,
-           VMFS_NO_SPACE, prot, prot, 0);
+       vm_map_lock(vmmap);
+       error = vm_map_insert(vmmap, seg->object, first, gpa, gpa + len,
+           prot, prot, 0);
+       vm_map_unlock(vmmap);
        if (error != KERN_SUCCESS)
-               return (EFAULT);
-
+               return (vm_mmap_to_errno(error));
        vm_object_reference(seg->object);
 
        if (flags & VM_MEMMAP_F_WIRED) {
