Module Name:    src
Committed By:   maxv
Date:           Sat Jun  8 07:27:44 UTC 2019

Modified Files:
        src/lib/libnvmm: libnvmm.3 libnvmm.c libnvmm_x86.c nvmm.h
        src/tests/lib/libnvmm: h_io_assist.c h_mem_assist.c

Log Message:
Change the NVMM API to reduce data movements. Sent to tech-kern@.
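For emulator code, the new calling convention looks roughly like the sketch
below: the VCPU is a public structure whose state/event/exit pointers are set
up by nvmm_vcpu_create(), so there is no caller-owned state copy to shuffle
around. This is only an illustrative fragment, not part of the commit; guest
memory setup and most exit reasons are omitted, and the include path is
whatever the installed libnvmm headers provide.

	#include <err.h>
	#include <errno.h>
	#include <nvmm.h>

	int
	main(void)
	{
		struct nvmm_machine mach;
		struct nvmm_vcpu vcpu;

		if (nvmm_machine_create(&mach) == -1)
			err(errno, "nvmm_machine_create");
		if (nvmm_vcpu_create(&mach, 0, &vcpu) == -1)
			err(errno, "nvmm_vcpu_create");

		/* The state area is reached through vcpu.state, no local copy. */
		vcpu.state->gprs[NVMM_X64_GPR_RIP] = 0x2000;
		if (nvmm_vcpu_setstate(&mach, &vcpu, NVMM_X64_STATE_GPRS) == -1)
			err(errno, "nvmm_vcpu_setstate");

		while (1) {
			if (nvmm_vcpu_run(&mach, &vcpu) == -1)
				err(errno, "nvmm_vcpu_run");

			/* The exit information lands in vcpu.exit. */
			switch (vcpu.exit->reason) {
			case NVMM_EXIT_IO:
				if (nvmm_assist_io(&mach, &vcpu) == -1)
					err(errno, "nvmm_assist_io");
				break;
			case NVMM_EXIT_SHUTDOWN:
				return 0;
			default:
				break;
			}
		}
	}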


To generate a diff of this commit:
cvs rdiff -u -r1.18 -r1.19 src/lib/libnvmm/libnvmm.3
cvs rdiff -u -r1.13 -r1.14 src/lib/libnvmm/libnvmm.c
cvs rdiff -u -r1.30 -r1.31 src/lib/libnvmm/libnvmm_x86.c
cvs rdiff -u -r1.11 -r1.12 src/lib/libnvmm/nvmm.h
cvs rdiff -u -r1.7 -r1.8 src/tests/lib/libnvmm/h_io_assist.c
cvs rdiff -u -r1.10 -r1.11 src/tests/lib/libnvmm/h_mem_assist.c

Please note that diffs are not public domain; they are subject to the
copyright notices on the relevant files.

Modified files:

Index: src/lib/libnvmm/libnvmm.3
diff -u src/lib/libnvmm/libnvmm.3:1.18 src/lib/libnvmm/libnvmm.3:1.19
--- src/lib/libnvmm/libnvmm.3:1.18	Sat May 11 07:44:00 2019
+++ src/lib/libnvmm/libnvmm.3	Sat Jun  8 07:27:44 2019
@@ -1,4 +1,4 @@
-.\"	$NetBSD: libnvmm.3,v 1.18 2019/05/11 07:44:00 maxv Exp $
+.\"	$NetBSD: libnvmm.3,v 1.19 2019/06/08 07:27:44 maxv Exp $
 .\"
 .\" Copyright (c) 2018, 2019 The NetBSD Foundation, Inc.
 .\" All rights reserved.
@@ -27,7 +27,7 @@
 .\" ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 .\" POSSIBILITY OF SUCH DAMAGE.
 .\"
-.Dd May 11, 2019
+.Dd May 30, 2019
 .Dt LIBNVMM 3
 .Os
 .Sh NAME
@@ -47,21 +47,20 @@
 .Fn nvmm_machine_configure "struct nvmm_machine *mach" "uint64_t op" \
     "void *conf"
 .Ft int
-.Fn nvmm_vcpu_create "struct nvmm_machine *mach" "nvmm_cpuid_t cpuid"
+.Fn nvmm_vcpu_create "struct nvmm_machine *mach" "nvmm_cpuid_t cpuid" \
+    "struct nvmm_vcpu *vcpu"
 .Ft int
-.Fn nvmm_vcpu_destroy "struct nvmm_machine *mach" "nvmm_cpuid_t cpuid"
+.Fn nvmm_vcpu_destroy "struct nvmm_machine *mach" "struct nvmm_vcpu *vcpu"
 .Ft int
-.Fn nvmm_vcpu_getstate "struct nvmm_machine *mach" "nvmm_cpuid_t cpuid" \
-    "void *state" "uint64_t flags"
+.Fn nvmm_vcpu_getstate "struct nvmm_machine *mach" "struct nvmm_vcpu *vcpu" \
+    "uint64_t flags"
 .Ft int
-.Fn nvmm_vcpu_setstate "struct nvmm_machine *mach" "nvmm_cpuid_t cpuid" \
-    "void *state" "uint64_t flags"
+.Fn nvmm_vcpu_setstate "struct nvmm_machine *mach" "struct nvmm_vcpu *vcpu" \
+    "uint64_t flags"
 .Ft int
-.Fn nvmm_vcpu_inject "struct nvmm_machine *mach" "nvmm_cpuid_t cpuid" \
-    "struct nvmm_event *event"
+.Fn nvmm_vcpu_inject "struct nvmm_machine *mach" "struct nvmm_vcpu *vcpu"
 .Ft int
-.Fn nvmm_vcpu_run "struct nvmm_machine *mach" "nvmm_cpuid_t cpuid" \
-    "struct nvmm_exit *exit"
+.Fn nvmm_vcpu_run "struct nvmm_machine *mach" "struct nvmm_vcpu *vcpu"
 .Ft int
 .Fn nvmm_hva_map "struct nvmm_machine *mach" "uintptr_t hva" "size_t size"
 .Ft int
@@ -73,17 +72,15 @@
 .Fn nvmm_gpa_unmap "struct nvmm_machine *mach" "uintptr_t hva" "gpaddr_t gpa" \
     "size_t size"
 .Ft int
-.Fn nvmm_gva_to_gpa "struct nvmm_machine *mach" "nvmm_cpuid_t cpuid" \
+.Fn nvmm_gva_to_gpa "struct nvmm_machine *mach" "struct nvmm_vcpu *vcpu" \
     "gvaddr_t gva" "gpaddr_t *gpa" "nvmm_prot_t *prot"
 .Ft int
 .Fn nvmm_gpa_to_hva "struct nvmm_machine *mach" "gpaddr_t gpa" \
     "uintptr_t *hva" "nvmm_prot_t *prot"
 .Ft int
-.Fn nvmm_assist_io "struct nvmm_machine *mach" "nvmm_cpuid_t cpuid" \
-    "struct nvmm_exit *exit"
+.Fn nvmm_assist_io "struct nvmm_machine *mach" "struct nvmm_vcpu *vcpu"
 .Ft int
-.Fn nvmm_assist_mem "struct nvmm_machine *mach" "nvmm_cpuid_t cpuid" \
-    "struct nvmm_exit *exit"
+.Fn nvmm_assist_mem "struct nvmm_machine *mach" "struct nvmm_vcpu *vcpu"
 .Sh DESCRIPTION
 .Nm
 provides a library for emulator software to handle hardware-accelerated virtual
@@ -95,6 +92,8 @@ Emulator software should not attempt to 
 should use the API provided by
 .Nm
 to manage virtual machines.
+A virtual CPU is described by a public structure,
+.Cd nvmm_vcpu .
 .Pp
 .Fn nvmm_capability
 gets the capabilities of NVMM.
@@ -124,61 +123,61 @@ describes the value of the parameter.
 creates a virtual CPU in the machine
 .Fa mach ,
 giving it the CPU id
-.Fa cpuid .
+.Fa cpuid ,
+and initializes
+.Fa vcpu .
 .Pp
 .Fn nvmm_vcpu_destroy
 destroys the virtual CPU identified by
-.Fa cpuid
+.Fa vcpu
 in the machine
 .Fa mach .
 .Pp
 .Fn nvmm_vcpu_getstate
 gets the state of the virtual CPU identified by
-.Fa cpuid
+.Fa vcpu
 in the machine
 .Fa mach .
-The
-.Fa state
-argument is the address of a state area, and
 .Fa flags
 is the bitmap of the components that are to be retrieved.
+The components are located in
+.Fa vcpu->state .
 See
 .Sx VCPU State Area
 below for details.
 .Pp
 .Fn nvmm_vcpu_setstate
 sets the state of the virtual CPU identified by
-.Fa cpuid
+.Fa vcpu
 in the machine
 .Fa mach .
-The
-.Fa state
-argument is the address of a state area, and
 .Fa flags
 is the bitmap of the components that are to be set.
+The components are located in
+.Fa vcpu->state .
 See
 .Sx VCPU State Area
 below for details.
 .Pp
 .Fn nvmm_vcpu_inject
 injects into the CPU identified by
-.Fa cpuid
+.Fa vcpu
 of the machine
 .Fa mach
 an event described by
-.Fa event .
+.Fa vcpu->event .
 See
 .Sx Event Injection
 below for details.
 .Pp
 .Fn nvmm_vcpu_run
 runs the CPU identified by
-.Fa cpuid
+.Fa vcpu
 in the machine
 .Fa mach ,
 until a VM exit is triggered.
 The
-.Fa exit
+.Fa vcpu->exit
 structure is filled to indicate the exit reason, and the associated parameters
 if any.
 .Pp
@@ -220,7 +219,7 @@ from the machine
 .Pp
 .Fn nvmm_gva_to_gpa
 translates, on the CPU
-.Fa cpuid
+.Fa vcpu
 from the machine
 .Fa mach ,
 the guest virtual address given in
@@ -246,9 +245,9 @@ must be page-aligned.
 .Pp
 .Fn nvmm_assist_io
 emulates the I/O operation described in
-.Fa exit
+.Fa vcpu->exit
 on CPU
-.Fa cpuid
+.Fa vcpu
 from machine
 .Fa mach .
 See
@@ -257,9 +256,9 @@ below for details.
 .Pp
 .Fn nvmm_assist_mem
 emulates the Mem operation described in
-.Fa exit
+.Fa vcpu->exit
 on CPU
-.Fa cpuid
+.Fa vcpu
 from machine
 .Fa mach .
 See
@@ -364,6 +363,7 @@ struct nvmm_x64_state {
 	struct nvmm_x64_state_intr intr;
 	struct fxsave fpu;
 };
+#define nvmm_vcpu_state nvmm_x64_state
 .Ed
 .Pp
 Refer to functional examples to see precisely how to use this structure.
@@ -379,6 +379,46 @@ During VM exits, a partial VCPU state ar
 see
 .Sx Exit Reasons
 below for details.
+.Ss VCPU Programming Model
+A VCPU is described by a public structure,
+.Cd nvmm_vcpu :
+.Bd -literal
+struct nvmm_vcpu {
+	nvmm_cpuid_t cpuid;
+	struct nvmm_vcpu_state *state;
+	struct nvmm_event *event;
+	struct nvmm_exit *exit;
+};
+.Ed
+.Pp
+This structure is used both publicly by emulator software and internally by
+.Nm .
+Emulator software should not modify the pointers of this structure, because
+they are initialized to special values by
+.Nm .
+.Pp
+A call to
+.Fn nvmm_vcpu_getstate
+will fetch the relevant parts of the VCPU state and put them in
+.Fa vcpu->state .
+A call to
+.Fn nvmm_vcpu_setstate
+will install in the VCPU the relevant parts of
+.Fa vcpu->state .
+A call to
+.Fn nvmm_vcpu_inject
+will inject in the VCPU the event in
+.Fa vcpu->event .
+A call to
+.Fn nvmm_vcpu_run
+will fill
+.Fa vcpu->exit
+with the VCPU exit information.
+.Pp
+If emulator software uses several threads, a VCPU should be associated with
+only one thread, and only this thread should perform VCPU modifications.
+Emulator software should not modify the state of a VCPU with several
+different threads.
 .Ss Exit Reasons
 The
 .Cd nvmm_exit

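The VCPU programming model documented above amounts to operating on the shared
state area through vcpu->state rather than on a caller-owned copy passed by
address. A minimal sketch of the getstate/setstate flow, assuming a machine and
a VCPU already created as in the example near the top of this mail:

	/* Pull only the GPR component into the shared state area. */
	if (nvmm_vcpu_getstate(&mach, &vcpu, NVMM_X64_STATE_GPRS) == -1)
		err(errno, "nvmm_vcpu_getstate");

	/* Modify it in place... */
	vcpu.state->gprs[NVMM_X64_GPR_RAX] += 1;

	/* ...and mark the component for commit before the next run. */
	if (nvmm_vcpu_setstate(&mach, &vcpu, NVMM_X64_STATE_GPRS) == -1)
		err(errno, "nvmm_vcpu_setstate");
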
Index: src/lib/libnvmm/libnvmm.c
diff -u src/lib/libnvmm/libnvmm.c:1.13 src/lib/libnvmm/libnvmm.c:1.14
--- src/lib/libnvmm/libnvmm.c:1.13	Sat May 11 07:31:57 2019
+++ src/lib/libnvmm/libnvmm.c	Sat Jun  8 07:27:44 2019
@@ -1,4 +1,4 @@
-/*	$NetBSD: libnvmm.c,v 1.13 2019/05/11 07:31:57 maxv Exp $	*/
+/*	$NetBSD: libnvmm.c,v 1.14 2019/06/08 07:27:44 maxv Exp $	*/
 
 /*
  * Copyright (c) 2018 The NetBSD Foundation, Inc.
@@ -224,7 +224,6 @@ nvmm_machine_create(struct nvmm_machine 
 	memset(mach, 0, sizeof(*mach));
 	mach->machid = args.machid;
 	mach->pages = pages;
-	mach->npages = __capability.max_vcpus;
 	mach->areas = areas;
 
 	return 0;
@@ -272,7 +271,8 @@ nvmm_machine_configure(struct nvmm_machi
 }
 
 int
-nvmm_vcpu_create(struct nvmm_machine *mach, nvmm_cpuid_t cpuid)
+nvmm_vcpu_create(struct nvmm_machine *mach, nvmm_cpuid_t cpuid,
+    struct nvmm_vcpu *vcpu)
 {
 	struct nvmm_ioc_vcpu_create args;
 	struct nvmm_comm_page *comm;
@@ -292,41 +292,42 @@ nvmm_vcpu_create(struct nvmm_machine *ma
 
 	mach->pages[cpuid] = comm;
 
+	vcpu->cpuid = cpuid;
+	vcpu->state = &comm->state;
+	vcpu->event = &comm->event;
+	vcpu->exit = malloc(sizeof(*vcpu->exit));
+
 	return 0;
 }
 
 int
-nvmm_vcpu_destroy(struct nvmm_machine *mach, nvmm_cpuid_t cpuid)
+nvmm_vcpu_destroy(struct nvmm_machine *mach, struct nvmm_vcpu *vcpu)
 {
 	struct nvmm_ioc_vcpu_destroy args;
 	struct nvmm_comm_page *comm;
 	int ret;
 
 	args.machid = mach->machid;
-	args.cpuid = cpuid;
+	args.cpuid = vcpu->cpuid;
 
 	ret = ioctl(nvmm_fd, NVMM_IOC_VCPU_DESTROY, &args);
 	if (ret == -1)
 		return -1;
 
-	comm = mach->pages[cpuid];
+	comm = mach->pages[vcpu->cpuid];
 	munmap(comm, PAGE_SIZE);
+	free(vcpu->exit);
 
 	return 0;
 }
 
 int
-nvmm_vcpu_setstate(struct nvmm_machine *mach, nvmm_cpuid_t cpuid,
-    void *state, uint64_t flags)
+nvmm_vcpu_setstate(struct nvmm_machine *mach, struct nvmm_vcpu *vcpu,
+    uint64_t flags)
 {
 	struct nvmm_comm_page *comm;
 
-	if (__predict_false(cpuid >= mach->npages)) {
-		return -1;
-	}
-	comm = mach->pages[cpuid];
-
-	nvmm_arch_copystate(&comm->state, state, flags);
+	comm = mach->pages[vcpu->cpuid];
 	comm->state_commit |= flags;
 	comm->state_cached |= flags;
 
@@ -334,68 +335,57 @@ nvmm_vcpu_setstate(struct nvmm_machine *
 }
 
 int
-nvmm_vcpu_getstate(struct nvmm_machine *mach, nvmm_cpuid_t cpuid,
-    void *state, uint64_t flags)
+nvmm_vcpu_getstate(struct nvmm_machine *mach, struct nvmm_vcpu *vcpu,
+    uint64_t flags)
 {
 	struct nvmm_ioc_vcpu_getstate args;
 	struct nvmm_comm_page *comm;
 	int ret;
 
-	if (__predict_false(cpuid >= mach->npages)) {
-		return -1;
-	}
-	comm = mach->pages[cpuid];
+	comm = mach->pages[vcpu->cpuid];
 
 	if (__predict_true((flags & ~comm->state_cached) == 0)) {
-		goto out;
+		return 0;
 	}
 	comm->state_wanted = flags & ~comm->state_cached;
 
 	args.machid = mach->machid;
-	args.cpuid = cpuid;
+	args.cpuid = vcpu->cpuid;
 
 	ret = ioctl(nvmm_fd, NVMM_IOC_VCPU_GETSTATE, &args);
 	if (ret == -1)
 		return -1;
 
-out:
-	nvmm_arch_copystate(state, &comm->state, flags);
 	return 0;
 }
 
 int
-nvmm_vcpu_inject(struct nvmm_machine *mach, nvmm_cpuid_t cpuid,
-    struct nvmm_event *event)
+nvmm_vcpu_inject(struct nvmm_machine *mach, struct nvmm_vcpu *vcpu)
 {
 	struct nvmm_comm_page *comm;
 
-	if (__predict_false(cpuid >= mach->npages)) {
-		return -1;
-	}
-	comm = mach->pages[cpuid];
-
-	memcpy(&comm->event, event, sizeof(comm->event));
+	comm = mach->pages[vcpu->cpuid];
 	comm->event_commit = true;
 
 	return 0;
 }
 
 int
-nvmm_vcpu_run(struct nvmm_machine *mach, nvmm_cpuid_t cpuid,
-    struct nvmm_exit *exit)
+nvmm_vcpu_run(struct nvmm_machine *mach, struct nvmm_vcpu *vcpu)
 {
 	struct nvmm_ioc_vcpu_run args;
 	int ret;
 
 	args.machid = mach->machid;
-	args.cpuid = cpuid;
+	args.cpuid = vcpu->cpuid;
 	memset(&args.exit, 0, sizeof(args.exit));
 
 	ret = ioctl(nvmm_fd, NVMM_IOC_VCPU_RUN, &args);
 	if (ret == -1)
 		return -1;
 
-	memcpy(exit, &args.exit, sizeof(args.exit));
+	/* No comm support yet, just copy. */
+	memcpy(vcpu->exit, &args.exit, sizeof(args.exit));
 
 	return 0;
 }

Index: src/lib/libnvmm/libnvmm_x86.c
diff -u src/lib/libnvmm/libnvmm_x86.c:1.30 src/lib/libnvmm/libnvmm_x86.c:1.31
--- src/lib/libnvmm/libnvmm_x86.c:1.30	Sat May 11 07:31:57 2019
+++ src/lib/libnvmm/libnvmm_x86.c	Sat Jun  8 07:27:44 2019
@@ -1,4 +1,4 @@
-/*	$NetBSD: libnvmm_x86.c,v 1.30 2019/05/11 07:31:57 maxv Exp $	*/
+/*	$NetBSD: libnvmm_x86.c,v 1.31 2019/06/08 07:27:44 maxv Exp $	*/
 
 /*
  * Copyright (c) 2018 The NetBSD Foundation, Inc.
@@ -50,44 +50,13 @@
 
 /* -------------------------------------------------------------------------- */
 
-static void
-nvmm_arch_copystate(void *_dst, void *_src, uint64_t flags)
-{
-	struct nvmm_x64_state *src = _src;
-	struct nvmm_x64_state *dst = _dst;
-
-	if (flags & NVMM_X64_STATE_GPRS) {
-		memcpy(dst->gprs, src->gprs, sizeof(dst->gprs));
-	}
-	if (flags & NVMM_X64_STATE_SEGS) {
-		memcpy(dst->segs, src->segs, sizeof(dst->segs));
-	}
-	if (flags & NVMM_X64_STATE_CRS) {
-		memcpy(dst->crs, src->crs, sizeof(dst->crs));
-	}
-	if (flags & NVMM_X64_STATE_DRS) {
-		memcpy(dst->drs, src->drs, sizeof(dst->drs));
-	}
-	if (flags & NVMM_X64_STATE_MSRS) {
-		memcpy(dst->msrs, src->msrs, sizeof(dst->msrs));
-	}
-	if (flags & NVMM_X64_STATE_INTR) {
-		memcpy(&dst->intr, &src->intr, sizeof(dst->intr));
-	}
-	if (flags & NVMM_X64_STATE_FPU) {
-		memcpy(&dst->fpu, &src->fpu, sizeof(dst->fpu));
-	}
-}
-
-/* -------------------------------------------------------------------------- */
-
 /*
  * Undocumented debugging function. Helpful.
  */
 int
-nvmm_vcpu_dump(struct nvmm_machine *mach, nvmm_cpuid_t cpuid)
+nvmm_vcpu_dump(struct nvmm_machine *mach, struct nvmm_vcpu *vcpu)
 {
-	struct nvmm_x64_state state;
+	struct nvmm_x64_state *state = vcpu->state;
 	uint16_t *attr;
 	size_t i;
 	int ret;
@@ -96,31 +65,31 @@ nvmm_vcpu_dump(struct nvmm_machine *mach
 		"ES", "CS", "SS", "DS", "FS", "GS", "GDT", "IDT", "LDT", "TR"
 	};
 
-	ret = nvmm_vcpu_getstate(mach, cpuid, &state, NVMM_X64_STATE_ALL);
+	ret = nvmm_vcpu_getstate(mach, vcpu, NVMM_X64_STATE_ALL);
 	if (ret == -1)
 		return -1;
 
-	printf("+ VCPU id=%d\n", (int)cpuid);
-	printf("| -> RIP=%"PRIx64"\n", state.gprs[NVMM_X64_GPR_RIP]);
-	printf("| -> RSP=%"PRIx64"\n", state.gprs[NVMM_X64_GPR_RSP]);
-	printf("| -> RAX=%"PRIx64"\n", state.gprs[NVMM_X64_GPR_RAX]);
-	printf("| -> RBX=%"PRIx64"\n", state.gprs[NVMM_X64_GPR_RBX]);
-	printf("| -> RCX=%"PRIx64"\n", state.gprs[NVMM_X64_GPR_RCX]);
-	printf("| -> RFLAGS=%p\n", (void *)state.gprs[NVMM_X64_GPR_RFLAGS]);
+	printf("+ VCPU id=%d\n", (int)vcpu->cpuid);
+	printf("| -> RIP=%"PRIx64"\n", state->gprs[NVMM_X64_GPR_RIP]);
+	printf("| -> RSP=%"PRIx64"\n", state->gprs[NVMM_X64_GPR_RSP]);
+	printf("| -> RAX=%"PRIx64"\n", state->gprs[NVMM_X64_GPR_RAX]);
+	printf("| -> RBX=%"PRIx64"\n", state->gprs[NVMM_X64_GPR_RBX]);
+	printf("| -> RCX=%"PRIx64"\n", state->gprs[NVMM_X64_GPR_RCX]);
+	printf("| -> RFLAGS=%p\n", (void *)state->gprs[NVMM_X64_GPR_RFLAGS]);
 	for (i = 0; i < NVMM_X64_NSEG; i++) {
-		attr = (uint16_t *)&state.segs[i].attrib;
+		attr = (uint16_t *)&state->segs[i].attrib;
 		printf("| -> %s: sel=0x%x base=%"PRIx64", limit=%x, attrib=%x\n",
 		    segnames[i],
-		    state.segs[i].selector,
-		    state.segs[i].base,
-		    state.segs[i].limit,
+		    state->segs[i].selector,
+		    state->segs[i].base,
+		    state->segs[i].limit,
 		    *attr);
 	}
-	printf("| -> MSR_EFER=%"PRIx64"\n", state.msrs[NVMM_X64_MSR_EFER]);
-	printf("| -> CR0=%"PRIx64"\n", state.crs[NVMM_X64_CR_CR0]);
-	printf("| -> CR3=%"PRIx64"\n", state.crs[NVMM_X64_CR_CR3]);
-	printf("| -> CR4=%"PRIx64"\n", state.crs[NVMM_X64_CR_CR4]);
-	printf("| -> CR8=%"PRIx64"\n", state.crs[NVMM_X64_CR_CR8]);
+	printf("| -> MSR_EFER=%"PRIx64"\n", state->msrs[NVMM_X64_MSR_EFER]);
+	printf("| -> CR0=%"PRIx64"\n", state->crs[NVMM_X64_CR_CR0]);
+	printf("| -> CR3=%"PRIx64"\n", state->crs[NVMM_X64_CR_CR3]);
+	printf("| -> CR4=%"PRIx64"\n", state->crs[NVMM_X64_CR_CR4]);
+	printf("| -> CR8=%"PRIx64"\n", state->crs[NVMM_X64_CR_CR8]);
 
 	return 0;
 }
@@ -456,18 +425,18 @@ x86_gva_to_gpa(struct nvmm_machine *mach
 }
 
 int
-nvmm_gva_to_gpa(struct nvmm_machine *mach, nvmm_cpuid_t cpuid,
+nvmm_gva_to_gpa(struct nvmm_machine *mach, struct nvmm_vcpu *vcpu,
     gvaddr_t gva, gpaddr_t *gpa, nvmm_prot_t *prot)
 {
-	struct nvmm_x64_state state;
+	struct nvmm_x64_state *state = vcpu->state;
 	int ret;
 
-	ret = nvmm_vcpu_getstate(mach, cpuid, &state,
+	ret = nvmm_vcpu_getstate(mach, vcpu,
 	    NVMM_X64_STATE_CRS | NVMM_X64_STATE_MSRS);
 	if (ret == -1)
 		return -1;
 
-	return x86_gva_to_gpa(mach, &state, gva, gpa, prot);
+	return x86_gva_to_gpa(mach, state, gva, gpa, prot);
 }
 
 /* -------------------------------------------------------------------------- */
@@ -720,10 +689,10 @@ assist_io_batch(struct nvmm_machine *mac
 }
 
 int
-nvmm_assist_io(struct nvmm_machine *mach, nvmm_cpuid_t cpuid,
-    struct nvmm_exit *exit)
+nvmm_assist_io(struct nvmm_machine *mach, struct nvmm_vcpu *vcpu)
 {
-	struct nvmm_x64_state state;
+	struct nvmm_x64_state *state = vcpu->state;
+	struct nvmm_exit *exit = vcpu->exit;
 	struct nvmm_io io;
 	uint64_t cnt = 0; /* GCC */
 	uint8_t iobuf[8];
@@ -743,21 +712,21 @@ nvmm_assist_io(struct nvmm_machine *mach
 	io.size = exit->u.io.operand_size;
 	io.data = iobuf;
 
-	ret = nvmm_vcpu_getstate(mach, cpuid, &state,
+	ret = nvmm_vcpu_getstate(mach, vcpu,
 	    NVMM_X64_STATE_GPRS | NVMM_X64_STATE_SEGS |
 	    NVMM_X64_STATE_CRS | NVMM_X64_STATE_MSRS);
 	if (ret == -1)
 		return -1;
 
 	if (exit->u.io.rep) {
-		cnt = rep_get_cnt(&state, exit->u.io.address_size);
+		cnt = rep_get_cnt(state, exit->u.io.address_size);
 		if (__predict_false(cnt == 0)) {
-			state.gprs[NVMM_X64_GPR_RIP] = exit->u.io.npc;
+			state->gprs[NVMM_X64_GPR_RIP] = exit->u.io.npc;
 			goto out;
 		}
 	}
 
-	if (__predict_false(state.gprs[NVMM_X64_GPR_RFLAGS] & PSL_D)) {
+	if (__predict_false(state->gprs[NVMM_X64_GPR_RFLAGS] & PSL_D)) {
 		psld = true;
 	}
 
@@ -771,7 +740,7 @@ nvmm_assist_io(struct nvmm_machine *mach
 			reg = NVMM_X64_GPR_RSI;
 		}
 
-		gva = state.gprs[reg];
+		gva = state->gprs[reg];
 		gva &= size_to_mask(exit->u.io.address_size);
 
 		if (exit->u.io.seg != -1) {
@@ -780,25 +749,25 @@ nvmm_assist_io(struct nvmm_machine *mach
 			if (io.in) {
 				seg = NVMM_X64_SEG_ES;
 			} else {
-				seg = fetch_segment(mach, &state);
+				seg = fetch_segment(mach, state);
 				if (seg == -1)
 					return -1;
 			}
 		}
 
-		if (__predict_true(is_long_mode(&state))) {
+		if (__predict_true(is_long_mode(state))) {
 			if (seg == NVMM_X64_SEG_GS || seg == NVMM_X64_SEG_FS) {
-				segment_apply(&state.segs[seg], &gva);
+				segment_apply(&state->segs[seg], &gva);
 			}
 		} else {
-			ret = segment_check(&state.segs[seg], gva, io.size);
+			ret = segment_check(&state->segs[seg], gva, io.size);
 			if (ret == -1)
 				return -1;
-			segment_apply(&state.segs[seg], &gva);
+			segment_apply(&state->segs[seg], &gva);
 		}
 
 		if (exit->u.io.rep && !psld) {
-			iocnt = assist_io_batch(mach, &state, &io, gva, cnt);
+			iocnt = assist_io_batch(mach, state, &io, gva, cnt);
 			if (iocnt == -1)
 				return -1;
 			goto done;
@@ -807,9 +776,9 @@ nvmm_assist_io(struct nvmm_machine *mach
 
 	if (!io.in) {
 		if (!exit->u.io.str) {
-			memcpy(io.data, &state.gprs[NVMM_X64_GPR_RAX], io.size);
+			memcpy(io.data, &state->gprs[NVMM_X64_GPR_RAX], io.size);
 		} else {
-			ret = read_guest_memory(mach, &state, gva, io.data,
+			ret = read_guest_memory(mach, state, gva, io.data,
 			    io.size);
 			if (ret == -1)
 				return -1;
@@ -820,13 +789,13 @@ nvmm_assist_io(struct nvmm_machine *mach
 
 	if (io.in) {
 		if (!exit->u.io.str) {
-			memcpy(&state.gprs[NVMM_X64_GPR_RAX], io.data, io.size);
+			memcpy(&state->gprs[NVMM_X64_GPR_RAX], io.data, io.size);
 			if (io.size == 4) {
 				/* Zero-extend to 64 bits. */
-				state.gprs[NVMM_X64_GPR_RAX] &= size_to_mask(4);
+				state->gprs[NVMM_X64_GPR_RAX] &= size_to_mask(4);
 			}
 		} else {
-			ret = write_guest_memory(mach, &state, gva, io.data,
+			ret = write_guest_memory(mach, state, gva, io.data,
 			    io.size);
 			if (ret == -1)
 				return -1;
@@ -836,24 +805,24 @@ nvmm_assist_io(struct nvmm_machine *mach
 done:
 	if (exit->u.io.str) {
 		if (__predict_false(psld)) {
-			state.gprs[reg] -= iocnt * io.size;
+			state->gprs[reg] -= iocnt * io.size;
 		} else {
-			state.gprs[reg] += iocnt * io.size;
+			state->gprs[reg] += iocnt * io.size;
 		}
 	}
 
 	if (exit->u.io.rep) {
 		cnt -= iocnt;
-		rep_set_cnt(&state, exit->u.io.address_size, cnt);
+		rep_set_cnt(state, exit->u.io.address_size, cnt);
 		if (cnt == 0) {
-			state.gprs[NVMM_X64_GPR_RIP] = exit->u.io.npc;
+			state->gprs[NVMM_X64_GPR_RIP] = exit->u.io.npc;
 		}
 	} else {
-		state.gprs[NVMM_X64_GPR_RIP] = exit->u.io.npc;
+		state->gprs[NVMM_X64_GPR_RIP] = exit->u.io.npc;
 	}
 
 out:
-	ret = nvmm_vcpu_setstate(mach, cpuid, &state, NVMM_X64_STATE_GPRS);
+	ret = nvmm_vcpu_setstate(mach, vcpu, NVMM_X64_STATE_GPRS);
 	if (ret == -1)
 		return -1;
 
@@ -3141,10 +3110,10 @@ assist_mem_single(struct nvmm_machine *m
 }
 
 int
-nvmm_assist_mem(struct nvmm_machine *mach, nvmm_cpuid_t cpuid,
-    struct nvmm_exit *exit)
+nvmm_assist_mem(struct nvmm_machine *mach, struct nvmm_vcpu *vcpu)
 {
-	struct nvmm_x64_state state;
+	struct nvmm_x64_state *state = vcpu->state;
+	struct nvmm_exit *exit = vcpu->exit;
 	struct x86_instr instr;
 	uint64_t cnt = 0; /* GCC */
 	int ret;
@@ -3154,7 +3123,7 @@ nvmm_assist_mem(struct nvmm_machine *mac
 		return -1;
 	}
 
-	ret = nvmm_vcpu_getstate(mach, cpuid, &state,
+	ret = nvmm_vcpu_getstate(mach, vcpu,
 	    NVMM_X64_STATE_GPRS | NVMM_X64_STATE_SEGS |
 	    NVMM_X64_STATE_CRS | NVMM_X64_STATE_MSRS);
 	if (ret == -1)
@@ -3165,30 +3134,30 @@ nvmm_assist_mem(struct nvmm_machine *mac
 		 * The instruction was not fetched from the kernel. Fetch
 		 * it ourselves.
 		 */
-		ret = fetch_instruction(mach, &state, exit);
+		ret = fetch_instruction(mach, state, exit);
 		if (ret == -1)
 			return -1;
 	}
 
 	ret = x86_decode(exit->u.mem.inst_bytes, exit->u.mem.inst_len,
-	    &instr, &state);
+	    &instr, state);
 	if (ret == -1) {
 		errno = ENODEV;
 		return -1;
 	}
 
 	if (instr.legpref.rep || instr.legpref.repn) {
-		cnt = rep_get_cnt(&state, instr.address_size);
+		cnt = rep_get_cnt(state, instr.address_size);
 		if (__predict_false(cnt == 0)) {
-			state.gprs[NVMM_X64_GPR_RIP] += instr.len;
+			state->gprs[NVMM_X64_GPR_RIP] += instr.len;
 			goto out;
 		}
 	}
 
 	if (instr.opcode->movs) {
-		ret = assist_mem_double(mach, &state, &instr);
+		ret = assist_mem_double(mach, state, &instr);
 	} else {
-		ret = assist_mem_single(mach, &state, &instr, exit);
+		ret = assist_mem_single(mach, state, &instr, exit);
 	}
 	if (ret == -1) {
 		errno = ENODEV;
@@ -3197,20 +3166,20 @@ nvmm_assist_mem(struct nvmm_machine *mac
 
 	if (instr.legpref.rep || instr.legpref.repn) {
 		cnt -= 1;
-		rep_set_cnt(&state, instr.address_size, cnt);
+		rep_set_cnt(state, instr.address_size, cnt);
 		if (cnt == 0) {
-			state.gprs[NVMM_X64_GPR_RIP] += instr.len;
+			state->gprs[NVMM_X64_GPR_RIP] += instr.len;
 		} else if (__predict_false(instr.legpref.repn)) {
-			if (state.gprs[NVMM_X64_GPR_RFLAGS] & PSL_Z) {
-				state.gprs[NVMM_X64_GPR_RIP] += instr.len;
+			if (state->gprs[NVMM_X64_GPR_RFLAGS] & PSL_Z) {
+				state->gprs[NVMM_X64_GPR_RIP] += instr.len;
 			}
 		}
 	} else {
-		state.gprs[NVMM_X64_GPR_RIP] += instr.len;
+		state->gprs[NVMM_X64_GPR_RIP] += instr.len;
 	}
 
 out:
-	ret = nvmm_vcpu_setstate(mach, cpuid, &state, NVMM_X64_STATE_GPRS);
+	ret = nvmm_vcpu_setstate(mach, vcpu, NVMM_X64_STATE_GPRS);
 	if (ret == -1)
 		return -1;
 

Index: src/lib/libnvmm/nvmm.h
diff -u src/lib/libnvmm/nvmm.h:1.11 src/lib/libnvmm/nvmm.h:1.12
--- src/lib/libnvmm/nvmm.h:1.11	Sat May 11 07:31:57 2019
+++ src/lib/libnvmm/nvmm.h	Sat Jun  8 07:27:44 2019
@@ -1,4 +1,4 @@
-/*	$NetBSD: nvmm.h,v 1.11 2019/05/11 07:31:57 maxv Exp $	*/
+/*	$NetBSD: nvmm.h,v 1.12 2019/06/08 07:27:44 maxv Exp $	*/
 
 /*
  * Copyright (c) 2018 The NetBSD Foundation, Inc.
@@ -60,11 +60,17 @@ struct nvmm_callbacks {
 struct nvmm_machine {
 	nvmm_machid_t machid;
 	struct nvmm_comm_page **pages;
-	size_t npages;
 	void *areas; /* opaque */
 	struct nvmm_callbacks cbs;
 };
 
+struct nvmm_vcpu {
+	nvmm_cpuid_t cpuid;
+	struct nvmm_vcpu_state *state;
+	struct nvmm_event *event;
+	struct nvmm_exit *exit;
+};
+
 #define NVMM_MACH_CONF_CALLBACKS	NVMM_MACH_CONF_LIBNVMM_BEGIN
 
 #define NVMM_PROT_READ		0x01
@@ -80,28 +86,28 @@ int nvmm_machine_create(struct nvmm_mach
 int nvmm_machine_destroy(struct nvmm_machine *);
 int nvmm_machine_configure(struct nvmm_machine *, uint64_t, void *);
 
-int nvmm_vcpu_create(struct nvmm_machine *, nvmm_cpuid_t);
-int nvmm_vcpu_destroy(struct nvmm_machine *, nvmm_cpuid_t);
-int nvmm_vcpu_setstate(struct nvmm_machine *, nvmm_cpuid_t, void *, uint64_t);
-int nvmm_vcpu_getstate(struct nvmm_machine *, nvmm_cpuid_t, void *, uint64_t);
-int nvmm_vcpu_inject(struct nvmm_machine *, nvmm_cpuid_t, struct nvmm_event *);
-int nvmm_vcpu_run(struct nvmm_machine *, nvmm_cpuid_t, struct nvmm_exit *);
+int nvmm_vcpu_create(struct nvmm_machine *, nvmm_cpuid_t, struct nvmm_vcpu *);
+int nvmm_vcpu_destroy(struct nvmm_machine *, struct nvmm_vcpu *);
+int nvmm_vcpu_setstate(struct nvmm_machine *, struct nvmm_vcpu *, uint64_t);
+int nvmm_vcpu_getstate(struct nvmm_machine *, struct nvmm_vcpu *, uint64_t);
+int nvmm_vcpu_inject(struct nvmm_machine *, struct nvmm_vcpu *);
+int nvmm_vcpu_run(struct nvmm_machine *, struct nvmm_vcpu *);
 
 int nvmm_gpa_map(struct nvmm_machine *, uintptr_t, gpaddr_t, size_t, int);
 int nvmm_gpa_unmap(struct nvmm_machine *, uintptr_t, gpaddr_t, size_t);
 int nvmm_hva_map(struct nvmm_machine *, uintptr_t, size_t);
 int nvmm_hva_unmap(struct nvmm_machine *, uintptr_t, size_t);
 
-int nvmm_gva_to_gpa(struct nvmm_machine *, nvmm_cpuid_t, gvaddr_t, gpaddr_t *,
+int nvmm_gva_to_gpa(struct nvmm_machine *, struct nvmm_vcpu *, gvaddr_t, gpaddr_t *,
     nvmm_prot_t *);
 int nvmm_gpa_to_hva(struct nvmm_machine *, gpaddr_t, uintptr_t *,
     nvmm_prot_t *);
 
-int nvmm_assist_io(struct nvmm_machine *, nvmm_cpuid_t, struct nvmm_exit *);
-int nvmm_assist_mem(struct nvmm_machine *, nvmm_cpuid_t, struct nvmm_exit *);
+int nvmm_assist_io(struct nvmm_machine *, struct nvmm_vcpu *);
+int nvmm_assist_mem(struct nvmm_machine *, struct nvmm_vcpu *);
 
 int nvmm_ctl(int, void *, size_t);
 
-int nvmm_vcpu_dump(struct nvmm_machine *, nvmm_cpuid_t);
+int nvmm_vcpu_dump(struct nvmm_machine *, struct nvmm_vcpu *);
 
 #endif /* _LIBNVMM_H_ */

Index: src/tests/lib/libnvmm/h_io_assist.c
diff -u src/tests/lib/libnvmm/h_io_assist.c:1.7 src/tests/lib/libnvmm/h_io_assist.c:1.8
--- src/tests/lib/libnvmm/h_io_assist.c:1.7	Sat May 11 07:31:57 2019
+++ src/tests/lib/libnvmm/h_io_assist.c	Sat Jun  8 07:27:44 2019
@@ -1,4 +1,4 @@
-/*	$NetBSD: h_io_assist.c,v 1.7 2019/05/11 07:31:57 maxv Exp $	*/
+/*	$NetBSD: h_io_assist.c,v 1.8 2019/06/08 07:27:44 maxv Exp $	*/
 
 /*
  * Copyright (c) 2018 The NetBSD Foundation, Inc.
@@ -71,33 +71,33 @@ init_seg(struct nvmm_x64_state_seg *seg,
 }
 
 static void
-reset_machine(struct nvmm_machine *mach)
+reset_machine(struct nvmm_machine *mach, struct nvmm_vcpu *vcpu)
 {
-	struct nvmm_x64_state state;
+	struct nvmm_x64_state *state = vcpu->state;
 
-	memset(&state, 0, sizeof(state));
+	memset(state, 0, sizeof(*state));
 
 	/* Default. */
-	state.gprs[NVMM_X64_GPR_RFLAGS] = PSL_MBO;
-	init_seg(&state.segs[NVMM_X64_SEG_CS], SDT_MEMERA, GSEL(GCODE_SEL, SEL_KPL));
-	init_seg(&state.segs[NVMM_X64_SEG_SS], SDT_MEMRWA, GSEL(GDATA_SEL, SEL_KPL));
-	init_seg(&state.segs[NVMM_X64_SEG_DS], SDT_MEMRWA, GSEL(GDATA_SEL, SEL_KPL));
-	init_seg(&state.segs[NVMM_X64_SEG_ES], SDT_MEMRWA, GSEL(GDATA_SEL, SEL_KPL));
-	init_seg(&state.segs[NVMM_X64_SEG_FS], SDT_MEMRWA, GSEL(GDATA_SEL, SEL_KPL));
-	init_seg(&state.segs[NVMM_X64_SEG_GS], SDT_MEMRWA, GSEL(GDATA_SEL, SEL_KPL));
+	state->gprs[NVMM_X64_GPR_RFLAGS] = PSL_MBO;
+	init_seg(&state->segs[NVMM_X64_SEG_CS], SDT_MEMERA, GSEL(GCODE_SEL, SEL_KPL));
+	init_seg(&state->segs[NVMM_X64_SEG_SS], SDT_MEMRWA, GSEL(GDATA_SEL, SEL_KPL));
+	init_seg(&state->segs[NVMM_X64_SEG_DS], SDT_MEMRWA, GSEL(GDATA_SEL, SEL_KPL));
+	init_seg(&state->segs[NVMM_X64_SEG_ES], SDT_MEMRWA, GSEL(GDATA_SEL, SEL_KPL));
+	init_seg(&state->segs[NVMM_X64_SEG_FS], SDT_MEMRWA, GSEL(GDATA_SEL, SEL_KPL));
+	init_seg(&state->segs[NVMM_X64_SEG_GS], SDT_MEMRWA, GSEL(GDATA_SEL, SEL_KPL));
 
 	/* Blank. */
-	init_seg(&state.segs[NVMM_X64_SEG_GDT], 0, 0);
-	init_seg(&state.segs[NVMM_X64_SEG_IDT], 0, 0);
-	init_seg(&state.segs[NVMM_X64_SEG_LDT], SDT_SYSLDT, 0);
-	init_seg(&state.segs[NVMM_X64_SEG_TR], SDT_SYS386BSY, 0);
+	init_seg(&state->segs[NVMM_X64_SEG_GDT], 0, 0);
+	init_seg(&state->segs[NVMM_X64_SEG_IDT], 0, 0);
+	init_seg(&state->segs[NVMM_X64_SEG_LDT], SDT_SYSLDT, 0);
+	init_seg(&state->segs[NVMM_X64_SEG_TR], SDT_SYS386BSY, 0);
 
 	/* Protected mode enabled. */
-	state.crs[NVMM_X64_CR_CR0] = CR0_PG|CR0_PE|CR0_NE|CR0_TS|CR0_MP|CR0_WP|CR0_AM;
+	state->crs[NVMM_X64_CR_CR0] = CR0_PG|CR0_PE|CR0_NE|CR0_TS|CR0_MP|CR0_WP|CR0_AM;
 
 	/* 64bit mode enabled. */
-	state.crs[NVMM_X64_CR_CR4] = CR4_PAE;
-	state.msrs[NVMM_X64_MSR_EFER] = EFER_LME | EFER_SCE | EFER_LMA;
+	state->crs[NVMM_X64_CR_CR4] = CR4_PAE;
+	state->msrs[NVMM_X64_MSR_EFER] = EFER_LME | EFER_SCE | EFER_LMA;
 
 	/* Stolen from x86/pmap.c */
 #define	PATENTRY(n, type)	(type << ((n) * 8))
@@ -107,18 +107,18 @@ reset_machine(struct nvmm_machine *mach)
 #define	PAT_WP		0x5ULL
 #define	PAT_WB		0x6ULL
 #define	PAT_UCMINUS	0x7ULL
-	state.msrs[NVMM_X64_MSR_PAT] =
+	state->msrs[NVMM_X64_MSR_PAT] =
 	    PATENTRY(0, PAT_WB) | PATENTRY(1, PAT_WT) |
 	    PATENTRY(2, PAT_UCMINUS) | PATENTRY(3, PAT_UC) |
 	    PATENTRY(4, PAT_WB) | PATENTRY(5, PAT_WT) |
 	    PATENTRY(6, PAT_UCMINUS) | PATENTRY(7, PAT_UC);
 
 	/* Page tables. */
-	state.crs[NVMM_X64_CR_CR3] = 0x3000;
+	state->crs[NVMM_X64_CR_CR3] = 0x3000;
 
-	state.gprs[NVMM_X64_GPR_RIP] = 0x2000;
+	state->gprs[NVMM_X64_GPR_RIP] = 0x2000;
 
-	if (nvmm_vcpu_setstate(mach, 0, &state, NVMM_X64_STATE_ALL) == -1)
+	if (nvmm_vcpu_setstate(mach, vcpu, NVMM_X64_STATE_ALL) == -1)
 		err(errno, "nvmm_vcpu_setstate");
 }
 
@@ -227,11 +227,11 @@ io_callback(struct nvmm_io *io)
 }
 
 static int
-handle_io(struct nvmm_machine *mach, struct nvmm_exit *exit)
+handle_io(struct nvmm_machine *mach, struct nvmm_vcpu *vcpu)
 {
 	int ret;
 
-	ret = nvmm_assist_io(mach, 0, exit);
+	ret = nvmm_assist_io(mach, vcpu);
 	if (ret == -1) {
 		err(errno, "nvmm_assist_io");
 	}
@@ -240,15 +240,15 @@ handle_io(struct nvmm_machine *mach, str
 }
 
 static void
-run_machine(struct nvmm_machine *mach)
+run_machine(struct nvmm_machine *mach, struct nvmm_vcpu *vcpu)
 {
-	struct nvmm_exit exit;
+	struct nvmm_exit *exit = vcpu->exit;
 
 	while (1) {
-		if (nvmm_vcpu_run(mach, 0, &exit) == -1)
+		if (nvmm_vcpu_run(mach, vcpu) == -1)
 			err(errno, "nvmm_vcpu_run");
 
-		switch (exit.reason) {
+		switch (exit->reason) {
 		case NVMM_EXIT_NONE:
 			break;
 
@@ -257,7 +257,7 @@ run_machine(struct nvmm_machine *mach)
 			return;
 
 		case NVMM_EXIT_IO:
-			handle_io(mach, &exit);
+			handle_io(mach, vcpu);
 			break;
 
 		case NVMM_EXIT_SHUTDOWN:
@@ -282,14 +282,15 @@ struct test {
 };
 
 static void
-run_test(struct nvmm_machine *mach, const struct test *test)
+run_test(struct nvmm_machine *mach, struct nvmm_vcpu *vcpu,
+    const struct test *test)
 {
 	size_t size;
 	char *res;
 
 	size = (size_t)test->code_end - (size_t)test->code_begin;
 
-	reset_machine(mach);
+	reset_machine(mach, vcpu);
 
 	iobuf_off = 0;
 	memset(iobuf, 0, IO_SIZE);
@@ -302,7 +303,7 @@ run_test(struct nvmm_machine *mach, cons
 		strcpy(databuf, test->wanted);
 	}
 
-	run_machine(mach);
+	run_machine(mach, vcpu);
 
 	if (test->in) {
 		res = databuf;
@@ -369,17 +370,18 @@ static struct nvmm_callbacks callbacks =
 int main(int argc, char *argv[])
 {
 	struct nvmm_machine mach;
+	struct nvmm_vcpu vcpu;
 	size_t i;
 
 	if (nvmm_machine_create(&mach) == -1)
 		err(errno, "nvmm_machine_create");
-	if (nvmm_vcpu_create(&mach, 0) == -1)
+	if (nvmm_vcpu_create(&mach, 0, &vcpu) == -1)
 		err(errno, "nvmm_vcpu_create");
 	nvmm_machine_configure(&mach, NVMM_MACH_CONF_CALLBACKS, &callbacks);
 	map_pages(&mach);
 
 	for (i = 0; tests[i].name != NULL; i++) {
-		run_test(&mach, &tests[i]);
+		run_test(&mach, &vcpu, &tests[i]);
 	}
 
 	return 0;

Index: src/tests/lib/libnvmm/h_mem_assist.c
diff -u src/tests/lib/libnvmm/h_mem_assist.c:1.10 src/tests/lib/libnvmm/h_mem_assist.c:1.11
--- src/tests/lib/libnvmm/h_mem_assist.c:1.10	Sat May 11 07:31:57 2019
+++ src/tests/lib/libnvmm/h_mem_assist.c	Sat Jun  8 07:27:44 2019
@@ -1,4 +1,4 @@
-/*	$NetBSD: h_mem_assist.c,v 1.10 2019/05/11 07:31:57 maxv Exp $	*/
+/*	$NetBSD: h_mem_assist.c,v 1.11 2019/06/08 07:27:44 maxv Exp $	*/
 
 /*
  * Copyright (c) 2018 The NetBSD Foundation, Inc.
@@ -68,33 +68,33 @@ init_seg(struct nvmm_x64_state_seg *seg,
 }
 
 static void
-reset_machine(struct nvmm_machine *mach)
+reset_machine(struct nvmm_machine *mach, struct nvmm_vcpu *vcpu)
 {
-	struct nvmm_x64_state state;
+	struct nvmm_x64_state *state = vcpu->state;
 
-	memset(&state, 0, sizeof(state));
+	memset(state, 0, sizeof(*state));
 
 	/* Default. */
-	state.gprs[NVMM_X64_GPR_RFLAGS] = PSL_MBO;
-	init_seg(&state.segs[NVMM_X64_SEG_CS], SDT_MEMERA, GSEL(GCODE_SEL, SEL_KPL));
-	init_seg(&state.segs[NVMM_X64_SEG_SS], SDT_MEMRWA, GSEL(GDATA_SEL, SEL_KPL));
-	init_seg(&state.segs[NVMM_X64_SEG_DS], SDT_MEMRWA, GSEL(GDATA_SEL, SEL_KPL));
-	init_seg(&state.segs[NVMM_X64_SEG_ES], SDT_MEMRWA, GSEL(GDATA_SEL, SEL_KPL));
-	init_seg(&state.segs[NVMM_X64_SEG_FS], SDT_MEMRWA, GSEL(GDATA_SEL, SEL_KPL));
-	init_seg(&state.segs[NVMM_X64_SEG_GS], SDT_MEMRWA, GSEL(GDATA_SEL, SEL_KPL));
+	state->gprs[NVMM_X64_GPR_RFLAGS] = PSL_MBO;
+	init_seg(&state->segs[NVMM_X64_SEG_CS], SDT_MEMERA, GSEL(GCODE_SEL, SEL_KPL));
+	init_seg(&state->segs[NVMM_X64_SEG_SS], SDT_MEMRWA, GSEL(GDATA_SEL, SEL_KPL));
+	init_seg(&state->segs[NVMM_X64_SEG_DS], SDT_MEMRWA, GSEL(GDATA_SEL, SEL_KPL));
+	init_seg(&state->segs[NVMM_X64_SEG_ES], SDT_MEMRWA, GSEL(GDATA_SEL, SEL_KPL));
+	init_seg(&state->segs[NVMM_X64_SEG_FS], SDT_MEMRWA, GSEL(GDATA_SEL, SEL_KPL));
+	init_seg(&state->segs[NVMM_X64_SEG_GS], SDT_MEMRWA, GSEL(GDATA_SEL, SEL_KPL));
 
 	/* Blank. */
-	init_seg(&state.segs[NVMM_X64_SEG_GDT], 0, 0);
-	init_seg(&state.segs[NVMM_X64_SEG_IDT], 0, 0);
-	init_seg(&state.segs[NVMM_X64_SEG_LDT], SDT_SYSLDT, 0);
-	init_seg(&state.segs[NVMM_X64_SEG_TR], SDT_SYS386BSY, 0);
+	init_seg(&state->segs[NVMM_X64_SEG_GDT], 0, 0);
+	init_seg(&state->segs[NVMM_X64_SEG_IDT], 0, 0);
+	init_seg(&state->segs[NVMM_X64_SEG_LDT], SDT_SYSLDT, 0);
+	init_seg(&state->segs[NVMM_X64_SEG_TR], SDT_SYS386BSY, 0);
 
 	/* Protected mode enabled. */
-	state.crs[NVMM_X64_CR_CR0] = CR0_PG|CR0_PE|CR0_NE|CR0_TS|CR0_MP|CR0_WP|CR0_AM;
+	state->crs[NVMM_X64_CR_CR0] = CR0_PG|CR0_PE|CR0_NE|CR0_TS|CR0_MP|CR0_WP|CR0_AM;
 
 	/* 64bit mode enabled. */
-	state.crs[NVMM_X64_CR_CR4] = CR4_PAE;
-	state.msrs[NVMM_X64_MSR_EFER] = EFER_LME | EFER_SCE | EFER_LMA;
+	state->crs[NVMM_X64_CR_CR4] = CR4_PAE;
+	state->msrs[NVMM_X64_MSR_EFER] = EFER_LME | EFER_SCE | EFER_LMA;
 
 	/* Stolen from x86/pmap.c */
 #define	PATENTRY(n, type)	(type << ((n) * 8))
@@ -104,18 +104,18 @@ reset_machine(struct nvmm_machine *mach)
 #define	PAT_WP		0x5ULL
 #define	PAT_WB		0x6ULL
 #define	PAT_UCMINUS	0x7ULL
-	state.msrs[NVMM_X64_MSR_PAT] =
+	state->msrs[NVMM_X64_MSR_PAT] =
 	    PATENTRY(0, PAT_WB) | PATENTRY(1, PAT_WT) |
 	    PATENTRY(2, PAT_UCMINUS) | PATENTRY(3, PAT_UC) |
 	    PATENTRY(4, PAT_WB) | PATENTRY(5, PAT_WT) |
 	    PATENTRY(6, PAT_UCMINUS) | PATENTRY(7, PAT_UC);
 
 	/* Page tables. */
-	state.crs[NVMM_X64_CR_CR3] = 0x3000;
+	state->crs[NVMM_X64_CR_CR3] = 0x3000;
 
-	state.gprs[NVMM_X64_GPR_RIP] = 0x2000;
+	state->gprs[NVMM_X64_GPR_RIP] = 0x2000;
 
-	if (nvmm_vcpu_setstate(mach, 0, &state, NVMM_X64_STATE_ALL) == -1)
+	if (nvmm_vcpu_setstate(mach, vcpu, NVMM_X64_STATE_ALL) == -1)
 		err(errno, "nvmm_vcpu_setstate");
 }
 
@@ -216,11 +216,11 @@ mem_callback(struct nvmm_mem *mem)
 }
 
 static int
-handle_memory(struct nvmm_machine *mach, struct nvmm_exit *exit)
+handle_memory(struct nvmm_machine *mach, struct nvmm_vcpu *vcpu)
 {
 	int ret;
 
-	ret = nvmm_assist_mem(mach, 0, exit);
+	ret = nvmm_assist_mem(mach, vcpu);
 	if (ret == -1) {
 		err(errno, "nvmm_assist_mem");
 	}
@@ -229,15 +229,15 @@ handle_memory(struct nvmm_machine *mach,
 }
 
 static void
-run_machine(struct nvmm_machine *mach)
+run_machine(struct nvmm_machine *mach, struct nvmm_vcpu *vcpu)
 {
-	struct nvmm_exit exit;
+	struct nvmm_exit *exit = vcpu->exit;
 
 	while (1) {
-		if (nvmm_vcpu_run(mach, 0, &exit) == -1)
+		if (nvmm_vcpu_run(mach, vcpu) == -1)
 			err(errno, "nvmm_vcpu_run");
 
-		switch (exit.reason) {
+		switch (exit->reason) {
 		case NVMM_EXIT_NONE:
 			break;
 
@@ -246,7 +246,7 @@ run_machine(struct nvmm_machine *mach)
 			return;
 
 		case NVMM_EXIT_MEMORY:
-			handle_memory(mach, &exit);
+			handle_memory(mach, vcpu);
 			break;
 
 		case NVMM_EXIT_SHUTDOWN:
@@ -270,19 +270,20 @@ struct test {
 };
 
 static void
-run_test(struct nvmm_machine *mach, const struct test *test)
+run_test(struct nvmm_machine *mach, struct nvmm_vcpu *vcpu,
+    const struct test *test)
 {
 	uint64_t *res;
 	size_t size;
 
 	size = (size_t)test->code_end - (size_t)test->code_begin;
 
-	reset_machine(mach);
+	reset_machine(mach, vcpu);
 
 	memset(mmiobuf, 0, PAGE_SIZE);
 	memcpy(instbuf, test->code_begin, size);
 
-	run_machine(mach);
+	run_machine(mach, vcpu);
 
 	res = (uint64_t *)mmiobuf;
 	if (*res == test->wanted) {
@@ -344,17 +345,18 @@ static struct nvmm_callbacks callbacks =
 int main(int argc, char *argv[])
 {
 	struct nvmm_machine mach;
+	struct nvmm_vcpu vcpu;
 	size_t i;
 
 	if (nvmm_machine_create(&mach) == -1)
 		err(errno, "nvmm_machine_create");
-	if (nvmm_vcpu_create(&mach, 0) == -1)
+	if (nvmm_vcpu_create(&mach, 0, &vcpu) == -1)
 		err(errno, "nvmm_vcpu_create");
 	nvmm_machine_configure(&mach, NVMM_MACH_CONF_CALLBACKS, &callbacks);
 	map_pages(&mach);
 
 	for (i = 0; tests[i].name != NULL; i++) {
-		run_test(&mach, &tests[i]);
+		run_test(&mach, &vcpu, &tests[i]);
 	}
 
 	return 0;
