Module Name:    src
Committed By:   jdolecek
Date:           Sun Apr 19 18:47:40 UTC 2020
Modified Files:
        src/sys/arch/xen/include: xen_shm.h
        src/sys/arch/xen/x86: xen_shm_machdep.c
        src/sys/arch/xen/xen: hypervisor.c xbdback_xenbus.c

Log Message:
change interface for xen_shm_map() so that the caller always supplies the
VA; it now fails only if the Xen hypercall fails, in which case the failure
is final

change xbdback to pre-allocate KVA on xbdback attach (and free it on
detach), so it always has KVA available to map the request pages

remove the no longer needed KVA allocation failure handling


To generate a diff of this commit:
cvs rdiff -u -r1.10 -r1.11 src/sys/arch/xen/include/xen_shm.h
cvs rdiff -u -r1.14 -r1.15 src/sys/arch/xen/x86/xen_shm_machdep.c
cvs rdiff -u -r1.74 -r1.75 src/sys/arch/xen/xen/hypervisor.c
cvs rdiff -u -r1.78 -r1.79 src/sys/arch/xen/xen/xbdback_xenbus.c

Please note that diffs are not public domain; they are subject to the
copyright notices on the relevant files.
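As an illustration of the interface change (a minimal sketch, not code from
the tree; the function name and surrounding error handling are made up):

    /*
     * Illustrative caller of the new xen_shm_map(): the caller now owns
     * the KVA and passes it in, so the only remaining failure mode is
     * the grant-table hypercall itself.
     */
    static int
    example_map_do_io_unmap(int nentries, int domid, grant_ref_t *grefs,
        vaddr_t va, grant_handle_t *handles, int is_write)
    {
        int error;

        /*
         * Before: xen_shm_map(nentries, domid, grefs, &va, ...) allocated
         * the VA itself and could fail with ENOMEM, forcing callers to
         * queue a xen_shm_callback() and retry.  Now the VA is an input,
         * and a failure (EINVAL, hypercall error) is final.
         */
        error = xen_shm_map(nentries, domid, grefs, va, handles,
            is_write ? XSHM_RO : 0);
        if (error)
            return error;

        /* ... perform I/O on [va, va + nentries * PAGE_SIZE) ... */

        xen_shm_unmap(va, nentries, handles);
        return 0;
    }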
Modified files:

Index: src/sys/arch/xen/include/xen_shm.h
diff -u src/sys/arch/xen/include/xen_shm.h:1.10 src/sys/arch/xen/include/xen_shm.h:1.11
--- src/sys/arch/xen/include/xen_shm.h:1.10     Tue Jan  8 19:59:24 2019
+++ src/sys/arch/xen/include/xen_shm.h  Sun Apr 19 18:47:40 2020
@@ -1,4 +1,4 @@
-/*     $NetBSD: xen_shm.h,v 1.10 2019/01/08 19:59:24 jdolecek Exp $   */
+/*     $NetBSD: xen_shm.h,v 1.11 2020/04/19 18:47:40 jdolecek Exp $   */
 
 /*
  * Copyright (c) 2005 Manuel Bouyer.
@@ -27,7 +27,7 @@
 
 #include "opt_xen.h"
 
-#define XENSHM_MAX_PAGES_PER_REQUEST (MAXPHYS >> PAGE_SHIFT)
+#define XENSHM_MAX_PAGES_PER_REQUEST ((MAXPHYS + PAGE_SIZE) >> PAGE_SHIFT)
 
 /*
  * Helper routines for the backend drivers. This implement the necessary
@@ -35,10 +35,8 @@
  * space, do I/O to it, and unmap it.
  */
 
-int xen_shm_map(int, int, grant_ref_t *, vaddr_t *, grant_handle_t *, int);
+int xen_shm_map(int, int, grant_ref_t *, vaddr_t, grant_handle_t *, int);
 void xen_shm_unmap(vaddr_t, int, grant_handle_t *);
-int xen_shm_callback(int (*)(void *), void *);
 
 /* flags for xen_shm_map() */
-#define XSHM_CALLBACK 0x01      /* called from a callback */
 #define XSHM_RO 0x02    /* map the guest's memory read-only */

Index: src/sys/arch/xen/x86/xen_shm_machdep.c
diff -u src/sys/arch/xen/x86/xen_shm_machdep.c:1.14 src/sys/arch/xen/x86/xen_shm_machdep.c:1.15
--- src/sys/arch/xen/x86/xen_shm_machdep.c:1.14 Mon Apr 13 00:27:16 2020
+++ src/sys/arch/xen/x86/xen_shm_machdep.c      Sun Apr 19 18:47:40 2020
@@ -1,4 +1,4 @@
-/*     $NetBSD: xen_shm_machdep.c,v 1.14 2020/04/13 00:27:16 chs Exp $        */
+/*     $NetBSD: xen_shm_machdep.c,v 1.15 2020/04/19 18:47:40 jdolecek Exp $   */
 
 /*
  * Copyright (c) 2006 Manuel Bouyer.
@@ -25,7 +25,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: xen_shm_machdep.c,v 1.14 2020/04/13 00:27:16 chs Exp $");
+__KERNEL_RCSID(0, "$NetBSD: xen_shm_machdep.c,v 1.15 2020/04/19 18:47:40 jdolecek Exp $");
 
 #include <sys/types.h>
 #include <sys/param.h>
@@ -55,71 +55,12 @@ __KERNEL_RCSID(0, "$NetBSD: xen_shm_mach
  * available.
  */
 
-/* Grab enough VM space to map an entire vbd ring. */
-/* Xen3 linux guests seems to eat more pages, gives enough for 10 vbd rings */
-#define BLKIF_RING_SIZE __RING_SIZE((blkif_sring_t *)0, PAGE_SIZE)
-#define XENSHM_NPAGES (BLKIF_RING_SIZE * (BLKIF_MAX_SEGMENTS_PER_REQUEST + 1) * 10)
-
-/* vm space management */
-static vmem_t *xen_shm_arena __read_mostly;
-
-/* callbacks are registered in a FIFO list. */
-static SIMPLEQ_HEAD(xen_shm_callback_head, xen_shm_callback_entry)
-    xen_shm_callbacks;
-
-struct xen_shm_callback_entry {
-    SIMPLEQ_ENTRY(xen_shm_callback_entry) xshmc_entries;
-    int (*xshmc_callback)(void *); /* our callback */
-    void *xshmc_arg; /* cookie passed to the callback */
-};
-
-/* a pool of struct xen_shm_callback_entry */
-static struct pool xen_shm_callback_pool;
-
-#ifdef DEBUG
-/* for ratecheck(9) */
-static struct timeval xen_shm_errintvl = { 60, 0 };  /* a minute, each */
-#endif
-
-void
-xen_shm_init(void)
-{
-    vaddr_t xen_shm_base_address;
-    vaddr_t xen_shm_end_address;
-    u_long xen_shm_base_address_pg;
-    vsize_t xen_shm_size;
-
-    SIMPLEQ_INIT(&xen_shm_callbacks);
-    pool_init(&xen_shm_callback_pool, sizeof(struct xen_shm_callback_entry),
-        0, 0, 0, "xshmc", NULL, IPL_VM);
-    /* ensure we'll always get items */
-    pool_prime(&xen_shm_callback_pool, 1);
-
-    xen_shm_size = (XENSHM_NPAGES * PAGE_SIZE);
-
-    xen_shm_base_address = uvm_km_alloc(kernel_map, xen_shm_size, 0,
-        UVM_KMF_VAONLY);
-    xen_shm_end_address = xen_shm_base_address + xen_shm_size;
-    xen_shm_base_address_pg = xen_shm_base_address >> PAGE_SHIFT;
-    if (xen_shm_base_address == 0) {
-        panic("xen_shm_init no VM space");
-    }
-    xen_shm_arena = vmem_create("xen_shm", xen_shm_base_address_pg,
-        (xen_shm_end_address >> PAGE_SHIFT) - 1 - xen_shm_base_address_pg,
-        1, NULL, NULL, NULL, 1, VM_NOSLEEP, IPL_VM);
-    if (xen_shm_arena == NULL) {
-        panic("xen_shm_init no arena");
-    }
-}
-
 int
-xen_shm_map(int nentries, int domid, grant_ref_t *grefp, vaddr_t *vap,
+xen_shm_map(int nentries, int domid, grant_ref_t *grefp, vaddr_t va,
     grant_handle_t *handlep, int flags)
 {
     gnttab_map_grant_ref_t op[XENSHM_MAX_PAGES_PER_REQUEST];
-    vmem_addr_t new_va_pg;
-    vaddr_t new_va;
-    int ret, i, s;
+    int ret, i;
 
 #ifdef DIAGNOSTIC
     if (nentries > XENSHM_MAX_PAGES_PER_REQUEST) {
@@ -127,41 +68,8 @@ xen_shm_map(int nentries, int domid, gra
     }
 #endif
 
-    /* XXXSMP */
-    s = splvm(); /* splvm is the lowest level blocking disk and net IRQ */
-
-    /*
-     * If a driver is waiting for resources, don't try to allocate
-     * yet. This is to avoid a flood of small requests stalling large
-     * ones.
-     */
-    if (__predict_false(SIMPLEQ_FIRST(&xen_shm_callbacks) != NULL) &&
-        (flags & XSHM_CALLBACK) == 0) {
-        splx(s);
-#ifdef DEBUG
-        static struct timeval lasttime;
-        if (ratecheck(&lasttime, &xen_shm_errintvl))
-            printf("xen_shm_map: ENOMEM1\n");
-#endif
-        return ENOMEM;
-    }
-
-    /* Allocate the needed virtual space. */
-    if (vmem_alloc(xen_shm_arena, nentries,
-        VM_INSTANTFIT | VM_NOSLEEP, &new_va_pg) != 0) {
-        splx(s);
-#ifdef DEBUG
-        static struct timeval lasttime;
-        if (ratecheck(&lasttime, &xen_shm_errintvl))
-            printf("xen_shm_map: ENOMEM\n");
-#endif
-        return ENOMEM;
-    }
-    splx(s);
-
-    new_va = new_va_pg << PAGE_SHIFT;
     for (i = 0; i < nentries; i++) {
-        op[i].host_addr = new_va + i * PAGE_SIZE;
+        op[i].host_addr = va + i * PAGE_SIZE;
         op[i].dom = domid;
         op[i].ref = grefp[i];
         op[i].flags = GNTMAP_host_map |
@@ -170,16 +78,19 @@ xen_shm_map(int nentries, int domid, gra
 
     ret = HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, op, nentries);
     if (__predict_false(ret)) {
-        panic("xen_shm_map: HYPERVISOR_grant_table_op failed");
+        printf("%s: HYPERVISOR_grant_table_op failed\n", __func__);
+        return EINVAL;
     }
 
     for (i = 0; i < nentries; i++) {
+#ifdef DIAGNOSTIC
         if (__predict_false(op[i].status))
-            return op[i].status;
+            panic("%s: op[%d] status %d", __func__, i,
+                op[i].status);
+#endif
         handlep[i] = op[i].handle;
     }
 
-    *vap = new_va;
     return 0;
 }
 
@@ -187,8 +98,7 @@ void
 xen_shm_unmap(vaddr_t va, int nentries, grant_handle_t *handlep)
 {
     gnttab_unmap_grant_ref_t op[XENSHM_MAX_PAGES_PER_REQUEST];
-    struct xen_shm_callback_entry *xshmc;
-    int ret, i, s;
+    int ret, i;
 
 #ifdef DIAGNOSTIC
     if (nentries > XENSHM_MAX_PAGES_PER_REQUEST) {
@@ -207,48 +117,4 @@ xen_shm_unmap(vaddr_t va, int nentries, 
     if (__predict_false(ret)) {
         panic("xen_shm_unmap: unmap failed");
     }
-
-    va = va >> PAGE_SHIFT;
-
-    /* XXXSMP */
-    s = splvm(); /* splvm is the lowest level blocking disk and net IRQ */
-
-    vmem_free(xen_shm_arena, va, nentries);
-    while (__predict_false((xshmc = SIMPLEQ_FIRST(&xen_shm_callbacks))
-        != NULL)) {
-        SIMPLEQ_REMOVE_HEAD(&xen_shm_callbacks, xshmc_entries);
-        splx(s);
-        if (xshmc->xshmc_callback(xshmc->xshmc_arg) == 0) {
-            /* callback succeeded */
-            s = splvm(); /* XXXSMP */
-            pool_put(&xen_shm_callback_pool, xshmc);
-        } else {
-            /* callback failed, probably out of resources */
-            s = splvm(); /* XXXSMP */
-            SIMPLEQ_INSERT_TAIL(&xen_shm_callbacks, xshmc,
-                xshmc_entries);
-            break;
-        }
-    }
-
-    splx(s);
-}
-
-int
-xen_shm_callback(int (*callback)(void *), void *arg)
-{
-    struct xen_shm_callback_entry *xshmc;
-    int s;
-
-    s = splvm(); /* XXXSMP */
-    xshmc = pool_get(&xen_shm_callback_pool, PR_NOWAIT);
-    if (xshmc == NULL) {
-        splx(s);
-        return ENOMEM;
-    }
-    xshmc->xshmc_arg = arg;
-    xshmc->xshmc_callback = callback;
-    SIMPLEQ_INSERT_TAIL(&xen_shm_callbacks, xshmc, xshmc_entries);
-    splx(s);
-    return 0;
 }
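The bumped XENSHM_MAX_PAGES_PER_REQUEST accounts for transfers that do not
start on a page boundary (illustrative arithmetic, assuming the usual x86
values PAGE_SIZE = 4096 and MAXPHYS = 64KB; not part of the commit):

    old: MAXPHYS >> PAGE_SHIFT               = 65536 / 4096        = 16 pages
    new: (MAXPHYS + PAGE_SIZE) >> PAGE_SHIFT = (65536 + 4096)/4096 = 17 pages

    e.g. a 64KB transfer starting 512 bytes into a page covers bytes
    [512, 66048) of the mapping and therefore touches 17 pages, not 16.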
Index: src/sys/arch/xen/xen/hypervisor.c
diff -u src/sys/arch/xen/xen/hypervisor.c:1.74 src/sys/arch/xen/xen/hypervisor.c:1.75
--- src/sys/arch/xen/xen/hypervisor.c:1.74      Fri Apr 10 14:54:33 2020
+++ src/sys/arch/xen/xen/hypervisor.c   Sun Apr 19 18:47:40 2020
@@ -1,4 +1,4 @@
-/*     $NetBSD: hypervisor.c,v 1.74 2020/04/10 14:54:33 jdolecek Exp $        */
+/*     $NetBSD: hypervisor.c,v 1.75 2020/04/19 18:47:40 jdolecek Exp $        */
 
 /*
  * Copyright (c) 2005 Manuel Bouyer.
@@ -53,7 +53,7 @@
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: hypervisor.c,v 1.74 2020/04/10 14:54:33 jdolecek Exp $");
+__KERNEL_RCSID(0, "$NetBSD: hypervisor.c,v 1.75 2020/04/19 18:47:40 jdolecek Exp $");
 
 #include <sys/param.h>
 #include <sys/systm.h>
@@ -415,7 +415,7 @@ hypervisor_match(device_t parent, cfdata
         bi.common.len = sizeof(struct btinfo_rootdevice);
 
         /* From i386/multiboot.c */
-        /* $NetBSD: hypervisor.c,v 1.74 2020/04/10 14:54:33 jdolecek Exp $ */
+        /* $NetBSD: hypervisor.c,v 1.75 2020/04/19 18:47:40 jdolecek Exp $ */
         int i, len;
         vaddr_t data;
         extern struct bootinfo bootinfo;
@@ -620,7 +620,6 @@ hypervisor_attach(device_t parent, devic
 
     if (xendomain_is_privileged()) {
         xenprivcmd_init();
-        xen_shm_init();
     }
 #endif /* DOM0OPS */
 

Index: src/sys/arch/xen/xen/xbdback_xenbus.c
diff -u src/sys/arch/xen/xen/xbdback_xenbus.c:1.78 src/sys/arch/xen/xen/xbdback_xenbus.c:1.79
--- src/sys/arch/xen/xen/xbdback_xenbus.c:1.78  Mon Apr 13 00:27:17 2020
+++ src/sys/arch/xen/xen/xbdback_xenbus.c       Sun Apr 19 18:47:40 2020
@@ -1,4 +1,4 @@
-/*     $NetBSD: xbdback_xenbus.c,v 1.78 2020/04/13 00:27:17 chs Exp $ */
+/*     $NetBSD: xbdback_xenbus.c,v 1.79 2020/04/19 18:47:40 jdolecek Exp $    */
 
 /*
  * Copyright (c) 2006 Manuel Bouyer.
@@ -26,7 +26,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: xbdback_xenbus.c,v 1.78 2020/04/13 00:27:17 chs Exp $");
+__KERNEL_RCSID(0, "$NetBSD: xbdback_xenbus.c,v 1.79 2020/04/19 18:47:40 jdolecek Exp $");
 
 #include <sys/atomic.h>
 #include <sys/buf.h>
@@ -61,19 +61,19 @@ __KERNEL_RCSID(0, "$NetBSD: xbdback_xenb
 #define XENPRINTF(x)
 #endif
 
-#define BLKIF_RING_SIZE __RING_SIZE((blkif_sring_t *)0, PAGE_SIZE)
+#define BLKIF_RING_SIZE __CONST_RING_SIZE(blkif, PAGE_SIZE)
 
 /*
  * Backend block device driver for Xen
 */
 
-/* Max number of pages per request. The request may not be page aligned */
-#define BLKIF_MAX_PAGES_PER_REQUEST (BLKIF_MAX_SEGMENTS_PER_REQUEST + 1)
-
 /* Values are expressed in 512-byte sectors */
 #define VBD_BSIZE 512
 #define VBD_MAXSECT ((PAGE_SIZE / VBD_BSIZE) - 1)
 
+/* Need to alloc one extra page to account for possible mapping offset */
+#define VBD_VA_SIZE (MAXPHYS + PAGE_SIZE)
+
 struct xbdback_request;
 struct xbdback_io;
 struct xbdback_fragment;
@@ -150,6 +150,11 @@ enum xbdi_proto {
     XBDIP_64
 };
 
+struct xbdback_va {
+    SLIST_ENTRY(xbdback_va) xv_next;
+    vaddr_t xv_vaddr;
+};
+
 /* we keep the xbdback instances in a linked list */
 struct xbdback_instance {
     SLIST_ENTRY(xbdback_instance) next;
@@ -162,6 +167,9 @@ struct xbdback_instance {
     kmutex_t xbdi_lock;
     kcondvar_t xbdi_cv; /* wait channel for thread work */
     xbdback_state_t xbdi_status; /* thread's status */
+    /* KVA for mapping transfers */
+    struct xbdback_va xbdi_va[BLKIF_RING_SIZE];
+    SLIST_HEAD(, xbdback_va) xbdi_va_free;
     /* backing device parameters */
     dev_t xbdi_dev;
     const struct bdevsw *xbdi_bdevsw; /* pointer to the device's bdevsw */
@@ -185,7 +193,6 @@ struct xbdback_instance {
      */
     RING_IDX xbdi_req_prod; /* limit on request indices */
     xbdback_cont_t xbdi_cont, xbdi_cont_aux;
-    SIMPLEQ_ENTRY(xbdback_instance) xbdi_on_hold; /* waiting on resources */
     /* _request state: track requests fetched from ring */
     struct xbdback_request *xbdi_req; /* if NULL, ignore following */
     blkif_request_t xbdi_xen_req;
@@ -246,6 +253,7 @@ struct xbdback_io {
     SLIST_HEAD(, xbdback_fragment) xio_rq;
     /* the virtual address to map the request at */
     vaddr_t xio_vaddr;
+    struct xbdback_va *xio_xv;
     /* grants to map */
     grant_ref_t xio_gref[XENSHM_MAX_PAGES_PER_REQUEST];
     /* grants release */
@@ -259,6 +267,7 @@ struct xbdback_io {
 #define xio_buf         u.xio_rw.xio_buf
 #define xio_rq          u.xio_rw.xio_rq
 #define xio_vaddr       u.xio_rw.xio_vaddr
+#define xio_xv          u.xio_rw.xio_xv
 #define xio_gref        u.xio_rw.xio_gref
 #define xio_gh          u.xio_rw.xio_gh
 #define xio_nrma        u.xio_rw.xio_nrma
@@ -283,20 +292,16 @@ struct xbdback_fragment {
  * submitted by frontend.
 */
 /* XXXSMP */
-struct xbdback_pool {
+static struct xbdback_pool {
     struct pool_cache pc;
     struct timeval last_warning;
 } xbdback_request_pool, xbdback_io_pool, xbdback_fragment_pool;
 
-SIMPLEQ_HEAD(xbdback_iqueue, xbdback_instance);
-static struct xbdback_iqueue xbdback_shmq;
-static int xbdback_shmcb; /* have we already registered a callback? */
-
 /* Interval between reports of I/O errors from frontend */
-struct timeval xbdback_err_intvl = { 1, 0 };
+static const struct timeval xbdback_err_intvl = { 1, 0 };
 #ifdef DEBUG
-struct timeval xbdback_fragio_intvl = { 60, 0 };
+static const struct timeval xbdback_fragio_intvl = { 60, 0 };
 #endif
 
 void xbdbackattach(int);
 static int xbdback_xenbus_create(struct xenbus_device *);
@@ -333,9 +338,6 @@ static void *xbdback_co_io_gotfrag2(stru
 static void *xbdback_co_map_io(struct xbdback_instance *, void *);
 static void *xbdback_co_do_io(struct xbdback_instance *, void *);
 
-static void *xbdback_co_wait_shm_callback(struct xbdback_instance *, void *);
-
-static int xbdback_shm_callback(void *);
 static void xbdback_io_error(struct xbdback_io *, int);
 static void xbdback_iodone(struct buf *);
 static void xbdback_send_reply(struct xbdback_instance *, uint64_t , int , int);
@@ -366,8 +368,6 @@ xbdbackattach(int n)
      */
     SLIST_INIT(&xbdback_instances);
     mutex_init(&xbdback_lock, MUTEX_DEFAULT, IPL_NONE);
-    SIMPLEQ_INIT(&xbdback_shmq);
-    xbdback_shmcb = 0;
 
     pool_cache_bootstrap(&xbdback_request_pool.pc,
         sizeof(struct xbdback_request), 0, 0, 0, "xbbrp", NULL,
@@ -449,6 +449,13 @@ xbdback_xenbus_create(struct xenbus_devi
     xbusd->xbusd_otherend_changed = xbdback_frontend_changed;
     xbdi->xbdi_xbusd = xbusd;
 
+    for (i = 0; i < BLKIF_RING_SIZE; i++) {
+        xbdi->xbdi_va[i].xv_vaddr = uvm_km_alloc(kernel_map,
+            VBD_VA_SIZE, 0, UVM_KMF_VAONLY|UVM_KMF_WAITVA);
+        SLIST_INSERT_HEAD(&xbdi->xbdi_va_free, &xbdi->xbdi_va[i],
+            xv_next);
+    }
+
     error = xenbus_watch_path2(xbusd, xbusd->xbusd_path,
         "physical-device", &xbdi->xbdi_watch, xbdback_backend_changed);
     if (error) {
@@ -515,6 +522,15 @@ xbdback_xenbus_destroy(void *arg)
     mutex_enter(&xbdback_lock);
     SLIST_REMOVE(&xbdback_instances, xbdi, xbdback_instance, next);
     mutex_exit(&xbdback_lock);
+
+    for (int i = 0; i < BLKIF_RING_SIZE; i++) {
+        if (xbdi->xbdi_va[i].xv_vaddr != 0) {
+            uvm_km_free(kernel_map, xbdi->xbdi_va[i].xv_vaddr,
+                VBD_VA_SIZE, UVM_KMF_VAONLY);
+            xbdi->xbdi_va[i].xv_vaddr = 0;
+        }
+    }
+
     mutex_destroy(&xbdi->xbdi_lock);
     cv_destroy(&xbdi->xbdi_cv);
     kmem_free(xbdi, sizeof(*xbdi));
@@ -1406,7 +1422,8 @@ xbdback_co_io_gotio(struct xbdback_insta
     xbd_io->xio_operation = xbdi->xbdi_xen_req.operation;
 
     start_offset = xbdi->xbdi_this_fs * VBD_BSIZE;
-
+    KASSERT(start_offset < PAGE_SIZE);
+
     if (xbdi->xbdi_xen_req.operation == BLKIF_OP_WRITE) {
         buf_flags = B_WRITE;
     } else {
@@ -1531,6 +1548,8 @@ static void *
 xbdback_co_do_io(struct xbdback_instance *xbdi, void *obj)
 {
     struct xbdback_io *xbd_io = xbdi->xbdi_io;
+    vaddr_t start_offset;
+    int nsegs __diagused;
 
     switch (xbd_io->xio_operation) {
     case BLKIF_OP_FLUSH_DISKCACHE:
@@ -1559,27 +1578,20 @@ xbdback_co_do_io(struct xbdback_instance
     }
     case BLKIF_OP_READ:
     case BLKIF_OP_WRITE:
+        start_offset = (vaddr_t)xbd_io->xio_buf.b_data;
+        KASSERT(xbd_io->xio_buf.b_bcount + start_offset < VBD_VA_SIZE);
         xbd_io->xio_buf.b_data = (void *)
-            ((vaddr_t)xbd_io->xio_buf.b_data + xbd_io->xio_vaddr);
+            (start_offset + xbd_io->xio_vaddr);
 #ifdef DIAGNOSTIC
-        {
-        vaddr_t bdata = (vaddr_t)xbd_io->xio_buf.b_data;
-        int nsegs =
-            ((((bdata + xbd_io->xio_buf.b_bcount - 1) & ~PAGE_MASK) -
-            (bdata & ~PAGE_MASK)) >> PAGE_SHIFT) + 1;
-        if ((bdata & ~PAGE_MASK) != (xbd_io->xio_vaddr & ~PAGE_MASK)) {
-            printf("xbdback_co_do_io: vaddr %#" PRIxVADDR
-                " bdata %#" PRIxVADDR "\n",
-                xbd_io->xio_vaddr, bdata);
-            panic("xbdback_co_do_io: bdata page change");
-        }
+        nsegs = round_page(start_offset + xbd_io->xio_buf.b_bcount)
+            >> PAGE_SHIFT;
         if (nsegs > xbd_io->xio_nrma) {
             printf("xbdback_co_do_io: vaddr %#" PRIxVADDR
                 " bcount %#x doesn't fit in %d pages\n",
-                bdata, xbd_io->xio_buf.b_bcount, xbd_io->xio_nrma);
+                start_offset, xbd_io->xio_buf.b_bcount,
+                xbd_io->xio_nrma);
             panic("xbdback_co_do_io: not enough pages");
         }
-        }
 #endif
         if ((xbd_io->xio_buf.b_flags & B_READ) == 0) {
             mutex_enter(xbd_io->xio_buf.b_vp->v_interlock);
@@ -1745,7 +1757,7 @@ xbdback_send_reply(struct xbdback_instan
 static void *
 xbdback_map_shm(struct xbdback_io *xbd_io)
 {
-    struct xbdback_instance *xbdi;
+    struct xbdback_instance *xbdi = xbd_io->xio_xbdi;
     struct xbdback_request *xbd_rq;
     int error, s;
 
@@ -1759,11 +1771,17 @@ xbdback_map_shm(struct xbdback_io *xbd_i
 
     KASSERT(xbd_io->xio_mapped == 0);
 
-    xbdi = xbd_io->xio_xbdi;
+    s = splvm(); /* XXXSMP */
     xbd_rq = SLIST_FIRST(&xbd_io->xio_rq)->car;
 
+    xbd_io->xio_xv = SLIST_FIRST(&xbdi->xbdi_va_free);
+    KASSERT(xbd_io->xio_xv != NULL);
+    SLIST_REMOVE_HEAD(&xbdi->xbdi_va_free, xv_next);
+    xbd_io->xio_vaddr = xbd_io->xio_xv->xv_vaddr;
+    splx(s);
+
     error = xen_shm_map(xbd_io->xio_nrma, xbdi->xbdi_domid,
-        xbd_io->xio_gref, &xbd_io->xio_vaddr, xbd_io->xio_gh,
+        xbd_io->xio_gref, xbd_io->xio_vaddr, xbd_io->xio_gh,
         (xbd_rq->rq_operation == BLKIF_OP_WRITE) ? XSHM_RO : 0);
 
     switch(error) {
@@ -1777,107 +1795,25 @@ xbdback_map_shm(struct xbdback_io *xbd_i
 #endif
         xbd_io->xio_mapped = 1;
         return xbdi;
-    case ENOMEM:
-        s = splvm(); /* XXXSMP */
-        if (!xbdback_shmcb) {
-            if (xen_shm_callback(xbdback_shm_callback, xbdi)
-                != 0) {
-                splx(s);
-                panic("xbdback_map_shm: "
-                    "xen_shm_callback failed");
-            }
-            xbdback_shmcb = 1;
-        }
-        SIMPLEQ_INSERT_TAIL(&xbdback_shmq, xbdi, xbdi_on_hold);
-        splx(s);
-        /* Put the thread to sleep until the callback is called */
-        xbdi->xbdi_cont = xbdback_co_wait_shm_callback;
-        return NULL;
     default:
         if (ratecheck(&xbdi->xbdi_lasterr_time, &xbdback_err_intvl)) {
            printf("xbdback_map_shm: xen_shm error %d ", error);
        }
        xbdback_io_error(xbdi->xbdi_io, error);
+       SLIST_INSERT_HEAD(&xbdi->xbdi_va_free, xbd_io->xio_xv, xv_next);
+       xbd_io->xio_xv = NULL;
        xbdi->xbdi_io = NULL;
        xbdi->xbdi_cont = xbdi->xbdi_cont_aux;
        return xbdi;
     }
 }
 
-static int
-xbdback_shm_callback(void *arg)
-{
-    int error, s;
-
-    /*
-     * The shm callback may be executed at any level, including
-     * IPL_BIO and IPL_NET levels. Raise to the lowest priority level
-     * that can mask both.
-     */
-    s = splvm(); /* XXXSMP */
-    while(!SIMPLEQ_EMPTY(&xbdback_shmq)) {
-        struct xbdback_instance *xbdi;
-        struct xbdback_io *xbd_io;
-        struct xbdback_request *xbd_rq;
-
-        xbdi = SIMPLEQ_FIRST(&xbdback_shmq);
-        xbd_io = xbdi->xbdi_io;
-        xbd_rq = SLIST_FIRST(&xbd_io->xio_rq)->car;
-        KASSERT(xbd_io->xio_mapped == 0);
-
-        error = xen_shm_map(xbd_io->xio_nrma,
-            xbdi->xbdi_domid, xbd_io->xio_gref,
-            &xbd_io->xio_vaddr, xbd_io->xio_gh,
-            XSHM_CALLBACK |
-            ((xbd_rq->rq_operation == BLKIF_OP_WRITE) ? XSHM_RO: 0));
-        switch(error) {
-        case ENOMEM:
-            splx(s);
-            return -1; /* will try again later */
-        case 0:
-            SIMPLEQ_REMOVE_HEAD(&xbdback_shmq, xbdi_on_hold);
-            xbd_io->xio_mapped = 1;
-            xbdback_wakeup_thread(xbdi);
-            break;
-        default:
-            SIMPLEQ_REMOVE_HEAD(&xbdback_shmq, xbdi_on_hold);
-            printf("xbdback_shm_callback: xen_shm error %d\n",
-                error);
-            xbdback_io_error(xbd_io, error);
-            xbdi->xbdi_io = NULL;
-            xbdback_wakeup_thread(xbdi);
-            break;
-        }
-    }
-    xbdback_shmcb = 0;
-    splx(s);
-    return 0;
-}
-
-/*
- * Allows waiting for the shm callback to complete.
- */
-static void *
-xbdback_co_wait_shm_callback(struct xbdback_instance *xbdi, void *obj)
-{
-
-    if (xbdi->xbdi_io == NULL || xbdi->xbdi_io->xio_mapped == 1) {
-        /*
-         * Only proceed to next step when the callback reported
-         * success or failure.
-         */
-        xbdi->xbdi_cont = xbdi->xbdi_cont_aux;
-        return xbdi;
-    } else {
-        /* go back to sleep */
-        return NULL;
-    }
-}
-
 /* unmap a request from our virtual address space (request is done) */
 static void
 xbdback_unmap_shm(struct xbdback_io *xbd_io)
 {
+    struct xbdback_instance *xbdi = xbd_io->xio_xbdi;
+
 #ifdef XENDEBUG_VBD
     int i;
     printf("xbdback_unmap_shm handle ");
@@ -1891,6 +1827,8 @@ xbdback_unmap_shm(struct xbdback_io *xbd
     xbd_io->xio_mapped = 0;
     xen_shm_unmap(xbd_io->xio_vaddr, xbd_io->xio_nrma, xbd_io->xio_gh);
+    SLIST_INSERT_HEAD(&xbdi->xbdi_va_free, xbd_io->xio_xv, xv_next);
+    xbd_io->xio_xv = NULL;
     xbd_io->xio_vaddr = -1;
 }
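Condensed, the new KVA scheme in xbdback works like this (a sketch
simplified from the diff above; splvm()/splx() and error paths omitted):

    /* attach: reserve BLKIF_RING_SIZE chunks of VBD_VA_SIZE KVA once;
     * UVM_KMF_WAITVA means this may sleep but cannot fail. */
    for (i = 0; i < BLKIF_RING_SIZE; i++) {
        xbdi->xbdi_va[i].xv_vaddr = uvm_km_alloc(kernel_map,
            VBD_VA_SIZE, 0, UVM_KMF_VAONLY|UVM_KMF_WAITVA);
        SLIST_INSERT_HEAD(&xbdi->xbdi_va_free, &xbdi->xbdi_va[i],
            xv_next);
    }

    /* map a request: pop a chunk; with one chunk per ring slot the
     * free list can never be empty while a request needs mapping,
     * so the old ENOMEM/callback machinery goes away. */
    xbd_io->xio_xv = SLIST_FIRST(&xbdi->xbdi_va_free);
    SLIST_REMOVE_HEAD(&xbdi->xbdi_va_free, xv_next);
    xbd_io->xio_vaddr = xbd_io->xio_xv->xv_vaddr;

    /* unmap: push the chunk back for reuse */
    SLIST_INSERT_HEAD(&xbdi->xbdi_va_free, xbd_io->xio_xv, xv_next);
    xbd_io->xio_xv = NULL;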