Module Name:	src
Committed By:	jdolecek
Date:		Sun Apr  5 17:26:47 UTC 2020
Modified Files:
	src/sys/arch/xen/xen: if_xennet_xenbus.c xennetback_xenbus.c

Log Message:
remove support for legacy rx-flip mode for xennet(4)/xvif(4), making
rx-copy (first shipped in NetBSD 6.0 in 2012) the only supported mode

this is mostly to simplify maintenance and future development

rx-flip is not supported by Linux Dom0/DomU, and NetBSD Dom0/DomU has
defaulted to rx-copy for over 8 years now too, so there is little need
to keep the support for compatibility

besides compatibility there is no other reason to keep rx-flip - page
transfer is generally slower than copy due to necessary MMU/TLB
manipulation, especially on MP systems


To generate a diff of this commit:
cvs rdiff -u -r1.98 -r1.99 src/sys/arch/xen/xen/if_xennet_xenbus.c
cvs rdiff -u -r1.91 -r1.92 src/sys/arch/xen/xen/xennetback_xenbus.c

Please note that diffs are not public domain; they are subject to the
copyright notices on the relevant files.
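
As background for the log message, here is a minimal standalone sketch of
the negotiation this change enforces: both ends now insist on rx-copy via
the xenstore keys named in the diff (feature-rx-copy published by the
backend, request-rx-copy published by the frontend) and fail the connection
instead of falling back to rx-flip.  This is illustrative only, not code
from the commit; xenstore_read_ul() is a made-up stand-in for the drivers'
xenbus_read_ul().

/*
 * Standalone model (not driver code) of the rx-copy negotiation.
 */
#include <stdio.h>
#include <stdbool.h>

/* stand-in: pretend to read an unsigned long key from xenstore */
static int
xenstore_read_ul(const char *key, unsigned long *val)
{
	/* assume a modern peer that advertises rx-copy */
	*val = 1;
	printf("read %s -> %lu\n", key, *val);
	return 0;
}

/* frontend (xennet): require the backend to offer feature-rx-copy */
static bool
frontend_connect(void)
{
	unsigned long rx_copy;
	int error;

	error = xenstore_read_ul("backend/feature-rx-copy", &rx_copy);
	if (error || !rx_copy) {
		printf("feature-rx-copy not supported, aborting\n");
		return false;
	}
	printf("using RX copy mode\n");
	return true;
}

/* backend (xvif): require the frontend to set request-rx-copy */
static bool
backend_connect(void)
{
	unsigned long rx_copy;
	int error;

	error = xenstore_read_ul("frontend/request-rx-copy", &rx_copy);
	if (error || !rx_copy) {
		printf("request-rx-copy not supported, aborting\n");
		return false;
	}
	return true;
}

int
main(void)
{
	return (frontend_connect() && backend_connect()) ? 0 : 1;
}

In the real drivers the failure path calls xenbus_dev_fatal() and aborts
the connect, as the hunks below show.
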
Modified files: Index: src/sys/arch/xen/xen/if_xennet_xenbus.c diff -u src/sys/arch/xen/xen/if_xennet_xenbus.c:1.98 src/sys/arch/xen/xen/if_xennet_xenbus.c:1.99 --- src/sys/arch/xen/xen/if_xennet_xenbus.c:1.98 Sun Apr 5 14:20:18 2020 +++ src/sys/arch/xen/xen/if_xennet_xenbus.c Sun Apr 5 17:26:46 2020 @@ -1,4 +1,4 @@ -/* $NetBSD: if_xennet_xenbus.c,v 1.98 2020/04/05 14:20:18 jdolecek Exp $ */ +/* $NetBSD: if_xennet_xenbus.c,v 1.99 2020/04/05 17:26:46 jdolecek Exp $ */ /* * Copyright (c) 2006 Manuel Bouyer. @@ -81,7 +81,7 @@ */ #include <sys/cdefs.h> -__KERNEL_RCSID(0, "$NetBSD: if_xennet_xenbus.c,v 1.98 2020/04/05 14:20:18 jdolecek Exp $"); +__KERNEL_RCSID(0, "$NetBSD: if_xennet_xenbus.c,v 1.99 2020/04/05 17:26:46 jdolecek Exp $"); #include "opt_xen.h" #include "opt_nfs_boot.h" @@ -208,18 +208,11 @@ struct xennet_xenbus_softc { #define BEST_DISCONNECTED 1 #define BEST_CONNECTED 2 #define BEST_SUSPENDED 3 - unsigned long sc_rx_feature; -#define FEATURE_RX_FLIP 0 -#define FEATURE_RX_COPY 1 krndsource_t sc_rnd_source; }; #define SC_NLIVEREQ(sc) ((sc)->sc_rx_ring.req_prod_pvt - \ (sc)->sc_rx_ring.sring->rsp_prod) -/* too big to be on stack */ -static multicall_entry_t rx_mcl[NET_RX_RING_SIZE+1]; -static u_long xennet_pages[NET_RX_RING_SIZE]; - static pool_cache_t if_xennetrxbuf_cache; static int if_xennetrxbuf_cache_inited=0; @@ -551,16 +544,12 @@ xennet_talk_to_backend(struct xennet_xen error = xenbus_read_ul(NULL, sc->sc_xbusd->xbusd_otherend, "feature-rx-copy", &rx_copy, 10); - if (error) - rx_copy = 0; /* default value if key is absent */ - - if (rx_copy == 1) { - aprint_normal_dev(sc->sc_dev, "using RX copy mode\n"); - sc->sc_rx_feature = FEATURE_RX_COPY; - } else { - aprint_normal_dev(sc->sc_dev, "using RX flip mode\n"); - sc->sc_rx_feature = FEATURE_RX_FLIP; + if (error || !rx_copy) { + xenbus_dev_fatal(sc->sc_xbusd, error, + "feature-rx-copy not supported"); + return false; } + aprint_normal_dev(sc->sc_dev, "using RX copy mode\n"); again: xbt = xenbus_transaction_start(); @@ -705,8 +694,7 @@ xennet_alloc_rx_buffer(struct xennet_xen RING_IDX req_prod = sc->sc_rx_ring.req_prod_pvt; RING_IDX i; struct xennet_rxreq *req; - struct xen_memory_reservation reservation; - int s, otherend_id, notify; + int otherend_id, notify; otherend_id = sc->sc_xbusd->xbusd_otherend_id; @@ -718,23 +706,10 @@ xennet_alloc_rx_buffer(struct xennet_xen RING_GET_REQUEST(&sc->sc_rx_ring, req_prod + i)->id = req->rxreq_id; - switch (sc->sc_rx_feature) { - case FEATURE_RX_COPY: - if (xengnt_grant_access(otherend_id, - xpmap_ptom_masked(req->rxreq_pa), - 0, &req->rxreq_gntref) != 0) { - goto out_loop; - } - break; - case FEATURE_RX_FLIP: - if (xengnt_grant_transfer(otherend_id, - &req->rxreq_gntref) != 0) { - goto out_loop; - } - break; - default: - panic("%s: unsupported RX feature mode: %ld\n", - __func__, sc->sc_rx_feature); + if (xengnt_grant_access(otherend_id, + xpmap_ptom_masked(req->rxreq_pa), + 0, &req->rxreq_gntref) != 0) { + goto out_loop; } RING_GET_REQUEST(&sc->sc_rx_ring, req_prod + i)->gref = @@ -742,19 +717,6 @@ xennet_alloc_rx_buffer(struct xennet_xen SLIST_REMOVE_HEAD(&sc->sc_rxreq_head, rxreq_next); sc->sc_free_rxreql--; - - if (sc->sc_rx_feature == FEATURE_RX_FLIP) { - /* unmap the page */ - MULTI_update_va_mapping(&rx_mcl[i], - req->rxreq_va, 0, 0); - /* - * Remove this page from pseudo phys map before - * passing back to Xen. 
- */ - xennet_pages[i] = - xpmap_ptom(req->rxreq_pa) >> PAGE_SHIFT; - xpmap_ptom_unmap(req->rxreq_pa); - } } out_loop: @@ -762,34 +724,6 @@ out_loop: return; } - if (sc->sc_rx_feature == FEATURE_RX_FLIP) { - /* also make sure to flush all TLB entries */ - rx_mcl[i-1].args[MULTI_UVMFLAGS_INDEX] = - UVMF_TLB_FLUSH | UVMF_ALL; - /* - * We may have allocated buffers which have entries - * outstanding in the page update queue -- make sure we flush - * those first! - */ - s = splvm(); /* XXXSMP */ - xpq_flush_queue(); - splx(s); - /* now decrease reservation */ - set_xen_guest_handle(reservation.extent_start, xennet_pages); - reservation.nr_extents = i; - reservation.extent_order = 0; - reservation.address_bits = 0; - reservation.domid = DOMID_SELF; - rx_mcl[i].op = __HYPERVISOR_memory_op; - rx_mcl[i].args[0] = XENMEM_decrease_reservation; - rx_mcl[i].args[1] = (unsigned long)&reservation; - HYPERVISOR_multicall(rx_mcl, i+1); - if (__predict_false(rx_mcl[i].result != i)) { - panic("xennet_alloc_rx_buffer: " - "XENMEM_decrease_reservation"); - } - } - sc->sc_rx_ring.req_prod_pvt = req_prod + i; RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&sc->sc_rx_ring, notify); if (notify) @@ -803,11 +737,7 @@ out_loop: static void xennet_free_rx_buffer(struct xennet_xenbus_softc *sc) { - paddr_t ma, pa; - vaddr_t va; RING_IDX i; - mmu_update_t mmu[1]; - multicall_entry_t mcl[2]; mutex_enter(&sc->sc_rx_lock); @@ -825,56 +755,8 @@ xennet_free_rx_buffer(struct xennet_xenb rxreq_next); sc->sc_free_rxreql++; - switch (sc->sc_rx_feature) { - case FEATURE_RX_COPY: - xengnt_revoke_access(rxreq->rxreq_gntref); - rxreq->rxreq_gntref = GRANT_INVALID_REF; - break; - case FEATURE_RX_FLIP: - ma = xengnt_revoke_transfer( - rxreq->rxreq_gntref); - rxreq->rxreq_gntref = GRANT_INVALID_REF; - if (ma == 0) { - u_long pfn; - struct xen_memory_reservation xenres; - /* - * transfer not complete, we lost the page. 
- * Get one from hypervisor - */ - set_xen_guest_handle( - xenres.extent_start, &pfn); - xenres.nr_extents = 1; - xenres.extent_order = 0; - xenres.address_bits = 31; - xenres.domid = DOMID_SELF; - if (HYPERVISOR_memory_op( - XENMEM_increase_reservation, &xenres) < 0) { - panic("xennet_free_rx_buffer: " - "can't get memory back"); - } - ma = pfn; - KASSERT(ma != 0); - } - pa = rxreq->rxreq_pa; - va = rxreq->rxreq_va; - /* remap the page */ - mmu[0].ptr = (ma << PAGE_SHIFT) | MMU_MACHPHYS_UPDATE; - mmu[0].val = pa >> PAGE_SHIFT; - MULTI_update_va_mapping(&mcl[0], va, - (ma << PAGE_SHIFT) | PTE_P | PTE_W | xpmap_pg_nx, - UVMF_TLB_FLUSH|UVMF_ALL); - xpmap_ptom_map(pa, ptoa(ma)); - mcl[1].op = __HYPERVISOR_mmu_update; - mcl[1].args[0] = (unsigned long)mmu; - mcl[1].args[1] = 1; - mcl[1].args[2] = 0; - mcl[1].args[3] = DOMID_SELF; - HYPERVISOR_multicall(mcl, 2); - break; - default: - panic("%s: unsupported RX feature mode: %ld\n", - __func__, sc->sc_rx_feature); - } + xengnt_revoke_access(rxreq->rxreq_gntref); + rxreq->rxreq_gntref = GRANT_INVALID_REF; } } @@ -981,10 +863,8 @@ xennet_handler(void *arg) struct ifnet *ifp = &sc->sc_ethercom.ec_if; RING_IDX resp_prod, i; struct xennet_rxreq *req; - paddr_t ma, pa; + paddr_t pa; vaddr_t va; - mmu_update_t mmu[1]; - multicall_entry_t mcl[2]; struct mbuf *m; void *pktp; int more_to_do; @@ -1011,55 +891,11 @@ again: KASSERT(req->rxreq_gntref != GRANT_INVALID_REF); KASSERT(req->rxreq_id == rx->id); - ma = 0; - switch (sc->sc_rx_feature) { - case FEATURE_RX_COPY: - xengnt_revoke_access(req->rxreq_gntref); - break; - case FEATURE_RX_FLIP: - ma = xengnt_revoke_transfer(req->rxreq_gntref); - if (ma == 0) { - DPRINTFN(XEDB_EVENT, ("xennet_handler ma == 0\n")); - /* - * the remote could't send us a packet. - * we can't free this rxreq as no page will be mapped - * here. Instead give it back immediatly to backend. - */ - if_statinc(ifp, if_ierrors); - RING_GET_REQUEST(&sc->sc_rx_ring, - sc->sc_rx_ring.req_prod_pvt)->id = req->rxreq_id; - RING_GET_REQUEST(&sc->sc_rx_ring, - sc->sc_rx_ring.req_prod_pvt)->gref = - req->rxreq_gntref; - sc->sc_rx_ring.req_prod_pvt++; - RING_PUSH_REQUESTS(&sc->sc_rx_ring); - continue; - } - break; - default: - panic("%s: unsupported RX feature mode: %ld\n", - __func__, sc->sc_rx_feature); - } + xengnt_revoke_access(req->rxreq_gntref); pa = req->rxreq_pa; va = req->rxreq_va; - if (sc->sc_rx_feature == FEATURE_RX_FLIP) { - /* remap the page */ - mmu[0].ptr = (ma << PAGE_SHIFT) | MMU_MACHPHYS_UPDATE; - mmu[0].val = pa >> PAGE_SHIFT; - MULTI_update_va_mapping(&mcl[0], va, - (ma << PAGE_SHIFT) | PTE_P | PTE_W | xpmap_pg_nx, - UVMF_TLB_FLUSH|UVMF_ALL); - xpmap_ptom_map(pa, ptoa(ma)); - mcl[1].op = __HYPERVISOR_mmu_update; - mcl[1].args[0] = (unsigned long)mmu; - mcl[1].args[1] = 1; - mcl[1].args[2] = 0; - mcl[1].args[3] = DOMID_SELF; - HYPERVISOR_multicall(mcl, 2); - } - pktp = (void *)(va + rx->offset); #ifdef XENNET_DEBUG_DUMP xennet_hex_dump(pktp, rx->status, "r", rx->id); Index: src/sys/arch/xen/xen/xennetback_xenbus.c diff -u src/sys/arch/xen/xen/xennetback_xenbus.c:1.91 src/sys/arch/xen/xen/xennetback_xenbus.c:1.92 --- src/sys/arch/xen/xen/xennetback_xenbus.c:1.91 Sat Apr 4 14:45:37 2020 +++ src/sys/arch/xen/xen/xennetback_xenbus.c Sun Apr 5 17:26:46 2020 @@ -1,4 +1,4 @@ -/* $NetBSD: xennetback_xenbus.c,v 1.91 2020/04/04 14:45:37 jdolecek Exp $ */ +/* $NetBSD: xennetback_xenbus.c,v 1.92 2020/04/05 17:26:46 jdolecek Exp $ */ /* * Copyright (c) 2006 Manuel Bouyer. 
@@ -25,7 +25,7 @@ */ #include <sys/cdefs.h> -__KERNEL_RCSID(0, "$NetBSD: xennetback_xenbus.c,v 1.91 2020/04/04 14:45:37 jdolecek Exp $"); +__KERNEL_RCSID(0, "$NetBSD: xennetback_xenbus.c,v 1.92 2020/04/05 17:26:46 jdolecek Exp $"); #include "opt_xen.h" @@ -96,7 +96,6 @@ struct xnetback_instance { domid_t xni_domid; /* attached to this domain */ uint32_t xni_handle; /* domain-specific handle */ xnetback_state_t xni_status; - bool xni_rx_copy; /* network interface stuff */ struct ethercom xni_ec; @@ -119,7 +118,6 @@ struct xnetback_instance { void xvifattach(int); static int xennetback_ifioctl(struct ifnet *, u_long, void *); static void xennetback_ifstart(struct ifnet *); -static void xennetback_ifsoftstart_transfer(struct xnetback_instance *); static void xennetback_ifsoftstart_copy(struct xnetback_instance *); static void xennetback_ifwatchdog(struct ifnet *); static int xennetback_ifinit(struct ifnet *); @@ -150,34 +148,10 @@ static struct xenbus_backend_driver xvif */ #define NB_XMIT_PAGES_BATCH 64 -/* - * We will transfer a mapped page to the remote domain, and remap another - * page in place immediately. For this we keep a list of pages available. - * When the list is empty, we ask the hypervisor to give us - * NB_XMIT_PAGES_BATCH pages back. - */ -static unsigned long mcl_pages[NB_XMIT_PAGES_BATCH]; /* our physical pages */ -int mcl_pages_alloc; /* current index in mcl_pages */ -static int xennetback_get_mcl_page(paddr_t *); -static void xennetback_get_new_mcl_pages(void); - -/* - * If we can't transfer the mbuf directly, we have to copy it to a page which - * will be transferred to the remote domain. We use a pool_cache for this. - */ -pool_cache_t xmit_pages_cache; - /* arrays used in xennetback_ifstart(), too large to allocate on stack */ /* XXXSMP */ -static mmu_update_t xstart_mmu[NB_XMIT_PAGES_BATCH]; -static multicall_entry_t xstart_mcl[NB_XMIT_PAGES_BATCH + 1]; -static gnttab_transfer_t xstart_gop_transfer[NB_XMIT_PAGES_BATCH]; static gnttab_copy_t xstart_gop_copy[NB_XMIT_PAGES_BATCH]; static struct mbuf *mbufs_sent[NB_XMIT_PAGES_BATCH]; -static struct _pages_pool_free { - vaddr_t va; - paddr_t pa; -} pages_pool_free[NB_XMIT_PAGES_BATCH]; static struct _req_info { int id; int flags; @@ -187,30 +161,8 @@ static struct _req_info { void xvifattach(int n) { - int i; - struct pglist mlist; - struct vm_page *pg; - XENPRINTF(("xennetback_init\n")); - /* - * steal some non-managed pages to the VM system, to replace - * mbuf cluster or xmit_pages_pool pages given to foreign domains. 
- */ - if (uvm_pglistalloc(PAGE_SIZE * NB_XMIT_PAGES_BATCH, 0, 0xffffffff, - 0, 0, &mlist, NB_XMIT_PAGES_BATCH, 0) != 0) - panic("xennetback_init: uvm_pglistalloc"); - for (i = 0, pg = mlist.tqh_first; pg != NULL; - pg = pg->pageq.queue.tqe_next, i++) - mcl_pages[i] = xpmap_ptom(VM_PAGE_TO_PHYS(pg)) >> PAGE_SHIFT; - if (i != NB_XMIT_PAGES_BATCH) - panic("xennetback_init: %d mcl pages", i); - mcl_pages_alloc = NB_XMIT_PAGES_BATCH - 1; - - /* initialise pools */ - xmit_pages_cache = pool_cache_init(PAGE_SIZE, 0, 0, 0, "xnbxm", NULL, - IPL_VM, NULL, NULL, NULL); - SLIST_INIT(&xnetback_instances); mutex_init(&xnetback_lock, MUTEX_DEFAULT, IPL_NONE); @@ -337,14 +289,6 @@ xennetback_xenbus_create(struct xenbus_d xbusd->xbusd_path, err); goto abort_xbt; } - err = xenbus_printf(xbt, xbusd->xbusd_path, - "feature-rx-flip", "%d", 1); - if (err) { - aprint_error_ifnet(ifp, - "failed to write %s/feature-rx-flip: %d\n", - xbusd->xbusd_path, err); - goto abort_xbt; - } } while ((err = xenbus_transaction_end(xbt, 0)) == EAGAIN); if (err) { aprint_error_ifnet(ifp, @@ -462,14 +406,16 @@ xennetback_connect(struct xnetback_insta } err = xenbus_read_ul(NULL, xbusd->xbusd_otherend, "request-rx-copy", &rx_copy, 10); - if (err == ENOENT) - rx_copy = 0; - else if (err) { + if (err == ENOENT || !rx_copy) { + xenbus_dev_fatal(xbusd, err, + "%s/request-rx-copy not supported by frontend", + xbusd->xbusd_otherend); + return -1; + } else if (err) { xenbus_dev_fatal(xbusd, err, "reading %s/request-rx-copy", xbusd->xbusd_otherend); return -1; } - xneti->xni_rx_copy = (rx_copy != 0); /* allocate VA space and map rings */ xneti->xni_tx_ring_va = uvm_km_alloc(kernel_map, PAGE_SIZE, 0, @@ -638,48 +584,6 @@ xnetif_lookup(domid_t dom , uint32_t han return found; } -/* get a page to replace a mbuf cluster page given to a domain */ -static int -xennetback_get_mcl_page(paddr_t *map) -{ - if (mcl_pages_alloc < 0) { - /* - * we exhausted our allocation. We can't allocate new ones yet - * because the current pages may not have been loaned to - * the remote domain yet. We have to let the caller do this. - */ - return -1; - } - - *map = ((paddr_t)mcl_pages[mcl_pages_alloc]) << PAGE_SHIFT; - mcl_pages_alloc--; - return 0; -} - -static void -xennetback_get_new_mcl_pages(void) -{ - int nb_pages; - struct xen_memory_reservation res; - - /* get some new pages. */ - set_xen_guest_handle(res.extent_start, mcl_pages); - res.nr_extents = NB_XMIT_PAGES_BATCH; - res.extent_order = 0; - res.address_bits = 0; - res.domid = DOMID_SELF; - - nb_pages = HYPERVISOR_memory_op(XENMEM_increase_reservation, &res); - if (nb_pages <= 0) { - printf("xennetback: can't get new mcl pages (%d)\n", nb_pages); - return; - } - if (nb_pages != NB_XMIT_PAGES_BATCH) - printf("xennetback: got only %d new mcl pages\n", nb_pages); - - mcl_pages_alloc = nb_pages - 1; -} - static inline void xennetback_tx_response(struct xnetback_instance *xneti, int id, int status) { @@ -933,257 +837,7 @@ xennetback_ifstart(struct ifnet *ifp) * stack will enqueue all pending mbufs in the interface's send queue * before it is processed by the soft interrupt handler. 
*/ - if (__predict_true(xneti->xni_rx_copy)) - xennetback_ifsoftstart_copy(xneti); - else - xennetback_ifsoftstart_transfer(xneti); -} - -static void -xennetback_ifsoftstart_transfer(struct xnetback_instance *xneti) -{ - struct ifnet *ifp = &xneti->xni_if; - struct mbuf *m; - vaddr_t xmit_va; - paddr_t xmit_pa; - paddr_t xmit_ma; - paddr_t newp_ma = 0; /* XXX gcc */ - int i, j, nppitems; - mmu_update_t *mmup; - multicall_entry_t *mclp; - netif_rx_response_t *rxresp; - netif_rx_request_t rxreq; - RING_IDX req_prod, resp_prod; - int do_event = 0; - gnttab_transfer_t *gop; - int id, offset; - - XENPRINTF(("xennetback_ifsoftstart_transfer ")); - int s = splnet(); - if (__predict_false((ifp->if_flags & IFF_RUNNING) == 0)) { - splx(s); - return; - } - - while (!IFQ_IS_EMPTY(&ifp->if_snd)) { - XENPRINTF(("pkt\n")); - req_prod = xneti->xni_rxring.sring->req_prod; - resp_prod = xneti->xni_rxring.rsp_prod_pvt; - xen_rmb(); - - mmup = xstart_mmu; - mclp = xstart_mcl; - gop = xstart_gop_transfer; - for (nppitems = 0, i = 0; !IFQ_IS_EMPTY(&ifp->if_snd);) { - XENPRINTF(("have a packet\n")); - IFQ_POLL(&ifp->if_snd, m); - if (__predict_false(m == NULL)) - panic("xennetback_ifstart: IFQ_POLL"); - if (__predict_false( - req_prod == xneti->xni_rxring.req_cons || - xneti->xni_rxring.req_cons - resp_prod == - NET_RX_RING_SIZE)) { - /* out of ring space */ - XENPRINTF(("xennetback_ifstart: ring full " - "req_prod 0x%x req_cons 0x%x resp_prod " - "0x%x\n", - req_prod, xneti->xni_rxring.req_cons, - resp_prod)); - ifp->if_timer = 1; - break; - } - if (__predict_false(i == NB_XMIT_PAGES_BATCH)) - break; /* we filled the array */ - if (__predict_false( - xennetback_get_mcl_page(&newp_ma) != 0)) - break; /* out of memory */ - if ((m->m_flags & M_EXT_CLUSTER) != 0 && - !M_READONLY(m) && MCLBYTES == PAGE_SIZE) { - /* we can give this page away */ - xmit_pa = m->m_ext.ext_paddr; - xmit_ma = xpmap_ptom(xmit_pa); - xmit_va = (vaddr_t)m->m_ext.ext_buf; - KASSERT(xmit_pa != M_PADDR_INVALID); - KASSERT((xmit_va & PAGE_MASK) == 0); - offset = m->m_data - m->m_ext.ext_buf; - } else { - /* we have to copy the packet */ - xmit_va = (vaddr_t)pool_cache_get_paddr( - xmit_pages_cache, PR_NOWAIT, &xmit_pa); - if (__predict_false(xmit_va == 0)) - break; /* out of memory */ - - KASSERT(xmit_pa != POOL_PADDR_INVALID); - xmit_ma = xpmap_ptom(xmit_pa); - XENPRINTF(("xennetback_get_xmit_page: got va " - "0x%x ma 0x%x\n", (u_int)xmit_va, - (u_int)xmit_ma)); - m_copydata(m, 0, m->m_pkthdr.len, - (char *)xmit_va + LINUX_REQUESTED_OFFSET); - offset = LINUX_REQUESTED_OFFSET; - pages_pool_free[nppitems].va = xmit_va; - pages_pool_free[nppitems].pa = xmit_pa; - nppitems++; - } - /* start filling ring */ - RING_COPY_REQUEST(&xneti->xni_rxring, - xneti->xni_rxring.req_cons, &rxreq); - gop->ref = rxreq.gref; - id = rxreq.id; - xen_rmb(); - xneti->xni_rxring.req_cons++; - rxresp = RING_GET_RESPONSE(&xneti->xni_rxring, - resp_prod); - rxresp->id = id; - rxresp->offset = offset; - rxresp->status = m->m_pkthdr.len; - if ((m->m_pkthdr.csum_flags & - XN_M_CSUM_SUPPORTED) != 0) { - rxresp->flags = NETRXF_csum_blank; - } else { - rxresp->flags = NETRXF_data_validated; - } - /* - * transfers the page containing the packet to the - * remote domain, and map newp in place. 
- */ - xpmap_ptom_map(xmit_pa, newp_ma); - MULTI_update_va_mapping(mclp, xmit_va, - newp_ma | PTE_P | PTE_W | PTE_A | PTE_D | xpmap_pg_nx, 0); - mclp++; - gop->mfn = xmit_ma >> PAGE_SHIFT; - gop->domid = xneti->xni_domid; - gop++; - - mmup->ptr = newp_ma | MMU_MACHPHYS_UPDATE; - mmup->val = xmit_pa >> PAGE_SHIFT; - mmup++; - - /* done with this packet */ - IFQ_DEQUEUE(&ifp->if_snd, m); - mbufs_sent[i] = m; - resp_prod++; - i++; /* this packet has been queued */ - if_statinc(ifp, if_opackets); - bpf_mtap(ifp, m, BPF_D_OUT); - } - if (i != 0) { - /* - * We may have allocated buffers which have entries - * outstanding in the page update queue -- make sure - * we flush those first! - */ - int svm = splvm(); - xpq_flush_queue(); - splx(svm); - mclp[-1].args[MULTI_UVMFLAGS_INDEX] = - UVMF_TLB_FLUSH|UVMF_ALL; - mclp->op = __HYPERVISOR_mmu_update; - mclp->args[0] = (unsigned long)xstart_mmu; - mclp->args[1] = i; - mclp->args[2] = 0; - mclp->args[3] = DOMID_SELF; - mclp++; - /* update the MMU */ - if (HYPERVISOR_multicall(xstart_mcl, i + 1) != 0) { - panic("%s: HYPERVISOR_multicall failed", - ifp->if_xname); - } - for (j = 0; j < i + 1; j++) { - if (xstart_mcl[j].result != 0) { - printf("%s: xstart_mcl[%d] " - "failed (%lu)\n", ifp->if_xname, - j, xstart_mcl[j].result); - printf("%s: req_prod %u req_cons " - "%u rsp_prod %u rsp_prod_pvt %u " - "i %u\n", - ifp->if_xname, - xneti->xni_rxring.sring->req_prod, - xneti->xni_rxring.req_cons, - xneti->xni_rxring.sring->rsp_prod, - xneti->xni_rxring.rsp_prod_pvt, - i); - } - } - if (HYPERVISOR_grant_table_op(GNTTABOP_transfer, - xstart_gop_transfer, i) != 0) { - panic("%s: GNTTABOP_transfer failed", - ifp->if_xname); - } - - for (j = 0; j < i; j++) { - if (xstart_gop_transfer[j].status != GNTST_okay) { - printf("%s GNTTABOP_transfer[%d] %d\n", - ifp->if_xname, - j, xstart_gop_transfer[j].status); - printf("%s: req_prod %u req_cons " - "%u rsp_prod %u rsp_prod_pvt %u " - "i %d\n", - ifp->if_xname, - xneti->xni_rxring.sring->req_prod, - xneti->xni_rxring.req_cons, - xneti->xni_rxring.sring->rsp_prod, - xneti->xni_rxring.rsp_prod_pvt, - i); - rxresp = RING_GET_RESPONSE( - &xneti->xni_rxring, - xneti->xni_rxring.rsp_prod_pvt + j); - rxresp->status = NETIF_RSP_ERROR; - } - } - - /* update pointer */ - KASSERT( - xneti->xni_rxring.rsp_prod_pvt + i == resp_prod); - xneti->xni_rxring.rsp_prod_pvt = resp_prod; - RING_PUSH_RESPONSES_AND_CHECK_NOTIFY( - &xneti->xni_rxring, j); - if (j) - do_event = 1; - /* now we can free the mbufs */ - for (j = 0; j < i; j++) { - m_freem(mbufs_sent[j]); - } - for (j = 0; j < nppitems; j++) { - pool_cache_put_paddr(xmit_pages_cache, - (void *)pages_pool_free[j].va, - pages_pool_free[j].pa); - } - } - /* send event */ - if (do_event) { - xen_rmb(); - XENPRINTF(("%s receive event\n", - xneti->xni_if.if_xname)); - hypervisor_notify_via_evtchn(xneti->xni_evtchn); - do_event = 0; - } - /* check if we need to get back some pages */ - if (mcl_pages_alloc < 0) { - xennetback_get_new_mcl_pages(); - if (mcl_pages_alloc < 0) { - /* - * setup the watchdog to try again, because - * xennetback_ifstart() will never be called - * again if queue is full. - */ - printf("xennetback_ifstart: no mcl_pages\n"); - ifp->if_timer = 1; - break; - } - } - /* - * note that we don't use RING_FINAL_CHECK_FOR_REQUESTS() - * here, as the frontend doesn't notify when adding - * requests anyway - */ - if (__predict_false( - !RING_HAS_UNCONSUMED_REQUESTS(&xneti->xni_rxring))) { - /* ring full */ - break; - } - } - splx(s); + xennetback_ifsoftstart_copy(xneti); } /*