Module Name:    src
Committed By:   reinoud
Date:           Wed Jan 20 19:46:48 UTC 2021

Modified Files:
        src/sys/dev/acpi: virtio_acpi.c
        src/sys/dev/fdt: virtio_mmio_fdt.c
        src/sys/dev/pci: if_vioif.c ld_virtio.c vio9p.c viomb.c viornd.c
            vioscsi.c virtio.c virtio_pci.c virtioreg.h virtiovar.h
        src/sys/dev/virtio: virtio_mmio.c
Added Files:
        src/sys/dev/pci: virtio_pcireg.h

Log Message:
Add VirtIO PCI v1.0 attachments and fix the drivers affected.

The vioif, ld, scsi, viornd and viomb devices were adjusted when needed and
tested both in legacy 0.9 and v1.0 attachments through PCI on amd64, sparc64,
aarch64 and aarch64-eb. ACPI/FDT attachments also tested on
aarch64/aarch64-eb.

Known issues

* viomb on aarch64 works only with ACPI/FDT attachment but not with PCI
  attachment. PCI and ACPI/FDT attachment works on aarch64-eb.

* virtio on sparc64 attaches but it is not functioning, though this is not a
  regression.


To generate a diff of this commit:
cvs rdiff -u -r1.4 -r1.5 src/sys/dev/acpi/virtio_acpi.c
cvs rdiff -u -r1.4 -r1.5 src/sys/dev/fdt/virtio_mmio_fdt.c
cvs rdiff -u -r1.65 -r1.66 src/sys/dev/pci/if_vioif.c
cvs rdiff -u -r1.28 -r1.29 src/sys/dev/pci/ld_virtio.c
cvs rdiff -u -r1.2 -r1.3 src/sys/dev/pci/vio9p.c
cvs rdiff -u -r1.11 -r1.12 src/sys/dev/pci/viomb.c
cvs rdiff -u -r1.13 -r1.14 src/sys/dev/pci/viornd.c
cvs rdiff -u -r1.24 -r1.25 src/sys/dev/pci/vioscsi.c
cvs rdiff -u -r1.42 -r1.43 src/sys/dev/pci/virtio.c
cvs rdiff -u -r1.14 -r1.15 src/sys/dev/pci/virtio_pci.c
cvs rdiff -u -r0 -r1.1 src/sys/dev/pci/virtio_pcireg.h
cvs rdiff -u -r1.6 -r1.7 src/sys/dev/pci/virtioreg.h
cvs rdiff -u -r1.16 -r1.17 src/sys/dev/pci/virtiovar.h
cvs rdiff -u -r1.3 -r1.4 src/sys/dev/virtio/virtio_mmio.c

Please note that diffs are not public domain; they are subject to the
copyright notices on the relevant files.

Modified files:

Index: src/sys/dev/acpi/virtio_acpi.c
diff -u src/sys/dev/acpi/virtio_acpi.c:1.4 src/sys/dev/acpi/virtio_acpi.c:1.5
--- src/sys/dev/acpi/virtio_acpi.c:1.4	Mon Dec  7 10:02:51 2020
+++ src/sys/dev/acpi/virtio_acpi.c	Wed Jan 20 19:46:48 2021
@@ -1,4 +1,4 @@
-/* $NetBSD: virtio_acpi.c,v 1.4 2020/12/07 10:02:51 jmcneill Exp $ */
+/* $NetBSD: virtio_acpi.c,v 1.5 2021/01/20 19:46:48 reinoud Exp $ */
 
 /*-
  * Copyright (c) 2018 The NetBSD Foundation, Inc.
@@ -30,7 +30,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: virtio_acpi.c,v 1.4 2020/12/07 10:02:51 jmcneill Exp $");
+__KERNEL_RCSID(0, "$NetBSD: virtio_acpi.c,v 1.5 2021/01/20 19:46:48 reinoud Exp $");
 
 #include <sys/param.h>
 #include <sys/bus.h>
@@ -163,7 +163,16 @@ virtio_acpi_rescan(device_t self, const 
 	memset(&va, 0, sizeof(va));
 	va.sc_childdevid = vsc->sc_childdevid;
 
-	config_found_ia(self, ifattr, &va, virtiobusprint);
+	config_found_ia(self, ifattr, &va, NULL);
+
+	if (virtio_attach_failed(vsc))
+		return 0;
+
+	/*
+	 * Make sure child drivers initialize interrupts via call
+	 * to virtio_child_attach_finish().
+	 */
+	KASSERT(msc->sc_ih != NULL);
 
 	return 0;
 }

Index: src/sys/dev/fdt/virtio_mmio_fdt.c
diff -u src/sys/dev/fdt/virtio_mmio_fdt.c:1.4 src/sys/dev/fdt/virtio_mmio_fdt.c:1.5
--- src/sys/dev/fdt/virtio_mmio_fdt.c:1.4	Fri Jan 15 22:35:39 2021
+++ src/sys/dev/fdt/virtio_mmio_fdt.c	Wed Jan 20 19:46:48 2021
@@ -1,4 +1,4 @@
-/* $NetBSD: virtio_mmio_fdt.c,v 1.4 2021/01/15 22:35:39 jmcneill Exp $ */
+/* $NetBSD: virtio_mmio_fdt.c,v 1.5 2021/01/20 19:46:48 reinoud Exp $ */
 
 /*
  * Copyright (c) 2018 Jonathan A. Kollasch
@@ -27,7 +27,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: virtio_mmio_fdt.c,v 1.4 2021/01/15 22:35:39 jmcneill Exp $");
+__KERNEL_RCSID(0, "$NetBSD: virtio_mmio_fdt.c,v 1.5 2021/01/20 19:46:48 reinoud Exp $");
 
 #include <sys/param.h>
 #include <sys/systm.h>
@@ -119,19 +119,14 @@ virtio_mmio_fdt_rescan(device_t self, co
 
 	if (vsc->sc_child)	/* Child already attached? */
 		return 0;
+
 	memset(&va, 0, sizeof(va));
 	va.sc_childdevid = vsc->sc_childdevid;
 
-	config_found_ia(self, attr, &va, virtiobusprint);
+	config_found_ia(self, attr, &va, NULL);
 
-	if (vsc->sc_child == NULL) {
+	if (virtio_attach_failed(vsc))
 		return 0;
-	}
-
-	if (vsc->sc_child == VIRTIO_CHILD_FAILED) {
-		aprint_error_dev(self, "virtio configuration failed\n");
-		return 0;
-	}
 
 	/*
 	 * Make sure child drivers initialize interrupts via call
@@ -164,7 +159,7 @@ virtio_mmio_fdt_setup_interrupts(struct 
 		return -1;
 	}
 
-	if (vsc->sc_flags & VIRTIO_F_PCI_INTR_MPSAFE)
+	if (vsc->sc_flags & VIRTIO_F_INTR_MPSAFE)
 		flags |= FDT_INTR_MPSAFE;
 
 	msc->sc_ih = fdtbus_intr_establish_xname(fsc->sc_phandle, 0,

Index: src/sys/dev/pci/if_vioif.c
diff -u src/sys/dev/pci/if_vioif.c:1.65 src/sys/dev/pci/if_vioif.c:1.66
--- src/sys/dev/pci/if_vioif.c:1.65	Thu May 28 23:25:17 2020
+++ src/sys/dev/pci/if_vioif.c	Wed Jan 20 19:46:48 2021
@@ -1,6 +1,7 @@
-/*	$NetBSD: if_vioif.c,v 1.65 2020/05/28 23:25:17 riastradh Exp $	*/
+/*	$NetBSD: if_vioif.c,v 1.66 2021/01/20 19:46:48 reinoud Exp $	*/
 
 /*
+ * Copyright (c) 2020 The NetBSD Foundation, Inc.
  * Copyright (c) 2010 Minoura Makoto.
  * All rights reserved.
  *
@@ -26,7 +27,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: if_vioif.c,v 1.65 2020/05/28 23:25:17 riastradh Exp $");
+__KERNEL_RCSID(0, "$NetBSD: if_vioif.c,v 1.66 2021/01/20 19:46:48 reinoud Exp $");
 
 #ifdef _KERNEL_OPT
 #include "opt_net_mpsafe.h"
@@ -70,9 +71,10 @@ __KERNEL_RCSID(0, "$NetBSD: if_vioif.c,v
  * if_vioifreg.h:
  */
 /* Configuration registers */
-#define VIRTIO_NET_CONFIG_MAC		0 /* 8bit x 6byte */
-#define VIRTIO_NET_CONFIG_STATUS	6 /* 16bit */
-#define VIRTIO_NET_CONFIG_MAX_VQ_PAIRS	8 /* 16bit */
+#define VIRTIO_NET_CONFIG_MAC		 0 /* 8bit x 6byte */
+#define VIRTIO_NET_CONFIG_STATUS	 6 /* 16bit */
+#define VIRTIO_NET_CONFIG_MAX_VQ_PAIRS	 8 /* 16bit */
+#define VIRTIO_NET_CONFIG_MTU		10 /* 16bit */
 
 /* Feature bits */
 #define VIRTIO_NET_F_CSUM		__BIT(0)
@@ -130,9 +132,8 @@ struct virtio_net_hdr {
 	uint16_t	gso_size;
 	uint16_t	csum_start;
 	uint16_t	csum_offset;
-#if 0
-	uint16_t	num_buffers; /* if VIRTIO_NET_F_MRG_RXBUF enabled */
-#endif
+
+	uint16_t	num_buffers; /* VIRTIO_NET_F_MRG_RXBUF enabled or v1 */
 } __packed;
 
 #define VIRTIO_NET_HDR_F_NEEDS_CSUM	1 /* flags */
@@ -188,12 +189,6 @@ struct virtio_net_ctrl_mq {
 	uint16_t virtqueue_pairs;
 } __packed;
 
-struct vioif_ctrl_cmdspec {
-	bus_dmamap_t	dmamap;
-	void		*buf;
-	bus_size_t	bufsize;
-};
-
 /*
  * if_vioifvar.h:
  */
@@ -211,6 +206,12 @@ struct vioif_ctrl_cmdspec {
  *      - the lock is held before acquisition of other locks
  */
 
+struct vioif_ctrl_cmdspec {
+	bus_dmamap_t	dmamap;
+	void		*buf;
+	bus_size_t	bufsize;
+};
+
 struct vioif_work {
 	struct work	 cookie;
 	void		(*func)(void *);
@@ -299,6 +300,7 @@ struct vioif_softc {
 
 	struct virtio_softc	*sc_virtio;
 	struct virtqueue	*sc_vqs;
+	u_int			 sc_hdr_size;
 
 	int			sc_max_nvq_pairs;
 	int			sc_req_nvq_pairs;
@@ -357,7 +359,8 @@ static void	vioif_watchdog(struct ifnet 
 /* rx */
 static int	vioif_add_rx_mbuf(struct vioif_rxqueue *, int);
 static void	vioif_free_rx_mbuf(struct vioif_rxqueue *, int);
-static void	vioif_populate_rx_mbufs_locked(struct vioif_rxqueue *);
+static void	vioif_populate_rx_mbufs_locked(struct vioif_softc *,
+		    struct vioif_rxqueue *);
 static void	vioif_rx_queue_clear(struct vioif_rxqueue *);
 static bool	vioif_rx_deq_locked(struct vioif_softc *, struct virtio_softc *,
 		    struct vioif_rxqueue *, u_int);
@@ -412,7 +415,7 @@ vioif_match(device_t parent, cfdata_t ma
 {
 	struct virtio_attach_args *va = aux;
 
-	if (va->sc_childdevid == PCI_PRODUCT_VIRTIO_NETWORK)
+	if (va->sc_childdevid == VIRTIO_DEVICE_ID_NETWORK)
 		return 1;
 
 	return 0;
@@ -571,10 +574,8 @@ vioif_alloc_mems(struct vioif_softc *sc)
 		rxq = &sc->sc_rxq[qid];
 		txq = &sc->sc_txq[qid];
 
-		allocsize +=
-		    sizeof(struct virtio_net_hdr) * rxq->rxq_vq->vq_num;
-		allocsize +=
-		    sizeof(struct virtio_net_hdr) * txq->txq_vq->vq_num;
+		allocsize += sc->sc_hdr_size * rxq->rxq_vq->vq_num;
+		allocsize += sc->sc_hdr_size * txq->txq_vq->vq_num;
 	}
 	if (sc->sc_has_ctrl) {
 		allocsize += sizeof(struct virtio_net_ctrl_cmd) * 1;
@@ -610,9 +611,9 @@ vioif_alloc_mems(struct vioif_softc *sc)
 		txq = &sc->sc_txq[qid];
 
 		rxq->rxq_hdrs = vioif_assign_mem(&p,
-		    sizeof(rxq->rxq_hdrs[0]) * rxq->rxq_vq->vq_num);
+		    sc->sc_hdr_size * rxq->rxq_vq->vq_num);
 		txq->txq_hdrs = vioif_assign_mem(&p,
-		    sizeof(txq->txq_hdrs[0]) * txq->txq_vq->vq_num);
+		    sc->sc_hdr_size * txq->txq_vq->vq_num);
 	}
 	if (sc->sc_has_ctrl) {
 		ctrlq->ctrlq_cmd = vioif_assign_mem(&p,
@@ -677,7 +678,7 @@ vioif_alloc_mems(struct vioif_softc *sc)
 
 		for (i = 0; i < rxq->rxq_vq->vq_num; i++) {
 			r = vioif_dmamap_create_load(sc, &rxq->rxq_hdr_dmamaps[i],
-			    &rxq->rxq_hdrs[i], sizeof(rxq->rxq_hdrs[0]), 1,
+			    &rxq->rxq_hdrs[i], sc->sc_hdr_size, 1,
 			    BUS_DMA_READ, "rx header");
 			if (r != 0)
 				goto err_reqs;
@@ -690,7 +691,7 @@ vioif_alloc_mems(struct vioif_softc *sc)
 
 		for (i = 0; i < txq->txq_vq->vq_num; i++) {
 			r = vioif_dmamap_create_load(sc, &txq->txq_hdr_dmamaps[i],
-			    &txq->txq_hdrs[i], sizeof(txq->txq_hdrs[0]), 1,
+			    &txq->txq_hdrs[i], sc->sc_hdr_size, 1,
 			    BUS_DMA_READ, "tx header");
 			if (r != 0)
 				goto err_reqs;
@@ -785,7 +786,7 @@ vioif_attach(device_t parent, device_t s
 	struct vioif_ctrlqueue *ctrlq = &sc->sc_ctrlq;
 	struct vioif_txqueue *txq;
 	struct vioif_rxqueue *rxq;
-	uint32_t features, req_features;
+	uint64_t features, req_features;
 	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
 	u_int softint_flags;
 	int r, i, nvqs=0, req_flags;
@@ -821,13 +822,14 @@ vioif_attach(device_t parent, device_t s
 	req_flags = 0;
 
 #ifdef VIOIF_MPSAFE
-	req_flags |= VIRTIO_F_PCI_INTR_MPSAFE;
+	req_flags |= VIRTIO_F_INTR_MPSAFE;
 #endif
-	req_flags |= VIRTIO_F_PCI_INTR_MSIX;
+	req_flags |= VIRTIO_F_INTR_MSIX;
 
 	req_features =
 	    VIRTIO_NET_F_MAC | VIRTIO_NET_F_STATUS | VIRTIO_NET_F_CTRL_VQ |
 	    VIRTIO_NET_F_CTRL_RX | VIRTIO_F_NOTIFY_ON_EMPTY;
+	req_features |= VIRTIO_F_RING_EVENT_IDX;
 #ifdef VIOIF_MULTIQ
 	req_features |= VIRTIO_NET_F_MQ;
 #endif
@@ -836,6 +838,8 @@ vioif_attach(device_t parent, device_t s
 	    req_features, VIRTIO_NET_FLAG_BITS);
 
 	features = virtio_features(vsc);
+	if (features == 0)
+		goto err;
 
 	if (features & VIRTIO_NET_F_MAC) {
 		for (i = 0; i < __arraycount(sc->sc_mac); i++) {
@@ -855,9 +859,16 @@ vioif_attach(device_t parent, device_t s
 		}
 	}
 
+	/* 'Ethernet' with capital follows other ethernet driver attachment */
 	aprint_normal_dev(self, "Ethernet address %s\n",
 	    ether_sprintf(sc->sc_mac));
 
+	if (features & (VIRTIO_NET_F_MRG_RXBUF | VIRTIO_F_VERSION_1)) {
+		sc->sc_hdr_size = sizeof(struct virtio_net_hdr);
+	} else {
+		sc->sc_hdr_size = offsetof(struct virtio_net_hdr, num_buffers);
+	}
+
 	if ((features & VIRTIO_NET_F_CTRL_VQ) &&
 	    (features & VIRTIO_NET_F_CTRL_RX)) {
 		sc->sc_has_ctrl = true;
@@ -908,7 +919,7 @@ vioif_attach(device_t parent, device_t s
 
 		snprintf(qname, sizeof(qname), "rx%d", i);
 		r = virtio_alloc_vq(vsc, rxq->rxq_vq, nvqs,
-		    MCLBYTES+sizeof(struct virtio_net_hdr), 2, qname);
+		    MCLBYTES + sc->sc_hdr_size, 2, qname);
 		if (r != 0)
 			goto err;
 		nvqs++;
@@ -934,8 +945,7 @@ vioif_attach(device_t parent, device_t s
 
 		snprintf(qname, sizeof(qname), "tx%d", i);
 		r = virtio_alloc_vq(vsc, txq->txq_vq, nvqs,
-		    sizeof(struct virtio_net_hdr)
-		    + (ETHER_MAX_LEN - ETHER_HDR_LEN),
+		    sc->sc_hdr_size + (ETHER_MAX_LEN - ETHER_HDR_LEN),
 		    VIRTIO_NET_TX_MAXNSEGS + 1, qname);
 		if (r != 0)
 			goto err;
@@ -1154,7 +1164,7 @@ vioif_init(struct ifnet *ifp)
 		/* Have to set false before vioif_populate_rx_mbufs */
 		mutex_enter(rxq->rxq_lock);
 		rxq->rxq_stopping = false;
-		vioif_populate_rx_mbufs_locked(rxq);
+		vioif_populate_rx_mbufs_locked(sc, rxq);
 		mutex_exit(rxq->rxq_lock);
 
 	}
@@ -1264,6 +1274,7 @@ vioif_send_common_locked(struct ifnet *i
 	struct vioif_softc *sc = ifp->if_softc;
 	struct virtio_softc *vsc = sc->sc_virtio;
 	struct virtqueue *vq = txq->txq_vq;
+	struct virtio_net_hdr *hdr;
 	struct mbuf *m;
 	int queued = 0;
 
@@ -1337,7 +1348,8 @@ skip:
 
 		txq->txq_mbufs[slot] = m;
 
-		memset(&txq->txq_hdrs[slot], 0, sizeof(struct virtio_net_hdr));
+		hdr = &txq->txq_hdrs[slot];
+		memset(hdr, 0, sc->sc_hdr_size);
 		bus_dmamap_sync(virtio_dmat(vsc), txq->txq_dmamaps[slot],
 		    0, txq->txq_dmamaps[slot]->dm_mapsize,
 		    BUS_DMASYNC_PREWRITE);
@@ -1521,7 +1533,7 @@ vioif_free_rx_mbuf(struct vioif_rxqueue 
 
 /* add mbufs for all the empty receive slots */
 static void
-vioif_populate_rx_mbufs_locked(struct vioif_rxqueue *rxq)
+vioif_populate_rx_mbufs_locked(struct vioif_softc *sc, struct vioif_rxqueue *rxq)
 {
 	struct virtqueue *vq = rxq->rxq_vq;
 	struct virtio_softc *vsc = vq->vq_owner;
@@ -1553,7 +1565,7 @@ vioif_populate_rx_mbufs_locked(struct vi
 			break;
 		}
 		bus_dmamap_sync(virtio_dmat(vsc), rxq->rxq_hdr_dmamaps[slot],
-		    0, sizeof(struct virtio_net_hdr), BUS_DMASYNC_PREREAD);
+		    0, sc->sc_hdr_size, BUS_DMASYNC_PREREAD);
 		bus_dmamap_sync(virtio_dmat(vsc), rxq->rxq_dmamaps[slot],
 		    0, MCLBYTES, BUS_DMASYNC_PREREAD);
 		virtio_enqueue(vsc, vq, slot, rxq->rxq_hdr_dmamaps[slot],
@@ -1613,9 +1625,9 @@ vioif_rx_deq_locked(struct vioif_softc *
 
 		dequeued = true;
 
-		len -= sizeof(struct virtio_net_hdr);
+		len -= sc->sc_hdr_size;
 		bus_dmamap_sync(virtio_dmat(vsc), rxq->rxq_hdr_dmamaps[slot],
-		    0, sizeof(struct virtio_net_hdr), BUS_DMASYNC_POSTREAD);
+		    0, sc->sc_hdr_size, BUS_DMASYNC_POSTREAD);
 		bus_dmamap_sync(virtio_dmat(vsc), rxq->rxq_dmamaps[slot],
 		    0, MCLBYTES, BUS_DMASYNC_POSTREAD);
 		m = rxq->rxq_mbufs[slot];
@@ -1635,12 +1647,37 @@ vioif_rx_deq_locked(struct vioif_softc *
 	}
 
 	if (dequeued)
-		vioif_populate_rx_mbufs_locked(rxq);
+		vioif_populate_rx_mbufs_locked(sc, rxq);
 
 	return more;
 }
 
 /* rx interrupt; call _dequeue above and schedule a softint */
+
+static void
+vioif_rx_handle_locked(void *xrxq, u_int limit)
+{
+	struct vioif_rxqueue *rxq = xrxq;
+	struct virtqueue *vq = rxq->rxq_vq;
+	struct virtio_softc *vsc = vq->vq_owner;
+	struct vioif_softc *sc = device_private(virtio_child(vsc));
+	bool more;
+
+	KASSERT(!rxq->rxq_stopping);
+
+	more = vioif_rx_deq_locked(sc, vsc, rxq, limit);
+	if (more) {
+		vioif_rx_sched_handle(sc, rxq);
+		return;
+	} 
+	more = virtio_start_vq_intr(vsc, rxq->rxq_vq);
+	if (more) {
+		vioif_rx_sched_handle(sc, rxq);
+		return;
+	} 
+	atomic_store_relaxed(&rxq->rxq_active, false);
+}
+
 static int
 vioif_rx_intr(void *arg)
 {
@@ -1649,7 +1686,6 @@ vioif_rx_intr(void *arg)
 	struct virtio_softc *vsc = vq->vq_owner;
 	struct vioif_softc *sc = device_private(virtio_child(vsc));
 	u_int limit;
-	bool more;
 
 	limit = sc->sc_rx_intr_process_limit;
 
@@ -1664,13 +1700,7 @@ vioif_rx_intr(void *arg)
 		virtio_stop_vq_intr(vsc, vq);
 		atomic_store_relaxed(&rxq->rxq_active, true);
 
-		more = vioif_rx_deq_locked(sc, vsc, rxq, limit);
-		if (more) {
-			vioif_rx_sched_handle(sc, rxq);
-		} else {
-			atomic_store_relaxed(&rxq->rxq_active, false);
-			virtio_start_vq_intr(vsc, vq);
-		}
+		vioif_rx_handle_locked(rxq, limit);
 	}
 
 	mutex_exit(rxq->rxq_lock);
@@ -1685,21 +1715,13 @@ vioif_rx_handle(void *xrxq)
 	struct virtio_softc *vsc = vq->vq_owner;
 	struct vioif_softc *sc = device_private(virtio_child(vsc));
 	u_int limit;
-	bool more;
 
 	limit = sc->sc_rx_process_limit;
 
 	mutex_enter(rxq->rxq_lock);
 
-	if (!rxq->rxq_stopping) {
-		more = vioif_rx_deq_locked(sc, vsc, rxq, limit);
-		if (more) {
-			vioif_rx_sched_handle(sc, rxq);
-		} else {
-			atomic_store_relaxed(&rxq->rxq_active, false);
-			virtio_start_vq_intr(vsc, rxq->rxq_vq);
-		}
-	}
+	if (!rxq->rxq_stopping)
+		vioif_rx_handle_locked(rxq, limit);
 
 	mutex_exit(rxq->rxq_lock);
 }
@@ -1738,6 +1760,42 @@ vioif_rx_drain(struct vioif_rxqueue *rxq
  * tx vq full and watchdog
  */
 
+static void
+vioif_tx_handle_locked(struct vioif_txqueue *txq, u_int limit)
+{
+	struct virtqueue *vq = txq->txq_vq;
+	struct virtio_softc *vsc = vq->vq_owner;
+	struct vioif_softc *sc = device_private(virtio_child(vsc));
+	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
+	bool more;
+
+	KASSERT(!txq->txq_stopping);
+
+	more = vioif_tx_deq_locked(sc, vsc, txq, limit);
+	if (more) {
+		vioif_tx_sched_handle(sc, txq);
+		return;
+	}
+
+	if (virtio_features(vsc) & VIRTIO_F_RING_EVENT_IDX) 
+		more = virtio_postpone_intr_smart(vsc, vq);
+	else
+		more = virtio_start_vq_intr(vsc, vq);
+	if (more) {
+		vioif_tx_sched_handle(sc, txq);
+		return;
+	}
+
+	atomic_store_relaxed(&txq->txq_active, false);
+	/* for ALTQ */
+	if (txq == &sc->sc_txq[0]) {
+		if_schedule_deferred_start(ifp);
+		ifp->if_flags &= ~IFF_OACTIVE;
+	}
+	softint_schedule(txq->txq_deferred_transmit);
+}
+
+
 static int
 vioif_tx_intr(void *arg)
 {
@@ -1745,8 +1803,6 @@ vioif_tx_intr(void *arg)
 	struct virtqueue *vq = txq->txq_vq;
 	struct virtio_softc *vsc = vq->vq_owner;
 	struct vioif_softc *sc = device_private(virtio_child(vsc));
-	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
-	bool more;
 	u_int limit;
 
 	limit = sc->sc_tx_intr_process_limit;
@@ -1762,21 +1818,7 @@ vioif_tx_intr(void *arg)
 		virtio_stop_vq_intr(vsc, vq);
 		atomic_store_relaxed(&txq->txq_active, true);
 
-		more = vioif_tx_deq_locked(sc, vsc, txq, limit);
-		if (more) {
-			vioif_tx_sched_handle(sc, txq);
-		} else {
-			atomic_store_relaxed(&txq->txq_active, false);
-
-			/* for ALTQ */
-			if (txq == &sc->sc_txq[0]) {
-				if_schedule_deferred_start(ifp);
-				ifp->if_flags &= ~IFF_OACTIVE;
-			}
-			softint_schedule(txq->txq_deferred_transmit);
-
-			virtio_start_vq_intr(vsc, vq);
-		}
+		vioif_tx_handle_locked(txq, limit);
 	}
 
 	mutex_exit(txq->txq_lock);
@@ -1791,32 +1833,13 @@ vioif_tx_handle(void *xtxq)
 	struct virtqueue *vq = txq->txq_vq;
 	struct virtio_softc *vsc = vq->vq_owner;
 	struct vioif_softc *sc = device_private(virtio_child(vsc));
-	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
 	u_int limit;
-	bool more;
 
 	limit = sc->sc_tx_process_limit;
 
 	mutex_enter(txq->txq_lock);
-
-	if (!txq->txq_stopping) {
-		more = vioif_tx_deq_locked(sc, vsc, txq, limit);
-		if (more) {
-			vioif_tx_sched_handle(sc, txq);
-		} else {
-			atomic_store_relaxed(&txq->txq_active, false);
-
-			/* for ALTQ */
-			if (txq == &sc->sc_txq[0]) {
-				if_schedule_deferred_start(ifp);
-				ifp->if_flags &= ~IFF_OACTIVE;
-			}
-			softint_schedule(txq->txq_deferred_transmit);
-
-			virtio_start_vq_intr(vsc, txq->txq_vq);
-		}
-	}
-
+	if (!txq->txq_stopping)
+		vioif_tx_handle_locked(txq, limit);
 	mutex_exit(txq->txq_lock);
 }
 
@@ -1873,7 +1896,7 @@ vioif_tx_deq_locked(struct vioif_softc *
 			break;
 
 		bus_dmamap_sync(virtio_dmat(vsc), txq->txq_hdr_dmamaps[slot],
-		    0, sizeof(struct virtio_net_hdr), BUS_DMASYNC_POSTWRITE);
+		    0, sc->sc_hdr_size, BUS_DMASYNC_POSTWRITE);
 		bus_dmamap_sync(virtio_dmat(vsc), txq->txq_dmamaps[slot],
 		    0, txq->txq_dmamaps[slot]->dm_mapsize,
 		    BUS_DMASYNC_POSTWRITE);
@@ -2002,6 +2025,10 @@ vioif_ctrl_send_command(struct vioif_sof
 	bus_dmamap_sync(virtio_dmat(vsc), ctrlq->ctrlq_status_dmamap,
 	    0, sizeof(struct virtio_net_ctrl_status), BUS_DMASYNC_PREREAD);
 
+	/* we need to explicitly (re)start vq intr when using RING EVENT IDX */
+	if (virtio_features(vsc) & VIRTIO_F_RING_EVENT_IDX)
+		virtio_start_vq_intr(vsc, ctrlq->ctrlq_vq);
+
 	r = virtio_enqueue_prep(vsc, vq, &slot);
 	if (r != 0)
 		panic("%s: control vq busy!?", device_xname(sc->sc_dev));
@@ -2083,6 +2110,7 @@ static int
 vioif_set_rx_filter(struct vioif_softc *sc)
 {
 	/* filter already set in ctrlq->ctrlq_mac_tbl */
+	struct virtio_softc *vsc = sc->sc_virtio;
 	struct virtio_net_ctrl_mac_tbl *mac_tbl_uc, *mac_tbl_mc;
 	struct vioif_ctrl_cmdspec specs[2];
 	int nspecs = __arraycount(specs);
@@ -2099,12 +2127,12 @@ vioif_set_rx_filter(struct vioif_softc *
 	specs[0].dmamap = sc->sc_ctrlq.ctrlq_tbl_uc_dmamap;
 	specs[0].buf = mac_tbl_uc;
 	specs[0].bufsize = sizeof(*mac_tbl_uc)
-	    + (ETHER_ADDR_LEN * mac_tbl_uc->nentries);
+	    + (ETHER_ADDR_LEN * virtio_rw32(vsc, mac_tbl_uc->nentries));
 
 	specs[1].dmamap = sc->sc_ctrlq.ctrlq_tbl_mc_dmamap;
 	specs[1].buf = mac_tbl_mc;
 	specs[1].bufsize = sizeof(*mac_tbl_mc)
-	    + (ETHER_ADDR_LEN * mac_tbl_mc->nentries);
+	    + (ETHER_ADDR_LEN * virtio_rw32(vsc, mac_tbl_mc->nentries));
 
 	r = vioif_ctrl_load_cmdspec(sc, specs, nspecs);
 	if (r != 0)
@@ -2137,7 +2165,7 @@ vioif_ctrl_mq_vq_pairs_set(struct vioif_
 
 	vioif_ctrl_acquire(sc);
 
-	mq->virtqueue_pairs = nvq_pairs;
+	mq->virtqueue_pairs = virtio_rw16(sc->sc_virtio, nvq_pairs);
 	specs[0].dmamap = sc->sc_ctrlq.ctrlq_mq_dmamap;
 	specs[0].buf = mq;
 	specs[0].bufsize = sizeof(*mq);
@@ -2188,6 +2216,7 @@ vioif_ctrl_intr(void *arg)
 static int
 vioif_rx_filter(struct vioif_softc *sc)
 {
+	struct virtio_softc *vsc = sc->sc_virtio;
 	struct ethercom *ec = &sc->sc_ethercom;
 	struct ifnet *ifp = &ec->ec_if;
 	struct ether_multi *enm;
@@ -2230,8 +2259,8 @@ set_unlock:
 
 set:
 	if (rxfilter) {
-		ctrlq->ctrlq_mac_tbl_uc->nentries = 0;
-		ctrlq->ctrlq_mac_tbl_mc->nentries = nentries;
+		ctrlq->ctrlq_mac_tbl_uc->nentries = virtio_rw32(vsc, 0);
+		ctrlq->ctrlq_mac_tbl_mc->nentries = virtio_rw32(vsc, nentries);
 		r = vioif_set_rx_filter(sc);
 		if (r != 0) {
 			rxfilter = 0;
@@ -2239,8 +2268,8 @@ set:
 		}
 	} else {
 		/* remove rx filter */
-		ctrlq->ctrlq_mac_tbl_uc->nentries = 0;
-		ctrlq->ctrlq_mac_tbl_mc->nentries = 0;
+		ctrlq->ctrlq_mac_tbl_uc->nentries = virtio_rw32(vsc, 0);
+		ctrlq->ctrlq_mac_tbl_mc->nentries = virtio_rw32(vsc, 0);
 		r = vioif_set_rx_filter(sc);
 		/* what to do on failure? */
 	}

Index: src/sys/dev/pci/ld_virtio.c
diff -u src/sys/dev/pci/ld_virtio.c:1.28 src/sys/dev/pci/ld_virtio.c:1.29
--- src/sys/dev/pci/ld_virtio.c:1.28	Sat Oct 24 09:00:35 2020
+++ src/sys/dev/pci/ld_virtio.c	Wed Jan 20 19:46:48 2021
@@ -1,4 +1,4 @@
-/*	$NetBSD: ld_virtio.c,v 1.28 2020/10/24 09:00:35 skrll Exp $	*/
+/*	$NetBSD: ld_virtio.c,v 1.29 2021/01/20 19:46:48 reinoud Exp $	*/
 
 /*
  * Copyright (c) 2010 Minoura Makoto.
@@ -26,7 +26,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: ld_virtio.c,v 1.28 2020/10/24 09:00:35 skrll Exp $");
+__KERNEL_RCSID(0, "$NetBSD: ld_virtio.c,v 1.29 2021/01/20 19:46:48 reinoud Exp $");
 
 #include <sys/param.h>
 #include <sys/systm.h>
@@ -156,7 +156,7 @@ ld_virtio_match(device_t parent, cfdata_
 {
 	struct virtio_attach_args *va = aux;
 
-	if (va->sc_childdevid == PCI_PRODUCT_VIRTIO_BLOCK)
+	if (va->sc_childdevid == VIRTIO_DEVICE_ID_BLOCK)
 		return 1;
 
 	return 0;
@@ -263,7 +263,7 @@ ld_virtio_attach(device_t parent, device
 	struct ld_virtio_softc *sc = device_private(self);
 	struct ld_softc *ld = &sc->sc_ld;
 	struct virtio_softc *vsc = device_private(parent);
-	uint32_t features;
+	uint64_t features;
 	int qsize, maxxfersize, maxnsegs;
 
 	if (virtio_child(vsc) != NULL) {
@@ -276,13 +276,15 @@ ld_virtio_attach(device_t parent, device
 	sc->sc_virtio = vsc;
 
 	virtio_child_attach_start(vsc, self, IPL_BIO, &sc->sc_vq,
-	    NULL, virtio_vq_intr, VIRTIO_F_PCI_INTR_MSIX,
+	    NULL, virtio_vq_intr, VIRTIO_F_INTR_MSIX,
 	    (VIRTIO_BLK_F_SIZE_MAX | VIRTIO_BLK_F_SEG_MAX |
 	     VIRTIO_BLK_F_GEOMETRY | VIRTIO_BLK_F_RO | VIRTIO_BLK_F_BLK_SIZE |
 	     VIRTIO_BLK_F_FLUSH | VIRTIO_BLK_F_CONFIG_WCE),
 	    VIRTIO_BLK_FLAG_BITS);
 
 	features = virtio_features(vsc);
+	if (features == 0)
+		goto err;
 
 	if (features & VIRTIO_BLK_F_RO)
 		sc->sc_readonly = 1;
@@ -417,10 +419,12 @@ ld_virtio_start(struct ld_softc *ld, str
 	}
 
 	vr->vr_bp = bp;
-	vr->vr_hdr.type = isread?VIRTIO_BLK_T_IN:VIRTIO_BLK_T_OUT;
-	vr->vr_hdr.ioprio = 0;
-	vr->vr_hdr.sector = bp->b_rawblkno * sc->sc_ld.sc_secsize /
-	    VIRTIO_BLK_BSIZE;
+	vr->vr_hdr.type   = virtio_rw32(vsc, 
+			isread ? VIRTIO_BLK_T_IN : VIRTIO_BLK_T_OUT);
+	vr->vr_hdr.ioprio = virtio_rw32(vsc, 0);
+	vr->vr_hdr.sector = virtio_rw64(vsc,
+			bp->b_rawblkno * sc->sc_ld.sc_secsize /
+			VIRTIO_BLK_BSIZE);
 
 	bus_dmamap_sync(virtio_dmat(vsc), vr->vr_cmdsts,
 			0, sizeof(struct virtio_blk_req_hdr),
@@ -544,10 +548,11 @@ ld_virtio_dump(struct ld_softc *ld, void
 	}
 
 	vr->vr_bp = (void*)0xdeadbeef;
-	vr->vr_hdr.type = VIRTIO_BLK_T_OUT;
-	vr->vr_hdr.ioprio = 0;
-	vr->vr_hdr.sector = (daddr_t) blkno * ld->sc_secsize /
-	    VIRTIO_BLK_BSIZE;
+	vr->vr_hdr.type   = virtio_rw32(vsc, VIRTIO_BLK_T_OUT);
+	vr->vr_hdr.ioprio = virtio_rw32(vsc, 0);
+	vr->vr_hdr.sector = virtio_rw64(vsc,
+			(daddr_t) blkno * ld->sc_secsize /
+			VIRTIO_BLK_BSIZE);
 
 	bus_dmamap_sync(virtio_dmat(vsc), vr->vr_cmdsts,
 			0, sizeof(struct virtio_blk_req_hdr),
@@ -642,7 +647,7 @@ ld_virtio_flush(struct ld_softc *ld, boo
 {
 	struct ld_virtio_softc * const sc = device_private(ld->sc_dv);
 	struct virtio_softc * const vsc = sc->sc_virtio;
-	const uint32_t features = virtio_features(vsc);
+	const uint64_t features = virtio_features(vsc);
 	struct virtqueue *vq = &sc->sc_vq;
 	struct virtio_blk_req *vr;
 	int slot;
@@ -678,9 +683,9 @@ ld_virtio_flush(struct ld_softc *ld, boo
 	}
 
 	vr->vr_bp = DUMMY_VR_BP;
-	vr->vr_hdr.type = VIRTIO_BLK_T_FLUSH;
-	vr->vr_hdr.ioprio = 0;
-	vr->vr_hdr.sector = 0;
+	vr->vr_hdr.type   = virtio_rw32(vsc, VIRTIO_BLK_T_FLUSH);
+	vr->vr_hdr.ioprio = virtio_rw32(vsc, 0);
+	vr->vr_hdr.sector = virtio_rw64(vsc, 0);
 
 	bus_dmamap_sync(virtio_dmat(vsc), vr->vr_cmdsts,
 			0, sizeof(struct virtio_blk_req_hdr),
@@ -727,7 +732,7 @@ ld_virtio_getcache(struct ld_softc *ld, 
 {
 	struct ld_virtio_softc * const sc = device_private(ld->sc_dv);
 	struct virtio_softc * const vsc = sc->sc_virtio;
-	const uint32_t features = virtio_features(vsc);
+	const uint64_t features = virtio_features(vsc);
 
 	*bitsp = DKCACHE_READ;
 	if ((features & VIRTIO_BLK_F_CONFIG_WCE) != 0)

Index: src/sys/dev/pci/vio9p.c
diff -u src/sys/dev/pci/vio9p.c:1.2 src/sys/dev/pci/vio9p.c:1.3
--- src/sys/dev/pci/vio9p.c:1.2	Fri Dec 18 02:55:32 2020
+++ src/sys/dev/pci/vio9p.c	Wed Jan 20 19:46:48 2021
@@ -1,4 +1,4 @@
-/*	$NetBSD: vio9p.c,v 1.2 2020/12/18 02:55:32 thorpej Exp $	*/
+/*	$NetBSD: vio9p.c,v 1.3 2021/01/20 19:46:48 reinoud Exp $	*/
 
 /*
  * Copyright (c) 2019 Internet Initiative Japan, Inc.
@@ -26,7 +26,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: vio9p.c,v 1.2 2020/12/18 02:55:32 thorpej Exp $");
+__KERNEL_RCSID(0, "$NetBSD: vio9p.c,v 1.3 2021/01/20 19:46:48 reinoud Exp $");
 
 #include <sys/param.h>
 #include <sys/systm.h>
@@ -466,7 +466,7 @@ vio9p_match(device_t parent, cfdata_t ma
 {
 	struct virtio_attach_args *va = aux;
 
-	if (va->sc_childdevid == PCI_PRODUCT_VIRTIO_9P)
+	if (va->sc_childdevid == VIRTIO_DEVICE_ID_9P)
 		return 1;
 
 	return 0;
@@ -477,6 +477,7 @@ vio9p_attach(device_t parent, device_t s
 {
 	struct vio9p_softc *sc = device_private(self);
 	struct virtio_softc *vsc = device_private(parent);
+	uint64_t features;
 	int error;
 
 	if (virtio_child(vsc) != NULL) {
@@ -490,9 +491,13 @@ vio9p_attach(device_t parent, device_t s
 
 	virtio_child_attach_start(vsc, self, IPL_VM, NULL,
 	    NULL, virtio_vq_intr,
-	    VIRTIO_F_PCI_INTR_MPSAFE | VIRTIO_F_PCI_INTR_SOFTINT, 0,
+	    VIRTIO_F_INTR_MPSAFE | VIRTIO_F_INTR_SOFTINT, 0,
 	    VIO9P_FLAG_BITS);
 
+	features = virtio_features(vsc);
+	if (features == 0)
+		goto err_none;
+
 	error = virtio_alloc_vq(vsc, &sc->sc_vq[0], 0, VIO9P_MAX_REQLEN,
 	    VIO9P_N_SEGMENTS * 2, "vio9p");
 	if (error != 0)
@@ -569,8 +574,8 @@ vio9p_read_config(struct vio9p_softc *sc
 	int i;
 
 	/* these values are explicitly specified as little-endian */
-	reg = virtio_read_device_config_2(sc->sc_virtio, VIO9P_CONFIG_TAG_LEN);
-	sc->sc_taglen = le16toh(reg);
+	sc->sc_taglen = virtio_read_device_config_le_2(sc->sc_virtio,
+		VIO9P_CONFIG_TAG_LEN);
 
 	if (sc->sc_taglen > P9_MAX_TAG_LEN) {
 		aprint_error_dev(dev, "warning: tag is trimmed from %u to %u\n",

Index: src/sys/dev/pci/viomb.c
diff -u src/sys/dev/pci/viomb.c:1.11 src/sys/dev/pci/viomb.c:1.12
--- src/sys/dev/pci/viomb.c:1.11	Wed Jan 13 19:46:49 2021
+++ src/sys/dev/pci/viomb.c	Wed Jan 20 19:46:48 2021
@@ -1,4 +1,4 @@
-/*	$NetBSD: viomb.c,v 1.11 2021/01/13 19:46:49 reinoud Exp $	*/
+/*	$NetBSD: viomb.c,v 1.12 2021/01/20 19:46:48 reinoud Exp $	*/
 
 /*
  * Copyright (c) 2010 Minoura Makoto.
@@ -26,7 +26,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: viomb.c,v 1.11 2021/01/13 19:46:49 reinoud Exp $");
+__KERNEL_RCSID(0, "$NetBSD: viomb.c,v 1.12 2021/01/20 19:46:48 reinoud Exp $");
 
 #include <sys/param.h>
 #include <sys/systm.h>
@@ -59,6 +59,9 @@ __KERNEL_RCSID(0, "$NetBSD: viomb.c,v 1.
 	"\x01""MUST_TELL_HOST"
 
 #define PGS_PER_REQ		(256) /* 1MB, 4KB/page */
+#define VQ_INFLATE	0
+#define VQ_DEFLATE	1
+
 
 CTASSERT((PAGE_SIZE) == (VIRTIO_PAGE_SIZE)); /* XXX */
 
@@ -110,7 +113,7 @@ viomb_match(device_t parent, cfdata_t ma
 {
 	struct virtio_attach_args *va = aux;
 
-	if (va->sc_childdevid == PCI_PRODUCT_VIRTIO_BALLOON)
+	if (va->sc_childdevid == VIRTIO_DEVICE_ID_BALLOON)
 		return 1;
 
 	return 0;
@@ -122,6 +125,7 @@ viomb_attach(device_t parent, device_t s
 	struct viomb_softc *sc = device_private(self);
 	struct virtio_softc *vsc = device_private(parent);
 	const struct sysctlnode *node;
+	uint64_t features;
 
 	if (virtio_child(vsc) != NULL) {
 		aprint_normal(": child already attached for %s; "
@@ -131,27 +135,47 @@ viomb_attach(device_t parent, device_t s
 
 	if (balloon_initialized++) {
 		aprint_normal(": balloon already exists; something wrong...\n");
-		goto err_none;
+		return;
+	}
+
+	/* fail on non-4K page size archs */
+	if (VIRTIO_PAGE_SIZE != PAGE_SIZE){
+		aprint_normal("non-4K page size arch found, needs %d, got %d\n",
+		    VIRTIO_PAGE_SIZE, PAGE_SIZE);
+		return;
 	}
 
 	sc->sc_dev = self;
 	sc->sc_virtio = vsc;
 
-	if ((virtio_alloc_vq(vsc, &sc->sc_vq[0], 0,
-			     sizeof(uint32_t)*PGS_PER_REQ, 1,
-			     "inflate") != 0) ||
-	    (virtio_alloc_vq(vsc, &sc->sc_vq[1], 1,
-			     sizeof(uint32_t)*PGS_PER_REQ, 1,
-			     "deflate") != 0)) {
+	virtio_child_attach_start(vsc, self, IPL_VM, sc->sc_vq,
+	    viomb_config_change, virtio_vq_intr, 0,
+	    VIRTIO_BALLOON_F_MUST_TELL_HOST, VIRTIO_BALLOON_FLAG_BITS);
+
+	features = virtio_features(vsc);
+	if (features == 0)
 		goto err_none;
-	}
-	sc->sc_vq[0].vq_done = inflateq_done;
-	sc->sc_vq[1].vq_done = deflateq_done;
 
 	viomb_read_config(sc);
 	sc->sc_inflight = 0;
 	TAILQ_INIT(&sc->sc_balloon_pages);
 
+	sc->sc_inflate_done = sc->sc_deflate_done = 0;
+	mutex_init(&sc->sc_waitlock, MUTEX_DEFAULT, IPL_VM); /* spin */
+	cv_init(&sc->sc_wait, "balloon");
+
+	if (virtio_alloc_vq(vsc, &sc->sc_vq[VQ_INFLATE], 0,
+			     sizeof(uint32_t)*PGS_PER_REQ, 1,
+			     "inflate") != 0)
+		goto err_mutex;
+	if (virtio_alloc_vq(vsc, &sc->sc_vq[VQ_DEFLATE], 1,
+			     sizeof(uint32_t)*PGS_PER_REQ, 1,
+			     "deflate") != 0)
+		goto err_vq0;
+
+	sc->sc_vq[VQ_INFLATE].vq_done = inflateq_done;
+	sc->sc_vq[VQ_DEFLATE].vq_done = deflateq_done;
+
 	if (bus_dmamap_create(virtio_dmat(vsc), sizeof(uint32_t)*PGS_PER_REQ,
 			      1, sizeof(uint32_t)*PGS_PER_REQ, 0,
 			      BUS_DMA_NOWAIT, &sc->sc_req.bl_dmamap)) {
@@ -166,21 +190,13 @@ viomb_attach(device_t parent, device_t s
 		goto err_dmamap;
 	}
 
-	sc->sc_inflate_done = sc->sc_deflate_done = 0;
-	mutex_init(&sc->sc_waitlock, MUTEX_DEFAULT, IPL_VM); /* spin */
-	cv_init(&sc->sc_wait, "balloon");
-
-	virtio_child_attach_start(vsc, self, IPL_VM, sc->sc_vq,
-	    viomb_config_change, virtio_vq_intr, 0,
-	    0, VIRTIO_BALLOON_FLAG_BITS);
-
 	if (virtio_child_attach_finish(vsc) != 0)
-		goto err_mutex;
+		goto err_out;
 
 	if (kthread_create(PRI_IDLE, KTHREAD_MPSAFE, NULL,
 			   viomb_thread, sc, NULL, "viomb")) {
 		aprint_error_dev(sc->sc_dev, "cannot create kthread.\n");
-		goto err_mutex;
+		goto err_out;
 	}
 
 	sysctl_createv(NULL, 0, NULL, &node, 0, CTLTYPE_NODE,
@@ -197,14 +213,16 @@ viomb_attach(device_t parent, device_t s
 		       CTL_HW, node->sysctl_num, CTL_CREATE, CTL_EOL);
 	return;
 
-err_mutex:
-	cv_destroy(&sc->sc_wait);
-	mutex_destroy(&sc->sc_waitlock);
+err_out:
 err_dmamap:
 	bus_dmamap_destroy(virtio_dmat(vsc), sc->sc_req.bl_dmamap);
 err_vq:
-	virtio_free_vq(vsc, &sc->sc_vq[1]);
-	virtio_free_vq(vsc, &sc->sc_vq[0]);
+	virtio_free_vq(vsc, &sc->sc_vq[VQ_DEFLATE]);
+err_vq0:
+	virtio_free_vq(vsc, &sc->sc_vq[VQ_INFLATE]);
+err_mutex:
+	cv_destroy(&sc->sc_wait);
+	mutex_destroy(&sc->sc_waitlock);
 err_none:
 	virtio_child_attach_failed(vsc);
 	return;
@@ -213,16 +231,12 @@ err_none:
 static void
 viomb_read_config(struct viomb_softc *sc)
 {
-	unsigned int reg;
-
 	/* these values are explicitly specified as little-endian */
-	reg = virtio_read_device_config_4(sc->sc_virtio,
-					  VIRTIO_BALLOON_CONFIG_NUM_PAGES);
-	sc->sc_npages = le32toh(reg);
-	
-	reg = virtio_read_device_config_4(sc->sc_virtio,
-					  VIRTIO_BALLOON_CONFIG_ACTUAL);
-	sc->sc_actual = le32toh(reg);
+	sc->sc_npages = virtio_read_device_config_le_4(sc->sc_virtio,
+		  VIRTIO_BALLOON_CONFIG_NUM_PAGES);
+
+	sc->sc_actual = virtio_read_device_config_le_4(sc->sc_virtio,
+		  VIRTIO_BALLOON_CONFIG_ACTUAL);
 }
 
 /*
@@ -260,7 +274,7 @@ inflate(struct viomb_softc *sc)
 	uint64_t nvpages, nhpages;
 	struct balloon_req *b;
 	struct vm_page *p;
-	struct virtqueue *vq = &sc->sc_vq[0];
+	struct virtqueue *vq = &sc->sc_vq[VQ_INFLATE];
 
 	if (sc->sc_inflight)
 		return 0;
@@ -281,7 +295,8 @@ inflate(struct viomb_softc *sc)
 	b->bl_nentries = nvpages;
 	i = 0;
 	TAILQ_FOREACH(p, &b->bl_pglist, pageq.queue) {
-		b->bl_pages[i++] = VM_PAGE_TO_PHYS(p) / VIRTIO_PAGE_SIZE;
+		b->bl_pages[i++] =
+			htole32(VM_PAGE_TO_PHYS(p) / VIRTIO_PAGE_SIZE);
 	}
 	KASSERT(i == nvpages);
 
@@ -324,7 +339,7 @@ static int
 inflate_done(struct viomb_softc *sc)
 {
 	struct virtio_softc *vsc = sc->sc_virtio;
-	struct virtqueue *vq = &sc->sc_vq[0];
+	struct virtqueue *vq = &sc->sc_vq[VQ_INFLATE];
 	struct balloon_req *b;
 	int r, slot;
 	uint64_t nvpages;
@@ -351,9 +366,9 @@ inflate_done(struct viomb_softc *sc)
 	}
 
 	sc->sc_inflight -= nvpages;
-	virtio_write_device_config_4(vsc,
-				     VIRTIO_BALLOON_CONFIG_ACTUAL,
-				     sc->sc_actual + nvpages);
+	virtio_write_device_config_le_4(vsc,
+		     VIRTIO_BALLOON_CONFIG_ACTUAL,
+		     sc->sc_actual + nvpages);
 	viomb_read_config(sc);
 
 	return 1;
@@ -370,7 +385,7 @@ deflate(struct viomb_softc *sc)
 	uint64_t nvpages, nhpages;
 	struct balloon_req *b;
 	struct vm_page *p;
-	struct virtqueue *vq = &sc->sc_vq[1];
+	struct virtqueue *vq = &sc->sc_vq[VQ_DEFLATE];
 
 	nvpages = (sc->sc_actual + sc->sc_inflight) - sc->sc_npages;
 	if (nvpages > PGS_PER_REQ)
@@ -387,7 +402,8 @@ deflate(struct viomb_softc *sc)
 			break;
 		TAILQ_REMOVE(&sc->sc_balloon_pages, p, pageq.queue);
 		TAILQ_INSERT_TAIL(&b->bl_pglist, p, pageq.queue);
-		b->bl_pages[i] = VM_PAGE_TO_PHYS(p) / VIRTIO_PAGE_SIZE;
+		b->bl_pages[i] =
+			htole32(VM_PAGE_TO_PHYS(p) / VIRTIO_PAGE_SIZE);
 	}
 
 	if (virtio_enqueue_prep(vsc, vq, &slot) != 0) {
@@ -440,7 +456,7 @@ static int
 deflate_done(struct viomb_softc *sc)
 {
 	struct virtio_softc *vsc = sc->sc_virtio;
-	struct virtqueue *vq = &sc->sc_vq[1];
+	struct virtqueue *vq = &sc->sc_vq[VQ_DEFLATE];
 	struct balloon_req *b;
 	int r, slot;
 	uint64_t nvpages;
@@ -464,9 +480,9 @@ deflate_done(struct viomb_softc *sc)
 		uvm_pglistfree(&b->bl_pglist);
 
 	sc->sc_inflight += nvpages;
-	virtio_write_device_config_4(vsc,
-				     VIRTIO_BALLOON_CONFIG_ACTUAL,
-				     sc->sc_actual - nvpages);
+	virtio_write_device_config_le_4(vsc,
+		     VIRTIO_BALLOON_CONFIG_ACTUAL,
+		     sc->sc_actual - nvpages);
 	viomb_read_config(sc);
 
 	return 1;

Index: src/sys/dev/pci/viornd.c
diff -u src/sys/dev/pci/viornd.c:1.13 src/sys/dev/pci/viornd.c:1.14
--- src/sys/dev/pci/viornd.c:1.13	Thu Apr 30 03:40:53 2020
+++ src/sys/dev/pci/viornd.c	Wed Jan 20 19:46:48 2021
@@ -1,4 +1,4 @@
-/* 	$NetBSD: viornd.c,v 1.13 2020/04/30 03:40:53 riastradh Exp $ */
+/* 	$NetBSD: viornd.c,v 1.14 2021/01/20 19:46:48 reinoud Exp $ */
 /*	$OpenBSD: viornd.c,v 1.1 2014/01/21 21:14:58 sf Exp $	*/
 
 /*
@@ -118,7 +118,7 @@ viornd_match(device_t parent, cfdata_t m
 {
 	struct virtio_attach_args *va = aux;
 
-	if (va->sc_childdevid == PCI_PRODUCT_VIRTIO_ENTROPY)
+	if (va->sc_childdevid == VIRTIO_DEVICE_ID_ENTROPY)
 		return 1;
 
 	return 0;

Index: src/sys/dev/pci/vioscsi.c
diff -u src/sys/dev/pci/vioscsi.c:1.24 src/sys/dev/pci/vioscsi.c:1.25
--- src/sys/dev/pci/vioscsi.c:1.24	Sat Sep 19 07:30:32 2020
+++ src/sys/dev/pci/vioscsi.c	Wed Jan 20 19:46:48 2021
@@ -1,4 +1,4 @@
-/*	$NetBSD: vioscsi.c,v 1.24 2020/09/19 07:30:32 kim Exp $	*/
+/*	$NetBSD: vioscsi.c,v 1.25 2021/01/20 19:46:48 reinoud Exp $	*/
 /*	$OpenBSD: vioscsi.c,v 1.3 2015/03/14 03:38:49 jsg Exp $	*/
 
 /*
@@ -18,7 +18,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: vioscsi.c,v 1.24 2020/09/19 07:30:32 kim Exp $");
+__KERNEL_RCSID(0, "$NetBSD: vioscsi.c,v 1.25 2021/01/20 19:46:48 reinoud Exp $");
 
 #include <sys/param.h>
 #include <sys/systm.h>
@@ -104,7 +104,7 @@ vioscsi_match(device_t parent, cfdata_t 
 {
 	struct virtio_attach_args *va = aux;
 
-	if (va->sc_childdevid == PCI_PRODUCT_VIRTIO_SCSI)
+	if (va->sc_childdevid == VIRTIO_DEVICE_ID_SCSI)
 		return 1;
 
 	return 0;
@@ -129,7 +129,7 @@ vioscsi_attach(device_t parent, device_t
 	sc->sc_dev = self;
 
 	virtio_child_attach_start(vsc, self, ipl, sc->sc_vqs,
-	    NULL, virtio_vq_intr, VIRTIO_F_PCI_INTR_MSIX,
+	    NULL, virtio_vq_intr, VIRTIO_F_INTR_MSIX,
 	    0, VIRTIO_COMMON_FLAG_BITS);
 
 	mutex_init(&sc->sc_mutex, MUTEX_DEFAULT, ipl);
@@ -361,7 +361,7 @@ vioscsi_scsipi_request(struct scsipi_cha
 		req->task_attr = VIRTIO_SCSI_S_SIMPLE;
 		break;
 	}
-	req->id = slot;
+	req->id = virtio_rw64(vsc, slot);
 
 	if ((size_t)xs->cmdlen > sizeof(req->cdb)) {
 		DPRINTF(("%s: bad cmdlen %zu > %zu\n", __func__,
@@ -478,15 +478,17 @@ vioscsi_req_done(struct vioscsi_softc *s
 	    offsetof(struct vioscsi_req, vr_res),
 	    sizeof(struct virtio_scsi_res_hdr),
 	    BUS_DMASYNC_POSTREAD);
-	bus_dmamap_sync(virtio_dmat(vsc), vr->vr_data, 0, xs->datalen,
-	    XS2DMAPOST(xs));
+	if (xs->datalen)
+		bus_dmamap_sync(virtio_dmat(vsc), vr->vr_data, 0, xs->datalen,
+		    XS2DMAPOST(xs));
 
 	xs->status = vr->vr_res.status;
-	xs->resid = vr->vr_res.residual;
+	xs->resid  = virtio_rw32(vsc, vr->vr_res.residual);
 
 	switch (vr->vr_res.response) {
 	case VIRTIO_SCSI_S_OK:
-		sense_len = MIN(sizeof(xs->sense), vr->vr_res.sense_len);
+		sense_len = MIN(sizeof(xs->sense),
+				virtio_rw32(vsc, vr->vr_res.sense_len));
 		memcpy(&xs->sense, vr->vr_res.sense, sense_len);
 		xs->error = (sense_len == 0) ? XS_NOERROR : XS_SENSE;
 		break;

Index: src/sys/dev/pci/virtio.c
diff -u src/sys/dev/pci/virtio.c:1.42 src/sys/dev/pci/virtio.c:1.43
--- src/sys/dev/pci/virtio.c:1.42	Thu Sep 17 17:09:59 2020
+++ src/sys/dev/pci/virtio.c	Wed Jan 20 19:46:48 2021
@@ -1,6 +1,8 @@
-/*	$NetBSD: virtio.c,v 1.42 2020/09/17 17:09:59 jakllsch Exp $	*/
+/*	$NetBSD: virtio.c,v 1.43 2021/01/20 19:46:48 reinoud Exp $	*/
 
 /*
+ * Copyright (c) 2020 The NetBSD Foundation, Inc.
+ * Copyright (c) 2012 Stefan Fritsch, Alexander Fiveg.
  * Copyright (c) 2010 Minoura Makoto.
  * All rights reserved.
  *
@@ -26,7 +28,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: virtio.c,v 1.42 2020/09/17 17:09:59 jakllsch Exp $");
+__KERNEL_RCSID(0, "$NetBSD: virtio.c,v 1.43 2021/01/20 19:46:48 reinoud Exp $");
 
 #include <sys/param.h>
 #include <sys/systm.h>
@@ -44,6 +46,21 @@ __KERNEL_RCSID(0, "$NetBSD: virtio.c,v 1
 
 #define MINSEG_INDIRECT		2 /* use indirect if nsegs >= this value */
 
+/* incomplete list */
+static const char *virtio_device_name[] = {
+	"unknown (0)",			/*  0 */
+	"network",			/*  1 */
+	"block",			/*  2 */
+	"console",			/*  3 */
+	"entropy",			/*  4 */
+	"memory balloon",		/*  5 */
+	"I/O memory",			/*  6 */
+	"remote processor messaging",	/*  7 */
+	"SCSI",				/*  8 */
+	"9P transport",			/*  9 */
+};
+#define NDEVNAMES	__arraycount(virtio_device_name)
+
 static void	virtio_init_vq(struct virtio_softc *,
 		    struct virtqueue *, const bool);
 
@@ -93,7 +110,7 @@ virtio_reinit_start(struct virtio_softc 
 		}
 		virtio_init_vq(sc, vq, true);
 		sc->sc_ops->setup_queue(sc, vq->vq_index,
-		    vq->vq_dmamap->dm_segs[0].ds_addr / VIRTIO_PAGE_SIZE);
+		    vq->vq_dmamap->dm_segs[0].ds_addr);
 	}
 }
 
@@ -106,79 +123,161 @@ virtio_reinit_end(struct virtio_softc *s
 /*
  * Feature negotiation.
  */
-uint32_t
-virtio_negotiate_features(struct virtio_softc *sc, uint32_t guest_features)
+void
+virtio_negotiate_features(struct virtio_softc *sc, uint64_t guest_features)
 {
-	uint32_t r;
-
 	if (!(device_cfdata(sc->sc_dev)->cf_flags & 1) &&
 	    !(device_cfdata(sc->sc_child)->cf_flags & 1)) /* XXX */
 		guest_features |= VIRTIO_F_RING_INDIRECT_DESC;
-	r = sc->sc_ops->neg_features(sc, guest_features);
-	sc->sc_features = r;
-	if (r & VIRTIO_F_RING_INDIRECT_DESC)
+	sc->sc_ops->neg_features(sc, guest_features);
+	if (sc->sc_active_features & VIRTIO_F_RING_INDIRECT_DESC)
 		sc->sc_indirect = true;
 	else
 		sc->sc_indirect = false;
-
-	return r;
 }
 
+
 /*
- * Device configuration registers.
+ * Device configuration registers readers/writers
  */
+#if 0
+#define DPRINTFR(n, fmt, val, index, num) \
+	printf("\n%s (", n); \
+	for (int i = 0; i < num; i++) \
+		printf("%02x ", bus_space_read_1(sc->sc_devcfg_iot, sc->sc_devcfg_ioh, index+i)); \
+	printf(") -> "); printf(fmt, val); printf("\n");
+#else
+#define DPRINTFR(n, fmt, val, index, num)
+#endif
+
 uint8_t
-virtio_read_device_config_1(struct virtio_softc *sc, int index)
-{
-	return sc->sc_ops->read_dev_cfg_1(sc, index);
+virtio_read_device_config_1(struct virtio_softc *sc, int index) {
+	uint8_t val;
+	val = sc->sc_ops->read_dev_cfg_1(sc, index);
+	DPRINTFR("read_1", "%02x", val, index, 1);
+	return val;
 }
 
 uint16_t
-virtio_read_device_config_2(struct virtio_softc *sc, int index)
-{
-	return sc->sc_ops->read_dev_cfg_2(sc, index);
+virtio_read_device_config_2(struct virtio_softc *sc, int index) {
+	uint16_t val;
+	val = sc->sc_ops->read_dev_cfg_2(sc, index);
+	DPRINTFR("read_2", "%04x", val, index, 2);
+	return val;
 }
 
 uint32_t
-virtio_read_device_config_4(struct virtio_softc *sc, int index)
-{
-	return sc->sc_ops->read_dev_cfg_4(sc, index);
+virtio_read_device_config_4(struct virtio_softc *sc, int index) {
+	uint32_t val;
+	val = sc->sc_ops->read_dev_cfg_4(sc, index);
+	DPRINTFR("read_4", "%08x", val, index, 4);
+	return val;
 }
 
 uint64_t
-virtio_read_device_config_8(struct virtio_softc *sc, int index)
+virtio_read_device_config_8(struct virtio_softc *sc, int index) {
+	uint64_t val;
+	val = sc->sc_ops->read_dev_cfg_8(sc, index);
+	DPRINTFR("read_8", "%08lx", val, index, 8);
+	return val;
+}
+
+/*
+ * In the older virtio spec, device config registers are host endian. On newer
+ * they are little endian. The normal logic will cater for this. However,
+ * some devices explicitly state that their fields are always little
+ * endian and will still need to be swapped.
+ */
+uint16_t
+virtio_read_device_config_le_2(struct virtio_softc *sc, int index) {
+	bool virtio_v1 = (sc->sc_active_features & VIRTIO_F_VERSION_1);
+	uint16_t val;
+
+	val = sc->sc_ops->read_dev_cfg_2(sc, index);
+	val = virtio_v1 ? val : le16toh(val);
+	DPRINTFR("read_le_2", "%08x", val, index, 2);
+	return val;
+}
+
+uint32_t
+virtio_read_device_config_le_4(struct virtio_softc *sc, int index) {
+	bool virtio_v1 = (sc->sc_active_features & VIRTIO_F_VERSION_1);
+	uint32_t val;
+
+	val = sc->sc_ops->read_dev_cfg_4(sc, index);
+	val = virtio_v1 ? val : le32toh(val);
+	DPRINTFR("read_le_4", "%08x", val, index, 4);
+	return val;
+}
+
+void
+virtio_write_device_config_1(struct virtio_softc *sc, int index, uint8_t value)
+{
+	sc->sc_ops->write_dev_cfg_1(sc, index, value);
+}
+
+void
+virtio_write_device_config_2(struct virtio_softc *sc, int index, uint16_t value)
 {
-	return sc->sc_ops->read_dev_cfg_8(sc, index);
+	sc->sc_ops->write_dev_cfg_2(sc, index, value);
 }
 
 void
-virtio_write_device_config_1(struct virtio_softc *sc,
-			     int index, uint8_t value)
+virtio_write_device_config_4(struct virtio_softc *sc, int index, uint32_t value)
 {
-	return sc->sc_ops->write_dev_cfg_1(sc, index, value);
+	sc->sc_ops->write_dev_cfg_4(sc, index, value);
 }
 
 void
-virtio_write_device_config_2(struct virtio_softc *sc,
-			     int index, uint16_t value)
+virtio_write_device_config_8(struct virtio_softc *sc, int index, uint64_t value)
 {
-	return sc->sc_ops->write_dev_cfg_2(sc, index, value);
+	sc->sc_ops->write_dev_cfg_8(sc, index, value);
 }
 
+/*
+ * In the older virtio spec, device config registers are host endian. On newer
+ * they are little endian. The normal logic will cater for this. However,
+ * some devices explicitly state that their fields are always little
+ * endian and will still need to be swapped.
+ */
 void
-virtio_write_device_config_4(struct virtio_softc *sc,
-			     int index, uint32_t value)
+virtio_write_device_config_le_2(struct virtio_softc *sc, int index, uint16_t value)
 {
-	return sc->sc_ops->write_dev_cfg_4(sc, index, value);
+	bool virtio_v1 = (sc->sc_active_features & VIRTIO_F_VERSION_1);
+	value = virtio_v1 ? value : htole16(value);
+	sc->sc_ops->write_dev_cfg_2(sc, index, value);
 }
 
 void
-virtio_write_device_config_8(struct virtio_softc *sc,
-			     int index, uint64_t value)
+virtio_write_device_config_le_4(struct virtio_softc *sc, int index, uint32_t value)
+{
+	bool virtio_v1 = (sc->sc_active_features & VIRTIO_F_VERSION_1);
+	value = virtio_v1 ? value : htole32(value);
+	sc->sc_ops->write_dev_cfg_4(sc, index, value);
+}
+
+/*
+ * data structures endian helpers
+ */
+uint16_t virtio_rw16(struct virtio_softc *sc, uint16_t val)
 {
-	return sc->sc_ops->write_dev_cfg_8(sc, index, value);
+	KASSERT(sc);
+	return (sc->sc_devcfg_swap) ? bswap16(val) : val;
 }
 
+uint32_t virtio_rw32(struct virtio_softc *sc, uint32_t val)
+{
+	KASSERT(sc);
+	return (sc->sc_devcfg_swap) ? bswap32(val) : val;
+}
+
+uint64_t virtio_rw64(struct virtio_softc *sc, uint64_t val)
+{
+	KASSERT(sc);
+	return (sc->sc_devcfg_swap) ? bswap64(val) : val;
+}
+
+
 /*
  * Interrupt handler.
  */
@@ -206,20 +305,26 @@ vq_sync_descs(struct virtio_softc *sc, s
 static inline void
 vq_sync_aring(struct virtio_softc *sc, struct virtqueue *vq, int ops)
 {
+	uint16_t hdrlen = offsetof(struct vring_avail, ring);
+	if (sc->sc_active_features & VIRTIO_F_RING_EVENT_IDX)
+		hdrlen += sizeof(uint16_t);
+
 	bus_dmamap_sync(sc->sc_dmat, vq->vq_dmamap,
 			vq->vq_availoffset,
-			offsetof(struct vring_avail, ring)
-			 + vq->vq_num * sizeof(uint16_t),
+			hdrlen + sc->sc_nvqs * sizeof(uint16_t),
 			ops);
 }
 
 static inline void
 vq_sync_uring(struct virtio_softc *sc, struct virtqueue *vq, int ops)
 {
+	uint16_t hdrlen = offsetof(struct vring_used, ring);
+	if (sc->sc_active_features & VIRTIO_F_RING_EVENT_IDX)
+		hdrlen += sizeof(uint16_t);
+
 	bus_dmamap_sync(sc->sc_dmat, vq->vq_dmamap,
 			vq->vq_usedoffset,
-			offsetof(struct vring_used, ring)
-			 + vq->vq_num * sizeof(struct vring_used_elem),
+			hdrlen + sc->sc_nvqs * sizeof(struct vring_used_elem),
 			ops);
 }
 
@@ -253,7 +358,7 @@ virtio_vq_is_enqueued(struct virtio_soft
 	vq_sync_uring(sc, vq, BUS_DMASYNC_POSTREAD);
 	membar_consumer();
 
-	return (vq->vq_used_idx != vq->vq_used->idx) ? 1 : 0;
+	return (vq->vq_used_idx != virtio_rw16(sc, vq->vq_used->idx)) ? 1 : 0;
 }
 
 int
@@ -287,23 +392,100 @@ virtio_vq_intrhand(struct virtio_softc *
 	return r;
 }
 
+
+/*
+ * Increase the event index in order to delay interrupts.
+ */
+int
+virtio_postpone_intr(struct virtio_softc *sc, struct virtqueue *vq,
+		uint16_t nslots)
+{
+	uint16_t	idx, nused;
+
+	idx = vq->vq_used_idx + nslots;
+
+	/* set the new event index: avail_ring->used_event = idx */
+	*vq->vq_used_event = virtio_rw16(sc, idx);
+	membar_producer();
+
+	vq_sync_aring(vq->vq_owner, vq, BUS_DMASYNC_PREWRITE);
+	vq->vq_queued++;
+
+	nused = (uint16_t)
+		(virtio_rw16(sc, vq->vq_used->idx) - vq->vq_used_idx);
+	KASSERT(nused <= vq->vq_num);
+
+	return nslots < nused;
+}
+
+/*
+ * Postpone interrupt until 3/4 of the available descriptors have been
+ * consumed.
+ */
+int
+virtio_postpone_intr_smart(struct virtio_softc *sc, struct virtqueue *vq)
+{
+	uint16_t	nslots;
+
+	nslots = (uint16_t)
+		(virtio_rw16(sc, vq->vq_avail->idx) - vq->vq_used_idx) * 3 / 4;
+
+	return virtio_postpone_intr(sc, vq, nslots);
+}
+
+/*
+ * Postpone interrupt until all of the available descriptors have been
+ * consumed.
+ */
+int
+virtio_postpone_intr_far(struct virtio_softc *sc, struct virtqueue *vq)
+{
+	uint16_t	nslots;
+
+	nslots = (uint16_t)
+		(virtio_rw16(sc, vq->vq_avail->idx) - vq->vq_used_idx);
+
+	return virtio_postpone_intr(sc, vq, nslots);
+}
+
 /*
  * Start/stop vq interrupt.  No guarantee.
  */
 void
 virtio_stop_vq_intr(struct virtio_softc *sc, struct virtqueue *vq)
 {
-	vq->vq_avail->flags |= VRING_AVAIL_F_NO_INTERRUPT;
+	if (sc->sc_active_features & VIRTIO_F_RING_EVENT_IDX) {
+		/*
+		 * No way to disable the interrupt completely with
+		 * RingEventIdx. Instead advance used_event by half the
+		 * possible value. This won't happen soon and is far enough in
+		 * the past to not trigger a spurious interrupt.
+		 */
+		*vq->vq_used_event = virtio_rw16(sc, vq->vq_used_idx + 0x8000);
+	} else {
+		vq->vq_avail->flags |= virtio_rw16(sc, VRING_AVAIL_F_NO_INTERRUPT);
+	}
 	vq_sync_aring(sc, vq, BUS_DMASYNC_PREWRITE);
 	vq->vq_queued++;
 }
 
-void
+int
 virtio_start_vq_intr(struct virtio_softc *sc, struct virtqueue *vq)
 {
-	vq->vq_avail->flags &= ~VRING_AVAIL_F_NO_INTERRUPT;
+	if (sc->sc_active_features & VIRTIO_F_RING_EVENT_IDX) {
+		/*
+		 * If event index feature is negotiated, enabling interrupts
+		 * is done through setting the latest consumed index in the
+		 * used_event field
+		 */
+		*vq->vq_used_event = virtio_rw16(sc, vq->vq_used_idx);
+	} else {
+		vq->vq_avail->flags &= ~virtio_rw16(sc, VRING_AVAIL_F_NO_INTERRUPT);
+	}
 	vq_sync_aring(sc, vq, BUS_DMASYNC_PREWRITE);
 	vq->vq_queued++;
+
+	return vq->vq_used_idx != virtio_rw16(sc, vq->vq_used->idx);
 }
 
 /*
@@ -326,7 +508,7 @@ virtio_init_vq(struct virtio_softc *sc, 
 			vd = vq->vq_indirect;
 			vd += vq->vq_maxnsegs * i;
 			for (j = 0; j < vq->vq_maxnsegs-1; j++) {
-				vd[j].next = j + 1;
+				vd[j].next = virtio_rw16(sc, j + 1);
 			}
 		}
 	}
@@ -362,7 +544,7 @@ virtio_alloc_vq(struct virtio_softc *sc,
     int maxsegsize, int maxnsegs, const char *name)
 {
 	int vq_size, allocsize1, allocsize2, allocsize3, allocsize = 0;
-	int rsegs, r;
+	int rsegs, r, hdrlen;
 #define VIRTQUEUE_ALIGN(n)	(((n)+(VIRTIO_PAGE_SIZE-1))&	\
 				 ~(VIRTIO_PAGE_SIZE-1))
 
@@ -378,12 +560,15 @@ virtio_alloc_vq(struct virtio_softc *sc,
 				 index, name);
 		goto err;
 	}
+
+	hdrlen = sc->sc_active_features & VIRTIO_F_RING_EVENT_IDX ? 3 : 2;
+
 	/* allocsize1: descriptor table + avail ring + pad */
 	allocsize1 = VIRTQUEUE_ALIGN(sizeof(struct vring_desc)*vq_size
-				     + sizeof(uint16_t)*(2+vq_size));
+			     + sizeof(uint16_t)*(hdrlen + vq_size));
 	/* allocsize2: used ring + pad */
-	allocsize2 = VIRTQUEUE_ALIGN(sizeof(uint16_t)*2
-				     + sizeof(struct vring_used_elem)*vq_size);
+	allocsize2 = VIRTQUEUE_ALIGN(sizeof(uint16_t) * hdrlen
+			     + sizeof(struct vring_used_elem)*vq_size);
 	/* allocsize3: indirect table */
 	if (sc->sc_indirect && maxnsegs >= MINSEG_INDIRECT)
 		allocsize3 = sizeof(struct vring_desc) * maxnsegs * vq_size;
@@ -400,7 +585,7 @@ virtio_alloc_vq(struct virtio_softc *sc,
 				 "error code %d\n", index, name, r);
 		goto err;
 	}
-	r = bus_dmamem_map(sc->sc_dmat, &vq->vq_segs[0], 1, allocsize,
+	r = bus_dmamem_map(sc->sc_dmat, &vq->vq_segs[0], rsegs, allocsize,
 			   &vq->vq_vaddr, BUS_DMA_NOWAIT);
 	if (r != 0) {
 		aprint_error_dev(sc->sc_dev,
@@ -425,10 +610,6 @@ virtio_alloc_vq(struct virtio_softc *sc,
 		goto err;
 	}
 
-	/* set the vq address */
-	sc->sc_ops->setup_queue(sc, index,
-	    vq->vq_dmamap->dm_segs[0].ds_addr / VIRTIO_PAGE_SIZE);
-
 	/* remember addresses and offsets for later use */
 	vq->vq_owner = sc;
 	vq->vq_num = vq_size;
@@ -436,8 +617,13 @@ virtio_alloc_vq(struct virtio_softc *sc,
 	vq->vq_desc = vq->vq_vaddr;
 	vq->vq_availoffset = sizeof(struct vring_desc)*vq_size;
 	vq->vq_avail = (void*)(((char*)vq->vq_desc) + vq->vq_availoffset);
+	vq->vq_used_event = (uint16_t *) ((char *)vq->vq_avail +
+		 offsetof(struct vring_avail, ring[vq->vq_num]));
 	vq->vq_usedoffset = allocsize1;
 	vq->vq_used = (void*)(((char*)vq->vq_desc) + vq->vq_usedoffset);
+	vq->vq_avail_event = (uint16_t *)((char *)vq->vq_used +
+		 offsetof(struct vring_used, ring[vq->vq_num]));
+
 	if (allocsize3 > 0) {
 		vq->vq_indirectoffset = allocsize1 + allocsize2;
 		vq->vq_indirect = (void*)(((char*)vq->vq_desc)
@@ -452,6 +638,10 @@ virtio_alloc_vq(struct virtio_softc *sc,
 				     KM_SLEEP);
 	virtio_init_vq(sc, vq, false);
 
+	/* set the vq address */
+	sc->sc_ops->setup_queue(sc, index,
+	    vq->vq_dmamap->dm_segs[0].ds_addr);
+
 	aprint_verbose_dev(sc->sc_dev,
 			   "allocated %u byte for virtqueue %d for %s, "
 			   "size %d\n", allocsize, index, name, vq_size);
@@ -618,24 +808,26 @@ virtio_enqueue_reserve(struct virtio_sof
 
 	if (indirect) {
 		struct vring_desc *vd;
+		uint64_t addr;
 		int i;
 
 		vd = &vq->vq_desc[qe1->qe_index];
-		vd->addr = vq->vq_dmamap->dm_segs[0].ds_addr
+		addr = vq->vq_dmamap->dm_segs[0].ds_addr
 			+ vq->vq_indirectoffset;
-		vd->addr += sizeof(struct vring_desc)
+		addr += sizeof(struct vring_desc)
 			* vq->vq_maxnsegs * qe1->qe_index;
-		vd->len = sizeof(struct vring_desc) * nsegs;
-		vd->flags = VRING_DESC_F_INDIRECT;
+		vd->addr  = virtio_rw64(sc, addr);
+		vd->len   = virtio_rw32(sc, sizeof(struct vring_desc) * nsegs);
+		vd->flags = virtio_rw16(sc, VRING_DESC_F_INDIRECT);
 
 		vd = vq->vq_indirect;
 		vd += vq->vq_maxnsegs * qe1->qe_index;
 		qe1->qe_desc_base = vd;
 
 		for (i = 0; i < nsegs-1; i++) {
-			vd[i].flags = VRING_DESC_F_NEXT;
+			vd[i].flags = virtio_rw16(sc, VRING_DESC_F_NEXT);
 		}
-		vd[i].flags = 0;
+		vd[i].flags  = virtio_rw16(sc, 0);
 		qe1->qe_next = 0;
 
 		return 0;
@@ -651,15 +843,15 @@ virtio_enqueue_reserve(struct virtio_sof
 		for (i = 0; i < nsegs - 1; i++) {
 			qe = vq_alloc_entry(vq);
 			if (qe == NULL) {
-				vd[s].flags = 0;
+				vd[s].flags = virtio_rw16(sc, 0);
 				virtio_enqueue_abort(sc, vq, slot);
 				return EAGAIN;
 			}
-			vd[s].flags = VRING_DESC_F_NEXT;
-			vd[s].next = qe->qe_index;
+			vd[s].flags = virtio_rw16(sc, VRING_DESC_F_NEXT);
+			vd[s].next  = virtio_rw16(sc, qe->qe_index);
 			s = qe->qe_index;
 		}
-		vd[s].flags = 0;
+		vd[s].flags = virtio_rw16(sc, 0);
 
 		return 0;
 	}
@@ -681,11 +873,11 @@ virtio_enqueue(struct virtio_softc *sc, 
 	KASSERT(dmamap->dm_nsegs > 0);
 
 	for (i = 0; i < dmamap->dm_nsegs; i++) {
-		vd[s].addr = dmamap->dm_segs[i].ds_addr;
-		vd[s].len = dmamap->dm_segs[i].ds_len;
+		vd[s].addr = virtio_rw64(sc, dmamap->dm_segs[i].ds_addr);
+		vd[s].len  = virtio_rw32(sc, dmamap->dm_segs[i].ds_len);
 		if (!write)
-			vd[s].flags |= VRING_DESC_F_WRITE;
-		s = vd[s].next;
+			vd[s].flags |= virtio_rw16(sc, VRING_DESC_F_WRITE);
+		s = virtio_rw16(sc, vd[s].next);
 	}
 	qe1->qe_next = s;
 
@@ -706,11 +898,11 @@ virtio_enqueue_p(struct virtio_softc *sc
 	KASSERT((dmamap->dm_segs[0].ds_len > start) &&
 		(dmamap->dm_segs[0].ds_len >= start + len));
 
-	vd[s].addr = dmamap->dm_segs[0].ds_addr + start;
-	vd[s].len = len;
+	vd[s].addr = virtio_rw64(sc, dmamap->dm_segs[0].ds_addr + start);
+	vd[s].len  = virtio_rw32(sc, len);
 	if (!write)
-		vd[s].flags |= VRING_DESC_F_WRITE;
-	qe1->qe_next = vd[s].next;
+		vd[s].flags |= virtio_rw16(sc, VRING_DESC_F_WRITE);
+	qe1->qe_next = virtio_rw16(sc, vd[s].next);
 
 	return 0;
 }
@@ -733,21 +925,35 @@ virtio_enqueue_commit(struct virtio_soft
 	if (qe1->qe_indirect)
 		vq_sync_indirect(sc, vq, slot, BUS_DMASYNC_PREWRITE);
 	mutex_enter(&vq->vq_aring_lock);
-	vq->vq_avail->ring[(vq->vq_avail_idx++) % vq->vq_num] = slot;
+	vq->vq_avail->ring[(vq->vq_avail_idx++) % vq->vq_num] =
+		virtio_rw16(sc, slot);
 
 notify:
 	if (notifynow) {
-		vq_sync_aring(sc, vq, BUS_DMASYNC_PREWRITE);
-		vq_sync_uring(sc, vq, BUS_DMASYNC_PREREAD);
+		uint16_t o, n, t;
+		uint16_t flags;
+		o = virtio_rw16(sc, vq->vq_avail->idx);
+		n = vq->vq_avail_idx;
+
+		/* publish avail idx */
 		membar_producer();
-		vq->vq_avail->idx = vq->vq_avail_idx;
+		vq->vq_avail->idx = virtio_rw16(sc, vq->vq_avail_idx);
 		vq_sync_aring(sc, vq, BUS_DMASYNC_PREWRITE);
-		membar_producer();
 		vq->vq_queued++;
-		vq_sync_uring(sc, vq, BUS_DMASYNC_POSTREAD);
+
 		membar_consumer();
-		if (!(vq->vq_used->flags & VRING_USED_F_NO_NOTIFY))
-			sc->sc_ops->kick(sc, vq->vq_index);
+		vq_sync_uring(sc, vq, BUS_DMASYNC_PREREAD);
+		if (sc->sc_active_features & VIRTIO_F_RING_EVENT_IDX) {
+			t = virtio_rw16(sc, *vq->vq_avail_event) + 1;
+			if ((uint16_t) (n - t) < (uint16_t) (n - o))
+				sc->sc_ops->kick(sc, vq->vq_index);
+		} else {
+			flags = virtio_rw16(sc, vq->vq_used->flags);
+			if (!(flags & VRING_USED_F_NO_NOTIFY))
+				sc->sc_ops->kick(sc, vq->vq_index);
+		}
+		vq_sync_uring(sc, vq, BUS_DMASYNC_POSTREAD);
+		vq_sync_aring(sc, vq, BUS_DMASYNC_POSTWRITE);
 	}
 	mutex_exit(&vq->vq_aring_lock);
 
@@ -771,8 +977,8 @@ virtio_enqueue_abort(struct virtio_softc
 
 	s = slot;
 	vd = &vq->vq_desc[0];
-	while (vd[s].flags & VRING_DESC_F_NEXT) {
-		s = vd[s].next;
+	while (virtio_rw16(sc, vd[s].flags) & VRING_DESC_F_NEXT) {
+		s = virtio_rw16(sc, vd[s].next);
 		vq_free_entry(vq, qe);
 		qe = &vq->vq_entries[s];
 	}
@@ -794,13 +1000,13 @@ virtio_dequeue(struct virtio_softc *sc, 
 	uint16_t slot, usedidx;
 	struct vq_entry *qe;
 
-	if (vq->vq_used_idx == vq->vq_used->idx)
+	if (vq->vq_used_idx == virtio_rw16(sc, vq->vq_used->idx))
 		return ENOENT;
 	mutex_enter(&vq->vq_uring_lock);
 	usedidx = vq->vq_used_idx++;
 	mutex_exit(&vq->vq_uring_lock);
 	usedidx %= vq->vq_num;
-	slot = vq->vq_used->ring[usedidx].id;
+	slot = virtio_rw32(sc, vq->vq_used->ring[usedidx].id);
 	qe = &vq->vq_entries[slot];
 
 	if (qe->qe_indirect)
@@ -809,7 +1015,7 @@ virtio_dequeue(struct virtio_softc *sc, 
 	if (slotp)
 		*slotp = slot;
 	if (lenp)
-		*lenp = vq->vq_used->ring[usedidx].len;
+		*lenp = virtio_rw32(sc, vq->vq_used->ring[usedidx].len);
 
 	return 0;
 }
@@ -825,8 +1031,8 @@ virtio_dequeue_commit(struct virtio_soft
 	struct vring_desc *vd = &vq->vq_desc[0];
 	int s = slot;
 
-	while (vd[s].flags & VRING_DESC_F_NEXT) {
-		s = vd[s].next;
+	while (virtio_rw16(sc, vd[s].flags) & VRING_DESC_F_NEXT) {
+		s = virtio_rw16(sc, vd[s].next);
 		vq_free_entry(vq, qe);
 		qe = &vq->vq_entries[s];
 	}
@@ -845,8 +1051,7 @@ virtio_child_attach_start(struct virtio_
 		    virtio_callback intr_hand,
 		    int req_flags, int req_features, const char *feat_bits)
 {
-	char buf[256];
-	int features;
+	char buf[1024];
 
 	sc->sc_child = child;
 	sc->sc_ipl = ipl;
@@ -855,9 +1060,9 @@ virtio_child_attach_start(struct virtio_
 	sc->sc_intrhand = intr_hand;
 	sc->sc_flags = req_flags;
 
-	features = virtio_negotiate_features(sc, req_features);
-	snprintb(buf, sizeof(buf), feat_bits, features);
-	aprint_normal(": Features: %s\n", buf);
+	virtio_negotiate_features(sc, req_features);
+	snprintb(buf, sizeof(buf), feat_bits, sc->sc_active_features);
+	aprint_normal(": features: %s\n", buf);
 	aprint_naive("\n");
 }
 
@@ -867,7 +1072,7 @@ virtio_child_attach_set_vqs(struct virti
 {
 
 	KASSERT(nvq_pairs == 1 ||
-	    (sc->sc_flags & VIRTIO_F_PCI_INTR_SOFTINT) == 0);
+	    (sc->sc_flags & VIRTIO_F_INTR_SOFTINT) == 0);
 	if (nvq_pairs > 1)
 		sc->sc_child_mq = true;
 
@@ -886,9 +1091,9 @@ virtio_child_attach_finish(struct virtio
 	}
 
 	KASSERT(sc->sc_soft_ih == NULL);
-	if (sc->sc_flags & VIRTIO_F_PCI_INTR_SOFTINT) {
+	if (sc->sc_flags & VIRTIO_F_INTR_SOFTINT) {
 		u_int flags = SOFTINT_NET; 
-		if (sc->sc_flags & VIRTIO_F_PCI_INTR_MPSAFE)
+		if (sc->sc_flags & VIRTIO_F_INTR_MPSAFE)
 			flags |= SOFTINT_MPSAFE;
 
 		sc->sc_soft_ih = softint_establish(flags, virtio_soft_intr, sc);
@@ -957,26 +1162,43 @@ virtio_intrhand(struct virtio_softc *sc)
 	return (sc->sc_intrhand)(sc);
 }
 
-uint32_t
+uint64_t
 virtio_features(struct virtio_softc *sc)
 {
-	return sc->sc_features;
+	return sc->sc_active_features;
 }
 
 int
-virtiobusprint(void *aux, const char *pnp)
+virtio_attach_failed(struct virtio_softc *sc)
 {
-	struct virtio_attach_args * const va = aux;
+	device_t self = sc->sc_dev;
 
-	if (va->sc_childdevid == 0)
-		return QUIET;	/* No device present */
+	/* no error if it's not connected, but it has failed */
+	if (sc->sc_childdevid == 0)
+		return 1;
 
-	if (pnp)
-		aprint_normal("Device ID %d at %s", va->sc_childdevid, pnp);
+	if (sc->sc_child == NULL) {
+		aprint_error_dev(self,
+			"no matching child driver; not configured\n");
+		return 1;
+	}
 
-	return UNCONF;
+	if (sc->sc_child == VIRTIO_CHILD_FAILED) {
+		aprint_error_dev(self, "virtio configuration failed\n");
+		return 1;
+	}
+	return 0;
+}
+
+void
+virtio_print_device_type(device_t self, int id, int revision)
+{
+	aprint_normal_dev(self, "%s device (rev. 0x%02x)\n",
+		  (id < NDEVNAMES ? virtio_device_name[id] : "Unknown"),
+		  revision);
 }
 
+
 MODULE(MODULE_CLASS_DRIVER, virtio, NULL);
  
 #ifdef _MODULE

Index: src/sys/dev/pci/virtio_pci.c
diff -u src/sys/dev/pci/virtio_pci.c:1.14 src/sys/dev/pci/virtio_pci.c:1.15
--- src/sys/dev/pci/virtio_pci.c:1.14	Wed Sep 23 13:45:14 2020
+++ src/sys/dev/pci/virtio_pci.c	Wed Jan 20 19:46:48 2021
@@ -1,6 +1,8 @@
-/* $NetBSD: virtio_pci.c,v 1.14 2020/09/23 13:45:14 jakllsch Exp $ */
+/* $NetBSD: virtio_pci.c,v 1.15 2021/01/20 19:46:48 reinoud Exp $ */
 
 /*
+ * Copyright (c) 2020 The NetBSD Foundation, Inc.
+ * Copyright (c) 2012 Stefan Fritsch.
  * Copyright (c) 2010 Minoura Makoto.
  * All rights reserved.
  *
@@ -26,7 +28,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: virtio_pci.c,v 1.14 2020/09/23 13:45:14 jakllsch Exp $");
+__KERNEL_RCSID(0, "$NetBSD: virtio_pci.c,v 1.15 2021/01/20 19:46:48 reinoud Exp $");
 
 #include <sys/param.h>
 #include <sys/systm.h>
@@ -40,11 +42,13 @@ __KERNEL_RCSID(0, "$NetBSD: virtio_pci.c
 #include <dev/pci/pcireg.h>
 #include <dev/pci/pcivar.h>
 
-#define VIRTIO_PRIVATE
-
 #include <dev/pci/virtioreg.h> /* XXX: move to non-pci */
+#include <dev/pci/virtio_pcireg.h>
+
+#define VIRTIO_PRIVATE
 #include <dev/pci/virtiovar.h> /* XXX: move to non-pci */
 
+
 static int	virtio_pci_match(device_t, cfdata_t, void *);
 static void	virtio_pci_attach(device_t, device_t, void *);
 static int	virtio_pci_rescan(device_t, const char *, const int *);
@@ -52,17 +56,52 @@ static int	virtio_pci_detach(device_t, i
 
 struct virtio_pci_softc {
 	struct virtio_softc	sc_sc;
+
+	/* IO space */
 	bus_space_tag_t		sc_iot;
 	bus_space_handle_t	sc_ioh;
 	bus_size_t		sc_iosize;
+	bus_size_t		sc_mapped_iosize;
+
+	/* BARs */
+	bus_space_tag_t		sc_bars_iot[4];
+	bus_space_handle_t	sc_bars_ioh[4];
+	bus_size_t		sc_bars_iosize[4];
+
+	/* notify space */
+	bus_space_tag_t		sc_notify_iot;
+	bus_space_handle_t	sc_notify_ioh;
+	bus_size_t		sc_notify_iosize;
+	uint32_t		sc_notify_off_multiplier;
+
+	/* isr space */
+	bus_space_tag_t		sc_isr_iot;
+	bus_space_handle_t	sc_isr_ioh;
+	bus_size_t		sc_isr_iosize;
+
+	/* generic */
 	struct pci_attach_args	sc_pa;
 	pci_intr_handle_t	*sc_ihp;
 	void			**sc_ihs;
 	int			sc_ihs_num;
-	int			sc_config_offset;
+	int			sc_devcfg_offset;	/* for 0.9 */
 };
 
-static void	virtio_pci_kick(struct virtio_softc *, uint16_t);
+static int	virtio_pci_attach_09(device_t, void *);
+static void	virtio_pci_kick_09(struct virtio_softc *, uint16_t);
+static uint16_t	virtio_pci_read_queue_size_09(struct virtio_softc *, uint16_t);
+static void	virtio_pci_setup_queue_09(struct virtio_softc *, uint16_t, uint64_t);
+static void	virtio_pci_set_status_09(struct virtio_softc *, int);
+static void	virtio_pci_negotiate_features_09(struct virtio_softc *, uint64_t);
+
+static int	virtio_pci_attach_10(device_t, void *);
+static void	virtio_pci_kick_10(struct virtio_softc *, uint16_t);
+static uint16_t	virtio_pci_read_queue_size_10(struct virtio_softc *, uint16_t);
+static void	virtio_pci_setup_queue_10(struct virtio_softc *, uint16_t, uint64_t);
+static void	virtio_pci_set_status_10(struct virtio_softc *, int);
+static void	virtio_pci_negotiate_features_10(struct virtio_softc *, uint64_t);
+static int	virtio_pci_find_cap(struct virtio_pci_softc *psc, int cfg_type, void *buf, int buflen);
+
 static uint8_t	virtio_pci_read_device_config_1(struct virtio_softc *, int);
 static uint16_t	virtio_pci_read_device_config_2(struct virtio_softc *, int);
 static uint32_t	virtio_pci_read_device_config_4(struct virtio_softc *, int);
@@ -71,17 +110,15 @@ static void 	virtio_pci_write_device_con
 static void	virtio_pci_write_device_config_2(struct virtio_softc *, int, uint16_t);
 static void	virtio_pci_write_device_config_4(struct virtio_softc *, int, uint32_t);
 static void	virtio_pci_write_device_config_8(struct virtio_softc *, int, uint64_t);
-static uint16_t	virtio_pci_read_queue_size(struct virtio_softc *, uint16_t);
-static void	virtio_pci_setup_queue(struct virtio_softc *, uint16_t, uint32_t);
-static void	virtio_pci_set_status(struct virtio_softc *, int);
-static uint32_t	virtio_pci_negotiate_features(struct virtio_softc *, uint32_t);
+
 static int	virtio_pci_setup_interrupts(struct virtio_softc *);
 static void	virtio_pci_free_interrupts(struct virtio_softc *);
-
+static int	virtio_pci_adjust_config_region(struct virtio_pci_softc *psc);
 static int	virtio_pci_intr(void *arg);
 static int	virtio_pci_msix_queue_intr(void *);
 static int	virtio_pci_msix_config_intr(void *);
-static int	virtio_pci_setup_msix_vectors(struct virtio_softc *);
+static int	virtio_pci_setup_msix_vectors_09(struct virtio_softc *);
+static int	virtio_pci_setup_msix_vectors_10(struct virtio_softc *);
 static int	virtio_pci_setup_msix_interrupts(struct virtio_softc *,
 		    struct pci_attach_args *);
 static int	virtio_pci_setup_intx_interrupt(struct virtio_softc *,
@@ -90,6 +127,7 @@ static int	virtio_pci_setup_intx_interru
 #define VIRTIO_MSIX_CONFIG_VECTOR_INDEX	0
 #define VIRTIO_MSIX_QUEUE_VECTOR_INDEX	1
 
+#if 0
 /* we use the legacy virtio spec, so the PCI registers are host native
  * byte order, not PCI (i.e. LE) byte order */
 #if BYTE_ORDER == BIG_ENDIAN
@@ -135,29 +173,45 @@ bus_space_write_stream_4(bus_space_tag_t
 #define bus_space_write_stream_4 bus_space_write_4
 #endif
 #endif
+#endif
 
 
-static const char *virtio_device_name[] = {
-	"Unknown (0)",			/* 0 */
-	"Network",			/* 1 */
-	"Block",			/* 2 */
-	"Console",			/* 3 */
-	"Entropy",			/* 4 */
-	"Memory Balloon",		/* 5 */
-	"I/O Memory",			/* 6 */
-	"Remote Processor Messaging",	/* 7 */
-	"SCSI",				/* 8 */
-	"9P Transport",			/* 9 */
-	"mac80211 wlan",		/* 10 */
-};
-#define NDEVNAMES	__arraycount(virtio_device_name)
+#if BYTE_ORDER == LITTLE_ENDIAN
+#	define VIODEVRW_SWAP_09 false
+#	define VIODEVRW_SWAP_10 false
+#else /* big endian */
+#	define VIODEVRW_SWAP_09 false
+#	define VIODEVRW_SWAP_10 true
+#endif
+
 
 CFATTACH_DECL3_NEW(virtio_pci, sizeof(struct virtio_pci_softc),
     virtio_pci_match, virtio_pci_attach, virtio_pci_detach, NULL,
     virtio_pci_rescan, NULL, DVF_DETACH_SHUTDOWN);
 
-static const struct virtio_ops virtio_pci_ops = {
-	.kick = virtio_pci_kick,
+static const struct virtio_ops virtio_pci_ops_09 = {
+	.kick = virtio_pci_kick_09,
+
+	.read_dev_cfg_1 = virtio_pci_read_device_config_1,
+	.read_dev_cfg_2 = virtio_pci_read_device_config_2,
+	.read_dev_cfg_4 = virtio_pci_read_device_config_4,
+	.read_dev_cfg_8 = virtio_pci_read_device_config_8,
+	.write_dev_cfg_1 = virtio_pci_write_device_config_1,
+	.write_dev_cfg_2 = virtio_pci_write_device_config_2,
+	.write_dev_cfg_4 = virtio_pci_write_device_config_4,
+	.write_dev_cfg_8 = virtio_pci_write_device_config_8,
+
+	.read_queue_size = virtio_pci_read_queue_size_09,
+	.setup_queue = virtio_pci_setup_queue_09,
+	.set_status = virtio_pci_set_status_09,
+	.neg_features = virtio_pci_negotiate_features_09,
+	.setup_interrupts = virtio_pci_setup_interrupts,
+	.free_interrupts = virtio_pci_free_interrupts,
+};
+
+static const struct virtio_ops virtio_pci_ops_10 = {
+	.kick = virtio_pci_kick_10,
+
 	.read_dev_cfg_1 = virtio_pci_read_device_config_1,
 	.read_dev_cfg_2 = virtio_pci_read_device_config_2,
 	.read_dev_cfg_4 = virtio_pci_read_device_config_4,
@@ -166,10 +220,11 @@ static const struct virtio_ops virtio_pc
 	.write_dev_cfg_2 = virtio_pci_write_device_config_2,
 	.write_dev_cfg_4 = virtio_pci_write_device_config_4,
 	.write_dev_cfg_8 = virtio_pci_write_device_config_8,
-	.read_queue_size = virtio_pci_read_queue_size,
-	.setup_queue = virtio_pci_setup_queue,
-	.set_status = virtio_pci_set_status,
-	.neg_features = virtio_pci_negotiate_features,
+
+	.read_queue_size = virtio_pci_read_queue_size_10,
+	.setup_queue = virtio_pci_setup_queue_10,
+	.set_status = virtio_pci_set_status_10,
+	.neg_features = virtio_pci_negotiate_features_10,
 	.setup_interrupts = virtio_pci_setup_interrupts,
 	.free_interrupts = virtio_pci_free_interrupts,
 };
@@ -182,10 +237,17 @@ virtio_pci_match(device_t parent, cfdata
 	pa = (struct pci_attach_args *)aux;
 	switch (PCI_VENDOR(pa->pa_id)) {
 	case PCI_VENDOR_QUMRANET:
-		if ((PCI_PRODUCT_QUMRANET_VIRTIO_1000 <=
-		     PCI_PRODUCT(pa->pa_id)) &&
-		    (PCI_PRODUCT(pa->pa_id) <=
-		     PCI_PRODUCT_QUMRANET_VIRTIO_103F))
+		if (((PCI_PRODUCT_QUMRANET_VIRTIO_1000 <=
+		      PCI_PRODUCT(pa->pa_id)) &&
+		     (PCI_PRODUCT(pa->pa_id) <=
+		      PCI_PRODUCT_QUMRANET_VIRTIO_103F)) &&
+		      PCI_REVISION(pa->pa_class) == 0)
+			return 1;
+		if (((PCI_PRODUCT_QUMRANET_VIRTIO_1040 <=
+		      PCI_PRODUCT(pa->pa_id)) &&
+		     (PCI_PRODUCT(pa->pa_id) <=
+		      PCI_PRODUCT_QUMRANET_VIRTIO_107F)) &&
+		      PCI_REVISION(pa->pa_class) == 1)
 			return 1;
 		break;
 	}
@@ -202,50 +264,69 @@ virtio_pci_attach(device_t parent, devic
 	pci_chipset_tag_t pc = pa->pa_pc;
 	pcitag_t tag = pa->pa_tag;
 	int revision;
+	int ret;
 	pcireg_t id;
 	pcireg_t csr;
 
 	revision = PCI_REVISION(pa->pa_class);
-	if (revision != 0) {
+	switch (revision) {
+	case 0:
+		/* subsystem ID shows what I am */
+		id = PCI_SUBSYS_ID(pci_conf_read(pc, tag, PCI_SUBSYS_ID_REG));
+		break;
+	case 1:
+		/* pci product number shows what I am */
+		id = PCI_PRODUCT(pa->pa_id) - PCI_PRODUCT_QUMRANET_VIRTIO_1040;
+		break;
+	default:
 		aprint_normal(": unknown revision 0x%02x; giving up\n",
 			      revision);
 		return;
 	}
+
 	aprint_normal("\n");
 	aprint_naive("\n");
-
-	/* subsystem ID shows what I am */
-	id = pci_conf_read(pc, tag, PCI_SUBSYS_ID_REG);
-	aprint_normal_dev(self, "Virtio %s Device (rev. 0x%02x)\n",
-			  (PCI_SUBSYS_ID(id) < NDEVNAMES?
-			   virtio_device_name[PCI_SUBSYS_ID(id)] : "Unknown"),
-			  revision);
+	virtio_print_device_type(self, id, revision);
 
 	csr = pci_conf_read(pc, tag, PCI_COMMAND_STATUS_REG);
 	csr |= PCI_COMMAND_MASTER_ENABLE | PCI_COMMAND_IO_ENABLE;
 	pci_conf_write(pc, tag, PCI_COMMAND_STATUS_REG, csr);
 
 	sc->sc_dev = self;
-	sc->sc_ops = &virtio_pci_ops;
 	psc->sc_pa = *pa;
 	psc->sc_iot = pa->pa_iot;
+
+	sc->sc_dmat = pa->pa_dmat;
 	if (pci_dma64_available(pa))
 		sc->sc_dmat = pa->pa_dmat64;
-	else
-		sc->sc_dmat = pa->pa_dmat;
-	psc->sc_config_offset = VIRTIO_CONFIG_DEVICE_CONFIG_NOMSI;
 
-	if (pci_mapreg_map(pa, PCI_MAPREG_START, PCI_MAPREG_TYPE_IO, 0,
-			   &psc->sc_iot, &psc->sc_ioh, NULL, &psc->sc_iosize)) {
-		aprint_error_dev(self, "can't map i/o space\n");
+	/* attach is dependent on revision */
+	ret = 0;
+	if (revision == 1) {
+		/* try to attach 1.0 */
+		ret = virtio_pci_attach_10(self, aux);
+	}
+	if (ret == 0 && revision == 0) {
+		/* revision 0 means 0.9 only or both 0.9 and 1.0 */
+		ret = virtio_pci_attach_09(self, aux);
+	}
+	if (ret) {
+		aprint_error_dev(self, "cannot attach (%d)\n", ret);
 		return;
 	}
+	KASSERT(sc->sc_ops);
+
+	/* preset config region */
+	psc->sc_devcfg_offset = VIRTIO_CONFIG_DEVICE_CONFIG_NOMSI;
+	if (virtio_pci_adjust_config_region(psc))
+		return;
 
+	/* generic */
 	virtio_device_reset(sc);
 	virtio_set_status(sc, VIRTIO_CONFIG_DEVICE_STATUS_ACK);
 	virtio_set_status(sc, VIRTIO_CONFIG_DEVICE_STATUS_DRIVER);
 
-	sc->sc_childdevid = PCI_SUBSYS_ID(id);
+	sc->sc_childdevid = id;
 	sc->sc_child = NULL;
 	virtio_pci_rescan(self, "virtio", 0);
 	return;
@@ -267,17 +348,8 @@ virtio_pci_rescan(device_t self, const c
 
 	config_found_ia(self, attr, &va, NULL);
 
-	if (sc->sc_child == NULL) {
-		aprint_error_dev(self,
-				 "no matching child driver; not configured\n");
+	if (virtio_attach_failed(sc))
 		return 0;
-	}
-
-	if (sc->sc_child == VIRTIO_CHILD_FAILED) {
-		aprint_error_dev(self,
-				 "virtio configuration failed\n");
-		return 0;
-	}
 
 	/*
 	 * Make sure child drivers initialize interrupts via call
@@ -308,183 +380,682 @@ virtio_pci_detach(device_t self, int fla
 	KASSERT(psc->sc_ihs_num == 0);
 
 	if (psc->sc_iosize)
-		bus_space_unmap(psc->sc_iot, psc->sc_ioh, psc->sc_iosize);
+		bus_space_unmap(psc->sc_iot, psc->sc_ioh,
+			psc->sc_mapped_iosize);
 	psc->sc_iosize = 0;
 
 	return 0;
 }
 
-static void
-virtio_pci_kick(struct virtio_softc *sc, uint16_t idx)
+
+static int
+virtio_pci_attach_09(device_t self, void *aux)
+	//struct virtio_pci_softc *psc, struct pci_attach_args *pa)
 {
-	struct virtio_pci_softc * const psc = (struct virtio_pci_softc *)sc;
+	struct virtio_pci_softc * const psc = device_private(self);
+	struct pci_attach_args *pa = (struct pci_attach_args *)aux;
+	struct virtio_softc * const sc = &psc->sc_sc;
+//	pci_chipset_tag_t pc = pa->pa_pc;
+//	pcitag_t tag = pa->pa_tag;
 
-	bus_space_write_stream_2(psc->sc_iot, psc->sc_ioh,
-	    VIRTIO_CONFIG_QUEUE_NOTIFY, idx);
+	/* complete IO region */
+	if (pci_mapreg_map(pa, PCI_MAPREG_START, PCI_MAPREG_TYPE_IO, 0,
+			   &psc->sc_iot, &psc->sc_ioh, NULL, &psc->sc_iosize)) {
+		aprint_error_dev(self, "can't map i/o space\n");
+		return EIO;
+	}
+	psc->sc_mapped_iosize = psc->sc_iosize;
+
+	/* queue space */
+	if (bus_space_subregion(psc->sc_iot, psc->sc_ioh,
+			VIRTIO_CONFIG_QUEUE_NOTIFY, 2, &psc->sc_notify_ioh)) {
+		aprint_error_dev(self, "can't map notify i/o space\n");
+		return EIO;
+	}
+	psc->sc_notify_iosize = 2;
+	psc->sc_notify_iot = psc->sc_iot;
+
+	/* ISR space */
+	if (bus_space_subregion(psc->sc_iot, psc->sc_ioh,
+			VIRTIO_CONFIG_ISR_STATUS, 1, &psc->sc_isr_ioh)) {
+		aprint_error_dev(self, "can't map isr i/o space\n");
+		return EIO;
+	}
+	psc->sc_isr_iosize = 1;
+	psc->sc_isr_iot = psc->sc_iot;
+
+	/* set our version 0.9 ops */
+	sc->sc_ops = &virtio_pci_ops_09;
+	sc->sc_devcfg_swap = VIODEVRW_SWAP_09;
+	return 0;
 }
 
-static uint8_t
-virtio_pci_read_device_config_1(struct virtio_softc *sc, int index)
+
+#define NMAPREG		((PCI_MAPREG_END - PCI_MAPREG_START) / \
+				sizeof(pcireg_t))
+static int
+virtio_pci_attach_10(device_t self, void *aux)
 {
-	struct virtio_pci_softc * const psc = (struct virtio_pci_softc *)sc;
-	return bus_space_read_stream_1(psc->sc_iot, psc->sc_ioh,
-	    psc->sc_config_offset + index);
+	struct virtio_pci_softc * const psc = device_private(self);
+	struct pci_attach_args *pa = (struct pci_attach_args *)aux;
+	struct virtio_softc * const sc = &psc->sc_sc;
+	pci_chipset_tag_t pc = pa->pa_pc;
+	pcitag_t tag = pa->pa_tag;
+
+	struct virtio_pci_cap common, isr, device;
+	struct virtio_pci_notify_cap notify;
+	int have_device_cfg = 0;
+	bus_size_t bars[NMAPREG] = { 0 };
+	int bars_idx[NMAPREG] = { 0 };
+	struct virtio_pci_cap *caps[] = { &common, &isr, &device, &notify.cap };
+	int i, j = 0, ret = 0;
+
+	if (virtio_pci_find_cap(psc, VIRTIO_PCI_CAP_COMMON_CFG,
+			&common, sizeof(common)))
+		return ENODEV;
+	if (virtio_pci_find_cap(psc, VIRTIO_PCI_CAP_NOTIFY_CFG,
+			&notify, sizeof(notify)))
+		return ENODEV;
+	if (virtio_pci_find_cap(psc, VIRTIO_PCI_CAP_ISR_CFG,
+			&isr, sizeof(isr)))
+		return ENODEV;
+	if (virtio_pci_find_cap(psc, VIRTIO_PCI_CAP_DEVICE_CFG,
+			&device, sizeof(device)))
+		memset(&device, 0, sizeof(device));
+	else
+		have_device_cfg = 1;
+
+	/*
+	 * XXX Maybe there are devices that offer the pci caps but not the
+	 * XXX VERSION_1 feature bit? Then we should check the feature bit
+ * XXX here and fall back to 0.9 if not present.
+	 */
+
+	/* Figure out which bars we need to map */
+	for (i = 0; i < __arraycount(caps); i++) {
+		int bar = caps[i]->bar;
+		bus_size_t len = caps[i]->offset + caps[i]->length;
+		if (caps[i]->length == 0)
+			continue;
+		if (bars[bar] < len)
+			bars[bar] = len;
+	}
+
+	for (i = 0; i < __arraycount(bars); i++) {
+		int reg;
+		pcireg_t type;
+		if (bars[i] == 0)
+			continue;
+		reg = PCI_MAPREG_START + i * 4;
+		type = pci_mapreg_type(pc, tag, reg);
+		if (pci_mapreg_map(pa, reg, type, 0,
+				&psc->sc_bars_iot[j], &psc->sc_bars_ioh[j],
+				NULL, &psc->sc_bars_iosize[j])) {
+			aprint_error_dev(self, "can't map bar %u \n", i);
+			ret = EIO;
+			goto err;
+		}
+		aprint_debug_dev(self, "bar[%d]: iot %p, size 0x%lx\n",
+			j, psc->sc_bars_iot[j], psc->sc_bars_iosize[j]);
+		bars_idx[i] = j;
+		j++;
+	}
+
+	i = bars_idx[notify.cap.bar];
+	if (bus_space_subregion(psc->sc_bars_iot[i], psc->sc_bars_ioh[i],
+			notify.cap.offset, notify.cap.length,
+			&psc->sc_notify_ioh)) {
+		aprint_error_dev(self, "can't map notify i/o space\n");
+		ret = EIO;
+		goto err;
+	}
+	psc->sc_notify_iosize = notify.cap.length;
+	psc->sc_notify_iot = psc->sc_bars_iot[i];
+	psc->sc_notify_off_multiplier = le32toh(notify.notify_off_multiplier);
+
+	if (have_device_cfg) {
+		i = bars_idx[device.bar];
+		if (bus_space_subregion(psc->sc_bars_iot[i], psc->sc_bars_ioh[i],
+				device.offset, device.length,
+				&sc->sc_devcfg_ioh)) {
+			aprint_error_dev(self, "can't map devcfg i/o space\n");
+			ret = EIO;
+			goto err;
+		}
+		aprint_debug_dev(self,
+			"device.offset = 0x%x, device.length = 0x%x\n",
+			device.offset, device.length);
+		sc->sc_devcfg_iosize = device.length;
+		sc->sc_devcfg_iot = psc->sc_bars_iot[i];
+	}
+
+	i = bars_idx[isr.bar];
+	if (bus_space_subregion(psc->sc_bars_iot[i], psc->sc_bars_ioh[i],
+			isr.offset, isr.length, &psc->sc_isr_ioh)) {
+		aprint_error_dev(self, "can't map isr i/o space\n");
+		ret = EIO;
+		goto err;
+	}
+	psc->sc_isr_iosize = isr.length;
+	psc->sc_isr_iot = psc->sc_bars_iot[i];
+
+	i = bars_idx[common.bar];
+	if (bus_space_subregion(psc->sc_bars_iot[i], psc->sc_bars_ioh[i],
+			common.offset, common.length, &psc->sc_ioh)) {
+		aprint_error_dev(self, "can't map common i/o space\n");
+		ret = EIO;
+		goto err;
+	}
+	psc->sc_iosize = common.length;
+	psc->sc_iot = psc->sc_bars_iot[i];
+	psc->sc_mapped_iosize = psc->sc_bars_iosize[i];
+
+	psc->sc_sc.sc_version_1 = 1;
+
+	/* set our version 1.0 ops */
+	sc->sc_ops = &virtio_pci_ops_10;
+	sc->sc_devcfg_swap = VIODEVRW_SWAP_10;
+	return 0;
+
+err:
+	/* there is no pci_mapreg_unmap() */
+	return ret;
 }
 
-static uint16_t
-virtio_pci_read_device_config_2(struct virtio_softc *sc, int index)
+/* v1.0 attach helper */
+static int
+virtio_pci_find_cap(struct virtio_pci_softc *psc, int cfg_type, void *buf, int buflen)
 {
-	struct virtio_pci_softc * const psc = (struct virtio_pci_softc *)sc;
-	return bus_space_read_stream_2(psc->sc_iot, psc->sc_ioh,
-	    psc->sc_config_offset + index);
+	device_t self = psc->sc_sc.sc_dev;
+	pci_chipset_tag_t pc = psc->sc_pa.pa_pc;
+	pcitag_t tag = psc->sc_pa.pa_tag;
+	unsigned int offset, i, len;
+	union {
+		pcireg_t reg[8];
+		struct virtio_pci_cap vcap;
+	} *v = buf;
+
+	if (buflen < sizeof(struct virtio_pci_cap))
+		return ERANGE;
+
+	if (!pci_get_capability(pc, tag, PCI_CAP_VENDSPEC, &offset, &v->reg[0]))
+		return ENOENT;
+
+	do {
+		for (i = 0; i < 4; i++)
+			v->reg[i] =
+				le32toh(pci_conf_read(pc, tag, offset + i * 4));
+		if (v->vcap.cfg_type == cfg_type)
+			break;
+		offset = v->vcap.cap_next;
+	} while (offset != 0);
+
+	if (offset == 0)
+		return ENOENT;
+
+	if (v->vcap.cap_len > sizeof(struct virtio_pci_cap)) {
+		len = roundup(v->vcap.cap_len, sizeof(pcireg_t));
+		if (len > buflen) {
+			aprint_error_dev(self, "%s cap too large\n", __func__);
+			return ERANGE;
+		}
+		for (i = 4; i < len / sizeof(pcireg_t);  i++)
+			v->reg[i] =
+				le32toh(pci_conf_read(pc, tag, offset + i * 4));
+	}
+
+	/* endian fixup */
+	v->vcap.offset = le32toh(v->vcap.offset);
+	v->vcap.length = le32toh(v->vcap.length);
+	return 0;
 }
 
-static uint32_t
-virtio_pci_read_device_config_4(struct virtio_softc *sc, int index)
+
+/* -------------------------------------
+ * Version 0.9 support
+ * -------------------------------------*/
+
+static void
+virtio_pci_kick_09(struct virtio_softc *sc, uint16_t idx)
 {
 	struct virtio_pci_softc * const psc = (struct virtio_pci_softc *)sc;
-	return bus_space_read_stream_4(psc->sc_iot, psc->sc_ioh,
-	    psc->sc_config_offset + index);
+
+	bus_space_write_2(psc->sc_notify_iot, psc->sc_notify_ioh, 0, idx);
 }
 
-static uint64_t
-virtio_pci_read_device_config_8(struct virtio_softc *sc, int index)
+/* only applicable for v 0.9 but also called for 1.0 */
+static int
+virtio_pci_adjust_config_region(struct virtio_pci_softc *psc)
 {
-	struct virtio_pci_softc * const psc = (struct virtio_pci_softc *)sc;
-	uint64_t r;
+	struct virtio_softc * const sc = (struct virtio_softc *) psc;
+	device_t self = psc->sc_sc.sc_dev;
 
-	r = bus_space_read_stream_4(psc->sc_iot, psc->sc_ioh,
-	    psc->sc_config_offset + index + REG_HI_OFF);
-	r <<= 32;
-	r |= bus_space_read_stream_4(psc->sc_iot, psc->sc_ioh,
-	    psc->sc_config_offset + index + REG_LO_OFF);
+	if (psc->sc_sc.sc_version_1)
+		return 0;
 
-	return r;
+	sc->sc_devcfg_iosize = psc->sc_iosize - psc->sc_devcfg_offset;
+	sc->sc_devcfg_iot = psc->sc_iot;
+	if (bus_space_subregion(psc->sc_iot, psc->sc_ioh,
+			psc->sc_devcfg_offset, sc->sc_devcfg_iosize,
+			&sc->sc_devcfg_ioh)) {
+		aprint_error_dev(self, "can't map config i/o space\n");
+		return EIO;
+	}
+
+	return 0;
+}
+
+static uint16_t
+virtio_pci_read_queue_size_09(struct virtio_softc *sc, uint16_t idx)
+{
+	struct virtio_pci_softc * const psc = (struct virtio_pci_softc *)sc;
+
+	bus_space_write_2(psc->sc_iot, psc->sc_ioh,
+	    VIRTIO_CONFIG_QUEUE_SELECT, idx);
+	return bus_space_read_2(psc->sc_iot, psc->sc_ioh,
+	    VIRTIO_CONFIG_QUEUE_SIZE);
 }
 
 static void
-virtio_pci_write_device_config_1(struct virtio_softc *sc, int index,
-    uint8_t value)
+virtio_pci_setup_queue_09(struct virtio_softc *sc, uint16_t idx, uint64_t addr)
 {
 	struct virtio_pci_softc * const psc = (struct virtio_pci_softc *)sc;
 
-	bus_space_write_stream_1(psc->sc_iot, psc->sc_ioh,
-	    psc->sc_config_offset + index, value);
+	bus_space_write_2(psc->sc_iot, psc->sc_ioh,
+	    VIRTIO_CONFIG_QUEUE_SELECT, idx);
+	bus_space_write_4(psc->sc_iot, psc->sc_ioh,
+	    VIRTIO_CONFIG_QUEUE_ADDRESS, addr / VIRTIO_PAGE_SIZE);
+
+	if (psc->sc_ihs_num > 1) {
+		int vec = VIRTIO_MSIX_QUEUE_VECTOR_INDEX;
+		if (sc->sc_child_mq)
+			vec += idx;
+		bus_space_write_2(psc->sc_iot, psc->sc_ioh,
+		    VIRTIO_CONFIG_MSI_QUEUE_VECTOR, vec);
+	}
 }
 
 static void
-virtio_pci_write_device_config_2(struct virtio_softc *sc, int index,
-    uint16_t value)
+virtio_pci_set_status_09(struct virtio_softc *sc, int status)
 {
 	struct virtio_pci_softc * const psc = (struct virtio_pci_softc *)sc;
+	int old = 0;
 
-	bus_space_write_stream_2(psc->sc_iot, psc->sc_ioh,
-	    psc->sc_config_offset + index, value);
+	if (status != 0) {
+	    old = bus_space_read_1(psc->sc_iot, psc->sc_ioh,
+		VIRTIO_CONFIG_DEVICE_STATUS);
+	}
+	bus_space_write_1(psc->sc_iot, psc->sc_ioh,
+	    VIRTIO_CONFIG_DEVICE_STATUS, status|old);
 }
 
 static void
-virtio_pci_write_device_config_4(struct virtio_softc *sc, int index,
-    uint32_t value)
+virtio_pci_negotiate_features_09(struct virtio_softc *sc, uint64_t guest_features)
 {
 	struct virtio_pci_softc * const psc = (struct virtio_pci_softc *)sc;
+	uint32_t r;
+
+	r = bus_space_read_4(psc->sc_iot, psc->sc_ioh,
+	    VIRTIO_CONFIG_DEVICE_FEATURES);
+
+	r &= guest_features;
+
+	bus_space_write_4(psc->sc_iot, psc->sc_ioh,
+	    VIRTIO_CONFIG_GUEST_FEATURES, r);
 
-	bus_space_write_stream_4(psc->sc_iot, psc->sc_ioh,
-	    psc->sc_config_offset + index, value);
+	sc->sc_active_features = r;
 }
 
+/* -------------------------------------
+ * Version 1.0 support
+ * -------------------------------------*/
+
 static void
-virtio_pci_write_device_config_8(struct virtio_softc *sc, int index,
-    uint64_t value)
+virtio_pci_kick_10(struct virtio_softc *sc, uint16_t idx)
 {
 	struct virtio_pci_softc * const psc = (struct virtio_pci_softc *)sc;
+	unsigned offset = sc->sc_vqs[idx].vq_notify_off *
+		psc->sc_notify_off_multiplier;
 
-	bus_space_write_stream_4(psc->sc_iot, psc->sc_ioh,
-	    psc->sc_config_offset + index + REG_LO_OFF,
-	    value & 0xffffffff);
-	bus_space_write_stream_4(psc->sc_iot, psc->sc_ioh,
-	    psc->sc_config_offset + index + REG_HI_OFF,
-	    value >> 32);
+	bus_space_write_2(psc->sc_notify_iot, psc->sc_notify_ioh, offset, idx);
 }
 
+
 static uint16_t
-virtio_pci_read_queue_size(struct virtio_softc *sc, uint16_t idx)
+virtio_pci_read_queue_size_10(struct virtio_softc *sc, uint16_t idx)
 {
 	struct virtio_pci_softc * const psc = (struct virtio_pci_softc *)sc;
+	bus_space_tag_t	   iot = psc->sc_iot;
+	bus_space_handle_t ioh = psc->sc_ioh;
 
-	bus_space_write_stream_2(psc->sc_iot, psc->sc_ioh,
-	    VIRTIO_CONFIG_QUEUE_SELECT, idx);
-	return bus_space_read_stream_2(psc->sc_iot, psc->sc_ioh,
-	    VIRTIO_CONFIG_QUEUE_SIZE);
+	bus_space_write_2(iot, ioh, VIRTIO_CONFIG1_QUEUE_SELECT, idx);
+	return bus_space_read_2(iot, ioh, VIRTIO_CONFIG1_QUEUE_SIZE);
 }
 
 static void
-virtio_pci_setup_queue(struct virtio_softc *sc, uint16_t idx, uint32_t addr)
+virtio_pci_setup_queue_10(struct virtio_softc *sc, uint16_t idx, uint64_t addr)
 {
 	struct virtio_pci_softc * const psc = (struct virtio_pci_softc *)sc;
-
-	bus_space_write_stream_2(psc->sc_iot, psc->sc_ioh,
-	    VIRTIO_CONFIG_QUEUE_SELECT, idx);
-	bus_space_write_stream_4(psc->sc_iot, psc->sc_ioh,
-	    VIRTIO_CONFIG_QUEUE_ADDRESS, addr);
+	struct virtqueue *vq = &sc->sc_vqs[idx];
+	bus_space_tag_t	   iot = psc->sc_iot;
+	bus_space_handle_t ioh = psc->sc_ioh;
+	KASSERT(vq->vq_index == idx);
+
+	bus_space_write_2(iot, ioh, VIRTIO_CONFIG1_QUEUE_SELECT, vq->vq_index);
+	if (addr == 0) {
+		bus_space_write_2(iot, ioh, VIRTIO_CONFIG1_QUEUE_ENABLE, 0);
+		bus_space_write_8(iot, ioh, VIRTIO_CONFIG1_QUEUE_DESC,   0);
+		bus_space_write_8(iot, ioh, VIRTIO_CONFIG1_QUEUE_AVAIL,  0);
+		bus_space_write_8(iot, ioh, VIRTIO_CONFIG1_QUEUE_USED,   0);
+	} else {
+		bus_space_write_8(iot, ioh,
+			VIRTIO_CONFIG1_QUEUE_DESC, addr);
+		bus_space_write_8(iot, ioh,
+			VIRTIO_CONFIG1_QUEUE_AVAIL, addr + vq->vq_availoffset);
+		bus_space_write_8(iot, ioh,
+			VIRTIO_CONFIG1_QUEUE_USED, addr + vq->vq_usedoffset);
+		bus_space_write_2(iot, ioh,
+			VIRTIO_CONFIG1_QUEUE_ENABLE, 1);
+		vq->vq_notify_off = bus_space_read_2(iot, ioh,
+			VIRTIO_CONFIG1_QUEUE_NOTIFY_OFF);
+	}
 
 	if (psc->sc_ihs_num > 1) {
 		int vec = VIRTIO_MSIX_QUEUE_VECTOR_INDEX;
 		if (sc->sc_child_mq)
 			vec += idx;
-		bus_space_write_stream_2(psc->sc_iot, psc->sc_ioh,
-		    VIRTIO_CONFIG_MSI_QUEUE_VECTOR, vec);
+		bus_space_write_2(iot, ioh,
+			VIRTIO_CONFIG1_QUEUE_MSIX_VECTOR, vec);
 	}
 }
 
 static void
-virtio_pci_set_status(struct virtio_softc *sc, int status)
+virtio_pci_set_status_10(struct virtio_softc *sc, int status)
 {
 	struct virtio_pci_softc * const psc = (struct virtio_pci_softc *)sc;
+	bus_space_tag_t	   iot = psc->sc_iot;
+	bus_space_handle_t ioh = psc->sc_ioh;
 	int old = 0;
 
-	if (status != 0) {
-	    old = bus_space_read_stream_1(psc->sc_iot, psc->sc_ioh,
-		VIRTIO_CONFIG_DEVICE_STATUS);
+	if (status)
+		old = bus_space_read_1(iot, ioh, VIRTIO_CONFIG1_DEVICE_STATUS);
+	bus_space_write_1(iot, ioh, VIRTIO_CONFIG1_DEVICE_STATUS, status | old);
+}
+
+static void
+virtio_pci_negotiate_features_10(struct virtio_softc *sc, uint64_t guest_features)
+{
+	struct virtio_pci_softc * const psc = (struct virtio_pci_softc *)sc;
+	device_t self          =  sc->sc_dev;
+	bus_space_tag_t	   iot = psc->sc_iot;
+	bus_space_handle_t ioh = psc->sc_ioh;
+	uint64_t host, negotiated, device_status;
+
+	guest_features |= VIRTIO_F_VERSION_1;
+	/* notify on empty is 0.9 only */
+	guest_features &= ~VIRTIO_F_NOTIFY_ON_EMPTY;
+	sc->sc_active_features = 0;
+
+	bus_space_write_4(iot, ioh, VIRTIO_CONFIG1_DEVICE_FEATURE_SELECT, 0);
+	host = bus_space_read_4(iot, ioh, VIRTIO_CONFIG1_DEVICE_FEATURE);
+	bus_space_write_4(iot, ioh, VIRTIO_CONFIG1_DEVICE_FEATURE_SELECT, 1);
+	host |= (uint64_t)
+		bus_space_read_4(iot, ioh, VIRTIO_CONFIG1_DEVICE_FEATURE) << 32;
+
+	negotiated = host & guest_features;
+
+	bus_space_write_4(iot, ioh, VIRTIO_CONFIG1_DRIVER_FEATURE_SELECT, 0);
+	bus_space_write_4(iot, ioh, VIRTIO_CONFIG1_DRIVER_FEATURE,
+			negotiated & 0xffffffff);
+	bus_space_write_4(iot, ioh, VIRTIO_CONFIG1_DRIVER_FEATURE_SELECT, 1);
+	bus_space_write_4(iot, ioh, VIRTIO_CONFIG1_DRIVER_FEATURE,
+			negotiated >> 32);
+	virtio_pci_set_status_10(sc, VIRTIO_CONFIG_DEVICE_STATUS_FEATURES_OK);
+
+	device_status = bus_space_read_1(iot, ioh, VIRTIO_CONFIG1_DEVICE_STATUS);
+	if ((device_status & VIRTIO_CONFIG_DEVICE_STATUS_FEATURES_OK) == 0) {
+		aprint_error_dev(self, "feature negotiation failed\n");
+		bus_space_write_1(iot, ioh, VIRTIO_CONFIG1_DEVICE_STATUS,
+				VIRTIO_CONFIG_DEVICE_STATUS_FAILED);
+		return;
 	}
-	bus_space_write_stream_1(psc->sc_iot, psc->sc_ioh,
-	    VIRTIO_CONFIG_DEVICE_STATUS, status|old);
+
+	if ((negotiated & VIRTIO_F_VERSION_1) == 0) {
+		aprint_error_dev(self, "host rejected version 1\n");
+		bus_space_write_1(iot, ioh, VIRTIO_CONFIG1_DEVICE_STATUS,
+				VIRTIO_CONFIG_DEVICE_STATUS_FAILED);
+		return;
+	}
+
+	sc->sc_active_features = negotiated;
+	return;
+}
+
+/* -------------------------------------
+ * Read/write device config code
+ * -------------------------------------*/
+
+static uint8_t
+virtio_pci_read_device_config_1(struct virtio_softc *vsc, int index)
+{
+	bus_space_tag_t	   iot = vsc->sc_devcfg_iot;
+	bus_space_handle_t ioh = vsc->sc_devcfg_ioh;
+
+	return bus_space_read_1(iot, ioh, index);
+}
+
+static uint16_t
+virtio_pci_read_device_config_2(struct virtio_softc *vsc, int index)
+{
+	bus_space_tag_t	   iot = vsc->sc_devcfg_iot;
+	bus_space_handle_t ioh = vsc->sc_devcfg_ioh;
+	uint16_t val;
+
+#if defined(__aarch64__) && BYTE_ORDER == BIG_ENDIAN
+	val = bus_space_read_2(iot, ioh, index);
+	return val;
+#else
+	val = bus_space_read_stream_2(iot, ioh, index);
+	if (vsc->sc_devcfg_swap)
+		return bswap16(val);
+	return val;
+#endif
 }
 
 static uint32_t
-virtio_pci_negotiate_features(struct virtio_softc *sc, uint32_t guest_features)
+virtio_pci_read_device_config_4(struct virtio_softc *vsc, int index)
 {
-	struct virtio_pci_softc * const psc = (struct virtio_pci_softc *)sc;
-	uint32_t r;
+	bus_space_tag_t	   iot = vsc->sc_devcfg_iot;
+	bus_space_handle_t ioh = vsc->sc_devcfg_ioh;
+	uint32_t val;
+
+#if defined(__aarch64__) && BYTE_ORDER == BIG_ENDIAN
+	val = bus_space_read_4(iot, ioh, index);
+	return val;
+#else
+	val = bus_space_read_stream_4(iot, ioh, index);
+	if (vsc->sc_devcfg_swap)
+		return bswap32(val);
+	return val;
+#endif
+}
 
-	r = bus_space_read_stream_4(psc->sc_iot, psc->sc_ioh,
-	    VIRTIO_CONFIG_DEVICE_FEATURES);
-	r &= guest_features;
-	bus_space_write_stream_4(psc->sc_iot, psc->sc_ioh,
-	    VIRTIO_CONFIG_GUEST_FEATURES, r);
+static uint64_t
+virtio_pci_read_device_config_8(struct virtio_softc *vsc, int index)
+{
+	bus_space_tag_t	   iot = vsc->sc_devcfg_iot;
+	bus_space_handle_t ioh = vsc->sc_devcfg_ioh;
+	uint64_t val, val_h, val_l;
+
+#if defined(__aarch64__) && BYTE_ORDER == BIG_ENDIAN
+	if (vsc->sc_devcfg_swap) {
+		val_l = bus_space_read_4(iot, ioh, index);
+		val_h = bus_space_read_4(iot, ioh, index + 4);
+	} else {
+		val_h = bus_space_read_4(iot, ioh, index);
+		val_l = bus_space_read_4(iot, ioh, index + 4);
+	}
+	val = val_h << 32;
+	val |= val_l;
+	return val;
+#elif BYTE_ORDER == BIG_ENDIAN
+	val_h = bus_space_read_stream_4(iot, ioh, index);
+	val_l = bus_space_read_stream_4(iot, ioh, index + 4);
+	val = val_h << 32;
+	val |= val_l;
+	if (vsc->sc_devcfg_swap)
+		return bswap64(val);
+	return val;
+#else
+	val_l = bus_space_read_4(iot, ioh, index);
+	val_h = bus_space_read_4(iot, ioh, index + 4);
+	val = val_h << 32;
+	val |= val_l;
 
-	return r;
+	return val;
+#endif
+}
+
+static void
+virtio_pci_write_device_config_1(struct virtio_softc *vsc,
+			     int index, uint8_t value)
+{
+	bus_space_tag_t	   iot = vsc->sc_devcfg_iot;
+	bus_space_handle_t ioh = vsc->sc_devcfg_ioh;
+
+	bus_space_write_1(iot, ioh, index, value);
+}
+
+static void
+virtio_pci_write_device_config_2(struct virtio_softc *vsc,
+			     int index, uint16_t value)
+{
+	bus_space_tag_t	   iot = vsc->sc_devcfg_iot;
+	bus_space_handle_t ioh = vsc->sc_devcfg_ioh;
+
+#if defined(__aarch64__) && BYTE_ORDER == BIG_ENDIAN
+	bus_space_write_2(iot, ioh, index, value);
+#else
+	if (vsc->sc_devcfg_swap)
+		value = bswap16(value);
+	bus_space_write_stream_2(iot, ioh, index, value);
+#endif
 }
 
+static void
+virtio_pci_write_device_config_4(struct virtio_softc *vsc,
+			     int index, uint32_t value)
+{
+	bus_space_tag_t	   iot = vsc->sc_devcfg_iot;
+	bus_space_handle_t ioh = vsc->sc_devcfg_ioh;
+
+#if defined(__aarch64__) && BYTE_ORDER == BIG_ENDIAN
+	bus_space_write_4(iot, ioh, index, value);
+#else
+	if (vsc->sc_devcfg_swap)
+		value = bswap32(value);
+	bus_space_write_stream_4(iot, ioh, index, value);
+#endif
+}
+
+static void
+virtio_pci_write_device_config_8(struct virtio_softc *vsc,
+			     int index, uint64_t value)
+{
+	bus_space_tag_t	   iot = vsc->sc_devcfg_iot;
+	bus_space_handle_t ioh = vsc->sc_devcfg_ioh;
+	uint64_t val_h, val_l;
+
+#if defined(__aarch64__) && BYTE_ORDER == BIG_ENDIAN
+	val_l = value & 0xffffffff;
+	val_h = value >> 32;
+	if (vsc->sc_devcfg_swap) {
+		bus_space_write_4(iot, ioh, index, val_l);
+		bus_space_write_4(iot, ioh, index + 4, val_h);
+	} else {
+		bus_space_write_4(iot, ioh, index, val_h);
+		bus_space_write_4(iot, ioh, index + 4, val_l);
+	}
+#elif BYTE_ORDER == BIG_ENDIAN
+	if (vsc->sc_devcfg_swap)
+		value = bswap64(value);
+	val_l = value & 0xffffffff;
+	val_h = value >> 32;
+
+	bus_space_write_stream_4(iot, ioh, index, val_h);
+	bus_space_write_stream_4(iot, ioh, index + 4, val_l);
+#else
+	val_l = value & 0xffffffff;
+	val_h = value >> 32;
+	bus_space_write_stream_4(iot, ioh, index, val_l);
+	bus_space_write_stream_4(iot, ioh, index + 4, val_h);
+#endif
+}
+
+/* -------------------------------------
+ * Generic PCI interrupt code
+ * -------------------------------------*/
 
 static int
-virtio_pci_setup_msix_vectors(struct virtio_softc *sc)
+virtio_pci_setup_msix_vectors_10(struct virtio_softc *sc)
 {
 	struct virtio_pci_softc * const psc = (struct virtio_pci_softc *)sc;
+	device_t self          =  sc->sc_dev;
+	bus_space_tag_t	   iot = psc->sc_iot;
+	bus_space_handle_t ioh = psc->sc_ioh;
+	int vector, ret, qid;
+
+	vector = VIRTIO_MSIX_CONFIG_VECTOR_INDEX;
+	bus_space_write_2(iot, ioh,
+		VIRTIO_CONFIG1_CONFIG_MSIX_VECTOR, vector);
+	ret = bus_space_read_2(iot, ioh, VIRTIO_CONFIG1_CONFIG_MSIX_VECTOR);
+	if (ret != vector) {
+		aprint_error_dev(self, "can't set config msix vector\n");
+		return -1;
+	}
+
+	for (qid = 0; qid < sc->sc_nvqs; qid++) {
+		vector = VIRTIO_MSIX_QUEUE_VECTOR_INDEX;
+
+		if (sc->sc_child_mq)
+			vector += qid;
+		bus_space_write_2(iot, ioh, VIRTIO_CONFIG1_QUEUE_SELECT, qid);
+		bus_space_write_2(iot, ioh, VIRTIO_CONFIG1_QUEUE_MSIX_VECTOR,
+			vector);
+		ret = bus_space_read_2(iot, ioh,
+			VIRTIO_CONFIG1_QUEUE_MSIX_VECTOR);
+		if (ret != vector) {
+			aprint_error_dev(self, "can't set queue %d "
+				"msix vector\n", qid);
+			return -1;
+		}
+	}
+
+	return 0;
+}
+
+static int
+virtio_pci_setup_msix_vectors_09(struct virtio_softc *sc)
+{
+	struct virtio_pci_softc * const psc = (struct virtio_pci_softc *)sc;
+	device_t self = sc->sc_dev;
 	int offset, vector, ret, qid;
 
 	offset = VIRTIO_CONFIG_MSI_CONFIG_VECTOR;
 	vector = VIRTIO_MSIX_CONFIG_VECTOR_INDEX;
 
-	bus_space_write_stream_2(psc->sc_iot, psc->sc_ioh, offset, vector);
-	ret = bus_space_read_stream_2(psc->sc_iot, psc->sc_ioh, offset);
+	bus_space_write_2(psc->sc_iot, psc->sc_ioh, offset, vector);
+	ret = bus_space_read_2(psc->sc_iot, psc->sc_ioh, offset);
 	aprint_debug_dev(sc->sc_dev, "expected=%d, actual=%d\n",
 	    vector, ret);
-	if (ret != vector)
+	if (ret != vector) {
+		aprint_error_dev(self, "can't set config msix vector\n");
 		return -1;
+	}
 
 	for (qid = 0; qid < sc->sc_nvqs; qid++) {
 		offset = VIRTIO_CONFIG_QUEUE_SELECT;
-		bus_space_write_stream_2(psc->sc_iot, psc->sc_ioh, offset, qid);
+		bus_space_write_2(psc->sc_iot, psc->sc_ioh, offset, qid);
 
 		offset = VIRTIO_CONFIG_MSI_QUEUE_VECTOR;
 		vector = VIRTIO_MSIX_QUEUE_VECTOR_INDEX;
@@ -492,12 +1063,15 @@ virtio_pci_setup_msix_vectors(struct vir
 		if (sc->sc_child_mq)
 			vector += qid;
 
-		bus_space_write_stream_2(psc->sc_iot, psc->sc_ioh, offset, vector);
-		ret = bus_space_read_stream_2(psc->sc_iot, psc->sc_ioh, offset);
+		bus_space_write_2(psc->sc_iot, psc->sc_ioh, offset, vector);
+		ret = bus_space_read_2(psc->sc_iot, psc->sc_ioh, offset);
 		aprint_debug_dev(sc->sc_dev, "expected=%d, actual=%d\n",
 		    vector, ret);
-		if (ret != vector)
+		if (ret != vector) {
+			aprint_error_dev(self, "can't set queue %d "
+				"msix vector\n", qid);
 			return -1;
+		}
 	}
 
 	return 0;
@@ -515,9 +1089,10 @@ virtio_pci_setup_msix_interrupts(struct 
 	char intr_xname[INTRDEVNAMEBUF];
 	char const *intrstr;
 	int idx, qid, n;
+	int ret;
 
 	idx = VIRTIO_MSIX_CONFIG_VECTOR_INDEX;
-	if (sc->sc_flags & VIRTIO_F_PCI_INTR_MPSAFE)
+	if (sc->sc_flags & VIRTIO_F_INTR_MPSAFE)
 		pci_intr_setattr(pc, &psc->sc_ihp[idx], PCI_INTR_MPSAFE, true);
 
 	snprintf(intr_xname, sizeof(intr_xname), "%s config",
@@ -539,7 +1114,7 @@ virtio_pci_setup_msix_interrupts(struct 
 			snprintf(intr_xname, sizeof(intr_xname), "%s vq#%d",
 			    device_xname(sc->sc_dev), qid);
 
-			if (sc->sc_flags & VIRTIO_F_PCI_INTR_MPSAFE) {
+			if (sc->sc_flags & VIRTIO_F_INTR_MPSAFE) {
 				pci_intr_setattr(pc, &psc->sc_ihp[n],
 				    PCI_INTR_MPSAFE, true);
 			}
@@ -552,7 +1127,7 @@ virtio_pci_setup_msix_interrupts(struct 
 			}
 		}
 	} else {
-		if (sc->sc_flags & VIRTIO_F_PCI_INTR_MPSAFE)
+		if (sc->sc_flags & VIRTIO_F_INTR_MPSAFE)
 			pci_intr_setattr(pc, &psc->sc_ihp[idx], PCI_INTR_MPSAFE, true);
 
 		snprintf(intr_xname, sizeof(intr_xname), "%s queues",
@@ -565,7 +1140,12 @@ virtio_pci_setup_msix_interrupts(struct 
 		}
 	}
 
-	if (virtio_pci_setup_msix_vectors(sc) != 0) {
+	if (sc->sc_version_1) {
+		ret = virtio_pci_setup_msix_vectors_10(sc);
+	} else {
+		ret = virtio_pci_setup_msix_vectors_09(sc);
+	}
+	if (ret) {
 		aprint_error_dev(self, "couldn't setup MSI-X vectors\n");
 		goto error;
 	}
@@ -640,7 +1220,7 @@ virtio_pci_setup_intx_interrupt(struct v
 	char intrbuf[PCI_INTRSTR_LEN];
 	char const *intrstr;
 
-	if (sc->sc_flags & VIRTIO_F_PCI_INTR_MPSAFE)
+	if (sc->sc_flags & VIRTIO_F_INTR_MPSAFE)
 		pci_intr_setattr(pc, &psc->sc_ihp[0], PCI_INTR_MPSAFE, true);
 
 	psc->sc_ihs[0] = pci_intr_establish_xname(pc, psc->sc_ihp[0],
@@ -674,7 +1254,7 @@ virtio_pci_setup_interrupts(struct virti
 	aprint_debug_dev(self, "pci_msix_count=%d\n", nmsix);
 
 	/* We need at least two: one for config and the other for queues */
-	if ((sc->sc_flags & VIRTIO_F_PCI_INTR_MSIX) == 0 || nmsix < 2) {
+	if ((sc->sc_flags & VIRTIO_F_INTR_MSIX) == 0 || nmsix < 2) {
 		/* Try INTx only */
 		max_type = PCI_INTR_TYPE_INTX;
 		counts[PCI_INTR_TYPE_INTX] = 1;
@@ -719,7 +1299,8 @@ retry:
 		}
 
 		psc->sc_ihs_num = nmsix;
-		psc->sc_config_offset = VIRTIO_CONFIG_DEVICE_CONFIG_MSI;
+		psc->sc_devcfg_offset = VIRTIO_CONFIG_DEVICE_CONFIG_MSI;
+		virtio_pci_adjust_config_region(psc);
 	} else if (pci_intr_type(pc, psc->sc_ihp[0]) == PCI_INTR_TYPE_INTX) {
 		psc->sc_ihs = kmem_zalloc(sizeof(*psc->sc_ihs) * 1,
 		    KM_SLEEP);
@@ -732,7 +1313,8 @@ retry:
 		}
 
 		psc->sc_ihs_num = 1;
-		psc->sc_config_offset = VIRTIO_CONFIG_DEVICE_CONFIG_NOMSI;
+		psc->sc_devcfg_offset = VIRTIO_CONFIG_DEVICE_CONFIG_NOMSI;
+		virtio_pci_adjust_config_region(psc);
 
 		error = pci_get_capability(pc, tag, PCI_CAP_MSIX, &off, NULL);
 		if (error != 0) {
@@ -778,8 +1360,7 @@ virtio_pci_intr(void *arg)
 	int isr, r = 0;
 
 	/* check and ack the interrupt */
-	isr = bus_space_read_stream_1(psc->sc_iot, psc->sc_ioh,
-			       VIRTIO_CONFIG_ISR_STATUS);
+	isr = bus_space_read_1(psc->sc_isr_iot, psc->sc_isr_ioh, 0);
 	if (isr == 0)
 		return 0;
 	if ((isr & VIRTIO_CONFIG_ISR_CONFIG_CHANGE) &&

Index: src/sys/dev/pci/virtioreg.h
diff -u src/sys/dev/pci/virtioreg.h:1.6 src/sys/dev/pci/virtioreg.h:1.7
--- src/sys/dev/pci/virtioreg.h:1.6	Thu Oct 29 01:56:12 2015
+++ src/sys/dev/pci/virtioreg.h	Wed Jan 20 19:46:48 2021
@@ -1,4 +1,4 @@
-/*	$NetBSD: virtioreg.h,v 1.6 2015/10/29 01:56:12 christos Exp $	*/
+/*	$NetBSD: virtioreg.h,v 1.7 2021/01/20 19:46:48 reinoud Exp $	*/
 
 /*
  * Copyright (c) 2010 Minoura Makoto.
@@ -67,43 +67,38 @@
 
 #include <sys/types.h>
 
-/* Virtio product id (subsystem) */
-#define PCI_PRODUCT_VIRTIO_NETWORK	1
-#define PCI_PRODUCT_VIRTIO_BLOCK	2
-#define PCI_PRODUCT_VIRTIO_CONSOLE	3
-#define PCI_PRODUCT_VIRTIO_ENTROPY	4
-#define PCI_PRODUCT_VIRTIO_BALLOON	5
-#define	PCI_PRODUCT_VIRTIO_IOMEM	6
-#define PCI_PRODUCT_VIRTIO_RPMSG	7
-#define PCI_PRODUCT_VIRTIO_SCSI		8
-#define PCI_PRODUCT_VIRTIO_9P		9
-#define PCI_PRODUCT_VIRTIO_MAC80211	10
-
-/* Virtio header */
-#define VIRTIO_CONFIG_DEVICE_FEATURES	0 /* 32bit */
-#define VIRTIO_CONFIG_GUEST_FEATURES	4 /* 32bit */
+/* Virtio product id (all subsystems) */
+#define VIRTIO_DEVICE_ID_NETWORK	 1
+#define VIRTIO_DEVICE_ID_BLOCK		 2
+#define VIRTIO_DEVICE_ID_CONSOLE	 3
+#define VIRTIO_DEVICE_ID_ENTROPY	 4
+#define VIRTIO_DEVICE_ID_BALLOON	 5
+#define VIRTIO_DEVICE_ID_IOMEM		 6
+#define VIRTIO_DEVICE_ID_RPMSG		 7
+#define VIRTIO_DEVICE_ID_SCSI		 8
+#define VIRTIO_DEVICE_ID_9P		 9
+
+/* common device/guest features */
 #define  VIRTIO_F_NOTIFY_ON_EMPTY		(1<<24)
 #define  VIRTIO_F_RING_INDIRECT_DESC		(1<<28)
+#define  VIRTIO_F_RING_EVENT_IDX		(1<<29)
 #define  VIRTIO_F_BAD_FEATURE			(1<<30)
-#define VIRTIO_CONFIG_QUEUE_ADDRESS	8 /* 32bit */
-#define VIRTIO_CONFIG_QUEUE_SIZE	12 /* 16bit */
-#define VIRTIO_CONFIG_QUEUE_SELECT	14 /* 16bit */
-#define VIRTIO_CONFIG_QUEUE_NOTIFY	16 /* 16bit */
-#define VIRTIO_CONFIG_DEVICE_STATUS	18 /* 8bit */
-#define  VIRTIO_CONFIG_DEVICE_STATUS_RESET	0
-#define  VIRTIO_CONFIG_DEVICE_STATUS_ACK	1
-#define  VIRTIO_CONFIG_DEVICE_STATUS_DRIVER	2
-#define  VIRTIO_CONFIG_DEVICE_STATUS_DRIVER_OK	4
-#define  VIRTIO_CONFIG_DEVICE_STATUS_FAILED	128
-#define VIRTIO_CONFIG_ISR_STATUS	19 /* 8bit */
+#define  VIRTIO_F_VERSION_1			(1ULL<<32)
+
+/* common device status flags */
+#define  VIRTIO_CONFIG_DEVICE_STATUS_RESET		  0
+#define  VIRTIO_CONFIG_DEVICE_STATUS_ACK		  1
+#define  VIRTIO_CONFIG_DEVICE_STATUS_DRIVER		  2
+#define  VIRTIO_CONFIG_DEVICE_STATUS_DRIVER_OK		  4
+#define  VIRTIO_CONFIG_DEVICE_STATUS_FEATURES_OK	  8
+#define  VIRTIO_CONFIG_DEVICE_STATUS_DEVICE_NEEDS_RESET	 64
+#define  VIRTIO_CONFIG_DEVICE_STATUS_FAILED		128
+
+/* common ISR status flags */
+#define  VIRTIO_CONFIG_ISR_QUEUE_INTERRUPT	1
 #define  VIRTIO_CONFIG_ISR_CONFIG_CHANGE	2
-#define VIRTIO_CONFIG_CONFIG_VECTOR	20 /* 16bit, optional */
-#define VIRTIO_CONFIG_DEVICE_CONFIG_NOMSI	20
-#define VIRTIO_CONFIG_DEVICE_CONFIG_MSI		24
-/* MSI/MSI-X */
-#define VIRTIO_CONFIG_MSI_CONFIG_VECTOR		20
-#define VIRTIO_CONFIG_MSI_QUEUE_VECTOR		22
 
+/* device/guest feature bit names, for use with snprintb(3) */
 #define VIRTIO_COMMON_FLAG_BITS \
         "\20" \
 	"\x1f""BAD_FEATURE" \
@@ -111,23 +106,36 @@
 	"\x1d""INDIRECT_DESC" \
 	"\x19""NOTIFY_ON_EMPTY"
 
-/* Virtqueue */
-/* This marks a buffer as continuing via the next field. */
+
+/*
+ * Virtqueue
+ */
+
+/* marks a buffer as continuing via the next field. */
 #define VRING_DESC_F_NEXT       1
-/* This marks a buffer as write-only (otherwise read-only). */
+
+/* marks a buffer as write-only (otherwise read-only). */
 #define VRING_DESC_F_WRITE      2
-/* This means the buffer contains a list of buffer descriptors. */
+
+/* the buffer contains a list of buffer descriptors. */
 #define VRING_DESC_F_INDIRECT	4
 
-/* The Host uses this in used->flags to advise the Guest: don't kick me
- * when you add a buffer.  It's unreliable, so it's simply an
- * optimization.  Guest will still kick if it's out of buffers. */
+
+/*
+ * The Host uses this in used->flags to advise the Guest: don't kick me when
+ * you add a buffer.  It's unreliable, so it's simply an optimization.  Guest
+ * will still kick if it's out of buffers.
+ */
 #define VRING_USED_F_NO_NOTIFY  1
-/* The Guest uses this in avail->flags to advise the Host: don't
- * interrupt me when you consume a buffer.  It's unreliable, so it's
- * simply an optimization.  */
+
+/*
+ * The Guest uses this in avail->flags to advise the Host: don't interrupt me
+ * when you consume a buffer.  It's unreliable, so it's simply an
+ * optimization.
+ */
 #define VRING_AVAIL_F_NO_INTERRUPT      1
 
+
 /* Virtio ring descriptors: 16 bytes.
  * These can chain together via "next". */
 struct vring_desc {
@@ -145,6 +153,7 @@ struct vring_avail {
         uint16_t flags;
         uint16_t idx;
         uint16_t ring[0];
+	/* trailed by uint16_t used_event when VIRTIO_F_RING_EVENT_IDX */
 } __packed;
 
 /* u32 is used here for ids for padding reasons. */
@@ -159,6 +168,7 @@ struct vring_used {
         uint16_t flags;
         uint16_t idx;
         struct vring_used_elem ring[0];
+	/* trailed by uint16_t avail_event when VIRTIO_F_RING_EVENT_IDX */
 } __packed;
 
 /* The standard layout for the ring is a continuous chunk of memory which

Index: src/sys/dev/pci/virtiovar.h
diff -u src/sys/dev/pci/virtiovar.h:1.16 src/sys/dev/pci/virtiovar.h:1.17
--- src/sys/dev/pci/virtiovar.h:1.16	Mon May 25 07:52:16 2020
+++ src/sys/dev/pci/virtiovar.h	Wed Jan 20 19:46:48 2021
@@ -1,4 +1,4 @@
-/*	$NetBSD: virtiovar.h,v 1.16 2020/05/25 07:52:16 yamaguchi Exp $	*/
+/*	$NetBSD: virtiovar.h,v 1.17 2021/01/20 19:46:48 reinoud Exp $	*/
 
 /*
  * Copyright (c) 2010 Minoura Makoto.
@@ -70,6 +70,7 @@
 #include <sys/bus.h>
 #include <dev/pci/virtioreg.h>
 
+
 struct vq_entry {
 	SIMPLEQ_ENTRY(vq_entry)	qe_list; /* free list */
 	uint16_t		qe_index; /* index in vq_desc array */
@@ -89,6 +90,8 @@ struct virtqueue {
         struct vring_avail	*vq_avail;
         struct vring_used	*vq_used;
 	void			*vq_indirect;
+	uint16_t		*vq_used_event;		/* trails avail */
+	uint16_t		*vq_avail_event;	/* trails used  */
 
 	/* virtqueue allocation info */
 	void			*vq_vaddr;
@@ -118,6 +121,9 @@ struct virtqueue {
 	int			(*vq_done)(struct virtqueue*);
 	int			(*vq_intrhand)(void *);
 	void			*vq_intrhand_arg;
+
+	/* for 1.0 */
+	uint32_t		vq_notify_off;
 };
 
 struct virtio_attach_args {
@@ -129,6 +135,7 @@ typedef int (*virtio_callback)(struct vi
 #ifdef VIRTIO_PRIVATE
 struct virtio_ops {
 	void		(*kick)(struct virtio_softc *, uint16_t);
+
 	uint8_t		(*read_dev_cfg_1)(struct virtio_softc *, int);
 	uint16_t	(*read_dev_cfg_2)(struct virtio_softc *, int);
 	uint32_t	(*read_dev_cfg_4)(struct virtio_softc *, int);
@@ -137,10 +144,11 @@ struct virtio_ops {
 	void		(*write_dev_cfg_2)(struct virtio_softc *, int, uint16_t);
 	void		(*write_dev_cfg_4)(struct virtio_softc *, int, uint32_t);
 	void		(*write_dev_cfg_8)(struct virtio_softc *, int, uint64_t);
+
 	uint16_t	(*read_queue_size)(struct virtio_softc *, uint16_t);
-	void		(*setup_queue)(struct virtio_softc *, uint16_t, uint32_t);
+	void		(*setup_queue)(struct virtio_softc *, uint16_t, uint64_t);
 	void		(*set_status)(struct virtio_softc *, int);
-	uint32_t	(*neg_features)(struct virtio_softc *, uint32_t);
+	void		(*neg_features)(struct virtio_softc *, uint64_t);
 	int		(*setup_interrupts)(struct virtio_softc *);
 	void		(*free_interrupts)(struct virtio_softc *);
 };
@@ -150,13 +158,19 @@ struct virtio_softc {
 	const struct virtio_ops *sc_ops;
 	bus_dma_tag_t		sc_dmat;
 
+	bool			sc_devcfg_swap;
+	bus_space_tag_t		sc_devcfg_iot;
+	bus_space_handle_t	sc_devcfg_ioh;
+	bus_size_t		sc_devcfg_iosize;
+
 	int			sc_ipl; /* set by child */
 	void			*sc_soft_ih;
 
 	int			sc_flags; /* set by child */
 
-	uint32_t		sc_features;
+	uint64_t		sc_active_features;
 	bool			sc_indirect;
+	bool			sc_version_1;
 
 	int			sc_nvqs; /* set by child */
 	struct virtqueue	*sc_vqs; /* set by child */
@@ -171,23 +185,30 @@ struct virtio_softc {
 struct virtio_softc;
 #endif
 
-#define VIRTIO_F_PCI_INTR_MPSAFE	(1 << 0)
-#define VIRTIO_F_PCI_INTR_SOFTINT	(1 << 1)
-#define VIRTIO_F_PCI_INTR_MSIX		(1 << 2)
+
+/* interupt types, stored in virtio_softc->sc_flags */
+#define VIRTIO_F_INTR_MPSAFE	(1 << 0)
+#define VIRTIO_F_INTR_SOFTINT	(1 << 1)
+#define VIRTIO_F_INTR_MSIX	(1 << 2)
+
 
 #define	VIRTIO_CHILD_FAILED		((void *)1)
 
 /* public interface */
-uint32_t virtio_negotiate_features(struct virtio_softc*, uint32_t);
+void virtio_negotiate_features(struct virtio_softc*, uint64_t);
 
 uint8_t virtio_read_device_config_1(struct virtio_softc *, int);
 uint16_t virtio_read_device_config_2(struct virtio_softc *, int);
 uint32_t virtio_read_device_config_4(struct virtio_softc *, int);
 uint64_t virtio_read_device_config_8(struct virtio_softc *, int);
+uint16_t virtio_read_device_config_le_2(struct virtio_softc *, int);
+uint32_t virtio_read_device_config_le_4(struct virtio_softc *, int);
 void virtio_write_device_config_1(struct virtio_softc *, int, uint8_t);
 void virtio_write_device_config_2(struct virtio_softc *, int, uint16_t);
 void virtio_write_device_config_4(struct virtio_softc *, int, uint32_t);
 void virtio_write_device_config_8(struct virtio_softc *, int, uint64_t);
+void virtio_write_device_config_le_2(struct virtio_softc *, int, uint16_t);
+void virtio_write_device_config_le_4(struct virtio_softc *, int, uint32_t);
 
 int virtio_alloc_vq(struct virtio_softc*, struct virtqueue*, int, int, int,
 		    const char*);
@@ -220,19 +241,34 @@ int virtio_dequeue_commit(struct virtio_
 bool virtio_vq_is_enqueued(struct virtio_softc *, struct virtqueue *);
 int virtio_vq_intr(struct virtio_softc *);
 int virtio_vq_intrhand(struct virtio_softc *);
+int virtio_postpone_intr(struct virtio_softc *sc, struct virtqueue *vq,
+		uint16_t nslots);
+int virtio_postpone_intr_smart(struct virtio_softc *sc, struct virtqueue *vq);
+int virtio_postpone_intr_far(struct virtio_softc *sc, struct virtqueue *vq);
 void virtio_stop_vq_intr(struct virtio_softc *, struct virtqueue *);
-void virtio_start_vq_intr(struct virtio_softc *, struct virtqueue *);
+int virtio_start_vq_intr(struct virtio_softc *, struct virtqueue *);
 
 /* encapsulation */
 bus_dma_tag_t	virtio_dmat(struct virtio_softc *);
 device_t	virtio_child(struct virtio_softc *);
 int		virtio_intrhand(struct virtio_softc *);
-uint32_t	virtio_features(struct virtio_softc *);
+uint64_t	virtio_features(struct virtio_softc *);
 
 /* autoconf(9) common */
 void virtio_set_status(struct virtio_softc *, int);
-int virtiobusprint(void *aux, const char *);
+void virtio_print_device_type(device_t, int, int);
+int virtio_attach_failed(struct virtio_softc *);
 
 #define virtio_device_reset(sc)	virtio_set_status((sc), 0)
 
+/* endian conversion */
+
+/*
+ * Virtio structures are read/written in host endian for 0.9 but little endian
+ * for 1.0. One notable exception is AArch64 BE, which always uses little
+ * endian.
+ */
+uint16_t virtio_rw16(struct virtio_softc *sc, uint16_t val);
+uint32_t virtio_rw32(struct virtio_softc *sc, uint32_t val);
+uint64_t virtio_rw64(struct virtio_softc *sc, uint64_t val);
+
 #endif /* _DEV_PCI_VIRTIOVAR_H_ */

Index: src/sys/dev/virtio/virtio_mmio.c
diff -u src/sys/dev/virtio/virtio_mmio.c:1.3 src/sys/dev/virtio/virtio_mmio.c:1.4
--- src/sys/dev/virtio/virtio_mmio.c:1.3	Sat Oct  3 13:51:34 2020
+++ src/sys/dev/virtio/virtio_mmio.c	Wed Jan 20 19:46:48 2021
@@ -1,4 +1,4 @@
-/*	$NetBSD: virtio_mmio.c,v 1.3 2020/10/03 13:51:34 jmcneill Exp $	*/
+/*	$NetBSD: virtio_mmio.c,v 1.4 2021/01/20 19:46:48 reinoud Exp $	*/
 /*	$OpenBSD: virtio_mmio.c,v 1.2 2017/02/24 17:12:31 patrick Exp $	*/
 
 /*
@@ -29,7 +29,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: virtio_mmio.c,v 1.3 2020/10/03 13:51:34 jmcneill Exp $");
+__KERNEL_RCSID(0, "$NetBSD: virtio_mmio.c,v 1.4 2021/01/20 19:46:48 reinoud Exp $");
 
 #include <sys/param.h>
 #include <sys/systm.h>
@@ -66,10 +66,16 @@ __KERNEL_RCSID(0, "$NetBSD: virtio_mmio.
 #define VIRTIO_MMIO_INT_CONFIG		(1 << 1)
 
 /*
- * MMIO configuration space is in guest byte order. AArch64 BE is special,
- * as the guest starts in LE and we switch to BE after the kernel starts.
- * For this case, we need to byte swap all config space accesses.
+ * MMIO configuration space for virtio-mmio v1 is in guest byte order.
+ *
+ * AArch64 BE is special in that its bus space functions always read little
+ * endian like on the PCI bus and thus need swapping to read host endian
+ * registers.
+ *
+ * XXX this might also be true for other big endian machines.
+ * XXX: TODO test virtio MMIO on non AArch64 big endian machines.
  */
+
 #if defined(__aarch64__) && BYTE_ORDER == BIG_ENDIAN
 #define	VIO16TOH(x)	le16toh(x)
 #define	VIO32TOH(x)	le32toh(x)
@@ -77,6 +83,7 @@ __KERNEL_RCSID(0, "$NetBSD: virtio_mmio.
 #define	HTOVIO16(x)	htole16(x)
 #define	HTOVIO32(x)	htole32(x)
 #define	HTOVIO64(x)	htole64(x)
+#define VIODEVRW_SWAP false	/* can only be native endian now */
 #else
 #define	VIO16TOH(x)	(x)
 #define	VIO32TOH(x)	(x)
@@ -84,13 +91,9 @@ __KERNEL_RCSID(0, "$NetBSD: virtio_mmio.
 #define	HTOVIO16(x)	(x)
 #define	HTOVIO32(x)	(x)
 #define	HTOVIO64(x)	(x)
+#define VIODEVRW_SWAP false	/* will only be native endian now */
 #endif
 
-/*
- * XXX: Before being used on big endian arches, the access to config registers
- * XXX: needs to be reviewed/fixed. The non-device specific registers are
- * XXX: PCI-endian while the device specific registers are native endian.
- */
 
 static void	virtio_mmio_kick(struct virtio_softc *, uint16_t);
 static uint8_t	virtio_mmio_read_device_config_1(struct virtio_softc *, int);
@@ -101,15 +104,17 @@ static void	virtio_mmio_write_device_con
 static void	virtio_mmio_write_device_config_2(struct virtio_softc *, int, uint16_t);
 static void	virtio_mmio_write_device_config_4(struct virtio_softc *, int, uint32_t);
 static void	virtio_mmio_write_device_config_8(struct virtio_softc *, int, uint64_t);
+
 static uint16_t	virtio_mmio_read_queue_size(struct virtio_softc *, uint16_t);
-static void	virtio_mmio_setup_queue(struct virtio_softc *, uint16_t, uint32_t);
+static void	virtio_mmio_setup_queue(struct virtio_softc *, uint16_t, uint64_t);
 static void	virtio_mmio_set_status(struct virtio_softc *, int);
-static uint32_t	virtio_mmio_negotiate_features(struct virtio_softc *, uint32_t);
+static void	virtio_mmio_negotiate_features(struct virtio_softc *, uint64_t);
 static int	virtio_mmio_setup_interrupts(struct virtio_softc *);
 static void	virtio_mmio_free_interrupts(struct virtio_softc *);
 
 static const struct virtio_ops virtio_mmio_ops = {
 	.kick = virtio_mmio_kick,
+
 	.read_dev_cfg_1 = virtio_mmio_read_device_config_1,
 	.read_dev_cfg_2 = virtio_mmio_read_device_config_2,
 	.read_dev_cfg_4 = virtio_mmio_read_device_config_4,
@@ -118,6 +123,7 @@ static const struct virtio_ops virtio_mm
 	.write_dev_cfg_2 = virtio_mmio_write_device_config_2,
 	.write_dev_cfg_4 = virtio_mmio_write_device_config_4,
 	.write_dev_cfg_8 = virtio_mmio_write_device_config_8,
+
 	.read_queue_size = virtio_mmio_read_queue_size,
 	.setup_queue = virtio_mmio_setup_queue,
 	.set_status = virtio_mmio_set_status,
@@ -136,7 +142,7 @@ virtio_mmio_read_queue_size(struct virti
 }
 
 static void
-virtio_mmio_setup_queue(struct virtio_softc *vsc, uint16_t idx, uint32_t addr)
+virtio_mmio_setup_queue(struct virtio_softc *vsc, uint16_t idx, uint64_t addr)
 {
 	struct virtio_mmio_softc *sc = (struct virtio_mmio_softc *)vsc;
 
@@ -145,7 +151,8 @@ virtio_mmio_setup_queue(struct virtio_so
 	    bus_space_read_4(sc->sc_iot, sc->sc_ioh, VIRTIO_MMIO_QUEUE_NUM_MAX));
 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, VIRTIO_MMIO_QUEUE_ALIGN,
 	    VIRTIO_PAGE_SIZE);
-	bus_space_write_4(sc->sc_iot, sc->sc_ioh, VIRTIO_MMIO_QUEUE_PFN, addr);
+	bus_space_write_4(sc->sc_iot, sc->sc_ioh, VIRTIO_MMIO_QUEUE_PFN,
+	    addr / VIRTIO_PAGE_SIZE);
 }
 
 static void
@@ -165,6 +172,7 @@ void
 virtio_mmio_common_attach(struct virtio_mmio_softc *sc)
 {
 	struct virtio_softc *vsc = &sc->sc_sc;
+	device_t self = vsc->sc_dev;
 	uint32_t id, magic, ver;
 
 	magic = bus_space_read_4(sc->sc_iot, sc->sc_ioh,
@@ -188,11 +196,23 @@ virtio_mmio_common_attach(struct virtio_
 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, VIRTIO_MMIO_GUEST_PAGE_SIZE,
 	    VIRTIO_PAGE_SIZE);
 
-	/* No device connected. */
+	/* no device connected. */
 	if (id == 0)
 		return;
 
+	virtio_print_device_type(self, id, ver);
 	vsc->sc_ops = &virtio_mmio_ops;
+	vsc->sc_devcfg_swap = VIODEVRW_SWAP;
+
+	/* set up our device config tag */
+	vsc->sc_devcfg_iosize = sc->sc_iosize - VIRTIO_MMIO_CONFIG;
+	vsc->sc_devcfg_iot = sc->sc_iot;
+	if (bus_space_subregion(sc->sc_iot, sc->sc_ioh,
+			VIRTIO_MMIO_CONFIG, vsc->sc_devcfg_iosize,
+			&vsc->sc_devcfg_ioh)) {
+		aprint_error_dev(self, "can't map config i/o space\n");
+		return;
+	}
 
 	virtio_device_reset(vsc);
 	virtio_mmio_set_status(vsc, VIRTIO_CONFIG_DEVICE_STATUS_ACK);
@@ -229,8 +249,8 @@ virtio_mmio_common_detach(struct virtio_
 /*
  * Feature negotiation.
  */
-static uint32_t
-virtio_mmio_negotiate_features(struct virtio_softc *vsc, uint32_t
+static void
+virtio_mmio_negotiate_features(struct virtio_softc *vsc, uint64_t
     guest_features)
 {
 	struct virtio_mmio_softc *sc = (struct virtio_mmio_softc *)vsc;
@@ -245,47 +265,56 @@ virtio_mmio_negotiate_features(struct vi
 	    VIRTIO_MMIO_GUEST_FEATURES_SEL, 0);
 	bus_space_write_4(sc->sc_iot, sc->sc_ioh,
 			  VIRTIO_MMIO_GUEST_FEATURES, r);
-	return r;
+
+	vsc->sc_active_features = r;
 }
 
+
 /*
  * Device configuration registers.
  */
+
+/* ----------------------------------------------------
+ * Read/write device config code
+ * ----------------------------------------------------*/
+
 static uint8_t
 virtio_mmio_read_device_config_1(struct virtio_softc *vsc, int index)
 {
-	struct virtio_mmio_softc *sc = (struct virtio_mmio_softc *)vsc;
-	return bus_space_read_1(sc->sc_iot, sc->sc_ioh,
-				VIRTIO_MMIO_CONFIG + index);
+	bus_space_tag_t	   iot = vsc->sc_devcfg_iot;
+	bus_space_handle_t ioh = vsc->sc_devcfg_ioh;
+
+	return bus_space_read_1(iot, ioh, index);
 }
 
 static uint16_t
 virtio_mmio_read_device_config_2(struct virtio_softc *vsc, int index)
 {
-	struct virtio_mmio_softc *sc = (struct virtio_mmio_softc *)vsc;
-	return VIO16TOH(bus_space_read_2(sc->sc_iot, sc->sc_ioh,
-					VIRTIO_MMIO_CONFIG + index));
+	bus_space_tag_t	   iot = vsc->sc_devcfg_iot;
+	bus_space_handle_t ioh = vsc->sc_devcfg_ioh;
+
+	return VIO16TOH(bus_space_read_2(iot, ioh, index));
 }
 
 static uint32_t
 virtio_mmio_read_device_config_4(struct virtio_softc *vsc, int index)
 {
-	struct virtio_mmio_softc *sc = (struct virtio_mmio_softc *)vsc;
-	return VIO32TOH(bus_space_read_4(sc->sc_iot, sc->sc_ioh,
-					VIRTIO_MMIO_CONFIG + index));
+	bus_space_tag_t	   iot = vsc->sc_devcfg_iot;
+	bus_space_handle_t ioh = vsc->sc_devcfg_ioh;
+
+	return VIO32TOH(bus_space_read_4(iot, ioh, index));
 }
 
 static uint64_t
 virtio_mmio_read_device_config_8(struct virtio_softc *vsc, int index)
 {
-	struct virtio_mmio_softc *sc = (struct virtio_mmio_softc *)vsc;
+	bus_space_tag_t	   iot = vsc->sc_devcfg_iot;
+	bus_space_handle_t ioh = vsc->sc_devcfg_ioh;
 	uint64_t r;
 
-	r = bus_space_read_4(sc->sc_iot, sc->sc_ioh,
-			     VIRTIO_MMIO_CONFIG + index + sizeof(uint32_t));
+	r = bus_space_read_4(iot, ioh, index + sizeof(uint32_t));
 	r <<= 32;
-	r += bus_space_read_4(sc->sc_iot, sc->sc_ioh,
-			      VIRTIO_MMIO_CONFIG + index);
+	r += bus_space_read_4(iot, ioh, index);
 	return VIO64TOH(r);
 }
 
@@ -293,42 +322,47 @@ static void
 virtio_mmio_write_device_config_1(struct virtio_softc *vsc,
 			     int index, uint8_t value)
 {
-	struct virtio_mmio_softc *sc = (struct virtio_mmio_softc *)vsc;
-	bus_space_write_1(sc->sc_iot, sc->sc_ioh,
-			  VIRTIO_MMIO_CONFIG + index, value);
+	bus_space_tag_t	   iot = vsc->sc_devcfg_iot;
+	bus_space_handle_t ioh = vsc->sc_devcfg_ioh;
+
+	bus_space_write_1(iot, ioh, index, value);
 }
 
 static void
 virtio_mmio_write_device_config_2(struct virtio_softc *vsc,
 			     int index, uint16_t value)
 {
-	struct virtio_mmio_softc *sc = (struct virtio_mmio_softc *)vsc;
-	bus_space_write_2(sc->sc_iot, sc->sc_ioh,
-			  VIRTIO_MMIO_CONFIG + index, HTOVIO16(value));
+	bus_space_tag_t	   iot = vsc->sc_devcfg_iot;
+	bus_space_handle_t ioh = vsc->sc_devcfg_ioh;
+
+	value = HTOVIO16(value);
+	bus_space_write_2(iot, ioh, index, value);
 }
 
 static void
 virtio_mmio_write_device_config_4(struct virtio_softc *vsc,
 			     int index, uint32_t value)
 {
-	struct virtio_mmio_softc *sc = (struct virtio_mmio_softc *)vsc;
-	bus_space_write_4(sc->sc_iot, sc->sc_ioh,
-			  VIRTIO_MMIO_CONFIG + index, HTOVIO32(value));
+	bus_space_tag_t	   iot = vsc->sc_devcfg_iot;
+	bus_space_handle_t ioh = vsc->sc_devcfg_ioh;
+
+	value = HTOVIO32(value);
+	bus_space_write_4(iot, ioh, index, value);
 }
 
 static void
 virtio_mmio_write_device_config_8(struct virtio_softc *vsc,
 			     int index, uint64_t value)
 {
-	struct virtio_mmio_softc *sc = (struct virtio_mmio_softc *)vsc;
-	bus_space_write_4(sc->sc_iot, sc->sc_ioh,
-			  VIRTIO_MMIO_CONFIG + index,
-			  HTOVIO64(value) & 0xffffffff);
-	bus_space_write_4(sc->sc_iot, sc->sc_ioh,
-			  VIRTIO_MMIO_CONFIG + index + sizeof(uint32_t),
-			  HTOVIO64(value) >> 32);
+	bus_space_tag_t	   iot = vsc->sc_devcfg_iot;
+	bus_space_handle_t ioh = vsc->sc_devcfg_ioh;
+
+	value = HTOVIO64(value);
+	bus_space_write_4(iot, ioh, index, value & 0xffffffff);
+	bus_space_write_4(iot, ioh, index + sizeof(uint32_t), value >> 32);
 }
 
+
 /*
  * Interrupt handler.
  */

Added files:

Index: src/sys/dev/pci/virtio_pcireg.h
diff -u /dev/null src/sys/dev/pci/virtio_pcireg.h:1.1
--- /dev/null	Wed Jan 20 19:46:48 2021
+++ src/sys/dev/pci/virtio_pcireg.h	Wed Jan 20 19:46:48 2021
@@ -0,0 +1,141 @@
+/*	$NetBSD: virtio_pcireg.h,v 1.1 2021/01/20 19:46:48 reinoud Exp $	*/
+
+/*
+ * Copyright (c) 2020 The NetBSD Foundation, Inc.
+ * Copyright (c) 2010 Minoura Makoto.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * Part of the file derived from `Virtio PCI Card Specification v0.8.6 DRAFT'
+ * Appendix A.
+ */
+/* An interface for efficient virtio implementation.
+ *
+ * This header is BSD licensed so anyone can use the definitions
+ * to implement compatible drivers/servers.
+ *
+ * Copyright 2007, 2009, IBM Corporation
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of IBM nor the names of its contributors
+ *    may be used to endorse or promote products derived from this software
+ *    without specific prior written permission.
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL IBM OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+
+#ifndef _DEV_PCI_VIRTIO_PCIREG_H_
+#define	_DEV_PCI_VIRTIO_PCIREG_H_
+
+#include <sys/types.h>
+
+/*
+ * Virtio PCI v0.9 config space
+ */
+#define VIRTIO_CONFIG_DEVICE_FEATURES		 0 /* 32 bit */
+#define VIRTIO_CONFIG_GUEST_FEATURES		 4 /* 32 bit */
+#define VIRTIO_CONFIG_QUEUE_ADDRESS		 8 /* 32 bit */
+#define VIRTIO_CONFIG_QUEUE_SIZE		12 /* 16 bit */
+#define VIRTIO_CONFIG_QUEUE_SELECT		14 /* 16 bit */
+#define VIRTIO_CONFIG_QUEUE_NOTIFY		16 /* 16 bit */
+#define VIRTIO_CONFIG_DEVICE_STATUS		18 /*  8 bit */
+#define VIRTIO_CONFIG_ISR_STATUS		19 /*  8 bit */
+#define VIRTIO_CONFIG_CONFIG_VECTOR		20 /* 16 bit, optional */
+#define VIRTIO_CONFIG_DEVICE_CONFIG_NOMSI	20 /* start of config space */
+#define VIRTIO_CONFIG_DEVICE_CONFIG_MSI		24 /* start of config space */
+/* MSI/MSI-X */
+#define VIRTIO_CONFIG_MSI_CONFIG_VECTOR		20 /* 16 bit, optional */
+#define VIRTIO_CONFIG_MSI_QUEUE_VECTOR		22 /* 16 bit, optional */
+
+
+/*
+ * Virtio PCI 1.0 PCI cap space
+ */
+
+#define VIRTIO_PCI_CAP_COMMON_CFG	1 /* Common configuration */
+#define VIRTIO_PCI_CAP_NOTIFY_CFG	2 /* Notifications */
+#define VIRTIO_PCI_CAP_ISR_CFG		3 /* ISR Status */
+#define VIRTIO_PCI_CAP_DEVICE_CFG	4 /* Device specific configuration */
+#define VIRTIO_PCI_CAP_PCI_CFG		5 /* PCI configuration access */
+
+struct virtio_pci_cap {
+	uint8_t cap_vndr;	/* Generic PCI field: PCI_CAP_ID_VNDR */
+	uint8_t cap_next;	/* Generic PCI field: next ptr */
+	uint8_t cap_len;	/* Generic PCI field: capability length */
+	uint8_t cfg_type;	/* Identifies the structure */
+	uint8_t bar;		/* Where to find it */
+	uint8_t padding[3];	/* Pad to full dword */
+	uint32_t offset;	/* Offset within bar */
+	uint32_t length;	/* Length of the structure, in bytes */
+} __packed;
+
+
+struct virtio_pci_notify_cap {
+	struct virtio_pci_cap cap;
+	uint32_t notify_off_multiplier;	/* Multiplier for queue_notify_off. */
+} __packed;
+
+/*
+ * Virtio PCI v1.0 config space
+ */
+
+#define VIRTIO_CONFIG1_DEVICE_FEATURE_SELECT	 0 /* 32 bit RW */
+#define VIRTIO_CONFIG1_DEVICE_FEATURE		 4 /* 32 bit RO */
+#define VIRTIO_CONFIG1_DRIVER_FEATURE_SELECT	 8 /* 32 bit RW */
+#define VIRTIO_CONFIG1_DRIVER_FEATURE		12 /* 32 bit RW */
+#define VIRTIO_CONFIG1_CONFIG_MSIX_VECTOR	16 /* 16 bit RW */
+#define VIRTIO_CONFIG1_NUM_QUEUES		18 /* 16 bit RO */
+#define VIRTIO_CONFIG1_DEVICE_STATUS		20 /*  8 bit RW */
+#define VIRTIO_CONFIG1_CONFIG_GENERATION	21 /*  8 bit RO */
+
+/* about a specific virtqueue: */
+#define VIRTIO_CONFIG1_QUEUE_SELECT		22 /* 16 bit RW */
+#define VIRTIO_CONFIG1_QUEUE_SIZE		24 /* 16 bit RO, power of 2 */
+#define VIRTIO_CONFIG1_QUEUE_MSIX_VECTOR	26 /* 16 bit RW */
+#define VIRTIO_CONFIG1_QUEUE_ENABLE		28 /* 16 bit RW */
+#define VIRTIO_CONFIG1_QUEUE_NOTIFY_OFF		30 /* 16 bit RO */
+#define VIRTIO_CONFIG1_QUEUE_DESC		32 /* 64 bit RW */
+#define VIRTIO_CONFIG1_QUEUE_AVAIL		40 /* 64 bit RW */
+#define VIRTIO_CONFIG1_QUEUE_USED		48 /* 64 bit RW */
+
+
+#endif /* _DEV_PCI_VIRTIO_PCIREG_H_ */

Reply via email to