This adds a side-effect-free io path to nvme(4), allowing it to be used as a
hibernate target. So far I've only tested this in a qemu vm: it hibernates
successfully, but falls over weirdly when trying to resume.
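
For reference, the hook returned by get_hibernate_io_function() has to
follow the sys/hibernate.h contract, which looks roughly like the sketch
below (xxx_hibernate_io is a made-up name; the real implementation is in
the nvme.c hunk further down):

	int
	xxx_hibernate_io(dev_t dev, daddr_t blkno, vaddr_t addr, size_t size,
	    int op, void *page)
	{
		switch (op) {
		case HIB_INIT:
			/*
			 * Called once, with a single page of scratch memory
			 * in "page". All state must live in that page from
			 * here on.
			 */
			return (0);
		case HIB_W:
			/* write "size" bytes from "addr" at "blkno", polled */
			return (0);
		default:
			return (0);
		}
	}

As I understand it, the constraint is that by the time HIB_W is called the
hibernate code is busy serialising memory, including the driver's own data
structures, so the io path can't allocate anything or touch the normal
queues. That's why the diff builds a private queue pair (NVME_HIB_Q) inside
the scratch page and polls it directly.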
Index: arch/amd64/amd64/hibernate_machdep.c
===================================================================
RCS file: /cvs/src/sys/arch/amd64/amd64/hibernate_machdep.c,v
retrieving revision 1.38
diff -u -p -r1.38 hibernate_machdep.c
--- arch/amd64/amd64/hibernate_machdep.c 21 Aug 2015 07:01:38 -0000 1.38
+++ arch/amd64/amd64/hibernate_machdep.c 28 May 2017 08:33:46 -0000
@@ -47,6 +47,7 @@
#include "ahci.h"
#include "softraid.h"
#include "sd.h"
+#include "nvme.h"
/* Hibernate support */
void hibernate_enter_resume_4k_pte(vaddr_t, paddr_t);
@@ -89,6 +90,8 @@ get_hibernate_io_function(dev_t dev)
extern struct cfdriver sd_cd;
extern int ahci_hibernate_io(dev_t dev, daddr_t blkno,
vaddr_t addr, size_t size, int op, void *page);
+ extern int nvme_hibernate_io(dev_t dev, daddr_t blkno,
+ vaddr_t addr, size_t size, int op, void *page);
extern int sr_hibernate_io(dev_t dev, daddr_t blkno,
vaddr_t addr, size_t size, int op, void *page);
struct device *dv = disk_lookup(&sd_cd, DISKUNIT(dev));
@@ -98,6 +101,12 @@ get_hibernate_io_function(dev_t dev)
strcmp(dv->dv_parent->dv_parent->dv_cfdata->cf_driver->cd_name,
"ahci") == 0)
return ahci_hibernate_io;
+#endif
+#if NNVME > 0
+ if (dv && dv->dv_parent && dv->dv_parent->dv_parent &&
+ strcmp(dv->dv_parent->dv_parent->dv_cfdata->cf_driver->cd_name,
+ "nvme") == 0)
+ return nvme_hibernate_io;
#endif
#if NSOFTRAID > 0
if (dv && dv->dv_parent && dv->dv_parent->dv_parent &&
Index: dev/ic/nvmevar.h
===================================================================
RCS file: /cvs/src/sys/dev/ic/nvmevar.h,v
retrieving revision 1.10
diff -u -p -r1.10 nvmevar.h
--- dev/ic/nvmevar.h 27 May 2017 12:40:51 -0000 1.10
+++ dev/ic/nvmevar.h 28 May 2017 08:33:46 -0000
@@ -16,6 +16,10 @@
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
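+/* qid 0 is reserved for the admin queue, so io queue ids start at 1 */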
+#define NVME_IO_Q 1
+#define NVME_HIB_Q 2
+
struct nvme_dmamem {
bus_dmamap_t ndm_map;
bus_dma_segment_t ndm_seg;
Index: dev/ic/nvme.c
===================================================================
RCS file: /cvs/src/sys/dev/ic/nvme.c,v
retrieving revision 1.56
diff -u -p -r1.56 nvme.c
--- dev/ic/nvme.c 27 May 2017 12:40:51 -0000 1.56
+++ dev/ic/nvme.c 28 May 2017 08:33:46 -0000
@@ -86,6 +86,15 @@ void nvme_scsi_cmd(struct scsi_xfer *);
int nvme_scsi_probe(struct scsi_link *);
void nvme_scsi_free(struct scsi_link *);
+#ifdef HIBERNATE
+#include <uvm/uvm_extern.h>
+#include <sys/hibernate.h>
+#include <sys/disk.h>
+#include <sys/disklabel.h>
+
+int nvme_hibernate_io(dev_t, daddr_t, vaddr_t, size_t, int, void *);
+#endif
+
struct scsi_adapter nvme_switch = {
nvme_scsi_cmd, /* cmd */
scsi_minphys, /* minphys */
@@ -332,7 +341,7 @@ nvme_attach(struct nvme_softc *sc)
goto free_admin_q;
}
- sc->sc_q = nvme_q_alloc(sc, 1, 128, sc->sc_dstrd);
+ sc->sc_q = nvme_q_alloc(sc, NVME_IO_Q, 128, sc->sc_dstrd);
if (sc->sc_q == NULL) {
printf("%s: unable to allocate io q\n", DEVNAME(sc));
goto disable;
@@ -394,7 +403,7 @@ nvme_resume(struct nvme_softc *sc)
return (1);
}
- sc->sc_q = nvme_q_alloc(sc, 1, 128, sc->sc_dstrd);
+ sc->sc_q = nvme_q_alloc(sc, NVME_IO_Q, 128, sc->sc_dstrd);
if (sc->sc_q == NULL) {
printf("%s: unable to allocate io q\n", DEVNAME(sc));
goto disable;
@@ -1393,3 +1402,210 @@ nvme_dmamem_free(struct nvme_softc *sc,
free(ndm, M_DEVBUF, sizeof *ndm);
}
+#ifdef HIBERNATE
+
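+/*
+ * Submit a single admin command by hand and busy-wait for its
+ * completion, bypassing the driver's interrupt driven completion path.
+ */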
+int
+nvme_hibernate_admin_cmd(struct nvme_softc *sc, struct nvme_sqe *sqe,
+ struct nvme_cqe *cqe, int cid)
+{
+ struct nvme_sqe *asqe = NVME_DMA_KVA(sc->sc_admin_q->q_sq_dmamem);
+ struct nvme_cqe *acqe = NVME_DMA_KVA(sc->sc_admin_q->q_cq_dmamem);
+ struct nvme_queue *q = sc->sc_admin_q;
+ int tail;
+ u_int16_t flags;
+
+ /* submit command */
+ tail = q->q_sq_tail;
+ if (++q->q_sq_tail >= q->q_entries)
+ q->q_sq_tail = 0;
+
+ asqe += tail;
+ bus_dmamap_sync(sc->sc_dmat, NVME_DMA_MAP(q->q_sq_dmamem),
+ sizeof(*sqe) * tail, sizeof(*sqe), BUS_DMASYNC_POSTWRITE);
+ *asqe = *sqe;
+ asqe->cid = cid;
+ bus_dmamap_sync(sc->sc_dmat, NVME_DMA_MAP(q->q_sq_dmamem),
+ sizeof(*sqe) * tail, sizeof(*sqe), BUS_DMASYNC_PREWRITE);
+
+ nvme_write4(sc, q->q_sqtdbl, q->q_sq_tail);
+
+ /* wait for completion */
+ acqe += q->q_cq_head;
+ for (;;) {
+ nvme_dmamem_sync(sc, q->q_cq_dmamem, BUS_DMASYNC_POSTREAD);
+ flags = lemtoh16(&acqe->flags);
+ if ((flags & NVME_CQE_PHASE) == q->q_cq_phase)
+ break;
+
+ /* not done yet */
+ delay(10);
+ }
+
+ if (++q->q_cq_head >= q->q_entries) {
+ q->q_cq_head = 0;
+ q->q_cq_phase ^= NVME_CQE_PHASE;
+ }
+ nvme_write4(sc, q->q_cqhdbl, q->q_cq_head);
+ if ((NVME_CQE_SC(flags) != NVME_CQE_SC_SUCCESS) || (acqe->cid != cid))
+ return (EIO);
+
+ return (0);
+}
+
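+/*
+ * All state for the hibernate io path lives in the scratch page the
+ * hibernate code hands us, so nothing in the driver's normal queues
+ * or allocations is touched: this is the "side effect free" part.
+ */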
+int
+nvme_hibernate_io(dev_t dev, daddr_t blkno, vaddr_t addr, size_t size,
+ int op, void *page)
+{
+ struct nvme_hibernate_page {
+ struct nvme_sqe_io sqe[2];
+ struct nvme_cqe cqe[2];
+ u_int64_t prpl[MAXPHYS / PAGE_SIZE];
+
+ struct nvme_softc *sc;
+ int nsid;
+ int sq_tail;
+ int cq_head;
+ int cqe_phase;
+
+ daddr_t poffset;
+ size_t psize;
+
+ } *my = page;
+ struct nvme_sqe_io *isqe;
+ struct nvme_cqe *icqe;
+ paddr_t data_phys, page_phys;
+ u_int64_t data_bus_phys, page_bus_phys;
+ u_int16_t flags;
+ int i;
+
+ if (op == HIB_INIT) {
+ struct device *disk;
+ struct device *scsibus;
+ extern struct cfdriver sd_cd;
+ struct scsi_link *link;
+ struct scsibus_softc *bus_sc;
+ struct nvme_sqe_q qsqe;
+ struct nvme_cqe qcqe;
+
+ /* find nvme softc */
+ disk = disk_lookup(&sd_cd, DISKUNIT(dev));
+ scsibus = disk->dv_parent;
+ my->sc = (struct nvme_softc *)disk->dv_parent->dv_parent;
+
+ /* find scsi_link, which tells us the target */
+ my->nsid = 0;
+ bus_sc = (struct scsibus_softc *)scsibus;
+ SLIST_FOREACH(link, &bus_sc->sc_link_list, bus_list) {
+ if (link->device_softc == disk) {
+ my->nsid = link->target + 1;
+ break;
+ }
+ }
+ if (my->nsid == 0)
+ return (EIO);
+
+ my->poffset = blkno;
+ my->psize = size;
+
+ /* ensure io queue is idle? */
+
+ memset(my->sqe, 0, sizeof(my->sqe));
+ memset(my->cqe, 0, sizeof(my->cqe));
+ my->sq_tail = 0;
+ my->cq_head = 0;
+ my->cqe_phase = NVME_CQE_PHASE;
+
+ pmap_extract(pmap_kernel(), (vaddr_t)page, &page_phys);
+
+ /*
+ * create io queue; this uses the admin queue, but that's ok
+ * because it gets reset during resume.
+ */
+ qsqe.opcode = NVM_ADMIN_ADD_IOCQ;
+ htolem64(&qsqe.prp1, page_phys +
+ offsetof(struct nvme_hibernate_page, cqe));
+ htolem16(&qsqe.qsize, nitems(my->cqe) - 1);
+ htolem16(&qsqe.qid, NVME_HIB_Q);
+ qsqe.qflags = NVM_SQE_CQ_IEN | NVM_SQE_Q_PC;
+ if (nvme_hibernate_admin_cmd(my->sc, (struct nvme_sqe *)&qsqe, &qcqe,
+ 0xfff1) != 0)
+ return (EIO);
+
+ qsqe.opcode = NVM_ADMIN_ADD_IOSQ;
+ htolem64(&qsqe.prp1, page_phys +
+ offsetof(struct nvme_hibernate_page, sqe));
+ htolem16(&qsqe.qsize, nitems(my->sqe) - 1);
+ htolem16(&qsqe.qid, NVME_HIB_Q);
+ htolem16(&qsqe.cqid, NVME_HIB_Q);
+ qsqe.qflags = NVM_SQE_Q_PC;
+ if (nvme_hibernate_admin_cmd(my->sc, (struct nvme_sqe *)&qsqe, &qcqe,
+ 0xfff2) != 0)
+ return (EIO);
+
+ return (0);
+ }
+
+ if (op != HIB_W)
+ return (0);
+
+ isqe = &my->sqe[my->sq_tail];
+ if (++my->sq_tail == nitems(my->sqe))
+ my->sq_tail = 0;
+
+ isqe->opcode = NVM_CMD_WRITE;
+ htolem32(&isqe->nsid, my->nsid);
+
+ /* prp1 covers the first page; larger transfers need prp2 or a prp list */
+ pmap_extract(pmap_kernel(), addr, &data_phys);
+ data_bus_phys = data_phys;
+ htolem64(&isqe->entry.prp[0], data_bus_phys);
+ if ((size > my->sc->sc_mps) && (size <= my->sc->sc_mps * 2)) {
+ htolem64(&isqe->entry.prp[1], data_bus_phys + my->sc->sc_mps);
+ } else if (size > my->sc->sc_mps * 2) {
+ pmap_extract(pmap_kernel(), (vaddr_t)page, &page_phys);
+ page_bus_phys = page_phys;
+ htolem64(&isqe->entry.prp[1], page_bus_phys +
+ offsetof(struct nvme_hibernate_page, prpl));
+ for (i = 1; i < (size / my->sc->sc_mps); i++) {
+ htolem64(&my->prpl[i - 1], data_bus_phys +
+ (i * my->sc->sc_mps));
+ }
+ }
+
+ isqe->slba = blkno + my->poffset;
+ isqe->nlb = (size / DEV_BSIZE) - 1;
+ isqe->cid = blkno % 0xffff;
+
+ nvme_write4(my->sc, NVME_SQTDBL(NVME_HIB_Q, my->sc->sc_dstrd),
+ my->sq_tail);
+
+ icqe = &my->cqe[my->cq_head];
+ for (;;) {
+ flags = lemtoh16(&icqe->flags);
+ if ((flags & NVME_CQE_PHASE) == my->cqe_phase)
+ break;
+
+ /* not done yet */
+ delay(10);
+ }
+
+ if (++my->cq_head >= nitems(my->cqe)) {
+ my->cq_head = 0;
+ my->cqe_phase ^= NVME_CQE_PHASE;
+ }
+ nvme_write4(my->sc, NVME_CQHDBL(NVME_HIB_Q, my->sc->sc_dstrd),
+ my->cq_head);
+ if ((NVME_CQE_SC(flags) != NVME_CQE_SC_SUCCESS) ||
+ (icqe->cid != blkno % 0xffff))
+ return (EIO);
+
+ return (0);
+}
+
+#endif
Index: conf/files
===================================================================
RCS file: /cvs/src/sys/conf/files,v
retrieving revision 1.645
diff -u -p -r1.645 files
--- conf/files 15 May 2017 11:23:25 -0000 1.645
+++ conf/files 28 May 2017 08:33:46 -0000
@@ -193,7 +193,7 @@ file dev/ic/ahci.c ahci | (ahci_pci |
# NVM Express Controller
device nvme: scsi
-file dev/ic/nvme.c nvme
+file dev/ic/nvme.c nvme needs-flag
# LSI Logic Fusion-MPT Message Passing Interface
device mpi: scsi