Re: [PATCH v6 35/42] nvme: handle dma errors
On Mar 25 12:58, Maxim Levitsky wrote: > On Mon, 2020-03-16 at 07:29 -0700, Klaus Jensen wrote: > > From: Klaus Jensen > > > > Handling DMA errors gracefully is required for the device to pass the > > block/011 test ("disable PCI device while doing I/O") in the blktests > > suite. > > > > With this patch the device passes the test by retrying "critical" > > transfers (posting of completion entries and processing of submission > > queue entries). > > > > If DMA errors occur at any other point in the execution of the command > > (say, while mapping the PRPs), the command is aborted with a Data > > Transfer Error status code. > > > > Signed-off-by: Klaus Jensen > > Acked-by: Keith Busch > > --- > > hw/block/nvme.c | 45 --- > > hw/block/trace-events | 2 ++ > > include/block/nvme.h | 2 +- > > 3 files changed, 37 insertions(+), 12 deletions(-) > > > > diff --git a/hw/block/nvme.c b/hw/block/nvme.c > > index 15ca2417af04..49d323566393 100644 > > --- a/hw/block/nvme.c > > +++ b/hw/block/nvme.c > > @@ -164,7 +164,7 @@ static uint16_t nvme_map_addr_cmb(NvmeCtrl *n, > > QEMUIOVector *iov, hwaddr addr, > >size_t len) > > { > > if (!nvme_addr_is_cmb(n, addr) || !nvme_addr_is_cmb(n, addr + len)) { > > -return NVME_DATA_TRAS_ERROR; > > +return NVME_DATA_TRANSFER_ERROR; > > Minor nitpick: this is also a non functional refactoring. > I don't think that each piece of a refactoring should be in a separate patch, > so I usually group all the non functional (aka cosmetic) refactoring in one > patch, usually the first in the series. > But I try not to leave such refactoring in the functional patches. > > However, since there is not that much cases like that left, I don't mind > leaving this particular case as is. > Noted. Keeping it here for now ;) > > Reviewed-by: Maxim Levitsky > > Best regards, > Maxim Levitsky >
Re: [PATCH v6 35/42] nvme: handle dma errors
On Mon, 2020-03-16 at 07:29 -0700, Klaus Jensen wrote: > From: Klaus Jensen > > Handling DMA errors gracefully is required for the device to pass the > block/011 test ("disable PCI device while doing I/O") in the blktests > suite. > > With this patch the device passes the test by retrying "critical" > transfers (posting of completion entries and processing of submission > queue entries). > > If DMA errors occur at any other point in the execution of the command > (say, while mapping the PRPs), the command is aborted with a Data > Transfer Error status code. > > Signed-off-by: Klaus Jensen > Acked-by: Keith Busch > --- > hw/block/nvme.c | 45 --- > hw/block/trace-events | 2 ++ > include/block/nvme.h | 2 +- > 3 files changed, 37 insertions(+), 12 deletions(-) > > diff --git a/hw/block/nvme.c b/hw/block/nvme.c > index 15ca2417af04..49d323566393 100644 > --- a/hw/block/nvme.c > +++ b/hw/block/nvme.c > @@ -74,14 +74,14 @@ static inline bool nvme_addr_is_cmb(NvmeCtrl *n, hwaddr > addr) > return addr >= low && addr < hi; > } > > -static void nvme_addr_read(NvmeCtrl *n, hwaddr addr, void *buf, int size) > +static int nvme_addr_read(NvmeCtrl *n, hwaddr addr, void *buf, int size) > { > if (n->bar.cmbsz && nvme_addr_is_cmb(n, addr)) { > memcpy(buf, nvme_addr_to_cmb(n, addr), size); > -return; > +return 0; > } > > -pci_dma_read(&n->parent_obj, addr, buf, size); > +return pci_dma_read(&n->parent_obj, addr, buf, size); > } > > static int nvme_check_sqid(NvmeCtrl *n, uint16_t sqid) > @@ -164,7 +164,7 @@ static uint16_t nvme_map_addr_cmb(NvmeCtrl *n, > QEMUIOVector *iov, hwaddr addr, >size_t len) > { > if (!nvme_addr_is_cmb(n, addr) || !nvme_addr_is_cmb(n, addr + len)) { > -return NVME_DATA_TRAS_ERROR; > +return NVME_DATA_TRANSFER_ERROR; Minor nitpick: this is also a non functional refactoring. I don't think that each piece of a refactoring should be in a separate patch, so I usually group all the non functional (aka cosmetic) refactoring in one patch, usually the first in the series. 
But I try not to leave such refactoring in the functional patches. However, since there is not that much cases like that left, I don't mind leaving this particular case as is. > } > > qemu_iovec_add(iov, nvme_addr_to_cmb(n, addr), len); > @@ -213,6 +213,7 @@ static uint16_t nvme_map_prp(NvmeCtrl *n, QEMUSGList > *qsg, QEMUIOVector *iov, > int num_prps = (len >> n->page_bits) + 1; > uint16_t status; > bool prp_list_in_cmb = false; > +int ret; > > trace_nvme_dev_map_prp(nvme_cid(req), trans_len, len, prp1, prp2, > num_prps); > @@ -252,7 +253,12 @@ static uint16_t nvme_map_prp(NvmeCtrl *n, QEMUSGList > *qsg, QEMUIOVector *iov, > > nents = (len + n->page_size - 1) >> n->page_bits; > prp_trans = MIN(n->max_prp_ents, nents) * sizeof(uint64_t); > -nvme_addr_read(n, prp2, (void *)prp_list, prp_trans); > +ret = nvme_addr_read(n, prp2, (void *)prp_list, prp_trans); > +if (ret) { > +trace_nvme_dev_err_addr_read(prp2); > +status = NVME_DATA_TRANSFER_ERROR; > +goto unmap; > +} > while (len != 0) { > uint64_t prp_ent = le64_to_cpu(prp_list[i]); > > @@ -271,8 +277,13 @@ static uint16_t nvme_map_prp(NvmeCtrl *n, QEMUSGList > *qsg, QEMUIOVector *iov, > i = 0; > nents = (len + n->page_size - 1) >> n->page_bits; > prp_trans = MIN(n->max_prp_ents, nents) * > sizeof(uint64_t); > -nvme_addr_read(n, prp_ent, (void *)prp_list, > -prp_trans); > +ret = nvme_addr_read(n, prp_ent, (void *)prp_list, > + prp_trans); > +if (ret) { > +trace_nvme_dev_err_addr_read(prp_ent); > +status = NVME_DATA_TRANSFER_ERROR; > +goto unmap; > +} > prp_ent = le64_to_cpu(prp_list[i]); > } > > @@ -466,6 +477,7 @@ static void nvme_post_cqes(void *opaque) > NvmeCQueue *cq = opaque; > NvmeCtrl *n = cq->ctrl; > NvmeRequest *req, *next; > +int ret; > > QTAILQ_FOREACH_SAFE(req, &cq->req_list, entry, next) { > NvmeSQueue *sq; > @@ -475,15 +487,21 @@ static void nvme_post_cqes(void *opaque) > break; > } > > -QTAILQ_REMOVE(&cq->req_list, req, entry); > sq = req->sq; > req->cqe.status = cpu_to_le16((req->status << 1) | cq->phase); > 
req->cqe.sq_id = cpu_to_le16(sq->sqid); > req->cqe.sq_head = cpu_to_le16(sq->head); > addr = cq->dma_addr + cq->tail * n->cqe_size; > +ret =
[PATCH v6 35/42] nvme: handle dma errors
From: Klaus Jensen Handling DMA errors gracefully is required for the device to pass the block/011 test ("disable PCI device while doing I/O") in the blktests suite. With this patch the device passes the test by retrying "critical" transfers (posting of completion entries and processing of submission queue entries). If DMA errors occur at any other point in the execution of the command (say, while mapping the PRPs), the command is aborted with a Data Transfer Error status code. Signed-off-by: Klaus Jensen Acked-by: Keith Busch --- hw/block/nvme.c | 45 --- hw/block/trace-events | 2 ++ include/block/nvme.h | 2 +- 3 files changed, 37 insertions(+), 12 deletions(-) diff --git a/hw/block/nvme.c b/hw/block/nvme.c index 15ca2417af04..49d323566393 100644 --- a/hw/block/nvme.c +++ b/hw/block/nvme.c @@ -74,14 +74,14 @@ static inline bool nvme_addr_is_cmb(NvmeCtrl *n, hwaddr addr) return addr >= low && addr < hi; } -static void nvme_addr_read(NvmeCtrl *n, hwaddr addr, void *buf, int size) +static int nvme_addr_read(NvmeCtrl *n, hwaddr addr, void *buf, int size) { if (n->bar.cmbsz && nvme_addr_is_cmb(n, addr)) { memcpy(buf, nvme_addr_to_cmb(n, addr), size); -return; +return 0; } -pci_dma_read(&n->parent_obj, addr, buf, size); +return pci_dma_read(&n->parent_obj, addr, buf, size); } static int nvme_check_sqid(NvmeCtrl *n, uint16_t sqid) @@ -164,7 +164,7 @@ static uint16_t nvme_map_addr_cmb(NvmeCtrl *n, QEMUIOVector *iov, hwaddr addr, size_t len) { if (!nvme_addr_is_cmb(n, addr) || !nvme_addr_is_cmb(n, addr + len)) { -return NVME_DATA_TRAS_ERROR; +return NVME_DATA_TRANSFER_ERROR; } qemu_iovec_add(iov, nvme_addr_to_cmb(n, addr), len); @@ -213,6 +213,7 @@ static uint16_t nvme_map_prp(NvmeCtrl *n, QEMUSGList *qsg, QEMUIOVector *iov, int num_prps = (len >> n->page_bits) + 1; uint16_t status; bool prp_list_in_cmb = false; +int ret; trace_nvme_dev_map_prp(nvme_cid(req), trans_len, len, prp1, prp2, num_prps); @@ -252,7 +253,12 @@ static uint16_t nvme_map_prp(NvmeCtrl *n, QEMUSGList *qsg, 
QEMUIOVector *iov, nents = (len + n->page_size - 1) >> n->page_bits; prp_trans = MIN(n->max_prp_ents, nents) * sizeof(uint64_t); -nvme_addr_read(n, prp2, (void *)prp_list, prp_trans); +ret = nvme_addr_read(n, prp2, (void *)prp_list, prp_trans); +if (ret) { +trace_nvme_dev_err_addr_read(prp2); +status = NVME_DATA_TRANSFER_ERROR; +goto unmap; +} while (len != 0) { uint64_t prp_ent = le64_to_cpu(prp_list[i]); @@ -271,8 +277,13 @@ static uint16_t nvme_map_prp(NvmeCtrl *n, QEMUSGList *qsg, QEMUIOVector *iov, i = 0; nents = (len + n->page_size - 1) >> n->page_bits; prp_trans = MIN(n->max_prp_ents, nents) * sizeof(uint64_t); -nvme_addr_read(n, prp_ent, (void *)prp_list, -prp_trans); +ret = nvme_addr_read(n, prp_ent, (void *)prp_list, + prp_trans); +if (ret) { +trace_nvme_dev_err_addr_read(prp_ent); +status = NVME_DATA_TRANSFER_ERROR; +goto unmap; +} prp_ent = le64_to_cpu(prp_list[i]); } @@ -466,6 +477,7 @@ static void nvme_post_cqes(void *opaque) NvmeCQueue *cq = opaque; NvmeCtrl *n = cq->ctrl; NvmeRequest *req, *next; +int ret; QTAILQ_FOREACH_SAFE(req, &cq->req_list, entry, next) { NvmeSQueue *sq; @@ -475,15 +487,21 @@ static void nvme_post_cqes(void *opaque) break; } -QTAILQ_REMOVE(&cq->req_list, req, entry); sq = req->sq; req->cqe.status = cpu_to_le16((req->status << 1) | cq->phase); req->cqe.sq_id = cpu_to_le16(sq->sqid); req->cqe.sq_head = cpu_to_le16(sq->head); addr = cq->dma_addr + cq->tail * n->cqe_size; +ret = pci_dma_write(&n->parent_obj, addr, (void *)&req->cqe, +sizeof(req->cqe)); +if (ret) { +trace_nvme_dev_err_addr_write(addr); +timer_mod(cq->timer, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + + 500 * SCALE_MS); +break; +} +QTAILQ_REMOVE(&cq->req_list, req, entry); nvme_inc_cq_tail(cq); -pci_dma_write(&n->parent_obj, addr, (void *)&req->cqe, -sizeof(req->cqe)); nvme_req_clear(req); QTAILQ_INSERT_TAIL(&sq->req_list, req, entry); } @@ -1650,7 +1668,12 @@ static void nvme_process_sq(void *opaque) while (!(nvme_sq_empty(sq) || QTAILQ_EMPTY(&sq->req_list))) { addr = sq->dma_addr +