On Mon, 2020-03-16 at 07:28 -0700, Klaus Jensen wrote:
> From: Klaus Jensen
>
> Change the prefix of all nvme device related trace events to 'nvme_dev'
> to not clash with trace events from the nvme block driver.
>
> Signed-off-by: Klaus Jensen
> Acked-by: Keith Busch
> Reviewed-by: Maxim Levitsky
> ---
> hw/block/nvme.c | 188 +-
> hw/block/trace-events | 172 +++---
> 2 files changed, 180 insertions(+), 180 deletions(-)
>
> diff --git a/hw/block/nvme.c b/hw/block/nvme.c
> index d28335cbf377..3e4b18956ed2 100644
> --- a/hw/block/nvme.c
> +++ b/hw/block/nvme.c
> @@ -112,16 +112,16 @@ static void nvme_irq_assert(NvmeCtrl *n, NvmeCQueue *cq)
> {
> if (cq->irq_enabled) {
> if (msix_enabled(&(n->parent_obj))) {
> -trace_nvme_irq_msix(cq->vector);
> +trace_nvme_dev_irq_msix(cq->vector);
> msix_notify(&(n->parent_obj), cq->vector);
> } else {
> -trace_nvme_irq_pin();
> +trace_nvme_dev_irq_pin();
> assert(cq->cqid < 64);
> n->irq_status |= 1 << cq->cqid;
> nvme_irq_check(n);
> }
> } else {
> -trace_nvme_irq_masked();
> +trace_nvme_dev_irq_masked();
> }
> }
>
> @@ -146,7 +146,7 @@ static uint16_t nvme_map_prp(QEMUSGList *qsg,
> QEMUIOVector *iov, uint64_t prp1,
> int num_prps = (len >> n->page_bits) + 1;
>
> if (unlikely(!prp1)) {
> -trace_nvme_err_invalid_prp();
> +trace_nvme_dev_err_invalid_prp();
> return NVME_INVALID_FIELD | NVME_DNR;
> } else if (n->cmbsz && prp1 >= n->ctrl_mem.addr &&
> prp1 < n->ctrl_mem.addr + int128_get64(n->ctrl_mem.size)) {
> @@ -160,7 +160,7 @@ static uint16_t nvme_map_prp(QEMUSGList *qsg,
> QEMUIOVector *iov, uint64_t prp1,
> len -= trans_len;
> if (len) {
> if (unlikely(!prp2)) {
> -trace_nvme_err_invalid_prp2_missing();
> +trace_nvme_dev_err_invalid_prp2_missing();
> goto unmap;
> }
> if (len > n->page_size) {
> @@ -176,7 +176,7 @@ static uint16_t nvme_map_prp(QEMUSGList *qsg,
> QEMUIOVector *iov, uint64_t prp1,
>
> if (i == n->max_prp_ents - 1 && len > n->page_size) {
> if (unlikely(!prp_ent || prp_ent & (n->page_size - 1))) {
> -trace_nvme_err_invalid_prplist_ent(prp_ent);
> +trace_nvme_dev_err_invalid_prplist_ent(prp_ent);
> goto unmap;
> }
>
> @@ -189,7 +189,7 @@ static uint16_t nvme_map_prp(QEMUSGList *qsg,
> QEMUIOVector *iov, uint64_t prp1,
> }
>
> if (unlikely(!prp_ent || prp_ent & (n->page_size - 1))) {
> -trace_nvme_err_invalid_prplist_ent(prp_ent);
> +trace_nvme_dev_err_invalid_prplist_ent(prp_ent);
> goto unmap;
> }
>
> @@ -204,7 +204,7 @@ static uint16_t nvme_map_prp(QEMUSGList *qsg,
> QEMUIOVector *iov, uint64_t prp1,
> }
> } else {
> if (unlikely(prp2 & (n->page_size - 1))) {
> -trace_nvme_err_invalid_prp2_align(prp2);
> +trace_nvme_dev_err_invalid_prp2_align(prp2);
> goto unmap;
> }
> if (qsg->nsg) {
> @@ -252,20 +252,20 @@ static uint16_t nvme_dma_read_prp(NvmeCtrl *n, uint8_t
> *ptr, uint32_t len,
> QEMUIOVector iov;
> uint16_t status = NVME_SUCCESS;
>
> -trace_nvme_dma_read(prp1, prp2);
> +trace_nvme_dev_dma_read(prp1, prp2);
>
> if (nvme_map_prp(&qsg, &iov, prp1, prp2, len, n)) {
> return NVME_INVALID_FIELD | NVME_DNR;
> }
> if (qsg.nsg > 0) {
> if (unlikely(dma_buf_read(ptr, len, &qsg))) {
> -trace_nvme_err_invalid_dma();
> +trace_nvme_dev_err_invalid_dma();
> status = NVME_INVALID_FIELD | NVME_DNR;
> }
> qemu_sglist_destroy(&qsg);
> } else {
> if (unlikely(qemu_iovec_from_buf(&iov, 0, ptr, len) != len)) {
> -trace_nvme_err_invalid_dma();
> +trace_nvme_dev_err_invalid_dma();
> status = NVME_INVALID_FIELD | NVME_DNR;
> }
> qemu_iovec_destroy(&iov);
> @@ -354,7 +354,7 @@ static uint16_t nvme_write_zeros(NvmeCtrl *n,
> NvmeNamespace *ns, NvmeCmd *cmd,
> uint32_t count = nlb << data_shift;
>
> if (unlikely(slba + nlb > ns->id_ns.nsze)) {
> -trace_nvme_err_invalid_lba_range(slba, nlb, ns->id_ns.nsze);
> +trace_nvme_dev_err_invalid_lba_range(slba, nlb, ns->id_ns.nsze);
> return NVME_LBA_RANGE | NVME_DNR;
> }
>
> @@ -382,11 +382,11 @@ static uint16_t nvme_rw(NvmeCtrl *n, NvmeNamespace *ns,
> NvmeCmd *cmd,
> int is_write = rw->opcode == NVME_CMD_WRITE ? 1 : 0;
> enum BlockAcctType acct = is_write ? BLOCK_ACCT_WRITE :