On 2021/10/19 22:10, Kevin Laatz wrote: > When a suitable device is found during the bus scan/probe, create a dmadev > instance for each HW queue. Internal structures required for device > creation are also added. >
[snip] > static void * > idxd_bus_mmap_wq(struct rte_dsa_device *dev) > { > @@ -206,6 +218,7 @@ idxd_probe_dsa(struct rte_dsa_device *dev) > return -1; > idxd.max_batch_size = ret; > idxd.qid = dev->addr.wq_id; > + idxd.u.bus.dsa_id = dev->addr.device_id; > idxd.sva_support = 1; > > idxd.portal = idxd_bus_mmap_wq(dev); > @@ -214,6 +227,12 @@ idxd_probe_dsa(struct rte_dsa_device *dev) > return -ENOENT; > } > > + ret = idxd_dmadev_create(dev->wq_name, &dev->device, &idxd, > &idxd_bus_ops); > + if (ret) { > + IDXD_PMD_ERR("Failed to create rawdev %s", dev->wq_name); rawdev -> dmadev > + return ret; > + } > + > return 0; > } > > diff --git a/drivers/dma/idxd/idxd_common.c b/drivers/dma/idxd/idxd_common.c > index e00ddbe5ef..5abff34292 100644 > --- a/drivers/dma/idxd/idxd_common.c > +++ b/drivers/dma/idxd/idxd_common.c > @@ -2,10 +2,71 @@ > * Copyright 2021 Intel Corporation > */ > > +#include <rte_malloc.h> > +#include <rte_common.h> > #include <rte_log.h> > > #include "idxd_internal.h" > > +#define IDXD_PMD_NAME_STR "dmadev_idxd" > + > +int > +idxd_dmadev_create(const char *name, struct rte_device *dev, > + const struct idxd_dmadev *base_idxd, > + const struct rte_dma_dev_ops *ops) > +{ > + struct idxd_dmadev *idxd = NULL; > + struct rte_dma_dev *dmadev = NULL; > + int ret = 0; > + > + if (!name) { > + IDXD_PMD_ERR("Invalid name of the device!"); > + ret = -EINVAL; > + goto cleanup; > + } > + > + /* Allocate device structure */ > + dmadev = rte_dma_pmd_allocate(name, dev->numa_node, sizeof(struct > idxd_dmadev)); > + if (dmadev == NULL) { > + IDXD_PMD_ERR("Unable to allocate raw device"); raw -> dma. It would be better to check for other uses of the 'raw' keyword throughout the patch set. > + ret = -ENOMEM; > + goto cleanup; > + } > + dmadev->dev_ops = ops; > + dmadev->device = dev; > + > + idxd = dmadev->data->dev_private; > + *idxd = *base_idxd; /* copy over the main fields already passed in */ > + idxd->dmadev = dmadev; > + > + /* allocate batch index ring and completion ring. 
> + * The +1 is because we can never fully use > + * the ring, otherwise read == write means both full and empty. > + */ > + idxd->batch_comp_ring = rte_zmalloc(NULL, > (sizeof(idxd->batch_idx_ring[0]) + > + sizeof(idxd->batch_comp_ring[0])) * > (idxd->max_batches + 1), > + sizeof(idxd->batch_comp_ring[0])); I infer that batch_comp_ring will be accessed by hardware, so it may be better to use rte_zmalloc_socket() here: rte_zmalloc() allocates on the socket returned by rte_socket_id(), which may be a different socket from the device's at the time of the call. > + if (idxd->batch_comp_ring == NULL) { > + IDXD_PMD_ERR("Unable to reserve memory for batch data\n"); > + ret = -ENOMEM; > [snip]