Add support for the eventdev start entry point.

Signed-off-by: Timothy McDaniel <timothy.mcdan...@intel.com>
---
 drivers/event/dlb/dlb.c                  | 225 +++++++++++++++++++++++++------
 drivers/event/dlb/dlb_iface.c            |   3 +
 drivers/event/dlb/dlb_iface.h            |   3 +
 drivers/event/dlb/pf/base/dlb_resource.c | 142 +++++++++++++++++++
 drivers/event/dlb/pf/dlb_pf.c            |  23 ++++
 5 files changed, 352 insertions(+), 44 deletions(-)

diff --git a/drivers/event/dlb/dlb.c b/drivers/event/dlb/dlb.c
index 4f56869..5f2a7fa 100644
--- a/drivers/event/dlb/dlb.c
+++ b/drivers/event/dlb/dlb.c
@@ -1443,6 +1443,7 @@ dlb_eventdev_ldb_queue_setup(struct rte_eventdev *dev,
        return 0;
 }
 
+
 static int dlb_num_dir_queues_setup(struct dlb_eventdev *dlb)
 {
        int i, num = 0;
@@ -1656,6 +1657,47 @@ dlb_eventdev_port_setup(struct rte_eventdev *dev,
 }
 
 static int
+dlb_eventdev_reapply_configuration(struct rte_eventdev *dev)
+{
+       struct dlb_eventdev *dlb = dlb_pmd_priv(dev);
+       int ret, i;
+
+       /* If an event queue or port was previously configured, but hasn't been
+        * reconfigured, reapply its original configuration.
+        */
+       for (i = 0; i < dlb->num_queues; i++) {
+               struct dlb_eventdev_queue *ev_queue;
+
+               ev_queue = &dlb->ev_queues[i];
+
+               if (ev_queue->qm_queue.config_state != DLB_PREV_CONFIGURED)
+                       continue;
+
+               ret = dlb_eventdev_queue_setup(dev, i, &ev_queue->conf);
+               if (ret < 0) {
+                       DLB_LOG_ERR("dlb: failed to reconfigure queue %d", i);
+                       return ret;
+               }
+       }
+
+       for (i = 0; i < dlb->num_ports; i++) {
+               struct dlb_eventdev_port *ev_port = &dlb->ev_ports[i];
+
+               if (ev_port->qm_port.config_state != DLB_PREV_CONFIGURED)
+                       continue;
+
+               ret = dlb_eventdev_port_setup(dev, i, &ev_port->conf);
+               if (ret < 0) {
+                       DLB_LOG_ERR("dlb: failed to reconfigure ev_port %d",
+                                   i);
+                       return ret;
+               }
+       }
+
+       return 0;
+}
+
+static int
 set_dev_id(const char *key __rte_unused,
           const char *value,
           void *opaque)
@@ -1791,6 +1833,50 @@ dlb_validate_port_link(struct dlb_eventdev_port *ev_port,
        return 0;
 }
 
+static int32_t
+dlb_hw_create_dir_queue(struct dlb_eventdev *dlb, int32_t qm_port_id)
+{
+       struct dlb_hw_dev *handle = &dlb->qm_instance;
+       struct dlb_create_dir_queue_args cfg;
+       struct dlb_cmd_response response;
+       int32_t ret;
+
+       cfg.response = (uintptr_t)&response;
+
+       /* The directed port is always configured before its queue */
+       cfg.port_id = qm_port_id;
+
+       ret = dlb_iface_dir_queue_create(handle, &cfg);
+       if (ret < 0) {
+               DLB_LOG_ERR("dlb: create DIR event queue error, ret=%d (driver status: %s)\n",
+                           ret, dlb_error_strings[response.status]);
+               return -EINVAL;
+       }
+
+       return response.id;
+}
+
+static int
+dlb_eventdev_dir_queue_setup(struct dlb_eventdev *dlb,
+                            struct dlb_eventdev_queue *ev_queue,
+                            struct dlb_eventdev_port *ev_port)
+{
+       int32_t qm_qid;
+
+       qm_qid = dlb_hw_create_dir_queue(dlb, ev_port->qm_port.id);
+
+       if (qm_qid < 0) {
+               DLB_LOG_ERR("Failed to create the DIR queue\n");
+               return qm_qid;
+       }
+
+       dlb->qm_dir_to_ev_queue_id[qm_qid] = ev_queue->id;
+
+       ev_queue->qm_queue.id = qm_qid;
+
+       return 0;
+}
+
 static int16_t
 dlb_hw_map_ldb_qid_to_port(struct dlb_hw_dev *handle,
                           uint32_t qm_port_id,
@@ -1866,50 +1952,6 @@ dlb_event_queue_join_ldb(struct dlb_eventdev *dlb,
        return ret;
 }
 
-static int32_t
-dlb_hw_create_dir_queue(struct dlb_eventdev *dlb, int32_t qm_port_id)
-{
-       struct dlb_hw_dev *handle = &dlb->qm_instance;
-       struct dlb_create_dir_queue_args cfg;
-       struct dlb_cmd_response response;
-       int32_t ret;
-
-       cfg.response = (uintptr_t)&response;
-
-       /* The directed port is always configured before its queue */
-       cfg.port_id = qm_port_id;
-
-       ret = dlb_iface_dir_queue_create(handle, &cfg);
-       if (ret < 0) {
-               DLB_LOG_ERR("dlb: create DIR event queue error, ret=%d (driver status: %s)\n",
-                           ret, dlb_error_strings[response.status]);
-               return -EINVAL;
-       }
-
-       return response.id;
-}
-
-static int
-dlb_eventdev_dir_queue_setup(struct dlb_eventdev *dlb,
-                            struct dlb_eventdev_queue *ev_queue,
-                            struct dlb_eventdev_port *ev_port)
-{
-       int32_t qm_qid;
-
-       qm_qid = dlb_hw_create_dir_queue(dlb, ev_port->qm_port.id);
-
-       if (qm_qid < 0) {
-               DLB_LOG_ERR("Failed to create the DIR queue\n");
-               return qm_qid;
-       }
-
-       dlb->qm_dir_to_ev_queue_id[qm_qid] = ev_queue->id;
-
-       ev_queue->qm_queue.id = qm_qid;
-
-       return 0;
-}
-
 static int
 dlb_do_port_link(struct rte_eventdev *dev,
                 struct dlb_eventdev_queue *ev_queue,
@@ -1941,6 +1983,40 @@ dlb_do_port_link(struct rte_eventdev *dev,
 }
 
 static int
+dlb_eventdev_apply_port_links(struct rte_eventdev *dev)
+{
+       struct dlb_eventdev *dlb = dlb_pmd_priv(dev);
+       int i;
+
+       /* Perform requested port->queue links */
+       for (i = 0; i < dlb->num_ports; i++) {
+               struct dlb_eventdev_port *ev_port = &dlb->ev_ports[i];
+               int j;
+
+               for (j = 0; j < DLB_MAX_NUM_QIDS_PER_LDB_CQ; j++) {
+                       struct dlb_eventdev_queue *ev_queue;
+                       uint8_t prio, queue_id;
+
+                       if (!ev_port->link[j].valid)
+                               continue;
+
+                       prio = ev_port->link[j].priority;
+                       queue_id = ev_port->link[j].queue_id;
+
+                       if (dlb_validate_port_link(ev_port, queue_id, true, j))
+                               return -EINVAL;
+
+                       ev_queue = &dlb->ev_queues[queue_id];
+
+                       if (dlb_do_port_link(dev, ev_queue, ev_port, prio))
+                               return -EINVAL;
+               }
+       }
+
+       return 0;
+}
+
+static int
 dlb_eventdev_port_link(struct rte_eventdev *dev, void *event_port,
                       const uint8_t queues[], const uint8_t priorities[],
                       uint16_t nb_links)
@@ -2030,12 +2106,73 @@ dlb_eventdev_port_link(struct rte_eventdev *dev, void *event_port,
        return i;
 }
 
+static int
+dlb_eventdev_start(struct rte_eventdev *dev)
+{
+       struct dlb_eventdev *dlb = dlb_pmd_priv(dev);
+       struct dlb_hw_dev *handle = &dlb->qm_instance;
+       struct dlb_start_domain_args cfg;
+       struct dlb_cmd_response response;
+       int ret, i;
+
+       rte_spinlock_lock(&dlb->qm_instance.resource_lock);
+       if (dlb->run_state != DLB_RUN_STATE_STOPPED) {
+               DLB_LOG_ERR("bad state %d for dev_start\n",
+                           (int)dlb->run_state);
+               rte_spinlock_unlock(&dlb->qm_instance.resource_lock);
+               return -EINVAL;
+       }
+       dlb->run_state  = DLB_RUN_STATE_STARTING;
+       rte_spinlock_unlock(&dlb->qm_instance.resource_lock);
+
+       /* If the device was configured more than once, some event ports and/or
+        * queues may need to be reconfigured.
+        */
+       ret = dlb_eventdev_reapply_configuration(dev);
+       if (ret)
+               return ret;
+
+       /* The DLB PMD delays port links until the device is started. */
+       ret = dlb_eventdev_apply_port_links(dev);
+       if (ret)
+               return ret;
+
+       cfg.response = (uintptr_t)&response;
+
+       for (i = 0; i < dlb->num_ports; i++) {
+               if (!dlb->ev_ports[i].setup_done) {
+                       DLB_LOG_ERR("dlb: port %d not setup", i);
+                       return -ESTALE;
+               }
+       }
+
+       for (i = 0; i < dlb->num_queues; i++) {
+               if (dlb->ev_queues[i].num_links == 0) {
+                       DLB_LOG_ERR("dlb: queue %d is not linked", i);
+                       return -ENOLINK;
+               }
+       }
+
+       ret = dlb_iface_sched_domain_start(handle, &cfg);
+       if (ret < 0) {
+               DLB_LOG_ERR("dlb: sched_domain_start ret=%d (driver status: %s)\n",
+                           ret, dlb_error_strings[response.status]);
+               return ret;
+       }
+
+       dlb->run_state = DLB_RUN_STATE_STARTED;
+       DLB_LOG_DBG("dlb: sched_domain_start completed OK\n");
+
+       return 0;
+}
+
 void
 dlb_entry_points_init(struct rte_eventdev *dev)
 {
        static struct rte_eventdev_ops dlb_eventdev_entry_ops = {
                .dev_infos_get    = dlb_eventdev_info_get,
                .dev_configure    = dlb_eventdev_configure,
+               .dev_start        = dlb_eventdev_start,
                .queue_def_conf   = dlb_eventdev_queue_default_conf_get,
                .port_def_conf    = dlb_eventdev_port_default_conf_get,
                .queue_setup      = dlb_eventdev_queue_setup,
diff --git a/drivers/event/dlb/dlb_iface.c b/drivers/event/dlb/dlb_iface.c
index 3d34c89..e0cade1 100644
--- a/drivers/event/dlb/dlb_iface.c
+++ b/drivers/event/dlb/dlb_iface.c
@@ -67,6 +67,9 @@ int (*dlb_iface_map_qid)(struct dlb_hw_dev *handle,
 int (*dlb_iface_unmap_qid)(struct dlb_hw_dev *handle,
                           struct dlb_unmap_qid_args *cfg);
 
+int (*dlb_iface_sched_domain_start)(struct dlb_hw_dev *handle,
+                                   struct dlb_start_domain_args *cfg);
+
 int (*dlb_iface_pending_port_unmaps)(struct dlb_hw_dev *handle,
                                     struct dlb_pending_port_unmaps_args *args);
 
diff --git a/drivers/event/dlb/dlb_iface.h b/drivers/event/dlb/dlb_iface.h
index c0f5f2e..8c905ab 100644
--- a/drivers/event/dlb/dlb_iface.h
+++ b/drivers/event/dlb/dlb_iface.h
@@ -55,6 +55,9 @@ extern int (*dlb_iface_map_qid)(struct dlb_hw_dev *handle,
 extern int (*dlb_iface_unmap_qid)(struct dlb_hw_dev *handle,
                                  struct dlb_unmap_qid_args *cfg);
 
+extern int (*dlb_iface_sched_domain_start)(struct dlb_hw_dev *handle,
+                                   struct dlb_start_domain_args *cfg);
+
 extern int (*dlb_iface_pending_port_unmaps)(struct dlb_hw_dev *handle,
                                struct dlb_pending_port_unmaps_args *args);
 
diff --git a/drivers/event/dlb/pf/base/dlb_resource.c b/drivers/event/dlb/pf/base/dlb_resource.c
index 34207ff..829a977 100644
--- a/drivers/event/dlb/pf/base/dlb_resource.c
+++ b/drivers/event/dlb/pf/base/dlb_resource.c
@@ -6408,6 +6408,32 @@ static int dlb_verify_map_qid_args(struct dlb_hw *hw,
        return 0;
 }
 
+static int dlb_verify_start_domain_args(struct dlb_hw *hw,
+                                       u32 domain_id,
+                                       struct dlb_cmd_response *resp)
+{
+       struct dlb_domain *domain;
+
+       domain = dlb_get_domain_from_id(hw, domain_id);
+
+       if (!domain) {
+               resp->status = DLB_ST_INVALID_DOMAIN_ID;
+               return -1;
+       }
+
+       if (!domain->configured) {
+               resp->status = DLB_ST_DOMAIN_NOT_CONFIGURED;
+               return -1;
+       }
+
+       if (domain->started) {
+               resp->status = DLB_ST_DOMAIN_STARTED;
+               return -1;
+       }
+
+       return 0;
+}
+
 static int dlb_verify_map_qid_slot_available(struct dlb_ldb_port *port,
                                             struct dlb_ldb_queue *queue,
                                             struct dlb_cmd_response *resp)
@@ -6669,3 +6695,119 @@ int dlb_hw_map_qid(struct dlb_hw *hw,
        return 0;
 }
 
+static void dlb_log_start_domain(struct dlb_hw *hw, u32 domain_id)
+{
+       DLB_HW_INFO(hw, "DLB start domain arguments:\n");
+       DLB_HW_INFO(hw, "\tDomain ID: %d\n", domain_id);
+}
+
+static void dlb_ldb_pool_write_credit_count_reg(struct dlb_hw *hw,
+                                               u32 pool_id)
+{
+       union dlb_chp_ldb_pool_crd_cnt r0 = { {0} };
+       struct dlb_credit_pool *pool;
+
+       pool = &hw->rsrcs.ldb_credit_pools[pool_id];
+
+       r0.field.count = pool->avail_credits;
+
+       DLB_CSR_WR(hw,
+                  DLB_CHP_LDB_POOL_CRD_CNT(pool->id),
+                  r0.val);
+}
+
+static void dlb_dir_pool_write_credit_count_reg(struct dlb_hw *hw,
+                                               u32 pool_id)
+{
+       union dlb_chp_dir_pool_crd_cnt r0 = { {0} };
+       struct dlb_credit_pool *pool;
+
+       pool = &hw->rsrcs.dir_credit_pools[pool_id];
+
+       r0.field.count = pool->avail_credits;
+
+       DLB_CSR_WR(hw,
+                  DLB_CHP_DIR_POOL_CRD_CNT(pool->id),
+                  r0.val);
+}
+
+/**
+ * dlb_hw_start_domain() - Lock the domain configuration
+ * @hw:          Contains the current state of the DLB hardware.
+ * @arg: User-provided arguments (unused by this function).
+ * @resp: Response to user.
+ *
+ * Return: returns < 0 on error, 0 otherwise. If the driver is unable to
+ * satisfy a request, resp->status will be set accordingly.
+ */
+int dlb_hw_start_domain(struct dlb_hw *hw,
+                       u32 domain_id,
+                       struct dlb_start_domain_args *arg,
+                       struct dlb_cmd_response *resp)
+{
+       struct dlb_list_entry *iter;
+       struct dlb_dir_pq_pair *dir_queue;
+       struct dlb_ldb_queue *ldb_queue;
+       struct dlb_credit_pool *pool;
+       struct dlb_domain *domain;
+       RTE_SET_USED(arg);
+       RTE_SET_USED(iter);
+
+       dlb_log_start_domain(hw, domain_id);
+
+       if (dlb_verify_start_domain_args(hw, domain_id, resp))
+               return -EINVAL;
+
+       domain = dlb_get_domain_from_id(hw, domain_id);
+       if (!domain) {
+               DLB_HW_ERR(hw,
+                          "[%s():%d] Internal error: domain not found\n",
+                          __func__, __LINE__);
+               return -EFAULT;
+       }
+
+       /* Write the domain's pool credit counts, which have been updated
+        * during port configuration. The sum of the pool credit count plus
+        * each producer port's credit count must equal the pool's credit
+        * allocation *before* traffic is sent.
+        */
+       DLB_DOM_LIST_FOR(domain->used_ldb_credit_pools, pool, iter)
+               dlb_ldb_pool_write_credit_count_reg(hw, pool->id);
+
+       DLB_DOM_LIST_FOR(domain->used_dir_credit_pools, pool, iter)
+               dlb_dir_pool_write_credit_count_reg(hw, pool->id);
+
+       /* Enable load-balanced and directed queue write permissions for the
+        * queues this domain owns. Without this, the DLB will drop all
+        * incoming traffic to those queues.
+        */
+       DLB_DOM_LIST_FOR(domain->used_ldb_queues, ldb_queue, iter) {
+               union dlb_sys_ldb_vasqid_v r0 = { {0} };
+               unsigned int offs;
+
+               r0.field.vasqid_v = 1;
+
+               offs = domain->id * DLB_MAX_NUM_LDB_QUEUES + ldb_queue->id;
+
+               DLB_CSR_WR(hw, DLB_SYS_LDB_VASQID_V(offs), r0.val);
+       }
+
+       DLB_DOM_LIST_FOR(domain->used_dir_pq_pairs, dir_queue, iter) {
+               union dlb_sys_dir_vasqid_v r0 = { {0} };
+               unsigned int offs;
+
+               r0.field.vasqid_v = 1;
+
+               offs = domain->id * DLB_MAX_NUM_DIR_PORTS + dir_queue->id;
+
+               DLB_CSR_WR(hw, DLB_SYS_DIR_VASQID_V(offs), r0.val);
+       }
+
+       dlb_flush_csr(hw);
+
+       domain->started = true;
+
+       resp->status = 0;
+
+       return 0;
+}
diff --git a/drivers/event/dlb/pf/dlb_pf.c b/drivers/event/dlb/pf/dlb_pf.c
index a6a5adb..237ade9 100644
--- a/drivers/event/dlb/pf/dlb_pf.c
+++ b/drivers/event/dlb/pf/dlb_pf.c
@@ -477,6 +477,28 @@ dlb_pf_get_sn_occupancy(struct dlb_hw_dev *handle,
 }
 
 static int
+dlb_pf_sched_domain_start(struct dlb_hw_dev *handle,
+                         struct dlb_start_domain_args *cfg)
+{
+       struct dlb_dev *dlb_dev = (struct dlb_dev *)handle->pf_dev;
+       struct dlb_cmd_response response = {0};
+       int ret;
+
+       DLB_INFO(dlb_dev->dlb_device, "Entering %s()\n", __func__);
+
+       ret = dlb_hw_start_domain(&dlb_dev->hw,
+                                 handle->domain_id,
+                                 cfg,
+                                 &response);
+
+       *(struct dlb_cmd_response *)cfg->response = response;
+
+       DLB_INFO(dlb_dev->dlb_device, "Exiting %s() with ret=%d\n", __func__, ret);
+
+       return ret;
+}
+
+static int
 dlb_pf_pending_port_unmaps(struct dlb_hw_dev *handle,
                           struct dlb_pending_port_unmaps_args *args)
 {
@@ -559,6 +581,7 @@ dlb_pf_iface_fn_ptrs_init(void)
        dlb_iface_dir_port_create = dlb_pf_dir_port_create;
        dlb_iface_map_qid = dlb_pf_map_qid;
        dlb_iface_unmap_qid = dlb_pf_unmap_qid;
+       dlb_iface_sched_domain_start = dlb_pf_sched_domain_start;
        dlb_iface_pending_port_unmaps = dlb_pf_pending_port_unmaps;
        dlb_iface_get_cq_poll_mode = dlb_pf_get_cq_poll_mode;
        dlb_iface_get_sn_allocation = dlb_pf_get_sn_allocation;
-- 
2.6.4

Reply via email to