From: Arthur Kiyanovski <akiy...@amazon.com>

This patch introduces APIs for detection, initialization, configuration
and actual usage of low latency queues (LLQ). It extends the transmit API
with creation of LLQ descriptors in device memory (which include the host
buffer descriptors as well as the packet header).

Signed-off-by: Arthur Kiyanovski <akiy...@amazon.com>
---
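Note (not part of the commit message): below is a rough, illustrative
sketch of how a driver might use the new placement-policy API during
device init. The helper name set_default_llq_configurations() and the
surrounding probe context are assumptions; the struct fields, the
ENA_ADMIN_* constants and ena_com_config_dev_mode() itself come from
this patch.

    static void set_default_llq_configurations(struct ena_llq_configurations *cfg)
    {
            cfg->llq_header_location = ENA_ADMIN_INLINE_HEADER;
            cfg->llq_stride_ctrl = ENA_ADMIN_MULTIPLE_DESCS_PER_ENTRY;
            cfg->llq_ring_entry_size = ENA_ADMIN_LIST_ENTRY_SIZE_128B;
            cfg->llq_ring_entry_size_value = 128;
            cfg->llq_num_decs_before_header =
                    ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_2;
    }

    /* During probe, after ena_com_get_dev_attr_feat() has filled
     * get_feat_ctx, the TX placement policy would be selected like this:
     */
    set_default_llq_configurations(&llq_config);
    rc = ena_com_config_dev_mode(ena_dev, &get_feat_ctx.llq, &llq_config);
    if (rc) {
            dev_err(dev, "failed to configure the device placement policy\n");
            return rc;
    }
    /* ena_dev->tx_mem_queue_type is now either
     * ENA_ADMIN_PLACEMENT_POLICY_DEV (LLQ) or ENA_ADMIN_PLACEMENT_POLICY_HOST.
     */
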
 drivers/net/ethernet/amazon/ena/ena_com.c     | 249 ++++++++++++++++++++++++--
 drivers/net/ethernet/amazon/ena/ena_com.h     |  28 +++
 drivers/net/ethernet/amazon/ena/ena_eth_com.c | 231 ++++++++++++++++++------
 drivers/net/ethernet/amazon/ena/ena_eth_com.h |  25 ++-
 drivers/net/ethernet/amazon/ena/ena_netdev.c  |  21 +--
 5 files changed, 474 insertions(+), 80 deletions(-)

diff --git a/drivers/net/ethernet/amazon/ena/ena_com.c b/drivers/net/ethernet/amazon/ena/ena_com.c
index b6e6a47..5220c75 100644
--- a/drivers/net/ethernet/amazon/ena/ena_com.c
+++ b/drivers/net/ethernet/amazon/ena/ena_com.c
@@ -58,6 +58,8 @@
 
 #define ENA_MMIO_READ_TIMEOUT 0xFFFFFFFF
 
+#define ENA_COM_BOUNCE_BUFFER_CNTRL_CNT        4
+
 #define ENA_REGS_ADMIN_INTR_MASK 1
 
 #define ENA_POLL_MS    5
@@ -352,21 +354,48 @@ static int ena_com_init_io_sq(struct ena_com_dev *ena_dev,
                                                    &io_sq->desc_addr.phys_addr,
                                                    GFP_KERNEL);
                }
-       } else {
+
+               if (!io_sq->desc_addr.virt_addr) {
+                       pr_err("memory allocation failed");
+                       return -ENOMEM;
+               }
+       }
+
+       if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) {
+               /* Allocate bounce buffers */
+               io_sq->bounce_buf_ctrl.buffer_size =
+                       ena_dev->llq_info.desc_list_entry_size;
+               io_sq->bounce_buf_ctrl.buffers_num =
+                       ENA_COM_BOUNCE_BUFFER_CNTRL_CNT;
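+               /* buffers_num must be a power of two:
+                * ena_com_get_next_bounce_buffer() wraps the running
+                * index with a (buffers_num - 1) mask.
+                */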
+               io_sq->bounce_buf_ctrl.next_to_use = 0;
+
+               size = io_sq->bounce_buf_ctrl.buffer_size *
+                        io_sq->bounce_buf_ctrl.buffers_num;
+
                dev_node = dev_to_node(ena_dev->dmadev);
                set_dev_node(ena_dev->dmadev, ctx->numa_node);
-               io_sq->desc_addr.virt_addr =
+               io_sq->bounce_buf_ctrl.base_buffer =
                        devm_kzalloc(ena_dev->dmadev, size, GFP_KERNEL);
                set_dev_node(ena_dev->dmadev, dev_node);
-               if (!io_sq->desc_addr.virt_addr) {
-                       io_sq->desc_addr.virt_addr =
+               if (!io_sq->bounce_buf_ctrl.base_buffer)
+                       io_sq->bounce_buf_ctrl.base_buffer =
                                devm_kzalloc(ena_dev->dmadev, size, GFP_KERNEL);
+
+               if (!io_sq->bounce_buf_ctrl.base_buffer) {
+                       pr_err("bounce buffer memory allocation failed");
+                       return -ENOMEM;
                }
-       }
 
-       if (!io_sq->desc_addr.virt_addr) {
-               pr_err("memory allocation failed");
-               return -ENOMEM;
+               memcpy(&io_sq->llq_info, &ena_dev->llq_info,
+                      sizeof(io_sq->llq_info));
+
+               /* Initialize the first bounce buffer */
+               io_sq->llq_buf_ctrl.curr_bounce_buf =
+                       ena_com_get_next_bounce_buffer(&io_sq->bounce_buf_ctrl);
+               memset(io_sq->llq_buf_ctrl.curr_bounce_buf,
+                      0x0, io_sq->llq_info.desc_list_entry_size);
+               io_sq->llq_buf_ctrl.descs_left_in_line =
+                       io_sq->llq_info.descs_num_before_header;
        }
 
        io_sq->tail = 0;
@@ -554,6 +583,156 @@ static int ena_com_wait_and_process_admin_cq_polling(struct ena_comp_ctx *comp_c
        return ret;
 }
 
+/**
+ * Set the LLQ configuration in the device
+ *
+ * The driver provides only the enabled feature values to the device,
+ * which, in turn, checks whether they are supported.
+ */
+static int ena_com_set_llq(struct ena_com_dev *ena_dev)
+{
+       struct ena_com_admin_queue *admin_queue;
+       struct ena_admin_set_feat_cmd cmd;
+       struct ena_admin_set_feat_resp resp;
+       struct ena_com_llq_info *llq_info = &ena_dev->llq_info;
+       int ret;
+
+       memset(&cmd, 0x0, sizeof(cmd));
+       admin_queue = &ena_dev->admin_queue;
+
+       cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
+       cmd.feat_common.feature_id = ENA_ADMIN_LLQ;
+
+       cmd.u.llq.header_location_ctrl_enabled = llq_info->header_location_ctrl;
+       cmd.u.llq.entry_size_ctrl_enabled = llq_info->desc_list_entry_size_ctrl;
+       cmd.u.llq.desc_num_before_header_enabled = llq_info->descs_num_before_header;
+       cmd.u.llq.descriptors_stride_ctrl_enabled = llq_info->desc_stride_ctrl;
+
+       ret = ena_com_execute_admin_command(admin_queue,
+                                           (struct ena_admin_aq_entry *)&cmd,
+                                           sizeof(cmd),
+                                           (struct ena_admin_acq_entry *)&resp,
+                                           sizeof(resp));
+
+       if (unlikely(ret))
+               pr_err("Failed to set LLQ configurations: %d\n", ret);
+
+       return ret;
+}
+
+static int ena_com_config_llq_info(struct ena_com_dev *ena_dev,
+                                  struct ena_admin_feature_llq_desc *llq_features,
+                                  struct ena_llq_configurations *llq_default_cfg)
+{
+       struct ena_com_llq_info *llq_info = &ena_dev->llq_info;
+       u16 supported_feat;
+       int rc;
+
+       memset(llq_info, 0, sizeof(*llq_info));
+
+       supported_feat = llq_features->header_location_ctrl_supported;
+
+       if (likely(supported_feat & llq_default_cfg->llq_header_location)) {
+               llq_info->header_location_ctrl =
+                       llq_default_cfg->llq_header_location;
+       } else {
+               pr_err("Invalid header location control, supported: 0x%x\n",
+                      supported_feat);
+               return -EINVAL;
+       }
+
+       if (likely(llq_info->header_location_ctrl == ENA_ADMIN_INLINE_HEADER)) {
+               supported_feat = llq_features->descriptors_stride_ctrl_supported;
+               if (likely(supported_feat & llq_default_cfg->llq_stride_ctrl)) {
+                       llq_info->desc_stride_ctrl = llq_default_cfg->llq_stride_ctrl;
+               } else {
+                       if (supported_feat & ENA_ADMIN_MULTIPLE_DESCS_PER_ENTRY) {
+                               llq_info->desc_stride_ctrl = ENA_ADMIN_MULTIPLE_DESCS_PER_ENTRY;
+                       } else if (supported_feat & ENA_ADMIN_SINGLE_DESC_PER_ENTRY) {
+                               llq_info->desc_stride_ctrl = ENA_ADMIN_SINGLE_DESC_PER_ENTRY;
+                       } else {
+                               pr_err("Invalid desc_stride_ctrl, supported: 0x%x\n",
+                                      supported_feat);
+                               return -EINVAL;
+                       }
+
+                       pr_err("Default llq stride ctrl is not supported, performing fallback, default: 0x%x, supported: 0x%x, used: 0x%x\n",
+                              llq_default_cfg->llq_stride_ctrl, supported_feat,
+                              llq_info->desc_stride_ctrl);
+               }
+       } else {
+               llq_info->desc_stride_ctrl = 0;
+       }
+
+       supported_feat = llq_features->entry_size_ctrl_supported;
+       if (likely(supported_feat & llq_default_cfg->llq_ring_entry_size)) {
+               llq_info->desc_list_entry_size_ctrl = llq_default_cfg->llq_ring_entry_size;
+               llq_info->desc_list_entry_size = llq_default_cfg->llq_ring_entry_size_value;
+       } else {
+               if (supported_feat & ENA_ADMIN_LIST_ENTRY_SIZE_128B) {
+                       llq_info->desc_list_entry_size_ctrl = ENA_ADMIN_LIST_ENTRY_SIZE_128B;
+                       llq_info->desc_list_entry_size = 128;
+               } else if (supported_feat & ENA_ADMIN_LIST_ENTRY_SIZE_192B) {
+                       llq_info->desc_list_entry_size_ctrl = ENA_ADMIN_LIST_ENTRY_SIZE_192B;
+                       llq_info->desc_list_entry_size = 192;
+               } else if (supported_feat & ENA_ADMIN_LIST_ENTRY_SIZE_256B) {
+                       llq_info->desc_list_entry_size_ctrl = ENA_ADMIN_LIST_ENTRY_SIZE_256B;
+                       llq_info->desc_list_entry_size = 256;
+               } else {
+                       pr_err("Invalid entry_size_ctrl, supported: 0x%x\n",
+                              supported_feat);
+                       return -EINVAL;
+               }
+
+               pr_err("Default llq ring entry size is not supported, performing fallback, default: 0x%x, supported: 0x%x, used: 0x%x\n",
+                      llq_default_cfg->llq_ring_entry_size, supported_feat,
+                      llq_info->desc_list_entry_size);
+       }
+       if (unlikely(llq_info->desc_list_entry_size & 0x7)) {
+               /* The desc list entry size should be a whole multiple of 8.
+                * This requirement comes from __iowrite64_copy()
+                */
+               pr_err("illegal entry size %d\n",
+                      llq_info->desc_list_entry_size);
+               return -EINVAL;
+       }
+
+       if (llq_info->desc_stride_ctrl == ENA_ADMIN_MULTIPLE_DESCS_PER_ENTRY)
+               llq_info->descs_per_entry = llq_info->desc_list_entry_size /
+                       sizeof(struct ena_eth_io_tx_desc);
+       else
+               llq_info->descs_per_entry = 1;
+
+       supported_feat = llq_features->desc_num_before_header_supported;
+       if (likely(supported_feat & llq_default_cfg->llq_num_decs_before_header)) {
+               llq_info->descs_num_before_header = llq_default_cfg->llq_num_decs_before_header;
+       } else {
+               if (supported_feat & ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_2) {
+                       llq_info->descs_num_before_header = ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_2;
+               } else if (supported_feat & ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_1) {
+                       llq_info->descs_num_before_header = ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_1;
+               } else if (supported_feat & ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_4) {
+                       llq_info->descs_num_before_header = ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_4;
+               } else if (supported_feat & ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_8) {
+                       llq_info->descs_num_before_header = ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_8;
+               } else {
+                       pr_err("Invalid descs_num_before_header, supported: 0x%x\n",
+                              supported_feat);
+                       return -EINVAL;
+               }
+
+               pr_err("Default llq num descs before header is not supported, performing fallback, default: 0x%x, supported: 0x%x, used: 0x%x\n",
+                      llq_default_cfg->llq_num_decs_before_header,
+                      supported_feat, llq_info->descs_num_before_header);
+       }
+
+       rc = ena_com_set_llq(ena_dev);
+       if (rc)
+               pr_err("Cannot set LLQ configuration: %d\n", rc);
+
+       return rc;
+}
+
 static int ena_com_wait_and_process_admin_cq_interrupts(struct ena_comp_ctx *comp_ctx,
                                                         struct ena_com_admin_queue *admin_queue)
 {
@@ -725,15 +904,17 @@ static void ena_com_io_queue_free(struct ena_com_dev *ena_dev,
        if (io_sq->desc_addr.virt_addr) {
                size = io_sq->desc_entry_size * io_sq->q_depth;
 
-               if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST)
-                       dma_free_coherent(ena_dev->dmadev, size,
-                                         io_sq->desc_addr.virt_addr,
-                                         io_sq->desc_addr.phys_addr);
-               else
-                       devm_kfree(ena_dev->dmadev, io_sq->desc_addr.virt_addr);
+               dma_free_coherent(ena_dev->dmadev, size,
+                                 io_sq->desc_addr.virt_addr,
+                                 io_sq->desc_addr.phys_addr);
 
                io_sq->desc_addr.virt_addr = NULL;
        }
+
+       if (io_sq->bounce_buf_ctrl.base_buffer) {
+               devm_kfree(ena_dev->dmadev, io_sq->bounce_buf_ctrl.base_buffer);
+               io_sq->bounce_buf_ctrl.base_buffer = NULL;
+       }
 }
 
 static int wait_for_reset_state(struct ena_com_dev *ena_dev, u32 timeout,
@@ -1740,6 +1921,15 @@ int ena_com_get_dev_attr_feat(struct ena_com_dev *ena_dev,
        else
                return rc;
 
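+       /* LLQ support is optional; -EOPNOTSUPP just means the device
+        * exposes no LLQ feature.
+        */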
+       rc = ena_com_get_feature(ena_dev, &get_resp, ENA_ADMIN_LLQ);
+       if (!rc)
+               memcpy(&get_feat_ctx->llq, &get_resp.u.llq,
+                      sizeof(get_resp.u.llq));
+       else if (rc == -EOPNOTSUPP)
+               memset(&get_feat_ctx->llq, 0x0, sizeof(get_feat_ctx->llq));
+       else
+               return rc;
+
        return 0;
 }
 
@@ -2708,3 +2898,34 @@ void ena_com_get_intr_moderation_entry(struct ena_com_dev *ena_dev,
        intr_moder_tbl[level].pkts_per_interval;
        entry->bytes_per_interval = intr_moder_tbl[level].bytes_per_interval;
 }
+
+int ena_com_config_dev_mode(struct ena_com_dev *ena_dev,
+                           struct ena_admin_feature_llq_desc *llq_features,
+                           struct ena_llq_configurations *llq_default_cfg)
+{
+       int rc;
+       int size;
+
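+       /* The device does not expose any LLQs; fall back to regular
+        * host-memory placement.
+        */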
+       if (!llq_features->max_llq_num) {
+               ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST;
+               return 0;
+       }
+
+       rc = ena_com_config_llq_info(ena_dev, llq_features, llq_default_cfg);
+       if (rc)
+               return rc;
+
+       /* Validate the descriptor is not too big */
+       size = ena_dev->tx_max_header_size;
+       size += ena_dev->llq_info.descs_num_before_header *
+               sizeof(struct ena_eth_io_tx_desc);
+
+       if (unlikely(ena_dev->llq_info.desc_list_entry_size < size)) {
+               pr_err("the size of the LLQ entry is smaller than needed\n");
+               return -EINVAL;
+       }
+
+       ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_DEV;
+
+       return 0;
+}
diff --git a/drivers/net/ethernet/amazon/ena/ena_com.h b/drivers/net/ethernet/amazon/ena/ena_com.h
index 50e6c8f..25af8d0 100644
--- a/drivers/net/ethernet/amazon/ena/ena_com.h
+++ b/drivers/net/ethernet/amazon/ena/ena_com.h
@@ -37,6 +37,7 @@
 #include <linux/delay.h>
 #include <linux/dma-mapping.h>
 #include <linux/gfp.h>
+#include <linux/io.h>
 #include <linux/sched.h>
 #include <linux/sizes.h>
 #include <linux/spinlock.h>
@@ -973,6 +974,16 @@ void ena_com_get_intr_moderation_entry(struct ena_com_dev *ena_dev,
                                       enum ena_intr_moder_level level,
                                       struct ena_intr_moder_entry *entry);
 
+/* ena_com_config_dev_mode - Configure the placement policy of the device.
+ * @ena_dev: ENA communication layer struct
+ * @llq_features: LLQ feature descriptor, retrieved via
+ *                ena_com_get_dev_attr_feat.
+ * @llq_default_config: The default driver LLQ configuration parameters
+ */
+int ena_com_config_dev_mode(struct ena_com_dev *ena_dev,
+                           struct ena_admin_feature_llq_desc *llq_features,
+                           struct ena_llq_configurations *llq_default_config);
+
 static inline bool ena_com_get_adaptive_moderation_enabled(struct ena_com_dev *ena_dev)
 {
        return ena_dev->adaptive_coalescing;
@@ -1082,4 +1093,21 @@ static inline void ena_com_update_intr_reg(struct ena_eth_io_intr_reg *intr_reg,
                intr_reg->intr_control |= ENA_ETH_IO_INTR_REG_INTR_UNMASK_MASK;
 }
 
+static inline u8 *ena_com_get_next_bounce_buffer(struct ena_com_io_bounce_buffer_control *bounce_buf_ctrl)
+{
+       u16 size, buffers_num;
+       u8 *buf;
+
+       size = bounce_buf_ctrl->buffer_size;
+       buffers_num = bounce_buf_ctrl->buffers_num;
+
+       buf = bounce_buf_ctrl->base_buffer +
+               (bounce_buf_ctrl->next_to_use++ & (buffers_num - 1)) * size;
+
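+       /* Prefetch (for write) the next buffer in the ring so it is
+        * warm in cache when the following call hands it out.
+        */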
+       prefetchw(bounce_buf_ctrl->base_buffer +
+               (bounce_buf_ctrl->next_to_use & (buffers_num - 1)) * size);
+
+       return buf;
+}
+
 #endif /* !(ENA_COM) */
diff --git a/drivers/net/ethernet/amazon/ena/ena_eth_com.c b/drivers/net/ethernet/amazon/ena/ena_eth_com.c
index 2fa032b..2a53b54 100644
--- a/drivers/net/ethernet/amazon/ena/ena_eth_com.c
+++ b/drivers/net/ethernet/amazon/ena/ena_eth_com.c
@@ -59,7 +59,7 @@ static inline struct ena_eth_io_rx_cdesc_base *ena_com_get_next_rx_cdesc(
        return cdesc;
 }
 
-static inline void *get_sq_desc(struct ena_com_io_sq *io_sq)
+static inline void *get_sq_desc_regular_queue(struct ena_com_io_sq *io_sq)
 {
        u16 tail_masked;
        u32 offset;
@@ -71,45 +71,159 @@ static inline void *get_sq_desc(struct ena_com_io_sq *io_sq)
        return (void *)((uintptr_t)io_sq->desc_addr.virt_addr + offset);
 }
 
-static inline void ena_com_copy_curr_sq_desc_to_dev(struct ena_com_io_sq *io_sq)
+static inline int ena_com_write_bounce_buffer_to_dev(struct ena_com_io_sq *io_sq,
+                                                    u8 *bounce_buffer)
 {
-       u16 tail_masked = io_sq->tail & (io_sq->q_depth - 1);
-       u32 offset = tail_masked * io_sq->desc_entry_size;
+       struct ena_com_llq_info *llq_info = &io_sq->llq_info;
 
-       /* In case this queue isn't a LLQ */
-       if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST)
-               return;
+       u16 dst_tail_mask;
+       u32 dst_offset;
 
-       memcpy_toio(io_sq->desc_addr.pbuf_dev_addr + offset,
-                   io_sq->desc_addr.virt_addr + offset,
-                   io_sq->desc_entry_size);
-}
+       dst_tail_mask = io_sq->tail & (io_sq->q_depth - 1);
+       dst_offset = dst_tail_mask * llq_info->desc_list_entry_size;
+
+       /* Make sure everything was written into the bounce buffer before
+        * writing the bounce buffer to the device
+        */
+       wmb();
+
+       /* The line is completed. Copy it to dev */
+       __iowrite64_copy(io_sq->desc_addr.pbuf_dev_addr + dst_offset,
+                        bounce_buffer, (llq_info->desc_list_entry_size) / 8);
 
-static inline void ena_com_sq_update_tail(struct ena_com_io_sq *io_sq)
-{
        io_sq->tail++;
 
        /* Switch phase bit in case of wrap around */
        if (unlikely((io_sq->tail & (io_sq->q_depth - 1)) == 0))
                io_sq->phase ^= 1;
+
+       return 0;
 }
 
-static inline int ena_com_write_header(struct ena_com_io_sq *io_sq,
-                                      u8 *head_src, u16 header_len)
+static inline int ena_com_write_header_to_bounce(struct ena_com_io_sq *io_sq,
+                                                u8 *header_src,
+                                                u16 header_len)
 {
-       u16 tail_masked = io_sq->tail & (io_sq->q_depth - 1);
-       u8 __iomem *dev_head_addr =
-               io_sq->header_addr + (tail_masked * io_sq->tx_max_header_size);
+       struct ena_com_llq_pkt_ctrl *pkt_ctrl = &io_sq->llq_buf_ctrl;
+       struct ena_com_llq_info *llq_info = &io_sq->llq_info;
+       u8 *bounce_buffer = pkt_ctrl->curr_bounce_buf;
+       u16 header_offset;
 
-       if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST)
+       if (unlikely(io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST))
                return 0;
 
-       if (unlikely(!io_sq->header_addr)) {
-               pr_err("Push buffer header ptr is NULL\n");
-               return -EINVAL;
+       header_offset =
+               llq_info->descs_num_before_header * io_sq->desc_entry_size;
+
+       if (unlikely((header_offset + header_len) >
+                    llq_info->desc_list_entry_size)) {
+               pr_err("trying to write header larger than llq entry can accommodate\n");
+               return -EFAULT;
+       }
+
+       if (unlikely(!bounce_buffer)) {
+               pr_err("bounce buffer is NULL\n");
+               return -EFAULT;
+       }
+
+       memcpy(bounce_buffer + header_offset, header_src, header_len);
+
+       return 0;
+}
+
+static inline void *get_sq_desc_llq(struct ena_com_io_sq *io_sq)
+{
+       struct ena_com_llq_pkt_ctrl *pkt_ctrl = &io_sq->llq_buf_ctrl;
+       u8 *bounce_buffer;
+       void *sq_desc;
+
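+       /* In LLQ mode descriptors are staged in the current bounce
+        * buffer line rather than in a host-memory descriptor ring.
+        */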
+       bounce_buffer = pkt_ctrl->curr_bounce_buf;
+
+       if (unlikely(!bounce_buffer)) {
+               pr_err("bounce buffer is NULL\n");
+               return NULL;
+       }
+
+       sq_desc = bounce_buffer + pkt_ctrl->idx * io_sq->desc_entry_size;
+       pkt_ctrl->idx++;
+       pkt_ctrl->descs_left_in_line--;
+
+       return sq_desc;
+}
+
+static inline int ena_com_close_bounce_buffer(struct ena_com_io_sq *io_sq)
+{
+       struct ena_com_llq_pkt_ctrl *pkt_ctrl = &io_sq->llq_buf_ctrl;
+       struct ena_com_llq_info *llq_info = &io_sq->llq_info;
+       int rc;
+
+       if (unlikely(io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST))
+               return 0;
+
+       /* bounce buffer was used, so write it and get a new one */
+       if (pkt_ctrl->idx) {
+               rc = ena_com_write_bounce_buffer_to_dev(io_sq,
+                                                       pkt_ctrl->curr_bounce_buf);
+               if (unlikely(rc))
+                       return rc;
+
+               pkt_ctrl->curr_bounce_buf =
+                       ena_com_get_next_bounce_buffer(&io_sq->bounce_buf_ctrl);
+               memset(io_sq->llq_buf_ctrl.curr_bounce_buf,
+                      0x0, llq_info->desc_list_entry_size);
+       }
+
+       pkt_ctrl->idx = 0;
+       pkt_ctrl->descs_left_in_line = llq_info->descs_num_before_header;
+       return 0;
+}
+
+static inline void *get_sq_desc(struct ena_com_io_sq *io_sq)
+{
+       if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV)
+               return get_sq_desc_llq(io_sq);
+
+       return get_sq_desc_regular_queue(io_sq);
+}
+
+static inline int ena_com_sq_update_llq_tail(struct ena_com_io_sq *io_sq)
+{
+       struct ena_com_llq_pkt_ctrl *pkt_ctrl = &io_sq->llq_buf_ctrl;
+       struct ena_com_llq_info *llq_info = &io_sq->llq_info;
+       int rc;
+
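+       /* The current descriptor line is full: write it to device
+        * memory and start a new line in a fresh bounce buffer.
+        */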
+       if (!pkt_ctrl->descs_left_in_line) {
+               rc = ena_com_write_bounce_buffer_to_dev(io_sq,
+                                                       pkt_ctrl->curr_bounce_buf);
+               if (unlikely(rc))
+                       return rc;
+
+               pkt_ctrl->curr_bounce_buf =
+                       ena_com_get_next_bounce_buffer(&io_sq->bounce_buf_ctrl);
+               memset(io_sq->llq_buf_ctrl.curr_bounce_buf,
+                      0x0, llq_info->desc_list_entry_size);
+
+               pkt_ctrl->idx = 0;
+               if (unlikely(llq_info->desc_stride_ctrl == ENA_ADMIN_SINGLE_DESC_PER_ENTRY))
+                       pkt_ctrl->descs_left_in_line = 1;
+               else
+                       pkt_ctrl->descs_left_in_line =
+                               llq_info->desc_list_entry_size / io_sq->desc_entry_size;
        }
 
-       memcpy_toio(dev_head_addr, head_src, header_len);
+       return 0;
+}
+
+static inline int ena_com_sq_update_tail(struct ena_com_io_sq *io_sq)
+{
+       if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV)
+               return ena_com_sq_update_llq_tail(io_sq);
+
+       io_sq->tail++;
+
+       /* Switch phase bit in case of wrap around */
+       if (unlikely((io_sq->tail & (io_sq->q_depth - 1)) == 0))
+               io_sq->phase ^= 1;
 
        return 0;
 }
@@ -177,8 +291,8 @@ static inline bool ena_com_meta_desc_changed(struct ena_com_io_sq *io_sq,
        return false;
 }
 
-static inline void ena_com_create_and_store_tx_meta_desc(struct ena_com_io_sq *io_sq,
-                                                        struct ena_com_tx_ctx *ena_tx_ctx)
+static inline int ena_com_create_and_store_tx_meta_desc(struct ena_com_io_sq *io_sq,
+                                                       struct ena_com_tx_ctx *ena_tx_ctx)
 {
        struct ena_eth_io_tx_meta_desc *meta_desc = NULL;
        struct ena_com_tx_meta *ena_meta = &ena_tx_ctx->ena_meta;
@@ -223,8 +337,7 @@ static inline void ena_com_create_and_store_tx_meta_desc(struct ena_com_io_sq *i
        memcpy(&io_sq->cached_tx_meta, ena_meta,
               sizeof(struct ena_com_tx_meta));
 
-       ena_com_copy_curr_sq_desc_to_dev(io_sq);
-       ena_com_sq_update_tail(io_sq);
+       return ena_com_sq_update_tail(io_sq);
 }
 
 static inline void ena_com_rx_set_flags(struct ena_com_rx_ctx *ena_rx_ctx,
@@ -262,18 +375,19 @@ int ena_com_prepare_tx(struct ena_com_io_sq *io_sq,
 {
        struct ena_eth_io_tx_desc *desc = NULL;
        struct ena_com_buf *ena_bufs = ena_tx_ctx->ena_bufs;
-       void *push_header = ena_tx_ctx->push_header;
+       void *buffer_to_push = ena_tx_ctx->push_header;
        u16 header_len = ena_tx_ctx->header_len;
        u16 num_bufs = ena_tx_ctx->num_bufs;
-       int total_desc, i, rc;
+       u16 start_tail = io_sq->tail;
+       int i, rc;
        bool have_meta;
        u64 addr_hi;
 
        WARN(io_sq->direction != ENA_COM_IO_QUEUE_DIRECTION_TX, "wrong Q type");
 
        /* num_bufs +1 for potential meta desc */
-       if (ena_com_sq_empty_space(io_sq) < (num_bufs + 1)) {
-               pr_err("Not enough space in the tx queue\n");
+       if (unlikely(!ena_com_sq_have_enough_space(io_sq, num_bufs + 1))) {
+               pr_debug("Not enough space in the tx queue\n");
                return -ENOMEM;
        }
 
@@ -283,23 +397,32 @@ int ena_com_prepare_tx(struct ena_com_io_sq *io_sq,
                return -EINVAL;
        }
 
-       /* start with pushing the header (if needed) */
-       rc = ena_com_write_header(io_sq, push_header, header_len);
+       if (unlikely(io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV &&
+                    !buffer_to_push))
+               return -EINVAL;
+
+       rc = ena_com_write_header_to_bounce(io_sq, buffer_to_push, header_len);
        if (unlikely(rc))
                return rc;
 
        have_meta = ena_tx_ctx->meta_valid && ena_com_meta_desc_changed(io_sq,
                        ena_tx_ctx);
-       if (have_meta)
-               ena_com_create_and_store_tx_meta_desc(io_sq, ena_tx_ctx);
+       if (have_meta) {
+               rc = ena_com_create_and_store_tx_meta_desc(io_sq, ena_tx_ctx);
+               if (unlikely(rc))
+                       return rc;
+       }
 
-       /* If the caller doesn't want send packets */
+       /* If the caller doesn't want to send packets */
        if (unlikely(!num_bufs && !header_len)) {
-               *nb_hw_desc = have_meta ? 0 : 1;
-               return 0;
+               rc = ena_com_close_bounce_buffer(io_sq);
+               *nb_hw_desc = io_sq->tail - start_tail;
+               return rc;
        }
 
        desc = get_sq_desc(io_sq);
+       if (unlikely(!desc))
+               return -EFAULT;
        memset(desc, 0x0, sizeof(struct ena_eth_io_tx_desc));
 
        /* Set first desc when we don't have meta descriptor */
@@ -351,10 +474,14 @@ int ena_com_prepare_tx(struct ena_com_io_sq *io_sq,
        for (i = 0; i < num_bufs; i++) {
                /* The first desc share the same desc as the header */
                if (likely(i != 0)) {
-                       ena_com_copy_curr_sq_desc_to_dev(io_sq);
-                       ena_com_sq_update_tail(io_sq);
+                       rc = ena_com_sq_update_tail(io_sq);
+                       if (unlikely(rc))
+                               return rc;
 
                        desc = get_sq_desc(io_sq);
+                       if (unlikely(!desc))
+                               return -EFAULT;
+
                        memset(desc, 0x0, sizeof(struct ena_eth_io_tx_desc));
 
                        desc->len_ctrl |= (io_sq->phase <<
@@ -377,15 +504,14 @@ int ena_com_prepare_tx(struct ena_com_io_sq *io_sq,
        /* set the last desc indicator */
        desc->len_ctrl |= ENA_ETH_IO_TX_DESC_LAST_MASK;
 
-       ena_com_copy_curr_sq_desc_to_dev(io_sq);
-
-       ena_com_sq_update_tail(io_sq);
+       rc = ena_com_sq_update_tail(io_sq);
+       if (unlikely(rc))
+               return rc;
 
-       total_desc = max_t(u16, num_bufs, 1);
-       total_desc += have_meta ? 1 : 0;
+       rc = ena_com_close_bounce_buffer(io_sq);
 
-       *nb_hw_desc = total_desc;
-       return 0;
+       *nb_hw_desc = io_sq->tail - start_tail;
+       return rc;
 }
 
 int ena_com_rx_pkt(struct ena_com_io_cq *io_cq,
@@ -444,15 +570,18 @@ int ena_com_add_single_rx_desc(struct ena_com_io_sq *io_sq,
 
        WARN(io_sq->direction != ENA_COM_IO_QUEUE_DIRECTION_RX, "wrong Q type");
 
-       if (unlikely(ena_com_sq_empty_space(io_sq) == 0))
+       if (unlikely(!ena_com_sq_have_enough_space(io_sq, 1)))
                return -ENOSPC;
 
        desc = get_sq_desc(io_sq);
+       if (unlikely(!desc))
+               return -EFAULT;
+
        memset(desc, 0x0, sizeof(struct ena_eth_io_rx_desc));
 
        desc->length = ena_buf->len;
 
-       desc->ctrl |= ENA_ETH_IO_RX_DESC_FIRST_MASK;
+       desc->ctrl = ENA_ETH_IO_RX_DESC_FIRST_MASK;
        desc->ctrl |= ENA_ETH_IO_RX_DESC_LAST_MASK;
        desc->ctrl |= io_sq->phase & ENA_ETH_IO_RX_DESC_PHASE_MASK;
        desc->ctrl |= ENA_ETH_IO_RX_DESC_COMP_REQ_MASK;
@@ -463,9 +592,7 @@ int ena_com_add_single_rx_desc(struct ena_com_io_sq *io_sq,
        desc->buff_addr_hi =
                ((ena_buf->paddr & GENMASK_ULL(io_sq->dma_addr_bits - 1, 32)) >> 32);
 
-       ena_com_sq_update_tail(io_sq);
-
-       return 0;
+       return ena_com_sq_update_tail(io_sq);
 }
 
 bool ena_com_cq_empty(struct ena_com_io_cq *io_cq)
diff --git a/drivers/net/ethernet/amazon/ena/ena_eth_com.h b/drivers/net/ethernet/amazon/ena/ena_eth_com.h
index 4930324..bcc8407 100644
--- a/drivers/net/ethernet/amazon/ena/ena_eth_com.h
+++ b/drivers/net/ethernet/amazon/ena/ena_eth_com.h
@@ -94,7 +94,7 @@ static inline void ena_com_unmask_intr(struct ena_com_io_cq *io_cq,
        writel(intr_reg->intr_control, io_cq->unmask_reg);
 }
 
-static inline int ena_com_sq_empty_space(struct ena_com_io_sq *io_sq)
+static inline int ena_com_free_desc(struct ena_com_io_sq *io_sq)
 {
        u16 tail, next_to_comp, cnt;
 
@@ -105,11 +105,28 @@ static inline int ena_com_sq_empty_space(struct ena_com_io_sq *io_sq)
        return io_sq->q_depth - 1 - cnt;
 }
 
-static inline int ena_com_write_sq_doorbell(struct ena_com_io_sq *io_sq)
+/* Check if the submission queue has enough space to hold required_buffers */
+static inline bool ena_com_sq_have_enough_space(struct ena_com_io_sq *io_sq,
+                                               u16 required_buffers)
 {
-       u16 tail;
+       int temp;
 
-       tail = io_sq->tail;
+       if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST)
+               return ena_com_free_desc(io_sq) >= required_buffers;
+
+       /* This calculation doesn't need to be 100% accurate. To reduce
+        * the calculation overhead just subtract 2 lines from the free descs
+        * (one for the header line and one to compensate for the
+        * division rounding down).
+        */
+       temp = required_buffers / io_sq->llq_info.descs_per_entry + 2;
+
+       return ena_com_free_desc(io_sq) > temp;
+}
+
+static inline int ena_com_write_sq_doorbell(struct ena_com_io_sq *io_sq)
+{
+       u16 tail = io_sq->tail;
 
        pr_debug("write submission queue doorbell for queue: %d tail: %d\n",
                 io_sq->qid, tail);
diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.c b/drivers/net/ethernet/amazon/ena/ena_netdev.c
index e81f4bb..79a4e87 100644
--- a/drivers/net/ethernet/amazon/ena/ena_netdev.c
+++ b/drivers/net/ethernet/amazon/ena/ena_netdev.c
@@ -804,12 +804,13 @@ static int ena_clean_tx_irq(struct ena_ring *tx_ring, u32 budget)
         */
        smp_mb();
 
-       above_thresh = ena_com_sq_empty_space(tx_ring->ena_com_io_sq) >
-               ENA_TX_WAKEUP_THRESH;
+       above_thresh = ena_com_sq_have_enough_space(tx_ring->ena_com_io_sq,
+                                                   ENA_TX_WAKEUP_THRESH);
        if (unlikely(netif_tx_queue_stopped(txq) && above_thresh)) {
                __netif_tx_lock(txq, smp_processor_id());
-               above_thresh = ena_com_sq_empty_space(tx_ring->ena_com_io_sq) >
-                       ENA_TX_WAKEUP_THRESH;
+               above_thresh =
+                       ena_com_sq_have_enough_space(tx_ring->ena_com_io_sq,
+                                                    ENA_TX_WAKEUP_THRESH);
                if (netif_tx_queue_stopped(txq) && above_thresh) {
                        netif_tx_wake_queue(txq);
                        u64_stats_update_begin(&tx_ring->syncp);
@@ -1101,7 +1102,7 @@ static int ena_clean_rx_irq(struct ena_ring *rx_ring, struct napi_struct *napi,
 
        rx_ring->next_to_clean = next_to_clean;
 
-       refill_required = ena_com_sq_empty_space(rx_ring->ena_com_io_sq);
+       refill_required = ena_com_free_desc(rx_ring->ena_com_io_sq);
        refill_threshold = rx_ring->ring_size / ENA_RX_REFILL_THRESH_DIVIDER;
 
        /* Optimization, try to batch new rx buffers */
@@ -2110,8 +2111,8 @@ static netdev_tx_t ena_start_xmit(struct sk_buff *skb, struct net_device *dev)
         * to sgl_size + 2. one for the meta descriptor and one for header
         * (if the header is larger than tx_max_header_size).
         */
-       if (unlikely(ena_com_sq_empty_space(tx_ring->ena_com_io_sq) <
-                    (tx_ring->sgl_size + 2))) {
+       if (unlikely(!ena_com_sq_have_enough_space(tx_ring->ena_com_io_sq,
+                                                  tx_ring->sgl_size + 2))) {
                netif_dbg(adapter, tx_queued, dev, "%s stop queue %d\n",
                          __func__, qid);
 
@@ -2130,8 +2131,8 @@ static netdev_tx_t ena_start_xmit(struct sk_buff *skb, struct net_device *dev)
                 */
                smp_mb();
 
-               if (ena_com_sq_empty_space(tx_ring->ena_com_io_sq)
-                               > ENA_TX_WAKEUP_THRESH) {
+               if (ena_com_sq_have_enough_space(tx_ring->ena_com_io_sq,
+                                                ENA_TX_WAKEUP_THRESH)) {
                        netif_tx_wake_queue(txq);
                        u64_stats_update_begin(&tx_ring->syncp);
                        tx_ring->tx_stats.queue_wakeup++;
@@ -2804,7 +2805,7 @@ static void check_for_empty_rx_ring(struct ena_adapter *adapter)
                rx_ring = &adapter->rx_ring[i];
 
                refill_required =
-                       ena_com_sq_empty_space(rx_ring->ena_com_io_sq);
+                       ena_com_free_desc(rx_ring->ena_com_io_sq);
                if (unlikely(refill_required == (rx_ring->ring_size - 1))) {
                        rx_ring->empty_rx_queue++;
 
-- 
2.7.4
