Re: [PATCH 2/2] IB/mad: use CQ abstraction

2016-01-05 Thread Christoph Hellwig
On Mon, Jan 04, 2016 at 07:04:03PM -0500, ira.weiny wrote:
> Sorry I did not catch this before but rather than void * wouldn't it be better
> to use struct ib_cqe?

Sure, I'll fix it up.
--
To unsubscribe from this list: send the line "unsubscribe linux-rdma" in
the body of a message to majordomo@vger.kernel.org
More majordomo info at http://vger.kernel.org/majordomo-info.html


Re: [PATCH 2/2] IB/mad: use CQ abstraction

2016-01-04 Thread ira.weiny
On Mon, Jan 04, 2016 at 02:15:59PM +0100, Christoph Hellwig wrote:
> Remove the local workqueue to process mad completions and use the CQ API
> instead.
> 
> Signed-off-by: Christoph Hellwig 

One minor nit below...


> ---
>  drivers/infiniband/core/mad.c  | 159 
> +
>  drivers/infiniband/core/mad_priv.h |   2 +-
>  2 files changed, 58 insertions(+), 103 deletions(-)
> 
> diff --git a/drivers/infiniband/core/mad.c b/drivers/infiniband/core/mad.c
> index cbe232a..286d1a9 100644
> --- a/drivers/infiniband/core/mad.c
> +++ b/drivers/infiniband/core/mad.c
> @@ -61,18 +61,6 @@ MODULE_PARM_DESC(send_queue_size, "Size of send queue in 
> number of work requests
>  module_param_named(recv_queue_size, mad_recvq_size, int, 0444);
>  MODULE_PARM_DESC(recv_queue_size, "Size of receive queue in number of work 
> requests");
>  
> -/*
> - * Define a limit on the number of completions which will be processed by the
> - * worker thread in a single work item.  This ensures that other work items
> - * (potentially from other users) are processed fairly.
> - *
> - * The number of completions was derived from the default queue sizes above.
> - * We use a value which is double the larger of the 2 queues (receive @ 512)
> - * but keep it fixed such that an increase in that value does not introduce
> - * unfairness.
> - */
> -#define MAD_COMPLETION_PROC_LIMIT 1024
> -
>  static struct list_head ib_mad_port_list;
>  static u32 ib_mad_client_id = 0;
>  
> @@ -96,6 +84,9 @@ static int add_nonoui_reg_req(struct ib_mad_reg_req 
> *mad_reg_req,
> u8 mgmt_class);
>  static int add_oui_reg_req(struct ib_mad_reg_req *mad_reg_req,
>  struct ib_mad_agent_private *agent_priv);
> +static bool ib_mad_send_error(struct ib_mad_port_private *port_priv,
> +   struct ib_wc *wc);
> +static void ib_mad_send_done(struct ib_cq *cq, struct ib_wc *wc);
>  
>  /*
>   * Returns a ib_mad_port_private structure or NULL for a device/port
> @@ -702,11 +693,11 @@ static void snoop_recv(struct ib_mad_qp_info *qp_info,
>  }
>  
>  static void build_smp_wc(struct ib_qp *qp,
> -  u64 wr_id, u16 slid, u16 pkey_index, u8 port_num,
> +  void *wr_cqe, u16 slid, u16 pkey_index, u8 port_num,

Sorry I did not catch this before but rather than void * wouldn't it be better
to use struct ib_cqe?

Regardless:

Reviewed-by: Ira Weiny 

>struct ib_wc *wc)
>  {
>   memset(wc, 0, sizeof *wc);
> - wc->wr_id = wr_id;
> + wc->wr_cqe = wr_cqe;
>   wc->status = IB_WC_SUCCESS;
>   wc->opcode = IB_WC_RECV;
>   wc->pkey_index = pkey_index;
> @@ -844,7 +835,7 @@ static int handle_outgoing_dr_smp(struct 
> ib_mad_agent_private *mad_agent_priv,
>   }
>  
>   build_smp_wc(mad_agent_priv->agent.qp,
> -  send_wr->wr.wr_id, drslid,
> +  send_wr->wr.wr_cqe, drslid,
>send_wr->pkey_index,
>send_wr->port_num, _wc);
>  
> @@ -1051,7 +1042,9 @@ struct ib_mad_send_buf * ib_create_send_mad(struct 
> ib_mad_agent *mad_agent,
>  
>   mad_send_wr->sg_list[1].lkey = mad_agent->qp->pd->local_dma_lkey;
>  
> - mad_send_wr->send_wr.wr.wr_id = (unsigned long) mad_send_wr;
> + mad_send_wr->mad_list.cqe.done = ib_mad_send_done;
> +
> + mad_send_wr->send_wr.wr.wr_cqe = _send_wr->mad_list.cqe;
>   mad_send_wr->send_wr.wr.sg_list = mad_send_wr->sg_list;
>   mad_send_wr->send_wr.wr.num_sge = 2;
>   mad_send_wr->send_wr.wr.opcode = IB_WR_SEND;
> @@ -1163,8 +1156,9 @@ int ib_send_mad(struct ib_mad_send_wr_private 
> *mad_send_wr)
>  
>   /* Set WR ID to find mad_send_wr upon completion */
>   qp_info = mad_send_wr->mad_agent_priv->qp_info;
> - mad_send_wr->send_wr.wr.wr_id = (unsigned long)_send_wr->mad_list;
>   mad_send_wr->mad_list.mad_queue = _info->send_queue;
> + mad_send_wr->mad_list.cqe.done = ib_mad_send_done;
> + mad_send_wr->send_wr.wr.wr_cqe = _send_wr->mad_list.cqe;
>  
>   mad_agent = mad_send_wr->send_buf.mad_agent;
>   sge = mad_send_wr->sg_list;
> @@ -2185,13 +2179,14 @@ handle_smi(struct ib_mad_port_private *port_priv,
>   return handle_ib_smi(port_priv, qp_info, wc, port_num, recv, response);
>  }
>  
> -static void ib_mad_recv_done_handler(struct ib_mad_port_private *port_priv,
> -  struct ib_wc *wc)
> +static void ib_mad_recv_done(struct ib_cq *cq, struct ib_wc *wc)
>  {
> + struct ib_mad_port_private *port_priv = cq->cq_context;
> + struct ib_mad_list_head *mad_list =
> + container_of(wc->wr_cqe, struct ib_mad_list_head, cqe);
>   struct ib_mad_qp_info *qp_info;
>   struct ib_mad_private_header *mad_priv_hdr;
>   struct ib_mad_private *recv, *response = NULL;
> - struct ib_mad_list_head *mad_list;
>   struct ib_mad_agent_private *mad_agent;
>   

Re: [PATCH 2/2] IB/mad: use CQ abstraction

2016-01-04 Thread Hal Rosenstock
On 1/4/2016 9:16 AM, Christoph Hellwig wrote:
> Remove the local workqueue to process mad completions and use the CQ API
> instead.
> 
> Signed-off-by: Christoph Hellwig 

Reviewed-by: Hal Rosenstock 
--
To unsubscribe from this list: send the line "unsubscribe linux-rdma" in
the body of a message to majordomo@vger.kernel.org
More majordomo info at http://vger.kernel.org/majordomo-info.html