This patch restructures handle_outgoing_smp to improve its readability
and fixes the following issues: it removes an unneeded memory allocation
for the received SMP, properly sends an SMP if the underlying HCA driver
does not provide a process_mad routine, and deallocates the allocated
received SMP in all failure cases.
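
For readers skimming the interleaved diff, here is a condensed skeleton of
handle_outgoing_smp as it reads with the patch applied. It is reconstructed
from the added lines of the diff below; the function signature is inferred
from the hunk context and may not match the file exactly, and the body that
builds the local receive completion under the REPLY case is summarized by a
comment, so treat it as an outline rather than the literal code:

static int handle_outgoing_smp(struct ib_mad_agent *mad_agent,
                               struct ib_smp *smp,
                               struct ib_send_wr *send_wr)
{
        struct ib_mad_private *mad_priv;
        struct ib_mad_send_wc mad_send_wc;
        int ret;

        /* Reject SMPs with an invalid directed route. */
        if (!smi_handle_dr_smp_send(smp, mad_agent->device->node_type,
                                    mad_agent->port_num)) {
                ret = -EINVAL;
                goto out;
        }

        /*
         * If the SMP is not addressed to the local port, or the HCA driver
         * provides no process_mad routine, return so the caller posts the
         * send on the QP.
         */
        ret = smi_check_local_dr_smp(smp, mad_agent->device,
                                     mad_agent->port_num);
        if (!ret || !mad_agent->device->process_mad)
                goto out;

        /* The response MAD is allocated only when process_mad will run. */
        mad_priv = kmem_cache_alloc(ib_mad_cache,
                                    (in_atomic() || irqs_disabled()) ?
                                    GFP_ATOMIC : GFP_KERNEL);
        if (!mad_priv) {
                ret = -ENOMEM;
                goto out;
        }

        ret = mad_agent->device->process_mad(mad_agent->device, 0,
                                             mad_agent->port_num, smp->dr_slid,
                                             (struct ib_mad *)smp,
                                             (struct ib_mad *)&mad_priv->mad);
        switch (ret) {
        case IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY:
                /* Hand the reply to the agent's recv_handler, else free it. */
                break;
        case IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED:
                kmem_cache_free(ib_mad_cache, mad_priv);
                break;
        case IB_MAD_RESULT_SUCCESS:
                ret = 0;        /* caller posts the send on the QP */
                goto out;
        default:
                kmem_cache_free(ib_mad_cache, mad_priv);
                ret = -EINVAL;
                goto out;
        }

        /* Complete the send locally. */
        mad_send_wc.status = IB_WC_SUCCESS;
        mad_send_wc.vendor_err = 0;
        mad_send_wc.wr_id = send_wr->wr_id;
        mad_agent->send_handler(mad_agent, &mad_send_wc);
        ret = 1;
out:
        return ret;
}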

- Sean


Index: core/mad.c
===================================================================
--- core/mad.c	(revision 1291)
+++ core/mad.c	(working copy)
@@ -366,108 +366,92 @@
                                struct ib_send_wr *send_wr)
 {
        int ret;
+       struct ib_mad_private *mad_priv;
+       struct ib_mad_send_wc mad_send_wc;

        if (!smi_handle_dr_smp_send(smp,
                                    mad_agent->device->node_type,
                                    mad_agent->port_num)) {
                ret = -EINVAL;
                printk(KERN_ERR PFX "Invalid directed route\n");
-               goto error1;
+               goto out;
        }
-       if (smi_check_local_dr_smp(smp,
-                                  mad_agent->device,
-                                  mad_agent->port_num)) {
-               struct ib_mad_private *mad_priv;
-               struct ib_mad_agent_private *mad_agent_priv;
-               struct ib_mad_send_wc mad_send_wc;
-
-               mad_priv = kmem_cache_alloc(ib_mad_cache,
-                                           (in_atomic() || irqs_disabled()) ?
-                                           GFP_ATOMIC : GFP_KERNEL);
-               if (!mad_priv) {
-                       ret = -ENOMEM;
-                       printk(KERN_ERR PFX "No memory for local "
-                              "response MAD\n");
-                       goto error1;
-               }
+       /* Check to post send on QP or process locally. */
+       ret = smi_check_local_dr_smp(smp, mad_agent->device,
+                                    mad_agent->port_num);
+       if (!ret || !mad_agent->device->process_mad)
+               goto out;

-               mad_agent_priv = container_of(mad_agent,
-                                             struct ib_mad_agent_private,
-                                             agent);
-
-               if (mad_agent->device->process_mad) {
-                       ret = mad_agent->device->process_mad(
-                                           mad_agent->device,
-                                           0,
-                                           mad_agent->port_num,
-                                           smp->dr_slid, /* ? */
+       mad_priv = kmem_cache_alloc(ib_mad_cache,
+                                   (in_atomic() || irqs_disabled()) ?
+                                   GFP_ATOMIC : GFP_KERNEL);
+       if (!mad_priv) {
+               ret = -ENOMEM;
+               printk(KERN_ERR PFX "No memory for local response MAD\n");
+               goto out;
+       }
+       ret = mad_agent->device->process_mad(mad_agent->device, 0,
+                                            mad_agent->port_num, smp->dr_slid,
                                            (struct ib_mad *)smp,
                                            (struct ib_mad *)&mad_priv->mad);
-                       if (ret & IB_MAD_RESULT_SUCCESS) {
-                               if (ret & IB_MAD_RESULT_CONSUMED) {
-                                       ret = 1;
-                                       goto error1;
-                               }
-                               if (ret & IB_MAD_RESULT_REPLY) {
-                                       /*
-                                        * See if response is solicited and
-                                        * there is a recv handler
-                                        */
-                                       if (solicited_mad(&mad_priv->mad.mad) &&
-                                           mad_agent_priv->agent.recv_handler) {
-                                               struct ib_wc wc;
-
-                                               /*
-                                                * Defined behavior is to
-                                                * complete response before
-                                                * request
-                                                */
-                                               wc.wr_id = send_wr->wr_id;
-                                               wc.status = IB_WC_SUCCESS;
-                                               wc.opcode = IB_WC_RECV;
-                                               wc.vendor_err = 0;
-                                               wc.byte_len = sizeof(struct ib_mad);
-                                               wc.src_qp = 0;  /* IB_QPT_SMI ? */
-                                               wc.wc_flags = 0;
-                                               wc.pkey_index = 0;
-                                               wc.slid = IB_LID_PERMISSIVE;
-                                               wc.sl = 0;
-                                               wc.dlid_path_bits = 0;
-                                               mad_priv->header.recv_wc.wc = &wc;
-                                               mad_priv->header.recv_wc.mad_len =
-                                                       sizeof(struct ib_mad);
-                                               INIT_LIST_HEAD(&mad_priv->header.recv_buf.list);
-                                               mad_priv->header.recv_buf.grh = NULL;
-                                               mad_priv->header.recv_buf.mad =
-                                                       &mad_priv->mad.mad;
-                                               mad_priv->header.recv_wc.recv_buf =
-                                                       &mad_priv->header.recv_buf;
-                                               mad_agent_priv->agent.recv_handler(
-                                                       mad_agent,
-                                                       &mad_priv->header.recv_wc);
-                                       } else
-                                               kmem_cache_free(ib_mad_cache, mad_priv);
-                               } else
-                                       kmem_cache_free(ib_mad_cache, mad_priv);
-                       } else
-                               kmem_cache_free(ib_mad_cache, mad_priv);
-               }
-
-               if (mad_agent_priv->agent.send_handler) {
-                       /* Now, complete send */
-                       mad_send_wc.status = IB_WC_SUCCESS;
-                       mad_send_wc.vendor_err = 0;
-                       mad_send_wc.wr_id = send_wr->wr_id;
-                       mad_agent_priv->agent.send_handler(
-                                               mad_agent,
-                                               &mad_send_wc);
-                       ret = 1;
+       switch (ret)
+       {
+       case IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY:
+               /*
+                * See if response is solicited and
+                * there is a recv handler
+                */
+               if (solicited_mad(&mad_priv->mad.mad) &&
+                   mad_agent->recv_handler) {
+                       struct ib_wc wc;
+
+                       /*
+                        * Defined behavior is to complete response before
+                        * request
+                        */
+                       wc.wr_id = send_wr->wr_id;
+                       wc.status = IB_WC_SUCCESS;
+                       wc.opcode = IB_WC_RECV;
+                       wc.vendor_err = 0;
+                       wc.byte_len = sizeof(struct ib_mad);
+                       wc.src_qp = IB_QP0;
+                       wc.wc_flags = 0;
+                       wc.pkey_index = 0;
+                       wc.slid = IB_LID_PERMISSIVE;
+                       wc.sl = 0;
+                       wc.dlid_path_bits = 0;
+                       mad_priv->header.recv_wc.wc = &wc;
+                       mad_priv->header.recv_wc.mad_len =
+                               sizeof(struct ib_mad);
+                       INIT_LIST_HEAD(&mad_priv->header.recv_buf.list);
+                       mad_priv->header.recv_buf.grh = NULL;
+                       mad_priv->header.recv_buf.mad = &mad_priv->mad.mad;
+                       mad_priv->header.recv_wc.recv_buf =
+                               &mad_priv->header.recv_buf;
+                       mad_agent->recv_handler(mad_agent,
+                                               &mad_priv->header.recv_wc);
                } else
-                       ret = -EINVAL;
-       } else
+                       kmem_cache_free(ib_mad_cache, mad_priv);
+               break;
+       case IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED:
+               kmem_cache_free(ib_mad_cache, mad_priv);
+               break;
+       case IB_MAD_RESULT_SUCCESS:
                ret = 0;
+               goto out;
+       default:
+               kmem_cache_free(ib_mad_cache, mad_priv);
+               ret = -EINVAL;
+               goto out;
+       }

-error1:
+       /* Complete send */
+       mad_send_wc.status = IB_WC_SUCCESS;
+       mad_send_wc.vendor_err = 0;
+       mad_send_wc.wr_id = send_wr->wr_id;
+       mad_agent->send_handler(mad_agent, &mad_send_wc);
+       ret = 1;
+out:
        return ret;
 }

