On Thu, May 27, 2010 at 11:12 AM, Hal Rosenstock
<[email protected]> wrote:
> On Thu, May 27, 2010 at 10:32 AM, Mike Heinz <[email protected]> wrote:
>> There are QLogic customers who have requested the ability to perform RMPP 
>> transaction handling in user space.  This was an option in our old 
>> proprietary stack, and a few customers still using it need a way to 
>> migrate forward to OFED while containing the scope of their application 
>> changes.  While we have developed appropriate "shim" libraries to allow 
>> their applications to migrate, we cannot simulate/shim RMPP processing 
>> without some kernel support.
>>
>> We also have management applications that need these capabilities.  For 
>> those applications, application-level RMPP control allows the application 
>> to pace RMPP transactions, permits parts of the RMPP response to be built 
>> on the fly, and permits a degree of sharing of the response data between 
>> multiple requestors.
>>
>> OFED's kernel-based RMPP transaction handling cannot meet all of these 
>> requirements, so this simple option provides a minimal change to OFED 
>> while giving the application the control it needs.
>
> The API would need to be extended for this mode (in some
> backward-compatible manner) rather than supplanting the rmpp_version 0
> behavior. Changing that behavior would break backward compatibility
> and some existing apps.

I think the simplest change is to use rmpp_version 255 for this mode
(and update doc/headers accordingly) and preserve existing
rmpp_version behavior.
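
As a rough illustration (not part of the posted patch), user-space
registration for such a mode might look something like the sketch below,
assuming the hypothetical rmpp_version 255 convention above were adopted.
The SA management class, the 5 second timeout, and the hand-rolled RMPP
flags check are illustrative assumptions only, not an established API.

/*
 * Minimal sketch only -- NOT part of the posted patch.  Assumes the
 * proposed convention that rmpp_version 255 selects user-space RMPP
 * handling, so raw RMPP segments are delivered to the application
 * unmodified.
 */
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <infiniband/umad.h>

#define USER_RMPP_VERSION 255   /* proposed sentinel value, see above */
#define SA_CLASS          0x03  /* example RMPP-capable class */
#define RMPP_FLAG_ACTIVE  0x01  /* low bit of the RMPP flags byte */

int main(void)
{
        long method_mask[16 / sizeof(long)] = { 0 };
        int portid, agent, len, ret;
        uint8_t *mad;
        void *umad;

        if (umad_init() < 0)
                return 1;

        portid = umad_open_port(NULL, 0);       /* default CA/port */
        if (portid < 0)
                return 1;

        /* class version 2 is the SA class version; 255 requests user RMPP */
        agent = umad_register(portid, SA_CLASS, 2, USER_RMPP_VERSION,
                              method_mask);
        if (agent < 0)
                goto out;

        len = 256;                              /* one MAD block */
        umad = malloc(umad_size() + len);
        if (!umad)
                goto out_unreg;

        /* With user RMPP, each segment is delivered to us unmodified. */
        ret = umad_recv(portid, umad, &len, 5000 /* ms */);
        if (ret >= 0) {
                mad = umad_get_mad(umad);
                /* RMPP header follows the 24-byte common MAD header;
                 * the ACTIVE flag is the low bit of byte 26. */
                if (mad[26] & RMPP_FLAG_ACTIVE)
                        printf("raw RMPP segment: reassembly/ACKs are ours\n");
        }

        free(umad);
out_unreg:
        umad_unregister(portid, agent);
out:
        umad_close_port(portid);
        umad_done();
        return 0;
}

With the kernel's RMPP state machine bypassed this way, segmentation, ACK
generation, and window handling would all fall to the application, which is
the sort of control described above.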

>
>>
>> -----Original Message-----
>> From: Hal Rosenstock [mailto:[email protected]]
>> Sent: Thursday, May 27, 2010 9:07 AM
>> To: Mike Heinz
>> Cc: [email protected]; Roland Dreier
>> Subject: Re: [PATCH] IB/core User RMPP patch
>>
>> On Wed, May 26, 2010 at 4:14 PM, Mike Heinz <[email protected]> wrote:
>>> Currently, if a user application calls umad_register() or 
>>> umad_register_oui() with an rmpp_version of zero, incoming RMPP messages 
>>> are discarded.
>>
>> Actually, it's the underlying ib_register_mad_agent call, and this drop
>> behavior is intentional.
>>
>>> This patch changes that behavior so that an rmpp_version of zero instead 
>>> causes incoming RMPP packets to be passed through to the application 
>>> without alteration.
>>
>> What's the motivation to make such a behavioral change?
>>
>> -- Hal
>>
>>>
>>> Signed-off-by: Michael Heinz <[email protected]>
>>> ----
>>>
>>> diff --git a/drivers/infiniband/core/mad.c b/drivers/infiniband/core/mad.c
>>> index 80282f1..b3457ae 100644
>>> --- a/drivers/infiniband/core/mad.c
>>> +++ b/drivers/infiniband/core/mad.c
>>> @@ -1830,25 +1830,39 @@ static void ib_mad_complete_recv(struct ib_mad_agent_private *mad_agent_priv,
>>>        if (ib_response_mad(mad_recv_wc->recv_buf.mad)) {
>>>                spin_lock_irqsave(&mad_agent_priv->lock, flags);
>>>                mad_send_wr = ib_find_send_mad(mad_agent_priv, mad_recv_wc);
>>> -               if (!mad_send_wr) {
>>> +               if (mad_send_wr) {
>>> +                       ib_mark_mad_done(mad_send_wr);
>>>                        spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
>>> -                       ib_free_recv_mad(mad_recv_wc);
>>> -                       deref_mad_agent(mad_agent_priv);
>>> -                       return;
>>> -               }
>>> -               ib_mark_mad_done(mad_send_wr);
>>> -               spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
>>>
>>> -               /* Defined behavior is to complete response before request */
>>> -               mad_recv_wc->wc->wr_id = (unsigned long) &mad_send_wr->send_buf;
>>> -               mad_agent_priv->agent.recv_handler(&mad_agent_priv->agent,
>>> -                                                  mad_recv_wc);
>>> -               atomic_dec(&mad_agent_priv->refcount);
>>> +                       /* Defined behavior is to complete response before request */
>>> +                       mad_recv_wc->wc->wr_id = (unsigned long) &mad_send_wr->send_buf;
>>> +                       mad_agent_priv->agent.recv_handler(&mad_agent_priv->agent,
>>> +                                                          mad_recv_wc);
>>> +                       atomic_dec(&mad_agent_priv->refcount);
>>>
>>> -               mad_send_wc.status = IB_WC_SUCCESS;
>>> -               mad_send_wc.vendor_err = 0;
>>> -               mad_send_wc.send_buf = &mad_send_wr->send_buf;
>>> -               ib_mad_complete_send_wr(mad_send_wr, &mad_send_wc);
>>> +                       mad_send_wc.status = IB_WC_SUCCESS;
>>> +                       mad_send_wc.vendor_err = 0;
>>> +                       mad_send_wc.send_buf = &mad_send_wr->send_buf;
>>> +                       ib_mad_complete_send_wr(mad_send_wr, &mad_send_wc);
>>> +               } else {
>>> +                       if (  !mad_agent_priv->agent.rmpp_version
>>> +                          && ib_is_mad_class_rmpp(mad_recv_wc->recv_buf.mad->mad_hdr.mgmt_class)
>>> +                          && (ib_get_rmpp_flags(&((struct ib_rmpp_mad *)mad_recv_wc->recv_buf.mad)->rmpp_hdr) & IB_MGMT_RMPP_FLAG_ACTIVE)) {
>>> +                               /* user rmpp is in effect */
>>> +                               spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
>>> +
>>> +                               mad_recv_wc->wc->wr_id = 0;
>>> +                               mad_agent_priv->agent.recv_handler(&mad_agent_priv->agent,
>>> +                                                                  mad_recv_wc);
>>> +                               atomic_dec(&mad_agent_priv->refcount);
>>> +                       } else {
>>> +                               /* not user rmpp, revert to normal behavior and drop the mad */
>>> +                               spin_unlock_irqrestore(&mad_agent_priv->lock, flags);
>>> +                               ib_free_recv_mad(mad_recv_wc);
>>> +                               deref_mad_agent(mad_agent_priv);
>>> +                               return;
>>> +                       }
>>> +               }
>>>        } else {
>>>                mad_agent_priv->agent.recv_handler(&mad_agent_priv->agent,
>>>                                                   mad_recv_wc);
>>> diff --git a/drivers/infiniband/core/user_mad.c b/drivers/infiniband/core/user_mad.c
>>> index 36f815c..f9cacbc 100644
>>> --- a/drivers/infiniband/core/user_mad.c
>>> +++ b/drivers/infiniband/core/user_mad.c
>>> @@ -508,7 +508,7 @@ static ssize_t ib_umad_write(struct file *filp, const char __user *buf,
>>>
>>>        rmpp_mad = (struct ib_rmpp_mad *) packet->mad.data;
>>>        hdr_len = ib_get_mad_data_offset(rmpp_mad->mad_hdr.mgmt_class);
>>> -       if (!ib_is_mad_class_rmpp(rmpp_mad->mad_hdr.mgmt_class)) {
>>> +       if (!agent->rmpp_version || !ib_is_mad_class_rmpp(rmpp_mad->mad_hdr.mgmt_class)) {
>>>                copy_offset = IB_MGMT_MAD_HDR;
>>>                rmpp_active = 0;
>>>        } else {
>>> @@ -560,14 +560,22 @@ static ssize_t ib_umad_write(struct file *filp, const char __user *buf,
>>>                rmpp_mad->mad_hdr.tid = *tid;
>>>        }
>>>
>>> -       spin_lock_irq(&file->send_lock);
>>> -       ret = is_duplicate(file, packet);
>>> -       if (!ret)
>>> +       if (  !agent->rmpp_version
>>> +          && ib_is_mad_class_rmpp(rmpp_mad->mad_hdr.mgmt_class)
>>> +          && (ib_get_rmpp_flags(&rmpp_mad->rmpp_hdr) & IB_MGMT_RMPP_FLAG_ACTIVE)) {
>>> +               spin_lock_irq(&file->send_lock);
>>>                list_add_tail(&packet->list, &file->send_list);
>>> -       spin_unlock_irq(&file->send_lock);
>>> -       if (ret) {
>>> -               ret = -EINVAL;
>>> -               goto err_msg;
>>> +               spin_unlock_irq(&file->send_lock);
>>> +       } else {
>>> +               spin_lock_irq(&file->send_lock);
>>> +               ret = is_duplicate(file, packet);
>>> +               if (!ret)
>>> +                       list_add_tail(&packet->list, &file->send_list);
>>> +               spin_unlock_irq(&file->send_lock);
>>> +               if (ret) {
>>> +                       ret = -EINVAL;
>>> +                       goto err_msg;
>>> +               }
>>>        }
>>>
>>>        ret = ib_post_send_mad(packet->msg, NULL);
>>>
>>>