Here's a patch that just renames a few structure members related to MADs. The renamed members will be used when handling MAD timeouts.
- Sean -- Index: access/ib_mad_priv.h =================================================================== --- access/ib_mad_priv.h (revision 955) +++ access/ib_mad_priv.h (working copy) @@ -106,7 +106,7 @@ struct ib_mad_reg_req *reg_req; struct ib_mad_port_private *port_priv; - spinlock_t send_list_lock; + spinlock_t lock; struct list_head send_list; atomic_t refcount; @@ -116,11 +116,11 @@ struct ib_mad_send_wr_private { struct list_head send_list; - struct list_head agent_send_list; + struct list_head agent_list; struct ib_mad_agent *agent; u64 wr_id; /* client WR ID */ u64 tid; - int timeout_ms; + int timeout; int refcount; enum ib_wc_status status; }; Index: access/ib_mad.c =================================================================== --- access/ib_mad.c (revision 955) +++ access/ib_mad.c (working copy) @@ -230,7 +230,7 @@ list_add_tail(&mad_agent_priv->agent_list, &port_priv->agent_list); spin_unlock_irqrestore(&port_priv->reg_lock, flags); - spin_lock_init(&mad_agent_priv->send_list_lock); + spin_lock_init(&mad_agent_priv->lock); INIT_LIST_HEAD(&mad_agent_priv->send_list); atomic_set(&mad_agent_priv->refcount, 1); init_waitqueue_head(&mad_agent_priv->wait); @@ -352,8 +352,8 @@ mad_send_wr->tid = send_wr->wr.ud.mad_hdr->tid; mad_send_wr->agent = mad_agent; - mad_send_wr->timeout_ms = cur_send_wr->wr.ud.timeout_ms; - if (mad_send_wr->timeout_ms) + mad_send_wr->timeout = cur_send_wr->wr.ud.timeout_ms; + if (mad_send_wr->timeout) mad_send_wr->refcount = 2; else mad_send_wr->refcount = 1; @@ -361,10 +361,10 @@ /* Reference MAD agent until send completes */ atomic_inc(&mad_agent_priv->refcount); - spin_lock_irqsave(&mad_agent_priv->send_list_lock, flags); - list_add_tail(&mad_send_wr->agent_send_list, + spin_lock_irqsave(&mad_agent_priv->lock, flags); + list_add_tail(&mad_send_wr->agent_list, &mad_agent_priv->send_list); - spin_unlock_irqrestore(&mad_agent_priv->send_list_lock, flags); + spin_unlock_irqrestore(&mad_agent_priv->lock, flags); cur_send_wr->next 
= NULL; ret = ib_send_mad(mad_agent_priv, mad_send_wr, @@ -373,11 +373,9 @@ /* Handle QP overrun separately... -ENOMEM */ /* Fail send request */ - spin_lock_irqsave(&mad_agent_priv->send_list_lock, - flags); - list_del(&mad_send_wr->agent_send_list); - spin_unlock_irqrestore(&mad_agent_priv->send_list_lock, - flags); + spin_lock_irqsave(&mad_agent_priv->lock, flags); + list_del(&mad_send_wr->agent_list); + spin_unlock_irqrestore(&mad_agent_priv->lock, flags); *bad_send_wr = cur_send_wr; if (atomic_dec_and_test(&mad_agent_priv->refcount)) @@ -788,12 +786,12 @@ struct ib_mad_send_wr_private *mad_send_wr; list_for_each_entry(mad_send_wr, &mad_agent_priv->send_list, - agent_send_list) { + agent_list) { if (mad_send_wr->tid == tid) { /* Verify request is still valid */ if (mad_send_wr->status == IB_WC_SUCCESS && - mad_send_wr->timeout_ms) + mad_send_wr->timeout) return mad_send_wr; else return NULL; @@ -817,17 +815,16 @@ /* Complete corresponding request */ if (solicited) { - spin_lock_irqsave(&mad_agent_priv->send_list_lock, flags); + spin_lock_irqsave(&mad_agent_priv->lock, flags); mad_send_wr = find_send_req(mad_agent_priv, recv->mad.mad.mad_hdr.tid); if (!mad_send_wr) { - spin_unlock_irqrestore(&mad_agent_priv->send_list_lock, - flags); + spin_unlock_irqrestore(&mad_agent_priv->lock, flags); ib_free_recv_mad(&recv->header.recv_wc); return; } - mad_send_wr->timeout_ms = 0; - spin_unlock_irqrestore(&mad_agent_priv->send_list_lock, flags); + mad_send_wr->timeout = 0; + spin_unlock_irqrestore(&mad_agent_priv->lock, flags); /* Defined behavior is to complete response before request */ mad_agent_priv->agent.recv_handler(&mad_agent_priv->agent, @@ -951,13 +948,13 @@ mad_agent_priv = container_of(mad_send_wr->agent, struct ib_mad_agent_private, agent); - spin_lock_irqsave(&mad_agent_priv->send_list_lock, flags); + spin_lock_irqsave(&mad_agent_priv->lock, flags); if (mad_send_wc->status != IB_WC_SUCCESS && mad_send_wr->status == IB_WC_SUCCESS) { mad_send_wr->status = 
mad_send_wc->status; - if (mad_send_wr->timeout_ms) { - mad_send_wr->timeout_ms = 0; + if (mad_send_wr->timeout) { + mad_send_wr->timeout = 0; mad_send_wr->refcount--; } } @@ -968,13 +965,13 @@ * or timeout occurs */ if (--mad_send_wr->refcount > 0) { - spin_unlock_irqrestore(&mad_agent_priv->send_list_lock, flags); + spin_unlock_irqrestore(&mad_agent_priv->lock, flags); return; } /* Remove send from MAD agent and notify client of completion */ - list_del(&mad_send_wr->agent_send_list); - spin_unlock_irqrestore(&mad_agent_priv->send_list_lock, flags); + list_del(&mad_send_wr->agent_list); + spin_unlock_irqrestore(&mad_agent_priv->lock, flags); if (mad_send_wr->status != IB_WC_SUCCESS ) mad_send_wc->status = mad_send_wr->status; @@ -1075,38 +1072,38 @@ INIT_LIST_HEAD(&cancel_list); - spin_lock_irqsave(&mad_agent_priv->send_list_lock, flags); + spin_lock_irqsave(&mad_agent_priv->lock, flags); list_for_each_entry_safe(mad_send_wr, temp_mad_send_wr, - &mad_agent_priv->send_list, agent_send_list) { + &mad_agent_priv->send_list, agent_list) { if (mad_send_wr->status == IB_WC_SUCCESS) mad_send_wr->status = IB_WC_WR_FLUSH_ERR; - if (mad_send_wr->timeout_ms) { - mad_send_wr->timeout_ms = 0; + if (mad_send_wr->timeout) { + mad_send_wr->timeout = 0; mad_send_wr->refcount--; } if (mad_send_wr->refcount == 0) { - list_del(&mad_send_wr->agent_send_list); - list_add_tail(&mad_send_wr->agent_send_list, + list_del(&mad_send_wr->agent_list); + list_add_tail(&mad_send_wr->agent_list, &cancel_list); } } - spin_unlock_irqrestore(&mad_agent_priv->send_list_lock, flags); + spin_unlock_irqrestore(&mad_agent_priv->lock, flags); /* Report all cancelled requests */ mad_send_wc.status = IB_WC_WR_FLUSH_ERR; mad_send_wc.vendor_err = 0; list_for_each_entry_safe(mad_send_wr, temp_mad_send_wr, - &cancel_list, agent_send_list) { + &cancel_list, agent_list) { mad_send_wc.wr_id = mad_send_wr->wr_id; mad_agent_priv->agent.send_handler(&mad_agent_priv->agent, &mad_send_wc); - 
list_del(&mad_send_wr->agent_send_list); + list_del(&mad_send_wr->agent_list); kfree(mad_send_wr); atomic_dec(&mad_agent_priv->refcount); @@ -1120,7 +1117,7 @@ struct ib_mad_send_wr_private *mad_send_wr; list_for_each_entry(mad_send_wr, &mad_agent_priv->send_list, - agent_send_list) { + agent_list) { if (mad_send_wr->wr_id == wr_id) return mad_send_wr; } @@ -1137,28 +1134,28 @@ mad_agent_priv = container_of(mad_agent, struct ib_mad_agent_private, agent); - spin_lock_irqsave(&mad_agent_priv->send_list_lock, flags); + spin_lock_irqsave(&mad_agent_priv->lock, flags); mad_send_wr = find_send_by_wr_id(mad_agent_priv, wr_id); if (!mad_send_wr) { - spin_unlock_irqrestore(&mad_agent_priv->send_list_lock, flags); + spin_unlock_irqrestore(&mad_agent_priv->lock, flags); goto ret; } if (mad_send_wr->status == IB_WC_SUCCESS) mad_send_wr->status = IB_WC_WR_FLUSH_ERR; - if (mad_send_wr->timeout_ms) { - mad_send_wr->timeout_ms = 0; + if (mad_send_wr->timeout) { + mad_send_wr->timeout = 0; mad_send_wr->refcount--; } if (mad_send_wr->refcount != 0) { - spin_unlock_irqrestore(&mad_agent_priv->send_list_lock, flags); + spin_unlock_irqrestore(&mad_agent_priv->lock, flags); goto ret; } - list_del(&mad_send_wr->agent_send_list); - spin_unlock_irqrestore(&mad_agent_priv->send_list_lock, flags); + list_del(&mad_send_wr->agent_list); + spin_unlock_irqrestore(&mad_agent_priv->lock, flags); mad_send_wc.status = IB_WC_WR_FLUSH_ERR; mad_send_wc.vendor_err = 0; _______________________________________________ openib-general mailing list [EMAIL PROTECTED] http://openib.org/mailman/listinfo/openib-general To unsubscribe, please visit http://openib.org/mailman/listinfo/openib-general