The only purpose of this patch is to reformat the code to keep it within 80 columns. The resulting code highlights some areas where we may want to look at restructuring it.
- Sean Index: access/ib_mad.c =================================================================== --- access/ib_mad.c (revision 949) +++ access/ib_mad.c (working copy) @@ -122,7 +122,7 @@ } if (rmpp_version) { - ret = ERR_PTR(-EINVAL); /* for now!!! (until RMPP implemented) */ + ret = ERR_PTR(-EINVAL); /* until RMPP implemented!!! */ goto error1; } @@ -133,8 +133,12 @@ goto error1; } if (mad_reg_req->mgmt_class >= MAX_MGMT_CLASS) { - /* IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE is the only one currently allowed */ - if (mad_reg_req->mgmt_class != IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) { + /* + * IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE is the only + * one currently allowed + */ + if (mad_reg_req->mgmt_class != + IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) { ret = ERR_PTR(-EINVAL); goto error1; } @@ -188,12 +192,13 @@ if (mad_reg_req) { class = port_priv->version[mad_reg_req->mgmt_class_version]; if (class) { - mgmt_class = convert_mgmt_class(mad_reg_req->mgmt_class); + mgmt_class = convert_mgmt_class( + mad_reg_req->mgmt_class); method = class->method_table[mgmt_class]; if (method) { if (method_in_use(&method, mad_reg_req)) { - spin_unlock_irqrestore(&port_priv->reg_lock, flags); - + spin_unlock_irqrestore( + &port_priv->reg_lock, flags); ret = ERR_PTR(-EINVAL); goto error2; } @@ -340,7 +345,8 @@ GFP_ATOMIC : GFP_KERNEL); if (!mad_send_wr) { *bad_send_wr = cur_send_wr; - printk(KERN_ERR "No memory for ib_mad_send_wr_private\n"); + printk(KERN_ERR "No memory for " + "ib_mad_send_wr_private\n"); return -ENOMEM; } @@ -396,7 +402,8 @@ struct ib_mad_private_header *mad_priv_hdr; struct ib_mad_private *priv; - mad_priv_hdr = container_of(mad_recv_wc, struct ib_mad_private_header, recv_wc); + mad_priv_hdr = container_of(mad_recv_wc, struct ib_mad_private_header, + recv_wc); priv = container_of(mad_priv_hdr, struct ib_mad_private, header); /* @@ -406,8 +413,10 @@ list_for_each_entry(entry, &mad_recv_wc->recv_buf->list, list) { /* Free previous receive buffer */ kmem_cache_free(ib_mad_cache, priv); - 
mad_priv_hdr = container_of(entry, struct ib_mad_private_header, recv_buf); - priv = container_of(mad_priv_hdr, struct ib_mad_private, header); + mad_priv_hdr = container_of(entry, struct ib_mad_private_header, + recv_buf); + priv = container_of(mad_priv_hdr, struct ib_mad_private, + header); } /* Free last buffer */ @@ -454,7 +463,8 @@ for (i = find_first_bit(mad_reg_req->method_mask, IB_MGMT_MAX_METHODS); i < IB_MGMT_MAX_METHODS; - i = find_next_bit(mad_reg_req->method_mask, IB_MGMT_MAX_METHODS, 1+i)) { + i = find_next_bit(mad_reg_req->method_mask, IB_MGMT_MAX_METHODS, + 1+i)) { if ((*method)->agent[i]) { printk(KERN_ERR "Method %d already in use\n", i); return -EINVAL; @@ -494,7 +504,10 @@ { int i, j; - /* Check to see if there are any method tables for this class still in use */ + /* + * Check to see if there are any method tables for this class still + * in use + */ j = 0; for (i = 0; i < MAX_MGMT_CLASS; i++) { if (class->method_table[i]) { @@ -538,7 +551,8 @@ /* Allocate management class table for "new" class version */ *class = kmalloc(sizeof **class, GFP_KERNEL); if (!*class) { - printk(KERN_ERR "No memory for ib_mad_mgmt_class_table\n"); + printk(KERN_ERR "No memory for " + "ib_mad_mgmt_class_table\n"); goto error1; } /* Clear management class table for this class version */ @@ -568,7 +582,8 @@ /* Finally, add in methods being registered */ for (i = find_first_bit(mad_reg_req->method_mask, IB_MGMT_MAX_METHODS); i < IB_MGMT_MAX_METHODS; - i = find_next_bit(mad_reg_req->method_mask, IB_MGMT_MAX_METHODS, 1+i)) { + i = find_next_bit(mad_reg_req->method_mask, IB_MGMT_MAX_METHODS, + 1+i)) { (*method)->agent[i] = priv; } return 0; @@ -608,7 +623,8 @@ port_priv = agent_priv->port_priv; class = port_priv->version[agent_priv->reg_req->mgmt_class_version]; if (!class) { - printk(KERN_ERR "No class table yet MAD registration request supplied\n"); + printk(KERN_ERR "No class table yet MAD registration request " + "supplied\n"); goto ret; } @@ -626,7 +642,8 @@ if 
(!check_class_table(class)) { /* If not, release management class table */ kfree(class); - port_priv->version[agent_priv->reg_req->mgmt_class_version] = NULL; + port_priv->version[agent_priv->reg_req-> + mgmt_class_version]= NULL; } } } @@ -670,9 +687,10 @@ return response_mad(mad); } -static struct ib_mad_agent_private *find_mad_agent(struct ib_mad_port_private *port_priv, - struct ib_mad *mad, - int solicited) +static struct ib_mad_agent_private * +find_mad_agent(struct ib_mad_port_private *port_priv, + struct ib_mad *mad, + int solicited) { struct ib_mad_agent_private *entry, *mad_agent = NULL; struct ib_mad_mgmt_class_table *version; @@ -690,28 +708,35 @@ } } if (!mad_agent) { - printk(KERN_ERR "No client 0x%x for received MAD on port %d\n", - hi_tid, port_priv->port_num); + printk(KERN_ERR "No client 0x%x for received MAD on " + "port %d\n", hi_tid, port_priv->port_num); goto ret; } } else { /* Routing is based on version, class, and method */ if (mad->mad_hdr.class_version >= MAX_MGMT_VERSION) { - printk(KERN_ERR "MAD received with unsupported class version %d on port %d\n", + printk(KERN_ERR "MAD received with unsupported class " + "version %d on port %d\n", mad->mad_hdr.class_version, port_priv->port_num); goto ret; } version = port_priv->version[mad->mad_hdr.class_version]; if (!version) { - printk(KERN_ERR "MAD received on port %d for class version %d with no client\n", port_priv->port_num, mad->mad_hdr.class_version); + printk(KERN_ERR "MAD received on port %d for class " + "version %d with no client\n", + port_priv->port_num, mad->mad_hdr.class_version); goto ret; } - class = version->method_table[convert_mgmt_class(mad->mad_hdr.mgmt_class)]; + class = version->method_table[convert_mgmt_class( + mad->mad_hdr.mgmt_class)]; if (!class) { - printk(KERN_ERR "MAD received on port %d for class %d with no client\n", port_priv->port_num, mad->mad_hdr.mgmt_class); + printk(KERN_ERR "MAD received on port %d for class " + "%d with no client\n", + 
port_priv->port_num, mad->mad_hdr.mgmt_class); goto ret; } - mad_agent = class->agent[mad->mad_hdr.method & ~IB_MGMT_METHOD_RESP]; + mad_agent = class->agent[mad->mad_hdr.method & + ~IB_MGMT_METHOD_RESP]; } ret: @@ -724,8 +749,8 @@ /* Make sure MAD base version is understood */ if (mad->mad_hdr.base_version != IB_MGMT_BASE_VERSION) { - printk(KERN_ERR "MAD received with unsupported base version %d\n", - mad->mad_hdr.base_version); + printk(KERN_ERR "MAD received with unsupported base " + "version %d\n", mad->mad_hdr.base_version); goto ret; } @@ -747,8 +772,9 @@ /* * Return start of fully reassembled MAD, or NULL, if MAD isn't assembled yet */ -static struct ib_mad_private* reassemble_recv(struct ib_mad_agent_private *mad_agent_priv, - struct ib_mad_private *recv) +static struct ib_mad_private * +reassemble_recv(struct ib_mad_agent_private *mad_agent_priv, + struct ib_mad_private *recv) { /* Until we have RMPP, all receives are reassembled!... */ INIT_LIST_HEAD(&recv->header.recv_buf.list); @@ -854,15 +880,16 @@ rbuf = (struct ib_mad_recv_buf *)rbuf->list.next; mad_priv_hdr = container_of(rbuf, struct ib_mad_private_header, recv_buf); - recv = container_of(mad_priv_hdr, struct ib_mad_private, header); + recv = container_of(mad_priv_hdr, struct ib_mad_private, + header); /* Remove from posted receive MAD list */ list_del(&recv->header.recv_buf.list); port_priv->recv_posted_mad_count[qpn]--; } else { - printk(KERN_ERR "Receive completion WR ID 0x%Lx on QP %d with no" - "posted receive\n", wc->wr_id, qp_num); + printk(KERN_ERR "Receive completion WR ID 0x%Lx on QP %d " + "with no posted receive\n", wc->wr_id, qp_num); spin_unlock_irqrestore(&port_priv->recv_list_lock, flags); ib_mad_post_receive_mad(port_priv, port_priv->qp[qp_num]); return; @@ -893,7 +920,8 @@ solicited); if (!mad_agent) { spin_unlock_irqrestore(&port_priv->reg_lock, flags); - printk(KERN_NOTICE "No matching mad agent found for received MAD on port %d\n", port_priv->port_num); + printk(KERN_NOTICE 
"No matching mad agent found for received " + "MAD on port %d\n", port_priv->port_num); } else { atomic_inc(&mad_agent->refcount); spin_unlock_irqrestore(&port_priv->reg_lock, flags); @@ -978,7 +1006,8 @@ struct ib_mad_send_wr_private, send_list); send_wr = mad_send_wr->send_list.next; - mad_send_wr = container_of(send_wr, struct ib_mad_send_wr_private, send_list); + mad_send_wr = container_of(send_wr, struct ib_mad_send_wr_private, + send_list); if (wc->wr_id != (unsigned long)mad_send_wr) { printk(KERN_ERR "Send completion WR ID 0x%Lx doesn't match " "posted send WR ID 0x%lx\n", @@ -994,7 +1023,6 @@ /* Restore client wr_id in WC */ wc->wr_id = mad_send_wr->wr_id; - ib_mad_complete_send_wr(mad_send_wr, (struct ib_mad_send_wc*)wc); return; @@ -1012,7 +1040,8 @@ ib_req_notify_cq(port_priv->cq, IB_CQ_NEXT_COMP); while (ib_poll_cq(port_priv->cq, 1, &wc) == 1) { - printk(KERN_DEBUG "Completion opcode 0x%x WRID 0x%Lx\n", wc.opcode, wc.wr_id); + printk(KERN_DEBUG "Completion opcode 0x%x WRID 0x%Lx\n", + wc.opcode, wc.wr_id); switch (wc.opcode) { case IB_WC_SEND: if (wc.status != IB_WC_SUCCESS) @@ -1027,10 +1056,11 @@ ib_mad_recv_done_handler(port_priv, &wc); break; default: - printk(KERN_ERR "Wrong Opcode 0x%x on completion\n", wc.opcode); + printk(KERN_ERR "Wrong Opcode 0x%x on completion\n", + wc.opcode); if (wc.status) { - printk(KERN_ERR "Completion error %d\n", wc.status); - + printk(KERN_ERR "Completion error %d\n", + wc.status); } } } @@ -1235,7 +1265,8 @@ /* Setup scatter list */ sg_list.addr = pci_map_single(port_priv->device->dma_device, &mad_priv->grh, - sizeof *mad_priv - sizeof mad_priv->header, + sizeof *mad_priv - + sizeof mad_priv->header, PCI_DMA_FROMDEVICE); sg_list.length = sizeof *mad_priv - sizeof mad_priv->header; sg_list.lkey = (*port_priv->mr).lkey; @@ -1274,7 +1305,8 @@ spin_unlock_irqrestore(&port_priv->recv_list_lock, flags); kmem_cache_free(ib_mad_cache, mad_priv); - printk(KERN_NOTICE "ib_post_recv WRID 0x%Lx failed ret = %d\n", 
recv_wr.wr_id, ret); + printk(KERN_NOTICE "ib_post_recv WRID 0x%Lx failed ret = %d\n", + recv_wr.wr_id, ret); return -EINVAL; } @@ -1292,8 +1324,9 @@ for (j = 0; j < IB_MAD_QPS_CORE; j++) { if (ib_mad_post_receive_mad(port_priv, port_priv->qp[j])) { - printk(KERN_ERR "receive post %d failed on %s port %d\n", - i + 1, port_priv->device->name, + printk(KERN_ERR "receive post %d failed on %s " + "port %d\n", i + 1, + port_priv->device->name, port_priv->port_num); } } @@ -1337,7 +1370,6 @@ PCI_DMA_FROMDEVICE); kmem_cache_free(ib_mad_cache, recv); - } INIT_LIST_HEAD(&port_priv->recv_posted_mad_list[i]); @@ -1485,7 +1517,8 @@ for (i = 0; i < IB_MAD_QPS_CORE; i++) { ret = ib_mad_change_qp_state_to_init(port_priv->qp[i]); if (ret) { - printk(KERN_ERR "Couldn't change QP%d state to INIT\n", i); + printk(KERN_ERR "Couldn't change QP%d state to " + "INIT\n", i); return ret; } } @@ -1505,13 +1538,15 @@ for (i = 0; i < IB_MAD_QPS_CORE; i++) { ret = ib_mad_change_qp_state_to_rtr(port_priv->qp[i]); if (ret) { - printk(KERN_ERR "Couldn't change QP%d state to RTR\n", i); + printk(KERN_ERR "Couldn't change QP%d state to " + "RTR\n", i); goto error; } ret = ib_mad_change_qp_state_to_rts(port_priv->qp[i]); if (ret) { - printk(KERN_ERR "Couldn't change QP%d state to RTS\n", i); + printk(KERN_ERR "Couldn't change QP%d state to " + "RTS\n", i); goto error; } } @@ -1522,7 +1557,8 @@ for (i = 0; i < IB_MAD_QPS_CORE; i++) { ret2 = ib_mad_change_qp_state_to_reset(port_priv->qp[i]); if (ret2) { - printk(KERN_ERR "ib_mad_port_start: Couldn't change QP%d state to RESET\n", i); + printk(KERN_ERR "ib_mad_port_start: Couldn't change " + "QP%d state to RESET\n", i); } } @@ -1539,7 +1575,8 @@ for (i = 0; i < IB_MAD_QPS_CORE; i++) { ret = ib_mad_change_qp_state_to_reset(port_priv->qp[i]); if (ret) { - printk(KERN_ERR "ib_mad_port_stop: Couldn't change %s port %d QP%d state to RESET\n", + printk(KERN_ERR "ib_mad_port_stop: Couldn't change %s " + "port %d QP%d state to RESET\n", 
port_priv->device->name, port_priv->port_num, i); } } @@ -1597,7 +1634,8 @@ cq_size = (IB_MAD_QP_SEND_SIZE + IB_MAD_QP_RECV_SIZE) * 2; port_priv->cq = ib_create_cq(port_priv->device, - (ib_comp_handler) ib_mad_thread_completion_handler, + (ib_comp_handler) + ib_mad_thread_completion_handler, NULL, port_priv, cq_size); if (IS_ERR(port_priv->cq)) { printk(KERN_ERR "Couldn't create ib_mad CQ\n"); -- _______________________________________________ openib-general mailing list [EMAIL PROTECTED] http://openib.org/mailman/listinfo/openib-general To unsubscribe, please visit http://openib.org/mailman/listinfo/openib-general