Currently we send GNN_ID and perform the rport login while we are
handling the response to GPN_FT. Since we are changing the locking so
that the GPN_FT response is handled with the lport.state_lock held,
while GNN_ID and PLOGI/PRLI are sent without the state_lock, we need
to separate the two parts.

Signed-off-by: Robert Love <[EMAIL PROTECTED]>
---
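
For context, the shape of the change is a collect-then-dispatch pattern:
queue the discovered ports on a local list while parsing (where the
follow-up patch will hold lport.state_lock), then walk that list and
schedule the per-port work once the lock can be dropped. Below is a
minimal sketch of that pattern; disc_port, disc_parse and disc_port_work
are made-up names for illustration, not the real libfc symbols.

#include <linux/list.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>

/* Illustrative only -- not the real libfc structures. */
struct disc_port {
	struct list_head peers;
	struct work_struct work;
};

static void disc_port_work(struct work_struct *work)
{
	struct disc_port *dp = container_of(work, struct disc_port, work);

	/* send GNN_ID / PLOGI for this port here, then release it */
	kfree(dp);
}

static void disc_parse(spinlock_t *state_lock)
{
	LIST_HEAD(disc_list);
	struct disc_port *dp, *next;

	spin_lock(state_lock);		/* taken by the follow-up patch */
	/* ... parse the GPN_FT payload, one entry per discovered port ... */
	dp = kzalloc(sizeof(*dp), GFP_ATOMIC);
	if (dp)
		list_add_tail(&dp->peers, &disc_list);
	spin_unlock(state_lock);

	/* Lock dropped: now it is safe to kick off the per-port work. */
	list_for_each_entry_safe(dp, next, &disc_list, peers) {
		list_del(&dp->peers);
		INIT_WORK(&dp->work, disc_port_work);
		schedule_work(&dp->work);
	}
}
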

 drivers/scsi/libfc/fc_ns.c |   35 +++++++++++++++++++----------------
 1 files changed, 19 insertions(+), 16 deletions(-)

diff --git a/drivers/scsi/libfc/fc_ns.c b/drivers/scsi/libfc/fc_ns.c
index d6a0725..d354fbe 100644
--- a/drivers/scsi/libfc/fc_ns.c
+++ b/drivers/scsi/libfc/fc_ns.c
@@ -713,14 +713,14 @@ static void fcdt_ns_error(struct fc_lport *lp, struct fc_frame *fp)
  * @buf: GPN_FT response buffer
  * @len: size of response buffer
  */
-static int fc_ns_gpn_ft_parse(struct fc_lport *lp, void *buf, size_t len)
+static void fc_ns_gpn_ft_parse(struct fc_lport *lp, void *buf, size_t len)
 {
        struct fc_gpn_ft_resp *np;
        char *bp;
        size_t plen;
        size_t tlen;
-       int error = 0;
-       struct fc_ns_port *dp;
+       LIST_HEAD(disc_list);
+       struct fc_ns_port *dp, *next;
 
        /*
         * Handle partial name record left over from previous call.
@@ -768,32 +768,39 @@ static int fc_ns_gpn_ft_parse(struct fc_lport *lp, void *buf, size_t len)
                dp->ids.port_name = ntohll(np->fp_wwpn);
                dp->ids.node_name = -1;
                dp->ids.roles = FC_RPORT_ROLE_UNKNOWN;
-
-               INIT_WORK(&dp->rport_create_work, fc_ns_gnn_id_req);
-               schedule_work(&dp->rport_create_work);
-
+               list_add_tail(&dp->peers, &disc_list);
+
                if (np->fp_flags & FC_NS_FID_LAST) {
-                       fc_ns_disc_done(lp);
                        len = 0;
                        break;
                }
+
                len -= sizeof(*np);
                bp += sizeof(*np);
                np = (struct fc_gpn_ft_resp *)bp;
                plen = len;
        }
+       /* RWL- Unlock here in the next patch */
+
+       dp = NULL;
+       list_for_each_entry_safe(dp, next, &disc_list, peers) {
+               INIT_WORK(&dp->rport_create_work, fc_ns_gnn_id_req);
+               schedule_work(&dp->rport_create_work);
+               list_del(&dp->peers);
+       }
 
        /*
         * Save any partial record at the end of the buffer for next time.
         */
-       if (error == 0 && len > 0 && len < sizeof(*np)) {
+       if (len > 0 && len < sizeof(*np)) {
                if (np != &lp->ns_disc_buf)
                        memcpy(&lp->ns_disc_buf, np, len);
                lp->ns_disc_buf_len = (unsigned char) len;
        } else {
                lp->ns_disc_buf_len = 0;
        }
-       return error;
+
+       fc_ns_disc_done(lp);
 }
 
 /*
@@ -828,7 +835,6 @@ static void fc_ns_gpn_ft_resp(struct fc_seq *sp, struct fc_frame *fp,
        unsigned int seq_cnt;
        void *buf = NULL;
        unsigned int len;
-       int error;
 
        if (IS_ERR(fp)) {
                fcdt_ns_error(lp, fp);
@@ -869,11 +875,8 @@ static void fc_ns_gpn_ft_resp(struct fc_seq *sp, struct fc_frame *fp,
                       seq_cnt, lp->ns_disc_seq_count, fr_sof(fp), fr_eof(fp));
        }
        if (buf) {
-               error = fc_ns_gpn_ft_parse(lp, buf, len);
-               if (error)
-                       fcdt_ns_retry(lp);
-               else
-                       lp->ns_disc_seq_count++;
+               fc_ns_gpn_ft_parse(lp, buf, len);
+               lp->ns_disc_seq_count++;
        }
        fc_frame_free(fp);
 }
