Author: winnie
Date: Tue Sep 16 09:59:07 2008
New Revision: 498

URL: http://svn.debian.org/wsvn/pkg-lustre/?sc=1&rev=498
Log:
Some more patches

Added:
    trunk/debian/patches/bug16906_FULL_state_server_in_recovery.dpatch
    trunk/debian/patches/bug16972_DEBUG_REQ_paging_request.dpatch
Modified:
    trunk/debian/changelog
    trunk/debian/patches/00list

Modified: trunk/debian/changelog
URL: http://svn.debian.org/wsvn/pkg-lustre/trunk/debian/changelog?rev=498&op=diff
==============================================================================
--- trunk/debian/changelog (original)
+++ trunk/debian/changelog Tue Sep 16 09:59:07 2008
@@ -6,6 +6,8 @@
   * Added patch from bugzilla to prevent kernel oops when 
     ost disk is ~70% full. - Doesn't work yet
   * Add patch to support patchless clients <=2.6.24
+  * Added several patches from upstream bugzilla which fix some errors.
+    These are backports from patches applied to 1.6.6 or 1.6.7
   * Fix lintian warning about missing patch description in
     fix-symlink-attack.dpatch
   * Add lintian override for lustre, which overrides this messages:
@@ -22,7 +24,7 @@
        in make-tree
      - modified fix-lustre-manpage.dpatch to fix typo in lctl.8
 
- -- Patrick Winnertz <[EMAIL PROTECTED]>  Fri, 12 Sep 2008 15:41:54 +0200
+ -- Patrick Winnertz <[EMAIL PROTECTED]>  Tue, 16 Sep 2008 09:52:09 +0200
 
 lustre (1.6.5.1-1) unstable; urgency=low
 

Modified: trunk/debian/patches/00list
URL: http://svn.debian.org/wsvn/pkg-lustre/trunk/debian/patches/00list?rev=498&op=diff
==============================================================================
--- trunk/debian/patches/00list (original)
+++ trunk/debian/patches/00list Tue Sep 16 09:59:07 2008
@@ -13,6 +13,8 @@
 bug12769-ql-fix.dpatch
 operation_101_unconnected_mgs.dpatch
 fix_late_release_pa_structure.dpatch
+bug16972_DEBUG_REQ_paging_request.dpatch
+bug16906_FULL_state_server_in_recovery.dpatch
 patchless_support_2.6.24.dpatch
 # Debian patches
 bash_completion.dpatch

Added: trunk/debian/patches/bug16906_FULL_state_server_in_recovery.dpatch
URL: http://svn.debian.org/wsvn/pkg-lustre/trunk/debian/patches/bug16906_FULL_state_server_in_recovery.dpatch?rev=498&op=file
==============================================================================
--- trunk/debian/patches/bug16906_FULL_state_server_in_recovery.dpatch (added)
+++ trunk/debian/patches/bug16906_FULL_state_server_in_recovery.dpatch Tue Sep 16 09:59:07 2008
@@ -1,0 +1,30 @@
+#! /bin/sh /usr/share/dpatch/dpatch-run
+## bug16906_FULL_state_server_in_recovery.dpatch by Patrick Winnertz <[EMAIL PROTECTED]>
+##
+## All lines beginning with `## DP:' are a description of the patch.
+## DP: Patch from bug 16906 (is closed in 1.6.6)
+
+@DPATCH@
+Index: lustre+chaos4/lustre/ptlrpc/import.c
+===================================================================
+--- lustre+chaos4.orig/lustre/ptlrpc/import.c
++++ lustre+chaos4/lustre/ptlrpc/import.c
+@@ -676,7 +676,17 @@ static int ptlrpc_connect_interpret(stru
+                 imp->imp_remote_handle =
+                                 *lustre_msg_get_handle(request->rq_repmsg);
+ 
+-                IMPORT_SET_STATE(imp, LUSTRE_IMP_FULL);
++                /* Initial connects are allowed for clients with non-random
++                 * uuids when servers are in recovery.  Simply signal the
++                 * servers replay is complete and wait in REPLAY_WAIT. */
++                if (msg_flags & MSG_CONNECT_RECOVERING) {
++                        CDEBUG(D_HA, "connect to %s during recovery\n",
++                               obd2cli_tgt(imp->imp_obd));
++                        IMPORT_SET_STATE(imp, LUSTRE_IMP_REPLAY_LOCKS);
++                } else {
++                        IMPORT_SET_STATE(imp, LUSTRE_IMP_FULL);
++                }
++
+                 spin_lock(&imp->imp_lock);
+                 if (imp->imp_invalid) {
+                         spin_unlock(&imp->imp_lock);

Added: trunk/debian/patches/bug16972_DEBUG_REQ_paging_request.dpatch
URL: http://svn.debian.org/wsvn/pkg-lustre/trunk/debian/patches/bug16972_DEBUG_REQ_paging_request.dpatch?rev=498&op=file
==============================================================================
--- trunk/debian/patches/bug16972_DEBUG_REQ_paging_request.dpatch (added)
+++ trunk/debian/patches/bug16972_DEBUG_REQ_paging_request.dpatch Tue Sep 16 09:59:07 2008
@@ -1,0 +1,119 @@
+#! /bin/sh /usr/share/dpatch/dpatch-run
+## bug15949_DEBUG_REQ_paging_request.dpatch by Patrick Winnertz <[EMAIL PROTECTED]>
+##
+## All lines beginning with `## DP:' are a description of the patch.
+## DP: Patch from bug 15949 (is closed in 1.6.6)
+
+@DPATCH@
+Index: lustre/ptlrpc/client.c
+===================================================================
+RCS file: /cvsroot/cfs/lustre-core/ptlrpc/client.c,v
+retrieving revision 1.171.2.39.2.1
+diff -u -p -r1.171.2.39.2.1 client.c
+--- lustre/ptlrpc/client.c     7 Aug 2008 10:28:10 -0000       1.171.2.39.2.1
++++ lustre/ptlrpc/client.c     4 Sep 2008 19:55:28 -0000
+@@ -231,18 +231,18 @@ void ptlrpc_at_set_req_timeout(struct pt
+         lustre_msg_set_timeout(req->rq_reqmsg, req->rq_timeout);
+ }
+ 
+-/* Adjust max service estimate based on server value */
+-static void ptlrpc_at_adj_service(struct ptlrpc_request *req) 
++/* Adjust max service estimate based on server value.
++   Service estimate is returned in the repmsg timeout field,
++   may be 0 on err */
++static void ptlrpc_at_adj_service(struct ptlrpc_request *req,
++                                  unsigned int serv_est)
+ {
+         int idx;
+-        unsigned int serv_est, oldse;
+-        struct imp_at *at = &req->rq_import->imp_at;
++        unsigned int oldse;
++        struct imp_at *at;
+ 
+         LASSERT(req->rq_import);
+-        
+-        /* service estimate is returned in the repmsg timeout field,
+-           may be 0 on err */
+-        serv_est = lustre_msg_get_timeout(req->rq_repmsg);
++        at = &req->rq_import->imp_at;
+ 
+         idx = import_at_get_index(req->rq_import, req->rq_request_portal);
+         /* max service estimates are tracked on the server side,
+@@ -262,15 +262,15 @@ int ptlrpc_at_get_net_latency(struct ptl
+ }
+ 
+ /* Adjust expected network latency */
+-static void ptlrpc_at_adj_net_latency(struct ptlrpc_request *req)
++static void ptlrpc_at_adj_net_latency(struct ptlrpc_request *req,
++                                      unsigned int st)
+ {
+-        unsigned int st, nl, oldnl;
+-        struct imp_at *at = &req->rq_import->imp_at;
++        unsigned int nl, oldnl;
++        struct imp_at *at;
+         time_t now = cfs_time_current_sec();
+ 
+         LASSERT(req->rq_import);
+-
+-        st = lustre_msg_get_service_time(req->rq_repmsg);
++        at = &req->rq_import->imp_at;
+ 
+         /* Network latency is total time less server processing time */
+         nl = max_t(int, now - req->rq_sent - st, 0) + 1/*st rounding*/;
+@@ -314,7 +314,7 @@ static int unpack_reply(struct ptlrpc_re
+    We can't risk the real reply coming in and changing rq_repmsg, 
+    so this fn must be called under the rq_lock */
+ static int ptlrpc_at_recv_early_reply(struct ptlrpc_request *req) {
+-        struct lustre_msg *oldmsg, *msgcpy;
++        struct lustre_msg *msgcpy;
+         time_t olddl;
+         int oldlen, rc;
+         ENTRY;
+@@ -339,8 +339,7 @@ static int ptlrpc_at_recv_early_reply(st
+         /* Another reply might have changed the repmsg and replen while 
+            we dropped the lock; doesn't really matter, just use the latest.
+            If it doesn't fit in oldlen, checksum will be wrong. */
+-        oldmsg = req->rq_repmsg;
+-        memcpy(msgcpy, oldmsg, oldlen);
++        memcpy(msgcpy, req->rq_repmsg, oldlen);
+         if (lustre_msg_get_cksum(msgcpy) != 
+             lustre_msg_calc_cksum(msgcpy)) {
+                 CDEBUG(D_ADAPTTO, "Early reply checksum mismatch, "
+@@ -349,13 +348,9 @@ static int ptlrpc_at_recv_early_reply(st
+                 GOTO(out, rc = -EINVAL); 
+         }
+ 
+-        /* Our copied msg is valid, now we can adjust the timeouts without 
+-           worrying that a new reply will land on the copy. */
+-        req->rq_repmsg = msgcpy;
+-
+         /* Expecting to increase the service time estimate here */
+-        ptlrpc_at_adj_service(req);
+-        ptlrpc_at_adj_net_latency(req);
++        ptlrpc_at_adj_service(req, lustre_msg_get_timeout(msgcpy));
++        ptlrpc_at_adj_net_latency(req, lustre_msg_get_service_time(msgcpy));
+ 
+         /* Adjust the local timeout for this req */
+         ptlrpc_at_set_req_timeout(req);
+@@ -370,9 +365,7 @@ static int ptlrpc_at_recv_early_reply(st
+                   "Early reply #%d, new deadline in %lds (%+lds)", 
+                   req->rq_early_count, req->rq_deadline -
+                   cfs_time_current_sec(), req->rq_deadline - olddl);
+-        
+-        req->rq_repmsg = oldmsg;
+-        
++
+ out:
+         OBD_FREE(msgcpy, oldlen);
+         RETURN(rc);
+@@ -838,8 +831,8 @@ static int after_reply(struct ptlrpc_req
+                                     timediff);
+ 
+         OBD_FAIL_TIMEOUT(OBD_FAIL_PTLRPC_PAUSE_REP, obd_fail_val);
+-        ptlrpc_at_adj_service(req);
+-        ptlrpc_at_adj_net_latency(req);
++        ptlrpc_at_adj_service(req, lustre_msg_get_timeout(req->rq_repmsg));
++        ptlrpc_at_adj_net_latency(req, lustre_msg_get_service_time(req->rq_repmsg));
+ 
+         if (lustre_msg_get_type(req->rq_repmsg) != PTL_RPC_MSG_REPLY &&
+             lustre_msg_get_type(req->rq_repmsg) != PTL_RPC_MSG_ERR) {


_______________________________________________
Pkg-lustre-svn-commit mailing list
Pkg-lustre-svn-commit@lists.alioth.debian.org
http://lists.alioth.debian.org/mailman/listinfo/pkg-lustre-svn-commit

Reply via email to