Signed-off-by: Ruoyu <lian...@ucweb.com>
---
 sheep/gateway.c  |  6 +++---
 sheep/group.c    | 10 +++++-----
 sheep/recovery.c | 14 +++++++-------
 3 files changed, 15 insertions(+), 15 deletions(-)

diff --git a/sheep/gateway.c b/sheep/gateway.c
index 8868bce..49ca12b 100644
--- a/sheep/gateway.c
+++ b/sheep/gateway.c
@@ -102,7 +102,7 @@ out:
 
 /*
  * We spread data strips of req along with its parity strips onto replica for
- * write opertaion. For read we only need to prepare data strip buffers.
+ * write operation. For read we only need to prepare data strip buffers.
  */
 static struct req_iter *prepare_erasure_requests(struct request *req, int *nr)
 {
@@ -392,7 +392,7 @@ again:
                }
 
                nr_sent = fi->nr_sent;
-               /* XXX Blinedly close all the connections */
+               /* XXX Blindly close all the connections */
                for (i = 0; i < nr_sent; i++)
                        sockfd_cache_del(fi->ent[i].nid, fi->ent[i].sfd);
 
@@ -656,7 +656,7 @@ int gateway_write_obj(struct request *req)
                goto out;
 
        if (is_data_vid_update(hdr)) {
-               sd_debug("udpate reference counts, %" PRIx64, hdr->obj.oid);
+               sd_debug("update reference counts, %" PRIx64, hdr->obj.oid);
                update_obj_refcnt(hdr, vids, new_vids, refs);
        }
 out:
diff --git a/sheep/group.c b/sheep/group.c
index 39309eb..b4db954 100644
--- a/sheep/group.c
+++ b/sheep/group.c
@@ -434,8 +434,8 @@ static bool enough_nodes_gathered(struct cluster_info *cinfo,
 }
 
 /*
- * We have to use memcpy beause some cluster drivers like corosync can't support
- * to send the whole cluster_info structure.
+ * We have to use memcpy because some cluster drivers like corosync can't
+ * support to send the whole cluster_info structure.
  */
 static void cluster_info_copy(struct cluster_info *dst,
                              const struct cluster_info *src)
@@ -1073,9 +1073,9 @@ static bool cluster_join_check(const struct cluster_info *cinfo)
        /*
         * Sheepdog's recovery code assumes every node have the same epoch
         * history. But we don't check epoch history of joining node because:
-        * 1. inconsist epoch history only happens in the network partition case
-        *    for the corosync driver, but corosync driver will panic for such
-        *    case to prevent epoch inconsistency.
+        * 1. inconsistent epoch history only happens in the network partition
+        *    case for the corosync driver, but corosync driver will panic for
+        *    such case to prevent epoch inconsistency.
         * 2. checking epoch history with joining node is too expensive and is
         *    unneeded for zookeeper driver.
         *
diff --git a/sheep/recovery.c b/sheep/recovery.c
index 4648966..0a8f5eb 100644
--- a/sheep/recovery.c
+++ b/sheep/recovery.c
@@ -32,7 +32,7 @@ struct recovery_list_work {
        uint64_t *oids;
 };
 
-/* for recoverying objects */
+/* for recovering objects */
 struct recovery_obj_work {
        struct recovery_work base;
 
@@ -609,7 +609,7 @@ main_fn bool oid_in_recovery(uint64_t oid)
 
        cur = rinfo->cur_vinfo;
        if (sd_store->exist(oid, local_ec_index(cur, oid))) {
-               sd_debug("the object %" PRIx64 " is already recoverd", oid);
+               sd_debug("the object %" PRIx64 " is already recovered", oid);
                return false;
        }
 
@@ -659,7 +659,7 @@ main_fn bool oid_in_recovery(uint64_t oid)
                sd_debug("%"PRIx64" is not in the recovery list", oid);
                return false;
        case RW_NOTIFY_COMPLETION:
-               sd_debug("the object %" PRIx64 " is already recoverd", oid);
+               sd_debug("the object %" PRIx64 " is already recovered", oid);
                return false;
        }
 
@@ -719,7 +719,7 @@ static inline bool run_next_rw(void)
 
        nrinfo = uatomic_xchg_ptr(&next_rinfo, NULL);
        /*
-        * When md recovery supersed the reweight or node recovery, we need to
+        * When md recovery supersedes the reweight or node recovery, we need to
         * notify completion.
         */
        if (!nrinfo->notify_complete && cur->notify_complete)
@@ -936,7 +936,7 @@ static void finish_object_list(struct work *work)
         *    chances to be blocked for write and also improve reliability.
         * 3. For disk failure in node, this is similar to adding a node. All
         *    the data on the broken disk will be recovered on other disks in
-        *    this node. Speedy recoery not only improve data reliability but
+        *    this node. Speedy recovery not only improve data reliability but
         *    also cause less writing blocking on the lost data.
         *
         * We choose md_nr_disks() * 2 threads for recovery, no rationale.
@@ -1122,7 +1122,7 @@ int start_recovery(struct vnode_info *cur_vinfo, struct vnode_info *old_vinfo,
                sd_debug("recovery skipped");
 
                /*
-                * This is necesary to invoke run_next_rw when
+                * This is necessary to invoke run_next_rw when
                 * recovery work is suspended.
                 */
                resume_suspended_recovery();
@@ -1163,7 +1163,7 @@ static void queue_recovery_work(struct recovery_info *rinfo)
                rw->work.done = notify_recovery_completion_main;
                break;
        default:
-               panic("unknow recovery state %d", rinfo->state);
+               panic("unknown recovery state %d", rinfo->state);
                break;
        }
 
-- 
1.8.3.2

