Add a helper function, add_ranges(), to avoid duplicating the range
intersection code between add_dyn_reconf_usable_mem_property__() and
add_usable_mem_property().

Signed-off-by: Hari Bathini <hbath...@linux.vnet.ibm.com>
---
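For reviewers, the gist of the helper is the clipping step: each usable
memory region is intersected with the [base, end) window before a
(base, size) pair is emitted. Below is a minimal, self-contained sketch of
just that step; clip_range() and the values in main() are illustrative only
and not part of the patch.

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative helper: clip [rgn_start, rgn_end) to the [base, end) window.
 * Returns 1 and writes the clipped (start, size) pair on overlap, 0 otherwise,
 * mirroring the per-region intersection logic in add_ranges(). */
static int clip_range(uint64_t rgn_start, uint64_t rgn_end,
		      uint64_t base, uint64_t end,
		      uint64_t *out_start, uint64_t *out_size)
{
	if (rgn_end <= base || rgn_start >= end)
		return 0;		/* no overlap */
	if (rgn_start < base)
		rgn_start = base;	/* clip the low end */
	if (rgn_end > end)
		rgn_end = end;		/* clip the high end */
	*out_start = rgn_start;
	*out_size = rgn_end - rgn_start;
	return 1;
}

int main(void)
{
	uint64_t start, size;

	/* usable region 0x1000-0x5000 against the window 0x2000-0x8000 */
	if (clip_range(0x1000, 0x5000, 0x2000, 0x8000, &start, &size))
		printf("0x%" PRIx64 " + 0x%" PRIx64 "\n", start, size);
	return 0;
}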
 kexec/fs2dt.c |  115 ++++++++++++++++++++++++++-------------------------------
 1 file changed, 53 insertions(+), 62 deletions(-)

diff --git a/kexec/fs2dt.c b/kexec/fs2dt.c
index 79aa0f3..550eca9 100644
--- a/kexec/fs2dt.c
+++ b/kexec/fs2dt.c
@@ -169,6 +169,50 @@ static unsigned propnum(const char *name)
        return offset;
 }
 
+/*
+ * Add the intersections of the usable memory ranges with [base, end)
+ * to 'ranges' as (base, size) pairs, starting at index 'idx'. Returns
+ * the number of ranges added; the caller advances its index by 2 per range.
+ */
+static uint64_t add_ranges(uint64_t **ranges, int *ranges_size, int idx,
+                          uint64_t base, uint64_t end)
+{
+       uint64_t loc_base, loc_end, rngs_cnt = 0;
+       size_t range;
+       int add;
+       for (range = 0; range < usablemem_rgns.size; range++) {
+               add = 0;
+               loc_base = usablemem_rgns.ranges[range].start;
+               loc_end = usablemem_rgns.ranges[range].end;
+               if (loc_base >= base && loc_end <= end) {
+                       add = 1;
+               } else if (base < loc_end && end > loc_base) {
+                       if (loc_base < base)
+                               loc_base = base;
+                       if (loc_end > end)
+                               loc_end = end;
+                       add = 1;
+               }
+
+               if (add) {
+                       if (idx >= ((*ranges_size) - 2)) {
+                               (*ranges_size) += MEM_RANGE_CHUNK_SZ;
+                               *ranges = realloc(*ranges, (*ranges_size)*8);
+                               if (!(*ranges))
+                                       die("unrecoverable error: can't realloc"
+                                           " %d bytes for ranges.\n",
+                                           (*ranges_size)*8);
+                       }
+                       (*ranges)[idx++] = cpu_to_be64(loc_base);
+                       (*ranges)[idx++] = cpu_to_be64(loc_end - loc_base);
+
+                       rngs_cnt++;
+               }
+       }
+
+       return rngs_cnt;
+}
+
 #ifdef HAVE_DYNAMIC_MEMORY
 static void add_dyn_reconf_usable_mem_property__(int fd)
 {
@@ -176,8 +220,8 @@ static void add_dyn_reconf_usable_mem_property__(int fd)
        uint64_t buf[32];
        uint64_t *ranges;
        int ranges_size = MEM_RANGE_CHUNK_SZ;
-       uint64_t base, end, loc_base, loc_end;
-       size_t i, rngs_cnt, range;
+       uint64_t base, end, rngs_cnt;
+       size_t i;
        int rlen = 0;
        int tmp_indx;
 
@@ -210,36 +254,8 @@ static void add_dyn_reconf_usable_mem_property__(int fd)
 
                tmp_indx = rlen++;
 
-               rngs_cnt = 0;
-               for (range = 0; range < usablemem_rgns.size; range++) {
-                       int add = 0;
-                       loc_base = usablemem_rgns.ranges[range].start;
-                       loc_end = usablemem_rgns.ranges[range].end;
-                       if (loc_base >= base && loc_end <= end) {
-                               add = 1;
-                       } else if (base < loc_end && end > loc_base) {
-                               if (loc_base < base)
-                                       loc_base = base;
-                               if (loc_end > end)
-                                       loc_end = end;
-                               add = 1;
-                       }
-
-                       if (add) {
-                               if (rlen >= (ranges_size-2)) {
-                                       ranges_size += MEM_RANGE_CHUNK_SZ;
-                                       ranges = realloc(ranges, ranges_size*8);
-                                       if (!ranges)
-                                               die("unrecoverable error: can't"
-                                                   " realloc %d bytes for"
-                                                   " ranges.\n",
-                                                   ranges_size*8);
-                               }
-                               ranges[rlen++] = cpu_to_be64(loc_base);
-                               ranges[rlen++] = cpu_to_be64(loc_end - loc_base);
-                               rngs_cnt++;
-                       }
-               }
+               rngs_cnt = add_ranges(&ranges, &ranges_size, rlen,
+                                     base, end);
                if (rngs_cnt == 0) {
                        /* We still need to add a counter for every LMB because
                         * the kernel parsing code is dumb.  We just have
@@ -261,7 +277,8 @@ static void add_dyn_reconf_usable_mem_property__(int fd)
                        }
                } else {
                        /* Store the count of (base, size) duple */
-                       ranges[tmp_indx] = cpu_to_be64((uint64_t) rngs_cnt);
+                       ranges[tmp_indx] = cpu_to_be64(rngs_cnt);
+                       rlen += rngs_cnt * 2;
                }
        }
                
@@ -294,8 +311,7 @@ static void add_usable_mem_property(int fd, size_t len)
        uint64_t buf[2];
        uint64_t *ranges;
        int ranges_size = MEM_RANGE_CHUNK_SZ;
-       uint64_t base, end, loc_base, loc_end;
-       size_t range;
+       uint64_t base, end, rngs_cnt;
        int rlen = 0;
 
        strcpy(fname, pathname);
@@ -326,33 +342,8 @@ static void add_usable_mem_property(int fd, size_t len)
                die("unrecoverable error: can't alloc %zu bytes for ranges.\n",
                    ranges_size * sizeof(*ranges));
 
-       for (range = 0; range < usablemem_rgns.size; range++) {
-               int add = 0;
-               loc_base = usablemem_rgns.ranges[range].start;
-               loc_end = usablemem_rgns.ranges[range].end;
-               if (loc_base >= base && loc_end <= end) {
-                       add = 1;
-               } else if (base < loc_end && end > loc_base) {
-                       if (loc_base < base)
-                               loc_base = base;
-                       if (loc_end > end)
-                               loc_end = end;
-                       add = 1;
-               }
-               if (add) {
-                       if (rlen >= (ranges_size-2)) {
-                               ranges_size += MEM_RANGE_CHUNK_SZ;
-                               ranges = realloc(ranges, ranges_size *
-                                                sizeof(*ranges));
-                               if (!ranges)
-                                       die("unrecoverable error: can't realloc"
-                                           "%zu bytes for ranges.\n",
-                                           ranges_size*sizeof(*ranges));
-                       }
-                       ranges[rlen++] = cpu_to_be64(loc_base);
-                       ranges[rlen++] = cpu_to_be64(loc_end - loc_base);
-               }
-       }
+       rngs_cnt = add_ranges(&ranges, &ranges_size, rlen, base, end);
+       rlen += rngs_cnt * 2;
 
        if (!rlen) {
                /*

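Note (not part of the patch): the helper assumes the caller contract seen at
both call sites above -- 'idx' is passed by value, so the caller advances its
own index by two entries per range reported as added:

	rngs_cnt = add_ranges(&ranges, &ranges_size, rlen, base, end);
	rlen += rngs_cnt * 2;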
