The branch main has been updated by kib:

URL: https://cgit.FreeBSD.org/src/commit/?id=986c96b54b9ec1118d285b723f53c7451a4c0772

commit 986c96b54b9ec1118d285b723f53c7451a4c0772
Author:     Konstantin Belousov <k...@freebsd.org>
AuthorDate: 2025-01-18 02:45:45 +0000
Commit:     Konstantin Belousov <k...@freebsd.org>
CommitDate: 2025-01-21 01:44:35 +0000

    rtld-elf/map_object.c: apply clang-format
    
    Discussed with: emaste, imp
    Sponsored by:   The FreeBSD Foundation
    Differential revision:  https://reviews.freebsd.org/D48509
---
 libexec/rtld-elf/map_object.c | 653 ++++++++++++++++++++++--------------------
 1 file changed, 339 insertions(+), 314 deletions(-)

diff --git a/libexec/rtld-elf/map_object.c b/libexec/rtld-elf/map_object.c
index 7dbab26f2c63..148b6dc4ca6e 100644
--- a/libexec/rtld-elf/map_object.c
+++ b/libexec/rtld-elf/map_object.c
@@ -62,274 +62,296 @@ phdr_in_zero_page(const Elf_Ehdr *hdr)
 Obj_Entry *
 map_object(int fd, const char *path, const struct stat *sb)
 {
-    Obj_Entry *obj;
-    Elf_Ehdr *hdr;
-    int i;
-    Elf_Phdr *phdr;
-    Elf_Phdr *phlimit;
-    Elf_Phdr **segs;
-    int nsegs;
-    Elf_Phdr *phdyn;
-    Elf_Phdr *phinterp;
-    Elf_Phdr *phtls;
-    caddr_t mapbase;
-    size_t mapsize;
-    Elf_Addr base_vaddr;
-    Elf_Addr base_vlimit;
-    caddr_t base_addr;
-    int base_flags;
-    Elf_Off data_offset;
-    Elf_Addr data_vaddr;
-    Elf_Addr data_vlimit;
-    caddr_t data_addr;
-    int data_prot;
-    int data_flags;
-    Elf_Addr clear_vaddr;
-    caddr_t clear_addr;
-    caddr_t clear_page;
-    Elf_Addr phdr_vaddr;
-    size_t nclear, phsize;
-    Elf_Addr bss_vaddr;
-    Elf_Addr bss_vlimit;
-    caddr_t bss_addr;
-    Elf_Word stack_flags;
-    Elf_Addr note_start;
-    Elf_Addr note_end;
-    char *note_map;
-    size_t note_map_len;
-    Elf_Addr text_end;
-
-    hdr = get_elf_header(fd, path, sb, &phdr);
-    if (hdr == NULL)
-       return (NULL);
+       Obj_Entry *obj;
+       Elf_Ehdr *hdr;
+       int i;
+       Elf_Phdr *phdr;
+       Elf_Phdr *phlimit;
+       Elf_Phdr **segs;
+       int nsegs;
+       Elf_Phdr *phdyn;
+       Elf_Phdr *phinterp;
+       Elf_Phdr *phtls;
+       caddr_t mapbase;
+       size_t mapsize;
+       Elf_Addr base_vaddr;
+       Elf_Addr base_vlimit;
+       caddr_t base_addr;
+       int base_flags;
+       Elf_Off data_offset;
+       Elf_Addr data_vaddr;
+       Elf_Addr data_vlimit;
+       caddr_t data_addr;
+       int data_prot;
+       int data_flags;
+       Elf_Addr clear_vaddr;
+       caddr_t clear_addr;
+       caddr_t clear_page;
+       Elf_Addr phdr_vaddr;
+       size_t nclear, phsize;
+       Elf_Addr bss_vaddr;
+       Elf_Addr bss_vlimit;
+       caddr_t bss_addr;
+       Elf_Word stack_flags;
+       Elf_Addr note_start;
+       Elf_Addr note_end;
+       char *note_map;
+       size_t note_map_len;
+       Elf_Addr text_end;
+
+       hdr = get_elf_header(fd, path, sb, &phdr);
+       if (hdr == NULL)
+               return (NULL);
 
-    /*
-     * Scan the program header entries, and save key information.
-     * We expect that the loadable segments are ordered by load address.
-     */
-    phsize  = hdr->e_phnum * sizeof(phdr[0]);
-    phlimit = phdr + hdr->e_phnum;
-    nsegs = -1;
-    phdyn = phinterp = phtls = NULL;
-    phdr_vaddr = 0;
-    note_start = 0;
-    note_end = 0;
-    note_map = NULL;
-    note_map_len = 0;
-    segs = alloca(sizeof(segs[0]) * hdr->e_phnum);
-    stack_flags = PF_X | PF_R | PF_W;
-    text_end = 0;
-    while (phdr < phlimit) {
-       switch (phdr->p_type) {
-
-       case PT_INTERP:
-           phinterp = phdr;
-           break;
-
-       case PT_LOAD:
-           segs[++nsegs] = phdr;
-           if ((segs[nsegs]->p_align & (page_size - 1)) != 0) {
-               _rtld_error("%s: PT_LOAD segment %d not page-aligned",
-                   path, nsegs);
-               goto error;
-           }
-           if ((segs[nsegs]->p_flags & PF_X) == PF_X) {
-               text_end = MAX(text_end,
-                   rtld_round_page(segs[nsegs]->p_vaddr +
-                   segs[nsegs]->p_memsz));
-           }
-           break;
-
-       case PT_PHDR:
-           phdr_vaddr = phdr->p_vaddr;
-           phsize = phdr->p_memsz;
-           break;
-
-       case PT_DYNAMIC:
-           phdyn = phdr;
-           break;
-
-       case PT_TLS:
-           phtls = phdr;
-           break;
-
-       case PT_GNU_STACK:
-           stack_flags = phdr->p_flags;
-           break;
-
-       case PT_NOTE:
-           if (phdr->p_offset > page_size ||
-             phdr->p_offset + phdr->p_filesz > page_size) {
-               note_map_len = rtld_round_page(phdr->p_offset +
-                 phdr->p_filesz) - rtld_trunc_page(phdr->p_offset);
-               note_map = mmap(NULL, note_map_len, PROT_READ,
-                 MAP_PRIVATE, fd, rtld_trunc_page(phdr->p_offset));
-               if (note_map == MAP_FAILED) {
-                   _rtld_error("%s: error mapping PT_NOTE (%d)", path, errno);
-                   goto error;
+       /*
+        * Scan the program header entries, and save key information.
+        * We expect that the loadable segments are ordered by load address.
+        */
+       phsize = hdr->e_phnum * sizeof(phdr[0]);
+       phlimit = phdr + hdr->e_phnum;
+       nsegs = -1;
+       phdyn = phinterp = phtls = NULL;
+       phdr_vaddr = 0;
+       note_start = 0;
+       note_end = 0;
+       note_map = NULL;
+       note_map_len = 0;
+       segs = alloca(sizeof(segs[0]) * hdr->e_phnum);
+       stack_flags = PF_X | PF_R | PF_W;
+       text_end = 0;
+       while (phdr < phlimit) {
+               switch (phdr->p_type) {
+               case PT_INTERP:
+                       phinterp = phdr;
+                       break;
+
+               case PT_LOAD:
+                       segs[++nsegs] = phdr;
+                       if ((segs[nsegs]->p_align & (page_size - 1)) != 0) {
+                               _rtld_error(
+                                   "%s: PT_LOAD segment %d not page-aligned",
+                                   path, nsegs);
+                               goto error;
+                       }
+                       if ((segs[nsegs]->p_flags & PF_X) == PF_X) {
+                               text_end = MAX(text_end,
+                                   rtld_round_page(segs[nsegs]->p_vaddr +
+                                   segs[nsegs]->p_memsz));
+                       }
+                       break;
+
+               case PT_PHDR:
+                       phdr_vaddr = phdr->p_vaddr;
+                       phsize = phdr->p_memsz;
+                       break;
+
+               case PT_DYNAMIC:
+                       phdyn = phdr;
+                       break;
+
+               case PT_TLS:
+                       phtls = phdr;
+                       break;
+
+               case PT_GNU_STACK:
+                       stack_flags = phdr->p_flags;
+                       break;
+
+               case PT_NOTE:
+                       if (phdr->p_offset > page_size ||
+                           phdr->p_offset + phdr->p_filesz > page_size) {
+                               note_map_len = rtld_round_page(phdr->p_offset +
+                                   phdr->p_filesz) -
+                                   rtld_trunc_page(phdr->p_offset);
+                               note_map = mmap(NULL, note_map_len, PROT_READ,
+                                   MAP_PRIVATE, fd,
+                                   rtld_trunc_page(phdr->p_offset));
+                               if (note_map == MAP_FAILED) {
+                                       _rtld_error(
+                                           "%s: error mapping PT_NOTE (%d)",
+                                           path, errno);
+                                       goto error;
+                               }
+                               note_start = (Elf_Addr)(note_map +
+                                   phdr->p_offset -
+                                   rtld_trunc_page(phdr->p_offset));
+                       } else {
+                               note_start = (Elf_Addr)(char *)hdr +
+                                   phdr->p_offset;
+                       }
+                       note_end = note_start + phdr->p_filesz;
+                       break;
                }
-               note_start = (Elf_Addr)(note_map + phdr->p_offset -
-                 rtld_trunc_page(phdr->p_offset));
-           } else {
-               note_start = (Elf_Addr)(char *)hdr + phdr->p_offset;
-           }
-           note_end = note_start + phdr->p_filesz;
-           break;
-       }
 
-       ++phdr;
-    }
-    if (phdyn == NULL) {
-       _rtld_error("%s: object is not dynamically-linked", path);
-       goto error;
-    }
-
-    if (nsegs < 0) {
-       _rtld_error("%s: too few PT_LOAD segments", path);
-       goto error;
-    }
-
-    /*
-     * Map the entire address space of the object, to stake out our
-     * contiguous region, and to establish the base address for relocation.
-     */
-    base_vaddr = rtld_trunc_page(segs[0]->p_vaddr);
-    base_vlimit = rtld_round_page(segs[nsegs]->p_vaddr + segs[nsegs]->p_memsz);
-    mapsize = base_vlimit - base_vaddr;
-    base_addr = (caddr_t) base_vaddr;
-    base_flags = __getosreldate() >= P_OSREL_MAP_GUARD ? MAP_GUARD :
-       MAP_PRIVATE | MAP_ANON | MAP_NOCORE;
-    if (npagesizes > 1 && rtld_round_page(segs[0]->p_filesz) >= pagesizes[1])
-       base_flags |= MAP_ALIGNED_SUPER;
-    if (base_vaddr != 0)
-       base_flags |= MAP_FIXED | MAP_EXCL;
-
-    mapbase = mmap(base_addr, mapsize, PROT_NONE, base_flags, -1, 0);
-    if (mapbase == MAP_FAILED) {
-       _rtld_error("%s: mmap of entire address space failed: %s",
-         path, rtld_strerror(errno));
-       goto error;
-    }
-    if (base_addr != NULL && mapbase != base_addr) {
-       _rtld_error("%s: mmap returned wrong address: wanted %p, got %p",
-         path, base_addr, mapbase);
-       goto error1;
-    }
-
-    for (i = 0; i <= nsegs; i++) {
-       /* Overlay the segment onto the proper region. */
-       data_offset = rtld_trunc_page(segs[i]->p_offset);
-       data_vaddr = rtld_trunc_page(segs[i]->p_vaddr);
-       data_vlimit = rtld_round_page(segs[i]->p_vaddr + segs[i]->p_filesz);
-       data_addr = mapbase + (data_vaddr - base_vaddr);
-       data_prot = convert_prot(segs[i]->p_flags);
-       data_flags = convert_flags(segs[i]->p_flags) | MAP_FIXED;
-       if (data_vlimit != data_vaddr &&
-           mmap(data_addr, data_vlimit - data_vaddr, data_prot, 
-           data_flags | MAP_PREFAULT_READ, fd, data_offset) == MAP_FAILED) {
-               _rtld_error("%s: mmap of data failed: %s", path,
-                   rtld_strerror(errno));
-               goto error1;
+               ++phdr;
+       }
+       if (phdyn == NULL) {
+               _rtld_error("%s: object is not dynamically-linked", path);
+               goto error;
        }
 
-       /* Do BSS setup */
-       if (segs[i]->p_filesz != segs[i]->p_memsz) {
+       if (nsegs < 0) {
+               _rtld_error("%s: too few PT_LOAD segments", path);
+               goto error;
+       }
 
-           /* Clear any BSS in the last page of the segment. */
-           clear_vaddr = segs[i]->p_vaddr + segs[i]->p_filesz;
-           clear_addr = mapbase + (clear_vaddr - base_vaddr);
-           clear_page = mapbase + (rtld_trunc_page(clear_vaddr) - base_vaddr);
+       /*
+        * Map the entire address space of the object, to stake out our
+        * contiguous region, and to establish the base address for relocation.
+        */
+       base_vaddr = rtld_trunc_page(segs[0]->p_vaddr);
+       base_vlimit = rtld_round_page(segs[nsegs]->p_vaddr +
+           segs[nsegs]->p_memsz);
+       mapsize = base_vlimit - base_vaddr;
+       base_addr = (caddr_t)base_vaddr;
+       base_flags = __getosreldate() >= P_OSREL_MAP_GUARD ?
+           MAP_GUARD : MAP_PRIVATE | MAP_ANON | MAP_NOCORE;
+       if (npagesizes > 1 &&  rtld_round_page(segs[0]->p_filesz) >=
+           pagesizes[1])
+               base_flags |= MAP_ALIGNED_SUPER;
+       if (base_vaddr != 0)
+               base_flags |= MAP_FIXED | MAP_EXCL;
+
+       mapbase = mmap(base_addr, mapsize, PROT_NONE, base_flags, -1, 0);
+       if (mapbase == MAP_FAILED) {
+               _rtld_error("%s: mmap of entire address space failed: %s",
+                   path, rtld_strerror(errno));
+               goto error;
+       }
+       if (base_addr != NULL && mapbase != base_addr) {
+               _rtld_error(
+                   "%s: mmap returned wrong address: wanted %p, got %p",
+                   path, base_addr, mapbase);
+               goto error1;
+       }
 
-           if ((nclear = data_vlimit - clear_vaddr) > 0) {
-               /* Make sure the end of the segment is writable */
-               if ((data_prot & PROT_WRITE) == 0 && -1 ==
-                    mprotect(clear_page, page_size, data_prot|PROT_WRITE)) {
-                       _rtld_error("%s: mprotect failed: %s", path,
-                           rtld_strerror(errno));
+       for (i = 0; i <= nsegs; i++) {
+               /* Overlay the segment onto the proper region. */
+               data_offset = rtld_trunc_page(segs[i]->p_offset);
+               data_vaddr = rtld_trunc_page(segs[i]->p_vaddr);
+               data_vlimit = rtld_round_page(segs[i]->p_vaddr +
+                   segs[i]->p_filesz);
+               data_addr = mapbase + (data_vaddr - base_vaddr);
+               data_prot = convert_prot(segs[i]->p_flags);
+               data_flags = convert_flags(segs[i]->p_flags) | MAP_FIXED;
+               if (data_vlimit != data_vaddr && mmap(data_addr,
+                   data_vlimit - data_vaddr, data_prot, data_flags |
+                   MAP_PREFAULT_READ, fd, data_offset) == MAP_FAILED) {
+                       _rtld_error("%s: mmap of data failed: %s",
+                           path, rtld_strerror(errno));
                        goto error1;
                }
 
-               memset(clear_addr, 0, nclear);
-
-               /* Reset the data protection back */
-               if ((data_prot & PROT_WRITE) == 0)
-                   mprotect(clear_page, page_size, data_prot);
-           }
-
-           /* Overlay the BSS segment onto the proper region. */
-           bss_vaddr = data_vlimit;
-           bss_vlimit = rtld_round_page(segs[i]->p_vaddr + segs[i]->p_memsz);
-           bss_addr = mapbase +  (bss_vaddr - base_vaddr);
-           if (bss_vlimit > bss_vaddr) {       /* There is something to do */
-               if (mmap(bss_addr, bss_vlimit - bss_vaddr, data_prot,
-                   data_flags | MAP_ANON, -1, 0) == MAP_FAILED) {
-                   _rtld_error("%s: mmap of bss failed: %s", path,
-                       rtld_strerror(errno));
-                   goto error1;
+               /* Do BSS setup */
+               if (segs[i]->p_filesz != segs[i]->p_memsz) {
+                       /* Clear any BSS in the last page of the segment. */
+                       clear_vaddr = segs[i]->p_vaddr + segs[i]->p_filesz;
+                       clear_addr = mapbase + (clear_vaddr - base_vaddr);
+                       clear_page = mapbase + (rtld_trunc_page(clear_vaddr) -
+                           base_vaddr);
+
+                       if ((nclear = data_vlimit - clear_vaddr) > 0) {
+                               /*
+                                * Make sure the end of the segment is
+                                * writable.
+                                */
+                               if ((data_prot & PROT_WRITE) == 0 &&
+                                   mprotect(clear_page, page_size,
+                                   data_prot | PROT_WRITE) == -1) {
+                                       _rtld_error("%s: mprotect failed: %s",
+                                           path, rtld_strerror(errno));
+                                       goto error1;
+                               }
+
+                               memset(clear_addr, 0, nclear);
+
+                               /* Reset the data protection back */
+                               if ((data_prot & PROT_WRITE) == 0)
+                                       mprotect(clear_page, page_size,
+                                           data_prot);
+                       }
+
+                       /* Overlay the BSS segment onto the proper region. */
+                       bss_vaddr = data_vlimit;
+                       bss_vlimit = rtld_round_page(segs[i]->p_vaddr +
+                           segs[i]->p_memsz);
+                       bss_addr = mapbase + (bss_vaddr - base_vaddr);
+                       if (bss_vlimit > bss_vaddr) {
+                               /* There is something to do */
+                               if (mmap(bss_addr, bss_vlimit - bss_vaddr,
+                                   data_prot, data_flags | MAP_ANON, -1,
+                                   0) == MAP_FAILED) {
+                                       _rtld_error(
+                                           "%s: mmap of bss failed: %s",
+                                           path, rtld_strerror(errno));
+                                       goto error1;
+                               }
+                       }
+               }
+
+               if (phdr_vaddr == 0 && data_offset <= hdr->e_phoff &&
+                   data_vlimit - data_vaddr + data_offset >=
+                   hdr->e_phoff + hdr->e_phnum * sizeof(Elf_Phdr)) {
+                       phdr_vaddr = data_vaddr + hdr->e_phoff - data_offset;
                }
-           }
        }
 
-       if (phdr_vaddr == 0 && data_offset <= hdr->e_phoff &&
-         (data_vlimit - data_vaddr + data_offset) >=
-         (hdr->e_phoff + hdr->e_phnum * sizeof (Elf_Phdr))) {
-           phdr_vaddr = data_vaddr + hdr->e_phoff - data_offset;
+       obj = obj_new();
+       if (sb != NULL) {
+               obj->dev = sb->st_dev;
+               obj->ino = sb->st_ino;
+       }
+       obj->mapbase = mapbase;
+       obj->mapsize = mapsize;
+       obj->vaddrbase = base_vaddr;
+       obj->relocbase = mapbase - base_vaddr;
+       obj->dynamic = (const Elf_Dyn *)(obj->relocbase + phdyn->p_vaddr);
+       if (hdr->e_entry != 0)
+               obj->entry = (caddr_t)(obj->relocbase + hdr->e_entry);
+       if (phdr_vaddr != 0) {
+               obj->phdr = (const Elf_Phdr *)(obj->relocbase + phdr_vaddr);
+       } else {
+               obj->phdr = malloc(phsize);
+               if (obj->phdr == NULL) {
+                       obj_free(obj);
+                       _rtld_error("%s: cannot allocate program header",
+                           path);
+                       goto error1;
+               }
+               memcpy(__DECONST(char *, obj->phdr), (char *)hdr + hdr->e_phoff,
+                   phsize);
+               obj->phdr_alloc = true;
        }
-    }
-
-    obj = obj_new();
-    if (sb != NULL) {
-       obj->dev = sb->st_dev;
-       obj->ino = sb->st_ino;
-    }
-    obj->mapbase = mapbase;
-    obj->mapsize = mapsize;
-    obj->vaddrbase = base_vaddr;
-    obj->relocbase = mapbase - base_vaddr;
-    obj->dynamic = (const Elf_Dyn *)(obj->relocbase + phdyn->p_vaddr);
-    if (hdr->e_entry != 0)
-       obj->entry = (caddr_t)(obj->relocbase + hdr->e_entry);
-    if (phdr_vaddr != 0) {
-       obj->phdr = (const Elf_Phdr *)(obj->relocbase + phdr_vaddr);
-    } else {
-       obj->phdr = malloc(phsize);
-       if (obj->phdr == NULL) {
-           obj_free(obj);
-           _rtld_error("%s: cannot allocate program header", path);
-           goto error1;
+       obj->phsize = phsize;
+       if (phinterp != NULL)
+               obj->interp = (const char *)(obj->relocbase +
+                   phinterp->p_vaddr);
+       if (phtls != NULL) {
+               tls_dtv_generation++;
+               obj->tlsindex = ++tls_max_index;
+               obj->tlssize = phtls->p_memsz;
+               obj->tlsalign = phtls->p_align;
+               obj->tlspoffset = phtls->p_offset;
+               obj->tlsinitsize = phtls->p_filesz;
+               obj->tlsinit = mapbase + phtls->p_vaddr;
        }
-       memcpy(__DECONST(char *, obj->phdr), (char *)hdr + hdr->e_phoff, phsize);
-       obj->phdr_alloc = true;
-    }
-    obj->phsize = phsize;
-    if (phinterp != NULL)
-       obj->interp = (const char *)(obj->relocbase + phinterp->p_vaddr);
-    if (phtls != NULL) {
-       tls_dtv_generation++;
-       obj->tlsindex = ++tls_max_index;
-       obj->tlssize = phtls->p_memsz;
-       obj->tlsalign = phtls->p_align;
-       obj->tlspoffset = phtls->p_offset;
-       obj->tlsinitsize = phtls->p_filesz;
-       obj->tlsinit = mapbase + phtls->p_vaddr;
-    }
-    obj->stack_flags = stack_flags;
-    if (note_start < note_end)
-       digest_notes(obj, note_start, note_end);
-    if (note_map != NULL)
-       munmap(note_map, note_map_len);
-    munmap(hdr, page_size);
-    return (obj);
+       obj->stack_flags = stack_flags;
+       if (note_start < note_end)
+               digest_notes(obj, note_start, note_end);
+       if (note_map != NULL)
+               munmap(note_map, note_map_len);
+       munmap(hdr, page_size);
+       return (obj);
 
 error1:
-    munmap(mapbase, mapsize);
+       munmap(mapbase, mapsize);
 error:
-    if (note_map != NULL && note_map != MAP_FAILED)
-       munmap(note_map, note_map_len);
-    if (!phdr_in_zero_page(hdr))
-       munmap(phdr, hdr->e_phnum * sizeof(phdr[0]));
-    munmap(hdr, page_size);
-    return (NULL);
+       if (note_map != NULL && note_map != MAP_FAILED)
+               munmap(note_map, note_map_len);
+       if (!phdr_in_zero_page(hdr))
+               munmap(phdr, hdr->e_phnum * sizeof(phdr[0]));
+       munmap(hdr, page_size);
+       return (NULL);
 }
 
 bool
@@ -359,7 +381,8 @@ check_elf_headers(const Elf_Ehdr *hdr, const char *path)
        }
        if (hdr->e_phentsize != sizeof(Elf_Phdr)) {
                _rtld_error(
-           "%s: invalid shared object: e_phentsize != sizeof(Elf_Phdr)", path);
+           "%s: invalid shared object: e_phentsize != sizeof(Elf_Phdr)",
+                   path);
                return (false);
        }
        return (true);
@@ -397,9 +420,8 @@ get_elf_header(int fd, const char *path, const struct stat *sbp,
        if (phdr_in_zero_page(hdr)) {
                phdr = (Elf_Phdr *)((char *)hdr + hdr->e_phoff);
        } else {
-               phdr = mmap(NULL, hdr->e_phnum * sizeof(phdr[0]),
-                   PROT_READ, MAP_PRIVATE | MAP_PREFAULT_READ, fd,
-                   hdr->e_phoff);
+               phdr = mmap(NULL, hdr->e_phnum * sizeof(phdr[0]), PROT_READ,
+                   MAP_PRIVATE | MAP_PREFAULT_READ, fd, hdr->e_phoff);
                if (phdr == MAP_FAILED) {
                        _rtld_error("%s: error mapping phdr: %s", path,
                            rtld_strerror(errno));
@@ -417,55 +439,57 @@ error:
 void
 obj_free(Obj_Entry *obj)
 {
-    Objlist_Entry *elm;
-
-    if (obj->tls_static)
-       free_tls_offset(obj);
-    while (obj->needed != NULL) {
-       Needed_Entry *needed = obj->needed;
-       obj->needed = needed->next;
-       free(needed);
-    }
-    while (!STAILQ_EMPTY(&obj->names)) {
-       Name_Entry *entry = STAILQ_FIRST(&obj->names);
-       STAILQ_REMOVE_HEAD(&obj->names, link);
-       free(entry);
-    }
-    while (!STAILQ_EMPTY(&obj->dldags)) {
-       elm = STAILQ_FIRST(&obj->dldags);
-       STAILQ_REMOVE_HEAD(&obj->dldags, link);
-       free(elm);
-    }
-    while (!STAILQ_EMPTY(&obj->dagmembers)) {
-       elm = STAILQ_FIRST(&obj->dagmembers);
-       STAILQ_REMOVE_HEAD(&obj->dagmembers, link);
-       free(elm);
-    }
-    if (obj->vertab)
-       free(obj->vertab);
-    if (obj->origin_path)
-       free(obj->origin_path);
-    if (obj->z_origin)
-       free(__DECONST(void*, obj->rpath));
-    if (obj->priv)
-       free(obj->priv);
-    if (obj->path)
-       free(obj->path);
-    if (obj->phdr_alloc)
-       free(__DECONST(void *, obj->phdr));
-    free(obj);
+       Objlist_Entry *elm;
+
+       if (obj->tls_static)
+               free_tls_offset(obj);
+       while (obj->needed != NULL) {
+               Needed_Entry *needed = obj->needed;
+
+               obj->needed = needed->next;
+               free(needed);
+       }
+       while (!STAILQ_EMPTY(&obj->names)) {
+               Name_Entry *entry = STAILQ_FIRST(&obj->names);
+
+               STAILQ_REMOVE_HEAD(&obj->names, link);
+               free(entry);
+       }
+       while (!STAILQ_EMPTY(&obj->dldags)) {
+               elm = STAILQ_FIRST(&obj->dldags);
+               STAILQ_REMOVE_HEAD(&obj->dldags, link);
+               free(elm);
+       }
+       while (!STAILQ_EMPTY(&obj->dagmembers)) {
+               elm = STAILQ_FIRST(&obj->dagmembers);
+               STAILQ_REMOVE_HEAD(&obj->dagmembers, link);
+               free(elm);
+       }
+       if (obj->vertab)
+               free(obj->vertab);
+       if (obj->origin_path)
+               free(obj->origin_path);
+       if (obj->z_origin)
+               free(__DECONST(void *, obj->rpath));
+       if (obj->priv)
+               free(obj->priv);
+       if (obj->path)
+               free(obj->path);
+       if (obj->phdr_alloc)
+               free(__DECONST(void *, obj->phdr));
+       free(obj);
 }
 
 Obj_Entry *
 obj_new(void)
 {
-    Obj_Entry *obj;
+       Obj_Entry *obj;
 
-    obj = CNEW(Obj_Entry);
-    STAILQ_INIT(&obj->dldags);
-    STAILQ_INIT(&obj->dagmembers);
-    STAILQ_INIT(&obj->names);
-    return obj;
+       obj = CNEW(Obj_Entry);
+       STAILQ_INIT(&obj->dldags);
+       STAILQ_INIT(&obj->dagmembers);
+       STAILQ_INIT(&obj->names);
+       return (obj);
 }
 
 /*
@@ -475,26 +499,27 @@ obj_new(void)
 int
 convert_prot(int elfflags)
 {
-    int prot = 0;
-    if (elfflags & PF_R)
-       prot |= PROT_READ;
-    if (elfflags & PF_W)
-       prot |= PROT_WRITE;
-    if (elfflags & PF_X)
-       prot |= PROT_EXEC;
-    return prot;
+       int prot = 0;
+
+       if ((elfflags & PF_R) != 0)
+               prot |= PROT_READ;
+       if ((elfflags & PF_W) != 0)
+               prot |= PROT_WRITE;
+       if ((elfflags & PF_X) != 0)
+               prot |= PROT_EXEC;
+       return (prot);
 }
 
 static int
 convert_flags(int elfflags)
 {
-    int flags = MAP_PRIVATE; /* All mappings are private */
-
-    /*
-     * Readonly mappings are marked "MAP_NOCORE", because they can be
-     * reconstructed by a debugger.
-     */
-    if (!(elfflags & PF_W))
-       flags |= MAP_NOCORE;
-    return flags;
+       int flags = MAP_PRIVATE; /* All mappings are private */
+
+       /*
+        * Readonly mappings are marked "MAP_NOCORE", because they can be
+        * reconstructed by a debugger.
+        */
+       if ((elfflags & PF_W) == 0)
+               flags |= MAP_NOCORE;
+       return (flags);
 }
