On Tue, Mar 23, 2021 at 01:52:20PM +0100, Martin Pieuchot wrote:
> Diff below converts multiple "return(val)" and "return (val)" to
> "return val". I only changed those that help decrease the size
> of the diff with NetBSD or didn't change anything.
>
> ok?
>

I read through these and agree this should not change any behaviour.
ok mlarkin, if this helps you move forward by improving diffability.
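As a quick sanity check for anyone skimming (my own minimal sketch, not
part of the diff, and the function names are made up): "return" is a
statement, not a function call, so the parentheses around its operand
are redundant and both spellings compile to identical code.

	#include <assert.h>

	/* old style, as removed by the diff */
	static int
	with_parens(int v)
	{
		return (v);
	}

	/* new style, matching NetBSD */
	static int
	without_parens(int v)
	{
		return v;
	}

	int
	main(void)
	{
		assert(with_parens(7) == without_parens(7));
		return 0;
	}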
> Index: uvm/uvm_amap.c
> ===================================================================
> RCS file: /cvs/src/sys/uvm/uvm_amap.c,v
> retrieving revision 1.88
> diff -u -p -r1.88 uvm_amap.c
> --- uvm/uvm_amap.c 20 Mar 2021 10:24:21 -0000 1.88
> +++ uvm/uvm_amap.c 23 Mar 2021 12:14:26 -0000
> @@ -342,7 +342,7 @@ amap_alloc1(int slots, int waitf, int la
> amap = pool_get(&uvm_small_amap_pool[slots - 1],
> pwaitf | PR_ZERO);
> if (amap == NULL)
> - return(NULL);
> + return NULL;
>
> amap->am_lock = NULL;
> amap->am_ref = 1;
> @@ -355,7 +355,7 @@ amap_alloc1(int slots, int waitf, int la
>
> if (UVM_AMAP_SMALL(amap)) {
> amap->am_small.ac_nslot = slots;
> - return (amap);
> + return amap;
> }
>
> amap->am_ncused = 0;
> @@ -392,14 +392,14 @@ amap_alloc1(int slots, int waitf, int la
> }
> }
>
> - return(amap);
> + return amap;
>
> fail1:
> free(amap->am_buckets, M_UVMAMAP, buckets * sizeof(*amap->am_buckets));
> TAILQ_FOREACH_SAFE(chunk, &amap->am_chunks, ac_list, tmp)
> pool_put(&uvm_amap_chunk_pool, chunk);
> pool_put(&uvm_amap_pool, amap);
> - return (NULL);
> + return NULL;
> }
>
> static void
> @@ -423,7 +423,7 @@ amap_alloc(vaddr_t sz, int waitf, int la
>
> AMAP_B2SLOT(slots, sz); /* load slots */
> if (slots > INT_MAX)
> - return (NULL);
> + return NULL;
>
> amap = amap_alloc1(slots, waitf, lazyalloc);
> if (amap != NULL) {
> @@ -431,7 +431,7 @@ amap_alloc(vaddr_t sz, int waitf, int la
> amap_list_insert(amap);
> }
>
> - return(amap);
> + return amap;
> }
>
>
> Index: uvm/uvm_anon.c
> ===================================================================
> RCS file: /cvs/src/sys/uvm/uvm_anon.c,v
> retrieving revision 1.53
> diff -u -p -r1.53 uvm_anon.c
> --- uvm/uvm_anon.c 20 Mar 2021 10:24:21 -0000 1.53
> +++ uvm/uvm_anon.c 23 Mar 2021 12:01:03 -0000
> @@ -67,7 +67,7 @@ uvm_analloc(void)
> anon->an_page = NULL;
> anon->an_swslot = 0;
> }
> - return(anon);
> + return anon;
> }
>
> /*
> Index: uvm/uvm_aobj.c
> ===================================================================
> RCS file: /cvs/src/sys/uvm/uvm_aobj.c,v
> retrieving revision 1.92
> diff -u -p -r1.92 uvm_aobj.c
> --- uvm/uvm_aobj.c 20 Mar 2021 10:24:21 -0000 1.92
> +++ uvm/uvm_aobj.c 23 Mar 2021 12:17:00 -0000
> @@ -211,7 +211,7 @@ uao_find_swhash_elt(struct uvm_aobj *aob
> */
> LIST_FOREACH(elt, swhash, list) {
> if (elt->tag == page_tag)
> - return(elt);
> + return elt;
> }
>
> if (!create)
> @@ -234,7 +234,7 @@ uao_find_swhash_elt(struct uvm_aobj *aob
> LIST_INSERT_HEAD(swhash, elt, list);
> elt->tag = page_tag;
>
> - return(elt);
> + return elt;
> }
>
> /*
> @@ -248,7 +248,7 @@ uao_find_swslot(struct uvm_aobj *aobj, i
> * if noswap flag is set, then we never return a slot
> */
> if (aobj->u_flags & UAO_FLAG_NOSWAP)
> - return(0);
> + return 0;
>
> /*
> * if hashing, look in hash table.
> @@ -258,15 +258,15 @@ uao_find_swslot(struct uvm_aobj *aobj, i
> uao_find_swhash_elt(aobj, pageidx, FALSE);
>
> if (elt)
> - return(UAO_SWHASH_ELT_PAGESLOT(elt, pageidx));
> + return UAO_SWHASH_ELT_PAGESLOT(elt, pageidx);
> else
> - return(0);
> + return 0;
> }
>
> /*
> * otherwise, look in the array
> */
> - return(aobj->u_swslots[pageidx]);
> + return aobj->u_swslots[pageidx];
> }
>
> /*
> @@ -289,7 +289,7 @@ uao_set_swslot(struct uvm_object *uobj,
> */
> if (aobj->u_flags & UAO_FLAG_NOSWAP) {
> if (slot == 0)
> - return(0); /* a clear is ok */
> + return 0; /* a clear is ok */
>
> /* but a set is not */
> printf("uao_set_swslot: uobj = %p\n", uobj);
> @@ -309,7 +309,7 @@ uao_set_swslot(struct uvm_object *uobj,
> uao_find_swhash_elt(aobj, pageidx, slot ? TRUE : FALSE);
> if (elt == NULL) {
> KASSERT(slot == 0);
> - return (0);
> + return 0;
> }
>
> oldslot = UAO_SWHASH_ELT_PAGESLOT(elt, pageidx);
> @@ -336,7 +336,7 @@ uao_set_swslot(struct uvm_object *uobj,
> oldslot = aobj->u_swslots[pageidx];
> aobj->u_swslots[pageidx] = slot;
> }
> - return (oldslot);
> + return oldslot;
> }
> /*
> * end of hash/array functions
> @@ -749,7 +749,7 @@ uao_create(vsize_t size, int flags)
> if (aobj->u_swhash == NULL) {
> if (flags & UAO_FLAG_CANFAIL) {
> pool_put(&uvm_aobj_pool, aobj);
> - return (NULL);
> + return NULL;
> }
> panic("uao_create: hashinit swhash failed");
> }
> @@ -759,7 +759,7 @@ uao_create(vsize_t size, int flags)
> if (aobj->u_swslots == NULL) {
> if (flags & UAO_FLAG_CANFAIL) {
> pool_put(&uvm_aobj_pool, aobj);
> - return (NULL);
> + return NULL;
> }
> panic("uao_create: malloc swslots failed");
> }
> @@ -767,7 +767,7 @@ uao_create(vsize_t size, int flags)
>
> if (flags & UAO_FLAG_KERNSWAP) {
> aobj->u_flags &= ~UAO_FLAG_NOSWAP; /* clear noswap */
> - return(&aobj->u_obj);
> + return &aobj->u_obj;
> /* done! */
> }
> }
> @@ -784,7 +784,7 @@ uao_create(vsize_t size, int flags)
> LIST_INSERT_HEAD(&uao_list, aobj, u_list);
> mtx_leave(&uao_list_lock);
>
> - return(&aobj->u_obj);
> + return &aobj->u_obj;
> }
>
>
> @@ -940,7 +940,7 @@ uao_flush(struct uvm_object *uobj, voff_
> * or deactivating pages.
> */
> if ((flags & (PGO_DEACTIVATE|PGO_FREE)) == 0)
> - return (TRUE);
> + return TRUE;
>
> curoff = start;
> for (;;) {
> @@ -1016,7 +1016,7 @@ uao_flush(struct uvm_object *uobj, voff_
> }
> }
>
> - return (TRUE);
> + return TRUE;
> }
>
> /*
> @@ -1118,10 +1118,10 @@ uao_get(struct uvm_object *uobj, voff_t
> *npagesp = gotpages;
> if (done)
> /* bingo! */
> - return(VM_PAGER_OK);
> + return VM_PAGER_OK;
> else
> /* EEK! Need to unlock and I/O */
> - return(VM_PAGER_UNLOCK);
> + return VM_PAGER_UNLOCK;
> }
>
> /*
> @@ -1249,7 +1249,7 @@ uao_get(struct uvm_object *uobj, voff_t
> uvm_pagefree(ptmp);
> uvm_unlock_pageq();
>
> - return (rv);
> + return rv;
> }
> }
>
> @@ -1269,7 +1269,7 @@ uao_get(struct uvm_object *uobj, voff_t
>
> } /* lcv loop */
>
> - return(VM_PAGER_OK);
> + return VM_PAGER_OK;
> }
>
> /*
> @@ -1284,7 +1284,7 @@ uao_dropswap(struct uvm_object *uobj, in
> if (slot) {
> uvm_swap_free(slot, 1);
> }
> - return (slot);
> + return slot;
> }
>
> /*
> Index: uvm/uvm_fault.c
> ===================================================================
> RCS file: /cvs/src/sys/uvm/uvm_fault.c,v
> retrieving revision 1.119
> diff -u -p -r1.119 uvm_fault.c
> --- uvm/uvm_fault.c 20 Mar 2021 10:24:21 -0000 1.119
> +++ uvm/uvm_fault.c 23 Mar 2021 12:01:58 -0000
> @@ -1688,7 +1688,7 @@ uvmfault_lookup(struct uvm_faultinfo *uf
> while (1) {
> if (ufi->orig_rvaddr < ufi->map->min_offset ||
> ufi->orig_rvaddr >= ufi->map->max_offset)
> - return(FALSE);
> + return FALSE;
>
> /* lock map */
> if (write_lock) {
> @@ -1701,7 +1701,7 @@ uvmfault_lookup(struct uvm_faultinfo *uf
> if (!uvm_map_lookup_entry(ufi->map, ufi->orig_rvaddr,
> &ufi->entry)) {
> uvmfault_unlockmaps(ufi, write_lock);
> - return(FALSE);
> + return FALSE;
> }
>
> /* reduce size if necessary */
> @@ -1723,7 +1723,7 @@ uvmfault_lookup(struct uvm_faultinfo *uf
> * got it!
> */
> ufi->mapv = ufi->map->timestamp;
> - return(TRUE);
> + return TRUE;
>
> } /* while loop */
>
> @@ -1756,9 +1756,9 @@ uvmfault_relock(struct uvm_faultinfo *uf
> vm_map_lock_read(ufi->map);
> if (ufi->mapv != ufi->map->timestamp) {
> vm_map_unlock_read(ufi->map);
> - return(FALSE);
> + return FALSE;
> }
>
> counters_inc(uvmexp_counters, flt_relckok);
> - return(TRUE); /* got it! */
> + return TRUE; /* got it! */
> }
> Index: uvm/uvm_glue.c
> ===================================================================
> RCS file: /cvs/src/sys/uvm/uvm_glue.c,v
> retrieving revision 1.79
> diff -u -p -r1.79 uvm_glue.c
> --- uvm/uvm_glue.c 12 Mar 2021 14:15:49 -0000 1.79
> +++ uvm/uvm_glue.c 23 Mar 2021 12:01:46 -0000
> @@ -97,7 +97,7 @@ uvm_kernacc(caddr_t addr, size_t len, in
> rv = uvm_map_checkprot(kernel_map, saddr, eaddr, prot);
> vm_map_unlock_read(kernel_map);
>
> - return(rv);
> + return rv;
> }
>
> /*
> Index: uvm/uvm_km.c
> ===================================================================
> RCS file: /cvs/src/sys/uvm/uvm_km.c,v
> retrieving revision 1.142
> diff -u -p -r1.142 uvm_km.c
> --- uvm/uvm_km.c 20 Mar 2021 10:24:21 -0000 1.142
> +++ uvm/uvm_km.c 23 Mar 2021 12:26:41 -0000
> @@ -343,12 +343,12 @@ uvm_km_kmemalloc_pla(struct vm_map *map,
> if (__predict_false(uvm_map(map, &kva, size, obj, UVM_UNKNOWN_OFFSET,
> valign, UVM_MAPFLAG(PROT_READ | PROT_WRITE, PROT_READ | PROT_WRITE,
> MAP_INHERIT_NONE, MADV_RANDOM, (flags & UVM_KMF_TRYLOCK))) != 0)) {
> - return(0);
> + return 0;
> }
>
> /* if all we wanted was VA, return now */
> if (flags & UVM_KMF_VALLOC) {
> - return(kva);
> + return kva;
> }
>
> /* recover object offset from virtual address */
> @@ -405,7 +405,7 @@ uvm_km_kmemalloc_pla(struct vm_map *map,
> KASSERT(TAILQ_EMPTY(&pgl));
> pmap_update(pmap_kernel());
>
> - return(kva);
> + return kva;
> }
>
> /*
> @@ -461,7 +461,7 @@ uvm_km_alloc1(struct vm_map *map, vsize_
> UVM_MAPFLAG(PROT_READ | PROT_WRITE,
> PROT_READ | PROT_WRITE | PROT_EXEC,
> MAP_INHERIT_NONE, MADV_RANDOM, 0)) != 0)) {
> - return(0);
> + return 0;
> }
>
> /* recover object offset from virtual address */
> @@ -512,7 +512,7 @@ uvm_km_alloc1(struct vm_map *map, vsize_
> if (zeroit)
> memset((caddr_t)kva, 0, loopva - kva);
>
> - return(kva);
> + return kva;
> }
>
> /*
> @@ -524,13 +524,13 @@ uvm_km_alloc1(struct vm_map *map, vsize_
> vaddr_t
> uvm_km_valloc(struct vm_map *map, vsize_t size)
> {
> - return(uvm_km_valloc_align(map, size, 0, 0));
> + return uvm_km_valloc_align(map, size, 0, 0);
> }
>
> vaddr_t
> uvm_km_valloc_try(struct vm_map *map, vsize_t size)
> {
> - return(uvm_km_valloc_align(map, size, 0, UVM_FLAG_TRYLOCK));
> + return uvm_km_valloc_align(map, size, 0, UVM_FLAG_TRYLOCK);
> }
>
> vaddr_t
> @@ -550,10 +550,10 @@ uvm_km_valloc_align(struct vm_map *map,
> UVM_UNKNOWN_OFFSET, align,
> UVM_MAPFLAG(PROT_READ | PROT_WRITE, PROT_READ | PROT_WRITE,
> MAP_INHERIT_NONE, MADV_RANDOM, flags)) != 0)) {
> - return(0);
> + return 0;
> }
>
> - return(kva);
> + return kva;
> }
>
> /*
> @@ -572,7 +572,7 @@ uvm_km_valloc_prefer_wait(struct vm_map
>
> size = round_page(size);
> if (size > vm_map_max(map) - vm_map_min(map))
> - return(0);
> + return 0;
>
> while (1) {
> kva = vm_map_min(map); /* hint */
> @@ -585,7 +585,7 @@ uvm_km_valloc_prefer_wait(struct vm_map
> prefer, 0,
> UVM_MAPFLAG(PROT_READ | PROT_WRITE, PROT_READ | PROT_WRITE,
> MAP_INHERIT_NONE, MADV_RANDOM, 0)) == 0)) {
> - return(kva);
> + return kva;
> }
>
> /* failed.  sleep for a while (on map) */
> Index: uvm/uvm_map.c
> ===================================================================
> RCS file: /cvs/src/sys/uvm/uvm_map.c,v
> retrieving revision 1.273
> diff -u -p -r1.273 uvm_map.c
> --- uvm/uvm_map.c 12 Mar 2021 14:15:49 -0000 1.273
> +++ uvm/uvm_map.c 23 Mar 2021 12:02:31 -0000
> @@ -1000,7 +1000,7 @@ uvm_mapanon(struct vm_map *map, vaddr_t
> */
> new = uvm_mapent_alloc(map, flags);
> if (new == NULL)
> - return(ENOMEM);
> + return ENOMEM;
>
> vm_map_lock(map);
> first = last = NULL;
> @@ -1229,7 +1229,7 @@ uvm_map(struct vm_map *map, vaddr_t *add
> */
> new = uvm_mapent_alloc(map, flags);
> if (new == NULL)
> - return(ENOMEM);
> + return ENOMEM;
>
> if (flags & UVM_FLAG_TRYLOCK) {
> if (vm_map_lock_try(map) == FALSE) {
> @@ -1759,7 +1759,7 @@ uvm_mapent_alloc(struct vm_map *map, int
>
> RBT_POISON(uvm_map_addr, me, UVMMAP_DEADBEEF);
> out:
> - return(me);
> + return me;
> }
>
> /*
> @@ -4229,7 +4229,7 @@ uvm_map_submap(struct vm_map *map, vaddr
> result = EINVAL;
>
> vm_map_unlock(map);
> - return(result);
> + return result;
> }
>
> /*
> Index: uvm/uvm_mmap.c
> ===================================================================
> RCS file: /cvs/src/sys/uvm/uvm_mmap.c,v
> retrieving revision 1.163
> diff -u -p -r1.163 uvm_mmap.c
> --- uvm/uvm_mmap.c 7 Oct 2020 12:26:20 -0000 1.163
> +++ uvm/uvm_mmap.c 23 Mar 2021 12:29:24 -0000
> @@ -92,14 +92,14 @@ int uvm_mmapfile(vm_map_t, vaddr_t *, vs
> pageoff = (addr & PAGE_MASK); \
> if (pageoff != 0) { \
> if (size > SIZE_MAX - pageoff) \
> - return (EINVAL); /* wraparound */ \
> + return EINVAL; /* wraparound */ \
> addr -= pageoff; \
> size += pageoff; \
> } \
> if (size != 0) { \
> size = (vsize_t)round_page(size); \
> if (size == 0) \
> - return (EINVAL); /* wraparound */ \
> + return EINVAL; /* wraparound */ \
> } \
> } while (0)
>
> @@ -140,14 +140,14 @@ sys_mquery(struct proc *p, void *v, regi
> fd = SCARG(uap, fd);
>
> if ((prot & PROT_MASK) != prot)
> - return (EINVAL);
> + return EINVAL;
>
> if (SCARG(uap, flags) & MAP_FIXED)
> flags |= UVM_FLAG_FIXED;
>
> if (fd >= 0) {
> if ((error = getvnode(p, fd, &fp)) != 0)
> - return (error);
> + return error;
> uoff = SCARG(uap, pos);
> } else {
> fp = NULL;
> @@ -165,7 +165,7 @@ sys_mquery(struct proc *p, void *v, regi
>
> if (fp != NULL)
> FRELE(fp, p);
> - return (error);
> + return error;
> }
>
> int uvm_wxabort;
> @@ -181,7 +181,7 @@ uvm_wxcheck(struct proc *p, char *call)
> (pr->ps_textvp->v_mount->mnt_flag & MNT_WXALLOWED));
>
> if (wxallowed && (pr->ps_flags & PS_WXNEEDED))
> - return (0);
> + return 0;
>
> if (uvm_wxabort) {
> /* Report W^X failures */
> @@ -192,7 +192,7 @@ uvm_wxcheck(struct proc *p, char *call)
> sigexit(p, SIGABRT);
> }
>
> - return (ENOTSUP);
> + return ENOTSUP;
> }
>
> /*
> @@ -239,33 +239,33 @@ sys_mmap(struct proc *p, void *v, regist
> * Validate the flags.
> */
> if ((prot & PROT_MASK) != prot)
> - return (EINVAL);
> + return EINVAL;
> if ((prot & (PROT_WRITE | PROT_EXEC)) == (PROT_WRITE | PROT_EXEC) &&
> (error = uvm_wxcheck(p, "mmap")))
> - return (error);
> + return error;
>
> if ((flags & MAP_FLAGMASK) != flags)
> - return (EINVAL);
> + return EINVAL;
> if ((flags & (MAP_SHARED|MAP_PRIVATE)) == (MAP_SHARED|MAP_PRIVATE))
> - return (EINVAL);
> + return EINVAL;
> if ((flags & (MAP_FIXED|__MAP_NOREPLACE)) == __MAP_NOREPLACE)
> - return (EINVAL);
> + return EINVAL;
> if (flags & MAP_STACK) {
> if ((flags & (MAP_ANON|MAP_PRIVATE)) != (MAP_ANON|MAP_PRIVATE))
> - return (EINVAL);
> + return EINVAL;
> if (flags & ~(MAP_STACK|MAP_FIXED|MAP_ANON|MAP_PRIVATE))
> - return (EINVAL);
> + return EINVAL;
> if (pos != 0)
> - return (EINVAL);
> + return EINVAL;
> if ((prot & (PROT_READ|PROT_WRITE)) != (PROT_READ|PROT_WRITE))
> - return (EINVAL);
> + return EINVAL;
> }
> if (size == 0)
> - return (EINVAL);
> + return EINVAL;
>
> error = pledge_protexec(p, prot);
> if (error)
> - return (error);
> + return error;
>
> /* align file position and save offset. adjust size. */
> ALIGN_ADDR(pos, size, pageoff);
> @@ -275,15 +275,15 @@ sys_mmap(struct proc *p, void *v, regist
> /* adjust address by the same amount as we did the offset */
> addr -= pageoff;
> if (addr & PAGE_MASK)
> - return (EINVAL); /* not page aligned */
> + return EINVAL; /* not page aligned */
>
> if (addr > SIZE_MAX - size)
> - return (EINVAL); /* no wrapping! */
> + return EINVAL; /* no wrapping! */
> if (VM_MAXUSER_ADDRESS > 0 &&
> (addr + size) > VM_MAXUSER_ADDRESS)
> - return (EINVAL);
> + return EINVAL;
> if (vm_min_address > 0 && addr < vm_min_address)
> - return (EINVAL);
> + return EINVAL;
> }
>
> /* check for file mappings (i.e. not anonymous) and verify file. */
> @@ -430,13 +430,13 @@ is_anon: /* label for SunOS style /dev/z
> /* remember to add offset */
> *retval = (register_t)(addr + pageoff);
>
> - return (error);
> + return error;
>
> out:
> KERNEL_UNLOCK();
> if (fp)
> FRELE(fp, p);
> - return (error);
> + return error;
> }
>
> /*
> @@ -465,14 +465,14 @@ sys_msync(struct proc *p, void *v, regis
> if ((flags & ~(MS_ASYNC | MS_SYNC | MS_INVALIDATE)) != 0 ||
> (flags & (MS_ASYNC | MS_SYNC | MS_INVALIDATE)) == 0 ||
> (flags & (MS_ASYNC | MS_SYNC)) == (MS_ASYNC | MS_SYNC))
> - return (EINVAL);
> + return EINVAL;
> if ((flags & (MS_ASYNC | MS_SYNC)) == 0)
> flags |= MS_SYNC;
>
> /* align the address to a page boundary, and adjust the size
> accordingly */
> ALIGN_ADDR(addr, size, pageoff);
> if (addr > SIZE_MAX - size)
> - return (EINVAL); /* disallow wrap-around. */
> + return EINVAL; /* disallow wrap-around. */
>
> /* get map */
> map = &p->p_vmspace->vm_map;
> @@ -486,7 +486,7 @@ sys_msync(struct proc *p, void *v, regis
> else
> uvmflags |= PGO_SYNCIO; /* XXXCDC: force sync for now! */
>
> - return (uvm_map_clean(map, addr, addr+size, uvmflags));
> + return uvm_map_clean(map, addr, addr+size, uvmflags);
> }
>
> /*
> @@ -517,11 +517,11 @@ sys_munmap(struct proc *p, void *v, regi
> * Note that VM_*_ADDRESS are not constants due to casts (argh).
> */
> if (addr > SIZE_MAX - size)
> - return (EINVAL);
> + return EINVAL;
> if (VM_MAXUSER_ADDRESS > 0 && addr + size > VM_MAXUSER_ADDRESS)
> - return (EINVAL);
> + return EINVAL;
> if (vm_min_address > 0 && addr < vm_min_address)
> - return (EINVAL);
> + return EINVAL;
> map = &p->p_vmspace->vm_map;
>
>
> @@ -533,7 +533,7 @@ sys_munmap(struct proc *p, void *v, regi
> */
> if (!uvm_map_checkprot(map, addr, addr + size, PROT_NONE)) {
> vm_map_unlock(map);
> - return (EINVAL);
> + return EINVAL;
> }
>
> TAILQ_INIT(&dead_entries);
> @@ -542,7 +542,7 @@ sys_munmap(struct proc *p, void *v, regi
>
> uvm_unmap_detach(&dead_entries, 0);
>
> - return (0);
> + return 0;
> }
>
> /*
> @@ -570,21 +570,21 @@ sys_mprotect(struct proc *p, void *v, re
> prot = SCARG(uap, prot);
>
> if ((prot & PROT_MASK) != prot)
> - return (EINVAL);
> + return EINVAL;
> if ((prot & (PROT_WRITE | PROT_EXEC)) == (PROT_WRITE | PROT_EXEC) &&
> (error = uvm_wxcheck(p, "mprotect")))
> - return (error);
> + return error;
>
> error = pledge_protexec(p, prot);
> if (error)
> - return (error);
> + return error;
>
> /*
> * align the address to a page boundary, and adjust the size accordingly
> */
> ALIGN_ADDR(addr, size, pageoff);
> if (addr > SIZE_MAX - size)
> - return (EINVAL); /* disallow wrap-around. */
> + return EINVAL; /* disallow wrap-around. */
>
> return (uvm_map_protect(&p->p_vmspace->vm_map, addr, addr+size,
> prot, FALSE));
> @@ -611,9 +611,9 @@ sys_msyscall(struct proc *p, void *v, re
> */
> ALIGN_ADDR(addr, size, pageoff);
> if (addr > SIZE_MAX - size)
> - return (EINVAL); /* disallow wrap-around. */
> + return EINVAL; /* disallow wrap-around. */
>
> - return (uvm_map_syscall(&p->p_vmspace->vm_map, addr, addr+size));
> + return uvm_map_syscall(&p->p_vmspace->vm_map, addr, addr+size);
> }
>
> /*
> @@ -640,7 +640,7 @@ sys_minherit(struct proc *p, void *v, re
> */
> ALIGN_ADDR(addr, size, pageoff);
> if (addr > SIZE_MAX - size)
> - return (EINVAL); /* disallow wrap-around. */
> + return EINVAL; /* disallow wrap-around. */
>
> return (uvm_map_inherit(&p->p_vmspace->vm_map, addr, addr+size,
> inherit));
> @@ -671,7 +671,7 @@ sys_madvise(struct proc *p, void *v, reg
> */
> ALIGN_ADDR(addr, size, pageoff);
> if (addr > SIZE_MAX - size)
> - return (EINVAL); /* disallow wrap-around. */
> + return EINVAL; /* disallow wrap-around. */
>
> switch (advice) {
> case MADV_NORMAL:
> @@ -691,7 +691,7 @@ sys_madvise(struct proc *p, void *v, reg
> * Should invent a "weak" mode for uvm_fault()
> * which would only do the PGO_LOCKED pgo_get().
> */
> - return (0);
> + return 0;
>
> case MADV_DONTNEED:
> /*
> @@ -724,13 +724,13 @@ sys_madvise(struct proc *p, void *v, reg
> * as it will free swap space allocated to pages in core.
> * There's also what to do for device/file/anonymous memory.
> */
> - return (EINVAL);
> + return EINVAL;
>
> default:
> - return (EINVAL);
> + return EINVAL;
> }
>
> - return (error);
> + return error;
> }
>
> /*
> @@ -755,23 +755,23 @@ sys_mlock(struct proc *p, void *v, regis
> /* align address to a page boundary and adjust size accordingly */
> ALIGN_ADDR(addr, size, pageoff);
> if (addr > SIZE_MAX - size)
> - return (EINVAL); /* disallow wrap-around. */
> + return EINVAL; /* disallow wrap-around. */
>
> if (atop(size) + uvmexp.wired > uvmexp.wiredmax)
> - return (EAGAIN);
> + return EAGAIN;
>
> #ifdef pmap_wired_count
> if (size + ptoa(pmap_wired_count(vm_map_pmap(&p->p_vmspace->vm_map))) >
> lim_cur(RLIMIT_MEMLOCK))
> - return (EAGAIN);
> + return EAGAIN;
> #else
> if ((error = suser(p)) != 0)
> - return (error);
> + return error;
> #endif
>
> error = uvm_map_pageable(&p->p_vmspace->vm_map, addr, addr+size, FALSE,
> 0);
> - return (error == 0 ? 0 : ENOMEM);
> + return error == 0 ? 0 : ENOMEM;
> }
>
> /*
> @@ -796,16 +796,16 @@ sys_munlock(struct proc *p, void *v, reg
> /* align address to a page boundary, and adjust size accordingly */
> ALIGN_ADDR(addr, size, pageoff);
> if (addr > SIZE_MAX - size)
> - return (EINVAL); /* disallow wrap-around. */
> + return EINVAL; /* disallow wrap-around. */
>
> #ifndef pmap_wired_count
> if ((error = suser(p)) != 0)
> - return (error);
> + return error;
> #endif
>
> error = uvm_map_pageable(&p->p_vmspace->vm_map, addr, addr+size, TRUE,
> 0);
> - return (error == 0 ? 0 : ENOMEM);
> + return error == 0 ? 0 : ENOMEM;
> }
>
> /*
> @@ -823,18 +823,18 @@ sys_mlockall(struct proc *p, void *v, re
>
> if (flags == 0 ||
> (flags & ~(MCL_CURRENT|MCL_FUTURE)) != 0)
> - return (EINVAL);
> + return EINVAL;
>
> #ifndef pmap_wired_count
> if ((error = suser(p)) != 0)
> - return (error);
> + return error;
> #endif
>
> error = uvm_map_pageable_all(&p->p_vmspace->vm_map, flags,
> lim_cur(RLIMIT_MEMLOCK));
> if (error != 0 && error != ENOMEM)
> - return (EAGAIN);
> - return (error);
> + return EAGAIN;
> + return error;
> }
>
> /*
> @@ -845,7 +845,7 @@ sys_munlockall(struct proc *p, void *v,
> {
>
> (void) uvm_map_pageable_all(&p->p_vmspace->vm_map, 0, 0);
> - return (0);
> + return 0;
> }
>
> /*
> @@ -865,7 +865,7 @@ uvm_mmaplock(vm_map_t map, vaddr_t *addr
> /*
> * No more work to do in this case.
> */
> - return (0);
> + return 0;
> }
>
> vm_map_lock(map);
> @@ -883,7 +883,7 @@ uvm_mmaplock(vm_map_t map, vaddr_t *addr
> /* unmap the region! */
> uvm_unmap(map, *addr, *addr + size);
> KERNEL_UNLOCK();
> - return (error);
> + return error;
> }
> /*
> * uvm_map_pageable() always returns the map
> @@ -895,13 +895,13 @@ uvm_mmaplock(vm_map_t map, vaddr_t *addr
> /* unmap the region! */
> uvm_unmap(map, *addr, *addr + size);
> KERNEL_UNLOCK();
> - return (error);
> + return error;
> }
> KERNEL_UNLOCK();
> - return (0);
> + return 0;
> }
> vm_map_unlock(map);
> - return (0);
> + return 0;
> }
>
> /*
> @@ -926,7 +926,7 @@ uvm_mmapanon(vm_map_t map, vaddr_t *addr
> *addr = round_page(*addr); /* round */
> } else {
> if (*addr & PAGE_MASK)
> - return(EINVAL);
> + return EINVAL;
>
> uvmflag |= UVM_FLAG_FIXED;
> if ((flags & __MAP_NOREPLACE) == 0)
> @@ -983,7 +983,7 @@ uvm_mmapfile(vm_map_t map, vaddr_t *addr
> *addr = round_page(*addr); /* round */
> } else {
> if (*addr & PAGE_MASK)
> - return(EINVAL);
> + return EINVAL;
>
> uvmflag |= UVM_FLAG_FIXED;
> if ((flags & __MAP_NOREPLACE) == 0)
> @@ -1045,7 +1045,7 @@ uvm_mmapfile(vm_map_t map, vaddr_t *addr
> }
>
> if (uobj == NULL)
> - return((vp->v_type == VREG) ? ENOMEM : EINVAL);
> + return vp->v_type == VREG ? ENOMEM : EINVAL;
>
> if ((flags & MAP_SHARED) == 0)
> uvmflag |= UVM_FLAG_COPYONW;
> @@ -1070,7 +1070,7 @@ uvm_mmapfile(vm_map_t map, vaddr_t *addr
> if (uobj)
> uobj->pgops->pgo_detach(uobj);
>
> - return (error);
> + return error;
> }
>
> /* an address that can't be in userspace or kernelspace */
> @@ -1106,7 +1106,7 @@ sys_kbind(struct proc *p, void *v, regis
> /* a NULL paramp disables the syscall for the process */
> if (paramp == NULL) {
> pr->ps_kbind_addr = BOGO_PC;
> - return (0);
> + return 0;
> }
>
> /* security checks */
> @@ -1119,9 +1119,9 @@ sys_kbind(struct proc *p, void *v, regis
> else if (pr->ps_kbind_cookie != SCARG(uap, proc_cookie))
> sigexit(p, SIGILL);
> if (psize < sizeof(struct __kbind) || psize > sizeof(param))
> - return (EINVAL);
> + return EINVAL;
> if ((error = copyin(paramp, &param, psize)))
> - return (error);
> + return error;
>
> /*
> * The param argument points to an array of __kbind structures
> @@ -1133,7 +1133,7 @@ sys_kbind(struct proc *p, void *v, regis
> s = psize;
> for (count = 0; s > 0 && count < KBIND_BLOCK_MAX; count++) {
> if (s < sizeof(*paramp))
> - return (EINVAL);
> + return EINVAL;
> s -= sizeof(*paramp);
>
> baseva = (vaddr_t)paramp[count].kb_addr;
> @@ -1145,12 +1145,12 @@ sys_kbind(struct proc *p, void *v, regis
> endva >= VM_MAXUSER_ADDRESS ||
> trunc_page(baseva) != trunc_page(endva) ||
> s < paramp[count].kb_size)
> - return (EINVAL);
> + return EINVAL;
>
> s -= paramp[count].kb_size;
> }
> if (s > 0)
> - return (EINVAL);
> + return EINVAL;
> data = (const char *)&paramp[count];
>
> /* all looks good, so do the bindings */
> @@ -1192,5 +1192,5 @@ sys_kbind(struct proc *p, void *v, regis
> }
> uvm_unmap_detach(&dead_entries, AMAP_REFALL);
>
> - return (error);
> + return error;
> }
> Index: uvm/uvm_page.c
> ===================================================================
> RCS file: /cvs/src/sys/uvm/uvm_page.c,v
> retrieving revision 1.155
> diff -u -p -r1.155 uvm_page.c
> --- uvm/uvm_page.c 19 Jan 2021 13:21:36 -0000 1.155
> +++ uvm/uvm_page.c 23 Mar 2021 12:32:01 -0000
> @@ -84,7 +84,7 @@ RBT_GENERATE(uvm_objtree, vm_page, objt,
> int
> uvm_pagecmp(const struct vm_page *a, const struct vm_page *b)
> {
> - return (a->offset < b->offset ? -1 : a->offset > b->offset);
> + return a->offset < b->offset ? -1 : a->offset > b->offset;
> }
>
> /*
> @@ -324,7 +324,7 @@ uvm_pageboot_alloc(vsize_t size)
> addr = pmap_steal_memory(size, &virtual_space_start,
> &virtual_space_end);
>
> - return(addr);
> + return addr;
>
> #else /* !PMAP_STEAL_MEMORY */
>
> @@ -380,7 +380,7 @@ uvm_pageboot_alloc(vsize_t size)
> pmap_kenter_pa(vaddr, paddr, PROT_READ | PROT_WRITE);
> }
> pmap_update(pmap_kernel());
> - return(addr);
> + return addr;
> #endif /* PMAP_STEAL_MEMORY */
> }
>
> @@ -428,7 +428,7 @@ uvm_page_physget(paddr_t *paddrp)
> /* structure copy */
> seg[0] = seg[1];
> }
> - return (TRUE);
> + return TRUE;
> }
>
> /* try from rear */
> @@ -446,7 +446,7 @@ uvm_page_physget(paddr_t *paddrp)
> /* structure copy */
> seg[0] = seg[1];
> }
> - return (TRUE);
> + return TRUE;
> }
> }
>
> @@ -478,10 +478,10 @@ uvm_page_physget(paddr_t *paddrp)
> /* structure copy */
> seg[0] = seg[1];
> }
> - return (TRUE);
> + return TRUE;
> }
>
> - return (FALSE); /* whoops! */
> + return FALSE; /* whoops! */
> }
>
> #endif /* PMAP_STEAL_MEMORY */
> @@ -729,7 +729,7 @@ uvm_pglistalloc(psize_t size, paddr_t lo
> KASSERT(!(flags & UVM_PLA_WAITOK) ^ !(flags & UVM_PLA_NOWAIT));
>
> if (size == 0)
> - return (EINVAL);
> + return EINVAL;
> size = atop(round_page(size));
>
> /*
> @@ -896,10 +896,10 @@ uvm_pagealloc(struct uvm_object *obj, vo
> else
> atomic_setbits_int(&pg->pg_flags, PG_CLEAN);
>
> - return(pg);
> + return pg;
>
> fail:
> - return (NULL);
> + return NULL;
> }
>
> /*
> @@ -1136,7 +1136,7 @@ vm_physseg_find(paddr_t pframe, int *off
> if (pframe < seg->end) {
> if (offp)
> *offp = pframe - seg->start;
> - return(try); /* got it */
> + return try; /* got it */
> }
> start = try + 1; /* next time, start here */
> len--; /* "adjust" */
> @@ -1147,7 +1147,7 @@ vm_physseg_find(paddr_t pframe, int *off
> */
> }
> }
> - return(-1);
> + return -1;
>
> #else
> /* linear search for it */
> @@ -1157,10 +1157,10 @@ vm_physseg_find(paddr_t pframe, int *off
> if (pframe >= seg->start && pframe < seg->end) {
> if (offp)
> *offp = pframe - seg->start;
> - return(lcv); /* got it */
> + return lcv; /* got it */
> }
> }
> - return(-1);
> + return -1;
>
> #endif
> }
> @@ -1178,7 +1178,7 @@ PHYS_TO_VM_PAGE(paddr_t pa)
>
> psi = vm_physseg_find(pf, &off);
>
> - return ((psi == -1) ? NULL : &vm_physmem[psi].pgs[off]);
> + return (psi == -1) ? NULL : &vm_physmem[psi].pgs[off];
> }
> #endif /* VM_PHYSSEG_MAX > 1 */
>
> @@ -1192,7 +1192,7 @@ uvm_pagelookup(struct uvm_object *obj, v
> struct vm_page pg;
>
> pg.offset = off;
> - return (RBT_FIND(uvm_objtree, &obj->memt, &pg));
> + return RBT_FIND(uvm_objtree, &obj->memt, &pg);
> }
>
> /*
> Index: uvm/uvm_page.h
> ===================================================================
> RCS file: /cvs/src/sys/uvm/uvm_page.h,v
> retrieving revision 1.65
> diff -u -p -r1.65 uvm_page.h
> --- uvm/uvm_page.h 22 Sep 2020 14:31:08 -0000 1.65
> +++ uvm/uvm_page.h 23 Mar 2021 12:02:07 -0000
> @@ -256,9 +256,9 @@ vm_physseg_find(paddr_t pframe, int *off
> if (pframe >= vm_physmem[0].start && pframe < vm_physmem[0].end) {
> if (offp)
> *offp = pframe - vm_physmem[0].start;
> - return(0);
> + return 0;
> }
> - return(-1);
> + return -1;
> }
>
> /*
> Index: uvm/uvm_pager.c
> ===================================================================
> RCS file: /cvs/src/sys/uvm/uvm_pager.c,v
> retrieving revision 1.75
> diff -u -p -r1.75 uvm_pager.c
> --- uvm/uvm_pager.c 12 Mar 2021 14:15:49 -0000 1.75
> +++ uvm/uvm_pager.c 23 Mar 2021 12:02:26 -0000
> @@ -326,7 +326,7 @@ uvm_mk_pcluster(struct uvm_object *uobj,
> if ((hi - lo) >> PAGE_SHIFT > *npages) { /* pps too small, bail out! */
> pps[0] = center;
> *npages = 1;
> - return(pps);
> + return pps;
> }
>
> /* now determine the center and attempt to cluster around the edges */
> @@ -410,7 +410,7 @@ uvm_mk_pcluster(struct uvm_object *uobj,
> /*
> * done! return the cluster array to the caller!!!
> */
> - return(ppsp);
> + return ppsp;
> }
>
> /*
> @@ -586,7 +586,7 @@ ReTry:
> * to worry about.
> */
>
> - return(result);
> + return result;
> }
>
> /*
> Index: uvm/uvm_swap.c
> ===================================================================
> RCS file: /cvs/src/sys/uvm/uvm_swap.c,v
> retrieving revision 1.149
> diff -u -p -r1.149 uvm_swap.c
> --- uvm/uvm_swap.c 4 Mar 2021 09:00:03 -0000 1.149
> +++ uvm/uvm_swap.c 23 Mar 2021 12:02:35 -0000
> @@ -1426,7 +1426,7 @@ ReTry: /* XXXMRG */
> sdp->swd_npginuse += *nslots;
> uvmexp.swpginuse += *nslots;
> /* done!  return drum slot number */
> - return(result + sdp->swd_drumoffset);
> + return result + sdp->swd_drumoffset;
> }
> }
>
> Index: uvm/uvm_vnode.c
> ===================================================================
> RCS file: /cvs/src/sys/uvm/uvm_vnode.c,v
> retrieving revision 1.112
> diff -u -p -r1.112 uvm_vnode.c
> --- uvm/uvm_vnode.c 12 Mar 2021 14:15:49 -0000 1.112
> +++ uvm/uvm_vnode.c 23 Mar 2021 12:02:43 -0000
> @@ -155,7 +155,7 @@ uvn_attach(struct vnode *vp, vm_prot_t a
>
> /* if we're mapping a BLK device, make sure it is a disk. */
> if (vp->v_type == VBLK && bdevsw[major(vp->v_rdev)].d_type != D_DISK) {
> - return(NULL);
> + return NULL;
> }
>
> /*
> @@ -219,7 +219,7 @@ uvn_attach(struct vnode *vp, vm_prot_t a
> if (uvn->u_flags & UVM_VNODE_WANTED)
> wakeup(uvn);
> uvn->u_flags = 0;
> - return(NULL);
> + return NULL;
> }
>
> /*
> @@ -253,7 +253,7 @@ uvn_attach(struct vnode *vp, vm_prot_t a
> if (oldflags & UVM_VNODE_WANTED)
> wakeup(uvn);
>
> - return(&uvn->u_obj);
> + return &uvn->u_obj;
> }
>
>
> @@ -835,7 +835,7 @@ ReTry:
>
> uvm_pglistfree(&dead);
>
> - return(retval);
> + return retval;
> }
>
> /*
> @@ -885,11 +885,11 @@ uvn_put(struct uvm_object *uobj, struct
>
> retval = uvm_vnode_lock(uvn);
> if (retval)
> - return(retval);
> + return retval;
> retval = uvn_io(uvn, pps, npages, flags, UIO_WRITE);
> uvm_vnode_unlock(uvn);
>
> - return(retval);
> + return retval;
> }
>
> /*
> @@ -977,9 +977,9 @@ uvn_get(struct uvm_object *uobj, voff_t
>
> *npagesp = gotpages; /* let caller know */
> if (done)
> - return(VM_PAGER_OK); /* bingo! */
> + return VM_PAGER_OK; /* bingo! */
> else
> - return(VM_PAGER_UNLOCK);
> + return VM_PAGER_UNLOCK;
> }
>
> /*
> @@ -992,7 +992,7 @@ uvn_get(struct uvm_object *uobj, voff_t
> */
> retval = uvm_vnode_lock(uvn);
> if (retval)
> - return(retval);
> + return retval;
>
> /*
> * step 2: get non-resident or busy pages.
> @@ -1098,7 +1098,7 @@ uvn_get(struct uvm_object *uobj, voff_t
> uvm_lock_pageq();
> uvm_pagefree(ptmp);
> uvm_unlock_pageq();
> - return(result);
> + return result;
> }
>
> /*
> @@ -1154,7 +1154,7 @@ uvn_io(struct uvm_vnode *uvn, vm_page_t
> /* check for sync'ing I/O. */
> while (uvn->u_flags & UVM_VNODE_IOSYNC) {
> if (waitf == M_NOWAIT) {
> - return(VM_PAGER_AGAIN);
> + return VM_PAGER_AGAIN;
> }
> uvn->u_flags |= UVM_VNODE_IOSYNCWANTED;
> tsleep_nsec(&uvn->u_flags, PVM, "uvn_iosync", INFSLP);
> @@ -1162,7 +1162,7 @@ uvn_io(struct uvm_vnode *uvn, vm_page_t
>
> /* check size */
> if (file_offset >= uvn->u_size) {
> - return(VM_PAGER_BAD);
> + return VM_PAGER_BAD;
> }
>
> /* first try and map the pages in (without waiting) */
> @@ -1171,7 +1171,7 @@ uvn_io(struct uvm_vnode *uvn, vm_page_t
>
> kva = uvm_pagermapin(pps, npages, mapinflags);
> if (kva == 0 && waitf == M_NOWAIT) {
> - return(VM_PAGER_AGAIN);
> + return VM_PAGER_AGAIN;
> }
>
> /*
> @@ -1245,13 +1245,13 @@ uvn_io(struct uvm_vnode *uvn, vm_page_t
> }
>
> if (result == 0)
> - return(VM_PAGER_OK);
> + return VM_PAGER_OK;
>
> if (result == EIO) {
> /* Signal back to uvm_vnode_unlock(). */
> uvn->u_flags |= UVM_VNODE_IOERROR;
> }
> - return(VM_PAGER_ERROR);
> + return VM_PAGER_ERROR;
> }
>
> /*
> @@ -1301,7 +1301,7 @@ uvm_vnp_uncache(struct vnode *vp)
>
> if ((uvn->u_flags & UVM_VNODE_VALID) == 0 ||
> (uvn->u_flags & UVM_VNODE_BLOCKED) != 0) {
> - return(TRUE);
> + return TRUE;
> }
>
> /*
> @@ -1310,7 +1310,7 @@ uvm_vnp_uncache(struct vnode *vp)
> */
> uvn->u_flags &= ~UVM_VNODE_CANPERSIST;
> if (uvn->u_obj.uo_refs) {
> - return(FALSE);
> + return FALSE;
> }
>
> /*
> @@ -1343,7 +1343,7 @@ uvm_vnp_uncache(struct vnode *vp)
> uvn_detach(&uvn->u_obj);
> vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
>
> - return(TRUE);
> + return TRUE;
> }
>
> /*
> @@ -1476,7 +1476,7 @@ uvm_vnode_lock(struct uvm_vnode *uvn)
> int netunlocked = 0;
>
> if (uvn->u_flags & UVM_VNODE_VNISLOCKED)
> - return(VM_PAGER_OK);
> + return VM_PAGER_OK;
>
> /*
> * This thread may already have the net lock, if we faulted in copyin()
> @@ -1499,7 +1499,7 @@ uvm_vnode_lock(struct uvm_vnode *uvn)
> error = vn_lock(uvn->u_vnode, LK_EXCLUSIVE | LK_RECURSEFAIL);
> if (netunlocked)
> NET_LOCK();
> - return(error ? VM_PAGER_ERROR : VM_PAGER_OK);
> + return error ? VM_PAGER_ERROR : VM_PAGER_OK;
> }
>
> void
>
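One other thing I double-checked: the only hunk that drops more than the
outer parentheses is the uvm_mmapfile() one, where
"return((vp->v_type == VREG) ? ENOMEM : EINVAL);" becomes
"return vp->v_type == VREG ? ENOMEM : EINVAL;". Still equivalent,
because == binds tighter than ?:. A standalone sketch (the enum is a
stand-in for the real one in sys/vnode.h, and the function names are
made up):

	#include <assert.h>
	#include <errno.h>

	enum vtype { VREG, VBLK };	/* stand-in for sys/vnode.h */

	static int
	old_form(enum vtype t)
	{
		return ((t == VREG) ? ENOMEM : EINVAL);
	}

	static int
	new_form(enum vtype t)
	{
		/* == is evaluated before ?:, so the inner parens were redundant */
		return t == VREG ? ENOMEM : EINVAL;
	}

	int
	main(void)
	{
		assert(old_form(VREG) == new_form(VREG));
		assert(old_form(VBLK) == new_form(VBLK));
		return 0;
	}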