One small mechanical diff to get rid of the boolean_t signature and reduce difference to NetBSD.
uvm_flush() aka. uao_flush()/udv_flush()/pgo_flush()/uvn_flush() returns TRUE/FALSE only to make uvm_map_clean() conditionally return EFAULT. This makes the *_flush() return 0/EFAULT themselves and thus sets uvm_map_clean()'s `error' return value directly rather than using an intermediate `rv' value to do the FALSE/EFAULT check/set. Use parentheses (around function pointers) while here, reducing NetBSD diff churn. No functional change. Feedback? Objection? OK? Index: uvm_aobj.c =================================================================== RCS file: /cvs/src/sys/uvm/uvm_aobj.c,v retrieving revision 1.103 diff -u -p -r1.103 uvm_aobj.c --- uvm_aobj.c 29 Dec 2021 20:22:06 -0000 1.103 +++ uvm_aobj.c 10 Jan 2022 10:10:29 -0000 @@ -144,7 +144,7 @@ struct pool uvm_aobj_pool; static struct uao_swhash_elt *uao_find_swhash_elt(struct uvm_aobj *, int, boolean_t); static int uao_find_swslot(struct uvm_object *, int); -static boolean_t uao_flush(struct uvm_object *, voff_t, +static int uao_flush(struct uvm_object *, voff_t, voff_t, int); static void uao_free(struct uvm_aobj *); static int uao_get(struct uvm_object *, voff_t, @@ -861,11 +861,11 @@ uao_detach(struct uvm_object *uobj) * => NOTE: we are allowed to lock the page queues, so the caller * must not be holding the lock on them [e.g. pagedaemon had * better not call us with the queues locked] - * => we return TRUE unless we encountered some sort of I/O error + * => we return 0 unless we encountered some sort of I/O error * XXXJRT currently never happens, as we never directly initiate * XXXJRT I/O */ -boolean_t +static int uao_flush(struct uvm_object *uobj, voff_t start, voff_t stop, int flags) { struct uvm_aobj *aobj = (struct uvm_aobj *) uobj; @@ -893,7 +893,7 @@ uao_flush(struct uvm_object *uobj, voff_ * or deactivating pages. 
*/ if ((flags & (PGO_DEACTIVATE|PGO_FREE)) == 0) { - return TRUE; + return 0; } curoff = start; @@ -971,7 +971,7 @@ uao_flush(struct uvm_object *uobj, voff_ } } - return TRUE; + return 0; } /* Index: uvm_device.c =================================================================== RCS file: /cvs/src/sys/uvm/uvm_device.c,v retrieving revision 1.66 diff -u -p -r1.66 uvm_device.c --- uvm_device.c 15 Dec 2021 12:53:53 -0000 1.66 +++ uvm_device.c 10 Jan 2022 10:10:29 -0000 @@ -60,7 +60,7 @@ static void udv_detach(struc static int udv_fault(struct uvm_faultinfo *, vaddr_t, vm_page_t *, int, int, vm_fault_t, vm_prot_t, int); -static boolean_t udv_flush(struct uvm_object *, voff_t, voff_t, +static int udv_flush(struct uvm_object *, voff_t, voff_t, int); /* @@ -290,11 +290,11 @@ again: * * flush pages out of a uvm object. a no-op for devices. */ -static boolean_t +static int udv_flush(struct uvm_object *uobj, voff_t start, voff_t stop, int flags) { - return(TRUE); + return 0; } /* Index: uvm_fault.c =================================================================== RCS file: /cvs/src/sys/uvm/uvm_fault.c,v retrieving revision 1.124 diff -u -p -r1.124 uvm_fault.c --- uvm_fault.c 28 Dec 2021 13:16:28 -0000 1.124 +++ uvm_fault.c 10 Jan 2022 10:10:29 -0000 @@ -794,7 +794,7 @@ uvm_fault_check(struct uvm_faultinfo *uf uoff = (flt->startva - ufi->entry->start) + ufi->entry->offset; rw_enter(uobj->vmobjlock, RW_WRITE); - (void) uobj->pgops->pgo_flush(uobj, uoff, uoff + + (void) (uobj->pgops->pgo_flush)(uobj, uoff, uoff + ((vsize_t)nback << PAGE_SHIFT), PGO_DEACTIVATE); rw_exit(uobj->vmobjlock); } Index: uvm_map.c =================================================================== RCS file: /cvs/src/sys/uvm/uvm_map.c,v retrieving revision 1.282 diff -u -p -r1.282 uvm_map.c --- uvm_map.c 21 Dec 2021 22:21:32 -0000 1.282 +++ uvm_map.c 10 Jan 2022 10:10:29 -0000 @@ -4658,7 +4658,6 @@ uvm_map_clean(struct vm_map *map, vaddr_ vaddr_t cp_start, cp_end; int refs; int error; - boolean_t rv; 
KASSERT((flags & (PGO_FREE|PGO_DEACTIVATE)) != (PGO_FREE|PGO_DEACTIVATE)); @@ -4786,18 +4785,15 @@ flush_object: ((entry->max_protection & PROT_WRITE) != 0 && (entry->etype & UVM_ET_COPYONWRITE) == 0))) { rw_enter(uobj->vmobjlock, RW_WRITE); - rv = uobj->pgops->pgo_flush(uobj, + error = (uobj->pgops->pgo_flush)(uobj, cp_start - entry->start + entry->offset, cp_end - entry->start + entry->offset, flags); rw_exit(uobj->vmobjlock); - - if (rv == FALSE) - error = EFAULT; } } vm_map_unlock_read(map); - return error; + return (error); } /* Index: uvm_vnode.c =================================================================== RCS file: /cvs/src/sys/uvm/uvm_vnode.c,v retrieving revision 1.121 diff -u -p -r1.121 uvm_vnode.c --- uvm_vnode.c 15 Dec 2021 12:53:53 -0000 1.121 +++ uvm_vnode.c 10 Jan 2022 10:10:29 -0000 @@ -82,7 +82,7 @@ extern int rebooting; */ void uvn_cluster(struct uvm_object *, voff_t, voff_t *, voff_t *); void uvn_detach(struct uvm_object *); -boolean_t uvn_flush(struct uvm_object *, voff_t, voff_t, int); +int uvn_flush(struct uvm_object *, voff_t, voff_t, int); int uvn_get(struct uvm_object *, voff_t, vm_page_t *, int *, int, vm_prot_t, int, int); void uvn_init(void); @@ -572,7 +572,7 @@ uvm_vnp_terminate(struct vnode *vp) * => NOTE: we are allowed to lock the page queues, so the caller * must not be holding the lock on them [e.g. pagedaemon had * better not call us with the queues locked] - * => we return TRUE unless we encountered some sort of I/O error + * => we return 0 unless we encountered some sort of I/O error * * comment on "cleaning" object and PG_BUSY pages: * this routine is holding the lock on the object. the only time @@ -588,15 +588,15 @@ uvm_vnp_terminate(struct vnode *vp) * off (i.e. we need to do an iosync). also note that once a * page is PG_BUSY it must stay in its object until it is un-busyed. 
*/ -boolean_t +int uvn_flush(struct uvm_object *uobj, voff_t start, voff_t stop, int flags) { struct uvm_vnode *uvn = (struct uvm_vnode *) uobj; struct vm_page *pp, *ptmp; struct vm_page *pps[MAXBSIZE >> PAGE_SHIFT], **ppsp; struct pglist dead; - int npages, result, lcv; - boolean_t retval, need_iosync, needs_clean; + int npages, result, lcv, retval; + boolean_t need_iosync, needs_clean; voff_t curoff; KASSERT(rw_write_held(uobj->vmobjlock)); @@ -604,7 +604,7 @@ uvn_flush(struct uvm_object *uobj, voff_ /* get init vals and determine how we are going to traverse object */ need_iosync = FALSE; - retval = TRUE; /* return value */ + retval = 0; if (flags & PGO_ALLPAGES) { start = 0; stop = round_page(uvn->u_size); @@ -816,7 +816,7 @@ ReTry: printf("uvn_flush: WARNING: " "changes to page may be " "lost!\n"); - retval = FALSE; + retval = EFAULT; } pmap_page_protect(ptmp, PROT_NONE); uvm_pageclean(ptmp); @@ -844,7 +844,7 @@ ReTry: uvm_pglistfree(&dead); - return retval; + return (retval); } /* Index: uvm_pager.h =================================================================== RCS file: /cvs/src/sys/uvm/uvm_pager.h,v retrieving revision 1.33 diff -u -p -r1.33 uvm_pager.h --- uvm_pager.h 12 Oct 2021 07:38:22 -0000 1.33 +++ uvm_pager.h 10 Jan 2022 10:10:29 -0000 @@ -81,7 +81,7 @@ struct uvm_pagerops { vm_page_t *, int, int, vm_fault_t, vm_prot_t, int); /* flush pages out of obj */ - boolean_t (*pgo_flush)(struct uvm_object *, voff_t, + int (*pgo_flush)(struct uvm_object *, voff_t, voff_t, int); /* get/read page */ int (*pgo_get)(struct uvm_object *, voff_t,