Module Name:    src
Committed By:   skrll
Date:           Thu Jul  9 05:57:15 UTC 2020

Modified Files:
        src/sys/uvm: uvm_amap.c uvm_anon.c uvm_aobj.c uvm_bio.c uvm_device.c
            uvm_fault.c uvm_km.c uvm_map.c uvm_page.c uvm_pager.c uvm_pdaemon.c
            uvm_swap.c uvm_vnode.c

Log Message:
Consistently use UVMHIST_FUNC(__func__)

Convert UVMHIST_{CALLED,LOG} into UVMHIST_CALLARGS
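
As a minimal before/after sketch of the conversion (example_fn is a
hypothetical function used only for illustration; maphist is one of the
UVMHIST histories that appears throughout the diffs below):

	/*
	 * Before: the function name is duplicated as a string literal,
	 * and the arguments need a separate UVMHIST_LOG() call.
	 */
	void
	example_fn(struct vm_map *map)
	{
		UVMHIST_FUNC("example_fn"); UVMHIST_CALLED(maphist);
		UVMHIST_LOG(maphist, "(map=%#jx)", (uintptr_t)map, 0, 0, 0);
	}

	/*
	 * After: __func__ keeps the recorded name in sync with the
	 * function, and UVMHIST_CALLARGS() folds the "called" event and
	 * the argument log into a single history entry.
	 */
	void
	example_fn(struct vm_map *map)
	{
		UVMHIST_FUNC(__func__);
		UVMHIST_CALLARGS(maphist, "(map=%#jx)", (uintptr_t)map, 0, 0, 0);
	}

Besides removing the duplicated name, __func__ cannot go stale when a
function is later renamed.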


To generate a diff of this commit:
cvs rdiff -u -r1.121 -r1.122 src/sys/uvm/uvm_amap.c
cvs rdiff -u -r1.78 -r1.79 src/sys/uvm/uvm_anon.c
cvs rdiff -u -r1.148 -r1.149 src/sys/uvm/uvm_aobj.c
cvs rdiff -u -r1.119 -r1.120 src/sys/uvm/uvm_bio.c
cvs rdiff -u -r1.70 -r1.71 src/sys/uvm/uvm_device.c
cvs rdiff -u -r1.227 -r1.228 src/sys/uvm/uvm_fault.c
cvs rdiff -u -r1.158 -r1.159 src/sys/uvm/uvm_km.c
cvs rdiff -u -r1.384 -r1.385 src/sys/uvm/uvm_map.c
cvs rdiff -u -r1.243 -r1.244 src/sys/uvm/uvm_page.c
cvs rdiff -u -r1.127 -r1.128 src/sys/uvm/uvm_pager.c
cvs rdiff -u -r1.129 -r1.130 src/sys/uvm/uvm_pdaemon.c
cvs rdiff -u -r1.196 -r1.197 src/sys/uvm/uvm_swap.c
cvs rdiff -u -r1.114 -r1.115 src/sys/uvm/uvm_vnode.c
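
(For example, to regenerate the first file's change and apply it to a
local tree -- assuming a checked-out src tree and access to a NetBSD CVS
repository, and running from the directory containing src/:

	cvs rdiff -u -r1.121 -r1.122 src/sys/uvm/uvm_amap.c > uvm_amap.diff
	patch -p0 < uvm_amap.diff

The remaining files follow the same pattern.)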

Please note that diffs are not public domain; they are subject to the
copyright notices on the relevant files.

Modified files:

Index: src/sys/uvm/uvm_amap.c
diff -u src/sys/uvm/uvm_amap.c:1.121 src/sys/uvm/uvm_amap.c:1.122
--- src/sys/uvm/uvm_amap.c:1.121	Wed Jul  8 13:26:22 2020
+++ src/sys/uvm/uvm_amap.c	Thu Jul  9 05:57:15 2020
@@ -1,4 +1,4 @@
-/*	$NetBSD: uvm_amap.c,v 1.121 2020/07/08 13:26:22 skrll Exp $	*/
+/*	$NetBSD: uvm_amap.c,v 1.122 2020/07/09 05:57:15 skrll Exp $	*/
 
 /*
  * Copyright (c) 1997 Charles D. Cranor and Washington University.
@@ -35,7 +35,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: uvm_amap.c,v 1.121 2020/07/08 13:26:22 skrll Exp $");
+__KERNEL_RCSID(0, "$NetBSD: uvm_amap.c,v 1.122 2020/07/09 05:57:15 skrll Exp $");
 
 #include "opt_uvmhist.h"
 
@@ -235,7 +235,7 @@ amap_alloc(vaddr_t sz, vaddr_t padsz, in
 {
 	struct vm_amap *amap;
 	int slots, padslots;
-	UVMHIST_FUNC("amap_alloc"); UVMHIST_CALLED(maphist);
+	UVMHIST_FUNC(__func__); UVMHIST_CALLED(maphist);
 
 	AMAP_B2SLOT(slots, sz);
 	AMAP_B2SLOT(padslots, padsz);
@@ -321,7 +321,7 @@ amap_free(struct vm_amap *amap)
 {
 	int slots;
 
-	UVMHIST_FUNC("amap_free"); UVMHIST_CALLED(maphist);
+	UVMHIST_FUNC(__func__); UVMHIST_CALLED(maphist);
 
 	KASSERT(amap->am_ref == 0 && amap->am_nused == 0);
 	KASSERT((amap->am_flags & AMAP_SWAPOFF) == 0);
@@ -363,9 +363,8 @@ amap_extend(struct vm_map_entry *entry, 
 	const km_flag_t kmflags =
 	    (flags & AMAP_EXTEND_NOWAIT) ? KM_NOSLEEP : KM_SLEEP;
 
-	UVMHIST_FUNC("amap_extend"); UVMHIST_CALLED(maphist);
-
-	UVMHIST_LOG(maphist, "  (entry=%#jx, addsize=%#jx, flags=%#jx)",
+	UVMHIST_FUNC(__func__);
+	UVMHIST_CALLARGS(maphist, "  (entry=%#jx, addsize=%#jx, flags=%#jx)",
 	    (uintptr_t)entry, addsize, flags, 0);
 
 	/*
@@ -725,8 +724,8 @@ amap_wipeout(struct vm_amap *amap)
 {
 	u_int lcv;
 
-	UVMHIST_FUNC("amap_wipeout"); UVMHIST_CALLED(maphist);
-	UVMHIST_LOG(maphist,"(amap=%#jx)", (uintptr_t)amap, 0,0,0);
+	UVMHIST_FUNC(__func__);
+	UVMHIST_CALLARGS(maphist,"(amap=%#jx)", (uintptr_t)amap, 0,0,0);
 
 	KASSERT(rw_write_held(amap->am_lock));
 	KASSERT(amap->am_ref == 0);
@@ -797,9 +796,9 @@ amap_copy(struct vm_map *map, struct vm_
 	krwlock_t *oldlock;
 	vsize_t len;
 
-	UVMHIST_FUNC("amap_copy"); UVMHIST_CALLED(maphist);
-	UVMHIST_LOG(maphist, "  (map=%#j, entry=%#j, flags=%jd)",
-		    (uintptr_t)map, (uintptr_t)entry, flags, 0);
+	UVMHIST_FUNC(__func__);
+	UVMHIST_CALLARGS(maphist, "  (map=%#j, entry=%#j, flags=%jd)",
+	    (uintptr_t)map, (uintptr_t)entry, flags, 0);
 
 	KASSERT(map != kernel_map);	/* we use nointr pool */
 
@@ -1416,7 +1415,7 @@ amap_lookup(struct vm_aref *aref, vaddr_
 	struct vm_anon *an;
 	u_int slot;
 
-	UVMHIST_FUNC("amap_lookup"); UVMHIST_CALLED(maphist);
+	UVMHIST_FUNC(__func__); UVMHIST_CALLED(maphist);
 	KASSERT(rw_lock_held(amap->am_lock));
 
 	AMAP_B2SLOT(slot, offset);
@@ -1445,7 +1444,7 @@ amap_lookups(struct vm_aref *aref, vaddr
 	struct vm_amap *amap = aref->ar_amap;
 	u_int slot;
 
-	UVMHIST_FUNC("amap_lookups"); UVMHIST_CALLED(maphist);
+	UVMHIST_FUNC(__func__); UVMHIST_CALLED(maphist);
 	KASSERT(rw_lock_held(amap->am_lock));
 
 	AMAP_B2SLOT(slot, offset);
@@ -1483,7 +1482,7 @@ amap_add(struct vm_aref *aref, vaddr_t o
 	struct vm_amap *amap = aref->ar_amap;
 	u_int slot;
 
-	UVMHIST_FUNC("amap_add"); UVMHIST_CALLED(maphist);
+	UVMHIST_FUNC(__func__); UVMHIST_CALLED(maphist);
 	KASSERT(rw_write_held(amap->am_lock));
 	KASSERT(anon->an_lock == amap->am_lock);
 
@@ -1525,7 +1524,7 @@ amap_unadd(struct vm_aref *aref, vaddr_t
 	struct vm_amap *amap = aref->ar_amap;
 	u_int slot, ptr, last;
 
-	UVMHIST_FUNC("amap_unadd"); UVMHIST_CALLED(maphist);
+	UVMHIST_FUNC(__func__); UVMHIST_CALLED(maphist);
 	KASSERT(rw_write_held(amap->am_lock));
 
 	AMAP_B2SLOT(slot, offset);
@@ -1593,7 +1592,7 @@ amap_adjref_anons(struct vm_amap *amap, 
 void
 amap_ref(struct vm_amap *amap, vaddr_t offset, vsize_t len, int flags)
 {
-	UVMHIST_FUNC("amap_ref"); UVMHIST_CALLED(maphist);
+	UVMHIST_FUNC(__func__); UVMHIST_CALLED(maphist);
 
 	amap_lock(amap, RW_WRITER);
 	if (flags & AMAP_SHARED) {
@@ -1614,7 +1613,7 @@ amap_ref(struct vm_amap *amap, vaddr_t o
 void
 amap_unref(struct vm_amap *amap, vaddr_t offset, vsize_t len, bool all)
 {
-	UVMHIST_FUNC("amap_unref"); UVMHIST_CALLED(maphist);
+	UVMHIST_FUNC(__func__); UVMHIST_CALLED(maphist);
 
 	amap_lock(amap, RW_WRITER);
 

Index: src/sys/uvm/uvm_anon.c
diff -u src/sys/uvm/uvm_anon.c:1.78 src/sys/uvm/uvm_anon.c:1.79
--- src/sys/uvm/uvm_anon.c:1.78	Wed Jul  8 13:26:22 2020
+++ src/sys/uvm/uvm_anon.c	Thu Jul  9 05:57:15 2020
@@ -1,4 +1,4 @@
-/*	$NetBSD: uvm_anon.c,v 1.78 2020/07/08 13:26:22 skrll Exp $	*/
+/*	$NetBSD: uvm_anon.c,v 1.79 2020/07/09 05:57:15 skrll Exp $	*/
 
 /*
  * Copyright (c) 1997 Charles D. Cranor and Washington University.
@@ -30,7 +30,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: uvm_anon.c,v 1.78 2020/07/08 13:26:22 skrll Exp $");
+__KERNEL_RCSID(0, "$NetBSD: uvm_anon.c,v 1.79 2020/07/09 05:57:15 skrll Exp $");
 
 #include "opt_uvmhist.h"
 
@@ -106,8 +106,8 @@ uvm_anfree(struct vm_anon *anon)
 {
 	struct vm_page *pg = anon->an_page, *pg2 __diagused;
 
-	UVMHIST_FUNC("uvm_anon_dispose"); UVMHIST_CALLED(maphist);
-	UVMHIST_LOG(maphist,"(anon=%#jx)", (uintptr_t)anon, 0,0,0);
+	UVMHIST_FUNC(__func__);
+	UVMHIST_CALLARGS(maphist,"(anon=%#jx)", (uintptr_t)anon, 0,0,0);
 
 	KASSERT(anon->an_lock == NULL || rw_write_held(anon->an_lock));
 	KASSERT(anon->an_ref == 0);
@@ -336,7 +336,7 @@ uvm_anon_pagein(struct vm_amap *amap, st
 void
 uvm_anon_dropswap(struct vm_anon *anon)
 {
-	UVMHIST_FUNC("uvm_anon_dropswap"); UVMHIST_CALLED(maphist);
+	UVMHIST_FUNC(__func__); UVMHIST_CALLED(maphist);
 
 	if (anon->an_swslot == 0)
 		return;

Index: src/sys/uvm/uvm_aobj.c
diff -u src/sys/uvm/uvm_aobj.c:1.148 src/sys/uvm/uvm_aobj.c:1.149
--- src/sys/uvm/uvm_aobj.c:1.148	Wed Jul  8 13:26:22 2020
+++ src/sys/uvm/uvm_aobj.c	Thu Jul  9 05:57:15 2020
@@ -1,4 +1,4 @@
-/*	$NetBSD: uvm_aobj.c,v 1.148 2020/07/08 13:26:22 skrll Exp $	*/
+/*	$NetBSD: uvm_aobj.c,v 1.149 2020/07/09 05:57:15 skrll Exp $	*/
 
 /*
  * Copyright (c) 1998 Chuck Silvers, Charles D. Cranor and
@@ -38,7 +38,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: uvm_aobj.c,v 1.148 2020/07/08 13:26:22 skrll Exp $");
+__KERNEL_RCSID(0, "$NetBSD: uvm_aobj.c,v 1.149 2020/07/09 05:57:15 skrll Exp $");
 
 #ifdef _KERNEL_OPT
 #include "opt_uvmhist.h"
@@ -290,8 +290,8 @@ uao_set_swslot(struct uvm_object *uobj, 
 	struct uvm_aobj *aobj = (struct uvm_aobj *)uobj;
 	struct uao_swhash_elt *elt;
 	int oldslot;
-	UVMHIST_FUNC("uao_set_swslot"); UVMHIST_CALLED(pdhist);
-	UVMHIST_LOG(pdhist, "aobj %#jx pageidx %jd slot %jd",
+	UVMHIST_FUNC(__func__);
+	UVMHIST_CALLARGS(pdhist, "aobj %#jx pageidx %jd slot %jd",
 	    (uintptr_t)aobj, pageidx, slot, 0);
 
 	KASSERT(rw_write_held(uobj->vmobjlock) || uobj->uo_refs == 0);
@@ -583,7 +583,7 @@ uao_detach(struct uvm_object *uobj)
 	struct uvm_page_array a;
 	struct vm_page *pg;
 
-	UVMHIST_FUNC("uao_detach"); UVMHIST_CALLED(maphist);
+	UVMHIST_FUNC(__func__); UVMHIST_CALLED(maphist);
 
 	/*
 	 * Detaching from kernel object is a NOP.
@@ -666,7 +666,7 @@ uao_put(struct uvm_object *uobj, voff_t 
 	struct uvm_page_array a;
 	struct vm_page *pg;
 	voff_t curoff;
-	UVMHIST_FUNC("uao_put"); UVMHIST_CALLED(maphist);
+	UVMHIST_FUNC(__func__); UVMHIST_CALLED(maphist);
 
 	KASSERT(UVM_OBJ_IS_AOBJ(uobj));
 	KASSERT(rw_write_held(uobj->vmobjlock));
@@ -801,11 +801,11 @@ uao_get(struct uvm_object *uobj, voff_t 
 	voff_t current_offset;
 	struct vm_page *ptmp;
 	int lcv, gotpages, maxpages, swslot, pageidx;
-	UVMHIST_FUNC("uao_get"); UVMHIST_CALLED(pdhist);
 	bool overwrite = ((flags & PGO_OVERWRITE) != 0);
 	struct uvm_page_array a;
 
-	UVMHIST_LOG(pdhist, "aobj=%#jx offset=%jd, flags=%jd",
+	UVMHIST_FUNC(__func__);
+	UVMHIST_CALLARGS(pdhist, "aobj=%#jx offset=%jd, flags=%jd",
 		    (uintptr_t)uobj, offset, flags,0);
 
 	/*

Index: src/sys/uvm/uvm_bio.c
diff -u src/sys/uvm/uvm_bio.c:1.119 src/sys/uvm/uvm_bio.c:1.120
--- src/sys/uvm/uvm_bio.c:1.119	Wed Jul  8 13:26:22 2020
+++ src/sys/uvm/uvm_bio.c	Thu Jul  9 05:57:15 2020
@@ -1,4 +1,4 @@
-/*	$NetBSD: uvm_bio.c,v 1.119 2020/07/08 13:26:22 skrll Exp $	*/
+/*	$NetBSD: uvm_bio.c,v 1.120 2020/07/09 05:57:15 skrll Exp $	*/
 
 /*
  * Copyright (c) 1998 Chuck Silvers.
@@ -34,7 +34,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: uvm_bio.c,v 1.119 2020/07/08 13:26:22 skrll Exp $");
+__KERNEL_RCSID(0, "$NetBSD: uvm_bio.c,v 1.120 2020/07/09 05:57:15 skrll Exp $");
 
 #include "opt_uvmhist.h"
 #include "opt_ubc.h"
@@ -311,7 +311,7 @@ ubc_fault(struct uvm_faultinfo *ufi, vad
 	int i, error, npages;
 	vm_prot_t prot;
 
-	UVMHIST_FUNC("ubc_fault"); UVMHIST_CALLED(ubchist);
+	UVMHIST_FUNC(__func__); UVMHIST_CALLED(ubchist);
 
 	/*
 	 * no need to try with PGO_LOCKED...
@@ -482,9 +482,8 @@ ubc_alloc(struct uvm_object *uobj, voff_
 	struct ubc_map *umap;
 	voff_t umap_offset;
 	int error;
-	UVMHIST_FUNC("ubc_alloc"); UVMHIST_CALLED(ubchist);
-
-	UVMHIST_LOG(ubchist, "uobj %#jx offset 0x%jx len 0x%jx",
+	UVMHIST_FUNC(__func__);
+	UVMHIST_CALLARGS(ubchist, "uobj %#jx offset 0x%jx len 0x%jx",
 	    (uintptr_t)uobj, offset, *lenp, 0);
 
 	KASSERT(*lenp > 0);
@@ -640,9 +639,9 @@ ubc_release(void *va, int flags, struct 
 	struct uvm_object *uobj;
 	vaddr_t umapva;
 	bool unmapped;
-	UVMHIST_FUNC("ubc_release"); UVMHIST_CALLED(ubchist);
+	UVMHIST_FUNC(__func__);
+	UVMHIST_CALLARGS(ubchist, "va %#jx", (uintptr_t)va, 0, 0, 0);
 
-	UVMHIST_LOG(ubchist, "va %#jx", (uintptr_t)va, 0, 0, 0);
 	umap = &ubc_object.umap[((char *)va - ubc_object.kva) >> ubc_winshift];
 	umapva = UBC_UMAP_ADDR(umap);
 	uobj = umap->uobj;
@@ -844,7 +843,7 @@ ubc_alloc_direct(struct uvm_object *uobj
 	int error;
 	int gpflags = flags | PGO_NOTIMESTAMP | PGO_SYNCIO;
 	int access_type = VM_PROT_READ;
-	UVMHIST_FUNC("ubc_alloc_direct"); UVMHIST_CALLED(ubchist);
+	UVMHIST_FUNC(__func__); UVMHIST_CALLED(ubchist);
 
 	if (flags & UBC_WRITE) {
 		if (flags & UBC_FAULTBUSY)

Index: src/sys/uvm/uvm_device.c
diff -u src/sys/uvm/uvm_device.c:1.70 src/sys/uvm/uvm_device.c:1.71
--- src/sys/uvm/uvm_device.c:1.70	Mon Feb 24 12:38:57 2020
+++ src/sys/uvm/uvm_device.c	Thu Jul  9 05:57:15 2020
@@ -1,4 +1,4 @@
-/*	$NetBSD: uvm_device.c,v 1.70 2020/02/24 12:38:57 rin Exp $	*/
+/*	$NetBSD: uvm_device.c,v 1.71 2020/07/09 05:57:15 skrll Exp $	*/
 
 /*
  * Copyright (c) 1997 Charles D. Cranor and Washington University.
@@ -32,7 +32,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: uvm_device.c,v 1.70 2020/02/24 12:38:57 rin Exp $");
+__KERNEL_RCSID(0, "$NetBSD: uvm_device.c,v 1.71 2020/07/09 05:57:15 skrll Exp $");
 
 #include "opt_uvmhist.h"
 
@@ -114,9 +114,8 @@ udv_attach(dev_t device, vm_prot_t acces
 	const struct cdevsw *cdev;
 	dev_type_mmap((*mapfn));
 
-	UVMHIST_FUNC("udv_attach"); UVMHIST_CALLED(maphist);
-
-	UVMHIST_LOG(maphist, "(device=%#jx)", device,0,0,0);
+	UVMHIST_FUNC(__func__);
+	UVMHIST_CALLARGS(maphist, "(device=%#jx)", device,0,0,0);
 
 	/*
 	 * before we do anything, ensure this device supports mmap
@@ -269,7 +268,7 @@ udv_attach(dev_t device, vm_prot_t acces
 static void
 udv_reference(struct uvm_object *uobj)
 {
-	UVMHIST_FUNC("udv_reference"); UVMHIST_CALLED(maphist);
+	UVMHIST_FUNC(__func__); UVMHIST_CALLED(maphist);
 
 	rw_enter(uobj->vmobjlock, RW_WRITER);
 	uobj->uo_refs++;
@@ -290,7 +289,7 @@ static void
 udv_detach(struct uvm_object *uobj)
 {
 	struct uvm_device *udv = (struct uvm_device *)uobj;
-	UVMHIST_FUNC("udv_detach"); UVMHIST_CALLED(maphist);
+	UVMHIST_FUNC(__func__); UVMHIST_CALLED(maphist);
 
 	/*
 	 * loop until done
@@ -364,7 +363,7 @@ udv_fault(struct uvm_faultinfo *ufi, vad
 	int lcv, retval;
 	dev_t device;
 	vm_prot_t mapprot;
-	UVMHIST_FUNC("udv_fault"); UVMHIST_CALLED(maphist);
+	UVMHIST_FUNC(__func__); UVMHIST_CALLED(maphist);
 	UVMHIST_LOG(maphist,"  flags=%jd", flags,0,0,0);
 
 	/*

Index: src/sys/uvm/uvm_fault.c
diff -u src/sys/uvm/uvm_fault.c:1.227 src/sys/uvm/uvm_fault.c:1.228
--- src/sys/uvm/uvm_fault.c:1.227	Sun May 17 19:38:17 2020
+++ src/sys/uvm/uvm_fault.c	Thu Jul  9 05:57:15 2020
@@ -1,4 +1,4 @@
-/*	$NetBSD: uvm_fault.c,v 1.227 2020/05/17 19:38:17 ad Exp $	*/
+/*	$NetBSD: uvm_fault.c,v 1.228 2020/07/09 05:57:15 skrll Exp $	*/
 
 /*
  * Copyright (c) 1997 Charles D. Cranor and Washington University.
@@ -32,7 +32,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: uvm_fault.c,v 1.227 2020/05/17 19:38:17 ad Exp $");
+__KERNEL_RCSID(0, "$NetBSD: uvm_fault.c,v 1.228 2020/07/09 05:57:15 skrll Exp $");
 
 #include "opt_uvmhist.h"
 
@@ -280,7 +280,7 @@ uvmfault_anonget(struct uvm_faultinfo *u
 	krw_t lock_type;
 	int error;
 
-	UVMHIST_FUNC("uvmfault_anonget"); UVMHIST_CALLED(maphist);
+	UVMHIST_FUNC(__func__); UVMHIST_CALLED(maphist);
 	KASSERT(rw_lock_held(anon->an_lock));
 	KASSERT(anon->an_lock == amap->am_lock);
 
@@ -859,9 +859,8 @@ uvm_fault_internal(struct vm_map *orig_m
 	struct vm_page *pages_store[UVM_MAXRANGE], **pages;
 	int error;
 
-	UVMHIST_FUNC("uvm_fault"); UVMHIST_CALLED(maphist);
-
-	UVMHIST_LOG(maphist, "(map=%#jx, vaddr=%#jx, at=%jd, ff=%jd)",
+	UVMHIST_FUNC(__func__);
+	UVMHIST_CALLARGS(maphist, "(map=%#jx, vaddr=%#jx, at=%jd, ff=%jd)",
 	      (uintptr_t)orig_map, vaddr, access_type, fault_flag);
 
 	/* Don't count anything until user interaction is possible */
@@ -979,7 +978,7 @@ uvm_fault_check(
 	struct uvm_object *uobj;
 	vm_prot_t check_prot;
 	int nback, nforw;
-	UVMHIST_FUNC("uvm_fault_check"); UVMHIST_CALLED(maphist);
+	UVMHIST_FUNC(__func__); UVMHIST_CALLED(maphist);
 
 	/*
 	 * lookup and lock the maps
@@ -1291,7 +1290,7 @@ uvm_fault_upper_lookup(
 	vaddr_t currva;
 	bool shadowed __unused;
 	bool entered;
-	UVMHIST_FUNC("uvm_fault_upper_lookup"); UVMHIST_CALLED(maphist);
+	UVMHIST_FUNC(__func__); UVMHIST_CALLED(maphist);
 
 	/* locked: maps(read), amap(if there) */
 	KASSERT(amap == NULL ||
@@ -1367,7 +1366,7 @@ uvm_fault_upper_neighbor(
 	struct uvm_faultinfo *ufi, const struct uvm_faultctx *flt,
 	vaddr_t currva, struct vm_page *pg, bool readonly)
 {
-	UVMHIST_FUNC("uvm_fault_upper_neighbor"); UVMHIST_CALLED(maphist);
+	UVMHIST_FUNC(__func__); UVMHIST_CALLED(maphist);
 
 	/* locked: amap, anon */
 
@@ -1423,7 +1422,7 @@ uvm_fault_upper(
 	struct vm_anon * const anon = anons[flt->centeridx];
 	struct uvm_object *uobj;
 	int error;
-	UVMHIST_FUNC("uvm_fault_upper"); UVMHIST_CALLED(maphist);
+	UVMHIST_FUNC(__func__); UVMHIST_CALLED(maphist);
 
 	/* locked: maps(read), amap, anon */
 	KASSERT(rw_lock_op(amap->am_lock) == flt->upper_lock_type);
@@ -1533,7 +1532,7 @@ uvm_fault_upper_loan(
 {
 	struct vm_amap * const amap = ufi->entry->aref.ar_amap;
 	int error = 0;
-	UVMHIST_FUNC("uvm_fault_upper_loan"); UVMHIST_CALLED(maphist);
+	UVMHIST_FUNC(__func__); UVMHIST_CALLED(maphist);
 
 	if (!flt->cow_now) {
 
@@ -1599,7 +1598,7 @@ uvm_fault_upper_promote(
 	struct vm_anon * const oanon = anon;
 	struct vm_page *pg;
 	int error;
-	UVMHIST_FUNC("uvm_fault_upper_promote"); UVMHIST_CALLED(maphist);
+	UVMHIST_FUNC(__func__); UVMHIST_CALLED(maphist);
 
 	UVMHIST_LOG(maphist, "  case 1B: COW fault",0,0,0,0);
 	cpu_count(CPU_COUNT_FLT_ACOW, 1);
@@ -1650,7 +1649,7 @@ uvm_fault_upper_direct(
 {
 	struct vm_anon * const oanon = anon;
 	struct vm_page *pg;
-	UVMHIST_FUNC("uvm_fault_upper_direct"); UVMHIST_CALLED(maphist);
+	UVMHIST_FUNC(__func__); UVMHIST_CALLED(maphist);
 
 	cpu_count(CPU_COUNT_FLT_ANON, 1);
 	pg = anon->an_page;
@@ -1673,7 +1672,7 @@ uvm_fault_upper_enter(
 	struct pmap *pmap = ufi->orig_map->pmap;
 	vaddr_t va = ufi->orig_rvaddr;
 	struct vm_amap * const amap = ufi->entry->aref.ar_amap;
-	UVMHIST_FUNC("uvm_fault_upper_enter"); UVMHIST_CALLED(maphist);
+	UVMHIST_FUNC(__func__); UVMHIST_CALLED(maphist);
 
 	/* locked: maps(read), amap, oanon, anon(if different from oanon) */
 	KASSERT(rw_lock_op(amap->am_lock) == flt->upper_lock_type);
@@ -1763,7 +1762,7 @@ uvm_fault_upper_done(
 {
 	const bool wire_paging = flt->wire_paging;
 
-	UVMHIST_FUNC("uvm_fault_upper_done"); UVMHIST_CALLED(maphist);
+	UVMHIST_FUNC(__func__); UVMHIST_CALLED(maphist);
 
 	/*
 	 * ... update the page queues.
@@ -1857,7 +1856,7 @@ uvm_fault_lower(
 	struct uvm_object *uobj = ufi->entry->object.uvm_obj;
 	struct vm_page *uobjpage;
 	int error;
-	UVMHIST_FUNC("uvm_fault_lower"); UVMHIST_CALLED(maphist);
+	UVMHIST_FUNC(__func__); UVMHIST_CALLED(maphist);
 
 	/*
 	 * now, if the desired page is not shadowed by the amap and we have
@@ -1979,7 +1978,7 @@ uvm_fault_lower_lookup(
 	int lcv, gotpages;
 	vaddr_t currva;
 	bool entered;
-	UVMHIST_FUNC("uvm_fault_lower_lookup"); UVMHIST_CALLED(maphist);
+	UVMHIST_FUNC(__func__); UVMHIST_CALLED(maphist);
 
 	rw_enter(uobj->vmobjlock, flt->lower_lock_type);
 
@@ -2122,7 +2121,7 @@ uvm_fault_lower_io(
 	voff_t uoff;
 	vm_prot_t access_type;
 	int advice;
-	UVMHIST_FUNC("uvm_fault_lower_io"); UVMHIST_CALLED(maphist);
+	UVMHIST_FUNC(__func__); UVMHIST_CALLED(maphist);
 
 	/* update rusage counters */
 	curlwp->l_ru.ru_majflt++;
@@ -2266,7 +2265,7 @@ uvm_fault_lower_direct(
 	struct uvm_object *uobj, struct vm_page *uobjpage)
 {
 	struct vm_page *pg;
-	UVMHIST_FUNC("uvm_fault_lower_direct"); UVMHIST_CALLED(maphist);
+	UVMHIST_FUNC(__func__); UVMHIST_CALLED(maphist);
 
 	/*
 	 * we are not promoting.   if the mapping is COW ensure that we
@@ -2315,7 +2314,7 @@ uvm_fault_lower_direct_loan(
 	struct vm_page *pg;
 	struct vm_page *uobjpage = *ruobjpage;
 	int error;
-	UVMHIST_FUNC("uvm_fault_lower_direct_loan"); UVMHIST_CALLED(maphist);
+	UVMHIST_FUNC(__func__); UVMHIST_CALLED(maphist);
 
 	if (!flt->cow_now) {
 		/* read fault: cap the protection at readonly */
@@ -2378,7 +2377,7 @@ uvm_fault_lower_promote(
 	struct vm_anon *anon;
 	struct vm_page *pg;
 	int error;
-	UVMHIST_FUNC("uvm_fault_lower_promote"); UVMHIST_CALLED(maphist);
+	UVMHIST_FUNC(__func__); UVMHIST_CALLED(maphist);
 
 	KASSERT(amap != NULL);
 
@@ -2459,7 +2458,7 @@ uvm_fault_lower_enter(
 	struct vm_amap * const amap = ufi->entry->aref.ar_amap;
 	const bool readonly = uvm_pagereadonly_p(pg);
 	int error;
-	UVMHIST_FUNC("uvm_fault_lower_enter"); UVMHIST_CALLED(maphist);
+	UVMHIST_FUNC(__func__); UVMHIST_CALLED(maphist);
 
 	/*
 	 * Locked:
@@ -2559,7 +2558,7 @@ uvm_fault_lower_done(
 	struct uvm_object *uobj, struct vm_page *pg)
 {
 
-	UVMHIST_FUNC("uvm_fault_lower_done"); UVMHIST_CALLED(maphist);
+	UVMHIST_FUNC(__func__); UVMHIST_CALLED(maphist);
 
 	if (flt->wire_paging) {
 		uvm_pagelock(pg);

Index: src/sys/uvm/uvm_km.c
diff -u src/sys/uvm/uvm_km.c:1.158 src/sys/uvm/uvm_km.c:1.159
--- src/sys/uvm/uvm_km.c:1.158	Wed Jul  8 13:26:22 2020
+++ src/sys/uvm/uvm_km.c	Thu Jul  9 05:57:15 2020
@@ -1,4 +1,4 @@
-/*	$NetBSD: uvm_km.c,v 1.158 2020/07/08 13:26:22 skrll Exp $	*/
+/*	$NetBSD: uvm_km.c,v 1.159 2020/07/09 05:57:15 skrll Exp $	*/
 
 /*
  * Copyright (c) 1997 Charles D. Cranor and Washington University.
@@ -152,7 +152,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: uvm_km.c,v 1.158 2020/07/08 13:26:22 skrll Exp $");
+__KERNEL_RCSID(0, "$NetBSD: uvm_km.c,v 1.159 2020/07/09 05:57:15 skrll Exp $");
 
 #include "opt_uvmhist.h"
 
@@ -262,8 +262,8 @@ uvm_km_bootstrap(vaddr_t start, vaddr_t 
 	struct uvm_map_args args;
 	int error;
 
-	UVMHIST_FUNC(__func__); UVMHIST_CALLED(maphist);
-	UVMHIST_LOG(maphist, "start=%#jx end=%#jx", start, end, 0,0);
+	UVMHIST_FUNC(__func__);
+	UVMHIST_CALLARGS(maphist, "start=%#jx end=%#jx", start, end, 0,0);
 
 	kmeminit_nkmempages();
 	kmemsize = (vsize_t)nkmempages * PAGE_SIZE;

Index: src/sys/uvm/uvm_map.c
diff -u src/sys/uvm/uvm_map.c:1.384 src/sys/uvm/uvm_map.c:1.385
--- src/sys/uvm/uvm_map.c:1.384	Sat May 30 08:50:31 2020
+++ src/sys/uvm/uvm_map.c	Thu Jul  9 05:57:15 2020
@@ -1,4 +1,4 @@
-/*	$NetBSD: uvm_map.c,v 1.384 2020/05/30 08:50:31 maxv Exp $	*/
+/*	$NetBSD: uvm_map.c,v 1.385 2020/07/09 05:57:15 skrll Exp $	*/
 
 /*
  * Copyright (c) 1997 Charles D. Cranor and Washington University.
@@ -66,7 +66,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: uvm_map.c,v 1.384 2020/05/30 08:50:31 maxv Exp $");
+__KERNEL_RCSID(0, "$NetBSD: uvm_map.c,v 1.385 2020/07/09 05:57:15 skrll Exp $");
 
 #include "opt_ddb.h"
 #include "opt_pax.h"
@@ -785,7 +785,7 @@ uvm_mapent_alloc(struct vm_map *map, int
 {
 	struct vm_map_entry *me;
 	int pflags = (flags & UVM_FLAG_NOWAIT) ? PR_NOWAIT : PR_WAITOK;
-	UVMHIST_FUNC("uvm_mapent_alloc"); UVMHIST_CALLED(maphist);
+	UVMHIST_FUNC(__func__); UVMHIST_CALLED(maphist);
 
 	me = pool_cache_get(&uvm_map_entry_cache, pflags);
 	if (__predict_false(me == NULL)) {
@@ -805,9 +805,8 @@ uvm_mapent_alloc(struct vm_map *map, int
 static void
 uvm_mapent_free(struct vm_map_entry *me)
 {
-	UVMHIST_FUNC("uvm_mapent_free"); UVMHIST_CALLED(maphist);
-
-	UVMHIST_LOG(maphist,"<- freeing map entry=%#jx [flags=%jd]",
+	UVMHIST_FUNC(__func__);
+	UVMHIST_CALLARGS(maphist,"<- freeing map entry=%#jx [flags=%jd]",
 		(uintptr_t)me, me->flags, 0, 0);
 	pool_cache_put(&uvm_map_entry_cache, me);
 }
@@ -913,7 +912,7 @@ uvm_map_init(void)
 	 * first, init logging system.
 	 */
 
-	UVMHIST_FUNC("uvm_map_init");
+	UVMHIST_FUNC(__func__);
 	UVMHIST_LINK_STATIC(maphist);
 	UVMHIST_INIT_STATIC(pdhist, pdhistbuf);
 	UVMHIST_CALLED(maphist);
@@ -1128,10 +1127,8 @@ uvm_map_prepare(struct vm_map *map, vadd
 	vm_prot_t prot = UVM_PROTECTION(flags);
 	vm_prot_t maxprot = UVM_MAXPROTECTION(flags);
 
-	UVMHIST_FUNC("uvm_map_prepare");
-	UVMHIST_CALLED(maphist);
-
-	UVMHIST_LOG(maphist, "(map=%#jx, start=%#jx, size=%ju, flags=%#jx)",
+	UVMHIST_FUNC(__func__);
+	UVMHIST_CALLARGS(maphist, "(map=%#jx, start=%#jx, size=%ju, flags=%#jx)",
 	    (uintptr_t)map, start, size, flags);
 	UVMHIST_LOG(maphist, "  uobj/offset %#jx/%jd", (uintptr_t)uobj,
 	    uoffset,0,0);
@@ -1301,10 +1298,8 @@ uvm_map_enter(struct vm_map *map, const 
 	int error;
 	int newetype;
 
-	UVMHIST_FUNC("uvm_map_enter");
-	UVMHIST_CALLED(maphist);
-
-	UVMHIST_LOG(maphist, "(map=%#jx, start=%#jx, size=%ju, flags=%#jx)",
+	UVMHIST_FUNC(__func__);
+	UVMHIST_CALLARGS(maphist, "(map=%#jx, start=%#jx, size=%ju, flags=%#jx)",
 	    (uintptr_t)map, start, size, flags);
 	UVMHIST_LOG(maphist, "  uobj/offset %#jx/%jd", (uintptr_t)uobj,
 	    uoffset,0,0);
@@ -1677,10 +1672,8 @@ uvm_map_lookup_entry(struct vm_map *map,
     struct vm_map_entry **entry	/* OUT */)
 {
 	struct vm_map_entry *cur;
-	UVMHIST_FUNC("uvm_map_lookup_entry");
-	UVMHIST_CALLED(maphist);
-
-	UVMHIST_LOG(maphist,"(map=%#jx,addr=%#jx,ent=%#jx)",
+	UVMHIST_FUNC(__func__);
+	UVMHIST_CALLARGS(maphist,"(map=%#jx,addr=%#jx,ent=%#jx)",
 	    (uintptr_t)map, address, (uintptr_t)entry, 0);
 
 	/*
@@ -1814,11 +1807,10 @@ uvm_map_findspace(struct vm_map *map, va
 	struct vm_map_entry *child, *prev, *tmp;
 	vaddr_t orig_hint __diagused;
 	const int topdown = map->flags & VM_MAP_TOPDOWN;
-	UVMHIST_FUNC("uvm_map_findspace");
-	UVMHIST_CALLED(maphist);
-
-	UVMHIST_LOG(maphist, "(map=%#jx, hint=%#jx, len=%ju, flags=%#jx)",
+	UVMHIST_FUNC(__func__);
+	UVMHIST_CALLARGS(maphist, "(map=%#jx, hint=%#jx, len=%ju, flags=%#jx)",
 	    (uintptr_t)map, hint, length, flags);
+
 	KASSERT((flags & UVM_FLAG_COLORMATCH) != 0 || powerof2(align));
 	KASSERT((flags & UVM_FLAG_COLORMATCH) == 0 || align < uvmexp.ncolors);
 	KASSERT((flags & UVM_FLAG_FIXED) == 0 || align == 0);
@@ -2161,9 +2153,8 @@ uvm_unmap_remove(struct vm_map *map, vad
 {
 	struct vm_map_entry *entry, *first_entry, *next;
 	vaddr_t len;
-	UVMHIST_FUNC("uvm_unmap_remove"); UVMHIST_CALLED(maphist);
-
-	UVMHIST_LOG(maphist,"(map=%#jx, start=%#jx, end=%#jx)",
+	UVMHIST_FUNC(__func__);
+	UVMHIST_CALLARGS(maphist,"(map=%#jx, start=%#jx, end=%#jx)",
 	    (uintptr_t)map, start, end, 0);
 	VM_MAP_RANGE_CHECK(map, start, end);
 
@@ -2349,7 +2340,7 @@ void
 uvm_unmap_detach(struct vm_map_entry *first_entry, int flags)
 {
 	struct vm_map_entry *next_entry;
-	UVMHIST_FUNC("uvm_unmap_detach"); UVMHIST_CALLED(maphist);
+	UVMHIST_FUNC(__func__); UVMHIST_CALLED(maphist);
 
 	while (first_entry) {
 		KASSERT(!VM_MAPENT_ISWIRED(first_entry));
@@ -2405,9 +2396,8 @@ uvm_map_reserve(struct vm_map *map, vsiz
     vaddr_t *raddr	/* IN:hint, OUT: reserved VA */,
     uvm_flag_t flags	/* UVM_FLAG_FIXED or UVM_FLAG_COLORMATCH or 0 */)
 {
-	UVMHIST_FUNC("uvm_map_reserve"); UVMHIST_CALLED(maphist);
-
-	UVMHIST_LOG(maphist, "(map=%#jx, size=%#jx, offset=%#jx, addr=%#jx)",
+	UVMHIST_FUNC(__func__);
+	UVMHIST_CALLARGS(maphist, "(map=%#jx, size=%#jx, offset=%#jx, addr=%#jx)",
 	    (uintptr_t)map, size, offset, (uintptr_t)raddr);
 
 	size = round_page(size);
@@ -2589,9 +2579,8 @@ uvm_map_extract(struct vm_map *srcmap, v
 	vsize_t elen __unused;
 	int nchain, error, copy_ok;
 	vsize_t nsize;
-	UVMHIST_FUNC("uvm_map_extract"); UVMHIST_CALLED(maphist);
-
-	UVMHIST_LOG(maphist,"(srcmap=%#jx,start=%#jx, len=%#jx",
+	UVMHIST_FUNC(__func__);
+	UVMHIST_CALLARGS(maphist,"(srcmap=%#jx,start=%#jx, len=%#jx",
 	    (uintptr_t)srcmap, start, len, 0);
 	UVMHIST_LOG(maphist," ...,dstmap=%#jx, flags=%#jx)",
 	    (uintptr_t)dstmap, flags, 0, 0);
@@ -3007,8 +2996,8 @@ uvm_map_protect(struct vm_map *map, vadd
 {
 	struct vm_map_entry *current, *entry;
 	int error = 0;
-	UVMHIST_FUNC("uvm_map_protect"); UVMHIST_CALLED(maphist);
-	UVMHIST_LOG(maphist,"(map=%#jx,start=%#jx,end=%#jx,new_prot=%#jx)",
+	UVMHIST_FUNC(__func__);
+	UVMHIST_CALLARGS(maphist,"(map=%#jx,start=%#jx,end=%#jx,new_prot=%#jx)",
 	    (uintptr_t)map, start, end, new_prot);
 
 	vm_map_lock(map);
@@ -3167,8 +3156,8 @@ uvm_map_inherit(struct vm_map *map, vadd
     vm_inherit_t new_inheritance)
 {
 	struct vm_map_entry *entry, *temp_entry;
-	UVMHIST_FUNC("uvm_map_inherit"); UVMHIST_CALLED(maphist);
-	UVMHIST_LOG(maphist,"(map=%#jx,start=%#jx,end=%#jx,new_inh=%#jx)",
+	UVMHIST_FUNC(__func__);
+	UVMHIST_CALLARGS(maphist,"(map=%#jx,start=%#jx,end=%#jx,new_inh=%#jx)",
 	    (uintptr_t)map, start, end, new_inheritance);
 
 	switch (new_inheritance) {
@@ -3210,8 +3199,8 @@ int
 uvm_map_advice(struct vm_map *map, vaddr_t start, vaddr_t end, int new_advice)
 {
 	struct vm_map_entry *entry, *temp_entry;
-	UVMHIST_FUNC("uvm_map_advice"); UVMHIST_CALLED(maphist);
-	UVMHIST_LOG(maphist,"(map=%#jx,start=%#jx,end=%#jx,new_adv=%#jx)",
+	UVMHIST_FUNC(__func__);
+	UVMHIST_CALLARGS(maphist,"(map=%#jx,start=%#jx,end=%#jx,new_adv=%#jx)",
 	    (uintptr_t)map, start, end, new_advice);
 
 	vm_map_lock(map);
@@ -3259,8 +3248,8 @@ int
 uvm_map_willneed(struct vm_map *map, vaddr_t start, vaddr_t end)
 {
 	struct vm_map_entry *entry;
-	UVMHIST_FUNC("uvm_map_willneed"); UVMHIST_CALLED(maphist);
-	UVMHIST_LOG(maphist,"(map=%#jx,start=%#jx,end=%#jx)",
+	UVMHIST_FUNC(__func__);
+	UVMHIST_CALLARGS(maphist,"(map=%#jx,start=%#jx,end=%#jx)",
 	    (uintptr_t)map, start, end, 0);
 
 	vm_map_lock_read(map);
@@ -3326,8 +3315,8 @@ uvm_map_pageable(struct vm_map *map, vad
 #ifdef DIAGNOSTIC
 	u_int timestamp_save;
 #endif
-	UVMHIST_FUNC("uvm_map_pageable"); UVMHIST_CALLED(maphist);
-	UVMHIST_LOG(maphist,"(map=%#jx,start=%#jx,end=%#jx,new_pageable=%ju)",
+	UVMHIST_FUNC(__func__);
+	UVMHIST_CALLARGS(maphist,"(map=%#jx,start=%#jx,end=%#jx,new_pageable=%ju)",
 	    (uintptr_t)map, start, end, new_pageable);
 	KASSERT(map->flags & VM_MAP_PAGEABLE);
 
@@ -3582,8 +3571,8 @@ uvm_map_pageable_all(struct vm_map *map,
 #ifdef DIAGNOSTIC
 	u_int timestamp_save;
 #endif
-	UVMHIST_FUNC("uvm_map_pageable_all"); UVMHIST_CALLED(maphist);
-	UVMHIST_LOG(maphist,"(map=%#jx,flags=%#jx)", (uintptr_t)map, flags,
+	UVMHIST_FUNC(__func__);
+	UVMHIST_CALLARGS(maphist,"(map=%#jx,flags=%#jx)", (uintptr_t)map, flags,
 	    0, 0);
 
 	KASSERT(map->flags & VM_MAP_PAGEABLE);
@@ -3817,10 +3806,10 @@ uvm_map_clean(struct vm_map *map, vaddr_
 	vsize_t size;
 	voff_t uoff;
 	int error, refs;
-	UVMHIST_FUNC("uvm_map_clean"); UVMHIST_CALLED(maphist);
-
-	UVMHIST_LOG(maphist,"(map=%#jx,start=%#jx,end=%#jx,flags=%#jx)",
+	UVMHIST_FUNC(__func__);
+	UVMHIST_CALLARGS(maphist,"(map=%#jx,start=%#jx,end=%#jx,flags=%#jx)",
 	    (uintptr_t)map, start, end, flags);
+
 	KASSERT((flags & (PGO_FREE|PGO_DEACTIVATE)) !=
 		(PGO_FREE|PGO_DEACTIVATE));
 
@@ -4018,7 +4007,7 @@ struct vmspace *
 uvmspace_alloc(vaddr_t vmin, vaddr_t vmax, bool topdown)
 {
 	struct vmspace *vm;
-	UVMHIST_FUNC("uvmspace_alloc"); UVMHIST_CALLED(maphist);
+	UVMHIST_FUNC(__func__); UVMHIST_CALLED(maphist);
 
 	vm = pool_cache_get(&uvm_vmspace_cache, PR_WAITOK);
 	uvmspace_init(vm, NULL, vmin, vmax, topdown);
@@ -4036,9 +4025,8 @@ void
 uvmspace_init(struct vmspace *vm, struct pmap *pmap, vaddr_t vmin,
     vaddr_t vmax, bool topdown)
 {
-	UVMHIST_FUNC("uvmspace_init"); UVMHIST_CALLED(maphist);
-
-	UVMHIST_LOG(maphist, "(vm=%#jx, pmap=%#jx, vmin=%#jx, vmax=%#jx",
+	UVMHIST_FUNC(__func__);
+	UVMHIST_CALLARGS(maphist, "(vm=%#jx, pmap=%#jx, vmin=%#jx, vmax=%#jx",
 	    (uintptr_t)vm, (uintptr_t)pmap, vmin, vmax);
 	UVMHIST_LOG(maphist, "   topdown=%ju)", topdown, 0, 0, 0);
 
@@ -4243,10 +4231,9 @@ uvmspace_free(struct vmspace *vm)
 	struct vm_map *map = &vm->vm_map;
 	int flags;
 
-	UVMHIST_FUNC("uvmspace_free"); UVMHIST_CALLED(maphist);
-
-	UVMHIST_LOG(maphist,"(vm=%#jx) ref=%jd", (uintptr_t)vm, vm->vm_refcnt,
-	    0, 0);
+	UVMHIST_FUNC(__func__);
+	UVMHIST_CALLARGS(maphist,"(vm=%#jx) ref=%jd", (uintptr_t)vm,
+	    vm->vm_refcnt, 0, 0);
 	if (atomic_dec_uint_nv(&vm->vm_refcnt) > 0)
 		return;
 
@@ -4492,7 +4479,7 @@ uvmspace_fork(struct vmspace *vm1)
 	struct vm_map *old_map = &vm1->vm_map;
 	struct vm_map *new_map;
 	struct vm_map_entry *old_entry;
-	UVMHIST_FUNC("uvmspace_fork"); UVMHIST_CALLED(maphist);
+	UVMHIST_FUNC(__func__); UVMHIST_CALLED(maphist);
 
 	vm_map_lock(old_map);
 
@@ -4717,13 +4704,13 @@ void
 uvm_unmap1(struct vm_map *map, vaddr_t start, vaddr_t end, int flags)
 {
 	struct vm_map_entry *dead_entries;
-	UVMHIST_FUNC("uvm_unmap"); UVMHIST_CALLED(maphist);
+	UVMHIST_FUNC(__func__);
+	UVMHIST_CALLARGS(maphist, "  (map=%#jx, start=%#jx, end=%#jx)",
+	    (uintptr_t)map, start, end, 0);
 
 	KASSERTMSG(start < end,
 	    "%s: map %p: start %#jx < end %#jx", __func__, map,
 	    (uintmax_t)start, (uintmax_t)end);
-	UVMHIST_LOG(maphist, "  (map=%#jx, start=%#jx, end=%#jx)",
-	    (uintptr_t)map, start, end, 0);
 	if (map == kernel_map) {
 		LOCKDEBUG_MEM_CHECK((void *)start, end - start);
 	}
@@ -4823,7 +4810,7 @@ uvm_voaddr_acquire(struct vm_map * const
 	bool exclusive = false;
 	void (*unlock_fn)(struct vm_map *);
 
-	UVMHIST_FUNC("uvm_voaddr_acquire"); UVMHIST_CALLED(maphist);
+	UVMHIST_FUNC(__func__); UVMHIST_CALLED(maphist);
 	UVMHIST_LOG(maphist,"(map=%#jx,va=%jx)", (uintptr_t)map, va, 0, 0);
 
 	const vaddr_t start = trunc_page(va);

Index: src/sys/uvm/uvm_page.c
diff -u src/sys/uvm/uvm_page.c:1.243 src/sys/uvm/uvm_page.c:1.244
--- src/sys/uvm/uvm_page.c:1.243	Wed Jun 17 06:24:15 2020
+++ src/sys/uvm/uvm_page.c	Thu Jul  9 05:57:15 2020
@@ -1,4 +1,4 @@
-/*	$NetBSD: uvm_page.c,v 1.243 2020/06/17 06:24:15 thorpej Exp $	*/
+/*	$NetBSD: uvm_page.c,v 1.244 2020/07/09 05:57:15 skrll Exp $	*/
 
 /*-
  * Copyright (c) 2019, 2020 The NetBSD Foundation, Inc.
@@ -95,7 +95,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: uvm_page.c,v 1.243 2020/06/17 06:24:15 thorpej Exp $");
+__KERNEL_RCSID(0, "$NetBSD: uvm_page.c,v 1.244 2020/07/09 05:57:15 skrll Exp $");
 
 #include "opt_ddb.h"
 #include "opt_uvm.h"
@@ -1645,7 +1645,7 @@ uvm_page_unbusy(struct vm_page **pgs, in
 {
 	struct vm_page *pg;
 	int i;
-	UVMHIST_FUNC("uvm_page_unbusy"); UVMHIST_CALLED(ubchist);
+	UVMHIST_FUNC(__func__); UVMHIST_CALLED(ubchist);
 
 	for (i = 0; i < npgs; i++) {
 		pg = pgs[i];
@@ -1707,7 +1707,7 @@ uvm_pagewait(struct vm_page *pg, krwlock
 void
 uvm_pagewakeup(struct vm_page *pg)
 {
-	UVMHIST_FUNC("uvm_pagewakeup"); UVMHIST_CALLED(ubchist);
+	UVMHIST_FUNC(__func__); UVMHIST_CALLED(ubchist);
 
 	KASSERT(mutex_owned(&pg->interlock));
 

Index: src/sys/uvm/uvm_pager.c
diff -u src/sys/uvm/uvm_pager.c:1.127 src/sys/uvm/uvm_pager.c:1.128
--- src/sys/uvm/uvm_pager.c:1.127	Wed Jul  8 13:26:22 2020
+++ src/sys/uvm/uvm_pager.c	Thu Jul  9 05:57:15 2020
@@ -1,4 +1,4 @@
-/*	$NetBSD: uvm_pager.c,v 1.127 2020/07/08 13:26:22 skrll Exp $	*/
+/*	$NetBSD: uvm_pager.c,v 1.128 2020/07/09 05:57:15 skrll Exp $	*/
 
 /*
  * Copyright (c) 1997 Charles D. Cranor and Washington University.
@@ -32,7 +32,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: uvm_pager.c,v 1.127 2020/07/08 13:26:22 skrll Exp $");
+__KERNEL_RCSID(0, "$NetBSD: uvm_pager.c,v 1.128 2020/07/09 05:57:15 skrll Exp $");
 
 #include "opt_uvmhist.h"
 #include "opt_readahead.h"
@@ -189,9 +189,8 @@ uvm_pagermapin(struct vm_page **pps, int
 	vm_prot_t prot;
 	const bool pdaemon = (curlwp == uvm.pagedaemon_lwp);
 	const u_int first_color = VM_PGCOLOR(*pps);
-	UVMHIST_FUNC("uvm_pagermapin"); UVMHIST_CALLED(maphist);
-
-	UVMHIST_LOG(maphist,"(pps=%#jx, npages=%jd, first_color=%ju)",
+	UVMHIST_FUNC(__func__);
+	UVMHIST_CALLARGS(maphist,"(pps=%#jx, npages=%jd, first_color=%ju)",
 		(uintptr_t)pps, npages, first_color, 0);
 
 #ifdef PMAP_DIRECT
@@ -280,9 +279,8 @@ uvm_pagermapout(vaddr_t kva, int npages)
 {
 	vsize_t size = ptoa(npages);
 	struct vm_map_entry *entries;
-	UVMHIST_FUNC("uvm_pagermapout"); UVMHIST_CALLED(maphist);
-
-	UVMHIST_LOG(maphist, " (kva=%#jx, npages=%jd)", kva, npages,0,0);
+	UVMHIST_FUNC(__func__);
+	UVMHIST_CALLARGS(maphist, " (kva=%#jx, npages=%jd)", kva, npages,0,0);
 
 #ifdef PMAP_DIRECT
 	/*
@@ -335,7 +333,7 @@ uvm_aio_aiodone_pages(struct vm_page **p
 	int swslot;
 	int i;
 	bool swap;
-	UVMHIST_FUNC("uvm_aio_aiodone_pages"); UVMHIST_CALLED(ubchist);
+	UVMHIST_FUNC(__func__); UVMHIST_CALLED(ubchist);
 
 	swslot = 0;
 	pageout_done = 0;
@@ -523,8 +521,8 @@ uvm_aio_aiodone(struct buf *bp)
 	struct vm_page *pgs[howmany(MAXPHYS, MIN_PAGE_SIZE)];
 	int i, error;
 	bool write;
-	UVMHIST_FUNC("uvm_aio_aiodone"); UVMHIST_CALLED(ubchist);
-	UVMHIST_LOG(ubchist, "bp %#jx", (uintptr_t)bp, 0,0,0);
+	UVMHIST_FUNC(__func__);
+	UVMHIST_CALLARGS(ubchist, "bp %#jx", (uintptr_t)bp, 0,0,0);
 
 	KASSERT(bp->b_bufsize <= MAXPHYS);
 	KASSERT(npages <= __arraycount(pgs));

Index: src/sys/uvm/uvm_pdaemon.c
diff -u src/sys/uvm/uvm_pdaemon.c:1.129 src/sys/uvm/uvm_pdaemon.c:1.130
--- src/sys/uvm/uvm_pdaemon.c:1.129	Thu Jun 11 22:21:05 2020
+++ src/sys/uvm/uvm_pdaemon.c	Thu Jul  9 05:57:15 2020
@@ -1,4 +1,4 @@
-/*	$NetBSD: uvm_pdaemon.c,v 1.129 2020/06/11 22:21:05 ad Exp $	*/
+/*	$NetBSD: uvm_pdaemon.c,v 1.130 2020/07/09 05:57:15 skrll Exp $	*/
 
 /*
  * Copyright (c) 1997 Charles D. Cranor and Washington University.
@@ -66,7 +66,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: uvm_pdaemon.c,v 1.129 2020/06/11 22:21:05 ad Exp $");
+__KERNEL_RCSID(0, "$NetBSD: uvm_pdaemon.c,v 1.130 2020/07/09 05:57:15 skrll Exp $");
 
 #include "opt_uvmhist.h"
 #include "opt_readahead.h"
@@ -207,7 +207,7 @@ uvmpd_tune(void)
 {
 	int val;
 
-	UVMHIST_FUNC("uvmpd_tune"); UVMHIST_CALLED(pdhist);
+	UVMHIST_FUNC(__func__); UVMHIST_CALLED(pdhist);
 
 	/*
 	 * try to keep 0.5% of available RAM free, but limit to between
@@ -244,8 +244,8 @@ uvm_pageout(void *arg)
 	int npages = 0;
 	int extrapages = 0;
 	int fpages;
-	
-	UVMHIST_FUNC("uvm_pageout"); UVMHIST_CALLED(pdhist);
+
+	UVMHIST_FUNC(__func__); UVMHIST_CALLED(pdhist);
 
 	UVMHIST_LOG(pdhist,"<starting uvm pagedaemon>", 0, 0, 0, 0);
 
@@ -634,7 +634,7 @@ uvmpd_scan_queue(void)
 #endif /* defined(VMSWAP) */
 	int dirtyreacts;
 	krwlock_t *slock;
-	UVMHIST_FUNC("uvmpd_scan_queue"); UVMHIST_CALLED(pdhist);
+	UVMHIST_FUNC(__func__); UVMHIST_CALLED(pdhist);
 
 	/*
 	 * swslot is non-zero if we are building a swap cluster.  we want
@@ -883,7 +883,7 @@ static void
 uvmpd_scan(void)
 {
 	int swap_shortage, pages_freed, fpages;
-	UVMHIST_FUNC("uvmpd_scan"); UVMHIST_CALLED(pdhist);
+	UVMHIST_FUNC(__func__); UVMHIST_CALLED(pdhist);
 
 	uvmexp.pdrevs++;
 
@@ -1043,7 +1043,7 @@ uvmpd_pool_drain_thread(void *arg)
 		mutex_exit(&bufcache_lock);
 
 		/*
-		 * drain a pool, and then re-enable the freelist cache. 
+		 * drain a pool, and then re-enable the freelist cache.
 		 */
 		(void)pool_drain(&curpool);
 		KASSERT(curpool != NULL);

Index: src/sys/uvm/uvm_swap.c
diff -u src/sys/uvm/uvm_swap.c:1.196 src/sys/uvm/uvm_swap.c:1.197
--- src/sys/uvm/uvm_swap.c:1.196	Wed Jul  8 13:26:22 2020
+++ src/sys/uvm/uvm_swap.c	Thu Jul  9 05:57:15 2020
@@ -1,4 +1,4 @@
-/*	$NetBSD: uvm_swap.c,v 1.196 2020/07/08 13:26:22 skrll Exp $	*/
+/*	$NetBSD: uvm_swap.c,v 1.197 2020/07/09 05:57:15 skrll Exp $	*/
 
 /*
  * Copyright (c) 1995, 1996, 1997, 2009 Matthew R. Green
@@ -30,7 +30,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: uvm_swap.c,v 1.196 2020/07/08 13:26:22 skrll Exp $");
+__KERNEL_RCSID(0, "$NetBSD: uvm_swap.c,v 1.197 2020/07/09 05:57:15 skrll Exp $");
 
 #include "opt_uvmhist.h"
 #include "opt_compat_netbsd.h"
@@ -256,7 +256,7 @@ encmap_size(size_t npages)
 void
 uvm_swap_init(void)
 {
-	UVMHIST_FUNC("uvm_swap_init");
+	UVMHIST_FUNC(__func__);
 
 	UVMHIST_CALLED(pdhist);
 	/*
@@ -315,7 +315,7 @@ static void
 swaplist_insert(struct swapdev *sdp, struct swappri *newspp, int priority)
 {
 	struct swappri *spp, *pspp;
-	UVMHIST_FUNC("swaplist_insert"); UVMHIST_CALLED(pdhist);
+	UVMHIST_FUNC(__func__); UVMHIST_CALLED(pdhist);
 
 	KASSERT(rw_write_held(&swap_syscall_lock));
 	KASSERT(mutex_owned(&uvm_swap_data_lock));
@@ -510,7 +510,7 @@ sys_swapctl(struct lwp *l, const struct 
 	size_t	len = 0;
 	int	error;
 	int	priority;
-	UVMHIST_FUNC("sys_swapctl"); UVMHIST_CALLED(pdhist);
+	UVMHIST_FUNC(__func__); UVMHIST_CALLED(pdhist);
 
 	/*
 	 * we handle the non-priv NSWAP and STATS request first.
@@ -834,7 +834,7 @@ swap_on(struct lwp *l, struct swapdev *s
 	vmem_addr_t result;
 	struct vattr va;
 	dev_t dev;
-	UVMHIST_FUNC("swap_on"); UVMHIST_CALLED(pdhist);
+	UVMHIST_FUNC(__func__); UVMHIST_CALLED(pdhist);
 
 	/*
 	 * we want to enable swapping on sdp.   the swd_vp contains
@@ -1050,8 +1050,8 @@ swap_off(struct lwp *l, struct swapdev *
 	int npages = sdp->swd_npages;
 	int error = 0;
 
-	UVMHIST_FUNC("swap_off"); UVMHIST_CALLED(pdhist);
-	UVMHIST_LOG(pdhist, "  dev=%jx, npages=%jd", sdp->swd_dev,npages, 0, 0);
+	UVMHIST_FUNC(__func__);
+	UVMHIST_CALLARGS(pdhist, "  dev=%jx, npages=%jd", sdp->swd_dev,npages, 0, 0);
 
 	KASSERT(rw_write_held(&swap_syscall_lock));
 	KASSERT(mutex_owned(&uvm_swap_data_lock));
@@ -1196,7 +1196,7 @@ swstrategy(struct buf *bp)
 	struct swapdev *sdp;
 	struct vnode *vp;
 	int pageno, bn;
-	UVMHIST_FUNC("swstrategy"); UVMHIST_CALLED(pdhist);
+	UVMHIST_FUNC(__func__); UVMHIST_CALLED(pdhist);
 
 	/*
 	 * convert block number to swapdev.   note that swapdev can't
@@ -1284,9 +1284,9 @@ swstrategy(struct buf *bp)
 static int
 swread(dev_t dev, struct uio *uio, int ioflag)
 {
-	UVMHIST_FUNC("swread"); UVMHIST_CALLED(pdhist);
+	UVMHIST_FUNC(__func__);
+	UVMHIST_CALLARGS(pdhist, "  dev=%jx offset=%jx", dev, uio->uio_offset, 0, 0);
 
-	UVMHIST_LOG(pdhist, "  dev=%jx offset=%jx", dev, uio->uio_offset, 0, 0);
 	return (physio(swstrategy, NULL, dev, B_READ, minphys, uio));
 }
 
@@ -1297,9 +1297,9 @@ swread(dev_t dev, struct uio *uio, int i
 static int
 swwrite(dev_t dev, struct uio *uio, int ioflag)
 {
-	UVMHIST_FUNC("swwrite"); UVMHIST_CALLED(pdhist);
+	UVMHIST_FUNC(__func__);
+	UVMHIST_CALLARGS(pdhist, "  dev=%jx offset=%jx", dev, uio->uio_offset, 0, 0);
 
-	UVMHIST_LOG(pdhist, "  dev=%jx offset=%jx", dev, uio->uio_offset, 0, 0);
 	return (physio(swstrategy, NULL, dev, B_WRITE, minphys, uio));
 }
 
@@ -1341,7 +1341,7 @@ sw_reg_strategy(struct swapdev *sdp, str
 	char 		*addr;
 	off_t		byteoff;
 	int		s, off, nra, error, sz, resid;
-	UVMHIST_FUNC("sw_reg_strategy"); UVMHIST_CALLED(pdhist);
+	UVMHIST_FUNC(__func__); UVMHIST_CALLED(pdhist);
 
 	/*
 	 * allocate a vndxfer head for this transfer and point it to
@@ -1490,7 +1490,7 @@ sw_reg_start(struct swapdev *sdp)
 {
 	struct buf	*bp;
 	struct vnode	*vp;
-	UVMHIST_FUNC("sw_reg_start"); UVMHIST_CALLED(pdhist);
+	UVMHIST_FUNC(__func__); UVMHIST_CALLED(pdhist);
 
 	/* recursion control */
 	if ((sdp->swd_flags & SWF_BUSY) != 0)
@@ -1543,9 +1543,8 @@ sw_reg_iodone(struct work *wk, void *dum
 	struct swapdev	*sdp = vnx->vx_sdp;
 	int s, resid, error;
 	KASSERT(&vbp->vb_buf.b_work == wk);
-	UVMHIST_FUNC("sw_reg_iodone"); UVMHIST_CALLED(pdhist);
-
-	UVMHIST_LOG(pdhist, "  vbp=%#jx vp=%#jx blkno=%jx addr=%#jx",
+	UVMHIST_FUNC(__func__);
+	UVMHIST_CALLARGS(pdhist, "  vbp=%#jx vp=%#jx blkno=%jx addr=%#jx",
 	    (uintptr_t)vbp, (uintptr_t)vbp->vb_buf.b_vp, vbp->vb_buf.b_blkno,
 	    (uintptr_t)vbp->vb_buf.b_data);
 	UVMHIST_LOG(pdhist, "  cnt=%jx resid=%jx",
@@ -1619,7 +1618,7 @@ uvm_swap_alloc(int *nslots /* IN/OUT */,
 {
 	struct swapdev *sdp;
 	struct swappri *spp;
-	UVMHIST_FUNC("uvm_swap_alloc"); UVMHIST_CALLED(pdhist);
+	UVMHIST_FUNC(__func__); UVMHIST_CALLED(pdhist);
 
 	/*
 	 * no swap devices configured yet?   definite failure.
@@ -1720,7 +1719,7 @@ void
 uvm_swap_markbad(int startslot, int nslots)
 {
 	struct swapdev *sdp;
-	UVMHIST_FUNC("uvm_swap_markbad"); UVMHIST_CALLED(pdhist);
+	UVMHIST_FUNC(__func__); UVMHIST_CALLED(pdhist);
 
 	mutex_enter(&uvm_swap_data_lock);
 	sdp = swapdrum_getsdp(startslot);
@@ -1750,9 +1749,8 @@ void
 uvm_swap_free(int startslot, int nslots)
 {
 	struct swapdev *sdp;
-	UVMHIST_FUNC("uvm_swap_free"); UVMHIST_CALLED(pdhist);
-
-	UVMHIST_LOG(pdhist, "freeing %jd slots starting at %jd", nslots,
+	UVMHIST_FUNC(__func__);
+	UVMHIST_CALLARGS(pdhist, "freeing %jd slots starting at %jd", nslots,
 	    startslot, 0, 0);
 
 	/*
@@ -1839,9 +1837,8 @@ uvm_swap_io(struct vm_page **pps, int st
 	vaddr_t kva;
 	int	error, mapinflags;
 	bool write, async, swap_encrypt;
-	UVMHIST_FUNC("uvm_swap_io"); UVMHIST_CALLED(pdhist);
-
-	UVMHIST_LOG(pdhist, "<- called, startslot=%jd, npages=%jd, flags=%jd",
+	UVMHIST_FUNC(__func__);
+	UVMHIST_CALLARGS(pdhist, "<- called, startslot=%jd, npages=%jd, flags=%jd",
 	    startslot, npages, flags, 0);
 
 	write = (flags & B_READ) == 0;

Index: src/sys/uvm/uvm_vnode.c
diff -u src/sys/uvm/uvm_vnode.c:1.114 src/sys/uvm/uvm_vnode.c:1.115
--- src/sys/uvm/uvm_vnode.c:1.114	Mon May 25 21:15:10 2020
+++ src/sys/uvm/uvm_vnode.c	Thu Jul  9 05:57:15 2020
@@ -1,4 +1,4 @@
-/*	$NetBSD: uvm_vnode.c,v 1.114 2020/05/25 21:15:10 ad Exp $	*/
+/*	$NetBSD: uvm_vnode.c,v 1.115 2020/07/09 05:57:15 skrll Exp $	*/
 
 /*
  * Copyright (c) 1997 Charles D. Cranor and Washington University.
@@ -45,7 +45,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: uvm_vnode.c,v 1.114 2020/05/25 21:15:10 ad Exp $");
+__KERNEL_RCSID(0, "$NetBSD: uvm_vnode.c,v 1.115 2020/07/09 05:57:15 skrll Exp $");
 
 #ifdef _KERNEL_OPT
 #include "opt_uvmhist.h"
@@ -175,9 +175,8 @@ uvn_get(struct uvm_object *uobj, voff_t 
 	struct vnode *vp = (struct vnode *)uobj;
 	int error;
 
-	UVMHIST_FUNC("uvn_get"); UVMHIST_CALLED(ubchist);
-
-	UVMHIST_LOG(ubchist, "vp %#jx off 0x%jx", (uintptr_t)vp, (int)offset,
+	UVMHIST_FUNC(__func__);
+	UVMHIST_CALLARGS(ubchist, "vp %#jx off 0x%jx", (uintptr_t)vp, offset,
 	    0, 0);
 
 	if (vp->v_type == VREG && (access_type & VM_PROT_WRITE) == 0
@@ -290,8 +289,8 @@ uvn_findpage(struct uvm_object *uobj, vo
     unsigned int flags, struct uvm_page_array *a, unsigned int nleft)
 {
 	struct vm_page *pg;
-	UVMHIST_FUNC("uvn_findpage"); UVMHIST_CALLED(ubchist);
-	UVMHIST_LOG(ubchist, "vp %#jx off 0x%jx", (uintptr_t)uobj, offset,
+	UVMHIST_FUNC(__func__);
+	UVMHIST_CALLARGS(ubchist, "vp %#jx off 0x%jx", (uintptr_t)uobj, offset,
 	    0, 0);
 
 	/*
@@ -439,7 +438,7 @@ uvm_vnp_setsize(struct vnode *vp, voff_t
 	struct uvm_object *uobj = &vp->v_uobj;
 	voff_t pgend = round_page(newsize);
 	voff_t oldsize;
-	UVMHIST_FUNC("uvm_vnp_setsize"); UVMHIST_CALLED(ubchist);
+	UVMHIST_FUNC(__func__); UVMHIST_CALLED(ubchist);
 
 	rw_enter(uobj->vmobjlock, RW_WRITER);
 	UVMHIST_LOG(ubchist, "vp %#jx old 0x%jx new 0x%jx",
