Spell "inline" correctly (drop the nonstandard __inline spelling); this also reduces
the diff with NetBSD for uvm_amap.c and uvm_fault.c.
ok?
Index: uvm/uvm_addr.c
===================================================================
RCS file: /cvs/src/sys/uvm/uvm_addr.c,v
retrieving revision 1.28
diff -u -p -r1.28 uvm_addr.c
--- uvm/uvm_addr.c 13 Sep 2020 10:05:25 -0000 1.28
+++ uvm/uvm_addr.c 22 Sep 2020 07:12:10 -0000
@@ -186,7 +186,7 @@ uvm_addr_entrybyspace(struct uaddr_free_
}
#endif /* !SMALL_KERNEL */
-static __inline vaddr_t
+static inline vaddr_t
uvm_addr_align_forward(vaddr_t addr, vaddr_t align, vaddr_t offset)
{
vaddr_t adjusted;
@@ -201,7 +201,7 @@ uvm_addr_align_forward(vaddr_t addr, vad
return (adjusted < addr ? adjusted + align : adjusted);
}
-static __inline vaddr_t
+static inline vaddr_t
uvm_addr_align_backward(vaddr_t addr, vaddr_t align, vaddr_t offset)
{
vaddr_t adjusted;
Index: uvm/uvm_amap.c
===================================================================
RCS file: /cvs/src/sys/uvm/uvm_amap.c,v
retrieving revision 1.82
diff -u -p -r1.82 uvm_amap.c
--- uvm/uvm_amap.c 4 Jan 2020 16:17:29 -0000 1.82
+++ uvm/uvm_amap.c 22 Sep 2020 07:07:45 -0000
@@ -63,20 +63,20 @@ static char amap_small_pool_names[UVM_AM
*/
static struct vm_amap *amap_alloc1(int, int, int);
-static __inline void amap_list_insert(struct vm_amap *);
-static __inline void amap_list_remove(struct vm_amap *);
+static inline void amap_list_insert(struct vm_amap *);
+static inline void amap_list_remove(struct vm_amap *);
struct vm_amap_chunk *amap_chunk_get(struct vm_amap *, int, int, int);
void amap_chunk_free(struct vm_amap *, struct vm_amap_chunk *);
void amap_wiperange_chunk(struct vm_amap *, struct vm_amap_chunk *, int, int);
-static __inline void
+static inline void
amap_list_insert(struct vm_amap *amap)
{
LIST_INSERT_HEAD(&amap_list, amap, am_list);
}
-static __inline void
+static inline void
amap_list_remove(struct vm_amap *amap)
{
LIST_REMOVE(amap, am_list);
@@ -190,13 +190,10 @@ amap_chunk_free(struct vm_amap *amap, st
* here are some in-line functions to help us.
*/
-static __inline void pp_getreflen(int *, int, int *, int *);
-static __inline void pp_setreflen(int *, int, int, int);
-
/*
* pp_getreflen: get the reference and length for a specific offset
*/
-static __inline void
+static inline void
pp_getreflen(int *ppref, int offset, int *refp, int *lenp)
{
@@ -212,7 +209,7 @@ pp_getreflen(int *ppref, int offset, int
/*
* pp_setreflen: set the reference and length for a specific offset
*/
-static __inline void
+static inline void
pp_setreflen(int *ppref, int offset, int ref, int len)
{
if (len == 1) {
Index: uvm/uvm_aobj.c
===================================================================
RCS file: /cvs/src/sys/uvm/uvm_aobj.c,v
retrieving revision 1.86
diff -u -p -r1.86 uvm_aobj.c
--- uvm/uvm_aobj.c 18 Jul 2019 23:47:33 -0000 1.86
+++ uvm/uvm_aobj.c 22 Sep 2020 07:11:50 -0000
@@ -256,7 +256,7 @@ uao_find_swhash_elt(struct uvm_aobj *aob
/*
* uao_find_swslot: find the swap slot number for an aobj/pageidx
*/
-__inline static int
+inline static int
uao_find_swslot(struct uvm_aobj *aobj, int pageidx)
{
Index: uvm/uvm_fault.c
===================================================================
RCS file: /cvs/src/sys/uvm/uvm_fault.c,v
retrieving revision 1.98
diff -u -p -r1.98 uvm_fault.c
--- uvm/uvm_fault.c 12 Sep 2020 17:08:49 -0000 1.98
+++ uvm/uvm_fault.c 22 Sep 2020 07:07:59 -0000
@@ -159,7 +159,7 @@ static struct uvm_advice uvmadvice[MADV_
* private prototypes
*/
static void uvmfault_amapcopy(struct uvm_faultinfo *);
-static __inline void uvmfault_anonflush(struct vm_anon **, int);
+static inline void uvmfault_anonflush(struct vm_anon **, int);
void uvmfault_unlockmaps(struct uvm_faultinfo *, boolean_t);
void uvmfault_update_stats(struct uvm_faultinfo *);
@@ -171,7 +171,7 @@ void uvmfault_update_stats(struct uvm_fa
*
* => does not have to deactivate page if it is busy
*/
-static __inline void
+static inline void
uvmfault_anonflush(struct vm_anon **anons, int n)
{
int lcv;
Index: uvm/uvm_map.c
===================================================================
RCS file: /cvs/src/sys/uvm/uvm_map.c,v
retrieving revision 1.267
diff -u -p -r1.267 uvm_map.c
--- uvm/uvm_map.c 14 Sep 2020 20:31:09 -0000 1.267
+++ uvm/uvm_map.c 22 Sep 2020 07:11:47 -0000
@@ -167,7 +167,7 @@ boolean_t uvm_map_inentry_fix(struct p
* Tree management functions.
*/
-static __inline void uvm_mapent_copy(struct vm_map_entry*,
+static inline void uvm_mapent_copy(struct vm_map_entry*,
struct vm_map_entry*);
static inline int uvm_mapentry_addrcmp(const struct vm_map_entry*,
const struct vm_map_entry*);
@@ -361,7 +361,7 @@ uvm_mapentry_addrcmp(const struct vm_map
/*
* Copy mapentry.
*/
-static __inline void
+static inline void
uvm_mapent_copy(struct vm_map_entry *src, struct vm_map_entry *dst)
{
caddr_t csrc, cdst;
@@ -563,7 +563,7 @@ uvm_map_entrybyaddr(struct uvm_map_addr
* *head must be initialized to NULL before the first call to this macro.
* uvm_unmap_detach(*head, 0) will remove dead entries.
*/
-static __inline void
+static inline void
dead_entry_push(struct uvm_map_deadq *deadq, struct vm_map_entry *entry)
{
TAILQ_INSERT_TAIL(deadq, entry, dfree.deadq);
@@ -4812,8 +4812,8 @@ uvm_map_clip_start(struct vm_map *map, s
/*
* Boundary fixer.
*/
-static __inline vaddr_t uvm_map_boundfix(vaddr_t, vaddr_t, vaddr_t);
-static __inline vaddr_t
+static inline vaddr_t uvm_map_boundfix(vaddr_t, vaddr_t, vaddr_t);
+static inline vaddr_t
uvm_map_boundfix(vaddr_t min, vaddr_t max, vaddr_t bound)
{
return (min < bound && max > bound) ? bound : max;
Index: uvm/uvm_page.c
===================================================================
RCS file: /cvs/src/sys/uvm/uvm_page.c,v
retrieving revision 1.149
diff -u -p -r1.149 uvm_page.c
--- uvm/uvm_page.c 29 Nov 2019 18:32:40 -0000 1.149
+++ uvm/uvm_page.c 22 Sep 2020 07:12:18 -0000
@@ -129,7 +129,7 @@ static void uvm_pageremove(struct vm_pag
* => call should have already set pg's object and offset pointers
* and bumped the version counter
*/
-__inline static void
+inline static void
uvm_pageinsert(struct vm_page *pg)
{
struct vm_page *dupe;
@@ -147,7 +147,7 @@ uvm_pageinsert(struct vm_page *pg)
*
* => caller must lock page queues
*/
-static __inline void
+static inline void
uvm_pageremove(struct vm_page *pg)
{
KASSERT(pg->pg_flags & PG_TABLED);
Index: uvm/uvm_page.h
===================================================================
RCS file: /cvs/src/sys/uvm/uvm_page.h,v
retrieving revision 1.64
diff -u -p -r1.64 uvm_page.h
--- uvm/uvm_page.h 29 Nov 2019 18:32:40 -0000 1.64
+++ uvm/uvm_page.h 22 Sep 2020 07:12:21 -0000
@@ -249,7 +249,7 @@ psize_t uvm_pagecount(struct uvm_constr
/*
* vm_physseg_find: find vm_physseg structure that belongs to a PA
*/
-static __inline int
+static inline int
vm_physseg_find(paddr_t pframe, int *offp)
{
/* 'contig' case */
@@ -265,7 +265,7 @@ vm_physseg_find(paddr_t pframe, int *off
* PHYS_TO_VM_PAGE: find vm_page for a PA. used by MI code to get vm_pages
* back from an I/O mapping (ugh!). used in some MD code as well.
*/
-static __inline struct vm_page *
+static inline struct vm_page *
PHYS_TO_VM_PAGE(paddr_t pa)
{
paddr_t pf = atop(pa);