Use the ANSI C "volatile" keyword directly instead of the __volatile macro from <sys/cdefs.h>.
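
For context only (this snippet is not part of the diff; the function name and the empty asm body are made up for illustration), a minimal sketch of the two spellings. The assumption here is that <sys/cdefs.h> maps __volatile to the plain keyword on ANSI compilers, so the change is purely one of spelling:

    /* hypothetical example, not from the diff */
    static inline void
    example_compiler_barrier(void)
    {
            /* old spelling: __volatile macro pulled in via <sys/cdefs.h> */
            /* __asm __volatile("" ::: "memory"); */

            /* new spelling: ANSI C "volatile" keyword used directly */
            __asm volatile("" ::: "memory");
    }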

diff --git lib/libc/arch/aarch64/gen/fpgetround.c lib/libc/arch/aarch64/gen/fpgetround.c
index 058fa8fcd8c..b6943e6585e 100644
--- lib/libc/arch/aarch64/gen/fpgetround.c
+++ lib/libc/arch/aarch64/gen/fpgetround.c
@@ -34,7 +34,7 @@ fpgetround(void)
 {
        uint32_t fpscr;
 
-       __asm __volatile("mrs %x0, fpcr" : "=&r"(fpscr));
+       __asm volatile("mrs %x0, fpcr" : "=&r"(fpscr));
 
        return ((fpscr >> 22) & 3);
 }
diff --git lib/libc/arch/aarch64/gen/fpgetsticky.c lib/libc/arch/aarch64/gen/fpgetsticky.c
index 723f6156479..90a05d9d75a 100644
--- lib/libc/arch/aarch64/gen/fpgetsticky.c
+++ lib/libc/arch/aarch64/gen/fpgetsticky.c
@@ -40,7 +40,7 @@ fpgetsticky(void)
 {
        fp_except old;
 
-       __asm __volatile("mrs %x0, fpcr" : "=&r"(old));
+       __asm volatile("mrs %x0, fpcr" : "=&r"(old));
 
        return (old & FP_X_MASK);
 }
diff --git lib/libc/arch/aarch64/gen/fpsetround.c lib/libc/arch/aarch64/gen/fpsetround.c
index 0eba1541c6f..a638b6ee03d 100644
--- lib/libc/arch/aarch64/gen/fpsetround.c
+++ lib/libc/arch/aarch64/gen/fpsetround.c
@@ -34,10 +34,10 @@ fpsetround(fp_rnd rnd_dir)
 {
        uint32_t old, new;
 
-       __asm __volatile("mrs %x0, fpcr" : "=&r"(old));
+       __asm volatile("mrs %x0, fpcr" : "=&r"(old));
        new = old & ~(3 << 22);
        new |= rnd_dir << 22;
-       __asm __volatile("msr fpcr, %x0" : : "r"(new));
+       __asm volatile("msr fpcr, %x0" : : "r"(new));
 
        return ((old >> 22) & 3);
 }
diff --git lib/libc/arch/aarch64/gen/fpsetsticky.c lib/libc/arch/aarch64/gen/fpsetsticky.c
index 592b777b739..422ccf9e7a9 100644
--- lib/libc/arch/aarch64/gen/fpsetsticky.c
+++ lib/libc/arch/aarch64/gen/fpsetsticky.c
@@ -40,10 +40,10 @@ fpsetsticky(fp_except except)
 {
        fp_except old, new;
 
-       __asm __volatile("mrs %x0, fpcr" : "=&r"(old));
+       __asm volatile("mrs %x0, fpcr" : "=&r"(old));
        new = old & ~(FP_X_MASK);
        new &= ~except;
-       __asm __volatile("msr fpcr, %x0" : : "r"(new));
+       __asm volatile("msr fpcr, %x0" : : "r"(new));
 
        return (old & except);
 }
diff --git lib/libm/arch/aarch64/fenv.c lib/libm/arch/aarch64/fenv.c
index 7aca2b86b26..6c0e1f8c6dd 100644
--- lib/libm/arch/aarch64/fenv.c
+++ lib/libm/arch/aarch64/fenv.c
@@ -34,11 +34,11 @@
 #define        _FPUSW_SHIFT    8
 #define        _ENABLE_MASK    (FE_ALL_EXCEPT << _FPUSW_SHIFT)
 
-#define        __mrs_fpcr(r)   __asm __volatile("mrs %x0, fpcr" : "=r" (r))
-#define        __msr_fpcr(r)   __asm __volatile("msr fpcr, %x0" : : "r" (r))
+#define        __mrs_fpcr(r)   __asm volatile("mrs %x0, fpcr" : "=r" (r))
+#define        __msr_fpcr(r)   __asm volatile("msr fpcr, %x0" : : "r" (r))
 
-#define        __mrs_fpsr(r)   __asm __volatile("mrs %x0, fpsr" : "=r" (r))
-#define        __msr_fpsr(r)   __asm __volatile("msr fpsr, %x0" : : "r" (r))
+#define        __mrs_fpsr(r)   __asm volatile("mrs %x0, fpsr" : "=r" (r))
+#define        __msr_fpsr(r)   __asm volatile("msr fpsr, %x0" : : "r" (r))
 
 /*
  * The following constant represents the default floating-point environment
diff --git sys/arch/amd64/include/atomic.h sys/arch/amd64/include/atomic.h
index 6d737123e7b..a4bdeb6c8cf 100644
--- sys/arch/amd64/include/atomic.h
+++ sys/arch/amd64/include/atomic.h
@@ -260,7 +260,7 @@ _atomic_sub_long_nv(volatile unsigned long *p, unsigned long v)
  * ourselves.
  */
 
-#define __membar(_f) do { __asm __volatile(_f ::: "memory"); } while (0)
+#define __membar(_f) do { __asm volatile(_f ::: "memory"); } while (0)
 
 #if defined(MULTIPROCESSOR) || !defined(_KERNEL)
 #define membar_enter()         __membar("mfence")
diff --git sys/arch/arm/arm/vfp.c sys/arch/arm/arm/vfp.c
index 8f61fc8d395..cddc08532f9 100644
--- sys/arch/arm/arm/vfp.c
+++ sys/arch/arm/arm/vfp.c
@@ -28,7 +28,7 @@
 static inline void
 set_vfp_fpexc(uint32_t val)
 {
-       __asm __volatile(
+       __asm volatile(
            ".fpu vfpv3\n"
            "vmsr fpexc, %0" :: "r" (val));
 }
@@ -37,7 +37,7 @@ static inline uint32_t
 get_vfp_fpexc(void)
 {
        uint32_t val;
-       __asm __volatile(
+       __asm volatile(
            ".fpu vfpv3\n"
            "vmrs %0, fpexc" : "=r" (val));
        return val;
@@ -67,7 +67,7 @@ vfp_store(struct fpreg *vfpsave)
        uint32_t scratch;
 
        if (get_vfp_fpexc() & VFPEXC_EN) {
-               __asm __volatile(
+               __asm volatile(
                    ".fpu vfpv3\n"
                    "vstmia     %1!, {d0-d15}\n"        /* d0-d15 */
                    "vstmia     %1!, {d16-d31}\n"       /* d16-d31 */
@@ -151,7 +151,7 @@ vfp_load(struct proc *p)
        /* enable to be able to load ctx */
        set_vfp_fpexc(VFPEXC_EN);
 
-       __asm __volatile(
+       __asm volatile(
            ".fpu vfpv3\n"
            "vldmia     %1!, {d0-d15}\n"                /* d0-d15 */
            "vldmia     %1!, {d16-d31}\n"               /* d16-d31 */
diff --git sys/arch/arm/include/atomic.h sys/arch/arm/include/atomic.h
index 33b43833dfa..b34e22dd0f3 100644
--- sys/arch/arm/include/atomic.h
+++ sys/arch/arm/include/atomic.h
@@ -234,7 +234,7 @@ _def_atomic_sub_nv(_atomic_sub_long_nv, unsigned long)
 #define atomic_sub_int_nv(_p, _v) _atomic_sub_int_nv((_p), (_v))
 #define atomic_sub_long_nv(_p, _v) _atomic_sub_long_nv((_p), (_v))
 
-#define __membar(_f) do { __asm __volatile(_f ::: "memory"); } while (0)
+#define __membar(_f) do { __asm volatile(_f ::: "memory"); } while (0)
 
 #define membar_enter()         __membar("dmb sy")
 #define membar_exit()          __membar("dmb sy")
diff --git sys/arch/arm64/include/armreg.h sys/arch/arm64/include/armreg.h
index 851f06f8266..4eee955b9da 100644
--- sys/arch/arm64/include/armreg.h
+++ sys/arch/arm64/include/armreg.h
@@ -38,11 +38,11 @@
 
 #define        READ_SPECIALREG(reg)                                            \
 ({     uint64_t val;                                                   \
-       __asm __volatile("mrs   %0, " __STRING(reg) : "=&r" (val));     \
+       __asm volatile("mrs     %0, " __STRING(reg) : "=&r" (val));     \
        val;                                                            \
 })
 #define        WRITE_SPECIALREG(reg, val)                                      \
-       __asm __volatile("msr   " __STRING(reg) ", %0" : : "r"((uint64_t)val))
+       __asm volatile("msr     " __STRING(reg) ", %0" : : "r"((uint64_t)val))
 
 /* CCSIDR_EL1 - Current Cache Size ID Register */
 #define        CCSIDR_SETS_MASK        0x0fffe000
diff --git sys/arch/arm64/include/atomic.h sys/arch/arm64/include/atomic.h
index 5cf42178cb5..487d3676ffb 100644
--- sys/arch/arm64/include/atomic.h
+++ sys/arch/arm64/include/atomic.h
@@ -5,7 +5,7 @@
 #ifndef _MACHINE_ATOMIC_H_
 #define _MACHINE_ATOMIC_H_
 
-#define __membar(_f) do { __asm __volatile(_f ::: "memory"); } while (0)
+#define __membar(_f) do { __asm volatile(_f ::: "memory"); } while (0)
 
 #define membar_enter()         __membar("dmb sy")
 #define membar_exit()          __membar("dmb sy")
diff --git sys/arch/arm64/include/cpu.h sys/arch/arm64/include/cpu.h
index 1f3bebe56b4..27f13ea5c97 100644
--- sys/arch/arm64/include/cpu.h
+++ sys/arch/arm64/include/cpu.h
@@ -158,7 +158,7 @@ static inline struct cpu_info *
 curcpu(void)
 {
        struct cpu_info *__ci = NULL;
-       __asm __volatile("mrs %0, tpidr_el1" : "=r" (__ci));
+       __asm volatile("mrs %0, tpidr_el1" : "=r" (__ci));
        return (__ci);
 }
 
diff --git sys/arch/i386/include/atomic.h sys/arch/i386/include/atomic.h
index 4cc1f2b49da..07aa066c759 100644
--- sys/arch/i386/include/atomic.h
+++ sys/arch/i386/include/atomic.h
@@ -244,7 +244,7 @@ _atomic_sub_long_nv(volatile unsigned long *p, unsigned long v)
  * ourselves.
  */
 
-#define __membar(_f) do { __asm __volatile(_f ::: "memory"); } while (0)
+#define __membar(_f) do { __asm volatile(_f ::: "memory"); } while (0)
 
 #if defined(MULTIPROCESSOR) || !defined(_KERNEL)
 #define membar_enter()         __membar("lock; addl $0,0(%%esp)")
diff --git sys/arch/m88k/include/atomic.h sys/arch/m88k/include/atomic.h
index b4f078d2444..52a7cd84324 100644
--- sys/arch/m88k/include/atomic.h
+++ sys/arch/m88k/include/atomic.h
@@ -187,7 +187,7 @@ __sync_synchronize(void)
 
 /* trap numbers below 128 would cause a privileged instruction fault */
 #define        __membar() do {                                         \
-       __asm __volatile("tb1 0, %%r0, 128" ::: "memory");      \
+       __asm volatile("tb1 0, %%r0, 128" ::: "memory");        \
 } while (0)
 
 #endif /* gcc < 4 */
diff --git sys/arch/octeon/include/octeonvar.h sys/arch/octeon/include/octeonvar.h
index 2ba34e615f0..e0ba44ec3b9 100644
--- sys/arch/octeon/include/octeonvar.h
+++ sys/arch/octeon/include/octeonvar.h
@@ -353,7 +353,7 @@ ffs32(uint32_t val)
 {
        int ret;
 
-       __asm __volatile ( \
+       __asm volatile ( \
                _ASM_PROLOGUE_MIPS64
                "       clz     %0, %1                  \n"
                _ASM_EPILOGUE
diff --git sys/arch/powerpc/include/atomic.h sys/arch/powerpc/include/atomic.h
index f543a284d85..c274c2bf00a 100644
--- sys/arch/powerpc/include/atomic.h
+++ sys/arch/powerpc/include/atomic.h
@@ -273,7 +273,7 @@ _atomic_addic_long_nv(volatile unsigned long *p, unsigned long v)
 #define atomic_inc_long_nv(_p) _atomic_addic_long_nv((_p), 1)
 #define atomic_dec_long_nv(_p) _atomic_addic_long_nv((_p), -1)
 
-#define __membar(_f) do { __asm __volatile(_f ::: "memory"); } while (0)
+#define __membar(_f) do { __asm volatile(_f ::: "memory"); } while (0)
 
 #if defined(MULTIPROCESSOR) || !defined(_KERNEL)
 #define membar_enter()         __membar("isync")
diff --git sys/arch/powerpc64/include/atomic.h sys/arch/powerpc64/include/atomic.h
index 2f58aa80db7..f9306ae41a2 100644
--- sys/arch/powerpc64/include/atomic.h
+++ sys/arch/powerpc64/include/atomic.h
@@ -273,7 +273,7 @@ _atomic_addic_long_nv(volatile unsigned long *p, unsigned long v)
 #define atomic_inc_long_nv(_p) _atomic_addic_long_nv((_p), 1)
 #define atomic_dec_long_nv(_p) _atomic_addic_long_nv((_p), -1)
 
-#define __membar(_f) do { __asm __volatile(_f ::: "memory"); } while (0)
+#define __membar(_f) do { __asm volatile(_f ::: "memory"); } while (0)
 
 #if defined(MULTIPROCESSOR) || !defined(_KERNEL)
 #define membar_enter()         __membar("isync")
diff --git sys/arch/powerpc64/powerpc64/syncicache.c sys/arch/powerpc64/powerpc64/syncicache.c
index 292d10a662a..856d365885c 100644
--- sys/arch/powerpc64/powerpc64/syncicache.c
+++ sys/arch/powerpc64/powerpc64/syncicache.c
@@ -58,7 +58,7 @@ __syncicache(void *from, size_t len)
        __asm volatile ("sync");
        p = (char *)from - off;
        do {
-               __asm __volatile ("icbi 0,%0" :: "r"(p));
+               __asm volatile ("icbi 0,%0" :: "r"(p));
                p += cacheline_size;
                len -= cacheline_size;
        } while (len + cacheline_size > cacheline_size);
diff --git sys/arch/riscv64/include/atomic.h sys/arch/riscv64/include/atomic.h
index 24b5f532be2..ff4de2c0be7 100644
--- sys/arch/riscv64/include/atomic.h
+++ sys/arch/riscv64/include/atomic.h
@@ -5,7 +5,7 @@
 #ifndef _MACHINE_ATOMIC_H_
 #define _MACHINE_ATOMIC_H_
 
-#define __membar(_f) do {__asm __volatile(_f ::: "memory"); } while (0)
+#define __membar(_f) do {__asm volatile(_f ::: "memory"); } while (0)
 
 #define membar_enter()         __membar("fence w,rw")
 #define membar_exit()          __membar("fence rw,w")
@@ -27,7 +27,7 @@
 static inline void
 atomic_setbits_int(volatile unsigned int *p, unsigned int v)
 {
-       __asm __volatile("amoor.w zero, %1, %0"
+       __asm volatile("amoor.w zero, %1, %0"
                        : "+A" (*p)
                        : "r" (v)
                        : "memory");
@@ -36,7 +36,7 @@ atomic_setbits_int(volatile unsigned int *p, unsigned int v)
 static inline void
 atomic_store_64(volatile uint64_t *p, uint64_t v)
 {
-       __asm __volatile("amoor.d zero, %1, %0"
+       __asm volatile("amoor.d zero, %1, %0"
                        : "+A" (*p)
                        : "r" (v)
                        : "memory");
@@ -49,7 +49,7 @@ atomic_store_64(volatile uint64_t *p, uint64_t v)
 static inline void
 atomic_clearbits_int(volatile unsigned int *p, unsigned int v)
 {
-       __asm __volatile("amoand.w zero, %1, %0"
+       __asm volatile("amoand.w zero, %1, %0"
                        : "+A" (*p)
                        : "r" (~v)
                        : "memory");
diff --git sys/arch/riscv64/include/cpu.h sys/arch/riscv64/include/cpu.h
index a455820bb7c..ccc8f66d697 100644
--- sys/arch/riscv64/include/cpu.h
+++ sys/arch/riscv64/include/cpu.h
@@ -137,7 +137,7 @@ static inline struct cpu_info *
 curcpu(void)
 {
        struct cpu_info *__ci = NULL;
-       __asm __volatile("mv %0, tp" : "=&r"(__ci));
+       __asm volatile("mv %0, tp" : "=&r"(__ci));
        return (__ci);
 }
 
@@ -238,7 +238,7 @@ void        savectx         (struct pcb *pcb);
 static inline void
 intr_enable(void)
 {
-       __asm __volatile("csrsi sstatus, %0" :: "i" (SSTATUS_SIE));
+       __asm volatile("csrsi sstatus, %0" :: "i" (SSTATUS_SIE));
 }
 
 static inline u_long
@@ -246,7 +246,7 @@ intr_disable(void)
 {
        uint64_t ret;
 
-       __asm __volatile(
+       __asm volatile(
            "csrrci %0, sstatus, %1"
            : "=&r" (ret) : "i" (SSTATUS_SIE)
        );
@@ -257,7 +257,7 @@ intr_disable(void)
 static inline void
 intr_restore(u_long s)
 {
-       __asm __volatile("csrs sstatus, %0" :: "r" (s));
+       __asm volatile("csrs sstatus, %0" :: "r" (s));
 }
 
 void   delay (unsigned);
diff --git sys/arch/riscv64/include/cpufunc.h sys/arch/riscv64/include/cpufunc.h
index 884a1ed642a..e6d80b91cda 100644
--- sys/arch/riscv64/include/cpufunc.h
+++ sys/arch/riscv64/include/cpufunc.h
@@ -49,19 +49,19 @@ breakpoint(void)
 static __inline void
 fence_i(void)
 {
-       __asm __volatile("fence.i" ::: "memory");
+       __asm volatile("fence.i" ::: "memory");
 }
 
 static __inline void
 sfence_vma(void)
 {
-       __asm __volatile("sfence.vma" ::: "memory");
+       __asm volatile("sfence.vma" ::: "memory");
 }
 
 static __inline void
 sfence_vma_page(uintptr_t addr)
 {
-       __asm __volatile("sfence.vma %0"
+       __asm volatile("sfence.vma %0"
                        :
                        : "r" (addr)
                        : "memory");
@@ -71,7 +71,7 @@ sfence_vma_page(uintptr_t addr)
 static __inline void
 sfence_vma_asid(uint64_t asid)
 {
-       __asm __volatile("sfence.vma x0, %0"
+       __asm volatile("sfence.vma x0, %0"
                        :
                        : "r" (asid)
                        : "memory");
@@ -80,7 +80,7 @@ sfence_vma_asid(uint64_t asid)
 static __inline void
 sfence_vma_page_asid(uintptr_t addr, uint64_t asid)
 {
-       __asm __volatile("sfence.vma %0, %1"
+       __asm volatile("sfence.vma %0, %1"
                         :
                         : "r" (addr), "r" (asid)
                         : "memory");
@@ -96,7 +96,7 @@ extern void (*cpu_dcache_wb_range)(paddr_t, psize_t);
 static __inline void
 load_satp(uint64_t val)
 {
-       __asm __volatile("csrw satp, %0" :: "r"(val));
+       __asm volatile("csrw satp, %0" :: "r"(val));
 }
 
 #define        cpufunc_nullop()                riscv_nullop()
diff --git sys/arch/riscv64/include/riscvreg.h sys/arch/riscv64/include/riscvreg.h
index e57720d499c..3f2bc41cf07 100644
--- sys/arch/riscv64/include/riscvreg.h
+++ sys/arch/riscv64/include/riscvreg.h
@@ -197,38 +197,38 @@
 
 #define csr_swap(csr, val)                                             \
 ({     if (CSR_ZIMM(val))                                              \
-               __asm __volatile("csrrwi %0, " #csr ", %1"              \
+               __asm volatile("csrrwi %0, " #csr ", %1"                \
                                : "=r" (val) : "i" (val));              \
        else                                                            \
-               __asm __volatile("csrrw %0, " #csr ", %1"               \
+               __asm volatile("csrrw %0, " #csr ", %1"         \
                                : "=r" (val) : "r" (val));              \
        val;                                                            \
 })
 
 #define csr_write(csr, val)                                            \
 ({     if (CSR_ZIMM(val))                                              \
-               __asm __volatile("csrwi " #csr ", %0" :: "i" (val));    \
+               __asm volatile("csrwi " #csr ", %0" :: "i" (val));      \
        else                                                            \
-               __asm __volatile("csrw " #csr ", %0" ::  "r" (val));    \
+               __asm volatile("csrw " #csr ", %0" ::  "r" (val));      \
 })
 
 #define csr_set(csr, val)                                              \
 ({     if (CSR_ZIMM(val))                                              \
-               __asm __volatile("csrsi " #csr ", %0" :: "i" (val));    \
+               __asm volatile("csrsi " #csr ", %0" :: "i" (val));      \
        else                                                            \
-               __asm __volatile("csrs " #csr ", %0" :: "r" (val));     \
+               __asm volatile("csrs " #csr ", %0" :: "r" (val));       \
 })
 
 #define csr_clear(csr, val)                                            \
 ({     if (CSR_ZIMM(val))                                              \
-               __asm __volatile("csrci " #csr ", %0" :: "i" (val));    \
+               __asm volatile("csrci " #csr ", %0" :: "i" (val));      \
        else                                                            \
-               __asm __volatile("csrc " #csr ", %0" :: "r" (val));     \
+               __asm volatile("csrc " #csr ", %0" :: "r" (val));       \
 })
 
 #define csr_read(csr)                                                  \
 ({     u_long val;                                                     \
-       __asm __volatile("csrr %0, " #csr : "=r" (val));                \
+       __asm volatile("csrr %0, " #csr : "=r" (val));          \
        val;                                                            \
 })
 
diff --git sys/arch/riscv64/include/sbi.h sys/arch/riscv64/include/sbi.h
index 0cb68d797f8..f8d0e316a12 100644
--- sys/arch/riscv64/include/sbi.h
+++ sys/arch/riscv64/include/sbi.h
@@ -116,7 +116,7 @@ sbi_call(uint64_t arg7, uint64_t arg6, uint64_t arg0, uint64_t arg1,
        register uintptr_t a6 __asm ("a6") = (uintptr_t)(arg6);
        register uintptr_t a7 __asm ("a7") = (uintptr_t)(arg7);
 
-       __asm __volatile(                       \
+       __asm volatile(                 \
                "ecall"                         \
                :"+r"(a0), "+r"(a1)             \
                :"r"(a2), "r"(a3), "r"(a6), "r"(a7)     \
diff --git sys/arch/riscv64/riscv64/pmap.c sys/arch/riscv64/riscv64/pmap.c
index 87bcca8c7e8..7f644310ee0 100644
--- sys/arch/riscv64/riscv64/pmap.c
+++ sys/arch/riscv64/riscv64/pmap.c
@@ -1328,7 +1328,7 @@ pmap_bootstrap(long kvo, vaddr_t l1pt, vaddr_t kernelstart, vaddr_t kernelend,
 
        //switching to new page table
        uint64_t satp = pmap_kernel()->pm_satp;
-       __asm __volatile("csrw satp, %0" :: "r" (satp) : "memory");
+       __asm volatile("csrw satp, %0" :: "r" (satp) : "memory");
 
        printf("all mapped\n");
 
