Re: use volatile not __volatile

2022-08-28 Thread Philip Guenther
On Sat, Aug 27, 2022 at 6:31 PM Jonathan Gray wrote:

> directly use ansi volatile keyword not __volatile from cdefs.h
>

Yay!
ok guenther@


use volatile not __volatile

2022-08-27 Thread Jonathan Gray
directly use ansi volatile keyword not __volatile from cdefs.h
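
For anyone wondering why this is a no-op for the compiler: __volatile is just
a compatibility alias that sys/cdefs.h keeps around for pre-ANSI compilers;
with any C89-or-later compiler it expands to the plain keyword, so spelling
volatile out directly only removes a layer of indirection.  A simplified
sketch (illustrative, not the verbatim cdefs.h text; the helper function is
only an example):

/*
 * With an ANSI compiler the alias is the keyword itself; with a
 * pre-ANSI compiler it expands to nothing (simplified sketch).
 */
#if defined(__STDC__) || defined(__cplusplus)
#define __volatile	volatile
#else
#define __volatile
#endif

/* Both statements below are therefore identical on any modern toolchain;
 * the diff simply switches to the second, direct spelling. */
static inline void
compiler_barrier(void)
{
	__asm __volatile("" ::: "memory");	/* via the cdefs.h alias */
	__asm volatile("" ::: "memory");	/* plain ANSI keyword */
}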

diff --git lib/libc/arch/aarch64/gen/fpgetround.c lib/libc/arch/aarch64/gen/fpgetround.c
index 058fa8fcd8c..b6943e6585e 100644
--- lib/libc/arch/aarch64/gen/fpgetround.c
+++ lib/libc/arch/aarch64/gen/fpgetround.c
@@ -34,7 +34,7 @@ fpgetround(void)
 {
uint32_t fpscr;
 
-   __asm __volatile("mrs %x0, fpcr" : "=&r"(fpscr));
+   __asm volatile("mrs %x0, fpcr" : "=&r"(fpscr));
 
return ((fpscr >> 22) & 3);
 }
diff --git lib/libc/arch/aarch64/gen/fpgetsticky.c lib/libc/arch/aarch64/gen/fpgetsticky.c
index 723f6156479..90a05d9d75a 100644
--- lib/libc/arch/aarch64/gen/fpgetsticky.c
+++ lib/libc/arch/aarch64/gen/fpgetsticky.c
@@ -40,7 +40,7 @@ fpgetsticky(void)
 {
fp_except old;
 
-   __asm __volatile("mrs %x0, fpcr" : "=&r"(old));
+   __asm volatile("mrs %x0, fpcr" : "=&r"(old));
 
return (old & FP_X_MASK);
 }
diff --git lib/libc/arch/aarch64/gen/fpsetround.c lib/libc/arch/aarch64/gen/fpsetround.c
index 0eba1541c6f..a638b6ee03d 100644
--- lib/libc/arch/aarch64/gen/fpsetround.c
+++ lib/libc/arch/aarch64/gen/fpsetround.c
@@ -34,10 +34,10 @@ fpsetround(fp_rnd rnd_dir)
 {
uint32_t old, new;
 
-   __asm __volatile("mrs %x0, fpcr" : "=&r"(old));
+   __asm volatile("mrs %x0, fpcr" : "=&r"(old));
new = old & ~(3 << 22);
new |= rnd_dir << 22;
-   __asm __volatile("msr fpcr, %x0" : : "r"(new));
+   __asm volatile("msr fpcr, %x0" : : "r"(new));
 
return ((old >> 22) & 3);
 }
diff --git lib/libc/arch/aarch64/gen/fpsetsticky.c lib/libc/arch/aarch64/gen/fpsetsticky.c
index 592b777b739..422ccf9e7a9 100644
--- lib/libc/arch/aarch64/gen/fpsetsticky.c
+++ lib/libc/arch/aarch64/gen/fpsetsticky.c
@@ -40,10 +40,10 @@ fpsetsticky(fp_except except)
 {
fp_except old, new;
 
-   __asm __volatile("mrs %x0, fpcr" : "=&r"(old));
+   __asm volatile("mrs %x0, fpcr" : "=&r"(old));
new = old & ~(FP_X_MASK);
new &= ~except;
-   __asm __volatile("msr fpcr, %x0" : : "r"(new));
+   __asm volatile("msr fpcr, %x0" : : "r"(new));
 
return (old & except);
 }
diff --git lib/libm/arch/aarch64/fenv.c lib/libm/arch/aarch64/fenv.c
index 7aca2b86b26..6c0e1f8c6dd 100644
--- lib/libm/arch/aarch64/fenv.c
+++ lib/libm/arch/aarch64/fenv.c
@@ -34,11 +34,11 @@
 #define	_FPUSW_SHIFT	8
 #define	_ENABLE_MASK	(FE_ALL_EXCEPT << _FPUSW_SHIFT)
 
-#define	__mrs_fpcr(r)	__asm __volatile("mrs %x0, fpcr" : "=r" (r))
-#define	__msr_fpcr(r)	__asm __volatile("msr fpcr, %x0" : : "r" (r))
+#define	__mrs_fpcr(r)	__asm volatile("mrs %x0, fpcr" : "=r" (r))
+#define	__msr_fpcr(r)	__asm volatile("msr fpcr, %x0" : : "r" (r))
 
-#define	__mrs_fpsr(r)	__asm __volatile("mrs %x0, fpsr" : "=r" (r))
-#define	__msr_fpsr(r)	__asm __volatile("msr fpsr, %x0" : : "r" (r))
+#define	__mrs_fpsr(r)	__asm volatile("mrs %x0, fpsr" : "=r" (r))
+#define	__msr_fpsr(r)	__asm volatile("msr fpsr, %x0" : : "r" (r))
 
 /*
  * The following constant represents the default floating-point environment
diff --git sys/arch/amd64/include/atomic.h sys/arch/amd64/include/atomic.h
index 6d737123e7b..a4bdeb6c8cf 100644
--- sys/arch/amd64/include/atomic.h
+++ sys/arch/amd64/include/atomic.h
@@ -260,7 +260,7 @@ _atomic_sub_long_nv(volatile unsigned long *p, unsigned long v)
  * ourselves.
  */
 
-#define __membar(_f) do { __asm __volatile(_f ::: "memory"); } while (0)
+#define __membar(_f) do { __asm volatile(_f ::: "memory"); } while (0)
 
 #if defined(MULTIPROCESSOR) || !defined(_KERNEL)
 #define membar_enter() __membar("mfence")
diff --git sys/arch/arm/arm/vfp.c sys/arch/arm/arm/vfp.c
index 8f61fc8d395..cddc08532f9 100644
--- sys/arch/arm/arm/vfp.c
+++ sys/arch/arm/arm/vfp.c
@@ -28,7 +28,7 @@
 static inline void
 set_vfp_fpexc(uint32_t val)
 {
-   __asm __volatile(
+   __asm volatile(
".fpu vfpv3\n"
"vmsr fpexc, %0" :: "r" (val));
 }
@@ -37,7 +37,7 @@ static inline uint32_t
 get_vfp_fpexc(void)
 {
uint32_t val;
-   __asm __volatile(
+   __asm volatile(
".fpu vfpv3\n"
"vmrs %0, fpexc" : "=r" (val));
return val;
@@ -67,7 +67,7 @@ vfp_store(struct fpreg *vfpsave)
uint32_t scratch;
 
if (get_vfp_fpexc() & VFPEXC_EN) {
-   __asm __volatile(
+   __asm volatile(
".fpu vfpv3\n"
"vstmia %1!, {d0-d15}\n"/* d0-d15 */
"vstmia %1!, {d16-d31}\n"   /* d16-d31 */
@@ -151,7 +151,7 @@ vfp_load(struct proc *p)
/* enable to be able to load ctx */
set_vfp_fpexc(VFPEXC_EN);
 
-   __asm __volatile(
+   __asm volatile(
".fpu vfpv3\n"
"vldmia %1!, {d0-d15}\n"/* d0-d15 */
"vldmia %1!, {d16-d31}\n"   /* d16-d31 */
diff --git sys/arch/arm/include/atomi