this is kind of like the change i just made to sparc64.

sparc created an instance of an inline function for each of the splfoo
calls. this diff provides a single splraise() function and turns each
splfoo() into a macro that expands to splraise(IPL_FOO).

the spl code is quite long so i turned it into functions instead
of inlines.

could someone test this? i haven't got a running (walking?) sparc
anymore.

cheers,
dlg

Index: include/psl.h
===================================================================
RCS file: /cvs/src/sys/arch/sparc/include/psl.h,v
retrieving revision 1.28
diff -u -p -r1.28 psl.h
--- include/psl.h       29 Mar 2014 18:09:30 -0000      1.28
+++ include/psl.h       13 Jun 2016 01:27:01 -0000
@@ -142,28 +142,6 @@ setpsr(newpsr)
        __asm volatile("nop");
 }
 
-static __inline int
-spl0()
-{
-       int psr, oldipl;
-
-       /*
-        * wrpsr xors two values: we choose old psr and old ipl here,
-        * which gives us the same value as the old psr but with all
-        * the old PIL bits turned off.
-        */
-       __asm volatile("rd %%psr,%0" : "=r" (psr));
-       oldipl = psr & PSR_PIL;
-       __asm volatile("wr %0,%1,%%psr" : : "r" (psr), "r" (oldipl));
-
-       /*
-        * Three instructions must execute before we can depend
-        * on the bits to be changed.
-        */
-       __asm volatile("nop; nop; nop");
-       return (oldipl);
-}
-
 #ifdef DIAGNOSTIC
 /*
  * Although this function is implemented in MI code, it must be in this MD
@@ -183,83 +161,11 @@ void splassert_check(int, const char *);
 #define splsoftassert(wantipl) do { /* nada */ } while (0)
 #endif
 
-/*
- * PIL 1 through 14 can use this macro.
- * (spl0 and splhigh are special since they put all 0s or all 1s
- * into the ipl field.)
- */
-#define        SPL(name, newipl) \
-static __inline int name(void); \
-static __inline int name() \
-{ \
-       int psr, oldipl; \
-       __asm volatile("rd %%psr,%0" : "=r" (psr)); \
-       oldipl = psr & PSR_PIL; \
-       psr &= ~oldipl; \
-       __asm volatile("wr %0,%1,%%psr" : : \
-           "r" (psr), "n" ((newipl) << 8)); \
-       __asm volatile("nop; nop; nop"); \
-       __asm volatile("":::"memory");  /* protect from reordering */ \
-       return (oldipl); \
-}
-/* A non-priority-decreasing version of SPL */
-#define        SPLHOLD(name, newipl) \
-static __inline int name(void); \
-static __inline int name() \
-{ \
-       int psr, oldipl; \
-       __asm volatile("rd %%psr,%0" : "=r" (psr)); \
-       oldipl = psr & PSR_PIL; \
-       if ((newipl << 8) <= oldipl) \
-               return oldipl; \
-       psr &= ~oldipl; \
-       __asm volatile("wr %0,%1,%%psr" : : \
-           "r" (psr), "n" ((newipl) << 8)); \
-       __asm volatile("nop; nop; nop"); \
-       __asm volatile("":::"memory");  /* protect from reordering */ \
-       return (oldipl); \
-}
-
-SPLHOLD(splsoftint, IPL_SOFTINT)
-#define        splsoftclock            splsoftint
-#define        splsoftnet              splsoftint
-SPLHOLD(splausoft, IPL_AUSOFT)
-SPLHOLD(splfdsoft, IPL_FDSOFT)
-SPLHOLD(splbio, IPL_BIO)
-SPLHOLD(splnet, IPL_NET)
-SPLHOLD(spltty, IPL_TTY)
-SPLHOLD(splvm, IPL_VM)
-SPLHOLD(splclock, IPL_CLOCK)
-SPLHOLD(splfd, IPL_FD)
-SPLHOLD(splzs, IPL_ZS)
-SPLHOLD(splaudio, IPL_AUHARD)
-SPLHOLD(splsched, IPL_SCHED)
-SPLHOLD(splstatclock, IPL_STATCLOCK)
-
-static __inline int splhigh()
-{
-       int psr, oldipl;
-
-       __asm volatile("rd %%psr,%0" : "=r" (psr));
-       __asm volatile("wr %0,0,%%psr" : : "r" (psr | PSR_PIL));
-       __asm volatile("and %1,%2,%0; nop; nop" : "=r" (oldipl) : \
-           "r" (psr), "n" (PSR_PIL));
-       __asm volatile("":::"memory");  /* protect from reordering */
-       return (oldipl);
-}
-
-/* splx does not have a return value */
-static __inline void splx(newipl)
-       int newipl;
-{
-       int psr;
+int    spl0(void);
+int    splraise(int);  /* XXX mail says splfoo() become splraise(IPL_FOO) macros, but they are not in this diff -- verify */
+int    splhigh(void);  /* takes no argument: matches the definition and existing splhigh() callers */
+void   splx(int);
 
-       __asm volatile("":::"memory");  /* protect from reordering */
-       __asm volatile("rd %%psr,%0" : "=r" (psr));
-       __asm volatile("wr %0,%1,%%psr" : : \
-           "r" (psr & ~PSR_PIL), "rn" (newipl));
-       __asm volatile("nop; nop; nop");
-}
 #endif /* KERNEL && !_LOCORE */
 
 #endif /* PSR_IMPL */
Index: sparc/intr.c
===================================================================
RCS file: /cvs/src/sys/arch/sparc/sparc/intr.c,v
retrieving revision 1.43
diff -u -p -r1.43 intr.c
--- sparc/intr.c        10 Dec 2015 19:48:04 -0000      1.43
+++ sparc/intr.c        13 Jun 2016 01:27:01 -0000
@@ -544,3 +544,70 @@ splassert_check(int wantipl, const char 
        }
 }
 #endif
+
+int
+spl0(void)
+{
+       int psr, oldipl;
+
+       /*
+        * wrpsr xors its two operands: passing the old psr and its old
+        * PIL field yields the old psr with all PIL bits cleared, i.e.
+        * IPL 0.  The previous PIL is returned for a later splx().
+        */
+       __asm volatile("rd %%psr,%0" : "=r" (psr));
+       oldipl = psr & PSR_PIL;
+       __asm volatile("wr %0,%1,%%psr" : : "r" (psr), "r" (oldipl));
+
+       /*
+        * Three instructions must execute before we can depend
+        * on the bits to be changed (delayed-write wrpsr).
+        */
+       __asm volatile("nop; nop; nop");
+       return (oldipl);
+}
+
+int
+splraise(int newipl)
+{
+       int psr, oldipl;
+
+       newipl <<= 8;           /* shift IPL into the PSR_PIL field */
+
+       __asm volatile("rd %%psr,%0" : "=r" (psr));
+       oldipl = psr & PSR_PIL;
+       if (newipl <= oldipl)
+               return oldipl;  /* never lower the priority level */
+
+       psr &= ~oldipl;
+       __asm volatile("wr %0,%1,%%psr" : : "r" (psr), "rn" (newipl));
+       __asm volatile("nop; nop; nop");
+       __asm volatile("":::"memory");  /* protect from reordering */
+
+       return (oldipl);
+}
+
+int
+splhigh(void)
+{
+       int psr, oldipl;
+
+       __asm volatile("rd %%psr,%0" : "=r" (psr));
+       __asm volatile("wr %0,0,%%psr" : : "r" (psr | PSR_PIL));
+       __asm volatile("and %1,%2,%0; nop; nop" : "=r" (oldipl) :
+           "r" (psr), "n" (PSR_PIL));
+       __asm volatile("":::"memory");  /* barrier: keep later accesses after the raise */
+       return (oldipl);
+}
+
+void
+splx(int newipl)
+{
+       int psr;
+
+       __asm volatile("":::"memory");  /* barrier: keep protected accesses before the lower */
+       __asm volatile("rd %%psr,%0" : "=r" (psr));
+       __asm volatile("wr %0,%1,%%psr" : :
+           "r" (psr & ~PSR_PIL), "rn" (newipl));
+       __asm volatile("nop; nop; nop");
+}

Reply via email to