Module Name:    src
Committed By:   matt
Date:           Sun Jun 23 06:19:55 UTC 2013

Modified Files:
        src/lib/libc_vfp: vfpdf.S vfpsf.S

Log Message:
Add EABI (aeabi) support

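Under __ARM_EABI__ the compiler calls the run-time helpers by their RTABI names
(__aeabi_dadd, __aeabi_f2iz, ...) rather than the traditional softfloat/libgcc
names (__adddf3, __fixsfsi, ...), so the change simply #defines the old entry
points to their AEABI equivalents and adds the few helpers that have no
one-to-one counterpart (__aeabi_drsub/__aeabi_frsub and the comparison
families).  The sketch below is illustrative only and not part of the commit:
it calls a few of the double-precision helpers directly, with prototypes taken
from the ARM RTABI, and assumes a soft-float EABI target where these entry
points are linked in (e.g. from libc_vfp or libgcc).

/*
 * Illustrative only: on an __ARM_EABI__ soft-float target the compiler
 * lowers "a + b" on doubles to a call to __aeabi_dadd, which this change
 * now provides as an alias of the VFP-backed __adddf3.
 */
#include <assert.h>

double __aeabi_dadd(double, double);	/* RTABI: a + b */
double __aeabi_drsub(double, double);	/* RTABI: b - a (no softfloat twin) */
int    __aeabi_d2iz(double);		/* RTABI: (int)a, truncated */

int
main(void)
{
	assert(__aeabi_dadd(1.5, 2.25) == 3.75);
	assert(__aeabi_drsub(1.0, 4.0) == 3.0);	/* second operand minus first */
	assert(__aeabi_d2iz(-2.9) == -2);	/* rounds toward zero */
	return 0;
}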

To generate a diff of this commit:
cvs rdiff -u -r1.1 -r1.2 src/lib/libc_vfp/vfpdf.S src/lib/libc_vfp/vfpsf.S

Please note that diffs are not public domain; they are subject to the
copyright notices on the relevant files.

Modified files:

Index: src/lib/libc_vfp/vfpdf.S
diff -u src/lib/libc_vfp/vfpdf.S:1.1 src/lib/libc_vfp/vfpdf.S:1.2
--- src/lib/libc_vfp/vfpdf.S:1.1	Mon Jan 28 17:04:40 2013
+++ src/lib/libc_vfp/vfpdf.S	Sun Jun 23 06:19:55 2013
@@ -29,7 +29,7 @@
 
 #include <arm/asm.h>
 
-RCSID("$NetBSD: vfpdf.S,v 1.1 2013/01/28 17:04:40 matt Exp $")
+RCSID("$NetBSD: vfpdf.S,v 1.2 2013/06/23 06:19:55 matt Exp $")
 
 /*
  * This file provides softfloat compatible routines which use VFP instructions
@@ -50,6 +50,19 @@ RCSID("$NetBSD: vfpdf.S,v 1.1 2013/01/28
 #endif
 #define	vmov_args	vmov_arg0; vmov_arg1
 
+#ifdef __ARM_EABI__
+#define	__adddf3	__aeabi_dadd
+#define	__divdf3	__aeabi_ddiv
+#define	__muldf3	__aeabi_dmul
+#define	__subdf3	__aeabi_dsub
+#define	__negdf2	__aeabi_dneg
+#define	__extendsfdf2	__aeabi_f2d
+#define	__fixdfsi	__aeabi_d2iz
+#define	__fixunsdfsi	__aeabi_d2uiz
+#define	__floatsidf	__aeabi_i2d
+#define	__floatunsidf	__aeabi_ui2d
+#endif
+
 ENTRY(__adddf3)
 	vmov_args
 	vadd.f64	d0, d0, d1
@@ -64,6 +77,15 @@ ENTRY(__subdf3)
 	RET
 END(__subdf3)
 
+#ifdef __ARM_EABI__
+ENTRY(__aeabi_drsub)
+	vmov_args
+	vsub.f64	d0, d1, d0
+	vmov_ret
+	RET
+END(__aeabi_drsub)
+#endif
+
 ENTRY(__muldf3)
 	vmov_args
 	vmul.f64	d0, d0, d1
@@ -120,6 +142,91 @@ ENTRY(__floatunsidf)
 	RET
 END(__floatunsidf)
 
+/*
+ * Effect of a floating point comparison on the condition flags.
+ *      N Z C V
+ * EQ = 0 1 1 0
+ * LT = 1 0 0 0
+ * GT = 0 0 1 0
+ * UN = 0 0 1 1
+ */
+#ifdef __ARM_EABI__
+ENTRY(__aeabi_cdcmpeq)
+	vmov_args
+	vcmp.f64	d0, d1
+	vmrs		APSR_nzcv, fpscr
+	RET
+END(__aeabi_cdcmpeq)
+
+ENTRY(__aeabi_cdcmple)
+	vmov_args
+	vcmpe.f64	d0, d1
+	vmrs		APSR_nzcv, fpscr
+	RET
+END(__aeabi_cdcmple)
+
+ENTRY(__aeabi_cdrcmple)
+	vmov_args
+	vcmpe.f64	d1, d0
+	vmrs		APSR_nzcv, fpscr
+	RET
+END(__aeabi_cdrcmple)
+
+ENTRY(__aeabi_dcmpeq)
+	vmov_args
+	vcmp.f64	d0, d1
+	vmrs		APSR_nzcv, fpscr
+	moveq		r0, #1		/* (a == b) */
+	movne		r0, #0		/* (a != b) or unordered */
+	RET
+END(__aeabi_dcmpeq)
+
+ENTRY(__aeabi_dcmplt)
+	vmov_args
+	vcmp.f64	d0, d1
+	vmrs		APSR_nzcv, fpscr
+	movlt		r0, #1		/* (a < b) */
+	movcs		r0, #0		/* (a >= b) or unordered */
+	RET
+END(__aeabi_dcmplt)
+
+ENTRY(__aeabi_dcmple)
+	vmov_args
+	vcmp.f64	d0, d1
+	vmrs		APSR_nzcv, fpscr
+	movls		r0, #1		/* (a <= b) */
+	movhi		r0, #0		/* (a > b) or unordered */
+	RET
+END(__aeabi_dcmple)
+
+ENTRY(__aeabi_dcmpge)
+	vmov_args
+	vcmp.f64	d0, d1
+	vmrs		APSR_nzcv, fpscr
+	movge		r0, #1		/* (a >= b) */
+	movlt		r0, #0		/* (a < b) or unordered */
+	RET
+END(__aeabi_dcmpge)
+
+ENTRY(__aeabi_dcmpgt)
+	vmov_args
+	vcmp.f64	d0, d1
+	vmrs		APSR_nzcv, fpscr
+	movgt		r0, #1		/* (a > b) */
+	movle		r0, #0		/* (a <= b) or unordered */
+	RET
+END(__aeabi_dcmpgt)
+
+ENTRY(__aeabi_dcmpun)
+	vmov_args
+	vcmp.f64	d0, d1
+	vmrs		APSR_nzcv, fpscr
+	movvs		r0, #1		/* (isnan(a) || isnan(b)) */
+	movvc		r0, #0		/* !isnan(a) && !isnan(b) */
+	RET
+END(__aeabi_dcmpun)
+
+#else
 /* N set if compare <= result */
 /* Z set if compare = result */
 /* C set if compare (=,>=,UNORD) result */
@@ -163,3 +270,4 @@ ENTRY(__unorddf2)
 	movvc		r0, #0		/* isnan(a) || isnan(b) */
 	RET
 END(__unorddf2)
+#endif /* !__ARM_EABI__ */
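
The N/Z/C/V table in the new double-precision code describes the flags the
vcmp/vmrs sequence leaves in the APSR: the __aeabi_cdcmp* helpers return their
result in those flags and the compiler branches on them directly at the call
site, while the __aeabi_dcmp* helpers fold the same comparison into a 0/1
value in r0 and behave like ordinary functions.  A hedged sketch of the 0/1
family follows (not part of the commit; prototypes per the ARM RTABI, assuming
a soft-float EABI target with the helpers linked in):

/*
 * Illustrative use of the 0/1-returning double comparison helpers.  Each
 * returns nonzero only when the relation holds and both operands are
 * ordered; a NaN operand makes every ordered relation false.
 */
#include <assert.h>

int __aeabi_dcmpeq(double, double);	/* a == b */
int __aeabi_dcmplt(double, double);	/* a <  b */
int __aeabi_dcmple(double, double);	/* a <= b */
int __aeabi_dcmpge(double, double);	/* a >= b */
int __aeabi_dcmpgt(double, double);	/* a >  b */
int __aeabi_dcmpun(double, double);	/* isnan(a) || isnan(b) */

int
main(void)
{
	double zero = 0.0;
	double nan = zero / zero;	/* quiet NaN, via __aeabi_ddiv here */

	assert( __aeabi_dcmple(1.0, 1.0));
	assert(!__aeabi_dcmplt(1.0, 1.0));
	assert(!__aeabi_dcmpge(nan, 1.0));	/* unordered: ordered tests fail */
	assert( __aeabi_dcmpun(nan, 1.0));
	return 0;
}
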
Index: src/lib/libc_vfp/vfpsf.S
diff -u src/lib/libc_vfp/vfpsf.S:1.1 src/lib/libc_vfp/vfpsf.S:1.2
--- src/lib/libc_vfp/vfpsf.S:1.1	Mon Jan 28 17:04:40 2013
+++ src/lib/libc_vfp/vfpsf.S	Sun Jun 23 06:19:55 2013
@@ -30,7 +30,7 @@
 #include <arm/asm.h>
 #include <arm/vfpreg.h>
 
-RCSID("$NetBSD: vfpsf.S,v 1.1 2013/01/28 17:04:40 matt Exp $")
+RCSID("$NetBSD: vfpsf.S,v 1.2 2013/06/23 06:19:55 matt Exp $")
 
 /*
  * This file provides softfloat compatible routines which use VFP instructions
@@ -40,6 +40,19 @@ RCSID("$NetBSD: vfpsf.S,v 1.1 2013/01/28
  * This file implements the single precision floating point routines.
  */
 
+#ifdef __ARM_EABI__
+#define	__addsf3	__aeabi_fadd
+#define	__divsf3	__aeabi_fdiv
+#define	__mulsf3	__aeabi_fmul
+#define	__subsf3	__aeabi_fsub
+#define	__negsf2	__aeabi_fneg
+#define	__truncdfsf2	__aeabi_d2f
+#define	__fixsfsi	__aeabi_f2iz
+#define	__fixunssfsi	__aeabi_f2uiz
+#define	__floatsisf	__aeabi_i2f
+#define	__floatunsisf	__aeabi_ui2f
+#endif
+
 ENTRY(__addsf3)
 	vmov		s0, s1, r0, r1
 	vadd.f32	s0, s0, s1
@@ -54,6 +67,15 @@ ENTRY(__subsf3)
 	RET
 END(__subsf3)
 
+#ifdef __ARM_EABI__
+ENTRY(__aeabi_frsub)
+	vmov		s0, s1, r0, r1
+	vsub.f32	s0, s1, s0
+	vmov		r0, s0
+	RET
+END(__aeabi_frsub)
+#endif
+
 ENTRY(__mulsf3)
 	vmov		s0, s1, r0, r1
 	vmul.f32	s0, s0, s1
@@ -114,6 +136,91 @@ ENTRY(__floatunsisf)
 	RET
 END(__floatunsisf)
 
+/*
+ * Effect of a floating point comparison on the condition flags.
+ *      N Z C V
+ * EQ = 0 1 1 0
+ * LT = 1 0 0 0
+ * GT = 0 0 1 0
+ * UN = 0 0 1 1
+ */
+#ifdef __ARM_EABI__
+ENTRY(__aeabi_cfcmpeq)
+	vmov		s0, s1, r0, r1
+	vcmp.f32	s0, s1
+	vmrs		APSR_nzcv, fpscr
+	RET
+END(__aeabi_cfcmpeq)
+
+ENTRY(__aeabi_cfcmple)
+	vmov		s0, s1, r0, r1
+	vcmpe.f32	s0, s1
+	vmrs		APSR_nzcv, fpscr
+	RET
+END(__aeabi_cfcmple)
+
+ENTRY(__aeabi_cfrcmple)
+	vmov		s0, s1, r0, r1
+	vcmpe.f32	s1, s0
+	vmrs		APSR_nzcv, fpscr
+	RET
+END(__aeabi_cfrcmple)
+
+ENTRY(__aeabi_fcmpeq)
+	vmov		s0, s1, r0, r1
+	vcmp.f32	s0, s1
+	vmrs		APSR_nzcv, fpscr
+	moveq		r0, #1		/* (a == b) */
+	movne		r0, #0		/* (a != b) or unordered */
+	RET
+END(__aeabi_fcmpeq)
+
+ENTRY(__aeabi_fcmplt)
+	vmov		s0, s1, r0, r1
+	vcmp.f32	s0, s1
+	vmrs		APSR_nzcv, fpscr
+	movlt		r0, #1		/* (a < b) */
+	movcs		r0, #0		/* (a >= b) or unordered */
+	RET
+END(__aeabi_fcmplt)
+
+ENTRY(__aeabi_fcmple)
+	vmov		s0, s1, r0, r1
+	vcmp.f32	s0, s1
+	vmrs		APSR_nzcv, fpscr
+	movls		r0, #1		/* (a <= b) */
+	movhi		r0, #0		/* (a > b) or unordered */
+	RET
+END(__aeabi_fcmple)
+
+ENTRY(__aeabi_fcmpge)
+	vmov		s0, s1, r0, r1
+	vcmp.f32	s0, s1
+	vmrs		APSR_nzcv, fpscr
+	movge		r0, #1		/* (a >= b) */
+	movlt		r0, #0		/* (a < b) or unordered */
+	RET
+END(__aeabi_fcmpge)
+
+ENTRY(__aeabi_fcmpgt)
+	vmov		s0, s1, r0, r1
+	vcmp.f32	s0, s1
+	vmrs		APSR_nzcv, fpscr
+	movgt		r0, #1		/* (a > b) */
+	movle		r0, #0		/* (a <= b) or unordered */
+	RET
+END(__aeabi_fcmpgt)
+
+ENTRY(__aeabi_fcmpun)
+	vmov		s0, s1, r0, r1
+	vcmp.f32	s0, s1
+	vmrs		APSR_nzcv, fpscr
+	movvs		r0, #1		/* (isnan(a) || isnan(b)) */
+	movvc		r0, #0		/* !isnan(a) && !isnan(b) */
+	RET
+END(__aeabi_fcmpun)
+
+#else
 /* N set if compare <= result */
 /* Z set if compare = result */
 /* C set if compare (=,>=,UNORD) result */
@@ -157,3 +264,4 @@ ENTRY(__unordsf2)
 	movvc		r0, #0		/* isnan(a) || isnan(b) */
 	RET
 END(__unordsf2)
+#endif /* !__ARM_EABI__ */
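
vfpsf.S gets the same treatment for single precision; besides the arithmetic
and comparison helpers it now exports the conversion routines under their
RTABI names.  A small sketch exercising those conversions (again illustrative
only, with prototypes per the ARM RTABI and a soft-float EABI target assumed):

/* Illustrative round trips through the single-precision conversion helpers. */
#include <assert.h>

float	 __aeabi_i2f(int);		/* (float)i */
float	 __aeabi_ui2f(unsigned);	/* (float)u */
int	 __aeabi_f2iz(float);		/* (int)f, truncated */
unsigned __aeabi_f2uiz(float);		/* (unsigned)f, truncated */
double	 __aeabi_f2d(float);		/* (double)f */
float	 __aeabi_d2f(double);		/* (float)d */

int
main(void)
{
	assert(__aeabi_f2iz(__aeabi_i2f(-7)) == -7);
	assert(__aeabi_f2uiz(__aeabi_ui2f(7U)) == 7U);
	assert(__aeabi_d2f(__aeabi_f2d(1.5f)) == 1.5f);
	return 0;
}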
