Re: [Qemu-devel] [PATCH] softfloat: define floatx80_default_inf

2017-07-03 Thread Laurent Vivier
On 29/06/2017 at 21:15, Philippe Mathieu-Daudé wrote:
> On Thu, Jun 29, 2017 at 4:04 PM, Laurent Vivier  wrote:
>> Signed-off-by: Laurent Vivier 
> 
> Reviewed-by: Philippe Mathieu-Daudé 
> 

I've sent an updated version of this patch in my series "target/m68k:
implement 680x0 FPU (part 3)".

Thanks,
Laurent



Re: [Qemu-devel] [PATCH] softfloat: define floatx80_default_inf

2017-06-29 Thread Philippe Mathieu-Daudé
On Thu, Jun 29, 2017 at 4:04 PM, Laurent Vivier  wrote:
> Signed-off-by: Laurent Vivier 

Reviewed-by: Philippe Mathieu-Daudé 


[Qemu-devel] [PATCH] softfloat: define floatx80_default_inf

2017-06-29 Thread Laurent Vivier
Signed-off-by: Laurent Vivier 
---
 fpu/softfloat-specialize.h | 10 ++
 fpu/softfloat.c            | 38 ++
 include/fpu/softfloat.h    |  8 +++-
 3 files changed, 43 insertions(+), 13 deletions(-)

diff --git a/fpu/softfloat-specialize.h b/fpu/softfloat-specialize.h
index de2c5d5..139b197 100644
--- a/fpu/softfloat-specialize.h
+++ b/fpu/softfloat-specialize.h
@@ -178,6 +178,16 @@ floatx80 floatx80_default_nan(float_status *status)
 }
 
 /*----------------------------------------------------------------------------
+| The pattern for a default generated extended double-precision inf.
+*----------------------------------------------------------------------------*/
+
+#define floatx80_default_inf_high 0x7FFF
+#define floatx80_default_inf_low  LIT64(0x8000000000000000)
+
+const floatx80 floatx80_default_inf
+    = make_floatx80_init(floatx80_default_inf_high, floatx80_default_inf_low);
+
+/*----------------------------------------------------------------------------
 | The pattern for a default generated quadruple-precision NaN.
 *----------------------------------------------------------------------------*/
 float128 float128_default_nan(float_status *status)
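
The two constants just added spell out the IEEE 754 extended double-precision
infinity encoding: a 15-bit all-ones exponent (0x7FFF) and a 64-bit significand
with the explicit integer bit set and the fraction clear (0x8000000000000000).
As a rough standalone illustration only (not part of the patch; the macro and
variable names below merely mirror the ones above), a small C program that
checks that bit pattern:

/* Illustration only: rebuild the default-infinity pattern from its high/low
 * halves and verify the properties that make it an infinity, not a NaN. */
#include <stdint.h>
#include <stdio.h>

#define INF_HIGH 0x7FFF                       /* sign = 0, exponent all ones */
#define INF_LOW  UINT64_C(0x8000000000000000) /* integer bit 1, fraction 0 */

int main(void)
{
    uint16_t high = INF_HIGH;
    uint64_t low  = INF_LOW;

    int exp_all_ones = (high & 0x7FFF) == 0x7FFF; /* inf or NaN */
    int integer_bit  = (low >> 63) & 1;           /* explicit integer bit set */
    int frac_zero    = (low << 1) == 0;           /* zero fraction => infinity */

    printf("default inf pattern: %d\n", exp_all_ones && integer_bit && frac_zero);
    return 0;
}
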
diff --git a/fpu/softfloat.c b/fpu/softfloat.c
index 7af14e2..67f1dd9 100644
--- a/fpu/softfloat.c
+++ b/fpu/softfloat.c
@@ -913,7 +913,9 @@ static floatx80 roundAndPackFloatx80(int8_t roundingPrecision, flag zSign,
) {
 return packFloatx80( zSign, 0x7FFE, ~ roundMask );
 }
-return packFloatx80( zSign, 0x7FFF, LIT64( 0x8000000000000000 ) );
+return packFloatx80(zSign,
+floatx80_default_inf_high,
+floatx80_default_inf_low);
 }
 if ( zExp <= 0 ) {
 isTiny =
@@ -1885,7 +1887,9 @@ floatx80 float32_to_floatx80(float32 a, float_status *status)
 if (aSig) {
 return commonNaNToFloatx80(float32ToCommonNaN(a, status), status);
 }
-return packFloatx80( aSign, 0x7FFF, LIT64( 0x8000000000000000 ) );
+return packFloatx80(aSign,
+floatx80_default_inf_high,
+floatx80_default_inf_low);
 }
 if ( aExp == 0 ) {
 if ( aSig == 0 ) return packFloatx80( aSign, 0, 0 );
@@ -3666,7 +3670,9 @@ floatx80 float64_to_floatx80(float64 a, float_status *status)
 if (aSig) {
 return commonNaNToFloatx80(float64ToCommonNaN(a, status), status);
 }
-return packFloatx80( aSign, 0x7FFF, LIT64( 0x8000000000000000 ) );
+return packFloatx80(aSign,
+floatx80_default_inf_high,
+floatx80_default_inf_low);
 }
 if ( aExp == 0 ) {
 if ( aSig == 0 ) return packFloatx80( aSign, 0, 0 );
@@ -4927,8 +4933,8 @@ int64_t floatx80_to_int64(floatx80 a, float_status *status)
 if ( shiftCount ) {
 float_raise(float_flag_invalid, status);
 if (! aSign
- || (( aExp == 0x7FFF )
-  && ( aSig != LIT64( 0x8000000000000000 ) ) )
+ || ((aExp == floatx80_default_inf_high)
+ && (aSig != floatx80_default_inf_low))
) {
 return LIT64( 0x7FFFFFFFFFFFFFFF );
 }
@@ -5219,7 +5225,9 @@ static floatx80 addFloatx80Sigs(floatx80 a, floatx80 b, flag zSign,
 if ((uint64_t)(bSig << 1)) {
 return propagateFloatx80NaN(a, b, status);
 }
-return packFloatx80( zSign, 0x7FFF, LIT64( 0x8000000000000000 ) );
+return packFloatx80(zSign,
+floatx80_default_inf_high,
+floatx80_default_inf_low);
 }
 if ( aExp == 0 ) ++expDiff;
 shift64ExtraRightJamming( aSig, 0, - expDiff, ,  );
@@ -5294,7 +5302,8 @@ static floatx80 subFloatx80Sigs(floatx80 a, floatx80 b, flag zSign,
 if ((uint64_t)(bSig << 1)) {
 return propagateFloatx80NaN(a, b, status);
 }
-return packFloatx80( zSign ^ 1, 0x7FFF, LIT64( 0x8000000000000000 ) );
+return packFloatx80(zSign ^ 1, floatx80_default_inf_high,
+floatx80_default_inf_low);
 }
 if ( aExp == 0 ) ++expDiff;
 shift128RightJamming( aSig, 0, - expDiff, ,  );
@@ -5399,7 +5408,8 @@ floatx80 floatx80_mul(floatx80 a, floatx80 b, float_status *status)
 return propagateFloatx80NaN(a, b, status);
 }
 if ( ( bExp | bSig ) == 0 ) goto invalid;
-return packFloatx80( zSign, 0x7FFF, LIT64( 0x8000000000000000 ) );
+return packFloatx80(zSign, floatx80_default_inf_high,
+   floatx80_default_inf_low);
 }
 if ( bExp == 0x7FFF ) {
 if ((uint64_t)(bSig << 1)) {
@@ -5410,7 +5420,8 @@ floatx80 floatx80_mul(floatx80 a,