On 02/19/2015 11:59 AM, Dylan Baker wrote:
On Thu, Feb 19, 2015 at 12:06:32AM -0600, Micah Fedke wrote:
---
   generated_tests/gen_shader_precision_tests.py | 148 ++++++++++++++++++++++++--
   1 file changed, 137 insertions(+), 11 deletions(-)

diff --git a/generated_tests/gen_shader_precision_tests.py b/generated_tests/gen_shader_precision_tests.py
index cfa5065..0bda05a 100644
--- a/generated_tests/gen_shader_precision_tests.py
+++ b/generated_tests/gen_shader_precision_tests.py
@@ -49,29 +49,155 @@
   from builtin_function import *
   import mako.template
   import os
+import struct
+import bigfloat

What does bigfloat buy us that numpy doesn't? That should be part of
your commit message.

You'll also need to add a cmake check for bigfloat in this patch. Also,
looking at bigfloat, it seems to be a wrapper around GNU MPFR; can it be
used on Windows?
Its package page at python.org lists it as OS Independent:
https://pypi.python.org/pypi/bigfloat/

Is this the proper authority on these types of things?
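
For context, the places this patch actually touches bigfloat are its
precision context managers plus abs/round/next_up/next_down.  A rough
sketch of that usage pattern (the sqrt call and the input value are only
for illustration, not taken from the patch):

    import bigfloat

    x = 0.1  # an arbitrary test input

    # Evaluate the same expression under two precision contexts; the gap
    # between the two results is what the new helpers inspect.
    with bigfloat.precision(113):
        hi = bigfloat.sqrt(x)
    with bigfloat.precision(23):
        lo = bigfloat.sqrt(x)

    print(hi - lo)  # shrinks as the second context's precision grows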


    from templates import template_file
-tolerances = {'pow': 16.0,
-              'exp': 3.0,
-              'exp2': 3.0,
-              'log': 3.0,
-              'log2': 3.0,
-              'sqrt': 3.0,
-              'inversesqrt': 2.0,
-              'op-div': 2.5,
-              'op-assign-div': 2.5,
-              }
+
+allowed_error_scale = 4.0
   trig_builtins = ('sin', 'cos', 'tan',
                    'asin', 'acos', 'atan',
                    'sinh', 'cosh', 'tanh',
                    'asinh', 'acosh', 'atanh')
   +high_precision = bigfloat.precision(113)
+low_precision = bigfloat.precision(23)
+
   def _is_sequence(arg):
       return (not hasattr(arg, "strip") and
               hasattr(arg, "__iter__"))
   +def _len_any(a):
+    """ a version of len that returns 1 if passed a non-sequence type
+    """
+    return len(a) if _is_sequence(a) else 1

I'm not sure how I feel about this. I have a feeling that passing
different data types around for long periods is going to result in lots
of hard-to-find bugs. Is it possible to just put non-sequence items into
a list of one element?
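
For example, a small normalizing helper (name hypothetical, reusing the
patch's _is_sequence) could replace _len_any and let everything
downstream assume list semantics:

    def _listify(arg):
        """Wrap non-sequence values in a one-element list so callers
        always iterate over the same shape of data."""
        return list(arg) if _is_sequence(arg) else [arg]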

+
+def _floatToBits(f):
+    s = struct.pack('>f', f)
+    return struct.unpack('>l', s)[0]
+
+def _bitsToFloat(b):
+    s = struct.pack('>l', b)
+    return struct.unpack('>f', s)[0]
+
+def _ulpsize(f):
+    """ determine _ulpsize in the direction of nearest infinity
+        which gives the worst case scenario for edge cases
+    """
+    return bigfloat.next_up(f)-f if f >= 0.0 \
+            else f-bigfloat.next_down(f)
+
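
As a quick sanity check of _ulpsize under the patch's own low-precision
context (MPFR's next_up/next_down respect the active precision):

    with low_precision:              # bigfloat.precision(23)
        print(_ulpsize(1.0))         # 2**-22, the gap to the next 23-bit value above 1.0
        print(_ulpsize(-1.0))        # same magnitude, measured toward -infinity

so the per-component tolerances end up expressed in units of the spacing
of representable values at the output's magnitude.
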
+def _capture_error(precise, imprecise):
+    """Perform the legwork of calculating the difference in error of the
+    high precision and low precision runs.  Decide whether this difference
+    in error is within allowable tolerances.  The range of allowable
+    tolerances is subjective, as ARB_shader_precision (and the GLSL spec as
+    of v4.5) gives no direct guidance for complex functions.  Toronto et al.
+    use quadrupled error as a limit in "Practically Accurate Floating-Point
+    Math," Computing Now, Oct. 2014.  Also use the difference in error and
+    the value of one ulp at the output to calculate the tolerance range in
+    ulps for use by the shader test, should this vector pass the badlands
+    check.
+    """
+
+    ers = []
+    bls = []
+    cts = []
+    with high_precision:
+        error = bigfloat.abs(precise - imprecise)
+    ers.append(error)
+    with low_precision:
+        ulpsz = _ulpsize(imprecise)
+    with high_precision:
+        bls.append(error > ulpsz*allowed_error_scale)
+        cts.append(bigfloat.round(error/ulpsz))
+    return {'errors':ers, 'badlands':bls, 'component_tolerances':cts}
+
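
To put numbers on the tolerance bookkeeping above (hypothetical values,
plain floats instead of BigFloats purely for readability):

    ulp   = 2.0 ** -22      # _ulpsize(imprecise) in the low-precision context
    error = 2.6 * ulp       # |precise - imprecise| from the two runs
    error > ulp * allowed_error_scale   # False: 2.6 ulps is inside the 4-ulp cutoff
    round(error / ulp)                  # 3.0 ulps recorded as the component tolerance
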
+def _analyze_ref_fn(fn, args):
+    """Many functions contain ill-conditioned regions referred to as
+    "badlands" (see Toronto et al., "Practically Accurate Floating-Point
+    Math," Computing Now, Oct. 2014).  Within these regions errors in the
+    inputs are magnified significantly, making the function impossible to
+    test with any reasonable accuracy.  A complex function that operates
+    on floating point numbers has the potential to generate such error
+    propagation even if the inputs are exact floating point numbers, since
+    intermediate results can be generated with error.  In order to identify
+    and avoid these areas, we run the function once at a lower precision
+    and once at a higher precision, and compare the outputs.  Propagating
+    errors will be greater at lower precision and less at higher precision
+    for a given set of function inputs, allowing us to identify the
+    badlands of the function.
+    """
+
+    ret = {'errors':[], 'badlands':[], 'component_tolerances':[]}
+    with high_precision:
+        precise = fn(args)
+    with low_precision:
+        imprecise = fn(args)
+    if _len_any(imprecise) == 1:
+        ret = _capture_error(precise, imprecise)
+    else:
+        for i, arg in enumerate(imprecise):
+            rettmp = _capture_error(precise[i], arg)
+            ret['errors'].extend(rettmp['errors'])
+            ret['badlands'].extend(rettmp['badlands'])
+            ret['component_tolerances'].extend(rettmp['component_tolerances'])
+    return ret
+
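
The two-precision comparison is easy to exercise on a standalone toy
reference function (hypothetical, not part of the patch):

    def _toy_ref(args):
        # Deliberately cancellation-prone: ((x + 1) - x) - 1 is exactly 0.0
        # when computed with enough precision.
        x = args[0]
        return (bigfloat.add(x, 1.0) - x) - 1.0

    res = _analyze_ref_fn(_toy_ref, [1.0e10])
    print(res['badlands'])   # [True]: the 23-bit run lands around 1e3 instead
                             # of 0.0, far outside the 4-ulp allowance
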
+simple_fns = {'op-mult': 0.0,
+              'op-assign-mult': 0.0,
+              'op-div': 2.5,
+              'op-assign-div': 2.5,
+              'pow': 16.0,
+              'exp': 3.0,
+              'exp2': 3.0,
+              'log': 3.0,
+              'log2': 3.0,
+              'sqrt': 3.0,
+              'inversesqrt': 2.0}
+
+complex_fns = {}
+
+componentwise_fns = ('mod', 'mix', 'smoothstep')
+
+def _gen_tolerance(name, rettype, args):
+    """Return the tolerance that should be allowed for a function for the
+    test vector passed in.  Return -1 for any vectors that would push the
+    tolerance outside of acceptable bounds
+    """
+    if name in simple_fns:
+        if name == 'op-mult' or name == 'op-assign-mult':

if name in ['op-mult', 'op-assign-mult']:

+            x_type = glsl_type_of(args[0])
+            y_type = glsl_type_of(args[1])
+            if x_type.is_vector and y_type.is_matrix:
+                mult_func = _vec_times_mat_ref
+            elif x_type.is_matrix and y_type.is_vector:
+                mult_func = _mat_times_vec_ref
+            elif x_type.is_matrix and y_type.is_matrix:
+                mult_func = _mat_times_mat_ref
+            else:
+                return simple_fns[name]
+            ret = _analyze_ref_fn(mult_func, args)
+            return -1.0 if any(ret['badlands']) else map(float, ret['component_tolerances'])

Generally at this point Python (both upstream and the community)
discourages the use of map and filter, with a preference for
comprehensions. [float(x) for x in ret['component_tolerances']] should be
what you want.

I'm also assuming that you are aware that any() will find any truthy
value: so any number that isn't 0, any non-empty string, any non-empty
container, etc.
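
For the record, the comprehension keeps the list semantics, and any()
trips on every truthy entry, not just literal Trues:

    >>> [float(x) for x in [1, 2, 3]]
    [1.0, 2.0, 3.0]
    >>> any([False, False])
    False
    >>> any([False, 0.0, 2.5])   # any non-zero / non-empty entry counts
    True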

+        else:
+            return simple_fns[name]
+    elif name in complex_fns:
+        if name in componentwise_fns:
+            ret = {'errors':[], 'badlands':[], 'component_tolerances':[]}
+            for component in range(rettype.num_cols*rettype.num_rows):
+                current_args = []
+                for i, arg in enumerate(args):
+                    current_args.append(arg[component%len(arg)] if _len_any(arg) > 1 else arg)
+                rettmp = _analyze_ref_fn(complex_fns[name], current_args)
+                ret['errors'].extend(rettmp['errors'])
+                ret['badlands'].extend(rettmp['badlands'])
+                ret['component_tolerances'].extend(rettmp['component_tolerances'])
+        else:
+            ret = _analyze_ref_fn(complex_fns[name], args)
+        return -1.0 if any(ret['badlands']) else map(float, ret['component_tolerances'])
+    else:
+        return 0.0
+
   def make_indexers(signature):
      """Build a list of strings which index into every possible
      value of the result.  For example, if the result is a vec2,
@@ -160,7 +286,7 @@ def main():
                 with open(output_filename, 'w') as f:
                     f.write(template.render_unicode(signature=signature,
                                                     test_vectors=test_vectors,
-                                                    tolerances=tolerances,
+                                                    tolerances=simple_fns,
                                                     invocation=invocation,
                                                     num_elements=num_elements,
                                                     indexers=indexers,
--
2.2.2


--

Micah Fedke
Collabora Ltd.
+44 1223 362967
https://www.collabora.com/
https://twitter.com/collaboraltd
_______________________________________________
Piglit mailing list
[email protected]
http://lists.freedesktop.org/mailman/listinfo/piglit
