From: Roland Scheidegger <[email protected]>
Some formats had some "implied" precision which relied on the driver picking a
specific hw format (e.g. RGB4 and RGB5 both relied on driver picking 565).
These tests will now be skipped (for the exact test).
Others didn't have any implied precision but relied on driver picking a format
with at least 8 bits even though the internal format only implied 4.
Also, the exact tests for srgb8 and srgb8_alpha8 were failing too due to using
GL_BYTE instead of GL_UNSIGNED_BYTE.
With these fixes the only tests llvmpipe is failing seem to be the exact
GL_RGB8_SNORM and GL_RGB16_SNORM ones (1, 2 or 4 channels work and the errors
seem to be one bit so maybe triggers some conversion somewhere using different
signed conversion formula).
---
tests/texturing/teximage-colors.c | 43 ++++++++++++++++++++++++++++++++++-----
1 file changed, 38 insertions(+), 5 deletions(-)
diff --git a/tests/texturing/teximage-colors.c
b/tests/texturing/teximage-colors.c
index 2bee02f..dcbab65 100644
--- a/tests/texturing/teximage-colors.c
+++ b/tests/texturing/teximage-colors.c
@@ -60,12 +60,12 @@ struct texture_format formats[] = {
FORMAT(GL_RGB, GL_RGB, GL_NONE),
FORMAT(GL_R3_G3_B2, GL_RGB, GL_UNSIGNED_BYTE_3_3_2),
- FORMAT(GL_RGB4, GL_RGB, GL_UNSIGNED_SHORT_5_6_5),
- FORMAT(GL_RGB5, GL_RGB, GL_UNSIGNED_SHORT_5_6_5),
+ FORMAT(GL_RGB4, GL_RGB, GL_NONE),
+ FORMAT(GL_RGB5, GL_RGB, GL_NONE),
FORMAT(GL_RGB8, GL_RGB, GL_UNSIGNED_BYTE),
FORMAT(GL_RGB8_SNORM, GL_RGB, GL_BYTE),
- FORMAT(GL_SRGB8, GL_RGB, GL_BYTE),
- FORMAT(GL_RGB10, GL_RGB, GL_UNSIGNED_SHORT),
+ FORMAT(GL_SRGB8, GL_RGB, GL_UNSIGNED_BYTE),
+ FORMAT(GL_RGB10, GL_RGB, GL_NONE),
FORMAT(GL_R11F_G11F_B10F, GL_RGB, GL_NONE),
FORMAT(GL_RGB12, GL_RGB, GL_NONE),
FORMAT(GL_RGB9_E5, GL_RGB, GL_NONE),
@@ -81,7 +81,7 @@ struct texture_format formats[] = {
FORMAT(GL_RGBA8, GL_RGBA, GL_UNSIGNED_BYTE),
FORMAT(GL_RGB10_A2, GL_RGBA, GL_UNSIGNED_INT_10_10_10_2),
FORMAT(GL_RGBA8_SNORM, GL_RGBA, GL_BYTE),
- FORMAT(GL_SRGB8_ALPHA8, GL_RGBA, GL_BYTE),
+ FORMAT(GL_SRGB8_ALPHA8, GL_RGBA, GL_UNSIGNED_BYTE),
FORMAT(GL_RGBA12, GL_RGBA, GL_NONE),
FORMAT(GL_RGBA16, GL_RGBA, GL_UNSIGNED_SHORT),
FORMAT(GL_RGBA16_SNORM, GL_RGBA, GL_SHORT),
@@ -543,6 +543,39 @@ piglit_init(int argc, char **argv)
tolerance[3] = 0.3;
break;
}
+
+ /* The tolerance lowering above only works for formats which have
+ explicit data types associated with them and even then it's fishy
+ for some.
+ The default sort of assumes at least 7 bits which doesn't make
+ much sense in any case (for the specific formats with more bits).
+ But just fix the cases which cannot pass (unless the driver encodes
+ them with more bits). */