I tried doing some of these. You have a few options when doing this in clutter:
1. Make clutter_texture_set_from_yuv_data() do the conversion using a fragment shader on platforms which do not support ycbcr extensions. 2. Add another function like clutter_texture_set_from_yuv_data() but implemented in GL for hardware with no ycbcr. 3. Fix clutter-gst-video-sink.c to do it. I tried to do 3. See this patch below, it first advertises I420 support in clutter-video-sink. If ycbcr is supported, it calls clutter_texture_set_from_yuv_data(). Otherwise, it sets up a fragment shader. I could not get the shader part to work as I have trouble understanding how you can take a 12bit space (I420) and put out 32bits (RGBA). I think I need to use a different pixel format, but clutter_texture_set_from_rgb_data() only does 24/32bpp. The video is smudgy when the shader is applied, but I do see some 20% drop in CPU (720p, H264 video) as ffmpegcolorspace is bypassed. There are two shaders, the first one cannot work in theory as it takes in 3 textures and I think clutter sends in just one. Also, the ny=720.0-gl... line is hardcoded for 720p. Found this one here: http://www.fourcc.org/source/YUV420P-OpenGL-GLSLang.c The second one is more promising and it supports YUY2 as well. Found this here: http://groups.google.com/group/comp.graphics.api.opengl/browse_thread/thread/1848af414389f9ba/dc9da51d16617a3b Any inputs welcome - especially the ones telling me how to fix it! 
-Ashwin --- clutter-gst-video-sink.c (revision 2711) +++ clutter-gst-video-sink.c (working copy) @@ -45,14 +45,118 @@ #include <clutter/clutter.h> #include <string.h> -static GstStaticPadTemplate sinktemplate - = GST_STATIC_PAD_TEMPLATE ("sink", +#if 1 +#define HAVE_SHADER +#define YUV_CAPS ";" GST_VIDEO_CAPS_YUV ("{ I420 }") +const char *yuv_to_rgb_glsl= + "uniform sampler2DRect Ytex;\n" + "uniform sampler2DRect Utex,Vtex;\n" + "void main(void) {\n" + " float nx,ny,r,g,b,y,u,v;\n" + " nx=gl_TexCoord[0].x;\n" + " ny=720.0-gl_TexCoord[0].y;\n" + " y=texture2DRect(Ytex,vec2(nx,ny)).r;\n" + " u=texture2DRect(Utex,vec2(nx/2.0,ny/2.0)).r;\n" + " v=texture2DRect(Vtex,vec2(nx/2.0,ny/2.0)).r;\n" + + " y=1.1643*(y-0.0625);\n" + " u=u-0.5;\n" + " v=v-0.5;\n" + + " r=y+1.5958*v;\n" + " g=y-0.39173*u-0.81290*v;\n" + " b=y+2.017*u;\n" + + " gl_FragColor=vec4(r,g,b,1.0);\n" + "}\n"; + +const char *yuv_to_rgb_glsl1 = + "uniform sampler2DRect Texture0;" + "uniform int SrcTexWidth;" + "uniform float SrcTexRelation;" + "const vec4 stdBias = vec4( -0.0625, -0.5, -0.5, 0 );" + "const mat3 RgbToYuv = mat3 (1.0, 0.0, 1.4022, 1.0, -0.3456, -0.7145," + "1.0, 1.7710, 0.0);" + "" + "vec3 yuy22rgb16()" + "{" + " vec4 texColor0;" + " vec4 texColor1;" + " float isOddUV = fract(floor(gl_TexCoord[0].s * SrcTexWidth)*0.5)*2;" + " float texel_sample = 1.0 / SrcTexWidth;" + " float tc = SrcTexRelation - gl_TexCoord[0].t;" + " vec2 coord0 = vec2(gl_TexCoord[0].s - (isOddUV * texel_sample), tc);" + " vec2 coord1 = vec2(coord0.x + texel_sample, tc);" + " texColor0 = texture2DRect( Texture0, coord0 );" + " texColor1 = texture2DRect( Texture0, coord1 );" + " texColor0.g = texColor0.a;" + " texColor0.b = texColor1.a;" + " texColor1.r = texColor1.r;" + " texColor1.g = texColor0.a;" + " texColor1.b = texColor1.a;" + " texColor0 += stdBias;" + " texColor0 *= (1.0 - isOddUV);" + " texColor1 += stdBias;" + " texColor1 *= (isOddUV);" + " texColor0 = texColor0 + texColor1;" + " vec3 color = vec3(texColor0.r, 
texColor0.g, texColor0.b);" + " color = color * RgbToYuv;" + " return color;" + "" + "}" + "" + "vec3 uyvy2rgb16()" + "{" + " vec4 texColor0;" + " vec4 texColor1;" + " float isOddUV = floor(fract(gl_TexCoord[0].s * SrcTexWidth)*0.5)*2;" + " float texel_sample = 1.0 / SrcTexWidth;" + " float tc = SrcTexRelation - gl_TexCoord[0].t;" + " vec2 coord0 = vec2(gl_TexCoord[0].s - (isOddUV * texel_sample), tc);" + " vec2 coord1 = vec2(coord0.x + texel_sample, tc);" + " texColor0 = texture2DRect( Texture0, coord0 );" + " texColor1 = texture2DRect( Texture0, coord1 );" + "" + " texColor0.r = texColor0.a;" + " texColor0.g = texColor0.g;" + " texColor0.b = texColor1.b;" + " texColor1.r = texColor1.a;" + " texColor1.g = texColor0.g;" + " texColor1.b = texColor1.b;" + " texColor0 += stdBias;" + " texColor0 *= (1.0 - isOddUV);" + " texColor1 += stdBias;" + " texColor1 *= (isOddUV);" + " texColor0 = texColor0 + texColor1;" + " vec3 color = vec3(texColor0.r, texColor0.g, texColor0.b);" + " color = color * RgbToYuv;" + " return color;" + "" + "}" + "" + "void main(void)" + "{" + " vec3 color;" + " color = uyvy2rgb16();" + " gl_FragColor = vec4(color, 1.0);" + "" + "} "; + + +#else +#define YUV_CAPS +#endif + +static GstStaticPadTemplate sinktemplate = + GST_STATIC_PAD_TEMPLATE ("sink", GST_PAD_SINK, GST_PAD_ALWAYS, - GST_STATIC_CAPS (GST_VIDEO_CAPS_RGBx ";" \ - GST_VIDEO_CAPS_BGRx ) \ + GST_STATIC_CAPS (GST_VIDEO_CAPS_RGBx ";" + GST_VIDEO_CAPS_BGRx + YUV_CAPS ) ); + GST_DEBUG_CATEGORY_STATIC (clutter_gst_video_sink_debug); #define GST_CAT_DEFAULT clutter_gst_video_sink_debug @@ -73,10 +177,13 @@ ClutterTexture *texture; GAsyncQueue *async_queue; gboolean rgb_ordering; + gboolean yuy2_ordering; + gboolean use_rgb; int width; int height; int fps_n, fps_d; int par_n, par_d; + ClutterShader *shader; }; @@ -116,6 +223,8 @@ ClutterGstVideoSinkPrivate); priv->async_queue = g_async_queue_new (); + + priv->shader = NULL; } static gboolean @@ -123,7 +232,6 @@ { ClutterGstVideoSinkPrivate 
*priv; GstBuffer *buffer; - priv = data; buffer = g_async_queue_try_pop (priv->async_queue); @@ -132,6 +240,7 @@ return FALSE; } + if (priv->use_rgb) { clutter_texture_set_from_rgb_data (priv->texture, GST_BUFFER_DATA (buffer), TRUE, @@ -142,7 +251,62 @@ priv->rgb_ordering ? 0 : CLUTTER_TEXTURE_RGB_FLAG_BGR, NULL); + } +#if defined (HAVE_SHADER) + else { + clutter_texture_set_from_rgb_data (priv->texture, + GST_BUFFER_DATA (buffer), + TRUE, + 61*priv->width / 100, + 61 * priv->height / 100, + (4 * 61 * priv->width / 100 + 3) &~ 3, + 4, + 0, + NULL); + if (!priv->shader) { + GError *error; + + /* init shader needed for transfroming yuv->rgb + * this is done here since gst pipe must be in correct state + * there are thread issues as well (need to do in clutter thread) + * it crashes otherwise. + */ + priv->shader = clutter_shader_new (); + + error = NULL; + clutter_shader_set_fragment_source (priv->shader, yuv_to_rgb_glsl, -1); + clutter_shader_bind (priv->shader, &error); + printf("bind ok\n"); + + if (error) + { + g_print ("unable to load shader: %s\n", + error->message); + g_error_free (error); + + return FALSE; + } + clutter_actor_set_shader (CLUTTER_ACTOR(priv->texture), priv->shader); + clutter_actor_set_shader_param (CLUTTER_ACTOR(priv->texture), "SrcTexRelation", 1.0); + clutter_actor_set_shader_param (CLUTTER_ACTOR(priv->texture), "SrcTexWidth", 1280.0); + + printf("shader was set\n"); + + + } + } +#else + else { + clutter_texture_set_from_yuv_data (priv->texture, + GST_BUFFER_DATA (buffer), + priv->width, + priv->height, + 0, + NULL); + } +#endif + gst_buffer_unref (buffer); return FALSE; @@ -222,9 +386,35 @@ else priv->par_n = priv->par_d = 1; + //gst_structure_get_int (structure, "red_mask", &red_mask); + //priv->rgb_ordering = (red_mask == 0xff000000); + + if (strcmp (gst_structure_get_name (structure), "video/x-raw-rgb") == 0) { + + printf ("using RGB\n"); + priv->use_rgb = TRUE; gst_structure_get_int (structure, "red_mask", &red_mask); - 
priv->rgb_ordering = (red_mask == 0xff000000); + if (red_mask == 0xff000000) { + priv->rgb_ordering = TRUE; + } else { + priv->rgb_ordering = FALSE; + } + } else { + unsigned int fourcc; + + printf ("using YUV\n"); + priv->use_rgb = FALSE; + + gst_structure_get_fourcc (structure, "format", &fourcc); + if (fourcc == GST_MAKE_FOURCC ('Y', 'U', 'Y', '2')) { + priv->yuy2_ordering = TRUE; + } else { + priv->yuy2_ordering = FALSE; + } + + } + return TRUE; } > -----Original Message----- > From: Matthew Allum [mailto:[EMAIL PROTECTED] > Sent: Monday, May 19, 2008 8:20 AM > To: Florent > Cc: clutter > Subject: Re: [clutter] cluttergst improvement and [Fwd: OpenGL- > acceleratedcolorspace conversion] > > Hi; > > On Mon, 2008-05-19 at 14:08 +0200, Florent wrote: > > > > Just for reference, looks like the gstreamer community is circling > > around reusable colorspace conversion plugins as well as optimized > > glsink (recalling the quite recent media-center-related threads on > > this very ml). > > > > How shall such improvements be integrated in clutter's gst video sink > > ? Using it in place of ffmpegcolorspace elements, or should we > > directly improve the cluttergst module ? > > > > Are there perspectives on using a "stock/legacy" glimagesink as > > cluttergst videosink (through context sharing?)? > > > > It would indeed be nice to reuse it, we'd need to be able to specify an > already existing context, a texture for it to target aswell as assume it > does any GL calls in the main Clutter thread. Also we obviously would > not want it to render the texture then to an X drawable. I am not sure > if its that flexible/generic ? > > The big lose of course (which would likely take a lot of modifications > to fix) is it would not work on GLES. > > == Matthew > > -- > To unsubscribe send a mail to [EMAIL PROTECTED] -- To unsubscribe send a mail to [EMAIL PROTECTED]
