The reference software implementation of SurfaceFlinger doesn't
support YUV color conversion. It simply draws the Y plane as a
monochrome image so that something is displayed.
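For illustration, a minimal sketch of what that fallback amounts to, assuming
a GLES 1.x context with a texture already bound and a planar buffer whose
first width*height bytes are the Y plane; the helper name is made up for the
example:

#include <GLES/gl.h>
#include <stdint.h>

/* Hypothetical helper: upload only the Y plane of a planar YUV buffer as a
 * single-channel (luminance) texture, so the frame shows up as monochrome. */
static void upload_y_plane_as_luminance(const uint8_t *yuv_frame,
                                        int width, int height)
{
    glTexImage2D(GL_TEXTURE_2D, 0, GL_LUMINANCE,
                 width, height, 0,
                 GL_LUMINANCE, GL_UNSIGNED_BYTE, yuv_frame);
}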

On Dec 4, 3:22 am, Shirish <[EMAIL PROTECTED]> wrote:
> Hi,
>
> I want to use the YUV format, which is a different format. In that case,
> do I need to change only SurfaceFlinger to use the OpenGL extension for
> the YUV format, or is there some other module that needs to change?
> Does the current Android 1.0 release have OpenGL extensions for the YUV
> format?
>
> Thanks,
> -Shirish
>
> On Dec 4, 2:44 pm, "[EMAIL PROTECTED]" <[EMAIL PROTECTED]> wrote:
>
> > My YUV422 to RGB565 conversion doesn't work well. Here is the code:
>
> > #include <stdint.h>
> > #include <string.h>
>
> > /* 2064 bytes: 16 bytes of 16.16 fixed-point coefficients followed by
> >    the clip/lookup tables used by cc16() below. */
> > uint32_t mCoefTbl32[516];
> > uint8_t *mCoefTbl = NULL;
>
> > static void init_coff(void)
> > {
> >     uint8_t *clip;
> >     int i;
>
> >     mCoefTbl = (uint8_t *)mCoefTbl32;
>
> >     /* BT.709-style coefficients in 16.16 fixed point (the commented-out
> >        values are the BT.601 equivalents). */
> >     *((uint32_t *)(mCoefTbl +  0)) = (uint32_t)(65536 * 0.4681); // 0.714
> >     *((uint32_t *)(mCoefTbl +  4)) = (uint32_t)(65536 * 1.5748); // 1.402
> >     *((uint32_t *)(mCoefTbl +  8)) = (uint32_t)(65536 * 0.1873); // 0.344
> >     *((uint32_t *)(mCoefTbl + 12)) = (uint32_t)(65536 * 1.8556); // 1.772
>
> >     /* clip[] is indexed from -384 to 1663: clip[-384..639] clamps and
> >        truncates to 5 bits (R and B); clip[640..1663] clamps and
> >        truncates to 6 bits (G), reached through a +1024 offset. */
> >     clip = mCoefTbl + 400;
>
> >     memset(&clip[-384], 0, 385 * sizeof(*clip));   /* 5-bit: clamp low  */
> >     memset(&clip[640],  0, 385 * sizeof(*clip));   /* 6-bit: clamp low  */
>
> >     for (i = 1; i < 255; i++) {
> >         clip[i]        = i >> 3;   /* 5-bit conversion */
> >         clip[i + 1024] = i >> 2;   /* 6-bit conversion */
> >     }
> >     memset(&clip[255],  31, 385 * sizeof(*clip));  /* 5-bit: clamp high */
> >     memset(&clip[1279], 63, 385 * sizeof(*clip));  /* 6-bit: clamp high */
> > }
>
> > /* Convert one planar YUV frame to RGB565, two rows at a time; one Cb/Cr
> >    pair is shared per 2x2 block of Y (4:2:0 layout).  disp[0] = source Y
> >    pitch in bytes, disp[1] = destination pitch in pixels, disp[2] = width,
> >    disp[3] = height, disp[6] = non-zero to rotate 180 and flip. */
> > static int32_t cc16(uint16_t *pY, uint8_t *pCb, uint8_t *pCr,
> >                     uint8_t *dst, int32_t *disp, uint8_t *coff_tbl)
> > {
> >     /* +1024 selects the 6-bit (green) half of the clip table; the small
> >        constants add a per-pixel bias before truncation. */
> >     #define OFFSET_5_0  2
> >     #define OFFSET_6_0  (1+1024)
> >     #define OFFSET_5_1  6
> >     #define OFFSET_6_1  (3+1024)
>
> >     uint16_t    *pDst;
> >     int32_t     src_pitch, dst_pitch, src_width;
> >     int32_t     Y, Cb, Cr, Cg;
> >     int32_t     deltaY, deltaDst, deltaCbCr;
> >     int32_t     row, col;
> >     int32_t     tmp0, tmp1, tmp2;
> >     uint32_t    rgb;
> >     uint8_t *clip = coff_tbl+400;
> >     int32_t  cc1 = (*((int32_t*)(clip - 400)));
> >     int32_t  cc3 = (*((int32_t*)(clip - 396)));
> >     int32_t  cc2 = (*((int32_t*)(clip - 392)));
> >     int32_t  cc4 = (*((int32_t*)(clip - 388)));
>
> >     src_pitch   =   disp[0];
> >     dst_pitch   =   disp[1];
> >     src_width   =   disp[2];
>
> >     if(disp[6]) /* rotate 180 and flip */
> >     {   /* walk the source bottom-up; for this path pY, pCb and pCr are
> >            assumed to already point at the start of their last rows */
> >         deltaY    = -src_width - (src_pitch<<1);
> >         deltaCbCr = -((src_width + src_pitch)>>1);
> >         src_pitch = -(src_pitch>>1);
> >     }
> >     else
> >     {
> >         deltaY      =   (src_pitch<<1)-src_width;
> >         deltaCbCr   =   (src_pitch-src_width)>>1;
> >         src_pitch >>= 1;
> >     }
>
> >     deltaDst    =   (dst_pitch<<1)-src_width;
> >     pDst =  (uint16_t *)dst;
>
> >     for(row = disp[3]; row >0; row-=2){
>
> >         for(col = src_width-1; col >=0; col-=2){
>
> >             Cb = *pCb++;    Cr = *pCr++;
> >             Y = pY[src_pitch];  /* two Y samples of the lower row (pY is
> >                                    uint16_t*, src_pitch in 16-bit units) */
>
> >             Cb -= 128; Cr -= 128;
> >             Cg  =   Cr*cc1;
> >             Cr  *= cc3;
>
> >             Cg  +=  Cb*cc2;
> >             Cb  *=  cc4;
>
> >             tmp0    =   (Y & 0xFF); // little-endian: low byte is the left pixel
> >             tmp0    += OFFSET_5_0;
>
> >             tmp1    =   tmp0 - (Cg>>16);
> >             tmp2    =   tmp0 + (Cb>>16);
> >             tmp0    =   tmp0 + (Cr>>16);
>
> >             tmp0    =   clip[tmp0];
> >             tmp1    =   clip[tmp1 + OFFSET_6_0 - OFFSET_5_0];
> >             tmp2    =   clip[tmp2];
> >             //RGB_565
>
> >             rgb     =   tmp1|(tmp0<<6);
> >             rgb     =   tmp2|(rgb<<5);
>
> >             Y   = (Y>>8) & 0xFF;    /* high byte: right pixel */
>
> >             Y   += OFFSET_5_1;
> >             tmp1    =   (Y) - (Cg>>16);
> >             tmp2    =   (Y) + (Cb>>16);
> >             tmp0    =   (Y) + (Cr>>16);
>
> >             tmp0    =   clip[tmp0];
> >             tmp1    =   clip[tmp1 + OFFSET_6_1 - OFFSET_5_1];
> >             tmp2    =   clip[tmp2];
>
> >             //RGB_565
>
> >             tmp0    =   tmp1|(tmp0<<6);
> >             tmp0    =   tmp2|(tmp0<<5);
>
> >             rgb     |=  (tmp0<<16);
>
> >             *( (uint32_t*)(pDst+dst_pitch) )    = rgb;  /* store two lower-row pixels */
>
> >             //load the top two pixels
> >             Y = *pY++;
>
> >             tmp0    =   (Y & 0xFF); // little-endian: low byte is the left pixel
> >             tmp0    += OFFSET_5_1;
>
> >             tmp1    =   tmp0 - (Cg>>16);
> >             tmp2    =   tmp0 + (Cb>>16);
> >             tmp0    =   tmp0 + (Cr>>16);
>
> >             tmp0    =   clip[tmp0];
> >             tmp1    =   clip[tmp1 + OFFSET_6_1 - OFFSET_5_1];
> >             tmp2    =   clip[tmp2];
> >             //RGB_565
>
> >             rgb     =   tmp1|(tmp0<<6);
> >             rgb     =   tmp2|(rgb<<5);
>
> >             Y   = (Y>>8) & 0xFF;    /* high byte: right pixel */
>
> >             Y   += OFFSET_5_0;
> >             tmp1    =   (Y) - (Cg>>16);
> >             tmp2    =   (Y) + (Cb>>16);
> >             tmp0    =   (Y) + (Cr>>16);
>
> >             tmp0    =   clip[tmp0];
> >             tmp1    =   clip[tmp1 + OFFSET_6_0 - OFFSET_5_0];
> >             tmp2    =   clip[tmp2];
>
> >             //RGB_565
>
> >             tmp0    =   tmp1|(tmp0<<6);
> >             tmp0    =   tmp2|(tmp0<<5);
>
> >             rgb     |=  (tmp0<<16);
> >             *( (uint32_t *)pDst)    = rgb;  pDst+=2;  /* store two upper-row pixels */
>
> >         }//end of COL
>
> >         pY  +=  (deltaY>>1);    /* deltaY is in bytes, pY in 16-bit units */
> >         pCb +=  deltaCbCr;
> >         pCr +=  deltaCbCr;
> >         pDst+=  deltaDst;       /* deltaDst is in pixels: pDst is uint16_t* */
> >     }
> >     return 1;
>
> > }
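(For reference, a rough sketch of how the routine above appears to expect to
be driven, with the disp[] layout inferred from the code. The wrapper name and
the assumption of a tightly packed I420 frame, Y plane followed by Cb then Cr,
are mine, not the original poster's. Note that cc16() shares one Cb/Cr pair
per 2x2 block of Y, i.e. 4:2:0 chroma, so it is not a drop-in match for 4:2:2
data.)

/* Hypothetical call site, assuming the declarations above are in scope and
 * that width and height are even. */
static void convert_i420_to_rgb565(uint8_t *frame, uint16_t *rgb565,
                                   int width, int height)
{
    int32_t disp[7];

    disp[0] = width;    /* source Y pitch, in bytes           */
    disp[1] = width;    /* destination pitch, in pixels       */
    disp[2] = width;    /* source width                       */
    disp[3] = height;   /* source height                      */
    disp[4] = 0;        /* not read by cc16()                 */
    disp[5] = 0;        /* not read by cc16()                 */
    disp[6] = 0;        /* non-zero would rotate 180 and flip */

    if (mCoefTbl == NULL)
        init_coff();    /* build the coefficient/clip tables once */

    cc16((uint16_t *)frame,                 /* Y plane  */
         frame + width * height,            /* Cb plane */
         frame + width * height * 5 / 4,    /* Cr plane */
         (uint8_t *)rgb565, disp, mCoefTbl);
}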
>
> > > On Nov 19, 5:02 pm, "[EMAIL PROTECTED]" <[EMAIL PROTECTED]> wrote:
>
> > > > Thanks very much. The CPU is a PXA300, and the camera only provides YUV422 and raw RGB.
>
> > > "On the G1 we use another code path to draw video frames" which
> > > path,thanks~
>
> > > > On Nov 19, 4:07 pm, Mathias Agopian <[EMAIL PROTECTED]> wrote:
>
> > > > Hi,
>
> > > > On Tue, Nov 18, 2008 at 11:15 PM, [EMAIL PROTECTED]
>
> > > > <[EMAIL PROTECTED]> wrote:
>
> > > > > The camera provides YUV422 data, but OpenGL "just show[s] the Y plane
> > > > > of YUV buffers" (frameworks\base\libs\surfaceflinger\LayerBase.cpp,
> > > > > line 624), so I must convert YUV422 to RGB565. The camera preview
> > > > > works, but it is a little slow. Why? Thanks.
>
> > > > No, OpenGL doesn't support YUV textures. That's just that.
>
> > > > On the G1 we use another code path to draw video frames; it doesn't
> > > > use OpenGL ES, instead it uses the 2D engine. The code that you are
> > > > referring to exists only so that the YUV video node will display
> > > > "something" when used on the emulator or on a device that doesn't do
> > > > YUV (in which case the video node should not try to produce YUV
> > > > output). It's mostly there for debugging.
>
> > > > Of course, *some* YUV extensions for OpenGL ES exist, and the code
> > > > in SurfaceFlinger doesn't make use of them at this time. Exercise left
> > > > to the reader... or until real h/w with these capabilities falls into
> > > > my hands.
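(On the "*some* YUV extensions exist" point: the usual way to find out whether
a driver exposes one is to scan the GL extension string at runtime. A minimal
sketch; the extension named in the usage comment is only an example of a
YUV-capable extension, and what a given GPU actually advertises is
vendor-specific.)

#include <GLES/gl.h>
#include <string.h>

/* Returns non-zero if the current OpenGL ES context advertises the named
 * extension. */
static int has_gl_extension(const char *name)
{
    const char *ext = (const char *)glGetString(GL_EXTENSIONS);
    return ext != NULL && strstr(ext, name) != NULL;
}

/* e.g.  if (has_gl_extension("GL_OES_EGL_image_external")) { ... } */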
>
> > > > What h/w are you using?
>
> > > > Mathias