Module Name:	xsrc
Committed By:	macallan
Date:		Thu Dec 7 19:23:22 UTC 2017
Modified Files:
	xsrc/external/mit/xf86-video-suncg14/dist/src: cg14.h cg14_accel.c
	    cg14_render.c

Log Message:
RGB/BGR-flip source images in CG14Comp_Over32*() with non-solid sources as
well. This takes care of most remaining RGB/BGR issues in xrender.
Next step: do more than one pixel at a time in those operations.


To generate a diff of this commit:
cvs rdiff -u -r1.12 -r1.13 \
    xsrc/external/mit/xf86-video-suncg14/dist/src/cg14.h \
    xsrc/external/mit/xf86-video-suncg14/dist/src/cg14_accel.c
cvs rdiff -u -r1.10 -r1.11 \
    xsrc/external/mit/xf86-video-suncg14/dist/src/cg14_render.c

Please note that diffs are not public domain; they are subject to the
copyright notices on the relevant files.
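[Editorial note] For context, the "RGB/BGR flip" in this commit means swapping the red and
blue channels of each 32-bit source pixel whenever the source and destination picture
formats disagree on channel order (e.g. a PICT_a8r8g8b8 source composited onto a
PICT_a8b8g8r8 destination). A minimal C sketch of the per-pixel effect follows; flip_rb
is a hypothetical helper for illustration only, the driver performs the swap with SX
instructions on already-loaded registers, as the cg14_render.c diff below shows.

#include <stdint.h>

/*
 * Illustrative sketch, not driver code: swap the R and B bytes of a
 * 32-bit ARGB/ABGR pixel, leaving the alpha and green bytes untouched.
 */
static inline uint32_t
flip_rb(uint32_t pix)
{
	uint32_t r = (pix >> 16) & 0xff;
	uint32_t b = pix & 0xff;

	return (pix & 0xff00ff00) | (b << 16) | r;
}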
Modified files:

Index: xsrc/external/mit/xf86-video-suncg14/dist/src/cg14.h
diff -u xsrc/external/mit/xf86-video-suncg14/dist/src/cg14.h:1.12 xsrc/external/mit/xf86-video-suncg14/dist/src/cg14.h:1.13
--- xsrc/external/mit/xf86-video-suncg14/dist/src/cg14.h:1.12	Mon Oct 30 22:09:54 2017
+++ xsrc/external/mit/xf86-video-suncg14/dist/src/cg14.h	Thu Dec 7 19:23:22 2017
@@ -135,13 +135,13 @@ void CG14Comp_Over8Solid(Cg14Ptr, uint32
 void CG14Comp_Over32Solid(Cg14Ptr, uint32_t, uint32_t, uint32_t, uint32_t,
 	    int, int);
 void CG14Comp_Over32(Cg14Ptr, uint32_t, uint32_t, uint32_t, uint32_t,
-	    int, int);
+	    int, int, int);
 void CG14Comp_Over32Mask(Cg14Ptr, uint32_t, uint32_t, uint32_t, uint32_t,
-	    uint32_t, uint32_t, int, int);
+	    uint32_t, uint32_t, int, int, int);
 void CG14Comp_Over32Mask_noalpha(Cg14Ptr, uint32_t, uint32_t, uint32_t,
-	    uint32_t, uint32_t, uint32_t, int, int);
+	    uint32_t, uint32_t, uint32_t, int, int, int);
 void CG14Comp_Over32Mask32_noalpha(Cg14Ptr, uint32_t, uint32_t, uint32_t,
-	    uint32_t, uint32_t, uint32_t, int, int);
+	    uint32_t, uint32_t, uint32_t, int, int, int);
 void CG14Comp_Add8(Cg14Ptr, uint32_t, uint32_t, uint32_t, uint32_t,
 	    int, int);
 void CG14Comp_Add8_32(Cg14Ptr, uint32_t, uint32_t, uint32_t, uint32_t,

Index: xsrc/external/mit/xf86-video-suncg14/dist/src/cg14_accel.c
diff -u xsrc/external/mit/xf86-video-suncg14/dist/src/cg14_accel.c:1.12 xsrc/external/mit/xf86-video-suncg14/dist/src/cg14_accel.c:1.13
--- xsrc/external/mit/xf86-video-suncg14/dist/src/cg14_accel.c:1.12	Mon Oct 30 22:09:54 2017
+++ xsrc/external/mit/xf86-video-suncg14/dist/src/cg14_accel.c	Thu Dec 7 19:23:22 2017
@@ -1,4 +1,4 @@
-/* $NetBSD: cg14_accel.c,v 1.12 2017/10/30 22:09:54 macallan Exp $ */
+/* $NetBSD: cg14_accel.c,v 1.13 2017/12/07 19:23:22 macallan Exp $ */
 /*
  * Copyright (c) 2013 Michael Lorenz
  * All rights reserved.
@@ -888,11 +888,15 @@ CG14Composite(PixmapPtr pDst, int srcX,
 	Cg14Ptr p = GET_CG14_FROM_SCRN(pScrn);
 	uint32_t dstoff, dstpitch;
 	uint32_t dst, msk, src;
+	int flip = 0;

 	ENTER;
 	dstoff = exaGetPixmapOffset(pDst);
 	dstpitch = exaGetPixmapPitch(pDst);

+	flip = (PICT_FORMAT_TYPE(p->srcformat) !=
+	        PICT_FORMAT_TYPE(p->dstformat));
+
 	switch (p->op) {
 	case PictOpOver:
 		dst = dstoff + (dstY * dstpitch) + (dstX << 2);
@@ -943,12 +947,12 @@ CG14Composite(PixmapPtr pDst, int srcX,
 				    src, p->srcpitch,
 				    msk, p->mskpitch,
 				    dst, dstpitch,
-				    width, height);
+				    width, height, flip);
 			} else {
 				CG14Comp_Over32(p,
 				    src, p->srcpitch,
 				    dst, dstpitch,
-				    width, height);
+				    width, height, flip);
 			}
 			break;
 		case PICT_x8r8g8b8:
@@ -967,7 +971,7 @@ CG14Composite(PixmapPtr pDst, int srcX,
 				    src, p->srcpitch,
 				    msk, p->mskpitch,
 				    dst, dstpitch,
-				    width, height);
+				    width, height, flip);
 			} else if ((p->mskformat == PICT_a8r8g8b8) ||
 				   (p->mskformat == PICT_a8b8g8r8)) {
 				msk = p->mskoff +
@@ -977,7 +981,7 @@ CG14Composite(PixmapPtr pDst, int srcX,
 				    src, p->srcpitch,
 				    msk, p->mskpitch,
 				    dst, dstpitch,
-				    width, height);
+				    width, height, flip);
 			} else {
 				xf86Msg(X_ERROR, "no src alpha, mask is %x\n", p->mskformat);
 			}

Index: xsrc/external/mit/xf86-video-suncg14/dist/src/cg14_render.c
diff -u xsrc/external/mit/xf86-video-suncg14/dist/src/cg14_render.c:1.10 xsrc/external/mit/xf86-video-suncg14/dist/src/cg14_render.c:1.11
--- xsrc/external/mit/xf86-video-suncg14/dist/src/cg14_render.c:1.10	Mon Oct 30 22:09:54 2017
+++ xsrc/external/mit/xf86-video-suncg14/dist/src/cg14_render.c	Thu Dec 7 19:23:22 2017
@@ -1,4 +1,4 @@
-/* $NetBSD: cg14_render.c,v 1.10 2017/10/30 22:09:54 macallan Exp $ */
+/* $NetBSD: cg14_render.c,v 1.11 2017/12/07 19:23:22 macallan Exp $ */
 /*
  * Copyright (c) 2013 Michael Lorenz
  * All rights reserved.
@@ -469,7 +469,7 @@ void CG14Comp_Add8_32(Cg14Ptr p,
 void CG14Comp_Over32(Cg14Ptr p,
                    uint32_t src, uint32_t srcpitch,
                    uint32_t dst, uint32_t dstpitch,
-                   int width, int height)
+                   int width, int height, int flip)
 {
 	uint32_t srcx, dstx, m;
 	int line, x, i;
@@ -484,6 +484,14 @@ void CG14Comp_Over32(Cg14Ptr p,
 		for (x = 0; x < width; x++) {
 			/* fetch source pixel */
 			write_sx_io(p, srcx, SX_LDUQ0(12, 0, srcx & 7));
+			if (flip) {
+				write_sx_reg(p, SX_INSTRUCTIONS,
+				    SX_ORS(13, 0, 40, 0));
+				write_sx_reg(p, SX_INSTRUCTIONS,
+				    SX_ORS(15, 0, 13, 0));
+				write_sx_reg(p, SX_INSTRUCTIONS,
+				    SX_ORS(40, 0, 15, 0));
+			}
 			/* fetch dst pixel */
 			write_sx_io(p, dstx, SX_LDUQ0(20, 0, dstx & 7));
 			/* src is premultiplied with alpha */
@@ -507,7 +515,7 @@ void CG14Comp_Over32Mask(Cg14Ptr p,
                    uint32_t src, uint32_t srcpitch,
                    uint32_t msk, uint32_t mskpitch,
                    uint32_t dst, uint32_t dstpitch,
-                   int width, int height)
+                   int width, int height, int flip)
 {
 	uint32_t srcx, dstx, mskx, m;
 	int line, x, i;
@@ -523,6 +531,14 @@ void CG14Comp_Over32Mask(Cg14Ptr p,
 		for (x = 0; x < width; x++) {
 			/* fetch source pixel */
 			write_sx_io(p, srcx, SX_LDUQ0(12, 0, srcx & 7));
+			if (flip) {
+				write_sx_reg(p, SX_INSTRUCTIONS,
+				    SX_ORS(13, 0, 40, 0));
+				write_sx_reg(p, SX_INSTRUCTIONS,
+				    SX_ORS(15, 0, 13, 0));
+				write_sx_reg(p, SX_INSTRUCTIONS,
+				    SX_ORS(40, 0, 15, 0));
+			}
 			/* fetch mask */
 			write_sx_io(p, mskx & (~7), SX_LDB(9, 0, mskx & 7));
 			/* fetch dst pixel */
@@ -556,7 +572,7 @@ void CG14Comp_Over32Mask_noalpha(Cg14Ptr
                    uint32_t src, uint32_t srcpitch,
                    uint32_t msk, uint32_t mskpitch,
                    uint32_t dst, uint32_t dstpitch,
-                   int width, int height)
+                   int width, int height, int flip)
 {
 	uint32_t srcx, dstx, mskx, m;
 	int line, x, i;
@@ -572,6 +588,14 @@ void CG14Comp_Over32Mask_noalpha(Cg14Ptr
 		for (x = 0; x < width; x++) {
 			/* fetch source pixel */
 			write_sx_io(p, srcx, SX_LDUQ0(12, 0, srcx & 7));
+			if (flip) {
+				write_sx_reg(p, SX_INSTRUCTIONS,
+				    SX_ORS(13, 0, 40, 0));
+				write_sx_reg(p, SX_INSTRUCTIONS,
+				    SX_ORS(15, 0, 13, 0));
+				write_sx_reg(p, SX_INSTRUCTIONS,
+				    SX_ORS(40, 0, 15, 0));
+			}
 			/* set src alpha to 0xff */
 			write_sx_reg(p, SX_INSTRUCTIONS,
 			    SX_ORS(8, 0, 12, 0));
@@ -607,7 +631,7 @@ void CG14Comp_Over32Mask32_noalpha(Cg14P
                    uint32_t src, uint32_t srcpitch,
                    uint32_t msk, uint32_t mskpitch,
                    uint32_t dst, uint32_t dstpitch,
-                   int width, int height)
+                   int width, int height, int flip)
 {
 	uint32_t srcx, dstx, mskx, m;
 	int line, x, i;
@@ -623,6 +647,14 @@ void CG14Comp_Over32Mask32_noalpha(Cg14P
 		for (x = 0; x < width; x++) {
 			/* fetch source pixel */
 			write_sx_io(p, srcx, SX_LDUQ0(12, 0, srcx & 7));
+			if (flip) {
+				write_sx_reg(p, SX_INSTRUCTIONS,
+				    SX_ORS(13, 0, 40, 0));
+				write_sx_reg(p, SX_INSTRUCTIONS,
+				    SX_ORS(15, 0, 13, 0));
+				write_sx_reg(p, SX_INSTRUCTIONS,
+				    SX_ORS(40, 0, 15, 0));
+			}
 			/* fetch mask */
 			write_sx_io(p, mskx, SX_LDUQ0(16, 0, mskx & 7));
 			/* fetch dst pixel */
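[Editorial note] The added if (flip) block is identical in all four functions: three
SX_ORS instructions that appear to exchange two of the registers filled by the preceding
SX_LDUQ0 load, using register 40 as scratch. ORing with register 0 serves as a plain
register-to-register move, the same idiom the existing code uses to set the source alpha.
Assuming the unpacked quad load places the pixel's four bytes in registers 12..15 so that
13 and 15 hold the red and blue components, a plain C sketch of the sequence (modeling
the SX register file as an array; swap_src_red_blue is a hypothetical name):

#include <stdint.h>

/*
 * Sketch only, not driver code: mirror the three added SX instructions.
 * regs[0] is assumed to read as zero, so "x | regs[0]" is just a move.
 */
static void
swap_src_red_blue(uint32_t regs[64])
{
	regs[40] = regs[13] | regs[0];	/* SX_ORS(13, 0, 40, 0) */
	regs[13] = regs[15] | regs[0];	/* SX_ORS(15, 0, 13, 0) */
	regs[15] = regs[40] | regs[0];	/* SX_ORS(40, 0, 15, 0) */
}

The swap is done one pixel per loop iteration here; the "next step" named in the log
message, doing more than one pixel at a time in these operations, would presumably apply
the same exchange to several loaded pixels per iteration.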