ffmpeg | branch: master | Clément Bœsch <u...@pkh.me> | Mon Mar 20 11:31:27 2017 +0100| [eed8ccde3e56a020255f8df3855702d029f30f21] | committer: Clément Bœsch

Merge commit '131a85a1fed9966bbd38517f76abfac0237e39dc'

* commit '131a85a1fed9966bbd38517f76abfac0237e39dc':
  utvideo: Change type of array stride parameters to ptrdiff_t

Merged-by: Clément Bœsch <u...@pkh.me>

> http://git.videolan.org/gitweb.cgi/ffmpeg.git/?a=commit;h=eed8ccde3e56a020255f8df3855702d029f30f21
---

 libavcodec/utvideo.h    |  2 +-
 libavcodec/utvideodec.c | 20 ++++++++++----------
 libavcodec/utvideoenc.c | 13 +++++++------
 3 files changed, 18 insertions(+), 17 deletions(-)

diff --git a/libavcodec/utvideo.h b/libavcodec/utvideo.h
index 68257fe..1d70865 100644
--- a/libavcodec/utvideo.h
+++ b/libavcodec/utvideo.h
@@ -81,7 +81,7 @@ typedef struct UtvideoContext {
     int frame_pred;
     int pro;

-    int slice_stride;
+    ptrdiff_t slice_stride;
     uint8_t *slice_bits, *slice_buffer[4];
     int slice_bits_size;
 } UtvideoContext;
diff --git a/libavcodec/utvideodec.c b/libavcodec/utvideodec.c
index 8978a53..7c65d77 100644
--- a/libavcodec/utvideodec.c
+++ b/libavcodec/utvideodec.c
@@ -124,7 +124,7 @@ static int build_huff(const uint8_t *src, VLC *vlc, int *fsym)
 }

 static int decode_plane10(UtvideoContext *c, int plane_no,
-                          uint16_t *dst, int step, int stride,
+                          uint16_t *dst, int step, ptrdiff_t stride,
                           int width, int height,
                           const uint8_t *src, const uint8_t *huff,
                           int use_pred)
@@ -229,7 +229,7 @@ fail:
 }

 static int decode_plane(UtvideoContext *c, int plane_no,
-                        uint8_t *dst, int step, int stride,
+                        uint8_t *dst, int step, ptrdiff_t stride,
                         int width, int height,
                         const uint8_t *src, int use_pred)
 {
@@ -333,8 +333,8 @@ fail:
     return AVERROR_INVALIDDATA;
 }

-static void restore_rgb_planes(uint8_t *src, int step, int stride, int width,
-                               int height)
+static void restore_rgb_planes(uint8_t *src, int step, ptrdiff_t stride,
+                               int width, int height)
 {
     int i, j;
     uint8_t r, g, b;
@@ -377,7 +377,7 @@ static void restore_rgb_planes10(AVFrame *frame, int width, int height)
 #undef B
 #undef C

-static void restore_median_planar(UtvideoContext *c, uint8_t *src, int stride,
+static void restore_median_planar(UtvideoContext *c, uint8_t *src, ptrdiff_t stride,
                                   int width, int height, int slices, int rmode)
 {
     int i, j, slice;
@@ -425,7 +425,7 @@ static void restore_median_planar(UtvideoContext *c, uint8_t *src, int stride,
  * so restoring function should take care of possible padding between
  * two parts of the same "line".
  */
-static void restore_median_planar_il(UtvideoContext *c, uint8_t *src, int stride,
+static void restore_median_planar_il(UtvideoContext *c, uint8_t *src, ptrdiff_t stride,
                                      int width, int height, int slices, int rmode)
 {
     int i, j, slice;
@@ -433,7 +433,7 @@ static void restore_median_planar_il(UtvideoContext *c, uint8_t *src, int stride
     uint8_t *bsrc;
     int slice_start, slice_height;
     const int cmask = ~(rmode ? 3 : 1);
-    const int stride2 = stride << 1;
+    const ptrdiff_t stride2 = stride << 1;

     for (slice = 0; slice < slices; slice++) {
         slice_start = ((slice * height) / slices) & cmask;
@@ -476,7 +476,7 @@ static void restore_median_planar_il(UtvideoContext *c, uint8_t *src, int stride
     }
 }

-static void restore_median_packed(uint8_t *src, int step, int stride,
+static void restore_median_packed(uint8_t *src, int step, ptrdiff_t stride,
                                   int width, int height, int slices, int rmode)
 {
     int i, j, slice;
@@ -532,7 +532,7 @@ static void restore_median_packed(uint8_t *src, int step, int stride,
  * so restoring function should take care of possible padding between
  * two parts of the same "line".
  */
-static void restore_median_packed_il(uint8_t *src, int step, int stride,
+static void restore_median_packed_il(uint8_t *src, int step, ptrdiff_t stride,
                                      int width, int height, int slices, int rmode)
 {
     int i, j, slice;
@@ -540,7 +540,7 @@ static void restore_median_packed_il(uint8_t *src, int step, int stride,
     uint8_t *bsrc;
     int slice_start, slice_height;
     const int cmask = ~(rmode ? 3 : 1);
-    const int stride2 = stride << 1;
+    const ptrdiff_t stride2 = stride << 1;

     for (slice = 0; slice < slices; slice++) {
         slice_start = ((slice * height) / slices) & cmask;
diff --git a/libavcodec/utvideoenc.c b/libavcodec/utvideoenc.c
index 2b1230f..840742c 100644
--- a/libavcodec/utvideoenc.c
+++ b/libavcodec/utvideoenc.c
@@ -242,8 +242,9 @@ FF_ENABLE_DEPRECATION_WARNINGS
     return 0;
 }

-static void mangle_rgb_planes(uint8_t *dst[4], int dst_stride, uint8_t *src,
-                              int step, int stride, int width, int height)
+static void mangle_rgb_planes(uint8_t *dst[4], ptrdiff_t dst_stride,
+                              uint8_t *src, int step, ptrdiff_t stride,
+                              int width, int height)
 {
     int i, j;
     int k = 2 * dst_stride;
@@ -276,7 +277,7 @@ static void mangle_rgb_planes(uint8_t *dst[4], int dst_stride, uint8_t *src,
 }

 /* Write data to a plane with left prediction */
-static void left_predict(uint8_t *src, uint8_t *dst, int stride,
+static void left_predict(uint8_t *src, uint8_t *dst, ptrdiff_t stride,
                          int width, int height)
 {
     int i, j;
@@ -296,8 +297,8 @@ static void left_predict(uint8_t *src, uint8_t *dst, int stride,
 #undef B

 /* Write data to a plane with median prediction */
-static void median_predict(UtvideoContext *c, uint8_t *src, uint8_t *dst, int stride,
-                           int width, int height)
+static void median_predict(UtvideoContext *c, uint8_t *src, uint8_t *dst,
+                           ptrdiff_t stride, int width, int height)
 {
     int i, j;
     int A, B;
@@ -398,7 +399,7 @@ static int write_huff_codes(uint8_t *src, uint8_t *dst, int dst_size,
 }

 static int encode_plane(AVCodecContext *avctx, uint8_t *src,
-                        uint8_t *dst, int stride, int plane_no,
+                        uint8_t *dst, ptrdiff_t stride, int plane_no,
                         int width, int height, PutByteContext *pb)
 {
     UtvideoContext *c = avctx->priv_data;

======================================================================

diff --cc libavcodec/utvideo.h
index 68257fe,c7f5414..1d70865
--- a/libavcodec/utvideo.h
+++ b/libavcodec/utvideo.h
@@@ -79,9 -76,8 +79,9 @@@ typedef struct UtvideoContext
      int compression;
      int interlaced;
      int frame_pred;
 +    int pro;

-     int slice_stride;
+     ptrdiff_t slice_stride;
      uint8_t *slice_bits, *slice_buffer[4];
      int slice_bits_size;
  } UtvideoContext;
diff --cc libavcodec/utvideodec.c
index 8978a53,3a58156..7c65d77
--- a/libavcodec/utvideodec.c
+++ b/libavcodec/utvideodec.c
@@@ -123,113 -77,8 +123,113 @@@ static int build_huff(const uint8_t *sr
                                syms, sizeof(*syms), sizeof(*syms), 0);
  }

 +static int decode_plane10(UtvideoContext *c, int plane_no,
-                           uint16_t *dst, int step, int stride,
++                          uint16_t *dst, int step, ptrdiff_t stride,
 +                          int width, int height,
 +                          const uint8_t *src, const uint8_t *huff,
 +                          int use_pred)
 +{
 +    int i, j, slice, pix, ret;
 +    int sstart, send;
 +    VLC vlc;
 +    GetBitContext gb;
 +    int prev, fsym;
 +
 +    if ((ret = build_huff10(huff, &vlc, &fsym)) < 0) {
 +        av_log(c->avctx, AV_LOG_ERROR, "Cannot build Huffman codes\n");
 +        return ret;
 +    }
 +    if (fsym >= 0) { // build_huff reported a symbol to fill slices with
 +        send = 0;
 +        for (slice = 0; slice < c->slices; slice++) {
 +            uint16_t *dest;
 +
 +            sstart = send;
 +            send = (height * (slice + 1) / c->slices);
 +            dest = dst + sstart * stride;
 +
 +            prev = 0x200;
 +            for (j = sstart; j < send; j++) {
 +                for (i = 0; i < width * step; i += step) {
 +                    pix = fsym;
 +                    if (use_pred) {
 +                        prev += pix;
 +                        prev &= 0x3FF;
 +                        pix = prev;
 +                    }
 +                    dest[i] = pix;
 +                }
 +                dest += stride;
 +            }
 +        }
 +        return 0;
 +    }
 +
 +    send = 0;
 +    for (slice = 0; slice < c->slices; slice++) {
 +        uint16_t *dest;
 +        int slice_data_start, slice_data_end, slice_size;
 +
 +        sstart = send;
 +        send = (height * (slice + 1) / c->slices);
 +        dest = dst + sstart * stride;
 +
 +        // slice offset and size validation was done earlier
 +        slice_data_start = slice ? AV_RL32(src + slice * 4 - 4) : 0;
 +        slice_data_end   = AV_RL32(src + slice * 4);
 +        slice_size       = slice_data_end - slice_data_start;
 +
 +        if (!slice_size) {
 +            av_log(c->avctx, AV_LOG_ERROR, "Plane has more than one symbol "
 +                   "yet a slice has a length of zero.\n");
 +            goto fail;
 +        }
 +
 +        memcpy(c->slice_bits, src + slice_data_start + c->slices * 4,
 +               slice_size);
 +        memset(c->slice_bits + slice_size, 0, AV_INPUT_BUFFER_PADDING_SIZE);
 +        c->bdsp.bswap_buf((uint32_t *) c->slice_bits,
 +                          (uint32_t *) c->slice_bits,
 +                          (slice_data_end - slice_data_start + 3) >> 2);
 +        init_get_bits(&gb, c->slice_bits, slice_size * 8);
 +
 +        prev = 0x200;
 +        for (j = sstart; j < send; j++) {
 +            for (i = 0; i < width * step; i += step) {
 +                if (get_bits_left(&gb) <= 0) {
 +                    av_log(c->avctx, AV_LOG_ERROR,
 +                           "Slice decoding ran out of bits\n");
 +                    goto fail;
 +                }
 +                pix = get_vlc2(&gb, vlc.table, vlc.bits, 3);
 +                if (pix < 0) {
 +                    av_log(c->avctx, AV_LOG_ERROR, "Decoding error\n");
 +                    goto fail;
 +                }
 +                if (use_pred) {
 +                    prev += pix;
 +                    prev &= 0x3FF;
 +                    pix = prev;
 +                }
 +                dest[i] = pix;
 +            }
 +            dest += stride;
 +        }
 +        if (get_bits_left(&gb) > 32)
 +            av_log(c->avctx, AV_LOG_WARNING,
 +                   "%d bits left after decoding slice\n", get_bits_left(&gb));
 +    }
 +
 +    ff_free_vlc(&vlc);
 +
 +    return 0;
 +fail:
 +    ff_free_vlc(&vlc);
 +    return AVERROR_INVALIDDATA;
 +}
 +
  static int decode_plane(UtvideoContext *c, int plane_no,
-                         uint8_t *dst, int step, int stride,
+                         uint8_t *dst, int step, ptrdiff_t stride,
                          int width, int height,
                          const uint8_t *src, int use_pred)
  {
@@@ -351,34 -200,8 +351,34 @@@ static void restore_rgb_planes(uint8_t
      }
  }

 -static void restore_median(uint8_t *src, int step, ptrdiff_t stride,
 -                           int width, int height, int slices, int rmode)
 +static void restore_rgb_planes10(AVFrame *frame, int width, int height)
 +{
 +    uint16_t *src_r = (uint16_t *)frame->data[2];
 +    uint16_t *src_g = (uint16_t *)frame->data[0];
 +    uint16_t *src_b = (uint16_t *)frame->data[1];
 +    int r, g, b;
 +    int i, j;
 +
 +    for (j = 0; j < height; j++) {
 +        for (i = 0; i < width; i++) {
 +            r = src_r[i];
 +            g = src_g[i];
 +            b = src_b[i];
 +            src_r[i] = (r + g - 0x200) & 0x3FF;
 +            src_b[i] = (b + g - 0x200) & 0x3FF;
 +        }
 +        src_r += frame->linesize[2] / 2;
 +        src_g += frame->linesize[0] / 2;
 +        src_b += frame->linesize[1] / 2;
 +    }
 +}
 +
 +#undef A
 +#undef B
 +#undef C
 +
- static void restore_median_planar(UtvideoContext *c, uint8_t *src, int stride,
++static void restore_median_planar(UtvideoContext *c, uint8_t *src, ptrdiff_t stride,
 +                                  int width, int height, int slices, int rmode)
  {
      int i, j, slice;
      int A, B, C;
@@@ -390,56 -213,6 +390,56 @@@
          slice_start = ((slice * height) / slices) & cmask;
          slice_height = ((((slice + 1) * height) / slices) & cmask) -
                         slice_start;
 +
 +        if (!slice_height)
 +            continue;
 +        bsrc = src + slice_start * stride;
 +
 +        // first line - left neighbour prediction
 +        bsrc[0] += 0x80;
 +        c->llviddsp.add_left_pred(bsrc, bsrc, width, 0);
 +        bsrc += stride;
 +        if (slice_height <= 1)
 +            continue;
 +        // second line - first element has top prediction, the rest uses median
 +        C = bsrc[-stride];
 +        bsrc[0] += C;
 +        A = bsrc[0];
 +        for (i = 1; i < width; i++) {
 +            B = bsrc[i - stride];
 +            bsrc[i] += mid_pred(A, B, (uint8_t)(A + B - C));
 +            C = B;
 +            A = bsrc[i];
 +        }
 +        bsrc += stride;
 +        // the rest of lines use continuous median prediction
 +        for (j = 2; j < slice_height; j++) {
 +            c->llviddsp.add_median_pred(bsrc, bsrc - stride,
 +                                        bsrc, width, &A, &B);
 +            bsrc += stride;
 +        }
 +    }
 +}
 +
 +/* UtVideo interlaced mode treats every two lines as a single one,
 + * so restoring function should take care of possible padding between
 + * two parts of the same "line".
 + */
- static void restore_median_planar_il(UtvideoContext *c, uint8_t *src, int stride,
++static void restore_median_planar_il(UtvideoContext *c, uint8_t *src, ptrdiff_t stride,
 +                                     int width, int height, int slices, int rmode)
 +{
 +    int i, j, slice;
 +    int A, B, C;
 +    uint8_t *bsrc;
 +    int slice_start, slice_height;
 +    const int cmask = ~(rmode ? 3 : 1);
-     const int stride2 = stride << 1;
++    const ptrdiff_t stride2 = stride << 1;
 +
 +    for (slice = 0; slice < slices; slice++) {
 +        slice_start = ((slice * height) / slices) & cmask;
 +        slice_height = ((((slice + 1) * height) / slices) & cmask) -
 +                       slice_start;
 +        slice_height >>= 1;

          if (!slice_height)
              continue;
@@@ -447,55 -220,6 +447,55 @@@

          // first line - left neighbour prediction
          bsrc[0] += 0x80;
 +        A = c->llviddsp.add_left_pred(bsrc, bsrc, width, 0);
 +        c->llviddsp.add_left_pred(bsrc + stride, bsrc + stride, width, A);
 +        bsrc += stride2;
 +        if (slice_height <= 1)
 +            continue;
 +        // second line - first element has top prediction, the rest uses median
 +        C = bsrc[-stride2];
 +        bsrc[0] += C;
 +        A = bsrc[0];
 +        for (i = 1; i < width; i++) {
 +            B = bsrc[i - stride2];
 +            bsrc[i] += mid_pred(A, B, (uint8_t)(A + B - C));
 +            C = B;
 +            A = bsrc[i];
 +        }
 +        c->llviddsp.add_median_pred(bsrc + stride, bsrc - stride,
 +                                    bsrc + stride, width, &A, &B);
 +        bsrc += stride2;
 +        // the rest of lines use continuous median prediction
 +        for (j = 2; j < slice_height; j++) {
 +            c->llviddsp.add_median_pred(bsrc, bsrc - stride2,
 +                                        bsrc, width, &A, &B);
 +            c->llviddsp.add_median_pred(bsrc + stride, bsrc - stride,
 +                                        bsrc + stride, width, &A, &B);
 +            bsrc += stride2;
 +        }
 +    }
 +}
 +
- static void restore_median_packed(uint8_t *src, int step, int stride,
++static void restore_median_packed(uint8_t *src, int step, ptrdiff_t stride,
 +                                  int width, int height, int slices, int rmode)
 +{
 +    int i, j, slice;
 +    int A, B, C;
 +    uint8_t *bsrc;
 +    int slice_start, slice_height;
 +    const int cmask = ~rmode;
 +
 +    for (slice = 0; slice < slices; slice++) {
 +        slice_start = ((slice * height) / slices) & cmask;
 +        slice_height = ((((slice + 1) * height) / slices) & cmask) -
 +                       slice_start;
 +
 +        if (!slice_height)
 +            continue;
 +        bsrc = src + slice_start * stride;
 +
 +        // first line - left neighbour prediction
 +        bsrc[0] += 0x80;
          A = bsrc[0];
          for (i = step; i < width * step; i += step) {
              bsrc[i] += A;
@@@ -532,8 -256,8 +532,8 @@@
   * so restoring function should take care of possible padding between
   * two parts of the same "line".
   */
- static void restore_median_packed_il(uint8_t *src, int step, int stride,
 -static void restore_median_il(uint8_t *src, int step, ptrdiff_t stride,
 -                              int width, int height, int slices, int rmode)
++static void restore_median_packed_il(uint8_t *src, int step, ptrdiff_t stride,
 +                                     int width, int height, int slices, int rmode)
  {
      int i, j, slice;
      int A, B, C;
diff --cc libavcodec/utvideoenc.c
index 2b1230f,ef51ed0..840742c
--- a/libavcodec/utvideoenc.c
+++ b/libavcodec/utvideoenc.c
@@@ -292,12 -285,9 +293,12 @@@ static void left_predict(uint8_t *src,
          }
      }

 +#undef A
 +#undef B
 +
  /* Write data to a plane with median prediction */
- static void median_predict(UtvideoContext *c, uint8_t *src, uint8_t *dst, int stride,
-                            int width, int height)
+ static void median_predict(UtvideoContext *c, uint8_t *src, uint8_t *dst,
+                            ptrdiff_t stride, int width, int height)
  {
      int i, j;
      int A, B;
@@@ -398,7 -388,7 +399,7 @@@ static int write_huff_codes(uint8_t *sr
  }

  static int encode_plane(AVCodecContext *avctx, uint8_t *src,
-                         uint8_t *dst, int stride, int plane_no,
 -                        uint8_t *dst, ptrdiff_t stride,
++                        uint8_t *dst, ptrdiff_t stride, int plane_no,
                          int width, int height, PutByteContext *pb)
  {
      UtvideoContext *c = avctx->priv_data;

_______________________________________________
ffmpeg-cvslog mailing list
ffmpeg-cvslog@ffmpeg.org
http://ffmpeg.org/mailman/listinfo/ffmpeg-cvslog
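
Why ptrdiff_t for strides -- a minimal standalone sketch, not libavcodec code
(the fill_plane() helper and all of its values below are invented for
illustration). A row stride is the quantity added to a plane pointer to move
to the next line, i.e. it lives in pointer arithmetic; ptrdiff_t is the type
C defines for pointer differences, so it does not narrow on 64-bit targets
and it expresses a negative stride (a bottom-up view of a plane) directly,
which is the general motivation for the parameter type changed throughout
this merge.

/* Hedged sketch: walks a plane with the same dst/step/stride pattern the
 * utvideo loops use, with the stride passed as ptrdiff_t so a negative
 * value needs no special handling. */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

static void fill_plane(uint8_t *dst, int step, ptrdiff_t stride,
                       int width, int height, uint8_t val)
{
    for (int j = 0; j < height; j++) {
        for (int i = 0; i < width * step; i += step)
            dst[i] = val;
        dst += stride;              /* pointer arithmetic, hence ptrdiff_t */
    }
}

int main(void)
{
    enum { W = 8, H = 4 };
    uint8_t buf[W * H] = { 0 };

    /* Top-down pass over all rows ... */
    fill_plane(buf, 1, W, W, H, 0x10);

    /* ... and a bottom-up pass over the last two rows of the same buffer,
     * expressed directly with a negative stride. */
    fill_plane(buf + (ptrdiff_t)(H - 1) * W, 1, -(ptrdiff_t)W, W, 2, 0x80);

    for (int j = 0; j < H; j++) {
        for (int i = 0; i < W; i++)
            printf("%02X ", (unsigned)buf[j * W + i]);
        printf("\n");
    }
    return 0;
}

The codec change itself is mechanical, as the diff above shows: every stride
parameter, plus the derived stride2 locals and the slice_stride field, switches
from int to ptrdiff_t across utvideo.h, utvideodec.c and utvideoenc.c, with no
change to the per-pixel logic.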