The line alignment for B1W0 and B0W1 1 bpp video in nut was previously 4
bytes, which triggered alignment warning messages, and not only for
odd-width files. The alignment is now 16 bytes.
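
For reference, the padded destination row size for the 1 bpp case is
computed with the same expressions that raw_decode() uses in the patch
below (a minimal sketch; the variable names here are only illustrative,
FFALIGN is the libavutil macro):

    /* bytes needed to hold one row of 1 bpp pixels, rounded up */
    int row_bytes = avctx->width / 8 + (avctx->width & 7 ? 1 : 0);
    /* pad each row out to the new 16-byte boundary */
    int padded_row = FFALIGN(row_bytes, 16);
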
I have also made some simplifications, added placeholders for the future
PAL8 fourcc in nut, and added comments pointing out the temporary nature
of some of the code, which will be removed once PAL8 is fully implemented
in nut (whenever that may be).
Mats
--
Mats Peterson
http://matsp888.no-ip.org/~mats/
From 74681b2856afbf12a9588120c5039afdf02fa58b Mon Sep 17 00:00:00 2001
From: Mats Peterson <matsp...@yahoo.com>
Date: Thu, 4 Feb 2016 22:44:33 +0100
Subject: [PATCH] lavc/rawdec: Use 16-byte line alignment for B1W0 and B0W1 video in nut
---
libavcodec/rawdec.c | 78 ++++++++++++++++++++++++++++++++++++---------------
1 file changed, 56 insertions(+), 22 deletions(-)
diff --git a/libavcodec/rawdec.c b/libavcodec/rawdec.c
index 93cbedf..525db4c 100644
--- a/libavcodec/rawdec.c
+++ b/libavcodec/rawdec.c
@@ -41,7 +41,11 @@ typedef struct RawVideoContext {
AVBufferRef *palette;
int frame_size; /* size of the frame in bytes */
int flip;
- int is_1_2_4_8_bpp; // 1, 2, 4 and 8 bpp in avi/mov
+ int is_1_2_4_8_bpp; // 1, 2, 4 and 8 bpp in avi/mov, 1 and 8 bpp in nut
+ int is_mono;
+ int is_pal8;
+ int is_nut_mono;
+ int is_nut_pal8;
int is_yuv2;
int is_lt_16bpp; // 16bpp pixfmt and bits_per_coded_sample < 16
int tff;
@@ -96,7 +100,7 @@ static av_cold int raw_init_decoder(AVCodecContext *avctx)
avpriv_set_systematic_pal2((uint32_t*)context->palette->data, avctx->pix_fmt);
else {
memset(context->palette->data, 0, AVPALETTE_SIZE);
- if (avctx->bits_per_coded_sample == 1)
+ if (avctx->bits_per_coded_sample <= 1)
memset(context->palette->data, 0xff, 4);
}
}
@@ -108,11 +112,24 @@ static av_cold int raw_init_decoder(AVCodecContext *avctx)
avctx->codec_tag == MKTAG('W','R','A','W'))
context->flip = 1;
+ if (avctx->pix_fmt == AV_PIX_FMT_MONOWHITE ||
+ avctx->pix_fmt == AV_PIX_FMT_MONOBLACK)
+ context->is_mono = 1;
+ else if (avctx->pix_fmt == AV_PIX_FMT_PAL8)
+ context->is_pal8 = 1;
+
+ if (avctx->codec_tag == MKTAG('B','1','W','0') ||
+ avctx->codec_tag == MKTAG('B','0','W','1'))
+ context->is_nut_mono = 1;
+ else if (avctx->codec_tag == MKTAG('P','A','L','8'))
+ context->is_nut_pal8 = 1;
+
if (avctx->codec_tag == AV_RL32("yuv2") &&
avctx->pix_fmt == AV_PIX_FMT_YUYV422)
context->is_yuv2 = 1;
- if (avctx->pix_fmt == AV_PIX_FMT_PAL8 && avctx->bits_per_coded_sample == 1)
+ /* Temporary solution until PAL8 is implemented in nut */
+ if (context->is_pal8 && avctx->bits_per_coded_sample == 1)
avctx->pix_fmt = AV_PIX_FMT_NONE;
return 0;
@@ -160,22 +177,34 @@ static int raw_decode(AVCodecContext *avctx, void *data, int *got_frame,
const uint8_t *buf = avpkt->data;
int buf_size = avpkt->size;
int linesize_align = 4;
- int avpkt_stride;
+ int stride;
int res, len;
int need_copy;
AVFrame *frame = data;
+ if (avctx->width <= 0) {
+ av_log(avctx, AV_LOG_ERROR, "width is not set\n");
+ return AVERROR_INVALIDDATA;
+ }
if (avctx->height <= 0) {
av_log(avctx, AV_LOG_ERROR, "height is not set\n");
return AVERROR_INVALIDDATA;
}
- avpkt_stride = avpkt->size / avctx->height;
- if (avpkt_stride == 0) {
- av_log(avctx, AV_LOG_ERROR, "Packet too small (%d) height (%d)\n", avpkt->size, avctx->height);
+ if (context->is_nut_mono)
+ stride = avctx->width / 8 + (avctx->width & 7 ? 1 : 0);
+ else if (context->is_nut_pal8)
+ stride = avctx->width;
+ else
+ stride = avpkt->size / avctx->height;
+
+ if (avpkt->size < stride * avctx->height) {
+ av_log(avctx, AV_LOG_ERROR, "Packet too small (%d)\n", avpkt->size);
return AVERROR_INVALIDDATA;
}
+
+ /* Temporary solution until PAL8 is implemented in nut */
if (avctx->pix_fmt == AV_PIX_FMT_NONE &&
avctx->bits_per_coded_sample == 1 &&
avctx->frame_number == 0 &&
@@ -185,18 +214,23 @@ static int raw_decode(AVCodecContext *avctx, void *data, int *got_frame,
const uint8_t *pal = av_packet_get_side_data(avpkt, AV_PKT_DATA_PALETTE, NULL);
if (!pal) {
avctx->pix_fmt = AV_PIX_FMT_MONOWHITE;
- } else
+ context->is_mono = 1;
+ context->is_pal8 = 0;
+ } else {
avctx->pix_fmt = AV_PIX_FMT_PAL8;
+ context->is_mono = 0;
+ context->is_pal8 = 1;
+ }
}
-
desc = av_pix_fmt_desc_get(avctx->pix_fmt);
if ((avctx->bits_per_coded_sample == 8 || avctx->bits_per_coded_sample == 4
- || avctx->bits_per_coded_sample == 2 || avctx->bits_per_coded_sample == 1) &&
- (avctx->pix_fmt == AV_PIX_FMT_PAL8 || avctx->pix_fmt == AV_PIX_FMT_MONOWHITE) &&
- (!avctx->codec_tag || avctx->codec_tag == MKTAG('r','a','w',' '))) {
+ || avctx->bits_per_coded_sample <= 2) &&
+ (context->is_mono || context->is_pal8) &&
+ (!avctx->codec_tag || avctx->codec_tag == MKTAG('r','a','w',' ') ||
+ context->is_nut_mono || context->is_nut_pal8)) {
context->is_1_2_4_8_bpp = 1;
- if (avctx->bits_per_coded_sample == 1 && avctx->pix_fmt == AV_PIX_FMT_MONOWHITE) {
+ if (context->is_mono) {
int row_bytes = avctx->width / 8 + (avctx->width & 7 ? 1 : 0);
context->frame_size = av_image_get_buffer_size(avctx->pix_fmt,
FFALIGN(row_bytes, 16) * 8,
@@ -240,19 +274,18 @@ static int raw_decode(AVCodecContext *avctx, void *data, int *got_frame,
if (!frame->buf[0])
return AVERROR(ENOMEM);
- // 1, 2, 4 and 8 bpp in avi/mov
+ // 1, 2, 4 and 8 bpp in avi/mov, 1 and 8 bpp in nut
if (context->is_1_2_4_8_bpp) {
int i, j, row_pix = 0;
uint8_t *dst = frame->buf[0]->data;
- buf_size = context->frame_size -
- (avctx->pix_fmt == AV_PIX_FMT_PAL8 ? AVPALETTE_SIZE : 0);
- if (avctx->bits_per_coded_sample == 8 || avctx->pix_fmt == AV_PIX_FMT_MONOWHITE) {
- int pix_per_byte = avctx->pix_fmt == AV_PIX_FMT_MONOWHITE ? 8 : 1;
+ buf_size = context->frame_size - (context->is_pal8 ? AVPALETTE_SIZE : 0);
+ if (avctx->bits_per_coded_sample == 8 || context->is_nut_pal8 || context->is_mono) {
+ int pix_per_byte = context->is_mono ? 8 : 1;
for (i = 0, j = 0; j < buf_size && i<avpkt->size; i++, j++) {
dst[j] = buf[i];
row_pix += pix_per_byte;
if (row_pix >= avctx->width) {
- i += avpkt_stride - (i % avpkt_stride) - 1;
+ i += stride - (i % stride) - 1;
j += 16 - (j % 16) - 1;
row_pix = 0;
}
@@ -263,7 +296,7 @@ static int raw_decode(AVCodecContext *avctx, void *data, int *got_frame,
dst[2 * j + 1] = buf[i] & 15;
row_pix += 2;
if (row_pix >= avctx->width) {
- i += avpkt_stride - (i % avpkt_stride) - 1;
+ i += stride - (i % stride) - 1;
j += 8 - (j % 8) - 1;
row_pix = 0;
}
@@ -276,7 +309,7 @@ static int raw_decode(AVCodecContext *avctx, void *data, int *got_frame,
dst[4 * j + 3] = buf[i] & 3;
row_pix += 4;
if (row_pix >= avctx->width) {
- i += avpkt_stride - (i % avpkt_stride) - 1;
+ i += stride - (i % stride) - 1;
j += 4 - (j % 4) - 1;
row_pix = 0;
}
@@ -294,7 +327,7 @@ static int raw_decode(AVCodecContext *avctx, void *data, int *got_frame,
dst[8 * j + 7] = buf[i] & 1;
row_pix += 8;
if (row_pix >= avctx->width) {
- i += avpkt_stride - (i % avpkt_stride) - 1;
+ i += stride - (i % stride) - 1;
j += 2 - (j % 2) - 1;
row_pix = 0;
}
@@ -371,6 +404,7 @@ static int raw_decode(AVCodecContext *avctx, void *data, int *got_frame,
avctx->pix_fmt==AV_PIX_FMT_RGB555BE ||
avctx->pix_fmt==AV_PIX_FMT_RGB565LE ||
avctx->pix_fmt==AV_PIX_FMT_MONOWHITE ||
+ avctx->pix_fmt==AV_PIX_FMT_MONOBLACK ||
avctx->pix_fmt==AV_PIX_FMT_PAL8) &&
FFALIGN(frame->linesize[0], linesize_align) * avctx->height <= buf_size)
frame->linesize[0] = FFALIGN(frame->linesize[0], linesize_align);
--
1.7.10.4