On 10 April 2018 at 22:21, Patrick Keroulas <patrick.kerou...@savoirfairelinux.com> wrote:

> From: Damien Riegel <damien.rie...@savoirfairelinux.com>
>
> This codec is already capable of unpacking some combinations of pixel
> formats and depths as defined in RFC 4175. The only difference between
> progressive and interlaced is that a packet contains either the whole
> frame or only one field of it.
>
> As FFmpeg cannot handle standalone fields and recompose an interlaced
> frame from them, this has to be done by the codec. To achieve that, it
> must use two AVPackets: one for each field (top and bottom).
>
> Signed-off-by: Damien Riegel <damien.rie...@savoirfairelinux.com>
> Signed-off-by: Patrick Keroulas <patrick.kerou...@savoirfairelinux.com>
> ---
>  libavcodec/avcodec.h   |  4 +++
>  libavcodec/bitpacked.c | 76 ++++++++++++++++++++++++++++++++++++++++--------
>  2 files changed, 68 insertions(+), 12 deletions(-)
>
> diff --git a/libavcodec/avcodec.h b/libavcodec/avcodec.h
> index fb0c6fa..350e8d9 100644
> --- a/libavcodec/avcodec.h
> +++ b/libavcodec/avcodec.h
> @@ -1480,6 +1480,10 @@ typedef struct AVPacket {
>   */
>  #define AV_PKT_FLAG_DISPOSABLE 0x0010
>
> +/**
> + * The packet contains a top field.
> + */
> +#define AV_PKT_FLAG_TOP_FIELD  0x0020
>
>  enum AVSideDataParamChangeFlags {
>      AV_SIDE_DATA_PARAM_CHANGE_CHANNEL_COUNT  = 0x0001,
> diff --git a/libavcodec/bitpacked.c b/libavcodec/bitpacked.c
> index 85d4bdd..d81703d 100644
> --- a/libavcodec/bitpacked.c
> +++ b/libavcodec/bitpacked.c
> @@ -33,15 +33,19 @@
>
>  struct BitpackedContext {
>      int (*decode)(AVCodecContext *avctx, AVFrame *frame,
> -                  AVPacket *pkt);
> +                  AVPacket *pkt, int top_field);
> +    AVPacket *first_field_pkt;
>  };
>
>  /* For this format, it's a simple passthrough */
>  static int bitpacked_decode_uyvy422(AVCodecContext *avctx, AVFrame *frame,
> -                                    AVPacket *avpkt)
> +                                    AVPacket *avpkt, int top_field)
>  {
>      int ret;
>
> +    if (frame->interlaced_frame)
> +        return AVERROR_PATCHWELCOME;
> +
>      /* there is no need to copy as the data already match
>       * a known pixel format */
>      frame->buf[0] = av_buffer_ref(avpkt->buf);
> @@ -56,17 +60,22 @@ static int bitpacked_decode_uyvy422(AVCodecContext *avctx, AVFrame *frame,
>  }
>
>  static int bitpacked_decode_yuv422p10(AVCodecContext *avctx, AVFrame *frame,
> -                                      AVPacket *avpkt)
> +                                      AVPacket *avpkt, int top_field)
>  {
>      uint64_t frame_size = (uint64_t)avctx->width * (uint64_t)avctx->height * 20;
>      uint64_t packet_size = (uint64_t)avpkt->size * 8;
> +    int interlaced = frame->interlaced_frame;
>      GetBitContext bc;
>      uint16_t *y, *u, *v;
>      int ret, i, j;
>
> -
> -    if (frame_size > packet_size)
> +    /* check packet size depending on the interlaced/progressive format */
> +    if (interlaced) {
> +        if ((frame_size >> 1) > packet_size)
> +            return AVERROR_INVALIDDATA;
> +    } else if (frame_size > packet_size) {
>          return AVERROR_INVALIDDATA;
> +    }
>
>      if (avctx->width % 2)
>          return AVERROR_PATCHWELCOME;
> @@ -75,7 +84,18 @@ static int bitpacked_decode_yuv422p10(AVCodecContext *avctx, AVFrame *frame,
>      if (ret)
>          return ret;
>
> -    for (i = 0; i < avctx->height; i++) {
> +    /*
> +     * if the frame is interlaced, the avpkt we are getting is either the top
> +     * or the bottom field. If it's the bottom field, it contains all the odd
> +     * lines of the recomposed frame, so we start at offset 1.
> +     */
> +    i = (interlaced && !top_field) ? 1 : 0;
> +
> +    /*
> +     * Packets from interlaced frames contain either even lines, or odd
> +     * lines, so increment by two in that case.
> +     */
> +    for (; i < avctx->height; interlaced ? i += 2 : i++) {
>          y = (uint16_t*)(frame->data[0] + i * frame->linesize[0]);
>          u = (uint16_t*)(frame->data[1] + i * frame->linesize[1]);
>          v = (uint16_t*)(frame->data[2] + i * frame->linesize[2]);
> @@ -100,13 +120,20 @@ static av_cold int bitpacked_init_decoder(AVCodecContext *avctx)
>
>      if (avctx->codec_tag == MKTAG('U', 'Y', 'V', 'Y')) {
>          if (avctx->bits_per_coded_sample == 16 &&
> -            avctx->pix_fmt == AV_PIX_FMT_UYVY422)
> +            avctx->pix_fmt == AV_PIX_FMT_UYVY422) {
> +
> +            if (avctx->field_order > AV_FIELD_PROGRESSIVE) {
> +                av_log(avctx, AV_LOG_ERROR, "interlaced not yet supported for 8-bit\n");
> +                return AVERROR_PATCHWELCOME;
> +            }
> +
>              bc->decode = bitpacked_decode_uyvy422;
> -        else if (avctx->bits_per_coded_sample == 20 &&
> -                 avctx->pix_fmt == AV_PIX_FMT_YUV422P10)
> +        } else if (avctx->bits_per_coded_sample == 20 &&
> +                 avctx->pix_fmt == AV_PIX_FMT_YUV422P10) {
>              bc->decode = bitpacked_decode_yuv422p10;
> -        else
> +        } else {
>              return AVERROR_INVALIDDATA;
> +        }
>      } else {
>          return AVERROR_INVALIDDATA;
>      }
> @@ -120,24 +147,49 @@ static int bitpacked_decode(AVCodecContext *avctx, void *data, int *got_frame,
>      struct BitpackedContext *bc = avctx->priv_data;
>      int buf_size = avpkt->size;
>      AVFrame *frame = data;
> +    int top_field = 0;
>      int res;
>
>      frame->pict_type = AV_PICTURE_TYPE_I;
>      frame->key_frame = 1;
>
> +    if (avctx->field_order != AV_FIELD_PROGRESSIVE) {
> +        top_field = avpkt->flags & AV_PKT_FLAG_TOP_FIELD;
> +        frame->interlaced_frame = 1;
> +        frame->top_field_first = 1;
> +    }
> +
>      if (avctx->pix_fmt == AV_PIX_FMT_YUV422P10) {
>          res = ff_get_buffer(avctx, frame, 0);
>          if (res < 0)
>              return res;
>      }
>
> -    res = bc->decode(avctx, frame, avpkt);
> +    if (frame->interlaced_frame) {
> +
> +        if (top_field) {
> +            bc->first_field_pkt = av_packet_clone(avpkt);
> +            return 0;
> +
> +        } else if (bc->first_field_pkt) {
> +            /* Combine the 2 fields in a single frame.
> +             * N fields/s give N/2 frames/s. */
> +            res = bc->decode(avctx, frame, bc->first_field_pkt, 1);
> +            res += bc->decode(avctx, frame, avpkt, 0);
> +
> +            av_packet_free(&bc->first_field_pkt);
> +        } else {
> +            return 0;
> +        }
> +    } else {
> +        res = bc->decode(avctx, frame, avpkt, 0);
> +    }
> +
>      if (res)
>          return res;
>
>      *got_frame = 1;
>      return buf_size;
> -
>  }
>
>  AVCodec ff_bitpacked_decoder = {
> --
> 2.7.4
>

No. What you should do instead: get the first field, allocate a buffer big
enough to hold the full frame (2 fields), and render the first field you get
with stride * 2. Then av_frame_ref that frame into your context, output no
frame for that packet, and wait for the next field. If the field you get has
the same polarity (e.g. a top field while you already have a top field in
your context), av_frame_free your ref'd frame and start over. If you get what
you expect, render into your context's ref'd frame (again with stride * 2 to
skip every other line), then av_frame_ref that frame onto the AVFrame you
need to output. Finally, unref your context's frame, report that you have a
frame ready, and that's it.
Too many mistakes can be made here; this approach ensures that you waste no
time copying anything at all, and that if something like packet loss happens
you'll recover without leaking resources or worse.
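
Roughly something like this, as an untested sketch. cur_frame and cur_field
would be new members of struct BitpackedContext, and render_field() is a
hypothetical stand-in for the existing line-unpacking loop run with a doubled
stride so it only touches every other line. (I'm allocating the full frame
directly in the context here rather than ref'ing it in; same idea.)

static int bitpacked_decode_interlaced(AVCodecContext *avctx, AVFrame *out,
                                       int *got_frame, AVPacket *avpkt)
{
    struct BitpackedContext *bc = avctx->priv_data;
    int top_field = !!(avpkt->flags & AV_PKT_FLAG_TOP_FIELD);
    int ret;

    /* A pending field of the same polarity means its pair went missing
     * (e.g. packet loss): drop it and start over with this packet. */
    if (bc->cur_frame && top_field == bc->cur_field)
        av_frame_free(&bc->cur_frame);

    if (!bc->cur_frame) {
        /* First field: get a buffer for the whole frame (both fields),
         * render this field into every other line, output nothing yet. */
        bc->cur_frame = av_frame_alloc();
        if (!bc->cur_frame)
            return AVERROR(ENOMEM);
        ret = ff_get_buffer(avctx, bc->cur_frame, 0);
        if (ret < 0)
            goto fail;
        ret = render_field(avctx, bc->cur_frame, avpkt, top_field);
        if (ret < 0)
            goto fail;
        bc->cur_field = top_field;
        return avpkt->size;             /* *got_frame stays 0 */
    }

    /* Second field: fill in the remaining lines, then hand the frame out. */
    ret = render_field(avctx, bc->cur_frame, avpkt, top_field);
    if (ret < 0)
        goto fail;

    bc->cur_frame->interlaced_frame = 1;
    bc->cur_frame->top_field_first  = bc->cur_field;

    ret = av_frame_ref(out, bc->cur_frame);
    av_frame_free(&bc->cur_frame);
    if (ret < 0)
        return ret;

    *got_frame = 1;
    return avpkt->size;

fail:
    av_frame_free(&bc->cur_frame);
    return ret;
}

The point is that the only per-field state is one ref'd AVFrame plus its
polarity, so a dropped or duplicated field just resets that state instead of
leaking a cloned packet.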