Thanks Andrey. I'll paste my code below. All this does is read each frame
and convert it to a packed RGBA buffer.

I should also mention that I am building my app with MSVC++ 9.0 for Windows
(64-bit). I haven't tested it with other platforms/compilers.

// Main.cpp
//
#include <vector>

// Needed before including libav headers
//
#define __STDC_CONSTANT_MACROS

// Includes for libav
//
extern "C" {
#include "libavutil/imgutils.h"
#include "libavutil/opt.h"
#include "libavcodec/avcodec.h"
#include "libavformat/avformat.h"
#include "libavutil/mathematics.h"
#include "libavutil/samplefmt.h"
#include "libswscale/swscale.h"
}

#define BUF_SIZE 2048

int main(int argc, char *argv[])
{
  char lastError[BUF_SIZE];
  int streamIndex = -1;

  if (argc < 2)
  {
    printf("Usage: fftest inputfile\n");
    return 1;
  }

  // Initialize Ffmpeg
  //
  ::av_register_all();
  ::avformat_network_init();
  ::avcodec_register_all();

  AVInputFormat *format = 0;
  AVFormatContext *avFormatContext = 0;

  // Open the stream and read the header
  //
  int ret = ::avformat_open_input(&avFormatContext,
    argv[1],
    format,
    0);

  if (ret < 0)
  {
    ::av_strerror(ret, lastError, BUF_SIZE);
    ::printf("%s\n", lastError);
    return 1;
  }

  // Retrieve stream information into streams field
  //
  ret = ::avformat_find_stream_info(avFormatContext, 0);
  if (ret >= 0)
  {
    // Get the video stream
    //
    for (unsigned int i = 0; i < avFormatContext->nb_streams; ++i)
    {
      // Print info about each stream to stderr
      //
      ::av_dump_format(avFormatContext, i, argv[1], false);

      // Find the first video stream
      //
      if (avFormatContext->streams[i]->codec->codec_type ==
AVMEDIA_TYPE_VIDEO)
      {
        streamIndex = i;
        break;
      }
    }

    // Check if no video stream was found
    //
    if (streamIndex == -1)
    {
      ::printf("No video stream found\n");
      return 1;
    }
  }
  else
  {
    ::av_strerror(ret, lastError, BUF_SIZE);
    ::printf("%s\n", lastError);
    return 1;
  }

  // Get pointer to the stream
  //
  AVStream *stream = avFormatContext->streams[streamIndex];

  // Set up the video codec
  //
  AVCodec *codec = 0;
  AVCodecContext *avCodecContext = 0;

  if (stream->codec && stream->codec->codec_id != CODEC_ID_NONE)
  {
    // Get a pointer to the codec context (what the stream knows about the
codec)
    //
    avCodecContext = stream->codec;

    // Find the appropriate video codec based on the stream info
    //
    codec = ::avcodec_find_decoder(avCodecContext->codec_id);
  }
  else
  {
    ::printf("Codec not found\n");
    return 1;
  }

  // Check if the codec was found
  //
  if (!codec || !avCodecContext)
  {
    printf("Could not find appropriate video codec\n");
    return 1;
  }

  // Open codec
  //
  ret = ::avcodec_open2(avCodecContext, codec, 0);
  if (ret != 0)
  {
    ::av_strerror(ret, lastError, BUF_SIZE);
    printf("Could not open codec: %s\n", lastError);
    return 1;
  }

  // Get video dimensions
  //
  int width = avCodecContext->width;
  int height = avCodecContext->height;

  // Allocate frames in original color space and RGBA space
  //
  AVFrame *frame = ::avcodec_alloc_frame();
  AVFrame *frameRgba = ::avcodec_alloc_frame();

  if (frame == 0 || frameRgba == 0)
  {
    printf("Could not allocate frame\n");
    return 1;
  }

  // Allocate RGBA buffer
  //
  std::vector<unsigned char> rgbaBuffer(::avpicture_get_size(PIX_FMT_RGBA,
width, height));

  // Assign appropriate parts of buffer to image planes in frameRgba
  //
  ret = ::avpicture_fill((AVPicture *)frameRgba, &(rgbaBuffer[0]),
PIX_FMT_RGBA, width, height);
  if (ret != rgbaBuffer.size())
  {
    printf("avpicture_fill returned incorrect # of bytes\n");
    return 1;
  }

  SwsContext *swsContextRgba = ::sws_getContext(
    width, height, avCodecContext->pix_fmt,
    width, height, PIX_FMT_RGBA,
    SWS_FAST_BILINEAR,
    0, 0, 0);

  if (!swsContextRgba)
  {
    printf("Error getting SwsContext\n", lastError);
    return 1;
  }

  AVPacket packet;
  bool eof = false;

  // Begin reading frames
  //
  for (int i = 0; !eof; ++i)
  {
    printf("Reading frame %d... ", i);

    int frameFinished = 0;

    while (frameFinished == 0)
    {
      int ret = ::av_read_frame(avFormatContext, &packet);
      if (ret == AVERROR_EOF)
      {
        // EOF has been reached
        //
        printf("EOF\n");
        eof = true;
        break;
      }
      else if (ret < 0 || packet.data == 0)
      {
        ::av_strerror(ret, lastError, BUF_SIZE);
        printf("%s\n", lastError);
        return 1;
      }

      // Is this a packet from our video stream?
      //
      if (packet.stream_index == streamIndex)
      {
        // Decode video frame
        //
        ret = ::avcodec_decode_video2(avCodecContext, frame,
&frameFinished, &packet);

        if (ret < 0)
        {
          ::av_strerror(ret, lastError, BUF_SIZE);
          printf("%s\n", lastError);
          return 1;
        }

        // Did we get a video frame?
        //
        if (frameFinished)
        {
          // Convert the image from its native format to RGBA
          // NOTE: assumes source format has 3 planes
          //
          uint8_t* const srcPtrs[3] = {frame->data[0], frame->data[1],
frame->data[2]};
          int srcStrides[3] = {frame->linesize[0], frame->linesize[1],
frame->linesize[2]};

          uint8_t* const dstPtrs[1] = {frameRgba->data[0]};
          int dstStrides[1] = {frameRgba->linesize[0]};

          ::sws_scale(swsContextRgba, srcPtrs, srcStrides, 0, height,
dstPtrs, dstStrides);

          // Free the packet that was allocated by av_read_frame
          //
          ::av_free_packet(&packet);
        }
      }

      // Free the packet that was allocated by av_read_frame
      //
      ::av_free_packet(&packet);
    }

    printf("done\n");
  }

  // Free structures
  //
  ::av_free(frame);
  ::av_free(frameRgba);
  ::avcodec_close(avCodecContext);
  ::av_close_input_file(avFormatContext);
  ::sws_freeContext(swsContextRgba);

  return 0;
}




On Mon, May 28, 2012 at 3:45 AM, Andrey Utkin <
[email protected]> wrote:

> 2012/5/25 Gunner Beckwith <[email protected]>:
> > Hello,
> >
> > My problem is that av_read_frame(fmtContext, &packet) seems to return
> > AVERROR_EOF before the end of the file is reached. This is for a .ts file
> > with h264-compressed video. An interesting pattern is that it seems to
> > always return exactly 10 frames too early, no matter the length
> (#frames) or
> > size (resolution) of the video file.
> >
> > I am creating the MPEG-TS file by running the ffmpeg command:
> >
> > ffmpeg -f lavfi -i testsrc=s=640x480:r=30:d=2 -vframes 60 -vcodec libx264
> > FfmpegInputStreamTest.ts
> >
> > E.g., this creates a 60-frame sequence at 640x480 resolution.
> av_read_frame
> > always returns EOF after 50 frames. If I do a 300-frame sequence at
> 320x240,
> > it returns EOF after 290 frames.
> >
> > Can anyone think of ideas what might be going on?
>
> Please share your app source code so we can investigate the issue the same way.
>
> --
> Andrey Utkin
> _______________________________________________
> libav-api mailing list
> [email protected]
> https://lists.libav.org/mailman/listinfo/libav-api
>
_______________________________________________
libav-api mailing list
[email protected]
https://lists.libav.org/mailman/listinfo/libav-api

Reply via email to