On 24/3/09 2:44 AM, Robert Osfield wrote:
    Thanks David, changes look sensible, I've tested them against ffmpeg
    installed from Ubuntu repositories and svn/trunk of ffmpeg and both
    are picked up fine.  Fingers crossed things will continue working
    fine under other platforms as well.

I got the following type error from gcc 4.0.1 on OS X 10.5.6:
/Users/uli/Projects/osg/OpenSceneGraph/src/osgPlugins/ffmpeg/FFmpegDecoderVideo.cpp: In member function ‘int osgFFmpeg::FFmpegDecoderVideo::convert(AVPicture*, int, AVPicture*, int, int, int)’: /Users/uli/Projects/osg/OpenSceneGraph/src/osgPlugins/ffmpeg/FFmpegDecoderVideo.cpp:245: error: invalid conversion from ‘int’ to ‘PixelFormat’ /Users/uli/Projects/osg/OpenSceneGraph/src/osgPlugins/ffmpeg/FFmpegDecoderVideo.cpp:245: error: initializing argument 3 of ‘SwsContext* sws_getContext(int, int, PixelFormat, int, int, PixelFormat, int, SwsFilter*, SwsFilter*, double*)’

It expects 'src_pix_fmt' and 'dst_pix_fmt' to be of type 'PixelFormat' rather than int. The attached cast fixes this (for me).

I've also added Matroska video to the list of supported extensions:

Index: ../src/osgPlugins/ffmpeg/FFmpegDecoderVideo.cpp
===================================================================
--- ../src/osgPlugins/ffmpeg/FFmpegDecoderVideo.cpp     (revision 9964)
+++ ../src/osgPlugins/ffmpeg/FFmpegDecoderVideo.cpp     (working copy)
@@ -240,8 +240,8 @@
 #ifdef USE_SWSCALE
     if (m_swscale_ctx==0)
     {
-        m_swscale_ctx = sws_getContext(src_width, src_height, src_pix_fmt,
- src_width, src_height, dst_pix_fmt,
+        m_swscale_ctx = sws_getContext(src_width, src_height, (PixelFormat) 
src_pix_fmt,
+                                      src_width, src_height, (PixelFormat) 
dst_pix_fmt,
                                       /*SWS_BILINEAR*/ SWS_BICUBIC, NULL, 
NULL, NULL);
     }

Index: ../src/osgPlugins/ffmpeg/ReaderWriterFFmpeg.cpp
===================================================================
--- ../src/osgPlugins/ffmpeg/ReaderWriterFFmpeg.cpp     (revision 9964)
+++ ../src/osgPlugins/ffmpeg/ReaderWriterFFmpeg.cpp     (working copy)
@@ -36,7 +36,8 @@
         supportsExtension("ogg", "Theora movie format");
         supportsExtension("mpg", "Mpeg movie format");
         supportsExtension("mpv", "Mpeg movie format");
-        supportsExtension("wmv", "");
+        supportsExtension("wmv", "Windows Media Video format");
+        supportsExtension("mkv", "Matroska");

         // Register all FFmpeg formats/codecs
         av_register_all();

Cheers,
/ulrich
#include "FFmpegDecoderVideo.hpp"

#include <osg/Notify>
#include <osg/Timer>

#include <stdexcept>
#include <string.h>

namespace osgFFmpeg {

// Construct a video decoder bound to the shared packet queue and clock
// source.  All decoding state starts out empty/null; the actual codec
// setup happens later in open().
FFmpegDecoderVideo::FFmpegDecoderVideo(PacketQueue & packets, FFmpegClocks & clocks) :
    m_packets(packets),
    m_clocks(clocks),
    m_stream(0),
    m_context(0),
    m_codec(0),
    m_packet_data(0),
    m_bytes_remaining(0),
    m_packet_pts(AV_NOPTS_VALUE),   // no PTS known until the first packet arrives
    m_writeBuffer(0),               // index of the RGBA buffer currently written to
    m_user_data(0),
    m_publish_func(0),
    m_exit(false)
#ifdef USE_SWSCALE
    ,m_swscale_ctx(0)               // lazily created on first convert()
#endif
{

}



// Stop the decoding thread (if still running) and release the software
// scaler context.  join() guarantees the thread is no longer touching
// member state before the members themselves are destroyed.
FFmpegDecoderVideo::~FFmpegDecoderVideo()
{
    osg::notify(osg::INFO)<<"Destructing FFmpegDecoderVideo..."<<std::endl;


    if (isRunning())
    {
        m_exit = true;   // signal decodeLoop() to return
#if 0        
        while(isRunning()) { OpenThreads::YieldCurrentThread(); }
#else        
        join();          // block until the decode thread has fully exited
#endif
    }
    
#ifdef USE_SWSCALE
    // Free the lazily-created swscale context (see convert()).
    if (m_swscale_ctx)
    {
        sws_freeContext(m_swscale_ctx);
        m_swscale_ctx = 0;
    }
#endif

    osg::notify(osg::INFO)<<"Destructed FFmpegDecoderVideo"<<std::endl;
}



// Bind the decoder to 'stream': look up and open the matching codec,
// allocate the decode frame and the (double-buffered) RGBA conversion
// frame, and install the PTS-tracking get_buffer/release_buffer callbacks.
// Throws std::runtime_error if no decoder is found or the codec fails to open.
void FFmpegDecoderVideo::open(AVStream * const stream)
{
    m_stream = stream;
    m_context = stream->codec;

    // Trust the video size given at this point
    // (avcodec_open seems to sometimes return a 0x0 size)
    m_width = m_context->width;
    m_height = m_context->height;
    findAspectRatio();

    // Find out whether we support Alpha channel
    m_alpha_channel = (m_context->pix_fmt == PIX_FMT_YUVA420P);

    // Find out the framerate
    m_frame_rate = av_q2d(stream->r_frame_rate);

    // Find the decoder for the video stream
    m_codec = avcodec_find_decoder(m_context->codec_id);

    if (m_codec == 0)
        throw std::runtime_error("avcodec_find_decoder() failed");

    // Inform the codec that we can handle truncated bitstreams
    //if (p_codec->capabilities & CODEC_CAP_TRUNCATED)
    //    m_context->flags |= CODEC_FLAG_TRUNCATED;

    // Open codec
    if (avcodec_open(m_context, m_codec) < 0)
        throw std::runtime_error("avcodec_open() failed");

    // Allocate video frame
    m_frame.reset(avcodec_alloc_frame());

    // Allocate converted RGB frame; two equally-sized buffers are kept so
    // one image can be published while the next one is being written.
    m_frame_rgba.reset(avcodec_alloc_frame());
    m_buffer_rgba[0].resize(avpicture_get_size(PIX_FMT_RGB32, width(), height()));
    m_buffer_rgba[1].resize(m_buffer_rgba[0].size());

    // Assign appropriate parts of the buffer to image planes in m_frame_rgba
    avpicture_fill((AVPicture *) (m_frame_rgba).get(), &(m_buffer_rgba[0])[0], PIX_FMT_RGB32, width(), height());

    // Override get_buffer()/release_buffer() from codec context in order to retrieve the PTS of each frame.
    m_context->opaque = this;
    m_context->get_buffer = getBuffer;
    m_context->release_buffer = releaseBuffer;
}


// Ask the decoder thread to stop; optionally spin (yielding) until it has.
void FFmpegDecoderVideo::close(bool waitForThreadToExit)
{
    m_exit = true;

    if (! waitForThreadToExit)
        return;

    // Busy-wait, yielding the CPU, until the decode thread has finished.
    while (isRunning())
        OpenThreads::Thread::YieldCurrentThread();
}


void FFmpegDecoderVideo::run()
{
    try
    {
        decodeLoop();
    }

    catch (const std::exception & error)
    {
        osg::notify(osg::WARN) << "FFmpegDecoderVideo::run : " << error.what() 
<< std::endl;
    }

    catch (...)
    {
        osg::notify(osg::WARN) << "FFmpegDecoderVideo::run : unhandled 
exception" << std::endl;
    }
}



// Main decode loop, run on the decoder thread.  Pops packets from the
// shared queue, feeds them to avcodec_decode_video(), and whenever a full
// frame is completed synchronises it against the clocks and publishes it.
// Loops until m_exit is set.
void FFmpegDecoderVideo::decodeLoop()
{
    FFmpegPacket packet;
    double pts;

    while (! m_exit)
    {
        // Work on the current packet until we have decoded all of it

        while (m_bytes_remaining > 0)
        {
            // Save global PTS to be stored in m_frame via getBuffer()

            m_packet_pts = packet.packet.pts;

            // Decode video frame

            int frame_finished = 0;

            const int bytes_decoded = avcodec_decode_video(m_context, m_frame.get(), &frame_finished, m_packet_data, m_bytes_remaining);

            if (bytes_decoded < 0)
                throw std::runtime_error("avcodec_decode_video failed()");

            m_bytes_remaining -= bytes_decoded;
            m_packet_data += bytes_decoded;

            // Find out the frame pts: prefer the PTS stashed by getBuffer()
            // in m_frame->opaque when the packet carries no DTS, else use
            // the packet's DTS, else fall back to 0.

            if (packet.packet.dts == AV_NOPTS_VALUE && m_frame->opaque != 0 && *reinterpret_cast<const int64_t*>(m_frame->opaque) != AV_NOPTS_VALUE)
            {
                pts = *reinterpret_cast<const int64_t*>(m_frame->opaque);
            }
            else if (packet.packet.dts != AV_NOPTS_VALUE)
            {
                pts = packet.packet.dts;
            }
            else
            {
                pts = 0;
            }

            // Convert from stream time-base units to seconds.
            pts *= av_q2d(m_stream->time_base);

            // Publish the frame if we have decoded a complete frame
            if (frame_finished)
            {
                const double synched_pts = m_clocks.videoSynchClock(m_frame.get(), av_q2d(m_stream->time_base), pts);
                const double frame_delay = m_clocks.videoRefreshSchedule(synched_pts);

                publishFrame(frame_delay);
            }
        }

        // Get the next packet

        pts = 0;

        if (packet.valid())
            packet.clear();

        // Timed pop (timeout = 10, presumably milliseconds -- TODO confirm
        // against PacketQueue) so m_exit is re-checked regularly even when
        // no packets arrive.
        bool is_empty = true;
        packet = m_packets.timedPop(is_empty, 10);

        if (! is_empty)
        {
            if (packet.type == FFmpegPacket::PACKET_DATA)
            {
                m_bytes_remaining = packet.packet.size;
                m_packet_data = packet.packet.data;
            }
            else if (packet.type == FFmpegPacket::PACKET_FLUSH)
            {
                // A flush was requested: drop codec state and rewind the
                // video clock.
                avcodec_flush_buffers(m_context);
                m_clocks.rewindVideo();
            }
        }
    }
}



// Determine the pixel (sample) aspect ratio of the stream, defaulting to
// square pixels (1.0) when the codec reports none or a non-positive value.
void FFmpegDecoderVideo::findAspectRatio()
{
    float aspect = (m_context->sample_aspect_ratio.num != 0) ?
        float(av_q2d(m_context->sample_aspect_ratio)) : 0.0f;

    m_pixel_aspect_ratio = (aspect > 0.0f) ? aspect : 1.0f;
}

// Convert a decoded picture from 'src_pix_fmt' to 'dst_pix_fmt' at the
// same width/height, using libswscale when available (context created
// lazily and cached in m_swscale_ctx) or the legacy img_convert() otherwise.
// Returns the result code of the underlying conversion routine, or -1 if
// the swscale context could not be created.
int FFmpegDecoderVideo::convert(AVPicture *dst, int dst_pix_fmt, AVPicture *src,
            int src_pix_fmt, int src_width, int src_height)
{
    osg::Timer_t startTick = osg::Timer::instance()->tick();
#ifdef USE_SWSCALE
    if (m_swscale_ctx==0)
    {
        // sws_getContext() takes PixelFormat, not int; use an explicit
        // static_cast (some compilers, e.g. gcc 4.0.1, reject the implicit
        // int -> enum conversion).
        m_swscale_ctx = sws_getContext(src_width, src_height, static_cast<PixelFormat>(src_pix_fmt),
                                       src_width, src_height, static_cast<PixelFormat>(dst_pix_fmt),
                                       /*SWS_BILINEAR*/ SWS_BICUBIC, NULL, NULL, NULL);

        // sws_getContext() can fail; calling sws_scale() with a null
        // context would crash, so bail out instead.
        if (m_swscale_ctx == 0)
        {
            osg::notify(osg::WARN)<<"sws_getContext() failed"<<std::endl;
            return -1;
        }
    }


    osg::notify(osg::INFO)<<"Using sws_scale ";

    int result =  sws_scale(m_swscale_ctx,
                            (src->data), (src->linesize), 0, src_height,
                            (dst->data), (dst->linesize));
#else

    osg::notify(osg::INFO)<<"Using img_convert ";

    int result = img_convert(dst, dst_pix_fmt, src,
                             src_pix_fmt, src_width, src_height);

#endif
    osg::Timer_t endTick = osg::Timer::instance()->tick();
    osg::notify(osg::INFO)<<" time = "<<osg::Timer::instance()->delta_m(startTick,endTick)<<"ms"<<std::endl;

    return result;
}


// Convert the freshly decoded frame to RGBA into the current write buffer,
// wait out 'delay' seconds (in chunks, so m_exit can abort the wait), then
// flip the double buffer and hand the image to the registered publish
// callback.  Frames that are already more than 10ms late are dropped.
void FFmpegDecoderVideo::publishFrame(const double delay)
{
    // If no publishing function, just ignore the frame
    if (m_publish_func == 0)
        return;

    // If the display delay is too small, we better skip the frame.
    if (delay < -0.010)
        return;
        
    AVPicture * const src = (AVPicture *) m_frame.get();
    AVPicture * const dst = (AVPicture *) m_frame_rgba.get();

    // Assign appropriate parts of the buffer to image planes in m_frame_rgba
    // (m_writeBuffer selects which of the two RGBA buffers to fill)
    avpicture_fill((AVPicture *) (m_frame_rgba).get(), &(m_buffer_rgba[m_writeBuffer])[0], PIX_FMT_RGB32, width(), height());

    // Convert YUVA420p (i.e. YUV420p plus alpha channel) using our own routine

    if (m_context->pix_fmt == PIX_FMT_YUVA420P)
        yuva420pToRgba(dst, src, width(), height());
    else
        convert(dst, PIX_FMT_RGB32, src, m_context->pix_fmt, width(), height());

    // Wait 'delay' seconds before publishing the picture.
    int i_delay = static_cast<int>(delay * 1000000 + 0.5);   // microseconds, rounded

    while (i_delay > 1000)
    {
        // Avoid infinite/very long loops
        if (m_exit)
            return;

        const int micro_delay = (std::min)(1000000, i_delay);   // sleep at most 1s per iteration

        OpenThreads::Thread::microSleep(micro_delay);

        i_delay -= micro_delay;
    }

    // Flip the double buffer so the next frame is written to the other one.
    m_writeBuffer = 1-m_writeBuffer;

    m_publish_func(* this, m_user_data);
}



// Convert a YUVA420P picture to RGBA: run the YUV planes through the
// generic convert() path first, then copy the source alpha plane into the
// alpha byte (offset 3) of every 32-bit destination pixel.
void FFmpegDecoderVideo::yuva420pToRgba(AVPicture * const dst, AVPicture * const src, int width, int height)
{
    convert(dst, PIX_FMT_RGB32, src, m_context->pix_fmt, width, height);

    const size_t bytes_per_pixel = 4;

    // Destination alpha bytes are written contiguously, one per pixel.
    uint8_t * alpha_out = dst->data[0] + 3;

    for (int row = 0; row < height; ++row)
    {
        // Source alpha plane lives in plane 3, addressed row by row.
        const uint8_t * alpha_in = src->data[3] + row * src->linesize[3];

        for (int col = 0; col < width; ++col)
        {
            *alpha_out = *alpha_in;
            alpha_out += bytes_per_pixel;
            ++alpha_in;
        }
    }
}



// Codec get_buffer() override: allocate the frame via the default
// allocator, then stash a heap copy of the current packet PTS
// (m_packet_pts) in picture->opaque so decodeLoop() can recover the
// frame's true PTS.  The copy is freed again in releaseBuffer().
int FFmpegDecoderVideo::getBuffer(AVCodecContext * const context, AVFrame * const picture)
{
    const FFmpegDecoderVideo * const this_ = reinterpret_cast<const FFmpegDecoderVideo*>(context->opaque);

    const int result = avcodec_default_get_buffer(context, picture);
    // NOTE(review): av_malloc() result is dereferenced without a NULL
    // check -- would crash on allocation failure; consider guarding.
    int64_t * p_pts = reinterpret_cast<int64_t*>( av_malloc(sizeof(int64_t)) );

    *p_pts = this_->m_packet_pts;
    picture->opaque = p_pts;

    return result;
}



// Codec release_buffer() override, paired with getBuffer(): free the PTS
// value stashed in picture->opaque, then let the default release routine
// reclaim the frame itself.
void FFmpegDecoderVideo::releaseBuffer(AVCodecContext * const context, AVFrame * const picture)
{
    if (picture)
    {
        // av_freep() frees the allocation and resets picture->opaque to 0.
        av_freep(&picture->opaque);
    }

    avcodec_default_release_buffer(context, picture);
}



} // namespace osgFFmpeg
/* -*-c++-*- OpenSceneGraph - Copyright (C) 1998-2006 Robert Osfield 
 *
 * This library is open source and may be redistributed and/or modified under  
 * the terms of the OpenSceneGraph Public License (OSGPL) version 0.0 or 
 * (at your option) any later version.  The full license is in LICENSE file
 * included with this distribution, and on the openscenegraph.org website.
 * 
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the 
 * OpenSceneGraph Public License for more details.
*/

#include <osgDB/Registry>
#include <osgDB/FileNameUtils>
#include <osgDB/FileUtils>

#include "FFmpegHeaders.hpp"
#include "FFmpegImageStream.hpp"



/** Implementation heavily inspired by http://www.dranger.com/ffmpeg/ */

// osgDB plugin that reads video files (and http streams / v4l devices)
// through FFmpeg, exposing them as osg::ImageStream objects.
class ReaderWriterFFmpeg : public osgDB::ReaderWriter
{
public:

    ReaderWriterFFmpeg()
    {
        supportsProtocol("http","Read video/audio from http using ffmpeg.");

        supportsExtension("avi", "");
        supportsExtension("flv", "");
        supportsExtension("mov", "");
        supportsExtension("ogg", "Theora movie format");
        supportsExtension("mpg", "Mpeg movie format");
        supportsExtension("mpv", "Mpeg movie format");
        supportsExtension("wmv", "Windows Media Video format");
        supportsExtension("mkv", "Matroska");

        // Register all FFmpeg formats/codecs
        av_register_all();
    }

    virtual ~ReaderWriterFFmpeg()
    {

    }

    virtual const char * className() const
    {
        return "ReaderWriterFFmpeg";
    }

    // Open 'filename' as an image stream.  Device paths ("/dev/...") skip
    // the extension check and data-file search and go straight to ffmpeg;
    // server addresses are passed through as-is; everything else is looked
    // up via the osgDB data-file search paths.
    virtual ReadResult readImage(const std::string & filename, const osgDB::ReaderWriter::Options * options) const
    {
        if (filename.compare(0, 5, "/dev/")==0)
        {
            return readImageStream(filename, options);
        }
    
        const std::string ext = osgDB::getLowerCaseFileExtension(filename);
        if (! acceptsExtension(ext))
            return ReadResult::FILE_NOT_HANDLED;

        const std::string path = osgDB::containsServerAddress(filename) ?
            filename :
            osgDB::findDataFile(filename, options);

        if (path.empty())
            return ReadResult::FILE_NOT_FOUND;

        // Bug fix: open the resolved path, not the original filename --
        // previously the result of findDataFile() was discarded, so files
        // located through the data-file search paths failed to open.
        return readImageStream(path, options);
    }
    
    // Create an FFmpegImageStream for 'filename'; returns FILE_NOT_HANDLED
    // if ffmpeg cannot open it, otherwise the ready-to-use stream.
    ReadResult readImageStream(const std::string& filename, const osgDB::ReaderWriter::Options * options) const
    {
        osg::notify(osg::INFO) << "ReaderWriterFFmpeg::readImage " << filename << std::endl;

        osg::ref_ptr<osgFFmpeg::FFmpegImageStream> image_stream(new osgFFmpeg::FFmpegImageStream);

        if (! image_stream->open(filename))
            return ReadResult::FILE_NOT_HANDLED;

        return image_stream.release();
    }

private:

};



REGISTER_OSGPLUGIN(ffmpeg, ReaderWriterFFmpeg)
_______________________________________________
osg-submissions mailing list
[email protected]
http://lists.openscenegraph.org/listinfo.cgi/osg-submissions-openscenegraph.org

Reply via email to