Hi Robert and Tanguy
I noticed you talking about video plugins. I've written a few image streams
that allow me to run video files and webcam streams, but haven't taken the
leap to creating a plug-in. I'd find it very interesting to look at the
source for this and see what steps are required (as manually attaching the
images to textures is a nightmare)
One thing I am keen to solve is the non-power-of-2 video issue. Currently I
have to create a texture at the nearest power of 2 up, then load the image
into the corner of it. I then use a TexMat to rescale the texcoords to
account for this (the reason is that pushing non-power-of-2 textures onto
graphics cards is slow, and webcams don't usually produce power-of-2 sizes).
Would a plugin automatically be able to insert this TexMat ?
Cheers
Thomas Hogarth (finally my first post :) )
PS
Also I've attached my rather limited ffmpeg stream for you guys to look at,
I never finished as I moved over to Directshow for windows and Quicktime for
OSX, but it does get the frames out
I guess you're using FFMPEG as it is cross-platform?
#pragma once
#include "VideoStream.h"
#//include "StdAfx.h"
extern "C" {
#define __STDC_CONSTANT_MACROS
#define __STDC_LIMIT_MACROS
#include <avformat.h>
#include <avcodec.h>
}
// FFMPEG-backed implementation of CVideoStream: opens a video file with
// libavformat/libavcodec, decodes frames and republishes them as a packed
// BGR image stream (decode loop lives in the accompanying .cpp).
class CFFMPEGStream : public CVideoStream
{
public:
// Default constructor; opens nothing.
CFFMPEGStream(void);
// Open 'file' for decoding; 'isRect' is forwarded to the CVideoStream base.
CFFMPEGStream(const char* file, bool isRect);
virtual ~CFFMPEGStream(void);
// NOTE(review): clone() default-constructs, so the clone has no open file.
virtual Object* clone() const { return new CFFMPEGStream; }
virtual bool isSameKindAs(const Object* obj) const {
return dynamic_cast<const CFFMPEGStream*>(obj) != NULL;
}
virtual const char* className() const { return "FFMPEGStream"; }
/// Start or continue stream.
virtual void StartStream() { CVideoStream::StartStream(); }
/// Stop stream at current position.
virtual void stop() { CVideoStream::stop(); }
/// Rewind stream to beginning.
virtual void rewind() {CVideoStream::rewind(); }
//Inherit from stream
//void run();
// Decode the next frame, convert it to BGR and publish it via setImage().
void UpdateStream();
protected:
int iFrame; //current frame number (incremented per converted frame)
AVFormatContext *pFormatCtx; //demuxer context for the open file
int i, videoStream; //scratch index / index of the video stream in the file
AVCodecContext *pCodecCtx; //decoder context of the selected stream
AVCodec *pCodec; //decoder found for the stream's codec id
AVFrame *pFrame; //frame decoded from the file (native pixel format)
AVFrame *pFrameRGB; //the same frame converted to packed BGR24
int numBytes; //size in bytes of the BGR frame buffer
uint8_t *buffer; //pixel storage that pFrameRGB's planes point into
float timeToNext; //NOTE(review): never written in this file -- unused?
int lastFrame; //NOTE(review): never written in this file -- unused?
int width,height; //frame dimensions taken from the codec context
public:
// Pull packets until one complete frame has been decoded into pFrame.
bool ReadNextFrame();
// Convert pFrame into pFrameRGB (BGR24); also advances iFrame.
bool ConvertFrameToRGB();
// Dump the current BGR frame to disk as a PPM file.
bool SaveCurrentFrame();
// Release all ffmpeg resources (buffer, frames, codec, format context).
void CleanUp();
};
#include ".\ffmpegstream.h"
#include <osg/Notify>
#include <osg/Timer>
#include <osg/Node>
#include <iostream>
using namespace std;
// Default constructor: opens nothing.
// BUG FIX: zero every member, in particular the resource pointers. The
// original left them uninitialized, so a default-constructed instance
// (e.g. the one produced by clone()) made the destructor run CleanUp()
// on garbage pointers -- undefined behaviour.
CFFMPEGStream::CFFMPEGStream(void)
{
iFrame=0;
pFormatCtx=NULL;
pCodecCtx=NULL;
pCodec=NULL;
pFrame=NULL;
pFrameRGB=NULL;
buffer=NULL;
numBytes=0;
i=0;
videoStream=-1;
timeToNext=0.0f;
lastFrame=0;
width=0;
height=0;
}
// Destructor: release all decoder/demuxer resources via CleanUp().
// NOTE(review): on a default-constructed instance the pointers CleanUp()
// touches are never initialized elsewhere in this file -- confirm they are
// zeroed before relying on this being safe.
CFFMPEGStream::~CFFMPEGStream(void)
{
CleanUp();
}
// Open 'file' with ffmpeg, pick the first video stream, open its decoder
// and allocate the decode + BGR conversion frames. On success an OSG image
// of the stream's size is allocated and the decode thread is started.
// BUG FIX: all resource pointers are now zeroed before any early 'return',
// so a failed open leaves the object in a state CleanUp()/the destructor
// can handle (the original returned with them uninitialized -- UB on
// destruction). Also added the missing NULL check on pFrame.
CFFMPEGStream::CFFMPEGStream(const char* file, bool isRect) :
CVideoStream(isRect)
{
iFrame=0; //start on frame 0
pFormatCtx=NULL;
pCodecCtx=NULL;
pCodec=NULL;
pFrame=NULL;
pFrameRGB=NULL;
buffer=NULL;
numBytes=0;
i=0;
videoStream=-1;
timeToNext=0.0f;
lastFrame=0;
width=0;
height=0;
// Register all formats and codecs for the ffmpeg lib
av_register_all();
// Open video file
if(av_open_input_file(&pFormatCtx, file, NULL, 0, NULL)!=0)
{
//AfxMessageBox("Failed to open FFMPEG file");
return; // Couldn't open file
}
// Retrieve stream information
if(av_find_stream_info(pFormatCtx)<0)
{
//AfxMessageBox("Failed to find FFMPEG stream information");
return; // Couldn't find stream information
}
// Dump information about file onto standard error
dump_format(pFormatCtx, 0, file, false);
// Find the first video stream
videoStream=-1;
for(i=0; i<pFormatCtx->nb_streams; i++)
if(pFormatCtx->streams[i]->codec->codec_type==CODEC_TYPE_VIDEO)
{
videoStream=i;
break;
}
if(videoStream==-1)
{
//AfxMessageBox("Failed to find FFMPEG stream");
return; // Didn't find a video stream
}
// Get a pointer to the codec context for the video stream
// (owned by pFormatCtx -- do not free separately)
pCodecCtx=pFormatCtx->streams[videoStream]->codec;
// Find the decoder for the video stream
pCodec=avcodec_find_decoder(pCodecCtx->codec_id);
if(pCodec==NULL)
{
//AfxMessageBox("Failed to open FFMPEG decoder");
return; // Codec not found
}
// Inform the codec that we can handle truncated bitstreams -- i.e.,
// bitstreams where frame boundaries can fall in the middle of packets
if(pCodec->capabilities & CODEC_CAP_TRUNCATED)
pCodecCtx->flags|=CODEC_FLAG_TRUNCATED;
// Open codec
if(avcodec_open(pCodecCtx, pCodec)<0)
{
//AfxMessageBox("Failed to open FFMPEG Codec");
return; // Could not open codec
}
// Frame delay in ms -- hard-coded to ~33fps; the real stream rate is not
// queried here. TODO(review): derive from the stream's time base.
m_timeToNextFrame = 30;
// Allocate the native-format decode frame and the BGR conversion frame
pFrame=avcodec_alloc_frame();
pFrameRGB=avcodec_alloc_frame();
if(pFrame==NULL || pFrameRGB==NULL)
{
//AfxMessageBox("Failed to allocate FFMPEG RGB buffer");
return;
}
// Determine required buffer size and allocate buffer
numBytes=avpicture_get_size(PIX_FMT_BGR24, pCodecCtx->width,
pCodecCtx->height);
buffer=new uint8_t[numBytes];
// Assign appropriate parts of buffer to image planes in pFrameRGB
avpicture_fill((AVPicture *)pFrameRGB, buffer, PIX_FMT_BGR24,
pCodecCtx->width, pCodecCtx->height);
width = pCodecCtx->width;
height = pCodecCtx->height;
// create an image that same size (packing set to 1)
this->allocateImage(width, height, 1, GL_BGR, GL_UNSIGNED_BYTE, 1);
startThread();
//ffmpeg comes out upside down so set the yflipped to true by default
//m_yFlipped = true;
m_isValid=true;
}
// Decode the next frame, convert it to BGR and publish it as the image
// data, all under the stream's lock.
// BUG FIX: the two failure paths originally returned WITHOUT calling
// unlock(), leaving the mutex held forever (deadlock on the next update).
void CFFMPEGStream::UpdateStream()
{
this->lock();
if(!this->ReadNextFrame())
{
this->stop();
this->unlock();
return;
}
if(!ConvertFrameToRGB())
{
this->stop();
this->unlock();
return;
}
// this->flipVertical();
//set the texture
this->setImage( width, height,1, GL_BGR, GL_BGR,
GL_UNSIGNED_BYTE,
this->pFrameRGB->data[0], osg::Image::NO_DELETE, 1);
//Sleep(30);
this->unlock();
}
//
// created 23/07/2007 by Thomas Hogarth
// loop till the next frame is found for our stream.
// then store frame in the pFrame Structure
//
// Read packets from the format context and feed them to the decoder until
// one complete video frame has been decoded into pFrame.
// Returns true when a full frame is ready, false on decode error or when
// the file is exhausted without completing a frame.
// NOTE(review): the 'static' locals below persist ACROSS ALL instances and
// calls -- two CFFMPEGStream objects decoding at once would corrupt each
// other's packet state. They should be member variables; confirm only one
// stream is ever active before reusing this class.
bool CFFMPEGStream::ReadNextFrame()
{
static AVPacket packet;
static int bytesRemaining=0;
static uint8_t *rawData;
static bool fFirstTime=true;
int bytesDecoded;
int frameFinished;
// First time we're called, set packet.data to NULL to indicate it
// doesn't have to be freed
if(fFirstTime)
{
fFirstTime=false;
packet.data=NULL;
}
// Decode packets until we have decoded a complete frame
while(true)
{
// Work on the current packet until we have decoded all of it
while(bytesRemaining > 0)
{
// Decode the next chunk of data
bytesDecoded=avcodec_decode_video(pCodecCtx, pFrame,
&frameFinished, rawData, bytesRemaining);
// Was there an error?
if(bytesDecoded < 0)
{
fprintf(stderr, "Error while decoding frame\n");
return false;
}
// Advance past the bytes the decoder consumed
bytesRemaining-=bytesDecoded;
rawData+=bytesDecoded;
// Did we finish the current frame? Then we can return
if(frameFinished)
return true;
}
// Read the next packet, skipping all packets that aren't for this
// stream
do
{
// Free old packet
if(packet.data!=NULL)
av_free_packet(&packet);
// Read new packet; a negative result means end of file / read error,
// so jump out to flush the decoder's buffered data
if(av_read_packet(pFormatCtx, &packet)<0)
goto loop_exit;
} while(packet.stream_index!=videoStream);
bytesRemaining=packet.size;
rawData=packet.data;
}
loop_exit:
// Decode the rest of the last frame (flush any data the decoder buffered)
bytesDecoded=avcodec_decode_video(pCodecCtx, pFrame, &frameFinished,
rawData, bytesRemaining);
// Free last packet
if(packet.data!=NULL)
av_free_packet(&packet);
return frameFinished!=0;
}
//
// created 23/07/2007 by Thomas Hogarth
// Converts the current pFrame to RGB format and stores it in the pFrameRGB
// variable
//
bool CFFMPEGStream::ConvertFrameToRGB()
{
// Frame dimensions come straight from the decoder context.
const int frameWidth = pCodecCtx->width;
const int frameHeight = pCodecCtx->height;
// Count this as the next frame (SaveCurrentFrame uses iFrame in its name).
iFrame = iFrame + 1;
// Let ffmpeg convert the decoder's native pixel format to packed BGR24.
img_convert((AVPicture *)pFrameRGB, PIX_FMT_BGR24,
(AVPicture *)pFrame, pCodecCtx->pix_fmt,
frameWidth, frameHeight);
return true;
}
//
// created 23/07/2007 by Thomas Hogarth
// Save the current rgb frame to disk
//
// Save the current converted frame to disk as a binary PPM (P6).
// Returns false if the output file could not be opened.
// NOTE(review): pFrameRGB holds BGR24, so the PPM channels will be
// swapped relative to true RGB -- intentional? confirm with callers.
bool CFFMPEGStream::SaveCurrentFrame()
{
FILE *pFile;
char szFilename[32];
int y;
// BUG FIX: use snprintf instead of sprintf so the fixed 32-byte name
// buffer can never be overflowed by a large frame number.
snprintf(szFilename, sizeof(szFilename), "c:/frame%d.ppm", iFrame);
pFile=fopen(szFilename, "wb");
if(pFile==NULL)
return false;
// Write header: magic number, dimensions, max channel value
fprintf(pFile, "P6\n%d %d\n255\n", width, height);
// Write pixel data row by row (linesize may include padding bytes)
for(y=0; y<height; y++)
fwrite(pFrameRGB->data[0]+y*pFrameRGB->linesize[0], 1, width*3, pFile);
// Close file
fclose(pFile);
return true;
}
//
// created 23/07/2007 by Thomas Hogarth
// cleanup memory and release codec handles etc
//
void CFFMPEGStream::CleanUp()
{
// Free the RGB image
delete [] buffer;
av_free(pFrameRGB);
// Free the YUV frame
av_free(pFrame);
// Close the codec
avcodec_close(pCodecCtx);
// Close the video file
av_close_input_file(pFormatCtx);
}_______________________________________________
osg-users mailing list
[email protected]
http://lists.openscenegraph.org/listinfo.cgi/osg-users-openscenegraph.org