Dear Ronald, thanks for your attention. I'm trying to capture and save
streams (MJPEG, MPEG-4, H.264) from IP cameras (Sony, Panasonic, Axis) over
the RTSP protocol. I started by modifying the original output_example.c; the
sample works well with the HTTP protocol, but when I try to get streams over
RTSP it doesn't work.
I tried this with ffplay and ffmpeg using "?tcp" and "-v 2", but the result
is always the same (Error while parsing header):
ffplay rtsp://admin:[email protected]/nphMpeg4/nil-640x48?tcp (Error
while parsing header)
ffplay rtsp://admin:[email protected]?tcp/nphMpeg4/nil-640x48 (Error
while parsing header)
and
ffmpeg -v 2 -i rtsp://admin:[email protected]?tcp/nphMpeg4/nil-640x48
(Error while parsing header)
I debugged my sample code and the error comes from the
"av_open_input_file" function, because it returns error code -22
(AVERROR(EINVAL)). Maybe I need to recompile the libav* libraries with other options?
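For completeness, here is a minimal sketch of the failing call, stripped out of
my class (same 2010-era libavformat API as the attached code; the URL is just
the camera address from the ffplay tests above, and the file name repro.cpp is
only a label):

//repro.cpp -- minimal sketch of the call that returns -22 for me
//build roughly like the attached sample: g++ repro.cpp -lavformat -lavcodec
#ifndef INT64_C
#define INT64_C(c) c ## LL //usual workaround so the FFmpeg headers compile as C++
#endif
extern "C"{
#include <libavformat/avformat.h>
}
#include <stdio.h>
int main()
{
AVFormatContext* ctx = NULL;
av_register_all(); //registers demuxers and the rtsp/rtp protocol handlers
//a NULL input format lets libavformat probe the input; the attached class
//forces av_find_input_format("rtsp") instead
int ret = av_open_input_file( &ctx, "rtsp://admin:[email protected]/nphMpeg4/nil-640x48", NULL, 0, NULL );
printf( "av_open_input_file returned %d\n", ret ); //-22 == AVERROR(EINVAL)
if( ret == 0 )
av_close_input_file( ctx );
return 0;
}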
PS: my sample code is attached.
thanks
German
On Mon, Mar 15, 2010 at 11:59 AM, Ronald S. Bultje <[email protected]> wrote:
> Hi,
>
> On Mon, Mar 15, 2010 at 11:54 AM, German Vallejos C.
> <[email protected]> wrote:
>> ffplay rtsp://admin:[email protected]/nphMpeg4/nil-640x48 (Error
>> while parsing header)
>
> Please provide COMPLETE output. Also use -v2 to get more output from
> the RTSP stack.
>
>> does ffmepg (libav* libraries ) support RTSP protocol ..?
>
> Yes. But you need to give more information to help us help you. We
> can't help you, we have no idea what you're doing.
>
> For example, to your previous email, I asked you to add "?tcp" to the
> URI, you never replied. Did that fix it? Did anything change? Etc.
>
> Ronald
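//--- attachment: main.cpp ---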
//g++ *.cpp -lavcodec -lavformat -lswscale -lhighgui -lavdevice
#include "CCapture.h"
int main()
{
CCapture* Capt = NULL;
Capt = new CCapture();
Capt->setEncodeOptions( "mpeg4", "10", "768000", "640x480" );
Capt->open( "rtsp://sil-lab:[email protected]/nphMpeg4/nil-640x480" );
Capt->encode( "test.mp4" );
if( Capt != NULL ){
delete( Capt );
Capt = NULL;
}
return 0;
}
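//--- attachment: CCapture.cpp ---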
#include "CCapture.h"
CCapture::CCapture()
{
m_capture = NULL;
//AVFrame *m_picture, *m_tmp_picture;
m_video_outbuf = NULL;
m_frame_count = 0;
m_video_outbuf_size = 0;
//encode options
m_codec_name = NULL;
//AVRational m_frame_rate;
m_bit_rate = 0;
m_frame_width = 0;
m_frame_height = 0;
m_file_iformat = NULL;
m_file_oformat = NULL;
m_frame_aspect_ratio = 0;
//PixelFormat m_pix_fmt;
initialize();
}
CCapture::~CCapture()
{
finalize();
m_file_iformat = NULL;
m_file_oformat = NULL;
}
void CCapture::initialize()
{
//initialze input file
m_capture = (Capture *)malloc( (1)*sizeof(Capture) );
if( m_capture == NULL) return;
m_capture->av_frame = NULL;
m_capture->av_format_ctx = NULL;
m_capture->video_stream = -1;
m_capture->av_codec_ctx = NULL;
m_capture->av_codec = NULL;
m_capture->eof = 1;
}
void CCapture::finalize()
{
if( m_capture != NULL )
{
m_capture->eof = 1;
//free the YUV frame
if( m_capture->av_frame != NULL ){
av_free( m_capture->av_frame );
m_capture->av_frame = NULL;
}
//close the codec
if (m_capture->av_codec_ctx != NULL) {
avcodec_close(m_capture->av_codec_ctx);
m_capture->av_codec_ctx = NULL;
}
//close the video file
if( m_capture->av_format_ctx != NULL ) {
av_close_input_file(m_capture->av_format_ctx);
m_capture->av_format_ctx = NULL;
}
//cancel the video stream
m_capture->video_stream = -1;
//release m_capture
free( m_capture );
m_capture = NULL;
}
}
void CCapture::open( const char* url )
{
int i;
//register all the codecs, parsers and bitstream filters which were enabled at configuration time
avcodec_register_all();
//initializes libavformat and registers all the muxers, demuxers and protocols
av_register_all();
//setMJPEG();
setRTSP();
//open video file
//if( av_open_input_file(&(m_capture->av_format_ctx), url, NULL, 0, NULL) != 0 ){
int ret = av_open_input_file(&(m_capture->av_format_ctx), url, m_file_iformat, 0, NULL);
printError( "", ret );
printf("return av_open_input_file %d\n", ret );
if( ret != 0 ){
printf( "couldn't open url %s\n", url );
exit(1);
}
//retrieve stream information
if( av_find_stream_info( m_capture->av_format_ctx ) < 0 ){
printf( "couldn't find stream information\n" );
return;
}
//dump information about file onto standard error (show information input file)
dump_format( m_capture->av_format_ctx, 0, url, 0 );
//find the first video stream
for( i = 0; i < (int)m_capture->av_format_ctx->nb_streams; i++ )
{
if( m_capture->av_format_ctx->streams[i]->codec->codec_type == CODEC_TYPE_VIDEO ){
m_capture->video_stream = i;
break;
}
}
if( m_capture->video_stream == -1 ){
printf( "didn't find a video stream\n" );
return;
}
//get a pointer to the codec context for the video stream
m_capture->av_codec_ctx = m_capture->av_format_ctx->streams[m_capture->video_stream]->codec;
printf("tipo pixel format entrada ........ %d\n", m_capture->av_codec_ctx->pix_fmt );
//find the decoder for the video stream
m_capture->av_codec = avcodec_find_decoder( m_capture->av_codec_ctx->codec_id );
if( m_capture->av_codec == NULL ){
printf( "codec not found\n" );
return;
}
//open codec
if( avcodec_open(m_capture->av_codec_ctx, m_capture->av_codec) < 0 ){
printf( "could not open codec\n" );
return;
}
if( m_capture->av_format_ctx->streams[m_capture->video_stream]->sample_aspect_ratio.num )//gvc
m_frame_aspect_ratio = av_q2d( m_capture->av_format_ctx->streams[m_capture->video_stream]->sample_aspect_ratio );
else
m_frame_aspect_ratio = av_q2d( m_capture->av_codec_ctx->sample_aspect_ratio );//gvc
//m_frame_rate.num = m_capture->av_format_ctx->streams[m_capture->video_stream]->r_frame_rate.num;//gvc
//m_frame_rate.den = m_capture->av_format_ctx->streams[m_capture->video_stream]->r_frame_rate.den;//gvc
//GVC m_pix_fmt = m_capture->av_codec_ctx->pix_fmt; //gvc
//hack to correct wrong frame rates that seem to be generated by some codecs
if( m_capture->av_codec_ctx->time_base.den > 1000 && m_capture->av_codec_ctx->time_base.num == 1)
m_capture->av_codec_ctx->time_base.num = 1000;
//allocate video frame to av_frame
m_capture->av_frame = avcodec_alloc_frame();
if( m_capture->av_frame == NULL){
perror("avcodec_alloc_frame");
return;
}
}
AVFrame* CCapture::getAVFrame()
{
AVPacket packet;
int frame_finished;
m_capture->eof = 1;
//read frames
while( av_read_frame(m_capture->av_format_ctx, &packet) >= 0 )
{
printf(".....................................................\n");
//is this a packet from the video stream..?
if( packet.stream_index == m_capture->video_stream)
{
//decode video frame
avcodec_decode_video( m_capture->av_codec_ctx, m_capture->av_frame, &frame_finished, packet.data, packet.size );
//free the packet that was allocated by 'av_read_frame' function
av_free_packet( &packet );
//did we get a video frame?
if( frame_finished )
{
if( m_capture->av_frame->data == NULL )
return NULL;
m_capture->eof = 0;
return m_capture->av_frame;
}
}else{//process audio etc...
//free the packet that was allocated by 'av_read_frame' function
av_free_packet( &packet );
}
}
return NULL;
}
Capture* CCapture::getCapture()
{
return m_capture;
}
void CCapture::writeVideoFrame( AVFormatContext *oc, AVStream *st )
{
int out_size, ret;
AVCodecContext *c;
static struct SwsContext *img_convert_ctx;
c = st->codec;
//copy the decoded input frame into the output frame
//(assumes both frames are 4:2:0 with the output frame size; otherwise a
//swscale conversion would be needed here)
av_picture_copy( (AVPicture*)m_picture, //dest. frame
(AVPicture*)m_capture->av_frame, //src. frame
PIX_FMT_YUV420P, //pixel format of both frames
m_frame_width,
m_frame_height );
printf("1 -------------------------\n" );
if (oc->oformat->flags & AVFMT_RAWPICTURE) {
/* raw video case. The API will change slightly in the near
future for that */
AVPacket pkt;
av_init_packet(&pkt);
pkt.flags |= PKT_FLAG_KEY;
pkt.stream_index= st->index;
pkt.data= (uint8_t *)m_picture;
pkt.size= sizeof(AVPicture);
ret = av_interleaved_write_frame(oc, &pkt);
} else {
/* encode the image */
//out_size = avcodec_encode_video( c, m_video_outbuf, m_video_outbuf_size, m_capture->av_frame );
out_size = avcodec_encode_video( c, m_video_outbuf, m_video_outbuf_size, m_picture ); //encoding m_picture (not the decoder's frame) fixed saving mjpeg
/* if zero size, it means the image was buffered */
printf("2 ------------------------- \n");
if (out_size > 0) {
AVPacket pkt;
av_init_packet( &pkt );
if( c->coded_frame->pts != AV_NOPTS_VALUE )
pkt.pts= av_rescale_q( c->coded_frame->pts, c->time_base, st->time_base );
if( c->coded_frame->key_frame )
pkt.flags |= PKT_FLAG_KEY;
pkt.stream_index= st->index;
pkt.data= m_video_outbuf;
pkt.size= out_size;
/* write the compressed frame in the media file */
ret = av_interleaved_write_frame(oc, &pkt);
} else {
ret = 0;
}
}
if (ret != 0) {
fprintf(stderr, "Error while writing video frame\n");
exit(1);
}
m_frame_count++;
}
AVStream* CCapture::addVideoStream( AVFormatContext *oc, enum CodecID codec_id )
{
AVCodecContext *c;
AVStream *st;
AVRational fps;
CodecID c_id;
//AVCodec *codec; //add by gvc
st = av_new_stream( oc, 0 );
if( !st ){
fprintf( stderr, "Could not alloc stream\n" );
exit(1);
}
//set encode codec
if( m_codec_name ){
c = st->codec;
c->codec_id = findCodecOrDie( m_codec_name, CODEC_TYPE_VIDEO, 1 );
c->codec_type = CODEC_TYPE_VIDEO;
//codec = avcodec_find_encoder_by_name(m_codec_name); //ad by gvc
}else{
c = st->codec;
c->codec_id = codec_id;
c->codec_type = CODEC_TYPE_VIDEO;
//codec = avcodec_find_encoder_by_name(m_codec_name); //ad by gvc
}
fps = m_frame_rate.num ? m_frame_rate : (AVRational){25,1}; //gvc
//put sample parameters
c->bit_rate = m_bit_rate;//400000;
c->width = m_frame_width;
c->height = m_frame_height;
/* time base: this is the fundamental unit of time (in seconds) in terms
of which frame timestamps are represented. for fixed-fps content,
timebase should be 1/framerate and timestamp increments should be
identically 1. */
c->time_base.den = fps.num;//STREAM_FRAME_RATE; //gvc
c->time_base.num = fps.den; //gvc
c->sample_aspect_ratio = av_d2q( m_frame_aspect_ratio*c->height/c->width, 255 );
//gvc c->gop_size = 0;//12; /* emit one intra frame every twelve frames at most */
c->pix_fmt = PIX_FMT_YUVJ420P; //encoder pixel format (m_pix_fmt / STREAM_PIX_FMT were also tried here)
// some formats want stream headers to be separate
if( oc->oformat->flags & AVFMT_GLOBALHEADER )
c->flags |= CODEC_FLAG_GLOBAL_HEADER;
return st;
}
AVFrame* CCapture::allocPicture( enum PixelFormat pix_fmt, int width, int height )
{
AVFrame *picture;
uint8_t *picture_buf;
int size;
picture = avcodec_alloc_frame();
if (!picture)
return NULL;
size = avpicture_get_size(pix_fmt, width, height);
picture_buf = (uint8_t *)av_malloc(size);
if (!picture_buf) {
av_free(picture);
return NULL;
}
avpicture_fill((AVPicture *)picture, picture_buf,
pix_fmt, width, height);
return picture;
}
void CCapture::openVideoEncoder( AVFormatContext *oc, AVStream *st )
{
AVCodec *codec;
AVCodecContext *c;
c = st->codec;
printf( ".......... c->codec_id %d\n", c->codec_id );
//find the video encoder
codec = avcodec_find_encoder( c->codec_id );
if( !codec ){
fprintf( stderr, "codec not found\n" );
exit(1);
}
printf("1 ............. c %d\n", c->pix_fmt);
printf("2 ............ %s\n", codec->name );
//open the codec
if( avcodec_open(c, codec) < 0 ){
fprintf( stderr, "could not open codec\n" );
exit(1);
}
printf("3 ............. %s\n", codec->name );
m_video_outbuf = NULL;
if( !(oc->oformat->flags & AVFMT_RAWPICTURE) ){
/* allocate output buffer */
/* XXX: API change will be done */
/* buffers passed into lav* can be allocated any way you prefer,
as long as they're aligned enough for the architecture, and
they're freed appropriately (such as using av_free for buffers
allocated with av_malloc) */
m_video_outbuf_size = 1024*256;//200000;
m_video_outbuf = (uint8_t*)av_malloc(m_video_outbuf_size);
}
//allocate the encoded raw picture
m_picture = allocPicture( c->pix_fmt, c->width, c->height );
if( !m_picture ){
fprintf(stderr, "could not allocate picture\n");
exit(1);
}
/*if the output format is not YUV420P, then a temporary YUV420P
picture is needed too. It is then converted to the required
output format */
m_tmp_picture = NULL;
if( c->pix_fmt != PIX_FMT_YUV420P )
{
m_tmp_picture = allocPicture( PIX_FMT_YUV420P, c->width, c->height );
if (!m_tmp_picture) {
fprintf(stderr, "Could not allocate temporary picture\n");
exit(1);
}
}
}
void CCapture::closeVideo(AVFormatContext *oc, AVStream *st)
{
avcodec_close(st->codec);
av_free( m_picture->data[0]);
av_free(m_picture);
if (m_tmp_picture) {
av_free(m_tmp_picture->data[0]);
av_free(m_tmp_picture);
}
av_free(m_video_outbuf);
}
void CCapture::encode( const char* filename )
{
AVOutputFormat *fmt;
AVFormatContext *oc;
AVStream *video_st;
double audio_pts, video_pts;
int i;
//auto detect the output format from the name. default is mpeg.
fmt = guess_format( NULL, filename, NULL );
if( !fmt ){
printf( "could not deduce output format from file extension: using MPEG.\n" );
fmt = guess_format("mpeg", NULL, NULL);
}
if( !fmt ){
fprintf( stderr, "Could not find suitable output format\n" );
exit(1);
}
//allocate the output media context
oc = avformat_alloc_context();
if( !oc ){
fprintf( stderr, "memory error\n" );
exit(1);
}
oc->oformat = fmt;
snprintf( oc->filename, sizeof(oc->filename), "%s", filename );
//add the video streams using the default format codecs and initialize the codecs
video_st = NULL;
//is a known codec fmt->video_codec..?
if( fmt->video_codec != CODEC_ID_NONE ){
video_st = addVideoStream( oc, fmt->video_codec );
}
//set the output parameters (must be done even if no parameters).
if( av_set_parameters( oc, NULL ) < 0 ){
fprintf( stderr, "invalid output format parameters\n" );
exit(1);
}
//fills the streams field of the AVFormatContext with valid information
dump_format( oc, 0, filename, 1 );
// now that all the parameters are set, we can open the
//video codecs and allocate the necessary encode buffers
if( video_st )
openVideoEncoder( oc, video_st );
//open the output file, if needed
if( !(fmt->flags & AVFMT_NOFILE) ) {
if( url_fopen(&oc->pb, filename, URL_WRONLY) < 0 ) {
fprintf( stderr, "could not open '%s'\n", filename );
exit(1);
}
}
//write the stream header
if( av_write_header(oc) < 0 ){
fprintf( stderr, "could not write header for output file (incorrect codec parameters ?)\n");
exit(1);
}
int x = 0;
for( ;; )
{
printf("frame index %d\n", x );
if( x > 100 )
break;
//grab and decode the next frame from the input
if( getAVFrame() == NULL )
break; //end of stream or read error
//re-encode the frame and write it to the output file
writeVideoFrame( oc, video_st );
x++;
}
/* write the trailer, if any. the trailer must be written
* before you close the CodecContexts open when you wrote the
* header; otherwise write_trailer may try to use memory that
* was freed on av_codec_close() */
av_write_trailer(oc);
/* close each codec */
if (video_st)
closeVideo(oc, video_st);
//free the streams
for(i = 0; i < oc->nb_streams; i++) {
av_freep(&oc->streams[i]->codec);
av_freep(&oc->streams[i]);
}
if (!(fmt->flags & AVFMT_NOFILE)) {
//close the output file
url_fclose(oc->pb);
}
//free the stream
av_free(oc);
}
enum CodecID CCapture::findCodecOrDie( const char *name, int type, int encoder )
{
const char *codec_string = encoder ? "encoder" : "decoder";
AVCodec *codec;
if( !name )
return CODEC_ID_NONE;
codec = encoder ? avcodec_find_encoder_by_name(name) : avcodec_find_decoder_by_name(name);
if( !codec ){
fprintf( stderr, "unknown %s '%s'\n", codec_string, name );
exit(1); //codec is NULL, so we cannot continue
}
if( codec->type != type ){
fprintf( stderr, "invalid %s type '%s'\n", codec_string, name );
exit(1);
}
return codec->id;
}
void CCapture::setEncodeOptions( const char* codec, const char* frame_rate, const char* bit_rate, const char* size )
{
setCodec( codec );
setFrameRate( frame_rate );
setBitrate( bit_rate );
setFrameSize( size );
//setPreset( "hq" );
//m_file_iformat = av_find_input_format( "h264" );
}
void CCapture::setCodec( const char* arg )
{
//gvc av_freep( m_codec_name );
m_codec_name = av_strdup( arg );
}
void CCapture::setFrameRate( const char* arg )
{
if( av_parse_video_frame_rate( &m_frame_rate, arg ) < 0 ){
fprintf( stderr, "incorrect frame rate value: %s\n", arg );
}
}
void CCapture::setBitrate( const char* arg )
{
int bit_rate = atoi(arg);
if( bit_rate < 1000 )
fprintf( stderr, "WARNING: The bitrate parameter is set too low. It takes bits/s as argument, not kbits/s\n" );
else
m_bit_rate = bit_rate;
}
void CCapture::setFrameSize( const char* arg )
{
if( av_parse_video_frame_size(&m_frame_width, &m_frame_height, arg) < 0 ){
fprintf(stderr, "incorrect frame size\n");
//avExit(EXIT_CLOSE_ALL_FILES);
}
if( (m_frame_width % 2) != 0 || (m_frame_height % 2) != 0 ){
fprintf(stderr, "Frame size must be a multiple of 2\n");
//avExit(EXIT_CLOSE_ALL_FILES);
}
}
void CCapture::setMJPEG()
{
m_file_iformat = av_find_input_format( "mjpeg" );
m_file_oformat = guess_format( "mjpeg", NULL, NULL );
if( !m_file_iformat && !m_file_oformat ){
fprintf( stderr, "unknown input or output format: mjpeg\n" );
//av_exit(1);
}
}
void CCapture::setRTSP()
{
m_file_iformat = av_find_input_format( "rtsp" );
m_file_oformat = guess_format( "rtsp", NULL, NULL );
if( !m_file_iformat && !m_file_oformat ){
fprintf( stderr, "unknown input or output format: rtsp\n" );
//av_exit(1);
}
}
void CCapture::setPreset( const char *opt )
{
//function called by h264 format
const char *arg = "hq";//TODO
char *audio_codec_name = NULL;
char *subtitle_codec_name = NULL;
FILE *f = NULL;
char filename[1000], tmp[1000], tmp2[1000], line[1000];
int i;
//TODO GVC const char *base[2] = { getenv("HOME"), FFMPEG_DATADIR, };
const char *base[2] = { getenv("HOME"), "/usr/local/share/ffmpeg", };
for( i = !base[0]; i < 2 && !f; i++ )
{
snprintf( filename, sizeof(filename), "%s%s/%s.ffpreset", base[i], i ? "" : "/.ffmpeg", arg );
f = fopen( filename, "r" );
if( !f ){
char *codec_name = *opt == 'v' ? m_codec_name : *opt == 'a' ? audio_codec_name : subtitle_codec_name;
snprintf( filename, sizeof(filename), "%s%s/%s-%s.ffpreset", base[i], i ? "" : "/.ffmpeg", codec_name, arg );
f = fopen( filename, "r" );
}
}
/*TODO GVC if( !f && ((arg[0]=='.' && arg[1]=='/') || arg[0]=='/' || is_dos_path(arg)) ){
av_strlcpy( filename, arg, sizeof(filename) );
f = fopen( filename, "r" );
}*/
if( !f ){
fprintf( stderr, "File for preset '%s' not found\n", arg );
//avExit(EXIT_CLOSE_ALL_FILES);
}
while(!feof(f))
{
int e = fscanf( f, "%999[^\n]\n", line ) - 1;
if( line[0] == '#' && !e )
continue;
e|= sscanf( line, "%999[^=]=%999[^\n]\n", tmp, tmp2 ) - 2;
if( e ){
fprintf( stderr, "%s: Invalid syntax: '%s'\n", filename, line );
//avExit(EXIT_CLOSE_ALL_FILES);
}
if( !strcmp(tmp, "acodec") ){
}else if(!strcmp(tmp, "vcodec")){
setCodec(tmp2);
}else if(!strcmp(tmp, "scodec")){
;
}/*else if(optDefault(tmp, tmp2) < 0){
fprintf(stderr, "%s: Invalid option or argument: '%s', parsed as '%s' = '%s'\n", filename, line, tmp, tmp2);
//avExit(EXIT_CLOSE_ALL_FILES);
}*/
}
fclose(f);
}
void CCapture::printError( const char *filename, int err )
{
switch( err )
{
case AVERROR_NUMEXPECTED:
fprintf(stderr, "%s: syntax expected in filename\n",
filename);
break;
case AVERROR_INVALIDDATA:
fprintf(stderr, "%s: error while parsing header\n", filename);
break;
case AVERROR_NOFMT:
fprintf(stderr, "%s: unknown format\n", filename);
break;
case AVERROR(EIO):
fprintf(stderr, "%s: I/O error occurred\n"
"usually that means that input file is truncated and/or corrupted.\n",
filename);
break;
case AVERROR(ENOMEM):
fprintf(stderr, "%s: memory allocation error occurred\n", filename);
break;
case AVERROR(ENOENT):
fprintf(stderr, "%s: no such file or directory\n", filename);
break;
case AVERROR(ENOSYS):
fprintf(stderr, "%s: operation not supported\n", filename);
break;
case AVERROR(EPIPE):
fprintf(stderr, "%s: end of file\n", filename);
break;
case AVERROR_PATCHWELCOME:
fprintf(stderr, "%s: not yet implemented in libavcodec\n", filename);
break;
#if CONFIG_NETWORK
case AVERROR(FF_NETERROR(EPROTONOSUPPORT)):
fprintf( stderr, "%s: Unsupported network protocol\n", filename );
break;
#endif
default:
fprintf(stderr, "%s: unkown error: [%d]\n", filename, err );
break;
}
}
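//--- attachment: CCapture.h ---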
extern "C"{
#include <libavformat/avformat.h>
#include <libavcodec/avcodec.h>
#include <libswscale/swscale.h>
}
typedef struct Capture
{
//frame in original colorspace (unknown, usually YUV420)
AVFrame* av_frame;
AVFormatContext* av_format_ctx;
int video_stream;
AVCodecContext* av_codec_ctx;
AVCodec* av_codec;
int eof;
} Capture;
#define STREAM_FRAME_RATE 25 /* 25 images/s */
#define STREAM_PIX_FMT PIX_FMT_YUV420P /* default pix_fmt */
class CCapture
{
public:
CCapture();
~CCapture();
void open( const char* url );
AVFrame* getAVFrame();
Capture* getCapture();
void setOutputFile( const char* file_name, const char* format_output );
void encode( const char* filename );
void setEncodeOptions( const char* codec, const char* frame_rate, const char* bit_rate, const char* size );
private:
void initialize();
void finalize();
void openVideoEncoder( AVFormatContext *oc, AVStream *st );
void writeVideoFrame( AVFormatContext *oc, AVStream *st );
AVStream* addVideoStream( AVFormatContext *oc, enum CodecID codec_id );
AVFrame* allocPicture( enum PixelFormat pix_fmt, int width, int height );
void closeVideo(AVFormatContext *oc, AVStream *st);
enum CodecID findCodecOrDie( const char *name, int type, int encoder );
void setCodec( const char* arg );
void setFrameRate( const char* arg );
void setBitrate( const char* arg );
void setFrameSize( const char* arg );
void setMJPEG();
void setRTSP();
void setPreset( const char *opt );
void printError( const char *filename, int err );
Capture* m_capture;
AVFrame *m_picture, *m_tmp_picture;
uint8_t* m_video_outbuf;
int m_frame_count, m_video_outbuf_size;
//encode options
char* m_codec_name;
AVRational m_frame_rate;
int m_bit_rate;
int m_frame_width, m_frame_height;
AVInputFormat* m_file_iformat;
AVOutputFormat* m_file_oformat;
double m_frame_aspect_ratio; //stores av_q2d() results
PixelFormat m_pix_fmt;
};
_______________________________________________
libav-user mailing list
[email protected]
https://lists.mplayerhq.hu/mailman/listinfo/libav-user