On Thu, Aug 4, 2011 at 4:26 AM, <[email protected]> wrote:
> Hello Matt, Hello people,
>
> thank you for your hint. I am trying this, but I have problems creating the
> required parameters. Here is my code:
>
> void YUVtoRGB_Converter::YUV2MP4ES(const uint8_t *YUVbuf, uint8_t *MP4buf,
> int MP4bufSize)
> {
> AVCodec *codec;
> AVCodecContext *c= NULL;
> //uint8_t *extradata;
> //int extradata_size;
> int i, out_size, size, x, y, outbuf_size;
> AVFrame *picture;
> uint8_t *outbuf;
> unsigned char *picture_buf;
>
> //printf("Video encoding\n");
>
> av_register_all();
>
> /* find the mpeg4 video encoder */
> codec = avcodec_find_encoder(CODEC_ID_MPEG4);
> if (!codec) {
> fprintf(stderr, "codec not found\n");
> exit(1);
> }
>
> c = avcodec_alloc_context3(codec);
> picture= avcodec_alloc_frame();
>
> /* put sample parameters */
> c->bit_rate = 400000;
> /* resolution must be a multiple of two */
> c->width = CAM_RESOLUTION_WIDTH;
> c->height = CAM_RESOLUTION_HEIGHT;
> /* frames per second */
> //c->time_base= (AVRational){1,25};
> c->time_base.num = 1;
> c->time_base.den = FRAMERATE;
> c->gop_size = 10; /* emit one intra frame every ten frames */
> c->max_b_frames=1;
> c->pix_fmt = PIX_FMT_YUV420P;
>
> /* open it */
> if (avcodec_open(c, codec) < 0) {
> fprintf(stderr, "could not open codec\n");
> exit(1);
> }
> /// --> here creating the required parameters
> AVFormatContext *t = avformat_alloc_context();
> AVDictionary **opt = NULL; // <-- this is the problem i think
>
> FILE *f = fopen("test", "a+b");
> if (!f) {
> fprintf(stderr, "could not open %s\n", "test");
> exit(1);
> }
> if (this->fileExists == false) {
> // here throws exception
> // Access violation reading location from 0xAddress
> avformat_write_header(t, opt);
> this->fileExists = true;
> }
> //.... and so on.
>
> How do I create the "AVDictionary **opt" parameter properly?
>
You're writing to a format context which wasn't initialized with the
file to be written. Here's my example code:
int CEncodeFile::OpenFile(const char* szFilename, const SEncodeParams* psEncodeParams,
                          int* pnPicSizeInFmt, int anLinesizes[4])
{
if (!szFilename || !psEncodeParams || !pnPicSizeInFmt || !anLinesizes)
return BAD_PARAMS;
*pnPicSizeInFmt= 0;
EnterCriticalSection(&m_rFileSafe);
if (m_pFormatCtx) // File was already opened
{
LeaveCriticalSection(&m_rFileSafe);
return FILE_OPEN;
}
// Save params internally
m_sEncodeParams= *psEncodeParams;
/* Auto detect the output format from the filename, otherwise use the
   provided format. */
avformat_alloc_output_context2(&m_pFormatCtx, NULL, NULL, szFilename);
if (!m_pFormatCtx)
{
avformat_alloc_output_context2(&m_pFormatCtx, NULL,
m_sEncodeParams.szOutFormat, szFilename);
}
if (!m_pFormatCtx)
{
LeaveCriticalSection(&m_rFileSafe);
return E_NO_OUT_FORMAT;
}
/* Add the video stream using the default format codecs and
   initialize the codecs. */
if (m_pFormatCtx->oformat->video_codec == CODEC_ID_NONE) // Format doesn't have a codec
{
LeaveCriticalSection(&m_rFileSafe);
return E_CODEC_NOT_FOUND;
}
// Add the video stream where we'll save the pictures
m_pAVStream = av_new_stream(m_pFormatCtx, 0);
if (!m_pAVStream)
{
FinishUp();
LeaveCriticalSection(&m_rFileSafe);
return E_NO_STREAM;
}
/* Set the codec of our stream.*/
AVCodecContext* pCodecCtx = m_pAVStream->codec;
pCodecCtx->codec_id = m_sEncodeParams.eCodecID;
pCodecCtx->codec_type = AVMEDIA_TYPE_VIDEO;
/* Save parameters */
pCodecCtx->bit_rate = m_sEncodeParams.nBitRate;
/* resolution must be a multiple of two */
pCodecCtx->width = m_sEncodeParams.nWidth;
pCodecCtx->height = m_sEncodeParams.nHeight;
/* Time base: this is the fundamental unit of time (in seconds) in terms
   of which frame timestamps are represented. For fixed-fps content,
   timebase should be 1/framerate and timestamp increments should be
   identically 1. */
pCodecCtx->time_base.den = m_sEncodeParams.nFRNum;
pCodecCtx->time_base.num = m_sEncodeParams.nFRDen;
pCodecCtx->gop_size = 12; /* emit one intra frame every twelve frames at most */
pCodecCtx->pix_fmt = m_sEncodeParams.ePixelFmtOut;
pCodecCtx->max_b_frames = 1;
// Some formats want stream headers to be separate
if(m_pFormatCtx->oformat->flags & AVFMT_GLOBALHEADER)
pCodecCtx->flags |= CODEC_FLAG_GLOBAL_HEADER;
/* Find the video encoder for this stream's codec. */
AVCodec* pCodec = avcodec_find_encoder(pCodecCtx->codec_id);
if (!pCodec)
{
FinishUp();
LeaveCriticalSection(&m_rFileSafe);
return E_CODEC_NOT_FOUND;
}
/* Initialize the codecCtx with the codec. */
if (avcodec_open2(pCodecCtx, pCodec, NULL) < 0)
{
FinishUp();
LeaveCriticalSection(&m_rFileSafe);
return E_CODEC_NO_OPEN;
}
// Raw picture format requires no copying so no buffer.
if (!(m_pFormatCtx->oformat->flags & AVFMT_RAWPICTURE))
{
// Output buffer for the video file.
m_nBuffer= max(m_sEncodeParams.nWidth*m_sEncodeParams.nHeight*6+1664, 1024*256);
m_auchVideoOutBuf = (unsigned char*) av_malloc(m_nBuffer);
if (!m_auchVideoOutBuf)
{
FinishUp();
LeaveCriticalSection(&m_rFileSafe);
return ALLOC_MEMORY_ERROR;
}
}
/* Allocate the picture for the encoded pic fmt. If the in/out formats
   are the same, incoming pics are saved in here. Otherwise, it is first
   converted to the out format and then saved here. */
m_pAVFrame = avcodec_alloc_frame();
if (!m_pAVFrame)
{
FinishUp();
LeaveCriticalSection(&m_rFileSafe);
return ALLOC_MEMORY_ERROR;
}
/* See avpicture_fill, av_image_fill_pointers, avpicture_layout, and
   avpicture_get_size for how different pixfmts are stored in the linear
   buffer. avpicture_fill will be called on the data buffer of each
   incoming picture, so we don't need to allocate a buffer for incoming
   pics, just the frame. */
/* If the output format is not the same as the input, then a temporary
   picture with the input format is also needed. We also need to provide
   a conversion context. */
if (pCodecCtx->pix_fmt != m_sEncodeParams.ePixelFmtIn)
{
/* Now prepare the buffers that will contain the output image. If the
   input and output formats are the same we don't do this, because
   we'll fill the input buffer directly with the input images. */
if (av_image_alloc(m_pAVFrame->data, m_pAVFrame->linesize, pCodecCtx->width,
    pCodecCtx->height, pCodecCtx->pix_fmt, 1) < 0)
{
FinishUp();
LeaveCriticalSection(&m_rFileSafe);
return ALLOC_MEMORY_ERROR;
}
// Frame for the input image; no buffer allocation because we use the input image directly.
m_pAVFrameTmp= avcodec_alloc_frame();
if (!m_pAVFrameTmp)
{
FinishUp();
LeaveCriticalSection(&m_rFileSafe);
return ALLOC_MEMORY_ERROR;
}
// But we still need to know the linesizes of the input image for the conversion.
if (av_image_fill_linesizes(m_pAVFrameTmp->linesize,
    m_sEncodeParams.ePixelFmtIn, pCodecCtx->width) < 0)
{
FinishUp();
LeaveCriticalSection(&m_rFileSafe);
return LINESIZES_ERROR;
}
// Get the conversion context
m_pConvertCtx = sws_getCachedContext(NULL, pCodecCtx->width,
pCodecCtx->height, m_sEncodeParams.ePixelFmtIn,
pCodecCtx->width, pCodecCtx->height, pCodecCtx->pix_fmt,
SWS_BICUBIC, NULL, NULL, NULL);
if (!m_pConvertCtx)
{
FinishUp();
LeaveCriticalSection(&m_rFileSafe);
return NO_CONVERT_FMT;
}
}
/* Now open the output file, if needed. */
if (!(m_pFormatCtx->oformat->flags & AVFMT_NOFILE))
{
if (avio_open(&m_pFormatCtx->pb, szFilename, AVIO_FLAG_WRITE) < 0)
{
FinishUp();
LeaveCriticalSection(&m_rFileSafe);
return E_CANNOT_OPEN_FILE;
}
}
/* Now return the required size of the buffer of the input image.
   Note, this will include padding and alignment. */
AVPicture sAVFrameDummy;
*pnPicSizeInFmt= avpicture_fill(&sAVFrameDummy, NULL, m_sEncodeParams.ePixelFmtIn,
    pCodecCtx->width, pCodecCtx->height);
m_nPicSizeInFmt= *pnPicSizeInFmt; // Save it for later use in copying the image
if (av_image_fill_linesizes(anLinesizes, m_sEncodeParams.ePixelFmtIn,
pCodecCtx->width)<0)
{
FinishUp();
LeaveCriticalSection(&m_rFileSafe);
return LINESIZES_ERROR;
}
/* Write the stream header, if any */
if (avformat_write_header(m_pFormatCtx, NULL)<0)
{
FinishUp();
LeaveCriticalSection(&m_rFileSafe);
return E_HEADER_NO_WRITE;
}
m_nFrame= 0;
m_bDone= false;
LeaveCriticalSection(&m_rFileSafe);
return 0;
}
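As for the "AVDictionary **opt" parameter itself: you don't have to build
one at all, just pass NULL to avformat_write_header(). If you do want to
hand options to the muxer, declare a single AVDictionary* initialized to
NULL, fill it with av_dict_set() and pass its address. A minimal, untested
sketch, reusing the names from my code above (the option name/value below
is only a placeholder for illustration):

    AVDictionary *opts = NULL;             /* note: a single pointer, starts out NULL */
    /* "some_option"/"value" is just a placeholder -- use a real option of your muxer */
    av_dict_set(&opts, "some_option", "value", 0);
    if (avformat_write_header(m_pFormatCtx, &opts) < 0)
        return E_HEADER_NO_WRITE;
    av_dict_free(&opts);                   /* free whatever the muxer didn't consume */

And as I said in my earlier mail, call av_write_trailer() (and avio_close()
on m_pFormatCtx->pb for file-based formats) when you're done writing frames,
otherwise the file won't be finalized properly.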
Matt
> Thank you in advance.
>
> Cheers
> Murat
>
>
> --- Matthew Einhorn <[email protected]> wrote on Wed, Aug 3, 2011:
>
>> From: Matthew Einhorn <[email protected]>
>> Subject: Re: [Libav-user] Converting YUV 2 MPEG4
>> To: "This list is about using libavcodec, libavformat, libavutil,
>> libavdevice and libavfilter." <[email protected]>
>> Date: Wednesday, August 3, 2011, 2:50 PM
>> On Wed, Aug 3, 2011 at 9:37 AM,
>> <[email protected]>
>> wrote:
>> > Hello boys and girls,
>> >
>> > I want to convert a YUV signal from my POE-camera into
>> a MPEG4 stream, for which I have used the libavcodec.lib
>> library of yours.
>> >
>> > I am confused with the file-format headers for the
>> target video file. For an instance :
>> > out_size = avcodec_encode_video(c, outbuf, outbuf_size, outpic);
>> > Is there no mpeg4 specific file format header
>> required, in the beginning of the video file? Or is the
>> library function handling this internally?
>> >
>> I use avformat_write_header() before starting to write
>> frames and
>> av_write_trailer() afterwards. I think this should do what
>> you want.
>>
>> Matt
>>
>>
>> > Cheers Murat
>> >
_______________________________________________
Libav-user mailing list
[email protected]
http://ffmpeg.org/mailman/listinfo/libav-user