Hi folks! I'm working on an application written in C that makes use of the ffmpeg libraries to record the desktop on OS X using the avfoundation device. I've included the relevant parts of the code in this email. Full code can be found here http://stackoverflow.com/q/29341161/799285. I think I'm doing everything properly up to the decode/encode loop, which I've included below.
When I run this program, I'm able to encode about 50-100 frames before I receive a SIGSEGV. Valgrind analysis can be found here: http://pastebin.com/MPeRhjhN. For each frame that was encoded, my memory usage goes up by about 4-6MB so I know something is not being freed properly. Can I get some feedback on this code? Anything that I've done wrong? or am using incorrectly? Your help is much appreciated, and if you ever are in Waterloo, ON, I shall buy you a pint or two. int frame_count = 0; while (av_read_frame(inFmtCtx, &inPacket) >= 0) { if (inPacket.stream_index == videostream) { avcodec_decode_video2(inCodecCtx, inFrame, &frameFinished, &inPacket); // 1 Frame might need more than 1 packet to be filled if (frameFinished) { outFrameYUV = av_frame_alloc(); uint8_t *buffer = (uint8_t *)av_malloc(numBytes); int ret = avpicture_fill((AVPicture *)outFrameYUV, buffer, PIX_FMT_YUV420P, inCodecCtx->width, inCodecCtx->height); if (ret < 0) { printf("%d is return val for fill\n", ret); return -1; } // convert image to YUV sws_scale(swsCtx, (uint8_t const *const *)inFrame->data, inFrame->linesize, 0, inCodecCtx->height, outFrameYUV->data, outFrameYUV->linesize); // outFrameYUV now holds the YUV scaled frame/picture outFrameYUV->format = outCodecCtx->pix_fmt; outFrameYUV->width = outCodecCtx->width; outFrameYUV->height = outCodecCtx->height; AVPacket pkt; int got_output; av_init_packet(&pkt); pkt.data = NULL; pkt.size = 0; outFrameYUV->pts = frame_count; ret = avcodec_encode_video2(outCodecCtx, &pkt, outFrameYUV, &got_output); if (ret < 0) { fprintf(stderr, "Error encoding video frame: %s\n", av_err2str(ret)); return -1; } if (got_output) { if (stream->codec->coded_frame->key_frame) { pkt.flags |= AV_PKT_FLAG_KEY; } pkt.stream_index = stream->index; if (pkt.pts != AV_NOPTS_VALUE) pkt.pts = av_rescale_q(pkt.pts, stream->codec->time_base, stream->time_base); if (pkt.dts != AV_NOPTS_VALUE) pkt.dts = av_rescale_q(pkt.dts, stream->codec->time_base, stream->time_base); if 
(avio_open_dyn_buf(&outFmtCtx->pb) != 0) { printf("ERROR: Unable to open dynamic buffer\n"); } ret = av_interleaved_write_frame(outFmtCtx, &pkt); unsigned char *pb_buffer; int len = avio_close_dyn_buf(outFmtCtx->pb, (unsigned char **)&pb_buffer); avio_write(outFmtCtx->pb, (unsigned char *)pb_buffer, len); } else { ret = 0; } if (ret != 0) { fprintf(stderr, "Error while writing video frame: %s\n", av_err2str(ret)); exit(1); } fprintf(stderr, "encoded frame #%d\n", frame_count); frame_count++; av_free_packet(&pkt); av_frame_unref(outFrameYUV); av_free(buffer); } } av_free_packet(&inPacket); } av_write_trailer(outFmtCtx); Here's my ffmpeg library versions: ffmpeg version N-70876-g294bb6c Copyright (c) 2000-2015 the FFmpeg developers built with Apple LLVM version 6.0 (clang-600.0.56) (based on LLVM 3.5svn) configuration: --prefix=/usr/local --enable-gpl --enable-postproc --enable-pthreads --enable-libmp3lame --enable-libtheora --enable-libx264 --enable-libvorbis --disable-mmx --disable-ssse3 --disable-armv5te --disable-armv6 --disable-neon --enable-shared --disable-static --disable-stripping libavutil 54. 20.100 / 54. 20.100 libavcodec 56. 29.100 / 56. 29.100 libavformat 56. 26.101 / 56. 26.101 libavdevice 56. 4.100 / 56. 4.100 libavfilter 5. 13.101 / 5. 13.101 libswscale 3. 1.101 / 3. 1.101 libswresample 1. 1.100 / 1. 1.100 libpostproc 53. 3.100 / 53. 3.100 Hyper fast Audio and Video encoder Thanks, Praj
_______________________________________________ Libav-user mailing list [email protected] http://ffmpeg.org/mailman/listinfo/libav-user
