On 26/06/12 03:18, mgriffith wrote:
> You mentioned you wrote a new ffmpegReader that works with current
> ffmpeg libraries; would you be willing to share it? I'm having a doozy
> of a time reading ProRes files on Linux :(

Hi guys,
These are *unsupported* in 6.3, so you're on your own with them, but the
attached files build against ffmpeg from February and *should* work with
the current ffmpeg API.
You'll obviously have to build them against a newer version of the ffmpeg
libs (our bundled ones won't work for 6.x), but the end result is that
several in-demand codecs appear to work surprisingly well through ffmpeg :)
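For reference, a build line along these lines should do it on Linux (the
Nuke and ffmpeg paths below are just placeholders; adjust them for your
own install):

  g++ -shared -fPIC ffmpegReader.cpp -o ffmpegReader.so \
      -I<nuke_install>/include -I<ffmpeg_install>/include \
      -L<ffmpeg_install>/lib -lavformat -lavcodec -lswscale -lavutil \
      -L<nuke_install> -lDDImage

and the same again for the writer.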
Peter
--
Peter Pearson, Software Engineer
The Foundry, 6th Floor, The Communications Building,
48 Leicester Square, London, UK, WC2H 7LT
Tel: +44 (0)20 7434 0449 Web: www.thefoundry.co.uk
The Foundry Visionmongers Ltd.
Registered in England and Wales No: 4642027
// Copyright (c) 2009 The Foundry Visionmongers Ltd. All Rights Reserved.
#include "Build/fnBuild.h"
#include "DDImage/DDString.h"
#include "DDImage/Reader.h"
#include "DDImage/Row.h"
#ifdef _WIN32
#include <io.h>
#endif
#ifndef INT64_C
#define INT64_C(c) (c ## LL)
#define UINT64_C(c) (c ## ULL)
#endif
extern "C" {
#include <errno.h>
#include <libavformat/avformat.h>
#include <libavcodec/avcodec.h>
#include <libavutil/opt.h>
#include <libswscale/swscale.h>
#include <libavutil/avutil.h>
#include <libavutil/error.h>
}
using namespace DD::Image;
namespace
{
void ffmpegError(int error, char* errorBuf, size_t bufSize)
{
av_strerror(error, errorBuf, bufSize);
}
}
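// Reader that decodes movie files through libavformat/libavcodec and converts
// each decoded frame to packed RGB24 with libswscale before handing rows back
// to Nuke.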
class ffmpegReader : public Reader
{
public:
explicit ffmpegReader(Read* iop);
~ffmpegReader();
virtual bool videosequence() const { return true; }
void engine(int y, int x, int r, ChannelMask channels, Row& row);
void open();
private:
MetaData::Bundle meta;
static const Reader::Description d;
public:
const MetaData::Bundle& fetchMetaData(const char* key) { return meta; }
private:
bool findStreamInfo();
bool hasVideo() const { return !videoIdx_.empty(); }
AVStream* getVideoStream() { return currVideoIdx_ >= 0 ? context_->streams[videoIdx_[currVideoIdx_]] : NULL; }
void setMetaDataItem(MetaData::Bundle& metadata, const std::string& DDIkey, AVDictionary* dict, const char* ffmpegKey);
void setMetaDataItem(MetaData::Bundle& metadata, const std::string& DDIkey, const char* stringValue);
double fps() const;
void openVideoCodec();
void closeVideoCodec();
int64_t getTimeStamp(int pos) const;
bool seek(int pos);
bool decodeImage();
int width() const { return width_; }
int height() const { return height_; }
// use internal frame query function, as ffmpeg uses 0-based frames for most codecs
// so they might not be the same as Nuke's frames
int ffmpegFrame() const;
bool isCodecBlacklisted(const char* name);
private:
AVFormatContext* context_;
AVInputFormat* format_;
AVFrame* avFrame_;
AVCodec* videoCodec_;
AVPacket pkt_;
SwsContext* convertCtx_;
std::vector<int> videoIdx_;
int fpsNum_;
int fpsDen_;
int currVideoIdx_;
uint64_t frames_;
int width_;
int height_;
double aspect_;
std::vector<unsigned char> data_;
bool offsetTime_;
int lastSearchPos_;
int lastDecodedPos_;
bool codecZeroBasedStart_;
bool _invalidState;
};
static const char* codecBlacklist[] =
{
"r10k",
"ogg",
"mpjpeg",
"asf",
"asf_stream",
"h261",
"h263",
"rcv",
"yuv4mpegpipe",
"prores",
"dnxhd",
"swf",
"dv",
"ipod",
"psp",
"image2",
"3g2",
"3gp",
"RoQ",
#ifdef FN_OS_WINDOWS
"dirac",
"ffm",
#elif defined(FN_OS_MAC)
"mjpeg",
"vc1",
#elif defined(FN_OS_LINUX)
"mjpeg",
"vc1",
#endif
NULL
};
ffmpegReader::ffmpegReader(Read* iop)
: Reader(iop)
, context_(NULL)
, format_(NULL)
, avFrame_(NULL)
, videoCodec_(NULL)
, convertCtx_(NULL)
, fpsNum_(0)
, fpsDen_(0)
, currVideoIdx_(-1)
, frames_(0)
, width_(720)
, height_(576)
, aspect_(1.0f)
, offsetTime_(true)
, lastSearchPos_(-1)
, lastDecodedPos_(-1)
, codecZeroBasedStart_(true)
, _invalidState(false)
{
av_log_set_level(AV_LOG_WARNING);
av_register_all();
avFrame_ = avcodec_alloc_frame();
// FIXME_GC: shouldn't the plugin be passed the filename without the prefix?
int offset = 0;
std::string filename(iop->filename());
if (filename.find("ffmpeg:") != std::string::npos)
offset = 7;
int error = avformat_open_input(&context_, iop->filename() + offset, format_, NULL);
if (error < 0) {
char szError[1024];
ffmpegError(error, szError, 1024);
iop->error(szError);
}
else {
// FIXME_GC: needs to know if it's streamable.
error = avformat_find_stream_info(context_, NULL);
if (error < 0) {
char szError[1024];
ffmpegError(error, szError, 1024);
iop->error(szError);
}
else {
if (findStreamInfo() && getVideoStream()) {
AVCodecContext* codecContext = getVideoStream()->codec;
if (isCodecBlacklisted(codecContext->codec->name)) {
iop->error("unsupported codec...");
_invalidState = true;
return;
}
_invalidState = false;
if (getVideoStream()->sample_aspect_ratio.num)
aspect_ = av_q2d(getVideoStream()->sample_aspect_ratio);
else if (codecContext->sample_aspect_ratio.num)
aspect_ = av_q2d(codecContext->sample_aspect_ratio);
info_.channels(Mask_RGBA);
set_info(width_, height_, 3, aspect_);
info_.first_frame(1);
info_.last_frame(frames_);
data_.resize(width() * height() * 3);
// hack so seeking works from our intended position.
if (!strcmp(codecContext->codec->name, "mjpeg") || !strcmp(codecContext->codec->name, "dvvideo"))
offsetTime_ = false;
// H264 codec uses a different initial frame than the other codecs
if (!strcmp(codecContext->codec->name, "h264"))
codecZeroBasedStart_ = false;
AVDictionary* metadata = context_->metadata;
setMetaDataItem(meta, MetaData::CREATOR, metadata, "author");
setMetaDataItem(meta, MetaData::COMMENT, metadata, "comment");
setMetaDataItem(meta, MetaData::PROJECT, metadata, "album");
setMetaDataItem(meta, MetaData::COPYRIGHT, metadata, "copyright");
meta.setData("ffmpeg/num_streams", context_->nb_streams);
setMetaDataItem(meta, "ffmpeg/codec/codecName", codecContext->codec->name);
meta.setData(MetaData::FRAME_RATE, fps());
meta.setData("ffmpeg/codec/timecodeFrameStart", (unsigned int)codecContext->timecode_frame_start);
meta.setData("ffmpeg/codec/startTime", (unsigned int)context_->start_time);
}
else {
iop->error("unable to find codec");
_invalidState = true;
}
}
}
}
ffmpegReader::~ffmpegReader()
{
closeVideoCodec();
if (context_)
avformat_close_input(&context_);
av_free(avFrame_);
av_free(convertCtx_);
}
void ffmpegReader::engine(int y, int x, int rx, ChannelMask channels, Row& out)
{
foreach ( z, channels ) {
float* TO = out.writable(z) + x;
unsigned char* FROM = &data_[0];
FROM += (height() - y - 1) * width() * 3;
FROM += x * 3;
from_byte(z, TO, FROM + z - 1, NULL, rx - x, 3);
}
}
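// Decode the frame Nuke asked for: seek unless we're already positioned on the
// previous frame, then read packets from the current video stream until
// decodeImage() reports a complete picture.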
void ffmpegReader::open()
{
int internalFrame = ffmpegFrame();
if (lastDecodedPos_ + 1 != internalFrame) {
seek(0);
seek(internalFrame);
}
av_init_packet(&pkt_);
bool hasPicture = false;
int error = 0;
while (error >= 0 && !hasPicture) {
error = av_read_frame(context_, &pkt_);
if (error < 0)
break;
if (error >= 0 && videoIdx_.size() && currVideoIdx_ != -1 && pkt_.stream_index == videoIdx_[currVideoIdx_])
hasPicture = decodeImage();
av_free_packet(&pkt_);
}
}
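// Scan the container's streams, remember every video stream (the first one
// becomes the current stream), pick up the frame rate, open the decoder and
// work out (or estimate) the frame count.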
bool ffmpegReader::findStreamInfo()
{
for (int i = 0; i < static_cast<int>(context_->nb_streams); ++i) {
AVCodecContext* codecContext = context_->streams[i]->codec;
if (avcodec_find_decoder(codecContext->codec_id) == NULL)
continue;
switch (codecContext->codec_type) {
case AVMEDIA_TYPE_VIDEO:
videoIdx_.push_back(i);
if (currVideoIdx_ < 0)
currVideoIdx_ = 0;
width_ = codecContext->width;
height_ = codecContext->height;
break;
// ignore all audio streams
case AVMEDIA_TYPE_AUDIO:
default:
break;
}
}
if (!hasVideo())
return false;
AVStream* stream = getVideoStream();
if (stream->r_frame_rate.num != 0 && stream->r_frame_rate.den != 0) {
fpsNum_ = stream->r_frame_rate.num;
fpsDen_ = stream->r_frame_rate.den;
}
openVideoCodec();
// Set the duration
if ((uint64_t)context_->duration != AV_NOPTS_VALUE)
frames_ = uint64_t((fps() * (double)context_->duration / (double)AV_TIME_BASE));
else
frames_ = 1 << 29;
// try to calculate the number of frames
if (!frames_) {
seek(0);
av_init_packet(&pkt_);
av_read_frame(context_, &pkt_);
uint64_t firstPts = pkt_.pts;
uint64_t maxPts = firstPts;
av_free_packet(&pkt_);
seek(1 << 29);
av_init_packet(&pkt_);
while (stream && av_read_frame(context_, &pkt_) >= 0) {
uint64_t currPts = (uint64_t)(av_q2d(getVideoStream()->time_base) * (pkt_.pts - firstPts) * fps());
if (currPts > maxPts)
maxPts = currPts;
// release each packet once its pts has been inspected
av_free_packet(&pkt_);
}
frames_ = maxPts;
}
return true;
}
void ffmpegReader::setMetaDataItem(MetaData::Bundle& metadata, const std::string& DDIkey, AVDictionary* dict, const char* ffmpegKey)
{
if (!dict)
return;
AVDictionaryEntry* dictEntry = av_dict_get(dict, ffmpegKey, NULL, 0);
if (!dictEntry)
return;
metadata.setData(DDIkey, dictEntry->value);
}
void ffmpegReader::setMetaDataItem(MetaData::Bundle& metadata, const std::string& DDIkey, const char* stringValue)
{
if (!stringValue)
return;
metadata.setData(DDIkey, stringValue);
}
double ffmpegReader::fps() const
{
if (fpsDen_)
return fpsNum_ / (double)fpsDen_;
return 1.0f;
}
void ffmpegReader::openVideoCodec()
{
AVStream* stream = getVideoStream();
AVCodecContext* codecContext = stream->codec;
videoCodec_ = avcodec_find_decoder(codecContext->codec_id);
if (videoCodec_ == NULL || avcodec_open2(codecContext, videoCodec_, NULL) < 0)
currVideoIdx_ = -1;
}
void ffmpegReader::closeVideoCodec()
{
AVStream* stream = getVideoStream();
if (stream && stream->codec)
avcodec_close(stream->codec);
}
int64_t ffmpegReader::getTimeStamp(int pos) const
{
int64_t timestamp = (int64_t)(((double) pos / fps()) * AV_TIME_BASE);
if ((uint64_t) context_->start_time != AV_NOPTS_VALUE)
timestamp += context_->start_time;
return timestamp;
}
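// Seek the demuxer to (roughly) the timestamp of the requested frame; for most
// codecs we step back one second first so decoding can catch up to the exact
// frame.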
bool ffmpegReader::seek(int pos)
{
int64_t offset = getTimeStamp(pos);
if (offsetTime_) {
offset -= AV_TIME_BASE;
if (offset < context_->start_time)
offset = 0;
}
avcodec_flush_buffers(getVideoStream()->codec);
if (av_seek_frame(context_, -1, offset, AVSEEK_FLAG_BACKWARD) < 0)
return false;
return true;
}
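// Decode the packet currently in pkt_, track which frame position it
// corresponds to, and once the requested frame is reached convert it to RGB24
// into data_.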
bool ffmpegReader::decodeImage()
{
if (_invalidState)
return false;
// search for our picture.
double pts = 0;
if ((uint64_t) pkt_.dts != AV_NOPTS_VALUE)
pts = av_q2d(getVideoStream()->time_base) * pkt_.dts;
int curPos = int(pts * fps() + 0.5f);
if (curPos == lastSearchPos_)
curPos = lastSearchPos_ + 1;
lastSearchPos_ = curPos;
if ((uint64_t)context_->start_time != AV_NOPTS_VALUE)
curPos -= int(context_->start_time * fps() / AV_TIME_BASE);
int hasPicture = 0;
int curSearch = 0;
AVCodecContext* codecContext = getVideoStream()->codec;
if (curPos >= ffmpegFrame())
avcodec_decode_video2(codecContext, avFrame_, &hasPicture, &pkt_);
else if (offsetTime_)
avcodec_decode_video2(codecContext, avFrame_, &curSearch, &pkt_);
if (!hasPicture)
return false;
lastDecodedPos_ = lastSearchPos_;
AVPicture output;
avpicture_fill(&output, &data_[0], PIX_FMT_RGB24, width_, height_);
if (!convertCtx_) {
convertCtx_ = sws_getContext(width_, height_, codecContext->pix_fmt, width_, height_,
PIX_FMT_RGB24, SWS_BICUBIC, NULL, NULL, NULL);
}
sws_scale(convertCtx_, avFrame_->data, avFrame_->linesize, 0, height_, output.data, output.linesize);
return true;
}
int ffmpegReader::ffmpegFrame() const
{
int internalFrame = frame();
// ffmpeg uses 0-based frames for most codecs, so we need to convert Nuke's 1-based frames
// H264 seems to still use 1-based, however
if (codecZeroBasedStart_)
internalFrame --;
return internalFrame;
}
bool ffmpegReader::isCodecBlacklisted(const char* name)
{
const char** iterator = codecBlacklist;
while (*iterator != NULL) {
if (strncmp(name, *iterator, strlen(*iterator)) == 0) {
return true;
}
++iterator;
}
return false;
}
static Reader* build(Read* iop, int fd, const unsigned char* b, int n)
{
::close(fd);
return new ffmpegReader(iop);
}
static bool test(int fd, const unsigned char* block, int length)
{
return true;
}
const Reader::Description ffmpegReader::d("ffmpeg\0", build, test);
// Copyright (c) 2009 The Foundry Visionmongers Ltd. All Rights Reserved.
#include "Build/fnBuild.h"
#include "DDImage/DDString.h"
#include "DDImage/Writer.h"
#include "DDImage/Row.h"
#include "DDImage/Knobs.h"
#ifndef INT64_C
#define INT64_C(c) (c ## LL)
#define UINT64_C(c) (c ## ULL)
#endif
extern "C" {
#include <errno.h>
#include <libavformat/avformat.h>
#include <libavcodec/avcodec.h>
#include "libavutil/imgutils.h"
#include "libavformat/avio.h"
#include <libavutil/opt.h>
#include <libswscale/swscale.h>
#include <libavutil/avutil.h>
#include <libavutil/error.h>
}
using namespace DD::Image;
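// Writer that encodes Nuke's output through libavcodec/libavformat; the
// container format and codec are chosen via knobs (or guessed from the file
// extension) and frames are converted from RGB24 to the codec's pixel format
// with libswscale.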
class ffmpegWriter : public Writer
{
private:
enum WriterError { SUCCESS = 0, IGNORE_FINISH, CLEANUP };
public:
explicit ffmpegWriter(Write* iop);
~ffmpegWriter();
virtual bool movie() const { return true; }
void execute();
void finish();
void knobs(Knob_Callback f);
static const Writer::Description d;
private:
void freeFormat();
bool isCodecBlacklisted(const char* name);
private:
AVCodecContext* codecContext_;
AVFormatContext* formatContext_;
AVStream* stream_;
std::vector<std::string> formatsLongNames_;
std::vector<const char*> formatsShortNames_;
std::vector<const char*> codecsLongNames_;
std::vector<const char*> codecsShortNames_;
WriterError error_;
// knobs variables
float fps_;
int format_;
int codec_;
int bitrate_;
int bitrateTolerance_;
int gopSize_;
int bFrames_;
int mbDecision_;
};
static const char* codecBlacklist[] =
{
"r10k",
"ogg",
"mpjpeg",
"asf",
"asf_stream",
"h261",
"h263",
"rcv",
"yuv4mpegpipe",
"prores",
"dnxhd",
"swf",
"dv",
"ipod",
"psp",
"image2",
"3g2",
"3gp",
"RoQ",
#ifdef FN_OS_WINDOWS
"dirac",
"ffm",
#elif defined(FN_OS_MAC)
"mjpeg",
"vc1",
#elif defined(FN_OS_LINUX)
"mjpeg",
"vc1",
#endif
NULL
};
ffmpegWriter::ffmpegWriter(Write* iop)
: Writer(iop)
, codecContext_(NULL)
, formatContext_(NULL)
, stream_(0)
, error_(IGNORE_FINISH)
, fps_(DD::Image::root_real_fps())
, format_(0)
, codec_(0)
, bitrate_(400000)
, bitrateTolerance_(4000 * 10000)
, gopSize_(12)
, bFrames_(0)
, mbDecision_(FF_MB_DECISION_SIMPLE)
{
av_log_set_level(AV_LOG_WARNING);
av_register_all();
formatsLongNames_.push_back("default");
formatsShortNames_.push_back("default");
AVOutputFormat* fmt = av_oformat_next(NULL);
while (fmt) {
if (fmt->video_codec != CODEC_ID_NONE && !isCodecBlacklisted( fmt->name ) ) {
if (fmt->long_name) {
formatsLongNames_.push_back(std::string(fmt->long_name) + std::string(" (") + std::string(fmt->name) + std::string(")"));
formatsShortNames_.push_back(fmt->name);
}
}
fmt = av_oformat_next(fmt);
}
formatsShortNames_.push_back(0);
codecsLongNames_.push_back("default");
codecsShortNames_.push_back("default");
AVCodec* c = av_codec_next(NULL);
while (c) {
if (c->type == AVMEDIA_TYPE_VIDEO && c->encode && !isCodecBlacklisted( c->name ) ) {
if (c->long_name) {
codecsLongNames_.push_back(c->long_name);
codecsShortNames_.push_back(c->name);
}
}
c = av_codec_next(c);
}
codecsLongNames_.push_back(0);
codecsShortNames_.push_back(0);
}
bool ffmpegWriter::isCodecBlacklisted(const char* name)
{
const char** iterator = codecBlacklist;
while( *iterator != NULL ) {
if (strncmp(name, *iterator, strlen(*iterator)) == 0) {
return true;
}
++iterator;
}
return false;
}
ffmpegWriter::~ffmpegWriter()
{
av_free(codecContext_);
}
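// Called once per frame: the first call sets up the output format, stream and
// codec; every call then converts the incoming rows to RGB24, rescales to the
// codec's pixel format and encodes/writes one frame.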
void ffmpegWriter::execute()
{
error_ = IGNORE_FINISH;
AVOutputFormat* fmt = 0;
if (!format_) {
fmt = av_guess_format(NULL, filename(), NULL);
if (!fmt) {
iop->critical("could not deduce output format from file extension");
return;
}
}
else {
fmt = av_guess_format(formatsShortNames_[format_], NULL, NULL);
if (!fmt) {
iop->critical("could not deduce output format");
return;
}
}
if (!formatContext_)
avformat_alloc_output_context2(&formatContext_, fmt, NULL, filename());
snprintf(formatContext_->filename, sizeof(formatContext_->filename), "%s", filename());
CodecID codecId = fmt->video_codec;
if (codec_) {
AVCodec* userCodec = avcodec_find_encoder_by_name(codecsShortNames_[codec_]);
if (userCodec) {
codecId = userCodec->id;
}
}
AVCodec* videoCodec = avcodec_find_encoder(codecId);
if (!videoCodec) {
iop->critical("unable to find codec");
freeFormat();
return;
}
PixelFormat pixFMT = PIX_FMT_YUV420P;
if (videoCodec->pix_fmts != NULL) {
pixFMT = *videoCodec->pix_fmts;
}
else {
if (strcmp(fmt->name, "gif") == 0) {
pixFMT = PIX_FMT_RGB24;
}
}
if (isCodecBlacklisted(videoCodec->name)) {
iop->critical("unsupported codec");
freeFormat();
return;
}
if (!stream_) {
stream_ = avformat_new_stream(formatContext_, NULL);
if (!stream_) {
iop->critical("out of memory");
return;
}
codecContext_ = stream_->codec;
// this seems to be needed for certain codecs, as otherwise they don't have relevant options set
avcodec_get_context_defaults3(codecContext_, videoCodec);
codecContext_->pix_fmt = pixFMT; // first entry of the codec's supported pix_fmts; a user choice could be added here
codecContext_->bit_rate = bitrate_;
codecContext_->bit_rate_tolerance = bitrateTolerance_;
codecContext_->width = width();
codecContext_->height = height();
// Bug 23953
// ffmpeg does a poor job of converting floats to AVRationals (av_d2q rounds
// in surprising ways), so instead of computing 1/fps we multiply the fps by a
// fixed factor to make it an integer and set the timebase's numerator and
// denominator directly. (Remember the timebase is the inverse of the frame
// rate.) This lets ffmpeg handle fractional frame rates such as 23.976
// correctly.
//
// The code used to be:
//stream_->codec->time_base = av_d2q(1.0 / fps_, 100);
const float CONVERSION_FACTOR = 1000.0f;
codecContext_->time_base.num = (int) CONVERSION_FACTOR;
codecContext_->time_base.den = (int) (fps_ * CONVERSION_FACTOR);
//
codecContext_->gop_size = gopSize_;
// NOTE: in new ffmpeg, bframes don't seem to work correctly - ffmpeg crashes...
if (bFrames_) {
codecContext_->max_b_frames = bFrames_;
codecContext_->b_frame_strategy = 0;
codecContext_->b_quant_factor = 2.0f;
}
codecContext_->mb_decision = mbDecision_;
if (!strcmp(formatContext_->oformat->name, "mp4") || !strcmp(formatContext_->oformat->name, "mov") || !strcmp(formatContext_->oformat->name, "3gp"))
codecContext_->flags |= CODEC_FLAG_GLOBAL_HEADER;
if (formatContext_->oformat->flags & AVFMT_GLOBALHEADER)
codecContext_->flags |= CODEC_FLAG_GLOBAL_HEADER;
if (avcodec_open2(codecContext_, videoCodec, NULL) < 0) {
iop->critical("unable to open codec");
freeFormat();
return;
}
if (!(fmt->flags & AVFMT_NOFILE)) {
if (avio_open(&formatContext_->pb, filename(), AVIO_FLAG_WRITE) < 0) {
iop->critical("unable to open file");
freeFormat();
return;
}
}
avformat_write_header(formatContext_, NULL);
}
error_ = CLEANUP;
AVPicture picture;
int picSize = avpicture_get_size(PIX_FMT_RGB24, width(), height());
// allocate a buffer for the picture's image...
uint8_t* buffer = (uint8_t*)av_malloc(picSize);
// point the picture's data/linesize pointers at the buffer
avpicture_fill(&picture, buffer, PIX_FMT_RGB24, width(), height());
Row row(0, width());
input0().validate();
input0().request(0, 0, width(), height(), Mask_RGB, 1);
for (int y = 0; y < height(); ++y) {
get(y, 0, width(), Mask_RGB, row);
if (iop->aborted()) {
av_free(buffer);
return;
}
for (Channel z = Chan_Red; z <= Chan_Blue; incr(z)) {
const float* from = row[z];
to_byte(z - 1, picture.data[0] + (height() - y - 1) * picture.linesize[0] + z - 1, from, NULL, width(), 3);
}
}
// now allocate an image frame for the image in the output codec's format...
AVFrame* output = avcodec_alloc_frame();
picSize = avpicture_get_size(pixFMT, width(), height());
uint8_t* outBuffer = (uint8_t*)av_malloc(picSize);
// wire the output frame's data/linesize pointers into outBuffer (freed below)
avpicture_fill((AVPicture*)output, outBuffer, pixFMT, width(), height());
SwsContext* convertCtx = sws_getContext(width(), height(), PIX_FMT_RGB24, width(), height(),
pixFMT, SWS_BICUBIC, NULL, NULL, NULL);
int sliceHeight = sws_scale(convertCtx, picture.data, picture.linesize, 0, height(), output->data, output->linesize);
assert(sliceHeight > 0);
int ret = 0;
if ((formatContext_->oformat->flags & AVFMT_RAWPICTURE) != 0) {
AVPacket pkt;
av_init_packet(&pkt);
pkt.flags |= AV_PKT_FLAG_KEY;
pkt.stream_index = stream_->index;
pkt.data = (uint8_t*)output;
pkt.size = sizeof(AVPicture);
ret = av_interleaved_write_frame(formatContext_, &pkt);
}
else {
uint8_t* outbuf = (uint8_t*)av_malloc(picSize);
assert(outbuf != NULL);
ret = avcodec_encode_video(codecContext_, outbuf, picSize, output);
if (ret > 0) {
AVPacket pkt;
av_init_packet(&pkt);
if (codecContext_->coded_frame && static_cast<uint64_t>(codecContext_->coded_frame->pts) != AV_NOPTS_VALUE)
pkt.pts = av_rescale_q(codecContext_->coded_frame->pts, codecContext_->time_base, stream_->time_base);
if (codecContext_->coded_frame && codecContext_->coded_frame->key_frame)
pkt.flags |= AV_PKT_FLAG_KEY;
pkt.stream_index = stream_->index;
pkt.data = outbuf;
pkt.size = ret;
ret = av_interleaved_write_frame(formatContext_, &pkt);
}
else {
// we've got an error
char szError[1024];
av_strerror(ret, szError, 1024);
iop->error(szError);
}
av_free(outbuf);
}
av_free(outBuffer);
av_free(buffer);
av_free(output);
if (ret) {
iop->critical("error writing frame to file");
return;
}
error_ = SUCCESS;
}
void ffmpegWriter::finish()
{
if (error_ == IGNORE_FINISH)
return;
av_write_trailer(formatContext_);
avcodec_close(codecContext_);
if (!(formatContext_->oformat->flags & AVFMT_NOFILE))
avio_close(formatContext_->pb);
freeFormat();
}
void ffmpegWriter::knobs(Knob_Callback f)
{
static std::vector<const char*> formatsAliases;
formatsAliases.resize(formatsLongNames_.size());
for (int i = 0; i < static_cast<int>(formatsLongNames_.size()); ++i)
formatsAliases[i] = formatsLongNames_[i].c_str();
formatsAliases.push_back(0);
Enumeration_knob(f, &format_, &formatsAliases[0], "format");
Float_knob(f, &fps_, IRange(0.0, 100.0f), "fps");
BeginClosedGroup(f, "Advanced");
Enumeration_knob(f, &codec_, &codecsLongNames_[0], "codec");
Int_knob(f, &bitrate_, IRange(0.0, 400000), "bitrate");
SetFlags(f, Knob::SLIDER | Knob::LOG_SLIDER);
Int_knob(f, &bitrateTolerance_, IRange(0, 4000 * 10000), "bitrateTol", "bitrate tolerance");
SetFlags(f, Knob::SLIDER | Knob::LOG_SLIDER);
Int_knob(f, &gopSize_, IRange(0, 30), "gopSize", "GOP size");
SetFlags(f, Knob::SLIDER | Knob::LOG_SLIDER);
Int_knob(f, &bFrames_, IRange(0, 30), "bFrames", "B Frames");
SetFlags(f, Knob::SLIDER | Knob::LOG_SLIDER);
static const char* mbDecisionTypes[] = {
"FF_MB_DECISION_SIMPLE", "FF_MB_DECISION_BITS", "FF_MB_DECISION_RD", 0
};
Enumeration_knob(f, &mbDecision_, mbDecisionTypes, "mbDecision", "macro block decision mode");
EndGroup(f);
}
void ffmpegWriter::freeFormat()
{
for (int i = 0; i < static_cast<int>(formatContext_->nb_streams); ++i)
av_freep(&formatContext_->streams[i]);
av_free(formatContext_);
formatContext_ = NULL;
stream_ = NULL;
}
static Writer* build(Write* iop)
{
return new ffmpegWriter(iop);
}
const Writer::Description ffmpegWriter::d("ffmpeg\0mov\0avi\0", build);
_______________________________________________
Nuke-python mailing list
Nuke-python@support.thefoundry.co.uk, http://forums.thefoundry.co.uk/
http://support.thefoundry.co.uk/cgi-bin/mailman/listinfo/nuke-python