[FFmpeg-devel] [PATCHv2 2/2] avdevice/sdl2 : add option to set window position
From: Dave Rice Allows arrangement of multiple windows such as: ffmpeg -re -f lavfi -i mandelbrot -f sdl -window_x 1 -window_y 1 mandelbrot -vf waveform,format=yuv420p -f sdl -window_x 641 -window_y 1 waveform -vf vectorscope,format=yuv420p -f sdl -window_x 1 -window_y 481 vectorscop Some changes by Marton Balint: - allow negative position (partially or fully out-of-screen positions seem to be sanitized automatically by SDL (or my WM?), so no special handling is needed) - only show window after the position is set - do not use resizable and borderless flags at the same time, that caused issues in ffplay - add docs Signed-off-by: Marton Balint --- doc/outdevs.texi | 4 libavdevice/sdl2.c| 15 ++- libavdevice/version.h | 4 ++-- 3 files changed, 16 insertions(+), 7 deletions(-) diff --git a/doc/outdevs.texi b/doc/outdevs.texi index 2518f9b559..7509ac695d 100644 --- a/doc/outdevs.texi +++ b/doc/outdevs.texi @@ -398,6 +398,10 @@ Set the SDL window size, can be a string of the form If not specified it defaults to the size of the input video, downscaled according to the aspect ratio. +@item window_x +@item window_y +Set the position of the window on the screen. + @item window_fullscreen Set fullscreen mode when non-zero value is provided. Default value is zero. 
diff --git a/libavdevice/sdl2.c b/libavdevice/sdl2.c index da5143078e..72ebca90d8 100644 --- a/libavdevice/sdl2.c +++ b/libavdevice/sdl2.c @@ -40,6 +40,7 @@ typedef struct { SDL_Renderer *renderer; char *window_title; int window_width, window_height; /**< size of the window */ +int window_x, window_y; /**< position of the window */ int window_fullscreen; int window_borderless; int enable_quit_action; @@ -155,8 +156,6 @@ static int sdl2_write_trailer(AVFormatContext *s) return 0; } -#define SDL_BASE_FLAGS (SDL_SWSURFACE|SDL_WINDOW_RESIZABLE) - static int sdl2_write_header(AVFormatContext *s) { SDLContext *sdl = s->priv_data; @@ -196,8 +195,9 @@ static int sdl2_write_header(AVFormatContext *s) } /* resize texture to width and height from the codec context information */ -flags = SDL_BASE_FLAGS | (sdl->window_fullscreen ? SDL_WINDOW_FULLSCREEN : 0) | - (sdl->window_borderless ? SDL_WINDOW_BORDERLESS : 0); +flags = SDL_WINDOW_HIDDEN | +(sdl->window_fullscreen ? SDL_WINDOW_FULLSCREEN : 0) | +(sdl->window_borderless ? 
SDL_WINDOW_BORDERLESS : SDL_WINDOW_RESIZABLE); /* initialization */ if (!sdl->inited){ @@ -210,12 +210,15 @@ static int sdl2_write_header(AVFormatContext *s) compute_texture_rect(s); if (SDL_CreateWindowAndRenderer(sdl->window_width, sdl->window_height, -flags, &sdl->window, &sdl->renderer) != 0){ +flags, &sdl->window, +&sdl->renderer) != 0) { av_log(sdl, AV_LOG_ERROR, "Couldn't create window and renderer: %s\n", SDL_GetError()); goto fail; } SDL_SetWindowTitle(sdl->window, sdl->window_title); +SDL_SetWindowPosition(sdl->window, sdl->window_x, sdl->window_y); +SDL_ShowWindow(sdl->window); sdl->texture = SDL_CreateTexture(sdl->renderer, sdl->texture_fmt, SDL_TEXTUREACCESS_STREAMING, codecpar->width, codecpar->height); @@ -337,6 +340,8 @@ static int sdl2_write_packet(AVFormatContext *s, AVPacket *pkt) static const AVOption options[] = { { "window_title", "set SDL window title", OFFSET(window_title), AV_OPT_TYPE_STRING, { .str = NULL }, 0, 0, AV_OPT_FLAG_ENCODING_PARAM }, { "window_size", "set SDL window forced size", OFFSET(window_width), AV_OPT_TYPE_IMAGE_SIZE, { .str = NULL }, 0, 0, AV_OPT_FLAG_ENCODING_PARAM }, +{ "window_x", "set SDL window x position", OFFSET(window_x), AV_OPT_TYPE_INT,{ .i64 = SDL_WINDOWPOS_CENTERED }, INT_MIN, INT_MAX, AV_OPT_FLAG_ENCODING_PARAM }, +{ "window_y", "set SDL window y position", OFFSET(window_y), AV_OPT_TYPE_INT,{ .i64 = SDL_WINDOWPOS_CENTERED }, INT_MIN, INT_MAX, AV_OPT_FLAG_ENCODING_PARAM }, { "window_fullscreen", "set SDL window fullscreen", OFFSET(window_fullscreen), AV_OPT_TYPE_BOOL, { .i64 = 0 },0, 1, AV_OPT_FLAG_ENCODING_PARAM }, { "window_borderless", "set SDL window border off", OFFSET(window_borderless), AV_OPT_TYPE_BOOL, { .i64 = 0 },0, 1, AV_OPT_FLAG_ENCODING_PARAM }, { "window_enable_quit", "set if quit action is available", OFFSET(enable_quit_action), AV_OPT_TYPE_INT, {.i64=1}, 0, 1, AV_OPT_FLAG_ENCODING_PARAM }, diff --git a/libavdevice/version.h b/libavdevice/version.h index e6ee009cc4..bce104b905 100644 --- 
a/libavdevice/version.h +++ b/libavdevice/version.h @@ -28,8 +28,8 @@ #include "libavutil/version.h" #define LIBAVDEVICE_VERSION_MAJOR 58 -#define LIBAVDEVICE_VERSION_MI
Re: [FFmpeg-devel] [PATCH] decklink: Fix compile breakage on OSX
On Fri, 19 Oct 2018, Devin Heitmueller wrote: Make the function static, or else Clang complains with: error: no previous prototype for function 'decklink_get_attr_string' [-Werror,-Wmissing-prototypes] Oops, sorry. Applied, thanks. Marton Signed-off-by: Devin Heitmueller --- libavdevice/decklink_common.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/libavdevice/decklink_common.cpp b/libavdevice/decklink_common.cpp index b88d6c6219..130e70b2ca 100644 --- a/libavdevice/decklink_common.cpp +++ b/libavdevice/decklink_common.cpp @@ -77,7 +77,7 @@ static IDeckLinkIterator *decklink_create_iterator(AVFormatContext *avctx) return iter; } -int decklink_get_attr_string(IDeckLink *dl, BMDDeckLinkAttributeID cfg_id, const char **s) +static int decklink_get_attr_string(IDeckLink *dl, BMDDeckLinkAttributeID cfg_id, const char **s) { DECKLINK_STR tmp; HRESULT hr; -- 2.13.2 ___ ffmpeg-devel mailing list ffmpeg-devel@ffmpeg.org http://ffmpeg.org/mailman/listinfo/ffmpeg-devel ___ ffmpeg-devel mailing list ffmpeg-devel@ffmpeg.org http://ffmpeg.org/mailman/listinfo/ffmpeg-devel
[FFmpeg-devel] [PATCH v3] avcodec: libdav1d AV1 decoder wrapper.
Originally written by Ronald S. Bultje, with fixes, optimizations and improvements by James Almer. Signed-off-by: James Almer --- Updated to work with libdav1d git head. configure | 4 + libavcodec/Makefile| 1 + libavcodec/allcodecs.c | 1 + libavcodec/libdav1d.c | 271 + 4 files changed, 277 insertions(+) create mode 100644 libavcodec/libdav1d.c diff --git a/configure b/configure index 85d5dd5962..99e1a6b76b 100755 --- a/configure +++ b/configure @@ -226,6 +226,7 @@ External library support: --enable-libcelt enable CELT decoding via libcelt [no] --enable-libcdio enable audio CD grabbing with libcdio [no] --enable-libcodec2 enable codec2 en/decoding using libcodec2 [no] + --enable-libdav1denable AV1 decoding via libdav1d [no] --enable-libdavs2enable AVS2 decoding via libdavs2 [no] --enable-libdc1394 enable IIDC-1394 grabbing using libdc1394 and libraw1394 [no] @@ -1712,6 +1713,7 @@ EXTERNAL_LIBRARY_LIST=" libcaca libcelt libcodec2 +libdav1d libdc1394 libdrm libflite @@ -3088,6 +3090,7 @@ libaom_av1_encoder_select="extract_extradata_bsf" libcelt_decoder_deps="libcelt" libcodec2_decoder_deps="libcodec2" libcodec2_encoder_deps="libcodec2" +libdav1d_decoder_deps="libdav1d" libdavs2_decoder_deps="libdavs2" libfdk_aac_decoder_deps="libfdk_aac" libfdk_aac_encoder_deps="libfdk_aac" @@ -6063,6 +6066,7 @@ enabled libcelt && require libcelt celt/celt.h celt_decode -lcelt0 && die "ERROR: libcelt must be installed and version must be >= 0.11.0."; } enabled libcaca && require_pkg_config libcaca caca caca.h caca_create_canvas enabled libcodec2 && require libcodec2 codec2/codec2.h codec2_create -lcodec2 +enabled libdav1d && require_pkg_config libdav1d "dav1d >= 0.0.1" "dav1d/dav1d.h" dav1d_version enabled libdavs2 && require_pkg_config libdavs2 "davs2 >= 1.5.115" davs2.h davs2_decoder_open enabled libdc1394 && require_pkg_config libdc1394 libdc1394-2 dc1394/dc1394.h dc1394_new enabled libdrm&& require_pkg_config libdrm libdrm xf86drm.h drmGetVersion diff --git a/libavcodec/Makefile 
b/libavcodec/Makefile index a97055ef3f..0f1e93d478 100644 --- a/libavcodec/Makefile +++ b/libavcodec/Makefile @@ -954,6 +954,7 @@ OBJS-$(CONFIG_LIBAOM_AV1_ENCODER) += libaomenc.o OBJS-$(CONFIG_LIBCELT_DECODER)+= libcelt_dec.o OBJS-$(CONFIG_LIBCODEC2_DECODER) += libcodec2.o codec2utils.o OBJS-$(CONFIG_LIBCODEC2_ENCODER) += libcodec2.o codec2utils.o +OBJS-$(CONFIG_LIBDAV1D_DECODER) += libdav1d.o OBJS-$(CONFIG_LIBDAVS2_DECODER) += libdavs2.o OBJS-$(CONFIG_LIBFDK_AAC_DECODER) += libfdk-aacdec.o OBJS-$(CONFIG_LIBFDK_AAC_ENCODER) += libfdk-aacenc.o diff --git a/libavcodec/allcodecs.c b/libavcodec/allcodecs.c index c0b4d56d0d..d2628df620 100644 --- a/libavcodec/allcodecs.c +++ b/libavcodec/allcodecs.c @@ -674,6 +674,7 @@ extern AVCodec ff_libaom_av1_encoder; extern AVCodec ff_libcelt_decoder; extern AVCodec ff_libcodec2_encoder; extern AVCodec ff_libcodec2_decoder; +extern AVCodec ff_libdav1d_decoder; extern AVCodec ff_libdavs2_decoder; extern AVCodec ff_libfdk_aac_encoder; extern AVCodec ff_libfdk_aac_decoder; diff --git a/libavcodec/libdav1d.c b/libavcodec/libdav1d.c new file mode 100644 index 00..da826a42ed --- /dev/null +++ b/libavcodec/libdav1d.c @@ -0,0 +1,271 @@ +/* + * Copyright (c) 2018 Ronald S. Bultje + * Copyright (c) 2018 James Almer + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. 
+ * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#include + +#include "libavutil/avassert.h" +#include "libavutil/fifo.h" +#include "libavutil/opt.h" + +#include "avcodec.h" +#include "decode.h" +#include "internal.h" + +typedef struct Libdav1dContext { +AVClass *class; +Dav1dContext *c; + +AVFifoBuffer *cache; +Dav1dData data; +int frame_threads, tile_threads; +} Libdav1dContext; + +static av_cold int libdav1d_init(AVCodecContext *c) +{ +Libdav1dContext *dav1d = c->priv_data; +Dav1dSettings s; +int res; + +av_log(c, AV_LOG_INFO, "libdav1d %s\n", dav1d_ve
Re: [FFmpeg-devel] [PATCH] fftools/ffmpeg.c: allow forcing input framerate on streamcopy
On 10/19/18 3:02 PM, Carl Eugen Hoyos wrote: 2018-10-19 20:39 GMT+02:00, Leo Izen : On 10/19/18 2:26 PM, Carl Eugen Hoyos wrote: 2018-10-19 4:58 GMT+02:00, Leo Izen : --- fftools/ffmpeg.c | 8 +--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/fftools/ffmpeg.c b/fftools/ffmpeg.c index da4259a9a8..5d68194676 100644 --- a/fftools/ffmpeg.c +++ b/fftools/ffmpeg.c @@ -2045,12 +2045,14 @@ static void do_streamcopy(InputStream *ist, OutputStream *ost, const AVPacket *p if (ost->enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO) ost->sync_opts++; -if (pkt->pts != AV_NOPTS_VALUE) +if (ist->framerate.num) +opkt.pts = av_rescale_q(ist->pts, AV_TIME_BASE_Q, ost->mux_timebase) - ost_tb_start_time; +else if (pkt->pts != AV_NOPTS_VALUE) opkt.pts = av_rescale_q(pkt->pts, ist->st->time_base, ost->mux_timebase) - ost_tb_start_time; else opkt.pts = AV_NOPTS_VALUE; -if (pkt->dts == AV_NOPTS_VALUE) +if (pkt->dts == AV_NOPTS_VALUE || ist->framerate.num) opkt.dts = av_rescale_q(ist->dts, AV_TIME_BASE_Q, ost->mux_timebase); else opkt.dts = av_rescale_q(pkt->dts, ist->st->time_base, ost->mux_timebase); @@ -2602,7 +2604,7 @@ static int process_input_packet(InputStream *ist, const AVPacket *pkt, int no_eo avpkt = *pkt; } -if (pkt && pkt->dts != AV_NOPTS_VALUE) { +if (pkt && pkt->dts != AV_NOPTS_VALUE && !ist->framerate.num) { ist->next_dts = ist->dts = av_rescale_q(pkt->dts, ist->st->time_base, AV_TIME_BASE_Q); if (ist->dec_ctx->codec_type != AVMEDIA_TYPE_VIDEO || !ist->decoding_needed) ist->next_pts = ist->pts = ist->dts; How can this be tested? Carl Eugen I'm not entirely sure. I ran "make fate" and it passed, and I successfully rescaled a 30fps clip to 15fps, 20fps, 45fps, and 60fps using -r:v as an input option. I'm not entirely sure what the standard procedure is for performing more rigorous tests. What I meant was: Which kind of input and output did you use to test your patch? Carl Eugen ___ It worked perfectly at various framerates for AVI files. 
It also worked in practice for MP4 and Matroska files, although my patch doesn't properly set the avg_frame_rate and r_frame_rate of the output stream. Intelligent players like MPV play it fine and ignore the incorrect *_frame_rate metadata. Anyway, here's an updated patch that fixes that problem so it shouldn't be an issue. Leo Izen --- fftools/ffmpeg.c | 19 ++- 1 file changed, 14 insertions(+), 5 deletions(-) diff --git a/fftools/ffmpeg.c b/fftools/ffmpeg.c index da4259a9a8..6e81716795 100644 --- a/fftools/ffmpeg.c +++ b/fftools/ffmpeg.c @@ -2045,12 +2045,14 @@ static void do_streamcopy(InputStream *ist, OutputStream *ost, const AVPacket *p if (ost->enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO) ost->sync_opts++; - if (pkt->pts != AV_NOPTS_VALUE) + if (ist->framerate.num) + opkt.pts = av_rescale_q(ist->pts, AV_TIME_BASE_Q, ost->mux_timebase) - ost_tb_start_time; + else if (pkt->pts != AV_NOPTS_VALUE) opkt.pts = av_rescale_q(pkt->pts, ist->st->time_base, ost->mux_timebase) - ost_tb_start_time; else opkt.pts = AV_NOPTS_VALUE; - if (pkt->dts == AV_NOPTS_VALUE) + if (pkt->dts == AV_NOPTS_VALUE || ist->framerate.num) opkt.dts = av_rescale_q(ist->dts, AV_TIME_BASE_Q, ost->mux_timebase); else opkt.dts = av_rescale_q(pkt->dts, ist->st->time_base, ost->mux_timebase); @@ -2602,7 +2604,7 @@ static int process_input_packet(InputStream *ist, const AVPacket *pkt, int no_eo avpkt = *pkt; } - if (pkt && pkt->dts != AV_NOPTS_VALUE) { + if (pkt && pkt->dts != AV_NOPTS_VALUE && !ist->framerate.num) { ist->next_dts = ist->dts = av_rescale_q(pkt->dts, ist->st->time_base, AV_TIME_BASE_Q); if (ist->dec_ctx->codec_type != AVMEDIA_TYPE_VIDEO || !ist->decoding_needed) ist->next_pts = ist->pts = ist->dts; @@ -3158,8 +3160,15 @@ static int init_output_stream_streamcopy(OutputStream *ost) else sar = par_src->sample_aspect_ratio; ost->st->sample_aspect_ratio = par_dst->sample_aspect_ratio = sar; - ost->st->avg_frame_rate = ist->st->avg_frame_rate; - ost->st->r_frame_rate = ist->st->r_frame_rate; 
+ + if (ist->framerate.num) { + ost->st->avg_frame_rate = ist->framerate; + ost->st->r_frame_rate = ist->framerate; + } else { + ost->st->avg_frame_rate = ist->st->avg_frame_rate; + ost->st->r_frame_rate = ist->st->r_frame_rate; + } + break; } -- 2.19.1 ___ ffmpeg-devel mailing list ffmpeg-devel@ffmpeg.org http://ffmpeg.org/mailman/listinfo/ffmpeg-devel
Re: [FFmpeg-devel] avcodec/proresenc_aw improvements
On 7/23/18, Martin Vignali wrote: >> > >> >> > Do you think it's better to only authorize few colorspace ? >> >> depends on what happens if "others" are stored. >> if the official decoders fail with a blank screen then it's probably >> not a good idea to use such a value. If OTOH they ignore values they >> do not support then it may be ok. >> > > > Seems like not all prores decoder use these values. > Will check, with official decoder. > > Martin > ___ > ffmpeg-devel mailing list > ffmpeg-devel@ffmpeg.org > http://ffmpeg.org/mailman/listinfo/ffmpeg-devel > See coverity bug report, avctx->profile is not checked for valid values I think. ___ ffmpeg-devel mailing list ffmpeg-devel@ffmpeg.org http://ffmpeg.org/mailman/listinfo/ffmpeg-devel
[FFmpeg-devel] [PATCH] decklink: Fix compile breakage on OSX
Make the function static, or else Clang complains with: error: no previous prototype for function 'decklink_get_attr_string' [-Werror,-Wmissing-prototypes] Signed-off-by: Devin Heitmueller --- libavdevice/decklink_common.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/libavdevice/decklink_common.cpp b/libavdevice/decklink_common.cpp index b88d6c6219..130e70b2ca 100644 --- a/libavdevice/decklink_common.cpp +++ b/libavdevice/decklink_common.cpp @@ -77,7 +77,7 @@ static IDeckLinkIterator *decklink_create_iterator(AVFormatContext *avctx) return iter; } -int decklink_get_attr_string(IDeckLink *dl, BMDDeckLinkAttributeID cfg_id, const char **s) +static int decklink_get_attr_string(IDeckLink *dl, BMDDeckLinkAttributeID cfg_id, const char **s) { DECKLINK_STR tmp; HRESULT hr; -- 2.13.2 ___ ffmpeg-devel mailing list ffmpeg-devel@ffmpeg.org http://ffmpeg.org/mailman/listinfo/ffmpeg-devel
[FFmpeg-devel] [PATCH] avfilter: add vibrance filter
Signed-off-by: Paul B Mahol --- libavfilter/Makefile | 1 + libavfilter/allfilters.c | 1 + libavfilter/vf_vibrance.c | 169 ++ 3 files changed, 171 insertions(+) create mode 100644 libavfilter/vf_vibrance.c diff --git a/libavfilter/Makefile b/libavfilter/Makefile index 46c6023bcc..38fe649078 100644 --- a/libavfilter/Makefile +++ b/libavfilter/Makefile @@ -393,6 +393,7 @@ OBJS-$(CONFIG_VAGUEDENOISER_FILTER) += vf_vaguedenoiser.o OBJS-$(CONFIG_VECTORSCOPE_FILTER)+= vf_vectorscope.o OBJS-$(CONFIG_VFLIP_FILTER) += vf_vflip.o OBJS-$(CONFIG_VFRDET_FILTER) += vf_vfrdet.o +OBJS-$(CONFIG_VIBRANCE_FILTER) += vf_vibrance.o OBJS-$(CONFIG_VIDSTABDETECT_FILTER) += vidstabutils.o vf_vidstabdetect.o OBJS-$(CONFIG_VIDSTABTRANSFORM_FILTER) += vidstabutils.o vf_vidstabtransform.o OBJS-$(CONFIG_VIGNETTE_FILTER) += vf_vignette.o diff --git a/libavfilter/allfilters.c b/libavfilter/allfilters.c index 536765581b..2289efbb5b 100644 --- a/libavfilter/allfilters.c +++ b/libavfilter/allfilters.c @@ -374,6 +374,7 @@ extern AVFilter ff_vf_vaguedenoiser; extern AVFilter ff_vf_vectorscope; extern AVFilter ff_vf_vflip; extern AVFilter ff_vf_vfrdet; +extern AVFilter ff_vf_vibrance; extern AVFilter ff_vf_vidstabdetect; extern AVFilter ff_vf_vidstabtransform; extern AVFilter ff_vf_vignette; diff --git a/libavfilter/vf_vibrance.c b/libavfilter/vf_vibrance.c new file mode 100644 index 00..df07b914cb --- /dev/null +++ b/libavfilter/vf_vibrance.c @@ -0,0 +1,169 @@ +/* + * Copyright (c) 2018 Paul B Mahol + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#include "libavutil/opt.h" +#include "libavutil/imgutils.h" +#include "avfilter.h" +#include "formats.h" +#include "internal.h" +#include "video.h" + +typedef struct VibranceContext { +const AVClass *class; + +float intensity; + +int depth; +int planewidth[4]; +int planeheight[4]; + +int (*do_slice)(AVFilterContext *s, void *arg, +int jobnr, int nb_jobs); +} VibranceContext; + +static int vibrance_slice(AVFilterContext *avctx, void *arg, int jobnr, int nb_jobs) +{ +VibranceContext *s = avctx->priv; +AVFrame *frame = arg; +const float intensity = s->intensity; +const float abs_intensity = fabsf(intensity); +const float one_intensity = 1.f - abs_intensity; +const int slice_start = (s->planeheight[1] * jobnr) / nb_jobs; +const int slice_end = (s->planeheight[1] * (jobnr + 1)) / nb_jobs; +const int ulinesize = frame->linesize[1]; +const int vlinesize = frame->linesize[2]; +uint8_t *uptr = frame->data[1] + slice_start * ulinesize; +uint8_t *vptr = frame->data[2] + slice_start * vlinesize; + +for (int y = slice_start; y < slice_end; y++) { +for (int x = 0; x < s->planewidth[1]; x++) { +float saturation, target_saturation, hue; +int u = uptr[x] - 128; +int v = vptr[x] - 128; + +saturation = hypotf(u, v) / 180.f; +if (intensity < 0) { +target_saturation = saturation * saturation; +} else { +target_saturation = sqrtf(saturation); +} +saturation = abs_intensity * target_saturation + one_intensity * saturation; +hue = atan2f(v, u); + +uptr[x] = av_clip_uint8(floorf(128 + 180.f * saturation * cosf(hue))); +vptr[x] = av_clip_uint8(floorf(128 + 180.f * saturation * sinf(hue))); +} +uptr += ulinesize; +vptr += vlinesize; +} + +return 0; +} + +static int filter_frame(AVFilterLink *link, AVFrame 
*frame) +{ +AVFilterContext *avctx = link->dst; +VibranceContext *s = avctx->priv; +int res; + +if (res = avctx->internal->execute(avctx, s->do_slice, frame, NULL, + FFMIN(frame->height, ff_filter_get_nb_threads(avctx +return res; + +return ff_filter_frame(avctx->outputs[0], frame); +} + +static av_cold int query_formats(AVFilterContext *avctx) +{ +static const enum AVPixelFormat pixel_fmts[] = { +AV_PIX_FMT_YUV420P, +AV_PIX_FMT_YUV422P, +AV_PIX
Re: [FFmpeg-devel] [PATCH] fftools/ffmpeg.c: allow forcing input framerate on streamcopy
2018-10-19 20:39 GMT+02:00, Leo Izen : > On 10/19/18 2:26 PM, Carl Eugen Hoyos wrote: >> 2018-10-19 4:58 GMT+02:00, Leo Izen : >>> --- >>> fftools/ffmpeg.c | 8 +--- >>> 1 file changed, 5 insertions(+), 3 deletions(-) >>> >>> diff --git a/fftools/ffmpeg.c b/fftools/ffmpeg.c >>> index da4259a9a8..5d68194676 100644 >>> --- a/fftools/ffmpeg.c >>> +++ b/fftools/ffmpeg.c >>> @@ -2045,12 +2045,14 @@ static void do_streamcopy(InputStream *ist, >>> OutputStream *ost, const AVPacket *p >>> if (ost->enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO) >>> ost->sync_opts++; >>> >>> -if (pkt->pts != AV_NOPTS_VALUE) >>> +if (ist->framerate.num) >>> +opkt.pts = av_rescale_q(ist->pts, AV_TIME_BASE_Q, >>> ost->mux_timebase) - ost_tb_start_time; >>> +else if (pkt->pts != AV_NOPTS_VALUE) >>> opkt.pts = av_rescale_q(pkt->pts, ist->st->time_base, >>> ost->mux_timebase) - ost_tb_start_time; >>> else >>> opkt.pts = AV_NOPTS_VALUE; >>> >>> -if (pkt->dts == AV_NOPTS_VALUE) >>> +if (pkt->dts == AV_NOPTS_VALUE || ist->framerate.num) >>> opkt.dts = av_rescale_q(ist->dts, AV_TIME_BASE_Q, >>> ost->mux_timebase); >>> else >>> opkt.dts = av_rescale_q(pkt->dts, ist->st->time_base, >>> ost->mux_timebase); >>> @@ -2602,7 +2604,7 @@ static int process_input_packet(InputStream *ist, >>> const AVPacket *pkt, int no_eo >>> avpkt = *pkt; >>> } >>> >>> -if (pkt && pkt->dts != AV_NOPTS_VALUE) { >>> +if (pkt && pkt->dts != AV_NOPTS_VALUE && !ist->framerate.num) { >>> ist->next_dts = ist->dts = av_rescale_q(pkt->dts, >>> ist->st->time_base, AV_TIME_BASE_Q); >>> if (ist->dec_ctx->codec_type != AVMEDIA_TYPE_VIDEO || >>> !ist->decoding_needed) >>> ist->next_pts = ist->pts = ist->dts; >> How can this be tested? >> >> Carl Eugen > > I'm not entirely sure. I ran "make fate" and it passed, and I > successfully rescaled a 30fps clip to 15fps, 20fps, 45fps, and 60fps > using -r:v as an input option. I'm not entirely sure what the standard > procedure is for performing more rigorous tests. 
What I meant was: Which kind of input and output did you use to test your patch? Carl Eugen ___ ffmpeg-devel mailing list ffmpeg-devel@ffmpeg.org http://ffmpeg.org/mailman/listinfo/ffmpeg-devel
Re: [FFmpeg-devel] [PATCH] fftools/ffmpeg.c: allow forcing input framerate on streamcopy
On 10/19/18 2:26 PM, Carl Eugen Hoyos wrote: 2018-10-19 4:58 GMT+02:00, Leo Izen : --- fftools/ffmpeg.c | 8 +--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/fftools/ffmpeg.c b/fftools/ffmpeg.c index da4259a9a8..5d68194676 100644 --- a/fftools/ffmpeg.c +++ b/fftools/ffmpeg.c @@ -2045,12 +2045,14 @@ static void do_streamcopy(InputStream *ist, OutputStream *ost, const AVPacket *p if (ost->enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO) ost->sync_opts++; -if (pkt->pts != AV_NOPTS_VALUE) +if (ist->framerate.num) +opkt.pts = av_rescale_q(ist->pts, AV_TIME_BASE_Q, ost->mux_timebase) - ost_tb_start_time; +else if (pkt->pts != AV_NOPTS_VALUE) opkt.pts = av_rescale_q(pkt->pts, ist->st->time_base, ost->mux_timebase) - ost_tb_start_time; else opkt.pts = AV_NOPTS_VALUE; -if (pkt->dts == AV_NOPTS_VALUE) +if (pkt->dts == AV_NOPTS_VALUE || ist->framerate.num) opkt.dts = av_rescale_q(ist->dts, AV_TIME_BASE_Q, ost->mux_timebase); else opkt.dts = av_rescale_q(pkt->dts, ist->st->time_base, ost->mux_timebase); @@ -2602,7 +2604,7 @@ static int process_input_packet(InputStream *ist, const AVPacket *pkt, int no_eo avpkt = *pkt; } -if (pkt && pkt->dts != AV_NOPTS_VALUE) { +if (pkt && pkt->dts != AV_NOPTS_VALUE && !ist->framerate.num) { ist->next_dts = ist->dts = av_rescale_q(pkt->dts, ist->st->time_base, AV_TIME_BASE_Q); if (ist->dec_ctx->codec_type != AVMEDIA_TYPE_VIDEO || !ist->decoding_needed) ist->next_pts = ist->pts = ist->dts; How can this be tested? Carl Eugen I'm not entirely sure. I ran "make fate" and it passed, and I successfully rescaled a 30fps clip to 15fps, 20fps, 45fps, and 60fps using -r:v as an input option. I'm not entirely sure what the standard procedure is for performing more rigorous tests. Leo Izen ___ ffmpeg-devel mailing list ffmpeg-devel@ffmpeg.org http://ffmpeg.org/mailman/listinfo/ffmpeg-devel
Re: [FFmpeg-devel] [PATCH]lavc/sinewin: Do not declare tables as const
2018-10-19 20:35 GMT+02:00, Paul B Mahol : > On 10/19/18, Carl Eugen Hoyos wrote: >> 2018-10-16 23:58 GMT+02:00, Carl Eugen Hoyos : >>> 2018-10-14 23:51 GMT+02:00, Carl Eugen Hoyos : 2018-10-14 22:30 GMT+02:00, Hendrik Leppkes : > On Sun, Oct 14, 2018 at 10:18 PM Carl Eugen Hoyos > wrote: >> >> Attached patch is supposed to fix ticket #7491, I currently >> don't have gcc 8 to test myself. > > Only the 120 and 960 tables are affected by this bug because > they are not being created by the hardcoded tables logic, yet > this patch changes the const attribute for all tables, defeating > one purpose of the hardcoded tables. > Can we adjust this to only affect the two tables that need it Did that in attached patch, please comment. >>> >>> I will push this patch if there are no objections. >> >> Patch applied. > > Very ugly solution. I don't disagree but keeping the hard-to-debug crash is not a solution imo. Carl Eugen ___ ffmpeg-devel mailing list ffmpeg-devel@ffmpeg.org http://ffmpeg.org/mailman/listinfo/ffmpeg-devel
Re: [FFmpeg-devel] [PATCH]lavf/matroskadec: Simplify string length calculation slightly
2018-09-26 23:30 GMT+02:00, Carl Eugen Hoyos : > 2018-09-19 19:18 GMT+02:00, Carl Eugen Hoyos : > >> Attached patch removes useless sizeof's, we require sizeof(char)==1 >> afaict. > > I will push this if there are no objections. Patch applied. Carl Eugen ___ ffmpeg-devel mailing list ffmpeg-devel@ffmpeg.org http://ffmpeg.org/mailman/listinfo/ffmpeg-devel
Re: [FFmpeg-devel] [PATCH]lavc/sinewin: Do not declare tables as const
On 10/19/18, Carl Eugen Hoyos wrote: > 2018-10-16 23:58 GMT+02:00, Carl Eugen Hoyos : >> 2018-10-14 23:51 GMT+02:00, Carl Eugen Hoyos : >>> 2018-10-14 22:30 GMT+02:00, Hendrik Leppkes : On Sun, Oct 14, 2018 at 10:18 PM Carl Eugen Hoyos wrote: > > Attached patch is supposed to fix ticket #7491, I currently > don't have gcc 8 to test myself. Only the 120 and 960 tables are affected by this bug because they are not being created by the hardcoded tables logic, yet this patch changes the const attribute for all tables, defeating one purpose of the hardcoded tables. >>> Can we adjust this to only affect the two tables that need it >>> >>> Did that in attached patch, please comment. >> >> I will push this patch if there are no objections. > > Patch applied. Very ugly solution. ___ ffmpeg-devel mailing list ffmpeg-devel@ffmpeg.org http://ffmpeg.org/mailman/listinfo/ffmpeg-devel
Re: [FFmpeg-devel] [PATCH]lavc/sinewin: Do not declare tables as const
2018-10-16 23:58 GMT+02:00, Carl Eugen Hoyos : > 2018-10-14 23:51 GMT+02:00, Carl Eugen Hoyos : >> 2018-10-14 22:30 GMT+02:00, Hendrik Leppkes : >>> On Sun, Oct 14, 2018 at 10:18 PM Carl Eugen Hoyos >>> wrote: Attached patch is supposed to fix ticket #7491, I currently don't have gcc 8 to test myself. >>> >>> Only the 120 and 960 tables are affected by this bug because >>> they are not being created by the hardcoded tables logic, yet >>> this patch changes the const attribute for all tables, defeating >>> one purpose of the hardcoded tables. >> >>> Can we adjust this to only affect the two tables that need it >> >> Did that in attached patch, please comment. > > I will push this patch if there are no objections. Patch applied. Carl Eugen ___ ffmpeg-devel mailing list ffmpeg-devel@ffmpeg.org http://ffmpeg.org/mailman/listinfo/ffmpeg-devel
Re: [FFmpeg-devel] [PATCH]lavf/dump: Fix a typo: comentary -> commentary
2018-10-19 2:01 GMT+02:00, Michael Niedermayer : > On Thu, Oct 18, 2018 at 12:15:40AM +0200, Carl Eugen Hoyos wrote: >> Hi! >> >> Is there a reason to keep the wrong spelling? > > i dont see a reason but maybe someone else does ... Patch applied. Thank you, Carl Eugen ___ ffmpeg-devel mailing list ffmpeg-devel@ffmpeg.org http://ffmpeg.org/mailman/listinfo/ffmpeg-devel
Re: [FFmpeg-devel] [PATCH] fftools/ffmpeg.c: allow forcing input framerate on streamcopy
2018-10-19 4:58 GMT+02:00, Leo Izen : > --- > fftools/ffmpeg.c | 8 +--- > 1 file changed, 5 insertions(+), 3 deletions(-) > > diff --git a/fftools/ffmpeg.c b/fftools/ffmpeg.c > index da4259a9a8..5d68194676 100644 > --- a/fftools/ffmpeg.c > +++ b/fftools/ffmpeg.c > @@ -2045,12 +2045,14 @@ static void do_streamcopy(InputStream *ist, > OutputStream *ost, const AVPacket *p > if (ost->enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO) > ost->sync_opts++; > > -if (pkt->pts != AV_NOPTS_VALUE) > +if (ist->framerate.num) > +opkt.pts = av_rescale_q(ist->pts, AV_TIME_BASE_Q, > ost->mux_timebase) - ost_tb_start_time; > +else if (pkt->pts != AV_NOPTS_VALUE) > opkt.pts = av_rescale_q(pkt->pts, ist->st->time_base, > ost->mux_timebase) - ost_tb_start_time; > else > opkt.pts = AV_NOPTS_VALUE; > > -if (pkt->dts == AV_NOPTS_VALUE) > +if (pkt->dts == AV_NOPTS_VALUE || ist->framerate.num) > opkt.dts = av_rescale_q(ist->dts, AV_TIME_BASE_Q, > ost->mux_timebase); > else > opkt.dts = av_rescale_q(pkt->dts, ist->st->time_base, > ost->mux_timebase); > @@ -2602,7 +2604,7 @@ static int process_input_packet(InputStream *ist, > const AVPacket *pkt, int no_eo > avpkt = *pkt; > } > > -if (pkt && pkt->dts != AV_NOPTS_VALUE) { > +if (pkt && pkt->dts != AV_NOPTS_VALUE && !ist->framerate.num) { > ist->next_dts = ist->dts = av_rescale_q(pkt->dts, > ist->st->time_base, AV_TIME_BASE_Q); > if (ist->dec_ctx->codec_type != AVMEDIA_TYPE_VIDEO || > !ist->decoding_needed) > ist->next_pts = ist->pts = ist->dts; How can this be tested? Carl Eugen ___ ffmpeg-devel mailing list ffmpeg-devel@ffmpeg.org http://ffmpeg.org/mailman/listinfo/ffmpeg-devel
Re: [FFmpeg-devel] [PATCH] ffmpeg: flush_encoders should not treat avcodec_send_frame return AVERROR_EOF as fatal
2018-10-19 14:27 GMT+02:00, Jason Stevens : > this fixes AV_CODEC_CAP_DELAY encoders going into FATAL error > and exiting on quit or finish encoding once they go into draining mode > and send_frame() returns AVERROR_EOF Could you provide a command line / sample that triggers this issue? Carl Eugen ___ ffmpeg-devel mailing list ffmpeg-devel@ffmpeg.org http://ffmpeg.org/mailman/listinfo/ffmpeg-devel
Re: [FFmpeg-devel] [PATCH V3] Add a filter implementing HDR image reconstruction from a single exposure using deep CNNs
On Fri, Oct 19, 2018 at 10:11 AM Guo, Yejun wrote: > see the algorithm's paper and code below. > > the filter's parameter looks like: > sdr2hdr=model_filename=/path_to_tensorflow_graph.pb:out_fmt=gbrp10le > can you add some usage documentation to doc/filters.texi? The input of the deep CNN model is RGB24 while the output is float > for each color channel. This is the filter's default behavior to > output format with gbrpf32le. And gbrp10le is also supported as the > output, so we can see the rendering result in a player, as a reference. > > To generate the model file, we need modify the original script a little. > - set name='y' for y_final within script at > https://github.com/gabrieleilertsen/hdrcnn/blob/master/network.py > - add the following code to the script at > https://github.com/gabrieleilertsen/hdrcnn/blob/master/hdrcnn_predict.py > > graph = tf.graph_util.convert_variables_to_constants(sess, sess.graph_def, > ["y"]) > tf.train.write_graph(graph, '.', 'graph.pb', as_text=False) > > The filter only works when tensorflow C api is supported in the system, > native backend is not supported since there are some different types of > layers in the deep CNN model, besides CONV and DEPTH_TO_SPACE. > > https://arxiv.org/pdf/1710.07480.pdf: > author = "Eilertsen, Gabriel and Kronander, Joel, and Denes, > Gyorgy and Mantiuk, Rafał and Unger, Jonas", > title= "HDR image reconstruction from a single exposure using > deep CNNs", > journal = "ACM Transactions on Graphics (TOG)", > number = "6", > volume = "36", > articleno= "178", > year = "2017" > > https://github.com/gabrieleilertsen/hdrcnn > > btw, as a whole solution, metadata should also be generated from > the sdr video, so to be encoded as a HDR video. Not supported yet. > This patch just focuses on this paper. > Is this something you are working on and will it be added later? 
> v3: use int16_t instead of short > v2: use AV_OPT_TYPE_PIXEL_FMT for filter option > remove some unnecessary code > Use in->linesize[0] and FFMAX/FFMIN > remove flag AVFILTER_FLAG_SLICE_THREADS > add av_log message when error > there is no need for this block to be left in the commit log > Signed-off-by: Guo, Yejun > --- > libavfilter/Makefile | 1 + > libavfilter/allfilters.c | 1 + > libavfilter/vf_sdr2hdr.c | 266 > +++ > 3 files changed, 268 insertions(+) > create mode 100644 libavfilter/vf_sdr2hdr.c > > +static av_cold int init(AVFilterContext* context) > +{ > +SDR2HDRContext* ctx = context->priv; > + > +if (ctx->out_fmt != AV_PIX_FMT_GBRPF32LE && ctx->out_fmt != > AV_PIX_FMT_GBRP10LE) { > +av_log(context, AV_LOG_ERROR, "could not support the output > format\n"); > +return AVERROR(ENOSYS); > +} > + > +#if (CONFIG_LIBTENSORFLOW == 1) > +ctx->dnn_module = ff_get_dnn_module(DNN_TF); > +if (!ctx->dnn_module){ > +av_log(context, AV_LOG_ERROR, "could not create DNN module for > tensorflow backend\n"); > +return AVERROR(ENOMEM); > +} > +if (!ctx->model_filename){ > +av_log(context, AV_LOG_ERROR, "model file for network was not > specified\n"); > +return AVERROR(EIO); > +} > +if (!ctx->dnn_module->load_model) { > +av_log(context, AV_LOG_ERROR, "load_model for network was not > specified\n"); > +return AVERROR(EIO); > +} > +ctx->model = (ctx->dnn_module->load_model)(ctx->model_filename); > +if (!ctx->model){ > +av_log(context, AV_LOG_ERROR, "could not load DNN model\n"); > +return AVERROR(EIO); > +} > +return 0; > +#else > +return AVERROR(EIO); > +#endif > +} > this is incorrect, what you should do is make libtensorflow a dependency of this filter in the configure file and disable this filter when it is not enabled > + > +static int query_formats(AVFilterContext* context) > +{ > +const enum AVPixelFormat in_formats[] = {AV_PIX_FMT_RGB24, > + AV_PIX_FMT_NONE}; > +enum AVPixelFormat out_formats[2]; > +SDR2HDRContext* ctx = context->priv; > +AVFilterFormats* formats_list; 
> +int ret = 0; > + > +formats_list = ff_make_format_list(in_formats); > +if ((ret = ff_formats_ref(formats_list, > &context->inputs[0]->out_formats)) < 0) > +return ret; > + > +out_formats[0] = ctx->out_fmt; > +out_formats[1] = AV_PIX_FMT_NONE; > +formats_list = ff_make_format_list(out_formats); > +if ((ret = ff_formats_ref(formats_list, > &context->outputs[0]->in_formats)) < 0) > +return ret; > + > +return 0; > +} > + > +static int config_props(AVFilterLink* inlink) > +{ > +AVFilterContext* context = inlink->dst; > +SDR2HDRContext* ctx = context->priv; > +AVFilterLink* outlink = context->outputs[0]; > +DNNReturnType result; > + > +// the dnn model is tied with resolution due to deconv layer of > tensorflow > +//
[FFmpeg-devel] [PATCH] avformat/libsrt: add several options supported in srt 1.3.0
Several SRT options are missing. Since pkg_config requires libsrt v1.3.0 and above, it should be able to support options added in libsrt v1.3.0 and below. This commit adds 8 SRT options. sndbuf, rcvbuf, lossmaxttl, minversion, streamid, smoother, messageapi and transtype The keys of option are equivalent to stransmit. https://github.com/Haivision/srt/blob/v1.3.0/apps/socketoptions.hpp#L196-L223 --- doc/protocols.texi | 85 ++-- libavformat/libsrt.c | 56 + 2 files changed, 139 insertions(+), 2 deletions(-) diff --git a/doc/protocols.texi b/doc/protocols.texi index b34f29eebf..5bd0985b0e 100644 --- a/doc/protocols.texi +++ b/doc/protocols.texi @@ -1306,10 +1306,10 @@ set by the peer side. Before version 1.3.0 this option is only available as @option{latency}. @item recv_buffer_size=@var{bytes} -Set receive buffer size, expressed in bytes. +Set UDP receive buffer size, expressed in bytes. @item send_buffer_size=@var{bytes} -Set send buffer size, expressed in bytes. +Set UDP send buffer size, expressed in bytes. @item rw_timeout Set raise error timeout for read/write operations. @@ -1329,6 +1329,87 @@ have no chance of being delivered in time. It was automatically enabled in the sender if the receiver supports it. +@item sndbuf=@var{bytes} +Set send buffer size, expressed in bytes. + +@item rcvbuf=@var{bytes} +Set receive buffer size, expressed in bytes. + +Receive buffer must not be greater than @option{ffs}. + +@item lossmaxttl=@var{packets} +The value up to which the Reorder Tolerance may grow. When +Reorder Tolerance is > 0, then packet loss report is delayed +until that number of packets come in. Reorder Tolerance +increases every time a "belated" packet has come, but it +wasn't due to retransmission (that is, when UDP packets tend +to come out of order), with the difference between the latest +sequence and this packet's sequence, and not more than the +value of this option. 
By default it's 0, which means that this +mechanism is turned off, and the loss report is always sent +immediately upon experiencing a "gap" in sequences. + +@item minversion +The minimum SRT version that is required from the peer. A connection +to a peer that does not satisfy the minimum version requirement +will be rejected. + +The version format in hex is 0xXXYYZZ for x.y.z in human readable +form, where x = ("%d", (version>>16) & 0xff), etc. + +@item streamid=@var{string} +A string limited to 512 characters that can be set on the socket prior +to connecting. This stream ID will be able to be retrieved by the +listener side from the socket that is returned from srt_accept and +was connected by a socket with that set stream ID. SRT does not enforce +any special interpretation of the contents of this string. +This option doesn’t make sense in Rendezvous connection; the result +might be that simply one side will override the value from the other +side and it’s a matter of luck which one would win. + +@item smoother=@var{live|file} +The type of Smoother used for the transmission for that socket, which +is responsible for the transmission and congestion control. The Smoother +type must be exactly the same on both connecting parties, otherwise +the connection is rejected. + +@item messageapi=@var{1|0} +When set, this socket uses the Message API, otherwise it uses Buffer +API. Note that in live mode (see @option{transtype}) there’s only +message API available. In File mode you can choose to use one of two modes: + +Stream API (default, when this option is false). In this mode you may +send as many data as you wish with one sending instruction, or even use +dedicated functions that read directly from a file. The internal facility +will take care of any speed and congestion control. When receiving, you +can also receive as many data as desired, the data not extracted will be +waiting for the next call. There is no boundary between data portions in +the Stream mode. 
+ +Message API. In this mode your single sending instruction passes exactly +one piece of data that has boundaries (a message). Contrary to Live mode, +this message may span across multiple UDP packets and the only size +limitation is that it shall fit as a whole in the sending buffer. The +receiver shall use as large buffer as necessary to receive the message, +otherwise the message will not be given up. When the message is not +complete (not all packets received or there was a packet loss) it will +not be given up. + +@item transtype=@var{live|file} +Sets the transmission type for the socket, in particular, setting this +option sets multiple other parameters to their default values as required +for a particular transmission type. + +live: Set options as for live transmission. In this mode, you should +send by one sending instruction only so many data that fit in one UDP packet, +and limited to the value defined first in @option{payload_size} (1316 is +default in this mode). There is no spee
[FFmpeg-devel] [PATCH] ffmpeg: flush_encoders should not treat avcodec_send_frame return AVERROR_EOF as fatal
this fixes AV_CODEC_CAP_DELAY encoders going into FATAL error and exiting on quit or finish encoding once they go into draining mode and send_frame() returns AVERROR_EOF Signed-off-by: Jason Stevens --- fftools/ffmpeg.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/fftools/ffmpeg.c b/fftools/ffmpeg.c index da4259a9a8..f30ed946c0 100644 --- a/fftools/ffmpeg.c +++ b/fftools/ffmpeg.c @@ -1939,7 +1939,7 @@ static void flush_encoders(void) while ((ret = avcodec_receive_packet(enc, &pkt)) == AVERROR(EAGAIN)) { ret = avcodec_send_frame(enc, NULL); -if (ret < 0) { +if (ret < 0 && ret != AVERROR_EOF) { av_log(NULL, AV_LOG_FATAL, "%s encoding failed: %s\n", desc, av_err2str(ret)); -- 2.19.1 ___ ffmpeg-devel mailing list ffmpeg-devel@ffmpeg.org http://ffmpeg.org/mailman/listinfo/ffmpeg-devel
[FFmpeg-devel] [PATCH V3] Add a filter implementing HDR image reconstruction from a single exposure using deep CNNs
see the algorithm's paper and code below. the filter's parameter looks like: sdr2hdr=model_filename=/path_to_tensorflow_graph.pb:out_fmt=gbrp10le The input of the deep CNN model is RGB24 while the output is float for each color channel. This is the filter's default behavior to output format with gbrpf32le. And gbrp10le is also supported as the output, so we can see the rendering result in a player, as a reference. To generate the model file, we need to modify the original script a little. - set name='y' for y_final within script at https://github.com/gabrieleilertsen/hdrcnn/blob/master/network.py - add the following code to the script at https://github.com/gabrieleilertsen/hdrcnn/blob/master/hdrcnn_predict.py graph = tf.graph_util.convert_variables_to_constants(sess, sess.graph_def, ["y"]) tf.train.write_graph(graph, '.', 'graph.pb', as_text=False) The filter only works when tensorflow C api is supported in the system, native backend is not supported since there are some different types of layers in the deep CNN model, besides CONV and DEPTH_TO_SPACE. https://arxiv.org/pdf/1710.07480.pdf: author = "Eilertsen, Gabriel and Kronander, Joel, and Denes, Gyorgy and Mantiuk, Rafał and Unger, Jonas", title= "HDR image reconstruction from a single exposure using deep CNNs", journal = "ACM Transactions on Graphics (TOG)", number = "6", volume = "36", articleno= "178", year = "2017" https://github.com/gabrieleilertsen/hdrcnn btw, as a whole solution, metadata should also be generated from the sdr video, so to be encoded as a HDR video. Not supported yet. This patch just focuses on this paper. 
v3: use int16_t instead of short v2: use AV_OPT_TYPE_PIXEL_FMT for filter option remove some unnecessary code Use in->linesize[0] and FFMAX/FFMIN remove flag AVFILTER_FLAG_SLICE_THREADS add av_log message when error Signed-off-by: Guo, Yejun --- libavfilter/Makefile | 1 + libavfilter/allfilters.c | 1 + libavfilter/vf_sdr2hdr.c | 266 +++ 3 files changed, 268 insertions(+) create mode 100644 libavfilter/vf_sdr2hdr.c diff --git a/libavfilter/Makefile b/libavfilter/Makefile index 62cc2f5..88e7da6 100644 --- a/libavfilter/Makefile +++ b/libavfilter/Makefile @@ -360,6 +360,7 @@ OBJS-$(CONFIG_SOBEL_OPENCL_FILTER) += vf_convolution_opencl.o opencl.o OBJS-$(CONFIG_SPLIT_FILTER) += split.o OBJS-$(CONFIG_SPP_FILTER)+= vf_spp.o OBJS-$(CONFIG_SR_FILTER) += vf_sr.o +OBJS-$(CONFIG_SDR2HDR_FILTER)+= vf_sdr2hdr.o OBJS-$(CONFIG_SSIM_FILTER) += vf_ssim.o framesync.o OBJS-$(CONFIG_STEREO3D_FILTER) += vf_stereo3d.o OBJS-$(CONFIG_STREAMSELECT_FILTER) += f_streamselect.o framesync.o diff --git a/libavfilter/allfilters.c b/libavfilter/allfilters.c index 5e72803..1645c0f 100644 --- a/libavfilter/allfilters.c +++ b/libavfilter/allfilters.c @@ -319,6 +319,7 @@ extern AVFilter ff_vf_scale_npp; extern AVFilter ff_vf_scale_qsv; extern AVFilter ff_vf_scale_vaapi; extern AVFilter ff_vf_scale2ref; +extern AVFilter ff_vf_sdr2hdr; extern AVFilter ff_vf_select; extern AVFilter ff_vf_selectivecolor; extern AVFilter ff_vf_sendcmd; diff --git a/libavfilter/vf_sdr2hdr.c b/libavfilter/vf_sdr2hdr.c new file mode 100644 index 000..6a51a54 --- /dev/null +++ b/libavfilter/vf_sdr2hdr.c @@ -0,0 +1,266 @@ +/* + * Copyright (c) 2018 Guo Yejun + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. 
+ * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +/** + * @file + * Filter implementing HDR image reconstruction from a single exposure using deep CNNs. + * https://arxiv.org/pdf/1710.07480.pdf + */ + +#include "avfilter.h" +#include "formats.h" +#include "internal.h" +#include "libavutil/opt.h" +#include "libavutil/qsort.h" +#include "libavformat/avio.h" +#include "libswscale/swscale.h" +#include "dnn_interface.h" +#include + +typedef struct SDR2HDRContext { +const AVClass *class; + +char* model_filename; +enum AVPixelFormat out_fmt; +DNNModule* dnn_module; +DNNModel* model; +DNNData input, output; +} SDR2HDRContext; + +#define OFFSET(x) offsetof(SDR2HDRContext, x) +#define FLAGS AV_OPT_FLAG_FILTERING_PARAM | AV_OP