[FFmpeg-devel] [PATCH] lavc/cfhd: added alpha decompanding in rgba12 ticket #6265

2018-03-22 Thread Gagandeep Singh
alpha decompanding curve added to post process the decoded alpha channel ticket 
#6265
---
 libavcodec/cfhd.c | 19 +++
 1 file changed, 19 insertions(+)

diff --git a/libavcodec/cfhd.c b/libavcodec/cfhd.c
index fd834b..e35732df45 100644
--- a/libavcodec/cfhd.c
+++ b/libavcodec/cfhd.c
@@ -37,6 +37,9 @@
 #include "thread.h"
 #include "cfhd.h"
 
+#define ALPHA_COMPAND_DC_OFFSET 256
+#define ALPHA_COMPAND_GAIN 9400
+
 enum CFHDParam {
 ChannelCount =  12,
 SubbandCount =  14,
@@ -94,6 +97,20 @@ static inline int dequant_and_decompand(int level, int 
quantisation)
FFSIGN(level) * quantisation;
 }
 
+static inline void process_alpha(int16_t *alpha, int width)
+{
+int i, channel;
+for (i = 0; i < width; i++) {
+channel   = alpha[i];
+channel  -= ALPHA_COMPAND_DC_OFFSET;
+channel <<= 3;
+channel  *= ALPHA_COMPAND_GAIN;
+channel >>= 16;
+channel   = av_clip_uintp2(channel, 12);
+alpha[i]  = channel;
+}
+}
+
 static inline void filter(int16_t *output, ptrdiff_t out_stride,
   int16_t *low, ptrdiff_t low_stride,
   int16_t *high, ptrdiff_t high_stride,
@@ -792,6 +809,8 @@ static int cfhd_decode(AVCodecContext *avctx, void *data, 
int *got_frame,
 high = s->plane[plane].l_h[7];
 for (i = 0; i < lowpass_height * 2; i++) {
 horiz_filter_clip(dst, low, high, lowpass_width, s->bpc);
+if (act_plane == 3)
+process_alpha(dst, lowpass_width * 2);
 low  += lowpass_width;
 high += lowpass_width;
 dst  += pic->linesize[act_plane] / 2;
-- 
2.14.1

___
ffmpeg-devel mailing list
ffmpeg-devel@ffmpeg.org
http://ffmpeg.org/mailman/listinfo/ffmpeg-devel


Re: [FFmpeg-devel] [PATCH v4 3/7] cmdutils: use new iteration APIs

2018-03-22 Thread wm4
On Thu, 22 Mar 2018 12:32:29 +0100
Nicolas George  wrote:

> Josh de Kock (2018-03-22):
> > There is always the option to just merge lavf and lavd. The state of them
> > being sort-of merged, but not really, isn't very good and adds a lot of
> > complexity especially in inter-library dependencies (which are unneeded if
> > lavf and lavd are either merged or actually separate).  
> 
> You are driving your reasoning the wrong way: you start from the
> limitations of your new API, and based on what it can do you intent huge
> changes to the project that affect user interface. It should be the
> opposite: first decide on the user interface and general design, and
> then make sure the API allow it.
> 
> For user interface, I state:
> 
> 1. FFmpeg should allow users to select devices the same way as
>(de)muxers, because it allows them to use devices in a basic way even
>when applications are not explicitly prepared for them, that makes
>extra features for free.

Devices are not muxers/demuxers and can behave significantly
differently. Thus they should be accessible via a different API, and in
particular avoid accidental use of device (de)muxers if only normal
(de)muxers are wanted by the application.
___
ffmpeg-devel mailing list
ffmpeg-devel@ffmpeg.org
http://ffmpeg.org/mailman/listinfo/ffmpeg-devel


[FFmpeg-devel] [PATCH v2] avcodec/arm/hevcdsp_sao : add NEON optimization for sao

2018-03-22 Thread Yingming Fan
From: Meng Wang 

Signed-off-by: Meng Wang 
---
This v2 patch remove unused codes 'stride_dst /= sizeof(uint8_t);' compared to 
v1. V1 have this codes because we referred to hevc dsp template codes.

As FFmpeg hevc decoder have no SAO neon optimization, we add sao_band and 
sao_edge neon codes in this patch.
I have already submit a patch called 'checkasm/hevc_sao : add hevc_sao for 
checkasm' several days ago.
Results below was printed by hevc_sao checkasm on an armv7 device Nexus 5. 
From the results we can see: hevc_sao_band speed up ~2x, hevc_sao_edge speed up 
~4x. 
Also test FATE under armv7 device and MacOS.

hevc_sao_band_8x8_8_c: 804.9
hevc_sao_band_8x8_8_neon: 452.4
hevc_sao_band_16x16_8_c: 2638.1
hevc_sao_band_16x16_8_neon: 1169.9
hevc_sao_band_32x32_8_c: 9259.9
hevc_sao_band_32x32_8_neon: 3956.1
hevc_sao_band_48x48_8_c: 20344.6
hevc_sao_band_48x48_8_neon: 8649.6
hevc_sao_band_64x64_8_c: 35684.6
hevc_sao_band_64x64_8_neon: 15213.1
hevc_sao_edge_8x8_8_c: 1761.6
hevc_sao_edge_8x8_8_neon: 414.6
hevc_sao_edge_16x16_8_c: 6844.4
hevc_sao_edge_16x16_8_neon: 1589.9
hevc_sao_edge_32x32_8_c: 27156.4
hevc_sao_edge_32x32_8_neon: 6116.6
hevc_sao_edge_48x48_8_c: 60004.6
hevc_sao_edge_48x48_8_neon: 13686.4
hevc_sao_edge_64x64_8_c: 106708.1
hevc_sao_edge_64x64_8_neon: 24240.1

 libavcodec/arm/Makefile|   3 +-
 libavcodec/arm/hevcdsp_init_neon.c |  59 
 libavcodec/arm/hevcdsp_sao_neon.S  | 181 +
 3 files changed, 242 insertions(+), 1 deletion(-)
 create mode 100644 libavcodec/arm/hevcdsp_sao_neon.S

diff --git a/libavcodec/arm/Makefile b/libavcodec/arm/Makefile
index 1eeac5449e..9c164f82ae 100644
--- a/libavcodec/arm/Makefile
+++ b/libavcodec/arm/Makefile
@@ -136,7 +136,8 @@ NEON-OBJS-$(CONFIG_DCA_DECODER)+= 
arm/synth_filter_neon.o
 NEON-OBJS-$(CONFIG_HEVC_DECODER)   += arm/hevcdsp_init_neon.o   \
   arm/hevcdsp_deblock_neon.o\
   arm/hevcdsp_idct_neon.o   \
-  arm/hevcdsp_qpel_neon.o
+  arm/hevcdsp_qpel_neon.o   \
+  arm/hevcdsp_sao_neon.o
 NEON-OBJS-$(CONFIG_RV30_DECODER)   += arm/rv34dsp_neon.o
 NEON-OBJS-$(CONFIG_RV40_DECODER)   += arm/rv34dsp_neon.o\
   arm/rv40dsp_neon.o
diff --git a/libavcodec/arm/hevcdsp_init_neon.c 
b/libavcodec/arm/hevcdsp_init_neon.c
index a4628d2a93..af68e24f93 100644
--- a/libavcodec/arm/hevcdsp_init_neon.c
+++ b/libavcodec/arm/hevcdsp_init_neon.c
@@ -21,8 +21,16 @@
 #include "libavutil/attributes.h"
 #include "libavutil/arm/cpu.h"
 #include "libavcodec/hevcdsp.h"
+#include "libavcodec/avcodec.h"
 #include "hevcdsp_arm.h"
 
+void ff_hevc_sao_band_filter_neon_8_wrapper(uint8_t *_dst, uint8_t *_src,
+  ptrdiff_t stride_dst, ptrdiff_t stride_src,
+  int16_t *sao_offset_val, int sao_left_class,
+  int width, int height);
+void ff_hevc_sao_edge_filter_neon_8_wrapper(uint8_t *_dst, uint8_t *_src, 
ptrdiff_t stride_dst, int16_t *sao_offset_val,
+  int eo, int width, int height);
+
 void ff_hevc_v_loop_filter_luma_neon(uint8_t *_pix, ptrdiff_t _stride, int 
_beta, int *_tc, uint8_t *_no_p, uint8_t *_no_q);
 void ff_hevc_h_loop_filter_luma_neon(uint8_t *_pix, ptrdiff_t _stride, int 
_beta, int *_tc, uint8_t *_no_p, uint8_t *_no_q);
 void ff_hevc_v_loop_filter_chroma_neon(uint8_t *_pix, ptrdiff_t _stride, int 
*_tc, uint8_t *_no_p, uint8_t *_no_q);
@@ -142,6 +150,47 @@ QPEL_FUNC_UW(ff_hevc_put_qpel_uw_h3v2_neon_8);
 QPEL_FUNC_UW(ff_hevc_put_qpel_uw_h3v3_neon_8);
 #undef QPEL_FUNC_UW
 
+void ff_hevc_sao_band_filter_neon_8(uint8_t *dst, uint8_t *src, ptrdiff_t 
stride_dst, ptrdiff_t stride_src, int width, int height, int16_t *offset_table);
+
+void ff_hevc_sao_band_filter_neon_8_wrapper(uint8_t *_dst, uint8_t *_src,
+  ptrdiff_t stride_dst, ptrdiff_t stride_src,
+  int16_t *sao_offset_val, int sao_left_class,
+  int width, int height) {
+uint8_t *dst = (uint8_t *)_dst;
+uint8_t *src = (uint8_t *)_src;
+int16_t offset_table[32] = {0};
+int k;
+
+for (k = 0; k < 4; k++) {
+offset_table[(k + sao_left_class) & 31] = sao_offset_val[k + 1];
+}
+
+ff_hevc_sao_band_filter_neon_8(dst, src, stride_dst, stride_src, width, 
height, offset_table);
+}
+
+void ff_hevc_sao_edge_filter_neon_8(uint8_t *dst, uint8_t *src, ptrdiff_t 
stride_dst, ptrdiff_t stride_src, int width, int height,
+int a_stride, int b_stride, int16_t 
*sao_offset_val, uint8_t *edge_idx);
+
+void ff_hevc_sao_edge_filter_neon_8_wrapper(uint8_t *_dst, 

[FFmpeg-devel] [PATCH v2] dashdec: Support signaling of last segment number (Adding space to avoid mixing styles)

2018-03-22 Thread sanilraut
---
 libavformat/dashdec.c | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/libavformat/dashdec.c b/libavformat/dashdec.c
index bf61837..db63a99 100644
--- a/libavformat/dashdec.c
+++ b/libavformat/dashdec.c
@@ -922,8 +922,8 @@ static int parse_manifest_representation(AVFormatContext 
*s, const char *url,
 rep->first_seq_no = (int64_t) strtoll(startnumber_val, NULL, 
10);
 xmlFree(startnumber_val);
 }
-if(adaptionset_supplementalproperty_node){
-
if(strcmp(xmlGetProp(adaptionset_supplementalproperty_node,"schemeIdUri"), 
"http://dashif.org/guidelines/last-segment-number") == 0){
+if (adaptionset_supplementalproperty_node) {
+if 
(!strcmp(xmlGetProp(adaptionset_supplementalproperty_node,"schemeIdUri"), 
"http://dashif.org/guidelines/last-segment-number")) {
 val = 
xmlGetProp(adaptionset_supplementalproperty_node,"value");
 rep->last_seq_no =(int64_t) strtoll(val, NULL, 10) - 1;
 xmlFree(val);
@@ -1833,7 +1833,7 @@ static int open_demux_for_component(AVFormatContext *s, 
struct representation *p
 pls->parent = s;
 pls->cur_seq_no  = calc_cur_seg_no(s, pls);
 
-if(pls->last_seq_no == 0){
+if (!pls->last_seq_no) {
 pls->last_seq_no = calc_max_seg_no(pls, s->priv_data);
 }
 
-- 
2.7.4

___
ffmpeg-devel mailing list
ffmpeg-devel@ffmpeg.org
http://ffmpeg.org/mailman/listinfo/ffmpeg-devel


Re: [FFmpeg-devel] [PATCH v4 3/7] cmdutils: use new iteration APIs

2018-03-22 Thread Josh de Kock

On 2018/03/22 10:29, Nicolas George wrote:

Josh de Kock (2018-03-22):

  I have -ffunroll'd the macros for you Nicolas.


That is not what I asked. The macros were just a way of making the poor
API a little less poor, but the problem still remains:


+#ifdef CONFIG_AVDEVICE
+opaque = 0;
+if (muxdemuxers != SHOW_DEMUXERS) {
+while ((ofmt = av_outdev_iterate())) {
+if ((!name || strcmp(ofmt->name, name) < 0) && strcmp(ofmt->
name, last_name) > 0) {
+name = ofmt->name;
+long_name = ofmt->long_name;
+is_ofmt = 1;
  }
-if (name && strcmp(ifmt->name, name) == 0)
-decode = 1;
  }
  }
+
+opaque = 0;
+if (muxdemuxers != SHOW_MUXERS) {
+while ((ifmt = av_indev_iterate())) {
+if ((!name || strcmp(ifmt->name, name) < 0) && strcmp(ifmt->
name, last_name) > 0) {
+name = ifmt->name;
+long_name = ifmt->long_name;
+is_ifmt = 1;
+}
+}
+}
+#endif


There is a separate loop for devices, and I strongly oppose to that. Fix
your API so that all (de)muxer-like components are returned in a single
loop.

Regards,



I strongly oppose using the same loop. Separating devices' iteration is 
one of the first steps to separating lavf from lavd.


--
Josh
___
ffmpeg-devel mailing list
ffmpeg-devel@ffmpeg.org
http://ffmpeg.org/mailman/listinfo/ffmpeg-devel


Re: [FFmpeg-devel] [PATCH v4 3/7] cmdutils: use new iteration APIs

2018-03-22 Thread Nicolas George
Josh de Kock (2018-03-22):
> I strongly oppose using the same loop. Separating devices' iteration is one
> of the first steps to separating lavf from lavd.

And I oppose separating lavf from lavd, was it not clear enough? I have
given technical arguments in my first mail.

Regards,

-- 
  Nicolas George


signature.asc
Description: Digital signature
___
ffmpeg-devel mailing list
ffmpeg-devel@ffmpeg.org
http://ffmpeg.org/mailman/listinfo/ffmpeg-devel


Re: [FFmpeg-devel] [PATCH v4 3/7] cmdutils: use new iteration APIs

2018-03-22 Thread Paul B Mahol
On 3/22/18, Nicolas George  wrote:
> Josh de Kock (2018-03-22):
>> There is always the option to just merge lavf and lavd. The state of them
>> being sort-of merged, but not really, isn't very good and adds a lot of
>> complexity especially in inter-library dependencies (which are unneeded
>> if
>> lavf and lavd are either merged or actually separate).
>
> You are driving your reasoning the wrong way: you start from the
> limitations of your new API, and based on what it can do you intend huge
> changes to the project that affect user interface. It should be the
> opposite: first decide on the user interface and general design, and
> then make sure the API allow it.
>
> For user interface, I state:
>
> 1. FFmpeg should allow users to select devices the same way as
>(de)muxers, because it allows them to use devices in a basic way even
>when applications are not explicitly prepared for them, that makes
>extra features for free.
>
> Hence, I deduce:
>
> 2. All lavf APIs should treat devices the same way as (de)muxers.
>
> And I still think that the better option is to revert the new API and
> design a new new one, learning from the small mistakes of this one.

Please, for once just leave this project.
___
ffmpeg-devel mailing list
ffmpeg-devel@ffmpeg.org
http://ffmpeg.org/mailman/listinfo/ffmpeg-devel


Re: [FFmpeg-devel] [PATCH v4 3/7] cmdutils: use new iteration APIs

2018-03-22 Thread Nicolas George
Josh de Kock (2018-03-22):
> Merging lavd into lavf may be the best option here, as it allows us to
> change the return type of av_iterate_indev() etc to an AVDevice or another
> type which may represent an actual device as opposed to a purely
> input/output device (which is just implemented as an actual lavf format). I
> don't know; say this is merged and then we add an AVDevice, then we already
> have device iteration functions which return AVFormat so we would need
> different function names to accommodate the lavd change requiring yet
> another API change--so I am not entirely sure that the current patches
> (implementing option 1) are the best way to go about it.

I am sorry, but I have trouble understanding what you are trying to say.
Maybe rephrase it with shorter sentences?

There is no separate type for devices, they are coded as AVInputFormat
and AVOutputFormat. That is fine, because they are designed to behave
like (de)muxers and can be used anywhere a (de)muxer can be used.

The result is a huge benefit for users. Just look at ffmpeg: it was
designed for (de)muxers, but thanks to that it can do real-time capture
and quick on-the-fly preview. The features are not as complete as with a
real device API, but most of the time the basic features provided by
lavf are largely enough to suit the needs.

You want proof? Just look at the users mailing-list where questions are
asked about dshow, X11 capture, Decklink cards, etc.

If you were to change the lavd API to make it different from (de)muxers,
all applications that right now can use devices automatically would lose
that ability, to the detriment of users.

Also, a general note about user interface design: specific choices
should override general choices.

In more specific words: applications can only assume what their users
want or need, in a general way. On the other hand, a user knows what he
or she needs right now, in this specific case. That means that if the
user wants to use a device, then the user should be, as much as
possible, be allowed to use a device, and the application has no
business trying to prevent that. The current API makes it more
convenient to applications to not mess that up.

Regards,

-- 
  Nicolas George


signature.asc
Description: Digital signature
___
ffmpeg-devel mailing list
ffmpeg-devel@ffmpeg.org
http://ffmpeg.org/mailman/listinfo/ffmpeg-devel


Re: [FFmpeg-devel] [PATCH v2] fftools/cmdutils: add support for level flag in loglevel option parser

2018-03-22 Thread Tobias Rapp

On 22.03.2018 00:59, Michael Niedermayer wrote:

On Wed, Mar 21, 2018 at 03:20:30PM +0100, Tobias Rapp wrote:

Allows to manage the AV_LOG_PRINT_LEVEL flag as a prefix to the loglevel
option value, similar to the existing AV_LOG_SKIP_REPEATED flag.

Signed-off-by: Tobias Rapp 
---
  doc/fftools-common-opts.texi | 11 +++
  fftools/cmdutils.c   | 26 +++---
  2 files changed, 26 insertions(+), 11 deletions(-)

diff --git a/doc/fftools-common-opts.texi b/doc/fftools-common-opts.texi
index 185ec21..9b6bc44 100644
--- a/doc/fftools-common-opts.texi
+++ b/doc/fftools-common-opts.texi
@@ -168,14 +168,17 @@ The returned list cannot be assumed to be always complete.
  ffmpeg -sinks pulse,server=192.168.0.4
  @end example
  
-@item -loglevel [repeat+]@var{loglevel} | -v [repeat+]@var{loglevel}

+@item -loglevel [repeat+][level+]@var{loglevel} | -v 
[repeat+][level+]@var{loglevel}
  Set the logging level used by the library.
  Adding "repeat+" indicates that repeated log output should not be compressed
  to the first line and the "Last message repeated n times" line will be
  omitted. "repeat" can also be used alone.
-If "repeat" is used alone, and with no prior loglevel set, the default
-loglevel will be used. If multiple loglevel parameters are given, using
-'repeat' will not change the loglevel.
+Adding "level+" indicates that log output should add a @code{[level]} prefix to
+each message line. This can be used as an alternative to log coloring, e.g. 
when
+dumping the log to file.
+If "repeat" and/or "level" is used alone, and with no prior loglevel set, the
+default loglevel will be used. If multiple loglevel parameters are given, using
+'repeat'/'level' will not change the loglevel.
  @var{loglevel} is a string or a number containing one of the following values:
  @table @samp
  @item quiet, -8
diff --git a/fftools/cmdutils.c b/fftools/cmdutils.c
index 708a849..51fa88c 100644
--- a/fftools/cmdutils.c
+++ b/fftools/cmdutils.c
@@ -888,16 +888,28 @@ int opt_loglevel(void *optctx, const char *opt, const 
char *arg)
  
  flags = av_log_get_flags();

  tail = strstr(arg, "repeat");
-if (tail)
+if (tail == arg) {
  flags &= ~AV_LOG_SKIP_REPEATED;
-else
+arg += 6 + (arg[6] == '+');
+if (!*arg) {
+av_log_set_flags(flags);
+return 0;
+}
+} else {
  flags |= AV_LOG_SKIP_REPEATED;
-
+}
+tail = strstr(arg, "level");
+if (tail == arg) {
+flags |= AV_LOG_PRINT_LEVEL;
+arg += 5 + (arg[5] == '+');
+if (!*arg) {
+av_log_set_flags(flags);
+return 0;
+}
+} else {
+flags &= ~AV_LOG_PRINT_LEVEL;
+}
  av_log_set_flags(flags);
-if (tail == arg)
-arg += 6 + (arg[6]=='+');
-if(tail && !*arg)
-return 0;


might be simpler to use av_strtok()

also this code should idealy be moved into libavutil so other applications
do not need to duplicate it


A useful helper function would allow to update one flag without 
affecting the other. Implementation would end up similar to parsing 
"normal" option flags with the level number handling on-top.


Do you have some suggestion on how to do this with least code 
duplication? Maybe checking the right-hand-side of the string for 
matching a level name or being a number string and then passing the rest 
to av_opt_eval_flags?


Regards,
Tobias

___
ffmpeg-devel mailing list
ffmpeg-devel@ffmpeg.org
http://ffmpeg.org/mailman/listinfo/ffmpeg-devel


Re: [FFmpeg-devel] OpenCV filter should be built as C++, and C builds fail since OpenCV 3.4.1

2018-03-22 Thread Paul B Mahol
On 3/22/18, Nicolas George  wrote:
> Rostislav Pehlivanov (2018-03-20):
>> There was absolutely nothing in my sentence to imply I'm sarcastic, and I
>> make sure to be obviously sarcastic when I want to be.
>> The filter we have supports a very limited subset of the features of
>> opencv
>> and if there's no overlap we should probably write our own versions of
>> them. Also I have a bad experience working with opencv.
>
> Ok. That on top of the deliberate incompatibility, I suppose getting rid
> of it makes sense.
>
> It should be decided quickly one way or the other and GSoC applicants
> should be notified, though.

Why? OpenCV != OpenCL
___
ffmpeg-devel mailing list
ffmpeg-devel@ffmpeg.org
http://ffmpeg.org/mailman/listinfo/ffmpeg-devel


Re: [FFmpeg-devel] OpenCV filter should be built as C++, and C builds fail since OpenCV 3.4.1

2018-03-22 Thread Nicolas George
Rostislav Pehlivanov (2018-03-20):
> There was absolutely nothing in my sentence to imply I'm sarcastic, and I
> make sure to be obviously sarcastic when I want to be.
> The filter we have supports a very limited subset of the features of opencv
> and if there's no overlap we should probably write our own versions of
> them. Also I have a bad experience working with opencv.

Ok. That on top of the deliberate incompatibility, I suppose getting rid
of it makes sense.

It should be decided quickly one way or the other and GSoC applicants
should be notified, though.

Regards,

-- 
  Nicolas George


signature.asc
Description: Digital signature
___
ffmpeg-devel mailing list
ffmpeg-devel@ffmpeg.org
http://ffmpeg.org/mailman/listinfo/ffmpeg-devel


Re: [FFmpeg-devel] [PATCH 0/1] Add Sega FILM muxer

2018-03-22 Thread wm4
On Wed, 21 Mar 2018 22:46:30 -0700
mi...@brew.sh wrote:

> From: Misty De Meo 
> 
> This adds a muxer for the Sega FILM format, which was used by
> Sega Saturn games in the 1990s.
> 
> This muxer provides a complete implementation of the FILM format, at least
> as far as all features I've seen used in the wild, except that it currently
> doesn't support uncompressed video. I've tested its output using vintage
> players, and haven't encountered any compatibility problems so far.
> 
> I haven't been able to get FFmpeg's Cinepak output to be accepted by vintage
> decoders; my successful tests were with stream copied video and new audio.
> I'm planning on looking at that as a followup.
> 
> Misty De Meo (1):
>   Add Sega FILM muxer
> 
>  Changelog |   1 +
>  libavformat/Makefile  |   1 +
>  libavformat/allformats.c  |   1 +
>  libavformat/segafilmenc.c | 380 
> ++
>  4 files changed, 383 insertions(+)
>  create mode 100644 libavformat/segafilmenc.c
> 

Why?
___
ffmpeg-devel mailing list
ffmpeg-devel@ffmpeg.org
http://ffmpeg.org/mailman/listinfo/ffmpeg-devel


Re: [FFmpeg-devel] OpenCV filter should be built as C++, and C builds fail since OpenCV 3.4.1

2018-03-22 Thread Nicolas George
Paul B Mahol (2018-03-22):
> Why? OpenCV != OpenCL

Oh, I had not noticed. Good then.

Regards,

-- 
  Nicolas George


signature.asc
Description: Digital signature
___
ffmpeg-devel mailing list
ffmpeg-devel@ffmpeg.org
http://ffmpeg.org/mailman/listinfo/ffmpeg-devel


Re: [FFmpeg-devel] [PATCH v2] dashdec: Support signaling of last segment number (Adding space to avoid mixing styles)

2018-03-22 Thread Steven Liu


> On 22 Mar 2018, at 19:15, sanilraut  wrote:
> 
> ---
> libavformat/dashdec.c | 6 +++---
> 1 file changed, 3 insertions(+), 3 deletions(-)
> 
> diff --git a/libavformat/dashdec.c b/libavformat/dashdec.c
> index bf61837..db63a99 100644
> --- a/libavformat/dashdec.c
> +++ b/libavformat/dashdec.c
> @@ -922,8 +922,8 @@ static int parse_manifest_representation(AVFormatContext 
> *s, const char *url,
> rep->first_seq_no = (int64_t) strtoll(startnumber_val, NULL, 
> 10);
> xmlFree(startnumber_val);
> }
> -if(adaptionset_supplementalproperty_node){
> -
> if(strcmp(xmlGetProp(adaptionset_supplementalproperty_node,"schemeIdUri"), 
> "http://dashif.org/guidelines/last-segment-number") == 0){
> +if (adaptionset_supplementalproperty_node) {
> +if 
> (!strcmp(xmlGetProp(adaptionset_supplementalproperty_node,"schemeIdUri"), 
> "http://dashif.org/guidelines/last-segment-number")) {
What about use av_strcasecmp?
> val = 
> xmlGetProp(adaptionset_supplementalproperty_node,"value");
> rep->last_seq_no =(int64_t) strtoll(val, NULL, 10) - 1;
> xmlFree(val);
> @@ -1833,7 +1833,7 @@ static int open_demux_for_component(AVFormatContext *s, 
> struct representation *p
> pls->parent = s;
> pls->cur_seq_no  = calc_cur_seg_no(s, pls);
> 
> -if(pls->last_seq_no == 0){
> +if (!pls->last_seq_no) {
> pls->last_seq_no = calc_max_seg_no(pls, s->priv_data);
> }
> 
> -- 
> 2.7.4
> 
> ___
> ffmpeg-devel mailing list
> ffmpeg-devel@ffmpeg.org
> http://ffmpeg.org/mailman/listinfo/ffmpeg-devel

Thanks
Steven





___
ffmpeg-devel mailing list
ffmpeg-devel@ffmpeg.org
http://ffmpeg.org/mailman/listinfo/ffmpeg-devel


Re: [FFmpeg-devel] [PATCH] avcodec/arm/hevcdsp_sao : add NEON optimization for sao

2018-03-22 Thread Shengbin Meng
The code looks good to me. I think the wrapper is fine, because that part of 
code is not suitable for NEON assembly.

But you can remove the using of `sizeof(uint8_t)` as suggested by Carl.

Shengbin Meng

> On 19 Mar 2018, at 12:41, Yingming Fan  wrote:
> 
> Hi, is there any review about this patch? What’s your option about wrapper we 
> used in this patch.
> 
> Yingming Fan
> 
>> On 11 Mar 2018, at 8:59 PM, Yingming Fan  wrote:
>> 
>> 
>>> On 11 Mar 2018, at 8:54 PM, Carl Eugen Hoyos  wrote:
>>> 
>>> 2018-03-08 8:03 GMT+01:00 Yingming Fan :
 From: Meng Wang 
>>> 
 +stride_dst /= sizeof(uint8_t);
 +stride_src /= sizeof(uint8_t);
>>> 
>>> FFmpeg requires sizeof(uint8_t) to be 1, please simplify
>>> your patch accordingly.
>>> 
>>> Why is the wrapper function needed?
>> 
>> We use wrapper because codes in wrapper no need to be written with assembly, 
>> C codes more readable.
>> 
>>> 
>>> Carl Eugen
>>> ___
>>> ffmpeg-devel mailing list
>>> ffmpeg-devel@ffmpeg.org
>>> http://ffmpeg.org/mailman/listinfo/ffmpeg-devel
>> 
> 
> ___
> ffmpeg-devel mailing list
> ffmpeg-devel@ffmpeg.org
> http://ffmpeg.org/mailman/listinfo/ffmpeg-devel

___
ffmpeg-devel mailing list
ffmpeg-devel@ffmpeg.org
http://ffmpeg.org/mailman/listinfo/ffmpeg-devel


Re: [FFmpeg-devel] [PATCH v4 3/7] cmdutils: use new iteration APIs

2018-03-22 Thread Josh de Kock

On 2018/03/22 11:19, Nicolas George wrote:

Josh de Kock (2018-03-22):

I strongly oppose using the same loop. Separating devices' iteration is one
of the first steps to separating lavf from lavd.


And I oppose separating lavf from lavd, was it not clear enough? I have
given technical arguments in my first mail.

Regards,



There is always the option to just merge lavf and lavd. The state of 
them being sort-of merged, but not really, isn't very good and adds a 
lot of complexity especially in inter-library dependencies (which are 
unneeded if lavf and lavd are either merged or actually separate).


--
Josh
___
ffmpeg-devel mailing list
ffmpeg-devel@ffmpeg.org
http://ffmpeg.org/mailman/listinfo/ffmpeg-devel


Re: [FFmpeg-devel] [PATCH v4 3/7] cmdutils: use new iteration APIs

2018-03-22 Thread Josh de Kock

On 2018/03/22 12:07, Nicolas George wrote:

Josh de Kock (2018-03-22):

Merging lavd into lavf may be the best option here, as it allows us to
change the return type of av_iterate_indev() etc to an AVDevice or another
type which may represent an actual device as opposed to a purely
input/output device (which is just implemented as an actual lavf format). I
don't know; say this is merged and then we add an AVDevice, then we already
have device iteration functions which return AVFormat so we would need
different function names to accommodate the lavd change requiring yet
another API change--so I am not entirely sure that the current patches
(implementing option 1) are the best way to go about it.


I am sorry, but I have trouble understanding what you are trying to say.
Maybe rephrase it with shorter sentences?



move lavd avinputformats and avoutputformats into lavf

delete lavd

write new lavd aimed at actual devices
___
ffmpeg-devel mailing list
ffmpeg-devel@ffmpeg.org
http://ffmpeg.org/mailman/listinfo/ffmpeg-devel


Re: [FFmpeg-devel] [PATCH] reitnerlace - tinterlace-like filter under LGPL

2018-03-22 Thread Vasile Toncu



On 14.03.2018 18:56, Thomas Mundt wrote:

2018-03-13 16:10 GMT+01:00 Vasile Toncu :



On 06.03.2018 20:38, Thomas Mundt wrote:


Hi,

2018-03-05 13:48 GMT+01:00 Carl Eugen Hoyos :

2018-03-05 12:37 GMT+01:00, Paul B Mahol :

On 3/5/18, Vasile Toncu  wrote:


Hello,

Thanks for the review. I've made changes according to your guidance.

It would be great to know if the community will go on with our
intention
of adding reinterlace as a alternative for tinterlace.

That being said, here is the new patch.


As already said, this is not acceptable.

There is no point in having 2 filters with near same funcionality.


If you consider the new filter ok, the existing filter will be removed
in the same push. I believe sending only the new filter makes
reviewing easier.

For me reviewing would be easier when Vasile sends a patchset that

includes
the replacement of tinterlace filter.
That way existing fate tests could be used which are fortunately pretty
extensive in this case.
Also it would be helpful when you and/or other experienced ffmpeg
developers would clarify first which parts of tinterlace have to be
rewritten for proper relicensing.
Being left in the dark makes working on patches frustrating.

Another question is how to deal with vf_interlace? IMHO for the user there
should be no difference in output, speed and license.
Two options:
1. Relicensing and slice threading will also be ported to vf_interlace
2. The commands from vf_interlace will be included in the new tinterlace
filter. vf_interlace will be deleted together with old tinterlace filter

I would prefer the second option, but maybe there are even better options
that don´t come to my mind.

Please comment.
Thanks,
Thomas
___
ffmpeg-devel mailing list
ffmpeg-devel@ffmpeg.org
http://ffmpeg.org/mailman/listinfo/ffmpeg-devel


Hello everyone,

sorry for a delayed response.

 From what has been discussed in here, I think the reinterlace will exist
with tinterlace for a period of time, just after that the tinterlace can be
removed.

To have the reinterlace added, what is needed to be done from my side?

Thanks,
Vasile Toncu


Two filters with almost the same functionality won´t be accepted, as Paul
stated in this thread.
Also there is vf_interlace filter, which is a subset of vf_tinterlace and
should not differ in speed, output and license. As already said, I would
prefer to include vf_interlace options into vf_tinterlace and remove
vf_interlace.
Also you want several changes: Making tinterlace filter LGPL, adding new
options and adding slice threading.
This should be done in a patch set:

Patch 1/5: Include vf_interlace options into vf_tinterlace filter and
remove vf_interlace

    Hi,

    From what I've researched, it seems that vf_interlace is just an 
incomplete functionality for vf_tinterlace, so it can be removed directly.


    Can anyone confirm this?

    Regards,
    Vasile Toncu


Patch 2/5: Your new LGPL vf_reinterlace filter without the new options,
fixes and slice threading
Patch 3/5: Rename vf_reinterlace and replace vf_tinterlace by it
Patch 4/5: Add slice threading
Patch 5/5: Add the new options and fate tests for them

Please run fate. All tests should pass.
As already said, I don´t have the skills to suggest what has to be done
making the relicensing legal. So I can do a technical review only.
These are just my suggestions to the best of my knowledge! There might be
better ideas from more experienced developers.
Please comment.

Regards,
Thomas
___
ffmpeg-devel mailing list
ffmpeg-devel@ffmpeg.org
http://ffmpeg.org/mailman/listinfo/ffmpeg-devel


___
ffmpeg-devel mailing list
ffmpeg-devel@ffmpeg.org
http://ffmpeg.org/mailman/listinfo/ffmpeg-devel


Re: [FFmpeg-devel] Field order during libavformat write_header

2018-03-22 Thread Devin Heitmueller
Hi Marton,

> I still think ffmpeg.c should be fixed instead. There is even some logic in 
> ffmpeg.c to wait for a frame from all streams before writing the header, so 
> it seems doable.

I don’t disagree with this in principle, but we have to be a bit careful.  
There are stream types that generate packets on a very infrequent basis, and 
I’ve already had to add hacks to libavformat/mux.c to ignore certain codec 
types rather than waiting until avctx->max_interleave_delta gets hit.  For 
example, I had cases where the decklink output would stall for several seconds 
whenever a SCTE-35 trigger was encountered.

> Also as far as I see you cannot detect the field order in the muxer if it is 
> using a real codec (like v210) instead of a wrapped avframe.

I had the same suspicion but haven’t confirmed it yet in this case.  When I 
first ran into the problem I thought it might have been because of the v210 
encoder not passing through the data that would have normally been in the 
AVFrame.  However as soon as I saw the problem occurred with uyvy422 I stopped 
looking.  This is a more general problem we have in other cases as well, such 
as relying on the codec parameters for frame width/height because we lost 
access to that information when it was converted from an AVFrame.  The same 
goes for side data where we had to add code to the v210enc module to ensure it 
survived conversion from an AVFrame to an AVPacket.

I know it’s been argued about in the past, but I really do think things like 
V210 should be a raw frame colorspace rather than a codec.  Then we would be 
able to use a wrapped avframe for everything and wouldn’t lose all that data 
when translating from an AVFrame to an AVPacket.

Devin

---
Devin Heitmueller - LTN Global Communications
dheitmuel...@ltnglobal.com

___
ffmpeg-devel mailing list
ffmpeg-devel@ffmpeg.org
http://ffmpeg.org/mailman/listinfo/ffmpeg-devel


Re: [FFmpeg-devel] [PATCH] avcodec/dxva2_vc1: add missing frame_params callback to vc1_d3d11va2 hwaccel

2018-03-22 Thread James Almer
On 3/22/2018 6:41 AM, wm4 wrote:
> On Thu, 22 Mar 2018 01:41:11 -0300
> James Almer  wrote:
> 
>> Fixes ticket #7096
>>
>> Signed-off-by: James Almer 
>> ---
>>  libavcodec/dxva2_vc1.c | 1 +
>>  1 file changed, 1 insertion(+)
>>
>> diff --git a/libavcodec/dxva2_vc1.c b/libavcodec/dxva2_vc1.c
>> index f22c73cd1e..06f1083b3a 100644
>> --- a/libavcodec/dxva2_vc1.c
>> +++ b/libavcodec/dxva2_vc1.c
>> @@ -473,6 +473,7 @@ const AVHWAccel ff_vc1_d3d11va2_hwaccel = {
>>  .start_frame= dxva2_vc1_start_frame,
>>  .decode_slice   = dxva2_vc1_decode_slice,
>>  .end_frame  = dxva2_vc1_end_frame,
>> +.frame_params   = ff_dxva2_common_frame_params,
>>  .frame_priv_data_size = sizeof(struct dxva2_picture_context),
>>  .priv_data_size = sizeof(FFDXVASharedContext),
>>  };
> 
> LGTM. Apparently I missed this.

Pushed, thanks.
___
ffmpeg-devel mailing list
ffmpeg-devel@ffmpeg.org
http://ffmpeg.org/mailman/listinfo/ffmpeg-devel


Re: [FFmpeg-devel] [PATCH] ffmpeg.c - drain all decoded frames during stream_loop flush

2018-03-22 Thread Gyan Doshi


On 3/21/2018 3:02 AM, Michael Niedermayer wrote:


do you want to also create a fate test for this


Can you suggest an existing fate sample file that I can use? Ideally, a 
stream of a video codec with some delay.


Seems to me most samples in the suite are anomalous or broken in one way 
or the other. Not ideal candidates to test some other feature than they 
were originally intended for.


Regards,
Gyan
___
ffmpeg-devel mailing list
ffmpeg-devel@ffmpeg.org
http://ffmpeg.org/mailman/listinfo/ffmpeg-devel


[FFmpeg-devel] [PATCH 3/3] lavc/qsvdec: expose frame pic_type

2018-03-22 Thread Zhong Li
Currently pict_type are unset.
Add an extra param to fetch the picture type from qsv decoder

v2: remove the key_frame setting because the judgement “key frame is equal
to IDR frame” only suitable for H264.
For HEVC, all IRAP frames are key frames, and other codecs have no IDR
frame.

Signed-off-by: ChaoX A Liu 
Signed-off-by: Zhong Li 
---
 libavcodec/qsv.c  | 24 
 libavcodec/qsv_internal.h |  4 
 libavcodec/qsvdec.c   |  6 ++
 3 files changed, 34 insertions(+)

diff --git a/libavcodec/qsv.c b/libavcodec/qsv.c
index 5217adf..2362e55 100644
--- a/libavcodec/qsv.c
+++ b/libavcodec/qsv.c
@@ -195,6 +195,30 @@ int ff_qsv_find_surface_idx(QSVFramesContext *ctx, 
QSVFrame *frame)
 return AVERROR_BUG;
 }
 
+enum AVPictureType ff_qsv_map_pictype(int mfx_pic_type)
+{
+enum AVPictureType type = AV_PICTURE_TYPE_NONE;
+switch (mfx_pic_type & 0x7) {
+case MFX_FRAMETYPE_I:
+if (mfx_pic_type & MFX_FRAMETYPE_S)
+type = AV_PICTURE_TYPE_SI;
+else
+type = AV_PICTURE_TYPE_I;
+break;
+case MFX_FRAMETYPE_B:
+type = AV_PICTURE_TYPE_B;
+break;
+case MFX_FRAMETYPE_P:
+ if (mfx_pic_type & MFX_FRAMETYPE_S)
+type = AV_PICTURE_TYPE_SP;
+else
+type = AV_PICTURE_TYPE_P;
+break;
+}
+
+return type;
+}
+
 static int qsv_load_plugins(mfxSession session, const char *load_plugins,
 void *logctx)
 {
diff --git a/libavcodec/qsv_internal.h b/libavcodec/qsv_internal.h
index c030550..b576f53f 100644
--- a/libavcodec/qsv_internal.h
+++ b/libavcodec/qsv_internal.h
@@ -52,6 +52,9 @@ typedef struct QSVFrame {
 mfxFrameSurface1 surface;
 mfxEncodeCtrl enc_ctrl;
 
+mfxExtDecodedFrameInfo dec_info;
+mfxExtBuffer *ext_param;
+
 int queued;
 int used;
 
@@ -86,6 +89,7 @@ int ff_qsv_codec_id_to_mfx(enum AVCodecID codec_id);
 int ff_qsv_profile_to_mfx(enum AVCodecID codec_id, int profile);
 
 int ff_qsv_map_pixfmt(enum AVPixelFormat format, uint32_t *fourcc);
+enum AVPictureType ff_qsv_map_pictype(int mfx_pic_type);
 
 int ff_qsv_init_internal_session(AVCodecContext *avctx, mfxSession *session,
  const char *load_plugins);
diff --git a/libavcodec/qsvdec.c b/libavcodec/qsvdec.c
index e0c5579..63e2733 100644
--- a/libavcodec/qsvdec.c
+++ b/libavcodec/qsvdec.c
@@ -232,6 +232,11 @@ static int alloc_frame(AVCodecContext *avctx, QSVContext 
*q, QSVFrame *frame)
 
frame->surface.Data.MemId = &q->frames_ctx.mids[ret];
 }
+frame->surface.Data.ExtParam = &frame->ext_param;
+frame->surface.Data.NumExtParam = 1;
+frame->ext_param = (mfxExtBuffer*)&frame->dec_info;
+frame->dec_info.Header.BufferId = MFX_EXTBUFF_DECODED_FRAME_INFO;
+frame->dec_info.Header.BufferSz = sizeof(frame->dec_info);
 
 frame->used = 1;
 
@@ -418,6 +423,7 @@ FF_ENABLE_DEPRECATION_WARNINGS
 outsurf->Info.PicStruct & MFX_PICSTRUCT_FIELD_TFF;
 frame->interlaced_frame =
 !(outsurf->Info.PicStruct & MFX_PICSTRUCT_PROGRESSIVE);
+frame->pict_type = ff_qsv_map_pictype(out_frame->dec_info.FrameType);
 
 /* update the surface properties */
 if (avctx->pix_fmt == AV_PIX_FMT_QSV)
-- 
1.8.3.1

___
ffmpeg-devel mailing list
ffmpeg-devel@ffmpeg.org
http://ffmpeg.org/mailman/listinfo/ffmpeg-devel


[FFmpeg-devel] [PATCH 1/3] lavc/qsvdec: set complete_frame flags for progressive picture

2018-03-22 Thread Zhong Li
Set the flag MFX_BITSTREAM_COMPLETE_FRAME when it is a progressive picture.
This can fix vc1 decoding segment fault issues because can't set the start
code correctly.
See: ./avconv -hwaccel qsv -c:v vc1_qsv -i /fate-suite/vc1/SA00040.vc1
-vf "hwdownload, format=nv12" -f rawvideo /dev/null

a. field_order of some h264 interlaced video (e.g: cama3_vtc_b.avc) is marked 
as AV_FIELD_UNKNOWN
   in h264_parser.c. This is not a completed frames.
   So only set the MFX_BITSTREAM_COMPLETE_FRAME when it is progressive.
b. some clips have both progressive and interlaced frames 
(e.g.CAPAMA3_Sand_F.264),
   the parsed field_order maybe changed druing the decoding progress.

This patch has been verified for other codecs(mpeg2/hevc/vp8).

Signed-off-by: Zhong Li 
---
 libavcodec/qsvdec.c | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

diff --git a/libavcodec/qsvdec.c b/libavcodec/qsvdec.c
index 45bedf9..e0c5579 100644
--- a/libavcodec/qsvdec.c
+++ b/libavcodec/qsvdec.c
@@ -318,6 +318,8 @@ static int qsv_decode(AVCodecContext *avctx, QSVContext *q,
 bs.DataLength = avpkt->size;
 bs.MaxLength  = bs.DataLength;
 bs.TimeStamp  = avpkt->pts;
+if (avctx->field_order == AV_FIELD_PROGRESSIVE)
+bs.DataFlag   |= MFX_BITSTREAM_COMPLETE_FRAME;
 }
 
 sync = av_mallocz(sizeof(*sync));
@@ -497,6 +499,7 @@ int ff_qsv_process_data(AVCodecContext *avctx, QSVContext 
*q,
  pkt->data, pkt->size, pkt->pts, pkt->dts,
  pkt->pos);
 
+avctx->field_order  = q->parser->field_order;
 /* TODO: flush delayed frames on reinit */
 if (q->parser->format   != q->orig_pix_fmt||
 FFALIGN(q->parser->coded_width, 16)  != FFALIGN(avctx->coded_width, 
16) ||
@@ -521,7 +524,6 @@ int ff_qsv_process_data(AVCodecContext *avctx, QSVContext 
*q,
 avctx->height   = q->parser->height;
 avctx->coded_width  = FFALIGN(q->parser->coded_width, 16);
 avctx->coded_height = FFALIGN(q->parser->coded_height, 16);
-avctx->field_order  = q->parser->field_order;
 avctx->level= q->avctx_internal->level;
 avctx->profile  = q->avctx_internal->profile;
 
-- 
1.8.3.1

___
ffmpeg-devel mailing list
ffmpeg-devel@ffmpeg.org
http://ffmpeg.org/mailman/listinfo/ffmpeg-devel


[FFmpeg-devel] [PATCH 2/3] lavu/hwcontext_qsv: Add support for pix_fmt RGB32.

2018-03-22 Thread Zhong Li
RGB32 format may be used as overlay with alpha blending.
So add RGB32 format support.

Signed-off-by: ChaoX A Liu 
Signed-off-by: Zhong Li 
---
 libavutil/hwcontext_qsv.c | 43 +--
 1 file changed, 33 insertions(+), 10 deletions(-)

diff --git a/libavutil/hwcontext_qsv.c b/libavutil/hwcontext_qsv.c
index 0fefec3..dcaf072 100644
--- a/libavutil/hwcontext_qsv.c
+++ b/libavutil/hwcontext_qsv.c
@@ -90,6 +90,7 @@ static const struct {
 uint32_t   fourcc;
 } supported_pixel_formats[] = {
 { AV_PIX_FMT_NV12, MFX_FOURCC_NV12 },
+{ AV_PIX_FMT_RGB32,MFX_FOURCC_RGB4 },
 { AV_PIX_FMT_P010, MFX_FOURCC_P010 },
 { AV_PIX_FMT_PAL8, MFX_FOURCC_P8   },
 };
@@ -731,6 +732,36 @@ static int qsv_transfer_data_child(AVHWFramesContext *ctx, 
AVFrame *dst,
 return ret;
 }
 
+static int map_frame_to_surface(const AVFrame *frame, mfxFrameSurface1 
*surface)
+{
+switch (frame->format) {
+case AV_PIX_FMT_NV12:
+surface->Data.Y  = frame->data[0];
+surface->Data.UV = frame->data[1];
+break;
+
+case AV_PIX_FMT_YUV420P:
+surface->Data.Y = frame->data[0];
+surface->Data.U = frame->data[1];
+surface->Data.V = frame->data[2];
+break;
+
+case AV_PIX_FMT_RGB32:
+surface->Data.B = frame->data[0];
+surface->Data.G = frame->data[0] + 1;
+surface->Data.R = frame->data[0] + 2;
+surface->Data.A = frame->data[0] + 3;
+break;
+
+default:
+return MFX_ERR_UNSUPPORTED;
+}
+surface->Data.Pitch = frame->linesize[0];
+surface->Data.TimeStamp = frame->pts;
+
+return 0;
+}
+
 static int qsv_transfer_data_from(AVHWFramesContext *ctx, AVFrame *dst,
   const AVFrame *src)
 {
@@ -750,11 +781,7 @@ static int qsv_transfer_data_from(AVHWFramesContext *ctx, 
AVFrame *dst,
 }
 
 out.Info = in->Info;
-out.Data.PitchLow = dst->linesize[0];
-out.Data.Y= dst->data[0];
-out.Data.U= dst->data[1];
-out.Data.V= dst->data[2];
-out.Data.A= dst->data[3];
+map_frame_to_surface(dst, &out);
 
 do {
err = MFXVideoVPP_RunFrameVPPAsync(s->session_download, in, &out, NULL, &sync);
@@ -797,11 +824,7 @@ static int qsv_transfer_data_to(AVHWFramesContext *ctx, 
AVFrame *dst,
 }
 
 in.Info = out->Info;
-in.Data.PitchLow = src->linesize[0];
-in.Data.Y= src->data[0];
-in.Data.U= src->data[1];
-in.Data.V= src->data[2];
-in.Data.A= src->data[3];
+map_frame_to_surface(src, &in);
 
 do {
err = MFXVideoVPP_RunFrameVPPAsync(s->session_upload, &in, out, NULL, &sync);
-- 
1.8.3.1

___
ffmpeg-devel mailing list
ffmpeg-devel@ffmpeg.org
http://ffmpeg.org/mailman/listinfo/ffmpeg-devel


Re: [FFmpeg-devel] [PATCH 0/1] Add Sega FILM muxer

2018-03-22 Thread Misty De Meo
On Thu, Mar 22, 2018 at 3:43 AM, wm4  wrote:
> Why?

Mainly to modify existing videos or encode new videos for Saturn
games. It's particularly useful for fan translation - to mux in new
audio, or encode new video for things like credits sequences.
___
ffmpeg-devel mailing list
ffmpeg-devel@ffmpeg.org
http://ffmpeg.org/mailman/listinfo/ffmpeg-devel


Re: [FFmpeg-devel] [PATCH v2] lavf/movenc: write track title metadata for mov files

2018-03-22 Thread Derek Buitenhuis
On 3/21/2018 9:30 PM, Courtland Idstrom wrote:
> Please let me know if this is sufficient. I can also upload these movies
> somewhere, I didn't think it would be appropriate to attach them to this
> email.

I'm satisfied with with it, thanks for checking!

Patch LGTM.

- Derek
___
ffmpeg-devel mailing list
ffmpeg-devel@ffmpeg.org
http://ffmpeg.org/mailman/listinfo/ffmpeg-devel


Re: [FFmpeg-devel] [PATCH] avformat/opensrt: add Haivision Open SRT protocol

2018-03-22 Thread Nicolas George
Sven Dueking (2018-03-16):
> Ping !?!?

I think I am actually expected to reply.

I think that by reviewing the patch I gave the impression that I was
promising to accept the patch in FFmpeg. It was not so, and I apologize
if it was taken that way. The original patch contained significant
changes in the standard network code that made it much more complex, I
wanted to avoid that, that is the reason I reviewed, it was purely
technical. The decision to accept a patch in FFmpeg is not purely
technical, it also involves balancing the cost of maintenance with the
benefit for users.

In this instance, the recent discussion on libav-dev seems to indicate
that the API and ABI of this library could be not very stable, making
the cost of maintenance relatively high, a fact that is worsened by the
library not being integrated in major Linux distros. As for the benefit
for users, are there public servers serving interesting content
accessible with this protocol? Are there situations where this protocol
would allow several instances of ffmpeg to communicate significantly
better than other protocols? I am not aware of any.

That, plus the poor choice of name (seriously, who dabs in multimedia
and does not know that SRT has been a subtitle format for more than
fifteen years? and there is the SRTP profile too) makes me doubtful
about integrating this in FFmpeg.

But it is not my choice only.

Regards,

-- 
  Nicolas George


signature.asc
Description: Digital signature
___
ffmpeg-devel mailing list
ffmpeg-devel@ffmpeg.org
http://ffmpeg.org/mailman/listinfo/ffmpeg-devel


Re: [FFmpeg-devel] [PATCH] avcodec/arm/hevcdsp_sao : add NEON optimization for sao

2018-03-22 Thread Shengbin Meng
Hi,

By checkasm benchmark, I can see a speedup of ~3x for band mode and ~6x for 
edge mode on my device (the device has aarch64 CPU, but I configured ffmpeg 
with `—arch=arm`). And FATE passed as well.

Results of a checkasm run:

$./tests/checkasm/checkasm --test=hevc_sao --bench
$ sudo ./tests/checkasm/checkasm --test=hevc_sao --bench
benchmarking with Linux Perf Monitoring API
nop: 49.8
checkasm: using random seed 1088726844
NEON:
 - hevc_sao.sao_band [OK]
 - hevc_sao.sao_edge [OK]
checkasm: all 10 tests passed
hevc_sao_band_8x8_8_c: 578.0
hevc_sao_band_8x8_8_neon: 215.3
hevc_sao_band_16x16_8_c: 2004.3
hevc_sao_band_16x16_8_neon: 680.8
hevc_sao_band_32x32_8_c: 8363.5
hevc_sao_band_32x32_8_neon: 2579.3
hevc_sao_band_48x48_8_c: 18268.3
hevc_sao_band_48x48_8_neon: 5653.3
hevc_sao_band_64x64_8_c: 32001.8
hevc_sao_band_64x64_8_neon: 9952.0
hevc_sao_edge_8x8_8_c: 1211.0
hevc_sao_edge_8x8_8_neon: 217.5
hevc_sao_edge_16x16_8_c: 4708.5
hevc_sao_edge_16x16_8_neon: 767.0
hevc_sao_edge_32x32_8_c: 18673.0
hevc_sao_edge_32x32_8_neon: 2967.3
hevc_sao_edge_48x48_8_c: 41936.3
hevc_sao_edge_48x48_8_neon: 6642.8
hevc_sao_edge_64x64_8_c: 74015.8
hevc_sao_edge_64x64_8_neon: 11781.8

Regards
Shengbin

> On 11 Mar 2018, at 10:27, Yingming Fan  wrote:
> 
> Hi, there. 
> I have already pushed a patch which add hevc_sao checkasm and patch was 
> adopted.
> You can verify this optimization by using checkasm under arm device, 
> `checkasm --test=hevc_sao --bench`.
> hevc_sao_band speed up ~2x, hevc_sao_edge speed up ~4x. Also passed FATE 
> under arm platform.
> 
> Yingming Fan
> 
>> On 8 Mar 2018, at 3:03 PM, Yingming Fan  wrote:
>> 
>> From: Meng Wang 
>> 
>> Signed-off-by: Meng Wang 
>> ---
>> As FFmpeg hevc decoder have no SAO neon optimization, we add sao_band and 
>> sao_edge neon codes in this patch.
>> I have already submit a patch called 'checkasm/hevc_sao : add hevc_sao for 
>> checkasm' several days ago.
>> Results below was printed by hevc_sao checkasm on an armv7 device Nexus 5. 
>> From the results we can see: hevc_sao_band speed up ~2x, hevc_sao_edge speed 
>> up ~4x. 
>> Also test FATE under armv7 device and MacOS.
>> 
>> hevc_sao_band_8x8_8_c: 804.9
>> hevc_sao_band_8x8_8_neon: 452.4
>> hevc_sao_band_16x16_8_c: 2638.1
>> hevc_sao_band_16x16_8_neon: 1169.9
>> hevc_sao_band_32x32_8_c: 9259.9
>> hevc_sao_band_32x32_8_neon: 3956.1
>> hevc_sao_band_48x48_8_c: 20344.6
>> hevc_sao_band_48x48_8_neon: 8649.6
>> hevc_sao_band_64x64_8_c: 35684.6
>> hevc_sao_band_64x64_8_neon: 15213.1
>> hevc_sao_edge_8x8_8_c: 1761.6
>> hevc_sao_edge_8x8_8_neon: 414.6
>> hevc_sao_edge_16x16_8_c: 6844.4
>> hevc_sao_edge_16x16_8_neon: 1589.9
>> hevc_sao_edge_32x32_8_c: 27156.4
>> hevc_sao_edge_32x32_8_neon: 6116.6
>> hevc_sao_edge_48x48_8_c: 60004.6
>> hevc_sao_edge_48x48_8_neon: 13686.4
>> hevc_sao_edge_64x64_8_c: 106708.1
>> hevc_sao_edge_64x64_8_neon: 24240.1
>> 
>> libavcodec/arm/Makefile|   3 +-
>> libavcodec/arm/hevcdsp_init_neon.c |  63 +
>> libavcodec/arm/hevcdsp_sao_neon.S  | 181 
>> +
>> 3 files changed, 246 insertions(+), 1 deletion(-)
>> create mode 100644 libavcodec/arm/hevcdsp_sao_neon.S
>> 
>> diff --git a/libavcodec/arm/Makefile b/libavcodec/arm/Makefile
>> index 1eeac5449e..2ee913e8a8 100644
>> --- a/libavcodec/arm/Makefile
>> +++ b/libavcodec/arm/Makefile
>> @@ -136,7 +136,8 @@ NEON-OBJS-$(CONFIG_DCA_DECODER)+= 
>> arm/synth_filter_neon.o
>> NEON-OBJS-$(CONFIG_HEVC_DECODER)   += arm/hevcdsp_init_neon.o   \
>>  arm/hevcdsp_deblock_neon.o\
>>  arm/hevcdsp_idct_neon.o   \
>> -  arm/hevcdsp_qpel_neon.o
>> +  arm/hevcdsp_qpel_neon.o   \
>> +  arm/hevcdsp_sao_neon.o
>> NEON-OBJS-$(CONFIG_RV30_DECODER)   += arm/rv34dsp_neon.o
>> NEON-OBJS-$(CONFIG_RV40_DECODER)   += arm/rv34dsp_neon.o\
>>  arm/rv40dsp_neon.o
>> diff --git a/libavcodec/arm/hevcdsp_init_neon.c 
>> b/libavcodec/arm/hevcdsp_init_neon.c
>> index a4628d2a93..3c480f12f8 100644
>> --- a/libavcodec/arm/hevcdsp_init_neon.c
>> +++ b/libavcodec/arm/hevcdsp_init_neon.c
>> @@ -21,8 +21,16 @@
>> #include "libavutil/attributes.h"
>> #include "libavutil/arm/cpu.h"
>> #include "libavcodec/hevcdsp.h"
>> +#include "libavcodec/avcodec.h"
>> #include "hevcdsp_arm.h"
>> 
>> +void ff_hevc_sao_band_filter_neon_8_wrapper(uint8_t *_dst, uint8_t *_src,
>> +  ptrdiff_t stride_dst, ptrdiff_t 
>> stride_src,
>> +  int16_t *sao_offset_val, int 
>> sao_left_class,
>> +  int width, int height);
>> +void ff_hevc_sao_edge_filter_neon_8_wrapper(uint8_t *_dst, uint8_t 

Re: [FFmpeg-devel] [PATCH] avcodec/dxva2_vc1: add missing frame_params callback to vc1_d3d11va2 hwaccel

2018-03-22 Thread wm4
On Thu, 22 Mar 2018 01:41:11 -0300
James Almer  wrote:

> Fixes ticket #7096
> 
> Signed-off-by: James Almer 
> ---
>  libavcodec/dxva2_vc1.c | 1 +
>  1 file changed, 1 insertion(+)
> 
> diff --git a/libavcodec/dxva2_vc1.c b/libavcodec/dxva2_vc1.c
> index f22c73cd1e..06f1083b3a 100644
> --- a/libavcodec/dxva2_vc1.c
> +++ b/libavcodec/dxva2_vc1.c
> @@ -473,6 +473,7 @@ const AVHWAccel ff_vc1_d3d11va2_hwaccel = {
>  .start_frame= dxva2_vc1_start_frame,
>  .decode_slice   = dxva2_vc1_decode_slice,
>  .end_frame  = dxva2_vc1_end_frame,
> +.frame_params   = ff_dxva2_common_frame_params,
>  .frame_priv_data_size = sizeof(struct dxva2_picture_context),
>  .priv_data_size = sizeof(FFDXVASharedContext),
>  };

LGTM. Apparently I missed this.
___
ffmpeg-devel mailing list
ffmpeg-devel@ffmpeg.org
http://ffmpeg.org/mailman/listinfo/ffmpeg-devel


Re: [FFmpeg-devel] [PATCH v4 3/7] cmdutils: use new iteration APIs

2018-03-22 Thread Nicolas George
Josh de Kock (2018-03-22):
>  I have -ffunroll'd the macros for you Nicolas.

That is not what I asked. The macros were just a way of making the poor
API a little less poor, but the problem still remains:

> +#ifdef CONFIG_AVDEVICE
> +opaque = 0;
> +if (muxdemuxers != SHOW_DEMUXERS) {
> +while ((ofmt = av_outdev_iterate())) {
> +if ((!name || strcmp(ofmt->name, name) < 0) && 
> strcmp(ofmt->name, last_name) > 0) {
> +name = ofmt->name;
> +long_name = ofmt->long_name;
> +is_ofmt = 1;
>  }
> -if (name && strcmp(ifmt->name, name) == 0)
> -decode = 1;
>  }
>  }
> +
> +opaque = 0;
> +if (muxdemuxers != SHOW_MUXERS) {
> +while ((ifmt = av_indev_iterate())) {
> +if ((!name || strcmp(ifmt-> name, name) < 0) && 
> strcmp(ifmt-> name, last_name) > 0) {
> +name = ifmt - > name;
> +long_name = ifmt - > long_name;
> +is_ifmt = 1;
> +}
> +}
> +}
> +#endif

There is a separate loop for devices, and I strongly oppose to that. Fix
your API so that all (de)muxer-like components are returned in a single
loop.

Regards,

-- 
  Nicolas George


signature.asc
Description: Digital signature
___
ffmpeg-devel mailing list
ffmpeg-devel@ffmpeg.org
http://ffmpeg.org/mailman/listinfo/ffmpeg-devel


Re: [FFmpeg-devel] [PATCH v4 3/7] cmdutils: use new iteration APIs

2018-03-22 Thread Nicolas George
Josh de Kock (2018-03-22):
> There is always the option to just merge lavf and lavd. The state of them
> being sort-of merged, but not really, isn't very good and adds a lot of
> complexity especially in inter-library dependencies (which are unneeded if
> lavf and lavd are either merged or actually separate).

You are driving your reasoning the wrong way: you start from the
limitations of your new API, and based on what it can do you intent huge
changes to the project that affect user interface. It should be the
opposite: first decide on the user interface and general design, and
then make sure the API allow it.

For user interface, I state:

1. FFmpeg should allow users to select devices the same way as
   (de)muxers, because it allows them to use devices in a basic way even
   when applications are not explicitly prepared for them, that makes
   extra features for free.

Hence, I deduce:

2. All lavf APIs should treat devices the same way as (de)muxers.

And I still think that the better option is to revert the new API and
design a new new one, learning from the small mistakes of this one.

Regards,

-- 
  Nicolas George


signature.asc
Description: Digital signature
___
ffmpeg-devel mailing list
ffmpeg-devel@ffmpeg.org
http://ffmpeg.org/mailman/listinfo/ffmpeg-devel


Re: [FFmpeg-devel] [PATCH v4 3/7] cmdutils: use new iteration APIs

2018-03-22 Thread Josh de Kock

On 2018/03/22 11:37, wm4 wrote:

On Thu, 22 Mar 2018 12:32:29 +0100
Nicolas George  wrote:


Josh de Kock (2018-03-22):

There is always the option to just merge lavf and lavd. The state of them
being sort-of merged, but not really, isn't very good and adds a lot of
complexity especially in inter-library dependencies (which are unneeded if
lavf and lavd are either merged or actually separate).


You are driving your reasoning the wrong way: you start from the
limitations of your new API, and based on what it can do you intent huge
changes to the project that affect user interface. It should be the
opposite: first decide on the user interface and general design, and
then make sure the API allow it.

For user interface, I state:

1. FFmpeg should allow users to select devices the same way as
(de)muxers, because it allows them to use devices in a basic way even
when applications are not explicitly prepared for them, that makes
extra features for free.


Devices are not muxers/demuxers and can behave significantly
differently. Thus they should be accessible via a different API, and in
particular avoid accidental use of device (de)muxers if only normal
(de)muxers are wanted by the application.
Merging lavd into lavf may be the best option here, as it allows us to 
change the return type of av_iterate_indev() etc to an AVDevice or 
another type which may represent an actual device as opposed to a purely 
input/output device (which is just implemented as an actual lavf 
format). I don't know; say this is merged and then we add an AVDevice, 
then we already have device iteration functions which return AVFormat so 
we would need different function names to accommodate the lavd change 
requiring yet another API change--so I am not entirely sure that the 
current patches (implementing option 1) are the best way to go about it.


--
Josh
___
ffmpeg-devel mailing list
ffmpeg-devel@ffmpeg.org
http://ffmpeg.org/mailman/listinfo/ffmpeg-devel


Re: [FFmpeg-devel] [PATCH] avformat/opensrt: add Haivision Open SRT protocol

2018-03-22 Thread Sven Dueking


> -Ursprüngliche Nachricht-
> Von: ffmpeg-devel [mailto:ffmpeg-devel-boun...@ffmpeg.org] Im Auftrag
> von Nicolas George
> Gesendet: Donnerstag, 22. März 2018 16:03
> An: FFmpeg development discussions and patches
> Betreff: Re: [FFmpeg-devel] [PATCH] avformat/opensrt: add Haivision
> Open SRT protocol
> 
> Sven Dueking (2018-03-16):
> > Ping !?!?
> 
> I think I am actually expected to reply.
> 
> I think that by reviewing the patch I gave the impression that I was
> promising to accept the patch in FFmpeg. It was not so, and I apologize
> if it was taken that way. The original patch contained significant
> changes in the standard network code that made it much more complex, I
> wanted to avoid that, that is the reason I reviewed, it was purely
> technical. The decision to accept a patch in FFmpeg is not purely
> technical, it also involves balancing the cost of maintenance with the
> benefit for users.
> 
> In this instance, the recent discussion on libav-dev seems to indicate
> that the API and ABI of this library could be not very stable, making
> the cost of maintenance relatively high, a fact that is worsened by the
> library not being integrated in major Linux distros. As for the benefit
> for users, are there public servers serving interesting content
> accessible with this protocol? Are there situations where this protocol
> would allow several instances of ffmpeg to communicate significantly
> better than other protocols? I am not aware of any.
> 
> That, plus the poor choice of name (seriously, who dabs in multimedia
> and does not know that SRT has been a subtitle format for more than
> fifteen years? and there is the SRTP profile too) makes me doubtful
> about integrating this in FFmpeg.
> 
> But it is not my choice only.
> 
> Regards,
> 
> --
>   Nicolas George

Hi,

The SRT library has been made Open Source in April 2017, but was used before
in Haivision products since 2013. It is used in 100s of mission critical
installation (medical, sports, broadcast, military) worldwide and its
reliability has been proven. Since it’s Open Source, 100 organizations have
joined the SRT Alliance (https://www.srtalliance.org/) in order to create a
new ecosystem for broadcast quality streaming over the public internet,
which wasn’t available as Open Source up to that point and only offered as
commercial solutions by very few specialized companies. Meanwhile many
organizations including the EBU (Eurovision), Wowza, Matrox and many more
have SRT Ready solutions. SRT has been integrated into VLC and GStreamer.
The SRT Alliance is working with the video services forum, who recommends
standards to SMPTE, towards a standardization of the protocol. Many
architectures in the broadcast industry are using either libav or ffmpeg and
are asking when they will be able to utilize SRT though these known media
libraries. I understand the name is not optimal, but when it was initially
created in 2013 unfortunately nobody thought about taken it Open Source due
to the typical industry mindset at that time.

Thank you!

___
ffmpeg-devel mailing list
ffmpeg-devel@ffmpeg.org
http://ffmpeg.org/mailman/listinfo/ffmpeg-devel


Re: [FFmpeg-devel] [PATCH] avfilter/af_channelsplit: add channels option

2018-03-22 Thread Nicolas George
Paul B Mahol (2018-03-22):
> So user can pick which channels to extract.
> 
> Signed-off-by: Paul B Mahol 
> ---
>  doc/filters.texi  | 18 ++
>  libavfilter/af_channelsplit.c | 39 +--
>  2 files changed, 51 insertions(+), 6 deletions(-)

Thanks for simplifying the code. I have no more remarks. But Alexander
commented too, so please let him time to follow-up.

Regards,

-- 
  Nicolas George


signature.asc
Description: Digital signature
___
ffmpeg-devel mailing list
ffmpeg-devel@ffmpeg.org
http://ffmpeg.org/mailman/listinfo/ffmpeg-devel


Re: [FFmpeg-devel] [PATCH 2/3] lavu/hwcontext_qsv: Add support for pix_fmt RGB32.

2018-03-22 Thread Carl Eugen Hoyos
2018-03-22 16:05 GMT+01:00, Zhong Li :
> RGB32 format may be used as overlay with alpha blending.
> So add RGB32 format support.
>
> Signed-off-by: ChaoX A Liu 
> Signed-off-by: Zhong Li 
> ---
>  libavutil/hwcontext_qsv.c | 43 +--
>  1 file changed, 33 insertions(+), 10 deletions(-)
>
> diff --git a/libavutil/hwcontext_qsv.c b/libavutil/hwcontext_qsv.c
> index 0fefec3..dcaf072 100644
> --- a/libavutil/hwcontext_qsv.c
> +++ b/libavutil/hwcontext_qsv.c
> @@ -90,6 +90,7 @@ static const struct {
>  uint32_t   fourcc;
>  } supported_pixel_formats[] = {
>  { AV_PIX_FMT_NV12, MFX_FOURCC_NV12 },
> +{ AV_PIX_FMT_RGB32,MFX_FOURCC_RGB4 },

Imo, AV_PIX_FMT_BGRA would be more readable.

And please kindly confirm that you did test this, the
description on https://software.intel.com/en-us/node/628506
is so misleading that it cannot easily be used a source.

[...]

> +case AV_PIX_FMT_RGB32:
> +surface->Data.B = frame->data[0];
> +surface->Data.G = frame->data[0] + 1;
> +surface->Data.R = frame->data[0] + 2;
> +surface->Data.A = frame->data[0] + 3;

Does this mean every FFmpeg rgb pix_fmt could be
supported with the appropriate matrix?

Wouldn't this be very little code for a great performance
gain given the "wrong" input?

Thank you, Carl Eugen
___
ffmpeg-devel mailing list
ffmpeg-devel@ffmpeg.org
http://ffmpeg.org/mailman/listinfo/ffmpeg-devel


Re: [FFmpeg-devel] [PATCH] avformat/segafilm - fix keyframe detection and set packet, flags

2018-03-22 Thread Gyan Doshi

Set the packet flag using constant as well.
From 905cc5f1146c957ae4290b44a06a22a7524a87de Mon Sep 17 00:00:00 2001
From: Gyan Doshi 
Date: Wed, 21 Mar 2018 18:59:33 +0530
Subject: [PATCH] avformat/segafilm - fix keyframe detection and set packet
 flags

Streams from a Segafilm cpk file can't be streamcopied because
keyframe flag isn't correctly set in stream index and
said flag is never conveyed to the packet

Fixes #7091
---
 libavformat/segafilm.c | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/libavformat/segafilm.c b/libavformat/segafilm.c
index 1fdef50cc7..4c0cca0140 100644
--- a/libavformat/segafilm.c
+++ b/libavformat/segafilm.c
@@ -239,7 +239,7 @@ static int film_read_header(AVFormatContext *s)
 } else {
 film->sample_table[i].stream = film->video_stream_index;
film->sample_table[i].pts = AV_RB32(&scratch[8]) & 0x7FFFFFFF;
-film->sample_table[i].keyframe = (scratch[8] & 0x80) ? 0 : 1;
+film->sample_table[i].keyframe = (scratch[8] & 0x80) ? 
AVINDEX_KEYFRAME : 0;
 video_frame_counter++;
 if (film->video_type)
 av_add_index_entry(s->streams[film->video_stream_index],
@@ -286,6 +286,7 @@ static int film_read_packet(AVFormatContext *s,
 
 pkt->stream_index = sample->stream;
 pkt->pts = sample->pts;
+pkt->flags |= sample->keyframe ? AV_PKT_FLAG_KEY : 0;
 
 film->current_sample++;
 
-- 
2.12.2.windows.2___
ffmpeg-devel mailing list
ffmpeg-devel@ffmpeg.org
http://ffmpeg.org/mailman/listinfo/ffmpeg-devel


[FFmpeg-devel] [PATCH][GSoC] srcnn - an image super resolution filter using CNN

2018-03-22 Thread Mina

Hi,
  This patch is introduced as a qualification task required by Super 
Resolution project for GSoC. It passes patchcheck and make fate and 
doesn't introduce new warnings. It's implemented by the help of the 
mentor: Pedro Arthur. It's been tested over 7 images of which 6 got 
expected results while 1 didn't.

Used command for testing:
ffmpeg -i input_image -vf "scale=2*iw:2*ih,format=yuv420p,srcnn" 
output_image.


Attached is the patch and here is a link  for 
3 images with their results; one of which is the one with incorrect result.
Please check(the problem maybe in the way I am copying the image) and 
share some feedback or guidance about what the problem may be and what 
should I do next regarding the GSoC proposal as I haven't submitted one 
yet. Is there any template/requirements for proposal? Also note that the 
algorithm runs convolution without multi-threading on cpu as simple 
while loops so it's slow for large images. Performance be improved later 
on.


Best regards,
Mina Sami.

>From 2ac25e4456a3ce9a16f688cce590da2521ff4703 Mon Sep 17 00:00:00 2001
From: MinaBombo 
Date: Thu, 22 Mar 2018 16:53:22 +0200
Subject: [PATCH] Added srcnn filter

---
 Changelog   |   1 +
 libavfilter/Makefile|   1 +
 libavfilter/allfilters.c|   1 +
 libavfilter/srcnn_weights.h | 290 
 libavfilter/vf_srcnn.c  | 261 +++
 5 files changed, 554 insertions(+)
 create mode 100644 libavfilter/srcnn_weights.h
 create mode 100644 libavfilter/vf_srcnn.c

diff --git a/Changelog b/Changelog
index c1b9df4..e153f12 100644
--- a/Changelog
+++ b/Changelog
@@ -46,6 +46,7 @@ version :
   They can be found at http://git.videolan.org/?p=ffmpeg/nv-codec-headers.git
 - native SBC encoder and decoder
 - drmeter audio filter
+- Added image SRCNN(Super Resolution Convolutional Neural Network) filter. No training yet.
 
 
 version 3.4:
diff --git a/libavfilter/Makefile b/libavfilter/Makefile
index fc16512..b37a956 100644
--- a/libavfilter/Makefile
+++ b/libavfilter/Makefile
@@ -323,6 +323,7 @@ OBJS-$(CONFIG_SMARTBLUR_FILTER)  += vf_smartblur.o
 OBJS-$(CONFIG_SOBEL_FILTER)  += vf_convolution.o
 OBJS-$(CONFIG_SPLIT_FILTER)  += split.o
 OBJS-$(CONFIG_SPP_FILTER)+= vf_spp.o
+OBJS-$(CONFIG_SRCNN_FILTER)  += vf_srcnn.o
 OBJS-$(CONFIG_SSIM_FILTER)   += vf_ssim.o framesync.o
 OBJS-$(CONFIG_STEREO3D_FILTER)   += vf_stereo3d.o
 OBJS-$(CONFIG_STREAMSELECT_FILTER)   += f_streamselect.o framesync.o
diff --git a/libavfilter/allfilters.c b/libavfilter/allfilters.c
index cc423af..42f409c 100644
--- a/libavfilter/allfilters.c
+++ b/libavfilter/allfilters.c
@@ -332,6 +332,7 @@ static void register_all(void)
 REGISTER_FILTER(SOBEL,  sobel,  vf);
 REGISTER_FILTER(SPLIT,  split,  vf);
 REGISTER_FILTER(SPP,spp,vf);
+REGISTER_FILTER(SRCNN,  srcnn,  vf);
 REGISTER_FILTER(SSIM,   ssim,   vf);
 REGISTER_FILTER(STEREO3D,   stereo3d,   vf);
 REGISTER_FILTER(STREAMSELECT,   streamselect,   vf);
diff --git a/libavfilter/srcnn_weights.h b/libavfilter/srcnn_weights.h
new file mode 100644
index 000..6497b1e
--- /dev/null
+++ b/libavfilter/srcnn_weights.h
@@ -0,0 +1,290 @@
+/*
+ * Copyright (c) 2018 Mina Sami
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * Super resolution filter pretrained weights
+ *
+ * @see https://arxiv.org/abs/1501.00092
+ */
+
+#ifndef AVFILTER_SRCNN_WEIGHTS_H
+#define AVFILTER_SRCNN_WEIGHTS_H
+
+const int ff_cnn_num_filters[] = {
+1, 64, 32, 1
+};
+
+const int ff_cnn_size_filters[] = {
+1, 9, 1, 5
+};
+
+static const float weights1_64x81[] = {

Re: [FFmpeg-devel] [PATCH v2] lavf/movenc: write track title metadata for mov files

2018-03-22 Thread Jan Ekström
On Wed, Mar 21, 2018 at 11:30 PM, Courtland Idstrom
 wrote:
>
>
> Please let me know if this is sufficient. I can also upload these movies
> somewhere, I didn't think it would be appropriate to attach them to this
> email.
>
>
> Cheers,
>
> -Courtland

Thanks for the verification, pushed.

Jan
___
ffmpeg-devel mailing list
ffmpeg-devel@ffmpeg.org
http://ffmpeg.org/mailman/listinfo/ffmpeg-devel


[FFmpeg-devel] [PATCH] avcodec/mediacodecdec: work around for decoding h264 with coded fields

2018-03-22 Thread Aman Gupta
From: Aman Gupta 

This is a hacky work-around for #7092, where the lavc h264
parser splits coded fields into separate video packets, only one
of which has a PTS set.

The MediaCodec#queueInputBuffer API expects a PTS along with
incoming video packets, and breaks badly when the PTS is missing
or incorrect (previously it would be passed in as AV_NOPTS_VALUE,
but the same breakage happens if you pass in 0 instead).

Since it seems there's no easy fix for #7092, this patch stores
the previous PTS in the decoder context and re-uses it for the
second packet. This emulates the behavior of other Android video
players that don't split the coded fields, and pass them as a single
buffer with the same timestamp.
---
 libavcodec/mediacodecdec_common.c | 26 +++---
 libavcodec/mediacodecdec_common.h |  2 ++
 2 files changed, 17 insertions(+), 11 deletions(-)

diff --git a/libavcodec/mediacodecdec_common.c 
b/libavcodec/mediacodecdec_common.c
index e31adb487c..16112b186d 100644
--- a/libavcodec/mediacodecdec_common.c
+++ b/libavcodec/mediacodecdec_common.c
@@ -450,6 +450,7 @@ static int mediacodec_dec_flush_codec(AVCodecContext 
*avctx, MediaCodecDecContex
 s->eos = 0;
atomic_fetch_add(&s->serial, 1);
atomic_init(&s->hw_buffer_count, 0);
+s->last_pts = AV_NOPTS_VALUE;
 
 status = ff_AMediaCodec_flush(codec);
 if (status < 0) {
@@ -477,6 +478,7 @@ int ff_mediacodec_dec_init(AVCodecContext *avctx, 
MediaCodecDecContext *s,
atomic_init(&s->refcount, 1);
atomic_init(&s->hw_buffer_count, 0);
atomic_init(&s->serial, 1);
+s->last_pts = AV_NOPTS_VALUE;
 
 pix_fmt = ff_get_format(avctx, pix_fmts);
 if (pix_fmt == AV_PIX_FMT_MEDIACODEC) {
@@ -571,6 +573,7 @@ int ff_mediacodec_dec_send(AVCodecContext *avctx, 
MediaCodecDecContext *s,
 FFAMediaCodec *codec = s->codec;
 int status;
 int64_t input_dequeue_timeout_us = INPUT_DEQUEUE_TIMEOUT_US;
+int64_t pts;
 
 if (s->flushing) {
 av_log(avctx, AV_LOG_ERROR, "Decoder is flushing and cannot accept new 
buffer "
@@ -605,14 +608,21 @@ int ff_mediacodec_dec_send(AVCodecContext *avctx, 
MediaCodecDecContext *s,
 return AVERROR_EXTERNAL;
 }
 
+pts = pkt->pts;
+if (pts == AV_NOPTS_VALUE && s->last_pts != AV_NOPTS_VALUE) {
+pts = s->last_pts;
+} else if (pts == AV_NOPTS_VALUE) {
+av_log(avctx, AV_LOG_WARNING, "Packet is missing PTS!\n");
+pts = 0;
+}
+s->last_pts = pkt->pts;
+if (pts && avctx->pkt_timebase.num && avctx->pkt_timebase.den) {
+pts = av_rescale_q(pts, avctx->pkt_timebase, av_make_q(1, 
100));
+}
+
 if (need_draining) {
-int64_t pts = pkt->pts;
 uint32_t flags = ff_AMediaCodec_getBufferFlagEndOfStream(codec);
 
-if (s->surface) {
-pts = av_rescale_q(pts, avctx->pkt_timebase, av_make_q(1, 
100));
-}
-
 av_log(avctx, AV_LOG_DEBUG, "Sending End Of Stream signal\n");
 
 status = ff_AMediaCodec_queueInputBuffer(codec, index, 0, 0, pts, 
flags);
@@ -627,16 +637,10 @@ int ff_mediacodec_dec_send(AVCodecContext *avctx, 
MediaCodecDecContext *s,
 s->draining = 1;
 break;
 } else {
-int64_t pts = pkt->pts;
-
 size = FFMIN(pkt->size - offset, size);
 memcpy(data, pkt->data + offset, size);
 offset += size;
 
-if (avctx->pkt_timebase.num && avctx->pkt_timebase.den) {
-pts = av_rescale_q(pts, avctx->pkt_timebase, av_make_q(1, 
100));
-}
-
 status = ff_AMediaCodec_queueInputBuffer(codec, index, 0, size, 
pts, 0);
 if (status < 0) {
 av_log(avctx, AV_LOG_ERROR, "Failed to queue input buffer 
(status = %d)\n", status);
diff --git a/libavcodec/mediacodecdec_common.h 
b/libavcodec/mediacodecdec_common.h
index 023d4c5fa7..045b7aaac4 100644
--- a/libavcodec/mediacodecdec_common.h
+++ b/libavcodec/mediacodecdec_common.h
@@ -69,6 +69,8 @@ typedef struct MediaCodecDecContext {
 bool delay_flush;
 atomic_int serial;
 
+int64_t last_pts;
+
 } MediaCodecDecContext;
 
 int ff_mediacodec_dec_init(AVCodecContext *avctx,
-- 
2.14.2

___
ffmpeg-devel mailing list
ffmpeg-devel@ffmpeg.org
http://ffmpeg.org/mailman/listinfo/ffmpeg-devel


Re: [FFmpeg-devel] [PATCH v4 3/7] cmdutils: use new iteration APIs

2018-03-22 Thread Michael Niedermayer
On Thu, Mar 22, 2018 at 01:07:18PM +0100, Nicolas George wrote:
> Josh de Kock (2018-03-22):
> > Merging lavd into lavf may be the best option here, as it allows us to
> > change the return type of av_iterate_indev() etc to an AVDevice or another
> > type which may represent an actual device as opposed to a purely
> > input/output device (which is just implemented as an actual lavf format). I
> > don't know; say this is merged and then we add an AVDevice, then we already
> > have device iteration functions which return AVFormat so we would need
> > different function names to accommodate the lavd change requiring yet
> > another API change--so I am not entirely sure that the current patches
> > (implementing option 1) are the best way to go about it.
> 
> I am sorry, but I have trouble understanding what you are trying to say.
> Maybe rephrase it with shorter sentences?
> 
> There is no separate type for devices, they are coded as AVInputFormat
> and AVOutputFormat. That is fine, because they are designed to behave
> like (de)muxers and can be used anywhere a (de)muxer can be used.
> 
> The result is a huge benefit for users. Just look at ffmpeg: it was
> designed for (de)muxers, but thanks to that it can do real-time capture
> and quick on-the-fly preview. The features are not as complete as with a
> real device API, but most of the time the basic features provided by
> lavf are largely enough to suit the needs.
> 
> You want proof? Just look at the users mailing-list where questions are
> asked about dshow, X11 capture, Decklink cards, etc.
> 

> If you were to change the lavd API to make it different from (de)muxers,
> all applications that right now can use devices automatically would lose
> that ability, to the detriment of users.

not taking a position on any of the suggestions in this thread (as i dont 
have the time ATM to properly think about them ...) but
if lavd had a incompatible API then it would likely be possible to add a
demuxer and muxer to lavf that gives access to all these devices.
So they would then still be accessable through the (de)muxer interface.


[...]

-- 
Michael GnuPG fingerprint: 9FF2128B147EF6730BADF133611EC787040B0FAB

You can kill me, but you cannot change the truth.


signature.asc
Description: PGP signature
___
ffmpeg-devel mailing list
ffmpeg-devel@ffmpeg.org
http://ffmpeg.org/mailman/listinfo/ffmpeg-devel


Re: [FFmpeg-devel] [PATCH] avcodec/mpeg4_unpack_bframes: make sure the packet is writable when data needs to changed

2018-03-22 Thread Michael Niedermayer
On Wed, Mar 21, 2018 at 10:41:03PM -0300, James Almer wrote:
> Nothing currently guarantees that the packet passed to the bsf will
> be writable.
> 
> Signed-off-by: James Almer 
> ---
> This supersedes "[PATCH 2/2] avcodec/mpeg4_unpack_bframes: allocate a
> new packet when data needs to be changed"
> 
>  libavcodec/mpeg4_unpack_bframes_bsf.c | 3 +++
>  1 file changed, 3 insertions(+)

didnt test yet but should be ok

thx

[...]
-- 
Michael GnuPG fingerprint: 9FF2128B147EF6730BADF133611EC787040B0FAB

Opposition brings concord. Out of discord comes the fairest harmony.
-- Heraclitus


signature.asc
Description: PGP signature
___
ffmpeg-devel mailing list
ffmpeg-devel@ffmpeg.org
http://ffmpeg.org/mailman/listinfo/ffmpeg-devel


Re: [FFmpeg-devel] [PATCH] avcodec/mpeg4_unpack_bframes: make sure the packet is writable when data needs to changed

2018-03-22 Thread James Almer
On 3/22/2018 4:34 PM, Michael Niedermayer wrote:
> On Wed, Mar 21, 2018 at 10:41:03PM -0300, James Almer wrote:
>> Nothing currently guarantees that the packet passed to the bsf will
>> be writable.
>>
>> Signed-off-by: James Almer 
>> ---
>> This supersedes "[PATCH 2/2] avcodec/mpeg4_unpack_bframes: allocate a
>> new packet when data needs to be changed"
>>
>>  libavcodec/mpeg4_unpack_bframes_bsf.c | 3 +++
>>  1 file changed, 3 insertions(+)
> 
> didnt test yet but should be ok
> 
> thx

Pushed, thanks.
___
ffmpeg-devel mailing list
ffmpeg-devel@ffmpeg.org
http://ffmpeg.org/mailman/listinfo/ffmpeg-devel


[FFmpeg-devel] [PATCH] Support signaling of last segment number

2018-03-22 Thread sanilraut
Last segment indicated by mpd is not parsed.
Example stream: 
http://dash.akamaized.net/dash264/TestCasesIOP41/LastSegmentNumber/1/manifest_last_segment_num.mpd

This patch supports parsing of Supplemental Descriptor with @schemeIdUri set to
http://dashif.org/guidelines/last-segment-number with the @value set to the last segment number.

---
 libavformat/dashdec.c | 22 +++---
 1 file changed, 19 insertions(+), 3 deletions(-)

diff --git a/libavformat/dashdec.c b/libavformat/dashdec.c
index 7b79b93..db63a99 100644
--- a/libavformat/dashdec.c
+++ b/libavformat/dashdec.c
@@ -805,7 +805,8 @@ static int parse_manifest_representation(AVFormatContext 
*s, const char *url,
  xmlNodePtr fragment_template_node,
  xmlNodePtr content_component_node,
  xmlNodePtr adaptionset_baseurl_node,
- xmlNodePtr 
adaptionset_segmentlist_node)
+ xmlNodePtr 
adaptionset_segmentlist_node,
+ xmlNodePtr 
adaptionset_supplementalproperty_node)
 {
 int32_t ret = 0;
 int32_t audio_rep_idx = 0;
@@ -825,6 +826,7 @@ static int parse_manifest_representation(AVFormatContext 
*s, const char *url,
 char *timescale_val = NULL;
 char *initialization_val = NULL;
 char *media_val = NULL;
+char *val = NULL;
 xmlNodePtr baseurl_nodes[4];
 xmlNodePtr representation_node = node;
 char *rep_id_val = xmlGetProp(representation_node, "id");
@@ -920,6 +922,13 @@ static int parse_manifest_representation(AVFormatContext 
*s, const char *url,
 rep->first_seq_no = (int64_t) strtoll(startnumber_val, NULL, 
10);
 xmlFree(startnumber_val);
 }
+if (adaptionset_supplementalproperty_node) {
+if 
(!strcmp(xmlGetProp(adaptionset_supplementalproperty_node,"schemeIdUri"), 
"http://dashif.org/guidelines/last-segment-number")) {
+val = 
xmlGetProp(adaptionset_supplementalproperty_node,"value");
+rep->last_seq_no =(int64_t) strtoll(val, NULL, 10) - 1;
+xmlFree(val);
+ }
+}
 
 fragment_timeline_node = 
find_child_node_by_name(representation_segmenttemplate_node, "SegmentTimeline");
 
@@ -1054,6 +1063,7 @@ static int parse_manifest_adaptationset(AVFormatContext 
*s, const char *url,
 xmlNodePtr content_component_node = NULL;
 xmlNodePtr adaptionset_baseurl_node = NULL;
 xmlNodePtr adaptionset_segmentlist_node = NULL;
+xmlNodePtr adaptionset_supplementalproperty_node = NULL;
 xmlNodePtr node = NULL;
 
 node = xmlFirstElementChild(adaptionset_node);
@@ -1066,6 +1076,8 @@ static int parse_manifest_adaptationset(AVFormatContext 
*s, const char *url,
 adaptionset_baseurl_node = node;
 } else if (!av_strcasecmp(node->name, (const char *)"SegmentList")) {
 adaptionset_segmentlist_node = node;
+} else if (!av_strcasecmp(node->name, (const char 
*)"SupplementalProperty")) {
+adaptionset_supplementalproperty_node = node;
 } else if (!av_strcasecmp(node->name, (const char *)"Representation")) 
{
 ret = parse_manifest_representation(s, url, node,
 adaptionset_node,
@@ -1076,7 +1088,8 @@ static int parse_manifest_adaptationset(AVFormatContext 
*s, const char *url,
 fragment_template_node,
 content_component_node,
 adaptionset_baseurl_node,
-adaptionset_segmentlist_node);
+adaptionset_segmentlist_node,
+
adaptionset_supplementalproperty_node);
 if (ret < 0) {
 return ret;
 }
@@ -1819,7 +1832,10 @@ static int open_demux_for_component(AVFormatContext *s, 
struct representation *p
 
 pls->parent = s;
 pls->cur_seq_no  = calc_cur_seg_no(s, pls);
-pls->last_seq_no = calc_max_seg_no(pls, s->priv_data);
+
+if (!pls->last_seq_no) {
+pls->last_seq_no = calc_max_seg_no(pls, s->priv_data);
+}
 
 ret = reopen_demux_for_component(s, pls);
 if (ret < 0) {
-- 
2.7.4

___
ffmpeg-devel mailing list
ffmpeg-devel@ffmpeg.org
http://ffmpeg.org/mailman/listinfo/ffmpeg-devel


Re: [FFmpeg-devel] how to add step-to-previous-frame feature to ffplay?

2018-03-22 Thread Steven Liu


> On 22 Mar 2018, at 15:13, 陈 永岗  wrote:
> 
> 
> Hello everyone,
> 
> 
> I'm newbie to ffmpeg, and found ffplay.c is very useful which contains lots 
> of command line operations. If press 's', it will play next frame. However I 
> want to make it support playing previous frame. Where should I start? Any 
> recommendations?
https://ffmpeg.org/developer.html
https://ffmpeg.org/git-howto.html

可以先读读这几个文档,了解一下如何参与开发
> 
> 
> Best regards
> 
> Dom
> ___
> ffmpeg-devel mailing list
> ffmpeg-devel@ffmpeg.org
> http://ffmpeg.org/mailman/listinfo/ffmpeg-devel

Thanks
Steven





___
ffmpeg-devel mailing list
ffmpeg-devel@ffmpeg.org
http://ffmpeg.org/mailman/listinfo/ffmpeg-devel


Re: [FFmpeg-devel] how to add step-to-previous-frame feature to ffplay?

2018-03-22 Thread Steven Liu


> On 22 Mar 2018, at 15:13, 陈 永岗  wrote:
> 
> 
> Hello everyone,
> 
> 
> I'm newbie to ffmpeg, and found ffplay.c is very useful which contains lots 
> of command line operations. If press 's', it will play next frame. However I 
> want to make it support playing previous frame. Where should I start? Any 
> recommendations?
https://ffmpeg.org/developer.html
https://ffmpeg.org/git-howto.html

You can reference these documents to get how to contribute to ffmpeg.
Patch welcome :D

> 
> 
> Best regards
> 
> Dom
> ___
> ffmpeg-devel mailing list
> ffmpeg-devel@ffmpeg.org
> http://ffmpeg.org/mailman/listinfo/ffmpeg-devel

Thanks
Steven





___
ffmpeg-devel mailing list
ffmpeg-devel@ffmpeg.org
http://ffmpeg.org/mailman/listinfo/ffmpeg-devel


Re: [FFmpeg-devel] [PATCH v4 3/7] cmdutils: use new iteration APIs

2018-03-22 Thread Michael Niedermayer
On Thu, Mar 22, 2018 at 02:01:27AM +, Josh de Kock wrote:
[...]
> +#ifdef CONFIG_AVDEVICE
> +opaque = 0;
> +if (muxdemuxers != SHOW_DEMUXERS) {
> +while ((ofmt = av_outdev_iterate())) {
> +if ((!name || strcmp(ofmt-> name, name) < 0) && 
> strcmp(ofmt-> name, last_name) > 0) {
> +name = ofmt - > name;

> +long_name = ofmt - > long_name;
^^^
this doesnt build

[...]
-- 
Michael GnuPG fingerprint: 9FF2128B147EF6730BADF133611EC787040B0FAB

Those who are best at talking, realize last or never when they are wrong.


signature.asc
Description: PGP signature
___
ffmpeg-devel mailing list
ffmpeg-devel@ffmpeg.org
http://ffmpeg.org/mailman/listinfo/ffmpeg-devel


Re: [FFmpeg-devel] [PATCH] lavfi: Add OpenCL avgblur filter

2018-03-22 Thread Song, Ruiling
> 
> Current scaling and colour conversion code is mostly found in libswscale, 
> though
> there are also other places like the colorspace filter.  I don't know whether 
> any
> of these will translate suitably to GPU code and what the right approach is 
> here -
> some investigation will be required.
Hi Mark,

I am currently working on the scaling filter implemented using OpenCL (it is 
something like scale_cuda),
I still need some time to polish the patch, and will send it out to the mail 
list once it is good enough.

Ruiling

> 
> - Mark
> ___
> ffmpeg-devel mailing list
> ffmpeg-devel@ffmpeg.org
> http://ffmpeg.org/mailman/listinfo/ffmpeg-devel
___
ffmpeg-devel mailing list
ffmpeg-devel@ffmpeg.org
http://ffmpeg.org/mailman/listinfo/ffmpeg-devel


Re: [FFmpeg-devel] [PATCH] avfilter: add hrtfm filter

2018-03-22 Thread Aurelien Jacobs
On Mon, Mar 19, 2018 at 10:49:28PM -0800, Lou Logan wrote:
> On Fri, Mar 16, 2018, at 11:44 AM, Paul B Mahol wrote:
> > Signed-off-by: Paul B Mahol 
> > ---
> >  doc/filters.texi |  60 ++
> >  libavfilter/Makefile |   1 +
> >  libavfilter/af_hrtfm.c   | 486
> >  +++>  libavfilter/allfilters.c 
> > |   1 +
> >  4 files changed, 548 insertions(+)
> >  create mode 100644 libavfilter/af_hrtfm.c
> >
> > diff --git a/doc/filters.texi b/doc/filters.texi
> > index bd43a7ac6e..c298054325 100644
> > --- a/doc/filters.texi
> > +++ b/doc/filters.texi
> > @@ -3218,6 +3218,66 @@ Change highpass width.
> >  Syntax for the command is : "@var{width}"
> >  @end table
> > 
> > +@section hrtfm
> > +
> > +Apply simple Head Related Transfer Function Model to audio stream.
> > +
> > +hrtfm filter creates virtual loudspeakers around the user for
> > binaural> +listening via headphones (audio formats up to 9 channels 
> > supported).> +
> > +This is very simple implementation which does not use any HRIRs.
> > +
> > +It accepts the following parameters:
> > +
> > +@table @option
> > +@item hradius
> 
> You didn’t like the head_radius option name suggestion?
> 
> > +Set head radius of listener. In meters. Default value is
> > @code{0.0891}.
> Why meters instead of cm? 

Because if you want to specify centimeters, you can use the option
like this: -hradius 8.91c
The only way for SI prefix to make sense is to have the options
expressed in the base unit.
So I think hradius should be expressed in meters and every new options
should always be expressed in their base unit.
___
ffmpeg-devel mailing list
ffmpeg-devel@ffmpeg.org
http://ffmpeg.org/mailman/listinfo/ffmpeg-devel


[FFmpeg-devel] [PATCH] libavfilter: Add OpenCL convolution filter v0.2

2018-03-22 Thread Danil Iashchenko
Hi there!
Thank you for advices, I have fixed the problem when per plane matrices
application was incorrect. Now it works as expected and behaves like the 
existing vf_convolution filter.

Tested for yuv and nv12 formats.
The following filters from ffmpeg documentation
(https://ffmpeg.org/ffmpeg-filters.html#Examples-43) work correctly:

 * sharpen
 * blur
 * edge enchance
 * edge detect
 * laplacian edge detector which includes diagonals
 * emboss
 * custom tests

I have two questions:

1. Looks like existing convolution filter implementaion ignores
0rdiv and 0bias parameters for 0m='0 0 0 0 1 0 0 0 0' matrix,
so results will be the same for following sample filter runs:

-vf convolution='0m=0 0 0 0 1 0 0 0 0:0rdiv=1:0bias=20'
-vf convolution='0m=0 0 0 0 1 0 0 0 0:0rdiv=1/3:0bias=0'
-vf convolution='0m=0 0 0 0 1 0 0 0 0:0rdiv=1/2:0bias=100'
-vf convolution='0m=0 0 0 0 1 0 0 0 0:0rdiv=1/5:0bias=50'

and will not differ from

-vf convolution='0m=0 0 0 0 1 0 0 0 0:0rdiv=1:0bias=0'

My implementation does not ignore 0rdiv and 0bias parameters in case of m0='0 0 
0 0 1 0 0 0 0',
and results will differ from the existing implementation so I do not know
which one is correct: mine or exisiting.

2. I have a local kernel (not included in this patch), but there is no
significant speed difference comparing to global kernel. Shall I include
it in the next patch?
---
 configure   |   1 +
 libavfilter/Makefile|   1 +
 libavfilter/allfilters.c|   1 +
 libavfilter/opencl/convolution.cl   |  40 
 libavfilter/opencl_source.h |   1 +
 libavfilter/vf_convolution_opencl.c | 365 
 6 files changed, 409 insertions(+)
 create mode 100644 libavfilter/opencl/convolution.cl
 create mode 100644 libavfilter/vf_convolution_opencl.c

diff --git a/configure b/configure
index 6916b45..bf5c312 100755
--- a/configure
+++ b/configure
@@ -3210,6 +3210,7 @@ blackframe_filter_deps="gpl"
 boxblur_filter_deps="gpl"
 bs2b_filter_deps="libbs2b"
 colormatrix_filter_deps="gpl"
+convolution_opencl_filter_deps="opencl"
 convolve_filter_deps="avcodec"
 convolve_filter_select="fft"
 coreimage_filter_deps="coreimage appkit"
diff --git a/libavfilter/Makefile b/libavfilter/Makefile
index 6a60836..d005934 100644
--- a/libavfilter/Makefile
+++ b/libavfilter/Makefile
@@ -156,6 +156,7 @@ OBJS-$(CONFIG_COLORLEVELS_FILTER)+= 
vf_colorlevels.o
 OBJS-$(CONFIG_COLORMATRIX_FILTER)+= vf_colormatrix.o
 OBJS-$(CONFIG_COLORSPACE_FILTER) += vf_colorspace.o colorspacedsp.o
 OBJS-$(CONFIG_CONVOLUTION_FILTER)+= vf_convolution.o
+OBJS-$(CONFIG_CONVOLUTION_OPENCL_FILTER) += vf_convolution_opencl.o 
opencl.o opencl/convolution.o
 OBJS-$(CONFIG_CONVOLVE_FILTER)   += vf_convolve.o framesync.o
 OBJS-$(CONFIG_COPY_FILTER)   += vf_copy.o
 OBJS-$(CONFIG_COREIMAGE_FILTER)  += vf_coreimage.o
diff --git a/libavfilter/allfilters.c b/libavfilter/allfilters.c
index 9adb109..f2dc55e 100644
--- a/libavfilter/allfilters.c
+++ b/libavfilter/allfilters.c
@@ -166,6 +166,7 @@ static void register_all(void)
 REGISTER_FILTER(COLORMATRIX,colormatrix,vf);
 REGISTER_FILTER(COLORSPACE, colorspace, vf);
 REGISTER_FILTER(CONVOLUTION,convolution,vf);
+REGISTER_FILTER(CONVOLUTION_OPENCL, convolution_opencl, vf);
 REGISTER_FILTER(CONVOLVE,   convolve,   vf);
 REGISTER_FILTER(COPY,   copy,   vf);
 REGISTER_FILTER(COREIMAGE,  coreimage,  vf);
diff --git a/libavfilter/opencl/convolution.cl 
b/libavfilter/opencl/convolution.cl
new file mode 100644
index 000..aa1db97
--- /dev/null
+++ b/libavfilter/opencl/convolution.cl
@@ -0,0 +1,40 @@
+/*
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+__kernel void convolution_global(__write_only image2d_t dst,
+ __read_only  image2d_t src,
+ int coef_matrix_dim,
+ __constant float *coef_matrix,
+ float div,
+ float bias)
+{
+const sampler_t sampler = (CLK_NORMALIZED_COORDS_FALSE | 
CLK_ADDRESS_CLAMP_TO_EDGE | 

Re: [FFmpeg-devel] [PATCH] ffmpeg_filter: enable stream_loop in HWAccel transcoding.

2018-03-22 Thread Jun Zhao
Ping?

The other thing: do we have a clean way to handle auto insert scale
filter in pipeline for HWaccel transcoding case in FFmpeg?

On 2018/3/14 16:26, Jun Zhao wrote:

___
ffmpeg-devel mailing list
ffmpeg-devel@ffmpeg.org
http://ffmpeg.org/mailman/listinfo/ffmpeg-devel


Re: [FFmpeg-devel] [PATCH] lavc/cfhd: added alpha decompanding in rgba12

2018-03-22 Thread Aurelien Jacobs
On Wed, Mar 21, 2018 at 10:19:58PM +0530, Gagandeep Singh wrote:
> alpha decompanding curve added to post process the decoded alpha channel
> ---
>  libavcodec/cfhd.c | 19 +++
>  1 file changed, 19 insertions(+)
> 
> diff --git a/libavcodec/cfhd.c b/libavcodec/cfhd.c
> index fd834b..e35732df45 100644
> --- a/libavcodec/cfhd.c
> +++ b/libavcodec/cfhd.c
> @@ -37,6 +37,9 @@
>  #include "thread.h"
>  #include "cfhd.h"
>  
> +#define ALPHA_COMPAND_DC_OFFSET 256
> +#define ALPHA_COMPAND_GAIN 9400
> +
>  enum CFHDParam {
>  ChannelCount =  12,
>  SubbandCount =  14,
> @@ -94,6 +97,20 @@ static inline int dequant_and_decompand(int level, int 
> quantisation)
> FFSIGN(level) * quantisation;
>  }
>  
> +static inline void process_alpha(int16_t *alpha, int width)
> +{
> +int i, channel;
> +for (i = 0; i < width; i++) {
> +channel   = alpha[i];
> +channel  -= ALPHA_COMPAND_DC_OFFSET;
> +channel <<= 3;
> +channel  *= ALPHA_COMPAND_GAIN;

Any reason why you can't merge the << 3 (ie. * 8) with the
* ALPHA_COMPAND_GAIN ?

> +channel >>= 16;

> +channel   = av_clip_uintp2(channel, 12);
> +alpha[i]  = channel;

Here you should affect the result of av_clip_uintp2 directly to alpha[i].

Actually, I think it would be more readable by dropping the channel
intermediate variable entirely. You could write this function like this
(untested):

static inline void process_alpha(int16_t *alpha, int width)
{
for (int i = 0; i < width; i++)
alpha[i] = av_clip_uintp2(((alpha[i] - 256) * 75200) >> 16, 12);
}

Of course, you can use DC_OFFSET and GAIN constants in there if you
think it is more readable.
___
ffmpeg-devel mailing list
ffmpeg-devel@ffmpeg.org
http://ffmpeg.org/mailman/listinfo/ffmpeg-devel


Re: [FFmpeg-devel] [PATCH 1/3] avformat/mov: Increase support for common encryption.

2018-03-22 Thread Jacob Trimble
On Mon, Mar 5, 2018 at 12:22 PM, Jacob Trimble  wrote:
> On Mon, Feb 12, 2018 at 9:35 AM, Jacob Trimble  wrote:
>> On Tue, Jan 30, 2018 at 11:27 AM, Jacob Trimble  wrote:
>>> On Wed, Jan 24, 2018 at 5:46 PM, Michael Niedermayer
>>>  wrote:
 On Wed, Jan 24, 2018 at 11:43:26AM -0800, Jacob Trimble wrote:
> On Mon, Jan 22, 2018 at 7:38 PM, Michael Niedermayer
>  wrote
> > [...]
> >> This removes support for saio/saiz atoms, but it was incorrect before.
> >> A follow-up change will add correct support for those.
> >
> > This removal should be done by a seperate patch if it is done.
> > diff has matched up the removed function with a added one making this
> > hard to read as is
> >
>
> The problem is that the old code used the saiz atoms to parse the senc
> atom.  I split the patch up for readability, but the two patches need
> to be applied at the same time (or squashed) since the first breaks
> encrypted content.  But I can squash them again if it is preferable to
> not have a commit that intentionally breaks things.

 I didnt investigate this deeply so there is likely a better option that
 i miss but you could just remove the functions which become unused in a
 subsequent patch to prevent diff from messing the line matching up totally

>>>
>>> Done.
>>>

>
> >
> >>
> >> Signed-off-by: Jacob Trimble 
> >> ---
> >>  libavformat/isom.h |  20 +-
> >>  libavformat/mov.c  | 432 
> >> ++---
> >>  tests/fate/mov.mak |   8 +
> >>  tests/ref/fate/mov-frag-encrypted  |  57 +
> >>  tests/ref/fate/mov-tenc-only-encrypted |  57 +
> >>  5 files changed, 422 insertions(+), 152 deletions(-)
> >>  create mode 100644 tests/ref/fate/mov-frag-encrypted
> >>  create mode 100644 tests/ref/fate/mov-tenc-only-encrypted
> >
> > This depends on other patches you posted, this should be mentioned or
> > all patches should be in the same patchset in order
> >
>
> This depends on
> http://ffmpeg.org/pipermail/ffmpeg-devel/2018-January/223754.html and
> the recently pushed change to libavutil/aes_ctr.  Should I add
> something to the commit message or is that enough?

 If you post a new version, then there should be a mail or comment 
 explaining
 any dependancies on yet to be applied patches.
 It should not be in the commit messages or commited changes ideally
 This way people trying to test code dont need to guess what they need
 to apply first before a patchset


 [...]
> >> +static int get_current_encryption_info(MOVContext *c, 
> >> MOVEncryptionIndex **encryption_index, MOVStreamContext **sc)
> >>  {
> >> +MOVFragmentStreamInfo *frag_stream_info;
> >>  AVStream *st;
> >> -MOVStreamContext *sc;
> >> -size_t auxiliary_info_size;
> >> +int i;
> >>
> >> -if (c->decryption_key_len == 0 || c->fc->nb_streams < 1)
> >> -return 0;
> >> +frag_stream_info = get_current_frag_stream_info(&c->frag_index);
> >> +if (frag_stream_info) {
> >> +for (i = 0; i < c->fc->nb_streams; i++) {
> >> +if (c->fc->streams[i]->id == frag_stream_info->id) {
> >> +  st = c->fc->streams[i];
> >> +  break;
> >> +}
> >> +}
> >
> > the indention is inconsistent here
> >
>
> No it's not, it looks like it because the diff looks odd.  If you
> apply the patch, the indentation in this method is consistent.

 Indention depth is 4 in mov*.c
 the hunk seems to add lines with a depth of 2
 I would be surprised if this is not in the file after applying the patch

 personally i dont care about the depth that much but i know many other 
 people
 care so this needs to be fixed before this can be applied
>>>
>>> Didn't see that.  Fixed and did a grep for incorrect indentations.
>>>

 [...]

 --
 Michael GnuPG fingerprint: 9FF2128B147EF6730BADF133611EC787040B0FAB

 Let us carefully observe those good qualities wherein our enemies excel us
 and endeavor to excel them, by avoiding what is faulty, and imitating what
 is excellent in them. -- Plutarch

 ___
 ffmpeg-devel mailing list
 ffmpeg-devel@ffmpeg.org
 http://ffmpeg.org/mailman/listinfo/ffmpeg-devel

>>
>> Ping.  This depends on
>> http://ffmpeg.org/pipermail/ffmpeg-devel/2018-January/223754.html.
>
> Ping again.  I know this is low priority, but I would like to get
> these merged soon.

Ping.  Despite being almost 2 months old, these patches still 

Re: [FFmpeg-devel] [PATCH] avfilter/af_channelsplit: add channels option

2018-03-22 Thread Alexander Strasser
On 2018-03-22 16:04 +0100, Nicolas George wrote:
> Paul B Mahol (2018-03-22):
> > So user can pick which channels to extract.
> > 
> > Signed-off-by: Paul B Mahol 
> > ---
> >  doc/filters.texi  | 18 ++
> >  libavfilter/af_channelsplit.c | 39 +--
> >  2 files changed, 51 insertions(+), 6 deletions(-)
> 
> Thanks for simplifying the code. I have no more remarks. But Alexander
> commented too, so please let him time to follow-up.

I still think checking the number of channels to be less then or
equal to the size of map is more robust.

There are two things that could change in future

1. the size of map (for whatever reason; maybe someone thinks 16
channels are max or similar...)
2. the number of channels we support could be increased

As each channel is represented by a bit in a 64 bit integer, it seems
kind of unlikely we get more channels soon. So I won't insist on the
check to be added.

Patch LGTM.


Thank you,
  Alexander
___
ffmpeg-devel mailing list
ffmpeg-devel@ffmpeg.org
http://ffmpeg.org/mailman/listinfo/ffmpeg-devel


Re: [FFmpeg-devel] [PATCH v4 3/7] cmdutils: use new iteration APIs

2018-03-22 Thread Josh de Kock

On 2018/03/22 22:38, Michael Niedermayer wrote:
> On Thu, Mar 22, 2018 at 02:01:27AM +, Josh de Kock wrote:
> [...]
>> +#ifdef CONFIG_AVDEVICE
>> +opaque = 0;
>> +if (muxdemuxers != SHOW_DEMUXERS) {
>> +while ((ofmt = av_outdev_iterate())) {
>> +if ((!name || strcmp(ofmt-> name, name) < 0) && 
strcmp(ofmt-> name, last_name) > 0) {

>> +name = ofmt - > name;
>
>> +long_name = ofmt - > long_name;
>  ^^^
> this doesnt build
>
Oh my bad. It's the same patch as above functionally (apart from the 
stray spaces somehow), but do you think this patch is better than the 
one with the macros? I guess it's maybe easier to read. I'll resend a 
patch for this if we even decide to use this set.


--
Josh
___
ffmpeg-devel mailing list
ffmpeg-devel@ffmpeg.org
http://ffmpeg.org/mailman/listinfo/ffmpeg-devel


Re: [FFmpeg-devel] [PATCH] lavfi: Add OpenCL avgblur filter

2018-03-22 Thread Mark Thompson
On 21/03/18 13:09, Dylan Fernando wrote:
> On Tue, Mar 20, 2018 at 10:34 AM, Mark Thompson  wrote:
>> On 19/03/18 02:30, dylanf...@gmail.com wrote:
>>> From: drfer3 
>>>
>>> Behaves like the existing avgblur filter, except working on OpenCL
>>> hardware frames. Takes exactly the same options.
>>> ---
>>>  configure   |   1 +
>>>  libavfilter/Makefile|   2 +
>>>  libavfilter/allfilters.c|   1 +
>>>  libavfilter/opencl/avgblur.cl   |  60 
>>>  libavfilter/opencl_source.h |   1 +
>>>  libavfilter/vf_avgblur_opencl.c | 328 ++
>> ++
>>>  6 files changed, 393 insertions(+)
>>>  create mode 100644 libavfilter/opencl/avgblur.cl
>>>  create mode 100644 libavfilter/vf_avgblur_opencl.c
>>>
>>> ...
>>
>> Ignoring the one trivial issue above which I can easily fix myself, this
>> all looks good to me.  I'll try to test on some other platforms (non-Intel,
>> at least AMD and Mali) tomorrow, but I don't anticipate any issues.  Does
>> anyone else have any thoughts?  I'll push this after tomorrow if there
>> isn't anything further.

And pushed, thank you!

> What information should I put in my GSoC application? How should I
> structure it? Should I give a rough timeline detailing exactly which color
> conversion and scaling algorithms I’ll be implementing? If so, which files
> should I look at to see the current colour conversion code?

I have to admit I'm not entirely sure what you need to put in the application 
(I haven't done this bit before, so I'm also reading what the GSoC site says).  
Can anyone else give some guidance here about how it has worked in the past in 
FFmpeg?

Current scaling and colour conversion code is mostly found in libswscale, 
though there are also other places like the colorspace filter.  I don't know 
whether any of these will translate suitably to GPU code and what the right 
approach is here - some investigation will be required.

- Mark
___
ffmpeg-devel mailing list
ffmpeg-devel@ffmpeg.org
http://ffmpeg.org/mailman/listinfo/ffmpeg-devel


[FFmpeg-devel] [PATCH 02/11] avformat/dashenc: segmentation at the configured segment duration rate

2018-03-22 Thread vdixit
From: Vishwanath Dixit 

When use_template is enabled and use_timeline is disabled, typically
it is required to generate the segments at the configured segment duration
rate on an average. This commit is particularly needed to handle the
segmentation when video frame rates are fractional like 29.97 or 59.94 fps.
---
 doc/muxers.texi   |  4 +++-
 libavformat/dashenc.c | 15 ---
 2 files changed, 15 insertions(+), 4 deletions(-)

diff --git a/doc/muxers.texi b/doc/muxers.texi
index 65eec92..7130cd0 100644
--- a/doc/muxers.texi
+++ b/doc/muxers.texi
@@ -226,7 +226,9 @@ ffmpeg -re -i  -map 0 -map 0 -c:a libfdk_aac -c:v 
libx264
 
 @table @option
 @item -seg_duration @var{microseconds}
-Set the segment length in microseconds.
+Set the segment length in microseconds. The value is treated as average segment
+duration when use_template is enabled and use_timeline is disabled or as 
minimum
+segment duration for all the other use cases.
 @item -window_size @var{size}
 Set the maximum number of segments kept in the manifest.
 @item -extra_window_size @var{size}
diff --git a/libavformat/dashenc.c b/libavformat/dashenc.c
index bdd5b56..a489f5e 100644
--- a/libavformat/dashenc.c
+++ b/libavformat/dashenc.c
@@ -1257,6 +1257,7 @@ static int dash_write_packet(AVFormatContext *s, AVPacket 
*pkt)
 DASHContext *c = s->priv_data;
 AVStream *st = s->streams[pkt->stream_index];
 OutputStream *os = >streams[pkt->stream_index];
+int64_t seg_end_duration, elapsed_duration;
 int ret;
 
 ret = update_stream_extradata(s, os, st->codecpar);
@@ -1284,10 +1285,18 @@ static int dash_write_packet(AVFormatContext *s, 
AVPacket *pkt)
 if (os->first_pts == AV_NOPTS_VALUE)
 os->first_pts = pkt->pts;
 
+if (c->use_template && !c->use_timeline) {
+elapsed_duration = pkt->pts - os->first_pts;
+seg_end_duration = (int64_t) os->segment_index * c->seg_duration;
+} else {
+elapsed_duration = pkt->pts - os->start_pts;
+seg_end_duration = c->seg_duration;
+}
+
 if ((!c->has_video || st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO) &&
 pkt->flags & AV_PKT_FLAG_KEY && os->packets_written &&
-av_compare_ts(pkt->pts - os->start_pts, st->time_base,
-  c->seg_duration, AV_TIME_BASE_Q) >= 0) {
+av_compare_ts(elapsed_duration, st->time_base,
+  seg_end_duration, AV_TIME_BASE_Q) >= 0) {
 int64_t prev_duration = c->last_duration;
 
 c->last_duration = av_rescale_q(pkt->pts - os->start_pts,
@@ -1427,7 +1436,7 @@ static const AVOption options[] = {
 { "adaptation_sets", "Adaptation sets. Syntax: id=0,streams=0,1,2 
id=1,streams=3,4 and so on", OFFSET(adaptation_sets), AV_OPT_TYPE_STRING, { 0 
}, 0, 0, AV_OPT_FLAG_ENCODING_PARAM },
 { "window_size", "number of segments kept in the manifest", 
OFFSET(window_size), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, INT_MAX, E },
 { "extra_window_size", "number of segments kept outside of the manifest 
before removing from disk", OFFSET(extra_window_size), AV_OPT_TYPE_INT, { .i64 
= 5 }, 0, INT_MAX, E },
-{ "seg_duration", "minimum segment duration (in microseconds)", 
OFFSET(seg_duration), AV_OPT_TYPE_INT, { .i64 = 500 }, 0, INT_MAX, E },
+{ "seg_duration", "value in microseconds, average segment duration when 
use_template is enabled and use_timeline is disabled or minimum segment 
duration in other cases ", OFFSET(seg_duration), AV_OPT_TYPE_INT, { .i64 = 
500 }, 0, INT_MAX, E },
 { "remove_at_exit", "remove all segments when finished", 
OFFSET(remove_at_exit), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, E },
 { "use_template", "Use SegmentTemplate instead of SegmentList", 
OFFSET(use_template), AV_OPT_TYPE_BOOL, { .i64 = 1 }, 0, 1, E },
 { "use_timeline", "Use SegmentTimeline in SegmentTemplate", 
OFFSET(use_timeline), AV_OPT_TYPE_BOOL, { .i64 = 1 }, 0, 1, E },
-- 
1.9.1

___
ffmpeg-devel mailing list
ffmpeg-devel@ffmpeg.org
http://ffmpeg.org/mailman/listinfo/ffmpeg-devel


[FFmpeg-devel] [PATCH 04/11] avformat/dashenc: removed 'write_manifest' call from 'write_header'

2018-03-22 Thread vdixit
From: Vishwanath Dixit 

Calling 'write_manifest' from 'write_header' was causing creation of the
first MPD with invalid values. Ex: zero @duration param value. Also,
the manifest files (MPD or M3U8s) should be created when at least
one media frame is ready for consumption.
---
 libavformat/dashenc.c | 3 ---
 1 file changed, 3 deletions(-)

diff --git a/libavformat/dashenc.c b/libavformat/dashenc.c
index 5fb839d..7b854b5 100644
--- a/libavformat/dashenc.c
+++ b/libavformat/dashenc.c
@@ -1036,9 +1036,6 @@ static int dash_write_header(AVFormatContext *s)
 if ((ret = avformat_write_header(os->ctx, NULL)) < 0)
 return ret;
 }
-ret = write_manifest(s, 0);
-if (!ret)
-av_log(s, AV_LOG_VERBOSE, "Manifest written to: %s\n", s->url);
 return ret;
 }
 
-- 
1.9.1

___
ffmpeg-devel mailing list
ffmpeg-devel@ffmpeg.org
http://ffmpeg.org/mailman/listinfo/ffmpeg-devel


[FFmpeg-devel] [PATCH 03/11] avformat/dashenc: writing average segment duration for @duration in template mode

2018-03-22 Thread vdixit
From: Vishwanath Dixit 

---
 libavformat/dashenc.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/libavformat/dashenc.c b/libavformat/dashenc.c
index a489f5e..5fb839d 100644
--- a/libavformat/dashenc.c
+++ b/libavformat/dashenc.c
@@ -344,7 +344,7 @@ static void output_segment_list(OutputStream *os, 
AVIOContext *out, AVFormatCont
 int timescale = c->use_timeline ? os->ctx->streams[0]->time_base.den : 
AV_TIME_BASE;
 avio_printf(out, "\t\t\t\tuse_timeline)
-avio_printf(out, "duration=\"%"PRId64"\" ", c->last_duration);
+avio_printf(out, "duration=\"%d\" ", c->seg_duration);
 avio_printf(out, "initialization=\"%s\" media=\"%s\" 
startNumber=\"%d\">\n", c->init_seg_name, c->media_seg_name, c->use_timeline ? 
start_number : 1);
 if (c->use_timeline) {
 int64_t cur_time = 0;
-- 
1.9.1

___
ffmpeg-devel mailing list
ffmpeg-devel@ffmpeg.org
http://ffmpeg.org/mailman/listinfo/ffmpeg-devel


[FFmpeg-devel] [PATCH 06/11] avformat/dashenc: addition of @availabilityTimeOffset in MPD

2018-03-22 Thread vdixit
From: Vishwanath Dixit 

availability time of Nth segment = availabilityStartTime + (N*segment duration) 
- availabilityTimeOffset.
This field helps to reduce the latency by about a segment duration in streaming 
mode.
---
 libavformat/dashenc.c | 11 ++-
 1 file changed, 10 insertions(+), 1 deletion(-)

diff --git a/libavformat/dashenc.c b/libavformat/dashenc.c
index b62cb3e..d20bdba 100644
--- a/libavformat/dashenc.c
+++ b/libavformat/dashenc.c
@@ -85,6 +85,7 @@ typedef struct OutputStream {
 char filename[1024];
 char full_path[1024];
 char temp_path[1024];
+int64_t chunk_duration;
 } OutputStream;
 
 typedef struct DASHContext {
@@ -343,8 +344,12 @@ static void output_segment_list(OutputStream *os, 
AVIOContext *out, AVFormatCont
 if (c->use_template) {
 int timescale = c->use_timeline ? os->ctx->streams[0]->time_base.den : 
AV_TIME_BASE;
 avio_printf(out, "\t\t\t\tuse_timeline)
+if (!c->use_timeline) {
 avio_printf(out, "duration=\"%d\" ", c->seg_duration);
+if (c->streaming && os->chunk_duration)
+avio_printf(out, "availabilityTimeOffset=\"%.3f\" ",
+((double) c->seg_duration - os->chunk_duration) / 
AV_TIME_BASE);
+}
 avio_printf(out, "initialization=\"%s\" media=\"%s\" 
startNumber=\"%d\">\n", c->init_seg_name, c->media_seg_name, c->use_timeline ? 
start_number : 1);
 if (c->use_timeline) {
 int64_t cur_time = 0;
@@ -1283,6 +1288,10 @@ static int dash_write_packet(AVFormatContext *s, 
AVPacket *pkt)
 format_date_now(c->availability_start_time,
 sizeof(c->availability_start_time));
 
+if (!os->chunk_duration && pkt->duration)
+os->chunk_duration = av_rescale_q(pkt->duration, st->time_base,
+  AV_TIME_BASE_Q);
+
 if (c->use_template && !c->use_timeline) {
 elapsed_duration = pkt->pts - os->first_pts;
 seg_end_duration = (int64_t) os->segment_index * c->seg_duration;
-- 
1.9.1

___
ffmpeg-devel mailing list
ffmpeg-devel@ffmpeg.org
http://ffmpeg.org/mailman/listinfo/ffmpeg-devel


Re: [FFmpeg-devel] [PATCH] lavc/cfhd: added alpha decompanding in rgba12

2018-03-22 Thread Gagandeep Singh
On Fri, 23 Mar 2018, 04:26 Aurelien Jacobs,  wrote:

> On Wed, Mar 21, 2018 at 10:19:58PM +0530, Gagandeep Singh wrote:
> > alpha decompanding curve added to post process the decoded alpha channel
> > ---
> >  libavcodec/cfhd.c | 19 +++
> >  1 file changed, 19 insertions(+)
> >
> > diff --git a/libavcodec/cfhd.c b/libavcodec/cfhd.c
> > index fd834b..e35732df45 100644
> > --- a/libavcodec/cfhd.c
> > +++ b/libavcodec/cfhd.c
> > @@ -37,6 +37,9 @@
> >  #include "thread.h"
> >  #include "cfhd.h"
> >
> > +#define ALPHA_COMPAND_DC_OFFSET 256
> > +#define ALPHA_COMPAND_GAIN 9400
> > +
> >  enum CFHDParam {
> >  ChannelCount =  12,
> >  SubbandCount =  14,
> > @@ -94,6 +97,20 @@ static inline int dequant_and_decompand(int level,
> int quantisation)
> > FFSIGN(level) * quantisation;
> >  }
> >
> > +static inline void process_alpha(int16_t *alpha, int width)
> > +{
> > +int i, channel;
> > +for (i = 0; i < width; i++) {
> > +channel   = alpha[i];
> > +channel  -= ALPHA_COMPAND_DC_OFFSET;
> > +channel <<= 3;
> > +channel  *= ALPHA_COMPAND_GAIN;
>
> Any reason why you can't merge the << 3 (ie. * 8) with the
> * ALPHA_COMPAND_GAIN ?
>
> > +channel >>= 16;
>
> > +channel   = av_clip_uintp2(channel, 12);
> > +alpha[i]  = channel;
>
> Here you should affect the result of av_clip_uintp2 directly to alpha[i].
>
> Actually, I think it would be more readable by dropping the channel
> intermediate variable entirely. You could write this function like this
> (untested):
>
> static inline void process_alpha(int16_t *alpha, int width)
> {
> for (int i = 0; i < width; i++)
> alpha[i] = av_clip_uintp2(((alpha[i] - 256) * 75200) >> 16, 12);
> }
>
> Of course, you can use DC_OFFSET and GAIN constants in there if you
> think it is more readable.
>

I will test it. I remember the problem was with the bit shifting in alpha,
as it is originally smaller than channel, which is why I used a 32-bit
channel variable, but I will see if it can also work your way.

> ___
> ffmpeg-devel mailing list
> ffmpeg-devel@ffmpeg.org
> http://ffmpeg.org/mailman/listinfo/ffmpeg-devel
>
___
ffmpeg-devel mailing list
ffmpeg-devel@ffmpeg.org
http://ffmpeg.org/mailman/listinfo/ffmpeg-devel


[FFmpeg-devel] [PATCH 09/11] avformat/dashenc: constructing MPD's bandwidth string locally

2018-03-22 Thread vdixit
From: Vishwanath Dixit 

---
 libavformat/dashenc.c | 20 +---
 1 file changed, 9 insertions(+), 11 deletions(-)

diff --git a/libavformat/dashenc.c b/libavformat/dashenc.c
index 294999a..b0ed890 100644
--- a/libavformat/dashenc.c
+++ b/libavformat/dashenc.c
@@ -78,7 +78,6 @@ typedef struct OutputStream {
 int64_t first_pts, start_pts, max_pts;
 int64_t last_dts;
 int bit_rate;
-char bandwidth_str[64];
 
 char codec_str[100];
 int written_len;
@@ -546,20 +545,25 @@ static int write_adaptation_set(AVFormatContext *s, 
AVIOContext *out, int as_ind
 
 for (i = 0; i < s->nb_streams; i++) {
 OutputStream *os = >streams[i];
+char bandwidth_str[64] = {'\0'};
 
 if (os->as_idx - 1 != as_index)
 continue;
 
+if (os->bit_rate > 0)
+snprintf(bandwidth_str, sizeof(bandwidth_str), " bandwidth=\"%d\"",
+ os->bit_rate);
+
 if (as->media_type == AVMEDIA_TYPE_VIDEO) {
 AVStream *st = s->streams[i];
 avio_printf(out, "\t\t\tformat_name, os->codec_str, os->bandwidth_str, 
s->streams[i]->codecpar->width, s->streams[i]->codecpar->height);
+i, os->format_name, os->codec_str, bandwidth_str, 
s->streams[i]->codecpar->width, s->streams[i]->codecpar->height);
 if (st->avg_frame_rate.num)
 avio_printf(out, " frameRate=\"%d/%d\"", 
st->avg_frame_rate.num, st->avg_frame_rate.den);
 avio_printf(out, ">\n");
 } else {
 avio_printf(out, "\t\t\t\n",
-i, os->format_name, os->codec_str, os->bandwidth_str, 
s->streams[i]->codecpar->sample_rate);
+i, os->format_name, os->codec_str, bandwidth_str, 
s->streams[i]->codecpar->sample_rate);
 avio_printf(out, "\t\t\t\t\n",
 s->streams[i]->codecpar->channels);
 }
@@ -908,10 +912,7 @@ static int dash_init(AVFormatContext *s)
 char filename[1024];
 
 os->bit_rate = s->streams[i]->codecpar->bit_rate;
-if (os->bit_rate) {
-snprintf(os->bandwidth_str, sizeof(os->bandwidth_str),
- " bandwidth=\"%d\"", os->bit_rate);
-} else {
+if (!os->bit_rate) {
 int level = s->strict_std_compliance >= FF_COMPLIANCE_STRICT ?
 AV_LOG_ERROR : AV_LOG_WARNING;
 av_log(s, level, "No bit rate set for stream %d\n", i);
@@ -1226,11 +1227,8 @@ static int dash_flush(AVFormatContext *s, int final, int 
stream)
 int64_t bitrate = (int64_t) range_length * 8 * AV_TIME_BASE / 
av_rescale_q(os->max_pts - os->start_pts,

st->time_base,

AV_TIME_BASE_Q);
-if (bitrate >= 0) {
+if (bitrate >= 0)
 os->bit_rate = bitrate;
-snprintf(os->bandwidth_str, sizeof(os->bandwidth_str),
- " bandwidth=\"%d\"", os->bit_rate);
-}
 }
 add_segment(os, os->filename, os->start_pts, os->max_pts - 
os->start_pts, os->pos, range_length, index_length);
 av_log(s, AV_LOG_VERBOSE, "Representation %d media segment %d written 
to: %s\n", i, os->segment_index, os->full_path);
-- 
1.9.1

___
ffmpeg-devel mailing list
ffmpeg-devel@ffmpeg.org
http://ffmpeg.org/mailman/listinfo/ffmpeg-devel


[FFmpeg-devel] [PATCH 08/11] avformat/dashenc: addition of bitrate overhead in master playlist's bandwidth

2018-03-22 Thread vdixit
From: Vishwanath Dixit 

---
 libavformat/dashenc.c | 7 +--
 1 file changed, 5 insertions(+), 2 deletions(-)

diff --git a/libavformat/dashenc.c b/libavformat/dashenc.c
index 0ba9f55..294999a 100644
--- a/libavformat/dashenc.c
+++ b/libavformat/dashenc.c
@@ -824,20 +824,23 @@ static int write_manifest(AVFormatContext *s, int final)
 for (i = 0; i < s->nb_streams; i++) {
 char playlist_file[64];
 AVStream *st = s->streams[i];
+OutputStream *os = >streams[i];
 if (st->codecpar->codec_type != AVMEDIA_TYPE_AUDIO)
 continue;
 get_hls_playlist_name(playlist_file, sizeof(playlist_file), NULL, 
i);
 ff_hls_write_audio_rendition(out, (char *)audio_group,
  playlist_file, i, is_default);
-max_audio_bitrate = FFMAX(st->codecpar->bit_rate, 
max_audio_bitrate);
+max_audio_bitrate = FFMAX(st->codecpar->bit_rate +
+  os->bitrate_overhead, max_audio_bitrate);
 is_default = 0;
 }
 
 for (i = 0; i < s->nb_streams; i++) {
 char playlist_file[64];
 AVStream *st = s->streams[i];
+OutputStream *os = >streams[i];
 char *agroup = NULL;
-int stream_bitrate = st->codecpar->bit_rate;
+int stream_bitrate = st->codecpar->bit_rate + os->bitrate_overhead;
 if ((st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO) && 
max_audio_bitrate) {
 agroup = (char *)audio_group;
 stream_bitrate += max_audio_bitrate;
-- 
1.9.1

___
ffmpeg-devel mailing list
ffmpeg-devel@ffmpeg.org
http://ffmpeg.org/mailman/listinfo/ffmpeg-devel


[FFmpeg-devel] [PATCH 07/11] avformat/dashenc: logic to compute bitrate overhead

2018-03-22 Thread vdixit
From: Vishwanath Dixit 

---
 libavformat/dashenc.c | 10 ++
 1 file changed, 10 insertions(+)

diff --git a/libavformat/dashenc.c b/libavformat/dashenc.c
index d20bdba..0ba9f55 100644
--- a/libavformat/dashenc.c
+++ b/libavformat/dashenc.c
@@ -86,6 +86,8 @@ typedef struct OutputStream {
 char full_path[1024];
 char temp_path[1024];
 int64_t chunk_duration;
+int total_pkt_size;
+int bitrate_overhead;
 } OutputStream;
 
 typedef struct DASHContext {
@@ -1209,6 +1211,13 @@ static int dash_flush(AVFormatContext *s, int final, int 
stream)
 }
 }
 
+if (!os->bitrate_overhead)
+os->bitrate_overhead = ((int64_t) (range_length - 
os->total_pkt_size) *
+8 * AV_TIME_BASE) /
+   av_rescale_q(os->max_pts - os->start_pts,
+st->time_base, AV_TIME_BASE_Q);
+os->total_pkt_size = 0;
+
 if (!os->bit_rate) {
 // calculate average bitrate of first segment
 int64_t bitrate = (int64_t) range_length * 8 * AV_TIME_BASE / 
av_rescale_q(os->max_pts - os->start_pts,
@@ -1340,6 +1349,7 @@ static int dash_write_packet(AVFormatContext *s, AVPacket 
*pkt)
 else
 os->max_pts = FFMAX(os->max_pts, pkt->pts + pkt->duration);
 os->packets_written++;
+os->total_pkt_size += pkt->size;
 if ((ret = ff_write_chained(os->ctx, 0, pkt, s, 0)) < 0)
 return ret;
 
-- 
1.9.1

___
ffmpeg-devel mailing list
ffmpeg-devel@ffmpeg.org
http://ffmpeg.org/mailman/listinfo/ffmpeg-devel


[FFmpeg-devel] [PATCH 11/11] avformat/dashenc: addition of segment index correction logic

2018-03-22 Thread vdixit
From: Vishwanath Dixit 

The logic is applicable only when use_template is enabled and use_timeline
is disabled. The logic monitors the flow of segment indexes. If a stream's
segment index value is not at the expected real time position, then
the logic corrects that index value.

Typically this logic is needed in live streaming use cases. Network
bandwidth fluctuations are common during long-running streaming. Each
fluctuation can cause the segment indexes to fall behind the expected real
time position. Without this logic, players will not be able to consume
the content, even after the encoder's network condition comes back to
a normal state.
---
 doc/muxers.texi   | 11 +++
 libavformat/dashenc.c | 31 ++-
 2 files changed, 37 insertions(+), 5 deletions(-)

diff --git a/doc/muxers.texi b/doc/muxers.texi
index 7130cd0..9b31ea0 100644
--- a/doc/muxers.texi
+++ b/doc/muxers.texi
@@ -268,6 +268,17 @@ To map all video (or audio) streams to an AdaptationSet, 
"v" (or "a") can be use
 When no assignment is defined, this defaults to an AdaptationSet for each 
stream.
 @item -timeout @var{timeout}
 Set timeout for socket I/O operations. Applicable only for HTTP output.
+@item -index_correction @var{index_correction}
+Enable (1) or Disable (0) segment index correction logic. Applicable only when
+@var{use_template} is enabled and @var{use_timeline} is disabled.
+
+When enabled, the logic monitors the flow of segment indexes. If a streams's
+segment index value is not at the expected real time position, then the logic
+corrects that index value.
+
+Typically this logic is needed in live streaming use cases. The network 
bandwidth
+fluctuations are common during long run streaming. Each fluctuation can cause
+the segment indexes fall behind the expected real time position.
 @end table
 
 @anchor{framecrc}
diff --git a/libavformat/dashenc.c b/libavformat/dashenc.c
index f691fb3..47484eb 100644
--- a/libavformat/dashenc.c
+++ b/libavformat/dashenc.c
@@ -76,7 +76,7 @@ typedef struct OutputStream {
 int nb_segments, segments_size, segment_index;
 Segment **segments;
 int64_t first_pts, start_pts, max_pts;
-int64_t last_dts;
+int64_t last_dts, last_pts;
 int bit_rate;
 
 char codec_str[100];
@@ -120,6 +120,7 @@ typedef struct DASHContext {
 AVIOContext *m3u8_out;
 int streaming;
 int64_t timeout;
+int index_correction;
 } DASHContext;
 
 static struct codec_string {
@@ -1050,7 +1051,7 @@ static int dash_write_header(AVFormatContext *s)
 static int add_segment(OutputStream *os, const char *file,
int64_t time, int duration,
int64_t start_pos, int64_t range_length,
-   int64_t index_length)
+   int64_t index_length, int next_exp_index)
 {
 int err;
 Segment *seg;
@@ -1078,6 +1079,12 @@ static int add_segment(OutputStream *os, const char 
*file,
 seg->index_length = index_length;
 os->segments[os->nb_segments++] = seg;
 os->segment_index++;
+//correcting the segment index if it has fallen behind the expected value
+if (os->segment_index < next_exp_index) {
+av_log(NULL, AV_LOG_WARNING, "Correcting the segment index after file 
%s: current=%d corrected=%d\n",
+   file, os->segment_index, next_exp_index);
+os->segment_index = next_exp_index;
+}
 return 0;
 }
 
@@ -1167,10 +1174,22 @@ static int dash_flush(AVFormatContext *s, int final, 
int stream)
 const char *proto = avio_find_protocol_name(s->url);
 int use_rename = proto && !strcmp(proto, "file");
 
-int cur_flush_segment_index = 0;
-if (stream >= 0)
+int cur_flush_segment_index = 0, next_exp_index = -1;
+if (stream >= 0) {
 cur_flush_segment_index = c->streams[stream].segment_index;
 
+//finding the next segment's expected index, based on the current pts 
value
+if (c->use_template && !c->use_timeline && c->index_correction &&
+c->streams[stream].last_pts != AV_NOPTS_VALUE &&
+c->streams[stream].first_pts != AV_NOPTS_VALUE) {
+int64_t pts_diff = av_rescale_q(c->streams[stream].last_pts -
+c->streams[stream].first_pts,
+s->streams[stream]->time_base,
+AV_TIME_BASE_Q);
+next_exp_index = (pts_diff / c->seg_duration) + 1;
+}
+}
+
 for (i = 0; i < s->nb_streams; i++) {
 OutputStream *os = >streams[i];
 AVStream *st = s->streams[i];
@@ -1230,7 +1249,7 @@ static int dash_flush(AVFormatContext *s, int final, int 
stream)
 if (bitrate >= 0)
 os->bit_rate = bitrate;
 }
-add_segment(os, os->filename, os->start_pts, os->max_pts - 
os->start_pts, os->pos, range_length, index_length);
+add_segment(os, os->filename, os->start_pts, 

[FFmpeg-devel] [PATCH 10/11] avformat/dashenc: addition of bitrate overhead for @bandwidth param in MPD

2018-03-22 Thread vdixit
From: Vishwanath Dixit 

---
 libavformat/dashenc.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/libavformat/dashenc.c b/libavformat/dashenc.c
index b0ed890..f691fb3 100644
--- a/libavformat/dashenc.c
+++ b/libavformat/dashenc.c
@@ -552,7 +552,7 @@ static int write_adaptation_set(AVFormatContext *s, 
AVIOContext *out, int as_ind
 
 if (os->bit_rate > 0)
 snprintf(bandwidth_str, sizeof(bandwidth_str), " bandwidth=\"%d\"",
- os->bit_rate);
+ os->bit_rate + os->bitrate_overhead);
 
 if (as->media_type == AVMEDIA_TYPE_VIDEO) {
 AVStream *st = s->streams[i];
-- 
1.9.1

___
ffmpeg-devel mailing list
ffmpeg-devel@ffmpeg.org
http://ffmpeg.org/mailman/listinfo/ffmpeg-devel


[FFmpeg-devel] [PATCH 05/11] avformat/dashenc: setting @availabilityStartTime when the first frame is ready

2018-03-22 Thread vdixit
From: Vishwanath Dixit 

@availabilityStartTime specifies the anchor for the computation of the earliest
availability time (in UTC) for any Segment in the Media Presentation.
---
 libavformat/dashenc.c | 7 ---
 1 file changed, 4 insertions(+), 3 deletions(-)

diff --git a/libavformat/dashenc.c b/libavformat/dashenc.c
index 7b854b5..b62cb3e 100644
--- a/libavformat/dashenc.c
+++ b/libavformat/dashenc.c
@@ -740,9 +740,6 @@ static int write_manifest(AVFormatContext *s, int final)
 update_period = 500;
 avio_printf(out, "\tminimumUpdatePeriod=\"PT%"PRId64"S\"\n", 
update_period);
 avio_printf(out, "\tsuggestedPresentationDelay=\"PT%"PRId64"S\"\n", 
c->last_duration / AV_TIME_BASE);
-if (!c->availability_start_time[0] && s->nb_streams > 0 && 
c->streams[0].nb_segments > 0) {
-format_date_now(c->availability_start_time, 
sizeof(c->availability_start_time));
-}
 if (c->availability_start_time[0])
 avio_printf(out, "\tavailabilityStartTime=\"%s\"\n", 
c->availability_start_time);
 format_date_now(now_str, sizeof(now_str));
@@ -1282,6 +1279,10 @@ static int dash_write_packet(AVFormatContext *s, 
AVPacket *pkt)
 if (os->first_pts == AV_NOPTS_VALUE)
 os->first_pts = pkt->pts;
 
+if (!c->availability_start_time[0])
+format_date_now(c->availability_start_time,
+sizeof(c->availability_start_time));
+
 if (c->use_template && !c->use_timeline) {
 elapsed_duration = pkt->pts - os->first_pts;
 seg_end_duration = (int64_t) os->segment_index * c->seg_duration;
-- 
1.9.1

___
ffmpeg-devel mailing list
ffmpeg-devel@ffmpeg.org
http://ffmpeg.org/mailman/listinfo/ffmpeg-devel


Re: [FFmpeg-devel] [PATCH] lavc/cfhd: added alpha decompanding in rgba12

2018-03-22 Thread Gagandeep Singh
On Fri, 23 Mar 2018, 11:04 Gagandeep Singh, 
wrote:

>
>
> On Fri, 23 Mar 2018, 04:26 Aurelien Jacobs,  wrote:
>
>> On Wed, Mar 21, 2018 at 10:19:58PM +0530, Gagandeep Singh wrote:
>> > alpha decompanding curve added to post process the decoded alpha channel
>> > ---
>> >  libavcodec/cfhd.c | 19 +++
>> >  1 file changed, 19 insertions(+)
>> >
>> > diff --git a/libavcodec/cfhd.c b/libavcodec/cfhd.c
>> > index fd834b..e35732df45 100644
>> > --- a/libavcodec/cfhd.c
>> > +++ b/libavcodec/cfhd.c
>> > @@ -37,6 +37,9 @@
>> >  #include "thread.h"
>> >  #include "cfhd.h"
>> >
>> > +#define ALPHA_COMPAND_DC_OFFSET 256
>> > +#define ALPHA_COMPAND_GAIN 9400
>> > +
>> >  enum CFHDParam {
>> >  ChannelCount =  12,
>> >  SubbandCount =  14,
>> > @@ -94,6 +97,20 @@ static inline int dequant_and_decompand(int level,
>> int quantisation)
>> > FFSIGN(level) * quantisation;
>> >  }
>> >
>> > +static inline void process_alpha(int16_t *alpha, int width)
>> > +{
>> > +int i, channel;
>> > +for (i = 0; i < width; i++) {
>> > +channel   = alpha[i];
>> > +channel  -= ALPHA_COMPAND_DC_OFFSET;
>> > +channel <<= 3;
>> > +channel  *= ALPHA_COMPAND_GAIN;
>>
>> Any reason why you can't merge the << 3 (ie. * 8) with the
>> * ALPHA_COMPAND_GAIN ?
>>
>> > +channel >>= 16;
>>
>> > +channel   = av_clip_uintp2(channel, 12);
>> > +alpha[i]  = channel;
>>
>> Here you should affect the result of av_clip_uintp2 directly to alpha[i].
>>
>> Actually, I think it would be more readable by dropping the channel
>> intermediate variable entirely. You could write this function like this
>> (untested):
>>
>> static inline void process_alpha(int16_t *alpha, int width)
>> {
>> for (int i = 0; i < width; i++)
>> alpha[i] = av_clip_uintp2(((alpha[i] - 256) * 75200) >> 16, 12);
>> }
>>
>> Of course, you can use DC_OFFSET and GAIN constants in there if you
>> think it is more readable.
>>
>
> I will test it, I remember the problem was with the bit shifting in alpha
> as it is originally smaller than channel, so I used 32 bit channel, but I
> will see if it can also work your way.
>

Basically the max alpha value (4095) was overflowing on directly using
alpha, I might be able to shorten it using some type change

> ___
>> ffmpeg-devel mailing list
>> ffmpeg-devel@ffmpeg.org
>> http://ffmpeg.org/mailman/listinfo/ffmpeg-devel
>>
>
___
ffmpeg-devel mailing list
ffmpeg-devel@ffmpeg.org
http://ffmpeg.org/mailman/listinfo/ffmpeg-devel


[FFmpeg-devel] [PATCH 01/11] avformat/dashenc: renamed 'min_seg_duration' to 'seg_duration'

2018-03-22 Thread vdixit
From: Vishwanath Dixit 

---
 doc/muxers.texi   |  2 +-
 libavformat/dashenc.c | 10 +-
 2 files changed, 6 insertions(+), 6 deletions(-)

diff --git a/doc/muxers.texi b/doc/muxers.texi
index cb75c26..65eec92 100644
--- a/doc/muxers.texi
+++ b/doc/muxers.texi
@@ -225,7 +225,7 @@ ffmpeg -re -i  -map 0 -map 0 -c:a libfdk_aac -c:v 
libx264
 @end example
 
 @table @option
-@item -min_seg_duration @var{microseconds}
+@item -seg_duration @var{microseconds}
 Set the segment length in microseconds.
 @item -window_size @var{size}
 Set the maximum number of segments kept in the manifest.
diff --git a/libavformat/dashenc.c b/libavformat/dashenc.c
index bdf8c8d..bdd5b56 100644
--- a/libavformat/dashenc.c
+++ b/libavformat/dashenc.c
@@ -94,7 +94,7 @@ typedef struct DASHContext {
 int nb_as;
 int window_size;
 int extra_window_size;
-int min_seg_duration;
+int seg_duration;
 int remove_at_exit;
 int use_template;
 int use_timeline;
@@ -974,7 +974,7 @@ static int dash_init(AVFormatContext *s)
 else
 av_dict_set(, "movflags", "frag_custom+dash+delay_moov", 
0);
 } else {
-av_dict_set_int(, "cluster_time_limit", c->min_seg_duration / 
1000, 0);
+av_dict_set_int(, "cluster_time_limit", c->seg_duration / 
1000, 0);
 av_dict_set_int(, "cluster_size_limit", 5 * 1024 * 1024, 0); 
// set a large cluster size limit
 av_dict_set_int(, "dash", 1, 0);
 av_dict_set_int(, "dash_track_number", i + 1, 0);
@@ -1020,7 +1020,7 @@ static int dash_init(AVFormatContext *s)
 os->segment_index = 1;
 }
 
-if (!c->has_video && c->min_seg_duration <= 0) {
+if (!c->has_video && c->seg_duration <= 0) {
 av_log(s, AV_LOG_WARNING, "no video stream and no min seg duration 
set\n");
 return AVERROR(EINVAL);
 }
@@ -1287,7 +1287,7 @@ static int dash_write_packet(AVFormatContext *s, AVPacket 
*pkt)
 if ((!c->has_video || st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO) &&
 pkt->flags & AV_PKT_FLAG_KEY && os->packets_written &&
 av_compare_ts(pkt->pts - os->start_pts, st->time_base,
-  c->min_seg_duration, AV_TIME_BASE_Q) >= 0) {
+  c->seg_duration, AV_TIME_BASE_Q) >= 0) {
 int64_t prev_duration = c->last_duration;
 
 c->last_duration = av_rescale_q(pkt->pts - os->start_pts,
@@ -1427,7 +1427,7 @@ static const AVOption options[] = {
 { "adaptation_sets", "Adaptation sets. Syntax: id=0,streams=0,1,2 
id=1,streams=3,4 and so on", OFFSET(adaptation_sets), AV_OPT_TYPE_STRING, { 0 
}, 0, 0, AV_OPT_FLAG_ENCODING_PARAM },
 { "window_size", "number of segments kept in the manifest", 
OFFSET(window_size), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, INT_MAX, E },
 { "extra_window_size", "number of segments kept outside of the manifest 
before removing from disk", OFFSET(extra_window_size), AV_OPT_TYPE_INT, { .i64 
= 5 }, 0, INT_MAX, E },
-{ "min_seg_duration", "minimum segment duration (in microseconds)", 
OFFSET(min_seg_duration), AV_OPT_TYPE_INT, { .i64 = 500 }, 0, INT_MAX, E },
+{ "seg_duration", "minimum segment duration (in microseconds)", 
OFFSET(seg_duration), AV_OPT_TYPE_INT, { .i64 = 500 }, 0, INT_MAX, E },
 { "remove_at_exit", "remove all segments when finished", 
OFFSET(remove_at_exit), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, E },
 { "use_template", "Use SegmentTemplate instead of SegmentList", 
OFFSET(use_template), AV_OPT_TYPE_BOOL, { .i64 = 1 }, 0, 1, E },
 { "use_timeline", "Use SegmentTimeline in SegmentTemplate", 
OFFSET(use_timeline), AV_OPT_TYPE_BOOL, { .i64 = 1 }, 0, 1, E },
-- 
1.9.1

___
ffmpeg-devel mailing list
ffmpeg-devel@ffmpeg.org
http://ffmpeg.org/mailman/listinfo/ffmpeg-devel


[FFmpeg-devel] how to add step-to-previous-frame feature to ffplay?

2018-03-22 Thread 陈 永岗

Hello everyone,


I'm newbie to ffmpeg, and found ffplay.c is very useful which contains lots of 
command line operations. If press 's', it will play next frame. However I want 
to make it support playing previous frame. Where should I start? Any 
recommendations?


Best regards

Dom
___
ffmpeg-devel mailing list
ffmpeg-devel@ffmpeg.org
http://ffmpeg.org/mailman/listinfo/ffmpeg-devel


Re: [FFmpeg-devel] [PATCH] Support signaling of last segment number

2018-03-22 Thread Jan Ekström
On Thu, Mar 22, 2018 at 9:51 PM, sanilraut  wrote:
> Last segment indicated by mpd is not parsed.
> Example stream: 
> http://dash.akamaized.net/dash264/TestCasesIOP41/LastSegmentNumber/1/manifest_last_segment_num.mpd
>
> This patch supports parsing of Supplemental Descriptor with @schemeIdUri set 
> to http://dashif.org/guide-
> lines/last-segment-number with the @value set to the last segment number.
>

Hi,

Just seeing the commit message, I don't remember if the MPEG-DASH
demuxer had any FATE tests, but this stuff sounds like something that
should be included as such. MPEG-DASH is quite complex and having
tests making sure that we get the same things out of the demuxer would
be quite helpful.

You can see for example tests/fate/mov.mak as an example of testing a demuxer.

Best regards,
Jan
___
ffmpeg-devel mailing list
ffmpeg-devel@ffmpeg.org
http://ffmpeg.org/mailman/listinfo/ffmpeg-devel


Re: [FFmpeg-devel] [PATCH] avcodec/extract_extradata: don't uninitialize the H2645Packet on every processed packet

2018-03-22 Thread James Almer
On 3/13/2018 9:47 PM, Jun Zhao wrote:
> 
> 
> On 2018/3/11 7:00, James Almer wrote:
>> Based on hevc_parser code. This prevents repeated unnecessary allocations
>> and frees on every packet processed by the bsf.
>>
>> Signed-off-by: James Almer 
>> ---
>>  libavcodec/extract_extradata_bsf.c | 33 +++--
>>  1 file changed, 19 insertions(+), 14 deletions(-)
>>
>> diff --git a/libavcodec/extract_extradata_bsf.c 
>> b/libavcodec/extract_extradata_bsf.c
>> index 4e2d601742..64017b6fb7 100644
>> --- a/libavcodec/extract_extradata_bsf.c
>> +++ b/libavcodec/extract_extradata_bsf.c
>> @@ -36,6 +36,9 @@ typedef struct ExtractExtradataContext {
>>  int (*extract)(AVBSFContext *ctx, AVPacket *pkt,
>> uint8_t **data, int *size);
>>  
>> +/* H264/HEVC specifc fields */
>> +H2645Packet h2645_pkt;
>> +
>>  /* AVOptions */
>>  int remove;
>>  } ExtractExtradataContext;
>> @@ -61,7 +64,6 @@ static int extract_extradata_h2645(AVBSFContext *ctx, 
>> AVPacket *pkt,
>>  
>>  ExtractExtradataContext *s = ctx->priv_data;
>>  
>> -H2645Packet h2645_pkt = { 0 };
>>  int extradata_size = 0, filtered_size = 0;
>>  const int *extradata_nal_types;
>>  int nb_extradata_nal_types;
>> @@ -75,13 +77,13 @@ static int extract_extradata_h2645(AVBSFContext *ctx, 
>> AVPacket *pkt,
>>  nb_extradata_nal_types = FF_ARRAY_ELEMS(extradata_nal_types_h264);
>>  }
>>  
>> -ret = ff_h2645_packet_split(&h2645_pkt, pkt->data, pkt->size,
>> +ret = ff_h2645_packet_split(&s->h2645_pkt, pkt->data, pkt->size,
>>  ctx, 0, 0, ctx->par_in->codec_id, 1);
>>  if (ret < 0)
>> -goto fail;
>> +return ret;
>>  
>> -for (i = 0; i < h2645_pkt.nb_nals; i++) {
>> -H2645NAL *nal = &h2645_pkt.nals[i];
>> +for (i = 0; i < s->h2645_pkt.nb_nals; i++) {
>> +H2645NAL *nal = &s->h2645_pkt.nals[i];
>>  if (val_in_array(extradata_nal_types, nb_extradata_nal_types, 
>> nal->type)) {
>>  extradata_size += nal->raw_size + 3;
>>  if (ctx->par_in->codec_id == AV_CODEC_ID_HEVC) {
>> @@ -104,8 +106,7 @@ static int extract_extradata_h2645(AVBSFContext *ctx, 
>> AVPacket *pkt,
>>  if (s->remove) {
>>  filtered_buf = av_buffer_alloc(filtered_size + 
>> AV_INPUT_BUFFER_PADDING_SIZE);
>>  if (!filtered_buf) {
>> -ret = AVERROR(ENOMEM);
>> -goto fail;
>> +return AVERROR(ENOMEM);
>>  }
>>  memset(filtered_buf->data + filtered_size, 0, 
>> AV_INPUT_BUFFER_PADDING_SIZE);
>>  
>> @@ -115,16 +116,15 @@ static int extract_extradata_h2645(AVBSFContext *ctx, 
>> AVPacket *pkt,
>>  extradata = av_malloc(extradata_size + 
>> AV_INPUT_BUFFER_PADDING_SIZE);
>>  if (!extradata) {
>> av_buffer_unref(&filtered_buf);
>> -ret = AVERROR(ENOMEM);
>> -goto fail;
>> +return AVERROR(ENOMEM);
>>  }
>>  memset(extradata + extradata_size, 0, AV_INPUT_BUFFER_PADDING_SIZE);
>>  
>>  *data = extradata;
>>  *size = extradata_size;
>>  
>> -for (i = 0; i < h2645_pkt.nb_nals; i++) {
>> -H2645NAL *nal = &h2645_pkt.nals[i];
>> +for (i = 0; i < s->h2645_pkt.nb_nals; i++) {
>> +H2645NAL *nal = &s->h2645_pkt.nals[i];
>>  if (val_in_array(extradata_nal_types, nb_extradata_nal_types,
>>   nal->type)) {
>>  AV_WB24(extradata, 1); // startcode
>> @@ -145,9 +145,7 @@ static int extract_extradata_h2645(AVBSFContext *ctx, 
>> AVPacket *pkt,
>>  }
>>  }
>>  
>> -fail:
>> -ff_h2645_packet_uninit(&h2645_pkt);
>> -return ret;
>> +return 0;
>>  }
>>  
>>  static int extract_extradata_vc1(AVBSFContext *ctx, AVPacket *pkt,
>> @@ -311,6 +309,12 @@ fail:
>>  return ret;
>>  }
>>  
>> +static void extract_extradata_close(AVBSFContext *ctx)
>> +{
>> +ExtractExtradataContext *s = ctx->priv_data;
>> +ff_h2645_packet_uninit(&s->h2645_pkt);
>> +}
>> +
>>  static const enum AVCodecID codec_ids[] = {
>>  AV_CODEC_ID_CAVS,
>>  AV_CODEC_ID_H264,
>> @@ -343,4 +347,5 @@ const AVBitStreamFilter ff_extract_extradata_bsf = {
>>  .priv_class = &extract_extradata_class,
>>  .init   = extract_extradata_init,
>>  .filter = extract_extradata_filter,
>> +.close  = extract_extradata_close,
>>  };
> LGTM

Pushed, thanks.
___
ffmpeg-devel mailing list
ffmpeg-devel@ffmpeg.org
http://ffmpeg.org/mailman/listinfo/ffmpeg-devel