[FFmpeg-devel] [PATCH] doc/examples/encode_video: add qsv encoder support

2017-07-20 Thread Zhong Li
Signed-off-by: Zhong Li 
---
 doc/examples/encode_video.c | 32 +---
 1 file changed, 29 insertions(+), 3 deletions(-)

diff --git a/doc/examples/encode_video.c b/doc/examples/encode_video.c
index 8cd1321..9c26f63 100644
--- a/doc/examples/encode_video.c
+++ b/doc/examples/encode_video.c
@@ -35,6 +35,8 @@
 
 #include <libavutil/opt.h>
 #include <libavutil/imgutils.h>
+#include "libavutil/buffer.h"
+#include "libavutil/hwcontext.h"
 
 static void encode(AVCodecContext *enc_ctx, AVFrame *frame, AVPacket *pkt,
                    FILE *outfile)
@@ -75,7 +77,10 @@ int main(int argc, char **argv)
     FILE *f;
     AVFrame *frame;
     AVPacket *pkt;
+    AVBufferRef* encode_device = NULL;
     uint8_t endcode[] = { 0, 0, 1, 0xb7 };
+    enum AVHWDeviceType hw_device_type = AV_HWDEVICE_TYPE_NONE;
+    enum AVPixelFormat pixel_format = AV_PIX_FMT_YUV420P;
 
     if (argc <= 2) {
         fprintf(stderr, "Usage: %s <output file> <codec name>\n", argv[0]);
@@ -86,6 +91,21 @@ int main(int argc, char **argv)
 
     avcodec_register_all();
 
+    if (strstr(codec_name, "qsv")) {
+        hw_device_type = AV_HWDEVICE_TYPE_QSV;
+        pixel_format = AV_PIX_FMT_NV12;
+    }
+
+    /* open the hardware device */
+    if (hw_device_type != AV_HWDEVICE_TYPE_NONE) {
+        ret = av_hwdevice_ctx_create(&encode_device, hw_device_type,
+                                     NULL, NULL, 0);
+        if (ret < 0) {
+            fprintf(stderr, "Cannot open the hardware device\n");
+            exit(1);
+        }
+    }
+
     /* find the mpeg1video encoder */
     codec = avcodec_find_encoder_by_name(codec_name);
     if (!codec) {
@@ -120,7 +140,7 @@ int main(int argc, char **argv)
      */
     c->gop_size = 10;
     c->max_b_frames = 1;
-    c->pix_fmt = AV_PIX_FMT_YUV420P;
+    c->pix_fmt = pixel_format;
 
     if (codec->id == AV_CODEC_ID_H264)
         av_opt_set(c->priv_data, "preset", "slow", 0);
@@ -173,8 +193,13 @@ int main(int argc, char **argv)
         /* Cb and Cr */
         for (y = 0; y < c->height/2; y++) {
             for (x = 0; x < c->width/2; x++) {
-                frame->data[1][y * frame->linesize[1] + x] = 128 + y + i * 2;
-                frame->data[2][y * frame->linesize[2] + x] = 64 + x + i * 5;
+                if (frame->format == AV_PIX_FMT_YUV420P) {
+                    frame->data[1][y * frame->linesize[1] + x] = 128 + y + i * 2;
+                    frame->data[2][y * frame->linesize[2] + x] = 64 + x + i * 5;
+                } else if (frame->format == AV_PIX_FMT_NV12) {
+                    frame->data[1][y * frame->linesize[1] + 2 * x] = 128 + y + i * 2;
+                    frame->data[1][y * frame->linesize[1] + 2 * x + 1] = 64 + x + i * 5;
+                }
             }
         }
 
@@ -194,6 +219,7 @@ int main(int argc, char **argv)
     avcodec_free_context(&c);
     av_frame_free(&frame);
     av_packet_free(&pkt);
+    av_buffer_unref(&encode_device);
 
     return 0;
 }
-- 
1.8.3.1
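
For readers comparing the two fill loops in the hunk above: YUV420P keeps Cb and Cr in two separate quarter-size planes (data[1] and data[2]), while NV12 — the format the QSV encoder expects — stores one half-height plane with the Cb/Cr samples interleaved, which is why both values are written into data[1]. A minimal sketch of the addressing (cb and cr stand for the sample values, everything else as in the example):

/* planar YUV420P: one byte per chroma sample in each of two planes */
frame->data[1][y * frame->linesize[1] + x] = cb;
frame->data[2][y * frame->linesize[2] + x] = cr;

/* semi-planar NV12: Cb and Cr interleaved pairwise in a single plane */
frame->data[1][y * frame->linesize[1] + 2 * x]     = cb;
frame->data[1][y * frame->linesize[1] + 2 * x + 1] = cr;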



Re: [FFmpeg-devel] [PATCH] avcodec/dca: remove GetBitContext usage from avpriv_dca_parse_core_frame_header()

2017-07-20 Thread James Almer
On 7/19/2017 4:43 PM, James Almer wrote:
> This prevents potential ABI issues with GetBitContext.
> 
> Signed-off-by: James Almer 
> ---
>  libavcodec/dca.c| 12 +++-
>  libavcodec/dca.h|  7 +--
>  libavcodec/dca_core.c   |  2 +-
>  libavcodec/dca_parser.c |  4 +---
>  libavformat/dtsdec.c|  4 +---
>  5 files changed, 19 insertions(+), 10 deletions(-)
> 
> diff --git a/libavcodec/dca.c b/libavcodec/dca.c
> index 39f8f3d81c..307b21471e 100644
> --- a/libavcodec/dca.c
> +++ b/libavcodec/dca.c
> @@ -88,7 +88,7 @@ int avpriv_dca_convert_bitstream(const uint8_t *src, int src_size, uint8_t *dst,
>      }
>  }
>  
> -int avpriv_dca_parse_core_frame_header(GetBitContext *gb, DCACoreFrameHeader *h)
> +int ff_dca_parse_core_frame_header(DCACoreFrameHeader *h, GetBitContext *gb)
>  {
>      if (get_bits_long(gb, 32) != DCA_SYNCWORD_CORE_BE)
>          return DCA_PARSE_ERROR_SYNC_WORD;
> @@ -145,3 +145,13 @@ int avpriv_dca_parse_core_frame_header(GetBitContext *gb, DCACoreFrameHeader *h)
>      h->dn_code = get_bits(gb, 4);
>      return 0;
>  }
> +
> +int avpriv_dca_parse_core_frame_header(DCACoreFrameHeader *h, uint8_t *buf, int size)
> +{
> +    GetBitContext gb;
> +
> +    if (init_get_bits8(&gb, buf, size) < 0)
> +        return DCA_PARSE_ERROR_INVALIDDATA;
> +
> +    return ff_dca_parse_core_frame_header(h, &gb);
> +}
> diff --git a/libavcodec/dca.h b/libavcodec/dca.h
> index cf6204e554..172c965b3b 100644
> --- a/libavcodec/dca.h
> +++ b/libavcodec/dca.h
> @@ -45,7 +45,8 @@ enum DCAParseError {
>      DCA_PARSE_ERROR_SAMPLE_RATE = -6,
>      DCA_PARSE_ERROR_RESERVED_BIT= -7,
>      DCA_PARSE_ERROR_LFE_FLAG= -8,
> -    DCA_PARSE_ERROR_PCM_RES = -9
> +    DCA_PARSE_ERROR_PCM_RES = -9,
> +    DCA_PARSE_ERROR_INVALIDDATA = -10,
>  };
>  
>  typedef struct DCACoreFrameHeader {
> @@ -212,6 +213,8 @@ int avpriv_dca_convert_bitstream(const uint8_t *src, int src_size, uint8_t *dst,
>   * Parse and validate core frame header
>   * @return 0 on success, negative DCA_PARSE_ERROR_ code on failure
>   */
> -int avpriv_dca_parse_core_frame_header(GetBitContext *gb, DCACoreFrameHeader *h);
> +int avpriv_dca_parse_core_frame_header(DCACoreFrameHeader *h, uint8_t *buf, int size);
> +
> +int ff_dca_parse_core_frame_header(DCACoreFrameHeader *h, GetBitContext *gb);
>  
>  #endif /* AVCODEC_DCA_H */
> diff --git a/libavcodec/dca_core.c b/libavcodec/dca_core.c
> index 3add9f812b..6cb1f30a3c 100644
> --- a/libavcodec/dca_core.c
> +++ b/libavcodec/dca_core.c
> @@ -82,7 +82,7 @@ static void get_array(GetBitContext *s, int32_t *array, int size, int n)
>  static int parse_frame_header(DCACoreDecoder *s)
>  {
>      DCACoreFrameHeader h = { 0 };
> -    int err = avpriv_dca_parse_core_frame_header(&s->gb, &h);
> +    int err = ff_dca_parse_core_frame_header(&h, &s->gb);
>  
>      if (err < 0) {
>          switch (err) {
> diff --git a/libavcodec/dca_parser.c b/libavcodec/dca_parser.c
> index 7e99b16bf0..11ddb8f188 100644
> --- a/libavcodec/dca_parser.c
> +++ b/libavcodec/dca_parser.c
> @@ -263,9 +263,7 @@ static int dca_parse_params(DCAParseContext *pc1, const uint8_t *buf,
>      if ((ret = avpriv_dca_convert_bitstream(buf, DCA_CORE_FRAME_HEADER_SIZE,
>                                              hdr, DCA_CORE_FRAME_HEADER_SIZE)) < 0)
>          return ret;
> -    if ((ret = init_get_bits8(&gb, hdr, ret)) < 0)
> -        return ret;
> -    if (avpriv_dca_parse_core_frame_header(&gb, &h) < 0)
> +    if (avpriv_dca_parse_core_frame_header(&h, hdr, ret) < 0)
>          return AVERROR_INVALIDDATA;
>  
>      *duration = h.npcmblocks * DCA_PCMBLOCK_SAMPLES;
> diff --git a/libavformat/dtsdec.c b/libavformat/dtsdec.c
> index 6e0048f9bc..a3e52cd596 100644
> --- a/libavformat/dtsdec.c
> +++ b/libavformat/dtsdec.c
> @@ -101,9 +101,7 @@ static int dts_probe(AVProbeData *p)
>          if ((ret = avpriv_dca_convert_bitstream(buf - 2, DCA_CORE_FRAME_HEADER_SIZE,
>                                                  hdr, DCA_CORE_FRAME_HEADER_SIZE)) < 0)
>              continue;
> -        if (init_get_bits8(&gb, hdr, ret) < 0)
> -            continue;
> -        if (avpriv_dca_parse_core_frame_header(&gb, &h) < 0)
> +        if (avpriv_dca_parse_core_frame_header(&h, hdr, ret) < 0)
>              continue;
>  
>          marker += 4 * h.sr_code;
> 

Will apply soon.
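
For callers outside libavcodec, the call pattern after this change looks roughly like the dtsdec.c hunk above: convert the header to the canonical big-endian layout first, then parse straight from the byte buffer (sketch; buf is the caller's input):

uint8_t hdr[DCA_CORE_FRAME_HEADER_SIZE];
DCACoreFrameHeader h;
int ret;

ret = avpriv_dca_convert_bitstream(buf, DCA_CORE_FRAME_HEADER_SIZE,
                                   hdr, DCA_CORE_FRAME_HEADER_SIZE);
if (ret < 0)
    return ret;
/* no GetBitContext crosses the library boundary any more */
if (avpriv_dca_parse_core_frame_header(&h, hdr, ret) < 0)
    return AVERROR_INVALIDDATA;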


[FFmpeg-devel] [PATCH] avformat/hlsenc: improve hls encrypt get key file operation

2017-07-20 Thread Steven Liu
Getting the key file once is enough; there is no need to fetch it again for every segment.
Ticket-id: #6545

Found-by: JohnPi
Signed-off-by: Steven Liu 
---
 libavformat/hlsenc.c | 15 +--
 1 file changed, 9 insertions(+), 6 deletions(-)

diff --git a/libavformat/hlsenc.c b/libavformat/hlsenc.c
index 8a233270b5..6f721eb010 100644
--- a/libavformat/hlsenc.c
+++ b/libavformat/hlsenc.c
@@ -1222,12 +1222,15 @@ static int hls_start(AVFormatContext *s)
             av_log(s, AV_LOG_WARNING, "Cannot use both -hls_key_info_file and -hls_enc,"
                   " will use -hls_key_info_file priority\n");
         }
-        if (c->key_info_file) {
-            if ((err = hls_encryption_start(s)) < 0)
-                goto fail;
-        } else {
-            if ((err = do_encrypt(s)) < 0)
-                goto fail;
+
+        if (c->number <= 1) {
+            if (c->key_info_file) {
+                if ((err = hls_encryption_start(s)) < 0)
+                    goto fail;
+            } else {
+                if ((err = do_encrypt(s)) < 0)
+                    goto fail;
+            }
         }
         if ((err = av_dict_set(&options, "encryption_key", c->key_string, 0))
             < 0)
-- 
2.11.0 (Apple Git-81)





[FFmpeg-devel] [PATCH] libavfilter/vf_drawtext:support to set glyph spacing while drawing text.

2017-07-20 Thread efren yang
The spacing is applied to every glyph. Some languages, for example Chinese and Japanese, put no space between words, so a rendered sentence can look very cramped. Allowing the glyph spacing to be set lets the filter adapt to more scenarios. It is simple but useful.

ffmpeg -i input -vf "drawtext=glyph_spacing=10:text='PROGRAMMING':fontfile=demo.ttf" -f flv 1.flv
sets a glyph spacing of 10 pixels.

Signed-off-by: efren yang 
---
 doc/filters.texi  | 4 
 libavfilter/vf_drawtext.c | 3 +++
 2 files changed, 7 insertions(+)

diff --git a/doc/filters.texi b/doc/filters.texi
index 119d1be69d..0f336f8826 100644
--- a/doc/filters.texi
+++ b/doc/filters.texi
@@ -7077,6 +7077,10 @@ The default value of @var{boxcolor} is "white".
 Set the line spacing in pixels of the border to be drawn around the box using @var{box}.
 The default value of @var{line_spacing} is 0.
 
+@item glyph_spacing
+Set the glyph spacing in pixels for drawing text.
+The default value of @var{glyph_spacing} is 0.
+
 @item borderw
 Set the width of the border to be drawn around the text using @var{bordercolor}.
 The default value of @var{borderw} is 0.
diff --git a/libavfilter/vf_drawtext.c b/libavfilter/vf_drawtext.c
index f6151443bb..152d8f35d5 100644
--- a/libavfilter/vf_drawtext.c
+++ b/libavfilter/vf_drawtext.c
@@ -162,6 +162,7 @@ typedef struct DrawTextContext {
     unsigned int default_fontsize;  ///< default font size to use
 
     int line_spacing;   ///< lines spacing in pixels
+    int glyph_spacing;  ///< glyph spacing in pixels
     short int draw_box; ///< draw box around text - true or false
     int boxborderw; ///< box border width
     int use_kerning;///< font kerning is used - true/false
@@ -214,6 +215,7 @@ static const AVOption drawtext_options[]= {
     {"box", "set box",  OFFSET(draw_box),   AV_OPT_TYPE_BOOL,   {.i64=0}, 0,1   , FLAGS},
     {"boxborderw",  "set box border width", OFFSET(boxborderw), AV_OPT_TYPE_INT,{.i64=0}, INT_MIN,  INT_MAX , FLAGS},
     {"line_spacing",  "set line spacing in pixels", OFFSET(line_spacing),   AV_OPT_TYPE_INT,{.i64=0}, INT_MIN,  INT_MAX,FLAGS},
+    {"glyph_spacing",  "set glyph spacing in pixels", OFFSET(glyph_spacing),   AV_OPT_TYPE_INT,{.i64 = 0}, 0,  INT_MAX,FLAGS },
     {"fontsize","set font size",OFFSET(fontsize_expr),  AV_OPT_TYPE_STRING, {.str=NULL},  CHAR_MIN, CHAR_MAX , FLAGS},
     {"x",   "set x expression", OFFSET(x_expr), AV_OPT_TYPE_STRING, {.str="0"},   CHAR_MIN, CHAR_MAX, FLAGS},
     {"y",   "set y expression", OFFSET(y_expr), AV_OPT_TYPE_STRING, {.str="0"},   CHAR_MIN, CHAR_MAX, FLAGS},
@@ -1374,6 +1376,7 @@ static int draw_text(AVFilterContext *ctx, AVFrame *frame,
         s->positions[i].y = y - glyph->bitmap_top + y_max;
         if (code == '\t') x  = (x / s->tabsize + 1)*s->tabsize;
         else  x += glyph->advance;
+        x += s->glyph_spacing;
     }
 
     max_text_line_w = FFMAX(x, max_text_line_w);
-- 
2.13.0.windows.1





[FFmpeg-devel] [PATCH V5] examples/hw_decode: Add a HWAccel decoding example.

2017-07-20 Thread Jun Zhao
V5: remove qsv/cuda from the example; Mark has tested dxva2|d3d11va,
    videotoolbox might work as well.
V4: fix a potential memory leak issue based on Steven Liu's review.
V3: re-work to support the other hwaccels, rename from vaapi_dec.c to hw_decode.c.
    Only tested with vaapi; dxva2|d3d11va|videotoolbox might work as well.
V2: re-work with the new hw decoding API.
From df532eb81df163eb11f3d6c9c8bdc13fdc96ed32 Mon Sep 17 00:00:00 2001
From: Jun Zhao 
Date: Thu, 20 Jul 2017 00:58:56 -0400
Subject: [PATCH V5] examples/hw_decode: Add a HWAccel decoding example.

Add a HWAccel decoding example.

Tested with vaapi|dxva2|d3d11va (Mark helped to test dxva2|d3d11va);
videotoolbox might work as well.

Signed-off-by: Liu, Kaixuan 
Signed-off-by: Jun Zhao 
---
 doc/examples/hw_decode.c | 264 +++
 1 file changed, 264 insertions(+)
 create mode 100644 doc/examples/hw_decode.c

diff --git a/doc/examples/hw_decode.c b/doc/examples/hw_decode.c
new file mode 100644
index 00..c27e0d33e6
--- /dev/null
+++ b/doc/examples/hw_decode.c
@@ -0,0 +1,264 @@
+/*
+ * Copyright (c) 2017 Jun Zhao
+ * Copyright (c) 2017 Kaixuan Liu
+ *
+ * HW Acceleration API (video decoding) decode sample
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * HW-Accelerated decoding example.
+ *
+ * @example hw_decode.c
+ * This example shows how to do HW-accelerated decoding with output
+ * frames from the HW video surfaces.
+ */
+
+#include <stdio.h>
+
+#include <libavcodec/avcodec.h>
+#include <libavformat/avformat.h>
+#include <libavutil/pixdesc.h>
+#include <libavutil/hwcontext.h>
+#include <libavutil/opt.h>
+#include <libavutil/avassert.h>
+#include <libavutil/imgutils.h>
+
+static AVBufferRef *hw_device_ctx = NULL;
+static enum AVPixelFormat hw_pix_fmt;
+FILE *output_file = NULL;
+
+static enum AVPixelFormat find_fmt_by_hw_type(const enum AVHWDeviceType type)
+{
+enum AVPixelFormat fmt;
+
+switch (type) {
+case AV_HWDEVICE_TYPE_VAAPI:
+fmt = AV_PIX_FMT_VAAPI;
+break;
+case AV_HWDEVICE_TYPE_DXVA2:
+fmt = AV_PIX_FMT_DXVA2_VLD;
+break;
+case AV_HWDEVICE_TYPE_D3D11VA:
+fmt = AV_PIX_FMT_D3D11;
+break;
+case AV_HWDEVICE_TYPE_VDPAU:
+fmt = AV_PIX_FMT_VDPAU;
+break;
+case AV_HWDEVICE_TYPE_VIDEOTOOLBOX:
+fmt = AV_PIX_FMT_VIDEOTOOLBOX;
+break;
+default:
+fmt = AV_PIX_FMT_NONE;
+break;
+}
+
+return fmt;
+}
+
+static int hw_decoder_init(AVCodecContext *ctx, const enum AVHWDeviceType type)
+{
+int err = 0;
+
+    if ((err = av_hwdevice_ctx_create(&hw_device_ctx, type,
+                                      NULL, NULL, 0)) < 0) {
+fprintf(stderr, "Failed to create specified HW device.\n");
+return err;
+}
+ctx->hw_device_ctx = av_buffer_ref(hw_device_ctx);
+
+return err;
+}
+
+static enum AVPixelFormat get_hw_format(AVCodecContext *ctx,
+const enum AVPixelFormat *pix_fmts)
+{
+const enum AVPixelFormat *p;
+
+for (p = pix_fmts; *p != -1; p++) {
+if (*p == hw_pix_fmt)
+return *p;
+}
+
+fprintf(stderr, "Failed to get HW surface format.\n");
+return AV_PIX_FMT_NONE;
+}
+
+static int decode_write(AVCodecContext *avctx, AVPacket *packet)
+{
+AVFrame *frame = NULL, *sw_frame = NULL;
+AVFrame *tmp_frame = NULL;
+uint8_t *buffer = NULL;
+int size;
+int ret = 0;
+
+ret = avcodec_send_packet(avctx, packet);
+if (ret < 0) {
+fprintf(stderr, "Error during decoding\n");
+return ret;
+}
+
+while (ret >= 0) {
+if (!(frame = av_frame_alloc()) || !(sw_frame = av_frame_alloc())) {
+fprintf(stderr, "Can not alloc frame\n");
+ret = AVERROR(ENOMEM);
+goto fail;
+}
+
+ret = avcodec_receive_frame(avctx, frame);
+if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
+break;
+else if (ret < 0) {
+fprintf(stderr, "Error while decoding\n");
+goto fail;
+}
+
+if (frame->format == hw_pix_fmt) {
+/* retrieve data from GPU to CPU */
+if ((ret = av_hwframe_transfer_data(sw_frame, frame, 0)) < 0) {
+fprintf(stderr, "Error transferring the data to system 
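
A rough outline of how an application wires the helpers in this example together (sketch only; the decoder and stream setup are assumed and error handling is omitted):

enum AVHWDeviceType type = AV_HWDEVICE_TYPE_VAAPI;  /* or DXVA2, D3D11VA, ... */

hw_pix_fmt = find_fmt_by_hw_type(type);             /* AV_PIX_FMT_VAAPI here */

AVCodecContext *decoder_ctx = avcodec_alloc_context3(decoder);
avcodec_parameters_to_context(decoder_ctx, video_stream->codecpar);
decoder_ctx->get_format = get_hw_format;            /* picks hw_pix_fmt when offered */

hw_decoder_init(decoder_ctx, type);                 /* creates and attaches hw_device_ctx */
avcodec_open2(decoder_ctx, decoder, NULL);

/* demux loop: av_read_frame(...), then decode_write(decoder_ctx, &packet),
 * which transfers each hw frame to system memory before writing it out */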

Re: [FFmpeg-devel] [PATCH V4] examples/hw_decode: Add a HWAccel decoding example.

2017-07-20 Thread Jun Zhao


On 2017/7/21 6:44, Mark Thompson wrote:
> On 20/07/17 08:54, Jun Zhao wrote:
>> V4: fix potential memory leak issue base on Steven Liu's review.
>> V3: re-work to support the other hwaccels, rename from vaapi_dec.c to 
>> hw_decode.c.
>>just test with vaapi, dxva2|d3d11va|videotoolbox might work as well.
>> V2: re-work with new hw decoding API.
>>
>> From 718f92731d308423e5a09d0384f7bf2361f5a307 Mon Sep 17 00:00:00 2001
>> From: Jun Zhao 
>> Date: Thu, 20 Jul 2017 00:58:56 -0400
>> Subject: [PATCH V4] examples/hw_decode: Add a HWAccel decoding example.
>>
>> Add a HWAccel decoding example.
>>
>> Just test with vaapi, dxva2|d3d11va|videotoolbox might work as well.
>>
>> Signed-off-by: Liu, Kaixuan 
>> Signed-off-by: Jun Zhao 
>> ---
>>  doc/examples/hw_decode.c | 249 
>> +++
>>  1 file changed, 249 insertions(+)
>>  create mode 100644 doc/examples/hw_decode.c
>>
>> diff --git a/doc/examples/hw_decode.c b/doc/examples/hw_decode.c
>> new file mode 100644
>> index 00..0e77ee877f
>> --- /dev/null
>> +++ b/doc/examples/hw_decode.c
>> @@ -0,0 +1,249 @@
>> +/*
>> + * Copyright (c) 2017 Jun Zhao
>> + * Copyright (c) 2017 Kaixuan Liu
>> + *
>> + * HW Acceleration API (video decoding) decode sample
>> + *
>> + * This file is part of FFmpeg.
>> + *
>> + * FFmpeg is free software; you can redistribute it and/or
>> + * modify it under the terms of the GNU Lesser General Public
>> + * License as published by the Free Software Foundation; either
>> + * version 2.1 of the License, or (at your option) any later version.
>> + *
>> + * FFmpeg is distributed in the hope that it will be useful,
>> + * but WITHOUT ANY WARRANTY; without even the implied warranty of
>> + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
>> + * Lesser General Public License for more details.
>> + *
>> + * You should have received a copy of the GNU Lesser General Public
>> + * License along with FFmpeg; if not, write to the Free Software
>> + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 
>> USA
>> + */
>> +
>> +/**
>> + * @file
>> + * HW-Accelerated decoding example.
>> + *
>> + * @example hw_decode.c
>> + * This example shows how to do HW-accelerated decoding with output
>> + * frames from the HW video surfaces.
>> + */
>> +
>> +#include <stdio.h>
>> +
>> +#include <libavcodec/avcodec.h>
>> +#include <libavformat/avformat.h>
>> +#include <libavutil/pixdesc.h>
>> +#include <libavutil/hwcontext.h>
>> +#include <libavutil/opt.h>
>> +#include <libavutil/avassert.h>
>> +#include <libavutil/imgutils.h>
>> +
>> +static AVBufferRef *hw_device_ctx = NULL;
>> +static enum AVPixelFormat hw_pix_fmt;
>> +FILE *output_file = NULL;
>> +
>> +static enum AVPixelFormat hw_pix_fmts[] = {
>> +[AV_HWDEVICE_TYPE_CUDA] = AV_PIX_FMT_CUDA,
>> +[AV_HWDEVICE_TYPE_DXVA2]= AV_PIX_FMT_DXVA2_VLD,
>> +[AV_HWDEVICE_TYPE_D3D11VA]  = AV_PIX_FMT_D3D11VA_VLD,
> 
> AV_PIX_FMT_D3D11 (this was changed recently).
> 
>> +[AV_HWDEVICE_TYPE_QSV]  = AV_PIX_FMT_QSV,
>> +[AV_HWDEVICE_TYPE_VAAPI]= AV_PIX_FMT_VAAPI,
>> +[AV_HWDEVICE_TYPE_VDPAU]= AV_PIX_FMT_VDPAU,
>> +[AV_HWDEVICE_TYPE_VIDEOTOOLBOX] = AV_PIX_FMT_VIDEOTOOLBOX,
>> +};
> 
> Neither CUDA nor QSV will work here, as they are dummy hwaccels used on other 
> decoders.  Just leave the five real hwaccels.
> 
>> +
>> +static enum AVPixelFormat find_fmt_by_hw_type(const enum AVHWDeviceType 
>> type)
>> +{
>> +if (type >= 0 && type < FF_ARRAY_ELEMS(hw_pix_fmts))
>> +return hw_pix_fmts[type];
>> +else
>> +return AV_PIX_FMT_NONE;
>> +}
>> +
>> +static int hw_decoder_init(AVCodecContext *ctx, const enum AVHWDeviceType 
>> type)
>> +{
>> +int err = 0;
>> +
>> +    if ((err = av_hwdevice_ctx_create(&hw_device_ctx, type,
>> +                                      NULL, NULL, 0)) < 0) {
>> +fprintf(stderr, "Failed to create specified HW device.\n");
>> +return err;
>> +}
>> +ctx->hw_device_ctx = av_buffer_ref(hw_device_ctx);
>> +
>> +return err;
>> +}
>> +
>> +static enum AVPixelFormat get_hw_format(AVCodecContext *ctx,
>> +const enum AVPixelFormat *pix_fmts)
>> +{
>> +const enum AVPixelFormat *p;
>> +
>> +for (p = pix_fmts; *p != -1; p++) {
>> +if (*p == hw_pix_fmt)
>> +return *p;
>> +}
>> +
>> +fprintf(stderr, "Failed to get HW surface format.\n");
>> +return AV_PIX_FMT_NONE;
>> +}
>> +
>> +static int decode_write(AVCodecContext *avctx, AVPacket *packet)
>> +{
>> +AVFrame *frame = NULL, *sw_frame = NULL;
>> +AVFrame *tmp_frame = NULL;
>> +uint8_t *buffer = NULL;
>> +int size;
>> +int ret = 0;
>> +
>> +ret = avcodec_send_packet(avctx, packet);
>> +if (ret < 0) {
>> +fprintf(stderr, "Error during decoding\n");
>> +return ret;
>> +}
>> +
>> +while (ret >= 0) {
>> +if (!(frame = av_frame_alloc()) || !(sw_frame = av_frame_alloc())) {
>> +fprintf(stderr, "Can not alloc 

Re: [FFmpeg-devel] [PATCH] fate: add tests for some video source filters

2017-07-20 Thread Michael Niedermayer
On Fri, Jul 21, 2017 at 01:04:03AM +0200, Nicolas George wrote:
> Le duodi 2 thermidor, an CCXXV, Michael Niedermayer a écrit :
> > breaks on mips (probably big endian issue)
> 
> Breaks on MIPS, or detects that something is already broken in MIPS and
> nobody noticed until now?

this patch breaks the selftests on mips, or maybe more precisely,
it adds a test which fails


> 
> Could you perhaps re-run the test with V=1, then re-run the command line
> with -f png, to see how the output is broken?

Stream #0:0: Video: rawvideo ([12][0]3Y / 0x5933000C), yuv444p12be, 320x240 
[SAR 1:1 DAR 4:3], q=2-31, 13824 kb/s, 5 fps, 5 tbn, 5 tbc (default)

the format used seems to be the native one, so it has a different endianness
and thus different checksums

md5sum out.png mips/out.png
012e1906fe84ee6f757ef3f1473cc26d  out.png
012e1906fe84ee6f757ef3f1473cc26d  mips/out.png

I tried forcing le, but that alone didn't give the same result.
I've to sleep now, so I can't look further into this today,
but I am happy to test patches tomorrow. Also, I am just testing with
mips qemu, not real hw, so if it wasn't for the one-time work of setting
up the build env and qemu, it would be rather easy for anyone to test.

[...]


-- 
Michael GnuPG fingerprint: 9FF2128B147EF6730BADF133611EC787040B0FAB

Elect your leaders based on what they did after the last election, not
based on what they say before an election.





Re: [FFmpeg-devel] [PATCH 1/2] lavu/frame: add new side data type for ICC profiles

2017-07-20 Thread Rostislav Pehlivanov
On 21 July 2017 at 00:49, Nicolas George  wrote:

> Le tridi 3 thermidor, an CCXXV, Rostislav Pehlivanov a écrit :
> > It can be quite big. In some insane cases it can be bigger than the
> actual
> > packet or even the uncompressed frame. Its also not strictly necessary to
> > display something more or less looking okay, but its necessary to display
> > something correctly. I think its better treated like we currently treat
> HDR
> > by defining a side data type and letting the API users cache and use it,
> > which is what this patch does.
>
> All this could apply to a dedicated field. Using side data only brings a
> type pruning of the actual type. Except:
>
>
Yes, it could. However, I still think having it as side data is better,
since it's the easiest way and it's what all API users currently use to
retrieve HDR metadata as well.


> >Side data is also refcounted so it even
> > solves the size issue.
>
> Indeed. A separate type could be refcounted too, though, but that would
> require a little more code. Short-term convenience vs. long-term
> convenience.
>
> > > > + * The data contains an ICC profile with an optional name
> defined
> > > in the
> > > > + * metadata entry.
> > > Not being a specialist of ICC profiles, I have no idea what that means
> > > in practice. What data structure is it?
> > There's a 300 page document describing the bitstream and how its used.
> The
> > smallest implementation, lcms is still a few tens of thousands of lines.
>
> This is not what I am asking. Look at the doxy for
> AV_FRAME_DATA_SPHERICAL. Explaining the semantic of the structure
> requires at least a few pages of documentation, but the doxy still
> explains in a few world the data structure used.
>
> What I am asking is very simple: if I want to exploit this side through
> a library, to what type shall I cast the pointer?
>
>
Nothing, it's a bitstream. You give it to a library to decode, and the
library is then ready to take e.g. RGB in and output color-corrected RGB.
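
In other words, an API user treats it as an opaque buffer and hands it to an external CMS. A minimal sketch (cmsOpenProfileFromMem() is LittleCMS, not FFmpeg, and the "name" metadata key follows the pngdec patch in this thread):

AVFrameSideData *sd = av_frame_get_side_data(frame, AV_FRAME_DATA_ICC_PROFILE);
if (sd) {
    AVDictionaryEntry *e = av_dict_get(sd->metadata, "name", NULL, 0);
    /* sd->data / sd->size is the raw ICC profile bitstream */
    cmsHPROFILE profile = cmsOpenProfileFromMem(sd->data, sd->size);
    /* ... build a transform and color-correct the decoded pixels ... */
}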


Re: [FFmpeg-devel] [PATCH 1/2] lavu/frame: add new side data type for ICC profiles

2017-07-20 Thread Nicolas George
Le tridi 3 thermidor, an CCXXV, Rostislav Pehlivanov a écrit :
> It can be quite big. In some insane cases it can be bigger than the actual
> packet or even the uncompressed frame. Its also not strictly necessary to
> display something more or less looking okay, but its necessary to display
> something correctly. I think its better treated like we currently treat HDR
> by defining a side data type and letting the API users cache and use it,
> which is what this patch does.

All this could apply to a dedicated field. Using side data only brings a
type pruning of the actual type. Except:

>Side data is also refcounted so it even
> solves the size issue.

Indeed. A separate type could be refcounted too, though, but that would
require a little more code. Short-term convenience vs. long-term
convenience.

> > > + * The data contains an ICC profile with an optional name defined
> > in the
> > > + * metadata entry.
> > Not being a specialist of ICC profiles, I have no idea what that means
> > in practice. What data structure is it?
> There's a 300 page document describing the bitstream and how its used. The
> smallest implementation, lcms is still a few tens of thousands of lines.

This is not what I am asking. Look at the doxy for
AV_FRAME_DATA_SPHERICAL. Explaining the semantic of the structure
requires at least a few pages of documentation, but the doxy still
explains in a few words the data structure used.

What I am asking is very simple: if I want to exploit this side data through
a library, to what type shall I cast the pointer?

Regards,

-- 
  Nicolas George




Re: [FFmpeg-devel] [PATCH 1/2] lavu/frame: add new side data type for ICC profiles

2017-07-20 Thread Rostislav Pehlivanov
On 21 July 2017 at 00:08, Nicolas George  wrote:

> Le duodi 2 thermidor, an CCXXV, Rostislav Pehlivanov a écrit :
> > Many image formats support embedding of ICC profiles directly in
> > their bitstreams. Add a new side data type to allow exposing them to
> > API users.
>
> Why not make it a member of AVFrame directly? It looks to me very
> similar in principle to color_range, color_primaries, colorspace, etc.
>
>
It can be quite big. In some insane cases it can be bigger than the actual
packet or even the uncompressed frame. It's also not strictly necessary for
displaying something that looks more or less okay, but it is necessary for
displaying something correctly. I think it's better treated the way we currently
treat HDR: by defining a side data type and letting the API users cache and use
it, which is what this patch does. Side data is also refcounted, so it even
solves the size issue.


> >
> > Signed-off-by: Rostislav Pehlivanov 
> > ---
> >  libavutil/frame.h   | 6 ++
> >  libavutil/version.h | 2 +-
> >  2 files changed, 7 insertions(+), 1 deletion(-)
> >
> > diff --git a/libavutil/frame.h b/libavutil/frame.h
> > index 26261d7e40..ee899d844d 100644
> > --- a/libavutil/frame.h
> > +++ b/libavutil/frame.h
> > @@ -134,6 +134,12 @@ enum AVFrameSideDataType {
> >   * the form of the AVContentLightMetadata struct.
> >   */
> >  AV_FRAME_DATA_CONTENT_LIGHT_LEVEL,
> > +
> > +/**
>
> > + * The data contains an ICC profile with an optional name defined
> in the
> > + * metadata entry.
>
> Not being a specialist of ICC profiles, I have no idea what that means
> in practice. What data structure is it?
>
>
There's a 300 page document describing the bitstream and how its used. The
smallest implementation, lcms is still a few tens of thousands of lines.


Re: [FFmpeg-devel] [PATCH 1/2] lavu/frame: add new side data type for ICC profiles

2017-07-20 Thread Nicolas George
Le duodi 2 thermidor, an CCXXV, Rostislav Pehlivanov a écrit :
> Many image formats support embedding of ICC profiles directly in
> their bitstreams. Add a new side data type to allow exposing them to
> API users.

Why not make it a member of AVFrame directly? It looks to me very
similar in principle to color_range, color_primaries, colorspace, etc.

> 
> Signed-off-by: Rostislav Pehlivanov 
> ---
>  libavutil/frame.h   | 6 ++
>  libavutil/version.h | 2 +-
>  2 files changed, 7 insertions(+), 1 deletion(-)
> 
> diff --git a/libavutil/frame.h b/libavutil/frame.h
> index 26261d7e40..ee899d844d 100644
> --- a/libavutil/frame.h
> +++ b/libavutil/frame.h
> @@ -134,6 +134,12 @@ enum AVFrameSideDataType {
>   * the form of the AVContentLightMetadata struct.
>   */
>  AV_FRAME_DATA_CONTENT_LIGHT_LEVEL,
> +
> +/**

> + * The data contains an ICC profile with an optional name defined in the
> + * metadata entry.

Not being a specialist of ICC profiles, I have no idea what that means
in practice. What data structure is it?

> + */
> +AV_FRAME_DATA_ICC_PROFILE,
>  };
>  
>  enum AVActiveFormatDescription {
> diff --git a/libavutil/version.h b/libavutil/version.h
> index d4f9335a2f..35987e7b50 100644
> --- a/libavutil/version.h
> +++ b/libavutil/version.h
> @@ -80,7 +80,7 @@
>  
>  
>  #define LIBAVUTIL_VERSION_MAJOR  55
> -#define LIBAVUTIL_VERSION_MINOR  68
> +#define LIBAVUTIL_VERSION_MINOR  69
>  #define LIBAVUTIL_VERSION_MICRO 100
>  
>  #define LIBAVUTIL_VERSION_INT   AV_VERSION_INT(LIBAVUTIL_VERSION_MAJOR, \

Regards,

-- 
  Nicolas George




Re: [FFmpeg-devel] [PATCH] fate: add tests for some video source filters

2017-07-20 Thread Nicolas George
Le duodi 2 thermidor, an CCXXV, Michael Niedermayer a écrit :
> breaks on mips (probably big endian issue)

Breaks on MIPS, or detects that something is already broken in MIPS and
nobody noticed until now?

Could you perhaps re-run the test with V=1, then re-run the command line
with -f png, to see how the output is broken?

If the output of the filter is currently broken, then I think we should
apply this patch, because it would raise the incentive for fixing it.

Regards,

-- 
  Nicolas George




Re: [FFmpeg-devel] [PATCH V4] examples/hw_decode: Add a HWAccel decoding example.

2017-07-20 Thread Mark Thompson
On 20/07/17 08:54, Jun Zhao wrote:
> V4: fix potential memory leak issue base on Steven Liu's review.
> V3: re-work to support the other hwaccels, rename from vaapi_dec.c to 
> hw_decode.c.
>just test with vaapi, dxva2|d3d11va|videotoolbox might work as well.
> V2: re-work with new hw decoding API.
> 
> From 718f92731d308423e5a09d0384f7bf2361f5a307 Mon Sep 17 00:00:00 2001
> From: Jun Zhao 
> Date: Thu, 20 Jul 2017 00:58:56 -0400
> Subject: [PATCH V4] examples/hw_decode: Add a HWAccel decoding example.
> 
> Add a HWAccel decoding example.
> 
> Just test with vaapi, dxva2|d3d11va|videotoolbox might work as well.
> 
> Signed-off-by: Liu, Kaixuan 
> Signed-off-by: Jun Zhao 
> ---
>  doc/examples/hw_decode.c | 249 
> +++
>  1 file changed, 249 insertions(+)
>  create mode 100644 doc/examples/hw_decode.c
> 
> diff --git a/doc/examples/hw_decode.c b/doc/examples/hw_decode.c
> new file mode 100644
> index 00..0e77ee877f
> --- /dev/null
> +++ b/doc/examples/hw_decode.c
> @@ -0,0 +1,249 @@
> +/*
> + * Copyright (c) 2017 Jun Zhao
> + * Copyright (c) 2017 Kaixuan Liu
> + *
> + * HW Acceleration API (video decoding) decode sample
> + *
> + * This file is part of FFmpeg.
> + *
> + * FFmpeg is free software; you can redistribute it and/or
> + * modify it under the terms of the GNU Lesser General Public
> + * License as published by the Free Software Foundation; either
> + * version 2.1 of the License, or (at your option) any later version.
> + *
> + * FFmpeg is distributed in the hope that it will be useful,
> + * but WITHOUT ANY WARRANTY; without even the implied warranty of
> + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
> + * Lesser General Public License for more details.
> + *
> + * You should have received a copy of the GNU Lesser General Public
> + * License along with FFmpeg; if not, write to the Free Software
> + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 
> USA
> + */
> +
> +/**
> + * @file
> + * HW-Accelerated decoding example.
> + *
> + * @example hw_decode.c
> + * This example shows how to do HW-accelerated decoding with output
> + * frames from the HW video surfaces.
> + */
> +
> +#include <stdio.h>
> +
> +#include <libavcodec/avcodec.h>
> +#include <libavformat/avformat.h>
> +#include <libavutil/pixdesc.h>
> +#include <libavutil/hwcontext.h>
> +#include <libavutil/opt.h>
> +#include <libavutil/avassert.h>
> +#include <libavutil/imgutils.h>
> +
> +static AVBufferRef *hw_device_ctx = NULL;
> +static enum AVPixelFormat hw_pix_fmt;
> +FILE *output_file = NULL;
> +
> +static enum AVPixelFormat hw_pix_fmts[] = {
> +[AV_HWDEVICE_TYPE_CUDA] = AV_PIX_FMT_CUDA,
> +[AV_HWDEVICE_TYPE_DXVA2]= AV_PIX_FMT_DXVA2_VLD,
> +[AV_HWDEVICE_TYPE_D3D11VA]  = AV_PIX_FMT_D3D11VA_VLD,

AV_PIX_FMT_D3D11 (this was changed recently).

> +[AV_HWDEVICE_TYPE_QSV]  = AV_PIX_FMT_QSV,
> +[AV_HWDEVICE_TYPE_VAAPI]= AV_PIX_FMT_VAAPI,
> +[AV_HWDEVICE_TYPE_VDPAU]= AV_PIX_FMT_VDPAU,
> +[AV_HWDEVICE_TYPE_VIDEOTOOLBOX] = AV_PIX_FMT_VIDEOTOOLBOX,
> +};

Neither CUDA nor QSV will work here, as they are dummy hwaccels used on other 
decoders.  Just leave the five real hwaccels.

> +
> +static enum AVPixelFormat find_fmt_by_hw_type(const enum AVHWDeviceType type)
> +{
> +if (type >= 0 && type < FF_ARRAY_ELEMS(hw_pix_fmts))
> +return hw_pix_fmts[type];
> +else
> +return AV_PIX_FMT_NONE;
> +}
> +
> +static int hw_decoder_init(AVCodecContext *ctx, const enum AVHWDeviceType 
> type)
> +{
> +int err = 0;
> +
> +    if ((err = av_hwdevice_ctx_create(&hw_device_ctx, type,
> +                                      NULL, NULL, 0)) < 0) {
> +fprintf(stderr, "Failed to create specified HW device.\n");
> +return err;
> +}
> +ctx->hw_device_ctx = av_buffer_ref(hw_device_ctx);
> +
> +return err;
> +}
> +
> +static enum AVPixelFormat get_hw_format(AVCodecContext *ctx,
> +const enum AVPixelFormat *pix_fmts)
> +{
> +const enum AVPixelFormat *p;
> +
> +for (p = pix_fmts; *p != -1; p++) {
> +if (*p == hw_pix_fmt)
> +return *p;
> +}
> +
> +fprintf(stderr, "Failed to get HW surface format.\n");
> +return AV_PIX_FMT_NONE;
> +}
> +
> +static int decode_write(AVCodecContext *avctx, AVPacket *packet)
> +{
> +AVFrame *frame = NULL, *sw_frame = NULL;
> +AVFrame *tmp_frame = NULL;
> +uint8_t *buffer = NULL;
> +int size;
> +int ret = 0;
> +
> +ret = avcodec_send_packet(avctx, packet);
> +if (ret < 0) {
> +fprintf(stderr, "Error during decoding\n");
> +return ret;
> +}
> +
> +while (ret >= 0) {
> +if (!(frame = av_frame_alloc()) || !(sw_frame = av_frame_alloc())) {
> +fprintf(stderr, "Can not alloc frame\n");
> +ret = AVERROR(ENOMEM);
> +goto fail;
> +}
> +
> +ret = avcodec_receive_frame(avctx, frame);
> +if (ret == AVERROR(EAGAIN) || ret == 

Re: [FFmpeg-devel] [PATCH] support set words' space

2017-07-20 Thread Michael Niedermayer
On Thu, Jul 20, 2017 at 10:02:25AM +0800, efren yang wrote:
>  vf_drawtext.c |3 +++
>  1 file changed, 3 insertions(+)
> 369e0f0b0e08f3417e2e8206595cea4455561dae  0001-support-set-words-space.patch
> From e02dd9859dee662d2e34428541058520495655b4 Mon Sep 17 00:00:00 2001
> From: efrenyang 
> Date: Wed, 19 Jul 2017 19:01:13 +0800
> Subject: [PATCH] support set words' space
> 
> Signed-off-by: efrenyang 
> ---
>  libavfilter/vf_drawtext.c | 3 +++
>  1 file changed, 3 insertions(+)
> 
> diff --git a/libavfilter/vf_drawtext.c b/libavfilter/vf_drawtext.c
> index f6151443bb..17eeb85425 100644
> --- a/libavfilter/vf_drawtext.c
> +++ b/libavfilter/vf_drawtext.c
> @@ -162,6 +162,7 @@ typedef struct DrawTextContext {
>  unsigned int default_fontsize;  ///< default font size to use
>  
>  int line_spacing;   ///< lines spacing in pixels
> +int word_spacing;   ///< word spacing in pixels
>  short int draw_box; ///< draw box around text - true or false
>  int boxborderw; ///< box border width
>  int use_kerning;///< font kerning is used - true/false
> @@ -214,6 +215,7 @@ static const AVOption drawtext_options[]= {
>  {"box", "set box",  OFFSET(draw_box),   
> AV_OPT_TYPE_BOOL,   {.i64=0}, 0,1   , FLAGS},
>  {"boxborderw",  "set box border width", OFFSET(boxborderw), 
> AV_OPT_TYPE_INT,{.i64=0}, INT_MIN,  INT_MAX , FLAGS},
>  {"line_spacing",  "set line spacing in pixels", OFFSET(line_spacing),   
> AV_OPT_TYPE_INT,{.i64=0}, INT_MIN,  INT_MAX,FLAGS},
> +{ "word_spacing",  "set text spacing in pixels", OFFSET(word_spacing),   
> AV_OPT_TYPE_INT,{ .i64 = 0 }, INT_MIN,  INT_MAX,FLAGS },
>  {"fontsize","set font size",OFFSET(fontsize_expr),  
> AV_OPT_TYPE_STRING, {.str=NULL},  CHAR_MIN, CHAR_MAX , FLAGS},
>  {"x",   "set x expression", OFFSET(x_expr), 
> AV_OPT_TYPE_STRING, {.str="0"},   CHAR_MIN, CHAR_MAX, FLAGS},
>  {"y",   "set y expression", OFFSET(y_expr), 
> AV_OPT_TYPE_STRING, {.str="0"},   CHAR_MIN, CHAR_MAX, FLAGS},
> @@ -1374,6 +1376,7 @@ static int draw_text(AVFilterContext *ctx, AVFrame 
> *frame,
>  s->positions[i].y = y - glyph->bitmap_top + y_max;
>  if (code == '\t') x  = (x / s->tabsize + 1)*s->tabsize;
>  else  x += glyph->advance;
> +x += s->word_spacing;

This needs some kind of overflow check or some more restrictive
limit than INT_MAX/MIN

[...]
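
Two possible ways to address that (illustrative sketches only, not part of the submitted patch): clamp the option range so the per-glyph addition cannot overflow, or saturate the running x position with av_sat_add32() from libavutil/common.h:

/* option table: restrict the range instead of INT_MIN..INT_MAX */
{"word_spacing", "set word spacing in pixels", OFFSET(word_spacing),
    AV_OPT_TYPE_INT, {.i64 = 0}, 0, INT16_MAX, FLAGS},

/* or saturate the accumulated position in draw_text() */
x = av_sat_add32(x, s->word_spacing);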

-- 
Michael GnuPG fingerprint: 9FF2128B147EF6730BADF133611EC787040B0FAB

I have often repented speaking, but never of holding my tongue.
-- Xenocrates




Re: [FFmpeg-devel] [PATCH] avcodec/h264: Declare the local variable decode_chroma as const.

2017-07-20 Thread Michael Niedermayer
On Thu, Jul 20, 2017 at 10:31:19AM -0700, Wan-Teh Chang wrote:
> ff_h264_decode_mb_cabac() and ff_h264_decode_mb_cavlc() are very long
> functions. Declaring decode_chroma as const makes it clear the variable
> doesn't change after initialization.
> 
> Signed-off-by: Wan-Teh Chang 
> ---
>  libavcodec/h264_cabac.c | 4 ++--
>  libavcodec/h264_cavlc.c | 4 ++--
>  2 files changed, 4 insertions(+), 4 deletions(-)

applied

thx

[...]

-- 
Michael GnuPG fingerprint: 9FF2128B147EF6730BADF133611EC787040B0FAB

I do not agree with what you have to say, but I'll defend to the death your
right to say it. -- Voltaire




Re: [FFmpeg-devel] [PATCH] avcodec/hevcdec: hevc_await_progress: declare |y| only if used.

2017-07-20 Thread Michael Niedermayer
On Thu, Jul 20, 2017 at 11:39:11AM -0700, Wan-Teh Chang wrote:
> hevc_await_progress() uses the variable |y| only inside the "if" block.
> So |y| only needs to be declared and initialized in that block.
> 
> Signed-off-by: Wan-Teh Chang 
> ---
>  libavcodec/hevcdec.c | 5 +++--
>  1 file changed, 3 insertions(+), 2 deletions(-)

will apply

thanks

[...]
-- 
Michael GnuPG fingerprint: 9FF2128B147EF6730BADF133611EC787040B0FAB

Complexity theory is the science of finding the exact solution to an
approximation. Benchmarking OTOH is finding an approximation of the exact




Re: [FFmpeg-devel] [PATCH] avcodec/x86/cavsdsp: Delete #include "libavcodec/x86/idctdsp.h".

2017-07-20 Thread Michael Niedermayer
On Thu, Jul 20, 2017 at 10:44:03AM -0700, Wan-Teh Chang wrote:
> This file already has #include "idctdsp.h", which is resolved to the
> idctdsp.h header in the directory where this file resides by compilers.
> Two other files in this directory, libavcodec/x86/idctdsp_init.c and
> libavcodec/x86/xvididct_init.c, also rely on #include "idctdsp.h"
> working this way.
> 
> Signed-off-by: Wan-Teh Chang 
> ---
>  libavcodec/x86/cavsdsp.c | 1 -
>  1 file changed, 1 deletion(-)

will apply unless someone else is faster

[...]
-- 
Michael GnuPG fingerprint: 9FF2128B147EF6730BADF133611EC787040B0FAB

Does the universe only have a finite lifespan? No, its going to go on
forever, its just that you wont like living in it. -- Hiranya Peiri




Re: [FFmpeg-devel] [PATCH] Add FITS Encoder

2017-07-20 Thread Carl Eugen Hoyos
2017-07-20 21:46 GMT+02:00 Paras Chadha :

> +    case AV_PIX_FMT_RGB24:
> +    case AV_PIX_FMT_RGBA:
> +        for (k = 0; k < naxis3; k++) {
> +            for (i = 0; i < avctx->height; i++) {
> +                ptr = p->data[0] + (avctx->height - i - 1) * p->linesize[0] + k;
> +                for (j = 0; j < avctx->width; j++) {
> +                    bytestream_put_byte(&bytestream, ptr[0]);
> +                    ptr += naxis3;

(Sorry if this is nonsense:)
Shouldn't you be using PIX_FMT_GBRP and PIX_FMT_GBRAP?

Carl Eugen
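
For context, a sketch of the difference: with the packed formats the encoder has to stride through interleaved components, while the planar formats give one contiguous plane per component (note FFmpeg's GBRP plane order is G, B, R):

/* packed RGB24/RGBA: component k of pixel (i, j) */
v = p->data[0][i * p->linesize[0] + naxis3 * j + k];

/* planar GBRP/GBRAP: plane k, pixel (i, j) */
v = p->data[k][i * p->linesize[k] + j];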


Re: [FFmpeg-devel] [PATCH][RFC] JPEG2000: SSE optimisation for DWT decoding

2017-07-20 Thread Carl Eugen Hoyos
2017-07-20 18:07 GMT+02:00 maxime taisant :
> From: Maxime Taisant 

> I am currently working on SSE optimisations for the dwt functions used to 
> decode JPEG2000.
> For the moment, I have only managed to produce a SSE-optimized version of the 
> sr_1d97_float function

> (with relatively good results).

Please add some numbers.

> +   if (ARCH_X86)
> +ff_sr_1d97_float_sse(line, mh, mh + lh);
> +   else
> +sr_1d97_float(line, mh, mh + lh);

This is not correct, look at ff_jpeg2000dsp_init_x86() and Jpeg2000DSPContext
to see what has to be done.

Others will comment on the asm code.

Thank you, Carl Eugen
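
For reference, the pattern Carl Eugen points at looks roughly like this: the scalar code calls through a function pointer in the DSP context, and the x86 init overrides it at runtime. Names and the member signature below are illustrative sketches, not the actual Jpeg2000DSPContext layout:

/* generic init (libavcodec/jpeg2000dsp.c) */
void ff_jpeg2000dsp_init(Jpeg2000DSPContext *c)
{
    c->sr_1d97_float = sr_1d97_float_c;   /* C fallback; hypothetical member */
    if (ARCH_X86)
        ff_jpeg2000dsp_init_x86(c);
}

/* x86 init (libavcodec/x86/jpeg2000dsp_init.c) */
void ff_jpeg2000dsp_init_x86(Jpeg2000DSPContext *c)
{
    int cpu_flags = av_get_cpu_flags();
    if (EXTERNAL_SSE(cpu_flags))
        c->sr_1d97_float = ff_sr_1d97_float_sse;
}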


Re: [FFmpeg-devel] [PATCH] Add YUV444 32bit floating point pixel format

2017-07-20 Thread Carl Eugen Hoyos
2017-07-20 17:46 GMT+02:00 Vittorio Giovara :

> +[AV_PIX_FMT_YUV444F32LE]  = { 0, 0 },
> +[AV_PIX_FMT_YUV444F32BE]  = { 0, 0 },
> +[AV_PIX_FMT_YUVA444F32LE] = { 0, 0 },
> +[AV_PIX_FMT_YUVA444F32BE] = { 0, 0 },

Feel free to ignore but imo adding (non-hw) pix_fmts that can neither
be read nor written by default FFmpeg is not ok.

Carl Eugen


Re: [FFmpeg-devel] [PATCH] fate: add vf_overlay test for main source with alpha channel

2017-07-20 Thread Michael Niedermayer
On Thu, Jul 20, 2017 at 05:38:18PM +0200, Peter Große wrote:
> On Thu, 20 Jul 2017 17:19:36 +0200
> Tobias Rapp  wrote:
> 
> > > When I re-add --disable-optimizations --enable-mmx --disable-stripping to 
> > > my
> > > configure command line, the test succeeds.
> > >
> > > Any further comments to the patch and test case?  
> > 
> > Maybe try adding -sws_flags +accurate_rnd+bitexact to CMD?
> 
> I tried that, but that didn't help.

Make sure these flags actually get passed into the swscale instance being used.
If they do, please open a ticket. I probably won't find the time
to look into this in the near future, but when I or someone else works
on sws the next time, being able to quickly find all issues affecting
it should help the "time per bug".

thx

[...]

-- 
Michael GnuPG fingerprint: 9FF2128B147EF6730BADF133611EC787040B0FAB

When the tyrant has disposed of foreign enemies by conquest or treaty, and
there is nothing more to fear from them, then he is always stirring up
some war or other, in order that the people may require a leader. -- Plato




[FFmpeg-devel] [PATCH] pixdesc: Explicitly handle invalid arguments to av_find_best_pix_fmt_of_2()

2017-07-20 Thread Mark Thompson
---
On 20/07/17 01:33, Michael Niedermayer wrote:
> Hi
> 
> On Tue, Jul 18, 2017 at 11:01:01PM +, Mark Thompson wrote:
>> ffmpeg | branch: master | Mark Thompson  | Thu Jul  6 
>> 22:50:35 2017 +0100| [8a442d7a8a687a469ca502a18a0c68f5302b15e0] | committer: 
>> Mark Thompson
>>
>> pixdesc: Improve scoring for opaque/unknown pixel formats
>>
>> Hardware pixel formats do not tell you anything about their actual
>> contents, but should still score higher than formats with completely
>> unknown properties, which in turn should score higher than invalid
>> formats.
>>
>> Do not return an AVERROR code as a score.
>>
>> Fixes a hang in libavfilter where format negotiation gets stuck in a
>> loop because AV_PIX_FMT_NONE scores more highly than all other
>> possibilities.
>>
>>> http://git.videolan.org/gitweb.cgi/ffmpeg.git/?a=commit;h=8a442d7a8a687a469ca502a18a0c68f5302b15e0
>> ---
>>
>>  libavutil/pixdesc.c | 16 
>>  1 file changed, 12 insertions(+), 4 deletions(-)
> 
> This still breaks
> valgrind ./ffmpeg_g -i ~/videos/matrixbench_mpeg2.mpg -y  -t 4 -acodec 
> libmp3lame http://127.0.0.1:8192/feed1.ffm
> [mpeg @ 0x24823f20] start time for stream 0 is not set in 
> estimate_timings_from_pts
> Input #0, mpeg, from '/home/michael/videos/matrixbench_mpeg2.mpg':
>   Duration: 00:03:07.66, start: 0.22, bitrate: 5633 kb/s
> Stream #0:0[0x1bf]: Data: dvd_nav_packet
> Stream #0:1[0x1e0]: Video: mpeg2video (Main), yuv420p(tv, 
> bt470bg/bt470m/bt470m, bottom first), 720x576 [SAR 16:15 DAR 4:3], 25 fps, 25 
> tbr, 90k tbn, 50 tbc
> Stream #0:2[0x1c0]: Audio: mp2, 48000 Hz, stereo, s16p, 384 kb/s
> ==17852== Invalid read of size 1
> ==17852==at 0x104871B: av_find_best_pix_fmt_of_2 (in ffmpeg/ffmpeg_g)
> ==17852==by 0x4BD23A: choose_pixel_fmt (in ffmpeg/ffmpeg_g)
> ==17852==by 0x4BBB2C: open_output_file (in ffmpeg/ffmpeg_g)
> ==17852==by 0x4BC696: ffmpeg_parse_options (in ffmpeg/ffmpeg_g)
> ==17852==by 0x4A8A7D: main (in ffmpeg/ffmpeg_g)
> ==17852==  Address 0x9 is not stack'd, malloc'd or (recently) free'd
> ==17852==
> ==17852==
> ==17852== Process terminating with default action of signal 11 (SIGSEGV)
> ==17852==  Access not within mapped region at address 0x9
> ==17852==at 0x104871B: av_find_best_pix_fmt_of_2 (in ffmpeg/ffmpeg_g)
> ==17852==by 0x4BD23A: choose_pixel_fmt (in ffmpeg/ffmpeg_g)
> ==17852==by 0x4BBB2C: open_output_file (in ffmpeg/ffmpeg_g)
> ==17852==by 0x4BC696: ffmpeg_parse_options (in ffmpeg/ffmpeg_g)
> ==17852==by 0x4A8A7D: main (in ffmpeg/ffmpeg_g)
> ==17852==  If you believe this happened as a result of a stack
> ==17852==  overflow in your program's main thread (unlikely but
> ==17852==  possible), you can try to increase the size of the
> ==17852==  main thread stack using the --main-stacksize= flag.
> ==17852==  The main thread stack size used in this run was 8388608.
> 
> 
> the receiver side of the connection can be setup with
> valgrind ./ffserver_g -f ~/videos/ffserver.conf
> 
> ffserver.conf attached
> 
> [...]
> 

Right - so someone does call find_best() with an invalid source format, which 
will give the same score to all possibilities and then barf if one of them is 
invalid.

This change makes the invalid format handling more explicit, and fixes your 
case.

Thanks,

- Mark


 libavutil/pixdesc.c | 32 +++-
 1 file changed, 19 insertions(+), 13 deletions(-)

diff --git a/libavutil/pixdesc.c b/libavutil/pixdesc.c
index 1983ce9ef5..a9dd11a498 100644
--- a/libavutil/pixdesc.c
+++ b/libavutil/pixdesc.c
@@ -2633,21 +2633,27 @@ enum AVPixelFormat av_find_best_pix_fmt_of_2(enum AVPixelFormat dst_pix_fmt1, en
     const AVPixFmtDescriptor *desc2 = av_pix_fmt_desc_get(dst_pix_fmt2);
     int score1, score2;
 
-    loss_mask= loss_ptr?~*loss_ptr:~0; /* use loss mask if provided */
-    if(!has_alpha)
-        loss_mask &= ~FF_LOSS_ALPHA;
-
-    score1 = get_pix_fmt_score(dst_pix_fmt1, src_pix_fmt, &loss1, loss_mask);
-    score2 = get_pix_fmt_score(dst_pix_fmt2, src_pix_fmt, &loss2, loss_mask);
-
-    if (score1 == score2) {
-        if(av_get_padded_bits_per_pixel(desc2) != av_get_padded_bits_per_pixel(desc1)) {
-            dst_pix_fmt = av_get_padded_bits_per_pixel(desc2) < av_get_padded_bits_per_pixel(desc1) ? dst_pix_fmt2 : dst_pix_fmt1;
+    if (!desc1)
+        dst_pix_fmt = dst_pix_fmt2;
+    else if (!desc2)
+        dst_pix_fmt = dst_pix_fmt1;
+    else {
+        loss_mask= loss_ptr?~*loss_ptr:~0; /* use loss mask if provided */
+        if(!has_alpha)
+            loss_mask &= ~FF_LOSS_ALPHA;
+
+        score1 = get_pix_fmt_score(dst_pix_fmt1, src_pix_fmt, &loss1, loss_mask);
+        score2 = get_pix_fmt_score(dst_pix_fmt2, src_pix_fmt, &loss2, loss_mask);
+
+        if (score1 == score2) {
+            if(av_get_padded_bits_per_pixel(desc2) != av_get_padded_bits_per_pixel(desc1)) {
+                dst_pix_fmt = av_get_padded_bits_per_pixel(desc2) < av_get_padded_bits_per_pixel(desc1) ?
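
For reference, a typical caller of av_find_best_pix_fmt_of_2() (sketch; src_fmt is whatever the stream reports, possibly invalid) passes a loss pointer and checks the result instead of assuming both descriptors exist:

int loss = ~0;  /* accept any loss initially */
enum AVPixelFormat best = av_find_best_pix_fmt_of_2(AV_PIX_FMT_YUV420P,
                                                    AV_PIX_FMT_NV12,
                                                    src_fmt, 0, &loss);
if (best == AV_PIX_FMT_NONE) {
    /* neither candidate was usable for this source format */
}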

[FFmpeg-devel] [PATCH 2/2] pngdec: decode and expose iCCP chunks as side data

2017-07-20 Thread Rostislav Pehlivanov
Signed-off-by: Rostislav Pehlivanov 
---
 libavcodec/pngdec.c | 45 +
 1 file changed, 45 insertions(+)

diff --git a/libavcodec/pngdec.c b/libavcodec/pngdec.c
index 083f61f4f8..64811c6fc3 100644
--- a/libavcodec/pngdec.c
+++ b/libavcodec/pngdec.c
@@ -836,6 +836,46 @@ static int decode_trns_chunk(AVCodecContext *avctx, PNGDecContext *s,
     return 0;
 }
 
+static int decode_iccp_chunk(PNGDecContext *s, uint32_t length, AVFrame *f)
+{
+    int ret, cnt = 0;
+    uint8_t *data, profile_name[82];
+    AVBPrint bp;
+    AVFrameSideData *sd;
+
+    while ((profile_name[cnt++] = bytestream2_get_byte(&s->gb)) && cnt < 81);
+    if (cnt > 80) {
+        av_log(s->avctx, AV_LOG_ERROR, "iCCP with invalid name!\n");
+        return AVERROR_INVALIDDATA;
+    }
+
+    length -= cnt;
+
+    if (bytestream2_get_byte(&s->gb) != 0) {
+        av_log(s->avctx, AV_LOG_ERROR, "iCCP with invalid compression!\n");
+        return AVERROR_INVALIDDATA;
+    }
+
+    length -= 1;
+
+    if ((ret = decode_zbuf(&bp, s->gb.buffer, s->gb.buffer + length) < 0))
+        return ret;
+
+    av_bprint_finalize(&bp, (char **)&data);
+
+    if (!(sd = av_frame_new_side_data(f, AV_FRAME_DATA_ICC_PROFILE, bp.len)))
+        return AVERROR(ENOMEM);
+
+    av_dict_set(&sd->metadata, "name", profile_name, 0);
+    memcpy(sd->data, data, bp.len);
+    av_free(data);
+
+    /* ICC compressed data and CRC */
+    bytestream2_skip(&s->gb, length + 4);
+
+    return 0;
+}
+
 static void handle_small_bpp(PNGDecContext *s, AVFrame *p)
 {
     if (s->bits_per_pixel == 1 && s->color_type == PNG_COLOR_TYPE_PALETTE) {
@@ -1239,6 +1279,11 @@ static int decode_frame_common(AVCodecContext *avctx, PNGDecContext *s,
             bytestream2_skip(&s->gb, 4); /* crc */
             break;
         }
+        case MKTAG('i', 'C', 'C', 'P'): {
+            if (decode_iccp_chunk(s, length, p) < 0)
+                goto fail;
+            break;
+        }
         case MKTAG('I', 'E', 'N', 'D'):
             if (!(s->pic_state & PNG_ALLIMAGE))
                 av_log(avctx, AV_LOG_ERROR, "IEND without all image\n");
-- 
2.14.0.rc0.284.gd933b75aa4
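
For reference, the chunk layout the parser above walks through is defined by the PNG specification:

/* iCCP chunk body:
 *   profile name   1-79 bytes, Latin-1, terminated by a NUL byte
 *   compression    1 byte, must be 0 (zlib/deflate)
 *   profile        zlib-compressed ICC profile, up to the end of the chunk
 * followed by the usual 4-byte chunk CRC, which decode_iccp_chunk() skips.
 */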



[FFmpeg-devel] [PATCH 1/2] lavu/frame: add new side data type for ICC profiles

2017-07-20 Thread Rostislav Pehlivanov
Many image formats support embedding of ICC profiles directly in
their bitstreams. Add a new side data type to allow exposing them to
API users.

Signed-off-by: Rostislav Pehlivanov 
---
 libavutil/frame.h   | 6 ++
 libavutil/version.h | 2 +-
 2 files changed, 7 insertions(+), 1 deletion(-)

diff --git a/libavutil/frame.h b/libavutil/frame.h
index 26261d7e40..ee899d844d 100644
--- a/libavutil/frame.h
+++ b/libavutil/frame.h
@@ -134,6 +134,12 @@ enum AVFrameSideDataType {
  * the form of the AVContentLightMetadata struct.
  */
 AV_FRAME_DATA_CONTENT_LIGHT_LEVEL,
+
+/**
+ * The data contains an ICC profile with an optional name defined in the
+ * metadata entry.
+ */
+AV_FRAME_DATA_ICC_PROFILE,
 };
 
 enum AVActiveFormatDescription {
diff --git a/libavutil/version.h b/libavutil/version.h
index d4f9335a2f..35987e7b50 100644
--- a/libavutil/version.h
+++ b/libavutil/version.h
@@ -80,7 +80,7 @@
 
 
 #define LIBAVUTIL_VERSION_MAJOR  55
-#define LIBAVUTIL_VERSION_MINOR  68
+#define LIBAVUTIL_VERSION_MINOR  69
 #define LIBAVUTIL_VERSION_MICRO 100
 
 #define LIBAVUTIL_VERSION_INT   AV_VERSION_INT(LIBAVUTIL_VERSION_MAJOR, \
-- 
2.14.0.rc0.284.gd933b75aa4



Re: [FFmpeg-devel] [PATCH] fate: add tests for some video source filters

2017-07-20 Thread Michael Niedermayer
On Thu, Jul 20, 2017 at 11:07:12AM +0200, Tobias Rapp wrote:
> Adds FATE tests for the previously untested allrgb, allyuv, rgbtestsrc,
> smptebars, smptehdbars and yuvtestsrc filters.
> 
> Signed-off-by: Tobias Rapp 
> ---
>  tests/fate/filter-video.mak| 21 +
>  tests/ref/fate/filter-allrgb   | 10 ++
>  tests/ref/fate/filter-allyuv   | 10 ++
>  tests/ref/fate/filter-rgbtestsrc   | 10 ++
>  tests/ref/fate/filter-smptebars| 10 ++
>  tests/ref/fate/filter-smptehdbars  | 10 ++
>  tests/ref/fate/filter-yuvtestsrc-yuv444p   | 10 ++
>  tests/ref/fate/filter-yuvtestsrc-yuv444p12 | 10 ++
>  8 files changed, 91 insertions(+)
>  create mode 100644 tests/ref/fate/filter-allrgb
>  create mode 100644 tests/ref/fate/filter-allyuv
>  create mode 100644 tests/ref/fate/filter-rgbtestsrc
>  create mode 100644 tests/ref/fate/filter-smptebars
>  create mode 100644 tests/ref/fate/filter-smptehdbars
>  create mode 100644 tests/ref/fate/filter-yuvtestsrc-yuv444p
>  create mode 100644 tests/ref/fate/filter-yuvtestsrc-yuv444p12

breaks on mips (probably big endian issue)

--- tests/ref/fate/filter-yuvtestsrc-yuv444p12   2017-07-20 22:22:52.083416873 
+0200
+++ tests/data/fate/filter-yuvtestsrc-yuv444p12 2017-07-20 22:42:07.043441205 
+0200
@@ -3,8 +3,8 @@
 #codec_id 0: rawvideo
 #dimensions 0: 320x240
 #sar 0: 1/1
-0,  0,  0,1,   460800, 0x3ec49be7
-0,  1,  1,1,   460800, 0x3ec49be7
-0,  2,  2,1,   460800, 0x3ec49be7
-0,  3,  3,1,   460800, 0x3ec49be7
-0,  4,  4,1,   460800, 0x3ec49be7
+0,  0,  0,1,   460800, 0xba079be7
+0,  1,  1,1,   460800, 0xba079be7
+0,  2,  2,1,   460800, 0xba079be7
+0,  3,  3,1,   460800, 0xba079be7
+0,  4,  4,1,   460800, 0xba079be7
Test filter-yuvtestsrc-yuv444p12 failed. Look at 
tests/data/fate/filter-yuvtestsrc-yuv444p12.err for details.
make: *** [fate-filter-yuvtestsrc-yuv444p12] Error 1
make: *** Waiting for unfinished jobs

[...]
-- 
Michael GnuPG fingerprint: 9FF2128B147EF6730BADF133611EC787040B0FAB

It is dangerous to be right in matters on which the established authorities
are wrong. -- Voltaire




Re: [FFmpeg-devel] [PATCH] fate: add tests for psnr and ssim filter

2017-07-20 Thread Michael Niedermayer
On Wed, Jul 19, 2017 at 05:23:07PM +0200, Tobias Rapp wrote:
> On 19.07.2017 17:06, Nicolas George wrote:
> >Le primidi 1er thermidor, an CCXXV, Tobias Rapp a écrit :
> >>Indeed they are remainders from previous edits. It seems that stripping any
> >>whitespace within the filter string is enough to ensure that it is passed as
> >>one argument token to ffmpeg. So fixed locally.
> >
> >I suppose you left the quotes, otherwise the semicolon separates
> >commands and the brackets are globbing patterns.
> 
> Indeed, see attached patch for reference (WIP as it still contains
> the CPUFLAGS work-around).
> 
> + -f null /dev/null
> >>BTW: Is it OK to redirect output to "/dev/null" here or does this introduce
> >>an unwanted platform dependency (i.e. blocks FATE from running on
> >>MSYS/Windows)?
> >
> >With -f null, you can put anything you want there. I usually put -, but
> >that is just a convenience to be able to change it to "-f fmt - | cmd".
> 
> OK.
> 
> Regards,
> Tobias

>  fate/filter-video.mak   |   16 ++
>  ref/fate/filter-refcmp-psnr-rgb |   45 
> 
>  ref/fate/filter-refcmp-psnr-yuv |   45 
> 
>  ref/fate/filter-refcmp-ssim-rgb |   30 ++
>  ref/fate/filter-refcmp-ssim-yuv |   30 ++
>  5 files changed, 166 insertions(+)
> ba25d086250183b279c65419343103872074dab8  
> 0001-fate-add-tests-for-psnr-and-ssim-filter.patch
> From 430a2c8cfb455cbd11af119987fffa0919f71cc5 Mon Sep 17 00:00:00 2001
> From: Tobias Rapp 
> Date: Tue, 11 Jul 2017 12:38:06 +0200
> Subject: [PATCH] fate: add tests for psnr and ssim filter
> 
> Signed-off-by: Tobias Rapp 

on x86-32:

--- tests/ref/fate/filter-refcmp-psnr-yuv   2017-07-20 22:05:14.887394601 
+0200
+++ tests/data/fate/filter-refcmp-psnr-yuv  2017-07-20 22:14:51.363406746 
+0200
@@ -3,43 +3,43 @@
 lavfi.psnr.psnr.y=24.67
 lavfi.psnr.mse.u=339.38
 lavfi.psnr.psnr.u=22.82
-lavfi.psnr.mse.v=705.41
+lavfi.psnr.mse.v=705.31
 lavfi.psnr.psnr.v=19.65
-lavfi.psnr.mse_avg=372.23
+lavfi.psnr.mse_avg=372.20
 lavfi.psnr.psnr_avg=22.42
 frame:1pts:1   pts_time:1
-lavfi.psnr.mse.y=236.74
+lavfi.psnr.mse.y=236.76
 lavfi.psnr.psnr.y=24.39
 lavfi.psnr.mse.u=416.17
 lavfi.psnr.psnr.u=21.94
-lavfi.psnr.mse.v=704.98
+lavfi.psnr.mse.v=704.97
 lavfi.psnr.psnr.v=19.65
 lavfi.psnr.mse_avg=398.66
 lavfi.psnr.psnr_avg=22.12
 frame:2pts:2   pts_time:2
-lavfi.psnr.mse.y=234.79
+lavfi.psnr.mse.y=234.83
 lavfi.psnr.psnr.y=24.42
 lavfi.psnr.mse.u=435.72
 lavfi.psnr.psnr.u=21.74
 lavfi.psnr.mse.v=699.60
 lavfi.psnr.psnr.v=19.68
-lavfi.psnr.mse_avg=401.23
+lavfi.psnr.mse_avg=401.24
 lavfi.psnr.psnr_avg=22.10
 frame:3pts:3   pts_time:3
-lavfi.psnr.mse.y=250.88
+lavfi.psnr.mse.y=250.91
 lavfi.psnr.psnr.y=24.14
 lavfi.psnr.mse.u=479.73
 lavfi.psnr.psnr.u=21.32
-lavfi.psnr.mse.v=707.55
+lavfi.psnr.mse.v=707.54
 lavfi.psnr.psnr.v=19.63
-lavfi.psnr.mse_avg=422.26
-lavfi.psnr.psnr_avg=21.88
+lavfi.psnr.mse_avg=422.27
+lavfi.psnr.psnr_avg=21.87
 frame:4pts:4   pts_time:4
-lavfi.psnr.mse.y=241.05
+lavfi.psnr.mse.y=241.04
 lavfi.psnr.psnr.y=24.31
 lavfi.psnr.mse.u=505.04
 lavfi.psnr.psnr.u=21.10
 lavfi.psnr.mse.v=716.00
 lavfi.psnr.psnr.v=19.58
-lavfi.psnr.mse_avg=425.79
+lavfi.psnr.mse_avg=425.78
 lavfi.psnr.psnr_avg=21.84
Test filter-refcmp-psnr-yuv failed. Look at 
tests/data/fate/filter-refcmp-psnr-yuv.err for details.
make: *** [fate-filter-refcmp-psnr-yuv] Error 1
make: *** Waiting for unfinished jobs
--- tests/ref/fate/filter-refcmp-psnr-rgb   2017-07-20 22:05:14.887394601 
+0200
+++ tests/data/fate/filter-refcmp-psnr-rgb  2017-07-20 22:14:51.367406746 
+0200
@@ -1,11 +1,11 @@
 frame:0pts:0   pts_time:0
-lavfi.psnr.mse.r=1381.80
+lavfi.psnr.mse.r=1381.38
 lavfi.psnr.psnr.r=16.73
 lavfi.psnr.mse.g=896.00
 lavfi.psnr.psnr.g=18.61
 lavfi.psnr.mse.b=277.38
 lavfi.psnr.psnr.b=23.70
-lavfi.psnr.mse_avg=851.73
+lavfi.psnr.mse_avg=851.59
 lavfi.psnr.psnr_avg=18.83
 frame:1pts:1   pts_time:1
 lavfi.psnr.mse.r=1380.37
@@ -17,9 +17,9 @@
 lavfi.psnr.mse_avg=930.67
 lavfi.psnr.psnr_avg=18.44
 frame:2pts:2   pts_time:2
-lavfi.psnr.mse.r=1403.20
+lavfi.psnr.mse.r=1403.21
 lavfi.psnr.psnr.r=16.66
-lavfi.psnr.mse.g=954.05
+lavfi.psnr.mse.g=954.04
 lavfi.psnr.psnr.g=18.34
 lavfi.psnr.mse.b=494.22
 lavfi.psnr.psnr.b=21.19
Test filter-refcmp-psnr-rgb failed. Look at 
tests/data/fate/filter-refcmp-psnr-rgb.err for details.
make: *** [fate-filter-refcmp-psnr-rgb] Error 1


[...]
-- 
Michael GnuPG fingerprint: 9FF2128B147EF6730BADF133611EC787040B0FAB

Many that live deserve death. And some that die deserve life. Can you give
it to them? Then do not be too eager to deal out death in judgement. For
even the very wise cannot see all ends. -- Gandalf


___
ffmpeg-devel 

Re: [FFmpeg-devel] [PATCH] Add FITS Encoder

2017-07-20 Thread James Almer
On 7/20/2017 4:46 PM, Paras Chadha wrote:
> Signed-off-by: Paras Chadha 
> ---
> 
> Made the changes suggested.
> 
>  doc/general.texi   |   2 +-
>  libavcodec/Makefile|   1 +
>  libavcodec/allcodecs.c |   2 +-
>  libavcodec/fitsenc.c   | 237 
> +
>  libavformat/img2enc.c  |   1 +
>  5 files changed, 241 insertions(+), 2 deletions(-)
>  create mode 100644 libavcodec/fitsenc.c
> 
> diff --git a/doc/general.texi b/doc/general.texi
> index 01402cb..1ea7984 100644
> --- a/doc/general.texi
> +++ b/doc/general.texi
> @@ -592,7 +592,7 @@ following image formats are supported:
>  @tab Digital Picture Exchange
>  @item EXR  @tab   @tab X
>  @tab OpenEXR
> -@item FITS @tab   @tab X
> +@item FITS @tab X @tab X
>  @tab Flexible Image Transport System
>  @item JPEG @tab X @tab X
>  @tab Progressive JPEG is not supported.
> diff --git a/libavcodec/Makefile b/libavcodec/Makefile
> index 5348ed9..9b1429f 100644
> --- a/libavcodec/Makefile
> +++ b/libavcodec/Makefile
> @@ -292,6 +292,7 @@ OBJS-$(CONFIG_FFV1_ENCODER)+= ffv1enc.o ffv1.o
>  OBJS-$(CONFIG_FFWAVESYNTH_DECODER) += ffwavesynth.o
>  OBJS-$(CONFIG_FIC_DECODER) += fic.o
>  OBJS-$(CONFIG_FITS_DECODER)+= fitsdec.o
> +OBJS-$(CONFIG_FITS_ENCODER)+= fitsenc.o
>  OBJS-$(CONFIG_FLAC_DECODER)+= flacdec.o flacdata.o flac.o
>  OBJS-$(CONFIG_FLAC_ENCODER)+= flacenc.o flacdata.o flac.o 
> vorbis_data.o
>  OBJS-$(CONFIG_FLASHSV_DECODER) += flashsv.o
> diff --git a/libavcodec/allcodecs.c b/libavcodec/allcodecs.c
> index 8678ac2..7fe66f4 100644
> --- a/libavcodec/allcodecs.c
> +++ b/libavcodec/allcodecs.c
> @@ -192,7 +192,7 @@ static void register_all(void)
>  REGISTER_ENCDEC (FFV1,  ffv1);
>  REGISTER_ENCDEC (FFVHUFF,   ffvhuff);
>  REGISTER_DECODER(FIC,   fic);
> -REGISTER_DECODER(FITS,  fits);
> +REGISTER_ENCDEC (FITS,  fits);
>  REGISTER_ENCDEC (FLASHSV,   flashsv);
>  REGISTER_ENCDEC (FLASHSV2,  flashsv2);
>  REGISTER_DECODER(FLIC,  flic);
> diff --git a/libavcodec/fitsenc.c b/libavcodec/fitsenc.c
> new file mode 100644
> index 000..cdb662b
> --- /dev/null
> +++ b/libavcodec/fitsenc.c
> @@ -0,0 +1,237 @@
> +/*
> + * FITS image encoder
> + * Copyright (c) 2017 Paras Chadha
> + *
> + * This file is part of FFmpeg.
> + *
> + * FFmpeg is free software; you can redistribute it and/or
> + * modify it under the terms of the GNU Lesser General Public
> + * License as published by the Free Software Foundation; either
> + * version 2.1 of the License, or (at your option) any later version.
> + *
> + * FFmpeg is distributed in the hope that it will be useful,
> + * but WITHOUT ANY WARRANTY; without even the implied warranty of
> + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
> + * Lesser General Public License for more details.
> + *
> + * You should have received a copy of the GNU Lesser General Public
> + * License along with FFmpeg; if not, write to the Free Software
> + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 
> USA
> + */
> +
> +/**
> + * @file
> + * FITS image encoder
> + *
> + * Specification: https://fits.gsfc.nasa.gov/fits_standard.html Version 3.0
> + *
> + * RGBA images are encoded as planes in RGBA order. So, NAXIS3 is 3 or 4 for 
> them.
> + * Also CTYPE3 = 'RGB ' is added to the header to distinguish them from 3d 
> images.
> + */
> +
> +#include "libavutil/intreadwrite.h"
> +#include "avcodec.h"
> +#include "bytestream.h"
> +#include "internal.h"
> +
> +typedef struct FITSContext {
> +int first_image;
> +} FITSContext;
> +
> +static av_cold int fits_encode_init(AVCodecContext *avctx)
> +{
> +FITSContext * fitsctx = avctx->priv_data;
> +fitsctx->first_image = 1;
> +return 0;
> +}
> +
> +static int write_keyword_value(uint8_t **bytestream, const char *keyword, 
> int value)
> +{
> +int len, ret;
> +uint8_t *header = *bytestream;
> +len = strlen(keyword);
> +
> +memset(header, ' ', 80);
> +memcpy(header, keyword, len);
> +header[8] = '=';
> +header[9] = ' ';
> +header += 10;
> +ret = snprintf(header, 70, "%d", value);
> +header[ret] = ' ';
> +
> +*bytestream += 80;
> +return 0;
> +}
> +
> +static int fits_encode_frame(AVCodecContext *avctx, AVPacket *pkt,
> +const AVFrame *pict, int *got_packet)
> +{
> +AVFrame * const p = (AVFrame *)pict;
> +FITSContext *fitsctx = avctx->priv_data;
> +uint8_t *bytestream, *bytestream_start, *ptr;
> +uint64_t header_size = 2880, data_size = 0, padded_data_size = 0;
> +int ret, bitpix, naxis, naxis3 = 1, bzero = 0, i, j, k, t, rgb = 0;
> +
> +switch (avctx->pix_fmt) {
> +case AV_PIX_FMT_GRAY8:
> +bitpix = 8;
> +

[FFmpeg-devel] [PATCH] Add FITS Encoder

2017-07-20 Thread Paras Chadha
Signed-off-by: Paras Chadha 
---

Made the changes suggested.

 doc/general.texi   |   2 +-
 libavcodec/Makefile|   1 +
 libavcodec/allcodecs.c |   2 +-
 libavcodec/fitsenc.c   | 237 +
 libavformat/img2enc.c  |   1 +
 5 files changed, 241 insertions(+), 2 deletions(-)
 create mode 100644 libavcodec/fitsenc.c

diff --git a/doc/general.texi b/doc/general.texi
index 01402cb..1ea7984 100644
--- a/doc/general.texi
+++ b/doc/general.texi
@@ -592,7 +592,7 @@ following image formats are supported:
 @tab Digital Picture Exchange
 @item EXR  @tab   @tab X
 @tab OpenEXR
-@item FITS @tab   @tab X
+@item FITS @tab X @tab X
 @tab Flexible Image Transport System
 @item JPEG @tab X @tab X
 @tab Progressive JPEG is not supported.
diff --git a/libavcodec/Makefile b/libavcodec/Makefile
index 5348ed9..9b1429f 100644
--- a/libavcodec/Makefile
+++ b/libavcodec/Makefile
@@ -292,6 +292,7 @@ OBJS-$(CONFIG_FFV1_ENCODER)+= ffv1enc.o ffv1.o
 OBJS-$(CONFIG_FFWAVESYNTH_DECODER) += ffwavesynth.o
 OBJS-$(CONFIG_FIC_DECODER) += fic.o
 OBJS-$(CONFIG_FITS_DECODER)+= fitsdec.o
+OBJS-$(CONFIG_FITS_ENCODER)+= fitsenc.o
 OBJS-$(CONFIG_FLAC_DECODER)+= flacdec.o flacdata.o flac.o
 OBJS-$(CONFIG_FLAC_ENCODER)+= flacenc.o flacdata.o flac.o 
vorbis_data.o
 OBJS-$(CONFIG_FLASHSV_DECODER) += flashsv.o
diff --git a/libavcodec/allcodecs.c b/libavcodec/allcodecs.c
index 8678ac2..7fe66f4 100644
--- a/libavcodec/allcodecs.c
+++ b/libavcodec/allcodecs.c
@@ -192,7 +192,7 @@ static void register_all(void)
 REGISTER_ENCDEC (FFV1,  ffv1);
 REGISTER_ENCDEC (FFVHUFF,   ffvhuff);
 REGISTER_DECODER(FIC,   fic);
-REGISTER_DECODER(FITS,  fits);
+REGISTER_ENCDEC (FITS,  fits);
 REGISTER_ENCDEC (FLASHSV,   flashsv);
 REGISTER_ENCDEC (FLASHSV2,  flashsv2);
 REGISTER_DECODER(FLIC,  flic);
diff --git a/libavcodec/fitsenc.c b/libavcodec/fitsenc.c
new file mode 100644
index 000..cdb662b
--- /dev/null
+++ b/libavcodec/fitsenc.c
@@ -0,0 +1,237 @@
+/*
+ * FITS image encoder
+ * Copyright (c) 2017 Paras Chadha
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * FITS image encoder
+ *
+ * Specification: https://fits.gsfc.nasa.gov/fits_standard.html Version 3.0
+ *
+ * RGBA images are encoded as planes in RGBA order. So, NAXIS3 is 3 or 4 for 
them.
+ * Also CTYPE3 = 'RGB ' is added to the header to distinguish them from 3d 
images.
+ */
+
+#include "libavutil/intreadwrite.h"
+#include "avcodec.h"
+#include "bytestream.h"
+#include "internal.h"
+
+typedef struct FITSContext {
+int first_image;
+} FITSContext;
+
+static av_cold int fits_encode_init(AVCodecContext *avctx)
+{
+FITSContext * fitsctx = avctx->priv_data;
+fitsctx->first_image = 1;
+return 0;
+}
+
+static int write_keyword_value(uint8_t **bytestream, const char *keyword, int 
value)
+{
+int len, ret;
+uint8_t *header = *bytestream;
+len = strlen(keyword);
+
+memset(header, ' ', 80);
+memcpy(header, keyword, len);
+header[8] = '=';
+header[9] = ' ';
+header += 10;
+ret = snprintf(header, 70, "%d", value);
+header[ret] = ' ';
+
+*bytestream += 80;
+return 0;
+}
+
+static int fits_encode_frame(AVCodecContext *avctx, AVPacket *pkt,
+const AVFrame *pict, int *got_packet)
+{
+AVFrame * const p = (AVFrame *)pict;
+FITSContext *fitsctx = avctx->priv_data;
+uint8_t *bytestream, *bytestream_start, *ptr;
+uint64_t header_size = 2880, data_size = 0, padded_data_size = 0;
+int ret, bitpix, naxis, naxis3 = 1, bzero = 0, i, j, k, t, rgb = 0;
+
+switch (avctx->pix_fmt) {
+case AV_PIX_FMT_GRAY8:
+bitpix = 8;
+naxis = 2;
+break;
+case AV_PIX_FMT_GRAY16BE:
+bitpix = 16;
+naxis = 2;
+bzero = 32768;
+break;
+case AV_PIX_FMT_RGB24:
+case AV_PIX_FMT_RGBA:
+bitpix = 8;
+naxis = 3;
+rgb = 1;
+if 

[FFmpeg-devel] [PATCH] avcodec/atrac3p: use float_dsp in ff_atrac3p_power_compensation

2017-07-20 Thread James Almer
Signed-off-by: James Almer 
---
Fate passes, so I guess the alignment requirements are met.

 libavcodec/atrac3plus.h|  5 +++--
 libavcodec/atrac3plusdec.c | 36 ++--
 libavcodec/atrac3plusdsp.c |  8 
 3 files changed, 25 insertions(+), 24 deletions(-)

diff --git a/libavcodec/atrac3plus.h b/libavcodec/atrac3plus.h
index a33c38a3ee..3c39e293c7 100644
--- a/libavcodec/atrac3plus.h
+++ b/libavcodec/atrac3plus.h
@@ -199,13 +199,14 @@ void ff_atrac3p_generate_tones(Atrac3pChanUnitCtx 
*ch_unit, AVFloatDSPContext *f
  * Perform power compensation aka noise dithering.
  *
  * @param[in]  ctx ptr to the channel context
+ * @param[in]  fdsppointer to float DSP context
  * @param[in]  ch_indexwhich channel to process
  * @param[in,out]  sp  ptr to channel spectrum to process
  * @param[in]  rng_index   indicates which RNG table to use
  * @param[in]  sb_num  which subband to process
  */
-void ff_atrac3p_power_compensation(Atrac3pChanUnitCtx *ctx, int ch_index,
-   float *sp, int rng_index, int sb_num);
+void ff_atrac3p_power_compensation(Atrac3pChanUnitCtx *ctx, AVFloatDSPContext 
*fdsp,
+   int ch_index, float *sp, int rng_index, int 
sb_num);
 
 /**
  * Regular IMDCT and windowing without overlapping,
diff --git a/libavcodec/atrac3plusdec.c b/libavcodec/atrac3plusdec.c
index 7a2ab3ef95..666d1a5c01 100644
--- a/libavcodec/atrac3plusdec.c
+++ b/libavcodec/atrac3plusdec.c
@@ -198,7 +198,7 @@ static av_cold int atrac3p_decode_init(AVCodecContext 
*avctx)
 return 0;
 }
 
-static void decode_residual_spectrum(Atrac3pChanUnitCtx *ctx,
+static void decode_residual_spectrum(ATRAC3PContext *ctx, Atrac3pChanUnitCtx 
*ch_unit,
  float out[2][ATRAC3P_FRAME_SAMPLES],
  int num_channels,
  AVCodecContext *avctx)
@@ -209,17 +209,17 @@ static void decode_residual_spectrum(Atrac3pChanUnitCtx 
*ctx,
 /* calculate RNG table index for each subband */
 int sb_RNG_index[ATRAC3P_SUBBANDS] = { 0 };
 
-if (ctx->mute_flag) {
+if (ch_unit->mute_flag) {
 for (ch = 0; ch < num_channels; ch++)
 memset(out[ch], 0, ATRAC3P_FRAME_SAMPLES * sizeof(*out[ch]));
 return;
 }
 
-for (qu = 0, RNG_index = 0; qu < ctx->used_quant_units; qu++)
-RNG_index += ctx->channels[0].qu_sf_idx[qu] +
- ctx->channels[1].qu_sf_idx[qu];
+for (qu = 0, RNG_index = 0; qu < ch_unit->used_quant_units; qu++)
+RNG_index += ch_unit->channels[0].qu_sf_idx[qu] +
+ ch_unit->channels[1].qu_sf_idx[qu];
 
-for (sb = 0; sb < ctx->num_coded_subbands; sb++, RNG_index += 128)
+for (sb = 0; sb < ch_unit->num_coded_subbands; sb++, RNG_index += 128)
 sb_RNG_index[sb] = RNG_index & 0x3FC;
 
 /* inverse quant and power compensation */
@@ -227,35 +227,35 @@ static void decode_residual_spectrum(Atrac3pChanUnitCtx 
*ctx,
 /* clear channel's residual spectrum */
 memset(out[ch], 0, ATRAC3P_FRAME_SAMPLES * sizeof(*out[ch]));
 
-for (qu = 0; qu < ctx->used_quant_units; qu++) {
-src= &ctx->channels[ch].spectrum[ff_atrac3p_qu_to_spec_pos[qu]];
+for (qu = 0; qu < ch_unit->used_quant_units; qu++) {
+src= &ch_unit->channels[ch].spectrum[ff_atrac3p_qu_to_spec_pos[qu]];
 dst= &out[ch][ff_atrac3p_qu_to_spec_pos[qu]];
 nspeclines = ff_atrac3p_qu_to_spec_pos[qu + 1] -
  ff_atrac3p_qu_to_spec_pos[qu];
 
-if (ctx->channels[ch].qu_wordlen[qu] > 0) {
-q = ff_atrac3p_sf_tab[ctx->channels[ch].qu_sf_idx[qu]] *
-ff_atrac3p_mant_tab[ctx->channels[ch].qu_wordlen[qu]];
+if (ch_unit->channels[ch].qu_wordlen[qu] > 0) {
+q = ff_atrac3p_sf_tab[ch_unit->channels[ch].qu_sf_idx[qu]] *
+ff_atrac3p_mant_tab[ch_unit->channels[ch].qu_wordlen[qu]];
 for (i = 0; i < nspeclines; i++)
 dst[i] = src[i] * q;
 }
 }
 
-for (sb = 0; sb < ctx->num_coded_subbands; sb++)
-ff_atrac3p_power_compensation(ctx, ch, &out[ch][0],
+for (sb = 0; sb < ch_unit->num_coded_subbands; sb++)
+ff_atrac3p_power_compensation(ch_unit, ctx->fdsp, ch, &out[ch][0],
   sb_RNG_index[sb], sb);
 }
 
-if (ctx->unit_type == CH_UNIT_STEREO) {
-for (sb = 0; sb < ctx->num_coded_subbands; sb++) {
-if (ctx->swap_channels[sb]) {
+if (ch_unit->unit_type == CH_UNIT_STEREO) {
+for (sb = 0; sb < ch_unit->num_coded_subbands; sb++) {
+if (ch_unit->swap_channels[sb]) {
 for (i = 0; i < ATRAC3P_SUBBAND_SAMPLES; i++)
 FFSWAP(float, out[0][sb * 

[FFmpeg-devel] Reading Frames in File Order

2017-07-20 Thread Jacob Trimble
I am writing a program that will use libavformat to demux media
segments. I will need to parse media segments that may appear in any
order and produce playable regions of content.  For example, think
YouTube streaming; there are regions of buffered content and seeking
can produce media segments that are out of order.

I need to have libavformat give me the frames in the order they are
given to it.  If I append a segment in the past, I expect to get
those frames next.  It is fine if the frames given to me are entirely
out of order, I can handle it.

I am implementing this by creating a custom AVIOContext that reads
from an internal buffer.  Since we can't parse the media segments, we
will just give the data to libavformat in the order we get it.  This
context doesn't support seeking.
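
For reference, this is roughly the wiring I mean (a minimal sketch; the
SegmentBuffer type and read_segment() are placeholders, not my actual code,
and the usual lavf/lavu headers are assumed):

    typedef struct SegmentBuffer { uint8_t *data; int size; int pos; } SegmentBuffer;

    static int read_segment(void *opaque, uint8_t *buf, int buf_size)
    {
        SegmentBuffer *b = opaque;
        int n = FFMIN(buf_size, b->size - b->pos);
        if (n <= 0)
            return AVERROR_EOF;               /* end of currently buffered data */
        memcpy(buf, b->data + b->pos, n);
        b->pos += n;
        return n;
    }

    /* ... given a SegmentBuffer *buffer filled elsewhere ... */
    AVIOContext *pb = avio_alloc_context(av_malloc(4096), 4096, 0 /* read-only */,
                                         buffer, read_segment, NULL, NULL /* no seek */);
    AVFormatContext *fmt = avformat_alloc_context();
    fmt->pb = pb;
    avformat_open_input(&fmt, NULL, NULL, NULL);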

Currently, if I append a segment in the past, I get a few errors.  I
have a trivial patch to fix a bug that causes problems.  This would
not change any behavior, just fix a bug in the reordering frame logic.

The bigger problem is that av_read_frame reads frames in DTS order.
So since the libavformat "playhead time" is in the future, I won't ever
see the frames.  If I try to seek, I get errors since my AVIOContext
doesn't support seeking.

What I want is a way to have av_read_frame (or a new method) return
the frames in the order they appear in the (virtual) file.

I would be happy to implement this feature.  I wanted to get the
developers' opinions about whether this would be allowed and how
best to implement it (i.e. a new flag somewhere or a new method
similar to av_read_frame).
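
As a pure strawman (not an existing API), the new method could be as simple
as:

    /* Hypothetical: like av_read_frame(), but returns packets strictly in
     * the order they appear in the (virtual) file, ignoring DTS. */
    int av_read_frame_fileorder(AVFormatContext *s, AVPacket *pkt);

or alternatively a flag on AVFormatContext that changes the ordering used by
av_read_frame.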

Thanks.
___
ffmpeg-devel mailing list
ffmpeg-devel@ffmpeg.org
http://ffmpeg.org/mailman/listinfo/ffmpeg-devel


[FFmpeg-devel] [PATCH] avcodec/hevcdec: hevc_await_progress: declare |y| only if used.

2017-07-20 Thread Wan-Teh Chang
hevc_await_progress() uses the variable |y| only inside the "if" block.
So |y| only needs to be declared and initialized in that block.

Signed-off-by: Wan-Teh Chang 
---
 libavcodec/hevcdec.c | 5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)

diff --git a/libavcodec/hevcdec.c b/libavcodec/hevcdec.c
index 55f51211c3..e084d75767 100644
--- a/libavcodec/hevcdec.c
+++ b/libavcodec/hevcdec.c
@@ -1684,10 +1684,11 @@ static void chroma_mc_bi(HEVCContext *s, uint8_t *dst0, 
ptrdiff_t dststride, AVF
 static void hevc_await_progress(HEVCContext *s, HEVCFrame *ref,
 const Mv *mv, int y0, int height)
 {
-int y = FFMAX(0, (mv->y >> 2) + y0 + height + 9);
+if (s->threads_type == FF_THREAD_FRAME ) {
+int y = FFMAX(0, (mv->y >> 2) + y0 + height + 9);
 
-if (s->threads_type == FF_THREAD_FRAME )
 ff_thread_await_progress(&ref->tf, y, 0);
+}
 }
 
 static void hevc_luma_mv_mvp_mode(HEVCContext *s, int x0, int y0, int nPbW,
-- 
2.14.0.rc0.284.gd933b75aa4-goog

___
ffmpeg-devel mailing list
ffmpeg-devel@ffmpeg.org
http://ffmpeg.org/mailman/listinfo/ffmpeg-devel


Re: [FFmpeg-devel] [PATCH] Add FITS Demuxer

2017-07-20 Thread Paras Chadha
On Thu, Jul 20, 2017 at 11:17 PM, Nicolas George  wrote:

> Le primidi 1er thermidor, an CCXXV, Reimar Döffinger a écrit :
> > I am a bit unclear on the details, but my memory:
> > Well, they work better (only?) if the images are completely independent.
>
> > I am not sure this is entirely the case here, for example the first
> > image would have a different header it looks like from the muxer
> > patch.
>
> According to this mail from Paras Chadha:
> https://ffmpeg.org/pipermail/ffmpeg-devel/2017-July/213180.html
> there is no global header. A first frame that is not identical to the
> next one would count as a global header.
>
> I hope Paras Chadha can tell us more about this issue.
>

The only difference between the first image and the rest is that the first
image will always contain SIMPLE = T as its first keyword, while all other
images will have XTENSION = 'IMAGE ' as their first keyword. Also, PCOUNT
and GCOUNT are mandatory in all image extensions but not in the first
image. All other keywords are the same.
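
For illustration, the start of the primary header versus an image extension
header looks roughly like this (cards are padded to 80 columns in the real
file):

    SIMPLE  =                    T          / first (primary) image only
    BITPIX  =                   16
    NAXIS   =                    2
    ...
    XTENSION= 'IMAGE   '                    / every following image
    BITPIX  =                   16
    NAXIS   =                    2
    ...
    PCOUNT  =                    0
    GCOUNT  =                    1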


>
> > There is also the question: how would the encoding work?
> > Because if encoding will be using a muxer, having things symmetric by
> > having a demuxer is an advantage.
>
> If it is an image format (without global headers), it should work just
> the same as the other image formats, nothing more: the image2pipe muxer
> concatenates the packets.
>
> > I don't think we have the right frameworks for that. Other cases like
> > the MPEG video format parsers and decoders also duplicate that kind of
> > code.
>
> If we do not have the right framework to avoid code duplication, then we
> need to invent it. What you say here only means that sub-standard code
> have been accepted in the past, and that should not count as a
> precedent.
>
> Also, MPEG is a bit more important than FITS, so rushing things for it
> may have been acceptable.
>
> Still, in this particular case, I have reviewed the patches and I know
> the code enough to say that merging both functions would be quite easy
> even without a framework.
>
> > There might be some ways though, I am mostly thinking of an iterative
> > function that is called with a pointer to each 80 byte header buffer
> > and updates a state structure with the info it extracts from there.
> [1]
>
> Yes, exactly, that is one of the possibilities.
>
> > Would still get messy if the decoder needs quite different data than
> > the demuxer.
> > I have not yet checked how similar these functions actually are.
>
> I have looked at the code (enough to make a full review of a previous
> version), and I can say it is not the case. The information required by
> the demuxer is more or less a subset of the information required by the
> decoder.
>
> I say more or less because I just saw something fishy in the code:
>
> +data_size += pcount;
> +data_size *= (abs(bitpix) >> 3) * gcount;
>
> Paras Chadha, can you tell us why this is counted in the data size in
> the demuxer and not in the decoder?
>

It is because, for image extensions, PCOUNT = 0 and GCOUNT = 1 are mandatory
in the header according to the standard. These keywords are not even
mandatory in the header of the first image.
Anyway, as I have said before, a FITS file does not necessarily contain an
image: it is a general data storage and transport format too. It supports
various extensions (the ASCII table extension and the binary table
extension) that are used to store and transport data, and there is also the
Random Groups structure, which serves the same purpose. PCOUNT and GCOUNT
can affect the size of the data in those extensions.
In the demuxer, I am skipping over these extensions and only sending the
images to the decoder. So these keywords are taken into account in the
demuxer but not in the decoder.
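
For reference, the data size rule from the standard that the demuxer has to
apply is, paraphrased:

    data_bits  = abs(BITPIX) * GCOUNT * (PCOUNT + NAXIS1 * NAXIS2 * ... * NAXISm)
    data_bytes = data_bits / 8, padded up to a multiple of 2880

For the images handed to the decoder, PCOUNT = 0 and GCOUNT = 1, so this
reduces to abs(BITPIX) / 8 times the product of the NAXISn values.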


>
> > Parsers work best when parsing requires very limited context.
> > My impression is that there is no fixed maximum header size, which can
> > easily make things really painful in a parser.
>
> Urgh, I thought the framework was better than that. I concede that
> turning the existing code into a parser would be more work than I
>
> Nonetheless, with the coding organization you suggested above (see [1]),
> adding buf[80] to the parsing structure would do the trick.
>
> But we still need Paras Chadha's knowledge about the possible
> differences in the first frame.
>

I have described them above


>
> > Also considering the complexity: I suspect that the header parsing can
> > be simplified a good bit, and maybe some common helper functions
> > extracted.
>
> I think so too.
>
> > I wouldn't insist on avoiding code duplication when trying to share it
> > might actually result in more complicated code. But it probably needs
> > some thinking to figure out what makes sense.
>
> Having looked at the code, I really think it would not lead to more
> complicated code.
>
> > Ok, I think it is possible to use the same code for those, for example
> > by doing this:
>
> > - in the demuxer (or 

Re: [FFmpeg-devel] [PATCH] Add FITS Decoder

2017-07-20 Thread Paras Chadha
On Thu, Jul 20, 2017 at 11:19 PM, Nicolas George  wrote:

> Hi.
>
> Le duodi 2 thermidor, an CCXXV, Paras Chadha a écrit :
> > +size = abs(header->bitpix) >> 3;
>
> > +size *= header->naxisn[i];
>
> Can you explain why PCOUNT and GCOUNT are not taken into account here
> while they are taken into account in the demuxer?
>

It is because, for image extensions, PCOUNT = 0 and GCOUNT = 1 are mandatory
in the header according to the standard. These keywords are not even
mandatory in the header of the first image.
Anyway, as I have said before, a FITS file does not necessarily contain an
image: it is a general data storage and transport format too. It supports
various extensions (the ASCII table extension and the binary table
extension) that are used to store and transport data, and there is also the
Random Groups structure, which serves the same purpose. PCOUNT and GCOUNT
can affect the size of the data in those extensions.
In the demuxer, I am skipping over these extensions and only sending the
images to the decoder. So these keywords are taken into account in the
demuxer but not in the decoder.


>
> Regards,
>
> --
>   Nicolas George
>
___
ffmpeg-devel mailing list
ffmpeg-devel@ffmpeg.org
http://ffmpeg.org/mailman/listinfo/ffmpeg-devel


Re: [FFmpeg-devel] [PATCH] Add FITS Decoder

2017-07-20 Thread Nicolas George
Hi.

Le duodi 2 thermidor, an CCXXV, Paras Chadha a écrit :
> +size = abs(header->bitpix) >> 3;

> +size *= header->naxisn[i];

Can you explain why PCOUNT and GCOUNT are not taken into account here
while they are taken into account in the demuxer?

Regards,

-- 
  Nicolas George


___
ffmpeg-devel mailing list
ffmpeg-devel@ffmpeg.org
http://ffmpeg.org/mailman/listinfo/ffmpeg-devel


Re: [FFmpeg-devel] [PATCH] Add FITS Demuxer

2017-07-20 Thread Nicolas George
Le primidi 1er thermidor, an CCXXV, Reimar Döffinger a écrit :
> I am a bit unclear on the details, but my memory:
> Well, they work better (only?) if the images are completely independent.

> I am not sure this is entirely the case here, for example the first
> image would have a different header it looks like from the muxer
> patch.

According to this mail from Paras Chadha:
https://ffmpeg.org/pipermail/ffmpeg-devel/2017-July/213180.html
there is no global header. A first frame that is not identical to the
next one would count as a global header.

I hope Paras Chadha can tell us more about this issue.

> There is also the question: how would the encoding work?
> Because if encoding will be using a muxer, having things symmetric by
> having a demuxer is an advantage.

If it is an image format (without global headers), it should work just
the same as the other image formats, nothing more: the image2pipe muxer
concatenates the packets.

> I don't think we have the right frameworks for that. Other cases like
> the MPEG video format parsers and decoders also duplicate that kind of
> code.

If we do not have the right framework to avoid code duplication, then we
need to invent it. What you say here only means that sub-standard code
have been accepted in the past, and that should not count as a
precedent.

Also, MPEG is a bit more important than FITS, so rushing things for it
may have been acceptable.

Still, in this particular case, I have reviewed the patches and I know
the code enough to say that merging both functions would be quite easy
even without a framework.

> There might be some ways though, I am mostly thinking of an iterative
> function that is called with a pointer to each 80 byte header buffer
> and updates a state structure with the info it extracts from there.
[1]

Yes, exactly, that is one of the possibilities.
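
Something like this, with purely illustrative names:

    /* Fed one 80-byte card at a time; updates the state shared by demuxer
     * and decoder. Returns 1 when the END card is reached, 0 to continue,
     * a negative error code on an invalid card. */
    int ff_fits_parse_card(FITSHeaderState *st, const uint8_t card[80]);

    /* demuxer side */
    FITSHeaderState st = { 0 };
    uint8_t card[80];
    int ret;

    while ((ret = avio_read(pb, card, 80)) == 80) {
        ret = ff_fits_parse_card(&st, card);
        if (ret)
            break;               /* END reached or error */
    }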

> Would still get messy if the decoder needs quite different data than
> the demuxer.
> I have not yet checked how similar these functions actually are.

I have looked at the code (enough to make a full review of a previous
version), and I can say it is not the case. The information required by
the demuxer is more or less a subset of the information required by the
decoder.

I say more or less because I just saw something fishy in the code:

+data_size += pcount;
+data_size *= (abs(bitpix) >> 3) * gcount;

Paras Chadha, can you tell us why this is counted in the data size in
the demuxer and not in the decoder?

> Parsers work best when parsing requires very limited context.
> My impression is that there is no fixed maximum header size, which can
> easily make things really painful in a parser.

Urgh, I thought the framework was better than that. I concede that
turning the existing code into a parser would be more work than I 

Nonetheless, with the coding organization you suggested above (see [1]),
adding buf[80] to the parsing structure would do the trick.

But we still need Paras Chadha's knowledge about the possible
differences in the first frame.

> Also considering the complexity: I suspect that the header parsing can
> be simplified a good bit, and maybe some common helper functions
> extracted.

I think so too.

> I wouldn't insist on avoiding code duplication when trying to share it
> might actually result in more complicated code. But it probably needs
> some thinking to figure out what makes sense.

Having looked at the code, I really think it would not lead to more
complicated code.

> Ok, I think it is possible to use the same code for those, for example
> by doing this:

> - in the demuxer (or parser, but that is harder because you cannot
> just read what you need but have to deal with the data as it is
> provided), get data until the end of the header.

> The end of the header detection would be somewhat duplicated, but
> should be fairly trivial.

Or it could go the other way round: code the function to use an AVIO,
and in the decoder create an AVIO that reads from the packet data.

> - Then use a function that takes such a buffer and returns a
> AVDictionary with name/value pairs. The decoder and demuxer can then
> each decide what they want to do with them.

Wait, what?!?

The decoder makes an AVDictionary for the string metadata that is returned
as is in the frame, but apart from that, both the decoder and the demuxer
only use a few numeric fields: just return these fields in a structure.
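
Concretely, something as small as this would do (the exact field list is
illustrative, not a proposal):

    typedef struct FITSHeader {
        int     bitpix;
        int     naxis;
        int     naxisn[16];      /* illustrative bound */
        int64_t pcount, gcount;
        double  bscale, bzero;
        int64_t blank;
        int     blank_found;
    } FITSHeader;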

> There is one issue at least though: for files with very large
> meta-data/header this would almost double memory usage in the demuxer
> due to having copies in the dictionary.

I would say many of our components have this kind of issue. I have not
checked, but I think cases where a component reads a full block and then
splits it into string metadata are very common, and we never cared.

> There is also to me the issue, while both parse the same things, what
> these functions do is not at all the same semantically, at least
> currently.

> The demuxer only parses a select few 

[FFmpeg-devel] [PATCH] avcodec/x86/cavsdsp: Delete #include "libavcodec/x86/idctdsp.h".

2017-07-20 Thread Wan-Teh Chang
This file already has #include "idctdsp.h", which compilers resolve to the
idctdsp.h header in the directory where this file resides.
Two other files in this directory, libavcodec/x86/idctdsp_init.c and
libavcodec/x86/xvididct_init.c, also rely on #include "idctdsp.h"
working this way.

Signed-off-by: Wan-Teh Chang 
---
 libavcodec/x86/cavsdsp.c | 1 -
 1 file changed, 1 deletion(-)

diff --git a/libavcodec/x86/cavsdsp.c b/libavcodec/x86/cavsdsp.c
index a8a198b46d..becb3a4808 100644
--- a/libavcodec/x86/cavsdsp.c
+++ b/libavcodec/x86/cavsdsp.c
@@ -29,7 +29,6 @@
 #include "libavutil/x86/cpu.h"
 #include "libavcodec/cavsdsp.h"
 #include "libavcodec/idctdsp.h"
-#include "libavcodec/x86/idctdsp.h"
 #include "constants.h"
 #include "fpel.h"
 #include "idctdsp.h"
-- 
2.14.0.rc0.284.gd933b75aa4-goog

___
ffmpeg-devel mailing list
ffmpeg-devel@ffmpeg.org
http://ffmpeg.org/mailman/listinfo/ffmpeg-devel


[FFmpeg-devel] [PATCH] avcodec/h264: Declare the local variable decode_chroma as const.

2017-07-20 Thread Wan-Teh Chang
ff_h264_decode_mb_cabac() and ff_h264_decode_mb_cavlc() are very long
functions. Declaring decode_chroma as const makes it clear the variable
doesn't change after initialization.

Signed-off-by: Wan-Teh Chang 
---
 libavcodec/h264_cabac.c | 4 ++--
 libavcodec/h264_cavlc.c | 4 ++--
 2 files changed, 4 insertions(+), 4 deletions(-)

diff --git a/libavcodec/h264_cabac.c b/libavcodec/h264_cabac.c
index 0973e30be9..345834645c 100644
--- a/libavcodec/h264_cabac.c
+++ b/libavcodec/h264_cabac.c
@@ -1916,8 +1916,8 @@ int ff_h264_decode_mb_cabac(const H264Context *h, 
H264SliceContext *sl)
 const SPS *sps = h->ps.sps;
 int mb_xy;
 int mb_type, partition_count, cbp = 0;
-int dct8x8_allowed= h->ps.pps->transform_8x8_mode;
-int decode_chroma = sps->chroma_format_idc == 1 || sps->chroma_format_idc 
== 2;
+int dct8x8_allowed = h->ps.pps->transform_8x8_mode;
+const int decode_chroma = sps->chroma_format_idc == 1 || 
sps->chroma_format_idc == 2;
 const int pixel_shift = h->pixel_shift;
 
 mb_xy = sl->mb_xy = sl->mb_x + sl->mb_y*h->mb_stride;
diff --git a/libavcodec/h264_cavlc.c b/libavcodec/h264_cavlc.c
index f01e76070c..187b1c64e2 100644
--- a/libavcodec/h264_cavlc.c
+++ b/libavcodec/h264_cavlc.c
@@ -704,8 +704,8 @@ int ff_h264_decode_mb_cavlc(const H264Context *h, 
H264SliceContext *sl)
 int mb_xy;
 int partition_count;
 unsigned int mb_type, cbp;
-int dct8x8_allowed= h->ps.pps->transform_8x8_mode;
-int decode_chroma = h->ps.sps->chroma_format_idc == 1 || 
h->ps.sps->chroma_format_idc == 2;
+int dct8x8_allowed = h->ps.pps->transform_8x8_mode;
+const int decode_chroma = h->ps.sps->chroma_format_idc == 1 || 
h->ps.sps->chroma_format_idc == 2;
 const int pixel_shift = h->pixel_shift;
 
 mb_xy = sl->mb_xy = sl->mb_x + sl->mb_y*h->mb_stride;
-- 
2.14.0.rc0.284.gd933b75aa4-goog

___
ffmpeg-devel mailing list
ffmpeg-devel@ffmpeg.org
http://ffmpeg.org/mailman/listinfo/ffmpeg-devel


[FFmpeg-devel] [PATCH] Add FITS Decoder

2017-07-20 Thread Paras Chadha
Signed-off-by: Paras Chadha 
---
Fixed an issue with RGB48 and RGBA64 images.
Also simplified the RGB decoding code.

 Changelog   |   1 +
 doc/general.texi|   2 +
 libavcodec/Makefile |   1 +
 libavcodec/allcodecs.c  |   1 +
 libavcodec/avcodec.h|   1 +
 libavcodec/codec_desc.c |   8 +
 libavcodec/fitsdec.c| 487 
 libavcodec/version.h|   2 +-
 libavformat/img2.c  |   1 +
 9 files changed, 503 insertions(+), 1 deletion(-)
 create mode 100644 libavcodec/fitsdec.c

diff --git a/Changelog b/Changelog
index 187ae79..d9af2b9 100644
--- a/Changelog
+++ b/Changelog
@@ -29,6 +29,7 @@ version :
 - limiter video filter
 - libvmaf video filter
 - Dolby E decoder and SMPTE 337M demuxer
+- FITS demuxer and decoder

 version 3.3:
 - CrystalHD decoder moved to new decode API
diff --git a/doc/general.texi b/doc/general.texi
index 036c8c2..01402cb 100644
--- a/doc/general.texi
+++ b/doc/general.texi
@@ -592,6 +592,8 @@ following image formats are supported:
 @tab Digital Picture Exchange
 @item EXR  @tab   @tab X
 @tab OpenEXR
+@item FITS @tab   @tab X
+@tab Flexible Image Transport System
 @item JPEG @tab X @tab X
 @tab Progressive JPEG is not supported.
 @item JPEG 2000@tab X @tab X
diff --git a/libavcodec/Makefile b/libavcodec/Makefile
index 357fa1a..5348ed9 100644
--- a/libavcodec/Makefile
+++ b/libavcodec/Makefile
@@ -291,6 +291,7 @@ OBJS-$(CONFIG_FFV1_DECODER)+= ffv1dec.o ffv1.o
 OBJS-$(CONFIG_FFV1_ENCODER)+= ffv1enc.o ffv1.o
 OBJS-$(CONFIG_FFWAVESYNTH_DECODER) += ffwavesynth.o
 OBJS-$(CONFIG_FIC_DECODER) += fic.o
+OBJS-$(CONFIG_FITS_DECODER)+= fitsdec.o
 OBJS-$(CONFIG_FLAC_DECODER)+= flacdec.o flacdata.o flac.o
 OBJS-$(CONFIG_FLAC_ENCODER)+= flacenc.o flacdata.o flac.o 
vorbis_data.o
 OBJS-$(CONFIG_FLASHSV_DECODER) += flashsv.o
diff --git a/libavcodec/allcodecs.c b/libavcodec/allcodecs.c
index 4712592..8678ac2 100644
--- a/libavcodec/allcodecs.c
+++ b/libavcodec/allcodecs.c
@@ -192,6 +192,7 @@ static void register_all(void)
 REGISTER_ENCDEC (FFV1,  ffv1);
 REGISTER_ENCDEC (FFVHUFF,   ffvhuff);
 REGISTER_DECODER(FIC,   fic);
+REGISTER_DECODER(FITS,  fits);
 REGISTER_ENCDEC (FLASHSV,   flashsv);
 REGISTER_ENCDEC (FLASHSV2,  flashsv2);
 REGISTER_DECODER(FLIC,  flic);
diff --git a/libavcodec/avcodec.h b/libavcodec/avcodec.h
index c594993..b28002f 100644
--- a/libavcodec/avcodec.h
+++ b/libavcodec/avcodec.h
@@ -447,6 +447,7 @@ enum AVCodecID {
 AV_CODEC_ID_SRGC,
 AV_CODEC_ID_SVG,
 AV_CODEC_ID_GDV,
+AV_CODEC_ID_FITS,

 /* various PCM "codecs" */
 AV_CODEC_ID_FIRST_AUDIO = 0x10000, ///< A dummy id pointing at the start of audio codecs
diff --git a/libavcodec/codec_desc.c b/libavcodec/codec_desc.c
index 6f43b68..8dcd3ec 100644
--- a/libavcodec/codec_desc.c
+++ b/libavcodec/codec_desc.c
@@ -1464,6 +1464,14 @@ static const AVCodecDescriptor codec_descriptors[] = {
  AV_CODEC_PROP_LOSSLESS,
 },
 {
+.id= AV_CODEC_ID_FITS,
+.type  = AVMEDIA_TYPE_VIDEO,
+.name  = "fits",
+.long_name = NULL_IF_CONFIG_SMALL("Flexible Image Transport System"),
+.props = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY |
+ AV_CODEC_PROP_LOSSLESS,
+},
+{
 .id= AV_CODEC_ID_GIF,
 .type  = AVMEDIA_TYPE_VIDEO,
 .name  = "gif",
diff --git a/libavcodec/fitsdec.c b/libavcodec/fitsdec.c
new file mode 100644
index 000..8c940c0
--- /dev/null
+++ b/libavcodec/fitsdec.c
@@ -0,0 +1,487 @@
+/*
+ * FITS image decoder
+ * Copyright (c) 2017 Paras Chadha
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * FITS image decoder
+ *
+ * Specification: https://fits.gsfc.nasa.gov/fits_standard.html Version 3.0
+ *
+ * Supports all 2d images along with the bzero, bscale and blank keywords.
+ * RGBA images are supported as NAXIS3 = 3 or 4 i.e. Planes in RGBA order. 
Also CTYPE = 'RGB ' should be present.
+ * Also to interpret 

Re: [FFmpeg-devel] tsan warning about a data race in libavcodec/h264_direct.c

2017-07-20 Thread Wan-Teh Chang
Hi Ronald,

On Wed, Jul 19, 2017 at 11:50 AM, Ronald S. Bultje  wrote:
> Hi Wan-Teh,
>
> On Wed, Jul 19, 2017 at 2:31 PM, Wan-Teh Chang  wrote:
>
>> In libavcodec/h264_direct.c, there is already an
>> await_reference_mb_row() call before the read of
>> sl->ref_list[1][0].parent->mb_type[mb_xy] at line 505:
>>
>> 487 static void pred_temp_direct_motion(const H264Context *const h,
>> H264SliceContext *sl,
>> 488 int *mb_type)
>> 489 {
>> ...
>> 501
>> 502 await_reference_mb_row(h, &sl->ref_list[1][0],
>> 503sl->mb_y + !!IS_INTERLACED(*mb_type));
>> 504
>> 505 if (IS_INTERLACED(sl->ref_list[1][0].parent->mb_type[mb_xy]))
>> { // AFL/AFR/FR/FL -> AFL/FL
>>
>> This seems like the wait you suggested, but I guess it is not?
>
> Yes, but clearly it's not doing the correct thing. :-). The ""fun"" in
> these type of issues is to figure out why not. ;-).

I debugged this for fun for three hours last night. I studied other
ff_thread_await_progress() calls, especially the ones in the h264
decoder. But I couldn't figure out the meaning of the third argument
(int field) of ff_thread_await_progress(). It seems to be only used
for h264, and seems important for this tsan warning. By playing with
the third argument, I was able to come up with a patch (pasted below)
that eliminates the tsan warning and makes "make fate-h264 THREADS=4"
pass under tsan. I also played with the second argument (int
progress), but had no success.
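
For reference, the declaration in libavcodec/thread.h is:

    void ff_thread_await_progress(ThreadFrame *f, int n, int field);

and, if I read the header comment right, |field| is meant to be 0 for the
top field or for frame pictures and 1 for the bottom field.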

Description of the patch:

1. The new function await_reference_mb_row_both_fields() is a variant
of await_reference_mb_row(). pred_temp_direct_motion() is changed to
call await_reference_mb_row_both_fields() instead of
await_reference_mb_row() before it reads
sl->ref_list[1][0].parent->mb_type[mb_xy].

2. If ref_field_picture is true, await_reference_mb_row_both_fields()
calls ff_thread_await_progress() with both field=0 and field=1.
(await_reference_mb_row() calls ff_thread_await_progress() with only
field=ref_field in this case.)

3. If ref_field_picture is false, await_reference_mb_row_both_fields()
calls ff_thread_await_progress() with only field=0, the same as
await_reference_mb_row().

I doubt this patch is correct, but I am publishing it to solicit
ideas. I will try to debug this more this weekend.

Thanks,
Wan-Teh Chang

diff --git a/libavcodec/h264_direct.c b/libavcodec/h264_direct.c
index a7a107c8c2..e8d3811c67 100644
--- a/libavcodec/h264_direct.c
+++ b/libavcodec/h264_direct.c
@@ -197,6 +197,25 @@ static void await_reference_mb_row(const
H264Context *const h, H264Ref *ref,
  ref_field_picture && ref_field);
 }

+/* Waits until it is also safe to access ref->parent->mb_type[mb_xy]. */
+static void await_reference_mb_row_both_fields(const H264Context *const h,
+   H264Ref *ref, int mb_y)
+{
+int ref_field_picture = ref->parent->field_picture;
+int ref_height= 16 * h->mb_height >> ref_field_picture;
+int row   = 16 * mb_y >> ref_field_picture;
+
+if (!HAVE_THREADS || !(h->avctx->active_thread_type & FF_THREAD_FRAME))
+return;
+
+/* FIXME: This is an educated guess. Is this right? */
+ff_thread_await_progress(&ref->parent->tf, FFMIN(row, ref_height - 1), 0);
+if (ref_field_picture) {
+ff_thread_await_progress(&ref->parent->tf, FFMIN(row, ref_height - 1),
+ 1);
+}
+}
+
 static void pred_spatial_direct_motion(const H264Context *const h,
H264SliceContext *sl,
int *mb_type)
 {
@@ -499,7 +518,7 @@ static void pred_temp_direct_motion(const
H264Context *const h, H264SliceContext

 assert(sl->ref_list[1][0].reference & 3);

-await_reference_mb_row(h, &sl->ref_list[1][0],
+await_reference_mb_row_both_fields(h, &sl->ref_list[1][0],
sl->mb_y + !!IS_INTERLACED(*mb_type));

 if (IS_INTERLACED(sl->ref_list[1][0].parent->mb_type[mb_xy])) {
// AFL/AFR/FR/FL -> AFL/FL
___
ffmpeg-devel mailing list
ffmpeg-devel@ffmpeg.org
http://ffmpeg.org/mailman/listinfo/ffmpeg-devel


[FFmpeg-devel] [PATCH v2] fate: add vf_overlay test for main source with alpha channel

2017-07-20 Thread Peter Große
Signed-off-by: Peter Große 
---
This depends on patch "lavfi/testsrc2: fix completely transparent alpha."

The test fails without the patch "avfilter/vf_overlay: fix alpha blending when 
main source has an alpha channel".

 tests/fate/filter-video.mak  |  4 
 tests/filtergraphs/overlay_main_alpha|  4 
 tests/ref/fate/filter-overlay_main-alpha | 30 ++
 3 files changed, 38 insertions(+)
 create mode 100644 tests/filtergraphs/overlay_main_alpha
 create mode 100644 tests/ref/fate/filter-overlay_main-alpha

diff --git a/tests/fate/filter-video.mak b/tests/fate/filter-video.mak
index 53fc7a6528..ee2e7aff50 100644
--- a/tests/fate/filter-video.mak
+++ b/tests/fate/filter-video.mak
@@ -379,6 +379,10 @@ fate-filter-concat: CMD = framecrc -filter_complex_script 
$(TARGET_PATH)/tests/d
 FATE_FILTER-$(call ALLYES, TESTSRC2_FILTER FPS_FILTER MPDECIMATE_FILTER) += 
fate-filter-mpdecimate
 fate-filter-mpdecimate: CMD = framecrc -lavfi 
testsrc2=r=2:d=10,fps=3,mpdecimate -r 3 -pix_fmt yuv420p
 
+FATE_FILTER-$(call ALLYES, TESTSRC2_FILTER OVERLAY_FILTER) += 
fate-filter-overlay_main-alpha
+fate-filter-overlay_main-alpha: tests/data/filtergraphs/overlay_main_alpha
+fate-filter-overlay_main-alpha: CMD = framecrc -filter_complex_script 
$(TARGET_PATH)/tests/data/filtergraphs/overlay_main_alpha
+
 FATE_FILTER_SAMPLES-$(call ALLYES, MOV_DEMUXER FPS_FILTER QTRLE_DECODER) += 
fate-filter-fps-cfr fate-filter-fps fate-filter-fps-r
 fate-filter-fps-cfr: CMD = framecrc -i 
$(TARGET_SAMPLES)/qtrle/apple-animation-variable-fps-bug.mov -r 30 -vsync cfr 
-pix_fmt yuv420p
 fate-filter-fps-r:   CMD = framecrc -i 
$(TARGET_SAMPLES)/qtrle/apple-animation-variable-fps-bug.mov -r 30 -vf fps 
-pix_fmt yuv420p
diff --git a/tests/filtergraphs/overlay_main_alpha 
b/tests/filtergraphs/overlay_main_alpha
new file mode 100644
index 00..95cc1d4c61
--- /dev/null
+++ b/tests/filtergraphs/overlay_main_alpha
@@ -0,0 +1,4 @@
+sws_flags=+accurate_rnd+bitexact;
+testsrc2=d=1:s=1024x768:alpha=255[a];
+testsrc2=d=1:alpha=128[b];
+[a][b]overlay=x=100:y=50
\ No newline at end of file
diff --git a/tests/ref/fate/filter-overlay_main-alpha 
b/tests/ref/fate/filter-overlay_main-alpha
new file mode 100644
index 00..071c7435ae
--- /dev/null
+++ b/tests/ref/fate/filter-overlay_main-alpha
@@ -0,0 +1,30 @@
+#tb 0: 1/25
+#media_type 0: video
+#codec_id 0: rawvideo
+#dimensions 0: 1024x768
+#sar 0: 1/1
+0,  0,  0,1,  1966080, 0xbb0752de
+0,  1,  1,1,  1966080, 0x2eaa3ef1
+0,  2,  2,1,  1966080, 0x6159c6b2
+0,  3,  3,1,  1966080, 0x03403179
+0,  4,  4,1,  1966080, 0x905a86ce
+0,  5,  5,1,  1966080, 0x5c95b051
+0,  6,  6,1,  1966080, 0xd0989f23
+0,  7,  7,1,  1966080, 0x2f345dc0
+0,  8,  8,1,  1966080, 0x2d854f97
+0,  9,  9,1,  1966080, 0xc589e999
+0, 10, 10,1,  1966080, 0x8c2d7938
+0, 11, 11,1,  1966080, 0x457b26ad
+0, 12, 12,1,  1966080, 0x5ee36f1e
+0, 13, 13,1,  1966080, 0x2947c30c
+0, 14, 14,1,  1966080, 0x5a031a29
+0, 15, 15,1,  1966080, 0xc278a4d0
+0, 16, 16,1,  1966080, 0x849a0f9d
+0, 17, 17,1,  1966080, 0xaea83654
+0, 18, 18,1,  1966080, 0xd3054386
+0, 19, 19,1,  1966080, 0x1fae5492
+0, 20, 20,1,  1966080, 0xc8b95b6b
+0, 21, 21,1,  1966080, 0x8634fdf9
+0, 22, 22,1,  1966080, 0x3627c5cb
+0, 23, 23,1,  1966080, 0x8c1c5b90
+0, 24, 24,1,  1966080, 0x2c54109d
-- 
2.13.0

___
ffmpeg-devel mailing list
ffmpeg-devel@ffmpeg.org
http://ffmpeg.org/mailman/listinfo/ffmpeg-devel


Re: [FFmpeg-devel] [PATCH] Add FITS Demuxer

2017-07-20 Thread Nicolas George
Le duodi 2 thermidor, an CCXXV, Paul B Mahol a écrit :
> You should be more constructive, better go porting dualinput to framesync2,
> that wasting everybodies precious time here.

Keeping high standards of code quality seems to me a more important use
of my time. And since you are neither my boss nor my king, you have no
authority to tell me how I should spend my time.

I have an outstanding technical objection to these patches: mine, not
Reimar's, not anybody else's. Until I drop it, you are not allowed to push
unless you gain support to overrule me. That is the rule.

In the meantime, you can perfectly well continue working; Git makes it
entirely convenient. You can also come to understand that I am right and
start fixing the patches.

I will now continue the discussion with Reimar.

Regards,

-- 
  Nicolas George


___
ffmpeg-devel mailing list
ffmpeg-devel@ffmpeg.org
http://ffmpeg.org/mailman/listinfo/ffmpeg-devel


Re: [FFmpeg-devel] [PATCH] Add FITS Demuxer

2017-07-20 Thread Paul B Mahol
On 7/20/17, Nicolas George  wrote:
> Le duodi 2 thermidor, an CCXXV, Paul B Mahol a ecrit :
>> Assuming there are no comments.
>
> Wrong. Give me more time.

You should be more constructive: better go porting dualinput to framesync2
than wasting everybody's precious time here.
___
ffmpeg-devel mailing list
ffmpeg-devel@ffmpeg.org
http://ffmpeg.org/mailman/listinfo/ffmpeg-devel


Re: [FFmpeg-devel] [PATCH] Add FITS Demuxer

2017-07-20 Thread Nicolas George
Le duodi 2 thermidor, an CCXXV, Paul B Mahol a écrit :
> Assuming there are no comments.

Wrong. Give me more time.

Regards,

-- 
  Nicolas George


___
ffmpeg-devel mailing list
ffmpeg-devel@ffmpeg.org
http://ffmpeg.org/mailman/listinfo/ffmpeg-devel


Re: [FFmpeg-devel] [PATCH] Add FITS Demuxer

2017-07-20 Thread Paul B Mahol
On 7/19/17, Paul B Mahol  wrote:
> On 7/19/17, Nicolas George  wrote:
>> Le primidi 1er thermidor, an CCXXV, Paul B Mahol a ecrit :
>>> If you had ever written a parser, you would know that the above is
>>> gibberish.
>>
>> Please enlighten us.
>
> Are there more than one Nicolas here?
>
> Anyway, the input can be of any size, so one would need to hold the
> complete parsing state.
>

Assuming there are no comments, I will apply the decoder patch as is.
___
ffmpeg-devel mailing list
ffmpeg-devel@ffmpeg.org
http://ffmpeg.org/mailman/listinfo/ffmpeg-devel


[FFmpeg-devel] [PATCH][RFC] JPEG2000: SSE optimisation for DWT decoding

2017-07-20 Thread maxime taisant
From: Maxime Taisant 

Hi,

I am currently working on SSE optimisations for the dwt functions used to 
decode JPEG2000.
For the moment, I have only managed to produce an SSE-optimized version of
the sr_1d97_float function (with relatively good results).
I would like to have some comments on my work so far, to know whether I am
on the right track or whether there are parts I need to improve or modify.

Thank you.
---
 libavcodec/jpeg2000dwt.c  |   5 +-
 libavcodec/jpeg2000dwt.h  |   2 +
 libavcodec/x86/jpeg2000dsp.asm| 268 ++
 libavcodec/x86/jpeg2000dsp_init.c |   3 +
 4 files changed, 277 insertions(+), 1 deletion(-)

diff --git a/libavcodec/jpeg2000dwt.c b/libavcodec/jpeg2000dwt.c
index 55dd5e89b5..b2a952aa29 100644
--- a/libavcodec/jpeg2000dwt.c
+++ b/libavcodec/jpeg2000dwt.c
@@ -425,7 +425,10 @@ static void dwt_decode97_float(DWTContext *s, float *t)
 for (i = 1 - mh; i < lh; i += 2, j++)
 l[i] = data[w * lp + j];
 
-sr_1d97_float(line, mh, mh + lh);
+   if (ARCH_X86)
+ff_sr_1d97_float_sse(line, mh, mh + lh);
+   else
+sr_1d97_float(line, mh, mh + lh);
 
 for (i = 0; i < lh; i++)
 data[w * lp + i] = l[i];
diff --git a/libavcodec/jpeg2000dwt.h b/libavcodec/jpeg2000dwt.h
index 718d183ac1..59dec14478 100644
--- a/libavcodec/jpeg2000dwt.h
+++ b/libavcodec/jpeg2000dwt.h
@@ -65,4 +65,6 @@ int ff_dwt_decode(DWTContext *s, void *t);
 
 void ff_dwt_destroy(DWTContext *s);
 
+void ff_sr_1d97_float_sse(float *p, int i0, int i1);
+
 #endif /* AVCODEC_JPEG2000DWT_H */
diff --git a/libavcodec/x86/jpeg2000dsp.asm b/libavcodec/x86/jpeg2000dsp.asm
index 56b5fbd606..dabfb914b8 100644
--- a/libavcodec/x86/jpeg2000dsp.asm
+++ b/libavcodec/x86/jpeg2000dsp.asm
@@ -29,6 +29,16 @@ pf_ict1: times 8 dd 0.34413
 pf_ict2: times 8 dd 0.71414
 pf_ict3: times 8 dd 1.772
 
+F_LFTG_K: dd 1.230174104914001
+F_LFTG_X: dd 0.812893066115961
+
+F_LFTG_ALPHA: times 8 dd 1.586134342059924
+F_LFTG_BETA: times 8 dd 0.052980118572961
+F_LFTG_GAMMA: times 8 dd 0.882911075530934
+F_LFTG_DELTA: times 8 dd 0.443506852043971
+
+TWO: dd 2.0
+
 SECTION .text
 
 ;***
@@ -142,3 +152,261 @@ RCT_INT
 INIT_YMM avx2
 RCT_INT
 %endif
+
+;***
+; ff_sr_ld97_float_(float *p, int i0, int i1)
+;***
+%macro SR1D97FLOAT 0
+cglobal sr_1d97_float, 3, 5, 10, p, i0, i1, tmp0, tmp1
+mov tmp0q, i0q
+mov tmp1q, i1q
+add tmp0q, 1
+cmp tmp1q, tmp0q
+jg .extend
+sub tmp0q, 2
+jnz .else
+movss m0, [pq+4]
+movss m1, [F_LFTG_K]
+movss m2, [TWO]
+divss m1, m2
+mulss m0, m1
+movss [pq+4], m0
+jmp .end
+
+.else:
+movss m0, [pq]
+movss m1, [F_LFTG_X]
+mulss m0, m1
+movss [pq], m0
+jmp .end
+
+.extend:
+shl i0d, 2
+shl i1d, 2
+mov tmp0q, i0q
+mov tmp1q, i1q
+movups m0, [pq+tmp0q+4]
+shufps m0, m0, 0x1B
+movups [pq+tmp0q-16], m0
+movups m0, [pq+tmp1q-20]
+shufps m0, m0, 0x1B
+movups [pq+tmp1q], m0
+
+movups m3, [F_LFTG_DELTA]
+mov tmp0q, i0q
+mov tmp1q, i1q
+shr tmp0q, 1
+sub tmp0q, 4
+shr tmp1q, 1
+add tmp1q, 8
+cmp tmp0q, tmp1q
+jge .beginloop2
+.loop1:
+add tmp0q, 12
+cmp tmp0q, tmp1q
+jge .endloop1
+ 
+movups m0, [pq+2*tmp0q-28]
+movups m4, [pq+2*tmp0q-12]
+movups m1, m0
+shufps m0, m4, 0xDD
+shufps m1, m4, 0x88
+movups m2, [pq+2*tmp0q-24]
+movups m5, [pq+2*tmp0q-8] 
+shufps m2, m5, 0xDD
+addps m2, m1
+mulps m2, m3
+subps m0, m2
+movups m4, m1
+shufps m1, m0, 0x44
+shufps m1, m1, 0xD8
+shufps m4, m0, 0xEE
+shufps m4, m4, 0xD8
+movups [pq+2*tmp0q-28], m1
+movups [pq+2*tmp0q-12], m4
+
+add tmp0q, 4
+cmp tmp0q, tmp1q
+jge .beginloop2
+jmp .loop1
+  
+.endloop1:
+sub tmp0q, 12
+.littleloop1:
+movss m0, [pq+2*tmp0q]
+movss m1, [pq+2*tmp0q-4]
+movss m2, [pq+2*tmp0q+4]
+addss m1, m2
+mulss m1, m3
+subss m0, m1
+movss [pq+2*tmp0q], m0
+add tmp0q, 4
+cmp tmp0q, tmp1q
+jl .littleloop1
+
+.beginloop2:
+movups m3, [F_LFTG_GAMMA]
+mov tmp0q, i0q
+mov tmp1q, i1q
+shr tmp0q, 1
+sub tmp0q, 4
+shr tmp1q, 1
+add tmp1q, 4
+cmp tmp0q, tmp1q
+jge .beginloop3
+.loop2:
+add tmp0q, 12
+cmp tmp0q, tmp1q
+jge .endloop2
+ 
+movups m0, [pq+2*tmp0q-24]
+movups m4, [pq+2*tmp0q-8]
+movups m1, m0
+shufps m0, m4, 0xDD
+shufps m1, m4, 0x88
+movups m2, [pq+2*tmp0q-20]
+movups m5, [pq+2*tmp0q-4] 
+shufps m2, m5, 0xDD
+addps m2, m1
+mulps m2, m3
+subps m0, m2
+movups m4, m1
+shufps m1, m0, 0x44
+shufps m1, m1, 0xD8
+shufps 

[FFmpeg-devel] [PATCH] ffprobe: Print color properties from show_frames

2017-07-20 Thread Vittorio Giovara
---
Updated according to Tobias' review.
Vittorio

 doc/ffprobe.xsd|  5 +
 ffprobe.c  | 25 +
 tests/ref/fate/ffprobe_compact | 16 
 tests/ref/fate/ffprobe_csv | 16 
 tests/ref/fate/ffprobe_default | 40 
 tests/ref/fate/ffprobe_flat| 40 
 tests/ref/fate/ffprobe_ini | 40 
 7 files changed, 166 insertions(+), 16 deletions(-)

diff --git a/doc/ffprobe.xsd b/doc/ffprobe.xsd
index 6d929a1a32..3e58da0f46 100644
--- a/doc/ffprobe.xsd
+++ b/doc/ffprobe.xsd
@@ -120,6 +120,11 @@
   
   
   
+  
+  
+  
+  
+  
 
 
 
diff --git a/ffprobe.c b/ffprobe.c
index f6d9be0df9..3d9f795d2e 100644
--- a/ffprobe.c
+++ b/ffprobe.c
@@ -2105,6 +2105,31 @@ static void show_frame(WriterContext *w, AVFrame *frame, 
AVStream *stream,
 print_int("interlaced_frame",   frame->interlaced_frame);
 print_int("top_field_first",frame->top_field_first);
 print_int("repeat_pict",frame->repeat_pict);
+
+if (frame->color_range != AVCOL_RANGE_UNSPECIFIED)
+print_str("color_range", av_color_range_name(frame->color_range));
+else
+print_str_opt("color_range", 
av_color_range_name(frame->color_range));
+
+if (frame->colorspace != AVCOL_SPC_UNSPECIFIED)
+print_str("color_space", av_color_space_name(frame->colorspace));
+else
+print_str_opt("color_space", 
av_color_space_name(frame->colorspace));
+
+if (frame->color_primaries != AVCOL_PRI_UNSPECIFIED)
+print_str("color_primaries", 
av_color_primaries_name(frame->color_primaries));
+else
+print_str_opt("color_primaries", 
av_color_primaries_name(frame->color_primaries));
+
+if (frame->color_trc != AVCOL_TRC_UNSPECIFIED)
+print_str("color_transfer", 
av_color_transfer_name(frame->color_trc));
+else
+print_str_opt("color_transfer", 
av_color_transfer_name(frame->color_trc));
+
+if (frame->chroma_location != AVCHROMA_LOC_UNSPECIFIED)
+print_str("chroma_location", 
av_chroma_location_name(frame->chroma_location));
+else
+print_str_opt("chroma_location", 
av_chroma_location_name(frame->chroma_location));
 break;
 
 case AVMEDIA_TYPE_AUDIO:
diff --git a/tests/ref/fate/ffprobe_compact b/tests/ref/fate/ffprobe_compact
index 1948697e19..910837d290 100644
--- a/tests/ref/fate/ffprobe_compact
+++ b/tests/ref/fate/ffprobe_compact
@@ -1,31 +1,31 @@
 
packet|codec_type=audio|stream_index=0|pts=0|pts_time=0.00|dts=0|dts_time=0.00|duration=1024|duration_time=0.023220|convergence_duration=N/A|convergence_duration_time=N/A|size=2048|pos=647|flags=K_
 
frame|media_type=audio|stream_index=0|key_frame=1|pkt_pts=0|pkt_pts_time=0.00|pkt_dts=0|pkt_dts_time=0.00|best_effort_timestamp=0|best_effort_timestamp_time=0.00|pkt_duration=1024|pkt_duration_time=0.023220|pkt_pos=647|pkt_size=2048|sample_fmt=s16|nb_samples=1024|channels=1|channel_layout=unknown
 
packet|codec_type=video|stream_index=1|pts=0|pts_time=0.00|dts=0|dts_time=0.00|duration=2048|duration_time=0.04|convergence_duration=N/A|convergence_duration_time=N/A|size=230400|pos=2722|flags=K_
-frame|media_type=video|stream_index=1|key_frame=1|pkt_pts=0|pkt_pts_time=0.00|pkt_dts=0|pkt_dts_time=0.00|best_effort_timestamp=0|best_effort_timestamp_time=0.00|pkt_duration=2048|pkt_duration_time=0.04|pkt_pos=2722|pkt_size=230400|width=320|height=240|pix_fmt=rgb24|sample_aspect_ratio=1:1|pict_type=I|coded_picture_number=0|display_picture_number=0|interlaced_frame=0|top_field_first=0|repeat_pict=0
+frame|media_type=video|stream_index=1|key_frame=1|pkt_pts=0|pkt_pts_time=0.00|pkt_dts=0|pkt_dts_time=0.00|best_effort_timestamp=0|best_effort_timestamp_time=0.00|pkt_duration=2048|pkt_duration_time=0.04|pkt_pos=2722|pkt_size=230400|width=320|height=240|pix_fmt=rgb24|sample_aspect_ratio=1:1|pict_type=I|coded_picture_number=0|display_picture_number=0|interlaced_frame=0|top_field_first=0|repeat_pict=0|color_range=unknown|color_space=unknown|color_primaries=unknown|color_transfer=unknown|chroma_location=unspecified
 
packet|codec_type=video|stream_index=2|pts=0|pts_time=0.00|dts=0|dts_time=0.00|duration=2048|duration_time=0.04|convergence_duration=N/A|convergence_duration_time=N/A|size=3|pos=233143|flags=K_

[FFmpeg-devel] [PATCH] Add YUV444 32bit floating point pixel format

2017-07-20 Thread Vittorio Giovara
With and without alpha.
---
Updated with fate tests.
Vittorio

 libavutil/pixdesc.c  | 51 
 libavutil/pixdesc.h  |  5 
 libavutil/pixfmt.h   |  7 ++
 libswscale/utils.c   |  4 
 tests/ref/fate/sws-pixdesc-query | 16 +
 5 files changed, 83 insertions(+)

diff --git a/libavutil/pixdesc.c b/libavutil/pixdesc.c
index 1983ce9ef5..2175b9d799 100644
--- a/libavutil/pixdesc.c
+++ b/libavutil/pixdesc.c
@@ -2162,6 +2162,57 @@ static const AVPixFmtDescriptor 
av_pix_fmt_descriptors[AV_PIX_FMT_NB] = {
 .name = "d3d11",
 .flags = AV_PIX_FMT_FLAG_HWACCEL,
 },
+[AV_PIX_FMT_YUV444F32LE] = {
+.name = "yuv444f32le",
+.nb_components = 3,
+.log2_chroma_w = 0,
+.log2_chroma_h = 0,
+.comp = {
+{ 0, 4, 0, 0, 32, 3, 31, 1 },/* Y */
+{ 1, 4, 0, 0, 32, 3, 31, 1 },/* U */
+{ 2, 4, 0, 0, 32, 3, 31, 1 },/* V */
+},
+.flags = AV_PIX_FMT_FLAG_PLANAR | AV_PIX_FMT_FLAG_FLOAT,
+},
+[AV_PIX_FMT_YUV444F32BE] = {
+.name = "yuv444f32be",
+.nb_components = 3,
+.log2_chroma_w = 0,
+.log2_chroma_h = 0,
+.comp = {
+{ 0, 4, 0, 0, 32, 3, 31, 1 },/* Y */
+{ 1, 4, 0, 0, 32, 3, 31, 1 },/* U */
+{ 2, 4, 0, 0, 32, 3, 31, 1 },/* V */
+},
+.flags = AV_PIX_FMT_FLAG_BE | AV_PIX_FMT_FLAG_PLANAR | AV_PIX_FMT_FLAG_FLOAT,
+},
+[AV_PIX_FMT_YUVA444F32LE] = {
+.name = "yuva444f32le",
+.nb_components = 4,
+.log2_chroma_w = 0,
+.log2_chroma_h = 0,
+.comp = {
+{ 0, 4, 0, 0, 32, 3, 31, 1 },/* Y */
+{ 1, 4, 0, 0, 32, 3, 31, 1 },/* U */
+{ 2, 4, 0, 0, 32, 3, 31, 1 },/* V */
+{ 3, 4, 0, 0, 32, 3, 31, 1 },/* A */
+},
+.flags = AV_PIX_FMT_FLAG_PLANAR | AV_PIX_FMT_FLAG_FLOAT | AV_PIX_FMT_FLAG_ALPHA,
+},
+[AV_PIX_FMT_YUVA444F32BE] = {
+.name = "yuva444f32be",
+.nb_components = 4,
+.log2_chroma_w = 0,
+.log2_chroma_h = 0,
+.comp = {
+{ 0, 4, 0, 0, 32, 3, 31, 1 },/* Y */
+{ 1, 4, 0, 0, 32, 3, 31, 1 },/* U */
+{ 2, 4, 0, 0, 32, 3, 31, 1 },/* V */
+{ 3, 4, 0, 0, 32, 3, 31, 1 },/* A */
+},
+.flags = AV_PIX_FMT_FLAG_BE | AV_PIX_FMT_FLAG_PLANAR |
+ AV_PIX_FMT_FLAG_FLOAT | AV_PIX_FMT_FLAG_ALPHA,
+},
 };
 #if FF_API_PLUS1_MINUS1
 FF_ENABLE_DEPRECATION_WARNINGS
diff --git a/libavutil/pixdesc.h b/libavutil/pixdesc.h
index c3a6f27f49..294555260a 100644
--- a/libavutil/pixdesc.h
+++ b/libavutil/pixdesc.h
@@ -178,6 +178,11 @@ typedef struct AVPixFmtDescriptor {
 #define AV_PIX_FMT_FLAG_BAYER(1 << 8)
 
 /**
+ * The pixel format contains IEEE-754 single precision floating point values.
+ */
+#define AV_PIX_FMT_FLAG_FLOAT(1 << 10)
+
+/**
  * Return the number of bits per pixel used by the pixel format
  * described by pixdesc. Note that this is not the same as the number
  * of bits per sample.
diff --git a/libavutil/pixfmt.h b/libavutil/pixfmt.h
index e1e4074bee..d6a874da15 100644
--- a/libavutil/pixfmt.h
+++ b/libavutil/pixfmt.h
@@ -326,6 +326,11 @@ enum AVPixelFormat {
  */
 AV_PIX_FMT_D3D11,
 
+AV_PIX_FMT_YUV444F32LE,  ///< IEEE-754 single precision planar YUV 4:4:4,  96bpp, (1 Cr & Cb sample per 1x1 Y samples), little-endian
+AV_PIX_FMT_YUV444F32BE,  ///< IEEE-754 single precision planar YUV 4:4:4,  96bpp, (1 Cr & Cb sample per 1x1 Y samples), big-endian
+AV_PIX_FMT_YUVA444F32LE, ///< IEEE-754 single precision planar YUV 4:4:4, 128bpp, (1 Cr & Cb sample per 1x1 Y & A sample), little-endian
+AV_PIX_FMT_YUVA444F32BE, ///< IEEE-754 single precision planar YUV 4:4:4, 128bpp, (1 Cr & Cb sample per 1x1 Y & A sample), big-endian
+
 AV_PIX_FMT_NB ///< number of pixel formats, DO NOT USE THIS if you want to link with shared libav* because the number of formats might differ between versions
 };
 
@@ -389,6 +394,8 @@ enum AVPixelFormat {
 #define AV_PIX_FMT_BAYER_GBRG16 AV_PIX_FMT_NE(BAYER_GBRG16BE,
BAYER_GBRG16LE)
 #define AV_PIX_FMT_BAYER_GRBG16 AV_PIX_FMT_NE(BAYER_GRBG16BE,
BAYER_GRBG16LE)
 
+#define AV_PIX_FMT_YUV444F32  AV_PIX_FMT_NE(YUV444F32BE,  YUV444F32LE)
+#define AV_PIX_FMT_YUVA444F32 AV_PIX_FMT_NE(YUVA444F32BE, YUVA444F32LE)
 
 #define AV_PIX_FMT_YUVA420P9  AV_PIX_FMT_NE(YUVA420P9BE , YUVA420P9LE)
 #define AV_PIX_FMT_YUVA422P9  AV_PIX_FMT_NE(YUVA422P9BE , YUVA422P9LE)
diff --git a/libswscale/utils.c b/libswscale/utils.c
index 17c996734a..892b219049 100644
--- a/libswscale/utils.c
+++ b/libswscale/utils.c
@@ -254,6 +254,10 @@ static const FormatEntry format_entries[AV_PIX_FMT_NB] = {
 [AV_PIX_FMT_P010BE]  = { 1, 1 },
 [AV_PIX_FMT_P016LE] 
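
For illustration only, a minimal usage sketch (not part of the patch; it merely assumes the formats and the AV_PIX_FMT_FLAG_FLOAT flag added above are available) showing how client code could detect the new float formats through the descriptor API:

    #include <stdio.h>
    #include <libavutil/pixdesc.h>

    /* Sketch only: query the descriptor of the native-endian alias added by
     * this patch and test the new float flag. */
    int main(void)
    {
        const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(AV_PIX_FMT_YUV444F32);

        if (desc && (desc->flags & AV_PIX_FMT_FLAG_FLOAT))
            printf("%s stores IEEE-754 single precision components\n", desc->name);
        return 0;
    }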

Re: [FFmpeg-devel] [PATCH] fate: add vf_overlay test for main source with alpha channel

2017-07-20 Thread Nicolas George
Le duodi 2 thermidor, an CCXXV, Nicolas George a écrit :
>the attached patch.

Sorry.

Regards,

-- 
  Nicolas George
From e50bd258ac22d5758348eafa7203f264dd98f580 Mon Sep 17 00:00:00 2001
From: Nicolas George 
Date: Thu, 20 Jul 2017 17:35:44 +0200
Subject: [PATCH] lavfi/testsrc2: fix completely transparent alpha.

Signed-off-by: Nicolas George 
---
 libavfilter/vsrc_testsrc.c | 12 +---
 1 file changed, 9 insertions(+), 3 deletions(-)

diff --git a/libavfilter/vsrc_testsrc.c b/libavfilter/vsrc_testsrc.c
index c4a5ae3742..f94c3ed9dd 100644
--- a/libavfilter/vsrc_testsrc.c
+++ b/libavfilter/vsrc_testsrc.c
@@ -66,6 +66,9 @@ typedef struct TestSourceContext {
 /* only used by testsrc */
 int nb_decimals;
 
+/* only used by testsrc2 */
+int alpha;
+
 /* only used by color */
 FFDrawContext draw;
 FFDrawColor color;
@@ -685,6 +688,7 @@ AVFilter ff_vsrc_testsrc = {
 
 static const AVOption testsrc2_options[] = {
 COMMON_OPTIONS
+{ "alpha", "set global alpha (opacity)", OFFSET(alpha), AV_OPT_TYPE_INT, {.i64 = 255}, 0, 255, FLAGS },
 { NULL }
 };
 
@@ -735,6 +739,7 @@ static void test2_fill_picture(AVFilterContext *ctx, AVFrame *frame)
 {
 TestSourceContext *s = ctx->priv;
 FFDrawColor color;
+unsigned alpha = s->alpha << 24;
 
 /* colored background */
 {
@@ -746,7 +751,8 @@ static void test2_fill_picture(AVFilterContext *ctx, AVFrame *frame)
  x2 = ff_draw_round_to_sub(&s->draw, 0, 0, x2);
  set_color(s, &color, ((i & 1) ? 0xFF0000 : 0) |
                       ((i & 2) ? 0x00FF00 : 0) |
-                      ((i & 4) ? 0x0000FF : 0));
+                      ((i & 4) ? 0x0000FF : 0) |
+                      alpha);
  ff_fill_rectangle(&s->draw, &color, frame->data, frame->linesize,
                    x, 0, x2 - x, frame->height);
 x = x2;
@@ -763,7 +769,7 @@ static void test2_fill_picture(AVFilterContext *ctx, AVFrame *frame)
 g0 = av_rescale_q(s->pts, s->time_base, av_make_q(1, 128));
 for (x = 0; x < s->w; x += dx) {
 g = (av_rescale(x, 6 * 256, s->w) + g0) % (6 * 256);
-set_color(s, &color, color_gradient(g));
+set_color(s, &color, color_gradient(g) | alpha);
 y = y0 + av_rescale(x, s->h / 2, s->w);
 y %= 2 * (s->h - 16);
 if (y > s->h - 16)
@@ -785,7 +791,7 @@ static void test2_fill_picture(AVFilterContext *ctx, AVFrame *frame)
 int c, i;
 
 for (c = 0; c < 3; c++) {
-set_color(s, &color, 0xBBBBBB ^ (0xFF << (c << 3)));
+set_color(s, &color, (0xBBBBBB ^ (0xFF << (c << 3))) | alpha);
 pos = av_rescale_q(s->pts, s->time_base, av_make_q(64 >> (c << 1), cycle)) % cycle;
 xh = pos < 1 * l ? pos :
  pos < 2 * l ? l :
-- 
2.13.2



signature.asc
Description: Digital signature
___
ffmpeg-devel mailing list
ffmpeg-devel@ffmpeg.org
http://ffmpeg.org/mailman/listinfo/ffmpeg-devel


Re: [FFmpeg-devel] [PATCH] fate: add vf_overlay test for main source with alpha channel

2017-07-20 Thread Nicolas George
Le duodi 2 thermidor, an CCXXV, Nicolas George a écrit :
> testsrc2 does not handle alpha, though, so most of the video is
> transparent. I will have a look to see if it can be improved.

Have a look at the attached patch.

(Not complete, but working.)

testsrc2=s=1024x768:alpha=255[a];testsrc2=alpha=128[b];[a][b]overlay=x=100:y=50

gives the expected result.

Regards,

-- 
  Nicolas George


signature.asc
Description: Digital signature
___
ffmpeg-devel mailing list
ffmpeg-devel@ffmpeg.org
http://ffmpeg.org/mailman/listinfo/ffmpeg-devel


Re: [FFmpeg-devel] [PATCH] fate: add vf_overlay test for main source with alpha channel

2017-07-20 Thread Peter Große
On Thu, 20 Jul 2017 17:19:36 +0200
Tobias Rapp  wrote:

> > When I re-add --disable-optimizations --enable-mmx --disable-stripping to my
> > configure command line, the test succeeds.
> >
> > Any further comments to the patch and test case?  
> 
> Maybe try adding -sws_flags +accurate_rnd+bitexact to CMD?

I tried that, but that didn't help.

Regards
Peter


pgploqABGDVDY.pgp
Description: OpenPGP digital signature
___
ffmpeg-devel mailing list
ffmpeg-devel@ffmpeg.org
http://ffmpeg.org/mailman/listinfo/ffmpeg-devel


Re: [FFmpeg-devel] [PATCH] fate: add vf_overlay test for main source with alpha channel

2017-07-20 Thread Tobias Rapp

On 20.07.2017 17:06, Peter Große wrote:

On Wed, 19 Jul 2017 21:00:49 +0200
Peter Große  wrote:


I was able to reproduce your checksums only after removing

  --disable-optimizations --disable-mmx --disable-stripping

from my configure command. I thought optimizations should have no
impact on the result?!


After some further tests, it seems swscale (used as the auto_scaler in the
test) produces slightly different output depending on whether MMX is enabled or
not.

When I re-add --disable-optimizations --enable-mmx --disable-stripping to my
configure command line, the test succeeds.

Any further comments to the patch and test case?


Maybe try adding -sws_flags +accurate_rnd+bitexact to CMD?
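
For reference, a small sketch (illustrative only; the sizes and pixel formats are made up) of the libswscale flags that -sws_flags +accurate_rnd+bitexact corresponds to, which request accurately rounded, bit-exact scaler output regardless of the SIMD code path:

    #include <libswscale/swscale.h>

    /* Sketch only: build a scaler with the same flags the FATE CMD would
     * request via "-sws_flags +accurate_rnd+bitexact". */
    static struct SwsContext *make_bitexact_scaler(int src_w, int src_h,
                                                   int dst_w, int dst_h)
    {
        return sws_getContext(src_w, src_h, AV_PIX_FMT_RGB24,
                              dst_w, dst_h, AV_PIX_FMT_YUV420P,
                              SWS_BICUBIC | SWS_ACCURATE_RND | SWS_BITEXACT,
                              NULL, NULL, NULL);
    }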

Regards,
Tobias

___
ffmpeg-devel mailing list
ffmpeg-devel@ffmpeg.org
http://ffmpeg.org/mailman/listinfo/ffmpeg-devel


Re: [FFmpeg-devel] [PATCH] ffprobe: Print color properties from show_frames

2017-07-20 Thread Tobias Rapp

On 20.07.2017 16:19, Vittorio Giovara wrote:

On 20.07.2017 14:38, Vittorio Giovara wrote:

---
 ffprobe.c | 6 ++
 1 file changed, 6 insertions(+)

diff --git a/ffprobe.c b/ffprobe.c
index f6d9be0df9..412e2dadab 100644
--- a/ffprobe.c
+++ b/ffprobe.c
@@ -2105,6 +2105,12 @@ static void show_frame(WriterContext *w, AVFrame *frame, 
AVStream *stream,
 print_int("interlaced_frame",   frame->interlaced_frame);
 print_int("top_field_first",frame->top_field_first);
 print_int("repeat_pict",frame->repeat_pict);
+
+print_str("color_range", av_color_range_name(frame->color_range));
+print_str("color_space", av_color_space_name(frame->colorspace));
+print_str("color_primaries", av_color_primaries_name(frame->color_primaries));
+print_str("color_transfer",  av_color_transfer_name(frame->color_trc));
+print_str("chroma_location", av_chroma_location_name(frame->chroma_location));


I guess this should look like

if (frame->... != ..._UNSPECIFIED)
print_str(...);
else
print_str_opt(...);

see the similar code lines handling color properties on stream level (~
line #2475).


Should it? That approach effectively hides these parameters from the
output if unknown, and I often want to know as much as possible when
hunting down parameters with read_frames (even that the field is just
"unknown" and not missing). Also, if these fields are always output, it
simplifies parsing them quite a bit, don't you think? I'd much rather
change the stream-level code to output more information instead.



It depends on the writer whether print_str_opt actually writes to the 
output or not. For example when using CSV optional data is always 
written, when using XML it is skipped.


In my humble opinion for the XML output format it would only increase 
data size and make the parser slightly _more_ complex in case optional 
fields would be written (as the reader would have to check if the field 
value equals "unknown" or "N/A" strings to handle the value-not-valid 
situation). So I see no benefit in replacing print_str_opt with 
print_str globally.



 break;

 case AVMEDIA_TYPE_AUDIO:



The schema file at doc/ffprobe.xsd should be updated to reflect the new
fields.

Also I assume that some FATE references are changed by this patch?


Right, I'll update them in the next iteration, thanks for noticing.



Regards,
Tobias

___
ffmpeg-devel mailing list
ffmpeg-devel@ffmpeg.org
http://ffmpeg.org/mailman/listinfo/ffmpeg-devel


Re: [FFmpeg-devel] [PATCH] fate: add vf_overlay test for main source with alpha channel

2017-07-20 Thread Nicolas George
Le duodi 2 thermidor, an CCXXV, Peter Große a écrit :
> After some further tests, it seems swscale (used as the auto_scaler in the
> test) produces slightly different output depending on whether MMX is enabled 
> or
> not.
> 
> When I re-add --disable-optimizations --enable-mmx --disable-stripping to my
> configure command line, the test succeeds.
> 
> Any further comments to the patch and test case?

If you use testsrc2 instead of file inputs, it produces streams with the
requested colorspace directly.

testsrc2 does not handle alpha, though, so most of the video is
transparent. I will have a look to see if it can be improved.

Regards,

-- 
  Nicolas George


signature.asc
Description: Digital signature
___
ffmpeg-devel mailing list
ffmpeg-devel@ffmpeg.org
http://ffmpeg.org/mailman/listinfo/ffmpeg-devel


Re: [FFmpeg-devel] [PATCH] fate: add vf_overlay test for main source with alpha channel

2017-07-20 Thread Peter Große
On Wed, 19 Jul 2017 21:00:49 +0200
Peter Große  wrote:

> I was able to reproduce your checksums only after removing
> 
>   --disable-optimizations --disable-mmx --disable-stripping
> 
> from my configure command. I thought optimizations should have no
> impact on the result?!

After some further tests, it seems swscale (used as the auto_scaler in the
test) produces slightly different output depending on whether MMX is enabled or
not.

When I re-add --disable-optimizations --enable-mmx --disable-stripping to my
configure command line, the test succeeds.

Any further comments to the patch and test case?

Regards
Peter
___
ffmpeg-devel mailing list
ffmpeg-devel@ffmpeg.org
http://ffmpeg.org/mailman/listinfo/ffmpeg-devel


[FFmpeg-devel] [PATCH] fix http 302 not compressed request , Ticket #6404

2017-07-20 Thread satbaby
 

This fixes the following redirect error for me:

[http @ 0x65ac50] inflate return value: -3, unknown compression method

which appears after:

[http @ 0x65ac50] header='HTTP/1.1 302 Moved Temporarily'

 From ed5195c98bafcaebbe9926b7afd1fac375f3a971 Mon Sep 17 00:00:00 2001
From: Jacek Jendrzej 
Date: Thu, 20 Jul 2017 13:46:46 +0200
Subject: [PATCH] reset compressed header flag, fix http 302 request

Signed-off-by: Jacek Jendrzej 
---
 libavformat/http.c | 1 +
 1 file changed, 1 insertion(+)

diff --git a/libavformat/http.c b/libavformat/http.c
index 30890bb7aa..f25977ab1f 100644
--- a/libavformat/http.c
+++ b/libavformat/http.c
@@ -1248,6 +1248,7 @@ static int http_connect(URLContext *h, const char *path, const char *local_path,
 s->willclose= 0;
 s->end_chunked_post = 0;
 s->end_header   = 0;
+s->compressed   = 0;
 if (post && !s->post_data && !send_expect_100) {
 /* Pretend that it did work. We didn't read any header yet, since
  * we've still to send the POST data, but the code calling this
-- 
2.13.3

___
ffmpeg-devel mailing list
ffmpeg-devel@ffmpeg.org
http://ffmpeg.org/mailman/listinfo/ffmpeg-devel


Re: [FFmpeg-devel] [PATCH] ffprobe: Print color properties from show_frames

2017-07-20 Thread Vittorio Giovara
>On 20.07.2017 14:38, Vittorio Giovara wrote:
>> ---
>>  ffprobe.c | 6 ++
>>  1 file changed, 6 insertions(+)
>>
>> diff --git a/ffprobe.c b/ffprobe.c
>> index f6d9be0df9..412e2dadab 100644
>> --- a/ffprobe.c
>> +++ b/ffprobe.c
>> @@ -2105,6 +2105,12 @@ static void show_frame(WriterContext *w, AVFrame 
>> *frame, AVStream *stream,
>>  print_int("interlaced_frame",   frame->interlaced_frame);
>>  print_int("top_field_first",frame->top_field_first);
>>  print_int("repeat_pict",frame->repeat_pict);
>> +
>> +print_str("color_range", av_color_range_name(frame->color_range));
>> +print_str("color_space", av_color_space_name(frame->colorspace));
>> +print_str("color_primaries", av_color_primaries_name(frame->color_primaries));
>> +print_str("color_transfer",  av_color_transfer_name(frame->color_trc));
>> +print_str("chroma_location", av_chroma_location_name(frame->chroma_location));
>
>I guess this should look like
>
>if (frame->... != ..._UNSPECIFIED)
> print_str(...);
>else
> print_str_opt(...);
>
>see the similar code lines handling color properties on stream level (~
>line #2475).

Should it? That approach effectively hides these parameters from the
output if unknown, and I often want to know as much as possible when
hunting down parameters with read_frames (even that the field is just
"unknown" and not missing). Also, if these fields are always output, it
simplifies parsing them quite a bit, don't you think? I'd much rather
change the stream-level code to output more information instead.

>>  break;
>>
>>  case AVMEDIA_TYPE_AUDIO:
>>
>
>The schema file at doc/ffprobe.xsd should be updated to reflect the new
>fields.
>
>Also I assume that some FATE references are changed by this patch?

Right, I'll update them in the next iteration, thanks for noticing.
-- 
Vittorio
___
ffmpeg-devel mailing list
ffmpeg-devel@ffmpeg.org
http://ffmpeg.org/mailman/listinfo/ffmpeg-devel


Re: [FFmpeg-devel] [PATCH] ffprobe: Print color properties from show_frames

2017-07-20 Thread Tobias Rapp

On 20.07.2017 14:38, Vittorio Giovara wrote:

---
 ffprobe.c | 6 ++
 1 file changed, 6 insertions(+)

diff --git a/ffprobe.c b/ffprobe.c
index f6d9be0df9..412e2dadab 100644
--- a/ffprobe.c
+++ b/ffprobe.c
@@ -2105,6 +2105,12 @@ static void show_frame(WriterContext *w, AVFrame *frame, 
AVStream *stream,
 print_int("interlaced_frame",   frame->interlaced_frame);
 print_int("top_field_first",frame->top_field_first);
 print_int("repeat_pict",frame->repeat_pict);
+
+print_str("color_range", av_color_range_name(frame->color_range));
+print_str("color_space", av_color_space_name(frame->colorspace));
+print_str("color_primaries", av_color_primaries_name(frame->color_primaries));
+print_str("color_transfer",  av_color_transfer_name(frame->color_trc));
+print_str("chroma_location", av_chroma_location_name(frame->chroma_location));


I guess this should look like

if (frame->... != ..._UNSPECIFIED)
print_str(...);
else
print_str_opt(...);

see the similar code lines handling color properties on stream level (~ 
line #2475).



 break;

 case AVMEDIA_TYPE_AUDIO:



The schema file at doc/ffprobe.xsd should be updated to reflect the new 
fields.


Also I assume that some FATE references are changed by this patch?

Best regards,
Tobias


___
ffmpeg-devel mailing list
ffmpeg-devel@ffmpeg.org
http://ffmpeg.org/mailman/listinfo/ffmpeg-devel


[FFmpeg-devel] [PATCH] hevc: Make sure to update the current frame transfer characteristic

2017-07-20 Thread Vittorio Giovara
Otherwise the first decoded frame will still be tagged with the
original transfer instead of the alternative one.

Signed-off-by: Vittorio Giovara 
---
 libavcodec/hevcdec.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/libavcodec/hevcdec.c b/libavcodec/hevcdec.c
index 55f51211c3..ba1863381f 100644
--- a/libavcodec/hevcdec.c
+++ b/libavcodec/hevcdec.c
@@ -2728,7 +2728,7 @@ static int set_side_data(HEVCContext *s)
 if (s->sei.alternative_transfer.present &&
     av_color_transfer_name(s->sei.alternative_transfer.preferred_transfer_characteristics) &&
     s->sei.alternative_transfer.preferred_transfer_characteristics != AVCOL_TRC_UNSPECIFIED) {
-s->avctx->color_trc = s->sei.alternative_transfer.preferred_transfer_characteristics;
+s->avctx->color_trc = out->color_trc = s->sei.alternative_transfer.preferred_transfer_characteristics;
 }
 
 return 0;
-- 
2.13.2

___
ffmpeg-devel mailing list
ffmpeg-devel@ffmpeg.org
http://ffmpeg.org/mailman/listinfo/ffmpeg-devel


[FFmpeg-devel] [PATCH] ffprobe: Print color properties from show_frames

2017-07-20 Thread Vittorio Giovara
---
 ffprobe.c | 6 ++
 1 file changed, 6 insertions(+)

diff --git a/ffprobe.c b/ffprobe.c
index f6d9be0df9..412e2dadab 100644
--- a/ffprobe.c
+++ b/ffprobe.c
@@ -2105,6 +2105,12 @@ static void show_frame(WriterContext *w, AVFrame *frame, 
AVStream *stream,
 print_int("interlaced_frame",   frame->interlaced_frame);
 print_int("top_field_first",frame->top_field_first);
 print_int("repeat_pict",frame->repeat_pict);
+
+print_str("color_range", av_color_range_name(frame->color_range));
+print_str("color_space", av_color_space_name(frame->colorspace));
+print_str("color_primaries", av_color_primaries_name(frame->color_primaries));
+print_str("color_transfer",  av_color_transfer_name(frame->color_trc));
+print_str("chroma_location", av_chroma_location_name(frame->chroma_location));
 break;
 
 case AVMEDIA_TYPE_AUDIO:
-- 
2.13.2

___
ffmpeg-devel mailing list
ffmpeg-devel@ffmpeg.org
http://ffmpeg.org/mailman/listinfo/ffmpeg-devel


Re: [FFmpeg-devel] [mov] Fix trampling of ctts during seeks when sidx support is enabled.

2017-07-20 Thread Michael Niedermayer
Hi

On Wed, Jul 19, 2017 at 07:30:01PM -0700, Dale Curtis wrote:
> Thanks will take a look. Is this test not part of fate? make fate passed

No, ideally we should have tests in fate for all (fixed) tickets,
but in reality most tickets lack a corresponding test.
I tried both in Outreachy and in GSoC to improve this situation
with student projects, but both only moved this forward by a small
step. It's a large amount of work to create robust, portable and
practical tests for "all" tickets and everything else.
The way to get this actually done would be to pay a developer to
create tests for "all" tickets in fate. I believe Carl would be the
ideal one to do this work, as he has tested and kept track of all
our tickets for a very long time.
I suggested a while ago to someone at Google that funding such a
project would make sense, but IIRC I never heard back.
If some company would fund something like this, I believe it would be
very useful in the long run for code quality.



> for me. The attached patch fixes this; the issue was that the index entries
> are 1 to 1 with ctts values. When samples are added without ctts entries
> we'd just initialize a single ctts entry with a count of 5. This left a gap
> in the ctts table; the fix is to use only 1-count entries when this case is
> hit.
> 
> Note: This made me realize the presence of a ctts box and a trun box with
> ctts samples has always been broken. If the ctts box comes second it'll
> wipe the trun's generated table, but if the trun box comes after the ctts
> box it will try to insert samples at incorrect positions. Prior to my patch
> they would be looked up at incorrect positions, so there shouldn't be any
> new bad behavior here.
> 
> - dale

[...]
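
To make the quoted description above concrete, a rough sketch of the "1-count entries" idea (all identifiers are illustrative only, not the actual mov.c API):

    /* Illustrative sketch only: when a trun box carries no per-sample cts
     * offsets, append one ctts entry per sample instead of a single entry
     * covering N samples, so index entries and ctts entries stay strictly
     * 1:1 and a seek cannot land in a gap of the ctts table.
     * The caller is assumed to have allocated enough entries. */
    typedef struct SketchCtts { unsigned count; int duration; } SketchCtts;

    static void append_one_count_entries(SketchCtts *ctts_data, unsigned *ctts_count,
                                         unsigned sample_count)
    {
        unsigned i;
        for (i = 0; i < sample_count; i++) {
            ctts_data[*ctts_count].count    = 1;  /* one sample per entry */
            ctts_data[*ctts_count].duration = 0;  /* no composition offset */
            (*ctts_count)++;
        }
    }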

-- 
Michael GnuPG fingerprint: 9FF2128B147EF6730BADF133611EC787040B0FAB

Opposition brings concord. Out of discord comes the fairest harmony.
-- Heraclitus


signature.asc
Description: Digital signature
___
ffmpeg-devel mailing list
ffmpeg-devel@ffmpeg.org
http://ffmpeg.org/mailman/listinfo/ffmpeg-devel


Re: [FFmpeg-devel] [PATCH] vf_drawtext: support to set glyph spacing while drawing text. ./ffmpeg -i input -vf drawtext="glyph_spacing=10:textfile=1.txt:fontfile=demo.ttf" -f flv 1.flv could set glyph

2017-07-20 Thread Moritz Barsnick
On Thu, Jul 20, 2017 at 17:46:25 +0800, efren yang wrote:

> Subject: [FFmpeg-devel] [PATCH] vf_drawtext: support to set glyph spacing 
> while drawing text. ./ffmpeg -i input -vf
>drawtext="glyph_spacing=10:textfile=1.txt:fontfile=demo.ttf" -f flv 
> 1.flv could set glyph spacing 10 pixel.

As Nicolas wrote, this needs to be fixed.

a) Please prefix "libavfilter/vf_drawtext" or "lavfi/vf_drawtext".
b) There needs to be an empty line in the commit message before the
following text, otherwise, everything gets compressed into one line as
above.
c) The example is probably too simple to be worth mentioning.

Please also add the requested documentation of the filter option to
doc/filters.texi.

Your patch is on top of your other patch. Don't do that, the other one
wasn't accepted. Always provide a patch against master HEAD. (You
should locally squash your two commits into one, e.g. on another new
branch, and submit that squashed commit.)

> -int word_spacing;   ///< word spacing in pixels
> +int glyph_spacing;   ///< glyph spacing in pixels
   ^
Your renaming shifted the whitespace/indentation.

>  {"line_spacing",  "set line spacing in pixels", OFFSET(line_spacing),   
> AV_OPT_TYPE_INT,{.i64=0}, INT_MIN,  INT_MAX,FLAGS},
> -{"word_spacing",  "set word spacing in pixels", OFFSET(word_spacing),   
> AV_OPT_TYPE_INT,{ .i64 = 0 }, INT_MIN,  INT_MAX,FLAGS },
> +{"glyph_spacing",  "set glyph spacing in pixels", OFFSET(glyph_spacing), 
>   AV_OPT_TYPE_INT,{ .i64 = 0 }, INT_MIN,  INT_MAX,FLAGS },
   
^  ^
You lost alignment of the parameters, and your bracket alignment might
as well correspond to that of the line "line_spacing".

Moritz
___
ffmpeg-devel mailing list
ffmpeg-devel@ffmpeg.org
http://ffmpeg.org/mailman/listinfo/ffmpeg-devel


[FFmpeg-devel] [PATCH] avformat/hlsenc: fix hls fmp4 extention name bug

2017-07-20 Thread Steven Liu
ticket-id: #6541
When using the HLS fmp4 muxer, the segment extension name is not .m4s;
this patch fixes it.

Signed-off-by: Steven Liu 
---
 libavformat/hlsenc.c | 18 +-
 1 file changed, 13 insertions(+), 5 deletions(-)

diff --git a/libavformat/hlsenc.c b/libavformat/hlsenc.c
index 8a233270b5..911f6068c2 100644
--- a/libavformat/hlsenc.c
+++ b/libavformat/hlsenc.c
@@ -1286,14 +1286,19 @@ fail:
 return err;
 }
 
-static const char * get_default_pattern_localtime_fmt(void)
+static const char * get_default_pattern_localtime_fmt(AVFormatContext *s)
 {
 char b[21];
 time_t t = time(NULL);
 struct tm *p, tmbuf;
+HLSContext *hls = s->priv_data;
+
 p = localtime_r(&t, &tmbuf);
 // no %s support when strftime returned error or left format string unchanged
 // also no %s support on MSVC, which invokes the invalid parameter handler on unsupported format strings, instead of returning an error
+if (hls->segment_type == SEGMENT_TYPE_FMP4) {
+return (HAVE_LIBC_MSVCRT || !strftime(b, sizeof(b), "%s", p) || !strcmp(b, "%s")) ? "-%Y%m%d%H%M%S.m4s" : "-%s.m4s";
+}
 return (HAVE_LIBC_MSVCRT || !strftime(b, sizeof(b), "%s", p) || !strcmp(b, "%s")) ? "-%Y%m%d%H%M%S.ts" : "-%s.ts";
 }
 
@@ -1303,16 +1308,19 @@ static int hls_write_header(AVFormatContext *s)
 int ret, i;
 char *p;
 const char *pattern = "%d.ts";
-const char *pattern_localtime_fmt = get_default_pattern_localtime_fmt();
+const char *pattern_localtime_fmt = get_default_pattern_localtime_fmt(s);
 const char *vtt_pattern = "%d.vtt";
 AVDictionary *options = NULL;
 int byterange_mode = (hls->flags & HLS_SINGLE_FILE) || (hls->max_seg_size > 0);
 int basename_size;
 int vtt_basename_size;
 
-if (hls->segment_type == SEGMENT_TYPE_FMP4 && byterange_mode) {
-av_log(s, AV_LOG_WARNING, "Have not support fmp4 byterange mode yet now\n");
-return AVERROR_PATCHWELCOME;
+if (hls->segment_type == SEGMENT_TYPE_FMP4) {
+if (byterange_mode) {
+av_log(s, AV_LOG_WARNING, "Have not support fmp4 byterange mode yet now\n");
+return AVERROR_PATCHWELCOME;
+}
+pattern = "%d.m4s";
 }
 if ((hls->start_sequence_source_type == HLS_START_SEQUENCE_AS_SECONDS_SINCE_EPOCH) ||
 (hls->start_sequence_source_type == HLS_START_SEQUENCE_AS_FORMATTED_DATETIME)) {
-- 
2.11.0 (Apple Git-81)



___
ffmpeg-devel mailing list
ffmpeg-devel@ffmpeg.org
http://ffmpeg.org/mailman/listinfo/ffmpeg-devel


[FFmpeg-devel] [PATCH] vf_drawtext: support to set glyph spacing while drawing text. ./ffmpeg -i input -vf drawtext="glyph_spacing=10:textfile=1.txt:fontfile=demo.ttf" -f flv 1.flv could set glyph spa

2017-07-20 Thread efren yang
Signed-off-by: efren yang 
---
 libavfilter/vf_drawtext.c | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/libavfilter/vf_drawtext.c b/libavfilter/vf_drawtext.c
index 137ae5891e..c96c5a 100644
--- a/libavfilter/vf_drawtext.c
+++ b/libavfilter/vf_drawtext.c
@@ -162,7 +162,7 @@ typedef struct DrawTextContext {
 unsigned int default_fontsize;  ///< default font size to use
 
 int line_spacing;   ///< lines spacing in pixels
-int word_spacing;   ///< word spacing in pixels
+int glyph_spacing;   ///< glyph spacing in pixels
 short int draw_box; ///< draw box around text - true or false
 int boxborderw; ///< box border width
 int use_kerning;///< font kerning is used - true/false
@@ -215,7 +215,7 @@ static const AVOption drawtext_options[]= {
 {"box", "set box",  OFFSET(draw_box),   
AV_OPT_TYPE_BOOL,   {.i64=0}, 0,1   , FLAGS},
 {"boxborderw",  "set box border width", OFFSET(boxborderw), 
AV_OPT_TYPE_INT,{.i64=0}, INT_MIN,  INT_MAX , FLAGS},
 {"line_spacing",  "set line spacing in pixels", OFFSET(line_spacing),   
AV_OPT_TYPE_INT,{.i64=0}, INT_MIN,  INT_MAX,FLAGS},
-{"word_spacing",  "set word spacing in pixels", OFFSET(word_spacing),   
AV_OPT_TYPE_INT,{ .i64 = 0 }, INT_MIN,  INT_MAX,FLAGS },
+{"glyph_spacing",  "set glyph spacing in pixels", OFFSET(glyph_spacing),   
AV_OPT_TYPE_INT,{ .i64 = 0 }, INT_MIN,  INT_MAX,FLAGS },
 {"fontsize","set font size",OFFSET(fontsize_expr),  
AV_OPT_TYPE_STRING, {.str=NULL},  CHAR_MIN, CHAR_MAX , FLAGS},
 {"x",   "set x expression", OFFSET(x_expr), 
AV_OPT_TYPE_STRING, {.str="0"},   CHAR_MIN, CHAR_MAX, FLAGS},
 {"y",   "set y expression", OFFSET(y_expr), 
AV_OPT_TYPE_STRING, {.str="0"},   CHAR_MIN, CHAR_MAX, FLAGS},
@@ -1376,7 +1376,7 @@ static int draw_text(AVFilterContext *ctx, AVFrame *frame,
 s->positions[i].y = y - glyph->bitmap_top + y_max;
 if (code == '\t') x  = (x / s->tabsize + 1)*s->tabsize;
 else  x += glyph->advance;
-x += s->word_spacing;
+x += s->glyph_spacing;
 }
 
 max_text_line_w = FFMAX(x, max_text_line_w);
-- 
2.13.0.windows.1



___
ffmpeg-devel mailing list
ffmpeg-devel@ffmpeg.org
http://ffmpeg.org/mailman/listinfo/ffmpeg-devel


[FFmpeg-devel] [PATCH] fate: add tests for some video source filters

2017-07-20 Thread Tobias Rapp
Adds FATE tests for the previously untested allrgb, allyuv, rgbtestsrc,
smptebars, smptehdbars and yuvtestsrc filters.

Signed-off-by: Tobias Rapp 
---
 tests/fate/filter-video.mak| 21 +
 tests/ref/fate/filter-allrgb   | 10 ++
 tests/ref/fate/filter-allyuv   | 10 ++
 tests/ref/fate/filter-rgbtestsrc   | 10 ++
 tests/ref/fate/filter-smptebars| 10 ++
 tests/ref/fate/filter-smptehdbars  | 10 ++
 tests/ref/fate/filter-yuvtestsrc-yuv444p   | 10 ++
 tests/ref/fate/filter-yuvtestsrc-yuv444p12 | 10 ++
 8 files changed, 91 insertions(+)
 create mode 100644 tests/ref/fate/filter-allrgb
 create mode 100644 tests/ref/fate/filter-allyuv
 create mode 100644 tests/ref/fate/filter-rgbtestsrc
 create mode 100644 tests/ref/fate/filter-smptebars
 create mode 100644 tests/ref/fate/filter-smptehdbars
 create mode 100644 tests/ref/fate/filter-yuvtestsrc-yuv444p
 create mode 100644 tests/ref/fate/filter-yuvtestsrc-yuv444p12

diff --git a/tests/fate/filter-video.mak b/tests/fate/filter-video.mak
index 53fc7a6..d31a9f1 100644
--- a/tests/fate/filter-video.mak
+++ b/tests/fate/filter-video.mak
@@ -80,6 +80,27 @@ fate-filter-testsrc2-yuv444p: CMD = framecrc -lavfi 
testsrc2=r=7:d=10 -pix_fmt y
 FATE_FILTER-$(call ALLYES, TESTSRC2_FILTER) += fate-filter-testsrc2-rgb24
 fate-filter-testsrc2-rgb24: CMD = framecrc -lavfi testsrc2=r=7:d=10 -pix_fmt 
rgb24
 
+FATE_FILTER-$(call ALLYES, LAVFI_INDEV ALLRGB_FILTER) += fate-filter-allrgb
+fate-filter-allrgb: CMD = framecrc -lavfi allrgb=rate=5:duration=1 -pix_fmt rgb24
+
+FATE_FILTER-$(call ALLYES, LAVFI_INDEV ALLYUV_FILTER) += fate-filter-allyuv
+fate-filter-allyuv: CMD = framecrc -lavfi allyuv=rate=5:duration=1 -pix_fmt yuv444p
+
+FATE_FILTER-$(call ALLYES, LAVFI_INDEV RGBTESTSRC_FILTER) += fate-filter-rgbtestsrc
+fate-filter-rgbtestsrc: CMD = framecrc -lavfi rgbtestsrc=rate=5:duration=1 -pix_fmt rgb24
+
+FATE_FILTER-$(call ALLYES, LAVFI_INDEV SMPTEBARS_FILTER) += fate-filter-smptebars
+fate-filter-smptebars: CMD = framecrc -lavfi smptebars=rate=5:duration=1 -pix_fmt yuv420p
+
+FATE_FILTER-$(call ALLYES, LAVFI_INDEV SMPTEHDBARS_FILTER) += fate-filter-smptehdbars
+fate-filter-smptehdbars: CMD = framecrc -lavfi smptehdbars=rate=5:duration=1 -pix_fmt yuv444p
+
+FATE_FILTER-$(call ALLYES, LAVFI_INDEV YUVTESTSRC_FILTER) += fate-filter-yuvtestsrc-yuv444p
+fate-filter-yuvtestsrc-yuv444p: CMD = framecrc -lavfi yuvtestsrc=rate=5:duration=1 -pix_fmt yuv444p
+
+FATE_FILTER-$(call ALLYES, LAVFI_INDEV YUVTESTSRC_FILTER) += fate-filter-yuvtestsrc-yuv444p12
+fate-filter-yuvtestsrc-yuv444p12: CMD = framecrc -lavfi yuvtestsrc=rate=5:duration=1 -pix_fmt yuv444p12
+
 FATE_FILTER-$(call ALLYES, AVDEVICE TESTSRC_FILTER FORMAT_FILTER CONCAT_FILTER 
SCALE_FILTER) += fate-filter-lavd-scalenorm
 fate-filter-lavd-scalenorm: tests/data/filtergraphs/scalenorm
 fate-filter-lavd-scalenorm: CMD = framecrc -f lavfi -graph_file 
$(TARGET_PATH)/tests/data/filtergraphs/scalenorm -i dummy
diff --git a/tests/ref/fate/filter-allrgb b/tests/ref/fate/filter-allrgb
new file mode 100644
index 000..b54efc5
--- /dev/null
+++ b/tests/ref/fate/filter-allrgb
@@ -0,0 +1,10 @@
+#tb 0: 1/5
+#media_type 0: video
+#codec_id 0: rawvideo
+#dimensions 0: 4096x4096
+#sar 0: 1/1
+0,  0,  0,1, 50331648, 0x9cc26aca
+0,  1,  1,1, 50331648, 0x9cc26aca
+0,  2,  2,1, 50331648, 0x9cc26aca
+0,  3,  3,1, 50331648, 0x9cc26aca
+0,  4,  4,1, 50331648, 0x9cc26aca
diff --git a/tests/ref/fate/filter-allyuv b/tests/ref/fate/filter-allyuv
new file mode 100644
index 000..477992f
--- /dev/null
+++ b/tests/ref/fate/filter-allyuv
@@ -0,0 +1,10 @@
+#tb 0: 1/5
+#media_type 0: video
+#codec_id 0: rawvideo
+#dimensions 0: 4096x4096
+#sar 0: 1/1
+0,  0,  0,1, 50331648, 0x70b16aca
+0,  1,  1,1, 50331648, 0x70b16aca
+0,  2,  2,1, 50331648, 0x70b16aca
+0,  3,  3,1, 50331648, 0x70b16aca
+0,  4,  4,1, 50331648, 0x70b16aca
diff --git a/tests/ref/fate/filter-rgbtestsrc b/tests/ref/fate/filter-rgbtestsrc
new file mode 100644
index 000..e18d364
--- /dev/null
+++ b/tests/ref/fate/filter-rgbtestsrc
@@ -0,0 +1,10 @@
+#tb 0: 1/5
+#media_type 0: video
+#codec_id 0: rawvideo
+#dimensions 0: 320x240
+#sar 0: 1/1
+0,  0,  0,1,   230400, 0x8c0018bb
+0,  1,  1,1,   230400, 0x8c0018bb
+0,  2,  2,1,   230400, 0x8c0018bb
+0,  3,  3,1,   230400, 0x8c0018bb
+0,  4,  4,1,   230400, 0x8c0018bb
diff --git a/tests/ref/fate/filter-smptebars b/tests/ref/fate/filter-smptebars
new file mode 100644
index 000..2242aef
--- /dev/null
+++ 

[FFmpeg-devel] 答复: [PATCH] vf_drawtext: support to set word spacing while drawing text. ./ffmpeg -i input -vf drawtext="word_spacing=10:textfile=1.txt:fontfile=demo.ttf" -f flv 1.flv could set word s

2017-07-20 Thread efren_yang
Hi, devel

Thanks for the reply.  As you noted, it is applied to every glyph. Some
languages, for example Chinese and Japanese, have no spaces between words,
so a sentence looks very compact. Being able to set the glyph spacing makes
the option useful in more scenarios.  My wording was not accurate, so I have
renamed the option accordingly.

From d9b680e8e90236c5807f1df5e258d8632d3c9619 Mon Sep 17 00:00:00 2001
From: efren yang 
Date: Thu, 20 Jul 2017 16:37:06 +0800
Subject: [PATCH] vf_drawtext: support to set glyph spacing while drawing text.
 ./ffmpeg -i input -vf
 drawtext="glyph_spacing=10:textfile=1.txt:fontfile=demo.ttf" -f flv 1.flv
 could set glyph spacing 10 pixel.

Signed-off-by: efren yang 
---
 libavfilter/vf_drawtext.c | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/libavfilter/vf_drawtext.c b/libavfilter/vf_drawtext.c
index 137ae5891e..c96c5a 100644
--- a/libavfilter/vf_drawtext.c
+++ b/libavfilter/vf_drawtext.c
@@ -162,7 +162,7 @@ typedef struct DrawTextContext {
 unsigned int default_fontsize;  ///< default font size to use
 
 int line_spacing;   ///< lines spacing in pixels
-int word_spacing;   ///< word spacing in pixels
+int glyph_spacing;   ///< glyph spacing in pixels
 short int draw_box; ///< draw box around text - true or false
 int boxborderw; ///< box border width
 int use_kerning;///< font kerning is used - true/false
@@ -215,7 +215,7 @@ static const AVOption drawtext_options[]= {
 {"box", "set box",  OFFSET(draw_box),   
AV_OPT_TYPE_BOOL,   {.i64=0}, 0,1   , FLAGS},
 {"boxborderw",  "set box border width", OFFSET(boxborderw), 
AV_OPT_TYPE_INT,{.i64=0}, INT_MIN,  INT_MAX , FLAGS},
 {"line_spacing",  "set line spacing in pixels", OFFSET(line_spacing),   
AV_OPT_TYPE_INT,{.i64=0}, INT_MIN,  INT_MAX,FLAGS},
-{"word_spacing",  "set word spacing in pixels", OFFSET(word_spacing),   
AV_OPT_TYPE_INT,{ .i64 = 0 }, INT_MIN,  INT_MAX,FLAGS },
+{"glyph_spacing",  "set glyph spacing in pixels", OFFSET(glyph_spacing),   
AV_OPT_TYPE_INT,{ .i64 = 0 }, INT_MIN,  INT_MAX,FLAGS },
 {"fontsize","set font size",OFFSET(fontsize_expr),  
AV_OPT_TYPE_STRING, {.str=NULL},  CHAR_MIN, CHAR_MAX , FLAGS},
 {"x",   "set x expression", OFFSET(x_expr), 
AV_OPT_TYPE_STRING, {.str="0"},   CHAR_MIN, CHAR_MAX, FLAGS},
 {"y",   "set y expression", OFFSET(y_expr), 
AV_OPT_TYPE_STRING, {.str="0"},   CHAR_MIN, CHAR_MAX, FLAGS},
@@ -1376,7 +1376,7 @@ static int draw_text(AVFilterContext *ctx, AVFrame *frame,
 s->positions[i].y = y - glyph->bitmap_top + y_max;
 if (code == '\t') x  = (x / s->tabsize + 1)*s->tabsize;
 else  x += glyph->advance;
-x += s->word_spacing;
+x += s->glyph_spacing;
 }
 
 max_text_line_w = FFMAX(x, max_text_line_w);
-- 
2.13.0.windows.1

-----Original Message-----
From: geo...@phare.normalesup.org [mailto:geo...@phare.normalesup.org] on behalf of Nicolas George
Sent: 2017-07-20 16:08
To: FFmpeg development discussions and patches
Cc: efren_y...@163.com
Subject: Re: [FFmpeg-devel] [PATCH] vf_drawtext: support to set word spacing while drawing text. ./ffmpeg -i input -vf drawtext="word_spacing=10:textfile=1.txt:fontfile=demo.ttf" -f flv 1.flv could set word spacing 10 pixel.

Hi. Thanks for the patch. See comments below.

Le duodi 2 thermidor, an CCXXV, efren yang a écrit :
> Subject: Re: [FFmpeg-devel] [PATCH] vf_drawtext: support to set word 
> spacing  while drawing text. ./ffmpeg -i input -vf  
> drawtext="word_spacing=10:textfile=1.txt:fontfile=demo.ttf" -f flv 
> 1.flv  could set word spacing 10 pixel.

The commit message will need fixing before commit.

> Signed-off-by: efren yang 
> ---

>  libavfilter/vf_drawtext.c | 3 +++
>  1 file changed, 3 insertions(+)

The documentation needs to be updated.

> 
> diff --git a/libavfilter/vf_drawtext.c b/libavfilter/vf_drawtext.c 
> index f6151443bb..137ae5891e 100644
> --- a/libavfilter/vf_drawtext.c
> +++ b/libavfilter/vf_drawtext.c
> @@ -162,6 +162,7 @@ typedef struct DrawTextContext {
>  unsigned int default_fontsize;  ///< default font size to use
>  
>  int line_spacing;   ///< lines spacing in pixels
> +int word_spacing;   ///< word spacing in pixels
>  short int draw_box; ///< draw box around text - true or false
>  int boxborderw; ///< box border width
>  int use_kerning;///< font kerning is used - true/false
> @@ -214,6 +215,7 @@ static const AVOption drawtext_options[]= {
>  {"box", "set box",  OFFSET(draw_box),   
> AV_OPT_TYPE_BOOL,   {.i64=0}, 0,1   , FLAGS},
>  {"boxborderw",  

Re: [FFmpeg-devel] [PATCH] vf_drawtext: support to set word spacing while drawing text. ./ffmpeg -i input -vf drawtext="word_spacing=10:textfile=1.txt:fontfile=demo.ttf" -f flv 1.flv could set word sp

2017-07-20 Thread Nicolas George
Hi. Thanks for the patch. See comments below.

Le duodi 2 thermidor, an CCXXV, efren yang a écrit :
> Subject: Re: [FFmpeg-devel] [PATCH] vf_drawtext: support to set word spacing
>  while drawing text. ./ffmpeg -i input -vf
>  drawtext="word_spacing=10:textfile=1.txt:fontfile=demo.ttf" -f flv 1.flv
>  could set word spacing 10 pixel.

The commit message will need fixing before commit.

> Signed-off-by: efren yang 
> ---

>  libavfilter/vf_drawtext.c | 3 +++
>  1 file changed, 3 insertions(+)

The documentation needs to be updated.

> 
> diff --git a/libavfilter/vf_drawtext.c b/libavfilter/vf_drawtext.c
> index f6151443bb..137ae5891e 100644
> --- a/libavfilter/vf_drawtext.c
> +++ b/libavfilter/vf_drawtext.c
> @@ -162,6 +162,7 @@ typedef struct DrawTextContext {
>  unsigned int default_fontsize;  ///< default font size to use
>  
>  int line_spacing;   ///< lines spacing in pixels
> +int word_spacing;   ///< word spacing in pixels
>  short int draw_box; ///< draw box around text - true or false
>  int boxborderw; ///< box border width
>  int use_kerning;///< font kerning is used - true/false
> @@ -214,6 +215,7 @@ static const AVOption drawtext_options[]= {
>  {"box", "set box",  OFFSET(draw_box),   
> AV_OPT_TYPE_BOOL,   {.i64=0}, 0,1   , FLAGS},
>  {"boxborderw",  "set box border width", OFFSET(boxborderw), 
> AV_OPT_TYPE_INT,{.i64=0}, INT_MIN,  INT_MAX , FLAGS},
>  {"line_spacing",  "set line spacing in pixels", OFFSET(line_spacing),   
> AV_OPT_TYPE_INT,{.i64=0}, INT_MIN,  INT_MAX,FLAGS},
> +{"word_spacing",  "set word spacing in pixels", OFFSET(word_spacing),   
> AV_OPT_TYPE_INT,{ .i64 = 0 }, INT_MIN,  INT_MAX,FLAGS },
>  {"fontsize","set font size",OFFSET(fontsize_expr),  
> AV_OPT_TYPE_STRING, {.str=NULL},  CHAR_MIN, CHAR_MAX , FLAGS},
>  {"x",   "set x expression", OFFSET(x_expr), 
> AV_OPT_TYPE_STRING, {.str="0"},   CHAR_MIN, CHAR_MAX, FLAGS},
>  {"y",   "set y expression", OFFSET(y_expr), 
> AV_OPT_TYPE_STRING, {.str="0"},   CHAR_MIN, CHAR_MAX, FLAGS},
> @@ -1374,6 +1376,7 @@ static int draw_text(AVFilterContext *ctx, AVFrame 
> *frame,
>  s->positions[i].y = y - glyph->bitmap_top + y_max;
>  if (code == '\t') x  = (x / s->tabsize + 1)*s->tabsize;
>  else  x += glyph->advance;

> +x += s->word_spacing;

It says "word" spacing, but it is applied to every glyph. Is there
something I am missing?
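
For comparison, a minimal sketch (hypothetical; not part of the submitted patch) of what a behaviour restricted to word boundaries could look like at this point in draw_text():

    /* Hypothetical alternative, sketch only: add the extra advance after
     * whitespace glyphs, so the option acts as word spacing rather than
     * being applied after every glyph. */
    if (code == ' ' || code == '\t')
        x += s->word_spacing;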

>  }
>  
>  max_text_line_w = FFMAX(x, max_text_line_w);

Regards,

-- 
  Nicolas George


signature.asc
Description: Digital signature
___
ffmpeg-devel mailing list
ffmpeg-devel@ffmpeg.org
http://ffmpeg.org/mailman/listinfo/ffmpeg-devel


[FFmpeg-devel] [PATCH V4] examples/hw_decode: Add a HWAccel decoding example.

2017-07-20 Thread Jun Zhao
V4: fix a potential memory leak issue based on Steven Liu's review.
V3: re-work to support the other hwaccels, rename from vaapi_dec.c to hw_decode.c.
    Only tested with vaapi; dxva2|d3d11va|videotoolbox might work as well.
V2: re-work with the new hw decoding API.
From 718f92731d308423e5a09d0384f7bf2361f5a307 Mon Sep 17 00:00:00 2001
From: Jun Zhao 
Date: Thu, 20 Jul 2017 00:58:56 -0400
Subject: [PATCH V4] examples/hw_decode: Add a HWAccel decoding example.

Add a HWAccel decoding example.

Just test with vaapi, dxva2|d3d11va|videotoolbox might work as well.

Signed-off-by: Liu, Kaixuan 
Signed-off-by: Jun Zhao 
---
 doc/examples/hw_decode.c | 249 +++
 1 file changed, 249 insertions(+)
 create mode 100644 doc/examples/hw_decode.c

diff --git a/doc/examples/hw_decode.c b/doc/examples/hw_decode.c
new file mode 100644
index 00..0e77ee877f
--- /dev/null
+++ b/doc/examples/hw_decode.c
@@ -0,0 +1,249 @@
+/*
+ * Copyright (c) 2017 Jun Zhao
+ * Copyright (c) 2017 Kaixuan Liu
+ *
+ * HW Acceleration API (video decoding) decode sample
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * HW-Accelerated decoding example.
+ *
+ * @example hw_decode.c
+ * This example shows how to do HW-accelerated decoding with output
+ * frames from the HW video surfaces.
+ */
+
+#include 
+
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+
+static AVBufferRef *hw_device_ctx = NULL;
+static enum AVPixelFormat hw_pix_fmt;
+FILE *output_file = NULL;
+
+static enum AVPixelFormat hw_pix_fmts[] = {
+[AV_HWDEVICE_TYPE_CUDA] = AV_PIX_FMT_CUDA,
+[AV_HWDEVICE_TYPE_DXVA2]= AV_PIX_FMT_DXVA2_VLD,
+[AV_HWDEVICE_TYPE_D3D11VA]  = AV_PIX_FMT_D3D11VA_VLD,
+[AV_HWDEVICE_TYPE_QSV]  = AV_PIX_FMT_QSV,
+[AV_HWDEVICE_TYPE_VAAPI]= AV_PIX_FMT_VAAPI,
+[AV_HWDEVICE_TYPE_VDPAU]= AV_PIX_FMT_VDPAU,
+[AV_HWDEVICE_TYPE_VIDEOTOOLBOX] = AV_PIX_FMT_VIDEOTOOLBOX,
+};
+
+static enum AVPixelFormat find_fmt_by_hw_type(const enum AVHWDeviceType type)
+{
+if (type >= 0 && type < FF_ARRAY_ELEMS(hw_pix_fmts))
+return hw_pix_fmts[type];
+else
+return AV_PIX_FMT_NONE;
+}
+
+static int hw_decoder_init(AVCodecContext *ctx, const enum AVHWDeviceType type)
+{
+int err = 0;
+
+if ((err = av_hwdevice_ctx_create(&hw_device_ctx, type,
+  NULL, NULL, 0)) < 0) {
+fprintf(stderr, "Failed to create specified HW device.\n");
+return err;
+}
+ctx->hw_device_ctx = av_buffer_ref(hw_device_ctx);
+
+return err;
+}
+
+static enum AVPixelFormat get_hw_format(AVCodecContext *ctx,
+const enum AVPixelFormat *pix_fmts)
+{
+const enum AVPixelFormat *p;
+
+for (p = pix_fmts; *p != -1; p++) {
+if (*p == hw_pix_fmt)
+return *p;
+}
+
+fprintf(stderr, "Failed to get HW surface format.\n");
+return AV_PIX_FMT_NONE;
+}
+
+static int decode_write(AVCodecContext *avctx, AVPacket *packet)
+{
+AVFrame *frame = NULL, *sw_frame = NULL;
+AVFrame *tmp_frame = NULL;
+uint8_t *buffer = NULL;
+int size;
+int ret = 0;
+
+ret = avcodec_send_packet(avctx, packet);
+if (ret < 0) {
+fprintf(stderr, "Error during decoding\n");
+return ret;
+}
+
+while (ret >= 0) {
+if (!(frame = av_frame_alloc()) || !(sw_frame = av_frame_alloc())) {
+fprintf(stderr, "Can not alloc frame\n");
+ret = AVERROR(ENOMEM);
+goto fail;
+}
+
+ret = avcodec_receive_frame(avctx, frame);
+if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
+break;
+else if (ret < 0) {
+fprintf(stderr, "Error while decoding\n");
+goto fail;
+}
+
+if (frame->format == hw_pix_fmt) {
+/* retrieve data from GPU to CPU */
+if ((ret = av_hwframe_transfer_data(sw_frame, frame, 0)) < 0) {
+fprintf(stderr, "Error transferring the data to system 
memory\n");
+goto fail;
+}
+tmp_frame = sw_frame;
+} else
+ 

Re: [FFmpeg-devel] [PATCH V3] examples/hw_decode: Add a HWAccel decoding example.

2017-07-20 Thread Steven Liu
2017-07-20 15:09 GMT+08:00 Jun Zhao :
>
>
> On 2017/7/20 14:17, Steven Liu wrote:
>> 2017-07-20 13:29 GMT+08:00 Jun Zhao :
>>> V3: re-work to support the other hwaccels, just test with vaapi, 
>>> dxva2|d3d11va|videotoolbox might work as well.
>>> V2: re-work with new hw decoding API.
>>>
>>> ___
>>> ffmpeg-devel mailing list
>>> ffmpeg-devel@ffmpeg.org
>>> http://ffmpeg.org/mailman/listinfo/ffmpeg-devel
>>>
>>
>>
>> From bd8cbd5c16be3001b950f0c4ae3548909a396bc6 Mon Sep 17 00:00:00 2001
>> From: Jun Zhao 
>> Date: Thu, 20 Jul 2017 00:58:56 -0400
>> Subject: [PATCH V3] examples/hw_decode: Add a HWAccel decoding example.
>>
>> Add a HWAccel decoding example.
>>
>> Just test with vaapi, dxva2|d3d11va|videotoolbox might work as well.
>>
>> Signed-off-by: Liu, Kaixuan 
>> Signed-off-by: Jun Zhao 
>> ---
>>  doc/examples/hw_decode.c | 246 
>> +++
>>  1 file changed, 246 insertions(+)
>>  create mode 100644 doc/examples/hw_decode.c
>>
>> diff --git a/doc/examples/hw_decode.c b/doc/examples/hw_decode.c
>> new file mode 100644
>> index 00..4070a60cba
>> --- /dev/null
>> +++ b/doc/examples/hw_decode.c
>> @@ -0,0 +1,246 @@
>> +/*
>> + * Copyright (c) 2017 Jun Zhao
>> + * Copyright (c) 2017 Kaixuan Liu
>> + *
>> + * HW Acceleration API (video decoding) decode sample
>> + *
>> + * This file is part of FFmpeg.
>> + *
>> + * FFmpeg is free software; you can redistribute it and/or
>> + * modify it under the terms of the GNU Lesser General Public
>> + * License as published by the Free Software Foundation; either
>> + * version 2.1 of the License, or (at your option) any later version.
>> + *
>> + * FFmpeg is distributed in the hope that it will be useful,
>> + * but WITHOUT ANY WARRANTY; without even the implied warranty of
>> + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
>> + * Lesser General Public License for more details.
>> + *
>> + * You should have received a copy of the GNU Lesser General Public
>> + * License along with FFmpeg; if not, write to the Free Software
>> + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 
>> USA
>> + */
>> +
>> +/**
>> + * @file
>> + * HW-Accelerated decoding example.
>> + *
>> + * @example hw_decode.c
>> + * This example shows how to do HW-accelerated decoding with output
>> + * frames from the HW video surfaces.
>> + */
>> +
>> +#include 
>> +
>> +#include 
>> +#include 
>> +#include 
>> +#include 
>> +#include 
>> +#include 
>> +#include 
>> +
>> +static AVBufferRef *hw_device_ctx = NULL;
>> +static enum AVPixelFormat hw_pix_fmt;
>> +FILE *output_file = NULL;
>> +
>> +static enum AVPixelFormat hw_pix_fmts[] = {
>> +[AV_HWDEVICE_TYPE_CUDA] = AV_PIX_FMT_CUDA,
>> +[AV_HWDEVICE_TYPE_DXVA2]= AV_PIX_FMT_DXVA2_VLD,
>> +[AV_HWDEVICE_TYPE_D3D11VA]  = AV_PIX_FMT_D3D11VA_VLD,
>> +[AV_HWDEVICE_TYPE_QSV]  = AV_PIX_FMT_QSV,
>> +[AV_HWDEVICE_TYPE_VAAPI]= AV_PIX_FMT_VAAPI,
>> +[AV_HWDEVICE_TYPE_VDPAU]= AV_PIX_FMT_VDPAU,
>> +[AV_HWDEVICE_TYPE_VIDEOTOOLBOX] = AV_PIX_FMT_VIDEOTOOLBOX,
>> +};
>> +
>> +static enum AVPixelFormat find_fmt_by_hw_type(const enum AVHWDeviceType 
>> type)
>> +{
>> +if (type >= 0 && type < FF_ARRAY_ELEMS(hw_pix_fmts))
>> +return hw_pix_fmts[type];
>> +else
>> +return AV_PIX_FMT_NONE;
>> +}
>> +
>> +static int hw_decoder_init(AVCodecContext *ctx, const enum AVHWDeviceType 
>> type)
>> +{
>> +int err = 0;
>> +
>> +if 

Re: [FFmpeg-devel] [PATCH V3] examples/hw_decode: Add a HWAccel decoding example.

2017-07-20 Thread Jun Zhao


On 2017/7/20 14:17, Steven Liu wrote:
> 2017-07-20 13:29 GMT+08:00 Jun Zhao :
>> V3: re-work to support the other hwaccels, just test with vaapi, 
>> dxva2|d3d11va|videotoolbox might work as well.
>> V2: re-work with new hw decoding API.
>>
>> ___
>> ffmpeg-devel mailing list
>> ffmpeg-devel@ffmpeg.org
>> http://ffmpeg.org/mailman/listinfo/ffmpeg-devel
>>
> 
> 
> From bd8cbd5c16be3001b950f0c4ae3548909a396bc6 Mon Sep 17 00:00:00 2001
> From: Jun Zhao 
> Date: Thu, 20 Jul 2017 00:58:56 -0400
> Subject: [PATCH V3] examples/hw_decode: Add a HWAccel decoding example.
> 
> Add a HWAccel decoding example.
> 
> Just test with vaapi, dxva2|d3d11va|videotoolbox might work as well.
> 
> Signed-off-by: Liu, Kaixuan 
> Signed-off-by: Jun Zhao 
> ---
>  doc/examples/hw_decode.c | 246 
> +++
>  1 file changed, 246 insertions(+)
>  create mode 100644 doc/examples/hw_decode.c
> 
> diff --git a/doc/examples/hw_decode.c b/doc/examples/hw_decode.c
> new file mode 100644
> index 00..4070a60cba
> --- /dev/null
> +++ b/doc/examples/hw_decode.c
> @@ -0,0 +1,246 @@
> +/*
> + * Copyright (c) 2017 Jun Zhao
> + * Copyright (c) 2017 Kaixuan Liu
> + *
> + * HW Acceleration API (video decoding) decode sample
> + *
> + * This file is part of FFmpeg.
> + *
> + * FFmpeg is free software; you can redistribute it and/or
> + * modify it under the terms of the GNU Lesser General Public
> + * License as published by the Free Software Foundation; either
> + * version 2.1 of the License, or (at your option) any later version.
> + *
> + * FFmpeg is distributed in the hope that it will be useful,
> + * but WITHOUT ANY WARRANTY; without even the implied warranty of
> + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
> + * Lesser General Public License for more details.
> + *
> + * You should have received a copy of the GNU Lesser General Public
> + * License along with FFmpeg; if not, write to the Free Software
> + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 
> USA
> + */
> +
> +/**
> + * @file
> + * HW-Accelerated decoding example.
> + *
> + * @example hw_decode.c
> + * This example shows how to do HW-accelerated decoding with output
> + * frames from the HW video surfaces.
> + */
> +
> +#include 
> +
> +#include 
> +#include 
> +#include 
> +#include 
> +#include 
> +#include 
> +#include 
> +
> +static AVBufferRef *hw_device_ctx = NULL;
> +static enum AVPixelFormat hw_pix_fmt;
> +FILE *output_file = NULL;
> +
> +static enum AVPixelFormat hw_pix_fmts[] = {
> +[AV_HWDEVICE_TYPE_CUDA] = AV_PIX_FMT_CUDA,
> +[AV_HWDEVICE_TYPE_DXVA2]= AV_PIX_FMT_DXVA2_VLD,
> +[AV_HWDEVICE_TYPE_D3D11VA]  = AV_PIX_FMT_D3D11VA_VLD,
> +[AV_HWDEVICE_TYPE_QSV]  = AV_PIX_FMT_QSV,
> +[AV_HWDEVICE_TYPE_VAAPI]= AV_PIX_FMT_VAAPI,
> +[AV_HWDEVICE_TYPE_VDPAU]= AV_PIX_FMT_VDPAU,
> +[AV_HWDEVICE_TYPE_VIDEOTOOLBOX] = AV_PIX_FMT_VIDEOTOOLBOX,
> +};
> +
> +static enum AVPixelFormat find_fmt_by_hw_type(const enum AVHWDeviceType type)
> +{
> +if (type >= 0 && type < FF_ARRAY_ELEMS(hw_pix_fmts))
> +return hw_pix_fmts[type];
> +else
> +return AV_PIX_FMT_NONE;
> +}
> +
> +static int hw_decoder_init(AVCodecContext *ctx, const enum AVHWDeviceType 
> type)
> +{
> +int err = 0;
> +
> +if ((err = av_hwdevice_ctx_create(&hw_device_ctx, type,
> +  NULL, NULL, 0)) < 0) {
> +fprintf(stderr, "Failed to create specified HW device.\n");
> +return err;
> +}
> +

Re: [FFmpeg-devel] [PATCH V3] examples/hw_decode: Add a HWAccel decoding example.

2017-07-20 Thread Steven Liu
2017-07-20 13:29 GMT+08:00 Jun Zhao :
> V3: re-work to support the other hwaccels, just test with vaapi, 
> dxva2|d3d11va|videotoolbox might work as well.
> V2: re-work with new hw decoding API.
>
> ___
> ffmpeg-devel mailing list
> ffmpeg-devel@ffmpeg.org
> http://ffmpeg.org/mailman/listinfo/ffmpeg-devel
>


From bd8cbd5c16be3001b950f0c4ae3548909a396bc6 Mon Sep 17 00:00:00 2001
From: Jun Zhao 
Date: Thu, 20 Jul 2017 00:58:56 -0400
Subject: [PATCH V3] examples/hw_decode: Add a HWAccel decoding example.

Add a HWAccel decoding example.

Just test with vaapi, dxva2|d3d11va|videotoolbox might work as well.

Signed-off-by: Liu, Kaixuan 
Signed-off-by: Jun Zhao 
---
 doc/examples/hw_decode.c | 246 +++
 1 file changed, 246 insertions(+)
 create mode 100644 doc/examples/hw_decode.c

diff --git a/doc/examples/hw_decode.c b/doc/examples/hw_decode.c
new file mode 100644
index 00..4070a60cba
--- /dev/null
+++ b/doc/examples/hw_decode.c
@@ -0,0 +1,246 @@
+/*
+ * Copyright (c) 2017 Jun Zhao
+ * Copyright (c) 2017 Kaixuan Liu
+ *
+ * HW Acceleration API (video decoding) decode sample
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * HW-Accelerated decoding example.
+ *
+ * @example hw_decode.c
+ * This example shows how to do HW-accelerated decoding with output
+ * frames from the HW video surfaces.
+ */
+
+#include 
+
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+
+static AVBufferRef *hw_device_ctx = NULL;
+static enum AVPixelFormat hw_pix_fmt;
+FILE *output_file = NULL;
+
+static enum AVPixelFormat hw_pix_fmts[] = {
+[AV_HWDEVICE_TYPE_CUDA] = AV_PIX_FMT_CUDA,
+[AV_HWDEVICE_TYPE_DXVA2]= AV_PIX_FMT_DXVA2_VLD,
+[AV_HWDEVICE_TYPE_D3D11VA]  = AV_PIX_FMT_D3D11VA_VLD,
+[AV_HWDEVICE_TYPE_QSV]  = AV_PIX_FMT_QSV,
+[AV_HWDEVICE_TYPE_VAAPI]= AV_PIX_FMT_VAAPI,
+[AV_HWDEVICE_TYPE_VDPAU]= AV_PIX_FMT_VDPAU,
+[AV_HWDEVICE_TYPE_VIDEOTOOLBOX] = AV_PIX_FMT_VIDEOTOOLBOX,
+};
+
+static enum AVPixelFormat find_fmt_by_hw_type(const enum AVHWDeviceType type)
+{
+if (type >= 0 && type < FF_ARRAY_ELEMS(hw_pix_fmts))
+return hw_pix_fmts[type];
+else
+return AV_PIX_FMT_NONE;
+}
+
+static int hw_decoder_init(AVCodecContext *ctx, const enum AVHWDeviceType type)
+{
+int err = 0;
+
+if ((err = av_hwdevice_ctx_create(_device_ctx, type,
+  NULL, NULL, 0)) < 0) {
+fprintf(stderr, "Failed to create specified HW device.\n");
+return err;
+}
+ctx->hw_device_ctx = av_buffer_ref(hw_device_ctx);
+
+return err;
+}
+
+static enum AVPixelFormat get_hw_format(AVCodecContext *ctx,
+const enum AVPixelFormat *pix_fmts)
+{
+const enum AVPixelFormat *p;
+
+for (p = pix_fmts; *p != -1; p++) {
+if (*p == hw_pix_fmt)
+return *p;
+}
+
+fprintf(stderr, "Failed to get HW surface format.\n");
+return AV_PIX_FMT_NONE;
+}
+
+static 
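
The remainder of the patch is not preserved in this archive. For orientation,
the decode path that follows in the example -- hooking get_hw_format() into the
decoder and copying decoded frames out of the HW surface -- looks roughly like
the sketch below. This is an illustrative reconstruction, not the exact patch
text; decode_write(), decoder_ctx and packet are assumed names.

    /* In main(), the decoder would be tied to the HW device roughly like:
     *     hw_pix_fmt = find_fmt_by_hw_type(type);
     *     decoder_ctx->get_format = get_hw_format;
     *     hw_decoder_init(decoder_ctx, type);
     * and each demuxed packet would then be passed to a helper such as: */
    static int decode_write(AVCodecContext *decoder_ctx, AVPacket *packet)
    {
        AVFrame *frame = NULL, *sw_frame = NULL, *tmp_frame = NULL;
        uint8_t *buffer = NULL;
        int size, ret;

        ret = avcodec_send_packet(decoder_ctx, packet);
        if (ret < 0) {
            fprintf(stderr, "Error during decoding\n");
            return ret;
        }

        while (ret >= 0) {
            frame    = av_frame_alloc();
            sw_frame = av_frame_alloc();
            if (!frame || !sw_frame) {
                ret = AVERROR(ENOMEM);
                goto fail;
            }

            ret = avcodec_receive_frame(decoder_ctx, frame);
            if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF) {
                av_frame_free(&frame);
                av_frame_free(&sw_frame);
                return 0;
            } else if (ret < 0)
                goto fail;

            if (frame->format == hw_pix_fmt) {
                /* frame data lives in a HW surface: copy it to system memory */
                if ((ret = av_hwframe_transfer_data(sw_frame, frame, 0)) < 0)
                    goto fail;
                tmp_frame = sw_frame;
            } else
                tmp_frame = frame;

            /* dump the raw decoded frame to output_file */
            size   = av_image_get_buffer_size(tmp_frame->format, tmp_frame->width,
                                              tmp_frame->height, 1);
            buffer = av_malloc(size);
            if (!buffer) {
                ret = AVERROR(ENOMEM);
                goto fail;
            }
            ret = av_image_copy_to_buffer(buffer, size,
                                          (const uint8_t * const *)tmp_frame->data,
                                          (const int *)tmp_frame->linesize,
                                          tmp_frame->format, tmp_frame->width,
                                          tmp_frame->height, 1);
            if (ret < 0)
                goto fail;
            fwrite(buffer, 1, size, output_file);

        fail:
            av_frame_free(&frame);
            av_frame_free(&sw_frame);
            av_freep(&buffer);
            if (ret < 0)
                return ret;
        }

        return 0;
    }

Once built alongside the other doc/examples, the program would be run with a
device type plus input and output files, e.g. "./hw_decode vaapi input.mp4
out.raw"; the exact argument order depends on main(), which is not visible in
this archived copy.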

[FFmpeg-devel] [PATCH] avformat/hlsenc: fix hls fmp4 extension name bug

2017-07-20 Thread Steven Liu
ticket-id: #6541
When using the hls fmp4 muxer, the segment file extension is not .m4s;
this patch fixes it.

Signed-off-by: Steven Liu 
---
 libavformat/hlsenc.c | 21 -
 1 file changed, 16 insertions(+), 5 deletions(-)

diff --git a/libavformat/hlsenc.c b/libavformat/hlsenc.c
index 8a233270b5..65650e03a4 100644
--- a/libavformat/hlsenc.c
+++ b/libavformat/hlsenc.c
@@ -1286,14 +1286,19 @@ fail:
 return err;
 }
 
-static const char * get_default_pattern_localtime_fmt(void)
+static const char * get_default_pattern_localtime_fmt(AVFormatContext *s)
 {
 char b[21];
 time_t t = time(NULL);
 struct tm *p, tmbuf;
+HLSContext *hls = s->priv_data;
+
 p = localtime_r(, );
 // no %s support when strftime returned error or left format string 
unchanged
 // also no %s support on MSVC, which invokes the invalid parameter handler 
on unsupported format strings, instead of returning an error
+if (hls->segment_type == SEGMENT_TYPE_FMP4) {
+return (HAVE_LIBC_MSVCRT || !strftime(b, sizeof(b), "%s", p) || 
!strcmp(b, "%s")) ? "-%Y%m%d%H%M%S.m4s" : "-%s.m4s";
+}
 return (HAVE_LIBC_MSVCRT || !strftime(b, sizeof(b), "%s", p) || !strcmp(b, 
"%s")) ? "-%Y%m%d%H%M%S.ts" : "-%s.ts";
 }
 
@@ -1303,16 +1308,19 @@ static int hls_write_header(AVFormatContext *s)
 int ret, i;
 char *p;
 const char *pattern = "%d.ts";
-const char *pattern_localtime_fmt = get_default_pattern_localtime_fmt();
+const char *pattern_localtime_fmt = get_default_pattern_localtime_fmt(s);
 const char *vtt_pattern = "%d.vtt";
 AVDictionary *options = NULL;
 int byterange_mode = (hls->flags & HLS_SINGLE_FILE) || (hls->max_seg_size 
> 0);
 int basename_size;
 int vtt_basename_size;
 
-if (hls->segment_type == SEGMENT_TYPE_FMP4 && byterange_mode) {
-av_log(s, AV_LOG_WARNING, "Have not support fmp4 byterange mode yet 
now\n");
-return AVERROR_PATCHWELCOME;
+if (hls->segment_type == SEGMENT_TYPE_FMP4) {
+if (byterange_mode) {
+av_log(s, AV_LOG_WARNING, "Have not support fmp4 byterange mode 
yet now\n");
+return AVERROR_PATCHWELCOME;
+}
+pattern = "%d.m4s";
 }
 if ((hls->start_sequence_source_type == 
HLS_START_SEQUENCE_AS_SECONDS_SINCE_EPOCH) ||
 (hls->start_sequence_source_type == 
HLS_START_SEQUENCE_AS_FORMATTED_DATETIME)) {
@@ -1391,6 +1399,9 @@ static int hls_write_header(AVFormatContext *s)
 if (hls->flags & HLS_SINGLE_FILE)
 pattern = ".ts";
 
+if (hls->segment_type == SEGMENT_TYPE_FMP4)
+pattern = "m4s";
+
 if (hls->use_localtime) {
 basename_size = strlen(s->filename) + 
strlen(pattern_localtime_fmt) + 1;
 } else {
-- 
2.11.0 (Apple Git-81)
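
For reference, a minimal way to exercise this fix from the command line might
look like the following (illustrative only; it assumes the hls_segment_type
option with the fmp4 value that the SEGMENT_TYPE_FMP4 checks in this muxer
correspond to):

    ffmpeg -i input.mp4 -c copy -f hls -hls_segment_type fmp4 out.m3u8

With the patch applied, the default segment pattern becomes "%d.m4s" (or
"-%s.m4s" / "-%Y%m%d%H%M%S.m4s" when use_localtime is enabled), so segments are
written as out0.m4s, out1.m4s, ... instead of out0.ts.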



___
ffmpeg-devel mailing list
ffmpeg-devel@ffmpeg.org
http://ffmpeg.org/mailman/listinfo/ffmpeg-devel