On 2017/8/7 18:12, Anton Khirnov wrote:

Quoting Huang, Zhengxu (2017-08-04 04:05:28)
diff --git a/configure b/configure
index 4510100..e6ea18a 100755
--- a/configure
+++ b/configure
@@ -1786,6 +1786,7 @@ CONFIG_EXTRA="
     qsv
     qsvdec
     qsvenc
+    qsvvpp
     rangecoder
     riffdec
     riffenc
@@ -2528,6 +2529,8 @@ hqdn3d_filter_deps="gpl"
interlace_filter_deps="gpl"
movie_filter_deps="avcodec avformat"
ocv_filter_deps="libopencv"
+overlay_qsv_filter_deps="libmfx"
+overlay_qsv_filter_select="qsvvpp"
resample_filter_deps="avresample"
scale_filter_deps="swscale"
scale_qsv_filter_deps="libmfx"
diff --git a/libavfilter/Makefile b/libavfilter/Makefile
index 348ad92..8d9ed6a 100644
--- a/libavfilter/Makefile
+++ b/libavfilter/Makefile
@@ -75,6 +75,7 @@ OBJS-$(CONFIG_NOFORMAT_FILTER)               += vf_format.o
OBJS-$(CONFIG_NULL_FILTER)                   += vf_null.o
OBJS-$(CONFIG_OCV_FILTER)                    += vf_libopencv.o
OBJS-$(CONFIG_OVERLAY_FILTER)                += vf_overlay.o
+OBJS-$(CONFIG_OVERLAY_QSV_FILTER)            += qsvvpp.o vf_overlay_qsv.o
Since you're adding a 'qsvvpp' subsystem to configure, the proper thing to do
here is start a new block for subsystems in the Makefile (as in
libavcodec/Makefile) with
OBJS-$(CONFIG_QSVVPP) += qsvvpp.o
Then none of the filters depend on qsvvpp.o directly, since that gets selected
through configure.

OBJS-$(CONFIG_PAD_FILTER)                    += vf_pad.o
OBJS-$(CONFIG_PIXDESCTEST_FILTER)            += vf_pixdesctest.o
OBJS-$(CONFIG_SCALE_FILTER)                  += vf_scale.o
@@ -104,5 +105,7 @@ OBJS-$(CONFIG_NULLSRC_FILTER)                += vsrc_nullsrc.o
OBJS-$(CONFIG_RGBTESTSRC_FILTER)             += vsrc_testsrc.o
OBJS-$(CONFIG_TESTSRC_FILTER)                += vsrc_testsrc.o

+SKIPHEADERS-$(CONFIG_QSV)                    += qsvvpp.h
Shouldn't this be CONFIG_QSVVPP?

diff --git a/libavfilter/qsvvpp.c b/libavfilter/qsvvpp.c
new file mode 100644
index 0000000..74574bb
--- /dev/null
+++ b/libavfilter/qsvvpp.c
@@ -0,0 +1,699 @@
+/*
+ * This file is part of Libav.
+ *
+ * Libav is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * Libav is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with Libav; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+/**
+ * @file
+ * Intel Quick Sync Video VPP base function
+ */
+
+#include "libavutil/common.h"
+#include "libavutil/mathematics.h"
+#include "libavutil/hwcontext.h"
+#include "libavutil/hwcontext_qsv.h"
+#include "libavutil/time.h"
+#include "libavutil/pixdesc.h"
+#include "internal.h"
+#include "qsvvpp.h"
+#include "video.h"
+
+typedef struct QSVFrame {
+    AVFrame          *frame;
+    mfxFrameSurface1 *surface;
+    mfxFrameSurface1  surface_internal;  ///< for system memory
+    struct QSVFrame  *next;
+} QSVFrame;
+
+/* abstract struct for all QSV filters */
+struct FFQSVVPPContext {
There is no need to add the 'FF' prefix to internal structs. They are not
visible in the compiled objects as a part of the ABI, so there's no risk of
conflicts. And since the name is already rather long and unreadable it's better
to not make it even longer.

+/* fill the surface info */
+static int fill_frameinfo_by_link(mfxFrameInfo *frameinfo, AVFilterLink *link)
+{
+    enum AVPixelFormat        pix_fmt;
+    AVHWFramesContext        *frames_ctx;
+    const AVPixFmtDescriptor *desc;
+
+    if (link->format == AV_PIX_FMT_QSV) {
+        if (!link->hw_frames_ctx)
+            return AVERROR(EINVAL);
+
+        frames_ctx = (AVHWFramesContext *)link->hw_frames_ctx->data;
+        pix_fmt = frames_ctx->sw_format;
+    } else
+        pix_fmt = link->format;
+
+    desc = av_pix_fmt_desc_get(pix_fmt);
+    if (!desc)
+        return AVERROR_BUG;
+
+    frameinfo->CropX          = 0;
+    frameinfo->CropY          = 0;
+    frameinfo->CropW          = link->w;
+    frameinfo->CropH          = link->h;
+    frameinfo->Width          = FFALIGN(link->w, 32);
+    frameinfo->Height         = FFALIGN(link->h, 32);
This looks dangerous. There is no guarantee that the dimensions are specifically
aligned to 32. For QSV format frames you have access to the mfxFrameInfo struct
in the AVQSVFramesContext.
For system memory frames you need to check that the input frames are
sufficiently aligned.
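Something along these lines might be safer — a rough, untested sketch reusing the names from the hunk above ('frames_hwctx' is a new, hypothetical local):

    if (link->format == AV_PIX_FMT_QSV) {
        /* take the dimensions the hw frames were actually allocated with */
        AVQSVFramesContext *frames_hwctx = frames_ctx->hwctx;
        frameinfo->Width  = frames_hwctx->surfaces[0].Info.Width;
        frameinfo->Height = frames_hwctx->surfaces[0].Info.Height;
    } else {
        /* system memory: still needs a check that the input frames are
         * allocated with at least this much padding */
        frameinfo->Width  = FFALIGN(link->w, 32);
        frameinfo->Height = FFALIGN(link->h, 32);
    }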

+/* create the QSV session */
+static int init_vpp_session(AVFilterContext *avctx, FFQSVVPPContext *s)
+{
+    AVFilterLink                 *inlink = avctx->inputs[0];
+    AVFilterLink                *outlink = avctx->outputs[0];
+    AVQSVFramesContext  *in_frames_hwctx = NULL;
+    AVQSVFramesContext *out_frames_hwctx = NULL;
+
+    AVBufferRef *device_ref;
+    AVHWDeviceContext *device_ctx;
+    AVQSVDeviceContext *device_hwctx;
+    mfxHDL handle;
+    mfxHandleType handle_type;
+    mfxVersion ver;
+    mfxIMPL impl;
+    int ret, i;
+
+    /**
+     * @mem-mode
+     * 1.Input has a HW context, Video or Opaque mode
+     * 2.Otherwise, System mode.
+     */
+    if (inlink->hw_frames_ctx) {
+        AVHWFramesContext *frames_ctx = (AVHWFramesContext *)inlink->hw_frames_ctx->data;
+
+        device_ref      = frames_ctx->device_ref;
+        in_frames_hwctx = frames_ctx->hwctx;
+
+        if (in_frames_hwctx->frame_type & MFX_MEMTYPE_OPAQUE_FRAME)
+            s->in_mem_mode = MFX_MEMTYPE_OPAQUE_FRAME;
+        else
+            s->in_mem_mode = MFX_MEMTYPE_VIDEO_MEMORY_DECODER_TARGET;
+
+        s->surface_ptrs_in = av_mallocz_array(in_frames_hwctx->nb_surfaces,
+                                              sizeof(*s->surface_ptrs_in));
+        if (!s->surface_ptrs_in)
+            return AVERROR(ENOMEM);
+        for (i = 0; i < in_frames_hwctx->nb_surfaces; i++)
+            s->surface_ptrs_in[i] = in_frames_hwctx->surfaces + i;
+        s->nb_surface_ptrs_in = in_frames_hwctx->nb_surfaces;
+    } else if (avctx->hw_device_ctx) {
+        device_ref     = avctx->hw_device_ctx;
+        s->in_mem_mode = MFX_MEMTYPE_SYSTEM_MEMORY;
+    } else {
+        av_log(avctx, AV_LOG_ERROR, "No hw context provided.\n");
+        return AVERROR(EINVAL);
+    }
This should also check that the underlying devices are the same on all the input
links.
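A rough, untested sketch of such a check, comparing against the 'device_ref' picked above ('in' and 'fc' are new locals):

    for (i = 1; i < avctx->nb_inputs; i++) {
        AVFilterLink *in = avctx->inputs[i];
        if (in->hw_frames_ctx) {
            AVHWFramesContext *fc = (AVHWFramesContext *)in->hw_frames_ctx->data;
            /* every hw input must be backed by the same device */
            if (fc->device_ref->data != device_ref->data) {
                av_log(avctx, AV_LOG_ERROR,
                       "All inputs must use the same QSV device.\n");
                return AVERROR(EINVAL);
            }
        }
    }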

+int ff_qsvvpp_create(AVFilterContext *avctx, FFQSVVPPContext **vpp, FFQSVVPPParam *param)
+{
+    int              ret = 0, i;
+    FFQSVVPPContext *s;
+
+    if (!avctx || !vpp)
+        return AVERROR(EINVAL);
I don't think this is necessary; either of those being NULL means the caller is
completely insane, in which case we should crash as loudly as possible.

+
+    s = av_mallocz(sizeof(FFQSVVPPContext));
nit: sizeof(*s) is preferable

+    if (!s) {
+        ret = AVERROR(ENOMEM);
+        goto failed;
+    }
+
+    s->filter_frame  = param->filter_frame;
+    if (!s->filter_frame)
+        s->filter_frame = ff_filter_frame;
+    s->out_sw_format = param->out_sw_format;
+
+    /* create the vpp session */
+    ret = init_vpp_session(avctx, s);
+    if (ret < 0)
+        goto failed;
+
+    s->frame_infos = av_mallocz_array(sizeof(*s->frame_infos), avctx->nb_inputs);
+    if (!s->frame_infos) {
+        ret = AVERROR(ENOMEM);
+        goto failed;
+    }
+
+    /* Init each input's information*/
+    for (i = 0; i < avctx->nb_inputs; i++) {
+        ret = fill_frameinfo_by_link(&s->frame_infos[i], avctx->inputs[i]);
+        if (ret < 0)
+            goto failed;
+    }
+
+    /* Update input's frame info according to crop */
+    for (i = 0; i < param->num_crop; i++) {
+        FFQSVVPPCrop *crop = param->crop + i;
+        if (crop->in_idx > avctx->nb_inputs) {
+            ret = AVERROR(EINVAL);
+            goto failed;
+        }
+        s->frame_infos[crop->in_idx].CropX = crop->x;
+        s->frame_infos[crop->in_idx].CropY = crop->y;
+        s->frame_infos[crop->in_idx].CropW = crop->w;
+        s->frame_infos[crop->in_idx].CropH = crop->h;
+    }
+
+    s->vpp_param.vpp.In = s->frame_infos[0];
+
+    ret = fill_frameinfo_by_link(&s->vpp_param.vpp.Out, avctx->outputs[0]);
+    if (ret < 0) {
+        av_log(avctx, AV_LOG_ERROR, "Fail to get frame info from link.\n");
+        goto failed;
+    }
+
+    if (s->in_mem_mode == MFX_MEMTYPE_OPAQUE_FRAME || s->out_mem_mode == MFX_MEMTYPE_OPAQUE_FRAME) {
+        s->nb_ext_buffers = param->num_ext_buf + 1;
+        s->ext_buffers = av_mallocz(sizeof(*s->ext_buffers) * s->nb_ext_buffers);
av_mallocz_array

+        if (!s->ext_buffers)
+            return AVERROR(ENOMEM);
should be goto failed?
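I.e. roughly (untested):

        s->ext_buffers = av_mallocz_array(s->nb_ext_buffers, sizeof(*s->ext_buffers));
        if (!s->ext_buffers) {
            ret = AVERROR(ENOMEM);
            goto failed;
        }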

+
+        s->ext_buffers[0] = (mfxExtBuffer *)&s->opaque_alloc;
+        for (i = 1; i < param->num_ext_buf; i++)
+            s->ext_buffers[i]    = param->ext_buf[i - 1];
+        s->vpp_param.ExtParam    = s->ext_buffers;
+        s->vpp_param.NumExtParam = s->nb_ext_buffers;
+    } else {
+        s->vpp_param.NumExtParam = param->num_ext_buf;
+        s->vpp_param.ExtParam    = param->ext_buf;
+    }
+
+    s->vpp_param.AsyncDepth = 1;
+
+    if (s->in_mem_mode == MFX_MEMTYPE_SYSTEM_MEMORY)
+        s->vpp_param.IOPattern |= MFX_IOPATTERN_IN_SYSTEM_MEMORY;
+    else if (s->in_mem_mode == MFX_MEMTYPE_VIDEO_MEMORY_DECODER_TARGET)
What about PROCESSOR_TARGET?

+        s->vpp_param.IOPattern |= MFX_IOPATTERN_IN_VIDEO_MEMORY;
+    else if (s->in_mem_mode == MFX_MEMTYPE_OPAQUE_FRAME)
+        s->vpp_param.IOPattern |= MFX_IOPATTERN_IN_OPAQUE_MEMORY;
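Re the PROCESSOR_TARGET question above: the video-memory case could accept both target types, assuming in_mem_mode may also end up as MFX_MEMTYPE_VIDEO_MEMORY_PROCESSOR_TARGET (untested sketch, and the same would apply to the output chain below):

    if (s->in_mem_mode == MFX_MEMTYPE_SYSTEM_MEMORY)
        s->vpp_param.IOPattern |= MFX_IOPATTERN_IN_SYSTEM_MEMORY;
    else if (s->in_mem_mode & (MFX_MEMTYPE_VIDEO_MEMORY_DECODER_TARGET |
                               MFX_MEMTYPE_VIDEO_MEMORY_PROCESSOR_TARGET))
        /* both decoder and processor targets map to plain video memory */
        s->vpp_param.IOPattern |= MFX_IOPATTERN_IN_VIDEO_MEMORY;
    else if (s->in_mem_mode == MFX_MEMTYPE_OPAQUE_FRAME)
        s->vpp_param.IOPattern |= MFX_IOPATTERN_IN_OPAQUE_MEMORY;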
+
extra empty line

+
+    if (s->out_mem_mode == MFX_MEMTYPE_SYSTEM_MEMORY)
+        s->vpp_param.IOPattern |= MFX_IOPATTERN_OUT_SYSTEM_MEMORY;
+    else if (s->out_mem_mode == MFX_MEMTYPE_VIDEO_MEMORY_DECODER_TARGET)
+        s->vpp_param.IOPattern |= MFX_IOPATTERN_OUT_VIDEO_MEMORY;
+    else if (s->out_mem_mode == MFX_MEMTYPE_OPAQUE_FRAME)
+        s->vpp_param.IOPattern |= MFX_IOPATTERN_OUT_OPAQUE_MEMORY;
+
+    ret = MFXVideoVPP_Init(s->session, &s->vpp_param);
+    if (ret < 0) {
+        av_log(avctx, AV_LOG_ERROR, "Failed to create a qsvvpp, ret = %d.\n", 
ret);
+        goto failed;
+    }
+
+    *vpp = s;
+    return 0;
+
+failed:
+    if (s)
+        ff_qsvvpp_free(&s);
+
+    return ret;
+}
+
+int ff_qsvvpp_free(FFQSVVPPContext **vpp)
+{
+    FFQSVVPPContext *s = *vpp;
+
+    if (!s)
+        return 0;
+
+    MFXVideoVPP_Close(s->session);
+    MFXClose(s->session);
Shouldn't it check whether the session exists?
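I.e. something like (sketch):

    if (s->session) {
        MFXVideoVPP_Close(s->session);
        MFXClose(s->session);
    }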

diff --git a/libavfilter/vf_overlay_qsv.c b/libavfilter/vf_overlay_qsv.c
new file mode 100644
index 0000000..c18ea2b
--- /dev/null
+++ b/libavfilter/vf_overlay_qsv.c
@@ -0,0 +1,483 @@
+/*
+ * This file is part of Libav.
+ *
+ * Libav is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * Libav is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with Libav; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @vf_overlay_qsv
+ * A hardware accelerated overlay filter based on Intel Quick Sync Video VPP
+ */
+
+#include "internal.h"
+#include "avfilter.h"
+#include "formats.h"
+#include "libavutil/opt.h"
+#include "libavutil/common.h"
+#include "libavutil/pixdesc.h"
+#include "libavutil/eval.h"
+#include "libavutil/hwcontext.h"
+#include "libavutil/avstring.h"
+#include "libavutil/avassert.h"
+#include "libavutil/imgutils.h"
+#include "libavutil/mathematics.h"
+#include "video.h"
+
+#include "qsvvpp.h"
+
+#define MAIN    0
+#define OVERLAY 1
+
+#define OFFSET(x) offsetof(QSVOverlayContext, x)
+#define FLAGS AV_OPT_FLAG_VIDEO_PARAM
+
+enum var_name {
+    VAR_MAIN_iW,     VAR_MW,
+    VAR_MAIN_iH,     VAR_MH,
+    VAR_OVERLAY_iW,
+    VAR_OVERLAY_iH,
+    VAR_OVERLAY_X,  VAR_OX,
+    VAR_OVERLAY_Y,  VAR_OY,
+    VAR_OVERLAY_W,  VAR_OW,
+    VAR_OVERLAY_H,  VAR_OH,
+    VAR_VARS_NB
+};
+
+enum EOFAction {
+    EOF_ACTION_REPEAT,
+    EOF_ACTION_ENDALL
+};
+
+typedef struct QSVOverlayContext {
+    AVClass *class;
const

+static const AVOption options[] = {
+    { "x", "Overlay x position", OFFSET(overlay_ox), AV_OPT_TYPE_STRING, 
{.str="0"}, 0, 255, .flags = FLAGS},
+    { "y", "Overlay y position", OFFSET(overlay_oy), AV_OPT_TYPE_STRING, 
{.str="0"}, 0, 255, .flags = FLAGS},
+    { "w", "Overlay width",      OFFSET(overlay_ow), AV_OPT_TYPE_STRING, 
{.str="overlay_iw"}, 0, 255, .flags = FLAGS},
+    { "h", "Overlay height",     OFFSET(overlay_oh), AV_OPT_TYPE_STRING, 
{.str="overlay_ih*w/overlay_iw"}, 0, 255, .flags = FLAGS},
+    { "alpha", "Overlay global alpha", OFFSET(overlay_alpha), AV_OPT_TYPE_INT, 
{.i64 = 255}, 0, 255, .flags = FLAGS},
This option could use more explanation. Not necessarily here, possibly in
doc/filters.texi
+    { "eof_action", "Action to take when encountering EOF from secondary input 
",
+        OFFSET(eof_action), AV_OPT_TYPE_INT, { .i64 = EOF_ACTION_REPEAT },
+        EOF_ACTION_REPEAT, EOF_ACTION_ENDALL, .flags = FLAGS, "eof_action" },
+        { "repeat", "Repeat the previous frame.", 0, AV_OPT_TYPE_CONST, { .i64 = 
EOF_ACTION_REPEAT }, .flags = FLAGS, "eof_action" },
+        { "endall", "End both streams.",          0, AV_OPT_TYPE_CONST, { .i64 = 
EOF_ACTION_ENDALL }, .flags = FLAGS, "eof_action" },
+    { NULL }
+};
+
+static int eval_expr(AVFilterContext *ctx)
+{
+    QSVOverlayContext *vpp        = ctx->priv;
+    double            *var_values = vpp->var_values;
+    AVExpr            *ox_expr, *oy_expr, *ow_expr, *oh_expr;
+    int                ret;
+
+#define PASS_EXPR(e, s) {\
+    ret = av_expr_parse(&e, s, var_names, NULL, NULL, NULL, NULL, 0, ctx); \
+    if (ret < 0) {\
+        av_log(ctx, AV_LOG_ERROR, "Error when passing '%s'.\n", s);\
+        return ret;\
+    }\
+}
+    PASS_EXPR(ox_expr, vpp->overlay_ox);
+    PASS_EXPR(oy_expr, vpp->overlay_oy);
+    PASS_EXPR(ow_expr, vpp->overlay_ow);
+    PASS_EXPR(oh_expr, vpp->overlay_oh);
I think a failure in any of those except the first will leak memory.
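One way to plug it (untested sketch): start the expressions as NULL, make the macro jump to a common cleanup label, and free everything there — av_expr_free() is a no-op on NULL:

    AVExpr *ox_expr = NULL, *oy_expr = NULL, *ow_expr = NULL, *oh_expr = NULL;

#define PASS_EXPR(e, s) {                                                    \
    ret = av_expr_parse(&(e), s, var_names, NULL, NULL, NULL, NULL, 0, ctx); \
    if (ret < 0) {                                                           \
        av_log(ctx, AV_LOG_ERROR, "Error when parsing '%s'.\n", s);          \
        goto release;                                                        \
    }                                                                        \
}

    /* ... PASS_EXPR() and the evaluation code as in the patch ... */

release:
    av_expr_free(ox_expr);
    av_expr_free(oy_expr);
    av_expr_free(ow_expr);
    av_expr_free(oh_expr);
    return ret;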

+static int config_main_input(AVFilterLink *inlink)
+{
+    AVFilterContext       *ctx = inlink->dst;
+    QSVOverlayContext     *vpp = ctx->priv;
+    mfxVPPCompInputStream *st  = &vpp->comp_conf.InputStream[0];
+
+    av_log(ctx, AV_LOG_DEBUG, "Input[%d] is of %s.\n", FF_INLINK_IDX(inlink),
+            av_get_pix_fmt_name(inlink->format));
+
+    vpp->var_values[VAR_MAIN_iW] =
+    vpp->var_values[VAR_MW]      = inlink->w;
+    vpp->var_values[VAR_MAIN_iH] =
+    vpp->var_values[VAR_MH]      = inlink->h;
+
+    st->DstX              = 0;
+    st->DstY              = 0;
+    st->DstW              = inlink->w;
+    st->DstH              = inlink->h;
+    st->GlobalAlphaEnable = 0;
+    st->PixelAlphaEnable  = 0;
+
+    return 0;
+}
+
+static int config_overlay_input(AVFilterLink *inlink)
+{
+    AVFilterContext       *ctx = inlink->dst;
+    AVFilterLink          *in0 = ctx->inputs[0];
+    QSVOverlayContext     *vpp = ctx->priv;
+    mfxVPPCompInputStream *st  = &vpp->comp_conf.InputStream[1];
+    int                    ret;
+
+    av_log(ctx, AV_LOG_DEBUG, "Input[%d] is of %s.\n", FF_INLINK_IDX(inlink),
+            av_get_pix_fmt_name(inlink->format));
After a linebreak in a function call we usually align on the first character
after the parentheses.

+    if ((in0->format == AV_PIX_FMT_QSV && inlink->format != AV_PIX_FMT_QSV) ||
+        (in0->format != AV_PIX_FMT_QSV && inlink->format == AV_PIX_FMT_QSV)) {
+        av_log(ctx, AV_LOG_ERROR, "One of the inputs is of AV_PIX_FMT_QSV,"
+                "but the other is of soft pixel format.\n");
+        av_log(ctx, AV_LOG_ERROR, "HW/SW mixed format is not supported 
now.\n");
+        return AVERROR(EINVAL);
+    }
I do not think anything guarantees that the inputs will be initialized in any
specific order, so this check needs to be postponed to config_output.
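Rough, untested sketch of how the check could move there:

static int config_output(AVFilterLink *outlink)
{
    AVFilterContext *ctx = outlink->src;
    AVFilterLink    *in0 = ctx->inputs[MAIN];
    AVFilterLink    *in1 = ctx->inputs[OVERLAY];

    /* both inputs are configured by now, so a hw/sw mismatch can be
     * detected reliably here */
    if ((in0->format == AV_PIX_FMT_QSV) != (in1->format == AV_PIX_FMT_QSV)) {
        av_log(ctx, AV_LOG_ERROR,
               "Mixing hardware and software pixel formats is not supported.\n");
        return AVERROR(EINVAL);
    }

    /* ... the rest of config_output() as in the patch ... */
}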

+
+    vpp->var_values[VAR_OVERLAY_iW] = inlink->w;
+    vpp->var_values[VAR_OVERLAY_iH] = inlink->h;
+    ret = eval_expr(ctx);
+    if (ret < 0)
+        return ret;
+
+    st->DstX      = vpp->var_values[VAR_OX];
+    st->DstY      = vpp->var_values[VAR_OY];
+    st->DstW      = vpp->var_values[VAR_OW];
+    st->DstH      = vpp->var_values[VAR_OH];
+    st->GlobalAlpha       = vpp->overlay_alpha;
+    st->GlobalAlphaEnable = (st->GlobalAlpha < 255);
+    st->PixelAlphaEnable  = have_alpha_planar(inlink);
+
+    return 0;
+}
+
+static int config_output(AVFilterLink *outlink)
+{
+    AVFilterContext   *ctx = outlink->src;
+    QSVOverlayContext *vpp = ctx->priv;
+    AVFilterLink      *inlink = ctx->inputs[0];
+
+    av_log(ctx, AV_LOG_DEBUG, "Output is of %s.\n", 
av_get_pix_fmt_name(outlink->format));
+
+    outlink->w          = vpp->var_values[VAR_MW];
+    outlink->h          = vpp->var_values[VAR_MH];
+    outlink->frame_rate = inlink->frame_rate;
+    outlink->time_base  = av_inv_q(outlink->frame_rate);
+
+    return ff_qsvvpp_create(ctx, &vpp->qsv, &vpp->qsv_param);
+}
+
+static int blend_frame(AVFilterContext *ctx,
+                        AVFrame *mpic, AVFrame *opic)
broken indentation

+{
+    int                ret = 0;
+    QSVOverlayContext *vpp = ctx->priv;
+    AVFrame     *opic_copy = NULL;
+
+    ret = ff_qsvvpp_filter_frame(vpp->qsv, ctx->inputs[0], mpic);
+    if (ret == 0 || ret == AVERROR(EAGAIN)) {
+        /**
+         * Make a copy of the overlay frame. Because:
It's not a copy, it's a new reference IIUC. The data remains the same, right?

+static int request_frame(AVFilterLink *outlink)
+{
+    AVFilterContext *ctx = outlink->src;
+    QSVOverlayContext *s = ctx->priv;
+    AVRational   tb_main = ctx->inputs[MAIN]->time_base;
+    AVRational   tb_over = ctx->inputs[OVERLAY]->time_base;
+    int              ret = 0;
+
+    /* get a frame on the main input */
+    if (!s->main) {
+        ret = ff_request_frame(ctx->inputs[MAIN]);
+        if (ret < 0)
+            return ret;
+    }
+
+    /* get a new frame on the overlay input, on EOF check setting 'eof_action' */
+    if (!s->over_next) {
+        ret = ff_request_frame(ctx->inputs[OVERLAY]);
+        if (ret == AVERROR_EOF)
+           return handle_overlay_eof(ctx);
+        else if (ret < 0)
+            return ret;
+    }
+
+    while (s->main->pts != AV_NOPTS_VALUE &&
+           s->over_next->pts != AV_NOPTS_VALUE &&
+           av_compare_ts(s->over_next->pts, tb_over, s->main->pts, tb_main) < 0) {
+        av_frame_free(&s->over_prev);
+        FFSWAP(AVFrame*, s->over_prev, s->over_next);
+
+        ret = ff_request_frame(ctx->inputs[OVERLAY]);
+        if (ret == AVERROR_EOF)
+            return handle_overlay_eof(ctx);
+        else if (ret < 0)
+            return ret;
+    }
+
+    if (s->main->pts == AV_NOPTS_VALUE ||
+        s->over_next->pts == AV_NOPTS_VALUE ||
+        !av_compare_ts(s->over_next->pts, tb_over, s->main->pts, tb_main)) {
+        ret = blend_frame(ctx, s->main, s->over_next);
+        av_frame_free(&s->over_prev);
+        FFSWAP(AVFrame*, s->over_prev, s->over_next);
+    } else if (s->over_prev) {
+        ret = blend_frame(ctx, s->main, s->over_prev);
+    } else {
+        av_frame_free(&s->main);
+        ret = AVERROR(EAGAIN);
+    }
+
+    s->main = NULL;
+
+    return ret ;
+}
This duplication of nontrivial code with vf_overlay is suboptimal. Would it be
hard to share?

+
+static int filter_frame_main(AVFilterLink *inlink, AVFrame *frame)
+{
+    QSVOverlayContext *s = inlink->dst->priv;
+
+    av_assert0(!s->main);
+    s->main = frame;
+
+    return 0;
+}
+
+static int filter_frame_overlay(AVFilterLink *inlink, AVFrame *frame)
+{
+    QSVOverlayContext *s = inlink->dst->priv;
+
+    av_assert0(!s->over_next);
+    s->over_next = frame;
+
+    return 0;
+}
+
+static int overlay_qsv_init(AVFilterContext *ctx)
+{
+    QSVOverlayContext *vpp = ctx->priv;
+
+    /* fill composite config */
+    vpp->comp_conf.Header.BufferId = MFX_EXTBUFF_VPP_COMPOSITE;
+    vpp->comp_conf.Header.BufferSz = sizeof(vpp->comp_conf);
+    vpp->comp_conf.NumInputStream  = ctx->nb_inputs;
+    vpp->comp_conf.InputStream     =
+            av_mallocz(sizeof(*vpp->comp_conf.InputStream) * ctx->nb_inputs);
nit: av_mallocz_array()

+    if (!vpp->comp_conf.InputStream)
+        return AVERROR(ENOMEM);
+
+    /* initialize QSVVPP params */
+    vpp->qsv_param.filter_frame = NULL;
+    vpp->qsv_param.ext_buf      = av_mallocz(sizeof(*vpp->qsv_param.ext_buf));
+    if (!vpp->qsv_param.ext_buf)
+        return AVERROR(ENOMEM);
+
+    vpp->qsv_param.ext_buf[0]    = (mfxExtBuffer *)&vpp->comp_conf;
+    vpp->qsv_param.num_ext_buf   = 1;
+    vpp->qsv_param.out_sw_format = AV_PIX_FMT_NV12;
+    vpp->qsv_param.num_crop      = 0;
+
+    return 0;
+}
+
+static void overlay_qsv_uninit(AVFilterContext *ctx)
+{
+    QSVOverlayContext *vpp = ctx->priv;
+
+    av_frame_free(&vpp->main);
+    av_frame_free(&vpp->over_prev);
+    av_frame_free(&vpp->over_next);
+    ff_qsvvpp_free(&vpp->qsv);
+    av_freep(&vpp->comp_conf.InputStream);
+    av_freep(&vpp->qsv_param.ext_buf);
+}
+
+static int overlay_qsv_query_formats(AVFilterContext *ctx)
+{
+    int i;
+
+    static const enum AVPixelFormat main_in_fmts[] = {
+        AV_PIX_FMT_YUV420P,
+        AV_PIX_FMT_NV12,
+        AV_PIX_FMT_YUYV422,
+        AV_PIX_FMT_RGB32,
+        AV_PIX_FMT_QSV,
+        AV_PIX_FMT_NONE
+    };
+    static const enum AVPixelFormat out_pix_fmts[] = {
+        AV_PIX_FMT_NV12,
+        AV_PIX_FMT_QSV,
+        AV_PIX_FMT_NONE
+    };
+
+    for(i = 0; i < ctx->nb_inputs; i++)
missing space

+AVFilter ff_vf_overlay_qsv = {
+    .name          = "overlay_qsv",
+    .description   = NULL_IF_CONFIG_SMALL("Quick Sync Video overlay."),
+    .priv_size     = sizeof(QSVOverlayContext),
+    .query_formats = overlay_qsv_query_formats,
+    .init          = overlay_qsv_init,
+    .uninit        = overlay_qsv_uninit,
+    .inputs        = overlay_qsv_inputs,
+    .outputs       = overlay_qsv_outputs,
+    .priv_class    = &overlay_qsv_class,
+   .flags_internal = FF_FILTER_FLAG_HWFRAME_AWARE,
broken indentation

1. This duplication of nontrivial code with vf_overlay is suboptimal. Would it be hard to share?
2. This option could use more explanation. Not necessarily here, possibly in doc/filters.texi.

For the first one, in my opinion we may need to add a new common file to share
the code, so could we do this later?
For the second one, I am not quite sure about the format of filters.texi, so I
have not done it in the new patch.

All the other issues above will be addressed in the next version of the patch.
Thanks for your detailed review; I look forward to your suggestions on the new patch.

