[FFmpeg-cvslog] amfenc: Add DXVA2 hardware frame input support

2018-04-15 Thread Alexander Kravchenko
ffmpeg | branch: master | Alexander Kravchenko  | Sat 
Apr 14 15:46:10 2018 +0100| [2c6ca2b54968ad3d2c947cdc16f92b6867f29f3a] | 
committer: Mark Thompson

amfenc: Add DXVA2 hardware frame input support

Adds support for AMF initialisation from a DXVA2 (Direct3D9) device, and
then allows passing DXVA2 surfaces into an AMF encoder.

Signed-off-by: Mark Thompson 

> http://git.videolan.org/gitweb.cgi/ffmpeg.git/?a=commit;h=2c6ca2b54968ad3d2c947cdc16f92b6867f29f3a
---

 libavcodec/amfenc.c | 79 +
 1 file changed, 79 insertions(+)

diff --git a/libavcodec/amfenc.c b/libavcodec/amfenc.c
index 1ac4ebf456..2a8c76069b 100644
--- a/libavcodec/amfenc.c
+++ b/libavcodec/amfenc.c
@@ -24,6 +24,10 @@
 #if CONFIG_D3D11VA
 #include "libavutil/hwcontext_d3d11va.h"
 #endif
+#if CONFIG_DXVA2
+#define COBJMACROS
+#include "libavutil/hwcontext_dxva2.h"
+#endif
 #include "libavutil/mem.h"
 #include "libavutil/pixdesc.h"
 #include "libavutil/time.h"
@@ -51,6 +55,9 @@ const enum AVPixelFormat ff_amf_pix_fmts[] = {
 #if CONFIG_D3D11VA
 AV_PIX_FMT_D3D11,
 #endif
+#if CONFIG_DXVA2
+AV_PIX_FMT_DXVA2_VLD,
+#endif
 AV_PIX_FMT_NONE
 };
 
@@ -162,6 +169,52 @@ static int amf_init_from_d3d11_device(AVCodecContext 
*avctx, AVD3D11VADeviceCont
 }
 #endif
 
+#if CONFIG_DXVA2
+static int amf_init_from_dxva2_device(AVCodecContext *avctx, 
AVDXVA2DeviceContext *hwctx)
+{
+AmfContext *ctx = avctx->priv_data;
+HANDLE device_handle;
+IDirect3DDevice9 *device;
+HRESULT hr;
+AMF_RESULT res;
+int ret;
+
+hr = IDirect3DDeviceManager9_OpenDeviceHandle(hwctx->devmgr, &device_handle);
+if (FAILED(hr)) {
+av_log(avctx, AV_LOG_ERROR, "Failed to open device handle for 
Direct3D9 device: %lx.\n", (unsigned long)hr);
+return AVERROR_EXTERNAL;
+}
+
+hr = IDirect3DDeviceManager9_LockDevice(hwctx->devmgr, device_handle, &device, FALSE);
+if (SUCCEEDED(hr)) {
+IDirect3DDeviceManager9_UnlockDevice(hwctx->devmgr, device_handle, 
FALSE);
+ret = 0;
+} else {
+av_log(avctx, AV_LOG_ERROR, "Failed to lock device handle for 
Direct3D9 device: %lx.\n", (unsigned long)hr);
+ret = AVERROR_EXTERNAL;
+}
+
+IDirect3DDeviceManager9_CloseDeviceHandle(hwctx->devmgr, device_handle);
+
+if (ret < 0)
+return ret;
+
+res = ctx->context->pVtbl->InitDX9(ctx->context, device);
+
+IDirect3DDevice9_Release(device);
+
+if (res != AMF_OK) {
+if (res == AMF_NOT_SUPPORTED)
+av_log(avctx, AV_LOG_ERROR, "AMF via D3D9 is not supported on the 
given device.\n");
+else
+av_log(avctx, AV_LOG_ERROR, "AMF failed to initialise on given 
D3D9 device: %d.\n", res);
+return AVERROR(ENODEV);
+}
+
+return 0;
+}
+#endif
+
 static int amf_init_context(AVCodecContext *avctx)
 {
 AmfContext *ctx = avctx->priv_data;
@@ -206,6 +259,13 @@ static int amf_init_context(AVCodecContext *avctx)
 return ret;
 break;
 #endif
+#if CONFIG_DXVA2
+case AV_HWDEVICE_TYPE_DXVA2:
+ret = amf_init_from_dxva2_device(avctx, 
frames_ctx->device_ctx->hwctx);
+if (ret < 0)
+return ret;
+break;
+#endif
 default:
 av_log(avctx, AV_LOG_ERROR, "AMF initialisation from a %s frames 
context is not supported.\n",
av_hwdevice_get_type_name(frames_ctx->device_ctx->type));
@@ -230,6 +290,13 @@ static int amf_init_context(AVCodecContext *avctx)
 return ret;
 break;
 #endif
+#if CONFIG_DXVA2
+case AV_HWDEVICE_TYPE_DXVA2:
+ret = amf_init_from_dxva2_device(avctx, device_ctx->hwctx);
+if (ret < 0)
+return ret;
+break;
+#endif
 default:
 av_log(avctx, AV_LOG_ERROR, "AMF initialisation from a %s device 
is not supported.\n",
av_hwdevice_get_type_name(device_ctx->type));
@@ -581,6 +648,18 @@ int ff_amf_send_frame(AVCodecContext *avctx, const AVFrame 
*frame)
 }
 break;
 #endif
+#if CONFIG_DXVA2
+case AV_PIX_FMT_DXVA2_VLD:
+{
+IDirect3DSurface9 *texture = (IDirect3DSurface9 
*)frame->data[3]; // actual texture
+
+res = ctx->context->pVtbl->CreateSurfaceFromDX9Native(ctx->context, texture, &surface, NULL); // wrap to AMF surface
+AMF_RETURN_IF_FALSE(ctx, res == AMF_OK, AVERROR(ENOMEM), 
"CreateSurfaceFromDX9Native() failed  with error %d\n", res);
+
+hw_surface = 1;
+}
+break;
+#endif
 default:
 {
 res = ctx->context->pVtbl->AllocSurface(ctx->context, AMF_MEMORY_HOST, ctx->format, avctx->width, avctx->height, &surface);

___
ffmpeg-cvslog mailing list
ffmpeg-cvslog@ffmpeg.org
http://ffmpeg.org/mailman/listinfo/ffmpeg-cvslog


[FFmpeg-cvslog] amfenc: Remove spurious initialisations

2018-04-15 Thread Mark Thompson
ffmpeg | branch: master | Mark Thompson  | Sat Apr 14 15:46:15 
2018 +0100| [edecd723f3e4528d5153a00b92343041367fbe06] | committer: Mark 
Thompson

amfenc: Remove spurious initialisations

Also minor cosmetics.

> http://git.videolan.org/gitweb.cgi/ffmpeg.git/?a=commit;h=edecd723f3e4528d5153a00b92343041367fbe06
---

 libavcodec/amfenc.c | 76 -
 1 file changed, 29 insertions(+), 47 deletions(-)

diff --git a/libavcodec/amfenc.c b/libavcodec/amfenc.c
index 2a8c76069b..384d8efc92 100644
--- a/libavcodec/amfenc.c
+++ b/libavcodec/amfenc.c
@@ -107,16 +107,11 @@ static AMFTraceWriterVtbl tracer_vtbl =
 
 static int amf_load_library(AVCodecContext *avctx)
 {
-AmfContext *ctx = avctx->priv_data;
-AMFInit_Fn  init_fun = NULL;
-AMFQueryVersion_Fn  version_fun = NULL;
-AMF_RESULT  res = AMF_OK;
+AmfContext*ctx = avctx->priv_data;
+AMFInit_Fn init_fun;
+AMFQueryVersion_Fn version_fun;
+AMF_RESULT res;
 
-ctx->eof = 0;
-ctx->delayed_drain = 0;
-ctx->hw_frames_ctx = NULL;
-ctx->hw_device_ctx = NULL;
-ctx->delayed_surface = NULL;
 ctx->delayed_frame = av_frame_alloc();
 if (!ctx->delayed_frame) {
 return AVERROR(ENOMEM);
@@ -326,10 +321,10 @@ static int amf_init_context(AVCodecContext *avctx)
 
 static int amf_init_encoder(AVCodecContext *avctx)
 {
-AmfContext  *ctx = avctx->priv_data;
-const wchar_t   *codec_id = NULL;
-AMF_RESULT   res = AMF_OK;
-enum AVPixelFormat   pix_fmt;
+AmfContext*ctx = avctx->priv_data;
+const wchar_t *codec_id = NULL;
+AMF_RESULT res;
+enum AVPixelFormat pix_fmt;
 
 switch (avctx->codec->id) {
 case AV_CODEC_ID_H264:
@@ -360,9 +355,9 @@ static int amf_init_encoder(AVCodecContext *avctx)
 
 int av_cold ff_amf_encode_close(AVCodecContext *avctx)
 {
-AmfContext  *ctx = avctx->priv_data;
-if (ctx->delayed_surface)
-{
+AmfContext *ctx = avctx->priv_data;
+
+if (ctx->delayed_surface) {
 ctx->delayed_surface->pVtbl->Release(ctx->delayed_surface);
 ctx->delayed_surface = NULL;
 }
@@ -402,11 +397,11 @@ int av_cold ff_amf_encode_close(AVCodecContext *avctx)
 static int amf_copy_surface(AVCodecContext *avctx, const AVFrame *frame,
 AMFSurface* surface)
 {
-AMFPlane   *plane = NULL;
-uint8_t*dst_data[4];
-int dst_linesize[4];
-int planes;
-int i;
+AMFPlane *plane;
+uint8_t  *dst_data[4];
+int   dst_linesize[4];
+int   planes;
+int   i;
 
 planes = surface->pVtbl->GetPlanesCount(surface);
 av_assert0(planes < FF_ARRAY_ELEMS(dst_data));
@@ -437,11 +432,11 @@ static inline int timestamp_queue_enqueue(AVCodecContext 
*avctx, int64_t timesta
 
 static int amf_copy_buffer(AVCodecContext *avctx, AVPacket *pkt, AMFBuffer 
*buffer)
 {
-AmfContext *ctx = avctx->priv_data;
-int ret;
-AMFVariantStructvar = {0};
-int64_t timestamp = AV_NOPTS_VALUE;
-int64_t size = buffer->pVtbl->GetSize(buffer);
+AmfContext  *ctx = avctx->priv_data;
+int  ret;
+AMFVariantStruct var = {0};
+int64_t  timestamp = AV_NOPTS_VALUE;
+int64_t  size = buffer->pVtbl->GetSize(buffer);
 
 if ((ret = ff_alloc_packet2(avctx, pkt, size, 0)) < 0) {
 return ret;
@@ -497,20 +492,7 @@ static int amf_copy_buffer(AVCodecContext *avctx, AVPacket 
*pkt, AMFBuffer *buff
 // amfenc API implementation
 int ff_amf_encode_init(AVCodecContext *avctx)
 {
-AmfContext *ctx = avctx->priv_data;
-int ret;
-
-ctx->factory = NULL;
-ctx->debug = NULL;
-ctx->trace = NULL;
-ctx->context = NULL;
-ctx->encoder = NULL;
-ctx->library = NULL;
-ctx->version = 0;
-ctx->eof = 0;
-ctx->format = 0;
-ctx->tracer.vtbl = NULL;
-ctx->tracer.avctx = NULL;
+int ret;
 
 if ((ret = amf_load_library(avctx)) == 0) {
 if ((ret = amf_init_context(avctx)) == 0) {
@@ -587,18 +569,18 @@ static AMFBuffer *amf_create_buffer_with_frame_ref(const 
AVFrame *frame, AMFCont
 
 static void amf_release_buffer_with_frame_ref(AMFBuffer 
*frame_ref_storage_buffer)
 {
-AVFrame *av_frame_ref;
-memcpy(&av_frame_ref, frame_ref_storage_buffer->pVtbl->GetNative(frame_ref_storage_buffer), sizeof(av_frame_ref));
-av_frame_free(&av_frame_ref);
+AVFrame *frame_ref;
+memcpy(&frame_ref, frame_ref_storage_buffer->pVtbl->GetNative(frame_ref_storage_buffer), sizeof(frame_ref));
+av_frame_free(&frame_ref);
 frame_ref_storage_buffer->pVtbl->Release(frame_ref_storage_buffer);
 }
 
 int ff_amf_send_frame(AVCodecContext *avctx, const AVFrame *frame)
 {
-AMF_RESULT  res = AMF_OK;
-AmfContext *ctx = avctx->priv_data;
-AMFSurface   

[FFmpeg-cvslog] amfenc: Fail to open if the user-supplied device is not usable

2018-04-15 Thread Mark Thompson
ffmpeg | branch: master | Mark Thompson  | Sat Apr 14 15:45:55 
2018 +0100| [ff1be6c9a694ee019608288fd25f1c869f7f51b7] | committer: Mark 
Thompson

amfenc: Fail to open if the user-supplied device is not usable

If the user supplies a device or frames context then it is an error
not to use it; this is consistent with other hardware components.

Also factorise out the D3D11 initialisation and improve error
messages.

> http://git.videolan.org/gitweb.cgi/ffmpeg.git/?a=commit;h=ff1be6c9a694ee019608288fd25f1c869f7f51b7
---

 libavcodec/amfenc.c | 130 
 1 file changed, 81 insertions(+), 49 deletions(-)

diff --git a/libavcodec/amfenc.c b/libavcodec/amfenc.c
index b9418b6791..8a9d6884a4 100644
--- a/libavcodec/amfenc.c
+++ b/libavcodec/amfenc.c
@@ -152,10 +152,30 @@ static int amf_load_library(AVCodecContext *avctx)
 return 0;
 }
 
+#if CONFIG_D3D11VA
+static int amf_init_from_d3d11_device(AVCodecContext *avctx, 
AVD3D11VADeviceContext *hwctx)
+{
+AmfContext *ctx = avctx->priv_data;
+AMF_RESULT res;
+
+res = ctx->context->pVtbl->InitDX11(ctx->context, hwctx->device, 
AMF_DX11_1);
+if (res != AMF_OK) {
+if (res == AMF_NOT_SUPPORTED)
+av_log(avctx, AV_LOG_ERROR, "AMF via D3D11 is not supported on the 
given device.\n");
+else
+av_log(avctx, AV_LOG_ERROR, "AMF failed to initialise on the given 
D3D11 device: %d.\n", res);
+return AVERROR(ENODEV);
+}
+
+return 0;
+}
+#endif
+
 static int amf_init_context(AVCodecContext *avctx)
 {
-AmfContext *ctx = avctx->priv_data;
-AMF_RESULT  res = AMF_OK;
+AmfContext *ctx = avctx->priv_data;
+AMF_RESULT  res;
+av_unused int ret;
 
 ctx->hwsurfaces_in_queue = 0;
 ctx->hwsurfaces_in_queue_max = 16;
@@ -176,59 +196,71 @@ static int amf_init_context(AVCodecContext *avctx)
 
 res = ctx->factory->pVtbl->CreateContext(ctx->factory, &ctx->context);
 AMF_RETURN_IF_FALSE(ctx, res == AMF_OK, AVERROR_UNKNOWN, "CreateContext() failed with error %d\n", res);
-// try to reuse existing DX device
-#if CONFIG_D3D11VA
+
+// If a device was passed to the encoder, try to initialise from that.
 if (avctx->hw_frames_ctx) {
-AVHWFramesContext *device_ctx = 
(AVHWFramesContext*)avctx->hw_frames_ctx->data;
-if (device_ctx->device_ctx->type == AV_HWDEVICE_TYPE_D3D11VA) {
-if (amf_av_to_amf_format(device_ctx->sw_format) != 
AMF_SURFACE_UNKNOWN) {
-if (device_ctx->device_ctx->hwctx) {
-AVD3D11VADeviceContext *device_d3d11 = 
(AVD3D11VADeviceContext *)device_ctx->device_ctx->hwctx;
-res = ctx->context->pVtbl->InitDX11(ctx->context, 
device_d3d11->device, AMF_DX11_1);
-if (res == AMF_OK) {
-ctx->hw_frames_ctx = 
av_buffer_ref(avctx->hw_frames_ctx);
-if (!ctx->hw_frames_ctx) {
-return AVERROR(ENOMEM);
-}
-if (device_ctx->initial_pool_size > 0)
-ctx->hwsurfaces_in_queue_max = 
device_ctx->initial_pool_size - 1;
-} else {
-if(res == AMF_NOT_SUPPORTED)
-av_log(avctx, AV_LOG_INFO, "avctx->hw_frames_ctx 
has D3D11 device which doesn't have D3D11VA interface, switching to default\n");
-else
-av_log(avctx, AV_LOG_INFO, "avctx->hw_frames_ctx 
has non-AMD device, switching to default\n");
-}
-}
-} else {
-av_log(avctx, AV_LOG_INFO, "avctx->hw_frames_ctx has format 
not uspported by AMF, switching to default\n");
-}
+AVHWFramesContext *frames_ctx = 
(AVHWFramesContext*)avctx->hw_frames_ctx->data;
+
+if (amf_av_to_amf_format(frames_ctx->sw_format) == 
AMF_SURFACE_UNKNOWN) {
+av_log(avctx, AV_LOG_ERROR, "Format of input frames context (%s) 
is not supported by AMF.\n",
+   av_get_pix_fmt_name(frames_ctx->sw_format));
+return AVERROR(EINVAL);
 }
-} else if (avctx->hw_device_ctx) {
-AVHWDeviceContext *device_ctx = 
(AVHWDeviceContext*)(avctx->hw_device_ctx->data);
-if (device_ctx->type == AV_HWDEVICE_TYPE_D3D11VA) {
-if (device_ctx->hwctx) {
-AVD3D11VADeviceContext *device_d3d11 = (AVD3D11VADeviceContext 
*)device_ctx->hwctx;
-res = ctx->context->pVtbl->InitDX11(ctx->context, 
device_d3d11->device, AMF_DX11_1);
-if (res == AMF_OK) {
-ctx->hw_device_ctx = av_buffer_ref(avctx->hw_device_ctx);
-if (!ctx->hw_device_ctx) {
-return AVERROR(ENOMEM);
-}
-} else {
-if (res == AMF_NOT_SUPPORTED)
-

[FFmpeg-cvslog] amfenc: Do not automatically download/upload unknown hardware input frames

2018-04-15 Thread Mark Thompson
ffmpeg | branch: master | Mark Thompson  | Sat Apr 14 15:46:00 
2018 +0100| [73ed6fa9d77da8cd4f34742dd0f56e64aa714786] | committer: Mark 
Thompson

amfenc: Do not automatically download/upload unknown hardware input frames

Supplying a hardware input frame which is not in the input hardware frames
context is not allowed by the API, so additional code to handle it is not
necessary.  Further, handling it automatically results in very low
performance - it is more appropriate to fail immediately so that the user
can fix their incorrect setup.

> http://git.videolan.org/gitweb.cgi/ffmpeg.git/?a=commit;h=73ed6fa9d77da8cd4f34742dd0f56e64aa714786
---

 libavcodec/amfenc.c | 88 +
 1 file changed, 35 insertions(+), 53 deletions(-)

diff --git a/libavcodec/amfenc.c b/libavcodec/amfenc.c
index 8a9d6884a4..65a8e0a853 100644
--- a/libavcodec/amfenc.c
+++ b/libavcodec/amfenc.c
@@ -71,14 +71,6 @@ static const FormatMap format_map[] =
 { AV_PIX_FMT_D3D11,  AMF_SURFACE_NV12 },
 };
 
-
-static int is_hwaccel_pix_fmt(enum AVPixelFormat pix_fmt)
-{
-const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(pix_fmt);
-return desc->flags & AV_PIX_FMT_FLAG_HWACCEL;
-}
-
-
 static enum AMF_SURFACE_FORMAT amf_av_to_amf_format(enum AVPixelFormat fmt)
 {
 int i;
@@ -337,32 +329,14 @@ int av_cold ff_amf_encode_close(AVCodecContext *avctx)
 static int amf_copy_surface(AVCodecContext *avctx, const AVFrame *frame,
 AMFSurface* surface)
 {
-AVFrame*sw_frame = NULL;
 AMFPlane   *plane = NULL;
 uint8_t*dst_data[4];
 int dst_linesize[4];
-int ret = 0;
 int planes;
 int i;
 
-if (frame->hw_frames_ctx && is_hwaccel_pix_fmt(frame->format)) {
-if (!(sw_frame = av_frame_alloc())) {
-av_log(avctx, AV_LOG_ERROR, "Can not alloc frame\n");
-ret = AVERROR(ENOMEM);
-goto fail;
-}
-if ((ret = av_hwframe_transfer_data(sw_frame, frame, 0)) < 0) {
-av_log(avctx, AV_LOG_ERROR, "Error transferring the data to system 
memory\n");
-goto fail;
-}
-frame = sw_frame;
-}
-planes = (int)surface->pVtbl->GetPlanesCount(surface);
-if (planes > amf_countof(dst_data)) {
-av_log(avctx, AV_LOG_ERROR, "Invalid number of planes %d in 
surface\n", planes);
-ret = AVERROR(EINVAL);
-goto fail;
-}
+planes = surface->pVtbl->GetPlanesCount(surface);
+av_assert0(planes < FF_ARRAY_ELEMS(dst_data));
 
 for (i = 0; i < planes; i++) {
 plane = surface->pVtbl->GetPlaneAt(surface, i);
@@ -373,11 +347,7 @@ static int amf_copy_surface(AVCodecContext *avctx, const 
AVFrame *frame,
 (const uint8_t**)frame->data, frame->linesize, frame->format,
 avctx->width, avctx->height);
 
-fail:
-if (sw_frame) {
-av_frame_free(&sw_frame);
-}
-return ret;
+return 0;
 }
 
 static inline int timestamp_queue_enqueue(AVCodecContext *avctx, int64_t 
timestamp)
@@ -579,31 +549,46 @@ int ff_amf_send_frame(AVCodecContext *avctx, const 
AVFrame *frame)
 return AVERROR_EOF;
 }
 } else { // submit frame
+int hw_surface = 0;
+
 if (ctx->delayed_surface != NULL) {
 return AVERROR(EAGAIN); // should not happen when called from 
ffmpeg, other clients may resubmit
 }
 // prepare surface from frame
-if (frame->hw_frames_ctx && ( // HW frame detected
-// check if the same hw_frames_ctx as used in initialization
-(ctx->hw_frames_ctx && frame->hw_frames_ctx->data == 
ctx->hw_frames_ctx->data) ||
-// check if the same hw_device_ctx as used in initialization
-(ctx->hw_device_ctx && 
((AVHWFramesContext*)frame->hw_frames_ctx->data)->device_ctx ==
-(AVHWDeviceContext*)ctx->hw_device_ctx->data)
-)) {
-AMFBuffer *frame_ref_storage_buffer;
-
+switch (frame->format) {
 #if CONFIG_D3D11VA
-static const GUID AMFTextureArrayIndexGUID = { 0x28115527, 0xe7c3, 
0x4b66, { 0x99, 0xd3, 0x4f, 0x2a, 0xe6, 0xb4, 0x7f, 0xaf } };
-ID3D11Texture2D *texture = (ID3D11Texture2D*)frame->data[0]; // 
actual texture
-int index = (int)(size_t)frame->data[1]; // index is a slice in 
texture array is - set to tell AMF which slice to use
-texture->lpVtbl->SetPrivateData(texture, &AMFTextureArrayIndexGUID, sizeof(index), &index);
+case AV_PIX_FMT_D3D11:
+{
+static const GUID AMFTextureArrayIndexGUID = { 0x28115527, 
0xe7c3, 0x4b66, { 0x99, 0xd3, 0x4f, 0x2a, 0xe6, 0xb4, 0x7f, 0xaf } };
+ID3D11Texture2D *texture = (ID3D11Texture2D*)frame->data[0]; 
// actual texture
+int index = (intptr_t)frame->data[1]; // index is a slice in 
texture array is - set to tell AMF which slice to use
 
-res = 

[FFmpeg-cvslog] amfenc: Ensure that the software format of hardware frames is valid

2018-04-15 Thread Alexander Kravchenko
ffmpeg | branch: master | Alexander Kravchenko  | Sat 
Apr 14 15:46:05 2018 +0100| [ab7eed13a789b3f709a8964b0337bc69f152a9d7] | 
committer: Mark Thompson

amfenc: Ensure that the software format of hardware frames is valid

Signed-off-by: Mark Thompson 

> http://git.videolan.org/gitweb.cgi/ffmpeg.git/?a=commit;h=ab7eed13a789b3f709a8964b0337bc69f152a9d7
---

 libavcodec/amfenc.c | 12 +---
 1 file changed, 9 insertions(+), 3 deletions(-)

diff --git a/libavcodec/amfenc.c b/libavcodec/amfenc.c
index 65a8e0a853..1ac4ebf456 100644
--- a/libavcodec/amfenc.c
+++ b/libavcodec/amfenc.c
@@ -68,7 +68,6 @@ static const FormatMap format_map[] =
 { AV_PIX_FMT_GRAY8,  AMF_SURFACE_GRAY8 },
 { AV_PIX_FMT_YUV420P,AMF_SURFACE_YUV420P },
 { AV_PIX_FMT_YUYV422,AMF_SURFACE_YUY2 },
-{ AV_PIX_FMT_D3D11,  AMF_SURFACE_NV12 },
 };
 
 static enum AMF_SURFACE_FORMAT amf_av_to_amf_format(enum AVPixelFormat fmt)
@@ -263,6 +262,7 @@ static int amf_init_encoder(AVCodecContext *avctx)
 AmfContext  *ctx = avctx->priv_data;
 const wchar_t   *codec_id = NULL;
 AMF_RESULT   res = AMF_OK;
+enum AVPixelFormat   pix_fmt;
 
 switch (avctx->codec->id) {
 case AV_CODEC_ID_H264:
@@ -276,8 +276,14 @@ static int amf_init_encoder(AVCodecContext *avctx)
 }
 AMF_RETURN_IF_FALSE(ctx, codec_id != NULL, AVERROR(EINVAL), "Codec %d is 
not supported\n", avctx->codec->id);
 
-ctx->format = amf_av_to_amf_format(avctx->pix_fmt);
-AMF_RETURN_IF_FALSE(ctx, ctx->format != AMF_SURFACE_UNKNOWN, 
AVERROR(EINVAL), "Format %d is not supported\n", avctx->pix_fmt);
+if (ctx->hw_frames_ctx)
+pix_fmt = ((AVHWFramesContext*)ctx->hw_frames_ctx->data)->sw_format;
+else
+pix_fmt = avctx->pix_fmt;
+
+ctx->format = amf_av_to_amf_format(pix_fmt);
+AMF_RETURN_IF_FALSE(ctx, ctx->format != AMF_SURFACE_UNKNOWN, 
AVERROR(EINVAL),
+"Format %s is not supported\n", 
av_get_pix_fmt_name(pix_fmt));
 
 res = ctx->factory->pVtbl->CreateComponent(ctx->factory, ctx->context, 
codec_id, >encoder);
 AMF_RETURN_IF_FALSE(ctx, res == AMF_OK, AVERROR_ENCODER_NOT_FOUND, 
"CreateComponent(%ls) failed with error %d\n", codec_id, res);

___
ffmpeg-cvslog mailing list
ffmpeg-cvslog@ffmpeg.org
http://ffmpeg.org/mailman/listinfo/ffmpeg-cvslog


[FFmpeg-cvslog] configure: fix clang-cl detection

2018-04-15 Thread Alexander Bilyak
ffmpeg | branch: master | Alexander Bilyak  | Thu 
Feb  1 11:52:24 2018 +0100| [9fd11e51882aad9000943a9962d10880cae6667a] | 
committer: Michael Niedermayer

configure: fix clang-cl detection

When using clang-cl it expects parameters passed in MSVC-style, so appropriate 
toolchain should be selected.
As soon as both clang and clang-cl report themselves as "clang" with the -v option, the only chance to detect
clang-cl is passing the -? option to both, which is valid for clang-cl.exe and not
for clang.exe.

Reviewed-by: Dale Curtis 
Signed-off-by: Michael Niedermayer 

> http://git.videolan.org/gitweb.cgi/ffmpeg.git/?a=commit;h=9fd11e51882aad9000943a9962d10880cae6667a
---

 configure | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/configure b/configure
index 36e425be62..a6f32f8d9a 100755
--- a/configure
+++ b/configure
@@ -4339,7 +4339,7 @@ probe_cc(){
 _depflags='-MMD'
 _cflags_speed='-O3'
 _cflags_size='-Os'
-elif $_cc -v 2>&1 | grep -q clang; then
+elif $_cc -v 2>&1 | grep -q clang && ! $_cc -? > /dev/null 2>&1; then
 _type=clang
 _ident=$($_cc --version 2>/dev/null | head -n1)
 _depflags='-MMD -MF $(@:.o=.d) -MT $@'
@@ -4410,7 +4410,7 @@ probe_cc(){
 _flags_filter=msvc_flags
 _ld_lib='lib%.a'
 _ld_path='-libpath:'
-elif $_cc -nologo- 2>&1 | grep -q Microsoft; then
+elif $_cc -nologo- 2>&1 | grep -q Microsoft || $_cc -v 2>&1 | grep -q 
clang && $_cc -? > /dev/null 2>&1; then
 _type=msvc
 _ident=$($_cc 2>&1 | head -n1 | tr -d '\r')
 _DEPCMD='$(DEP$(1)) $(DEP$(1)FLAGS) $($(1)DEP_FLAGS) $< 2>&1 | awk 
'\''/including/ { sub(/^.*file: */, ""); gsub(/\\/, "/"); if (!match($$0, / /)) 
print "$@:", $$0 }'\'' > $(@:.o=.d)'

___
ffmpeg-cvslog mailing list
ffmpeg-cvslog@ffmpeg.org
http://ffmpeg.org/mailman/listinfo/ffmpeg-cvslog


[FFmpeg-cvslog] avfilter/af_headphone: add single hrir multichannel stream mode

2018-04-15 Thread Paul B Mahol
ffmpeg | branch: master | Paul B Mahol  | Sun Apr 15 12:48:12 
2018 +0200| [3e003a985f4b07d8685a2f251f5090f11abdfc06] | committer: Paul B Mahol

avfilter/af_headphone: add single hrir multichannel stream mode

Signed-off-by: Paul B Mahol 

> http://git.videolan.org/gitweb.cgi/ffmpeg.git/?a=commit;h=3e003a985f4b07d8685a2f251f5090f11abdfc06
---

 doc/filters.texi   |  19 +
 libavfilter/af_headphone.c | 185 -
 2 files changed, 150 insertions(+), 54 deletions(-)

diff --git a/doc/filters.texi b/doc/filters.texi
index 18a6da155c..40083dd080 100644
--- a/doc/filters.texi
+++ b/doc/filters.texi
@@ -3179,6 +3179,17 @@ Set custom gain for LFE channels. Value is in dB. 
Default is 0.
 @item size
 Set size of frame in number of samples which will be processed at once.
 Default value is @var{1024}. Allowed range is from 1024 to 96000.
+
+@item hrir
+Set format of hrir stream.
+Default value is @var{stereo}. Alternative value is @var{multich}.
+If value is set to @var{stereo}, number of additional streams should
+be greater or equal to number of input channels in first input stream.
+Also each additional stream should have stereo number of channels.
+If value is set to @var{multich}, number of additional streams should
+be exactly one. Also number of input channels of additional stream
+should be equal or greater than twice number of channels of first input
+stream.
 @end table
 
 @subsection Examples
@@ -3192,6 +3203,14 @@ The files give coefficients for each position of virtual 
loudspeaker:
 ffmpeg -i input.wav -lavfi-complex 
"amovie=azi_270_ele_0_DFC.wav[sr],amovie=azi_90_ele_0_DFC.wav[sl],amovie=azi_225_ele_0_DFC.wav[br],amovie=azi_135_ele_0_DFC.wav[bl],amovie=azi_0_ele_0_DFC.wav,asplit[fc][lfe],amovie=azi_35_ele_0_DFC.wav[fl],amovie=azi_325_ele_0_DFC.wav[fr],[a:0][fl][fr][fc][lfe][bl][br][sl][sr]headphone=FL|FR|FC|LFE|BL|BR|SL|SR"
 output.wav
 @end example
+
+@item
+Full example using wav files as coefficients with amovie filters for 7.1 
downmix,
+but now in @var{multich} @var{hrir} format.
+@example
+ffmpeg -i input.wav -lavfi-complex 
"amovie=minp.wav[hrirs],[a:0][hrirs]headphone=map=FL|FR|FC|LFE|BL|BR|SL|SR:hrir=multich"
+output.wav
+@end example
 @end itemize
 
 @section highpass
diff --git a/libavfilter/af_headphone.c b/libavfilter/af_headphone.c
index 8b34609a2f..a71ed336d8 100644
--- a/libavfilter/af_headphone.c
+++ b/libavfilter/af_headphone.c
@@ -35,6 +35,9 @@
 #define TIME_DOMAIN  0
 #define FREQUENCY_DOMAIN 1
 
+#define HRIR_STEREO 0
+#define HRIR_MULTI  1
+
 typedef struct HeadphoneContext {
 const AVClass *class;
 
@@ -64,6 +67,7 @@ typedef struct HeadphoneContext {
 int buffer_length;
 int n_fft;
 int size;
+int hrir_fmt;
 
 int *delay[2];
 float *data_ir[2];
@@ -130,14 +134,18 @@ static void parse_map(AVFilterContext *ctx)
 char buf[8];
 
 p = NULL;
-if (parse_channel_name(s, s->nb_inputs - 1, &p, &out_ch_id, buf)) {
+if (parse_channel_name(s, s->nb_irs, &p, &out_ch_id, buf)) {
 av_log(ctx, AV_LOG_WARNING, "Failed to parse \'%s\' as channel 
name.\n", buf);
 continue;
 }
-s->mapping[s->nb_inputs - 1] = out_ch_id;
-s->nb_inputs++;
+s->mapping[s->nb_irs] = out_ch_id;
+s->nb_irs++;
 }
-s->nb_irs = s->nb_inputs - 1;
+
+if (s->hrir_fmt == HRIR_MULTI)
+s->nb_inputs = 2;
+else
+s->nb_inputs = s->nb_irs + 1;
 
 av_free(args);
 }
@@ -402,7 +410,7 @@ static int convert_coeffs(AVFilterContext *ctx, 
AVFilterLink *inlink)
 float *data_ir_r = NULL;
 int offset = 0, ret = 0;
 int n_fft;
-int i, j;
+int i, j, k;
 
 s->buffer_length = 1 << (32 - ff_clz(s->ir_len));
 s->n_fft = n_fft = 1 << (32 - ff_clz(s->ir_len + s->size));
@@ -433,8 +441,8 @@ static int convert_coeffs(AVFilterContext *ctx, 
AVFilterLink *inlink)
 
 s->data_ir[0] = av_calloc(FFALIGN(s->ir_len, 16), sizeof(float) * 
s->nb_irs);
 s->data_ir[1] = av_calloc(FFALIGN(s->ir_len, 16), sizeof(float) * 
s->nb_irs);
-s->delay[0] = av_malloc_array(s->nb_irs, sizeof(float));
-s->delay[1] = av_malloc_array(s->nb_irs, sizeof(float));
+s->delay[0] = av_calloc(s->nb_irs, sizeof(float));
+s->delay[1] = av_calloc(s->nb_irs, sizeof(float));
 
 if (s->type == TIME_DOMAIN) {
 s->ringbuffer[0] = av_calloc(s->buffer_length, sizeof(float) * 
nb_input_channels);
@@ -442,8 +450,8 @@ static int convert_coeffs(AVFilterContext *ctx, 
AVFilterLink *inlink)
 } else {
 s->ringbuffer[0] = av_calloc(s->buffer_length, sizeof(float));
 s->ringbuffer[1] = av_calloc(s->buffer_length, sizeof(float));
-s->temp_fft[0] = av_malloc_array(s->n_fft, sizeof(FFTComplex));
-s->temp_fft[1] = av_malloc_array(s->n_fft, sizeof(FFTComplex));
+s->temp_fft[0] = av_calloc(s->n_fft, sizeof(FFTComplex));
+s->temp_fft[1] = av_calloc(s->n_fft, 

[FFmpeg-cvslog] avcodec/nvdec: correctly set intra_pic_flag for h264/hevc

2018-04-15 Thread Timo Rothenpieler
ffmpeg | branch: master | Timo Rothenpieler  | Fri Apr 
13 19:39:24 2018 +0200| [955fa237f495e3077996c1d282b4ace2a9ad6c15] | committer: 
Timo Rothenpieler

avcodec/nvdec: correctly set intra_pic_flag for h264/hevc

> http://git.videolan.org/gitweb.cgi/ffmpeg.git/?a=commit;h=955fa237f495e3077996c1d282b4ace2a9ad6c15
---

 libavcodec/nvdec_h264.c | 8 +++-
 libavcodec/nvdec_hevc.c | 2 +-
 2 files changed, 8 insertions(+), 2 deletions(-)

diff --git a/libavcodec/nvdec_h264.c b/libavcodec/nvdec_h264.c
index 35f54f2ed5..25b30329d0 100644
--- a/libavcodec/nvdec_h264.c
+++ b/libavcodec/nvdec_h264.c
@@ -74,7 +74,7 @@ static int nvdec_h264_start_frame(AVCodecContext *avctx,
 .bottom_field_flag = h->picture_structure == PICT_BOTTOM_FIELD,
 .second_field  = FIELD_PICTURE(h) && !h->first_field,
 .ref_pic_flag  = h->nal_ref_idc != 0,
-.intra_pic_flag= 0,
+.intra_pic_flag= 1,
 
 .CodecSpecific.h264 = {
 .log2_max_frame_num_minus4= sps->log2_max_frame_num - 
4,
@@ -132,6 +132,9 @@ static int nvdec_h264_decode_slice(AVCodecContext *avctx, 
const uint8_t *buffer,
uint32_t size)
 {
 NVDECContext *ctx = avctx->internal->hwaccel_priv_data;
+CUVIDPICPARAMS *pp = &ctx->pic_params;
+const H264Context *h = avctx->priv_data;
+const H264SliceContext *sl = &h->slice_ctx[0];
 void *tmp;
 
 tmp = av_fast_realloc(ctx->bitstream, >bitstream_allocated,
@@ -152,6 +155,9 @@ static int nvdec_h264_decode_slice(AVCodecContext *avctx, 
const uint8_t *buffer,
 ctx->bitstream_len += size + 3;
 ctx->nb_slices++;
 
+if (sl->slice_type != AV_PICTURE_TYPE_I && sl->slice_type != 
AV_PICTURE_TYPE_SI)
+pp->intra_pic_flag = 0;
+
 return 0;
 }
 
diff --git a/libavcodec/nvdec_hevc.c b/libavcodec/nvdec_hevc.c
index e89256d75a..008963130b 100644
--- a/libavcodec/nvdec_hevc.c
+++ b/libavcodec/nvdec_hevc.c
@@ -93,7 +93,7 @@ static int nvdec_hevc_start_frame(AVCodecContext *avctx,
 .FrameHeightInMbs  = sps->height / 16,
 .CurrPicIdx= cf->idx,
 .ref_pic_flag  = 1,
-.intra_pic_flag= 0,
+.intra_pic_flag= IS_IRAP(s),
 
 .CodecSpecific.hevc = {
 .pic_width_in_luma_samples= sps->width,

___
ffmpeg-cvslog mailing list
ffmpeg-cvslog@ffmpeg.org
http://ffmpeg.org/mailman/listinfo/ffmpeg-cvslog


[FFmpeg-cvslog] avcodec/aac_ac3_parser: account for data already in the parsing buffer

2018-04-15 Thread Hendrik Leppkes
ffmpeg | branch: master | Hendrik Leppkes  | Thu Apr  5 
17:09:35 2018 +0200| [8df8a9299364016027825a67534b93cd2b6af473] | committer: 
Hendrik Leppkes

avcodec/aac_ac3_parser: account for data already in the parsing buffer

If a frame starts very close to a packet boundary, the start code may
already have been added to the parsing buffer, indicated by a small
negative value of "i", while the header is still being tracked in the
"state" variable.

Reduce the remaining size accordingly, otherwise trying to find the next
frame could skip over the frame header and lump two frames together as
one.

> http://git.videolan.org/gitweb.cgi/ffmpeg.git/?a=commit;h=8df8a9299364016027825a67534b93cd2b6af473
---

 libavcodec/aac_ac3_parser.c | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/libavcodec/aac_ac3_parser.c b/libavcodec/aac_ac3_parser.c
index 019074b0dd..54e459844f 100644
--- a/libavcodec/aac_ac3_parser.c
+++ b/libavcodec/aac_ac3_parser.c
@@ -60,6 +60,9 @@ get_next:
 s->remaining_size += i;
 goto get_next;
 }
+else if (i < 0) {
+s->remaining_size += i;
+}
 }
 }
 }

___
ffmpeg-cvslog mailing list
ffmpeg-cvslog@ffmpeg.org
http://ffmpeg.org/mailman/listinfo/ffmpeg-cvslog


[FFmpeg-cvslog] avcodec/dxv: add support for "high" quality mode

2018-04-15 Thread Paul B Mahol
ffmpeg | branch: master | Paul B Mahol  | Thu Apr  5 16:44:28 
2018 +0200| [250792be5e905cbcfd0b4858e57de9b007e74e44] | committer: Paul B Mahol

avcodec/dxv: add support for "high" quality mode

Signed-off-by: Paul B Mahol 

> http://git.videolan.org/gitweb.cgi/ffmpeg.git/?a=commit;h=250792be5e905cbcfd0b4858e57de9b007e74e44
---

 libavcodec/dxv.c | 815 ---
 1 file changed, 780 insertions(+), 35 deletions(-)

diff --git a/libavcodec/dxv.c b/libavcodec/dxv.c
index 529e211258..08aca73b1f 100644
--- a/libavcodec/dxv.c
+++ b/libavcodec/dxv.c
@@ -1,6 +1,7 @@
 /*
  * Resolume DXV decoder
  * Copyright (C) 2015 Vittorio Giovara 
+ * Copyright (C) 2018 Paul B Mahol
  *
  * This file is part of FFmpeg.
  *
@@ -23,6 +24,7 @@
 
 #include "libavutil/imgutils.h"
 
+#include "mathops.h"
 #include "avcodec.h"
 #include "bytestream.h"
 #include "internal.h"
@@ -34,50 +36,211 @@ typedef struct DXVContext {
 TextureDSPContext texdsp;
 GetByteContext gbc;
 
-uint8_t *tex_data;  // Compressed texture
-int tex_rat;// Compression ratio
-int tex_step;   // Distance between blocks
-int64_t tex_size;   // Texture size
+uint8_t *tex_data;   // Compressed texture
+uint8_t *ctex_data;  // Compressed texture
+int tex_rat; // Compression ratio
+int tex_step;// Distance between blocks
+int ctex_step;   // Distance between blocks
+int64_t tex_size;// Texture size
+int64_t ctex_size;   // Texture size
 
 /* Optimal number of slices for parallel decoding */
 int slice_count;
 
+uint8_t *op_data[4]; // Opcodes
+int64_t op_size[4];  // Opcodes size
+
+int texture_block_w;
+int texture_block_h;
+
+int ctexture_block_w;
+int ctexture_block_h;
+
 /* Pointer to the selected decompression function */
 int (*tex_funct)(uint8_t *dst, ptrdiff_t stride, const uint8_t *block);
+int (*tex_funct_planar[2])(uint8_t *plane0, ptrdiff_t stride0,
+   uint8_t *plane1, ptrdiff_t stride1,
+   const uint8_t *block);
 } DXVContext;
 
+static void decompress_indices(uint8_t *dst, const uint8_t *src)
+{
+int block, i;
+
+for (block = 0; block < 2; block++) {
+int tmp = AV_RL24(src);
+
+/* Unpack 8x3 bit from last 3 byte block */
+for (i = 0; i < 8; i++)
+dst[i] = (tmp >> (i * 3)) & 0x7;
+
+src += 3;
+dst += 8;
+}
+}
+
+static int extract_component(int yo0, int yo1, int code)
+{
+int yo;
+
+if (yo0 == yo1) {
+yo = yo0;
+} else if (code == 0) {
+yo = yo0;
+} else if (code == 1) {
+yo = yo1;
+} else {
+if (yo0 > yo1) {
+yo = (uint8_t) (((8 - code) * yo0 +
+ (code - 1) * yo1) / 7);
+} else {
+if (code == 6) {
+yo = 0;
+} else if (code == 7) {
+yo = 255;
+} else {
+yo = (uint8_t) (((6 - code) * yo0 +
+ (code - 1) * yo1) / 5);
+}
+}
+}
+
+return yo;
+}
+
+static int cocg_block(uint8_t *plane0, ptrdiff_t stride0,
+  uint8_t *plane1, ptrdiff_t stride1,
+  const uint8_t *block)
+{
+uint8_t co_indices[16];
+uint8_t cg_indices[16];
+uint8_t co0 = *(block);
+uint8_t co1 = *(block + 1);
+uint8_t cg0 = *(block + 8);
+uint8_t cg1 = *(block + 9);
+int x, y;
+
+decompress_indices(co_indices, block + 2);
+decompress_indices(cg_indices, block + 10);
+
+for (y = 0; y < 4; y++) {
+for (x = 0; x < 4; x++) {
+int co_code = co_indices[x + y * 4];
+int cg_code = cg_indices[x + y * 4];
+
+plane0[x] = extract_component(cg0, cg1, cg_code);
+plane1[x] = extract_component(co0, co1, co_code);
+}
+plane0 += stride0;
+plane1 += stride1;
+}
+
+return 16;
+}
+
+static void yao_subblock(uint8_t *dst, uint8_t *yo_indices,
+ptrdiff_t stride, const uint8_t *block)
+{
+uint8_t yo0 = *(block);
+uint8_t yo1 = *(block + 1);
+int x, y;
+
+decompress_indices(yo_indices, block + 2);
+
+for (y = 0; y < 4; y++) {
+for (x = 0; x < 4; x++) {
+int yo_code = yo_indices[x + y * 4];
+
+dst[x] = extract_component(yo0, yo1, yo_code);
+}
+dst += stride;
+}
+}
+
+static int yo_block(uint8_t *dst, ptrdiff_t stride,
+uint8_t *unused0, ptrdiff_t unused1,
+const uint8_t *block)
+{
+uint8_t yo_indices[16];
+
+yao_subblock(dst,  yo_indices, stride, block);
+yao_subblock(dst + 4,  yo_indices, stride, block + 8);
+yao_subblock(dst + 8,  yo_indices, stride, block + 16);
+yao_subblock(dst + 12, yo_indices,