This is all functionality duplicated by the vdpau hwaccel API, which was introduced almost 3 years ago.
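For API users still on the removed *_vdpau decoders, switching to the vdpau hwaccel is mostly mechanical: open the plain decoder and negotiate VDPAU through get_format(). A minimal sketch of the application side follows (not part of this patch); it assumes the VdpDevice and VdpGetProcAddress handles come from the application's own display setup, and it leaves out the get_buffer2() callback that supplies the surfaces.

    #include <libavcodec/avcodec.h>
    #include <libavcodec/vdpau.h>

    /* Placeholders: created by the application's own VDPAU setup
     * (e.g. via vdp_device_create_x11()); not provided by libavcodec. */
    static VdpDevice          device;
    static VdpGetProcAddress *get_proc_address;

    static enum AVPixelFormat get_vdpau_format(AVCodecContext *avctx,
                                               const enum AVPixelFormat *fmts)
    {
        const enum AVPixelFormat *p;

        for (p = fmts; *p != AV_PIX_FMT_NONE; p++) {
            /* Bind the device to the context; the vdpau hwaccel then
             * creates the VdpDecoder and renders into the surfaces. */
            if (*p == AV_PIX_FMT_VDPAU &&
                av_vdpau_bind_context(avctx, device, get_proc_address, 0) == 0)
                return AV_PIX_FMT_VDPAU;
        }
        /* VDPAU not offered or bind failed: let lavc pick a software format. */
        return avcodec_default_get_format(avctx, fmts);
    }

    static int open_h264(AVCodecContext **pavctx)
    {
        /* Open the ordinary decoder; do not request "h264_vdpau" by name. */
        AVCodec *codec = avcodec_find_decoder(AV_CODEC_ID_H264);
        AVCodecContext *avctx;

        if (!codec)
            return AVERROR_DECODER_NOT_FOUND;
        if (!(avctx = avcodec_alloc_context3(codec)))
            return AVERROR(ENOMEM);

        avctx->get_format = get_vdpau_format;

        *pavctx = avctx;
        return avcodec_open2(avctx, codec, NULL);
    }

The rest (allocating frames in get_buffer2() whose data[3] carries the VdpVideoSurface) is unchanged from what existing vdpau hwaccel users already do, so it is omitted here.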
API users had time enough to switch to the new API. But note that the API stubs are kept, and old programs will still compile against modern libavcodec. --- configure | 11 -- libavcodec/allcodecs.c | 16 -- libavcodec/error_resilience.c | 3 - libavcodec/h263dec.c | 8 - libavcodec/h264.c | 43 +----- libavcodec/h264_picture.c | 13 -- libavcodec/h264_slice.c | 23 +-- libavcodec/mpeg12dec.c | 64 +------- libavcodec/mpeg4videodec.c | 26 ---- libavcodec/mpegpicture.c | 6 +- libavcodec/mpegvideo.c | 13 +- libavcodec/utils.c | 4 - libavcodec/vc1dec.c | 71 +-------- libavcodec/vdpau.c | 340 ------------------------------------------ libavcodec/vdpau_compat.h | 48 ------ 15 files changed, 10 insertions(+), 679 deletions(-) delete mode 100644 libavcodec/vdpau_compat.h diff --git a/configure b/configure index 552f698..b23baf6 100755 --- a/configure +++ b/configure @@ -2422,8 +2422,6 @@ h264_vda_hwaccel_deps="vda" h264_vda_hwaccel_select="h264_decoder" h264_vda_old_hwaccel_deps="vda" h264_vda_old_hwaccel_select="h264_decoder" -h264_vdpau_decoder_deps="vdpau" -h264_vdpau_decoder_select="h264_decoder" h264_vdpau_hwaccel_deps="vdpau" h264_vdpau_hwaccel_select="h264_decoder" h264_videotoolbox_hwaccel_deps="videotoolbox" @@ -2437,12 +2435,8 @@ hevc_vaapi_hwaccel_deps="vaapi VAPictureParameterBufferHEVC" hevc_vaapi_hwaccel_select="hevc_decoder" hevc_vdpau_hwaccel_deps="vdpau VdpPictureInfoHEVC" hevc_vdpau_hwaccel_select="hevc_decoder" -mpeg_vdpau_decoder_deps="vdpau" -mpeg_vdpau_decoder_select="mpeg2video_decoder" mpeg_xvmc_hwaccel_deps="xvmc" mpeg_xvmc_hwaccel_select="mpeg2video_decoder" -mpeg1_vdpau_decoder_deps="vdpau" -mpeg1_vdpau_decoder_select="mpeg1video_decoder" mpeg1_vdpau_hwaccel_deps="vdpau" mpeg1_vdpau_hwaccel_select="mpeg1video_decoder" mpeg1_videotoolbox_hwaccel_deps="videotoolbox" @@ -2467,8 +2461,6 @@ mpeg2_xvmc_hwaccel_select="mpeg2video_decoder" mpeg4_crystalhd_decoder_select="crystalhd" mpeg4_vaapi_hwaccel_deps="vaapi" mpeg4_vaapi_hwaccel_select="mpeg4_decoder" -mpeg4_vdpau_decoder_deps="vdpau" -mpeg4_vdpau_decoder_select="mpeg4_decoder" mpeg4_vdpau_hwaccel_deps="vdpau" mpeg4_vdpau_hwaccel_select="mpeg4_decoder" mpeg4_videotoolbox_hwaccel_deps="videotoolbox" @@ -2481,8 +2473,6 @@ vc1_dxva2_hwaccel_deps="dxva2" vc1_dxva2_hwaccel_select="vc1_decoder" vc1_vaapi_hwaccel_deps="vaapi" vc1_vaapi_hwaccel_select="vc1_decoder" -vc1_vdpau_decoder_deps="vdpau" -vc1_vdpau_decoder_select="vc1_decoder" vc1_vdpau_hwaccel_deps="vdpau" vc1_vdpau_hwaccel_select="vc1_decoder" vc1_qsv_hwaccel_deps="libmfx" @@ -2491,7 +2481,6 @@ wmv3_crystalhd_decoder_select="crystalhd" wmv3_d3d11va_hwaccel_select="vc1_d3d11va_hwaccel" wmv3_dxva2_hwaccel_select="vc1_dxva2_hwaccel" wmv3_vaapi_hwaccel_select="vc1_vaapi_hwaccel" -wmv3_vdpau_decoder_select="vc1_vdpau_decoder" wmv3_vdpau_hwaccel_select="vc1_vdpau_hwaccel" # parsers diff --git a/libavcodec/allcodecs.c b/libavcodec/allcodecs.c index f33e457..21fa589 100644 --- a/libavcodec/allcodecs.c +++ b/libavcodec/allcodecs.c @@ -192,9 +192,6 @@ void avcodec_register_all(void) REGISTER_DECODER(H264_MMAL, h264_mmal); REGISTER_DECODER(H264_QSV, h264_qsv); REGISTER_DECODER(H264_VDA, h264_vda); -#if FF_API_VDPAU - REGISTER_DECODER(H264_VDPAU, h264_vdpau); -#endif REGISTER_ENCDEC (HAP, hap); REGISTER_DECODER(HEVC, hevc); REGISTER_DECODER(HEVC_QSV, hevc_qsv); @@ -231,14 +228,7 @@ void avcodec_register_all(void) REGISTER_ENCDEC (MPEG2VIDEO, mpeg2video); REGISTER_ENCDEC (MPEG4, mpeg4); REGISTER_DECODER(MPEG4_CRYSTALHD, mpeg4_crystalhd); -#if FF_API_VDPAU - REGISTER_DECODER(MPEG4_VDPAU, 
mpeg4_vdpau); -#endif REGISTER_DECODER(MPEGVIDEO, mpegvideo); -#if FF_API_VDPAU - REGISTER_DECODER(MPEG_VDPAU, mpeg_vdpau); - REGISTER_DECODER(MPEG1_VDPAU, mpeg1_vdpau); -#endif REGISTER_DECODER(MPEG2_CRYSTALHD, mpeg2_crystalhd); REGISTER_DECODER(MPEG2_QSV, mpeg2_qsv); REGISTER_DECODER(MSA1, msa1); @@ -319,9 +309,6 @@ void avcodec_register_all(void) REGISTER_DECODER(VBLE, vble); REGISTER_DECODER(VC1, vc1); REGISTER_DECODER(VC1_CRYSTALHD, vc1_crystalhd); -#if FF_API_VDPAU - REGISTER_DECODER(VC1_VDPAU, vc1_vdpau); -#endif REGISTER_DECODER(VC1IMAGE, vc1image); REGISTER_DECODER(VC1_QSV, vc1_qsv); REGISTER_DECODER(VCR1, vcr1); @@ -341,9 +328,6 @@ void avcodec_register_all(void) REGISTER_ENCDEC (WMV2, wmv2); REGISTER_DECODER(WMV3, wmv3); REGISTER_DECODER(WMV3_CRYSTALHD, wmv3_crystalhd); -#if FF_API_VDPAU - REGISTER_DECODER(WMV3_VDPAU, wmv3_vdpau); -#endif REGISTER_DECODER(WMV3IMAGE, wmv3image); REGISTER_DECODER(WNV1, wnv1); REGISTER_DECODER(XAN_WC3, xan_wc3); diff --git a/libavcodec/error_resilience.c b/libavcodec/error_resilience.c index 2c741a4..fec8062 100644 --- a/libavcodec/error_resilience.c +++ b/libavcodec/error_resilience.c @@ -777,9 +777,6 @@ void ff_er_frame_start(ERContext *s) static int er_supported(ERContext *s) { if(s->avctx->hwaccel && s->avctx->hwaccel->decode_slice || -#if FF_API_CAP_VDPAU - s->avctx->codec->capabilities&AV_CODEC_CAP_HWACCEL_VDPAU || -#endif !s->cur_pic.f || s->cur_pic.field_picture ) diff --git a/libavcodec/h263dec.c b/libavcodec/h263dec.c index c85ea9d..1f5aa21 100644 --- a/libavcodec/h263dec.c +++ b/libavcodec/h263dec.c @@ -41,7 +41,6 @@ #include "mpegvideo.h" #include "msmpeg4.h" #include "qpeldsp.h" -#include "vdpau_compat.h" #include "thread.h" #include "wmv2.h" @@ -603,13 +602,6 @@ retry: if (!s->divx_packed && !avctx->hwaccel) ff_thread_finish_setup(avctx); -#if FF_API_CAP_VDPAU - if (CONFIG_MPEG4_VDPAU_DECODER && (s->avctx->codec->capabilities & AV_CODEC_CAP_HWACCEL_VDPAU)) { - ff_vdpau_mpeg4_decode_picture(avctx->priv_data, s->gb.buffer, s->gb.buffer_end - s->gb.buffer); - goto frame_end; - } -#endif - if (avctx->hwaccel) { ret = avctx->hwaccel->start_frame(avctx, s->gb.buffer, s->gb.buffer_end - s->gb.buffer); diff --git a/libavcodec/h264.c b/libavcodec/h264.c index 8b95003..21fc69e 100644 --- a/libavcodec/h264.c +++ b/libavcodec/h264.c @@ -49,7 +49,7 @@ #include "rectangle.h" #include "svq3.h" #include "thread.h" -#include "vdpau_compat.h" +#include "mpeg4video.h" const uint16_t ff_h264_mb_sizes[4] = { 256, 384, 512, 768 }; @@ -1565,11 +1565,6 @@ again: if (h->avctx->hwaccel && (ret = h->avctx->hwaccel->start_frame(h->avctx, buf, buf_size)) < 0) goto end; -#if FF_API_CAP_VDPAU - if (CONFIG_H264_VDPAU_DECODER && - h->avctx->codec->capabilities & AV_CODEC_CAP_HWACCEL_VDPAU) - ff_vdpau_h264_picture_start(h); -#endif } if (sl->redundant_pic_count == 0) { @@ -1579,16 +1574,6 @@ again: consumed); if (ret < 0) goto end; -#if FF_API_CAP_VDPAU - } else if (CONFIG_H264_VDPAU_DECODER && - h->avctx->codec->capabilities & AV_CODEC_CAP_HWACCEL_VDPAU) { - ff_vdpau_add_data_chunk(h->cur_pic_ptr->f->data[0], - start_code, - sizeof(start_code)); - ff_vdpau_add_data_chunk(h->cur_pic_ptr->f->data[0], - &buf[buf_index - consumed], - consumed); -#endif } else context_count++; } @@ -1999,29 +1984,3 @@ AVCodec ff_h264_decoder = { .profiles = NULL_IF_CONFIG_SMALL(profiles), .priv_class = &h264_class, }; - -#if CONFIG_H264_VDPAU_DECODER && FF_API_VDPAU -static const AVClass h264_vdpau_class = { - .class_name = "H264 VDPAU Decoder", - .item_name = av_default_item_name, - 
.option = h264_options, - .version = LIBAVUTIL_VERSION_INT, -}; - -AVCodec ff_h264_vdpau_decoder = { - .name = "h264_vdpau", - .long_name = NULL_IF_CONFIG_SMALL("H.264 / AVC / MPEG-4 AVC / MPEG-4 part 10 (VDPAU acceleration)"), - .type = AVMEDIA_TYPE_VIDEO, - .id = AV_CODEC_ID_H264, - .priv_data_size = sizeof(H264Context), - .init = ff_h264_decode_init, - .close = h264_decode_end, - .decode = h264_decode_frame, - .capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_DELAY | AV_CODEC_CAP_HWACCEL_VDPAU, - .flush = flush_dpb, - .pix_fmts = (const enum AVPixelFormat[]) { AV_PIX_FMT_VDPAU_H264, - AV_PIX_FMT_NONE}, - .profiles = NULL_IF_CONFIG_SMALL(profiles), - .priv_class = &h264_vdpau_class, -}; -#endif diff --git a/libavcodec/h264_picture.c b/libavcodec/h264_picture.c index 731d780..5a6107a 100644 --- a/libavcodec/h264_picture.c +++ b/libavcodec/h264_picture.c @@ -42,7 +42,6 @@ #include "mpegutils.h" #include "rectangle.h" #include "thread.h" -#include "vdpau_compat.h" void ff_h264_unref_picture(H264Context *h, H264Picture *pic) { @@ -157,12 +156,6 @@ int ff_h264_field_end(H264Context *h, H264SliceContext *sl, int in_setup) int err = 0; h->mb_y = 0; -#if FF_API_CAP_VDPAU - if (CONFIG_H264_VDPAU_DECODER && - h->avctx->codec->capabilities & AV_CODEC_CAP_HWACCEL_VDPAU) - ff_vdpau_h264_set_reference_frames(h); -#endif - if (in_setup || !(avctx->active_thread_type & FF_THREAD_FRAME)) { if (!h->droppable) { err = ff_h264_execute_ref_pic_marking(h, h->mmco, h->mmco_index); @@ -180,12 +173,6 @@ int ff_h264_field_end(H264Context *h, H264SliceContext *sl, int in_setup) "hardware accelerator failed to decode picture\n"); } -#if FF_API_CAP_VDPAU - if (CONFIG_H264_VDPAU_DECODER && - h->avctx->codec->capabilities & AV_CODEC_CAP_HWACCEL_VDPAU) - ff_vdpau_h264_picture_complete(h); -#endif - #if CONFIG_ERROR_RESILIENCE av_assert0(sl == h->slice_ctx); /* diff --git a/libavcodec/h264_slice.c b/libavcodec/h264_slice.c index a346ccb..8d20efc 100644 --- a/libavcodec/h264_slice.c +++ b/libavcodec/h264_slice.c @@ -612,11 +612,7 @@ static int h264_frame_start(H264Context *h) if ((ret = alloc_picture(h, pic)) < 0) return ret; - if(!h->frame_recovered && !h->avctx->hwaccel -#if FF_API_CAP_VDPAU - && !(h->avctx->codec->capabilities & AV_CODEC_CAP_HWACCEL_VDPAU) -#endif - ) + if(!h->frame_recovered && !h->avctx->hwaccel) avpriv_color_frame(pic->f, c); h->cur_pic_ptr = pic; @@ -1051,17 +1047,6 @@ static int h264_slice_header_init(H264Context *h) goto fail; } -#if FF_API_CAP_VDPAU - if (h->avctx->codec && - h->avctx->codec->capabilities & AV_CODEC_CAP_HWACCEL_VDPAU && - (h->sps.bit_depth_luma != 8 || h->sps.chroma_format_idc > 1)) { - av_log(h->avctx, AV_LOG_ERROR, - "VDPAU decoding does not support video colorspace.\n"); - ret = AVERROR_INVALIDDATA; - goto fail; - } -#endif - if (h->sps.bit_depth_luma < 8 || h->sps.bit_depth_luma > 14 || h->sps.bit_depth_luma == 11 || h->sps.bit_depth_luma == 13 ) { @@ -2541,11 +2526,7 @@ int ff_h264_execute_decode_slices(H264Context *h, unsigned context_count) h->slice_ctx[0].next_slice_idx = INT_MAX; - if (h->avctx->hwaccel -#if FF_API_CAP_VDPAU - || h->avctx->codec->capabilities & AV_CODEC_CAP_HWACCEL_VDPAU -#endif - ) + if (h->avctx->hwaccel) return 0; if (context_count == 1) { int ret; diff --git a/libavcodec/mpeg12dec.c b/libavcodec/mpeg12dec.c index 21ccf6c..0ac9b42 100644 --- a/libavcodec/mpeg12dec.c +++ b/libavcodec/mpeg12dec.c @@ -46,7 +46,6 @@ #include "mpegvideodata.h" #include "thread.h" #include "version.h" -#include "vdpau_compat.h" #include "xvmc_internal.h" typedef struct 
Mpeg1Context { @@ -1180,9 +1179,6 @@ static const enum AVPixelFormat mpeg1_hwaccel_pixfmt_list_420[] = { #if CONFIG_MPEG1_XVMC_HWACCEL AV_PIX_FMT_XVMC, #endif -#if CONFIG_MPEG1_VDPAU_DECODER && FF_API_VDPAU - AV_PIX_FMT_VDPAU_MPEG1, -#endif #if CONFIG_MPEG1_VDPAU_HWACCEL AV_PIX_FMT_VDPAU, #endif @@ -1194,9 +1190,6 @@ static const enum AVPixelFormat mpeg2_hwaccel_pixfmt_list_420[] = { #if CONFIG_MPEG2_XVMC_HWACCEL AV_PIX_FMT_XVMC, #endif -#if CONFIG_MPEG_VDPAU_DECODER && FF_API_VDPAU - AV_PIX_FMT_VDPAU_MPEG2, -#endif #if CONFIG_MPEG2_VDPAU_HWACCEL AV_PIX_FMT_VDPAU, #endif @@ -1226,12 +1219,6 @@ static const enum AVPixelFormat mpeg12_pixfmt_list_444[] = { AV_PIX_FMT_NONE }; -#if FF_API_VDPAU -static inline int uses_vdpau(AVCodecContext *avctx) { - return avctx->pix_fmt == AV_PIX_FMT_VDPAU_MPEG1 || avctx->pix_fmt == AV_PIX_FMT_VDPAU_MPEG2; -} -#endif - static enum AVPixelFormat mpeg_get_pixelformat(AVCodecContext *avctx) { Mpeg1Context *s1 = avctx->priv_data; @@ -1256,11 +1243,7 @@ static enum AVPixelFormat mpeg_get_pixelformat(AVCodecContext *avctx) static void setup_hwaccel_for_pixfmt(AVCodecContext *avctx) { // until then pix_fmt may be changed right after codec init - if (avctx->hwaccel -#if FF_API_VDPAU - || uses_vdpau(avctx) -#endif - ) + if (avctx->hwaccel) if (avctx->idct_algo == FF_IDCT_AUTO) avctx->idct_algo = FF_IDCT_SIMPLE; @@ -2467,12 +2450,6 @@ static int decode_chunks(AVCodecContext *avctx, AVFrame *picture, s2->er.error_count += s2->thread_context[i]->er.error_count; } -#if FF_API_VDPAU - if ((CONFIG_MPEG_VDPAU_DECODER || CONFIG_MPEG1_VDPAU_DECODER) - && uses_vdpau(avctx)) - ff_vdpau_mpeg_picture_complete(s2, buf, buf_size, s->slice_count); -#endif - ret = slice_end(avctx, picture); if (ret < 0) return ret; @@ -2720,13 +2697,6 @@ static int decode_chunks(AVCodecContext *avctx, AVFrame *picture, return AVERROR_INVALIDDATA; } -#if FF_API_VDPAU - if (uses_vdpau(avctx)) { - s->slice_count++; - break; - } -#endif - if (HAVE_THREADS && (avctx->active_thread_type & FF_THREAD_SLICE) && !avctx->hwaccel) { @@ -2951,35 +2921,3 @@ AVCodec ff_mpeg_xvmc_decoder = { FF_ENABLE_DEPRECATION_WARNINGS #endif #endif /* FF_API_XVMC */ - -#if CONFIG_MPEG_VDPAU_DECODER && FF_API_VDPAU -AVCodec ff_mpeg_vdpau_decoder = { - .name = "mpegvideo_vdpau", - .long_name = NULL_IF_CONFIG_SMALL("MPEG-1/2 video (VDPAU acceleration)"), - .type = AVMEDIA_TYPE_VIDEO, - .id = AV_CODEC_ID_MPEG2VIDEO, - .priv_data_size = sizeof(Mpeg1Context), - .init = mpeg_decode_init, - .close = mpeg_decode_end, - .decode = mpeg_decode_frame, - .capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_TRUNCATED | - AV_CODEC_CAP_HWACCEL_VDPAU | AV_CODEC_CAP_DELAY, - .flush = flush, -}; -#endif - -#if CONFIG_MPEG1_VDPAU_DECODER && FF_API_VDPAU -AVCodec ff_mpeg1_vdpau_decoder = { - .name = "mpeg1video_vdpau", - .long_name = NULL_IF_CONFIG_SMALL("MPEG-1 video (VDPAU acceleration)"), - .type = AVMEDIA_TYPE_VIDEO, - .id = AV_CODEC_ID_MPEG1VIDEO, - .priv_data_size = sizeof(Mpeg1Context), - .init = mpeg_decode_init, - .close = mpeg_decode_end, - .decode = mpeg_decode_frame, - .capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_TRUNCATED | - AV_CODEC_CAP_HWACCEL_VDPAU | AV_CODEC_CAP_DELAY, - .flush = flush, -}; -#endif diff --git a/libavcodec/mpeg4videodec.c b/libavcodec/mpeg4videodec.c index f15747f..905fd7f 100644 --- a/libavcodec/mpeg4videodec.c +++ b/libavcodec/mpeg4videodec.c @@ -2787,29 +2787,3 @@ AVCodec ff_mpeg4_decoder = { .update_thread_context = ONLY_IF_THREADS_ENABLED(mpeg4_update_thread_context), .priv_class = &mpeg4_class, }; - - -#if 
CONFIG_MPEG4_VDPAU_DECODER && FF_API_VDPAU -static const AVClass mpeg4_vdpau_class = { - "MPEG4 Video VDPAU Decoder", - av_default_item_name, - mpeg4_options, - LIBAVUTIL_VERSION_INT, -}; - -AVCodec ff_mpeg4_vdpau_decoder = { - .name = "mpeg4_vdpau", - .long_name = NULL_IF_CONFIG_SMALL("MPEG-4 part 2 (VDPAU)"), - .type = AVMEDIA_TYPE_VIDEO, - .id = AV_CODEC_ID_MPEG4, - .priv_data_size = sizeof(Mpeg4DecContext), - .init = decode_init, - .close = ff_h263_decode_end, - .decode = ff_h263_decode_frame, - .capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_TRUNCATED | AV_CODEC_CAP_DELAY | - AV_CODEC_CAP_HWACCEL_VDPAU, - .pix_fmts = (const enum AVPixelFormat[]) { AV_PIX_FMT_VDPAU_MPEG4, - AV_PIX_FMT_NONE }, - .priv_class = &mpeg4_vdpau_class, -}; -#endif diff --git a/libavcodec/mpegpicture.c b/libavcodec/mpegpicture.c index bb822b6..ae4cd36 100644 --- a/libavcodec/mpegpicture.c +++ b/libavcodec/mpegpicture.c @@ -58,11 +58,7 @@ int ff_mpeg_framesize_alloc(AVCodecContext *avctx, MotionEstContext *me, { int alloc_size = FFALIGN(FFABS(linesize) + 64, 32); - if (avctx->hwaccel -#if FF_API_CAP_VDPAU - || avctx->codec->capabilities & AV_CODEC_CAP_HWACCEL_VDPAU -#endif - ) + if (avctx->hwaccel) return 0; if (linesize < 24) { diff --git a/libavcodec/mpegvideo.c b/libavcodec/mpegvideo.c index 96634ec..3117777 100644 --- a/libavcodec/mpegvideo.c +++ b/libavcodec/mpegvideo.c @@ -1313,11 +1313,7 @@ int ff_mpv_frame_start(MpegEncContext *s, AVCodecContext *avctx) return -1; } - if (!avctx->hwaccel -#if FF_API_CAP_VDPAU - && !(avctx->codec->capabilities&AV_CODEC_CAP_HWACCEL_VDPAU) -#endif - ) { + if (!avctx->hwaccel) { for(i=0; i<avctx->height; i++) memset(s->last_picture_ptr->f->data[0] + s->last_picture_ptr->f->linesize[0]*i, 0x80, avctx->width); @@ -1661,14 +1657,9 @@ void ff_print_debug_info2(AVCodecContext *avctx, AVFrame *pict, uint8_t *mbskip_ } /* TODO: export all the following to make them accessible for users (and filters) */ - if (avctx->hwaccel || !mbtype_table -#if FF_API_CAP_VDPAU - || (avctx->codec->capabilities&AV_CODEC_CAP_HWACCEL_VDPAU) -#endif - ) + if (avctx->hwaccel || !mbtype_table) return; - if (avctx->debug & (FF_DEBUG_SKIP | FF_DEBUG_QP | FF_DEBUG_MB_TYPE)) { int x,y; diff --git a/libavcodec/utils.c b/libavcodec/utils.c index 7bfd760..38f8d41 100644 --- a/libavcodec/utils.c +++ b/libavcodec/utils.c @@ -1075,10 +1075,6 @@ int ff_get_format(AVCodecContext *avctx, const enum AVPixelFormat *fmt) if (!(desc->flags & AV_PIX_FMT_FLAG_HWACCEL)) break; -#if FF_API_CAP_VDPAU - if (avctx->codec->capabilities&AV_CODEC_CAP_HWACCEL_VDPAU) - break; -#endif if (!setup_hwaccel(avctx, ret, desc->name)) break; diff --git a/libavcodec/vc1dec.c b/libavcodec/vc1dec.c index acd29bc..5f6312f 100644 --- a/libavcodec/vc1dec.c +++ b/libavcodec/vc1dec.c @@ -36,7 +36,6 @@ #include "msmpeg4data.h" #include "vc1.h" #include "vc1data.h" -#include "vdpau_compat.h" #include "libavutil/avassert.h" @@ -651,15 +650,6 @@ static int vc1_decode_frame(AVCodecContext *avctx, void *data, return buf_size; } -#if FF_API_CAP_VDPAU - if (s->avctx->codec->capabilities&AV_CODEC_CAP_HWACCEL_VDPAU) { - if (v->profile < PROFILE_ADVANCED) - avctx->pix_fmt = AV_PIX_FMT_VDPAU_WMV3; - else - avctx->pix_fmt = AV_PIX_FMT_VDPAU_VC1; - } -#endif - //for advanced profile we may need to parse and unescape data if (avctx->codec_id == AV_CODEC_ID_VC1 || avctx->codec_id == AV_CODEC_ID_VC1IMAGE) { int buf_size2 = 0; @@ -678,21 +668,13 @@ static int vc1_decode_frame(AVCodecContext *avctx, void *data, if (size <= 0) continue; switch (AV_RB32(start)) { case 
VC1_CODE_FRAME: - if (avctx->hwaccel -#if FF_API_CAP_VDPAU - || s->avctx->codec->capabilities&AV_CODEC_CAP_HWACCEL_VDPAU -#endif - ) + if (avctx->hwaccel) buf_start = start; buf_size2 = vc1_unescape_buffer(start + 4, size, buf2); break; case VC1_CODE_FIELD: { int buf_size3; - if (avctx->hwaccel -#if FF_API_CAP_VDPAU - || s->avctx->codec->capabilities&AV_CODEC_CAP_HWACCEL_VDPAU -#endif - ) + if (avctx->hwaccel) buf_start_second_field = start; tmp = av_realloc_array(slices, sizeof(*slices), (n_slices+1)); if (!tmp) { @@ -754,11 +736,7 @@ static int vc1_decode_frame(AVCodecContext *avctx, void *data, ret = AVERROR_INVALIDDATA; goto err; } else { // found field marker, unescape second field - if (avctx->hwaccel -#if FF_API_CAP_VDPAU - || s->avctx->codec->capabilities&AV_CODEC_CAP_HWACCEL_VDPAU -#endif - ) + if (avctx->hwaccel) buf_start_second_field = divider; tmp = av_realloc_array(slices, sizeof(*slices), (n_slices+1)); if (!tmp) { @@ -905,17 +883,6 @@ static int vc1_decode_frame(AVCodecContext *avctx, void *data, s->me.qpel_put = s->qdsp.put_qpel_pixels_tab; s->me.qpel_avg = s->qdsp.avg_qpel_pixels_tab; -#if FF_API_CAP_VDPAU - if ((CONFIG_VC1_VDPAU_DECODER) - &&s->avctx->codec->capabilities&AV_CODEC_CAP_HWACCEL_VDPAU) { - if (v->field_mode && buf_start_second_field) { - ff_vdpau_vc1_decode_picture(s, buf_start, buf_start_second_field - buf_start); - ff_vdpau_vc1_decode_picture(s, buf_start_second_field, (buf + buf_size) - buf_start_second_field); - } else { - ff_vdpau_vc1_decode_picture(s, buf_start, (buf + buf_size) - buf_start); - } - } else -#endif if (avctx->hwaccel) { if (v->field_mode && buf_start_second_field) { // decode first field @@ -1160,38 +1127,6 @@ AVCodec ff_wmv3_decoder = { }; #endif -#if CONFIG_WMV3_VDPAU_DECODER && FF_API_VDPAU -AVCodec ff_wmv3_vdpau_decoder = { - .name = "wmv3_vdpau", - .long_name = NULL_IF_CONFIG_SMALL("Windows Media Video 9 VDPAU"), - .type = AVMEDIA_TYPE_VIDEO, - .id = AV_CODEC_ID_WMV3, - .priv_data_size = sizeof(VC1Context), - .init = vc1_decode_init, - .close = ff_vc1_decode_end, - .decode = vc1_decode_frame, - .capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_DELAY | AV_CODEC_CAP_HWACCEL_VDPAU, - .pix_fmts = (const enum AVPixelFormat[]){ AV_PIX_FMT_VDPAU_WMV3, AV_PIX_FMT_NONE }, - .profiles = NULL_IF_CONFIG_SMALL(profiles) -}; -#endif - -#if CONFIG_VC1_VDPAU_DECODER && FF_API_VDPAU -AVCodec ff_vc1_vdpau_decoder = { - .name = "vc1_vdpau", - .long_name = NULL_IF_CONFIG_SMALL("SMPTE VC-1 VDPAU"), - .type = AVMEDIA_TYPE_VIDEO, - .id = AV_CODEC_ID_VC1, - .priv_data_size = sizeof(VC1Context), - .init = vc1_decode_init, - .close = ff_vc1_decode_end, - .decode = vc1_decode_frame, - .capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_DELAY | AV_CODEC_CAP_HWACCEL_VDPAU, - .pix_fmts = (const enum AVPixelFormat[]){ AV_PIX_FMT_VDPAU_VC1, AV_PIX_FMT_NONE }, - .profiles = NULL_IF_CONFIG_SMALL(profiles) -}; -#endif - #if CONFIG_WMV3IMAGE_DECODER AVCodec ff_wmv3image_decoder = { .name = "wmv3image", diff --git a/libavcodec/vdpau.c b/libavcodec/vdpau.c index 60c7235..89ebfff 100644 --- a/libavcodec/vdpau.c +++ b/libavcodec/vdpau.c @@ -32,7 +32,6 @@ #include <assert.h> #include "vdpau.h" -#include "vdpau_compat.h" #include "vdpau_internal.h" /** @@ -355,345 +354,6 @@ int ff_vdpau_add_buffer(struct vdpau_picture_context *pic_ctx, return 0; } -/* Obsolete non-hwaccel VDPAU support below... 
*/ - -#if FF_API_VDPAU -void ff_vdpau_add_data_chunk(uint8_t *data, const uint8_t *buf, int buf_size) -{ - struct vdpau_render_state *render = (struct vdpau_render_state*)data; - assert(render); - - render->bitstream_buffers= av_fast_realloc( - render->bitstream_buffers, - &render->bitstream_buffers_allocated, - sizeof(*render->bitstream_buffers)*(render->bitstream_buffers_used + 1) - ); - - render->bitstream_buffers[render->bitstream_buffers_used].struct_version = VDP_BITSTREAM_BUFFER_VERSION; - render->bitstream_buffers[render->bitstream_buffers_used].bitstream = buf; - render->bitstream_buffers[render->bitstream_buffers_used].bitstream_bytes = buf_size; - render->bitstream_buffers_used++; -} - -#if CONFIG_H264_VDPAU_DECODER -void ff_vdpau_h264_set_reference_frames(H264Context *h) -{ - struct vdpau_render_state *render, *render_ref; - VdpReferenceFrameH264 *rf, *rf2; - H264Picture *pic; - int i, list, pic_frame_idx; - - render = (struct vdpau_render_state *)h->cur_pic_ptr->f->data[0]; - assert(render); - - rf = &render->info.h264.referenceFrames[0]; -#define H264_RF_COUNT FF_ARRAY_ELEMS(render->info.h264.referenceFrames) - - for (list = 0; list < 2; ++list) { - H264Picture **lp = list ? h->long_ref : h->short_ref; - int ls = list ? 16 : h->short_ref_count; - - for (i = 0; i < ls; ++i) { - pic = lp[i]; - if (!pic || !pic->reference) - continue; - pic_frame_idx = pic->long_ref ? pic->pic_id : pic->frame_num; - - render_ref = (struct vdpau_render_state *)pic->f->data[0]; - assert(render_ref); - - rf2 = &render->info.h264.referenceFrames[0]; - while (rf2 != rf) { - if ( - (rf2->surface == render_ref->surface) - && (rf2->is_long_term == pic->long_ref) - && (rf2->frame_idx == pic_frame_idx) - ) - break; - ++rf2; - } - if (rf2 != rf) { - rf2->top_is_reference |= (pic->reference & PICT_TOP_FIELD) ? VDP_TRUE : VDP_FALSE; - rf2->bottom_is_reference |= (pic->reference & PICT_BOTTOM_FIELD) ? VDP_TRUE : VDP_FALSE; - continue; - } - - if (rf >= &render->info.h264.referenceFrames[H264_RF_COUNT]) - continue; - - rf->surface = render_ref->surface; - rf->is_long_term = pic->long_ref; - rf->top_is_reference = (pic->reference & PICT_TOP_FIELD) ? VDP_TRUE : VDP_FALSE; - rf->bottom_is_reference = (pic->reference & PICT_BOTTOM_FIELD) ? VDP_TRUE : VDP_FALSE; - rf->field_order_cnt[0] = pic->field_poc[0]; - rf->field_order_cnt[1] = pic->field_poc[1]; - rf->frame_idx = pic_frame_idx; - - ++rf; - } - } - - for (; rf < &render->info.h264.referenceFrames[H264_RF_COUNT]; ++rf) { - rf->surface = VDP_INVALID_HANDLE; - rf->is_long_term = 0; - rf->top_is_reference = 0; - rf->bottom_is_reference = 0; - rf->field_order_cnt[0] = 0; - rf->field_order_cnt[1] = 0; - rf->frame_idx = 0; - } -} - -void ff_vdpau_h264_picture_start(H264Context *h) -{ - struct vdpau_render_state *render; - int i; - - render = (struct vdpau_render_state *)h->cur_pic_ptr->f->data[0]; - assert(render); - - for (i = 0; i < 2; ++i) { - int foc = h->cur_pic_ptr->field_poc[i]; - if (foc == INT_MAX) - foc = 0; - render->info.h264.field_order_cnt[i] = foc; - } - - render->info.h264.frame_num = h->frame_num; -} - -void ff_vdpau_h264_picture_complete(H264Context *h) -{ - struct vdpau_render_state *render; - - render = (struct vdpau_render_state *)h->cur_pic_ptr->f->data[0]; - assert(render); - - render->info.h264.slice_count = h->current_slice; - if (render->info.h264.slice_count < 1) - return; - - render->info.h264.is_reference = (h->cur_pic_ptr->reference & 3) ? 
VDP_TRUE : VDP_FALSE; - render->info.h264.field_pic_flag = h->picture_structure != PICT_FRAME; - render->info.h264.bottom_field_flag = h->picture_structure == PICT_BOTTOM_FIELD; - render->info.h264.num_ref_frames = h->sps.ref_frame_count; - render->info.h264.mb_adaptive_frame_field_flag = h->sps.mb_aff && !render->info.h264.field_pic_flag; - render->info.h264.constrained_intra_pred_flag = h->pps.constrained_intra_pred; - render->info.h264.weighted_pred_flag = h->pps.weighted_pred; - render->info.h264.weighted_bipred_idc = h->pps.weighted_bipred_idc; - render->info.h264.frame_mbs_only_flag = h->sps.frame_mbs_only_flag; - render->info.h264.transform_8x8_mode_flag = h->pps.transform_8x8_mode; - render->info.h264.chroma_qp_index_offset = h->pps.chroma_qp_index_offset[0]; - render->info.h264.second_chroma_qp_index_offset = h->pps.chroma_qp_index_offset[1]; - render->info.h264.pic_init_qp_minus26 = h->pps.init_qp - 26; - render->info.h264.num_ref_idx_l0_active_minus1 = h->pps.ref_count[0] - 1; - render->info.h264.num_ref_idx_l1_active_minus1 = h->pps.ref_count[1] - 1; - render->info.h264.log2_max_frame_num_minus4 = h->sps.log2_max_frame_num - 4; - render->info.h264.pic_order_cnt_type = h->sps.poc_type; - render->info.h264.log2_max_pic_order_cnt_lsb_minus4 = h->sps.poc_type ? 0 : h->sps.log2_max_poc_lsb - 4; - render->info.h264.delta_pic_order_always_zero_flag = h->sps.delta_pic_order_always_zero_flag; - render->info.h264.direct_8x8_inference_flag = h->sps.direct_8x8_inference_flag; - render->info.h264.entropy_coding_mode_flag = h->pps.cabac; - render->info.h264.pic_order_present_flag = h->pps.pic_order_present; - render->info.h264.deblocking_filter_control_present_flag = h->pps.deblocking_filter_parameters_present; - render->info.h264.redundant_pic_cnt_present_flag = h->pps.redundant_pic_cnt_present; - memcpy(render->info.h264.scaling_lists_4x4, h->pps.scaling_matrix4, sizeof(render->info.h264.scaling_lists_4x4)); - memcpy(render->info.h264.scaling_lists_8x8[0], h->pps.scaling_matrix8[0], sizeof(render->info.h264.scaling_lists_8x8[0])); - memcpy(render->info.h264.scaling_lists_8x8[1], h->pps.scaling_matrix8[3], sizeof(render->info.h264.scaling_lists_8x8[0])); - - ff_h264_draw_horiz_band(h, &h->slice_ctx[0], 0, h->avctx->height); - render->bitstream_buffers_used = 0; -} -#endif /* CONFIG_H264_VDPAU_DECODER */ - -#if CONFIG_MPEG_VDPAU_DECODER || CONFIG_MPEG1_VDPAU_DECODER -void ff_vdpau_mpeg_picture_complete(MpegEncContext *s, const uint8_t *buf, - int buf_size, int slice_count) -{ - struct vdpau_render_state *render, *last, *next; - int i; - - if (!s->current_picture_ptr) return; - - render = (struct vdpau_render_state *)s->current_picture_ptr->f->data[0]; - assert(render); - - /* fill VdpPictureInfoMPEG1Or2 struct */ - render->info.mpeg.picture_structure = s->picture_structure; - render->info.mpeg.picture_coding_type = s->pict_type; - render->info.mpeg.intra_dc_precision = s->intra_dc_precision; - render->info.mpeg.frame_pred_frame_dct = s->frame_pred_frame_dct; - render->info.mpeg.concealment_motion_vectors = s->concealment_motion_vectors; - render->info.mpeg.intra_vlc_format = s->intra_vlc_format; - render->info.mpeg.alternate_scan = s->alternate_scan; - render->info.mpeg.q_scale_type = s->q_scale_type; - render->info.mpeg.top_field_first = s->top_field_first; - render->info.mpeg.full_pel_forward_vector = s->full_pel[0]; // MPEG-1 only. Set 0 for MPEG-2 - render->info.mpeg.full_pel_backward_vector = s->full_pel[1]; // MPEG-1 only. 
Set 0 for MPEG-2 - render->info.mpeg.f_code[0][0] = s->mpeg_f_code[0][0]; // For MPEG-1 fill both horiz. & vert. - render->info.mpeg.f_code[0][1] = s->mpeg_f_code[0][1]; - render->info.mpeg.f_code[1][0] = s->mpeg_f_code[1][0]; - render->info.mpeg.f_code[1][1] = s->mpeg_f_code[1][1]; - for (i = 0; i < 64; ++i) { - render->info.mpeg.intra_quantizer_matrix[i] = s->intra_matrix[i]; - render->info.mpeg.non_intra_quantizer_matrix[i] = s->inter_matrix[i]; - } - - render->info.mpeg.forward_reference = VDP_INVALID_HANDLE; - render->info.mpeg.backward_reference = VDP_INVALID_HANDLE; - - switch(s->pict_type){ - case AV_PICTURE_TYPE_B: - next = (struct vdpau_render_state *)s->next_picture.f->data[0]; - assert(next); - render->info.mpeg.backward_reference = next->surface; - // no return here, going to set forward prediction - case AV_PICTURE_TYPE_P: - last = (struct vdpau_render_state *)s->last_picture.f->data[0]; - if (!last) // FIXME: Does this test make sense? - last = render; // predict second field from the first - render->info.mpeg.forward_reference = last->surface; - } - - ff_vdpau_add_data_chunk(s->current_picture_ptr->f->data[0], buf, buf_size); - - render->info.mpeg.slice_count = slice_count; - - if (slice_count) - ff_mpeg_draw_horiz_band(s, 0, s->avctx->height); - render->bitstream_buffers_used = 0; -} -#endif /* CONFIG_MPEG_VDPAU_DECODER || CONFIG_MPEG1_VDPAU_DECODER */ - -#if CONFIG_VC1_VDPAU_DECODER -void ff_vdpau_vc1_decode_picture(MpegEncContext *s, const uint8_t *buf, - int buf_size) -{ - VC1Context *v = s->avctx->priv_data; - struct vdpau_render_state *render, *last, *next; - - render = (struct vdpau_render_state *)s->current_picture.f->data[0]; - assert(render); - - /* fill LvPictureInfoVC1 struct */ - render->info.vc1.frame_coding_mode = v->fcm ? 
v->fcm + 1 : 0; - render->info.vc1.postprocflag = v->postprocflag; - render->info.vc1.pulldown = v->broadcast; - render->info.vc1.interlace = v->interlace; - render->info.vc1.tfcntrflag = v->tfcntrflag; - render->info.vc1.finterpflag = v->finterpflag; - render->info.vc1.psf = v->psf; - render->info.vc1.dquant = v->dquant; - render->info.vc1.panscan_flag = v->panscanflag; - render->info.vc1.refdist_flag = v->refdist_flag; - render->info.vc1.quantizer = v->quantizer_mode; - render->info.vc1.extended_mv = v->extended_mv; - render->info.vc1.extended_dmv = v->extended_dmv; - render->info.vc1.overlap = v->overlap; - render->info.vc1.vstransform = v->vstransform; - render->info.vc1.loopfilter = v->s.loop_filter; - render->info.vc1.fastuvmc = v->fastuvmc; - render->info.vc1.range_mapy_flag = v->range_mapy_flag; - render->info.vc1.range_mapy = v->range_mapy; - render->info.vc1.range_mapuv_flag = v->range_mapuv_flag; - render->info.vc1.range_mapuv = v->range_mapuv; - /* Specific to simple/main profile only */ - render->info.vc1.multires = v->multires; - render->info.vc1.syncmarker = v->resync_marker; - render->info.vc1.rangered = v->rangered | (v->rangeredfrm << 1); - render->info.vc1.maxbframes = v->s.max_b_frames; - - render->info.vc1.deblockEnable = v->postprocflag & 1; - render->info.vc1.pquant = v->pq; - - render->info.vc1.forward_reference = VDP_INVALID_HANDLE; - render->info.vc1.backward_reference = VDP_INVALID_HANDLE; - - if (v->bi_type) - render->info.vc1.picture_type = 4; - else - render->info.vc1.picture_type = s->pict_type - 1 + s->pict_type / 3; - - switch(s->pict_type){ - case AV_PICTURE_TYPE_B: - next = (struct vdpau_render_state *)s->next_picture.f->data[0]; - assert(next); - render->info.vc1.backward_reference = next->surface; - // no break here, going to set forward prediction - case AV_PICTURE_TYPE_P: - last = (struct vdpau_render_state *)s->last_picture.f->data[0]; - if (!last) // FIXME: Does this test make sense? 
- last = render; // predict second field from the first - render->info.vc1.forward_reference = last->surface; - } - - ff_vdpau_add_data_chunk(s->current_picture_ptr->f->data[0], buf, buf_size); - - render->info.vc1.slice_count = 1; - - ff_mpeg_draw_horiz_band(s, 0, s->avctx->height); - render->bitstream_buffers_used = 0; -} -#endif /* (CONFIG_VC1_VDPAU_DECODER */ - -#if CONFIG_MPEG4_VDPAU_DECODER -void ff_vdpau_mpeg4_decode_picture(Mpeg4DecContext *ctx, const uint8_t *buf, - int buf_size) -{ - MpegEncContext *s = &ctx->m; - struct vdpau_render_state *render, *last, *next; - int i; - - if (!s->current_picture_ptr) return; - - render = (struct vdpau_render_state *)s->current_picture_ptr->f->data[0]; - assert(render); - - /* fill VdpPictureInfoMPEG4Part2 struct */ - render->info.mpeg4.trd[0] = s->pp_time; - render->info.mpeg4.trb[0] = s->pb_time; - render->info.mpeg4.trd[1] = s->pp_field_time >> 1; - render->info.mpeg4.trb[1] = s->pb_field_time >> 1; - render->info.mpeg4.vop_time_increment_resolution = s->avctx->time_base.den; - render->info.mpeg4.vop_coding_type = 0; - render->info.mpeg4.vop_fcode_forward = s->f_code; - render->info.mpeg4.vop_fcode_backward = s->b_code; - render->info.mpeg4.resync_marker_disable = !ctx->resync_marker; - render->info.mpeg4.interlaced = !s->progressive_sequence; - render->info.mpeg4.quant_type = s->mpeg_quant; - render->info.mpeg4.quarter_sample = s->quarter_sample; - render->info.mpeg4.short_video_header = s->avctx->codec->id == AV_CODEC_ID_H263; - render->info.mpeg4.rounding_control = s->no_rounding; - render->info.mpeg4.alternate_vertical_scan_flag = s->alternate_scan; - render->info.mpeg4.top_field_first = s->top_field_first; - for (i = 0; i < 64; ++i) { - render->info.mpeg4.intra_quantizer_matrix[i] = s->intra_matrix[i]; - render->info.mpeg4.non_intra_quantizer_matrix[i] = s->inter_matrix[i]; - } - render->info.mpeg4.forward_reference = VDP_INVALID_HANDLE; - render->info.mpeg4.backward_reference = VDP_INVALID_HANDLE; - - switch (s->pict_type) { - case AV_PICTURE_TYPE_B: - next = (struct vdpau_render_state *)s->next_picture.f->data[0]; - assert(next); - render->info.mpeg4.backward_reference = next->surface; - render->info.mpeg4.vop_coding_type = 2; - // no break here, going to set forward prediction - case AV_PICTURE_TYPE_P: - last = (struct vdpau_render_state *)s->last_picture.f->data[0]; - assert(last); - render->info.mpeg4.forward_reference = last->surface; - } - - ff_vdpau_add_data_chunk(s->current_picture_ptr->f->data[0], buf, buf_size); - - ff_mpeg_draw_horiz_band(s, 0, s->avctx->height); - render->bitstream_buffers_used = 0; -} -#endif /* CONFIG_MPEG4_VDPAU_DECODER */ -#endif /* FF_API_VDPAU */ - int av_vdpau_get_profile(AVCodecContext *avctx, VdpDecoderProfile *profile) { #define PROFILE(prof) \ diff --git a/libavcodec/vdpau_compat.h b/libavcodec/vdpau_compat.h deleted file mode 100644 index 6b4b086..0000000 --- a/libavcodec/vdpau_compat.h +++ /dev/null @@ -1,48 +0,0 @@ -/* - * Video Decode and Presentation API for UNIX (VDPAU) is used for - * HW decode acceleration for MPEG-1/2, H.264 and VC-1. - * - * Copyright (C) 2008 NVIDIA - * - * This file is part of FFmpeg. - * - * FFmpeg is free software; you can redistribute it and/or - * modify it under the terms of the GNU Lesser General Public - * License as published by the Free Software Foundation; either - * version 2.1 of the License, or (at your option) any later version. 
- * - * FFmpeg is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * Lesser General Public License for more details. - * - * You should have received a copy of the GNU Lesser General Public - * License along with FFmpeg; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA - */ - -#ifndef AVCODEC_VDPAU_COMPAT_H -#define AVCODEC_VDPAU_COMPAT_H - -#include <stdint.h> - -#include "h264.h" -#include "mpeg4video.h" - -void ff_vdpau_add_data_chunk(uint8_t *data, const uint8_t *buf, - int buf_size); - -void ff_vdpau_mpeg_picture_complete(MpegEncContext *s, const uint8_t *buf, - int buf_size, int slice_count); - -void ff_vdpau_h264_picture_start(H264Context *h); -void ff_vdpau_h264_set_reference_frames(H264Context *h); -void ff_vdpau_h264_picture_complete(H264Context *h); - -void ff_vdpau_vc1_decode_picture(MpegEncContext *s, const uint8_t *buf, - int buf_size); - -void ff_vdpau_mpeg4_decode_picture(Mpeg4DecContext *s, const uint8_t *buf, - int buf_size); - -#endif /* AVCODEC_VDPAU_COMPAT_H */ -- 2.5.1