Re: [Libav-user] imgutils.h decode dst buffer going from avpicture_fill to av_image_fill_arrays
For software scaling the example is as follows, might help you. int main(int argc, char **argv) { const char *src_filename = NULL; const char *src_resolution = NULL; const char *src_pix_fmt_name = NULL; enum AVPixelFormat src_pix_fmt = AV_PIX_FMT_NONE; uint8_t *src_data[4]; int src_linesize[4]; int src_w=0, src_h=0; FILE *src_file; int src_bufsize; const char *dst_filename = NULL; const char *dst_resolution = NULL; const char *dst_pix_fmt_name = NULL; enum AVPixelFormat dst_pix_fmt = AV_PIX_FMT_NONE; uint8_t *dst_data[4]; int dst_linesize[4]; int dst_w=0, dst_h=0; FILE *dst_file; int dst_bufsize; struct SwsContext *sws_ctx; int ret; int frame_count = 0; if (argc != 7) { fprintf(stderr, "Usage: %s src_file src_resolution src_pix_fmt dst_file dst_resolution dst_pix_fmt\n" "API example program to show how to scale a video/image with libswscale.\n" "This program generates a series of pictures, rescales them to the given " "resolution and saves them to an output file.\n" "\n", argv[0]); exit(1); } src_filename = argv[1]; src_resolution = argv[2]; src_pix_fmt_name = argv[3]; dst_filename = argv[4]; dst_resolution = argv[5]; dst_pix_fmt_name = argv[6]; if(AV_PIX_FMT_NONE == (src_pix_fmt = av_get_pix_fmt(src_pix_fmt_name))) { fprintf(stderr, "Invalid source pixel format '%s'\n", src_pix_fmt_name); exit(1); } if(AV_PIX_FMT_NONE == (dst_pix_fmt = av_get_pix_fmt(dst_pix_fmt_name))) { fprintf(stderr, "Invalid destination pixel format '%s'\n", dst_pix_fmt_name); exit(1); } if (av_parse_video_size(&src_w, &src_h, src_resolution) < 0) { fprintf(stderr, "Invalid source resolution '%s', must be in the form WxH or a valid size abbreviation\n", src_resolution); exit(1); } if (av_parse_video_size(&dst_w, &dst_h, dst_resolution) < 0) { fprintf(stderr, "Invalid destination resolution '%s', must be in the form WxH or a valid size abbreviation\n", dst_resolution); exit(1); } src_file = fopen(src_filename, "rb"); if (!src_file) { fprintf(stderr, "Could not open source file %s\n", src_filename); 
exit(1); } dst_file = fopen(dst_filename, "wb"); if (!dst_file) { fprintf(stderr, "Could not open destination file %s\n", dst_filename); exit(1); } /* create scaling context */ sws_ctx = sws_getContext(src_w, src_h, src_pix_fmt, dst_w, dst_h, dst_pix_fmt, SWS_BILINEAR, NULL, NULL, NULL); if (!sws_ctx) { fprintf(stderr, "Impossible to create scale context for the conversion " "fmt:%s s:%dx%d -> fmt:%s s:%dx%d\n", av_get_pix_fmt_name(src_pix_fmt), src_w, src_h, av_get_pix_fmt_name(dst_pix_fmt), dst_w, dst_h); ret = AVERROR(EINVAL); goto end; } /* allocate source and destination image buffers */ if ((ret = av_image_alloc(src_data, src_linesize, src_w, src_h, src_pix_fmt, 16)) < 0) { fprintf(stderr, "Could not allocate source image\n"); goto end; } src_bufsize = ret; /* buffer is going to be written to rawvideo file, no alignment */ if ((ret = av_image_alloc(dst_data, dst_linesize, dst_w, dst_h, dst_pix_fmt, 1)) < 0) { fprintf(stderr, "Could not allocate destination image\n"); goto end; } dst_bufsize = ret; /* read image from source file */ while(src_bufsize == fread(src_data[0], 1, src_bufsize, src_file)) { /* convert to destination format */ sws_scale(sws_ctx, (const uint8_t * const*)src_data, src_linesize, 0, src_h, dst_data, dst_linesize); /* write scaled image to file */ fwrite(dst_data[0], 1, dst_bufsize, dst_file); printf("No of frames converted = %d\r",++frame_count); fflush(stdout); } printf("\n"); fprintf(stderr, "Scaling succeeded. 
Play the output file with the command:\n" "ffplay -f rawvideo -pix_fmt %s -video_size %dx%d %s\n", av_get_pix_fmt_name(dst_pix_fmt), dst_w, dst_h, dst_filename); end: fclose(src_file); fclose(dst_file); av_freep(&src_data[0]); av_freep(&dst_data[0]); sws_freeContext(sws_ctx); return ret; } -- View this message in context: http://libav-users.943685.n4.nabble.com/Libav-user-imgutils-h-decode-dst-buffer-going-from-avpicture-fill-to-av-image-fill-arrays-tp4662419p4662429.html Sent from the libav-users mailing list archive at Nabble.com. ___ Libav-user mailing list
Re: [Libav-user] imgutils.h decode dst buffer going from avpicture_fill to av_image_fill_arrays
On 08/19/2016 04:54 AM, ssshukla26 wrote: For example let suppose "decoder->dst_data[0]" has YUV buffer and its alignment is 1. --- //Allocate AVFrame data and linesize av_image_alloc(avframe->data, avframe->linesize, decoder->video_resolution.frame_width, decoder->video_resolution.frame_height, AV_PIX_FMT_YUV420P, 1); Below are the 3 methods to fill data array of avframe. --- //1) if your Y, U, V buffers are contiguous and have the correct size, This is deprecated avpicture_fill((AVPicture*) avframe, decoder->dst_data[0], avframe->format, avframe->width, avframe->height); --- //2) if your Y, U, V buffers are non-contiguous, This is deprecated // Initialize avframe->linesize avpicture_fill((AVPicture*) avframe, NULL, avframe->format, avframe->width, avframe->height); //Set avframe->data pointers manually avframe->data[0] = decoder->dst_data[0];//Y-Buffer avframe->data[1] = decoder->dst_data[1];//U-Buffer avframe->data[2] = decoder->dst_data[2];//V-Buffer --- //3) Fill data array of avframe, as decoder->dst_data[0] alignment is 1 use the same alignment. av_image_fill_arrays(avframe->data, avframe->linesize, decoder->dst_data[0], avframe->format, avframe->width, avframe->height, 1); Good explanation. I figured out #3 yesterday and avoided the deprecated call m_out_bufers[ m_buf_idx ] = (uint8_t*) av_malloc( av_image_get_buffer_size( AV_PIX_FMT_RGB24, m_avcodec_ctx->width, m_avcodec_ctx->height, 1 ) ); if ( !m_out_bufers[ m_buf_idx ] )[...] av_image_fill_arrays( m_avframeRGB[ m_buf_idx ]->data, m_avframeRGB[ m_buf_idx ]->linesize, m_out_bufers[ m_buf_idx ], AV_PIX_FMT_RGB24, m_avcodec_ctx->width, m_avcodec_ctx->height, 1 ); Can I replace the av_malloc for out_buffers with an input pointer? 
size_t required_size = av_image_get_buffer_size( AV_PIX_FMT_RGB24, m_avcodec_ctx->width, m_avcodec_ctx->height, 1 ); if ( required_size <= in_buff.size ) m_out_bufers[ m_buf_idx ] = in_buff.data; Thanks cco ___ Libav-user mailing list Libav-user@ffmpeg.org http://ffmpeg.org/mailman/listinfo/libav-user
Re: [Libav-user] imgutils.h decode dst buffer going from avpicture_fill to av_image_fill_arrays
For example let suppose "decoder->dst_data[0]" has YUV buffer and its alignment is 1. --- //Allocate AVFrame data and linesize av_image_alloc(avframe->data, avframe->linesize, decoder->video_resolution.frame_width, decoder->video_resolution.frame_height, AV_PIX_FMT_YUV420P, 1); Below are the 3 methods to fill data array of avframe. --- //1) if your Y, U, V buffers are contiguous and have the correct size, This is deprecated avpicture_fill((AVPicture*) avframe, decoder->dst_data[0], avframe->format, avframe->width, avframe->height); --- //2) if your Y, U, V buffers are non-contiguous, This is deprecated // Initialize avframe->linesize avpicture_fill((AVPicture*) avframe, NULL, avframe->format, avframe->width, avframe->height); //Set avframe->data pointers manually avframe->data[0] = decoder->dst_data[0];//Y-Buffer avframe->data[1] = decoder->dst_data[1];//U-Buffer avframe->data[2] = decoder->dst_data[2];//V-Buffer --- //3) Fill data array of avframe, as decoder->dst_data[0] alignment is 1 use the same alignment. av_image_fill_arrays(avframe->data, avframe->linesize, decoder->dst_data[0], avframe->format, avframe->width, avframe->height, 1); -- View this message in context: http://libav-users.943685.n4.nabble.com/Libav-user-imgutils-h-decode-dst-buffer-going-from-avpicture-fill-to-av-image-fill-arrays-tp4662419p4662425.html Sent from the libav-users mailing list archive at Nabble.com. ___ Libav-user mailing list Libav-user@ffmpeg.org http://ffmpeg.org/mailman/listinfo/libav-user
[Libav-user] imgutils.h decode dst buffer going from avpicture_fill to av_image_fill_arrays
I am trying to get single frame decoding of h.264 video in an openGL application. Having an issue seeing how to get from avpicture_fill to av_image_fill_arrays : avpicture_fill( (AVPicture *) m_avframeRGB, buffer, AV_PIX_FMT_RGB24, m_avcodec_ctx->width, m_avcodec_ctx->height ) int av_image_fill_arrays(uint8_t *dst_data[4], int dst_linesize[4], const uint8_t *src, enum AVPixelFormat pix_fmt, int width, int height, int align ) The code follows examples at: ../ffmpeg/decoding_encoding.c ../ffmpeg/scaling_video.c https://github.com/mpenkov/ffmpeg-tutorial/blob/master/tutorial01.c https://github.com/filippobrizzi/raw_rgb_straming/blob/master/client/x264decoder.cpp Unable to find examples using av_image_fill_arrays. What I would like to do is pass in the pointer to a texture array and have the sws_scale load the RGB data into that location. Sample code I am finding appears to do multiple copies of the buffers. Comments like this one: //Mandatory function to copy the image from an AVFrame to a generic buffer. av_image_copy_to_buffer( (uint8_t*) m_avframeRGB, m_size, (const uint8_t * const*)rgb_buffer, _height, AV_PIX_FMT_RGB24, m_width, m_height, magic_align ); init() { avcodec_register_all(); ctx ... ctx ... ... width height codec m_sws_ctx = sws_getContext( m_width, m_height, AV_PIX_FMT_YUV420P, m_width, m_height, AV_PIX_FMT_RGB24, SWS_FAST_BILINEAR, NULL, NULL, NULL ); /// I think these two calls are just setting up a common struct format but not actually allocating buffers to hold the pixel data m_avframe = get_alloc_picture( AV_PIX_FMT_YUV420P, m_width, m_height, true ); m_avframeRGB = get_alloc_picture( AV_PIX_FMT_RGB24, m_width, m_height, true ); } getFrame( char * rgb_buffer, size_t size ) { av_return = avcodec_send_packet( m_avcodec_context, &m_packet ) while ( ! 
frame_done ) gobble packets /// use the input buffer as the output of the YUV to RGB24 sws_scale( m_sws_ctx, m_avframe->data, m_avframe->linesize, 0, m_height, rgb_buffer, m_avframeRGB->linesize ); } Am I even on the right highway here? An even better way may to be get the YUV pointer and run a fragment shader across it, is there a way to leave the buffer in the video ram? Any pointers would be appreciated. Thanks cco ___ Libav-user mailing list Libav-user@ffmpeg.org http://ffmpeg.org/mailman/listinfo/libav-user