I am trying to read an RTP audio stream, and encode it into a FLAC file.
However, when I am reading the stream, I get this error:
[libvorbis @ 028b1520] more samples than frame size
(avcodec_encode_audio2)
During debug mode, it seems the frame_size of my input and output codec
context are mismatched:
inCodecCtx->frame_size = 1152;
outCodecCtx->frame_size = 64;
I tried to write 1152 to outCodecCtx->frame_size, but it gets overwritten
with 64 at `avcodec_open2()`. Why can't I set the frame_size to match the
input frame_size? Should I create an additional output frame to copy the
contents over in a loop?
I'd really appreciate any help or suggestions you could give me.
Here is my source:
//
// Test with this command: ffmpeg -re -f lavfi -i
aevalsrc="atan(t/2)*sin(400*2*PI*t)" -ar 16000 -c:a pcm_s16be -f rtp rtp://
127.0.0.1:8554
// ffmpeg -f dshow -i audio="Microphone (High Definition Audio Device)"
-ar 16000 -c:a pcm_s16be -ac 1 -f rtp rtp://127.0.0.1:8554
// Working code
#include "stdafx.h"
#include
extern "C"
{
#include
#include
#include
#include
#include
#include
#include
#include
}
#define AUDIO_INBUF_SIZE 20480
#define ERRBUFFLEN 200
char errbuf[ERRBUFFLEN];
#define av_err2str(ret) av_strerror(ret, errbuf, ERRBUFFLEN)
const int samp_rate = 16000;
int count = 0;
// Callback function
// Interrupt callback for FFmpeg's AVIOInterruptCB (e.g. to time out a
// blocking RTP read). `ptr` must point to an int counter owned by the
// caller. Each invocation increments the counter; once it exceeds 30,
// the function returns 1, which tells FFmpeg to abort the blocking
// operation. Returns 0 to let the operation continue.
int _ffmpeg_interrupt_fcn(void* ptr)
{
    // Fix: the original read the counter into an unnamed local
    // (`int = *((int*)ptr);`, a syntax error) and never stored the
    // increment back, so the timeout could never trigger.
    int r = *((int*)ptr);
    r += 1;
    *((int*)ptr) = r; // persist the count across callback invocations
    printf("Interrupted! %d\n", r);
    if (r > 30) return 1; // non-zero => abort the blocking FFmpeg call
    return 0;             // zero => keep going
}
/*
 * Convert a packet's timestamps from the encoder's time base to the
 * stream's time base, tag it with the destination stream index, and
 * hand it to the muxer. Returns the result of
 * av_interleaved_write_frame() (0 on success, negative AVERROR code
 * on failure).
 */
static int write_frame(AVFormatContext *fmt_ctx, const AVRational *time_base, AVStream *st, AVPacket *pkt)
{
    int err;

    /* The encoder stamps packets in *time_base; the container expects
     * st->time_base, so rescale before muxing. */
    av_packet_rescale_ts(pkt, *time_base, st->time_base);
    pkt->stream_index = st->index;

#ifdef DEBUG_PACKET
    log_packet(fmt_ctx, pkt);
#endif

    /* Interleaved write lets the muxer buffer and order packets
     * correctly across streams. */
    err = av_interleaved_write_frame(fmt_ctx, pkt);
    return err;
}
/*
* Audio decoding.
*/
static void audio_decode_example(const char *outfilename, const char
*filename)
{
int len;
FILE *f, *outfile;
uint8_t inbuf[AUDIO_INBUF_SIZE + AV_INPUT_BUFFER_PADDING_SIZE];
AVPacket inpkt, outpkt;
AVCodec *inCodec = NULL;
AVCodecContext *inCodecCtx = NULL;
AVFrame *decoded_frame = NULL;
AVFormatContext *inFormatCtx = NULL;
AVCodec *outCodec = NULL;
AVCodecContext *outCodecCtx = NULL;
AVFormatContext *outFormatCtx = NULL;
AVStream * outAudioStream = NULL;
int ret;
av_init_packet();
AVDictionary *d = NULL; // "create" an empty dictionary
av_dict_set(, "protocol_whitelist", "file,udp,rtp", 0); // add an
entry
// Open video file
ret = avformat_open_input(, filename, NULL, );
if (ret <0)
{
printf_s("Failed: cannot open input.\n");
av_strerror(ret, errbuf, ERRBUFFLEN);
fprintf(stderr, "avformat_open_input() fail: %s\n", errbuf);
exit(1);
}
printf_s("Retrieve stream information.\n");
ret = avformat_find_stream_info(inFormatCtx, NULL);
if (ret <0)
{
printf_s("Failed: cannot find stream.\n");
av_strerror(ret, errbuf, ERRBUFFLEN);
fprintf(stderr, "avformat_find_stream_info() fail: %s\n",
errbuf);
exit(1);
}
av_dump_format(inFormatCtx, 0, filename, 0);
int stream_idx = -1;
for (int i = 0; i < inFormatCtx->nb_streams; i++)
if (inFormatCtx->streams[i]->codec->codec_type ==
AVMEDIA_TYPE_AUDIO) {
stream_idx = i;
break;
}
if (stream_idx == -1)
{
fprintf(stderr, "Video stream not found\n");
exit(1);
}
inCodec =
avcodec_find_decoder(inFormatCtx->streams[stream_idx]->codec->codec_id);
if (!inCodec) {
fprintf(stderr, "Codec not found\n");
exit(1);
}
inCodecCtx = avcodec_alloc_context3(inCodec);
if (!inCodecCtx) {
fprintf(stderr, "Could not allocate audio codec context\n");
exit(1);
}
inCodecCtx->channels = 1;
ret = avcodec_open2(inCodecCtx, inCodec, NULL);
if (ret < 0) {
fprintf(stderr, "Could not open codec: %s\n", av_err2str(ret));
exit(1);
}
// Set output
ret = avformat_alloc_output_context2(, NULL, NULL,
outfilename);
if (!outFormatCtx || ret < 0)
{
fprintf(stderr, "Could not allocate output context");
}
outFormatCtx->flags |= AVFMT_FLAG_NOBUFFER |
AVFMT_FLAG_FLUSH_PACKETS;