---
This example is quite misleading; I'm not looking forward to using the current
parsing API explicitly, but CODEC_FLAG_TRUNCATED is getting cargo-culted
around a lot.
I'll probably try to make it a little more sane later (volunteers welcome).
doc/examples/avcodec.c | 101 ++++++++++++++++++++++++-------------------------
1 file changed, 49 insertions(+), 52 deletions(-)
diff --git a/doc/examples/avcodec.c b/doc/examples/avcodec.c
index 1478881..716334d 100644
--- a/doc/examples/avcodec.c
+++ b/doc/examples/avcodec.c
@@ -82,7 +82,7 @@ static int select_channel_layout(AVCodec *codec)
{
const uint64_t *p;
uint64_t best_ch_layout = 0;
- int best_nb_channels = 0;
+ int best_nb_channels = 0;
if (!codec->channel_layouts)
return AV_CH_LAYOUT_STEREO;
@@ -92,7 +92,7 @@ static int select_channel_layout(AVCodec *codec)
int nb_channels = av_get_channel_layout_nb_channels(*p);
if (nb_channels > best_nb_channels) {
- best_ch_layout = *p;
+ best_ch_layout = *p;
best_nb_channels = nb_channels;
}
p++;
@@ -106,7 +106,7 @@ static int select_channel_layout(AVCodec *codec)
static void audio_encode_example(const char *filename)
{
AVCodec *codec;
- AVCodecContext *c= NULL;
+ AVCodecContext *c = NULL;
AVFrame *frame;
AVPacket pkt;
int i, j, k, ret, got_output;
@@ -177,25 +177,25 @@ static void audio_encode_example(const char *filename)
}
/* setup the data pointers in the AVFrame */
ret = avcodec_fill_audio_frame(frame, c->channels, c->sample_fmt,
- (const uint8_t*)samples, buffer_size, 0);
+ (const uint8_t *)samples, buffer_size, 0);
if (ret < 0) {
fprintf(stderr, "could not setup audio frame\n");
exit(1);
}
/* encode a single tone sound */
- t = 0;
+ t = 0;
tincr = 2 * M_PI * 440.0 / c->sample_rate;
- for(i=0;i<200;i++) {
+ for (i = 0; i < 200; i++) {
av_init_packet(&pkt);
pkt.data = NULL; // packet data will be allocated by the encoder
pkt.size = 0;
for (j = 0; j < c->frame_size; j++) {
- samples[2*j] = (int)(sin(t) * 10000);
+ samples[2 * j] = (int)(sin(t) * 10000);
for (k = 1; k < c->channels; k++)
- samples[2*j + k] = samples[2*j];
+ samples[2 * j + k] = samples[2 * j];
t += tincr;
}
/* encode the samples */
@@ -223,7 +223,7 @@ static void audio_encode_example(const char *filename)
static void audio_decode_example(const char *outfilename, const char *filename)
{
AVCodec *codec;
- AVCodecContext *c= NULL;
+ AVCodecContext *c = NULL;
int len;
FILE *f, *outfile;
uint8_t inbuf[AUDIO_INBUF_SIZE + FF_INPUT_BUFFER_PADDING_SIZE];
@@ -295,8 +295,8 @@ static void audio_decode_example(const char *outfilename,
const char *filename)
* libavformat. */
memmove(inbuf, avpkt.data, avpkt.size);
avpkt.data = inbuf;
- len = fread(avpkt.data + avpkt.size, 1,
- AUDIO_INBUF_SIZE - avpkt.size, f);
+ len = fread(avpkt.data + avpkt.size, 1,
+ AUDIO_INBUF_SIZE - avpkt.size, f);
if (len > 0)
avpkt.size += len;
}
@@ -316,7 +316,7 @@ static void audio_decode_example(const char *outfilename,
const char *filename)
static void video_encode_example(const char *filename)
{
AVCodec *codec;
- AVCodecContext *c= NULL;
+ AVCodecContext *c = NULL;
int i, ret, x, y, got_output;
FILE *f;
AVFrame *picture;
@@ -332,19 +332,19 @@ static void video_encode_example(const char *filename)
exit(1);
}
- c = avcodec_alloc_context3(codec);
+ c = avcodec_alloc_context3(codec);
picture = av_frame_alloc();
/* put sample parameters */
c->bit_rate = 400000;
/* resolution must be a multiple of two */
- c->width = 352;
+ c->width = 352;
c->height = 288;
/* frames per second */
- c->time_base= (AVRational){1,25};
- c->gop_size = 10; /* emit one intra frame every ten frames */
- c->max_b_frames=1;
- c->pix_fmt = AV_PIX_FMT_YUV420P;
+ c->time_base = (AVRational) {1, 25 };
+ c->gop_size = 10; /* emit one intra frame every ten frames */
+ c->max_b_frames = 1;
+ c->pix_fmt = AV_PIX_FMT_YUV420P;
/* open it */
if (avcodec_open2(c, codec, NULL) < 0) {
@@ -369,7 +369,7 @@ static void video_encode_example(const char *filename)
picture->height = c->height;
/* encode 1 second of video */
- for(i=0;i<25;i++) {
+ for (i = 0; i < 25; i++) {
av_init_packet(&pkt);
pkt.data = NULL; // packet data will be allocated by the encoder
pkt.size = 0;
@@ -377,19 +377,16 @@ static void video_encode_example(const char *filename)
fflush(stdout);
/* prepare a dummy image */
/* Y */
- for(y=0;y<c->height;y++) {
- for(x=0;x<c->width;x++) {
+ for (y = 0; y < c->height; y++)
+ for (x = 0; x < c->width; x++)
picture->data[0][y * picture->linesize[0] + x] = x + y + i * 3;
- }
- }
/* Cb and Cr */
- for(y=0;y<c->height/2;y++) {
- for(x=0;x<c->width/2;x++) {
+ for (y = 0; y < c->height / 2; y++)
+ for (x = 0; x < c->width / 2; x++) {
picture->data[1][y * picture->linesize[1] + x] = 128 + y + i *
2;
picture->data[2][y * picture->linesize[2] + x] = 64 + x + i *
5;
}
- }
picture->pts = i;
@@ -445,17 +442,17 @@ static void pgm_save(unsigned char *buf, int wrap, int
xsize, int ysize,
FILE *f;
int i;
- f=fopen(filename,"w");
- fprintf(f,"P5\n%d %d\n%d\n",xsize,ysize,255);
- for(i=0;i<ysize;i++)
- fwrite(buf + i * wrap,1,xsize,f);
+ f = fopen(filename, "w");
+ fprintf(f, "P5\n%d %d\n%d\n", xsize, ysize, 255);
+ for (i = 0; i < ysize; i++)
+ fwrite(buf + i * wrap, 1, xsize, f);
fclose(f);
}
static void video_decode_example(const char *outfilename, const char *filename)
{
AVCodec *codec;
- AVCodecContext *c= NULL;
+ AVCodecContext *c = NULL;
int frame, got_picture, len;
FILE *f;
AVFrame *picture;
@@ -477,15 +474,15 @@ static void video_decode_example(const char *outfilename,
const char *filename)
exit(1);
}
- c = avcodec_alloc_context3(codec);
+ c = avcodec_alloc_context3(codec);
picture = av_frame_alloc();
- if(codec->capabilities&CODEC_CAP_TRUNCATED)
- c->flags|= CODEC_FLAG_TRUNCATED; /* we do not send complete frames */
+ if (codec->capabilities & CODEC_CAP_TRUNCATED)
+ c->flags |= CODEC_FLAG_TRUNCATED; /* we do not send complete frames */
/* For some codecs, such as msmpeg4 and mpeg4, width and height
- MUST be initialized there because this information is not
- available in the bitstream. */
+ * MUST be initialized there because this information is not
+ * available in the bitstream. */
/* open it */
if (avcodec_open2(c, codec, NULL) < 0) {
@@ -502,26 +499,26 @@ static void video_decode_example(const char *outfilename,
const char *filename)
}
frame = 0;
- for(;;) {
+ for (;;) {
avpkt.size = fread(inbuf, 1, INBUF_SIZE, f);
if (avpkt.size == 0)
break;
/* NOTE1: some codecs are stream based (mpegvideo, mpegaudio)
- and this is the only method to use them because you cannot
- know the compressed data size before analysing it.
-
- BUT some other codecs (msmpeg4, mpeg4) are inherently frame
- based, so you must call them with all the data for one
- frame exactly. You must also initialize 'width' and
- 'height' before initializing them. */
+ * and this is the only method to use them because you cannot
+ * know the compressed data size before analysing it.
+ *
+ * BUT some other codecs (msmpeg4, mpeg4) are inherently frame
+ * based, so you must call them with all the data for one
+ * frame exactly. You must also initialize 'width' and
+ * 'height' before initializing them. */
/* NOTE2: some codecs allow the raw parameters (frame size,
- sample rate) to be changed at any frame. We handle this, so
- you should also take care of it */
+ * sample rate) to be changed at any frame. We handle this, so
+ * you should also take care of it */
/* here, we use a stream based decoder (mpeg1video), so we
- feed decoder and see if it could decode a frame */
+ * feed decoder and see if it could decode a frame */
avpkt.data = inbuf;
while (avpkt.size > 0) {
len = avcodec_decode_video2(c, picture, &got_picture, &avpkt);
@@ -534,7 +531,7 @@ static void video_decode_example(const char *outfilename,
const char *filename)
fflush(stdout);
/* the picture is allocated by the decoder. no need to
- free it */
+ * free it */
snprintf(buf, sizeof(buf), outfilename, frame);
pgm_save(picture->data[0], picture->linesize[0],
c->width, c->height, buf);
@@ -546,17 +543,17 @@ static void video_decode_example(const char *outfilename,
const char *filename)
}
/* some codecs, such as MPEG, transmit the I and P frame with a
- latency of one frame. You must do the following to have a
- chance to get the last frame of the video */
+ * latency of one frame. You must do the following to have a
+ * chance to get the last frame of the video */
avpkt.data = NULL;
avpkt.size = 0;
- len = avcodec_decode_video2(c, picture, &got_picture, &avpkt);
+ len = avcodec_decode_video2(c, picture, &got_picture, &avpkt);
if (got_picture) {
printf("saving last frame %3d\n", frame);
fflush(stdout);
/* the picture is allocated by the decoder. no need to
- free it */
+ * free it */
snprintf(buf, sizeof(buf), outfilename, frame);
pgm_save(picture->data[0], picture->linesize[0],
c->width, c->height, buf);
--
1.9.0
_______________________________________________
libav-devel mailing list
[email protected]
https://lists.libav.org/mailman/listinfo/libav-devel