Diffstat (limited to 'src/test-native/ffmpeg/avcodec_sample.c')
-rw-r--r--  src/test-native/ffmpeg/avcodec_sample.c  203
1 file changed, 203 insertions(+), 0 deletions(-)
diff --git a/src/test-native/ffmpeg/avcodec_sample.c b/src/test-native/ffmpeg/avcodec_sample.c
new file mode 100644
index 000000000..f4001b4e6
--- /dev/null
+++ b/src/test-native/ffmpeg/avcodec_sample.c
@@ -0,0 +1,203 @@
+// avcodec_sample.0.5.0.c
+
+// A small sample program that shows how to use libavformat and libavcodec to
+// read video from a file.
+//
+// This version is for the 0.4.9+ release of ffmpeg. This release adds the
+// av_read_frame() API call, which simplifies the reading of video frames
+// considerably.
+//
+// Use
+//
+// gcc -o avcodec_sample.0.5.0 avcodec_sample.0.5.0.c -lavformat -lavcodec -lavutil -lswscale -lz -lbz2
+//
+// to build (assuming libavformat, libavcodec, libavutil, and libswscale are correctly installed on
+// your system).
+//
+// Run using
+//
+// avcodec_sample.0.5.0 myvideofile.mpg
+//
+// to write the first five frames from "myvideofile.mpg" to disk in PPM
+// format.
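+//
+// The output images are written to the current working directory as
+// frame1.ppm, frame2.ppm, ... frame5.ppm.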
+
+#include <libavcodec/avcodec.h>
+#include <libavformat/avformat.h>
+#include <libswscale/swscale.h>
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <stdbool.h>
+
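+// SaveFrame writes a single packed-RGB24 frame to disk as a binary PPM (P6) image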
+static void SaveFrame(AVFrame *pFrame, int width, int height, int iFrame);
+
+int main (int argc, char *argv[])
+{
+ AVFormatContext *pFormatCtx = NULL; // must be NULL so avformat_open_input() allocates it
+ int i, videoStream;
+ AVCodecContext *pCodecCtx;
+ AVCodec *pCodec;
+ AVFrame *pFrame;
+ AVFrame *pFrameRGB;
+ AVPacket packet;
+ int frameFinished;
+ int numBytes;
+ uint8_t *buffer;
+
+ if(argc < 2)
+ return -1; // No input file given
+
+ // Register all formats and codecs
+ av_register_all();
+
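+ // Initialize network support so that network protocols (e.g. http, rtsp)
+ // can be opened in addition to local files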
+ avformat_network_init();
+
+ // Open video file
+ if(avformat_open_input(&pFormatCtx, argv[1], NULL, NULL)!=0)
+ return -1; // Couldn't open file
+
+ // Retrieve stream information
+ if(avformat_find_stream_info(pFormatCtx, NULL)<0)
+ return -1; // Couldn't find stream information
+
+ // Dump information about file onto standard error
+ av_dump_format(pFormatCtx, 0, argv[1], false);
+
+ // Find the first video stream
+ videoStream=-1;
+ for(i=0; i<pFormatCtx->nb_streams; i++)
+ if(pFormatCtx->streams[i]->codec->codec_type==AVMEDIA_TYPE_VIDEO)
+ {
+ videoStream=i;
+ break;
+ }
+ if(videoStream==-1)
+ return -1; // Didn't find a video stream
+
+ // Get a pointer to the codec context for the video stream
+ pCodecCtx=pFormatCtx->streams[videoStream]->codec;
+
+ // Find the decoder for the video stream
+ pCodec=avcodec_find_decoder(pCodecCtx->codec_id);
+ if(pCodec==NULL)
+ return -1; // Codec not found
+
+ // Open codec
+ if(avcodec_open2(pCodecCtx, pCodec, NULL)<0)
+ return -1; // Could not open codec
+
+ // Hack to correct wrong frame rates that seem to be generated by some codecs
+ if(pCodecCtx->time_base.num>1000 && pCodecCtx->time_base.den==1)
+ pCodecCtx->time_base.den=1000;
+
+ // Allocate video frame for the decoded picture
+ pFrame=avcodec_alloc_frame();
+ if(pFrame==NULL)
+ return -1;
+
+ // Allocate an AVFrame structure for the converted RGB picture
+ pFrameRGB=avcodec_alloc_frame();
+ if(pFrameRGB==NULL)
+ return -1;
+
+ // Determine required buffer size and allocate buffer
+ numBytes=avpicture_get_size(PIX_FMT_RGB24, pCodecCtx->width,
+ pCodecCtx->height);
+
+ buffer=malloc(numBytes);
+ if(buffer==NULL)
+ return -1;
+
+ // Assign appropriate parts of buffer to image planes in pFrameRGB
+ avpicture_fill((AVPicture *)pFrameRGB, buffer, PIX_FMT_RGB24,
+ pCodecCtx->width, pCodecCtx->height);
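+ // (for packed RGB24 only data[0]/linesize[0] are used; avpicture_fill()
+ // simply points them into 'buffer' without copying any pixel data)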
+
+ // Read frames and save first five frames to disk
+ i=0;
+ while(av_read_frame(pFormatCtx, &packet)>=0)
+ {
+ // Is this a packet from the video stream?
+ if(packet.stream_index==videoStream)
+ {
+ // Decode video frame
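+ // (a packet does not always yield a complete picture; frameFinished is
+ // set non-zero only when a full frame has been decoded)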
+ avcodec_decode_video2(pCodecCtx, pFrame, &frameFinished, &packet);
+
+ // Did we get a video frame?
+ if(frameFinished)
+ {
+ static struct SwsContext *img_convert_ctx;
+
+#if 0
+ // Older removed code
+ // Convert the image from its native format to RGB swscale
+ img_convert((AVPicture *)pFrameRGB, PIX_FMT_RGB24,
+ (AVPicture*)pFrame, pCodecCtx->pix_fmt, pCodecCtx->width,
+ pCodecCtx->height);
+
+ // function template, for reference
+ int sws_scale(struct SwsContext *context, uint8_t* src[], int srcStride[], int srcSliceY,
+ int srcSliceH, uint8_t* dst[], int dstStride[]);
+#endif
+ // Convert the image from its native format to packed RGB24 using swscale
+ if(img_convert_ctx == NULL) {
+ int w = pCodecCtx->width;
+ int h = pCodecCtx->height;
+
+ img_convert_ctx = sws_getContext(w, h,
+ pCodecCtx->pix_fmt,
+ w, h, PIX_FMT_RGB24, SWS_BICUBIC,
+ NULL, NULL, NULL);
+ if(img_convert_ctx == NULL) {
+ fprintf(stderr, "Cannot initialize the conversion context!\n");
+ exit(1);
+ }
+ }
+ int ret = sws_scale(img_convert_ctx, pFrame->data, pFrame->linesize, 0,
+ pCodecCtx->height, pFrameRGB->data, pFrameRGB->linesize);
+#if 0 // this used to be true as of 1/2009, but apparently it is no longer true as of 3/2009
+ if(ret) {
+ fprintf(stderr, "SWS_Scale failed [%d]!\n", ret);
+ exit(-1);
+ }
+#endif
+ // Save the frame to disk
+ if(++i<=5)
+ SaveFrame(pFrameRGB, pCodecCtx->width, pCodecCtx->height, i);
+ }
+ }
+
+ // Free the packet that was allocated by av_read_frame
+ av_free_packet(&packet);
+ }
+
+ // Free the RGB image
+ free(buffer);
+ av_free(pFrameRGB);
+
+ // Free the YUV frame
+ av_free(pFrame);
+
+ // Close the codec
+ avcodec_close(pCodecCtx);
+
+ // Close the video file
+ avformat_close_input(&pFormatCtx);
+
+ return 0;
+}
+
+static void SaveFrame(AVFrame *pFrame, int width, int height, int iFrame)
+{
+ FILE *pFile;
+ char szFilename[32];
+ int y;
+
+ // Open file
+ snprintf(szFilename, sizeof(szFilename), "frame%d.ppm", iFrame);
+ pFile=fopen(szFilename, "wb");
+ if(pFile==NULL)
+ return;
+
+ // Write header
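+ // ("P6" marks a binary RGB PPM; 255 is the maximum value per color channel)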
+ fprintf(pFile, "P6\n%d %d\n255\n", width, height);
+
+ // Write pixel data
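+ // (one row at a time, since linesize[0] is the row stride in bytes and may
+ // be larger than width*3)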
+ for(y=0; y<height; y++)
+ fwrite(pFrame->data[0]+y*pFrame->linesize[0], 1, width*3, pFile);
+
+ // Close file
+ fclose(pFile);
+}