diff options
author | Sven Gothel <[email protected]> | 2012-04-16 20:50:06 +0200 |
---|---|---|
committer | Sven Gothel <[email protected]> | 2012-04-16 20:50:06 +0200 |
commit | 10935e1ec0d8ed677bc3fddfaa8cd73898a3bcbf (patch) | |
tree | 6d453f72b3024670a6ed5c03454ef54ad4a04ba0 /src/test-native | |
parent | 62e5686fb583ad991d5811baf242d40d21952e27 (diff) |
Add native tests for libav/ffmpeg and gst
Diffstat (limited to 'src/test-native')
-rw-r--r-- | src/test-native/ffmpeg/api-example.c | 479 | ||||
-rw-r--r-- | src/test-native/ffmpeg/avcodec_sample.c | 203 | ||||
-rw-r--r-- | src/test-native/ffmpeg/avcodec_sample.sh | 6 | ||||
-rw-r--r-- | src/test-native/gst/helloworld-auto.c | 112 | ||||
-rw-r--r-- | src/test-native/gst/helloworld-playbin.c | 75 | ||||
-rw-r--r-- | src/test-native/gst/helloworld-playbin2.c | 75 | ||||
-rw-r--r-- | src/test-native/gst/helloworld.c | 142 | ||||
-rw-r--r-- | src/test-native/gst/make.sh | 5 |
8 files changed, 1097 insertions(+), 0 deletions(-)
diff --git a/src/test-native/ffmpeg/api-example.c b/src/test-native/ffmpeg/api-example.c new file mode 100644 index 000000000..970a90eab --- /dev/null +++ b/src/test-native/ffmpeg/api-example.c @@ -0,0 +1,479 @@ +/* + * copyright (c) 2001 Fabrice Bellard + * + * This file is part of Libav. + * + * Libav is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * Libav is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with Libav; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +/** + * @file + * libavcodec API use example. + * + * @example libavcodec/api-example.c + * Note that this library only handles codecs (mpeg, mpeg4, etc...), + * not file formats (avi, vob, etc...). 
See library 'libavformat' for the + * format handling + */ + +#include <stdlib.h> +#include <stdio.h> +#include <string.h> + +#ifdef HAVE_AV_CONFIG_H +#undef HAVE_AV_CONFIG_H +#endif + +#include "libavcodec/avcodec.h" +#include "libavutil/mathematics.h" +#include "libavutil/samplefmt.h" + +#define INBUF_SIZE 4096 +#define AUDIO_INBUF_SIZE 20480 +#define AUDIO_REFILL_THRESH 4096 + +/* + * Audio encoding example + */ +static void audio_encode_example(const char *filename) +{ + AVCodec *codec; + AVCodecContext *c= NULL; + int frame_size, i, j, out_size, outbuf_size; + FILE *f; + short *samples; + float t, tincr; + uint8_t *outbuf; + + printf("Audio encoding\n"); + + /* find the MP2 encoder */ + codec = avcodec_find_encoder(CODEC_ID_MP2); + if (!codec) { + fprintf(stderr, "codec not found\n"); + exit(1); + } + + c = avcodec_alloc_context3(codec); + + /* put sample parameters */ + c->bit_rate = 64000; + c->sample_rate = 44100; + c->channels = 2; + + /* open it */ + if (avcodec_open(c, codec) < 0) { + fprintf(stderr, "could not open codec\n"); + exit(1); + } + + /* the codec gives us the frame size, in samples */ + frame_size = c->frame_size; + samples = malloc(frame_size * 2 * c->channels); + outbuf_size = 10000; + outbuf = malloc(outbuf_size); + + f = fopen(filename, "wb"); + if (!f) { + fprintf(stderr, "could not open %s\n", filename); + exit(1); + } + + /* encode a single tone sound */ + t = 0; + tincr = 2 * M_PI * 440.0 / c->sample_rate; + for(i=0;i<200;i++) { + for(j=0;j<frame_size;j++) { + samples[2*j] = (int)(sin(t) * 10000); + samples[2*j+1] = samples[2*j]; + t += tincr; + } + /* encode the samples */ + out_size = avcodec_encode_audio(c, outbuf, outbuf_size, samples); + fwrite(outbuf, 1, out_size, f); + } + fclose(f); + free(outbuf); + free(samples); + + avcodec_close(c); + av_free(c); +} + +/* + * Audio decoding. 
+ */ +static void audio_decode_example(const char *outfilename, const char *filename) +{ + AVCodec *codec; + AVCodecContext *c= NULL; + int len; + FILE *f, *outfile; + uint8_t inbuf[AUDIO_INBUF_SIZE + FF_INPUT_BUFFER_PADDING_SIZE]; + AVPacket avpkt; + AVFrame *decoded_frame = NULL; + + av_init_packet(&avpkt); + + printf("Audio decoding\n"); + + /* find the mpeg audio decoder */ + codec = avcodec_find_decoder(CODEC_ID_MP2); + if (!codec) { + fprintf(stderr, "codec not found\n"); + exit(1); + } + + c = avcodec_alloc_context3(codec); + + /* open it */ + if (avcodec_open(c, codec) < 0) { + fprintf(stderr, "could not open codec\n"); + exit(1); + } + + f = fopen(filename, "rb"); + if (!f) { + fprintf(stderr, "could not open %s\n", filename); + exit(1); + } + outfile = fopen(outfilename, "wb"); + if (!outfile) { + av_free(c); + exit(1); + } + + /* decode until eof */ + avpkt.data = inbuf; + avpkt.size = fread(inbuf, 1, AUDIO_INBUF_SIZE, f); + + while (avpkt.size > 0) { + int got_frame = 0; + + if (!decoded_frame) { + if (!(decoded_frame = avcodec_alloc_frame())) { + fprintf(stderr, "out of memory\n"); + exit(1); + } + } else + avcodec_get_frame_defaults(decoded_frame); + + len = avcodec_decode_audio4(c, decoded_frame, &got_frame, &avpkt); + if (len < 0) { + fprintf(stderr, "Error while decoding\n"); + exit(1); + } + if (got_frame) { + /* if a frame has been decoded, output it */ + int data_size = av_samples_get_buffer_size(NULL, c->channels, + decoded_frame->nb_samples, + c->sample_fmt, 1); + fwrite(decoded_frame->data[0], 1, data_size, outfile); + } + avpkt.size -= len; + avpkt.data += len; + if (avpkt.size < AUDIO_REFILL_THRESH) { + /* Refill the input buffer, to avoid trying to decode + * incomplete frames. Instead of this, one could also use + * a parser, or use a proper container format through + * libavformat. 
*/ + memmove(inbuf, avpkt.data, avpkt.size); + avpkt.data = inbuf; + len = fread(avpkt.data + avpkt.size, 1, + AUDIO_INBUF_SIZE - avpkt.size, f); + if (len > 0) + avpkt.size += len; + } + } + + fclose(outfile); + fclose(f); + + avcodec_close(c); + av_free(c); + av_free(decoded_frame); +} + +/* + * Video encoding example + */ +static void video_encode_example(const char *filename) +{ + AVCodec *codec; + AVCodecContext *c= NULL; + int i, out_size, size, x, y, outbuf_size; + FILE *f; + AVFrame *picture; + uint8_t *outbuf, *picture_buf; + + printf("Video encoding\n"); + + /* find the mpeg1 video encoder */ + codec = avcodec_find_encoder(CODEC_ID_MPEG1VIDEO); + if (!codec) { + fprintf(stderr, "codec not found\n"); + exit(1); + } + + c = avcodec_alloc_context3(codec); + picture= avcodec_alloc_frame(); + + /* put sample parameters */ + c->bit_rate = 400000; + /* resolution must be a multiple of two */ + c->width = 352; + c->height = 288; + /* frames per second */ + c->time_base= (AVRational){1,25}; + c->gop_size = 10; /* emit one intra frame every ten frames */ + c->max_b_frames=1; + c->pix_fmt = PIX_FMT_YUV420P; + + /* open it */ + if (avcodec_open(c, codec) < 0) { + fprintf(stderr, "could not open codec\n"); + exit(1); + } + + f = fopen(filename, "wb"); + if (!f) { + fprintf(stderr, "could not open %s\n", filename); + exit(1); + } + + /* alloc image and output buffer */ + outbuf_size = 100000; + outbuf = malloc(outbuf_size); + size = c->width * c->height; + picture_buf = malloc((size * 3) / 2); /* size for YUV 420 */ + + picture->data[0] = picture_buf; + picture->data[1] = picture->data[0] + size; + picture->data[2] = picture->data[1] + size / 4; + picture->linesize[0] = c->width; + picture->linesize[1] = c->width / 2; + picture->linesize[2] = c->width / 2; + + /* encode 1 second of video */ + for(i=0;i<25;i++) { + fflush(stdout); + /* prepare a dummy image */ + /* Y */ + for(y=0;y<c->height;y++) { + for(x=0;x<c->width;x++) { + picture->data[0][y * picture->linesize[0] 
+ x] = x + y + i * 3; + } + } + + /* Cb and Cr */ + for(y=0;y<c->height/2;y++) { + for(x=0;x<c->width/2;x++) { + picture->data[1][y * picture->linesize[1] + x] = 128 + y + i * 2; + picture->data[2][y * picture->linesize[2] + x] = 64 + x + i * 5; + } + } + + /* encode the image */ + out_size = avcodec_encode_video(c, outbuf, outbuf_size, picture); + printf("encoding frame %3d (size=%5d)\n", i, out_size); + fwrite(outbuf, 1, out_size, f); + } + + /* get the delayed frames */ + for(; out_size; i++) { + fflush(stdout); + + out_size = avcodec_encode_video(c, outbuf, outbuf_size, NULL); + printf("write frame %3d (size=%5d)\n", i, out_size); + fwrite(outbuf, 1, out_size, f); + } + + /* add sequence end code to have a real mpeg file */ + outbuf[0] = 0x00; + outbuf[1] = 0x00; + outbuf[2] = 0x01; + outbuf[3] = 0xb7; + fwrite(outbuf, 1, 4, f); + fclose(f); + free(picture_buf); + free(outbuf); + + avcodec_close(c); + av_free(c); + av_free(picture); + printf("\n"); +} + +/* + * Video decoding example + */ + +static void pgm_save(unsigned char *buf, int wrap, int xsize, int ysize, + char *filename) +{ + FILE *f; + int i; + + f=fopen(filename,"w"); + fprintf(f,"P5\n%d %d\n%d\n",xsize,ysize,255); + for(i=0;i<ysize;i++) + fwrite(buf + i * wrap,1,xsize,f); + fclose(f); +} + +static void video_decode_example(const char *outfilename, const char *filename) +{ + AVCodec *codec; + AVCodecContext *c= NULL; + int frame, got_picture, len; + FILE *f; + AVFrame *picture; + uint8_t inbuf[INBUF_SIZE + FF_INPUT_BUFFER_PADDING_SIZE]; + char buf[1024]; + AVPacket avpkt; + + av_init_packet(&avpkt); + + /* set end of buffer to 0 (this ensures that no overreading happens for damaged mpeg streams) */ + memset(inbuf + INBUF_SIZE, 0, FF_INPUT_BUFFER_PADDING_SIZE); + + printf("Video decoding\n"); + + /* find the mpeg1 video decoder */ + codec = avcodec_find_decoder(CODEC_ID_MPEG1VIDEO); + if (!codec) { + fprintf(stderr, "codec not found\n"); + exit(1); + } + + c = avcodec_alloc_context3(codec); + 
picture= avcodec_alloc_frame(); + + if(codec->capabilities&CODEC_CAP_TRUNCATED) + c->flags|= CODEC_FLAG_TRUNCATED; /* we do not send complete frames */ + + /* For some codecs, such as msmpeg4 and mpeg4, width and height + MUST be initialized there because this information is not + available in the bitstream. */ + + /* open it */ + if (avcodec_open(c, codec) < 0) { + fprintf(stderr, "could not open codec\n"); + exit(1); + } + + /* the codec gives us the frame size, in samples */ + + f = fopen(filename, "rb"); + if (!f) { + fprintf(stderr, "could not open %s\n", filename); + exit(1); + } + + frame = 0; + for(;;) { + avpkt.size = fread(inbuf, 1, INBUF_SIZE, f); + if (avpkt.size == 0) + break; + + /* NOTE1: some codecs are stream based (mpegvideo, mpegaudio) + and this is the only method to use them because you cannot + know the compressed data size before analysing it. + + BUT some other codecs (msmpeg4, mpeg4) are inherently frame + based, so you must call them with all the data for one + frame exactly. You must also initialize 'width' and + 'height' before initializing them. */ + + /* NOTE2: some codecs allow the raw parameters (frame size, + sample rate) to be changed at any frame. We handle this, so + you should also take care of it */ + + /* here, we use a stream based decoder (mpeg1video), so we + feed decoder and see if it could decode a frame */ + avpkt.data = inbuf; + while (avpkt.size > 0) { + len = avcodec_decode_video2(c, picture, &got_picture, &avpkt); + if (len < 0) { + fprintf(stderr, "Error while decoding frame %d\n", frame); + exit(1); + } + if (got_picture) { + printf("saving frame %3d\n", frame); + fflush(stdout); + + /* the picture is allocated by the decoder. 
no need to + free it */ + snprintf(buf, sizeof(buf), outfilename, frame); + pgm_save(picture->data[0], picture->linesize[0], + c->width, c->height, buf); + frame++; + } + avpkt.size -= len; + avpkt.data += len; + } + } + + /* some codecs, such as MPEG, transmit the I and P frame with a + latency of one frame. You must do the following to have a + chance to get the last frame of the video */ + avpkt.data = NULL; + avpkt.size = 0; + len = avcodec_decode_video2(c, picture, &got_picture, &avpkt); + if (got_picture) { + printf("saving last frame %3d\n", frame); + fflush(stdout); + + /* the picture is allocated by the decoder. no need to + free it */ + snprintf(buf, sizeof(buf), outfilename, frame); + pgm_save(picture->data[0], picture->linesize[0], + c->width, c->height, buf); + frame++; + } + + fclose(f); + + avcodec_close(c); + av_free(c); + av_free(picture); + printf("\n"); +} + +int main(int argc, char **argv) +{ + const char *filename; + + /* must be called before using avcodec lib */ + avcodec_init(); + + /* register all the codecs */ + avcodec_register_all(); + + if (argc <= 1) { + audio_encode_example("/tmp/test.mp2"); + audio_decode_example("/tmp/test.sw", "/tmp/test.mp2"); + + video_encode_example("/tmp/test.mpg"); + filename = "/tmp/test.mpg"; + } else { + filename = argv[1]; + } + + // audio_decode_example("/tmp/test.sw", filename); + video_decode_example("/tmp/test%d.pgm", filename); + + return 0; +} diff --git a/src/test-native/ffmpeg/avcodec_sample.c b/src/test-native/ffmpeg/avcodec_sample.c new file mode 100644 index 000000000..f4001b4e6 --- /dev/null +++ b/src/test-native/ffmpeg/avcodec_sample.c @@ -0,0 +1,203 @@ +// avcodec_sample.0.5.0.c + +// A small sample program that shows how to use libavformat and libavcodec to +// read video from a file. +// +// This version is for the 0.4.9+ release of ffmpeg. This release adds the +// av_read_frame() API call, which simplifies the reading of video frames +// considerably. 
+// +// Use +// +// gcc -o avcodec_sample.0.5.0 avcodec_sample.0.5.0.c -lavformat -lavcodec -lavutil -lswscale -lz -lbz2 +// +// to build (assuming libavformat, libavcodec, libavutil, and swscale are correctly installed on +// your system). +// +// Run using +// +// avcodec_sample.0.5.0 myvideofile.mpg +// +// to write the first five frames from "myvideofile.mpg" to disk in PPM +// format. + +#include <libavcodec/avcodec.h> +#include <libavformat/avformat.h> +#include <libswscale/swscale.h> + +#include <stdio.h> +#include <stdlib.h> +#include <stdbool.h> + +static void SaveFrame(AVFrame *pFrame, int width, int height, int iFrame); + +int main (int argc, const char * argv[]) +{ + AVFormatContext *pFormatCtx; + int i, videoStream; + AVCodecContext *pCodecCtx; + AVCodec *pCodec; + AVFrame *pFrame; + AVFrame *pFrameRGB; + AVPacket packet; + int frameFinished; + int numBytes; + uint8_t *buffer; + + // Register all formats and codecs + av_register_all(); + + avformat_network_init(); + + // Open video file + if(avformat_open_input(&pFormatCtx, argv[1], NULL, NULL)!=0) + return -1; // Couldn't open file + + // Retrieve stream information + if(avformat_find_stream_info(pFormatCtx, NULL)<0) + return -1; // Couldn't find stream information + + // Dump information about file onto standard error + av_dump_format(pFormatCtx, 0, argv[1], false); + + // Find the first video stream + videoStream=-1; + for(i=0; i<pFormatCtx->nb_streams; i++) + if(pFormatCtx->streams[i]->codec->codec_type==AVMEDIA_TYPE_VIDEO) + { + videoStream=i; + break; + } + if(videoStream==-1) + return -1; // Didn't find a video stream + + // Get a pointer to the codec context for the video stream + pCodecCtx=pFormatCtx->streams[videoStream]->codec; + + // Find the decoder for the video stream + pCodec=avcodec_find_decoder(pCodecCtx->codec_id); + if(pCodec==NULL) + return -1; // Codec not found + + // Open codec + if(avcodec_open2(pCodecCtx, pCodec, NULL)<0) + return -1; // Could not open codec + + // Hack to 
correct wrong frame rates that seem to be generated by some codecs + if(pCodecCtx->time_base.num>1000 && pCodecCtx->time_base.den==1) + pCodecCtx->time_base.den=1000; + + // Allocate video frame + pFrame=avcodec_alloc_frame(); + + // Allocate an AVFrame structure + pFrameRGB=avcodec_alloc_frame(); + if(pFrameRGB==NULL) + return -1; + + // Determine required buffer size and allocate buffer + numBytes=avpicture_get_size(PIX_FMT_RGB24, pCodecCtx->width, + pCodecCtx->height); + + buffer=malloc(numBytes); + + // Assign appropriate parts of buffer to image planes in pFrameRGB + avpicture_fill((AVPicture *)pFrameRGB, buffer, PIX_FMT_RGB24, + pCodecCtx->width, pCodecCtx->height); + + // Read frames and save first five frames to disk + i=0; + while(av_read_frame(pFormatCtx, &packet)>=0) + { + // Is this a packet from the video stream? + if(packet.stream_index==videoStream) + { + // Decode video frame + avcodec_decode_video2(pCodecCtx, pFrame, &frameFinished, &packet); + + // Did we get a video frame? 
+ if(frameFinished) + { + static struct SwsContext *img_convert_ctx; + +#if 0 + // Older removed code + // Convert the image from its native format to RGB swscale + img_convert((AVPicture *)pFrameRGB, PIX_FMT_RGB24, + (AVPicture*)pFrame, pCodecCtx->pix_fmt, pCodecCtx->width, + pCodecCtx->height); + + // function template, for reference + int sws_scale(struct SwsContext *context, uint8_t* src[], int srcStride[], int srcSliceY, + int srcSliceH, uint8_t* dst[], int dstStride[]); +#endif + // Convert the image into YUV format that SDL uses + if(img_convert_ctx == NULL) { + int w = pCodecCtx->width; + int h = pCodecCtx->height; + + img_convert_ctx = sws_getContext(w, h, + pCodecCtx->pix_fmt, + w, h, PIX_FMT_RGB24, SWS_BICUBIC, + NULL, NULL, NULL); + if(img_convert_ctx == NULL) { + fprintf(stderr, "Cannot initialize the conversion context!\n"); + exit(1); + } + } + int ret = sws_scale(img_convert_ctx, pFrame->data, pFrame->linesize, 0, + pCodecCtx->height, pFrameRGB->data, pFrameRGB->linesize); +#if 0 // this use to be true, as of 1/2009, but apparently it is no longer true in 3/2009 + if(ret) { + fprintf(stderr, "SWS_Scale failed [%d]!\n", ret); + exit(-1); + } +#endif + // Save the frame to disk + if(i++<=5) + SaveFrame(pFrameRGB, pCodecCtx->width, pCodecCtx->height, i); + } + } + + // Free the packet that was allocated by av_read_frame + av_free_packet(&packet); + } + + // Free the RGB image + free(buffer); + av_free(pFrameRGB); + + // Free the YUV frame + av_free(pFrame); + + // Close the codec + avcodec_close(pCodecCtx); + + // Close the video file + avformat_close_input(&pFormatCtx); + + return 0; +} + +static void SaveFrame(AVFrame *pFrame, int width, int height, int iFrame) +{ + FILE *pFile; + char szFilename[32]; + int y; + + // Open file + sprintf(szFilename, "frame%d.ppm", iFrame); + pFile=fopen(szFilename, "wb"); + if(pFile==NULL) + return; + + // Write header + fprintf(pFile, "P6\n%d %d\n255\n", width, height); + + // Write pixel data + for(y=0; y<height; 
y++) + fwrite(pFrame->data[0]+y*pFrame->linesize[0], 1, width*3, pFile); + + // Close file + fclose(pFile); +} diff --git a/src/test-native/ffmpeg/avcodec_sample.sh b/src/test-native/ffmpeg/avcodec_sample.sh new file mode 100644 index 000000000..c5ccd6caf --- /dev/null +++ b/src/test-native/ffmpeg/avcodec_sample.sh @@ -0,0 +1,6 @@ +# -I/usr/include/libavcodec -I/usr/include/libavformat \ + +gcc \ + -o avcodec_sample avcodec_sample.c \ + -lavformat -lavcodec -lavutil -lswscale + diff --git a/src/test-native/gst/helloworld-auto.c b/src/test-native/gst/helloworld-auto.c new file mode 100644 index 000000000..6381c9c93 --- /dev/null +++ b/src/test-native/gst/helloworld-auto.c @@ -0,0 +1,112 @@ +#include <gst/gst.h> +#include <glib.h> + + +static gboolean +my_bus_callback (GstBus *bus, + GstMessage *msg, + gpointer data) +{ + GMainLoop *loop = (GMainLoop *) data; + + switch (GST_MESSAGE_TYPE (msg)) { + + case GST_MESSAGE_EOS: + g_print ("End of stream\n"); + g_main_loop_quit (loop); + break; + + case GST_MESSAGE_ERROR: { + gchar *debug; + GError *error; + + gst_message_parse_error (msg, &error, &debug); + g_free (debug); + + g_printerr ("Error: %s\n", error->message); + g_error_free (error); + + g_main_loop_quit (loop); + break; + } + default: + break; + } + + return TRUE; +} + + +static gboolean +idle_exit_loop (gpointer data) +{ + g_main_loop_quit ((GMainLoop *) data); + + /* once */ + return FALSE; +} + +static void +cb_typefound (GstElement *typefind, + guint probability, + GstCaps *caps, + gpointer data) +{ + GMainLoop *loop = data; + gchar *type; + + type = gst_caps_to_string (caps); + g_print ("Media type %s found, probability %d%%\n", type, probability); + g_free (type); + + /* since we connect to a signal in the pipeline thread context, we need + * to set an idle handler to exit the main loop in the mainloop context. + * Normally, your app should not need to worry about such things. 
*/ + g_idle_add (idle_exit_loop, loop); +} + +gint +main (gint argc, + gchar *argv[]) +{ + GMainLoop *loop; + GstElement *pipeline, *filesrc, *typefind, *fakesink; + GstBus *bus; + + /* init GStreamer */ + gst_init (&argc, &argv); + loop = g_main_loop_new (NULL, FALSE); + + /* check args */ + if (argc != 2) { + g_print ("Usage: %s <filename>\n", argv[0]); + return -1; + } + + /* create a new pipeline to hold the elements */ + pipeline = gst_pipeline_new ("pipe"); + + bus = gst_pipeline_get_bus (GST_PIPELINE (pipeline)); + gst_bus_add_watch (bus, my_bus_callback, NULL); + gst_object_unref (bus); + + /* create file source and typefind element */ + filesrc = gst_element_factory_make ("filesrc", "source"); + g_object_set (G_OBJECT (filesrc), "location", argv[1], NULL); + typefind = gst_element_factory_make ("typefind", "typefinder"); + g_signal_connect (typefind, "have-type", G_CALLBACK (cb_typefound), loop); + fakesink = gst_element_factory_make ("fakesink", "sink"); + + /* setup */ + gst_bin_add_many (GST_BIN (pipeline), filesrc, typefind, fakesink, NULL); + gst_element_link_many (filesrc, typefind, fakesink, NULL); + gst_element_set_state (GST_ELEMENT (pipeline), GST_STATE_PLAYING); + g_main_loop_run (loop); + + /* unset */ + gst_element_set_state (GST_ELEMENT (pipeline), GST_STATE_NULL); + gst_object_unref (GST_OBJECT (pipeline)); + + return 0; +} + diff --git a/src/test-native/gst/helloworld-playbin.c b/src/test-native/gst/helloworld-playbin.c new file mode 100644 index 000000000..c5a42fe6c --- /dev/null +++ b/src/test-native/gst/helloworld-playbin.c @@ -0,0 +1,75 @@ +#include <gst/gst.h> +#include <glib.h> + + +static gboolean +my_bus_callback (GstBus *bus, + GstMessage *msg, + gpointer data) +{ + GMainLoop *loop = (GMainLoop *) data; + + switch (GST_MESSAGE_TYPE (msg)) { + + case GST_MESSAGE_EOS: + g_print ("End of stream\n"); + g_main_loop_quit (loop); + break; + + case GST_MESSAGE_ERROR: { + gchar *debug; + GError *error; + + gst_message_parse_error (msg, 
&error, &debug); + g_free (debug); + + g_printerr ("Error: %s\n", error->message); + g_error_free (error); + + g_main_loop_quit (loop); + break; + } + default: + break; + } + + return TRUE; +} + +gint +main (gint argc, + gchar *argv[]) +{ + GMainLoop *loop; + GstElement *play; + GstBus *bus; + + /* init GStreamer */ + gst_init (&argc, &argv); + loop = g_main_loop_new (NULL, FALSE); + + /* make sure we have a URI */ + if (argc != 2) { + g_print ("Usage: %s <URI>\n", argv[0]); + return -1; + } + + /* set up */ + play = gst_element_factory_make ("playbin", "play"); + g_object_set (G_OBJECT (play), "uri", argv[1], NULL); + + bus = gst_pipeline_get_bus (GST_PIPELINE (play)); + gst_bus_add_watch (bus, my_bus_callback, loop); + gst_object_unref (bus); + + gst_element_set_state (play, GST_STATE_PLAYING); + + /* now run */ + g_main_loop_run (loop); + + /* also clean up */ + gst_element_set_state (play, GST_STATE_NULL); + gst_object_unref (GST_OBJECT (play)); + + return 0; +} diff --git a/src/test-native/gst/helloworld-playbin2.c b/src/test-native/gst/helloworld-playbin2.c new file mode 100644 index 000000000..b31e3e5c6 --- /dev/null +++ b/src/test-native/gst/helloworld-playbin2.c @@ -0,0 +1,75 @@ +#include <gst/gst.h> +#include <glib.h> + + +static gboolean +my_bus_callback (GstBus *bus, + GstMessage *msg, + gpointer data) +{ + GMainLoop *loop = (GMainLoop *) data; + + switch (GST_MESSAGE_TYPE (msg)) { + + case GST_MESSAGE_EOS: + g_print ("End of stream\n"); + g_main_loop_quit (loop); + break; + + case GST_MESSAGE_ERROR: { + gchar *debug; + GError *error; + + gst_message_parse_error (msg, &error, &debug); + g_free (debug); + + g_printerr ("Error: %s\n", error->message); + g_error_free (error); + + g_main_loop_quit (loop); + break; + } + default: + break; + } + + return TRUE; +} + +gint +main (gint argc, + gchar *argv[]) +{ + GMainLoop *loop; + GstElement *play; + GstBus *bus; + + /* init GStreamer */ + gst_init (&argc, &argv); + loop = g_main_loop_new (NULL, FALSE); + + /* 
make sure we have a URI */ + if (argc != 2) { + g_print ("Usage: %s <URI>\n", argv[0]); + return -1; + } + + /* set up */ + play = gst_element_factory_make ("playbin2", "play"); + g_object_set (G_OBJECT (play), "uri", argv[1], NULL); + + bus = gst_pipeline_get_bus (GST_PIPELINE (play)); + gst_bus_add_watch (bus, my_bus_callback, loop); + gst_object_unref (bus); + + gst_element_set_state (play, GST_STATE_PLAYING); + + /* now run */ + g_main_loop_run (loop); + + /* also clean up */ + gst_element_set_state (play, GST_STATE_NULL); + gst_object_unref (GST_OBJECT (play)); + + return 0; +} diff --git a/src/test-native/gst/helloworld.c b/src/test-native/gst/helloworld.c new file mode 100644 index 000000000..6d991898d --- /dev/null +++ b/src/test-native/gst/helloworld.c @@ -0,0 +1,142 @@ +#include <gst/gst.h> +#include <glib.h> + + +static gboolean +bus_call (GstBus *bus, + GstMessage *msg, + gpointer data) +{ + GMainLoop *loop = (GMainLoop *) data; + + switch (GST_MESSAGE_TYPE (msg)) { + + case GST_MESSAGE_EOS: + g_print ("End of stream\n"); + g_main_loop_quit (loop); + break; + + case GST_MESSAGE_ERROR: { + gchar *debug; + GError *error; + + gst_message_parse_error (msg, &error, &debug); + g_free (debug); + + g_printerr ("Error: %s\n", error->message); + g_error_free (error); + + g_main_loop_quit (loop); + break; + } + default: + break; + } + + return TRUE; +} + + +static void +on_pad_added (GstElement *element, + GstPad *pad, + gpointer data) +{ + GstPad *sinkpad; + GstElement *decoder = (GstElement *) data; + + /* We can now link this pad with the vorbis-decoder sink pad */ + g_print ("Dynamic pad created, linking demuxer/decoder\n"); + + sinkpad = gst_element_get_static_pad (decoder, "sink"); + + gst_pad_link (pad, sinkpad); + + gst_object_unref (sinkpad); +} + + + +int +main (int argc, + char *argv[]) +{ + GMainLoop *loop; + + GstElement *pipeline, *source, *demuxer, *decoder, *conv, *sink; + GstBus *bus; + + /* Initialisation */ + gst_init (&argc, &argv); + + loop = 
g_main_loop_new (NULL, FALSE); + + + /* Check input arguments */ + if (argc != 2) { + g_printerr ("Usage: %s <Ogg/Vorbis filename>\n", argv[0]); + return -1; + } + + + /* Create gstreamer elements */ + pipeline = gst_pipeline_new ("audio-player"); + source = gst_element_factory_make ("filesrc", "file-source"); + demuxer = gst_element_factory_make ("oggdemux", "ogg-demuxer"); + decoder = gst_element_factory_make ("vorbisdec", "vorbis-decoder"); + conv = gst_element_factory_make ("audioconvert", "converter"); + sink = gst_element_factory_make ("autoaudiosink", "audio-output"); + + if (!pipeline || !source || !demuxer || !decoder || !conv || !sink) { + g_printerr ("One element could not be created. Exiting.\n"); + return -1; + } + + /* Set up the pipeline */ + + /* we set the input filename to the source element */ + g_object_set (G_OBJECT (source), "location", argv[1], NULL); + + /* we add a message handler */ + bus = gst_pipeline_get_bus (GST_PIPELINE (pipeline)); + gst_bus_add_watch (bus, bus_call, loop); + gst_object_unref (bus); + + /* we add all elements into the pipeline */ + /* file-source | ogg-demuxer | vorbis-decoder | converter | alsa-output */ + gst_bin_add_many (GST_BIN (pipeline), + source, demuxer, decoder, conv, sink, NULL); + + /* we link the elements together */ + /* file-source -> ogg-demuxer ~> vorbis-decoder -> converter -> alsa-output */ + gst_element_link (source, demuxer); + gst_element_link_many (decoder, conv, sink, NULL); + g_signal_connect (demuxer, "pad-added", G_CALLBACK (on_pad_added), decoder); + + /* note that the demuxer will be linked to the decoder dynamically. + The reason is that Ogg may contain various streams (for example + audio and video). The source pad(s) will be created at run time, + by the demuxer when it detects the amount and nature of streams. 
+ Therefore we connect a callback function which will be executed + when the "pad-added" is emitted.*/ + + + /* Set the pipeline to "playing" state*/ + g_print ("Now playing: %s\n", argv[1]); + gst_element_set_state (pipeline, GST_STATE_PLAYING); + + + /* Iterate */ + g_print ("Running...\n"); + g_main_loop_run (loop); + + + /* Out of the main loop, clean up nicely */ + g_print ("Returned, stopping playback\n"); + gst_element_set_state (pipeline, GST_STATE_NULL); + + g_print ("Deleting pipeline\n"); + gst_object_unref (GST_OBJECT (pipeline)); + + return 0; +} diff --git a/src/test-native/gst/make.sh b/src/test-native/gst/make.sh new file mode 100644 index 000000000..23e54a272 --- /dev/null +++ b/src/test-native/gst/make.sh @@ -0,0 +1,5 @@ +gcc -Wall helloworld.c -o helloworld $(pkg-config --cflags --libs gstreamer-0.10) +gcc -Wall helloworld-auto.c -o helloworld-auto $(pkg-config --cflags --libs gstreamer-0.10) +gcc -Wall helloworld-playbin.c -o helloworld-playbin $(pkg-config --cflags --libs gstreamer-0.10) +gcc -Wall helloworld-playbin2.c -o helloworld-playbin2 $(pkg-config --cflags --libs gstreamer-0.10) + |