author     Sven Gothel <[email protected]>    2012-04-16 20:50:06 +0200
committer  Sven Gothel <[email protected]>    2012-04-16 20:50:06 +0200
commit     10935e1ec0d8ed677bc3fddfaa8cd73898a3bcbf (patch)
tree       6d453f72b3024670a6ed5c03454ef54ad4a04ba0
parent     62e5686fb583ad991d5811baf242d40d21952e27 (diff)
Add native tests for libav/ffmpeg and gst
-rw-r--r--  src/jogl/classes/jogamp/opengl/util/av/impl/FFMPEGDynamicLibraryBundleInfo.java  219
-rw-r--r--  src/jogl/classes/jogamp/opengl/util/av/impl/FFMPEGMediaPlayer.java               503
-rw-r--r--  src/test-native/ffmpeg/api-example.c                                             479
-rw-r--r--  src/test-native/ffmpeg/avcodec_sample.c                                          203
-rw-r--r--  src/test-native/ffmpeg/avcodec_sample.sh                                           6
-rw-r--r--  src/test-native/gst/helloworld-auto.c                                            112
-rw-r--r--  src/test-native/gst/helloworld-playbin.c                                          75
-rw-r--r--  src/test-native/gst/helloworld-playbin2.c                                         75
-rw-r--r--  src/test-native/gst/helloworld.c                                                 142
-rw-r--r--  src/test-native/gst/make.sh                                                        5
10 files changed, 1819 insertions, 0 deletions
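
The FFMPEGMediaPlayer diff below queries the runtime libav versions via avcodec_version()/avformat_version()/avutil_version() and unpacks the returned integer in getAVVersion() with plain shifts; the "53.6.0 (opt)" annotations on the optional symbols refer to the same packed scheme. As a standalone C illustration of that packing (not part of the commit; 53.6.0 is just an example value):

```c
#include <stdio.h>

/* libav packs a version as (major << 16) | (minor << 8) | micro;
 * getAVVersion() in the Java diff below applies the inverse shifts. */
int main(void)
{
    const unsigned vers = (53u << 16) | (6u << 8) | 0u;  /* example: 53.6.0 */
    printf("%u.%u.%u\n",
           (vers >> 16) & 0xFF, (vers >> 8) & 0xFF, vers & 0xFF);
    return 0;
}
```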
diff --git a/src/jogl/classes/jogamp/opengl/util/av/impl/FFMPEGDynamicLibraryBundleInfo.java b/src/jogl/classes/jogamp/opengl/util/av/impl/FFMPEGDynamicLibraryBundleInfo.java new file mode 100644 index 000000000..4c4870545 --- /dev/null +++ b/src/jogl/classes/jogamp/opengl/util/av/impl/FFMPEGDynamicLibraryBundleInfo.java @@ -0,0 +1,219 @@ +/** + * Copyright 2012 JogAmp Community. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, are + * permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this list of + * conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, this list + * of conditions and the following disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY JogAmp Community ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND + * FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL JogAmp Community OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON + * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF + * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * The views and conclusions contained in the software and documentation are those of the + * authors and should not be interpreted as representing official policies, either expressed + * or implied, of JogAmp Community. 
+ */ + +package jogamp.opengl.util.av.impl; + +import java.util.ArrayList; +import java.util.Arrays; +import java.util.HashSet; +import java.util.List; +import java.util.Set; + +import javax.media.opengl.GLProfile; + +import com.jogamp.common.os.DynamicLibraryBundle; +import com.jogamp.common.os.DynamicLibraryBundleInfo; +import com.jogamp.common.util.RunnableExecutor; + +class FFMPEGDynamicLibraryBundleInfo implements DynamicLibraryBundleInfo { + private static List<String> glueLibNames = new ArrayList<String>(); // none + + private static final int symbolCount = 29; + private static String[] symbolNames = { + "avcodec_version", + "avformat_version", +/* 3 */ "avutil_version", + + // libavcodec + "avcodec_close", + "avcodec_string", + "avcodec_find_decoder", + "avcodec_open2", // 53.6.0 (opt) + "avcodec_open", + "avcodec_alloc_frame", + "avcodec_default_get_buffer", + "avcodec_default_release_buffer", + "av_free_packet", + "avcodec_decode_audio4", // 53.25.0 (opt) + "avcodec_decode_audio3", // 52.23.0 +/* 15 */ "avcodec_decode_video2", // 52.23.0 + + // libavutil + "av_pix_fmt_descriptors", + "av_free", +/* 18 */ "av_get_bits_per_pixel", + + // libavformat + "avformat_close_input", // 53.17.0 (opt) + "av_close_input_file", + "av_register_all", + "avformat_open_input", + "av_dump_format", + "av_read_frame", + "av_seek_frame", + "avformat_network_init", // 53.13.0 (opt) + "avformat_network_deinit", // 53.13.0 (opt) + "avformat_find_stream_info", // 53.3.0 (opt) +/* 29 */ "av_find_stream_info" + }; + + private static String[] optionalSymbolNames = { + "avcodec_open2", // 53.6.0 (opt) + "avcodec_decode_audio4", // 53.25.0 (opt) + "avformat_close_input", // 53.17.0 (opt) + "avformat_network_init", // 53.13.0 (opt) + "avformat_network_deinit", // 53.13.0 (opt) + "avformat_find_stream_info" // 53.3.0 (opt) + }; + + private static long[] symbolAddr; + private static final boolean ready; + + static { + // native ffmpeg media player implementation is included in jogl_desktop and jogl_mobile + GLProfile.initSingleton(); + boolean _ready = false; + try { + _ready = initSymbols(); + } catch (Throwable t) { + t.printStackTrace(); + } + ready = _ready; + if(!ready) { + System.err.println("FFMPEG: Not Available"); + } + } + + static boolean initSingleton() { return ready; } + + private static boolean initSymbols() { + final DynamicLibraryBundle dl = new DynamicLibraryBundle(new FFMPEGDynamicLibraryBundleInfo()); + final boolean avutilLoaded = dl.isToolLibLoaded(0); + final boolean avformatLoaded = dl.isToolLibLoaded(1); + final boolean avcodecLoaded = dl.isToolLibLoaded(2); + if(!avutilLoaded || !avformatLoaded || !avcodecLoaded) { + throw new RuntimeException("FFMPEG Tool library incomplete: [ avutil "+avutilLoaded+", avformat "+avformatLoaded+", avcodec "+avcodecLoaded+"]"); + } + if(!dl.isToolLibComplete()) { + throw new RuntimeException("FFMPEG Tool libraries incomplete"); + } + symbolAddr = new long[symbolCount]; + + final Set<String> optionalSet = new HashSet<String>(); + optionalSet.addAll(Arrays.asList(optionalSymbolNames)); + + if(!lookupSymbols(dl, symbolNames, optionalSet, symbolAddr, symbolCount)) { + return false; + } + return initSymbols0(symbolAddr, symbolAddr.length); + } + + private static boolean lookupSymbols(DynamicLibraryBundle dl, + String[] symbols, Set<String> optionalSymbols, + long[] addresses, int symbolCount) { + for(int i = 0; i<symbolCount; i++) { + final long addr = dl.dynamicLookupFunction(symbols[i]); + if( 0 == addr ) { + if(!optionalSymbols.contains(symbols[i])) { + 
System.err.println("Could not resolve mandatory symbol <"+symbols[i]+">"); + return false; + } else if(true || DEBUG ) { // keep it verbose per default for now .. + System.err.println("Could not resolve optional symbol <"+symbols[i]+">"); + } + } + addresses[i] = addr; + } + return true; + } + + protected FFMPEGDynamicLibraryBundleInfo() { + } + + @Override + public boolean shallLinkGlobal() { return true; } + + @Override + public boolean shallLookupGlobal() { return true; } + + @Override + public final List<String> getGlueLibNames() { + return glueLibNames; + } + + @Override + public List<List<String>> getToolLibNames() { + List<List<String>> libsList = new ArrayList<List<String>>(); + + final List<String> avutil = new ArrayList<String>(); + avutil.add("avutil"); // default + avutil.add("avutil-52"); // dummy future proof + avutil.add("avutil-51"); // 0.8 + avutil.add("avutil-50"); // 0.7 + libsList.add(avutil); + + final List<String> avformat = new ArrayList<String>(); + avformat.add("avformat"); // default + avformat.add("avformat-55"); // dummy future proof + avformat.add("avformat-54"); // 0.? + avformat.add("avformat-53"); // 0.8 + avformat.add("avformat-52"); // 0.7 + libsList.add(avformat); + + final List<String> avcodec = new ArrayList<String>(); + avcodec.add("avcodec"); // default + avcodec.add("avcodec-55"); // dummy future proof + avcodec.add("avcodec-54"); // 0.? + avcodec.add("avcodec-53"); // 0.8 + avcodec.add("avcodec-52"); // 0.7 + libsList.add(avcodec); + + return libsList; + } + + @Override + public final List<String> getToolGetProcAddressFuncNameList() { + return null; + } + + @Override + public final long toolGetProcAddress(long toolGetProcAddressHandle, String funcName) { + return 0; + } + + @Override + public boolean useToolGetProcAdressFirst(String funcName) { + return false; + } + + @Override + public RunnableExecutor getLibLoaderExecutor() { + return DynamicLibraryBundle.getDefaultRunnableExecutor(); + } + + private static native boolean initSymbols0(long[] symbols, int count); +} diff --git a/src/jogl/classes/jogamp/opengl/util/av/impl/FFMPEGMediaPlayer.java b/src/jogl/classes/jogamp/opengl/util/av/impl/FFMPEGMediaPlayer.java new file mode 100644 index 000000000..d24961b68 --- /dev/null +++ b/src/jogl/classes/jogamp/opengl/util/av/impl/FFMPEGMediaPlayer.java @@ -0,0 +1,503 @@ +/** + * Copyright 2012 JogAmp Community. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without modification, are + * permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, this list of + * conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright notice, this list + * of conditions and the following disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY JogAmp Community ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND + * FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL JogAmp Community OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON + * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING + * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF + * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * The views and conclusions contained in the software and documentation are those of the + * authors and should not be interpreted as representing official policies, either expressed + * or implied, of JogAmp Community. + */ + +package jogamp.opengl.util.av.impl; + +import java.io.IOException; +import java.nio.Buffer; +import java.nio.ByteBuffer; + +import javax.media.opengl.GL; +import javax.media.opengl.GL2ES2; +import javax.media.opengl.GLException; + +import com.jogamp.common.util.VersionNumber; +import com.jogamp.gluegen.runtime.ProcAddressTable; +import com.jogamp.opengl.util.GLPixelStorageModes; +import com.jogamp.opengl.util.texture.Texture; +import com.jogamp.opengl.util.texture.TextureSequence; + +import jogamp.opengl.GLContextImpl; +import jogamp.opengl.es1.GLES1ProcAddressTable; +import jogamp.opengl.es2.GLES2ProcAddressTable; +import jogamp.opengl.gl4.GL4bcProcAddressTable; +import jogamp.opengl.util.av.EGLMediaPlayerImpl; + +/*** + * Currently only YUV420P and the usual packed RGB formats are supported. + * <p> + * Check tag 'FIXME: Add more planar formats !' + * here and in the corresponding native code + * <code>jogl/src/jogl/native/ffmpeg/jogamp_opengl_util_av_impl_FFMPEGMediaPlayer.c</code> + * </p> + * <p> + * TODO: Audio Output + * </p> + */ +public class FFMPEGMediaPlayer extends EGLMediaPlayerImpl { + public static final VersionNumber avUtilVersion; + public static final VersionNumber avFormatVersion; + public static final VersionNumber avCodecVersion; + static final boolean available; + + static { + if(FFMPEGDynamicLibraryBundleInfo.initSingleton()) { + avUtilVersion = getAVVersion(getAvUtilVersion0()); + avFormatVersion = getAVVersion(getAvFormatVersion0()); + avCodecVersion = getAVVersion(getAvCodecVersion0()); + System.err.println("LIB_AV Util : "+avUtilVersion); + System.err.println("LIB_AV Format: "+avFormatVersion); + System.err.println("LIB_AV Codec : "+avCodecVersion); + available = initIDs0(); + } else { + avUtilVersion = null; + avFormatVersion = null; + avCodecVersion = null; + available = false; + } + } + + public static final boolean isAvailable() { return available; } + + private static VersionNumber getAVVersion(int vers) { + return new VersionNumber( ( vers >> 16 ) & 0xFF, + ( vers >> 8 ) & 0xFF, + ( vers >> 0 ) & 0xFF ); + } + + protected long moviePtr = 0; + protected long procAddrGLTexSubImage2D = 0; + protected EGLMediaPlayerImpl.EGLTextureFrame lastTex = null; + protected GLPixelStorageModes psm; + protected PixelFormat vPixelFmt = null; + protected int vPlanes = 0; + protected int vBitsPerPixel = 0; + protected int vBytesPerPixelPerPlane = 0; + protected int[] vLinesize = { 0, 0, 0 }; // per plane + protected int[] vTexWidth = { 0, 0, 0 }; // per plane + protected int texWidth, texHeight; // overall (stuffing planes in one texture) + protected ByteBuffer texCopy; + + public FFMPEGMediaPlayer() { + super(TextureType.GL, false); + if(!available) { + throw new RuntimeException("FFMPEGMediaPlayer not available"); + } + 
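// a single texture frame is used: setTextureCount(1) below, and
// getNextTextureImpl(..) updates it in place via readNextPacket0(..)/glTexSubImage2D.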
setTextureCount(1); + moviePtr = createInstance0(true); + if(0==moviePtr) { + throw new GLException("Couldn't create FFMPEGInstance"); + } + psm = new GLPixelStorageModes(); + } + + @Override + protected TextureSequence.TextureFrame createTexImage(GL gl, int idx, int[] tex) { + if(TextureType.GL == texType) { + final Texture texture = super.createTexImageImpl(gl, idx, tex, texWidth, texHeight, true); + lastTex = new EGLTextureFrame(null, texture, 0, 0); + } else { + throw new InternalError("n/a"); + } + return lastTex; + } + + @Override + protected void destroyTexImage(GL gl, TextureSequence.TextureFrame imgTex) { + lastTex = null; + super.destroyTexImage(gl, imgTex); + } + + @Override + protected void destroyImpl(GL gl) { + if (moviePtr != 0) { + destroyInstance0(moviePtr); + moviePtr = 0; + } + } + + @Override + protected void initGLStreamImpl(GL gl, int[] texNames) throws IOException { + if(0==moviePtr) { + throw new GLException("FFMPEG native instance null"); + } + final String urlS=urlConn.getURL().toExternalForm(); + + System.out.println("setURL: p1 "+this); + setStream0(moviePtr, urlS, -1, -1); + System.out.println("setURL: p2 "+this); + int tf; + switch(vBytesPerPixelPerPlane) { + case 1: tf = GL2ES2.GL_RED; break; + case 3: tf = GL2ES2.GL_RGB; break; + case 4: tf = GL2ES2.GL_RGBA; break; + default: throw new RuntimeException("Unsupported bytes-per-pixel / plane "+vBytesPerPixelPerPlane); + } + setTextureFormat(tf); + setTextureType(GL.GL_UNSIGNED_BYTE); + GLContextImpl ctx = (GLContextImpl)gl.getContext(); + ProcAddressTable pt = ctx.getGLProcAddressTable(); + if(pt instanceof GLES2ProcAddressTable) { + procAddrGLTexSubImage2D = ((GLES2ProcAddressTable)pt)._addressof_glTexSubImage2D; + } else if(pt instanceof GLES1ProcAddressTable) { + procAddrGLTexSubImage2D = ((GLES1ProcAddressTable)pt)._addressof_glTexSubImage2D; + } else if(pt instanceof GL4bcProcAddressTable) { + procAddrGLTexSubImage2D = ((GL4bcProcAddressTable)pt)._addressof_glTexSubImage2D; + } else { + throw new InternalError("Unknown ProcAddressTable: "+pt.getClass().getName()+" of "+ctx.getClass().getName()); + } + } + private void updateAttributes2(int pixFmt, int planes, int bitsPerPixel, int bytesPerPixelPerPlane, + int lSz0, int lSz1, int lSz2, + int tWd0, int tWd1, int tWd2) { + vPixelFmt = PixelFormat.valueOf(pixFmt); + vPlanes = planes; + vBitsPerPixel = bitsPerPixel; + vBytesPerPixelPerPlane = bytesPerPixelPerPlane; + vLinesize[0] = lSz0; vLinesize[1] = lSz1; vLinesize[2] = lSz2; + vTexWidth[0] = tWd0; vTexWidth[1] = tWd1; vTexWidth[2] = tWd2; + + switch(vPixelFmt) { + case YUV420P: + // YUV420P: Adding U+V on right side of fixed height texture, + // since width is already aligned by decoder. + // Y=w*h, Y=w/2*h/2, U=w/2*h/2 + // w*h + 2 ( w/2 * h/2 ) + // w*h + w*h/2 + // 2*w/2 * h + texWidth = vTexWidth[0] + vTexWidth[1]; texHeight = height; + break; + // case PIX_FMT_YUYV422: + case RGB24: + case BGR24: + case ARGB: + case RGBA: + case ABGR: + case BGRA: + texWidth = vTexWidth[0]; texHeight = height; + break; + default: // FIXME: Add more planar formats ! 
+ throw new RuntimeException("Unsupported pixelformat: "+vPixelFmt); + } + if(DEBUG) { + System.err.println("XXX0: fmt "+vPixelFmt+", planes "+vPlanes+", bpp "+vBitsPerPixel+"/"+vBytesPerPixelPerPlane); + for(int i=0; i<3; i++) { + System.err.println("XXX0 "+i+": "+vTexWidth[i]+"/"+vLinesize[i]); + } + System.err.println("XXX0 total tex "+texWidth+"x"+texHeight); + } + } + + /** + * {@inheritDoc} + * + * If this implementation generates a specialized shader, + * it allows the user to override the default function name <code>ffmpegTexture2D</code>. + * Otherwise the call is delegated to it's super class. + */ + @Override + public String getTextureLookupFunctionName(String desiredFuncName) throws IllegalStateException { + if(State.Uninitialized == state) { + throw new IllegalStateException("Instance not initialized: "+this); + } + if(PixelFormat.YUV420P == vPixelFmt) { + if(null != desiredFuncName && desiredFuncName.length()>0) { + textureLookupFunctionName = desiredFuncName; + } + return textureLookupFunctionName; + } + return super.getTextureLookupFunctionName(desiredFuncName); + } + private String textureLookupFunctionName = "ffmpegTexture2D"; + + /** + * {@inheritDoc} + * + * Depending on the pixelformat, a specific conversion shader is being created, + * e.g. YUV420P to RGB. Otherwise the call is delegated to it's super class. + */ + @Override + public String getTextureLookupFragmentShaderImpl() throws IllegalStateException { + if(State.Uninitialized == state) { + throw new IllegalStateException("Instance not initialized: "+this); + } + final float tc_w_1 = (float)getWidth() / (float)texWidth; + switch(vPixelFmt) { + case YUV420P: + return + "vec4 "+textureLookupFunctionName+"(in "+getTextureSampler2DType()+" image, in vec2 texCoord) {\n"+ + " vec2 u_off = vec2("+tc_w_1+", 0.0);\n"+ + " vec2 v_off = vec2("+tc_w_1+", 0.5);\n"+ + " vec2 tc_half = texCoord*0.5;\n"+ + " float y,u,v,r,g,b;\n"+ + " y = texture2D(image, texCoord).r;\n"+ + " u = texture2D(image, u_off+tc_half).r;\n"+ + " v = texture2D(image, v_off+tc_half).r;\n"+ + " y = 1.1643*(y-0.0625);\n"+ + " u = u-0.5;\n"+ + " v = v-0.5;\n"+ + " r = y+1.5958*v;\n"+ + " g = y-0.39173*u-0.81290*v;\n"+ + " b = y+2.017*u;\n"+ + " return vec4(r, g, b, 1);\n"+ + "}\n" + ; + default: // FIXME: Add more planar formats ! + return super.getTextureLookupFragmentShaderImpl(); + } + } + + @Override + protected synchronized int getCurrentPositionImpl() { + return 0!=moviePtr ? 
getVideoPTS0(moviePtr) : 0; + } + + @Override + protected synchronized boolean setPlaySpeedImpl(float rate) { + return true; + } + + @Override + public synchronized boolean startImpl() { + if(0==moviePtr) { + return false; + } + return true; + } + + /** @return time position after issuing the command */ + @Override + public synchronized boolean pauseImpl() { + if(0==moviePtr) { + return false; + } + return true; + } + + /** @return time position after issuing the command */ + @Override + public synchronized boolean stopImpl() { + if(0==moviePtr) { + return false; + } + return true; + } + + /** @return time position after issuing the command */ + @Override + protected synchronized int seekImpl(int msec) { + if(0==moviePtr) { + throw new GLException("FFMPEG native instance null"); + } + int pts0 = getVideoPTS0(moviePtr); + int pts1 = seek0(moviePtr, msec); + System.err.println("Seek: "+pts0+" -> "+msec+" : "+pts1); + return pts1; + } + + @Override + protected TextureSequence.TextureFrame getLastTextureImpl() { + return lastTex; + } + + private long lastVideoTime = 0; + private int lastVideoPTS = 0; + private static final int dt_d = 9; + + @Override + protected TextureSequence.TextureFrame getNextTextureImpl(GL gl, boolean blocking) { + if(0==moviePtr) { + throw new GLException("FFMPEG native instance null"); + } + if(null != lastTex) { + psm.setUnpackAlignment(gl, 1); // RGBA ? 4 : 1 + try { + final Texture tex = lastTex.getTexture(); + gl.glActiveTexture(GL.GL_TEXTURE0+getTextureUnit()); + tex.enable(gl); + tex.bind(gl); + readNextPacket0(moviePtr, procAddrGLTexSubImage2D, textureTarget, textureFormat, textureType); + } finally { + psm.restore(gl); + } + final int pts = getVideoPTS0(moviePtr); // this frame + if(blocking) { + // poor mans video sync .. TODO: off thread 'readNextPackage0(..)' on shared GLContext and multi textures/unit! + final long now = System.currentTimeMillis(); + final long now_d = now - lastVideoTime; + final long pts_d = pts - lastVideoPTS; + final long dt = (long) ( (float) ( pts_d - now_d ) / getPlaySpeed() ) ; + lastVideoTime = now; + // System.err.println("s: pts-v "+pts+", pts-d "+pts_d+", now_d "+now_d+", dt "+dt); + if(dt>dt_d) { + try { + Thread.sleep(dt-dt_d); + } catch (InterruptedException e) { } + } /* else if(0>pts_d) { + System.err.println("s: pts-v "+pts+", pts-d "+pts_d+", now_d "+now_d+", dt "+dt); + } */ + } + lastVideoPTS = pts; + } + return lastTex; + } + + private void consumeAudio(int len) { + + } + + private static native int getAvUtilVersion0(); + private static native int getAvFormatVersion0(); + private static native int getAvCodecVersion0(); + private static native boolean initIDs0(); + private native long createInstance0(boolean verbose); + private native void destroyInstance0(long moviePtr); + + private native void setStream0(long moviePtr, String url, int vid, int aid); + + private native int getVideoPTS0(long moviePtr); + + private native int getAudioPTS0(long moviePtr); + private native Buffer getAudioBuffer0(long moviePtr, int plane); + + private native int readNextPacket0(long moviePtr, long procAddrGLTexSubImage2D, int texTarget, int texFmt, int texType); + + private native int seek0(long moviePtr, int position); + + public static enum PixelFormat { + // NONE= -1, + YUV420P, ///< planar YUV 4:2:0, 12bpp, (1 Cr & Cb sample per 2x2 Y samples) + YUYV422, ///< packed YUV 4:2:2, 16bpp, Y0 Cb Y1 Cr + RGB24, ///< packed RGB 8:8:8, 24bpp, RGBRGB... + BGR24, ///< packed RGB 8:8:8, 24bpp, BGRBGR... 
+ YUV422P, ///< planar YUV 4:2:2, 16bpp, (1 Cr & Cb sample per 2x1 Y samples) + YUV444P, ///< planar YUV 4:4:4, 24bpp, (1 Cr & Cb sample per 1x1 Y samples) + YUV410P, ///< planar YUV 4:1:0, 9bpp, (1 Cr & Cb sample per 4x4 Y samples) + YUV411P, ///< planar YUV 4:1:1, 12bpp, (1 Cr & Cb sample per 4x1 Y samples) + GRAY8, ///< Y , 8bpp + MONOWHITE, ///< Y , 1bpp, 0 is white, 1 is black, in each byte pixels are ordered from the msb to the lsb + MONOBLACK, ///< Y , 1bpp, 0 is black, 1 is white, in each byte pixels are ordered from the msb to the lsb + PAL8, ///< 8 bit with RGB32 palette + YUVJ420P, ///< planar YUV 4:2:0, 12bpp, full scale (JPEG), deprecated in favor of YUV420P and setting color_range + YUVJ422P, ///< planar YUV 4:2:2, 16bpp, full scale (JPEG), deprecated in favor of YUV422P and setting color_range + YUVJ444P, ///< planar YUV 4:4:4, 24bpp, full scale (JPEG), deprecated in favor of YUV444P and setting color_range + XVMC_MPEG2_MC,///< XVideo Motion Acceleration via common packet passing + XVMC_MPEG2_IDCT, + UYVY422, ///< packed YUV 4:2:2, 16bpp, Cb Y0 Cr Y1 + UYYVYY411, ///< packed YUV 4:1:1, 12bpp, Cb Y0 Y1 Cr Y2 Y3 + BGR8, ///< packed RGB 3:3:2, 8bpp, (msb)2B 3G 3R(lsb) + BGR4, ///< packed RGB 1:2:1 bitstream, 4bpp, (msb)1B 2G 1R(lsb), a byte contains two pixels, the first pixel in the byte is the one composed by the 4 msb bits + BGR4_BYTE, ///< packed RGB 1:2:1, 8bpp, (msb)1B 2G 1R(lsb) + RGB8, ///< packed RGB 3:3:2, 8bpp, (msb)2R 3G 3B(lsb) + RGB4, ///< packed RGB 1:2:1 bitstream, 4bpp, (msb)1R 2G 1B(lsb), a byte contains two pixels, the first pixel in the byte is the one composed by the 4 msb bits + RGB4_BYTE, ///< packed RGB 1:2:1, 8bpp, (msb)1R 2G 1B(lsb) + NV12, ///< planar YUV 4:2:0, 12bpp, 1 plane for Y and 1 plane for the UV components, which are interleaved (first byte U and the following byte V) + NV21, ///< as above, but U and V bytes are swapped + + ARGB, ///< packed ARGB 8:8:8:8, 32bpp, ARGBARGB... + RGBA, ///< packed RGBA 8:8:8:8, 32bpp, RGBARGBA... + ABGR, ///< packed ABGR 8:8:8:8, 32bpp, ABGRABGR... + BGRA, ///< packed BGRA 8:8:8:8, 32bpp, BGRABGRA... 
+ + GRAY16BE, ///< Y , 16bpp, big-endian + GRAY16LE, ///< Y , 16bpp, little-endian + YUV440P, ///< planar YUV 4:4:0 (1 Cr & Cb sample per 1x2 Y samples) + YUVJ440P, ///< planar YUV 4:4:0 full scale (JPEG), deprecated in favor of YUV440P and setting color_range + YUVA420P, ///< planar YUV 4:2:0, 20bpp, (1 Cr & Cb sample per 2x2 Y & A samples) + VDPAU_H264,///< H.264 HW decoding with VDPAU, data[0] contains a vdpau_render_state struct which contains the bitstream of the slices as well as various fields extracted from headers + VDPAU_MPEG1,///< MPEG-1 HW decoding with VDPAU, data[0] contains a vdpau_render_state struct which contains the bitstream of the slices as well as various fields extracted from headers + VDPAU_MPEG2,///< MPEG-2 HW decoding with VDPAU, data[0] contains a vdpau_render_state struct which contains the bitstream of the slices as well as various fields extracted from headers + VDPAU_WMV3,///< WMV3 HW decoding with VDPAU, data[0] contains a vdpau_render_state struct which contains the bitstream of the slices as well as various fields extracted from headers + VDPAU_VC1, ///< VC-1 HW decoding with VDPAU, data[0] contains a vdpau_render_state struct which contains the bitstream of the slices as well as various fields extracted from headers + RGB48BE, ///< packed RGB 16:16:16, 48bpp, 16R, 16G, 16B, the 2-byte value for each R/G/B component is stored as big-endian + RGB48LE, ///< packed RGB 16:16:16, 48bpp, 16R, 16G, 16B, the 2-byte value for each R/G/B component is stored as little-endian + + RGB565BE, ///< packed RGB 5:6:5, 16bpp, (msb) 5R 6G 5B(lsb), big-endian + RGB565LE, ///< packed RGB 5:6:5, 16bpp, (msb) 5R 6G 5B(lsb), little-endian + RGB555BE, ///< packed RGB 5:5:5, 16bpp, (msb)1A 5R 5G 5B(lsb), big-endian, most significant bit to 0 + RGB555LE, ///< packed RGB 5:5:5, 16bpp, (msb)1A 5R 5G 5B(lsb), little-endian, most significant bit to 0 + + BGR565BE, ///< packed BGR 5:6:5, 16bpp, (msb) 5B 6G 5R(lsb), big-endian + BGR565LE, ///< packed BGR 5:6:5, 16bpp, (msb) 5B 6G 5R(lsb), little-endian + BGR555BE, ///< packed BGR 5:5:5, 16bpp, (msb)1A 5B 5G 5R(lsb), big-endian, most significant bit to 1 + BGR555LE, ///< packed BGR 5:5:5, 16bpp, (msb)1A 5B 5G 5R(lsb), little-endian, most significant bit to 1 + + VAAPI_MOCO, ///< HW acceleration through VA API at motion compensation entry-point, Picture.data[3] contains a vaapi_render_state struct which contains macroblocks as well as various fields extracted from headers + VAAPI_IDCT, ///< HW acceleration through VA API at IDCT entry-point, Picture.data[3] contains a vaapi_render_state struct which contains fields extracted from headers + VAAPI_VLD, ///< HW decoding through VA API, Picture.data[3] contains a vaapi_render_state struct which contains the bitstream of the slices as well as various fields extracted from headers + + YUV420P16LE, ///< planar YUV 4:2:0, 24bpp, (1 Cr & Cb sample per 2x2 Y samples), little-endian + YUV420P16BE, ///< planar YUV 4:2:0, 24bpp, (1 Cr & Cb sample per 2x2 Y samples), big-endian + YUV422P16LE, ///< planar YUV 4:2:2, 32bpp, (1 Cr & Cb sample per 2x1 Y samples), little-endian + YUV422P16BE, ///< planar YUV 4:2:2, 32bpp, (1 Cr & Cb sample per 2x1 Y samples), big-endian + YUV444P16LE, ///< planar YUV 4:4:4, 48bpp, (1 Cr & Cb sample per 1x1 Y samples), little-endian + YUV444P16BE, ///< planar YUV 4:4:4, 48bpp, (1 Cr & Cb sample per 1x1 Y samples), big-endian + VDPAU_MPEG4, ///< MPEG4 HW decoding with VDPAU, data[0] contains a vdpau_render_state struct which contains the bitstream of the slices as well as 
various fields extracted from headers + DXVA2_VLD, ///< HW decoding through DXVA2, Picture.data[3] contains a LPDIRECT3DSURFACE9 pointer + + RGB444LE, ///< packed RGB 4:4:4, 16bpp, (msb)4A 4R 4G 4B(lsb), little-endian, most significant bits to 0 + RGB444BE, ///< packed RGB 4:4:4, 16bpp, (msb)4A 4R 4G 4B(lsb), big-endian, most significant bits to 0 + BGR444LE, ///< packed BGR 4:4:4, 16bpp, (msb)4A 4B 4G 4R(lsb), little-endian, most significant bits to 1 + BGR444BE, ///< packed BGR 4:4:4, 16bpp, (msb)4A 4B 4G 4R(lsb), big-endian, most significant bits to 1 + Y400A, ///< 8bit gray, 8bit alpha + BGR48BE, ///< packed RGB 16:16:16, 48bpp, 16B, 16G, 16R, the 2-byte value for each R/G/B component is stored as big-endian + BGR48LE, ///< packed RGB 16:16:16, 48bpp, 16B, 16G, 16R, the 2-byte value for each R/G/B component is stored as little-endian + YUV420P9BE, ///< planar YUV 4:2:0, 13.5bpp, (1 Cr & Cb sample per 2x2 Y samples), big-endian + YUV420P9LE, ///< planar YUV 4:2:0, 13.5bpp, (1 Cr & Cb sample per 2x2 Y samples), little-endian + YUV420P10BE,///< planar YUV 4:2:0, 15bpp, (1 Cr & Cb sample per 2x2 Y samples), big-endian + YUV420P10LE,///< planar YUV 4:2:0, 15bpp, (1 Cr & Cb sample per 2x2 Y samples), little-endian + YUV422P10BE,///< planar YUV 4:2:2, 20bpp, (1 Cr & Cb sample per 2x1 Y samples), big-endian + YUV422P10LE,///< planar YUV 4:2:2, 20bpp, (1 Cr & Cb sample per 2x1 Y samples), little-endian + YUV444P9BE, ///< planar YUV 4:4:4, 27bpp, (1 Cr & Cb sample per 1x1 Y samples), big-endian + YUV444P9LE, ///< planar YUV 4:4:4, 27bpp, (1 Cr & Cb sample per 1x1 Y samples), little-endian + YUV444P10BE,///< planar YUV 4:4:4, 30bpp, (1 Cr & Cb sample per 1x1 Y samples), big-endian + YUV444P10LE,///< planar YUV 4:4:4, 30bpp, (1 Cr & Cb sample per 1x1 Y samples), little-endian + YUV422P9BE, ///< planar YUV 4:2:2, 18bpp, (1 Cr & Cb sample per 2x1 Y samples), big-endian + YUV422P9LE, ///< planar YUV 4:2:2, 18bpp, (1 Cr & Cb sample per 2x1 Y samples), little-endian + VDA_VLD, ///< hardware decoding through VDA + GBRP, ///< planar GBR 4:4:4 24bpp + GBRP9BE, ///< planar GBR 4:4:4 27bpp, big endian + GBRP9LE, ///< planar GBR 4:4:4 27bpp, little endian + GBRP10BE, ///< planar GBR 4:4:4 30bpp, big endian + GBRP10LE, ///< planar GBR 4:4:4 30bpp, little endian + GBRP16BE, ///< planar GBR 4:4:4 48bpp, big endian + GBRP16LE, ///< planar GBR 4:4:4 48bpp, little endian + COUNT ///< number of pixel formats in this list + ; + public static PixelFormat valueOf(int i) { + for (PixelFormat fmt : PixelFormat.values()) { + if(fmt.ordinal() == i) { + return fmt; + } + } + return null; + } + } + +} + diff --git a/src/test-native/ffmpeg/api-example.c b/src/test-native/ffmpeg/api-example.c new file mode 100644 index 000000000..970a90eab --- /dev/null +++ b/src/test-native/ffmpeg/api-example.c @@ -0,0 +1,479 @@ +/* + * copyright (c) 2001 Fabrice Bellard + * + * This file is part of Libav. + * + * Libav is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * Libav is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. 
+ * + * You should have received a copy of the GNU Lesser General Public + * License along with Libav; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +/** + * @file + * libavcodec API use example. + * + * @example libavcodec/api-example.c + * Note that this library only handles codecs (mpeg, mpeg4, etc...), + * not file formats (avi, vob, etc...). See library 'libavformat' for the + * format handling + */ + +#include <stdlib.h> +#include <stdio.h> +#include <string.h> + +#ifdef HAVE_AV_CONFIG_H +#undef HAVE_AV_CONFIG_H +#endif + +#include "libavcodec/avcodec.h" +#include "libavutil/mathematics.h" +#include "libavutil/samplefmt.h" + +#define INBUF_SIZE 4096 +#define AUDIO_INBUF_SIZE 20480 +#define AUDIO_REFILL_THRESH 4096 + +/* + * Audio encoding example + */ +static void audio_encode_example(const char *filename) +{ + AVCodec *codec; + AVCodecContext *c= NULL; + int frame_size, i, j, out_size, outbuf_size; + FILE *f; + short *samples; + float t, tincr; + uint8_t *outbuf; + + printf("Audio encoding\n"); + + /* find the MP2 encoder */ + codec = avcodec_find_encoder(CODEC_ID_MP2); + if (!codec) { + fprintf(stderr, "codec not found\n"); + exit(1); + } + + c = avcodec_alloc_context3(codec); + + /* put sample parameters */ + c->bit_rate = 64000; + c->sample_rate = 44100; + c->channels = 2; + + /* open it */ + if (avcodec_open(c, codec) < 0) { + fprintf(stderr, "could not open codec\n"); + exit(1); + } + + /* the codec gives us the frame size, in samples */ + frame_size = c->frame_size; + samples = malloc(frame_size * 2 * c->channels); + outbuf_size = 10000; + outbuf = malloc(outbuf_size); + + f = fopen(filename, "wb"); + if (!f) { + fprintf(stderr, "could not open %s\n", filename); + exit(1); + } + + /* encode a single tone sound */ + t = 0; + tincr = 2 * M_PI * 440.0 / c->sample_rate; + for(i=0;i<200;i++) { + for(j=0;j<frame_size;j++) { + samples[2*j] = (int)(sin(t) * 10000); + samples[2*j+1] = samples[2*j]; + t += tincr; + } + /* encode the samples */ + out_size = avcodec_encode_audio(c, outbuf, outbuf_size, samples); + fwrite(outbuf, 1, out_size, f); + } + fclose(f); + free(outbuf); + free(samples); + + avcodec_close(c); + av_free(c); +} + +/* + * Audio decoding. 
+ */ +static void audio_decode_example(const char *outfilename, const char *filename) +{ + AVCodec *codec; + AVCodecContext *c= NULL; + int len; + FILE *f, *outfile; + uint8_t inbuf[AUDIO_INBUF_SIZE + FF_INPUT_BUFFER_PADDING_SIZE]; + AVPacket avpkt; + AVFrame *decoded_frame = NULL; + + av_init_packet(&avpkt); + + printf("Audio decoding\n"); + + /* find the mpeg audio decoder */ + codec = avcodec_find_decoder(CODEC_ID_MP2); + if (!codec) { + fprintf(stderr, "codec not found\n"); + exit(1); + } + + c = avcodec_alloc_context3(codec); + + /* open it */ + if (avcodec_open(c, codec) < 0) { + fprintf(stderr, "could not open codec\n"); + exit(1); + } + + f = fopen(filename, "rb"); + if (!f) { + fprintf(stderr, "could not open %s\n", filename); + exit(1); + } + outfile = fopen(outfilename, "wb"); + if (!outfile) { + av_free(c); + exit(1); + } + + /* decode until eof */ + avpkt.data = inbuf; + avpkt.size = fread(inbuf, 1, AUDIO_INBUF_SIZE, f); + + while (avpkt.size > 0) { + int got_frame = 0; + + if (!decoded_frame) { + if (!(decoded_frame = avcodec_alloc_frame())) { + fprintf(stderr, "out of memory\n"); + exit(1); + } + } else + avcodec_get_frame_defaults(decoded_frame); + + len = avcodec_decode_audio4(c, decoded_frame, &got_frame, &avpkt); + if (len < 0) { + fprintf(stderr, "Error while decoding\n"); + exit(1); + } + if (got_frame) { + /* if a frame has been decoded, output it */ + int data_size = av_samples_get_buffer_size(NULL, c->channels, + decoded_frame->nb_samples, + c->sample_fmt, 1); + fwrite(decoded_frame->data[0], 1, data_size, outfile); + } + avpkt.size -= len; + avpkt.data += len; + if (avpkt.size < AUDIO_REFILL_THRESH) { + /* Refill the input buffer, to avoid trying to decode + * incomplete frames. Instead of this, one could also use + * a parser, or use a proper container format through + * libavformat. 
*/ + memmove(inbuf, avpkt.data, avpkt.size); + avpkt.data = inbuf; + len = fread(avpkt.data + avpkt.size, 1, + AUDIO_INBUF_SIZE - avpkt.size, f); + if (len > 0) + avpkt.size += len; + } + } + + fclose(outfile); + fclose(f); + + avcodec_close(c); + av_free(c); + av_free(decoded_frame); +} + +/* + * Video encoding example + */ +static void video_encode_example(const char *filename) +{ + AVCodec *codec; + AVCodecContext *c= NULL; + int i, out_size, size, x, y, outbuf_size; + FILE *f; + AVFrame *picture; + uint8_t *outbuf, *picture_buf; + + printf("Video encoding\n"); + + /* find the mpeg1 video encoder */ + codec = avcodec_find_encoder(CODEC_ID_MPEG1VIDEO); + if (!codec) { + fprintf(stderr, "codec not found\n"); + exit(1); + } + + c = avcodec_alloc_context3(codec); + picture= avcodec_alloc_frame(); + + /* put sample parameters */ + c->bit_rate = 400000; + /* resolution must be a multiple of two */ + c->width = 352; + c->height = 288; + /* frames per second */ + c->time_base= (AVRational){1,25}; + c->gop_size = 10; /* emit one intra frame every ten frames */ + c->max_b_frames=1; + c->pix_fmt = PIX_FMT_YUV420P; + + /* open it */ + if (avcodec_open(c, codec) < 0) { + fprintf(stderr, "could not open codec\n"); + exit(1); + } + + f = fopen(filename, "wb"); + if (!f) { + fprintf(stderr, "could not open %s\n", filename); + exit(1); + } + + /* alloc image and output buffer */ + outbuf_size = 100000; + outbuf = malloc(outbuf_size); + size = c->width * c->height; + picture_buf = malloc((size * 3) / 2); /* size for YUV 420 */ + + picture->data[0] = picture_buf; + picture->data[1] = picture->data[0] + size; + picture->data[2] = picture->data[1] + size / 4; + picture->linesize[0] = c->width; + picture->linesize[1] = c->width / 2; + picture->linesize[2] = c->width / 2; + + /* encode 1 second of video */ + for(i=0;i<25;i++) { + fflush(stdout); + /* prepare a dummy image */ + /* Y */ + for(y=0;y<c->height;y++) { + for(x=0;x<c->width;x++) { + picture->data[0][y * picture->linesize[0] + x] = x + y + i * 3; + } + } + + /* Cb and Cr */ + for(y=0;y<c->height/2;y++) { + for(x=0;x<c->width/2;x++) { + picture->data[1][y * picture->linesize[1] + x] = 128 + y + i * 2; + picture->data[2][y * picture->linesize[2] + x] = 64 + x + i * 5; + } + } + + /* encode the image */ + out_size = avcodec_encode_video(c, outbuf, outbuf_size, picture); + printf("encoding frame %3d (size=%5d)\n", i, out_size); + fwrite(outbuf, 1, out_size, f); + } + + /* get the delayed frames */ + for(; out_size; i++) { + fflush(stdout); + + out_size = avcodec_encode_video(c, outbuf, outbuf_size, NULL); + printf("write frame %3d (size=%5d)\n", i, out_size); + fwrite(outbuf, 1, out_size, f); + } + + /* add sequence end code to have a real mpeg file */ + outbuf[0] = 0x00; + outbuf[1] = 0x00; + outbuf[2] = 0x01; + outbuf[3] = 0xb7; + fwrite(outbuf, 1, 4, f); + fclose(f); + free(picture_buf); + free(outbuf); + + avcodec_close(c); + av_free(c); + av_free(picture); + printf("\n"); +} + +/* + * Video decoding example + */ + +static void pgm_save(unsigned char *buf, int wrap, int xsize, int ysize, + char *filename) +{ + FILE *f; + int i; + + f=fopen(filename,"w"); + fprintf(f,"P5\n%d %d\n%d\n",xsize,ysize,255); + for(i=0;i<ysize;i++) + fwrite(buf + i * wrap,1,xsize,f); + fclose(f); +} + +static void video_decode_example(const char *outfilename, const char *filename) +{ + AVCodec *codec; + AVCodecContext *c= NULL; + int frame, got_picture, len; + FILE *f; + AVFrame *picture; + uint8_t inbuf[INBUF_SIZE + FF_INPUT_BUFFER_PADDING_SIZE]; + char buf[1024]; + 
AVPacket avpkt; + + av_init_packet(&avpkt); + + /* set end of buffer to 0 (this ensures that no overreading happens for damaged mpeg streams) */ + memset(inbuf + INBUF_SIZE, 0, FF_INPUT_BUFFER_PADDING_SIZE); + + printf("Video decoding\n"); + + /* find the mpeg1 video decoder */ + codec = avcodec_find_decoder(CODEC_ID_MPEG1VIDEO); + if (!codec) { + fprintf(stderr, "codec not found\n"); + exit(1); + } + + c = avcodec_alloc_context3(codec); + picture= avcodec_alloc_frame(); + + if(codec->capabilities&CODEC_CAP_TRUNCATED) + c->flags|= CODEC_FLAG_TRUNCATED; /* we do not send complete frames */ + + /* For some codecs, such as msmpeg4 and mpeg4, width and height + MUST be initialized there because this information is not + available in the bitstream. */ + + /* open it */ + if (avcodec_open(c, codec) < 0) { + fprintf(stderr, "could not open codec\n"); + exit(1); + } + + /* the codec gives us the frame size, in samples */ + + f = fopen(filename, "rb"); + if (!f) { + fprintf(stderr, "could not open %s\n", filename); + exit(1); + } + + frame = 0; + for(;;) { + avpkt.size = fread(inbuf, 1, INBUF_SIZE, f); + if (avpkt.size == 0) + break; + + /* NOTE1: some codecs are stream based (mpegvideo, mpegaudio) + and this is the only method to use them because you cannot + know the compressed data size before analysing it. + + BUT some other codecs (msmpeg4, mpeg4) are inherently frame + based, so you must call them with all the data for one + frame exactly. You must also initialize 'width' and + 'height' before initializing them. */ + + /* NOTE2: some codecs allow the raw parameters (frame size, + sample rate) to be changed at any frame. We handle this, so + you should also take care of it */ + + /* here, we use a stream based decoder (mpeg1video), so we + feed decoder and see if it could decode a frame */ + avpkt.data = inbuf; + while (avpkt.size > 0) { + len = avcodec_decode_video2(c, picture, &got_picture, &avpkt); + if (len < 0) { + fprintf(stderr, "Error while decoding frame %d\n", frame); + exit(1); + } + if (got_picture) { + printf("saving frame %3d\n", frame); + fflush(stdout); + + /* the picture is allocated by the decoder. no need to + free it */ + snprintf(buf, sizeof(buf), outfilename, frame); + pgm_save(picture->data[0], picture->linesize[0], + c->width, c->height, buf); + frame++; + } + avpkt.size -= len; + avpkt.data += len; + } + } + + /* some codecs, such as MPEG, transmit the I and P frame with a + latency of one frame. You must do the following to have a + chance to get the last frame of the video */ + avpkt.data = NULL; + avpkt.size = 0; + len = avcodec_decode_video2(c, picture, &got_picture, &avpkt); + if (got_picture) { + printf("saving last frame %3d\n", frame); + fflush(stdout); + + /* the picture is allocated by the decoder. 
no need to + free it */ + snprintf(buf, sizeof(buf), outfilename, frame); + pgm_save(picture->data[0], picture->linesize[0], + c->width, c->height, buf); + frame++; + } + + fclose(f); + + avcodec_close(c); + av_free(c); + av_free(picture); + printf("\n"); +} + +int main(int argc, char **argv) +{ + const char *filename; + + /* must be called before using avcodec lib */ + avcodec_init(); + + /* register all the codecs */ + avcodec_register_all(); + + if (argc <= 1) { + audio_encode_example("/tmp/test.mp2"); + audio_decode_example("/tmp/test.sw", "/tmp/test.mp2"); + + video_encode_example("/tmp/test.mpg"); + filename = "/tmp/test.mpg"; + } else { + filename = argv[1]; + } + + // audio_decode_example("/tmp/test.sw", filename); + video_decode_example("/tmp/test%d.pgm", filename); + + return 0; +} diff --git a/src/test-native/ffmpeg/avcodec_sample.c b/src/test-native/ffmpeg/avcodec_sample.c new file mode 100644 index 000000000..f4001b4e6 --- /dev/null +++ b/src/test-native/ffmpeg/avcodec_sample.c @@ -0,0 +1,203 @@ +// avcodec_sample.0.5.0.c + +// A small sample program that shows how to use libavformat and libavcodec to +// read video from a file. +// +// This version is for the 0.4.9+ release of ffmpeg. This release adds the +// av_read_frame() API call, which simplifies the reading of video frames +// considerably. +// +// Use +// +// gcc -o avcodec_sample.0.5.0 avcodec_sample.0.5.0.c -lavformat -lavcodec -lavutil -lswscale -lz -lbz2 +// +// to build (assuming libavformat, libavcodec, libavutil, and swscale are correctly installed on +// your system). +// +// Run using +// +// avcodec_sample.0.5.0 myvideofile.mpg +// +// to write the first five frames from "myvideofile.mpg" to disk in PPM +// format. + +#include <libavcodec/avcodec.h> +#include <libavformat/avformat.h> +#include <libswscale/swscale.h> + +#include <stdio.h> +#include <stdlib.h> +#include <stdbool.h> + +static void SaveFrame(AVFrame *pFrame, int width, int height, int iFrame); + +int main (int argc, const char * argv[]) +{ + AVFormatContext *pFormatCtx; + int i, videoStream; + AVCodecContext *pCodecCtx; + AVCodec *pCodec; + AVFrame *pFrame; + AVFrame *pFrameRGB; + AVPacket packet; + int frameFinished; + int numBytes; + uint8_t *buffer; + + // Register all formats and codecs + av_register_all(); + + avformat_network_init(); + + // Open video file + if(avformat_open_input(&pFormatCtx, argv[1], NULL, NULL)!=0) + return -1; // Couldn't open file + + // Retrieve stream information + if(avformat_find_stream_info(pFormatCtx, NULL)<0) + return -1; // Couldn't find stream information + + // Dump information about file onto standard error + av_dump_format(pFormatCtx, 0, argv[1], false); + + // Find the first video stream + videoStream=-1; + for(i=0; i<pFormatCtx->nb_streams; i++) + if(pFormatCtx->streams[i]->codec->codec_type==AVMEDIA_TYPE_VIDEO) + { + videoStream=i; + break; + } + if(videoStream==-1) + return -1; // Didn't find a video stream + + // Get a pointer to the codec context for the video stream + pCodecCtx=pFormatCtx->streams[videoStream]->codec; + + // Find the decoder for the video stream + pCodec=avcodec_find_decoder(pCodecCtx->codec_id); + if(pCodec==NULL) + return -1; // Codec not found + + // Open codec + if(avcodec_open2(pCodecCtx, pCodec, NULL)<0) + return -1; // Could not open codec + + // Hack to correct wrong frame rates that seem to be generated by some codecs + if(pCodecCtx->time_base.num>1000 && pCodecCtx->time_base.den==1) + pCodecCtx->time_base.den=1000; + + // Allocate video frame + 
pFrame=avcodec_alloc_frame(); + + // Allocate an AVFrame structure + pFrameRGB=avcodec_alloc_frame(); + if(pFrameRGB==NULL) + return -1; + + // Determine required buffer size and allocate buffer + numBytes=avpicture_get_size(PIX_FMT_RGB24, pCodecCtx->width, + pCodecCtx->height); + + buffer=malloc(numBytes); + + // Assign appropriate parts of buffer to image planes in pFrameRGB + avpicture_fill((AVPicture *)pFrameRGB, buffer, PIX_FMT_RGB24, + pCodecCtx->width, pCodecCtx->height); + + // Read frames and save first five frames to disk + i=0; + while(av_read_frame(pFormatCtx, &packet)>=0) + { + // Is this a packet from the video stream? + if(packet.stream_index==videoStream) + { + // Decode video frame + avcodec_decode_video2(pCodecCtx, pFrame, &frameFinished, &packet); + + // Did we get a video frame? + if(frameFinished) + { + static struct SwsContext *img_convert_ctx; + +#if 0 + // Older removed code + // Convert the image from its native format to RGB swscale + img_convert((AVPicture *)pFrameRGB, PIX_FMT_RGB24, + (AVPicture*)pFrame, pCodecCtx->pix_fmt, pCodecCtx->width, + pCodecCtx->height); + + // function template, for reference + int sws_scale(struct SwsContext *context, uint8_t* src[], int srcStride[], int srcSliceY, + int srcSliceH, uint8_t* dst[], int dstStride[]); +#endif + // Convert the image into YUV format that SDL uses + if(img_convert_ctx == NULL) { + int w = pCodecCtx->width; + int h = pCodecCtx->height; + + img_convert_ctx = sws_getContext(w, h, + pCodecCtx->pix_fmt, + w, h, PIX_FMT_RGB24, SWS_BICUBIC, + NULL, NULL, NULL); + if(img_convert_ctx == NULL) { + fprintf(stderr, "Cannot initialize the conversion context!\n"); + exit(1); + } + } + int ret = sws_scale(img_convert_ctx, pFrame->data, pFrame->linesize, 0, + pCodecCtx->height, pFrameRGB->data, pFrameRGB->linesize); +#if 0 // this use to be true, as of 1/2009, but apparently it is no longer true in 3/2009 + if(ret) { + fprintf(stderr, "SWS_Scale failed [%d]!\n", ret); + exit(-1); + } +#endif + // Save the frame to disk + if(i++<=5) + SaveFrame(pFrameRGB, pCodecCtx->width, pCodecCtx->height, i); + } + } + + // Free the packet that was allocated by av_read_frame + av_free_packet(&packet); + } + + // Free the RGB image + free(buffer); + av_free(pFrameRGB); + + // Free the YUV frame + av_free(pFrame); + + // Close the codec + avcodec_close(pCodecCtx); + + // Close the video file + avformat_close_input(&pFormatCtx); + + return 0; +} + +static void SaveFrame(AVFrame *pFrame, int width, int height, int iFrame) +{ + FILE *pFile; + char szFilename[32]; + int y; + + // Open file + sprintf(szFilename, "frame%d.ppm", iFrame); + pFile=fopen(szFilename, "wb"); + if(pFile==NULL) + return; + + // Write header + fprintf(pFile, "P6\n%d %d\n255\n", width, height); + + // Write pixel data + for(y=0; y<height; y++) + fwrite(pFrame->data[0]+y*pFrame->linesize[0], 1, width*3, pFile); + + // Close file + fclose(pFile); +} diff --git a/src/test-native/ffmpeg/avcodec_sample.sh b/src/test-native/ffmpeg/avcodec_sample.sh new file mode 100644 index 000000000..c5ccd6caf --- /dev/null +++ b/src/test-native/ffmpeg/avcodec_sample.sh @@ -0,0 +1,6 @@ +# -I/usr/include/libavcodec -I/usr/include/libavformat \ + +gcc \ + -o avcodec_sample avcodec_sample.c \ + -lavformat -lavcodec -lavutil -lswscale + diff --git a/src/test-native/gst/helloworld-auto.c b/src/test-native/gst/helloworld-auto.c new file mode 100644 index 000000000..6381c9c93 --- /dev/null +++ b/src/test-native/gst/helloworld-auto.c @@ -0,0 +1,112 @@ +#include <gst/gst.h> +#include <glib.h> + + 
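/* helloworld-auto: runs the file named on the command line through a
 * filesrc -> typefind -> fakesink pipeline; cb_typefound() below prints the
 * media type GStreamer detects, and an idle handler then quits the main loop. */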
+static gboolean +my_bus_callback (GstBus *bus, + GstMessage *msg, + gpointer data) +{ + GMainLoop *loop = (GMainLoop *) data; + + switch (GST_MESSAGE_TYPE (msg)) { + + case GST_MESSAGE_EOS: + g_print ("End of stream\n"); + g_main_loop_quit (loop); + break; + + case GST_MESSAGE_ERROR: { + gchar *debug; + GError *error; + + gst_message_parse_error (msg, &error, &debug); + g_free (debug); + + g_printerr ("Error: %s\n", error->message); + g_error_free (error); + + g_main_loop_quit (loop); + break; + } + default: + break; + } + + return TRUE; +} + + +static gboolean +idle_exit_loop (gpointer data) +{ + g_main_loop_quit ((GMainLoop *) data); + + /* once */ + return FALSE; +} + +static void +cb_typefound (GstElement *typefind, + guint probability, + GstCaps *caps, + gpointer data) +{ + GMainLoop *loop = data; + gchar *type; + + type = gst_caps_to_string (caps); + g_print ("Media type %s found, probability %d%%\n", type, probability); + g_free (type); + + /* since we connect to a signal in the pipeline thread context, we need + * to set an idle handler to exit the main loop in the mainloop context. + * Normally, your app should not need to worry about such things. */ + g_idle_add (idle_exit_loop, loop); +} + +gint +main (gint argc, + gchar *argv[]) +{ + GMainLoop *loop; + GstElement *pipeline, *filesrc, *typefind, *fakesink; + GstBus *bus; + + /* init GStreamer */ + gst_init (&argc, &argv); + loop = g_main_loop_new (NULL, FALSE); + + /* check args */ + if (argc != 2) { + g_print ("Usage: %s <filename>\n", argv[0]); + return -1; + } + + /* create a new pipeline to hold the elements */ + pipeline = gst_pipeline_new ("pipe"); + + bus = gst_pipeline_get_bus (GST_PIPELINE (pipeline)); + gst_bus_add_watch (bus, my_bus_callback, NULL); + gst_object_unref (bus); + + /* create file source and typefind element */ + filesrc = gst_element_factory_make ("filesrc", "source"); + g_object_set (G_OBJECT (filesrc), "location", argv[1], NULL); + typefind = gst_element_factory_make ("typefind", "typefinder"); + g_signal_connect (typefind, "have-type", G_CALLBACK (cb_typefound), loop); + fakesink = gst_element_factory_make ("fakesink", "sink"); + + /* setup */ + gst_bin_add_many (GST_BIN (pipeline), filesrc, typefind, fakesink, NULL); + gst_element_link_many (filesrc, typefind, fakesink, NULL); + gst_element_set_state (GST_ELEMENT (pipeline), GST_STATE_PLAYING); + g_main_loop_run (loop); + + /* unset */ + gst_element_set_state (GST_ELEMENT (pipeline), GST_STATE_NULL); + gst_object_unref (GST_OBJECT (pipeline)); + + return 0; +} + diff --git a/src/test-native/gst/helloworld-playbin.c b/src/test-native/gst/helloworld-playbin.c new file mode 100644 index 000000000..c5a42fe6c --- /dev/null +++ b/src/test-native/gst/helloworld-playbin.c @@ -0,0 +1,75 @@ +#include <gst/gst.h> +#include <glib.h> + + +static gboolean +my_bus_callback (GstBus *bus, + GstMessage *msg, + gpointer data) +{ + GMainLoop *loop = (GMainLoop *) data; + + switch (GST_MESSAGE_TYPE (msg)) { + + case GST_MESSAGE_EOS: + g_print ("End of stream\n"); + g_main_loop_quit (loop); + break; + + case GST_MESSAGE_ERROR: { + gchar *debug; + GError *error; + + gst_message_parse_error (msg, &error, &debug); + g_free (debug); + + g_printerr ("Error: %s\n", error->message); + g_error_free (error); + + g_main_loop_quit (loop); + break; + } + default: + break; + } + + return TRUE; +} + +gint +main (gint argc, + gchar *argv[]) +{ + GMainLoop *loop; + GstElement *play; + GstBus *bus; + + /* init GStreamer */ + gst_init (&argc, &argv); + loop = g_main_loop_new (NULL, 
FALSE); + + /* make sure we have a URI */ + if (argc != 2) { + g_print ("Usage: %s <URI>\n", argv[0]); + return -1; + } + + /* set up */ + play = gst_element_factory_make ("playbin", "play"); + g_object_set (G_OBJECT (play), "uri", argv[1], NULL); + + bus = gst_pipeline_get_bus (GST_PIPELINE (play)); + gst_bus_add_watch (bus, my_bus_callback, loop); + gst_object_unref (bus); + + gst_element_set_state (play, GST_STATE_PLAYING); + + /* now run */ + g_main_loop_run (loop); + + /* also clean up */ + gst_element_set_state (play, GST_STATE_NULL); + gst_object_unref (GST_OBJECT (play)); + + return 0; +} diff --git a/src/test-native/gst/helloworld-playbin2.c b/src/test-native/gst/helloworld-playbin2.c new file mode 100644 index 000000000..b31e3e5c6 --- /dev/null +++ b/src/test-native/gst/helloworld-playbin2.c @@ -0,0 +1,75 @@ +#include <gst/gst.h> +#include <glib.h> + + +static gboolean +my_bus_callback (GstBus *bus, + GstMessage *msg, + gpointer data) +{ + GMainLoop *loop = (GMainLoop *) data; + + switch (GST_MESSAGE_TYPE (msg)) { + + case GST_MESSAGE_EOS: + g_print ("End of stream\n"); + g_main_loop_quit (loop); + break; + + case GST_MESSAGE_ERROR: { + gchar *debug; + GError *error; + + gst_message_parse_error (msg, &error, &debug); + g_free (debug); + + g_printerr ("Error: %s\n", error->message); + g_error_free (error); + + g_main_loop_quit (loop); + break; + } + default: + break; + } + + return TRUE; +} + +gint +main (gint argc, + gchar *argv[]) +{ + GMainLoop *loop; + GstElement *play; + GstBus *bus; + + /* init GStreamer */ + gst_init (&argc, &argv); + loop = g_main_loop_new (NULL, FALSE); + + /* make sure we have a URI */ + if (argc != 2) { + g_print ("Usage: %s <URI>\n", argv[0]); + return -1; + } + + /* set up */ + play = gst_element_factory_make ("playbin2", "play"); + g_object_set (G_OBJECT (play), "uri", argv[1], NULL); + + bus = gst_pipeline_get_bus (GST_PIPELINE (play)); + gst_bus_add_watch (bus, my_bus_callback, loop); + gst_object_unref (bus); + + gst_element_set_state (play, GST_STATE_PLAYING); + + /* now run */ + g_main_loop_run (loop); + + /* also clean up */ + gst_element_set_state (play, GST_STATE_NULL); + gst_object_unref (GST_OBJECT (play)); + + return 0; +} diff --git a/src/test-native/gst/helloworld.c b/src/test-native/gst/helloworld.c new file mode 100644 index 000000000..6d991898d --- /dev/null +++ b/src/test-native/gst/helloworld.c @@ -0,0 +1,142 @@ +#include <gst/gst.h> +#include <glib.h> + + +static gboolean +bus_call (GstBus *bus, + GstMessage *msg, + gpointer data) +{ + GMainLoop *loop = (GMainLoop *) data; + + switch (GST_MESSAGE_TYPE (msg)) { + + case GST_MESSAGE_EOS: + g_print ("End of stream\n"); + g_main_loop_quit (loop); + break; + + case GST_MESSAGE_ERROR: { + gchar *debug; + GError *error; + + gst_message_parse_error (msg, &error, &debug); + g_free (debug); + + g_printerr ("Error: %s\n", error->message); + g_error_free (error); + + g_main_loop_quit (loop); + break; + } + default: + break; + } + + return TRUE; +} + + +static void +on_pad_added (GstElement *element, + GstPad *pad, + gpointer data) +{ + GstPad *sinkpad; + GstElement *decoder = (GstElement *) data; + + /* We can now link this pad with the vorbis-decoder sink pad */ + g_print ("Dynamic pad created, linking demuxer/decoder\n"); + + sinkpad = gst_element_get_static_pad (decoder, "sink"); + + gst_pad_link (pad, sinkpad); + + gst_object_unref (sinkpad); +} + + + +int +main (int argc, + char *argv[]) +{ + GMainLoop *loop; + + GstElement *pipeline, *source, *demuxer, *decoder, *conv, *sink; + GstBus 
*bus;
+
+  /* Initialisation */
+  gst_init (&argc, &argv);
+
+  loop = g_main_loop_new (NULL, FALSE);
+
+
+  /* Check input arguments */
+  if (argc != 2) {
+    g_printerr ("Usage: %s <Ogg/Vorbis filename>\n", argv[0]);
+    return -1;
+  }
+
+
+  /* Create gstreamer elements */
+  pipeline = gst_pipeline_new ("audio-player");
+  source   = gst_element_factory_make ("filesrc",       "file-source");
+  demuxer  = gst_element_factory_make ("oggdemux",      "ogg-demuxer");
+  decoder  = gst_element_factory_make ("vorbisdec",     "vorbis-decoder");
+  conv     = gst_element_factory_make ("audioconvert",  "converter");
+  sink     = gst_element_factory_make ("autoaudiosink", "audio-output");
+
+  if (!pipeline || !source || !demuxer || !decoder || !conv || !sink) {
+    g_printerr ("One element could not be created. Exiting.\n");
+    return -1;
+  }
+
+  /* Set up the pipeline */
+
+  /* we set the input filename to the source element */
+  g_object_set (G_OBJECT (source), "location", argv[1], NULL);
+
+  /* we add a message handler */
+  bus = gst_pipeline_get_bus (GST_PIPELINE (pipeline));
+  gst_bus_add_watch (bus, bus_call, loop);
+  gst_object_unref (bus);
+
+  /* we add all elements into the pipeline */
+  /* file-source | ogg-demuxer | vorbis-decoder | converter | alsa-output */
+  gst_bin_add_many (GST_BIN (pipeline),
+                    source, demuxer, decoder, conv, sink, NULL);
+
+  /* we link the elements together */
+  /* file-source -> ogg-demuxer ~> vorbis-decoder -> converter -> alsa-output */
+  gst_element_link (source, demuxer);
+  gst_element_link_many (decoder, conv, sink, NULL);
+  g_signal_connect (demuxer, "pad-added", G_CALLBACK (on_pad_added), decoder);
+
+  /* note that the demuxer will be linked to the decoder dynamically.
+     The reason is that Ogg may contain various streams (for example
+     audio and video). The source pad(s) will be created at run time,
+     by the demuxer when it detects the amount and nature of streams.
+     Therefore we connect a callback function which will be executed
+     when the "pad-added" is emitted. */
+
+
+  /* Set the pipeline to "playing" state */
+  g_print ("Now playing: %s\n", argv[1]);
+  gst_element_set_state (pipeline, GST_STATE_PLAYING);
+
+
+  /* Iterate */
+  g_print ("Running...\n");
+  g_main_loop_run (loop);
+
+
+  /* Out of the main loop, clean up nicely */
+  g_print ("Returned, stopping playback\n");
+  gst_element_set_state (pipeline, GST_STATE_NULL);
+
+  g_print ("Deleting pipeline\n");
+  gst_object_unref (GST_OBJECT (pipeline));
+
+  return 0;
+}
diff --git a/src/test-native/gst/make.sh b/src/test-native/gst/make.sh
new file mode 100644
index 000000000..23e54a272
--- /dev/null
+++ b/src/test-native/gst/make.sh
@@ -0,0 +1,5 @@
+gcc -Wall helloworld.c -o helloworld $(pkg-config --cflags --libs gstreamer-0.10)
+gcc -Wall helloworld-auto.c -o helloworld-auto $(pkg-config --cflags --libs gstreamer-0.10)
+gcc -Wall helloworld-playbin.c -o helloworld-playbin $(pkg-config --cflags --libs gstreamer-0.10)
+gcc -Wall helloworld-playbin2.c -o helloworld-playbin2 $(pkg-config --cflags --libs gstreamer-0.10)
+
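
For the YUV420P path added in FFMPEGMediaPlayer above, the decoded planes are packed into one texture (U and V to the right of Y, so a w x h frame needs a (w + w/2) x h texture, per updateAttributes2()), and the generated ffmpegTexture2D shader converts video-range YCbCr to RGB. A standalone C sketch of the same per-texel arithmetic, with made-up sample values for illustration:

```c
#include <stdio.h>

/* Per-texel math of the ffmpegTexture2D shader above (BT.601 video range).
 * y, u, v are normalized texture samples in [0,1]; u = v = 0.5 is neutral chroma. */
static void yuv_to_rgb(float y, float u, float v, float *r, float *g, float *b)
{
    y  = 1.1643f * (y - 0.0625f);
    u -= 0.5f;
    v -= 0.5f;
    *r = y + 1.5958f  * v;
    *g = y - 0.39173f * u - 0.81290f * v;
    *b = y + 2.017f   * u;
}

int main(void)
{
    /* YUV420P packing from updateAttributes2(): U and V planes (each w/2 x h/2)
     * sit to the right of the w x h Y plane -- hypothetical 320x240 frame. */
    const int w = 320, h = 240;
    printf("backing texture: %dx%d\n", w + w / 2, h);  /* 480x240 */

    float r, g, b;
    yuv_to_rgb(0.5f, 0.5f, 0.5f, &r, &g, &b);          /* mid grey */
    printf("r=%.3f g=%.3f b=%.3f\n", r, g, b);
    return 0;
}
```

The coefficients (1.1643, 1.5958, 0.39173, 0.81290, 2.017) are the usual BT.601 video-range constants and match the shader string built in getTextureLookupFragmentShaderImpl() above.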