Add ReadCompressedFrame to Vorbis wrapper.

- ReadCompressedFrame supersedes ReadCompressedAudio: it returns an AudioFrame
  carrying both the compressed buffer and its pts, instead of a bare byte[]
  plus an out-parameter timestamp array.

Change-Id: Ia843bf03457d84277da6898dcef6817b56bd19d5
diff --git a/JNI/com/google/libvorbis/AudioFrame.java b/JNI/com/google/libvorbis/AudioFrame.java
new file mode 100644
index 0000000..b38cdb3
--- /dev/null
+++ b/JNI/com/google/libvorbis/AudioFrame.java
@@ -0,0 +1,12 @@
+package com.google.libvorbis;
+
+public class AudioFrame {  // Holds one compressed audio frame produced by VorbisEncoderC.ReadCompressedFrame().
+  public byte[] buffer;       // compressed data buffer
+  public long   size;         // length of compressed data
+  public long   pts;          // time stamp to show frame (in timebase units)
+
+  public AudioFrame(long size) {  // Allocates |buffer| for |size| bytes; |pts| is filled in by the JNI layer.
+    this.size = size;
+    buffer = new byte[(int) size];  // NOTE(review): cast truncates if size > Integer.MAX_VALUE; JNI arrays are int-indexed.
+  }
+}
\ No newline at end of file
diff --git a/JNI/com/google/libvorbis/VorbisEncoderC.java b/JNI/com/google/libvorbis/VorbisEncoderC.java
index 7220b9f..0733941 100644
--- a/JNI/com/google/libvorbis/VorbisEncoderC.java
+++ b/JNI/com/google/libvorbis/VorbisEncoderC.java
@@ -13,10 +13,15 @@
     return Encode(nativePointer, jBuffer, jBuffer.length);
   }
 
+  // Deprecated: use ReadCompressedFrame() instead; it returns the buffer and pts together in one AudioFrame.
   public byte[] ReadCompressedAudio(long[] timestamp) {
     return ReadCompressedAudio(nativePointer, timestamp);
   }
 
+  public AudioFrame ReadCompressedFrame() {  // Returns the next compressed frame, or null when none could be read.
+    return ReadCompressedFrame(nativePointer);
+  }
+
   public byte[] CodecPrivate() {
     return CodecPrivate(nativePointer);
   }
@@ -79,6 +84,7 @@
   private static native boolean Init(long jVorbisEncoder, long jVorbisEncoderConfig);
   private static native boolean Encode(long jVorbisEncoder, byte[] jBuffer, int length);
   private static native byte[] ReadCompressedAudio(long jVorbisEncoder, long[] timestamp);
+  private static native AudioFrame ReadCompressedFrame(long jVorbisEncoder);
   private static native byte[] CodecPrivate(long jVorbisEncoder);
 
   private static native int GetChannels(long jVorbisEncoder);
diff --git a/JNI/com/google/utils/AudioFrame.java b/JNI/com/google/utils/AudioFrame.java
deleted file mode 100644
index 389cf5f..0000000
--- a/JNI/com/google/utils/AudioFrame.java
+++ /dev/null
@@ -1,11 +0,0 @@
-package com.google.utils;
-
-public class AudioFrame {
-  public long timestamp;
-  public byte[] frame;
-
-  public AudioFrame(long time, byte[] data) {
-    timestamp = time;
-    frame = data;
-  }
-}
\ No newline at end of file
diff --git a/JNI/examples/EncodeWavExample.java b/JNI/examples/EncodeWavExample.java
index dddad9b..9e4caa6 100755
--- a/JNI/examples/EncodeWavExample.java
+++ b/JNI/examples/EncodeWavExample.java
@@ -1,5 +1,6 @@
 import java.io.File;
 
+import com.google.libvorbis.AudioFrame;
 import com.google.libvorbis.VorbisEncConfig;
 import com.google.libvorbis.VorbisEncoderC;
 import com.google.libwebm.mkvmuxer.AudioTrack;
@@ -8,6 +9,8 @@
 import com.google.libwebm.mkvmuxer.SegmentInfo;
 import com.google.utils.WavReader;
 
+import java.io.File;
+
 public class EncodeWavExample {
   /*
    * This function will encode an audio WebM file. |wavInputName| filename of the source audio. The
@@ -100,12 +103,10 @@
           return false;
         }
 
-        long[] timestamp = new long[2];
-
-        byte[] frame = null;
-        while ((frame = vorbisEncoder.ReadCompressedAudio(timestamp)) != null) {
+        AudioFrame frame = null;
+        while ((frame = vorbisEncoder.ReadCompressedFrame()) != null) {
           if (!muxerSegment.addFrame(
-              frame, newAudioTrackNumber, timestamp[0] * 1000000, true)) {
+              frame.buffer, newAudioTrackNumber, frame.pts * 1000000, true)) {
             error.append("Could not add audio frame.");
             return false;
           }
diff --git a/JNI/examples/EncodeY4mWavExample.java b/JNI/examples/EncodeY4mWavExample.java
index 46bbbc3..e675b18 100755
--- a/JNI/examples/EncodeY4mWavExample.java
+++ b/JNI/examples/EncodeY4mWavExample.java
@@ -1,7 +1,5 @@
-import java.io.File;
-import java.io.IOException;
-import java.util.ArrayList;
 
+import com.google.libvorbis.AudioFrame;
 import com.google.libvorbis.VorbisEncConfig;
 import com.google.libvorbis.VorbisEncoderC;
 import com.google.libvpx.LibVpxEnc;
@@ -15,6 +13,10 @@
 import com.google.utils.WavReader;
 import com.google.utils.Y4MReader;
 
+import java.io.File;
+import java.io.IOException;
+import java.util.ArrayList;
+
 public class EncodeY4mWavExample {
   /*
    * This function will encode an audio and video WebM file. |y4mName| filename of the source video.
@@ -121,9 +123,7 @@
       }
 
       final int maxSamplesToRead = 1000;
-      long[] returnTimestamp = new long[2];
-      long vorbisTimestamp = 0;
-      byte[] vorbisFrame = null;
+      AudioFrame vorbisFrame = null;
       ArrayList<VpxCodecCxPkt> encPkt = null;
       VpxCodecCxPkt pkt = null;
       int pktIndex = 0;
@@ -150,11 +150,11 @@
               return false;
             }
 
-            vorbisFrame = vorbisEncoder.ReadCompressedAudio(returnTimestamp);
+            vorbisFrame = vorbisEncoder.ReadCompressedFrame();
 
             // Matroska is in nanoseconds.
             if (vorbisFrame != null) {
-              vorbisTimestamp = returnTimestamp[0] * 1000000;
+              vorbisFrame.pts *= 1000000;
             }
           } else {
             audioDone = true;
@@ -181,7 +181,7 @@
 
         if ((audioDone && videoDone) || framesIn >= framesToEncode) break;
 
-        if (!videoDone && (audioDone || pkt.pts <= vorbisTimestamp)) {
+        if (!videoDone && (audioDone || vorbisFrame == null || pkt.pts <= vorbisFrame.pts)) {  // null-guard: no audio frame pending yet
           final boolean isKey = (pkt.flags & 0x1) == 1;
           if (!muxerSegment.addFrame(pkt.buffer, newVideoTrackNumber, pkt.pts, isKey)) {
             error.append("Could not add video frame.");
@@ -196,15 +196,15 @@
             encPkt = null;
           }
         } else if (!audioDone) {
-          if (!muxerSegment.addFrame(vorbisFrame, newAudioTrackNumber, vorbisTimestamp, true)) {
+          if (!muxerSegment.addFrame(vorbisFrame.buffer, newAudioTrackNumber, vorbisFrame.pts,
+                                     true)) {
             error.append("Could not add audio frame.");
             return false;
           }
 
-          // Read the next compressed audio frame.
-          vorbisFrame = vorbisEncoder.ReadCompressedAudio(returnTimestamp);
+          vorbisFrame = vorbisEncoder.ReadCompressedFrame();
           if (vorbisFrame != null) {
-            vorbisTimestamp = returnTimestamp[0] * 1000000;
+            vorbisFrame.pts *= 1000000;
           }
         }
       }
diff --git a/JNI/vorbis/VorbisEncoder.cc b/JNI/vorbis/VorbisEncoder.cc
index a3d5cb4..a907096 100644
--- a/JNI/vorbis/VorbisEncoder.cc
+++ b/JNI/vorbis/VorbisEncoder.cc
@@ -1,3 +1,4 @@
+#include <assert.h>
 #include <jni.h>
 #include <new>
 
@@ -81,6 +82,42 @@
   return newByteArray(env, data, length);
 }
 
+FUNCTION(jobject, ReadCompressedFrame, jlong jVorbisEncoder) {  // Returns an AudioFrame, or NULL when no data is pending.
+  vorbis::VorbisEncoder* encoder =
+      reinterpret_cast<vorbis::VorbisEncoder*>(jVorbisEncoder);
+
+  unsigned char* data;
+  int length;
+  int64_t timestamp;
+  if (!encoder->ReadCompressedAudio(&data, &length, &timestamp))
+    return NULL;  // Fix: was `return false;` -- bool is not a valid jobject return value.
+
+  jclass audioFrame = env->FindClass("com/google/libvorbis/AudioFrame");
+  assert(audioFrame != NULL);  // NOTE(review): asserts vanish in NDEBUG builds -- consider real JNI error checks.
+
+  jmethodID afInitMethodId = env->GetMethodID(audioFrame, "<init>", "(J)V");
+  assert(afInitMethodId != NULL);
+
+  jfieldID bufferId = env->GetFieldID(audioFrame, "buffer", "[B");
+  assert(bufferId != NULL);
+
+  jfieldID ptsId = env->GetFieldID(audioFrame, "pts", "J");
+  assert(ptsId != NULL);
+
+  jobject frame = env->NewObject(audioFrame,  // AudioFrame(long size) allocates the byte[] buffer.
+                                 afInitMethodId,
+                                 (jlong)length);
+
+  jobject jba = env->GetObjectField(frame, bufferId);
+  assert(jba != NULL);
+
+  env->SetByteArrayRegion((jbyteArray)jba, 0,
+                          length, reinterpret_cast<jbyte *>(data));
+  env->SetLongField(frame, ptsId, timestamp);
+
+  return frame;
+}
+
 FUNCTION(jbyteArray, CodecPrivate, jlong jVorbisEncoder) {
   vorbis::VorbisEncoder* encoder =
       reinterpret_cast<vorbis::VorbisEncoder*>(jVorbisEncoder);