Updated stable to r2950

git-svn-id: http://webrtc.googlecode.com/svn/stable/src@2951 4adac7df-926f-26a2-2b94-8c16560cd09d
diff --git a/build/common.gypi b/build/common.gypi
index 6d02f20..d57b265 100644
--- a/build/common.gypi
+++ b/build/common.gypi
@@ -85,6 +85,10 @@
 
         # Disable the use of protocol buffers in production code.
         'enable_protobuf%': 0,
+
+        # Default to not building with Mozilla's internal Opus version.
+        'build_with_mozilla%': 0,
+
       }, {  # Settings for the standalone (not-in-Chromium) build.
         'include_pulse_audio%': 1,
         'include_internal_audio_device%': 1,
diff --git a/common_audio/vad/vad_core.c b/common_audio/vad/vad_core.c
index 1e9053f..6a36349 100644
--- a/common_audio/vad/vad_core.c
+++ b/common_audio/vad/vad_core.c
@@ -504,6 +504,9 @@
   memset(self->downsampling_filter_states, 0,
          sizeof(self->downsampling_filter_states));
 
+  // Initialization of 48 to 8 kHz downsampling.
+  WebRtcSpl_ResetResample48khzTo8khz(&self->state_48_to_8);
+
   // Read initial PDF parameters.
   for (i = 0; i < kTableSize; i++) {
     self->noise_means[i] = kNoiseDataMeans[i];
@@ -600,6 +603,31 @@
 // Calculate VAD decision by first extracting feature values and then calculate
 // probability for both speech and background noise.
 
+int WebRtcVad_CalcVad48khz(VadInstT* inst, int16_t* speech_frame,
+                           int frame_length) {
+  int vad;
+  int i;
+  int16_t speech_nb[240];  // 30 ms in 8 kHz.
+  // |tmp_mem| is temporary memory used by the resample function; its length
+  // is one 10 ms frame at 48 kHz (480 samples) plus 256 extra samples.
+  int32_t tmp_mem[480 + 256] = { 0 };
+  const int kFrameLen10ms48khz = 480;
+  const int kFrameLen10ms8khz = 80;
+  int num_10ms_frames = frame_length / kFrameLen10ms48khz;
+
+  for (i = 0; i < num_10ms_frames; i++) {
+    WebRtcSpl_Resample48khzTo8khz(&speech_frame[i * kFrameLen10ms48khz],
+                                  &speech_nb[i * kFrameLen10ms8khz],
+                                  &inst->state_48_to_8,
+                                  tmp_mem);
+  }
+
+  // Do VAD on the 8 kHz signal.
+  vad = WebRtcVad_CalcVad8khz(inst, speech_nb, frame_length / 6);
+
+  return vad;
+}
+
 int WebRtcVad_CalcVad32khz(VadInstT* inst, int16_t* speech_frame,
                            int frame_length)
 {
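
For orientation: each 10 ms block of 480 samples at 48 kHz is resampled 6:1
down to 80 samples, so a 30 ms frame (1440 samples) yields the 240 samples
that fit speech_nb, and the final WebRtcVad_CalcVad8khz call sees
frame_length / 6 samples. A minimal standalone sketch of that bookkeeping
(not part of the patch, no WebRTC dependencies):

    #include <cassert>

    int main() {
      const int kValid48khzFrames[] = {480, 960, 1440};  // 10, 20, 30 ms.
      for (int frame_length : kValid48khzFrames) {
        int num_10ms_frames = frame_length / 480;  // Whole 10 ms blocks.
        int nb_samples = num_10ms_frames * 80;     // After 6:1 downsampling.
        assert(nb_samples == frame_length / 6);    // Matches the 8 kHz call.
        assert(nb_samples <= 240);                 // Fits speech_nb[240].
      }
      return 0;
    }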
diff --git a/common_audio/vad/vad_core.h b/common_audio/vad/vad_core.h
index 00d39a4..b89d5df 100644
--- a/common_audio/vad/vad_core.h
+++ b/common_audio/vad/vad_core.h
@@ -16,6 +16,7 @@
 #ifndef WEBRTC_COMMON_AUDIO_VAD_VAD_CORE_H_
 #define WEBRTC_COMMON_AUDIO_VAD_VAD_CORE_H_
 
+#include "common_audio/signal_processing/include/signal_processing_library.h"
 #include "typedefs.h"
 
 enum { kNumChannels = 6 };  // Number of frequency bands (named channels).
@@ -28,6 +29,7 @@
 
     int vad;
     int32_t downsampling_filter_states[4];
+    WebRtcSpl_State48khzTo8khz state_48_to_8;
     int16_t noise_means[kTableSize];
     int16_t speech_means[kTableSize];
     int16_t noise_stds[kTableSize];
@@ -82,6 +84,7 @@
 int WebRtcVad_set_mode_core(VadInstT* self, int mode);
 
 /****************************************************************************
+ * WebRtcVad_CalcVad48khz(...)
  * WebRtcVad_CalcVad32khz(...) 
  * WebRtcVad_CalcVad16khz(...) 
  * WebRtcVad_CalcVad8khz(...) 
@@ -100,6 +103,8 @@
  *                        0 - No active speech
  *                        1-6 - Active speech
  */
+int WebRtcVad_CalcVad48khz(VadInstT* inst, int16_t* speech_frame,
+                           int frame_length);
 int WebRtcVad_CalcVad32khz(VadInstT* inst, int16_t* speech_frame,
                            int frame_length);
 int WebRtcVad_CalcVad16khz(VadInstT* inst, int16_t* speech_frame,
diff --git a/common_audio/vad/vad_core_unittest.cc b/common_audio/vad/vad_core_unittest.cc
index 141b796..0c5648f 100644
--- a/common_audio/vad/vad_core_unittest.cc
+++ b/common_audio/vad/vad_core_unittest.cc
@@ -75,6 +75,9 @@
     if (ValidRatesAndFrameLengths(32000, kFrameLengths[j])) {
       EXPECT_EQ(0, WebRtcVad_CalcVad32khz(self, speech, kFrameLengths[j]));
     }
+    if (ValidRatesAndFrameLengths(48000, kFrameLengths[j])) {
+      EXPECT_EQ(0, WebRtcVad_CalcVad48khz(self, speech, kFrameLengths[j]));
+    }
   }
 
   // Construct a speech signal that will trigger the VAD in all modes. It is
@@ -92,6 +95,9 @@
     if (ValidRatesAndFrameLengths(32000, kFrameLengths[j])) {
       EXPECT_EQ(1, WebRtcVad_CalcVad32khz(self, speech, kFrameLengths[j]));
     }
+    if (ValidRatesAndFrameLengths(48000, kFrameLengths[j])) {
+      EXPECT_EQ(1, WebRtcVad_CalcVad48khz(self, speech, kFrameLengths[j]));
+    }
   }
 
   free(self);
diff --git a/common_audio/vad/vad_sp_unittest.cc b/common_audio/vad/vad_sp_unittest.cc
index 2b25316..632117f 100644
--- a/common_audio/vad/vad_sp_unittest.cc
+++ b/common_audio/vad/vad_sp_unittest.cc
@@ -23,10 +23,11 @@
 
 TEST_F(VadTest, vad_sp) {
   VadInstT* self = reinterpret_cast<VadInstT*>(malloc(sizeof(VadInstT)));
-  int16_t zeros[kMaxFrameLength] = { 0 };
+  const int kMaxFrameLenSp = 960;  // Maximum frame length in this unittest.
+  int16_t zeros[kMaxFrameLenSp] = { 0 };
   int32_t state[2] = { 0 };
-  int16_t data_in[kMaxFrameLength];
-  int16_t data_out[kMaxFrameLength];
+  int16_t data_in[kMaxFrameLenSp];
+  int16_t data_out[kMaxFrameLenSp];
 
   // We expect the first value to be 1600 as long as |frame_counter| is zero,
   // which is true for the first iteration.
@@ -39,20 +40,18 @@
 
   // Construct a speech signal that will trigger the VAD in all modes. It is
   // known that (i * i) will wrap around, but that doesn't matter in this case.
-  for (int16_t i = 0; i < kMaxFrameLength; ++i) {
+  for (int16_t i = 0; i < kMaxFrameLenSp; ++i) {
     data_in[i] = (i * i);
   }
   // Input values all zeros, expect all zeros out.
-  WebRtcVad_Downsampling(zeros, data_out, state,
-                         static_cast<int>(kMaxFrameLength));
+  WebRtcVad_Downsampling(zeros, data_out, state, kMaxFrameLenSp);
   EXPECT_EQ(0, state[0]);
   EXPECT_EQ(0, state[1]);
-  for (int16_t i = 0; i < kMaxFrameLength / 2; ++i) {
+  for (int16_t i = 0; i < kMaxFrameLenSp / 2; ++i) {
     EXPECT_EQ(0, data_out[i]);
   }
   // Make a simple non-zero data test.
-  WebRtcVad_Downsampling(data_in, data_out, state,
-                         static_cast<int>(kMaxFrameLength));
+  WebRtcVad_Downsampling(data_in, data_out, state, kMaxFrameLenSp);
   EXPECT_EQ(207, state[0]);
   EXPECT_EQ(2270, state[1]);
 
diff --git a/common_audio/vad/vad_unittest.cc b/common_audio/vad/vad_unittest.cc
index b31217c..3e66853 100644
--- a/common_audio/vad/vad_unittest.cc
+++ b/common_audio/vad/vad_unittest.cc
@@ -36,12 +36,16 @@
       return true;
     }
     return false;
-  }
-  if (rate == 32000) {
+  } else if (rate == 32000) {
     if (frame_length == 320 || frame_length == 640 || frame_length == 960) {
       return true;
     }
     return false;
+  } else if (rate == 48000) {
+    if (frame_length == 480 || frame_length == 960 || frame_length == 1440) {
+      return true;
+    }
+    return false;
   }
 
   return false;
@@ -122,15 +126,26 @@
 
 TEST_F(VadTest, ValidRatesFrameLengths) {
   // This test verifies valid and invalid rate/frame_length combinations. We
-  // loop through sampling rates and frame lengths from negative values to
+  // loop through some sampling rates and frame lengths from negative values to
   // values larger than possible.
-  for (int16_t rate = -1; rate <= kRates[kRatesSize - 1] + 1; rate++) {
-    for (int16_t frame_length = -1; frame_length <= kMaxFrameLength + 1;
-        frame_length++) {
-      if (ValidRatesAndFrameLengths(rate, frame_length)) {
-        EXPECT_EQ(0, WebRtcVad_ValidRateAndFrameLength(rate, frame_length));
+  const int kNumRates = 12;
+  const int kRates[kNumRates] = {
+    -8000, -4000, 0, 4000, 8000, 8001, 15999, 16000, 32000, 48000, 48001, 96000
+  };
+
+  const int kNumFrameLengths = 13;
+  const int kFrameLengths[kNumFrameLengths] = {
+    -10, 0, 80, 81, 159, 160, 240, 320, 480, 640, 960, 1440, 2000
+  };
+
+  for (int i = 0; i < kNumRates; i++) {
+    for (int j = 0; j < kNumFrameLengths; j++) {
+      if (ValidRatesAndFrameLengths(kRates[i], kFrameLengths[j])) {
+        EXPECT_EQ(0, WebRtcVad_ValidRateAndFrameLength(kRates[i],
+                                                       kFrameLengths[j]));
       } else {
-        EXPECT_EQ(-1, WebRtcVad_ValidRateAndFrameLength(rate, frame_length));
+        EXPECT_EQ(-1, WebRtcVad_ValidRateAndFrameLength(kRates[i],
+                                                        kFrameLengths[j]));
       }
     }
   }
diff --git a/common_audio/vad/vad_unittest.h b/common_audio/vad/vad_unittest.h
index 3069801..a42e86f 100644
--- a/common_audio/vad/vad_unittest.h
+++ b/common_audio/vad/vad_unittest.h
@@ -24,12 +24,12 @@
 const size_t kModesSize = sizeof(kModes) / sizeof(*kModes);
 
 // Rates we support.
-const int kRates[] = { 8000, 12000, 16000, 24000, 32000 };
+const int kRates[] = { 8000, 12000, 16000, 24000, 32000, 48000 };
 const size_t kRatesSize = sizeof(kRates) / sizeof(*kRates);
 
 // Frame lengths we support.
-const int kMaxFrameLength = 960;
-const int kFrameLengths[] = { 80, 120, 160, 240, 320, 480, 640,
+const int kMaxFrameLength = 1440;
+const int kFrameLengths[] = { 80, 120, 160, 240, 320, 480, 640, 960,
     kMaxFrameLength };
 const size_t kFrameLengthsSize = sizeof(kFrameLengths) / sizeof(*kFrameLengths);
 
diff --git a/common_audio/vad/webrtc_vad.c b/common_audio/vad/webrtc_vad.c
index 6999211..dad9d73 100644
--- a/common_audio/vad/webrtc_vad.c
+++ b/common_audio/vad/webrtc_vad.c
@@ -18,7 +18,7 @@
 #include "typedefs.h"
 
 static const int kInitCheck = 42;
-static const int kValidRates[] = { 8000, 16000, 32000 };
+static const int kValidRates[] = { 8000, 16000, 32000, 48000 };
 static const size_t kRatesSize = sizeof(kValidRates) / sizeof(*kValidRates);
 static const int kMaxFrameLengthMs = 30;
 
@@ -93,7 +93,9 @@
     return -1;
   }
 
-  if (fs == 32000) {
+  if (fs == 48000) {
+      vad = WebRtcVad_CalcVad48khz(self, audio_frame, frame_length);
+  } else if (fs == 32000) {
     vad = WebRtcVad_CalcVad32khz(self, audio_frame, frame_length);
   } else if (fs == 16000) {
     vad = WebRtcVad_CalcVad16khz(self, audio_frame, frame_length);
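
With this dispatch in place, 48 kHz audio goes through the ordinary public
VAD API. A hedged usage sketch (the include path, helper name, and error
handling are assumptions for illustration; the WebRtcVad_* calls are the
existing public API from webrtc_vad.h):

    #include "common_audio/vad/include/webrtc_vad.h"

    int VadOn48khzFrame(int16_t* frame) {
      VadInst* handle = NULL;
      int vad = -1;
      if (WebRtcVad_Create(&handle) != 0)
        return -1;
      if (WebRtcVad_Init(handle) == 0) {
        // 30 ms at 48 kHz = 1440 samples, one of the newly valid lengths.
        vad = WebRtcVad_Process(handle, 48000, frame, 1440);
      }
      WebRtcVad_Free(handle);
      return vad;  // 1 = active speech, 0 = passive, -1 = error.
    }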
diff --git a/common_video/i420_video_frame.cc b/common_video/i420_video_frame.cc
index ca709ef..def6452 100644
--- a/common_video/i420_video_frame.cc
+++ b/common_video/i420_video_frame.cc
@@ -56,9 +56,12 @@
 }
 
 int I420VideoFrame::CopyFrame(const I420VideoFrame& videoFrame) {
-  int ret = CreateFrame(videoFrame.size(kYPlane), videoFrame.buffer(kYPlane),
-                        videoFrame.size(kUPlane), videoFrame.buffer(kUPlane),
-                        videoFrame.size(kVPlane), videoFrame.buffer(kVPlane),
+  int ret = CreateFrame(videoFrame.allocated_size(kYPlane),
+                        videoFrame.buffer(kYPlane),
+                        videoFrame.allocated_size(kUPlane),
+                        videoFrame.buffer(kUPlane),
+                        videoFrame.allocated_size(kVPlane),
+                        videoFrame.buffer(kVPlane),
                         videoFrame.width_, videoFrame.height_,
                         videoFrame.stride(kYPlane), videoFrame.stride(kUPlane),
                         videoFrame.stride(kVPlane));
@@ -93,7 +96,7 @@
   return NULL;
 }
 
-int I420VideoFrame::size(PlaneType type) const {
+int I420VideoFrame::allocated_size(PlaneType type) const {
   const Plane* plane_ptr = GetPlane(type);
     if (plane_ptr)
       return plane_ptr->allocated_size();
@@ -125,6 +128,17 @@
   return 0;
 }
 
+bool I420VideoFrame::IsZeroSize() {
+  return (y_plane_.IsZeroSize() && u_plane_.IsZeroSize() &&
+          v_plane_.IsZeroSize());
+}
+
+void I420VideoFrame::ResetSize() {
+  y_plane_.ResetSize();
+  u_plane_.ResetSize();
+  v_plane_.ResetSize();
+}
+
 int I420VideoFrame::CheckDimensions(int width, int height,
                                     int stride_y, int stride_u, int stride_v) {
   int half_width = (width + 1) / 2;
diff --git a/common_video/i420_video_frame_unittest.cc b/common_video/i420_video_frame_unittest.cc
index e0d22a5..0eb1c2e 100644
--- a/common_video/i420_video_frame_unittest.cc
+++ b/common_video/i420_video_frame_unittest.cc
@@ -26,12 +26,14 @@
 TEST(TestI420VideoFrame, InitialValues) {
   I420VideoFrame frame;
   // Invalid arguments - one call for each variable.
+  EXPECT_TRUE(frame.IsZeroSize());
   EXPECT_EQ(-1, frame.CreateEmptyFrame(0, 10, 10, 14, 14));
   EXPECT_EQ(-1, frame.CreateEmptyFrame(10, -1, 10, 90, 14));
   EXPECT_EQ(-1, frame.CreateEmptyFrame(10, 10, 0, 14, 18));
   EXPECT_EQ(-1, frame.CreateEmptyFrame(10, 10, 10, -2, 13));
   EXPECT_EQ(-1, frame.CreateEmptyFrame(10, 10, 10, 14, 0));
   EXPECT_EQ(0, frame.CreateEmptyFrame(10, 10, 10, 14, 90));
+  EXPECT_FALSE(frame.IsZeroSize());
 }
 
 TEST(TestI420VideoFrame, WidthHeightValues) {
@@ -59,9 +61,20 @@
   int stride_u = frame.stride(kUPlane);
   int stride_v = frame.stride(kVPlane);
   // Verify that allocated size was computed correctly.
-  EXPECT_EQ(ExpectedSize(stride_y, height, kYPlane), frame.size(kYPlane));
-  EXPECT_EQ(ExpectedSize(stride_u, height, kUPlane), frame.size(kUPlane));
-  EXPECT_EQ(ExpectedSize(stride_v, height, kVPlane), frame.size(kVPlane));
+  EXPECT_EQ(ExpectedSize(stride_y, height, kYPlane),
+            frame.allocated_size(kYPlane));
+  EXPECT_EQ(ExpectedSize(stride_u, height, kUPlane),
+            frame.allocated_size(kUPlane));
+  EXPECT_EQ(ExpectedSize(stride_v, height, kVPlane),
+            frame.allocated_size(kVPlane));
+}
+
+TEST(TestI420VideoFrame, ResetSize) {
+  I420VideoFrame frame;
+  EXPECT_EQ(0, frame.CreateEmptyFrame(10, 10, 12, 14, 220));
+  EXPECT_FALSE(frame.IsZeroSize());
+  frame.ResetSize();
+  EXPECT_TRUE(frame.IsZeroSize());
 }
 
 TEST(TestI420VideoFrame, CopyFrame) {
@@ -94,9 +107,9 @@
   // Frame of smaller dimensions - allocated sizes should not vary.
   EXPECT_EQ(0, frame1.CopyFrame(frame2));
   EXPECT_TRUE(EqualFramesExceptSize(frame1, frame2));
-  EXPECT_EQ(kSizeY, frame1.size(kYPlane));
-  EXPECT_EQ(kSizeU, frame1.size(kUPlane));
-  EXPECT_EQ(kSizeV, frame1.size(kVPlane));
+  EXPECT_EQ(kSizeY, frame1.allocated_size(kYPlane));
+  EXPECT_EQ(kSizeU, frame1.allocated_size(kUPlane));
+  EXPECT_EQ(kSizeV, frame1.allocated_size(kVPlane));
   // Verify copy of all parameters.
   // Frame of larger dimensions - update allocated sizes.
   EXPECT_EQ(0, frame2.CopyFrame(frame1));
@@ -128,9 +141,9 @@
   EXPECT_EQ(memcmp(buffer_u, frame2.buffer(kUPlane), kSizeUv), 0);
   EXPECT_EQ(memcmp(buffer_v, frame2.buffer(kVPlane), kSizeUv), 0);
   // Compare size.
-  EXPECT_LE(kSizeY, frame2.size(kYPlane));
-  EXPECT_LE(kSizeUv, frame2.size(kUPlane));
-  EXPECT_LE(kSizeUv, frame2.size(kVPlane));
+  EXPECT_LE(kSizeY, frame2.allocated_size(kYPlane));
+  EXPECT_LE(kSizeUv, frame2.allocated_size(kUPlane));
+  EXPECT_LE(kSizeUv, frame2.allocated_size(kVPlane));
 }
 
 TEST(TestI420VideoFrame, FrameSwap) {
@@ -204,9 +217,9 @@
     return false;
   // Compare allocated memory size.
   bool ret = true;
-  ret |= (frame1.size(kYPlane) == frame2.size(kYPlane));
-  ret |= (frame1.size(kUPlane) == frame2.size(kUPlane));
-  ret |= (frame1.size(kVPlane) == frame2.size(kVPlane));
+  ret &= (frame1.allocated_size(kYPlane) == frame2.allocated_size(kYPlane));
+  ret &= (frame1.allocated_size(kUPlane) == frame2.allocated_size(kUPlane));
+  ret &= (frame1.allocated_size(kVPlane) == frame2.allocated_size(kVPlane));
   return ret;
 }
 
@@ -223,9 +236,12 @@
   if (!ret)
     return false;
   // Memory should be the equal for the minimum of the two sizes.
-  int size_y = std::min(frame1.size(kYPlane), frame2.size(kYPlane));
-  int size_u = std::min(frame1.size(kUPlane), frame2.size(kUPlane));
-  int size_v = std::min(frame1.size(kVPlane), frame2.size(kVPlane));
+  int size_y = std::min(frame1.allocated_size(kYPlane),
+                        frame2.allocated_size(kYPlane));
+  int size_u = std::min(frame1.allocated_size(kUPlane),
+                        frame2.allocated_size(kUPlane));
+  int size_v = std::min(frame1.allocated_size(kVPlane),
+                        frame2.allocated_size(kVPlane));
   int ret_val = 0;
   ret_val += memcmp(frame1.buffer(kYPlane), frame2.buffer(kYPlane), size_y);
   ret_val += memcmp(frame1.buffer(kUPlane), frame2.buffer(kUPlane), size_u);
diff --git a/common_video/interface/i420_video_frame.h b/common_video/interface/i420_video_frame.h
index 9c7f83a..8d2a050 100644
--- a/common_video/interface/i420_video_frame.h
+++ b/common_video/interface/i420_video_frame.h
@@ -63,7 +63,7 @@
   const uint8_t* buffer(PlaneType type) const;
 
   // Get allocated size per plane.
-  int size(PlaneType type) const;
+  int allocated_size(PlaneType type) const;
 
   // Get allocated stride per plane.
   int stride(PlaneType type) const;
@@ -93,6 +93,13 @@
   // Get render time in miliseconds.
   int64_t render_time_ms() const {return render_time_ms_;}
 
+  // Return true if underlying plane buffers are of zero size, false if not.
+  bool IsZeroSize();
+
+  // Reset the underlying plane buffer sizes to 0. This function doesn't
+  // clear memory.
+  void ResetSize();
+
  private:
   // Verifies legality of parameters.
   // Return value: 0 on success, -1 on error.
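
A short usage sketch of the two new methods (assumed usage, not from the
patch): ResetSize() marks all planes empty without freeing or clearing
memory, and IsZeroSize() reports that state, so a frame can be flagged as
having no content and later refilled without reallocation.

    #include "common_video/interface/i420_video_frame.h"

    void MarkFrameEmpty(webrtc::I420VideoFrame* frame) {
      if (!frame->IsZeroSize())
        frame->ResetSize();  // Plane sizes become 0...
      // ...but allocated_size() per plane is unchanged, so the buffers can
      // be reused when the frame is filled again.
    }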
diff --git a/common_video/jpeg/include/jpeg.h b/common_video/jpeg/include/jpeg.h
index 10c0461..3bb1093 100644
--- a/common_video/jpeg/include/jpeg.h
+++ b/common_video/jpeg/include/jpeg.h
@@ -17,11 +17,12 @@
 
 // jpeg forward declaration
 struct jpeg_compress_struct;
-struct jpeg_decompress_struct;
 
 namespace webrtc
 {
 
+// TODO(mikhal): Move this to the LibYuv wrapper once LibYuv has a JPEG
+// encoder.
 class JpegEncoder
 {
 public:
@@ -53,29 +54,19 @@
     char                    _fileName[257];
 };
 
-class JpegDecoder
-{
- public:
-    JpegDecoder();
-    ~JpegDecoder();
-
 // Decodes a JPEG stream.
 // Supports 1 image component or 3 interleaved image components with
 // YCbCr sub-sampling 4:4:4, 4:2:2, or 4:2:0.
 //
 // Input:
-//    - inputImage        : encoded image to be decoded.
-//    - outputImage       : VideoFrame to store decoded output.
+//    - input_image        : encoded image to be decoded.
+//    - output_image       : VideoFrame to store decoded output.
 //
 //    Output:
 //    - 0             : OK
 //    - (-1)          : Error
-    WebRtc_Word32 Decode(const EncodedImage& inputImage,
-                         VideoFrame& outputImage);
- private:
-    jpeg_decompress_struct*    _cinfo;
-};
-
-
+//    - (-2)          : Unsupported format
+int ConvertJpegToI420(const EncodedImage& input_image,
+                      VideoFrame* output_image);
 }
 #endif /* WEBRTC_COMMON_VIDEO_JPEG  */
diff --git a/common_video/jpeg/jpeg.cc b/common_video/jpeg/jpeg.cc
index 93bc251..b0d3a62 100644
--- a/common_video/jpeg/jpeg.cc
+++ b/common_video/jpeg/jpeg.cc
@@ -18,6 +18,8 @@
 #include "common_video/jpeg/include/jpeg.h"
 #include "common_video/jpeg/data_manager.h"
 #include "common_video/libyuv/include/webrtc_libyuv.h"
+#include "libyuv.h"
+#include "libyuv/mjpeg_decoder.h"
 
 extern "C" {
 #if defined(USE_SYSTEM_LIBJPEG)
@@ -194,172 +196,33 @@
     return 0;
 }
 
-JpegDecoder::JpegDecoder()
-{
-    _cinfo = new jpeg_decompress_struct;
-}
+int ConvertJpegToI420(const EncodedImage& input_image,
+                      VideoFrame* output_image) {
 
-JpegDecoder::~JpegDecoder()
-{
-    if (_cinfo != NULL)
-    {
-        delete _cinfo;
-        _cinfo = NULL;
-    }
-}
-
-WebRtc_Word32
-JpegDecoder::Decode(const EncodedImage& inputImage,
-                    VideoFrame& outputImage)
-{
-
-    WebRtc_UWord8* tmpBuffer = NULL;
-    // Set error handler
-    myErrorMgr    jerr;
-    _cinfo->err = jpeg_std_error(&jerr.pub);
-    jerr.pub.error_exit = MyErrorExit;
-
-    // Establish the setjmp return context
-    if (setjmp(jerr.setjmp_buffer))
-    {
-        if (_cinfo->is_decompressor)
-        {
-            jpeg_destroy_decompress(_cinfo);
-        }
-        if (tmpBuffer != NULL)
-        {
-            delete [] tmpBuffer;
-        }
-        return -1;
-    }
-
-    _cinfo->out_color_space = JCS_YCbCr;
-
-    // Create decompression object
-    jpeg_create_decompress(_cinfo);
-
-    // Specify data source
-    jpegSetSrcBuffer(_cinfo, (JOCTET*) inputImage._buffer, inputImage._size);
-
-    // Read header data
-    jpeg_read_header(_cinfo, TRUE);
-
-    _cinfo->raw_data_out = TRUE;
-    jpeg_start_decompress(_cinfo);
-
-    // Check header
-    if (_cinfo->num_components == 4)
-    {
-        return -2; // not supported
-    }
-    if (_cinfo->progressive_mode == 1)
-    {
-        return -2; // not supported
-    }
-
-
-    WebRtc_UWord32 height = _cinfo->image_height;
-    WebRtc_UWord32 width = _cinfo->image_width;
-
-    // Making sure width and height are even
-    if (height % 2)
-    {
-        height++;
-    }
-    if (width % 2)
-    {
-         width++;
-    }
-
-    WebRtc_UWord32 height16 = (height + 15) & ~15;
-    WebRtc_UWord32 stride = (width + 15) & ~15;
-    WebRtc_UWord32 uvStride = ((((stride + 1) >> 1) + 15) & ~15);
-
-    WebRtc_UWord32 tmpRequiredSize =  stride * height16 +
-                                      2 * (uvStride * ((height16 + 1) >> 1));
-    WebRtc_UWord32 requiredSize = width * height * 3 >> 1;
-
-    // Verify sufficient buffer size.
-    outputImage.VerifyAndAllocate(requiredSize);
-    WebRtc_UWord8* outPtr = outputImage.Buffer();
-
-    if (tmpRequiredSize > requiredSize)
-    {
-        tmpBuffer = new WebRtc_UWord8[(int) (tmpRequiredSize)];
-        outPtr = tmpBuffer;
-    }
-
-    JSAMPROW y[16],u[8],v[8];
-    JSAMPARRAY data[3];
-    data[0] = y;
-    data[1] = u;
-    data[2] = v;
-
-    WebRtc_UWord32 hInd, i;
-    WebRtc_UWord32 numScanLines = 16;
-    WebRtc_UWord32 numLinesProcessed = 0;
-
-    while (_cinfo->output_scanline < _cinfo->output_height)
-    {
-        hInd = _cinfo->output_scanline;
-        for (i = 0; i < numScanLines; i++)
-        {
-            y[i] = outPtr + stride * (i + hInd);
-
-            if (i % 2 == 0)
-            {
-                 u[i / 2] = outPtr + stride * height16 +
-                            stride / 2 * ((i + hInd) / 2);
-                 v[i / 2] = outPtr + stride * height16 +
-                            stride * height16 / 4 +
-                            stride / 2 * ((i + hInd) / 2);
-            }
-        }
-        // Processes exactly one iMCU row per call
-        numLinesProcessed = jpeg_read_raw_data(_cinfo, data, numScanLines);
-        // Error in read
-        if (numLinesProcessed == 0)
-        {
-            jpeg_abort((j_common_ptr)_cinfo);
-            return -1;
-        }
-    }
-
-    if (tmpRequiredSize > requiredSize)
-    {
-         WebRtc_UWord8* dstFramePtr = outputImage.Buffer();
-         WebRtc_UWord8* tmpPtr = outPtr;
-
-         for (WebRtc_UWord32 p = 0; p < 3; p++)
-         {
-             const WebRtc_UWord32 h = (p == 0) ? height : height >> 1;
-             const WebRtc_UWord32 h16 = (p == 0) ? height16 : height16 >> 1;
-             const WebRtc_UWord32 w = (p == 0) ? width : width >> 1;
-             const WebRtc_UWord32 s = (p == 0) ? stride : stride >> 1;
-
-             for (WebRtc_UWord32 i = 0; i < h; i++)
-             {
-                 memcpy(dstFramePtr, tmpPtr, w);
-                 dstFramePtr += w;
-                 tmpPtr += s;
-             }
-             tmpPtr += (h16 - h) * s;
-         }
-    }
-
-    if (tmpBuffer != NULL)
-    {
-        delete [] tmpBuffer;
-    }
-    // Setting output Image parameter
-    outputImage.SetWidth(width);
-    outputImage.SetHeight(height);
-    outputImage.SetLength(requiredSize);
-    outputImage.SetTimeStamp(inputImage._timeStamp);
-
-    jpeg_finish_decompress(_cinfo);
-    jpeg_destroy_decompress(_cinfo);
-    return 0;
+  if (output_image == NULL)
+    return -1;
+  // TODO(mikhal): Update to use latest API from LibYuv when that becomes
+  // available.
+  libyuv::MJpegDecoder jpeg_decoder;
+  bool ret = jpeg_decoder.LoadFrame(input_image._buffer, input_image._size);
+  if (!ret)
+    return -1;
+  if (jpeg_decoder.GetNumComponents() == 4)
+    return -2;  // not supported.
+  int width = jpeg_decoder.GetWidth();
+  int height = jpeg_decoder.GetHeight();
+  int req_size = CalcBufferSize(kI420, width, height);
+  output_image->VerifyAndAllocate(req_size);
+  output_image->SetWidth(width);
+  output_image->SetHeight(height);
+  output_image->SetLength(req_size);
+  return ConvertToI420(kMJPG,
+                       input_image._buffer,
+                       0, 0,  // no cropping
+                       width, height,
+                       input_image._size,
+                       kRotateNone,
+                       output_image);
 }
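
A hedged usage sketch of the new libyuv-backed decode path (the helper name
and the buffer handed in are assumptions; EncodedImage and ConvertJpegToI420
are as declared in jpeg.h):

    #include "common_video/jpeg/include/jpeg.h"

    int DecodeJpegBuffer(uint8_t* buffer, uint32_t size,
                         webrtc::VideoFrame* frame) {
      webrtc::EncodedImage image;
      image._buffer = buffer;
      image._size = size;
      image._length = size;
      // Returns 0 on success, -1 on error, -2 for unsupported formats
      // (e.g. 4-component JPEGs).
      return webrtc::ConvertJpegToI420(image, frame);
    }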
 
 
diff --git a/common_video/jpeg/jpeg_unittest.cc b/common_video/jpeg/jpeg_unittest.cc
index ee5d0b0..a7c912f 100644
--- a/common_video/jpeg/jpeg_unittest.cc
+++ b/common_video/jpeg/jpeg_unittest.cc
@@ -34,7 +34,6 @@
 
   void SetUp() {
     encoder_ = new JpegEncoder();
-    decoder_ = new JpegDecoder();
   }
 
   void TearDown() {
@@ -45,7 +44,6 @@
       delete encoded_buffer_;
     }
     delete encoder_;
-    delete decoder_;
   }
 
   // Reads an encoded image. Caller will have to deallocate the memory of this
@@ -70,13 +68,12 @@
   std::string encoded_filename_;
   EncodedImage* encoded_buffer_;
   JpegEncoder* encoder_;
-  JpegDecoder* decoder_;
 };
 
 TEST_F(JpegTest, Decode) {
   encoded_buffer_ = ReadEncodedImage(input_filename_);
   VideoFrame image_buffer;
-  EXPECT_EQ(0, decoder_->Decode(*encoded_buffer_, image_buffer));
+  EXPECT_EQ(0, ConvertJpegToI420(*encoded_buffer_, &image_buffer));
   EXPECT_GT(image_buffer.Length(), 0u);
   EXPECT_EQ(kImageWidth, image_buffer.Width());
   EXPECT_EQ(kImageHeight, image_buffer.Height());
@@ -107,7 +104,7 @@
   // Decode our input image then encode it again to a new file:
   encoded_buffer_ = ReadEncodedImage(input_filename_);
   VideoFrame image_buffer;
-  EXPECT_EQ(0, decoder_->Decode(*encoded_buffer_, image_buffer));
+  EXPECT_EQ(0, ConvertJpegToI420(*encoded_buffer_, &image_buffer));
 
   EXPECT_EQ(0, encoder_->SetFileName(encoded_filename_.c_str()));
   EXPECT_EQ(0, encoder_->Encode(image_buffer));
diff --git a/common_video/plane.h b/common_video/plane.h
index c6d08ce..c674339 100644
--- a/common_video/plane.h
+++ b/common_video/plane.h
@@ -43,6 +43,12 @@
   // Get allocated size.
   int allocated_size() const {return allocated_size_;}
 
+  // Reset the actual plane size to 0. Doesn't clear memory.
+  void ResetSize() {plane_size_ = 0;}
+
+  // Return true if the plane size is zero, false if not.
+  bool IsZeroSize() {return plane_size_ == 0;}
+
   // Get stride value.
   int stride() const {return stride_;}
 
diff --git a/common_video/plane_unittest.cc b/common_video/plane_unittest.cc
index 301f690..19597ce 100644
--- a/common_video/plane_unittest.cc
+++ b/common_video/plane_unittest.cc
@@ -17,11 +17,12 @@
 
 namespace webrtc {
 
-TEST(TestPlane, CreateEmptyPlaneialValues) {
+TEST(TestPlane, CreateEmptyPlaneValues) {
   Plane plane;
   int size, stride;
   EXPECT_EQ(0, plane.allocated_size());
   EXPECT_EQ(0, plane.stride());
+  EXPECT_TRUE(plane.IsZeroSize());
   size = 0;
   stride = 20;
   EXPECT_EQ(-1, plane.CreateEmptyPlane(size, stride, 1));
@@ -33,6 +34,22 @@
   EXPECT_EQ(0, plane.CreateEmptyPlane(size, stride, size));
   EXPECT_EQ(size, plane.allocated_size());
   EXPECT_EQ(stride, plane.stride());
+  EXPECT_FALSE(plane.IsZeroSize());
+}
+
+TEST(TestPlane, ResetSize) {
+  Plane plane;
+  EXPECT_TRUE(plane.IsZeroSize());
+  int allocated_size, plane_size, stride;
+  EXPECT_EQ(0, plane.allocated_size());
+  allocated_size = 30;
+  plane_size = 20;
+  stride = 10;
+  EXPECT_EQ(0, plane.CreateEmptyPlane(allocated_size, stride, plane_size));
+  EXPECT_EQ(allocated_size, plane.allocated_size());
+  EXPECT_FALSE(plane.IsZeroSize());
+  plane.ResetSize();
+  EXPECT_TRUE(plane.IsZeroSize());
 }
 
 TEST(TestPlane, PlaneCopy) {
diff --git a/engine_configurations.h b/engine_configurations.h
index b6a8f85..7fae281 100644
--- a/engine_configurations.h
+++ b/engine_configurations.h
@@ -27,19 +27,27 @@
 //  [Voice] Codec settings
 // ----------------------------------------------------------------------------
 
+// iSAC is not included in the Mozilla build, but in all other builds.
+#ifndef WEBRTC_MOZILLA_BUILD
 #ifdef WEBRTC_ARCH_ARM
-#define WEBRTC_CODEC_ISACFX     // fix-point iSAC implementation
+#define WEBRTC_CODEC_ISACFX  // Fix-point iSAC implementation.
 #else
-#define WEBRTC_CODEC_ISAC       // floating-point iSAC implementation (default)
-#endif
+#define WEBRTC_CODEC_ISAC  // Floating-point iSAC implementation (default).
+#endif  // WEBRTC_ARCH_ARM
+#endif  // !WEBRTC_MOZILLA_BUILD
+
+// AVT is included in all builds, along with G.711, NetEQ and CNG
+// (which are mandatory and don't have any defines).
 #define WEBRTC_CODEC_AVT
 
-#ifndef WEBRTC_CHROMIUM_BUILD
+// iLBC, G.722, PCM16B and Redundancy coding are excluded from Chromium and
+// Mozilla builds.
+#if !defined(WEBRTC_CHROMIUM_BUILD) && !defined(WEBRTC_MOZILLA_BUILD)
 #define WEBRTC_CODEC_ILBC
 #define WEBRTC_CODEC_G722
 #define WEBRTC_CODEC_PCM16
 #define WEBRTC_CODEC_RED
-#endif
+#endif  // !WEBRTC_CHROMIUM_BUILD && !WEBRTC_MOZILLA_BUILD
 
 // ----------------------------------------------------------------------------
 //  [Video] Codec settings
diff --git a/modules/audio_coding/codecs/cng/webrtc_cng.c b/modules/audio_coding/codecs/cng/webrtc_cng.c
index 7cc6cb9..28bfaae 100644
--- a/modules/audio_coding/codecs/cng/webrtc_cng.c
+++ b/modules/audio_coding/codecs/cng/webrtc_cng.c
@@ -36,7 +36,7 @@
 
 typedef struct WebRtcCngEncInst_t_ {
   int16_t enc_nrOfCoefs;
-  int16_t enc_sampfreq;
+  uint16_t enc_sampfreq;
   int16_t enc_interval;
   int16_t enc_msSinceSID;
   int32_t enc_Energy;
diff --git a/modules/audio_coding/codecs/isac/fix/test/kenny.c b/modules/audio_coding/codecs/isac/fix/test/kenny.c
index 71cddb6..b7ca694 100644
--- a/modules/audio_coding/codecs/isac/fix/test/kenny.c
+++ b/modules/audio_coding/codecs/isac/fix/test/kenny.c
@@ -126,10 +126,6 @@
 
   int totalbits =0;
   int totalsmpls =0;
-#ifdef _DEBUG
-  double kbps;
-  FILE *fy;
-#endif
   WebRtc_Word16 testNum, testCE;
 
   FILE *fp_gns = NULL;
@@ -150,13 +146,6 @@
   BottleNeckModel       BN_data;
   f_bn  = NULL;
 
-#ifdef _DEBUG
-  fy = fopen("bit_rate.dat", "w");
-  fclose(fy);
-  fy = fopen("bytes_frames.dat", "w");
-  fclose(fy);
-#endif
-
   readLoss = 0;
   packetLossPercent = 0;
 
@@ -719,9 +708,6 @@
           printf("\nError in decoder: %d.\n", errtype);
         }
       }
-#ifdef _DEBUG
-      fprintf(stderr,"  \rframe = %7d", framecnt);
-#endif
 
       if( readLoss == 1 ) {
         if( fread( &lostFrame, sizeof(WebRtc_Word16), 1, plFile ) != 1 ) {
@@ -804,17 +790,6 @@
         fclose(seedfile);
       }
     }
-
-#ifdef _DEBUG
-
-    kbps = ((double) FS) / ((double) cur_framesmpls) * 8.0 *
-        stream_len / 1000.0;// kbits/s
-    fy = fopen("bit_rate.dat", "a");
-    fprintf(fy, "Frame %i = %0.14f\n", framecnt, kbps);
-    fclose(fy);
-
-#endif /* _DEBUG */
-
   }
   printf("\nLost Frames %d ~ %4.1f%%\n", lostPackets,
          (double)lostPackets/(double)framecnt*100.0 );
@@ -823,14 +798,6 @@
          (double)totalbits *(FS/1000) / totalsmpls);
   printf("\n");
 
-#ifdef _DEBUG
-  /* fprintf(stderr,"\n\ntotal bits    = %d bits", totalbits);
-     fprintf(stderr,"\nmeasured average bitrate  = %0.3f kbits/s",
-     (double)totalbits *(FS/1000) / totalsmpls);
-     fprintf(stderr,"\n");
-  */
-#endif /* _DEBUG */
-
   /* Runtime statistics */
 
 
diff --git a/modules/audio_coding/codecs/opus/interface/opus_interface.h b/modules/audio_coding/codecs/opus/interface/opus_interface.h
new file mode 100644
index 0000000..dcfd87f
--- /dev/null
+++ b/modules/audio_coding/codecs/opus/interface/opus_interface.h
@@ -0,0 +1,123 @@
+/*
+ *  Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef WEBRTC_MODULES_AUDIO_CODING_CODECS_OPUS_INTERFACE_OPUS_INTERFACE_H_
+#define WEBRTC_MODULES_AUDIO_CODING_CODECS_OPUS_INTERFACE_OPUS_INTERFACE_H_
+
+#include "typedefs.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+// Opaque wrapper types for the codec state.
+typedef struct WebRtcOpusEncInst OpusEncInst;
+typedef struct WebRtcOpusDecInst OpusDecInst;
+
+int16_t WebRtcOpus_EncoderCreate(OpusEncInst** inst, int32_t channels);
+int16_t WebRtcOpus_EncoderFree(OpusEncInst* inst);
+
+/****************************************************************************
+ * WebRtcOpus_Encode(...)
+ *
+ * This function encodes audio as a series of Opus frames and inserts
+ * it into a packet. Input buffer can be any length.
+ *
+ * Input:
+ *      - inst                  : Encoder context
+ *      - audio_in              : Input speech data buffer
+ *      - samples               : Samples in audio_in
+ *      - length_encoded_buffer : Output buffer size
+ *
+ * Output:
+ *      - encoded               : Output compressed data buffer
+ *
+ * Return value                 : >0 - Length (in bytes) of coded data
+ *                                -1 - Error
+ */
+int16_t WebRtcOpus_Encode(OpusEncInst* inst, int16_t* audio_in, int16_t samples,
+                          int16_t length_encoded_buffer, uint8_t* encoded);
+
+/****************************************************************************
+ * WebRtcOpus_SetBitRate(...)
+ *
+ * This function adjusts the target bitrate of the encoder.
+ *
+ * Input:
+ *      - inst               : Encoder context
+ *      - rate               : New target bitrate
+ *
+ * Return value              :  0 - Success
+ *                             -1 - Error
+ */
+int16_t WebRtcOpus_SetBitRate(OpusEncInst* inst, int32_t rate);
+
+int16_t WebRtcOpus_DecoderCreate(OpusDecInst** inst, int channels);
+int16_t WebRtcOpus_DecoderFree(OpusDecInst* inst);
+
+/****************************************************************************
+ * WebRtcOpus_DecoderInit(...)
+ *
+ * This function resets the state of the decoder.
+ *
+ * Input:
+ *      - inst               : Decoder context
+ *
+ * Return value              :  0 - Success
+ *                             -1 - Error
+ */
+int16_t WebRtcOpus_DecoderInit(OpusDecInst* inst);
+
+/****************************************************************************
+ * WebRtcOpus_Decode(...)
+ *
+ * This function decodes an Opus packet into one or more audio frames at the
+ * ACM interface's sampling rate (32 kHz).
+ *
+ * Input:
+ *      - inst               : Decoder context
+ *      - encoded            : Encoded data
+ *      - encoded_bytes      : Bytes in encoded vector
+ *
+ * Output:
+ *      - decoded            : The decoded vector
+ *      - audio_type         : 1 normal, 2 CNG (for Opus it should
+ *                             always return 1 since we're not using Opus's
+ *                             built-in DTX/CNG scheme)
+ *
+ * Return value              : >0 - Samples in decoded vector
+ *                             -1 - Error
+ */
+int16_t WebRtcOpus_Decode(OpusDecInst* inst, int16_t* encoded,
+                          int16_t encoded_bytes, int16_t* decoded,
+                          int16_t* audio_type);
+
+/****************************************************************************
+ * WebRtcOpus_DecodePlc(...)
+ *
+ * This function processes PLC for Opus frame(s).
+ *
+ * Input:
+ *        - inst                  : Decoder context
+ *        - number_of_lost_frames : Number of PLC frames to produce
+ *
+ * Output:
+ *        - decoded               : The decoded vector
+ *
+ * Return value                   : >0 - number of samples in decoded PLC vector
+ *                                  -1 - Error
+ */
+int16_t WebRtcOpus_DecodePlc(OpusDecInst* inst, int16_t* decoded,
+                             int16_t number_of_lost_frames);
+
+#ifdef __cplusplus
+}  // extern "C"
+#endif
+
+#endif  // WEBRTC_MODULES_AUDIO_CODING_CODECS_OPUS_INTERFACE_OPUS_INTERFACE_H_
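
A hedged round-trip sketch of this wrapper API (the buffer size and helper
name are assumptions; all WebRtcOpus_* calls are as declared above). 20 ms
of mono input at 48 kHz is 960 samples, and since decode output is 32 kHz,
the same 20 ms comes back as 640 samples:

    #include "modules/audio_coding/codecs/opus/interface/opus_interface.h"

    int16_t RoundTrip20ms(int16_t* pcm48k_in, int16_t* pcm32k_out) {
      OpusEncInst* enc = NULL;
      OpusDecInst* dec = NULL;
      uint8_t packet[1500];  // Arbitrary MTU-sized buffer (an assumption).
      int16_t audio_type = 0;
      int16_t samples = -1;
      if (WebRtcOpus_EncoderCreate(&enc, 1) != 0)
        return -1;
      if (WebRtcOpus_DecoderCreate(&dec, 1) != 0) {
        WebRtcOpus_EncoderFree(enc);
        return -1;
      }
      WebRtcOpus_SetBitRate(enc, 32000);  // The ACM default rate for Opus.
      int16_t bytes = WebRtcOpus_Encode(enc, pcm48k_in, 960, sizeof(packet),
                                        packet);
      if (bytes > 0) {
        samples = WebRtcOpus_Decode(dec, (int16_t*) packet, bytes,
                                    pcm32k_out, &audio_type);
      }
      WebRtcOpus_EncoderFree(enc);
      WebRtcOpus_DecoderFree(dec);
      return samples;  // Expect 640 for a 20 ms packet.
    }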
diff --git a/modules/audio_coding/codecs/opus/opus.gypi b/modules/audio_coding/codecs/opus/opus.gypi
new file mode 100644
index 0000000..809068c
--- /dev/null
+++ b/modules/audio_coding/codecs/opus/opus.gypi
@@ -0,0 +1,44 @@
+# Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+#
+# Use of this source code is governed by a BSD-style license
+# that can be found in the LICENSE file in the root of the source
+# tree. An additional intellectual property rights grant can be found
+# in the file PATENTS.  All contributing project authors may
+# be found in the AUTHORS file in the root of the source tree.
+
+{
+  'targets': [
+    {
+      'target_name': 'webrtc_opus',
+      'type': 'static_library',
+      'conditions': [
+        ['build_with_mozilla==1', {
+          # Mozilla provides its own build of the opus library.
+          'include_dirs': [
+            '$(DIST)/include/opus',
+           ]
+        }, {
+          'dependencies': [
+            '<(DEPTH)/third_party/opus/opus.gyp:opus'
+          ],
+          'include_dirs': [
+            '<(webrtc_root)/../third_party/opus/source/include',
+          ],
+        }],
+      ],
+      'direct_dependent_settings': {
+        'conditions': [
+          ['build_with_mozilla==1', {
+            'include_dirs': [
+              '$(DIST)/include/opus',
+            ],
+          }],
+        ],
+      },
+      'sources': [
+        'interface/opus_interface.h',
+        'opus_interface.c',
+      ],
+    },
+  ],
+}
diff --git a/modules/audio_coding/codecs/opus/opus_interface.c b/modules/audio_coding/codecs/opus/opus_interface.c
new file mode 100644
index 0000000..f61ecc5
--- /dev/null
+++ b/modules/audio_coding/codecs/opus/opus_interface.c
@@ -0,0 +1,181 @@
+/*
+ *  Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "modules/audio_coding/codecs/opus/interface/opus_interface.h"
+
+#include <stdlib.h>
+#include <string.h>
+
+#include "opus.h"
+
+#include "common_audio/signal_processing/resample_by_2_internal.h"
+#include "common_audio/signal_processing/include/signal_processing_library.h"
+
+enum {
+  /* We always produce 20ms frames. */
+  kWebRtcOpusMaxEncodeFrameSizeMs = 20,
+
+  /* The format allows up to 120ms frames. Since we
+   * don't control the other side, we must allow
+   * for packets that large. NetEq is currently
+   * limited to 60 ms on the receive side.
+   */
+  kWebRtcOpusMaxDecodeFrameSizeMs = 120,
+
+  /* Sample count is 48 kHz * samples per frame. */
+  kWebRtcOpusMaxFrameSize = 48 * kWebRtcOpusMaxDecodeFrameSizeMs,
+};
+
+struct WebRtcOpusEncInst {
+  OpusEncoder* encoder;
+};
+
+int16_t WebRtcOpus_EncoderCreate(OpusEncInst** inst, int32_t channels) {
+  OpusEncInst* state;
+  state = (OpusEncInst*) calloc(1, sizeof(OpusEncInst));
+  if (state) {
+    int error;
+    state->encoder = opus_encoder_create(48000, channels, OPUS_APPLICATION_VOIP,
+                                         &error);
+    if (error == OPUS_OK && state->encoder != NULL) {
+      *inst = state;
+      return 0;
+    }
+    free(state);
+  }
+  return -1;
+}
+
+int16_t WebRtcOpus_EncoderFree(OpusEncInst* inst) {
+  opus_encoder_destroy(inst->encoder);
+  free(inst);
+  return 0;
+}
+
+int16_t WebRtcOpus_Encode(OpusEncInst* inst, int16_t* audio_in, int16_t samples,
+                          int16_t length_encoded_buffer, uint8_t* encoded) {
+  opus_int16* audio = (opus_int16*) audio_in;
+  unsigned char* coded = encoded;
+  int res;
+
+  if (samples > 48 * kWebRtcOpusMaxEncodeFrameSizeMs) {
+    return -1;
+  }
+
+  res = opus_encode(inst->encoder, audio, samples, coded,
+                    length_encoded_buffer);
+
+  if (res > 0) {
+    return res;
+  }
+  return -1;
+}
+
+int16_t WebRtcOpus_SetBitRate(OpusEncInst* inst, int32_t rate) {
+  return opus_encoder_ctl(inst->encoder, OPUS_SET_BITRATE(rate));
+}
+
+struct WebRtcOpusDecInst {
+  int16_t state_48_32[8];
+  OpusDecoder* decoder;
+};
+
+int16_t WebRtcOpus_DecoderCreate(OpusDecInst** inst, int channels) {
+  OpusDecInst* state;
+  state = (OpusDecInst*) calloc(1, sizeof(OpusDecInst));
+  if (state) {
+    int error;
+    // Always create a 48000 Hz Opus decoder.
+    state->decoder = opus_decoder_create(48000, channels, &error);
+    if (error == OPUS_OK && state->decoder != NULL) {
+      *inst = state;
+      return 0;
+    }
+    free(state);
+    state = NULL;
+  }
+  return -1;
+}
+
+int16_t WebRtcOpus_DecoderFree(OpusDecInst* inst) {
+  opus_decoder_destroy(inst->decoder);
+  free(inst);
+  return 0;
+}
+
+int16_t WebRtcOpus_DecoderInit(OpusDecInst* inst) {
+  int error = opus_decoder_ctl(inst->decoder, OPUS_RESET_STATE);
+  if (error == OPUS_OK) {
+    memset(inst->state_48_32, 0, sizeof(inst->state_48_32));
+    return 0;
+  }
+  return -1;
+}
+
+static int DecodeNative(OpusDecInst* inst, int16_t* encoded,
+                        int16_t encoded_bytes, int16_t* decoded,
+                        int16_t* audio_type) {
+  unsigned char* coded = (unsigned char*) encoded;
+  opus_int16* audio = (opus_int16*) decoded;
+
+  int res = opus_decode(inst->decoder, coded, encoded_bytes, audio,
+                        kWebRtcOpusMaxFrameSize, 0);
+  /* TODO(tlegrand): set to DTX for zero-length packets? */
+  *audio_type = 0;
+
+  if (res > 0) {
+    return res;
+  }
+  return -1;
+}
+
+int16_t WebRtcOpus_Decode(OpusDecInst* inst, int16_t* encoded,
+                          int16_t encoded_bytes, int16_t* decoded,
+                          int16_t* audio_type) {
+  /* Enough for 120 ms (the largest Opus packet size) of mono audio at 48 kHz
+   * and resampler overlap. This will need to be enlarged for stereo decoding.
+   */
+  int16_t buffer16[kWebRtcOpusMaxFrameSize];
+  int32_t buffer32[kWebRtcOpusMaxFrameSize + 7];
+  int decoded_samples;
+  int blocks;
+  int16_t output_samples;
+  int i;
+
+  /* Decode to a temporary buffer. */
+  decoded_samples = DecodeNative(inst, encoded, encoded_bytes, buffer16,
+                                 audio_type);
+  if (decoded_samples < 0) {
+    return -1;
+  }
+  /* Resample from 48 kHz to 32 kHz. */
+  for (i = 0; i < 7; i++) {
+    buffer32[i] = inst->state_48_32[i];
+    inst->state_48_32[i] = buffer16[decoded_samples - 7 + i];
+  }
+  for (i = 0; i < decoded_samples; i++) {
+    buffer32[7 + i] = buffer16[i];
+  }
+  /* Resample 3 samples to 2. The function divides the input into |blocks|
+   * 3-sample groups, and outputs |blocks| 2-sample groups. */
+  blocks = decoded_samples / 3;
+  WebRtcSpl_Resample48khzTo32khz(buffer32, buffer32, blocks);
+  output_samples = (int16_t) (blocks * 2);
+  WebRtcSpl_VectorBitShiftW32ToW16(decoded, output_samples, buffer32, 15);
+
+  return output_samples;
+}
+
+int16_t WebRtcOpus_DecodePlc(OpusDecInst* inst, int16_t* decoded,
+                             int16_t number_of_lost_frames) {
+  /* TODO(tlegrand): We can pass NULL to opus_decode to activate packet
+   * loss concealment, but I don't know how many samples
+   * number_of_lost_frames corresponds to. */
+  return -1;
+}
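
The decoder always runs at 48 kHz while the ACM consumes 32 kHz, so
WebRtcOpus_Decode carries a 7-sample history (state_48_32) across calls and
converts every 3 input samples into 2 output samples. The sample-count
bookkeeping for a 20 ms packet, checked in isolation (a sketch, not part of
the patch):

    #include <cassert>

    int main() {
      const int decoded_samples = 960;         // 20 ms decoded at 48 kHz.
      const int blocks = decoded_samples / 3;  // 3-sample input groups.
      const int output_samples = blocks * 2;   // 2 samples out per group.
      assert(blocks == 320);
      assert(output_samples == 640);           // 20 ms at 32 kHz.
      return 0;
    }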
diff --git a/modules/audio_coding/main/source/acm_cng.cc b/modules/audio_coding/main/source/acm_cng.cc
index 2393346..4edfc05 100644
--- a/modules/audio_coding/main/source/acm_cng.cc
+++ b/modules/audio_coding/main/source/acm_cng.cc
@@ -81,7 +81,8 @@
   // Then return the structure back to NetEQ to add the codec to it's
   // database.
 
-  if (_sampFreqHz == 8000 || _sampFreqHz == 16000 || _sampFreqHz == 32000) {
+  if (_sampFreqHz == 8000 || _sampFreqHz == 16000 || _sampFreqHz == 32000 ||
+      _sampFreqHz == 48000) {
     SET_CODEC_PAR((codecDef), kDecoderCNG, codecInst.pltype,
         _decoderInstPtr, _sampFreqHz);
     SET_CNG_FUNCTIONS((codecDef));
diff --git a/modules/audio_coding/main/source/acm_cng.h b/modules/audio_coding/main/source/acm_cng.h
index 6276c44..d204d02 100644
--- a/modules/audio_coding/main/source/acm_cng.h
+++ b/modules/audio_coding/main/source/acm_cng.h
@@ -62,7 +62,7 @@
 
   WebRtcCngEncInst* _encoderInstPtr;
   WebRtcCngDecInst* _decoderInstPtr;
-  WebRtc_Word16 _sampFreqHz;
+  WebRtc_UWord16 _sampFreqHz;
 };
 
 } // namespace webrtc
diff --git a/modules/audio_coding/main/source/acm_codec_database.cc b/modules/audio_coding/main/source/acm_codec_database.cc
index b782194..2e3db19 100644
--- a/modules/audio_coding/main/source/acm_codec_database.cc
+++ b/modules/audio_coding/main/source/acm_codec_database.cc
@@ -86,6 +86,10 @@
     #include "acm_gsmfr.h"
     #include "gsmfr_interface.h"
 #endif
+#ifdef WEBRTC_CODEC_OPUS
+    #include "acm_opus.h"
+    #include "opus_interface.h"
+#endif
 #ifdef WEBRTC_CODEC_SPEEX
     #include "acm_speex.h"
     #include "speex_interface.h"
@@ -103,22 +107,20 @@
 // codecs. Note! There are a limited number of payload types. If more codecs
 // are defined they will receive reserved fixed payload types (values 69-95).
 const int kDynamicPayloadtypes[ACMCodecDB::kMaxNumCodecs] = {
-  105, 107, 108, 109, 111, 112, 113, 114, 115, 116, 117, 120,
-  121, 122, 123, 124, 125, 126, 101, 100,  97,  96,  95,  94,
-   93,  92,  91,  90,  89,  88,  87,  86,  85,  84,  83,  82,
-   81,  80,  79,  78,  77,  76,  75,  74,  73,  72,  71,  70,
-   69,
+  105, 107, 108, 109, 111, 112, 113, 114, 115, 116, 117, 121,
+   92,  91,  90,  89,  88,  87,  86,  85,  84,  83,  82,  81,
+   80,  79,  78,  77,  76,  75,  74,  73,  72,  71,  70,  69,
+   68, 67
 };
 
 // Creates database with all supported codecs at compile time.
 // Each entry needs the following parameters in the given order:
 // payload type, name, sampling frequency, packet size in samples,
 // number of channels, and default rate.
-#if (defined(WEBRTC_CODEC_PCM16) || \
-     defined(WEBRTC_CODEC_AMR) || defined(WEBRTC_CODEC_AMRWB) || \
-     defined(WEBRTC_CODEC_CELT) || defined(WEBRTC_CODEC_G729_1) || \
-     defined(WEBRTC_CODEC_SPEEX) || defined(WEBRTC_CODEC_G722_1) || \
-     defined(WEBRTC_CODEC_G722_1C))
+#if (defined(WEBRTC_CODEC_AMR) || defined(WEBRTC_CODEC_AMRWB) \
+  || defined(WEBRTC_CODEC_CELT) || defined(WEBRTC_CODEC_G722_1) \
+  || defined(WEBRTC_CODEC_G722_1C) || defined(WEBRTC_CODEC_G729_1) \
+  || defined(WEBRTC_CODEC_PCM16) || defined(WEBRTC_CODEC_SPEEX))
 static int count_database = 0;
 #endif
 
@@ -186,14 +188,19 @@
 #ifdef WEBRTC_CODEC_GSMFR
   {3, "GSM", 8000, 160, 1, 13200},
 #endif
+#ifdef WEBRTC_CODEC_OPUS
+  // Opus supports 48, 24, 16, 12, 8 kHz.
+  {120, "opus", 48000, 960, 1, 32000},
+#endif
 #ifdef WEBRTC_CODEC_SPEEX
   {kDynamicPayloadtypes[count_database++], "speex", 8000, 160, 1, 11000},
   {kDynamicPayloadtypes[count_database++], "speex", 16000, 320, 1, 22000},
 #endif
-  // Comfort noise for three different sampling frequencies.
+  // Comfort noise for four different sampling frequencies.
   {13, "CN", 8000, 240, 1, 0},
   {98, "CN", 16000, 480, 1, 0},
   {99, "CN", 32000, 960, 1, 0},
+  {100, "CN", 48000, 1440, 1, 0},
 #ifdef WEBRTC_CODEC_AVT
   {106, "telephone-event", 8000, 240, 1, 0},
 #endif
@@ -272,6 +279,11 @@
 #ifdef WEBRTC_CODEC_GSMFR
   {3, {160, 320, 480}, 160, 1},
 #endif
+#ifdef WEBRTC_CODEC_OPUS
+  // Opus supports frame sizes shorter than 10 ms,
+  // but using them gains us nothing.
+  {1, {960}, 0, 2},
+#endif
 #ifdef WEBRTC_CODEC_SPEEX
   {3, {160, 320, 480}, 0, 1},
   {3, {320, 640, 960}, 0, 1},
@@ -280,6 +292,7 @@
   {1, {240}, 240, 1},
   {1, {480}, 480, 1},
   {1, {960}, 960, 1},
+  {1, {1440}, 1440, 1},
 #ifdef WEBRTC_CODEC_AVT
   {1, {240}, 240, 1},
 #endif
@@ -355,6 +368,9 @@
 #ifdef WEBRTC_CODEC_GSMFR
   kDecoderGSMFR,
 #endif
+#ifdef WEBRTC_CODEC_OPUS
+  kDecoderOpus,
+#endif
 #ifdef WEBRTC_CODEC_SPEEX
   kDecoderSPEEX_8,
   kDecoderSPEEX_16,
@@ -363,6 +379,7 @@
   kDecoderCNG,
   kDecoderCNG,
   kDecoderCNG,
+  kDecoderCNG,
 #ifdef WEBRTC_CODEC_AVT
   kDecoderAVT,
 #endif
@@ -509,6 +526,9 @@
   } else if (STR_CASE_CMP("g7291", codec_inst->plname) == 0) {
     return IsG7291RateValid(codec_inst->rate)
         ? codec_id : kInvalidRate;
+  } else if (STR_CASE_CMP("opus", codec_inst->plname) == 0) {
+    return IsOpusRateValid(codec_inst->rate)
+        ? codec_id : kInvalidRate;
   } else if (STR_CASE_CMP("speex", codec_inst->plname) == 0) {
     return IsSpeexRateValid(codec_inst->rate)
         ? codec_id : kInvalidRate;
@@ -719,6 +739,10 @@
         codec_id = kCNSWB;
         break;
       }
+      case 48000: {
+        codec_id = kCNFB;
+        break;
+      }
       default: {
         return NULL;
       }
@@ -732,6 +756,10 @@
 #ifdef WEBRTC_CODEC_G729_1
     return new ACMG729_1(kG729_1);
 #endif
+  } else if (!STR_CASE_CMP(codec_inst->plname, "opus")) {
+#ifdef WEBRTC_CODEC_OPUS
+    return new ACMOpus(kOpus);
+#endif
   } else if (!STR_CASE_CMP(codec_inst->plname, "speex")) {
 #ifdef WEBRTC_CODEC_SPEEX
     int codec_id;
@@ -766,6 +794,10 @@
         codec_id = kCNSWB;
         break;
       }
+      case 48000: {
+        codec_id = kCNFB;
+        break;
+      }
       default: {
         return NULL;
       }
@@ -928,6 +960,14 @@
   }
 }
 
+// Checks if the bitrate is valid for Opus.
+bool ACMCodecDB::IsOpusRateValid(int rate) {
+  if ((rate < 6000) || (rate > 510000)) {
+    return false;
+  }
+  return true;
+}
+
 // Checks if the bitrate is valid for Celt.
 bool ACMCodecDB::IsCeltRateValid(int rate) {
   if ((rate >= 48000) && (rate <= 128000)) {
diff --git a/modules/audio_coding/main/source/acm_codec_database.h b/modules/audio_coding/main/source/acm_codec_database.h
index 0fe3a5e..8baf24e 100644
--- a/modules/audio_coding/main/source/acm_codec_database.h
+++ b/modules/audio_coding/main/source/acm_codec_database.h
@@ -91,6 +91,9 @@
 #ifdef WEBRTC_CODEC_GSMFR
     , kGSMFR
 #endif
+#ifdef WEBRTC_CODEC_OPUS
+    , kOpus
+#endif
 #ifdef WEBRTC_CODEC_SPEEX
     , kSPEEX8
     , kSPEEX16
@@ -98,6 +101,7 @@
     , kCNNB
     , kCNWB
     , kCNSWB
+    , kCNFB
 #ifdef WEBRTC_CODEC_AVT
     , kAVT
 #endif
@@ -170,6 +174,9 @@
   enum {kSPEEX8 = -1};
   enum {kSPEEX16 = -1};
 #endif
+#ifndef WEBRTC_CODEC_OPUS
+  enum {kOpus = -1};
+#endif
 #ifndef WEBRTC_CODEC_AVT
   enum {kAVT = -1};
 #endif
@@ -298,6 +305,7 @@
   static bool IsAMRwbRateValid(int rate);
   static bool IsG7291RateValid(int rate);
   static bool IsSpeexRateValid(int rate);
+  static bool IsOpusRateValid(int rate);
   static bool IsCeltRateValid(int rate);
 
   // Check if the payload type is valid, meaning that it is in the valid range
diff --git a/modules/audio_coding/main/source/acm_common_defs.h b/modules/audio_coding/main/source/acm_common_defs.h
index fd8dbd6..cdff1c1 100644
--- a/modules/audio_coding/main/source/acm_common_defs.h
+++ b/modules/audio_coding/main/source/acm_common_defs.h
@@ -63,14 +63,15 @@
 //   kPassiveDTXNB            : Passive audio frame coded by narrow-band CN.
 //   kPassiveDTXWB            : Passive audio frame coded by wide-band CN.
 //   kPassiveDTXSWB           : Passive audio frame coded by super-wide-band CN.
-//
+//   kPassiveDTXFB            : Passive audio frame coded by full-band CN.
 enum WebRtcACMEncodingType {
   kNoEncoding,
   kActiveNormalEncoded,
   kPassiveNormalEncoded,
   kPassiveDTXNB,
   kPassiveDTXWB,
-  kPassiveDTXSWB
+  kPassiveDTXSWB,
+  kPassiveDTXFB
 };
 
 // A structure which contains codec parameters. For instance, used when
diff --git a/modules/audio_coding/main/source/acm_generic_codec.cc b/modules/audio_coding/main/source/acm_generic_codec.cc
index f9a6a3a..f98f260 100644
--- a/modules/audio_coding/main/source/acm_generic_codec.cc
+++ b/modules/audio_coding/main/source/acm_generic_codec.cc
@@ -58,6 +58,7 @@
       _numLPCParams(kNewCNGNumPLCParams),
       _sentCNPrevious(false),
       _isMaster(true),
+      _prev_frame_cng(0),
       _netEqDecodeLock(NULL),
       _codecWrapperLock(*RWLockWrapper::CreateRWLock()),
       _lastEncodedTimestamp(0),
@@ -294,6 +295,8 @@
                 *encodingType = kPassiveDTXWB;
             } else if (sampFreqHz == 32000) {
                 *encodingType = kPassiveDTXSWB;
+            } else if (sampFreqHz == 48000) {
+                *encodingType = kPassiveDTXFB;
             } else {
                 status = -1;
                 WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, _uniqueID,
@@ -1169,7 +1172,7 @@
         }
         WebRtc_UWord16 freqHz;
         EncoderSampFreq(freqHz);
-        if(WebRtcCng_InitEnc(_ptrDTXInst, (WebRtc_Word16)freqHz,
+        if(WebRtcCng_InitEnc(_ptrDTXInst, freqHz,
             ACM_SID_INTERVAL_MSEC, _numLPCParams) < 0)
         {
             // Couldn't initialize, has to return -1, and free the memory
@@ -1313,6 +1316,7 @@
         *samplesProcessed = 0;
         return 0;
     }
+
     WebRtc_UWord16 freqHz;
     EncoderSampFreq(freqHz);
 
@@ -1321,8 +1325,8 @@
     WebRtc_Word32 frameLenMsec = (((WebRtc_Word32)_frameLenSmpl * 1000) / freqHz);
     WebRtc_Word16 status;
 
-    // Vector for storing maximum 30 ms of mono audio at 32 kHz
-    WebRtc_Word16 audio[960];
+    // Vector for storing maximum 30 ms of mono audio at 48 kHz.
+    WebRtc_Word16 audio[1440];
 
     // Calculate number of VAD-blocks to process, and number of samples in each block.
     int noSamplesToProcess[2];
@@ -1378,25 +1382,33 @@
             *bitStreamLenByte = 0;
             for(WebRtc_Word16 n = 0; n < num10MsecFrames; n++)
             {
-                // This block is (passive) && (vad enabled)
-                status = WebRtcCng_Encode(_ptrDTXInst, &audio[n*samplesIn10Msec],
-                    samplesIn10Msec, bitStream, &bitStreamLen, 0);
+                // This block is (passive) && (vad enabled). If first CNG after
+                // speech, force SID by setting last parameter to "1".
+                status = WebRtcCng_Encode(_ptrDTXInst,
+                                          &audio[n*samplesIn10Msec],
+                                          samplesIn10Msec, bitStream,
+                                          &bitStreamLen, !_prev_frame_cng);
                 if (status < 0) {
                     return -1;
                 }
 
+                // Record that the previous frame was CNG.
+                _prev_frame_cng = 1;
+
                 *samplesProcessed += samplesIn10Msec*_noChannels;
 
                 // bitStreamLen will only be > 0 once per 100 ms
                 *bitStreamLenByte += bitStreamLen;
             }
 
-
             // Check if all samples got processed by the DTX
             if(*samplesProcessed != noSamplesToProcess[i]*_noChannels) {
                 // Set to zero since something went wrong. Shouldn't happen.
                 *samplesProcessed = 0;
             }
+        } else {
+            // Record that the previous frame was not CNG.
+            _prev_frame_cng = 0;
         }
 
         if(*samplesProcessed > 0)
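
The _prev_frame_cng flag implements a one-bit state machine: the first
passive frame after active speech forces a SID packet (last argument 1 to
WebRtcCng_Encode), and subsequent passive frames fall back to the regular
SID interval. Checked in isolation (a sketch, not part of the patch):

    #include <cassert>

    int main() {
      int prev_frame_cng = 0;         // Last frame was active speech.
      assert(!prev_frame_cng == 1);   // First passive frame: force SID.
      prev_frame_cng = 1;
      assert(!prev_frame_cng == 0);   // Later passive frames: no forcing.
      prev_frame_cng = 0;             // An active frame resets the flag.
      return 0;
    }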
diff --git a/modules/audio_coding/main/source/acm_generic_codec.h b/modules/audio_coding/main/source/acm_generic_codec.h
index c138ed9..29c882c 100644
--- a/modules/audio_coding/main/source/acm_generic_codec.h
+++ b/modules/audio_coding/main/source/acm_generic_codec.h
@@ -1310,6 +1310,7 @@
     WebRtc_UWord8         _numLPCParams;
     bool                  _sentCNPrevious;
     bool                  _isMaster;
+    int16_t               _prev_frame_cng;
 
     WebRtcACMCodecParams  _encoderParams;
     WebRtcACMCodecParams  _decoderParams;
diff --git a/modules/audio_coding/main/source/acm_opus.cc b/modules/audio_coding/main/source/acm_opus.cc
index 87bdd8b..034e57d 100644
--- a/modules/audio_coding/main/source/acm_opus.cc
+++ b/modules/audio_coding/main/source/acm_opus.cc
@@ -8,442 +8,256 @@
  *  be found in the AUTHORS file in the root of the source tree.
  */
 
+#include "acm_opus.h"
+
+#include "acm_codec_database.h"
 #include "acm_common_defs.h"
 #include "acm_neteq.h"
-#include "acm_opus.h"
 #include "trace.h"
 #include "webrtc_neteq.h"
 #include "webrtc_neteq_help_macros.h"
 
 #ifdef WEBRTC_CODEC_OPUS
-    // NOTE! Opus is not included in the open-source package. Modify this file or your codec
-    // API to match the function call and name of used Opus API file.
-    // #include "opus_interface.h"
+#include "opus_interface.h"
 #endif
 
-namespace webrtc
-{
+namespace webrtc {
 
 #ifndef WEBRTC_CODEC_OPUS
 
-ACMOPUS::ACMOPUS(WebRtc_Word16 /* codecID */)
+ACMOpus::ACMOpus(int16_t /* codecID */)
     : _encoderInstPtr(NULL),
       _decoderInstPtr(NULL),
-      _mySampFreq(0),
-      _myRate(0),
-      _opusMode(0),
-      _flagVBR(0) {
+      _sampleFreq(0),
+      _bitrate(0) {
   return;
 }
 
-
-ACMOPUS::~ACMOPUS()
-{
-    return;
+ACMOpus::~ACMOpus() {
+  return;
 }
 
-
-WebRtc_Word16
-ACMOPUS::InternalEncode(
-    WebRtc_UWord8* /* bitStream        */,
-    WebRtc_Word16* /* bitStreamLenByte */)
-{
-    return -1;
+int16_t ACMOpus::InternalEncode(uint8_t* /* bitStream */,
+                                int16_t* /* bitStreamLenByte */) {
+  return -1;
 }
 
-
-WebRtc_Word16
-ACMOPUS::DecodeSafe(
-    WebRtc_UWord8* /* bitStream        */,
-    WebRtc_Word16  /* bitStreamLenByte */,
-    WebRtc_Word16* /* audio            */,
-    WebRtc_Word16* /* audioSamples     */,
-    WebRtc_Word8*  /* speechType       */)
-{
-    return -1;
+int16_t ACMOpus::DecodeSafe(uint8_t* /* bitStream */,
+                            int16_t /* bitStreamLenByte */,
+                            int16_t* /* audio */,
+                            int16_t* /* audioSamples */,
+                            int8_t* /* speechType */) {
+  return -1;
 }
 
-
-WebRtc_Word16
-ACMOPUS::InternalInitEncoder(
-    WebRtcACMCodecParams* /* codecParams */)
-{
-    return -1;
+int16_t ACMOpus::InternalInitEncoder(WebRtcACMCodecParams* /* codecParams */) {
+  return -1;
 }
 
-
-WebRtc_Word16
-ACMOPUS::InternalInitDecoder(
-    WebRtcACMCodecParams* /* codecParams */)
-{
-    return -1;
+int16_t ACMOpus::InternalInitDecoder(WebRtcACMCodecParams* /* codecParams */) {
+  return -1;
 }
 
-
-WebRtc_Word32
-ACMOPUS::CodecDef(
-    WebRtcNetEQ_CodecDef& /* codecDef  */,
-    const CodecInst&      /* codecInst */)
-{
-    return -1;
+int32_t ACMOpus::CodecDef(WebRtcNetEQ_CodecDef& /* codecDef */,
+                          const CodecInst& /* codecInst */) {
+  return -1;
 }
 
-
-ACMGenericCodec*
-ACMOPUS::CreateInstance(void)
-{
-    return NULL;
+ACMGenericCodec* ACMOpus::CreateInstance(void) {
+  return NULL;
 }
 
-
-WebRtc_Word16
-ACMOPUS::InternalCreateEncoder()
-{
-    return -1;
+int16_t ACMOpus::InternalCreateEncoder() {
+  return -1;
 }
 
-
-void
-ACMOPUS::DestructEncoderSafe()
-{
-    return;
+void ACMOpus::DestructEncoderSafe() {
+  return;
 }
 
-
-WebRtc_Word16
-ACMOPUS::InternalCreateDecoder()
-{
-    return -1;
+int16_t ACMOpus::InternalCreateDecoder() {
+  return -1;
 }
 
-
-void
-ACMOPUS::DestructDecoderSafe()
-{
-    return;
+void ACMOpus::DestructDecoderSafe() {
+  return;
 }
 
-
-void
-ACMOPUS::InternalDestructEncoderInst(
-    void* /* ptrInst */)
-{
-    return;
+void ACMOpus::InternalDestructEncoderInst(void* /* ptrInst */) {
+  return;
 }
 
-WebRtc_Word16
-ACMOPUS::SetBitRateSafe(
-    const WebRtc_Word32 /*rate*/ )
-{
-    return -1;
+int16_t ACMOpus::SetBitRateSafe(const int32_t /*rate*/) {
+  return -1;
 }
 
-#else     //===================== Actual Implementation =======================
+#else  //===================== Actual Implementation =======================
 
-// Remove when integrating a real Opus wrapper
-extern WebRtc_Word16 WebRtcOpus_CreateEnc(OPUS_inst_t_** inst, WebRtc_Word16 samplFreq);
-extern WebRtc_Word16 WebRtcOpus_CreateDec(OPUS_inst_t_** inst, WebRtc_Word16 samplFreq);
-extern WebRtc_Word16 WebRtcOpus_FreeEnc(OPUS_inst_t_* inst);
-extern WebRtc_Word16 WebRtcOpus_FreeDec(OPUS_inst_t_* inst);
-extern WebRtc_Word16 WebRtcOpus_Encode(OPUS_inst_t_* encInst,
-                                       WebRtc_Word16* input,
-                                       WebRtc_Word16* output,
-                                       WebRtc_Word16 len,
-                                       WebRtc_Word16 byteLen);
-extern WebRtc_Word16 WebRtcOpus_EncoderInit(OPUS_inst_t_* encInst,
-                                            WebRtc_Word16 samplFreq,
-                                            WebRtc_Word16 mode,
-                                            WebRtc_Word16 vbrFlag);
-extern WebRtc_Word16 WebRtcOpus_Decode(OPUS_inst_t_* decInst);
-extern WebRtc_Word16 WebRtcOpus_DecodeBwe(OPUS_inst_t_* decInst, WebRtc_Word16* input);
-extern WebRtc_Word16 WebRtcOpus_DecodePlc(OPUS_inst_t_* decInst);
-extern WebRtc_Word16 WebRtcOpus_DecoderInit(OPUS_inst_t_* decInst);
-
-ACMOPUS::ACMOPUS(WebRtc_Word16 codecID)
+ACMOpus::ACMOpus(int16_t codecID)
     : _encoderInstPtr(NULL),
       _decoderInstPtr(NULL),
-      _mySampFreq(48000),  // Default sampling frequency.
-      _myRate(50000),  // Default rate.
-      _opusMode(1),  // Default mode is the hybrid mode.
-      _flagVBR(0) {  // Default VBR off.
+      _sampleFreq(32000),  // Default sampling frequency.
+      _bitrate(20000) {  // Default bit-rate.
   _codecID = codecID;
-
-  // Current implementation doesn't have DTX. That might change.
+  // Opus has internal DTX, but we don't use it for now.
   _hasInternalDTX = false;
 
+  if (_codecID != ACMCodecDB::kOpus) {
+    WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, _uniqueID,
+                 "Wrong codec id for Opus.");
+    _sampleFreq = -1;
+    _bitrate = -1;
+  }
   return;
 }
 
-ACMOPUS::~ACMOPUS()
-{
-    if(_encoderInstPtr != NULL)
-    {
-        WebRtcOpus_FreeEnc(_encoderInstPtr);
-        _encoderInstPtr = NULL;
-    }
-    if(_decoderInstPtr != NULL)
-    {
-        WebRtcOpus_FreeDec(_decoderInstPtr);
-        _decoderInstPtr = NULL;
-    }
-    return;
+ACMOpus::~ACMOpus() {
+  if (_encoderInstPtr != NULL) {
+    WebRtcOpus_EncoderFree(_encoderInstPtr);
+    _encoderInstPtr = NULL;
+  }
+  if (_decoderInstPtr != NULL) {
+    WebRtcOpus_DecoderFree(_decoderInstPtr);
+    _decoderInstPtr = NULL;
+  }
+  return;
 }
 
-
-WebRtc_Word16
-ACMOPUS::InternalEncode(
-    WebRtc_UWord8* bitStream,
-    WebRtc_Word16* bitStreamLenByte)
-{
-    WebRtc_Word16 noEncodedSamples = 0;
-    WebRtc_Word16 tmpLenByte = 0;
+int16_t ACMOpus::InternalEncode(uint8_t* bitStream, int16_t* bitStreamLenByte) {
+  // Call Encoder.
+  *bitStreamLenByte = WebRtcOpus_Encode(_encoderInstPtr,
+                                        &_inAudio[_inAudioIxRead],
+                                        _frameLenSmpl,
+                                        MAX_PAYLOAD_SIZE_BYTE,
+                                        bitStream);
+  // Check for error reported from encoder.
+  if (*bitStreamLenByte < 0) {
+    WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, _uniqueID,
+                 "InternalEncode: Encode error for Opus");
     *bitStreamLenByte = 0;
+    return -1;
+  }
 
-    WebRtc_Word16 byteLengthFrame = 0;
+  // Increment the read index. This tells the caller how far
+  // we have gone forward in reading the audio buffer.
+  _inAudioIxRead += _frameLenSmpl;
 
-    // Derive what byte-length is requested
-    byteLengthFrame = _myRate*_frameLenSmpl/(8*_mySampFreq);
-
-    // Call Encoder
-    *bitStreamLenByte = WebRtcOpus_Encode(_encoderInstPtr, &_inAudio[_inAudioIxRead],
-           (WebRtc_Word16*)bitStream, _frameLenSmpl, byteLengthFrame);
-
-    // increment the read index this tell the caller that how far
-    // we have gone forward in reading the audio buffer
-    _inAudioIxRead += _frameLenSmpl;
-
-    // sanity check
-    if(*bitStreamLenByte < 0)
-    {
-        // error has happened
-        WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, _uniqueID,
-            "InternalEncode: Encode error for Opus");
-            *bitStreamLenByte = 0;
-            return -1;
-    }
-
-    return *bitStreamLenByte;
+  return *bitStreamLenByte;
 }
 
+int16_t ACMOpus::DecodeSafe(uint8_t* bitStream, int16_t bitStreamLenByte,
+                            int16_t* audio, int16_t* audioSamples,
+                            int8_t* speechType) {
+  return 0;
+}
 
+int16_t ACMOpus::InternalInitEncoder(WebRtcACMCodecParams* codecParams) {
+  int16_t ret;
+  if (_encoderInstPtr != NULL) {
+    WebRtcOpus_EncoderFree(_encoderInstPtr);
+    _encoderInstPtr = NULL;
+  }
+  ret = WebRtcOpus_EncoderCreate(&_encoderInstPtr,
+                                 codecParams->codecInstant.channels);
+  if (ret < 0) {
+    WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, _uniqueID,
+                 "Encoder creation failed for Opus");
+    return ret;
+  }
+  ret = WebRtcOpus_SetBitRate(_encoderInstPtr, codecParams->codecInstant.rate);
+  if (ret < 0) {
+    WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, _uniqueID,
+                 "Setting initial bitrate failed for Opus");
+    return ret;
+  }
+  return 0;
+}
 
-WebRtc_Word16
-ACMOPUS::DecodeSafe(
-    WebRtc_UWord8* /* bitStream        */,
-    WebRtc_Word16  /* bitStreamLenByte */,
-    WebRtc_Word16* /* audio            */,
-    WebRtc_Word16* /* audioSamples     */,
-    WebRtc_Word8*  /* speechType       */)
-{
+int16_t ACMOpus::InternalInitDecoder(WebRtcACMCodecParams* codecParams) {
+  if (_decoderInstPtr != NULL) {
+    WebRtcOpus_DecoderFree(_decoderInstPtr);
+    _decoderInstPtr = NULL;
+  }
+  if (WebRtcOpus_DecoderCreate(&_decoderInstPtr,
+                               codecParams->codecInstant.channels) < 0) {
+    return -1;
+  }
+  return WebRtcOpus_DecoderInit(_decoderInstPtr);
+}
+
+int32_t ACMOpus::CodecDef(WebRtcNetEQ_CodecDef& codecDef,
+                          const CodecInst& codecInst) {
+  if (!_decoderInitialized) {
+    WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, _uniqueID,
+                 "CodeDef: Decoder uninitialized for Opus");
+    return -1;
+  }
+
+  // Fill up the structure by calling "SET_CODEC_PAR" & "SET_OPUS_FUNCTIONS."
+  // Then call NetEQ to add the codec to its database.
+  // TODO(tlegrand): Decoder is registered in NetEQ as a 32 kHz decoder,
+  // which holds until we have a full 48 kHz system and can remove the
+  // downsampling in the Opus decoder wrapper.
+  SET_CODEC_PAR((codecDef), kDecoderOpus, codecInst.pltype, _decoderInstPtr,
+                32000);
+  SET_OPUS_FUNCTIONS((codecDef));
+  return 0;
+}
+
+ACMGenericCodec* ACMOpus::CreateInstance(void) {
+  return NULL;
+}
+
+int16_t ACMOpus::InternalCreateEncoder() {
+  // Real encoder will be created in InternalInitEncoder.
+  return 0;
+}
+
+void ACMOpus::DestructEncoderSafe() {
+  if (_encoderInstPtr) {
+    WebRtcOpus_EncoderFree(_encoderInstPtr);
+    _encoderInstPtr = NULL;
+  }
+}
+
+int16_t ACMOpus::InternalCreateDecoder() {
+  // Real decoder will be created in InternalInitDecoder.
+  return 0;
+}
+
+void ACMOpus::DestructDecoderSafe() {
+  _decoderInitialized = false;
+  if (_decoderInstPtr) {
+    WebRtcOpus_DecoderFree(_decoderInstPtr);
+    _decoderInstPtr = NULL;
+  }
+}
+
+void ACMOpus::InternalDestructEncoderInst(void* ptrInst) {
+  if (ptrInst != NULL) {
+    WebRtcOpus_EncoderFree((OpusEncInst*) ptrInst);
+  }
+  return;
+}
+
+int16_t ACMOpus::SetBitRateSafe(const int32_t rate) {
+  if (rate < 6000 || rate > 510000) {
+    WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, _uniqueID,
+                 "SetBitRateSafe: Invalid rate Opus");
+    return -1;
+  }
+
+  _bitrate = rate;
+
+  // Apply the new rate to the encoder.
+  if (WebRtcOpus_SetBitRate(_encoderInstPtr, _bitrate) >= 0) {
+    _encoderParams.codecInstant.rate = _bitrate;
     return 0;
+  }
+
+  return -1;
 }
 
+#endif  // WEBRTC_CODEC_OPUS
 
-WebRtc_Word16
-ACMOPUS::InternalInitEncoder(
-    WebRtcACMCodecParams* codecParams)
-{
-    //set the bit rate and initialize
-    _myRate = codecParams->codecInstant.rate;
-    return SetBitRateSafe( (WebRtc_UWord32)_myRate);
-}
-
-
-WebRtc_Word16
-ACMOPUS::InternalInitDecoder(
-    WebRtcACMCodecParams* /* codecParams */)
-{
-    if (WebRtcOpus_DecoderInit(_decoderInstPtr) < 0)
-    {
-        WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, _uniqueID,
-                "InternalInitDecoder: init decoder failed for Opus");
-        return -1;
-    }
-    return 0;
-}
-
-
-WebRtc_Word32
-ACMOPUS::CodecDef(
-    WebRtcNetEQ_CodecDef& codecDef,
-    const CodecInst&      codecInst)
-{
-    if (!_decoderInitialized)
-    {
-        WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, _uniqueID,
-            "CodeDef: Decoder uninitialized for Opus");
-        return -1;
-    }
-
-    // Fill up the structure by calling
-    // "SET_CODEC_PAR" & "SET_G729_FUNCTION."
-    // Then call NetEQ to add the codec to it's
-    // database.
-    SET_CODEC_PAR((codecDef), kDecoderOpus, codecInst.pltype,
-        _decoderInstPtr, 16000);
-    SET_OPUS_FUNCTIONS((codecDef));
-    return 0;
-}
-
-
-ACMGenericCodec*
-ACMOPUS::CreateInstance(void)
-{
-    return NULL;
-}
-
-
-WebRtc_Word16
-ACMOPUS::InternalCreateEncoder()
-{
-    if (WebRtcOpus_CreateEnc(&_encoderInstPtr, _mySampFreq) < 0)
-    {
-        WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, _uniqueID,
-            "InternalCreateEncoder: create encoder failed for Opus");
-        return -1;
-    }
-    return 0;
-}
-
-
-void
-ACMOPUS::DestructEncoderSafe()
-{
-    _encoderExist = false;
-    _encoderInitialized = false;
-    if(_encoderInstPtr != NULL)
-    {
-        WebRtcOpus_FreeEnc(_encoderInstPtr);
-        _encoderInstPtr = NULL;
-    }
-}
-
-
-WebRtc_Word16
-ACMOPUS::InternalCreateDecoder()
-{
-   if (WebRtcOpus_CreateDec(&_decoderInstPtr, _mySampFreq) < 0)
-   {
-      WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, _uniqueID,
-        "InternalCreateDecoder: create decoder failed for Opus");
-       return -1;
-   }
-   return 0;
-}
-
-
-void
-ACMOPUS::DestructDecoderSafe()
-{
-    _decoderExist = false;
-    _decoderInitialized = false;
-    if(_decoderInstPtr != NULL)
-    {
-        WebRtcOpus_FreeDec(_decoderInstPtr);
-        _decoderInstPtr = NULL;
-    }
-}
-
-
-void
-ACMOPUS::InternalDestructEncoderInst(
-    void* ptrInst)
-{
-    if(ptrInst != NULL)
-    {
-        WebRtcOpus_FreeEnc((OPUS_inst_t*)ptrInst);
-    }
-    return;
-}
-
-WebRtc_Word16
-ACMOPUS::SetBitRateSafe(
-    const WebRtc_Word32 rate)
-{
-    //allowed rates: {8000, 12000, 14000, 16000, 18000, 20000,
-    //                22000, 24000, 26000, 28000, 30000, 32000};
-    switch(rate)
-    {
-    case 8000:
-        {
-            _myRate = 8000;
-            break;
-        }
-    case 12000:
-        {
-            _myRate = 12000;
-            break;
-        }
-    case 14000:
-        {
-            _myRate = 14000;
-            break;
-        }
-    case 16000:
-        {
-            _myRate = 16000;
-            break;
-        }
-    case 18000:
-        {
-            _myRate = 18000;
-            break;
-        }
-    case 20000:
-        {
-            _myRate = 20000;
-            break;
-        }
-    case 22000:
-        {
-            _myRate = 22000;
-            break;
-        }
-    case 24000:
-        {
-            _myRate = 24000;
-            break;
-        }
-    case 26000:
-        {
-            _myRate = 26000;
-            break;
-        }
-    case 28000:
-        {
-            _myRate = 28000;
-            break;
-        }
-    case 30000:
-        {
-            _myRate = 30000;
-            break;
-        }
-    case 32000:
-        {
-            _myRate = 32000;
-            break;
-        }
-    default:
-        {
-            WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, _uniqueID,
-                "SetBitRateSafe: Invalid rate Opus");
-            return -1;
-        }
-    }
-
-    // Re-init with new rate
-    if (WebRtcOpus_EncoderInit(_encoderInstPtr, _mySampFreq, _opusMode, _flagVBR) >= 0)
-    {
-        _encoderParams.codecInstant.rate = _myRate;
-        return 0;
-    }
-    else
-    {
-        return -1;
-    }
-}
-
-#endif
-
-} // namespace webrtc
+}  // namespace webrtc
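The rewritten acm_opus.cc above gives the full shape of the new opus_interface.h wrapper. A hedged usage sketch, restricted to the calls that actually appear in this diff (argument order copied from the call sites; kMaxPayloadBytes is a stand-in for ACM's MAX_PAYLOAD_SIZE_BYTE, and error handling is reduced to early returns):

#include <stdint.h>
#include "opus_interface.h"  // OpusEncInst and the WebRtcOpus_* calls used above

// Encode one 20 ms mono frame at 48 kHz (960 samples), creating the
// encoder lazily, as InternalInitEncoder/InternalEncode do above.
int16_t EncodeOneOpusFrame(OpusEncInst** enc,
                           int16_t* audio,  // 960 samples
                           uint8_t* payload, int16_t kMaxPayloadBytes) {
  if (*enc == NULL) {
    if (WebRtcOpus_EncoderCreate(enc, 1) < 0)  // 1 channel (mono)
      return -1;
    if (WebRtcOpus_SetBitRate(*enc, 32000) < 0)  // within [6000, 510000]
      return -1;
  }
  int16_t len = WebRtcOpus_Encode(*enc, audio, 960, kMaxPayloadBytes, payload);
  return (len < 0) ? -1 : len;  // bytes written to |payload|
}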
diff --git a/modules/audio_coding/main/source/acm_opus.h b/modules/audio_coding/main/source/acm_opus.h
index c6832fa..d8baa30 100644
--- a/modules/audio_coding/main/source/acm_opus.h
+++ b/modules/audio_coding/main/source/acm_opus.h
@@ -12,68 +12,48 @@
 #define WEBRTC_MODULES_AUDIO_CODING_MAIN_SOURCE_ACM_OPUS_H_
 
 #include "acm_generic_codec.h"
+#include "opus_interface.h"
+#include "resampler.h"
 
-// forward declaration
-struct OPUS_inst_t_;
-struct OPUS_inst_t_;
+namespace webrtc {
 
-namespace webrtc
-{
+class ACMOpus : public ACMGenericCodec {
+ public:
+  ACMOpus(int16_t codecID);
+  ~ACMOpus();
 
-class ACMOPUS: public ACMGenericCodec
-{
-public:
-    ACMOPUS(WebRtc_Word16 codecID);
-    ~ACMOPUS();
-    // for FEC
-    ACMGenericCodec* CreateInstance(void);
+  ACMGenericCodec* CreateInstance(void);
 
-    WebRtc_Word16 InternalEncode(
-        WebRtc_UWord8* bitstream,
-        WebRtc_Word16* bitStreamLenByte);
+  int16_t InternalEncode(uint8_t* bitstream, int16_t* bitStreamLenByte);
 
-    WebRtc_Word16 InternalInitEncoder(
-        WebRtcACMCodecParams *codecParams);
+  int16_t InternalInitEncoder(WebRtcACMCodecParams *codecParams);
 
-    WebRtc_Word16 InternalInitDecoder(
-        WebRtcACMCodecParams *codecParams);
+  int16_t InternalInitDecoder(WebRtcACMCodecParams *codecParams);
 
-protected:
-    WebRtc_Word16 DecodeSafe(
-        WebRtc_UWord8* bitStream,
-        WebRtc_Word16  bitStreamLenByte,
-        WebRtc_Word16* audio,
-        WebRtc_Word16* audioSamples,
-        WebRtc_Word8*  speechType);
+ protected:
+  int16_t DecodeSafe(uint8_t* bitStream, int16_t bitStreamLenByte,
+                     int16_t* audio, int16_t* audioSamples, int8_t* speechType);
 
-    WebRtc_Word32 CodecDef(
-        WebRtcNetEQ_CodecDef& codecDef,
-        const CodecInst& codecInst);
+  int32_t CodecDef(WebRtcNetEQ_CodecDef& codecDef, const CodecInst& codecInst);
 
-    void DestructEncoderSafe();
+  void DestructEncoderSafe();
 
-    void DestructDecoderSafe();
+  void DestructDecoderSafe();
 
-    WebRtc_Word16 InternalCreateEncoder();
+  int16_t InternalCreateEncoder();
 
-    WebRtc_Word16 InternalCreateDecoder();
+  int16_t InternalCreateDecoder();
 
-    void InternalDestructEncoderInst(
-        void* ptrInst);
+  void InternalDestructEncoderInst(void* ptrInst);
 
-    WebRtc_Word16 SetBitRateSafe(
-        const WebRtc_Word32 rate);
+  int16_t SetBitRateSafe(const int32_t rate);
 
-    OPUS_inst_t_* _encoderInstPtr;
-    OPUS_inst_t_* _decoderInstPtr;
-
-    WebRtc_UWord16    _mySampFreq;
-    WebRtc_UWord16    _myRate;
-    WebRtc_Word16     _opusMode;
-    WebRtc_Word16     _flagVBR;
-
+  OpusEncInst* _encoderInstPtr;
+  OpusDecInst* _decoderInstPtr;
+  uint16_t _sampleFreq;
+  uint16_t _bitrate;
 };
 
-} // namespace webrtc
+}  // namespace webrtc
 
 #endif  // WEBRTC_MODULES_AUDIO_CODING_MAIN_SOURCE_ACM_OPUS_H_
diff --git a/modules/audio_coding/main/source/audio_coding_module.gypi b/modules/audio_coding/main/source/audio_coding_module.gypi
index bc9ea7d..f62ba36 100644
--- a/modules/audio_coding/main/source/audio_coding_module.gypi
+++ b/modules/audio_coding/main/source/audio_coding_module.gypi
@@ -15,6 +15,7 @@
       'iLBC',
       'iSAC',
       'iSACFix',
+      'webrtc_opus',
       'PCM16B',
       'NetEq',
       '<(webrtc_root)/common_audio/common_audio.gyp:resampler',
@@ -37,11 +38,13 @@
       'include_dirs': [
         '../interface',
         '../../../interface',
+        '../../codecs/opus/interface',
       ],
       'direct_dependent_settings': {
         'include_dirs': [
         '../interface',
         '../../../interface',
+        '../../codecs/opus/interface',
         ],
       },
       'sources': [
diff --git a/modules/audio_coding/main/source/audio_coding_module_impl.cc b/modules/audio_coding/main/source/audio_coding_module_impl.cc
index c1341b9..0a399ba 100644
--- a/modules/audio_coding/main/source/audio_coding_module_impl.cc
+++ b/modules/audio_coding/main/source/audio_coding_module_impl.cc
@@ -45,6 +45,7 @@
       _cng_nb_pltype(255),
       _cng_wb_pltype(255),
       _cng_swb_pltype(255),
+      _cng_fb_pltype(255),
       _red_pltype(255),
       _vadEnabled(false),
       _dtxEnabled(false),
@@ -112,6 +113,8 @@
         _cng_wb_pltype = static_cast<uint8_t>(ACMCodecDB::database_[i].pltype);
       } else if (ACMCodecDB::database_[i].plfreq == 32000) {
         _cng_swb_pltype = static_cast<uint8_t>(ACMCodecDB::database_[i].pltype);
+      } else if (ACMCodecDB::database_[i].plfreq == 48000) {
+        _cng_fb_pltype = static_cast<uint8_t>(ACMCodecDB::database_[i].pltype);
       }
     }
   }
@@ -320,6 +323,12 @@
           _isFirstRED = true;
           break;
         }
+        case kPassiveDTXFB: {
+          current_payload_type = _cng_fb_pltype;
+          frame_type = kAudioFrameCN;
+          _isFirstRED = true;
+          break;
+        }
       }
       has_data_to_send = true;
       _previousPayloadType = current_payload_type;
@@ -612,6 +621,10 @@
         _cng_swb_pltype = static_cast<uint8_t>(send_codec.pltype);
         break;
       }
+      case 48000: {
+        _cng_fb_pltype = static_cast<uint8_t>(send_codec.pltype);
+        break;
+      }
       default: {
         WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceAudioCoding, _id,
                      "RegisterSendCodec() failed, invalid frequency for CNG "
@@ -1254,6 +1267,9 @@
   CriticalSectionScoped lock(_acmCritSect);
   if (DecoderParamByPlType(_lastRecvAudioCodecPlType, codec_params) < 0) {
     return _netEq.CurrentSampFreqHz();
+  } else if (codec_params.codecInstant.plfreq == 48000) {
+    // TODO(tlegrand): Remove this option when we have full 48 kHz support.
+    return 32000;
   } else {
     return codec_params.codecInstant.plfreq;
   }
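The receive-frequency hunk above encodes a temporary rule: any 48 kHz receive codec is reported as 32 kHz until full 48 kHz support lands. The rule in isolation:

// Temporary reporting rule from the hunk above; plfreq comes from the
// registered codec's parameters.
int ReportedReceiveFrequencyHz(int plfreq) {
  return (plfreq == 48000) ? 32000 : plfreq;
}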
diff --git a/modules/audio_coding/main/source/audio_coding_module_impl.h b/modules/audio_coding/main/source/audio_coding_module_impl.h
index 145faf6..0e7f2f3 100644
--- a/modules/audio_coding/main/source/audio_coding_module_impl.h
+++ b/modules/audio_coding/main/source/audio_coding_module_impl.h
@@ -279,6 +279,7 @@
   uint8_t _cng_nb_pltype;
   uint8_t _cng_wb_pltype;
   uint8_t _cng_swb_pltype;
+  uint8_t _cng_fb_pltype;
   uint8_t _red_pltype;
   bool _vadEnabled;
   bool _dtxEnabled;
diff --git a/modules/audio_coding/main/test/TestAllCodecs.cc b/modules/audio_coding/main/test/TestAllCodecs.cc
index b312390..89a9829 100644
--- a/modules/audio_coding/main/test/TestAllCodecs.cc
+++ b/modules/audio_coding/main/test/TestAllCodecs.cc
@@ -614,6 +614,28 @@
   Run(channel_a_to_b_);
   outfile_b_.Close();
 #endif
+#ifdef WEBRTC_CODEC_OPUS
+  if (test_mode_ != 0) {
+    printf("===============================================================\n");
+  }
+  test_count_++;
+  OpenOutFile(test_count_);
+  char codec_opus[] = "OPUS";
+  RegisterSendCodec('A', codec_opus, 48000, 6000, 960, -1);
+  Run(channel_a_to_b_);
+  RegisterSendCodec('A', codec_opus, 48000, 20000, 960, -1);
+  Run(channel_a_to_b_);
+  RegisterSendCodec('A', codec_opus, 48000, 32000, 960, -1);
+  Run(channel_a_to_b_);
+  RegisterSendCodec('A', codec_opus, 48000, 48000, 960, -1);
+  Run(channel_a_to_b_);
+  RegisterSendCodec('A', codec_opus, 48000, 64000, 960, -1);
+  Run(channel_a_to_b_);
+  RegisterSendCodec('A', codec_opus, 48000, 96000, 960, -1);
+  Run(channel_a_to_b_);
+  RegisterSendCodec('A', codec_opus, 48000, 500000, 960, -1);
+  Run(channel_a_to_b_);
+#endif
   if (test_mode_ != 0) {
     printf("===============================================================\n");
 
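The TestAllCodecs additions register Opus with 960-sample frames, i.e. 20 ms at 48 kHz, and sweep the rate from 6 kbps up to Opus's 510 kbps ceiling (500 kbps in the last case). A quick arithmetic check of those settings (Opus is VBR, so per-packet sizes are approximate):

#include <cassert>

int main() {
  const int kSampleRateHz = 48000;
  const int kFrameSamples = 960;
  assert(kFrameSamples * 1000 / kSampleRateHz == 20);  // 20 ms frames
  // At a nominal 32 kbps, one 20 ms packet carries about 80 bytes.
  assert(32000 / 8 * 20 / 1000 == 80);
  return 0;
}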
diff --git a/modules/audio_coding/main/test/TestVADDTX.cc b/modules/audio_coding/main/test/TestVADDTX.cc
index 793ab57..fcca374 100644
--- a/modules/audio_coding/main/test/TestVADDTX.cc
+++ b/modules/audio_coding/main/test/TestVADDTX.cc
@@ -138,6 +138,21 @@
     _outFileB.Close();
 
 #endif
+#ifdef WEBRTC_CODEC_OPUS
+    // Open output file
+    OpenOutFile(testCntr++);
+
+    // Register Opus as send codec
+    char nameOPUS[] = "opus";
+    RegisterSendCodec('A', nameOPUS);
+
+    // Run the five test cases
+    runTestCases();
+
+    // Close file
+    _outFileB.Close();
+
+#endif
     if(_testMode) {
         printf("Done!\n");
     }
diff --git a/modules/audio_coding/neteq/codec_db.c b/modules/audio_coding/neteq/codec_db.c
index 5369cfd..ebc9216 100644
--- a/modules/audio_coding/neteq/codec_db.c
+++ b/modules/audio_coding/neteq/codec_db.c
@@ -84,7 +84,7 @@
 #ifdef NETEQ_32KHZ_WIDEBAND
     &&(codec_fs!=32000)
 #endif
-#ifdef NETEQ_48KHZ_WIDEBAND
+#if defined(NETEQ_48KHZ_WIDEBAND) || defined(NETEQ_OPUS_CODEC)
     &&(codec_fs!=48000)
 #endif
     )
@@ -114,6 +114,9 @@
 #ifdef NETEQ_ISAC_SWB_CODEC
         case kDecoderISACswb :
 #endif
+#ifdef NETEQ_OPUS_CODEC
+        case kDecoderOpus :
+#endif
 #ifdef NETEQ_G722_CODEC
         case kDecoderG722 :
         case kDecoderG722_2ch :
@@ -458,6 +461,9 @@
 #ifdef NETEQ_ISAC_SWB_CODEC
         case kDecoderISACswb:
 #endif
+#ifdef NETEQ_OPUS_CODEC
+        case kDecoderOpus:
+#endif
 #ifdef NETEQ_ARBITRARY_CODEC
         case kDecoderArbitrary:
 #endif
diff --git a/modules/audio_coding/neteq/interface/webrtc_neteq.h b/modules/audio_coding/neteq/interface/webrtc_neteq.h
index 39f6595..9fc8297 100644
--- a/modules/audio_coding/neteq/interface/webrtc_neteq.h
+++ b/modules/audio_coding/neteq/interface/webrtc_neteq.h
@@ -62,6 +62,7 @@
     kDecoderG722_1C_24,
     kDecoderG722_1C_32,
     kDecoderG722_1C_48,
+    kDecoderOpus,
     kDecoderSPEEX_8,
     kDecoderSPEEX_16,
     kDecoderCELT_32,
diff --git a/modules/audio_coding/neteq/interface/webrtc_neteq_help_macros.h b/modules/audio_coding/neteq/interface/webrtc_neteq_help_macros.h
index c6f19bb..d885faa 100644
--- a/modules/audio_coding/neteq/interface/webrtc_neteq_help_macros.h
+++ b/modules/audio_coding/neteq/interface/webrtc_neteq_help_macros.h
@@ -151,7 +151,6 @@
                     inst.funcUpdBWEst=NULL; \
                     inst.funcGetErrorCode=NULL;
 
-
 #define SET_PCM16B_SWB48_FUNCTIONS(inst) \
                     inst.funcDecode=(WebRtcNetEQ_FuncDecode)WebRtcPcm16b_DecodeW16; \
                     inst.funcDecodeRCU=NULL; \
@@ -317,6 +316,17 @@
                     inst.funcUpdBWEst=NULL; \
                     inst.funcGetErrorCode=NULL;
 
+#define SET_OPUS_FUNCTIONS(inst) \
+                    inst.funcDecode=(WebRtcNetEQ_FuncDecode)WebRtcOpus_Decode; \
+                    inst.funcDecodeRCU=NULL; \
+                    inst.funcDecodePLC=NULL; \
+                    inst.funcDecodeInit=(WebRtcNetEQ_FuncDecodeInit)WebRtcOpus_DecoderInit; \
+                    inst.funcAddLatePkt=NULL; \
+                    inst.funcGetMDinfo=NULL; \
+                    inst.funcGetPitch=NULL; \
+                    inst.funcUpdBWEst=NULL; \
+                    inst.funcGetErrorCode=NULL;
+
 #define SET_SPEEX_FUNCTIONS(inst) \
                     inst.funcDecode=(WebRtcNetEQ_FuncDecode)WebRtcSpeex_Decode; \
                     inst.funcDecodeRCU=NULL; \
diff --git a/modules/audio_coding/neteq/neteq_defines.h b/modules/audio_coding/neteq/neteq_defines.h
index 318e6bb..79cb144 100644
--- a/modules/audio_coding/neteq/neteq_defines.h
+++ b/modules/audio_coding/neteq/neteq_defines.h
@@ -77,6 +77,8 @@
  *
  * NETEQ_G722_1C_CODEC            Enable G722.1 Annex C
  *
+ * NETEQ_OPUS_CODEC               Enable Opus
+ *
  * NETEQ_SPEEX_CODEC              Enable Speex (at 8 and 16 kHz sample rate)
  *
  * NETEQ_CELT_CODEC               Enable Celt (at 32 kHz sample rate)
@@ -244,6 +246,7 @@
     #define NETEQ_G729_CODEC
     #define NETEQ_G726_CODEC
     #define NETEQ_GSMFR_CODEC
+    #define NETEQ_OPUS_CODEC
     #define NETEQ_AMR_CODEC
 #endif
 
@@ -252,6 +255,7 @@
     #define NETEQ_G722_CODEC
     #define NETEQ_G722_1_CODEC
     #define NETEQ_G729_1_CODEC
+    #define NETEQ_OPUS_CODEC
     #define NETEQ_SPEEX_CODEC
     #define NETEQ_AMRWB_CODEC
     #define NETEQ_WIDEBAND
@@ -262,6 +266,7 @@
     #define NETEQ_32KHZ_WIDEBAND
     #define NETEQ_G722_1C_CODEC
     #define NETEQ_CELT_CODEC
+    #define NETEQ_OPUS_CODEC
 #endif
 
 #if (defined(NETEQ_VOICEENGINE_CODECS))
@@ -295,6 +300,8 @@
     #define NETEQ_G722_1C_CODEC
     #define NETEQ_CELT_CODEC
 
+    /* Fullband 48 kHz codecs */
+    #define NETEQ_OPUS_CODEC
 #endif 
 
 #if (defined(NETEQ_ALL_CODECS))
@@ -331,21 +338,26 @@
 
     /* Super wideband 48kHz codecs */
     #define NETEQ_48KHZ_WIDEBAND
+    #define NETEQ_OPUS_CODEC
 #endif
 
 /* Max output size from decoding one frame */
 #if defined(NETEQ_48KHZ_WIDEBAND)
-    #define NETEQ_MAX_FRAME_SIZE     2880    /* 60 ms super wideband */
-    #define NETEQ_MAX_OUTPUT_SIZE    3600    /* 60+15 ms super wideband (60 ms decoded + 15 ms for merge overlap) */
+    #define NETEQ_MAX_FRAME_SIZE 5760  /* 120 ms super wideband */
+    #define NETEQ_MAX_OUTPUT_SIZE 6480  /* 120+15 ms super wideband (120 ms
+                                         * decoded + 15 ms for merge overlap) */
 #elif defined(NETEQ_32KHZ_WIDEBAND)
-    #define NETEQ_MAX_FRAME_SIZE     1920    /* 60 ms super wideband */
-    #define NETEQ_MAX_OUTPUT_SIZE    2400    /* 60+15 ms super wideband (60 ms decoded + 15 ms for merge overlap) */
+    #define NETEQ_MAX_FRAME_SIZE 3840  /* 120 ms super wideband */
+    #define NETEQ_MAX_OUTPUT_SIZE 4320  /* 120+15 ms super wideband (120 ms
+                                         * decoded + 15 ms for merge overlap) */
 #elif defined(NETEQ_WIDEBAND)
-    #define NETEQ_MAX_FRAME_SIZE     960        /* 60 ms wideband */
-    #define NETEQ_MAX_OUTPUT_SIZE    1200    /* 60+15 ms wideband (60 ms decoded + 10 ms for merge overlap) */
+    #define NETEQ_MAX_FRAME_SIZE 1920  /* 120 ms wideband */
+    #define NETEQ_MAX_OUTPUT_SIZE 2160  /* 120+15 ms wideband (120 ms decoded +
+                                         * 15 ms for merge overlap) */
 #else
-    #define NETEQ_MAX_FRAME_SIZE     480        /* 60 ms narrowband */
-    #define NETEQ_MAX_OUTPUT_SIZE    600        /* 60+15 ms narrowband (60 ms decoded + 10 ms for merge overlap) */
+    #define NETEQ_MAX_FRAME_SIZE 960  /* 120 ms narrowband */
+    #define NETEQ_MAX_OUTPUT_SIZE 1080  /* 120+15 ms narrowband (120 ms decoded
+                                         * + 15 ms for merge overlap) */
 #endif
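The doubled limits above follow directly from supporting 120 ms of decoded audio plus 15 ms of merge overlap: samples = (fs / 1000) x ms. A compile-and-run check of each pair of constants:

#include <cassert>

int main() {
  assert(48 * 120 == 5760 && 48 * 135 == 6480);  // 48 kHz
  assert(32 * 120 == 3840 && 32 * 135 == 4320);  // 32 kHz
  assert(16 * 120 == 1920 && 16 * 135 == 2160);  // 16 kHz
  assert(8 * 120 == 960 && 8 * 135 == 1080);     // 8 kHz
  return 0;
}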
 
 
diff --git a/modules/audio_coding/neteq/packet_buffer.c b/modules/audio_coding/neteq/packet_buffer.c
index 8f09b07..7fbea58 100644
--- a/modules/audio_coding/neteq/packet_buffer.c
+++ b/modules/audio_coding/neteq/packet_buffer.c
@@ -578,6 +578,11 @@
             codecBytes = 1560; /* 240ms @ 52kbps (30ms frames) */
             codecBuffers = 8;
         }
+        else if (codecID[i] == kDecoderOpus)
+        {
+            codecBytes = 15300; /* 240ms @ 510kbps (60ms frames) */
+            codecBuffers = 30;  /* Replicating the value for PCMu/a */
+        }
         else if ((codecID[i] == kDecoderPCM16B) ||
             (codecID[i] == kDecoderPCM16B_2ch))
         {
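The 15300-byte figure in the packet buffer hunk above is exactly 240 ms of audio at Opus's 510 kbps maximum rate:

#include <cassert>

int main() {
  const int kMaxRateBps = 510000;
  const int kBufferedMs = 240;
  assert(kMaxRateBps / 8 * kBufferedMs / 1000 == 15300);  // bytes
  return 0;
}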
diff --git a/modules/audio_coding/neteq/recin.c b/modules/audio_coding/neteq/recin.c
index bce7c48..399250d 100644
--- a/modules/audio_coding/neteq/recin.c
+++ b/modules/audio_coding/neteq/recin.c
@@ -202,6 +202,13 @@
             /* Get CNG sample rate */
             WebRtc_UWord16 fsCng = WebRtcNetEQ_DbGetSampleRate(&MCU_inst->codec_DB_inst,
                 RTPpacket[i_k].payloadType);
+
+            /* Force the sampling frequency for 48000 Hz CNG to 32000 Hz. */
+            /* TODO(tlegrand): remove limitation once ACM has full 48 kHz
+             * support. */
+            if (fsCng > 32000) {
+                fsCng = 32000;
+            }
             if ((fsCng != MCU_inst->fs) && (fsCng > 8000))
             {
                 /*
@@ -370,10 +377,29 @@
             MCU_inst->scalingFactor = kTSscalingTwo;
             break;
         }
+        case kDecoderOpus:
+        {
+            /* We resample Opus internally to 32 kHz, but timestamps
+             * are counted at 48 kHz. So there are two output samples
+             * per three RTP timestamp ticks. */
+            MCU_inst->scalingFactor = kTSscalingTwoThirds;
+            break;
+        }
+
         case kDecoderAVT:
         case kDecoderCNG:
         {
-            /* do not change the timestamp scaling settings */
+            /* TODO(tlegrand): remove scaling once ACM has full 48 kHz
+             * support. */
+            WebRtc_UWord16 sample_freq =
+                WebRtcNetEQ_DbGetSampleRate(&MCU_inst->codec_DB_inst,
+                                            rtpPayloadType);
+            if (sample_freq == 48000) {
+              MCU_inst->scalingFactor = kTSscalingTwoThirds;
+            }
+
+            /* For sample_freq <= 32 kHz, do not change the timestamp scaling
+             * settings. */
             break;
         }
         default:
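The new kTSscalingTwoThirds cases above encode the 48-to-32 kHz relationship: RTP timestamps tick at 48 kHz while the decoder emits 32 kHz samples, so every 3 timestamp ticks map to 2 output samples. A tiny check:

#include <cassert>

int main() {
  const int kRtpClockHz = 48000;    // Opus RTP timestamp rate
  const int kOutputRateHz = 32000;  // internal resampled output
  assert(kRtpClockHz * 2 / 3 == kOutputRateHz);
  // E.g. a 960-tick (20 ms) packet yields 640 output samples.
  assert(960 * 2 / 3 == 640);
  return 0;
}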
diff --git a/modules/audio_coding/neteq/recout.c b/modules/audio_coding/neteq/recout.c
index eb80f2d..1f47945 100644
--- a/modules/audio_coding/neteq/recout.c
+++ b/modules/audio_coding/neteq/recout.c
@@ -41,8 +41,8 @@
 /* Scratch usage:
 
  Type           Name                            size             startpos      endpos
- WebRtc_Word16  pw16_NetEqAlgorithm_buffer      600*fs/8000      0             600*fs/8000-1
- struct         dspInfo                         6                600*fs/8000   605*fs/8000
+ WebRtc_Word16  pw16_NetEqAlgorithm_buffer      1080*fs/8000     0             1080*fs/8000-1
+ struct         dspInfo                         6                1080*fs/8000  1085*fs/8000
 
  func           WebRtcNetEQ_Normal              40+495*fs/8000   0             39+495*fs/8000
  func           WebRtcNetEQ_Merge               40+496*fs/8000   0             39+496*fs/8000
@@ -50,7 +50,7 @@
  func           WebRtcNetEQ_Accelerate          210              240*fs/8000   209+240*fs/8000
  func           WebRtcNetEQ_BGNUpdate           69               480*fs/8000   68+480*fs/8000
 
- Total:  605*fs/8000
+ Total:  1086*fs/8000
  */
 
 #define SCRATCH_ALGORITHM_BUFFER            0
@@ -58,35 +58,35 @@
 #define SCRATCH_NETEQ_MERGE                 0
 
 #if (defined(NETEQ_48KHZ_WIDEBAND)) 
-#define SCRATCH_DSP_INFO                     3600
+#define SCRATCH_DSP_INFO                     6480
 #define SCRATCH_NETEQ_ACCELERATE            1440
 #define SCRATCH_NETEQ_BGN_UPDATE            2880
 #define SCRATCH_NETEQ_EXPAND                756
 #elif (defined(NETEQ_32KHZ_WIDEBAND)) 
-#define SCRATCH_DSP_INFO                     2400
+#define SCRATCH_DSP_INFO                     4320
 #define SCRATCH_NETEQ_ACCELERATE            960
 #define SCRATCH_NETEQ_BGN_UPDATE            1920
 #define SCRATCH_NETEQ_EXPAND                504
 #elif (defined(NETEQ_WIDEBAND)) 
-#define SCRATCH_DSP_INFO                     1200
+#define SCRATCH_DSP_INFO                     2160
 #define SCRATCH_NETEQ_ACCELERATE            480
 #define SCRATCH_NETEQ_BGN_UPDATE            960
 #define SCRATCH_NETEQ_EXPAND                252
 #else    /* NB */
-#define SCRATCH_DSP_INFO                     600
+#define SCRATCH_DSP_INFO                     1080
 #define SCRATCH_NETEQ_ACCELERATE            240
 #define SCRATCH_NETEQ_BGN_UPDATE            480
 #define SCRATCH_NETEQ_EXPAND                126
 #endif
 
 #if (defined(NETEQ_48KHZ_WIDEBAND)) 
-#define SIZE_SCRATCH_BUFFER                 3636
+#define SIZE_SCRATCH_BUFFER                 6516
 #elif (defined(NETEQ_32KHZ_WIDEBAND)) 
-#define SIZE_SCRATCH_BUFFER                 2424
+#define SIZE_SCRATCH_BUFFER                 4344
 #elif (defined(NETEQ_WIDEBAND)) 
-#define SIZE_SCRATCH_BUFFER                 1212
+#define SIZE_SCRATCH_BUFFER                 2172
 #else    /* NB */
-#define SIZE_SCRATCH_BUFFER                 606
+#define SIZE_SCRATCH_BUFFER                 1086
 #endif
 
 #ifdef NETEQ_DELAY_LOGGING
@@ -110,13 +110,15 @@
 #ifdef SCRATCH
     char pw8_ScratchBuffer[((SIZE_SCRATCH_BUFFER + 1) * 2)];
     WebRtc_Word16 *pw16_scratchPtr = (WebRtc_Word16*) pw8_ScratchBuffer;
-    WebRtc_Word16 pw16_decoded_buffer[NETEQ_MAX_FRAME_SIZE];
+    /* pad with 240*fs_mult to match the overflow guard below */
+    WebRtc_Word16 pw16_decoded_buffer[NETEQ_MAX_FRAME_SIZE+240*6];
     WebRtc_Word16 *pw16_NetEqAlgorithm_buffer = pw16_scratchPtr
         + SCRATCH_ALGORITHM_BUFFER;
     DSP2MCU_info_t *dspInfo = (DSP2MCU_info_t*) (pw16_scratchPtr + SCRATCH_DSP_INFO);
 #else
-    WebRtc_Word16 pw16_decoded_buffer[NETEQ_MAX_FRAME_SIZE];
-    WebRtc_Word16 pw16_NetEqAlgorithm_buffer[NETEQ_MAX_OUTPUT_SIZE];
+    /* pad with 240*fs_mult to match the overflow guard below */
+    WebRtc_Word16 pw16_decoded_buffer[NETEQ_MAX_FRAME_SIZE+240*6];
+    WebRtc_Word16 pw16_NetEqAlgorithm_buffer[NETEQ_MAX_OUTPUT_SIZE+240*6];
     DSP2MCU_info_t dspInfoStruct;
     DSP2MCU_info_t *dspInfo = &dspInfoStruct;
 #endif
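The scratch constants above scale with fs/8000: the algorithm buffer grows to 1080 words per 8 kHz-equivalent (the new NETEQ_MAX_OUTPUT_SIZE at narrowband), dspInfo sits right after it, and the total is 1086 words per multiple. Cross-checking the four #define pairs:

#include <cassert>

int main() {
  // SCRATCH_DSP_INFO = 1080 * fs/8000, SIZE_SCRATCH_BUFFER = 1086 * fs/8000.
  assert(1080 * 6 == 6480 && 1086 * 6 == 6516);  // 48 kHz
  assert(1080 * 4 == 4320 && 1086 * 4 == 4344);  // 32 kHz
  assert(1080 * 2 == 2160 && 1086 * 2 == 2172);  // 16 kHz
  assert(1080 * 1 == 1080 && 1086 * 1 == 1086);  // 8 kHz
  return 0;
}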
diff --git a/modules/audio_coding/neteq/signal_mcu.c b/modules/audio_coding/neteq/signal_mcu.c
index b28f39c..2cccf1a 100644
--- a/modules/audio_coding/neteq/signal_mcu.c
+++ b/modules/audio_coding/neteq/signal_mcu.c
@@ -319,7 +319,13 @@
                 WebRtc_UWord16 tempFs;
 
                 tempFs = WebRtcNetEQ_DbGetSampleRate(&inst->codec_DB_inst, payloadType);
-                if (tempFs > 0)
+                /* TODO(tlegrand): Remove this limitation once ACM has full
+                 * 48 kHz support. */
+                if (tempFs > 32000)
+                {
+                    inst->fs = 32000;
+                }
+                else if (tempFs > 0)
                 {
                     inst->fs = tempFs;
                 }
diff --git a/modules/audio_conference_mixer/interface/audio_conference_mixer.h b/modules/audio_conference_mixer/interface/audio_conference_mixer.h
index 4ece1bf..9ffac2d 100644
--- a/modules/audio_conference_mixer/interface/audio_conference_mixer.h
+++ b/modules/audio_conference_mixer/interface/audio_conference_mixer.h
@@ -30,6 +30,7 @@
         kNbInHz           = 8000,
         kWbInHz           = 16000,
         kSwbInHz          = 32000,
+        kFbInHz           = 48000,
         kLowestPossible   = -1,
         kDefaultFrequency = kWbInHz
     };
diff --git a/modules/audio_conference_mixer/source/audio_conference_mixer_impl.cc b/modules/audio_conference_mixer/source/audio_conference_mixer_impl.cc
index 851642c..1fdd9dc 100644
--- a/modules/audio_conference_mixer/source/audio_conference_mixer_impl.cc
+++ b/modules/audio_conference_mixer/source/audio_conference_mixer_impl.cc
@@ -282,6 +282,12 @@
                     SetOutputFrequency(kSwbInHz);
                 }
                 break;
+            case 48000:
+                if(OutputFrequency() != kFbInHz)
+                {
+                    SetOutputFrequency(kFbInHz);
+                }
+                break;
             default:
                 assert(false);
 
diff --git a/modules/audio_device/ios/audio_device_ios.cc b/modules/audio_device/ios/audio_device_ios.cc
index b73edc2..264556c 100644
--- a/modules/audio_device/ios/audio_device_ios.cc
+++ b/modules/audio_device/ios/audio_device_ios.cc
@@ -75,7 +75,7 @@
     WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
                  "%s", __FUNCTION__);
 
-    CriticalSectionScoped lock(_critSect);
+    CriticalSectionScoped lock(&_critSect);
 
     _ptrAudioBuffer = audioBuffer;
 
@@ -98,7 +98,7 @@
     WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
                  "%s", __FUNCTION__);
 
-    CriticalSectionScoped lock(_critSect);
+    CriticalSectionScoped lock(&_critSect);
 
     if (_initialized) {
         return 0;
@@ -199,7 +199,7 @@
     WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
                  "%s", __FUNCTION__);
 
-    CriticalSectionScoped lock(_critSect);
+    CriticalSectionScoped lock(&_critSect);
 
     if (!_initialized) {
         WEBRTC_TRACE(kTraceError, kTraceAudioDevice,
@@ -253,7 +253,7 @@
     WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id,
                  "%s", __FUNCTION__);
 
-    CriticalSectionScoped lock(_critSect);
+    CriticalSectionScoped lock(&_critSect);
 
     if (!_initialized) {
         WEBRTC_TRACE(kTraceError, kTraceAudioDevice,
@@ -799,7 +799,7 @@
 WebRtc_Word32 AudioDeviceIPhone::InitPlayout() {
     WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, "%s", __FUNCTION__);
 
-    CriticalSectionScoped lock(_critSect);
+    CriticalSectionScoped lock(&_critSect);
 
     if (!_initialized) {
         WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, "  Not initialized");
@@ -855,7 +855,7 @@
 WebRtc_Word32 AudioDeviceIPhone::InitRecording() {
     WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, "%s", __FUNCTION__);
 
-    CriticalSectionScoped lock(_critSect);
+    CriticalSectionScoped lock(&_critSect);
 
     if (!_initialized) {
         WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
@@ -913,7 +913,7 @@
 WebRtc_Word32 AudioDeviceIPhone::StartRecording() {
     WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, "%s", __FUNCTION__);
 
-    CriticalSectionScoped lock(_critSect);
+    CriticalSectionScoped lock(&_critSect);
 
     if (!_recIsInitialized) {
         WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
@@ -960,7 +960,7 @@
 WebRtc_Word32 AudioDeviceIPhone::StopRecording() {
     WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, "%s", __FUNCTION__);
 
-    CriticalSectionScoped lock(_critSect);
+    CriticalSectionScoped lock(&_critSect);
 
     if (!_recIsInitialized) {
         WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
@@ -992,7 +992,7 @@
     // This lock is (among other things) needed to avoid concurrency issues
     // with capture thread
     // shutting down AU Remote IO
-    CriticalSectionScoped lock(_critSect);
+    CriticalSectionScoped lock(&_critSect);
 
     if (!_playIsInitialized) {
         WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
@@ -1035,7 +1035,7 @@
 WebRtc_Word32 AudioDeviceIPhone::StopPlayout() {
     WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, "%s", __FUNCTION__);
 
-    CriticalSectionScoped lock(_critSect);
+    CriticalSectionScoped lock(&_critSect);
 
     if (!_playIsInitialized) {
         WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
@@ -1072,7 +1072,7 @@
 WebRtc_Word32 AudioDeviceIPhone::ResetAudioDevice() {
     WEBRTC_TRACE(kTraceModuleCall, kTraceAudioDevice, _id, "%s", __FUNCTION__);
 
-    CriticalSectionScoped lock(_critSect);
+    CriticalSectionScoped lock(&_critSect);
 
     if (!_playIsInitialized && !_recIsInitialized) {
         WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
diff --git a/modules/audio_device/ios/audio_device_utility_ios.cc b/modules/audio_device/ios/audio_device_utility_ios.cc
index 87c7a50..965d13f 100644
--- a/modules/audio_device/ios/audio_device_utility_ios.cc
+++ b/modules/audio_device/ios/audio_device_utility_ios.cc
@@ -27,7 +27,7 @@
 AudioDeviceUtilityIPhone::~AudioDeviceUtilityIPhone() {
     WEBRTC_TRACE(kTraceMemory, kTraceAudioDevice, _id,
                  "%s destroyed", __FUNCTION__);
-    CriticalSectionScoped lock(_critSect);
+    CriticalSectionScoped lock(&_critSect);
 
     delete &_critSect;
 }
diff --git a/modules/audio_processing/test/process_test.cc b/modules/audio_processing/test/process_test.cc
index aa432ff..0e78230 100644
--- a/modules/audio_processing/test/process_test.cc
+++ b/modules/audio_processing/test/process_test.cc
@@ -35,6 +35,7 @@
 using webrtc::scoped_array;
 using webrtc::TickInterval;
 using webrtc::TickTime;
+using webrtc::VoiceDetection;
 
 using webrtc::audioproc::Event;
 using webrtc::audioproc::Init;
@@ -346,6 +347,30 @@
     } else if (strcmp(argv[i], "-vad") == 0) {
       ASSERT_EQ(apm->kNoError, apm->voice_detection()->Enable(true));
 
+    } else if (strcmp(argv[i], "--vad_very_low") == 0) {
+      ASSERT_EQ(apm->kNoError, apm->voice_detection()->Enable(true));
+      ASSERT_EQ(apm->kNoError,
+          apm->voice_detection()->set_likelihood(
+              VoiceDetection::kVeryLowLikelihood));
+
+    } else if (strcmp(argv[i], "--vad_low") == 0) {
+      ASSERT_EQ(apm->kNoError, apm->voice_detection()->Enable(true));
+      ASSERT_EQ(apm->kNoError,
+          apm->voice_detection()->set_likelihood(
+              VoiceDetection::kLowLikelihood));
+
+    } else if (strcmp(argv[i], "--vad_moderate") == 0) {
+      ASSERT_EQ(apm->kNoError, apm->voice_detection()->Enable(true));
+      ASSERT_EQ(apm->kNoError,
+          apm->voice_detection()->set_likelihood(
+              VoiceDetection::kModerateLikelihood));
+
+    } else if (strcmp(argv[i], "--vad_high") == 0) {
+      ASSERT_EQ(apm->kNoError, apm->voice_detection()->Enable(true));
+      ASSERT_EQ(apm->kNoError,
+          apm->voice_detection()->set_likelihood(
+              VoiceDetection::kHighLikelihood));
+
     } else if (strcmp(argv[i], "--vad_out_file") == 0) {
       i++;
       ASSERT_LT(i, argc) << "Specify filename after --vad_out_file";
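The four new flags above map one-to-one onto the VoiceDetection likelihood levels used in the hunk. A stand-alone sketch of the mapping (the enum here is a local stand-in, not the real webrtc::VoiceDetection::Likelihood):

#include <cstring>

enum Likelihood { kVeryLow, kLow, kModerate, kHigh, kUnknown };

Likelihood LikelihoodFromFlag(const char* flag) {
  if (std::strcmp(flag, "--vad_very_low") == 0) return kVeryLow;
  if (std::strcmp(flag, "--vad_low") == 0) return kLow;
  if (std::strcmp(flag, "--vad_moderate") == 0) return kModerate;
  if (std::strcmp(flag, "--vad_high") == 0) return kHigh;
  return kUnknown;  // fall through to the other argv handling
}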
diff --git a/modules/audio_processing/test/unit_test.cc b/modules/audio_processing/test/unit_test.cc
index 3e28fb3..221facd 100644
--- a/modules/audio_processing/test/unit_test.cc
+++ b/modules/audio_processing/test/unit_test.cc
@@ -1422,17 +1422,18 @@
 }  // namespace
 
 int main(int argc, char** argv) {
-  ::testing::InitGoogleTest(&argc, argv);
-
   for (int i = 1; i < argc; i++) {
     if (strcmp(argv[i], "--write_ref_data") == 0) {
       write_ref_data = true;
     }
   }
 
-  int err = RUN_ALL_TESTS();
-
+  // We don't use TestSuite here because it would require the Android platform
+  // build to depend on Gmock.
+  webrtc::test::SetExecutablePath(argv[0]);
+  testing::InitGoogleTest(&argc, argv);
+  int result = RUN_ALL_TESTS();
   // Optional, but removes memory leak noise from Valgrind.
   google::protobuf::ShutdownProtobufLibrary();
-  return err;
+  return result;
 }
diff --git a/modules/media_file/source/media_file_utility.cc b/modules/media_file/source/media_file_utility.cc
index 8d06a88..5616e34 100644
--- a/modules/media_file/source/media_file_utility.cc
+++ b/modules/media_file/source/media_file_utility.cc
@@ -1407,7 +1407,7 @@
         start,
         stop);
 
-#if defined(WEBRTC_CODEC_GSMAMR) || defined(WEBRTC_CODEC_GSMAMRWB) || \
+#if defined(WEBRTC_CODEC_AMR) || defined(WEBRTC_CODEC_AMRWB) || \
     defined(WEBRTC_CODEC_ILBC)
     WebRtc_Word16 read_len = 0;
 #endif
@@ -1418,10 +1418,10 @@
     _startPointInMs = start;
     _stopPointInMs = stop;
 
-#ifdef WEBRTC_CODEC_GSMAMR
+#ifdef WEBRTC_CODEC_AMR
     WebRtc_Word32 AMRmode2bytes[9]={12,13,15,17,19,20,26,31,5};
 #endif
-#ifdef WEBRTC_CODEC_GSMAMRWB
+#ifdef WEBRTC_CODEC_AMRWB
     WebRtc_Word32 AMRWBmode2bytes[10]={17,23,32,36,40,46,50,58,60,6};
 #endif
 
@@ -1440,7 +1440,7 @@
         buf[cnt]=0;
     }
 
-#ifdef WEBRTC_CODEC_GSMAMR
+#ifdef WEBRTC_CODEC_AMR
     if(!strcmp("#!AMR\n", buf))
     {
         strcpy(codec_info_.plname, "amr");
@@ -1484,7 +1484,7 @@
         }
     }
 #endif
-#ifdef WEBRTC_CODEC_GSMAMRWB
+#ifdef WEBRTC_CODEC_AMRWB
     if(!strcmp("#!AMRWB\n", buf))
     {
         strcpy(codec_info_.plname, "amr-wb");
@@ -1605,10 +1605,10 @@
         outData,
         bufferSize);
 
-#ifdef WEBRTC_CODEC_GSMAMR
+#ifdef WEBRTC_CODEC_AMR
     WebRtc_UWord32 AMRmode2bytes[9]={12,13,15,17,19,20,26,31,5};
 #endif
-#ifdef WEBRTC_CODEC_GSMAMRWB
+#ifdef WEBRTC_CODEC_AMRWB
     WebRtc_UWord32 AMRWBmode2bytes[10]={17,23,32,36,40,46,50,58,60,6};
 #endif
     WebRtc_UWord32 bytesRead = 0;
@@ -1619,7 +1619,7 @@
         return -1;
     }
 
-#ifdef WEBRTC_CODEC_GSMAMR
+#ifdef WEBRTC_CODEC_AMR
     if(_codecId == kCodecAmr)
     {
         WebRtc_Word32 res = in.Read(outData, 1);
@@ -1677,7 +1677,7 @@
         }
     }
 #endif
-#ifdef WEBRTC_CODEC_GSMAMRWB
+#ifdef WEBRTC_CODEC_AMRWB
     if(_codecId == kCodecAmrWb)
     {
         WebRtc_Word32 res = in.Read(outData, 1);
@@ -1809,7 +1809,7 @@
 
     _writing = false;
 
-#ifdef WEBRTC_CODEC_GSMAMR
+#ifdef WEBRTC_CODEC_AMR
     if(STR_CASE_CMP(codecInst.plname, "amr") == 0)
     {
         if(codecInst.pacsize == 160)
@@ -1822,7 +1822,7 @@
         }
     }
 #endif
-#ifdef WEBRTC_CODEC_GSMAMRWB
+#ifdef WEBRTC_CODEC_AMRWB
     if(STR_CASE_CMP(codecInst.plname, "amr-wb") == 0)
     {
         if(codecInst.pacsize == 320)
@@ -2172,13 +2172,13 @@
             _codecId = kCodecL16_32Khz;
         }
     }
-#ifdef WEBRTC_CODEC_GSMAMR
+#ifdef WEBRTC_CODEC_AMR
     else if(STR_CASE_CMP(codecInst.plname, "amr") == 0)
     {
         _codecId = kCodecAmr;
     }
 #endif
-#ifdef WEBRTC_CODEC_GSMAMRWB
+#ifdef WEBRTC_CODEC_AMRWB
     else if(STR_CASE_CMP(codecInst.plname, "amr-wb") == 0)
     {
         _codecId = kCodecAmrWb;
@@ -2394,7 +2394,7 @@
             {
                 buf[cnt] = 0;
             }
-#ifdef WEBRTC_CODEC_GSMAMR
+#ifdef WEBRTC_CODEC_AMR
             if(!strcmp("#!AMR\n", buf))
             {
                 WebRtc_UWord8 dummy;
@@ -2456,7 +2456,7 @@
                 }
             }
 #endif
-#ifdef WEBRTC_CODEC_GSMAMRWB
+#ifdef WEBRTC_CODEC_AMRWB
             if(!strcmp("#!AMRWB\n", buf))
             {
                 WebRtc_UWord8 dummy;
diff --git a/modules/modules.gyp b/modules/modules.gyp
index 089f538..a5b31c6 100644
--- a/modules/modules.gyp
+++ b/modules/modules.gyp
@@ -15,6 +15,7 @@
     'audio_coding/codecs/ilbc/ilbc.gypi',
     'audio_coding/codecs/isac/main/source/isac.gypi',
     'audio_coding/codecs/isac/fix/source/isacfix.gypi',
+    'audio_coding/codecs/opus/opus.gypi',
     'audio_coding/codecs/pcm16b/pcm16b.gypi',
     'audio_coding/main/source/audio_coding_module.gypi',
     'audio_coding/neteq/neteq.gypi',
diff --git a/modules/remote_bitrate_estimator/include/mock/mock_remote_bitrate_observer.h b/modules/remote_bitrate_estimator/include/mock/mock_remote_bitrate_observer.h
index 76a9583..31bbcd3 100644
--- a/modules/remote_bitrate_estimator/include/mock/mock_remote_bitrate_observer.h
+++ b/modules/remote_bitrate_estimator/include/mock/mock_remote_bitrate_observer.h
@@ -19,8 +19,8 @@
 
 class MockRemoteBitrateObserver : public RemoteBitrateObserver {
  public:
-  MOCK_METHOD2(OnReceiveBitrateChanged,
-      void(unsigned int ssrc, unsigned int bitrate));
+  MOCK_METHOD1(OnReceiveBitrateChanged,
+      void(unsigned int bitrate));
 };
 
 }  // namespace webrtc
diff --git a/modules/remote_bitrate_estimator/include/remote_bitrate_estimator.h b/modules/remote_bitrate_estimator/include/remote_bitrate_estimator.h
index 10eef95..01d840b 100644
--- a/modules/remote_bitrate_estimator/include/remote_bitrate_estimator.h
+++ b/modules/remote_bitrate_estimator/include/remote_bitrate_estimator.h
@@ -24,11 +24,9 @@
 // the incoming streams.
 class RemoteBitrateObserver {
  public:
-  // Called when a receive channel has a new bitrate estimate for the incoming
-  // stream.
-  // TODO(holmer): Remove |ssrc| argument and remove SSRC map from VieRemb.
-  virtual void OnReceiveBitrateChanged(unsigned int ssrc,
-                                       unsigned int bitrate) = 0;
+  // Called when a receive channel group has a new bitrate estimate for the
+  // incoming streams.
+  virtual void OnReceiveBitrateChanged(unsigned int bitrate) = 0;
 
   virtual ~RemoteBitrateObserver() {}
 };
diff --git a/modules/remote_bitrate_estimator/remote_bitrate_estimator_multi_stream.cc b/modules/remote_bitrate_estimator/remote_bitrate_estimator_multi_stream.cc
index e98c704..6c7fff3 100644
--- a/modules/remote_bitrate_estimator/remote_bitrate_estimator_multi_stream.cc
+++ b/modules/remote_bitrate_estimator/remote_bitrate_estimator_multi_stream.cc
@@ -122,7 +122,7 @@
   const RateControlRegion region = remote_rate_.Update(&input, time_now);
   unsigned int target_bitrate = remote_rate_.UpdateBandwidthEstimate(time_now);
   if (remote_rate_.ValidEstimate()) {
-    observer_->OnReceiveBitrateChanged(1, target_bitrate);
+    observer_->OnReceiveBitrateChanged(target_bitrate);
   }
   overuse_detector_.SetRateControlRegion(region);
 }
diff --git a/modules/remote_bitrate_estimator/remote_bitrate_estimator_single_stream.cc b/modules/remote_bitrate_estimator/remote_bitrate_estimator_single_stream.cc
index 45e301f..f1600d8 100644
--- a/modules/remote_bitrate_estimator/remote_bitrate_estimator_single_stream.cc
+++ b/modules/remote_bitrate_estimator/remote_bitrate_estimator_single_stream.cc
@@ -28,19 +28,21 @@
     int64_t arrival_time,
     uint32_t rtp_timestamp) {
   CriticalSectionScoped cs(crit_sect_.get());
-  SsrcBitrateControlsMap::iterator it = bitrate_controls_.find(ssrc);
-  if (it == bitrate_controls_.end()) {
+  SsrcOveruseDetectorMap::iterator it = overuse_detectors_.find(ssrc);
+  if (it == overuse_detectors_.end()) {
     // This is a new SSRC. Adding to map.
     // TODO(holmer): If the channel changes SSRC the old SSRC will still be
     // around in this map until the channel is deleted. This is OK since the
     // callback will no longer be called for the old SSRC. This will be
     // automatically cleaned up when we have one RemoteBitrateEstimator per REMB
     // group.
-    bitrate_controls_.insert(std::make_pair(ssrc, BitrateControls(options_)));
-    it = bitrate_controls_.find(ssrc);
+    std::pair<SsrcOveruseDetectorMap::iterator, bool> insert_result =
+        overuse_detectors_.insert(std::make_pair(ssrc, OveruseDetector(
+            options_)));
+    it = insert_result.first;
   }
-  OveruseDetector* overuse_detector = &it->second.overuse_detector;
-  it->second.incoming_bitrate.Update(packet_size, arrival_time);
+  OveruseDetector* overuse_detector = &it->second;
+  incoming_bitrate_.Update(packet_size, arrival_time);
   const BandwidthUsage prior_state = overuse_detector->State();
   overuse_detector->Update(packet_size, -1, rtp_timestamp, arrival_time);
   if (prior_state != overuse_detector->State() &&
@@ -53,49 +55,47 @@
 void RemoteBitrateEstimatorSingleStream::UpdateEstimate(unsigned int ssrc,
                                                         int64_t time_now) {
   CriticalSectionScoped cs(crit_sect_.get());
-  SsrcBitrateControlsMap::iterator it = bitrate_controls_.find(ssrc);
-  if (it == bitrate_controls_.end()) {
+  SsrcOveruseDetectorMap::iterator it = overuse_detectors_.find(ssrc);
+  if (it == overuse_detectors_.end()) {
     return;
   }
-  OveruseDetector* overuse_detector = &it->second.overuse_detector;
-  RemoteRateControl* remote_rate = &it->second.remote_rate;
+  OveruseDetector* overuse_detector = &it->second;
   const RateControlInput input(overuse_detector->State(),
-                               it->second.incoming_bitrate.BitRate(time_now),
+                               incoming_bitrate_.BitRate(time_now),
                                overuse_detector->NoiseVar());
-  const RateControlRegion region = remote_rate->Update(&input, time_now);
-  unsigned int target_bitrate = remote_rate->UpdateBandwidthEstimate(time_now);
-  if (remote_rate->ValidEstimate()) {
-    observer_->OnReceiveBitrateChanged(ssrc, target_bitrate);
+  const RateControlRegion region = remote_rate_.Update(&input, time_now);
+  unsigned int target_bitrate = remote_rate_.UpdateBandwidthEstimate(time_now);
+  if (remote_rate_.ValidEstimate()) {
+    observer_->OnReceiveBitrateChanged(target_bitrate);
   }
   overuse_detector->SetRateControlRegion(region);
 }
 
 void RemoteBitrateEstimatorSingleStream::SetRtt(unsigned int rtt) {
   CriticalSectionScoped cs(crit_sect_.get());
-  for (SsrcBitrateControlsMap::iterator it = bitrate_controls_.begin();
-      it != bitrate_controls_.end(); ++it) {
-    it->second.remote_rate.SetRtt(rtt);
-  }
+  remote_rate_.SetRtt(rtt);
 }
 
 void RemoteBitrateEstimatorSingleStream::RemoveStream(unsigned int ssrc) {
   CriticalSectionScoped cs(crit_sect_.get());
   // Ignoring the return value which is the number of elements erased.
-  bitrate_controls_.erase(ssrc);
+  overuse_detectors_.erase(ssrc);
 }
 
 bool RemoteBitrateEstimatorSingleStream::LatestEstimate(
     unsigned int ssrc, unsigned int* bitrate_bps) const {
   CriticalSectionScoped cs(crit_sect_.get());
   assert(bitrate_bps != NULL);
-  SsrcBitrateControlsMap::const_iterator it = bitrate_controls_.find(ssrc);
-  if (it == bitrate_controls_.end()) {
+  if (!remote_rate_.ValidEstimate()) {
     return false;
   }
-  if (!it->second.remote_rate.ValidEstimate()) {
-    return false;
-  }
-  *bitrate_bps = it->second.remote_rate.LatestEstimate();
+  // TODO(holmer): For now we're returning the estimated bandwidth per stream, as
+  // it corresponds better to how the ViE API is designed. Will fix this when
+  // the API changes.
+  if (overuse_detectors_.size() > 0)
+    *bitrate_bps = remote_rate_.LatestEstimate() / overuse_detectors_.size();
+  else
+    *bitrate_bps = 0;
   return true;
 }
 
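With one shared RemoteRateControl per estimator, LatestEstimate() above now reports the aggregate estimate divided evenly across tracked SSRCs. The rule in isolation:

#include <cstddef>

// Per-stream rate reported by LatestEstimate() above: an even split of
// the shared estimate, or 0 before any stream is tracked.
unsigned int PerStreamEstimateBps(unsigned int aggregate_bps,
                                  std::size_t num_detectors) {
  return num_detectors > 0
             ? static_cast<unsigned int>(aggregate_bps / num_detectors)
             : 0u;
}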
diff --git a/modules/remote_bitrate_estimator/remote_bitrate_estimator_single_stream.h b/modules/remote_bitrate_estimator/remote_bitrate_estimator_single_stream.h
index 241bb0d..088b78d 100644
--- a/modules/remote_bitrate_estimator/remote_bitrate_estimator_single_stream.h
+++ b/modules/remote_bitrate_estimator/remote_bitrate_estimator_single_stream.h
@@ -55,26 +55,12 @@
   bool LatestEstimate(unsigned int ssrc, unsigned int* bitrate_bps) const;
 
  private:
-  struct BitrateControls {
-    explicit BitrateControls(const OverUseDetectorOptions& options)
-        : remote_rate(),
-          overuse_detector(options),
-          incoming_bitrate() {
-    }
-    BitrateControls(const BitrateControls& other)
-        : remote_rate(other.remote_rate),
-          overuse_detector(other.overuse_detector),
-          incoming_bitrate(other.incoming_bitrate) {
-    }
-    RemoteRateControl remote_rate;
-    OveruseDetector overuse_detector;
-    BitRateStats incoming_bitrate;
-  };
-
-  typedef std::map<unsigned int, BitrateControls> SsrcBitrateControlsMap;
+  typedef std::map<unsigned int, OveruseDetector> SsrcOveruseDetectorMap;
 
   const OverUseDetectorOptions& options_;
-  SsrcBitrateControlsMap bitrate_controls_;
+  SsrcOveruseDetectorMap overuse_detectors_;
+  BitRateStats incoming_bitrate_;
+  RemoteRateControl remote_rate_;
   RemoteBitrateObserver* observer_;
   scoped_ptr<CriticalSectionWrapper> crit_sect_;
 };
diff --git a/modules/remote_bitrate_estimator/remote_bitrate_estimator_unittest.cc b/modules/remote_bitrate_estimator/remote_bitrate_estimator_unittest.cc
index 70f066c..59c1285 100644
--- a/modules/remote_bitrate_estimator/remote_bitrate_estimator_unittest.cc
+++ b/modules/remote_bitrate_estimator/remote_bitrate_estimator_unittest.cc
@@ -27,7 +27,7 @@
  public:
   TestBitrateObserver() : updated_(false), latest_bitrate_(0) {}
 
-  void OnReceiveBitrateChanged(unsigned int ssrc, unsigned int bitrate) {
+  void OnReceiveBitrateChanged(unsigned int bitrate) {
     latest_bitrate_ = bitrate;
     updated_ = true;
   }
diff --git a/modules/rtp_rtcp/source/rtp_receiver_audio.cc b/modules/rtp_rtcp/source/rtp_receiver_audio.cc
index a57da75..c9fd3df 100644
--- a/modules/rtp_rtcp/source/rtp_receiver_audio.cc
+++ b/modules/rtp_rtcp/source/rtp_receiver_audio.cc
@@ -27,6 +27,7 @@
     _cngNBPayloadType(-1),
     _cngWBPayloadType(-1),
     _cngSWBPayloadType(-1),
+    _cngFBPayloadType(-1),
     _cngPayloadType(-1),
     _G722PayloadType(-1),
     _lastReceivedG722(false),
@@ -94,7 +95,7 @@
 RTPReceiverAudio::CNGPayloadType(const WebRtc_Word8 payloadType,
                                  WebRtc_UWord32& frequency)
 {
-    //  we can have three CNG on 8000Hz, 16000Hz and 32000Hz
+    //  We can have four CNG on 8000Hz, 16000Hz, 32000Hz and 48000Hz.
     if(_cngNBPayloadType == payloadType)
     {
         frequency = 8000;
@@ -129,6 +130,15 @@
         }
         _cngPayloadType = _cngSWBPayloadType;
         return true;
+    }else if(_cngFBPayloadType == payloadType)
+    {
+        frequency = 48000;
+        if ((_cngPayloadType != -1) && (_cngPayloadType != _cngFBPayloadType))
+        {
+            ResetStatistics();
+        }
+        _cngPayloadType = _cngFBPayloadType;
+        return true;
     }else
     {
         //  not CNG
@@ -195,6 +205,8 @@
       _cngWBPayloadType = payloadType;
     } else if(frequency == 32000) {
       _cngSWBPayloadType = payloadType;
+    } else if(frequency == 48000) {
+      _cngFBPayloadType = payloadType;
     } else {
       assert(false);
       return NULL;
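Taken together, the four branches reduce to a payload-type to frequency lookup. A hedged, self-contained sketch of that mapping (CngFrequencyHz and its parameters are illustrative, not part of the module):

// Maps a CNG payload type to its sampling frequency in Hz, or -1 if the
// payload type is not one of the four registered CNG types.
int CngFrequencyHz(signed char payload_type, signed char nb, signed char wb,
                   signed char swb, signed char fb) {
  if (payload_type == nb)  return 8000;   // Narrowband.
  if (payload_type == wb)  return 16000;  // Wideband.
  if (payload_type == swb) return 32000;  // Super-wideband.
  if (payload_type == fb)  return 48000;  // Fullband (added above).
  return -1;  // Not a CNG payload type.
}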
diff --git a/modules/rtp_rtcp/source/rtp_receiver_audio.h b/modules/rtp_rtcp/source/rtp_receiver_audio.h
index 0b0ba30..e256dd1 100644
--- a/modules/rtp_rtcp/source/rtp_receiver_audio.h
+++ b/modules/rtp_rtcp/source/rtp_receiver_audio.h
@@ -81,6 +81,7 @@
     WebRtc_Word8              _cngNBPayloadType;
     WebRtc_Word8              _cngWBPayloadType;
     WebRtc_Word8              _cngSWBPayloadType;
+    WebRtc_Word8              _cngFBPayloadType;
     WebRtc_Word8                _cngPayloadType;
 
     // G722 is special since it use the wrong number of RTP samples in timestamp VS. number of samples in the frame
diff --git a/modules/rtp_rtcp/source/rtp_sender_audio.cc b/modules/rtp_rtcp/source/rtp_sender_audio.cc
index 0f6f69f..0c422de 100644
--- a/modules/rtp_rtcp/source/rtp_sender_audio.cc
+++ b/modules/rtp_rtcp/source/rtp_sender_audio.cc
@@ -38,6 +38,7 @@
     _cngNBPayloadType(-1),
     _cngWBPayloadType(-1),
     _cngSWBPayloadType(-1),
+    _cngFBPayloadType(-1),
     _lastPayloadType(-1),
     _includeAudioLevelIndication(false),    // @TODO - reset at Init()?
     _audioLevelIndicationID(0),
@@ -101,6 +102,10 @@
 
     } else if (frequency == 32000) {
       _cngSWBPayloadType = payloadType;
+
+    } else if (frequency == 48000) {
+      _cngFBPayloadType = payloadType;
+
     } else {
       return -1;
     }
@@ -159,6 +164,15 @@
                 return false;
             }
         }
+        if(_cngFBPayloadType != -1)
+        {
+            // we have configured FB CNG
+            if(_cngFBPayloadType == payloadType)
+            {
+                // only set a marker bit when we change payload type to a non-CNG
+                return false;
+            }
+        }
         // payloadType differ
         if(_lastPayloadType == -1)
         {
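The FB branch follows the same marker-bit rule as the NB/WB/SWB branches before it. A simplified sketch of that rule, under the assumption that the surrounding function suppresses the marker while any configured CNG type is still being sent and only marks on a payload-type change (ShouldSetMarkerBit is illustrative):

bool ShouldSetMarkerBit(signed char payload_type,
                        signed char last_payload_type,
                        const signed char cng_payload_types[4]) {
  for (int i = 0; i < 4; ++i) {
    // Still sending one of the configured CNG types: no marker bit.
    if (cng_payload_types[i] != -1 && cng_payload_types[i] == payload_type)
      return false;
  }
  // Mark only when the payload type actually changed to a non-CNG type.
  return last_payload_type != -1 && last_payload_type != payload_type;
}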
diff --git a/modules/rtp_rtcp/source/rtp_sender_audio.h b/modules/rtp_rtcp/source/rtp_sender_audio.h
index 5974441..fe9a952 100644
--- a/modules/rtp_rtcp/source/rtp_sender_audio.h
+++ b/modules/rtp_rtcp/source/rtp_sender_audio.h
@@ -117,6 +117,7 @@
     WebRtc_Word8      _cngNBPayloadType;
     WebRtc_Word8      _cngWBPayloadType;
     WebRtc_Word8      _cngSWBPayloadType;
+    WebRtc_Word8      _cngFBPayloadType;
     WebRtc_Word8      _lastPayloadType;
 
     // Audio level indication (https://datatracker.ietf.org/doc/draft-lennox-avt-rtp-audio-level-exthdr/)
diff --git a/modules/video_coding/main/source/frame_buffer.cc b/modules/video_coding/main/source/frame_buffer.cc
index abaadff..fe30bea 100644
--- a/modules/video_coding/main/source/frame_buffer.cc
+++ b/modules/video_coding/main/source/frame_buffer.cc
@@ -199,7 +199,7 @@
 }
 
 WebRtc_Word64
-VCMFrameBuffer::LatestPacketTimeMs()
+VCMFrameBuffer::LatestPacketTimeMs() const
 {
     return _latestPacketTimeMs;
 }
diff --git a/modules/video_coding/main/source/frame_buffer.h b/modules/video_coding/main/source/frame_buffer.h
index ea05754..eeacfad 100644
--- a/modules/video_coding/main/source/frame_buffer.h
+++ b/modules/video_coding/main/source/frame_buffer.h
@@ -74,7 +74,7 @@
     void IncrementNackCount();
     WebRtc_Word16 GetNackCount() const;
 
-    WebRtc_Word64 LatestPacketTimeMs();
+    WebRtc_Word64 LatestPacketTimeMs() const;
 
     webrtc::FrameType FrameType() const;
     void SetPreviousFrameLoss();
diff --git a/modules/video_coding/main/source/jitter_buffer.cc b/modules/video_coding/main/source/jitter_buffer.cc
index 0c4dd33..405c05e 100644
--- a/modules/video_coding/main/source/jitter_buffer.cc
+++ b/modules/video_coding/main/source/jitter_buffer.cc
@@ -28,10 +28,10 @@
 // Predicates used when searching for frames in the frame buffer list
 class FrameSmallerTimestamp {
  public:
-  FrameSmallerTimestamp(uint32_t timestamp) : timestamp_(timestamp) {}
+  explicit FrameSmallerTimestamp(uint32_t timestamp) : timestamp_(timestamp) {}
   bool operator()(VCMFrameBuffer* frame) {
     return (LatestTimestamp(timestamp_, frame->TimeStamp(), NULL) ==
-        timestamp_);
+            timestamp_);
   }
 
  private:
@@ -40,7 +40,7 @@
 
 class FrameEqualTimestamp {
  public:
-  FrameEqualTimestamp(uint32_t timestamp) : timestamp_(timestamp) {}
+  explicit FrameEqualTimestamp(uint32_t timestamp) : timestamp_(timestamp) {}
   bool operator()(VCMFrameBuffer* frame) {
     return (timestamp_ == frame->TimeStamp());
   }
@@ -58,521 +58,1183 @@
   }
 };
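These predicates pair with std::find_if over the frame list; in particular, the reverse search plus rit.base() used in CopyFrom() and InsertPacket() below keeps the list sorted by ascending timestamp. A self-contained sketch with a plain < comparison (the real FrameSmallerTimestamp uses the wrap-aware LatestTimestamp; a C++11 lambda stands in for the functor):

#include <algorithm>
#include <cassert>
#include <list>

static void SortedInsert(std::list<unsigned int>* timestamps,
                         unsigned int ts) {
  // Search backwards for the first element smaller than |ts|; base() then
  // points just past it, so inserting there preserves ascending order.
  std::list<unsigned int>::reverse_iterator rit =
      std::find_if(timestamps->rbegin(), timestamps->rend(),
                   [ts](unsigned int existing) { return existing < ts; });
  timestamps->insert(rit.base(), ts);
}

int main() {
  std::list<unsigned int> l;
  SortedInsert(&l, 3000);
  SortedInsert(&l, 9000);
  SortedInsert(&l, 6000);  // Lands between 3000 and 9000.
  assert(l.front() == 3000 && l.back() == 9000);
  return 0;
}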
 
-// Constructor
 VCMJitterBuffer::VCMJitterBuffer(TickTimeBase* clock,
-                                 WebRtc_Word32 vcmId,
-                                 WebRtc_Word32 receiverId,
-                                 bool master) :
-    _vcmId(vcmId),
-    _receiverId(receiverId),
-    _clock(clock),
-    _running(false),
-    _critSect(CriticalSectionWrapper::CreateCriticalSection()),
-    _master(master),
-    _frameEvent(),
-    _packetEvent(),
-    _maxNumberOfFrames(kStartNumberOfFrames),
-    _frameBuffers(),
-    _frameList(),
-    _lastDecodedState(),
-    _packetsNotDecodable(0),
-    _receiveStatistics(),
-    _incomingFrameRate(0),
-    _incomingFrameCount(0),
-    _timeLastIncomingFrameCount(0),
-    _incomingBitCount(0),
-    _incomingBitRate(0),
-    _dropCount(0),
-    _numConsecutiveOldFrames(0),
-    _numConsecutiveOldPackets(0),
-    _discardedPackets(0),
-    _jitterEstimate(vcmId, receiverId),
-    _delayEstimate(_clock->MillisecondTimestamp()),
-    _rttMs(0),
-    _nackMode(kNoNack),
-    _lowRttNackThresholdMs(-1),
-    _highRttNackThresholdMs(-1),
-    _NACKSeqNum(),
-    _NACKSeqNumLength(0),
-    _waitingForKeyFrame(false),
-    _firstPacket(true)
-{
-    memset(_frameBuffers, 0, sizeof(_frameBuffers));
-    memset(_receiveStatistics, 0, sizeof(_receiveStatistics));
-    memset(_NACKSeqNumInternal, -1, sizeof(_NACKSeqNumInternal));
+                                 int vcm_id,
+                                 int receiver_id,
+                                 bool master)
+    : vcm_id_(vcm_id),
+      receiver_id_(receiver_id),
+      clock_(clock),
+      running_(false),
+      crit_sect_(CriticalSectionWrapper::CreateCriticalSection()),
+      master_(master),
+      frame_event_(),
+      packet_event_(),
+      max_number_of_frames_(kStartNumberOfFrames),
+      frame_buffers_(),
+      frame_list_(),
+      last_decoded_state_(),
+      first_packet_(true),
+      num_not_decodable_packets_(0),
+      receive_statistics_(),
+      incoming_frame_rate_(0),
+      incoming_frame_count_(0),
+      time_last_incoming_frame_count_(0),
+      incoming_bit_count_(0),
+      incoming_bit_rate_(0),
+      drop_count_(0),
+      num_consecutive_old_frames_(0),
+      num_consecutive_old_packets_(0),
+      num_discarded_packets_(0),
+      jitter_estimate_(vcm_id, receiver_id),
+      inter_frame_delay_(clock_->MillisecondTimestamp()),
+      rtt_ms_(0),
+      nack_mode_(kNoNack),
+      low_rtt_nack_threshold_ms_(-1),
+      high_rtt_nack_threshold_ms_(-1),
+      nack_seq_nums_(),
+      nack_seq_nums_length_(0),
+      waiting_for_key_frame_(false) {
+  memset(frame_buffers_, 0, sizeof(frame_buffers_));
+  memset(receive_statistics_, 0, sizeof(receive_statistics_));
+  memset(nack_seq_nums_internal_, -1, sizeof(nack_seq_nums_internal_));
 
-    for (int i = 0; i< kStartNumberOfFrames; i++)
-    {
-        _frameBuffers[i] = new VCMFrameBuffer();
-    }
+  for (int i = 0; i < kStartNumberOfFrames; i++) {
+    frame_buffers_[i] = new VCMFrameBuffer();
+  }
 }
 
-// Destructor
-VCMJitterBuffer::~VCMJitterBuffer()
-{
-    Stop();
-    for (int i = 0; i< kMaxNumberOfFrames; i++)
-    {
-        if (_frameBuffers[i])
-        {
-            delete _frameBuffers[i];
-        }
+VCMJitterBuffer::~VCMJitterBuffer() {
+  Stop();
+  for (int i = 0; i < kMaxNumberOfFrames; i++) {
+    if (frame_buffers_[i]) {
+      delete frame_buffers_[i];
     }
-    delete _critSect;
+  }
+  delete crit_sect_;
 }
 
-void
-VCMJitterBuffer::CopyFrom(const VCMJitterBuffer& rhs)
-{
-    if (this != &rhs)
-    {
-        _critSect->Enter();
-        rhs._critSect->Enter();
-        _vcmId = rhs._vcmId;
-        _receiverId = rhs._receiverId;
-        _running = rhs._running;
-        _master = !rhs._master;
-        _maxNumberOfFrames = rhs._maxNumberOfFrames;
-        _incomingFrameRate = rhs._incomingFrameRate;
-        _incomingFrameCount = rhs._incomingFrameCount;
-        _timeLastIncomingFrameCount = rhs._timeLastIncomingFrameCount;
-        _incomingBitCount = rhs._incomingBitCount;
-        _incomingBitRate = rhs._incomingBitRate;
-        _dropCount = rhs._dropCount;
-        _numConsecutiveOldFrames = rhs._numConsecutiveOldFrames;
-        _numConsecutiveOldPackets = rhs._numConsecutiveOldPackets;
-        _discardedPackets = rhs._discardedPackets;
-        _jitterEstimate = rhs._jitterEstimate;
-        _delayEstimate = rhs._delayEstimate;
-        _waitingForCompletion = rhs._waitingForCompletion;
-        _rttMs = rhs._rttMs;
-        _NACKSeqNumLength = rhs._NACKSeqNumLength;
-        _waitingForKeyFrame = rhs._waitingForKeyFrame;
-        _firstPacket = rhs._firstPacket;
-        _lastDecodedState =  rhs._lastDecodedState;
-        _packetsNotDecodable = rhs._packetsNotDecodable;
-        memcpy(_receiveStatistics, rhs._receiveStatistics,
-               sizeof(_receiveStatistics));
-        memcpy(_NACKSeqNumInternal, rhs._NACKSeqNumInternal,
-               sizeof(_NACKSeqNumInternal));
-        memcpy(_NACKSeqNum, rhs._NACKSeqNum, sizeof(_NACKSeqNum));
-        for (int i = 0; i < kMaxNumberOfFrames; i++)
-        {
-            if (_frameBuffers[i] != NULL)
-            {
-                delete _frameBuffers[i];
-                _frameBuffers[i] = NULL;
-            }
-        }
-        _frameList.clear();
-        for (int i = 0; i < _maxNumberOfFrames; i++)
-        {
-            _frameBuffers[i] = new VCMFrameBuffer(*(rhs._frameBuffers[i]));
-            if (_frameBuffers[i]->Length() > 0)
-            {
-                FrameList::reverse_iterator rit = std::find_if(
-                    _frameList.rbegin(), _frameList.rend(),
-                    FrameSmallerTimestamp(_frameBuffers[i]->TimeStamp()));
-                _frameList.insert(rit.base(), _frameBuffers[i]);
-            }
-        }
-        rhs._critSect->Leave();
-        _critSect->Leave();
+void VCMJitterBuffer::CopyFrom(const VCMJitterBuffer& rhs) {
+  if (this != &rhs) {
+    crit_sect_->Enter();
+    rhs.crit_sect_->Enter();
+    vcm_id_ = rhs.vcm_id_;
+    receiver_id_ = rhs.receiver_id_;
+    running_ = rhs.running_;
+    master_ = !rhs.master_;
+    max_number_of_frames_ = rhs.max_number_of_frames_;
+    incoming_frame_rate_ = rhs.incoming_frame_rate_;
+    incoming_frame_count_ = rhs.incoming_frame_count_;
+    time_last_incoming_frame_count_ = rhs.time_last_incoming_frame_count_;
+    incoming_bit_count_ = rhs.incoming_bit_count_;
+    incoming_bit_rate_ = rhs.incoming_bit_rate_;
+    drop_count_ = rhs.drop_count_;
+    num_consecutive_old_frames_ = rhs.num_consecutive_old_frames_;
+    num_consecutive_old_packets_ = rhs.num_consecutive_old_packets_;
+    num_discarded_packets_ = rhs.num_discarded_packets_;
+    jitter_estimate_ = rhs.jitter_estimate_;
+    inter_frame_delay_ = rhs.inter_frame_delay_;
+    waiting_for_completion_ = rhs.waiting_for_completion_;
+    rtt_ms_ = rhs.rtt_ms_;
+    nack_seq_nums_length_ = rhs.nack_seq_nums_length_;
+    waiting_for_key_frame_ = rhs.waiting_for_key_frame_;
+    first_packet_ = rhs.first_packet_;
+    last_decoded_state_ = rhs.last_decoded_state_;
+    num_not_decodable_packets_ = rhs.num_not_decodable_packets_;
+    memcpy(receive_statistics_, rhs.receive_statistics_,
+           sizeof(receive_statistics_));
+    memcpy(nack_seq_nums_internal_, rhs.nack_seq_nums_internal_,
+           sizeof(nack_seq_nums_internal_));
+    memcpy(nack_seq_nums_, rhs.nack_seq_nums_, sizeof(nack_seq_nums_));
+    for (int i = 0; i < kMaxNumberOfFrames; i++) {
+      if (frame_buffers_[i] != NULL) {
+        delete frame_buffers_[i];
+        frame_buffers_[i] = NULL;
+      }
     }
+    frame_list_.clear();
+    for (int i = 0; i < max_number_of_frames_; i++) {
+      frame_buffers_[i] = new VCMFrameBuffer(*(rhs.frame_buffers_[i]));
+      if (frame_buffers_[i]->Length() > 0) {
+        FrameList::reverse_iterator rit = std::find_if(
+            frame_list_.rbegin(), frame_list_.rend(),
+            FrameSmallerTimestamp(frame_buffers_[i]->TimeStamp()));
+        frame_list_.insert(rit.base(), frame_buffers_[i]);
+      }
+    }
+    rhs.crit_sect_->Leave();
+    crit_sect_->Leave();
+  }
 }
 
-// Start jitter buffer
-void
-VCMJitterBuffer::Start()
-{
-    CriticalSectionScoped cs(_critSect);
-    _running = true;
-    _incomingFrameCount = 0;
-    _incomingFrameRate = 0;
-    _incomingBitCount = 0;
-    _incomingBitRate = 0;
-    _timeLastIncomingFrameCount = _clock->MillisecondTimestamp();
-    memset(_receiveStatistics, 0, sizeof(_receiveStatistics));
+void VCMJitterBuffer::Start() {
+  CriticalSectionScoped cs(crit_sect_);
+  running_ = true;
+  incoming_frame_count_ = 0;
+  incoming_frame_rate_ = 0;
+  incoming_bit_count_ = 0;
+  incoming_bit_rate_ = 0;
+  time_last_incoming_frame_count_ = clock_->MillisecondTimestamp();
+  memset(receive_statistics_, 0, sizeof(receive_statistics_));
 
-    _numConsecutiveOldFrames = 0;
-    _numConsecutiveOldPackets = 0;
-    _discardedPackets = 0;
+  num_consecutive_old_frames_ = 0;
+  num_consecutive_old_packets_ = 0;
+  num_discarded_packets_ = 0;
 
-    _frameEvent.Reset(); // start in a non-signaled state
-    _packetEvent.Reset(); // start in a non-signaled state
-    _waitingForCompletion.frameSize = 0;
-    _waitingForCompletion.timestamp = 0;
-    _waitingForCompletion.latestPacketTime = -1;
-    _firstPacket = true;
-    _NACKSeqNumLength = 0;
-    _waitingForKeyFrame = false;
-    _rttMs = 0;
-    _packetsNotDecodable = 0;
+  // Start in a non-signaled state.
+  frame_event_.Reset();
+  packet_event_.Reset();
+  waiting_for_completion_.frame_size = 0;
+  waiting_for_completion_.timestamp = 0;
+  waiting_for_completion_.latest_packet_time = -1;
+  first_packet_ = true;
+  nack_seq_nums_length_ = 0;
+  waiting_for_key_frame_ = false;
+  rtt_ms_ = 0;
+  num_not_decodable_packets_ = 0;
 
-    WEBRTC_TRACE(webrtc::kTraceDebug, webrtc::kTraceVideoCoding, VCMId(_vcmId,
-                 _receiverId), "JB(0x%x): Jitter buffer: start", this);
+  WEBRTC_TRACE(webrtc::kTraceDebug, webrtc::kTraceVideoCoding,
+               VCMId(vcm_id_, receiver_id_), "JB(0x%x): Jitter buffer: start",
+               this);
 }
 
-
-// Stop jitter buffer
-void
-VCMJitterBuffer::Stop()
-{
-    _critSect->Enter();
-    _running = false;
-    _lastDecodedState.Reset();
-    _frameList.clear();
-    for (int i = 0; i < kMaxNumberOfFrames; i++)
-    {
-        if (_frameBuffers[i] != NULL)
-        {
-            static_cast<VCMFrameBuffer*>(_frameBuffers[i])->SetState(kStateFree);
-        }
+void VCMJitterBuffer::Stop() {
+  crit_sect_->Enter();
+  running_ = false;
+  last_decoded_state_.Reset();
+  frame_list_.clear();
+  for (int i = 0; i < kMaxNumberOfFrames; i++) {
+    if (frame_buffers_[i] != NULL) {
+      static_cast<VCMFrameBuffer*>(frame_buffers_[i])->SetState(kStateFree);
     }
+  }
 
-    _critSect->Leave();
-    _frameEvent.Set(); // Make sure we exit from trying to get a frame to decoder
-    _packetEvent.Set(); // Make sure we exit from trying to get a sequence number
-    WEBRTC_TRACE(webrtc::kTraceDebug, webrtc::kTraceVideoCoding, VCMId(_vcmId,
-                 _receiverId), "JB(0x%x): Jitter buffer: stop", this);
+  crit_sect_->Leave();
+  // Make sure we wake up any threads waiting on these events.
+  frame_event_.Set();
+  packet_event_.Set();
+  WEBRTC_TRACE(webrtc::kTraceDebug, webrtc::kTraceVideoCoding,
+               VCMId(vcm_id_, receiver_id_), "JB(0x%x): Jitter buffer: stop",
+               this);
 }
 
-bool
-VCMJitterBuffer::Running() const
-{
-    CriticalSectionScoped cs(_critSect);
-    return _running;
+bool VCMJitterBuffer::Running() const {
+  CriticalSectionScoped cs(crit_sect_);
+  return running_;
 }
 
-// Flush jitter buffer
-void
-VCMJitterBuffer::Flush()
-{
-    CriticalSectionScoped cs(_critSect);
-    FlushInternal();
-}
-
-// Must be called under the critical section _critSect
-void
-VCMJitterBuffer::FlushInternal()
-{
-    // Erase all frames from the sorted list and set their state to free.
-    _frameList.clear();
-    for (WebRtc_Word32 i = 0; i < _maxNumberOfFrames; i++)
-    {
-        ReleaseFrameInternal(_frameBuffers[i]);
-    }
-    _lastDecodedState.Reset(); // TODO (mikhal): sync reset
-    _packetsNotDecodable = 0;
-
-    _frameEvent.Reset();
-    _packetEvent.Reset();
-
-    _numConsecutiveOldFrames = 0;
-    _numConsecutiveOldPackets = 0;
-
-    // Also reset the jitter and delay estimates
-    _jitterEstimate.Reset();
-    _delayEstimate.Reset(_clock->MillisecondTimestamp());
-
-    _waitingForCompletion.frameSize = 0;
-    _waitingForCompletion.timestamp = 0;
-    _waitingForCompletion.latestPacketTime = -1;
-
-    _firstPacket = true;
-
-    _NACKSeqNumLength = 0;
-
-    WEBRTC_TRACE(webrtc::kTraceDebug, webrtc::kTraceVideoCoding, VCMId(_vcmId,
-                 _receiverId), "JB(0x%x): Jitter buffer: flush", this);
-}
-
-// Set the frame state to free and remove it from the sorted
-// frame list. Must be called from inside the critical section _critSect.
-void
-VCMJitterBuffer::ReleaseFrameInternal(VCMFrameBuffer* frame)
-{
-    if (frame != NULL && frame->GetState() != kStateDecoding)
-    {
-        frame->SetState(kStateFree);
-    }
-}
-
-// Update frame state (set as complete if conditions are met)
-// Doing it here increases the degree of freedom for e.g. future
-// reconstructability of separate layers. Must be called under the
-// critical section _critSect.
-VCMFrameBufferEnum
-VCMJitterBuffer::UpdateFrameState(VCMFrameBuffer* frame)
-{
-    if (frame == NULL)
-    {
-        WEBRTC_TRACE(webrtc::kTraceWarning, webrtc::kTraceVideoCoding,
-                     VCMId(_vcmId, _receiverId), "JB(0x%x) FB(0x%x): "
-                         "UpdateFrameState NULL frame pointer", this, frame);
-        return kNoError;
-    }
-
-    int length = frame->Length();
-    if (_master)
-    {
-        // Only trace the primary jitter buffer to make it possible to parse
-        // and plot the trace file.
-        WEBRTC_TRACE(webrtc::kTraceDebug, webrtc::kTraceVideoCoding,
-                     VCMId(_vcmId, _receiverId),
-                     "JB(0x%x) FB(0x%x): Complete frame added to jitter buffer,"
-                     " size:%d type %d",
-                     this, frame,length,frame->FrameType());
-    }
-
-    if (length != 0 && !frame->GetCountedFrame())
-    {
-        // ignore Ack frames
-        _incomingFrameCount++;
-        frame->SetCountedFrame(true);
-    }
-
-    // Check if we should drop frame
-    // an old complete frame can arrive too late
-    if (_lastDecodedState.IsOldFrame(frame))
-    {
-        // Frame is older than the latest decoded frame, drop it. Will be
-        // released by CleanUpOldFrames later.
-        frame->Reset();
-        frame->SetState(kStateEmpty);
-
-        WEBRTC_TRACE(webrtc::kTraceDebug, webrtc::kTraceVideoCoding,
-                     VCMId(_vcmId, _receiverId),
-                     "JB(0x%x) FB(0x%x): Dropping old frame in Jitter buffer",
-                     this, frame);
-        _dropCount++;
-        WEBRTC_TRACE(webrtc::kTraceWarning, webrtc::kTraceVideoCoding,
-                     VCMId(_vcmId, _receiverId),
-                     "Jitter buffer drop count: %d, consecutive drops: %u",
-                     _dropCount, _numConsecutiveOldFrames);
-        // Flush() if this happens consistently.
-        _numConsecutiveOldFrames++;
-        if (_numConsecutiveOldFrames > kMaxConsecutiveOldFrames) {
-          FlushInternal();
-          return kFlushIndicator;
-        }
-        return kNoError;
-    }
-    _numConsecutiveOldFrames = 0;
-    frame->SetState(kStateComplete);
-
-
-    // Update receive statistics. We count all layers, thus when you use layers
-    // adding all key and delta frames might differ from frame count
-    if (frame->IsSessionComplete())
-    {
-        switch (frame->FrameType())
-        {
-        case kVideoFrameKey:
-            {
-                _receiveStatistics[0]++;
-                break;
-            }
-        case kVideoFrameDelta:
-            {
-                _receiveStatistics[1]++;
-                break;
-            }
-        case kVideoFrameGolden:
-            {
-                _receiveStatistics[2]++;
-                break;
-            }
-        case kVideoFrameAltRef:
-            {
-                _receiveStatistics[3]++;
-                break;
-            }
-        default:
-            assert(false);
-
-        }
-    }
-    const FrameList::iterator it = FindOldestCompleteContinuousFrame(false);
-    VCMFrameBuffer* oldFrame = NULL;
-    if (it != _frameList.end())
-    {
-        oldFrame = *it;
-    }
-
-    // Only signal if this is the oldest frame.
-    // Not necessary the case due to packet reordering or NACK.
-    if (!WaitForNack() || (oldFrame != NULL && oldFrame == frame))
-    {
-        _frameEvent.Set();
-    }
-    return kNoError;
+void VCMJitterBuffer::Flush() {
+  CriticalSectionScoped cs(crit_sect_);
+  // Erase all frames from the sorted list and set their state to free.
+  frame_list_.clear();
+  for (int i = 0; i < max_number_of_frames_; i++) {
+    ReleaseFrameIfNotDecoding(frame_buffers_[i]);
+  }
+  last_decoded_state_.Reset();  // TODO(mikhal): sync reset.
+  num_not_decodable_packets_ = 0;
+  frame_event_.Reset();
+  packet_event_.Reset();
+  num_consecutive_old_frames_ = 0;
+  num_consecutive_old_packets_ = 0;
+  // Also reset the jitter and delay estimates
+  jitter_estimate_.Reset();
+  inter_frame_delay_.Reset(clock_->MillisecondTimestamp());
+  waiting_for_completion_.frame_size = 0;
+  waiting_for_completion_.timestamp = 0;
+  waiting_for_completion_.latest_packet_time = -1;
+  first_packet_ = true;
+  nack_seq_nums_length_ = 0;
+  WEBRTC_TRACE(webrtc::kTraceDebug, webrtc::kTraceVideoCoding,
+               VCMId(vcm_id_, receiver_id_), "JB(0x%x): Jitter buffer: flush",
+               this);
 }
 
 // Get received key and delta frames
-WebRtc_Word32
-VCMJitterBuffer::GetFrameStatistics(WebRtc_UWord32& receivedDeltaFrames,
-                                    WebRtc_UWord32& receivedKeyFrames) const
-{
-    {
-        CriticalSectionScoped cs(_critSect);
-        receivedDeltaFrames = _receiveStatistics[1] + _receiveStatistics[3];
-        receivedKeyFrames = _receiveStatistics[0] + _receiveStatistics[2];
+void VCMJitterBuffer::FrameStatistics(uint32_t* received_delta_frames,
+                                      uint32_t* received_key_frames) const {
+  assert(received_delta_frames);
+  assert(received_key_frames);
+  CriticalSectionScoped cs(crit_sect_);
+  *received_delta_frames = receive_statistics_[1] + receive_statistics_[3];
+  *received_key_frames = receive_statistics_[0] + receive_statistics_[2];
+}
+
+int VCMJitterBuffer::num_not_decodable_packets() const {
+  CriticalSectionScoped cs(crit_sect_);
+  return num_not_decodable_packets_;
+}
+
+int VCMJitterBuffer::num_discarded_packets() const {
+  CriticalSectionScoped cs(crit_sect_);
+  return num_discarded_packets_;
+}
+
+// Calculate framerate and bitrate.
+void VCMJitterBuffer::IncomingRateStatistics(unsigned int* framerate,
+                                             unsigned int* bitrate) {
+  assert(framerate);
+  assert(bitrate);
+  CriticalSectionScoped cs(crit_sect_);
+  const int64_t now = clock_->MillisecondTimestamp();
+  int64_t diff = now - time_last_incoming_frame_count_;
+  if (diff < 1000 && incoming_frame_rate_ > 0 && incoming_bit_rate_ > 0) {
+    // Make sure we report something even though less than
+    // 1 second has passed since last update.
+    *framerate = incoming_frame_rate_;
+    *bitrate = incoming_bit_rate_;
+  } else if (incoming_frame_count_ != 0) {
+    // We have received frame(s) since the last call to this function.
+
+    // Prepare the calculations.
+    if (diff <= 0) {
+      diff = 1;
     }
-    return 0;
+    // We add 0.5f for rounding.
+    float rate = 0.5f + ((incoming_frame_count_ * 1000.0f) / diff);
+    if (rate < 1.0f) {
+      rate = 1.0f;
+    }
+
+    // Calculate frame rate
+    // Let r be rate.
+    // r(0) = 1000*framecount/delta_time.
+    // (I.e. frames per second since last calculation.)
+    // frame_rate = r(0)/2 + r(-1)/2
+    // (I.e. fr/s average this and the previous calculation.)
+    *framerate = (incoming_frame_rate_ + static_cast<unsigned int>(rate)) / 2;
+    incoming_frame_rate_ = static_cast<unsigned int>(rate);
+
+    // Calculate bit rate
+    if (incoming_bit_count_ == 0) {
+      *bitrate = 0;
+    } else {
+      *bitrate = 10 * ((100 * incoming_bit_count_) /
+                       static_cast<unsigned int>(diff));
+    }
+    incoming_bit_rate_ = *bitrate;
+
+    // Reset count
+    incoming_frame_count_ = 0;
+    incoming_bit_count_ = 0;
+    time_last_incoming_frame_count_ = now;
+
+  } else {
+    // No frames since last call
+    time_last_incoming_frame_count_ = clock_->MillisecondTimestamp();
+    *framerate = 0;
+    *bitrate = 0;
+    incoming_bit_rate_ = 0;
+  }
 }
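A worked sketch of the framerate smoothing above (SmoothedFramerate is illustrative): the reported rate is the mean of the current and previous measurements, with the current one rounded and clamped to at least 1 fps.

unsigned int SmoothedFramerate(unsigned int prev_rate_fps,
                               unsigned int frame_count,
                               long long elapsed_ms) {
  if (elapsed_ms <= 0)
    elapsed_ms = 1;
  float rate = 0.5f + ((frame_count * 1000.0f) / elapsed_ms);  // Rounding.
  if (rate < 1.0f)
    rate = 1.0f;
  return (prev_rate_fps + static_cast<unsigned int>(rate)) / 2;
}

For example, 30 frames over 1000 ms with a previous rate of 28 fps reports (28 + 30) / 2 = 29 fps.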
 
-WebRtc_UWord32 VCMJitterBuffer::NumNotDecodablePackets() const {
-  CriticalSectionScoped cs(_critSect);
-  return _packetsNotDecodable;
+// Wait for the first packet in the next frame to arrive.
+int64_t VCMJitterBuffer::NextTimestamp(uint32_t max_wait_time_ms,
+                                       FrameType* incoming_frame_type,
+                                       int64_t* render_time_ms) {
+  assert(incoming_frame_type);
+  assert(render_time_ms);
+  if (!running_) {
+    return -1;
+  }
+
+  crit_sect_->Enter();
+
+  // Find the oldest frame ready for the decoder; check sequence number
+  // and size.
+  CleanUpOldFrames();
+
+  FrameList::iterator it = frame_list_.begin();
+
+  if (it == frame_list_.end()) {
+    packet_event_.Reset();
+    crit_sect_->Leave();
+
+    if (packet_event_.Wait(max_wait_time_ms) == kEventSignaled) {
+      // Are we closing down the jitter buffer?
+      if (!running_) {
+        return -1;
+      }
+      crit_sect_->Enter();
+
+      CleanUpOldFrames();
+      it = frame_list_.begin();
+    } else {
+      crit_sect_->Enter();
+    }
+  }
+
+  if (it == frame_list_.end()) {
+    crit_sect_->Leave();
+    return -1;
+  }
+  // We have a frame.
+  *incoming_frame_type = (*it)->FrameType();
+  *render_time_ms = (*it)->RenderTimeMs();
+  const uint32_t timestamp = (*it)->TimeStamp();
+  crit_sect_->Leave();
+
+  return timestamp;
 }
 
-WebRtc_UWord32 VCMJitterBuffer::DiscardedPackets() const {
-  CriticalSectionScoped cs(_critSect);
-  return _discardedPackets;
+// Answers the question: will the packet sequence be complete if the next
+// frame is grabbed for decoding right now? That is, have we lost a frame
+// between the last decoded frame and the next, or is the next frame missing
+// one or more packets?
+bool VCMJitterBuffer::CompleteSequenceWithNextFrame() {
+  CriticalSectionScoped cs(crit_sect_);
+  // Find the oldest frame ready for the decoder; check sequence number
+  // and size.
+  CleanUpOldFrames();
+
+  if (frame_list_.empty())
+    return true;
+
+  VCMFrameBuffer* oldest_frame = frame_list_.front();
+  if (frame_list_.size() <= 1 &&
+      oldest_frame->GetState() != kStateComplete) {
+    // Frame not ready to be decoded.
+    return true;
+  }
+  if (!oldest_frame->Complete()) {
+    return false;
+  }
+
+  // See if we have lost a frame before this one.
+  if (last_decoded_state_.init()) {
+    // Following start, reset or flush -> check for key frame.
+    if (oldest_frame->FrameType() != kVideoFrameKey) {
+      return false;
+    }
+  } else if (oldest_frame->GetLowSeqNum() == -1) {
+    return false;
+  } else if (!last_decoded_state_.ContinuousFrame(oldest_frame)) {
+    return false;
+  }
+  return true;
+}
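A hedged distillation of the continuity decision above (IsContinuous is illustrative; the real ContinuousFrame() also accounts for timestamps and layers):

bool IsContinuous(bool decoder_in_initial_state,
                  bool oldest_is_key_frame,
                  int last_decoded_seq_num,
                  int oldest_frame_low_seq_num) {
  if (decoder_in_initial_state)
    return oldest_is_key_frame;  // After start/reset/flush: need a key frame.
  if (oldest_frame_low_seq_num == -1)
    return false;  // The frame has no packets yet.
  // 16-bit arithmetic handles sequence-number wrap (0xffff -> 0x0000).
  return static_cast<unsigned short>(last_decoded_seq_num + 1) ==
         static_cast<unsigned short>(oldest_frame_low_seq_num);
}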
+
+// Returns immediately, or waits up to |max_wait_time_ms| ms for a complete
+// frame to arrive; |max_wait_time_ms| is decided by the caller.
+VCMEncodedFrame* VCMJitterBuffer::GetCompleteFrameForDecoding(
+    uint32_t max_wait_time_ms) {
+  if (!running_) {
+    return NULL;
+  }
+
+  crit_sect_->Enter();
+
+  CleanUpOldFrames();
+
+  if (last_decoded_state_.init() && WaitForRetransmissions()) {
+    waiting_for_key_frame_ = true;
+  }
+
+  FrameList::iterator it = FindOldestCompleteContinuousFrame(false);
+  if (it == frame_list_.end()) {
+    if (max_wait_time_ms == 0) {
+      crit_sect_->Leave();
+      return NULL;
+    }
+    const int64_t end_wait_time_ms =
+        clock_->MillisecondTimestamp() + max_wait_time_ms;
+    int64_t wait_time_ms = max_wait_time_ms;
+    while (wait_time_ms > 0) {
+      crit_sect_->Leave();
+      const EventTypeWrapper ret =
+        frame_event_.Wait(static_cast<uint32_t>(wait_time_ms));
+      crit_sect_->Enter();
+      if (ret == kEventSignaled) {
+        // Are we closing down the jitter buffer?
+        if (!running_) {
+          crit_sect_->Leave();
+          return NULL;
+        }
+
+        // Find the oldest frame ready for the decoder, but check
+        // sequence number and size.
+        CleanUpOldFrames();
+        it = FindOldestCompleteContinuousFrame(false);
+        if (it == frame_list_.end()) {
+          wait_time_ms = end_wait_time_ms - clock_->MillisecondTimestamp();
+        } else {
+          break;
+        }
+      } else {
+        crit_sect_->Leave();
+        return NULL;
+      }
+    }
+    // Inside |crit_sect_|.
+  } else {
+    // We already have a frame, so reset the event.
+    frame_event_.Reset();
+  }
+
+  if (it == frame_list_.end()) {
+    // Even after signaling we're still missing a complete continuous frame.
+    crit_sect_->Leave();
+    return NULL;
+  }
+
+  VCMFrameBuffer* oldest_frame = *it;
+  it = frame_list_.erase(it);
+
+  // Update jitter estimate.
+  const bool retransmitted = (oldest_frame->GetNackCount() > 0);
+  if (retransmitted) {
+    jitter_estimate_.FrameNacked();
+  } else if (oldest_frame->Length() > 0) {
+    // Ignore retransmitted and empty frames.
+    UpdateJitterEstimate(*oldest_frame, false);
+  }
+
+  oldest_frame->SetState(kStateDecoding);
+
+  CleanUpOldFrames();
+
+  if (oldest_frame->FrameType() == kVideoFrameKey) {
+    waiting_for_key_frame_ = false;
+  }
+
+  // We have a frame - update decoded state with frame info.
+  last_decoded_state_.SetState(oldest_frame);
+
+  crit_sect_->Leave();
+
+  return oldest_frame;
+}
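The waiting logic above follows a standard deadline-bounded wait: compute an absolute end time, and after every wakeup either take the frame or wait for whatever time remains. A generic sketch with stand-in callbacks for the event and clock wrappers (NowMs, WaitMs, and FrameReady are assumptions, not WebRTC APIs):

bool WaitForFrameUntilDeadline(long long max_wait_ms,
                               long long (*NowMs)(),
                               bool (*WaitMs)(long long),  // True if signaled.
                               bool (*FrameReady)()) {
  const long long deadline_ms = NowMs() + max_wait_ms;
  long long wait_ms = max_wait_ms;
  while (wait_ms > 0) {
    if (!WaitMs(wait_ms))
      return false;                   // Timed out: give up.
    if (FrameReady())
      return true;                    // Signaled and a frame is ready.
    wait_ms = deadline_ms - NowMs();  // Signaled early: wait the remainder.
  }
  return false;
}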
+
+VCMEncodedFrame* VCMJitterBuffer::GetFrameForDecoding() {
+  CriticalSectionScoped cs(crit_sect_);
+  if (!running_) {
+    return NULL;
+  }
+
+  if (WaitForRetransmissions()) {
+    return GetFrameForDecodingNACK();
+  }
+
+  CleanUpOldFrames();
+
+  if (frame_list_.empty()) {
+    return NULL;
+  }
+
+  VCMFrameBuffer* oldest_frame = frame_list_.front();
+  if (frame_list_.size() <= 1 &&
+      oldest_frame->GetState() != kStateComplete) {
+    return NULL;
+  }
+
+  // An incomplete frame is pulled out from the jitter buffer; update the
+  // jitter estimate with what we currently know.
+  // This frame shouldn't have been retransmitted, but if we recently
+  // turned off NACK this might still happen.
+  const bool retransmitted = (oldest_frame->GetNackCount() > 0);
+  if (retransmitted) {
+    jitter_estimate_.FrameNacked();
+  } else if (oldest_frame->Length() > 0) {
+    // Ignore retransmitted and empty frames.
+    // Update with the previous incomplete frame first
+    if (waiting_for_completion_.latest_packet_time >= 0) {
+      UpdateJitterEstimate(waiting_for_completion_, true);
+    }
+    // Then wait for this one to get complete
+    waiting_for_completion_.frame_size = oldest_frame->Length();
+    waiting_for_completion_.latest_packet_time =
+      oldest_frame->LatestPacketTimeMs();
+    waiting_for_completion_.timestamp = oldest_frame->TimeStamp();
+  }
+  frame_list_.erase(frame_list_.begin());
+
+  // Look for previous frame loss
+  VerifyAndSetPreviousFrameLost(oldest_frame);
+
+  // The state must be changed to decoding before cleaning up zero sized
+  // frames to avoid empty frames being cleaned up and then given to the
+  // decoder.
+  // Set as decoding. Propagates the missing_frame bit.
+  oldest_frame->SetState(kStateDecoding);
+
+  CleanUpOldFrames();
+
+  if (oldest_frame->FrameType() == kVideoFrameKey) {
+    waiting_for_key_frame_ = false;
+  }
+
+  num_not_decodable_packets_ += oldest_frame->NotDecodablePackets();
+
+  // We have a frame - update decoded state with frame info.
+  last_decoded_state_.SetState(oldest_frame);
+
+  return oldest_frame;
+}
+
+// Release frame when done with decoding. Should never be used to release
+// frames from within the jitter buffer.
+void VCMJitterBuffer::ReleaseFrame(VCMEncodedFrame* frame) {
+  CriticalSectionScoped cs(crit_sect_);
+  VCMFrameBuffer* frame_buffer = static_cast<VCMFrameBuffer*>(frame);
+  if (frame_buffer)
+    frame_buffer->SetState(kStateFree);
 }
 
 // Gets frame to use for this timestamp. If no match, get empty frame.
-WebRtc_Word32
-VCMJitterBuffer::GetFrame(const VCMPacket& packet, VCMEncodedFrame*& frame)
-{
-    if (!_running) // don't accept incoming packets until we are started
-    {
-        return VCM_UNINITIALIZED;
+int VCMJitterBuffer::GetFrame(const VCMPacket& packet,
+                              VCMEncodedFrame*& frame) {
+  if (!running_) {  // Don't accept incoming packets until we are started.
+    return VCM_UNINITIALIZED;
+  }
+
+  crit_sect_->Enter();
+  // Does this packet belong to an old frame?
+  if (last_decoded_state_.IsOldPacket(&packet)) {
+    // Account only for media packets.
+    if (packet.sizeBytes > 0) {
+      num_discarded_packets_++;
+      num_consecutive_old_packets_++;
     }
+    // Update last decoded sequence number if the packet arrived late and
+    // belongs to a frame with a timestamp equal to the last decoded
+    // timestamp.
+    last_decoded_state_.UpdateOldPacket(&packet);
 
-    _critSect->Enter();
-    // Does this packet belong to an old frame?
-    if (_lastDecodedState.IsOldPacket(&packet))
-    {
-        // Account only for media packets
-        if (packet.sizeBytes > 0)
-        {
-            _discardedPackets++;
-            _numConsecutiveOldPackets++;
-        }
-        // Update last decoded sequence number if the packet arrived late and
-        // belongs to a frame with a timestamp equal to the last decoded
-        // timestamp.
-        _lastDecodedState.UpdateOldPacket(&packet);
-
-        if (_numConsecutiveOldPackets > kMaxConsecutiveOldPackets)
-        {
-            FlushInternal();
-            _critSect->Leave();
-            return VCM_FLUSH_INDICATOR;
-        }
-        _critSect->Leave();
-        return VCM_OLD_PACKET_ERROR;
+    if (num_consecutive_old_packets_ > kMaxConsecutiveOldPackets) {
+      Flush();
+      crit_sect_->Leave();
+      return VCM_FLUSH_INDICATOR;
     }
-    _numConsecutiveOldPackets = 0;
+    crit_sect_->Leave();
+    return VCM_OLD_PACKET_ERROR;
+  }
+  num_consecutive_old_packets_ = 0;
 
-    FrameList::iterator it = std::find_if(
-        _frameList.begin(),
-        _frameList.end(),
-        FrameEqualTimestamp(packet.timestamp));
+  FrameList::iterator it = std::find_if(
+                             frame_list_.begin(),
+                             frame_list_.end(),
+                             FrameEqualTimestamp(packet.timestamp));
 
-    if (it != _frameList.end()) {
-      frame = *it;
-      _critSect->Leave();
-      return VCM_OK;
-    }
+  if (it != frame_list_.end()) {
+    frame = *it;
+    crit_sect_->Leave();
+    return VCM_OK;
+  }
 
-    _critSect->Leave();
+  crit_sect_->Leave();
 
-    // No match, return empty frame
-    frame = GetEmptyFrame();
-    if (frame != NULL)
-    {
-        return VCM_OK;
-    }
-    // No free frame! Try to reclaim some...
-    _critSect->Enter();
-    RecycleFramesUntilKeyFrame();
-    _critSect->Leave();
+  // No match, return empty frame.
+  frame = GetEmptyFrame();
+  if (frame != NULL) {
+    return VCM_OK;
+  }
+  // No free frame! Try to reclaim some...
+  crit_sect_->Enter();
+  RecycleFramesUntilKeyFrame();
+  crit_sect_->Leave();
 
-    frame = GetEmptyFrame();
-    if (frame != NULL)
-    {
-        return VCM_OK;
-    }
-    return VCM_JITTER_BUFFER_ERROR;
+  frame = GetEmptyFrame();
+  if (frame != NULL) {
+    return VCM_OK;
+  }
+  return VCM_JITTER_BUFFER_ERROR;
 }
 
 // Deprecated! Kept for testing purposes.
-VCMEncodedFrame*
-VCMJitterBuffer::GetFrame(const VCMPacket& packet)
-{
-    VCMEncodedFrame* frame = NULL;
-    if (GetFrame(packet, frame) < 0)
-    {
-        return NULL;
-    }
-    return frame;
-}
-
-// Get empty frame, creates new (i.e. increases JB size) if necessary
-VCMFrameBuffer*
-VCMJitterBuffer::GetEmptyFrame()
-{
-    if (!_running) // don't accept incoming packets until we are started
-    {
-        return NULL;
-    }
-
-    _critSect->Enter();
-
-    for (int i = 0; i <_maxNumberOfFrames; ++i)
-    {
-        if (kStateFree == _frameBuffers[i]->GetState())
-        {
-            // found a free buffer
-            _frameBuffers[i]->SetState(kStateEmpty);
-            _critSect->Leave();
-            return _frameBuffers[i];
-        }
-    }
-
-    // Check if we can increase JB size
-    if (_maxNumberOfFrames < kMaxNumberOfFrames)
-    {
-        VCMFrameBuffer* ptrNewBuffer = new VCMFrameBuffer();
-        ptrNewBuffer->SetState(kStateEmpty);
-        _frameBuffers[_maxNumberOfFrames] = ptrNewBuffer;
-        _maxNumberOfFrames++;
-
-        _critSect->Leave();
-        WEBRTC_TRACE(webrtc::kTraceDebug, webrtc::kTraceVideoCoding,
-        VCMId(_vcmId, _receiverId), "JB(0x%x) FB(0x%x): Jitter buffer "
-        "increased to:%d frames", this, ptrNewBuffer, _maxNumberOfFrames);
-        return ptrNewBuffer;
-    }
-    _critSect->Leave();
-
-    // We have reached max size, cannot increase JB size
+VCMEncodedFrame* VCMJitterBuffer::GetFrame(const VCMPacket& packet) {
+  VCMEncodedFrame* frame = NULL;
+  if (GetFrame(packet, frame) < 0) {
     return NULL;
+  }
+  return frame;
 }
 
+int64_t VCMJitterBuffer::LastPacketTime(VCMEncodedFrame* frame,
+                                        bool* retransmitted) const {
+  assert(retransmitted);
+  CriticalSectionScoped cs(crit_sect_);
+  *retransmitted = (static_cast<VCMFrameBuffer*>(frame)->GetNackCount() > 0);
+  return static_cast<VCMFrameBuffer*>(frame)->LatestPacketTimeMs();
+}
+
+VCMFrameBufferEnum VCMJitterBuffer::InsertPacket(VCMEncodedFrame* encoded_frame,
+                                                 const VCMPacket& packet) {
+  assert(encoded_frame);
+  CriticalSectionScoped cs(crit_sect_);
+  int64_t now_ms = clock_->MillisecondTimestamp();
+  VCMFrameBufferEnum buffer_return = kSizeError;
+  VCMFrameBufferEnum ret = kSizeError;
+  VCMFrameBuffer* frame = static_cast<VCMFrameBuffer*>(encoded_frame);
+
+  // We are keeping track of the first seq num, the latest seq num and
+  // the number of wraps to be able to calculate how many packets we expect.
+  if (first_packet_) {
+    // Now it's time to start estimating jitter;
+    // reset the delay estimate.
+    inter_frame_delay_.Reset(clock_->MillisecondTimestamp());
+    first_packet_ = false;
+  }
+
+  // Empty packets may bias the jitter estimate (lacking size component);
+  // therefore don't let an empty packet trigger the following updates:
+  if (packet.frameType != kFrameEmpty) {
+    if (waiting_for_completion_.timestamp == packet.timestamp) {
+      // This can get bad if we have a lot of duplicate packets, as we will
+      // then count some packets multiple times.
+      waiting_for_completion_.frame_size += packet.sizeBytes;
+      waiting_for_completion_.latest_packet_time = now_ms;
+    } else if (waiting_for_completion_.latest_packet_time >= 0 &&
+               waiting_for_completion_.latest_packet_time + 2000 <= now_ms) {
+      // A packet should never be more than two seconds late
+      UpdateJitterEstimate(waiting_for_completion_, true);
+      waiting_for_completion_.latest_packet_time = -1;
+      waiting_for_completion_.frame_size = 0;
+      waiting_for_completion_.timestamp = 0;
+    }
+  }
+
+  VCMFrameBufferStateEnum state = frame->GetState();
+  last_decoded_state_.UpdateOldPacket(&packet);
+  // Insert packet
+  // Check for first packet
+  // High sequence number will be -1 if neither an empty packet nor
+  // a media packet has been inserted.
+  bool first = (frame->GetHighSeqNum() == -1);
+  // When in Hybrid mode, we allow for a decodable state.
+  // Note: in the current version a decodable frame will never be triggered,
+  // as the body of the function is empty.
+  // TODO(mikhal): Update when decodable is enabled.
+  buffer_return = frame->InsertPacket(packet, now_ms,
+                                      nack_mode_ == kNackHybrid,
+                                      rtt_ms_);
+  ret = buffer_return;
+  if (buffer_return > 0) {
+    incoming_bit_count_ += packet.sizeBytes << 3;
+
+    // Has this packet been nacked or is it about to be nacked?
+    if (IsPacketRetransmitted(packet)) {
+      frame->IncrementNackCount();
+    }
+
+    // Insert each frame once on the arrival of the first packet
+    // belonging to that frame (media or empty).
+    if (state == kStateEmpty && first) {
+      ret = kFirstPacket;
+      FrameList::reverse_iterator rit = std::find_if(
+          frame_list_.rbegin(),
+          frame_list_.rend(),
+          FrameSmallerTimestamp(frame->TimeStamp()));
+      frame_list_.insert(rit.base(), frame);
+    }
+  }
+  switch (buffer_return) {
+    case kStateError:
+    case kTimeStampError:
+    case kSizeError: {
+      if (frame != NULL) {
+        // Will be released when it gets old.
+        frame->Reset();
+        frame->SetState(kStateEmpty);
+      }
+      break;
+    }
+    case kCompleteSession: {
+      // Only update return value for a JB flush indicator.
+      if (UpdateFrameState(frame) == kFlushIndicator)
+        ret = kFlushIndicator;
+      // Signal that we have a received packet.
+      packet_event_.Set();
+      break;
+    }
+    case kDecodableSession:
+    case kIncomplete: {
+      // Signal that we have a received packet.
+      packet_event_.Set();
+      break;
+    }
+    case kNoError:
+    case kDuplicatePacket: {
+      break;
+    }
+    default: {
+      assert(false && "JitterBuffer::InsertPacket: Undefined value");
+    }
+  }
+  return ret;
+}
+
+uint32_t VCMJitterBuffer::EstimatedJitterMs() {
+  CriticalSectionScoped cs(crit_sect_);
+  uint32_t estimate = VCMJitterEstimator::OPERATING_SYSTEM_JITTER;
+
+  // Compute the RTT multiplier for the estimate.
+  // low_rtt_nack_threshold_ms_ == -1 means no FEC.
+  double rtt_mult = 1.0;
+  if (nack_mode_ == kNackHybrid && (low_rtt_nack_threshold_ms_ >= 0 &&
+      static_cast<int>(rtt_ms_) > low_rtt_nack_threshold_ms_)) {
+    // From here on we count on FEC.
+    rtt_mult = 0.0;
+  }
+  estimate += static_cast<uint32_t>
+              (jitter_estimate_.GetJitterEstimate(rtt_mult) + 0.5);
+  return estimate;
+}
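The estimate composition can be summarized as below: a simplified sketch where the multiplier only scales the retransmission-dependent part of the jitter. The split into frame_jitter_ms and nack_jitter_ms is an assumption about GetJitterEstimate() internals, for illustration only.

double EstimatedJitterSketchMs(double frame_jitter_ms,
                               double nack_jitter_ms,
                               double os_jitter_ms,
                               bool hybrid_nack_with_fec) {
  // With hybrid NACK and RTT above the low threshold, FEC is counted on,
  // so the retransmission component is zeroed out.
  const double rtt_mult = hybrid_nack_with_fec ? 0.0 : 1.0;
  return os_jitter_ms + frame_jitter_ms + nack_jitter_ms * rtt_mult;
}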
+
+void VCMJitterBuffer::UpdateRtt(uint32_t rtt_ms) {
+  CriticalSectionScoped cs(crit_sect_);
+  rtt_ms_ = rtt_ms;
+  jitter_estimate_.UpdateRtt(rtt_ms);
+}
+
+void VCMJitterBuffer::SetNackMode(VCMNackMode mode,
+                                  int low_rtt_nack_threshold_ms,
+                                  int high_rtt_nack_threshold_ms) {
+  CriticalSectionScoped cs(crit_sect_);
+  nack_mode_ = mode;
+  assert(low_rtt_nack_threshold_ms >= -1 && high_rtt_nack_threshold_ms >= -1);
+  assert(high_rtt_nack_threshold_ms == -1 ||
+         low_rtt_nack_threshold_ms <= high_rtt_nack_threshold_ms);
+  assert(low_rtt_nack_threshold_ms > -1 || high_rtt_nack_threshold_ms == -1);
+  low_rtt_nack_threshold_ms_ = low_rtt_nack_threshold_ms;
+  high_rtt_nack_threshold_ms_ = high_rtt_nack_threshold_ms;
+  if (nack_mode_ == kNoNack) {
+    jitter_estimate_.ResetNackCount();
+  }
+}
+
+VCMNackMode VCMJitterBuffer::nack_mode() const {
+  CriticalSectionScoped cs(crit_sect_);
+  return nack_mode_;
+}
+
+uint16_t* VCMJitterBuffer::CreateNackList(uint16_t* nack_list_size,
+                                          bool* list_extended) {
+  assert(nack_list_size);
+  assert(list_extended);
+  // TODO(mikhal/stefan): Refactor to use last_decoded_state.
+  CriticalSectionScoped cs(crit_sect_);
+  int i = 0;
+  int32_t low_seq_num = -1;
+  int32_t high_seq_num = -1;
+  *list_extended = false;
+
+  // Don't create a NACK list if we won't wait for the retransmitted packets.
+  if (!WaitForRetransmissions()) {
+    *nack_list_size = 0;
+    return NULL;
+  }
+
+  // Find the lowest (last decoded) sequence number and
+  // the highest (highest sequence number of the newest frame)
+  // sequence number. The NACK list is a subset of the range
+  // between those two numbers.
+  GetLowHighSequenceNumbers(&low_seq_num, &high_seq_num);
+
+  // Build a list of all sequence numbers we have.
+  if (low_seq_num == -1 || high_seq_num == -1) {
+    // This happens if we lose the first packet; nothing is popped.
+    if (high_seq_num == -1) {
+      // We have not received any packets yet.
+      *nack_list_size = 0;
+    } else {
+      // Signal that we want a key frame request to be sent.
+      *nack_list_size = 0xffff;
+    }
+    return NULL;
+  }
+
+  int number_of_seq_num = 0;
+  if (low_seq_num > high_seq_num) {
+    if (low_seq_num - high_seq_num > 0x00ff) {
+      // Wrap.
+      number_of_seq_num = (0xffff - low_seq_num) + high_seq_num + 1;
+    }
+  } else {
+    number_of_seq_num = high_seq_num - low_seq_num;
+  }
+
+  if (number_of_seq_num > kNackHistoryLength) {
+    // NACK list has grown too big, flush and try to restart.
+    WEBRTC_TRACE(webrtc::kTraceWarning, webrtc::kTraceVideoCoding,
+                 VCMId(vcm_id_, receiver_id_),
+                 "Nack list too large, try to find a key frame and restart "
+                 "from seq: %d. Lowest seq in jb %d",
+                 high_seq_num, low_seq_num);
+
+    // This NACK size will trigger a key frame request.
+    bool found_key_frame = false;
+
+    while (number_of_seq_num > kNackHistoryLength) {
+      found_key_frame = RecycleFramesUntilKeyFrame();
+
+      if (!found_key_frame) {
+        break;
+      }
+
+      // Check if we still have too many packets in the jitter buffer.
+      low_seq_num = -1;
+      high_seq_num = -1;
+      GetLowHighSequenceNumbers(&low_seq_num, &high_seq_num);
+
+      if (high_seq_num == -1) {
+        assert(low_seq_num != -1);  // This should never happen.
+        // We can't calculate the NACK list length.
+        return NULL;
+      }
+
+      number_of_seq_num = 0;
+      if (low_seq_num > high_seq_num) {
+        if (low_seq_num - high_seq_num > 0x00ff) {
+          // Wrap.
+          number_of_seq_num = (0xffff - low_seq_num) + high_seq_num + 1;
+          high_seq_num = low_seq_num;
+        }
+      } else {
+        number_of_seq_num = high_seq_num - low_seq_num;
+      }
+    }
+
+    if (!found_key_frame) {
+      // Set the last decoded sequence number to current high.
+      // This is to avoid getting a large NACK list again right away.
+      last_decoded_state_.SetSeqNum(static_cast<uint16_t>(high_seq_num));
+      // Set to trigger key frame signal.
+      *nack_list_size = 0xffff;
+      *list_extended = true;
+      WEBRTC_TRACE(webrtc::kTraceDebug, webrtc::kTraceVideoCoding, -1,
+                   "\tNo key frame found, request one. last_decoded_seq_num_ "
+                   "%d", last_decoded_state_.sequence_num());
+    } else {
+      // We have cleaned up the jitter buffer and found a key frame.
+      WEBRTC_TRACE(webrtc::kTraceDebug, webrtc::kTraceVideoCoding, -1,
+                   "\tKey frame found. last_decoded_seq_num_ %d",
+                   last_decoded_state_.sequence_num());
+      *nack_list_size = 0;
+    }
+
+    return NULL;
+  }
+
+  uint16_t seq_number_iterator = static_cast<uint16_t>(low_seq_num + 1);
+  for (i = 0; i < number_of_seq_num; i++) {
+    nack_seq_nums_internal_[i] = seq_number_iterator;
+    seq_number_iterator++;
+  }
+  // Now we have a list of all sequence numbers that could have been sent.
+  // Zero out the ones we have received.
+  for (i = 0; i < max_number_of_frames_; i++) {
+    // We don't need to check if frame is decoding since low_seq_num is based
+    // on the last decoded sequence number.
+    VCMFrameBufferStateEnum state = frame_buffers_[i]->GetState();
+    if ((kStateFree != state) &&
+        (kStateEmpty != state)) {
+      // Getting this far means we are going to update the NACK list.
+      // When in hybrid mode, we use the soft NACKing feature.
+      if (nack_mode_ == kNackHybrid) {
+        frame_buffers_[i]->BuildSoftNackList(nack_seq_nums_internal_,
+                                             number_of_seq_num,
+                                             rtt_ms_);
+      } else {
+        // Even if the frame is being processed by the decoding thread, we
+        // don't need that information in this loop.
+        frame_buffers_[i]->BuildHardNackList(nack_seq_nums_internal_,
+                                             number_of_seq_num);
+      }
+    }
+  }
+
+  // Compress the list.
+  int empty_index = -1;
+  for (i = 0; i < number_of_seq_num; i++) {
+    if (nack_seq_nums_internal_[i] == -1 || nack_seq_nums_internal_[i] == -2) {
+      // This is empty.
+      if (empty_index == -1) {
+        // No empty index before, remember this position.
+        empty_index = i;
+      }
+    } else {
+      // This is not empty.
+      if (empty_index != -1) {
+        // Move this entry down into the first empty slot.
+        nack_seq_nums_internal_[empty_index] = nack_seq_nums_internal_[i];
+        nack_seq_nums_internal_[i] = -1;
+        empty_index++;
+      }
+    }
+  }
+
+  if (empty_index == -1) {
+    // No empty entries in the list.
+    *nack_list_size = number_of_seq_num;
+  } else {
+    *nack_list_size = empty_index;
+  }
+
+  if (*nack_list_size > nack_seq_nums_length_) {
+    // Larger list: NACK list was extended since the last call.
+    *list_extended = true;
+  }
+
+  for (unsigned int j = 0; j < *nack_list_size; j++) {
+    // Check if the list has been extended since it was last created, i.e.,
+    // new items have been added.
+    if (nack_seq_nums_length_ > j && !*list_extended) {
+      unsigned int k = 0;
+      for (k = j; k < nack_seq_nums_length_; k++) {
+        // Found the item in the last list, i.e., no new items found yet.
+        if (nack_seq_nums_[k] ==
+            static_cast<uint16_t>(nack_seq_nums_internal_[j])) {
+          break;
+        }
+      }
+      if (k == nack_seq_nums_length_) {  // New item not found in last list.
+        *list_extended = true;
+      }
+    } else {
+      *list_extended = true;
+    }
+    nack_seq_nums_[j] = static_cast<uint16_t>(nack_seq_nums_internal_[j]);
+  }
+
+  nack_seq_nums_length_ = *nack_list_size;
+
+  return nack_seq_nums_;
+}
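Stripped of the length capping and the soft-NACK special cases, the list construction above is: enumerate every sequence number in (last decoded, newest], strike out the ones that arrived, and return what is left. A compact sketch (BuildNackList is illustrative) in which plain 16-bit arithmetic handles sequence-number wrap:

#include <set>
#include <vector>

std::vector<unsigned short> BuildNackList(
    unsigned short last_decoded_seq_num,
    unsigned short newest_seq_num,
    const std::set<unsigned short>& received) {
  std::vector<unsigned short> nack;
  const unsigned short end = static_cast<unsigned short>(newest_seq_num + 1);
  for (unsigned short seq =
           static_cast<unsigned short>(last_decoded_seq_num + 1);
       seq != end; ++seq) {
    if (received.count(seq) == 0)
      nack.push_back(seq);  // Still missing: ask for a retransmission.
  }
  return nack;
}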
+
+int64_t VCMJitterBuffer::LastDecodedTimestamp() const {
+  CriticalSectionScoped cs(crit_sect_);
+  return last_decoded_state_.time_stamp();
+}
+
+VCMEncodedFrame* VCMJitterBuffer::GetFrameForDecodingNACK() {
+  CleanUpOldFrames();
+  // First look for a complete continuous frame.
+  // When waiting for NACK, wait for a key frame if a continuous frame
+  // cannot be determined (i.e. in the initial decoding state).
+  if (last_decoded_state_.init()) {
+    waiting_for_key_frame_ = true;
+  }
+  // Allow for a decodable frame when in Hybrid mode.
+  bool enable_decodable = (nack_mode_ == kNackHybrid);
+  FrameList::iterator it = FindOldestCompleteContinuousFrame(enable_decodable);
+  if (it == frame_list_.end()) {
+    // If we didn't find one, settle for a complete key/decodable frame.
+    it = find_if(frame_list_.begin(), frame_list_.end(),
+                 CompleteDecodableKeyFrameCriteria());
+    if (it == frame_list_.end()) {
+      return NULL;
+    }
+  }
+  VCMFrameBuffer* oldest_frame = *it;
+  // Update jitter estimate
+  const bool retransmitted = (oldest_frame->GetNackCount() > 0);
+  if (retransmitted) {
+    jitter_estimate_.FrameNacked();
+  } else if (oldest_frame->Length() > 0) {
+    // Ignore retransmitted and empty frames.
+    UpdateJitterEstimate(*oldest_frame, false);
+  }
+  it = frame_list_.erase(it);
+
+  // Look for previous frame loss.
+  VerifyAndSetPreviousFrameLost(oldest_frame);
+
+  // The state must be changed to decoding before cleaning up zero sized
+  // frames to avoid empty frames being cleaned up and then given to the
+  // decoder.
+  oldest_frame->SetState(kStateDecoding);
+
+  // Clean up old frames and empty frames.
+  CleanUpOldFrames();
+
+  if (oldest_frame->FrameType() == kVideoFrameKey) {
+    waiting_for_key_frame_ = false;
+  }
+
+  // We have a frame - update decoded state with frame info.
+  last_decoded_state_.SetState(oldest_frame);
+
+  return oldest_frame;
+}
+
+// Set the frame state to free and remove it from the sorted
+// frame list. Must be called from inside the critical section crit_sect_.
+void VCMJitterBuffer::ReleaseFrameIfNotDecoding(VCMFrameBuffer* frame) {
+  if (frame != NULL && frame->GetState() != kStateDecoding) {
+    frame->SetState(kStateFree);
+  }
+}
+
+VCMFrameBuffer* VCMJitterBuffer::GetEmptyFrame() {
+  if (!running_) {
+    return NULL;
+  }
+
+  crit_sect_->Enter();
+
+  for (int i = 0; i < max_number_of_frames_; ++i) {
+    if (kStateFree == frame_buffers_[i]->GetState()) {
+      // Found a free buffer.
+      frame_buffers_[i]->SetState(kStateEmpty);
+      crit_sect_->Leave();
+      return frame_buffers_[i];
+    }
+  }
+
+  // Check if we can increase JB size
+  if (max_number_of_frames_ < kMaxNumberOfFrames) {
+    VCMFrameBuffer* ptr_new_buffer = new VCMFrameBuffer();
+    ptr_new_buffer->SetState(kStateEmpty);
+    frame_buffers_[max_number_of_frames_] = ptr_new_buffer;
+    max_number_of_frames_++;
+
+    crit_sect_->Leave();
+    WEBRTC_TRACE(webrtc::kTraceDebug, webrtc::kTraceVideoCoding,
+                 VCMId(vcm_id_, receiver_id_),
+                 "JB(0x%x) FB(0x%x): Jitter buffer  increased to:%d frames",
+                 this, ptr_new_buffer, max_number_of_frames_);
+    return ptr_new_buffer;
+  }
+  crit_sect_->Leave();
+
+  // We have reached the maximum size; the jitter buffer cannot grow further.
+  return NULL;
+}
+
+// Recycle oldest frames up to a key frame, used if jitter buffer is completely
+// full.
+bool VCMJitterBuffer::RecycleFramesUntilKeyFrame() {
+  // Remove frames until the oldest remaining frame is a key frame.
+  while (frame_list_.size() > 0) {
+    // Throw at least one frame.
+    drop_count_++;
+    FrameList::iterator it = frame_list_.begin();
+    WEBRTC_TRACE(webrtc::kTraceWarning, webrtc::kTraceVideoCoding,
+                 VCMId(vcm_id_, receiver_id_),
+                 "Jitter buffer drop count:%d, low_seq %d", drop_count_,
+                 (*it)->GetLowSeqNum());
+    ReleaseFrameIfNotDecoding(*it);
+    it = frame_list_.erase(it);
+    if (it != frame_list_.end() && (*it)->FrameType() == kVideoFrameKey) {
+      // Fake the last_decoded_state to match this key frame.
+      last_decoded_state_.SetStateOneBack(*it);
+      return true;
+    }
+  }
+  waiting_for_key_frame_ = true;
+  last_decoded_state_.Reset();  // TODO(mikhal): No sync.
+  return false;
+}
+
+// Must be called under the critical section |crit_sect_|.
+VCMFrameBufferEnum VCMJitterBuffer::UpdateFrameState(VCMFrameBuffer* frame) {
+  if (frame == NULL) {
+    WEBRTC_TRACE(webrtc::kTraceWarning, webrtc::kTraceVideoCoding,
+                 VCMId(vcm_id_, receiver_id_), "JB(0x%x) FB(0x%x): "
+                 "UpdateFrameState NULL frame pointer", this, frame);
+    return kNoError;
+  }
+
+  int length = frame->Length();
+  if (master_) {
+    // Only trace the primary jitter buffer to make it possible to parse
+    // and plot the trace file.
+    WEBRTC_TRACE(webrtc::kTraceDebug, webrtc::kTraceVideoCoding,
+                 VCMId(vcm_id_, receiver_id_),
+                 "JB(0x%x) FB(0x%x): Complete frame added to jitter buffer,"
+                 " size:%d type %d",
+                 this, frame, length, frame->FrameType());
+  }
+
+  if (length != 0 && !frame->GetCountedFrame()) {
+    // Ignore ACK frames.
+    incoming_frame_count_++;
+    frame->SetCountedFrame(true);
+  }
+
+  // Check if we should drop the frame. A complete frame can arrive too late.
+  if (last_decoded_state_.IsOldFrame(frame)) {
+    // Frame is older than the latest decoded frame, drop it. Will be
+    // released by CleanUpOldFrames later.
+    frame->Reset();
+    frame->SetState(kStateEmpty);
+    WEBRTC_TRACE(webrtc::kTraceDebug, webrtc::kTraceVideoCoding,
+                 VCMId(vcm_id_, receiver_id_),
+                 "JB(0x%x) FB(0x%x): Dropping old frame in Jitter buffer",
+                 this, frame);
+    drop_count_++;
+    WEBRTC_TRACE(webrtc::kTraceWarning, webrtc::kTraceVideoCoding,
+                 VCMId(vcm_id_, receiver_id_),
+                 "Jitter buffer drop count: %d, consecutive drops: %u",
+                 drop_count_, num_consecutive_old_frames_);
+    // Flush() if this happens consistently.
+    num_consecutive_old_frames_++;
+    if (num_consecutive_old_frames_ > kMaxConsecutiveOldFrames) {
+      Flush();
+      return kFlushIndicator;
+    }
+    return kNoError;
+  }
+  num_consecutive_old_frames_ = 0;
+  frame->SetState(kStateComplete);
+
+  // Update receive statistics. All layers are counted, so when layers are
+  // used, the sum of key and delta frames may differ from the frame count.
+  if (frame->IsSessionComplete()) {
+    switch (frame->FrameType()) {
+      case kVideoFrameKey: {
+        receive_statistics_[0]++;
+        break;
+      }
+      case kVideoFrameDelta: {
+        receive_statistics_[1]++;
+        break;
+      }
+      case kVideoFrameGolden: {
+        receive_statistics_[2]++;
+        break;
+      }
+      case kVideoFrameAltRef: {
+        receive_statistics_[3]++;
+        break;
+      }
+      default:
+        assert(false);
+    }
+  }
+  const FrameList::iterator it = FindOldestCompleteContinuousFrame(false);
+  VCMFrameBuffer* old_frame = NULL;
+  if (it != frame_list_.end()) {
+    old_frame = *it;
+  }
+
+  // Only signal if this is the oldest frame, which is not necessarily the
+  // case due to packet reordering or NACK.
+  if (!WaitForRetransmissions() || (old_frame != NULL && old_frame == frame)) {
+    frame_event_.Set();
+  }
+  return kNoError;
+}
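The counters updated above surface through the FrameStatistics() accessor declared in the new header; a minimal caller sketch (|jitter_buffer| is again an assumed instance):

  uint32_t received_delta_frames = 0;
  uint32_t received_key_frames = 0;
  jitter_buffer.FrameStatistics(&received_delta_frames, &received_key_frames);
  // As noted above, layers get their own buckets, so key + delta need not
  // equal the total number of frames seen by the jitter buffer.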
 
 // Finds the oldest complete frame, used for getting the next frame to decode.
 // Must be called under the critical section.
-FrameList::iterator
-VCMJitterBuffer::FindOldestCompleteContinuousFrame(bool enable_decodable) {
+FrameList::iterator VCMJitterBuffer::FindOldestCompleteContinuousFrame(
+    bool enable_decodable) {
   // If we have more than one frame done since last time, pick the oldest.
   VCMFrameBuffer* oldest_frame = NULL;
-  FrameList::iterator it = _frameList.begin();
+  FrameList::iterator it = frame_list_.begin();
 
   // When temporal layers are available, we search for a complete or decodable
   // frame until we hit one of the following:
   // 1. Continuous base or sync layer.
   // 2. The end of the list was reached.
-  for (; it != _frameList.end(); ++it)  {
+  for (; it != frame_list_.end(); ++it)  {
     oldest_frame = *it;
     VCMFrameBufferStateEnum state = oldest_frame->GetState();
     // Is this frame complete or decodable and continuous?
     if ((state == kStateComplete ||
-        (enable_decodable && state == kStateDecodable)) &&
-        _lastDecodedState.ContinuousFrame(oldest_frame)) {
+         (enable_decodable && state == kStateDecodable)) &&
+        last_decoded_state_.ContinuousFrame(oldest_frame)) {
       break;
     } else {
       int temporal_id = oldest_frame->TemporalId();
@@ -587,1184 +1249,184 @@
 
   if (oldest_frame == NULL) {
     // No complete frame, no point in continuing.
-    return _frameList.end();
-  } else  if (_waitingForKeyFrame &&
+    return frame_list_.end();
+  } else if (waiting_for_key_frame_ &&
               oldest_frame->FrameType() != kVideoFrameKey) {
     // We are waiting for a key frame.
-    return _frameList.end();
+    return frame_list_.end();
   }
-
   // We have a complete continuous frame.
   return it;
 }
 
-// Call from inside the critical section _critSect
-void
-VCMJitterBuffer::RecycleFrame(VCMFrameBuffer* frame)
-{
-    if (frame == NULL)
-    {
-        return;
-    }
-
-    WEBRTC_TRACE(webrtc::kTraceDebug, webrtc::kTraceVideoCoding,
-                 VCMId(_vcmId, _receiverId),
-                 "JB(0x%x) FB(0x%x): RecycleFrame, size:%d",
-                 this, frame, frame->Length());
-
-    ReleaseFrameInternal(frame);
-}
-
-// Calculate frame and bit rates
-WebRtc_Word32
-VCMJitterBuffer::GetUpdate(WebRtc_UWord32& frameRate, WebRtc_UWord32& bitRate)
-{
-    CriticalSectionScoped cs(_critSect);
-    const WebRtc_Word64 now = _clock->MillisecondTimestamp();
-    WebRtc_Word64 diff = now - _timeLastIncomingFrameCount;
-    if (diff < 1000 && _incomingFrameRate > 0 && _incomingBitRate > 0)
-    {
-        // Make sure we report something even though less than
-        // 1 second has passed since last update.
-        frameRate = _incomingFrameRate;
-        bitRate = _incomingBitRate;
-    }
-    else if (_incomingFrameCount != 0)
-    {
-        // We have received frame(s) since last call to this function
-
-        // Prepare calculations
-        if (diff <= 0)
-        {
-            diff = 1;
-        }
-        // we add 0.5f for rounding
-        float rate = 0.5f + ((_incomingFrameCount * 1000.0f) / diff);
-        if (rate < 1.0f) // don't go below 1, can crash
-        {
-            rate = 1.0f;
-        }
-
-        // Calculate frame rate
-        // Let r be rate.
-        // r(0) = 1000*framecount/delta_time.
-        // (I.e. frames per second since last calculation.)
-        // frameRate = r(0)/2 + r(-1)/2
-        // (I.e. fr/s average this and the previous calculation.)
-        frameRate = (_incomingFrameRate + (WebRtc_Word32)rate) >> 1;
-        _incomingFrameRate = (WebRtc_UWord8)rate;
-
-        // Calculate bit rate
-        if (_incomingBitCount == 0)
-        {
-            bitRate = 0;
-        }
-        else
-        {
-            bitRate = 10 * ((100 * _incomingBitCount) /
-                      static_cast<WebRtc_UWord32>(diff));
-        }
-        _incomingBitRate = bitRate;
-
-        // Reset count
-        _incomingFrameCount = 0;
-        _incomingBitCount = 0;
-        _timeLastIncomingFrameCount = now;
-
-    }
-    else
-    {
-        // No frames since last call
-        _timeLastIncomingFrameCount = _clock->MillisecondTimestamp();
-        frameRate = 0;
-        bitRate = 0;
-        _incomingBitRate = 0;
-    }
-
-    return 0;
-}
-
-// Returns immediately or a X ms event hang waiting for a complete frame,
-// X decided by caller
-VCMEncodedFrame*
-VCMJitterBuffer::GetCompleteFrameForDecoding(WebRtc_UWord32 maxWaitTimeMS)
-{
-    if (!_running)
-    {
-        return NULL;
-    }
-
-    _critSect->Enter();
-
-    CleanUpOldFrames();
-
-    if (_lastDecodedState.init() && WaitForNack()) {
-      _waitingForKeyFrame = true;
-    }
-
-    FrameList::iterator it = FindOldestCompleteContinuousFrame(false);
-    if (it == _frameList.end())
-    {
-        if (maxWaitTimeMS == 0)
-        {
-            _critSect->Leave();
-            return NULL;
-        }
-        const WebRtc_Word64 endWaitTimeMs = _clock->MillisecondTimestamp()
-                                            + maxWaitTimeMS;
-        WebRtc_Word64 waitTimeMs = maxWaitTimeMS;
-        while (waitTimeMs > 0)
-        {
-            _critSect->Leave();
-            const EventTypeWrapper ret =
-                  _frameEvent.Wait(static_cast<WebRtc_UWord32>(waitTimeMs));
-            _critSect->Enter();
-            if (ret == kEventSignaled)
-            {
-                // are we closing down the Jitter buffer
-                if (!_running)
-                {
-                    _critSect->Leave();
-                    return NULL;
-                }
-
-                // Finding oldest frame ready for decoder, but check
-                // sequence number and size
-                CleanUpOldFrames();
-                it = FindOldestCompleteContinuousFrame(false);
-                if (it == _frameList.end())
-                {
-                    waitTimeMs = endWaitTimeMs -
-                                 _clock->MillisecondTimestamp();
-                }
-                else
-                {
-                    break;
-                }
-            }
-            else
-            {
-                _critSect->Leave();
-                return NULL;
-            }
-        }
-        // Inside critSect
-    }
-    else
-    {
-        // we already have a frame reset the event
-        _frameEvent.Reset();
-    }
-
-    if (it == _frameList.end())
-    {
-        // Even after signaling we're still missing a complete continuous frame
-        _critSect->Leave();
-        return NULL;
-    }
-
-    VCMFrameBuffer* oldestFrame = *it;
-    it = _frameList.erase(it);
-
-    // Update jitter estimate
-    const bool retransmitted = (oldestFrame->GetNackCount() > 0);
-    if (retransmitted)
-    {
-        _jitterEstimate.FrameNacked();
-    }
-    else if (oldestFrame->Length() > 0)
-    {
-        // Ignore retransmitted and empty frames.
-        UpdateJitterAndDelayEstimates(*oldestFrame, false);
-    }
-
-    oldestFrame->SetState(kStateDecoding);
-
-    CleanUpOldFrames();
-
-    if (oldestFrame->FrameType() == kVideoFrameKey)
-    {
-        _waitingForKeyFrame = false;
-    }
-
-    // We have a frame - update decoded state with frame info.
-    _lastDecodedState.SetState(oldestFrame);
-
-    _critSect->Leave();
-
-    return oldestFrame;
-}
-
-WebRtc_UWord32
-VCMJitterBuffer::GetEstimatedJitterMS()
-{
-    CriticalSectionScoped cs(_critSect);
-    return GetEstimatedJitterMsInternal();
-}
-
-WebRtc_UWord32
-VCMJitterBuffer::GetEstimatedJitterMsInternal()
-{
-    WebRtc_UWord32 estimate = VCMJitterEstimator::OPERATING_SYSTEM_JITTER;
-
-    // Compute RTT multiplier for estimation
-    // _lowRttNackThresholdMs == -1 means no FEC.
-    double rttMult = 1.0f;
-    if (_nackMode == kNackHybrid && (_lowRttNackThresholdMs >= 0 &&
-        static_cast<int>(_rttMs) > _lowRttNackThresholdMs))
-    {
-        // from here we count on FEC
-        rttMult = 0.0f;
-    }
-    estimate += static_cast<WebRtc_UWord32>
-                (_jitterEstimate.GetJitterEstimate(rttMult) + 0.5);
-    return estimate;
-}
-
-void
-VCMJitterBuffer::UpdateRtt(WebRtc_UWord32 rttMs)
-{
-    CriticalSectionScoped cs(_critSect);
-    _rttMs = rttMs;
-    _jitterEstimate.UpdateRtt(rttMs);
-}
-
-// wait for the first packet in the next frame to arrive
-WebRtc_Word64
-VCMJitterBuffer::GetNextTimeStamp(WebRtc_UWord32 maxWaitTimeMS,
-                                  FrameType& incomingFrameType,
-                                  WebRtc_Word64& renderTimeMs)
-{
-    if (!_running)
-    {
-        return -1;
-    }
-
-    _critSect->Enter();
-
-    // Finding oldest frame ready for decoder, check sequence number and size
-    CleanUpOldFrames();
-
-    FrameList::iterator it = _frameList.begin();
-
-    if (it == _frameList.end())
-    {
-        _packetEvent.Reset();
-        _critSect->Leave();
-
-        if (_packetEvent.Wait(maxWaitTimeMS) == kEventSignaled)
-        {
-            // are we closing down the Jitter buffer
-            if (!_running)
-            {
-                return -1;
-            }
-            _critSect->Enter();
-
-            CleanUpOldFrames();
-            it = _frameList.begin();
-        }
-        else
-        {
-            _critSect->Enter();
-        }
-    }
-
-    if (it == _frameList.end())
-    {
-        _critSect->Leave();
-        return -1;
-    }
-    // we have a frame
-
-    // return frame type
-    // All layers are assumed to have the same type
-    incomingFrameType = (*it)->FrameType();
-
-    renderTimeMs = (*it)->RenderTimeMs();
-
-    const WebRtc_UWord32 timestamp = (*it)->TimeStamp();
-
-    _critSect->Leave();
-
-    // return current time
-    return timestamp;
-}
-
-// Answers the question:
-// Will the packet sequence be complete if the next frame is grabbed for
-// decoding right now? That is, have we lost a frame between the last decoded
-// frame and the next, or is the next
-// frame missing one or more packets?
-bool
-VCMJitterBuffer::CompleteSequenceWithNextFrame()
-{
-    CriticalSectionScoped cs(_critSect);
-    // Finding oldest frame ready for decoder, check sequence number and size
-    CleanUpOldFrames();
-
-    if (_frameList.empty())
-      return true;
-
-    VCMFrameBuffer* oldestFrame = _frameList.front();
-    if (_frameList.size() <= 1 &&
-        oldestFrame->GetState() != kStateComplete)
-    {
-        // Frame not ready to be decoded.
-        return true;
-    }
-    if (!oldestFrame->Complete())
-    {
-        return false;
-    }
-
-    // See if we have lost a frame before this one.
-    if (_lastDecodedState.init())
-    {
-        // Following start, reset or flush -> check for key frame.
-        if (oldestFrame->FrameType() != kVideoFrameKey)
-        {
-            return false;
-        }
-    }
-    else if (oldestFrame->GetLowSeqNum() == -1)
-    {
-        return false;
-    }
-    else if (!_lastDecodedState.ContinuousFrame(oldestFrame))
-    {
-        return false;
-    }
-    return true;
-}
-
-// Returns immediately
-VCMEncodedFrame*
-VCMJitterBuffer::GetFrameForDecoding()
-{
-    CriticalSectionScoped cs(_critSect);
-    if (!_running)
-    {
-        return NULL;
-    }
-
-    if (WaitForNack())
-    {
-        return GetFrameForDecodingNACK();
-    }
-
-    CleanUpOldFrames();
-
-    if (_frameList.empty()) {
-      return NULL;
-    }
-
-    VCMFrameBuffer* oldestFrame = _frameList.front();
-    if (_frameList.size() <= 1 &&
-        oldestFrame->GetState() != kStateComplete) {
-      return NULL;
-    }
-
-    // Incomplete frame pulled out from jitter buffer,
-    // update the jitter estimate with what we currently know.
-    // This frame shouldn't have been retransmitted, but if we recently
-    // turned off NACK this might still happen.
-    const bool retransmitted = (oldestFrame->GetNackCount() > 0);
-    if (retransmitted)
-    {
-        _jitterEstimate.FrameNacked();
-    }
-    else if (oldestFrame->Length() > 0)
-    {
-        // Ignore retransmitted and empty frames.
-        // Update with the previous incomplete frame first
-        if (_waitingForCompletion.latestPacketTime >= 0)
-        {
-            UpdateJitterAndDelayEstimates(_waitingForCompletion, true);
-        }
-        // Then wait for this one to get complete
-        _waitingForCompletion.frameSize = oldestFrame->Length();
-        _waitingForCompletion.latestPacketTime =
-                              oldestFrame->LatestPacketTimeMs();
-        _waitingForCompletion.timestamp = oldestFrame->TimeStamp();
-    }
-    _frameList.erase(_frameList.begin());
-
-    // Look for previous frame loss
-    VerifyAndSetPreviousFrameLost(*oldestFrame);
-
-    // The state must be changed to decoding before cleaning up zero sized
-    // frames to avoid empty frames being cleaned up and then given to the
-    // decoder.
-    // Set as decoding. Propagates the missingFrame bit.
-    oldestFrame->SetState(kStateDecoding);
-
-    CleanUpOldFrames();
-
-    if (oldestFrame->FrameType() == kVideoFrameKey)
-    {
-        _waitingForKeyFrame = false;
-    }
-
-    _packetsNotDecodable += oldestFrame->NotDecodablePackets();
-
-    // We have a frame - update decoded state with frame info.
-    _lastDecodedState.SetState(oldestFrame);
-
-    return oldestFrame;
-}
-
-VCMEncodedFrame*
-VCMJitterBuffer::GetFrameForDecodingNACK()
-{
-    // when we use NACK we don't release non complete frames
-    // unless we have a complete key frame.
-    // In hybrid mode, we may release decodable frames (non-complete)
-
-    // Clean up old frames and empty frames
-    CleanUpOldFrames();
-
-    // First look for a complete _continuous_ frame.
-    // When waiting for nack, wait for a key frame, if a continuous frame cannot
-    // be determined (i.e. initial decoding state).
-    if (_lastDecodedState.init()) {
-      _waitingForKeyFrame = true;
-    }
-
-    // Allow for a decodable frame when in Hybrid mode.
-    bool enableDecodable = _nackMode == kNackHybrid ? true : false;
-    FrameList::iterator it = FindOldestCompleteContinuousFrame(enableDecodable);
-    if (it == _frameList.end())
-    {
-        // If we didn't find one we're good with a complete key/decodable frame.
-        it = find_if(_frameList.begin(), _frameList.end(),
-                     CompleteDecodableKeyFrameCriteria());
-        if (it == _frameList.end())
-        {
-            return NULL;
-        }
-    }
-    VCMFrameBuffer* oldestFrame = *it;
-    // Update jitter estimate
-    const bool retransmitted = (oldestFrame->GetNackCount() > 0);
-    if (retransmitted)
-    {
-        _jitterEstimate.FrameNacked();
-    }
-    else if (oldestFrame->Length() > 0)
-    {
-        // Ignore retransmitted and empty frames.
-        UpdateJitterAndDelayEstimates(*oldestFrame, false);
-    }
-    it = _frameList.erase(it);
-
-    // Look for previous frame loss
-    VerifyAndSetPreviousFrameLost(*oldestFrame);
-
-    // The state must be changed to decoding before cleaning up zero sized
-    // frames to avoid empty frames being cleaned up and then given to the
-    // decoder.
-    oldestFrame->SetState(kStateDecoding);
-
-    // Clean up old frames and empty frames
-    CleanUpOldFrames();
-
-    if (oldestFrame->FrameType() == kVideoFrameKey)
-    {
-        _waitingForKeyFrame = false;
-    }
-
-    // We have a frame - update decoded state with frame info.
-    _lastDecodedState.SetState(oldestFrame);
-
-    return oldestFrame;
-}
-
-// Must be called under the critical section _critSect. Should never be called
-// with retransmitted frames, they must be filtered out before this function is
-// called.
-void
-VCMJitterBuffer::UpdateJitterAndDelayEstimates(VCMJitterSample& sample,
-                                               bool incompleteFrame)
-{
-    if (sample.latestPacketTime == -1)
-    {
-        return;
-    }
-    if (incompleteFrame)
-    {
-        WEBRTC_TRACE(webrtc::kTraceDebug, webrtc::kTraceVideoCoding,
-                     VCMId(_vcmId, _receiverId), "Received incomplete frame "
-                     "timestamp %u frame size %u at time %u",
-                     sample.timestamp, sample.frameSize,
-                     MaskWord64ToUWord32(sample.latestPacketTime));
-    }
-    else
-    {
-        WEBRTC_TRACE(webrtc::kTraceDebug, webrtc::kTraceVideoCoding,
-                     VCMId(_vcmId, _receiverId), "Received complete frame "
-                     "timestamp %u frame size %u at time %u",
-                     sample.timestamp, sample.frameSize,
-                     MaskWord64ToUWord32(sample.latestPacketTime));
-    }
-    UpdateJitterAndDelayEstimates(sample.latestPacketTime,
-                                  sample.timestamp,
-                                  sample.frameSize,
-                                  incompleteFrame);
-}
-
-// Must be called under the critical section _critSect. Should never be
-// called with retransmitted frames, they must be filtered out before this
-// function is called.
-void
-VCMJitterBuffer::UpdateJitterAndDelayEstimates(VCMFrameBuffer& frame,
-                                               bool incompleteFrame)
-{
-    if (frame.LatestPacketTimeMs() == -1)
-    {
-        return;
-    }
-    // No retransmitted frames should be a part of the jitter
-    // estimate.
-    if (incompleteFrame)
-    {
-        WEBRTC_TRACE(webrtc::kTraceDebug, webrtc::kTraceVideoCoding,
-                     VCMId(_vcmId, _receiverId),
-                   "Received incomplete frame timestamp %u frame type %d "
-                   "frame size %u at time %u, jitter estimate was %u",
-                   frame.TimeStamp(), frame.FrameType(), frame.Length(),
-                   MaskWord64ToUWord32(frame.LatestPacketTimeMs()),
-                   GetEstimatedJitterMsInternal());
-    }
-    else
-    {
-        WEBRTC_TRACE(webrtc::kTraceDebug, webrtc::kTraceVideoCoding,
-                     VCMId(_vcmId, _receiverId),"Received complete frame "
-                     "timestamp %u frame type %d frame size %u at time %u, "
-                     "jitter estimate was %u",
-                     frame.TimeStamp(), frame.FrameType(), frame.Length(),
-                     MaskWord64ToUWord32(frame.LatestPacketTimeMs()),
-                     GetEstimatedJitterMsInternal());
-    }
-    UpdateJitterAndDelayEstimates(frame.LatestPacketTimeMs(), frame.TimeStamp(),
-                                  frame.Length(), incompleteFrame);
-}
-
-// Must be called under the critical section _critSect. Should never be called
-// with retransmitted frames, they must be filtered out before this function
-// is called.
-void
-VCMJitterBuffer::UpdateJitterAndDelayEstimates(WebRtc_Word64 latestPacketTimeMs,
-                                               WebRtc_UWord32 timestamp,
-                                               WebRtc_UWord32 frameSize,
-                                               bool incompleteFrame)
-{
-    if (latestPacketTimeMs == -1)
-    {
-        return;
-    }
-    WebRtc_Word64 frameDelay;
-    // Calculate the delay estimate
-    WEBRTC_TRACE(webrtc::kTraceDebug, webrtc::kTraceVideoCoding,
-                 VCMId(_vcmId, _receiverId),
-                 "Packet received and sent to jitter estimate with: "
-                 "timestamp=%u wallClock=%u", timestamp,
-                 MaskWord64ToUWord32(latestPacketTimeMs));
-    bool notReordered = _delayEstimate.CalculateDelay(timestamp,
-                                                      &frameDelay,
-                                                      latestPacketTimeMs);
-    // Filter out frames which have been reordered in time by the network
-    if (notReordered)
-    {
-        // Update the jitter estimate with the new samples
-        _jitterEstimate.UpdateEstimate(frameDelay, frameSize, incompleteFrame);
-    }
-}
-
-WebRtc_UWord16*
-VCMJitterBuffer::GetNackList(WebRtc_UWord16& nackSize,bool& listExtended)
-{
-    return CreateNackList(nackSize,listExtended);
-}
-
-// Assume called internally with critsect
-WebRtc_Word32
-VCMJitterBuffer::GetLowHighSequenceNumbers(WebRtc_Word32& lowSeqNum,
-                                           WebRtc_Word32& highSeqNum) const
-{
-    // TODO (mikhal/stefan): refactor to use lastDecodedState
-    WebRtc_Word32 i = 0;
-    WebRtc_Word32 seqNum = -1;
-
-    highSeqNum = -1;
-    lowSeqNum = -1;
-    if (!_lastDecodedState.init())
-      lowSeqNum = _lastDecodedState.sequence_num();
-
-    // find highest seq numbers
-    for (i = 0; i < _maxNumberOfFrames; ++i)
-    {
-        seqNum = _frameBuffers[i]->GetHighSeqNum();
-
-        // Ignore free / empty frames
-        VCMFrameBufferStateEnum state = _frameBuffers[i]->GetState();
-
-        if ((kStateFree != state) &&
-            (kStateEmpty != state) &&
-            (kStateDecoding != state) &&
-             seqNum != -1)
-        {
-            bool wrap;
-            highSeqNum = LatestSequenceNumber(seqNum, highSeqNum, &wrap);
-        }
-    } // for
-    return 0;
-}
-
-
-WebRtc_UWord16*
-VCMJitterBuffer::CreateNackList(WebRtc_UWord16& nackSize, bool& listExtended)
-{
-    // TODO (mikhal/stefan): Refactor to use lastDecodedState.
-    CriticalSectionScoped cs(_critSect);
-    int i = 0;
-    WebRtc_Word32 lowSeqNum = -1;
-    WebRtc_Word32 highSeqNum = -1;
-    listExtended = false;
-
-    // Don't create list, if we won't wait for it
-    if (!WaitForNack())
-    {
-        nackSize = 0;
-        return NULL;
-    }
-
-    // Find the lowest (last decoded) sequence number and
-    // the highest (highest sequence number of the newest frame)
-    // sequence number. The nack list is a subset of the range
-    // between those two numbers.
-    GetLowHighSequenceNumbers(lowSeqNum, highSeqNum);
-
-    // write a list of all seq num we have
-    if (lowSeqNum == -1 || highSeqNum == -1)
-    {
-        // This happens if we lose the first packet, nothing is popped
-        if (highSeqNum == -1)
-        {
-            // we have not received any packets yet
-            nackSize = 0;
-        }
-        else
-        {
-            // signal that we want a key frame request to be sent
-            nackSize = 0xffff;
-        }
-        return NULL;
-    }
-
-    int numberOfSeqNum = 0;
-    if (lowSeqNum > highSeqNum)
-    {
-        if (lowSeqNum - highSeqNum > 0x00ff)
-        {
-            // wrap
-            numberOfSeqNum = (0xffff-lowSeqNum) + highSeqNum + 1;
-        }
-    }
-    else
-    {
-        numberOfSeqNum = highSeqNum - lowSeqNum;
-    }
-
-    if (numberOfSeqNum > kNackHistoryLength)
-    {
-        // Nack list is too big, flush and try to restart.
-        WEBRTC_TRACE(webrtc::kTraceWarning, webrtc::kTraceVideoCoding,
-                     VCMId(_vcmId, _receiverId),
-                     "Nack list too large, try to find a key frame and restart "
-                     "from seq: %d. Lowest seq in jb %d", highSeqNum,lowSeqNum);
-
-        // This nack size will trigger a key request...
-        bool foundKeyFrame = false;
-
-        while (numberOfSeqNum > kNackHistoryLength)
-        {
-            foundKeyFrame = RecycleFramesUntilKeyFrame();
-
-            if (!foundKeyFrame)
-            {
-                break;
-            }
-
-            // Check if we still have too many packets in JB
-            lowSeqNum = -1;
-            highSeqNum = -1;
-            GetLowHighSequenceNumbers(lowSeqNum, highSeqNum);
-
-            if (highSeqNum == -1)
-            {
-                assert(lowSeqNum != -1); // This should never happen
-                // We can't calculate the nack list length...
-                return NULL;
-            }
-
-            numberOfSeqNum = 0;
-            if (lowSeqNum > highSeqNum)
-            {
-                if (lowSeqNum - highSeqNum > 0x00ff)
-                {
-                    // wrap
-                    numberOfSeqNum = (0xffff-lowSeqNum) + highSeqNum + 1;
-                    highSeqNum=lowSeqNum;
-                }
-            }
-            else
-            {
-                numberOfSeqNum = highSeqNum - lowSeqNum;
-            }
-
-        } // end while
-
-        if (!foundKeyFrame)
-        {
-            // No key frame in JB.
-
-            // Set the last decoded sequence number to current high.
-            // This is to not get a large nack list again right away
-            _lastDecodedState.SetSeqNum(static_cast<uint16_t>(highSeqNum));
-            // Set to trigger key frame signal
-            nackSize = 0xffff;
-            listExtended = true;
-            WEBRTC_TRACE(webrtc::kTraceDebug, webrtc::kTraceVideoCoding, -1,
-                    "\tNo key frame found, request one. _lastDecodedSeqNum[0] "
-                    "%d", _lastDecodedState.sequence_num());
-        }
-        else
-        {
-            // We have cleaned up the jb and found a key frame
-            // The function itself has set last decoded seq.
-            WEBRTC_TRACE(webrtc::kTraceDebug, webrtc::kTraceVideoCoding, -1,
-                    "\tKey frame found. _lastDecodedSeqNum[0] %d",
-                    _lastDecodedState.sequence_num());
-            nackSize = 0;
-        }
-
-        return NULL;
-    }
-
-    WebRtc_UWord16 seqNumberIterator = (WebRtc_UWord16)(lowSeqNum + 1);
-    for (i = 0; i < numberOfSeqNum; i++)
-    {
-        _NACKSeqNumInternal[i] = seqNumberIterator;
-        seqNumberIterator++;
-    }
-
-    // now we have a list of all sequence numbers that could have been sent
-
-    // zero out the ones we have received
-    for (i = 0; i < _maxNumberOfFrames; i++)
-    {
-        // loop all created frames
-        // We don't need to check if frame is decoding since lowSeqNum is based
-        // on _lastDecodedSeqNum
-        // Ignore free frames
-        VCMFrameBufferStateEnum state = _frameBuffers[i]->GetState();
-
-        if ((kStateFree != state) &&
-            (kStateEmpty != state) &&
-            (kStateDecoding != state))
-        {
-            // Reaching thus far means we are going to update the nack list
-            // When in hybrid mode, we use the soft NACKing feature.
-            if (_nackMode == kNackHybrid)
-            {
-                _frameBuffers[i]->BuildSoftNackList(_NACKSeqNumInternal,
-                                                    numberOfSeqNum,
-                                                    _rttMs);
-            }
-            else
-            {
-                // Used when the frame is being processed by the decoding thread
-                // don't need to use that info in this loop.
-                _frameBuffers[i]->BuildHardNackList(_NACKSeqNumInternal,
-                                                    numberOfSeqNum);
-            }
-        }
-    }
-
-    // compress list
-    int emptyIndex = -1;
-    for (i = 0; i < numberOfSeqNum; i++)
-    {
-        if (_NACKSeqNumInternal[i] == -1 || _NACKSeqNumInternal[i] == -2 )
-        {
-            // this is empty
-            if (emptyIndex == -1)
-            {
-                // no empty index before, remember this position
-                emptyIndex = i;
-            }
-        }
-        else
-        {
-            // this is not empty
-            if (emptyIndex == -1)
-            {
-                // no empty index, continue
-            }
-            else
-            {
-                _NACKSeqNumInternal[emptyIndex] = _NACKSeqNumInternal[i];
-                _NACKSeqNumInternal[i] = -1;
-                emptyIndex++;
-            }
-        }
-    } // for
-
-    if (emptyIndex == -1)
-    {
-        // no empty
-        nackSize = numberOfSeqNum;
-    }
-    else
-    {
-        nackSize = emptyIndex;
-    }
-
-    if (nackSize > _NACKSeqNumLength)
-    {
-        // Larger list: nack list was extended since the last call.
-        listExtended = true;
-    }
-
-    for (WebRtc_UWord32 j = 0; j < nackSize; j++)
-    {
-        // Check if the list has been extended since it was last created. I.e,
-        // new items have been added
-        if (_NACKSeqNumLength > j && !listExtended)
-        {
-            WebRtc_UWord32 k = 0;
-            for (k = j; k < _NACKSeqNumLength; k++)
-            {
-                // Found the item in the last list, i.e, no new items found yet.
-                if (_NACKSeqNum[k] == (WebRtc_UWord16)_NACKSeqNumInternal[j])
-                {
-                   break;
-                }
-            }
-            if (k == _NACKSeqNumLength) // New item not found in last list.
-            {
-                listExtended = true;
-            }
-        }
-        else
-        {
-            listExtended = true;
-        }
-        _NACKSeqNum[j] = (WebRtc_UWord16)_NACKSeqNumInternal[j];
-    }
-
-    _NACKSeqNumLength = nackSize;
-
-    return _NACKSeqNum;
-}
-
-// Release frame when done with decoding. Should never be used to release
-// frames from within the jitter buffer.
-void
-VCMJitterBuffer::ReleaseFrame(VCMEncodedFrame* frame)
-{
-    CriticalSectionScoped cs(_critSect);
-    VCMFrameBuffer* frameBuffer = static_cast<VCMFrameBuffer*>(frame);
-    if (frameBuffer != NULL)
-        frameBuffer->SetState(kStateFree);
-}
-
-WebRtc_Word64
-VCMJitterBuffer::LastPacketTime(VCMEncodedFrame* frame,
-                                bool& retransmitted) const
-{
-    CriticalSectionScoped cs(_critSect);
-    retransmitted = (static_cast<VCMFrameBuffer*>(frame)->GetNackCount() > 0);
-    return static_cast<VCMFrameBuffer*>(frame)->LatestPacketTimeMs();
-}
-
-WebRtc_Word64
-VCMJitterBuffer::LastDecodedTimestamp() const
-{
-    CriticalSectionScoped cs(_critSect);
-    return _lastDecodedState.time_stamp();
-}
-
-// Insert packet
-// Takes crit sect, and inserts packet in frame buffer, possibly does logging
-VCMFrameBufferEnum
-VCMJitterBuffer::InsertPacket(VCMEncodedFrame* buffer, const VCMPacket& packet)
-{
-    CriticalSectionScoped cs(_critSect);
-    WebRtc_Word64 nowMs = _clock->MillisecondTimestamp();
-    VCMFrameBufferEnum bufferReturn = kSizeError;
-    VCMFrameBufferEnum ret = kSizeError;
-    VCMFrameBuffer* frame = static_cast<VCMFrameBuffer*>(buffer);
-
-    // We are keeping track of the first seq num, the latest seq num and
-    // the number of wraps to be able to calculate how many packets we expect.
-    if (_firstPacket)
-    {
-        // Now it's time to start estimating jitter
-        // reset the delay estimate.
-        _delayEstimate.Reset(_clock->MillisecondTimestamp());
-        _firstPacket = false;
-    }
-
-    // Empty packets may bias the jitter estimate (lacking size component),
-    // therefore don't let empty packet trigger the following updates:
-    if (packet.frameType != kFrameEmpty)
-    {
-        if (_waitingForCompletion.timestamp == packet.timestamp)
-        {
-            // This can get bad if we have a lot of duplicate packets,
-            // we will then count some packet multiple times.
-            _waitingForCompletion.frameSize += packet.sizeBytes;
-            _waitingForCompletion.latestPacketTime = nowMs;
-        }
-        else if (_waitingForCompletion.latestPacketTime >= 0 &&
-                 _waitingForCompletion.latestPacketTime + 2000 <= nowMs)
-        {
-            // A packet should never be more than two seconds late
-            UpdateJitterAndDelayEstimates(_waitingForCompletion, true);
-            _waitingForCompletion.latestPacketTime = -1;
-            _waitingForCompletion.frameSize = 0;
-            _waitingForCompletion.timestamp = 0;
-        }
-    }
-
-    if (frame != NULL)
-    {
-        VCMFrameBufferStateEnum state = frame->GetState();
-        _lastDecodedState.UpdateOldPacket(&packet);
-        // Insert packet
-        // Check for first packet
-        // High sequence number will be -1 if neither an empty packet nor
-        // a media packet has been inserted.
-        bool first = (frame->GetHighSeqNum() == -1);
-        // When in Hybrid mode, we allow for a decodable state
-        // Note: Under current version, a decodable frame will never be
-        // triggered, as the body of the function is empty.
-        // TODO (mikhal): Update when decodable is enabled.
-        bufferReturn = frame->InsertPacket(packet, nowMs,
-                                           _nackMode == kNackHybrid,
-                                           _rttMs);
-        ret = bufferReturn;
-
-        if (bufferReturn > 0)
-        {
-            _incomingBitCount += packet.sizeBytes << 3;
-
-            // Has this packet been nacked or is it about to be nacked?
-            if (IsPacketRetransmitted(packet))
-            {
-                frame->IncrementNackCount();
-            }
-
-            // Insert each frame once on the arrival of the first packet
-            // belonging to that frame (media or empty)
-            if (state == kStateEmpty && first)
-            {
-                ret = kFirstPacket;
-                FrameList::reverse_iterator rit = std::find_if(
-                    _frameList.rbegin(), _frameList.rend(),
-                    FrameSmallerTimestamp(frame->TimeStamp()));
-                _frameList.insert(rit.base(), frame);
-            }
-        }
-    }
-    switch(bufferReturn)
-    {
-    case kStateError:
-    case kTimeStampError:
-    case kSizeError:
-        {
-            if (frame != NULL)
-            {
-                // Will be released when it gets old.
-                frame->Reset();
-                frame->SetState(kStateEmpty);
-            }
-            break;
-        }
-    case kCompleteSession:
-        {
-            // Only update return value for a JB flush indicator.
-            if (UpdateFrameState(frame) == kFlushIndicator)
-              ret = kFlushIndicator;
-            // Signal that we have a received packet
-            _packetEvent.Set();
-            break;
-        }
-    case kDecodableSession:
-    case kIncomplete:
-        {
-          // Signal that we have a received packet
-          _packetEvent.Set();
-          break;
-        }
-    case kNoError:
-    case kDuplicatePacket:
-        {
-            break;
-        }
-    default:
-        {
-            assert(false && "JitterBuffer::InsertPacket: Undefined value");
-        }
-    }
-   return ret;
-}
-
-// Must be called from within _critSect
-void
-VCMJitterBuffer::UpdateOldJitterSample(const VCMPacket& packet)
-{
-    if (_waitingForCompletion.timestamp != packet.timestamp &&
-        LatestTimestamp(_waitingForCompletion.timestamp, packet.timestamp,
-                        NULL) == packet.timestamp)
-    {
-        // This is a newer frame than the one waiting for completion.
-        _waitingForCompletion.frameSize = packet.sizeBytes;
-        _waitingForCompletion.timestamp = packet.timestamp;
-    }
-    else
-    {
-        // This can get bad if we have a lot of duplicate packets,
-        // we will then count some packet multiple times.
-        _waitingForCompletion.frameSize += packet.sizeBytes;
-        _jitterEstimate.UpdateMaxFrameSize(_waitingForCompletion.frameSize);
-    }
-}
-
-// Must be called from within _critSect
-bool
-VCMJitterBuffer::IsPacketRetransmitted(const VCMPacket& packet) const
-{
-    if (_NACKSeqNumLength > 0)
-    {
-        for (WebRtc_UWord16 i = 0; i < _NACKSeqNumLength; i++)
-        {
-            if (packet.seqNum == _NACKSeqNum[i])
-            {
-                return true;
-            }
-        }
-    }
-    return false;
-}
-
-// Get nack status (enabled/disabled)
-VCMNackMode
-VCMJitterBuffer::GetNackMode() const
-{
-    CriticalSectionScoped cs(_critSect);
-    return _nackMode;
-}
-
-// Set NACK mode
-void
-VCMJitterBuffer::SetNackMode(VCMNackMode mode,
-                             int lowRttNackThresholdMs,
-                             int highRttNackThresholdMs)
-{
-    CriticalSectionScoped cs(_critSect);
-    _nackMode = mode;
-    assert(lowRttNackThresholdMs >= -1 && highRttNackThresholdMs >= -1);
-    assert(highRttNackThresholdMs == -1 ||
-           lowRttNackThresholdMs <= highRttNackThresholdMs);
-    assert(lowRttNackThresholdMs > -1 || highRttNackThresholdMs == -1);
-    _lowRttNackThresholdMs = lowRttNackThresholdMs;
-    _highRttNackThresholdMs = highRttNackThresholdMs;
-    if (_nackMode == kNoNack)
-    {
-        _jitterEstimate.ResetNackCount();
-    }
-}
-
-
-// Recycle oldest frames up to a key frame, used if JB is completely full
-bool
-VCMJitterBuffer::RecycleFramesUntilKeyFrame()
-{
-    // Remove up to oldest key frame
-    while (_frameList.size() > 0)
-    {
-        // Throw at least one frame.
-        _dropCount++;
-        FrameList::iterator it = _frameList.begin();
-        WEBRTC_TRACE(webrtc::kTraceWarning, webrtc::kTraceVideoCoding,
-                     VCMId(_vcmId, _receiverId),
-                     "Jitter buffer drop count:%d, lowSeq %d", _dropCount,
-                     (*it)->GetLowSeqNum());
-        RecycleFrame(*it);
-        it = _frameList.erase(it);
-        if (it != _frameList.end() && (*it)->FrameType() == kVideoFrameKey)
-        {
-            // Fake the lastDecodedState to match this key frame.
-            _lastDecodedState.SetStateOneBack(*it);
-            return true;
-        }
-    }
-    _waitingForKeyFrame = true;
-    _lastDecodedState.Reset(); // TODO (mikhal): no sync
-    return false;
-}
-
-// Must be called under the critical section _critSect.
+// Must be called under the critical section |crit_sect_|.
 void VCMJitterBuffer::CleanUpOldFrames() {
-  while (_frameList.size() > 0) {
-    VCMFrameBuffer* oldestFrame = _frameList.front();
-    bool nextFrameEmpty = (_lastDecodedState.ContinuousFrame(oldestFrame) &&
-        oldestFrame->GetState() == kStateEmpty);
-    if (_lastDecodedState.IsOldFrame(oldestFrame) ||
-        (nextFrameEmpty && _frameList.size() > 1)) {
-      ReleaseFrameInternal(_frameList.front());
-      _frameList.erase(_frameList.begin());
+  while (frame_list_.size() > 0) {
+    VCMFrameBuffer* oldest_frame = frame_list_.front();
+    bool next_frame_empty =
+        (last_decoded_state_.ContinuousFrame(oldest_frame) &&
+         oldest_frame->GetState() == kStateEmpty);
+    if (last_decoded_state_.IsOldFrame(oldest_frame) ||
+        (next_frame_empty && frame_list_.size() > 1)) {
+      ReleaseFrameIfNotDecoding(frame_list_.front());
+      frame_list_.erase(frame_list_.begin());
     } else {
       break;
     }
   }
 }
 
-// Used in GetFrameForDecoding
-void VCMJitterBuffer::VerifyAndSetPreviousFrameLost(VCMFrameBuffer& frame) {
-  frame.MakeSessionDecodable();  // Make sure the session can be decoded.
-  if (frame.FrameType() == kVideoFrameKey)
+void VCMJitterBuffer::VerifyAndSetPreviousFrameLost(VCMFrameBuffer* frame) {
+  assert(frame);
+  frame->MakeSessionDecodable();  // Make sure the session can be decoded.
+  if (frame->FrameType() == kVideoFrameKey)
     return;
 
-  if (!_lastDecodedState.ContinuousFrame(&frame))
-    frame.SetPreviousFrameLoss();
+  if (!last_decoded_state_.ContinuousFrame(frame))
+    frame->SetPreviousFrameLoss();
 }
 
-bool
-VCMJitterBuffer::WaitForNack()
-{
-     // NACK disabled -> can't wait
-     if (_nackMode == kNoNack)
-     {
-         return false;
-     }
-     // NACK only -> always wait
-     else if (_nackMode == kNackInfinite)
-     {
-         return true;
-     }
-     // else: hybrid mode, evaluate
-     // RTT high, don't wait
-     if (_highRttNackThresholdMs >= 0 &&
-         _rttMs >= static_cast<unsigned int>(_highRttNackThresholdMs))
-     {
-         return false;
-     }
-     // Either NACK only or hybrid
-     return true;
+// Must be called from within |crit_sect_|.
+bool VCMJitterBuffer::IsPacketRetransmitted(const VCMPacket& packet) const {
+  if (nack_seq_nums_length_ > 0) {
+    for (unsigned int i = 0; i < nack_seq_nums_length_; i++) {
+      if (packet.seqNum == nack_seq_nums_[i]) {
+        return true;
+      }
+    }
+  }
+  return false;
 }
 
+// Must be called under the critical section |crit_sect_|. Should never be
+// called with retransmitted frames, they must be filtered out before this
+// function is called.
+void VCMJitterBuffer::UpdateJitterEstimate(const VCMJitterSample& sample,
+                                           bool incomplete_frame) {
+  if (sample.latest_packet_time == -1) {
+    return;
+  }
+  if (incomplete_frame) {
+    WEBRTC_TRACE(webrtc::kTraceDebug, webrtc::kTraceVideoCoding,
+                 VCMId(vcm_id_, receiver_id_), "Received incomplete frame "
+                 "timestamp %u frame size %u at time %u",
+                 sample.timestamp, sample.frame_size,
+                 MaskWord64ToUWord32(sample.latest_packet_time));
+  } else {
+    WEBRTC_TRACE(webrtc::kTraceDebug, webrtc::kTraceVideoCoding,
+                 VCMId(vcm_id_, receiver_id_), "Received complete frame "
+                 "timestamp %u frame size %u at time %u",
+                 sample.timestamp, sample.frame_size,
+                 MaskWord64ToUWord32(sample.latest_packet_time));
+  }
+  UpdateJitterEstimate(sample.latest_packet_time, sample.timestamp,
+                       sample.frame_size, incomplete_frame);
+}
+
+// Must be called under the critical section |crit_sect_|. Should never be
+// called with retransmitted frames, they must be filtered out before this
+// function is called.
+void VCMJitterBuffer::UpdateJitterEstimate(const VCMFrameBuffer& frame,
+                                           bool incomplete_frame) {
+  if (frame.LatestPacketTimeMs() == -1) {
+    return;
+  }
+  // No retransmitted frames should be a part of the jitter
+  // estimate.
+  if (incomplete_frame) {
+    WEBRTC_TRACE(webrtc::kTraceDebug, webrtc::kTraceVideoCoding,
+                 VCMId(vcm_id_, receiver_id_),
+                 "Received incomplete frame timestamp %u frame type %d "
+                 "frame size %u at time %u, jitter estimate was %u",
+                 frame.TimeStamp(), frame.FrameType(), frame.Length(),
+                 MaskWord64ToUWord32(frame.LatestPacketTimeMs()),
+                 EstimatedJitterMs());
+  } else {
+    WEBRTC_TRACE(webrtc::kTraceDebug, webrtc::kTraceVideoCoding,
+                 VCMId(vcm_id_, receiver_id_), "Received complete frame "
+                 "timestamp %u frame type %d frame size %u at time %u, "
+                 "jitter estimate was %u",
+                 frame.TimeStamp(), frame.FrameType(), frame.Length(),
+                 MaskWord64ToUWord32(frame.LatestPacketTimeMs()),
+                 EstimatedJitterMs());
+  }
+  UpdateJitterEstimate(frame.LatestPacketTimeMs(), frame.TimeStamp(),
+                       frame.Length(), incomplete_frame);
+}
+
+// Must be called under the critical section |crit_sect_|. Should never be
+// called with retransmitted frames, they must be filtered out before this
+// function is called.
+void VCMJitterBuffer::UpdateJitterEstimate(
+    int64_t latest_packet_time_ms,
+    uint32_t timestamp,
+    unsigned int frame_size,
+    bool incomplete_frame) {
+  if (latest_packet_time_ms == -1) {
+    return;
+  }
+  int64_t frame_delay;
+  // Calculate the delay estimate
+  WEBRTC_TRACE(webrtc::kTraceDebug, webrtc::kTraceVideoCoding,
+               VCMId(vcm_id_, receiver_id_),
+               "Packet received and sent to jitter estimate with: "
+               "timestamp=%u wall_clock=%u", timestamp,
+               MaskWord64ToUWord32(latest_packet_time_ms));
+  bool not_reordered = inter_frame_delay_.CalculateDelay(
+      timestamp, &frame_delay, latest_packet_time_ms);
+  // Filter out frames which have been reordered in time by the network.
+  if (not_reordered) {
+    // Update the jitter estimate with the new samples.
+    jitter_estimate_.UpdateEstimate(frame_delay, frame_size, incomplete_frame);
+  }
+}
+
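The delay sample that inter_frame_delay_.CalculateDelay() produces is, roughly, the growth of receive-time spacing over capture-time spacing, with RTP timestamps on the 90 kHz video clock. A sketch of that arithmetic (illustrative only; |prev_timestamp| and |prev_packet_time_ms| stand in for the previous frame's state, and the real class additionally handles timestamp wrap-around):

  int64_t recv_delta_ms = latest_packet_time_ms - prev_packet_time_ms;
  int64_t send_delta_ms = (timestamp - prev_timestamp) / 90;  // 90 kHz clock.
  int64_t frame_delay = recv_delta_ms - send_delta_ms;
  // frame_delay > 0 means this frame spent longer in flight than the
  // previous one; such samples drive jitter_estimate_ upwards.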
+// Assumed to be called internally from inside a critical section.
+void VCMJitterBuffer::GetLowHighSequenceNumbers(
+    int32_t* low_seq_num, int32_t* high_seq_num) const {
+  assert(low_seq_num);
+  assert(high_seq_num);
+  // TODO(mikhal/stefan): Refactor to use last_decoded_state.
+  int i = 0;
+  int32_t seq_num = -1;
+
+  *high_seq_num = -1;
+  *low_seq_num = -1;
+  if (!last_decoded_state_.init())
+    *low_seq_num = last_decoded_state_.sequence_num();
+
+  // Find the highest sequence number among the frame buffers.
+  for (i = 0; i < max_number_of_frames_; ++i) {
+    seq_num = frame_buffers_[i]->GetHighSeqNum();
+
+    // Ignore free / empty frames
+    VCMFrameBufferStateEnum state = frame_buffers_[i]->GetState();
+
+    if ((kStateFree != state) &&
+        (kStateEmpty != state) &&
+        (kStateDecoding != state) &&
+        seq_num != -1) {
+      bool wrap;
+      *high_seq_num = LatestSequenceNumber(seq_num, *high_seq_num, &wrap);
+    }
+  }
+}
+
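LatestSequenceNumber() has to respect 16-bit wrap-around, which is why the loop above cannot simply take the numeric maximum of GetHighSeqNum() over the frame buffers. The usual modulo-2^16 comparison looks like this (an illustrative sketch; the real helper also reports a detected wrap through its out-parameter):

  // Returns true if |a| is newer than |b|, treating the 16-bit sequence
  // number space as circular.
  bool SeqNumNewer(uint16_t a, uint16_t b) {
    return a != b && static_cast<uint16_t>(a - b) < 0x8000;
  }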
+bool VCMJitterBuffer::WaitForRetransmissions() {
+  if (nack_mode_ == kNoNack) {
+    // NACK disabled -> don't wait for retransmissions.
+    return false;
+  } else if (nack_mode_ == kNackInfinite) {
+    // NACK only -> always wait for retransmissions.
+    return true;
+  }
+  // Hybrid mode. Evaluate if the RTT is high, and in that case we don't wait
+  // for retransmissions.
+  if (high_rtt_nack_threshold_ms_ >= 0 &&
+      rtt_ms_ >= static_cast<unsigned int>(high_rtt_nack_threshold_ms_)) {
+    return false;
+  }
+  return true;
+}
 }  // namespace webrtc
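Pulling the NACK-related pieces together, a configuration sketch (not part of this change; the instance, the |rtt_ms| source and the threshold values are all illustrative):

  VCMJitterBuffer jitter_buffer(clock);  // |clock| is a TickTimeBase*.
  jitter_buffer.Start();
  // Hybrid mode: retransmissions are awaited only while the RTT stays below
  // the high threshold; past the low threshold the jitter estimate drops its
  // RTT term and counts on FEC instead.
  jitter_buffer.SetNackMode(kNackHybrid,
                            20,     // low_rtt_nack_threshold_ms
                            100);   // high_rtt_nack_threshold_ms
  jitter_buffer.UpdateRtt(rtt_ms);  // Typically fed from RTCP reports.

Once the RTT reaches the high threshold, WaitForRetransmissions() returns false and GetFrameForDecoding() takes the non-NACK path, which may hand out incomplete frames rather than hold them back.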
diff --git a/modules/video_coding/main/source/jitter_buffer.h b/modules/video_coding/main/source/jitter_buffer.h
index d951187..5b30fb8 100644
--- a/modules/video_coding/main/source/jitter_buffer.h
+++ b/modules/video_coding/main/source/jitter_buffer.h
@@ -8,8 +8,8 @@
  *  be found in the AUTHORS file in the root of the source tree.
  */
 
-#ifndef WEBRTC_MODULES_VIDEO_CODING_JITTER_BUFFER_H_
-#define WEBRTC_MODULES_VIDEO_CODING_JITTER_BUFFER_H_
+#ifndef WEBRTC_MODULES_VIDEO_CODING_MAIN_SOURCE_JITTER_BUFFER_H_
+#define WEBRTC_MODULES_VIDEO_CODING_MAIN_SOURCE_JITTER_BUFFER_H_
 
 #include <list>
 
@@ -24,14 +24,12 @@
 #include "system_wrappers/interface/critical_section_wrapper.h"
 #include "typedefs.h"
 
-namespace webrtc
-{
+namespace webrtc {
 
-enum VCMNackMode
-{
-    kNackInfinite,
-    kNackHybrid,
-    kNoNack
+enum VCMNackMode {
+  kNackInfinite,
+  kNackHybrid,
+  kNoNack
 };
 
 typedef std::list<VCMFrameBuffer*> FrameList;
@@ -42,218 +40,227 @@
 class VCMPacket;
 class VCMEncodedFrame;
 
-class VCMJitterSample
-{
-public:
-    VCMJitterSample() : timestamp(0), frameSize(0), latestPacketTime(-1) {}
-    WebRtc_UWord32 timestamp;
-    WebRtc_UWord32 frameSize;
-    WebRtc_Word64 latestPacketTime;
+struct VCMJitterSample {
+  VCMJitterSample() : timestamp(0), frame_size(0), latest_packet_time(-1) {}
+  uint32_t timestamp;
+  uint32_t frame_size;
+  int64_t latest_packet_time;
 };
 
-class VCMJitterBuffer
-{
-public:
-    VCMJitterBuffer(TickTimeBase* clock,
-                    WebRtc_Word32 vcmId = -1,
-                    WebRtc_Word32 receiverId = -1,
-                    bool master = true);
-    virtual ~VCMJitterBuffer();
+class VCMJitterBuffer {
+ public:
+  VCMJitterBuffer(TickTimeBase* clock, int vcm_id = -1, int receiver_id = -1,
+                  bool master = true);
+  virtual ~VCMJitterBuffer();
 
-    void CopyFrom(const VCMJitterBuffer& rhs);
+  // Makes |this| a deep copy of |rhs|.
+  void CopyFrom(const VCMJitterBuffer& rhs);
 
-    // We need a start and stop to break out of the wait event
-    // used in GetCompleteFrameForDecoding
-    void Start();
-    void Stop();
-    bool Running() const;
+  // Initializes and starts the jitter buffer.
+  void Start();
 
-    // Empty the Jitter buffer of all its data
-    void Flush();
+  // Signals all internal events and stops the jitter buffer.
+  void Stop();
 
-    // Statistics, Get received key and delta frames
-    WebRtc_Word32 GetFrameStatistics(WebRtc_UWord32& receivedDeltaFrames,
-                                     WebRtc_UWord32& receivedKeyFrames) const;
+  // Returns true if the jitter buffer is running.
+  bool Running() const;
 
-    // The number of packets discarded by the jitter buffer because the decoder
-    // won't be able to decode them.
-    WebRtc_UWord32 NumNotDecodablePackets() const;
-    // Get number of packets discarded by the jitter buffer
-    WebRtc_UWord32 DiscardedPackets() const;
+  // Empties the jitter buffer of all its data.
+  void Flush();
 
-    // Statistics, Calculate frame and bit rates
-    WebRtc_Word32 GetUpdate(WebRtc_UWord32& frameRate, WebRtc_UWord32& bitRate);
+  // Gets the number of received key and delta frames since the jitter buffer
+  // was started.
+  void FrameStatistics(uint32_t* received_delta_frames,
+                       uint32_t* received_key_frames) const;
 
-    // Wait for the first packet in the next frame to arrive, blocks
-    // for <= maxWaitTimeMS ms
-    WebRtc_Word64 GetNextTimeStamp(WebRtc_UWord32 maxWaitTimeMS,
-                                   FrameType& incomingFrameType,
-                                   WebRtc_Word64& renderTimeMs);
+  // The number of packets discarded by the jitter buffer because the decoder
+  // won't be able to decode them.
+  int num_not_decodable_packets() const;
 
-    // Will the packet sequence be complete if the next frame is grabbed
-    // for decoding right now? That is, have we lost a frame between the
-    // last decoded frame and the next, or is the next frame missing one
-    // or more packets?
-    bool CompleteSequenceWithNextFrame();
+  // Gets the number of packets discarded by the jitter buffer.
+  int num_discarded_packets() const;
 
-    // TODO (mikhal/stefan): Merge all GetFrameForDecoding into one.
-    // Wait maxWaitTimeMS for a complete frame to arrive. After timeout NULL
-    // is returned.
-    VCMEncodedFrame* GetCompleteFrameForDecoding(WebRtc_UWord32 maxWaitTimeMS);
+  // Statistics: calculates the incoming frame rate and bit rate.
+  void IncomingRateStatistics(unsigned int* framerate,
+                              unsigned int* bitrate);
 
-    // Get a frame for decoding (even an incomplete) without delay.
-    VCMEncodedFrame* GetFrameForDecoding();
+  // Waits for the first packet in the next frame to arrive and then returns
+  // the timestamp of that frame. |incoming_frame_type| and |render_time_ms| are
+  // set to the frame type and render time of the next frame.
+  // Blocks for up to |max_wait_time_ms| ms. Returns -1 if no packet has arrived
+  // after |max_wait_time_ms| ms.
+  int64_t NextTimestamp(uint32_t max_wait_time_ms,
+                        FrameType* incoming_frame_type,
+                        int64_t* render_time_ms);
 
-    VCMEncodedFrame* GetFrameForDecodingNACK();
+  // Checks if the packet sequence will be complete if the next frame would be
+  // grabbed for decoding. That is, if a frame has been lost between the
+  // last decoded frame and the next, or if the next frame is missing one
+  // or more packets.
+  bool CompleteSequenceWithNextFrame();
 
-    // Release frame (when done with decoding)
-    void ReleaseFrame(VCMEncodedFrame* frame);
+  // TODO(mikhal/stefan): Merge all GetFrameForDecoding into one.
+  // Waits up to |max_wait_time_ms| ms for a complete frame to arrive. Returns
+  // NULL after the timeout.
+  VCMEncodedFrame* GetCompleteFrameForDecoding(uint32_t max_wait_time_ms);
 
-    // Get frame to use for this timestamp
-    WebRtc_Word32 GetFrame(const VCMPacket& packet, VCMEncodedFrame*&);
-    VCMEncodedFrame* GetFrame(const VCMPacket& packet); // deprecated
+  // Gets a frame for decoding (even an incomplete one) without delay.
+  VCMEncodedFrame* GetFrameForDecoding();
 
-    // Returns the time in ms when the latest packet was inserted into the frame.
-    // Retransmitted is set to true if any of the packets belonging to the frame
-    // has been retransmitted.
-    WebRtc_Word64 LastPacketTime(VCMEncodedFrame* frame,
-                                 bool& retransmitted) const;
+  // Releases a frame returned from the jitter buffer, should be called when
+  // done with decoding.
+  void ReleaseFrame(VCMEncodedFrame* frame);
 
-    // Insert a packet into a frame
-    VCMFrameBufferEnum InsertPacket(VCMEncodedFrame* frame,
-                                    const VCMPacket& packet);
+  // Returns, via |frame|, the frame assigned to the timestamp of |packet|.
+  int GetFrame(const VCMPacket& packet, VCMEncodedFrame*& frame);
+  VCMEncodedFrame* GetFrame(const VCMPacket& packet);  // Deprecated.
 
-    // Sync
-    WebRtc_UWord32 GetEstimatedJitterMS();
-    void UpdateRtt(WebRtc_UWord32 rttMs);
+  // Returns the time in ms when the latest packet was inserted into the frame.
+  // |retransmitted| is set to true if any of the packets belonging to the
+  // frame have been retransmitted.
+  int64_t LastPacketTime(VCMEncodedFrame* frame, bool* retransmitted) const;
 
-    // NACK
-    // Set the NACK mode. "highRttNackThreshold" is an RTT threshold in ms above
-    // which NACK will be disabled if the NACK mode is "kNackHybrid",
-    // -1 meaning that NACK is always enabled in the Hybrid mode.
-    // "lowRttNackThreshold" is an RTT threshold in ms below which we expect to
-    // rely on NACK only, and therefore are using larger buffers to have time to
-    // wait for retransmissions.
-    void SetNackMode(VCMNackMode mode,
-                     int lowRttNackThresholdMs,
-                     int highRttNackThresholdMs);
-    VCMNackMode GetNackMode() const;    // Get nack mode
-    // Get list of missing sequence numbers (size in number of elements)
-    WebRtc_UWord16* GetNackList(WebRtc_UWord16& nackSize,
-                                bool& listExtended);
+  // Inserts a packet into a frame returned from GetFrame().
+  VCMFrameBufferEnum InsertPacket(VCMEncodedFrame* frame,
+                                  const VCMPacket& packet);
 
-    WebRtc_Word64 LastDecodedTimestamp() const;
+  // Returns the estimated jitter in milliseconds.
+  uint32_t EstimatedJitterMs();
 
-private:
-    // Misc help functions
-    // Recycle (release) frame, used if we didn't receive whole frame
-    void RecycleFrame(VCMFrameBuffer* frame);
-    void ReleaseFrameInternal(VCMFrameBuffer* frame);
-    // Flush and reset the jitter buffer. Call under critical section.
-    void FlushInternal();
+  // Updates the round-trip time estimate.
+  void UpdateRtt(uint32_t rtt_ms);
 
-    // Help functions for insert packet
-    // Get empty frame, creates new (i.e. increases JB size) if necessary
-    VCMFrameBuffer* GetEmptyFrame();
-    // Recycle oldest frames up to a key frame, used if JB is completely full
-    bool RecycleFramesUntilKeyFrame();
-    // Update frame state
-    // (set as complete or reconstructable if conditions are met)
-    VCMFrameBufferEnum UpdateFrameState(VCMFrameBuffer* frameListItem);
+  // Sets the NACK mode. |high_rtt_nack_threshold_ms| is an RTT threshold in
+  // ms above which NACK will be disabled if the NACK mode is |kNackHybrid|;
+  // -1 means that NACK is always enabled in the hybrid mode.
+  // |low_rtt_nack_threshold_ms| is an RTT threshold in ms below which we
+  // expect to rely on NACK only, and therefore use larger buffers to have
+  // time to wait for retransmissions.
+  void SetNackMode(VCMNackMode mode, int low_rtt_nack_threshold_ms,
+                   int high_rtt_nack_threshold_ms);
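
To make the threshold semantics concrete, here is an illustrative-only sketch of the RTT gating the comment describes; the names and structure are assumptions, not the jitter buffer's actual internals:

```cpp
// Illustrative sketch of the RTT gating described in the comment above.
// This is not the real implementation.
bool NackEnabledForRtt(VCMNackMode mode, int rtt_ms,
                       int high_rtt_nack_threshold_ms) {
  if (mode != kNackHybrid)
    return mode != kNoNack;  // kNackInfinite: always on; kNoNack: always off.
  // In hybrid mode, -1 means NACK is always enabled; otherwise NACK is
  // disabled once the RTT exceeds the high threshold.
  return high_rtt_nack_threshold_ms == -1 ||
         rtt_ms <= high_rtt_nack_threshold_ms;
}
```
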
 
-    // Help functions for getting a frame
-    // Find oldest complete frame, used for getting next frame to decode
-    // When enabled, will return a decodable frame
-    FrameList::iterator FindOldestCompleteContinuousFrame(bool enableDecodable);
+  // Returns the current NACK mode.
+  VCMNackMode nack_mode() const;
 
-    void CleanUpOldFrames();
+  // Creates a list of missing sequence numbers.
+  uint16_t* CreateNackList(uint16_t* nack_list_size, bool* list_extended);
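
The output convention is worth spelling out: a NULL list combined with a size of 0xffff is a sentinel meaning "request a key frame instead", which is how the receiver code later in this diff interprets it. A hedged usage sketch, with hypothetical SendNack()/RequestKeyFrame() placeholders:

```cpp
// Hypothetical caller actions; not part of the jitter buffer API.
void SendNack(const uint16_t* sequence_numbers, uint16_t length);
void RequestKeyFrame();

// Sketch of consuming CreateNackList(), mirroring the receiver code below.
void MaybeSendNack(VCMJitterBuffer* jitter_buffer) {
  uint16_t nack_list_size = 0;
  bool extended = false;
  uint16_t* nack_list =
      jitter_buffer->CreateNackList(&nack_list_size, &extended);
  if (nack_list == NULL && nack_list_size == 0xffff) {
    // Sentinel combination: retransmissions can't recover the stream, so a
    // key frame should be requested instead.
    RequestKeyFrame();
  } else if (nack_list_size > 0) {
    SendNack(nack_list, nack_list_size);
  }
}
```
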
 
-    void VerifyAndSetPreviousFrameLost(VCMFrameBuffer& frame);
-    bool IsPacketRetransmitted(const VCMPacket& packet) const;
+  int64_t LastDecodedTimestamp() const;
 
-    void UpdateJitterAndDelayEstimates(VCMJitterSample& sample,
-                                       bool incompleteFrame);
-    void UpdateJitterAndDelayEstimates(VCMFrameBuffer& frame,
-                                       bool incompleteFrame);
-    void UpdateJitterAndDelayEstimates(WebRtc_Word64 latestPacketTimeMs,
-                                       WebRtc_UWord32 timestamp,
-                                       WebRtc_UWord32 frameSize,
-                                       bool incompleteFrame);
-    void UpdateOldJitterSample(const VCMPacket& packet);
-    WebRtc_UWord32 GetEstimatedJitterMsInternal();
+ private:
+  // In NACK-only mode this function doesn't return or release incomplete
+  // frames unless we have a complete key frame. In hybrid mode, we may release
+  // "decodable", incomplete frames.
+  VCMEncodedFrame* GetFrameForDecodingNACK();
 
-    // NACK help
-    WebRtc_UWord16* CreateNackList(WebRtc_UWord16& nackSize,
-                                   bool& listExtended);
-    WebRtc_Word32 GetLowHighSequenceNumbers(WebRtc_Word32& lowSeqNum,
-                                            WebRtc_Word32& highSeqNum) const;
+  void ReleaseFrameIfNotDecoding(VCMFrameBuffer* frame);
 
-    // Decide whether should wait for NACK (mainly relevant for hybrid mode)
-    bool WaitForNack();
+  // Gets an empty frame, creating a new frame if necessary (i.e. increases
+  // jitter buffer size).
+  VCMFrameBuffer* GetEmptyFrame();
 
-    WebRtc_Word32                 _vcmId;
-    WebRtc_Word32                 _receiverId;
-    TickTimeBase*                 _clock;
-    // If we are running (have started) or not
-    bool                          _running;
-    CriticalSectionWrapper*       _critSect;
-    bool                          _master;
-    // Event to signal when we have a frame ready for decoder
-    VCMEvent                      _frameEvent;
-    // Event to signal when we have received a packet
-    VCMEvent                      _packetEvent;
-    // Number of allocated frames
-    WebRtc_Word32                 _maxNumberOfFrames;
-    // Array of pointers to the frames in JB
-    VCMFrameBuffer*               _frameBuffers[kMaxNumberOfFrames];
-    FrameList _frameList;
+  // Recycles the oldest frames until a key frame is found. Used if the jitter
+  // buffer is completely full. Returns true if a key frame was found.
+  bool RecycleFramesUntilKeyFrame();
 
-    // timing
-    VCMDecodingState       _lastDecodedState;
-    WebRtc_UWord32          _packetsNotDecodable;
+  // Sets the state of |frame| to complete if it's not too old to be decoded.
+  // Also updates the frame statistics. Signals |frame_event_| if this is
+  // the next frame to be decoded.
+  VCMFrameBufferEnum UpdateFrameState(VCMFrameBuffer* frame);
 
-    // Statistics
-    // Frame counter for each type (key, delta, golden, key-delta)
-    WebRtc_UWord8           _receiveStatistics[4];
-    // Latest calculated frame rates of incoming stream
-    WebRtc_UWord8           _incomingFrameRate;
-    // Frame counter, reset in GetUpdate
-    WebRtc_UWord32          _incomingFrameCount;
-    // Real time for last _frameCount reset
-    WebRtc_Word64           _timeLastIncomingFrameCount;
-    // Received bits counter, reset in GetUpdate
-    WebRtc_UWord32          _incomingBitCount;
-    WebRtc_UWord32          _incomingBitRate;
-    WebRtc_UWord32          _dropCount;            // Frame drop counter
-    // Number of frames in a row that have been too old
-    WebRtc_UWord32          _numConsecutiveOldFrames;
-    // Number of packets in a row that have been too old
-    WebRtc_UWord32          _numConsecutiveOldPackets;
-    // Number of packets discarded by the jitter buffer
-    WebRtc_UWord32          _discardedPackets;
+  // Finds the oldest complete frame, used to get the next frame to decode.
+  // Can return a decodable, incomplete frame if |enable_decodable| is true.
+  FrameList::iterator FindOldestCompleteContinuousFrame(bool enable_decodable);
 
-    // Filters for estimating jitter
-    VCMJitterEstimator      _jitterEstimate;
-    // Calculates network delays used for jitter calculations
-    VCMInterFrameDelay      _delayEstimate;
-    VCMJitterSample         _waitingForCompletion;
-    WebRtc_UWord32          _rttMs;
+  void CleanUpOldFrames();
 
-    // NACK
-    VCMNackMode             _nackMode;
-    int                     _lowRttNackThresholdMs;
-    int                     _highRttNackThresholdMs;
-    // Holds the internal nack list (the missing sequence numbers)
-    WebRtc_Word32           _NACKSeqNumInternal[kNackHistoryLength];
-    WebRtc_UWord16          _NACKSeqNum[kNackHistoryLength];
-    WebRtc_UWord32          _NACKSeqNumLength;
-    bool                    _waitingForKeyFrame;
+  // Sets the "decodable" and "frame loss" flags of a frame depending on which
+  // packets have been received and which are missing.
+  // A frame is "decodable" if enough packets of that frame has been received
+  // for it to be usable by the decoder.
+  // A frame has the "frame loss" flag set if packets are missing  after the
+  // last decoded frame and before |frame|.
+  void VerifyAndSetPreviousFrameLost(VCMFrameBuffer* frame);
 
-    bool                    _firstPacket;
+  // Returns true if |packet| is likely to have been retransmitted.
+  bool IsPacketRetransmitted(const VCMPacket& packet) const;
 
-    DISALLOW_COPY_AND_ASSIGN(VCMJitterBuffer);
+  // The following three functions update the jitter estimate with the
+  // payload size, receive time and RTP timestamp of a frame.
+  void UpdateJitterEstimate(const VCMJitterSample& sample,
+                            bool incomplete_frame);
+  void UpdateJitterEstimate(const VCMFrameBuffer& frame, bool incomplete_frame);
+  void UpdateJitterEstimate(int64_t latest_packet_time_ms,
+                            uint32_t timestamp,
+                            unsigned int frame_size,
+                            bool incomplete_frame);
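
The inputs named here (latest packet receive time, RTP timestamp, frame size) are the raw material of an inter-arrival jitter estimate. As a simplified illustration of the idea only, here is an RFC 3550-style estimator; the actual VCMJitterEstimator is considerably more elaborate and also models frame size, which this sketch ignores:

```cpp
#include <stdint.h>

// Simplified RFC 3550-style inter-arrival jitter estimate (section 6.4.1),
// shown only to illustrate what the inputs above feed into. This is NOT the
// VCMJitterEstimator algorithm. Timestamp wraparound is ignored for brevity.
class SimpleJitterEstimator {
 public:
  SimpleJitterEstimator() : jitter_ms_(0.0), prev_diff_ms_(0.0),
                            has_prev_(false) {}

  void Update(int64_t receive_time_ms, uint32_t rtp_timestamp) {
    // Video RTP timestamps tick at 90 kHz; convert to milliseconds.
    double send_time_ms = rtp_timestamp / 90.0;
    double diff_ms = receive_time_ms - send_time_ms;
    if (has_prev_) {
      double d = diff_ms - prev_diff_ms_;
      if (d < 0.0) d = -d;
      // J(i) = J(i-1) + (|D(i-1,i)| - J(i-1)) / 16
      jitter_ms_ += (d - jitter_ms_) / 16.0;
    }
    prev_diff_ms_ = diff_ms;
    has_prev_ = true;
  }

  double jitter_ms() const { return jitter_ms_; }

 private:
  double jitter_ms_;
  double prev_diff_ms_;
  bool has_prev_;
};
```
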
+
+  // Sets |low_seq_num| and |high_seq_num| to the lowest and highest known
+  // sequence numbers, where the lowest is the last decoded sequence number if
+  // a frame has been decoded. Sets them to -1 if they cannot be determined.
+  void GetLowHighSequenceNumbers(int32_t* low_seq_num,
+                                 int32_t* high_seq_num) const;
+
+  // Returns true if we should wait for retransmissions, false otherwise.
+  bool WaitForRetransmissions();
+
+  int vcm_id_;
+  int receiver_id_;
+  TickTimeBase* clock_;
+  // If we are running (have started) or not.
+  bool running_;
+  CriticalSectionWrapper* crit_sect_;
+  bool master_;
+  // Event to signal when we have a frame ready for decoder.
+  VCMEvent frame_event_;
+  // Event to signal when we have received a packet.
+  VCMEvent packet_event_;
+  // Number of allocated frames.
+  int max_number_of_frames_;
+  // Array of pointers to the frames in the jitter buffer.
+  VCMFrameBuffer* frame_buffers_[kMaxNumberOfFrames];
+  FrameList frame_list_;
+  VCMDecodingState last_decoded_state_;
+  bool first_packet_;
+
+  // Statistics.
+  int num_not_decodable_packets_;
+  // Frame counter for each type (key, delta, golden, key-delta).
+  unsigned int receive_statistics_[4];
+  // Latest calculated frame rates of incoming stream.
+  unsigned int incoming_frame_rate_;
+  unsigned int incoming_frame_count_;
+  int64_t time_last_incoming_frame_count_;
+  unsigned int incoming_bit_count_;
+  unsigned int incoming_bit_rate_;
+  unsigned int drop_count_;  // Frame drop counter.
+  // Number of frames in a row that have been too old.
+  int num_consecutive_old_frames_;
+  // Number of packets in a row that have been too old.
+  int num_consecutive_old_packets_;
+  // Number of packets discarded by the jitter buffer.
+  int num_discarded_packets_;
+
+  // Jitter estimation.
+  // Filter for estimating jitter.
+  VCMJitterEstimator jitter_estimate_;
+  // Calculates network delays used for jitter calculations.
+  VCMInterFrameDelay inter_frame_delay_;
+  VCMJitterSample waiting_for_completion_;
+  uint32_t rtt_ms_;
+
+  // NACK and retransmissions.
+  VCMNackMode nack_mode_;
+  int low_rtt_nack_threshold_ms_;
+  int high_rtt_nack_threshold_ms_;
+  // Holds the internal NACK list (the missing sequence numbers).
+  int32_t nack_seq_nums_internal_[kNackHistoryLength];
+  uint16_t nack_seq_nums_[kNackHistoryLength];
+  unsigned int nack_seq_nums_length_;
+  bool waiting_for_key_frame_;
+
+  DISALLOW_COPY_AND_ASSIGN(VCMJitterBuffer);
 };
+}  // namespace webrtc
 
-} // namespace webrtc
-
-#endif // WEBRTC_MODULES_VIDEO_CODING_JITTER_BUFFER_H_
+#endif  // WEBRTC_MODULES_VIDEO_CODING_MAIN_SOURCE_JITTER_BUFFER_H_
diff --git a/modules/video_coding/main/source/jitter_buffer_unittest.cc b/modules/video_coding/main/source/jitter_buffer_unittest.cc
index 6e48ea7..88ca597 100644
--- a/modules/video_coding/main/source/jitter_buffer_unittest.cc
+++ b/modules/video_coding/main/source/jitter_buffer_unittest.cc
@@ -287,7 +287,8 @@
 
   uint16_t nack_list_length = kNackHistoryLength;
   bool extended;
-  uint16_t* nack_list = jitter_buffer_->GetNackList(nack_list_length, extended);
+  uint16_t* nack_list = jitter_buffer_->CreateNackList(&nack_list_length,
+                                                       &extended);
   // Verify that the jitter buffer requests a key frame.
   EXPECT_TRUE(nack_list_length == 0xffff && nack_list == NULL);
 
@@ -302,14 +303,14 @@
   InsertFrame(kVideoFrameDelta);
   uint16_t nack_list_size = 0;
   bool extended = false;
-  uint16_t* list = jitter_buffer_->GetNackList(nack_list_size, extended);
+  uint16_t* list = jitter_buffer_->CreateNackList(&nack_list_size, &extended);
   // No list generated, and a key frame request is signaled.
   EXPECT_TRUE(list == NULL);
   EXPECT_EQ(0xFFFF, nack_list_size);
 }
 
 TEST_F(TestJitterBufferNack, TestNormalOperation) {
-  EXPECT_EQ(kNackInfinite, jitter_buffer_->GetNackMode());
+  EXPECT_EQ(kNackInfinite, jitter_buffer_->nack_mode());
 
   InsertFrame(kVideoFrameKey);
   EXPECT_TRUE(DecodeFrame());
@@ -335,7 +336,7 @@
   EXPECT_FALSE(DecodeFrame());
   uint16_t nack_list_size = 0;
   bool extended = false;
-  uint16_t* list = jitter_buffer_->GetNackList(nack_list_size, extended);
+  uint16_t* list = jitter_buffer_->CreateNackList(&nack_list_size, &extended);
   // Verify the NACK list.
   const int kExpectedNackSize = 9;
   ASSERT_EQ(kExpectedNackSize, nack_list_size);
@@ -365,7 +366,7 @@
   EXPECT_FALSE(DecodeCompleteFrame());
   uint16_t nack_list_size = 0;
   bool extended = false;
-  uint16_t* list = jitter_buffer_->GetNackList(nack_list_size, extended);
+  uint16_t* list = jitter_buffer_->CreateNackList(&nack_list_size, &extended);
   // Verify the NACK list.
   const int kExpectedNackSize = 10;
   ASSERT_EQ(kExpectedNackSize, nack_list_size);
diff --git a/modules/video_coding/main/source/receiver.cc b/modules/video_coding/main/source/receiver.cc
index 6be5336..f9b81dc 100644
--- a/modules/video_coding/main/source/receiver.cc
+++ b/modules/video_coding/main/source/receiver.cc
@@ -200,9 +200,9 @@
     FrameType incomingFrameType = kVideoFrameDelta;
     nextRenderTimeMs = -1;
     const WebRtc_Word64 startTimeMs = _clock->MillisecondTimestamp();
-    WebRtc_Word64 ret = _jitterBuffer.GetNextTimeStamp(maxWaitTimeMs,
-                                                       incomingFrameType,
-                                                       nextRenderTimeMs);
+    WebRtc_Word64 ret = _jitterBuffer.NextTimestamp(maxWaitTimeMs,
+                                                    &incomingFrameType,
+                                                    &nextRenderTimeMs);
     if (ret < 0)
     {
         // No timestamp in jitter buffer at the moment
@@ -211,7 +211,7 @@
     const WebRtc_UWord32 timeStamp = static_cast<WebRtc_UWord32>(ret);
 
     // Update the timing
-    _timing.SetRequiredDelay(_jitterBuffer.GetEstimatedJitterMS());
+    _timing.SetRequiredDelay(_jitterBuffer.EstimatedJitterMs());
     _timing.UpdateCurrentDelay(timeStamp);
 
     const WebRtc_Word32 tempWaitTime = maxWaitTimeMs -
@@ -233,7 +233,7 @@
     {
         bool retransmitted = false;
         const WebRtc_Word64 lastPacketTimeMs =
-                _jitterBuffer.LastPacketTime(frame, retransmitted);
+                _jitterBuffer.LastPacketTime(frame, &retransmitted);
         if (lastPacketTimeMs >= 0 && !retransmitted)
         {
             // We don't want to include timestamps which have suffered from retransmission
@@ -367,20 +367,21 @@
 WebRtc_Word32
 VCMReceiver::ReceiveStatistics(WebRtc_UWord32& bitRate, WebRtc_UWord32& frameRate)
 {
-    const WebRtc_Word32 ret = _jitterBuffer.GetUpdate(frameRate, bitRate);
+    _jitterBuffer.IncomingRateStatistics(&frameRate, &bitRate);
     bitRate /= 1000; // Should be in kbps
-    return ret;
+    return 0;
 }
 
 WebRtc_Word32
 VCMReceiver::ReceivedFrameCount(VCMFrameCount& frameCount) const
 {
-    return _jitterBuffer.GetFrameStatistics(frameCount.numDeltaFrames,
-                                            frameCount.numKeyFrames);
+    _jitterBuffer.FrameStatistics(&frameCount.numDeltaFrames,
+                                  &frameCount.numKeyFrames);
+    return 0;
 }
 
 WebRtc_UWord32 VCMReceiver::DiscardedPackets() const {
-  return _jitterBuffer.DiscardedPackets();
+  return _jitterBuffer.num_discarded_packets();
 }
 
 void
@@ -399,7 +400,7 @@
 VCMReceiver::NackMode() const
 {
     CriticalSectionScoped cs(_critSect);
-    return _jitterBuffer.GetNackMode();
+    return _jitterBuffer.nack_mode();
 }
 
 VCMNackStatus
@@ -407,7 +408,8 @@
 {
     bool extended = false;
     WebRtc_UWord16 nackListSize = 0;
-    WebRtc_UWord16* internalNackList = _jitterBuffer.GetNackList(nackListSize, extended);
+    WebRtc_UWord16* internalNackList = _jitterBuffer.CreateNackList(
+        &nackListSize, &extended);
     if (internalNackList == NULL && nackListSize == 0xffff)
     {
         // This combination is used to trigger key frame requests.
@@ -468,7 +470,7 @@
 void
 VCMReceiver::UpdateState(VCMEncodedFrame& frame)
 {
-    if (_jitterBuffer.GetNackMode() == kNoNack)
+    if (_jitterBuffer.nack_mode() == kNoNack)
     {
         // Dual decoder mode has not been enabled.
         return;
diff --git a/modules/video_coding/main/test/jitter_buffer_test.cc b/modules/video_coding/main/test/jitter_buffer_test.cc
index 2066983..99d8ac5 100644
--- a/modules/video_coding/main/test/jitter_buffer_test.cc
+++ b/modules/video_coding/main/test/jitter_buffer_test.cc
@@ -135,13 +135,9 @@
         }
     }
 
-    // Test out of range inputs
-    TEST(kSizeError == jb.InsertPacket(0, packet));
-    jb.ReleaseFrame(0);
-
     // Not started
     TEST(0 == jb.GetFrame(packet));
-    TEST(-1 == jb.GetNextTimeStamp(10, incomingFrameType, renderTimeMs));
+    TEST(-1 == jb.NextTimestamp(10, &incomingFrameType, &renderTimeMs));
     TEST(0 == jb.GetCompleteFrameForDecoding(10));
     TEST(0 == jb.GetFrameForDecoding());
 
@@ -179,7 +175,7 @@
     TEST(kFirstPacket == jb.InsertPacket(frameIn, packet));
 
     // get packet notification
-    TEST(timeStamp == jb.GetNextTimeStamp(10, incomingFrameType, renderTimeMs));
+    TEST(timeStamp == jb.NextTimestamp(10, &incomingFrameType, &renderTimeMs));
 
     // check incoming frame type
     TEST(incomingFrameType == kVideoFrameDelta);
@@ -220,7 +216,7 @@
     TEST(kFirstPacket == jb.InsertPacket(frameIn, packet));
 
     // get packet notification
-    TEST(timeStamp == jb.GetNextTimeStamp(10, incomingFrameType, renderTimeMs));
+    TEST(timeStamp == jb.NextTimestamp(10, &incomingFrameType, &renderTimeMs));
 
     // check incoming frame type
     TEST(incomingFrameType == kVideoFrameDelta);
@@ -279,7 +275,7 @@
     TEST(kFirstPacket == jb.InsertPacket(frameIn, packet));
 
     // get packet notification
-    TEST(timeStamp == jb.GetNextTimeStamp(10, incomingFrameType, renderTimeMs));
+    TEST(timeStamp == jb.NextTimestamp(10, &incomingFrameType, &renderTimeMs));
 
     // check incoming frame type
     TEST(incomingFrameType == kVideoFrameKey);
@@ -355,7 +351,7 @@
     TEST(kFirstPacket == jb.InsertPacket(frameIn, packet));
 
     // get packet notification
-    TEST(timeStamp == jb.GetNextTimeStamp(10, incomingFrameType, renderTimeMs));
+    TEST(timeStamp == jb.NextTimestamp(10, &incomingFrameType, &renderTimeMs));
 
     // check incoming frame type
     TEST(incomingFrameType == kVideoFrameDelta);
@@ -432,7 +428,7 @@
     TEST(kFirstPacket == jb.InsertPacket(frameIn, packet));
 
     // get packet notification
-    TEST(timeStamp == jb.GetNextTimeStamp(10, incomingFrameType, renderTimeMs));
+    TEST(timeStamp == jb.NextTimestamp(10, &incomingFrameType, &renderTimeMs));
 
     // check incoming frame type
     TEST(incomingFrameType == kVideoFrameDelta);
@@ -509,7 +505,7 @@
     TEST(kFirstPacket == jb.InsertPacket(frameIn, packet));
 
     // get packet notification
-    TEST(timeStamp == jb.GetNextTimeStamp(10, incomingFrameType, renderTimeMs));
+    TEST(timeStamp == jb.NextTimestamp(10, &incomingFrameType, &renderTimeMs));
 
     // check incoming frame type
     TEST(incomingFrameType == kVideoFrameDelta);
@@ -550,7 +546,7 @@
     TEST(kFirstPacket == jb.InsertPacket(frameIn, packet));
 
     // get packet notification
-    TEST(timeStamp == jb.GetNextTimeStamp(10, incomingFrameType, renderTimeMs));
+    TEST(timeStamp == jb.NextTimestamp(10, &incomingFrameType, &renderTimeMs));
 
     // check incoming frame type
     TEST(incomingFrameType == kVideoFrameDelta);
@@ -624,7 +620,7 @@
     TEST(kFirstPacket == jb.InsertPacket(frameIn, packet));
 
     // get packet notification
-    TEST(timeStamp == jb.GetNextTimeStamp(10, incomingFrameType, renderTimeMs));
+    TEST(timeStamp == jb.NextTimestamp(10, &incomingFrameType, &renderTimeMs));
 
     // check incoming frame type
     TEST(incomingFrameType == kVideoFrameDelta);
@@ -686,7 +682,7 @@
     TEST(kFirstPacket == jb.InsertPacket(frameIn, packet));
 
     // get packet notification
-    TEST(timeStamp == jb.GetNextTimeStamp(10, incomingFrameType, renderTimeMs));
+    TEST(timeStamp == jb.NextTimestamp(10, &incomingFrameType, &renderTimeMs));
 
     // check incoming frame type
     TEST(incomingFrameType == kVideoFrameDelta);
@@ -728,14 +724,14 @@
     //
     WebRtc_UWord32 numDeltaFrames = 0;
     WebRtc_UWord32 numKeyFrames = 0;
-    TEST(jb.GetFrameStatistics(numDeltaFrames, numKeyFrames) == 0);
+    jb.FrameStatistics(&numDeltaFrames, &numKeyFrames);
 
     TEST(numDeltaFrames == 8);
     TEST(numKeyFrames == 1);
 
     WebRtc_UWord32 frameRate;
     WebRtc_UWord32 bitRate;
-    TEST(jb.GetUpdate(frameRate, bitRate) == 0);
+    jb.IncomingRateStatistics(&frameRate, &bitRate);
 
     // these depend on CPU speed works on a T61
     TEST(frameRate > 30);
@@ -786,8 +782,8 @@
       TEST(kFirstPacket == jb.InsertPacket(frameIn, packet));
 
       // Get packet notification
-      TEST(timeStamp - 33 * 90 == jb.GetNextTimeStamp(10, incomingFrameType,
-                                                      renderTimeMs));
+      TEST(timeStamp - 33 * 90 == jb.NextTimestamp(10, &incomingFrameType,
+                                                   &renderTimeMs));
 
       // Check incoming frame type
       if (i == 0)
@@ -858,7 +854,7 @@
       jb.ReleaseFrame(frameOut);
     }
 
-    TEST(jb.NumNotDecodablePackets() == 10);
+    TEST(jb.num_not_decodable_packets() == 10);
 
     // Insert 3 old packets and verify that we have 3 discarded packets
     // Match value to actual latest timestamp decoded
@@ -875,12 +871,12 @@
     frameIn = jb.GetFrame(packet);
     TEST(frameIn == NULL);
 
-    TEST(jb.DiscardedPackets() == 3);
+    TEST(jb.num_discarded_packets() == 3);
 
     jb.Flush();
 
     // This statistic shouldn't be reset by a flush.
-    TEST(jb.DiscardedPackets() == 3);
+    TEST(jb.num_discarded_packets() == 3);
 
     //printf("DONE Statistics\n");
 
@@ -916,7 +912,7 @@
     TEST(kFirstPacket == jb.InsertPacket(frameIn, packet));
 
     // get packet notification
-    TEST(timeStamp == jb.GetNextTimeStamp(10, incomingFrameType, renderTimeMs));
+    TEST(timeStamp == jb.NextTimestamp(10, &incomingFrameType, &renderTimeMs));
 
     // check incoming frame type
     TEST(incomingFrameType == kVideoFrameDelta);
@@ -943,7 +939,8 @@
         TEST(kIncomplete == jb.InsertPacket(frameIn, packet));
 
         // get packet notification
-        TEST(timeStamp == jb.GetNextTimeStamp(2, incomingFrameType, renderTimeMs));
+        TEST(timeStamp == jb.NextTimestamp(2, &incomingFrameType,
+                                           &renderTimeMs));
 
         // check incoming frame type
         TEST(incomingFrameType == kVideoFrameDelta);
@@ -1009,7 +1006,7 @@
     TEST(kFirstPacket == jb.InsertPacket(frameIn, packet));
 
     // get packet notification
-    TEST(timeStamp == jb.GetNextTimeStamp(10, incomingFrameType, renderTimeMs));
+    TEST(timeStamp == jb.NextTimestamp(10, &incomingFrameType, &renderTimeMs));
 
     // check incoming frame type
     TEST(incomingFrameType == kVideoFrameDelta);
@@ -1036,7 +1033,8 @@
         TEST(kIncomplete == jb.InsertPacket(frameIn, packet));
 
         // get packet notification
-        TEST(timeStamp == jb.GetNextTimeStamp(2, incomingFrameType, renderTimeMs));
+        TEST(timeStamp == jb.NextTimestamp(2, &incomingFrameType,
+                                           &renderTimeMs));
 
         // check incoming frame type
         TEST(incomingFrameType == kVideoFrameDelta);
@@ -1101,7 +1099,7 @@
     TEST(kFirstPacket == jb.InsertPacket(frameIn, packet));
 
     // get packet notification
-    TEST(timeStamp == jb.GetNextTimeStamp(10, incomingFrameType, renderTimeMs));
+    TEST(timeStamp == jb.NextTimestamp(10, &incomingFrameType, &renderTimeMs));
 
     // check incoming frame type
     TEST(incomingFrameType == kVideoFrameDelta);
@@ -1125,7 +1123,7 @@
     TEST(kIncomplete == jb.InsertPacket(frameIn, packet));
 
     // get packet notification
-    TEST(timeStamp == jb.GetNextTimeStamp(10, incomingFrameType, renderTimeMs));
+    TEST(timeStamp == jb.NextTimestamp(10, &incomingFrameType, &renderTimeMs));
 
     // check incoming frame type
     TEST(incomingFrameType == kVideoFrameDelta);
@@ -1186,7 +1184,7 @@
     TEST(kFirstPacket == jb.InsertPacket(frameIn, packet));
 
     // get packet notification
-    TEST(3000 == jb.GetNextTimeStamp(10, incomingFrameType, renderTimeMs));
+    TEST(3000 == jb.NextTimestamp(10, &incomingFrameType, &renderTimeMs));
     TEST(kVideoFrameDelta == incomingFrameType);
 
     // Get the frame
@@ -1240,7 +1238,7 @@
     TEST(kFirstPacket == jb.InsertPacket(frameIn, packet));
 
     // get packet notification
-    TEST(timeStamp == jb.GetNextTimeStamp(10, incomingFrameType, renderTimeMs));
+    TEST(timeStamp == jb.NextTimestamp(10, &incomingFrameType, &renderTimeMs));
     TEST(kVideoFrameDelta == incomingFrameType);
 
     // Get the frame
@@ -1291,7 +1289,7 @@
     TEST(kFirstPacket == jb.InsertPacket(frameIn, packet));
 
     // get packet notification
-    TEST(timeStamp == jb.GetNextTimeStamp(10, incomingFrameType, renderTimeMs));
+    TEST(timeStamp == jb.NextTimestamp(10, &incomingFrameType, &renderTimeMs));
 
     // check incoming frame type
     TEST(incomingFrameType == kVideoFrameDelta);
@@ -1334,7 +1332,7 @@
     TEST(kFirstPacket == jb.InsertPacket(frameIn, packet));
 
     // get packet notification
-    TEST(timeStamp == jb.GetNextTimeStamp(10, incomingFrameType, renderTimeMs));
+    TEST(timeStamp == jb.NextTimestamp(10, &incomingFrameType, &renderTimeMs));
 
     // check incoming frame type
     TEST(incomingFrameType == kVideoFrameDelta);
@@ -1394,7 +1392,7 @@
     TEST(kFirstPacket == jb.InsertPacket(frameIn, packet));
 
     // Get packet notification
-    TEST(0xffffff00 == jb.GetNextTimeStamp(10, incomingFrameType, renderTimeMs));
+    TEST(0xffffff00 == jb.NextTimestamp(10, &incomingFrameType, &renderTimeMs));
     TEST(kVideoFrameDelta == incomingFrameType);
 
     // Insert next frame
@@ -1413,7 +1411,7 @@
     TEST(kFirstPacket == jb.InsertPacket(frameIn, packet));
 
     // Get packet notification
-    TEST(0xffffff00 == jb.GetNextTimeStamp(10, incomingFrameType, renderTimeMs));
+    TEST(0xffffff00 == jb.NextTimestamp(10, &incomingFrameType, &renderTimeMs));
     TEST(kVideoFrameDelta == incomingFrameType);
 
     // Get frame
@@ -1426,7 +1424,7 @@
     TEST(frameOut->FrameType() == kVideoFrameDelta);
 
     // Get packet notification
-    TEST(2700 == jb.GetNextTimeStamp(0, incomingFrameType, renderTimeMs));
+    TEST(2700 == jb.NextTimestamp(0, &incomingFrameType, &renderTimeMs));
     TEST(kVideoFrameDelta == incomingFrameType);
 
     // Get frame
@@ -1469,7 +1467,7 @@
     TEST(kFirstPacket == jb.InsertPacket(frameIn, packet));
 
     // Get packet notification
-    TEST(2700 == jb.GetNextTimeStamp(10, incomingFrameType, renderTimeMs));
+    TEST(2700 == jb.NextTimestamp(10, &incomingFrameType, &renderTimeMs));
     TEST(kVideoFrameDelta == incomingFrameType);
 
     // Insert second frame
@@ -1488,7 +1486,7 @@
     TEST(kFirstPacket == jb.InsertPacket(frameIn, packet));
 
     // Get packet notification
-    TEST(0xffffff00 == jb.GetNextTimeStamp(10, incomingFrameType, renderTimeMs));
+    TEST(0xffffff00 == jb.NextTimestamp(10, &incomingFrameType, &renderTimeMs));
     TEST(kVideoFrameDelta == incomingFrameType);
 
     // Get frame
@@ -1501,7 +1499,7 @@
     TEST(frameOut->FrameType() == kVideoFrameDelta);
 
     // get packet notification
-    TEST(2700 == jb.GetNextTimeStamp(0, incomingFrameType, renderTimeMs));
+    TEST(2700 == jb.NextTimestamp(0, &incomingFrameType, &renderTimeMs));
     TEST(kVideoFrameDelta == incomingFrameType);
 
     // Get frame
@@ -1551,7 +1549,8 @@
         }
 
         // get packet notification
-        TEST(packet.timestamp == jb.GetNextTimeStamp(10, incomingFrameType, renderTimeMs));
+        TEST(packet.timestamp == jb.NextTimestamp(10, &incomingFrameType,
+                                                  &renderTimeMs));
 
         // check incoming frame type
         TEST(incomingFrameType == kVideoFrameDelta);
@@ -1622,8 +1621,8 @@
         TEST(kFirstPacket == jb.InsertPacket(frameIn, packet));
 
         // Get packet notification, should be first inserted frame
-        TEST(timeStampStart == jb.GetNextTimeStamp(10, incomingFrameType,
-                                                   renderTimeMs));
+        TEST(timeStampStart == jb.NextTimestamp(10, &incomingFrameType,
+                                                &renderTimeMs));
 
         // check incoming frame type
         TEST(incomingFrameType == kVideoFrameDelta);
@@ -1650,8 +1649,8 @@
     TEST(kFirstPacket == jb.InsertPacket(frameIn, packet));
 
     // First inserted key frame should be oldest in buffer
-    TEST(timeStampFirstKey == jb.GetNextTimeStamp(10, incomingFrameType,
-                                                  renderTimeMs));
+    TEST(timeStampFirstKey == jb.NextTimestamp(10, &incomingFrameType,
+                                               &renderTimeMs));
 
     // check incoming frame type
     TEST(incomingFrameType == kVideoFrameKey);
@@ -1764,7 +1763,8 @@
     TEST(kFirstPacket == jb.InsertPacket(frameIn, packet));
 
     // Get packet notification
-    TEST(timeStamp == jb.GetNextTimeStamp(10, incomingFrameType, renderTimeMs));
+    TEST(timeStamp == jb.NextTimestamp(10, &incomingFrameType,
+                                       &renderTimeMs));
     frameOut = jb.GetFrameForDecoding();
 
     // We can decode everything from a NALU until a packet has been lost.
diff --git a/system_wrappers/source/system_wrappers.gyp b/system_wrappers/source/system_wrappers.gyp
index f834709..7a3cd20 100644
--- a/system_wrappers/source/system_wrappers.gyp
+++ b/system_wrappers/source/system_wrappers.gyp
@@ -226,7 +226,7 @@
             }, {
               'sources!': [ 'data_log_unittest.cc', ],
             }],
-            ['os_posix!=1', {
+            ['os_posix==0', {
               'sources!': [ 'thread_posix_unittest.cc', ],
             }],
           ],
diff --git a/test/fuzz/peerconnection/corpus/template00.html b/test/fuzz/peerconnection/corpus/template00.html
deleted file mode 100644
index 08bccb0..0000000
--- a/test/fuzz/peerconnection/corpus/template00.html
+++ /dev/null
@@ -1,122 +0,0 @@
-<!DOCTYPE HTML PUBLIC "-//IETF//DTD HTML//EN">
-<!--
-  Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
-
-  Use of this source code is governed by a BSD-style license
-  that can be found in the LICENSE file in the root of the source
-  tree. An additional intellectual property rights grant can be found
-  in the file PATENTS.  All contributing project authors may
-  be found in the AUTHORS file in the root of the source tree.
--->
-<html>
-<head>
-  <title>WebRTC PeerConnection Fuzz Test Template</title>
-INCLUDE_RANDOM_JS
-INCLUDE_FUZZ_SDP_JS
-  <script type="text/javascript">
-  var gFirstConnection = null;
-  var gSecondConnection = null;
-
-  // Variables in caps are filled in by the fuzzer.
-  var gTransformOfferSdp = TRANSFORM_OFFER_SDP;
-  var gTransformAnswerSdp = TRANSFORM_ANSWER_SDP;
-
-  // START_OF_POSSIBLE_INJECTED_LOCATION_RELOADS
-  function startTest() {
-    navigator.webkitGetUserMedia(REQUEST_AUDIO_AND_VIDEO,
-                                 getUserMediaOkCallback,
-                                 getUserMediaFailedCallback);
-  }
-
-  function getUserMediaFailedCallback(error) {
-    console.log('getUserMedia request failed with code ' + error.code);
-  }
-
-  function callUsingStream(localStream) {
-    gFirstConnection = new webkitPeerConnection00(
-        null, onIceCandidateToFirst);
-    gFirstConnection.addStream(localStream);
-    var offer = gFirstConnection.createOffer(null);
-    gFirstConnection.setLocalDescription(
-        webkitPeerConnection00.SDP_OFFER, offer);
-    return offer.toSdp();
-  }
-
-  function receiveCall(offerSdp) {
-    gSecondConnection = new webkitPeerConnection00(
-        null, onIceCandidateToSecond);
-    gSecondConnection.onaddstream = onRemoteStream;
-    var parsedOffer = new SessionDescription(offerSdp);
-    gSecondConnection.setRemoteDescription(
-        webkitPeerConnection00.SDP_OFFER, parsedOffer);
-
-    var answer = gSecondConnection.createAnswer(
-        offerSdp, { has_audio: true, has_video: true });
-    gSecondConnection.setLocalDescription(
-        webkitPeerConnection00.SDP_ANSWER, answer);
-    gSecondConnection.startIce();
-    return answer.toSdp();
-  }
-
-  function handleAnswer(answerSdp) {
-    var parsed_answer = new SessionDescription(answerSdp);
-    gFirstConnection.setRemoteDescription(
-        webkitPeerConnection00.SDP_ANSWER, parsed_answer);
-    gFirstConnection.startIce();
-  }
-
-  function getUserMediaOkCallback(localStream) {
-    var localStreamUrl = webkitURL.createObjectURL(localStream);
-    document.getElementById('local-view').src = localStreamUrl;
-
-    var offerSdp = callUsingStream(localStream);
-    offerSdp = gTransformOfferSdp(offerSdp);
-    var answerSdp = receiveCall(offerSdp);
-    answerSdp = gTransformAnswerSdp(answerSdp);
-    console.log(offerSdp);
-    console.log(answerSdp);
-    handleAnswer(answerSdp);
-  }
-
-  function onIceCandidateToFirst(candidate, more) {
-    if (candidate) {
-      gSecondConnection.processIceMessage(candidate);
-    }
-  }
-
-  function onIceCandidateToSecond(candidate, more) {
-    if (candidate) {
-      gFirstConnection.processIceMessage(candidate);
-    }
-  }
-
-  function onRemoteStream(e) {
-    var remoteStreamUrl = webkitURL.createObjectURL(e.stream);
-    document.getElementById('remote-view').src = remoteStreamUrl;
-  }
-
-  window.onload = function() {
-    setRandomRolls(gRandomRolls);
-    startTest();
-  }
-  // END_OF_POSSIBLE_INJECTED_LOCATION_RELOADS
-
-  // This variable is placed here since its value is pretty big.
-  var gRandomRolls = ARRAY_OF_RANDOM_ROLLS;
-  </script>
-</head>
-<body>
-  <table border="0">
-    <tr>
-      <td>Local Preview</td>
-      <td>Remote Stream</td>
-    </tr>
-    <tr>
-      <td><video width="320" height="240" id="local-view"
-          autoplay="autoplay"></video></td>
-      <td><video width="320" height="240" id="remote-view"
-          autoplay="autoplay"></video></td>
-    </tr>
-  </table>
-</body>
-</html>
\ No newline at end of file
diff --git a/test/fuzz/peerconnection/fuzz_main_run.py b/test/fuzz/peerconnection/fuzz_main_run.py
index 2f5e1d3..fb30a75 100755
--- a/test/fuzz/peerconnection/fuzz_main_run.py
+++ b/test/fuzz/peerconnection/fuzz_main_run.py
@@ -71,11 +71,7 @@
   this_scripts_path = os.path.dirname(os.path.realpath(__file__))
   corpus_path = os.path.join(this_scripts_path, 'corpus');
 
-  # Choose the newest version of the API more often than the old one.
-  if random.random() < 0.8:
-    template_to_use = 'template01.html'
-  else:
-    template_to_use = 'template00.html'
+  template_to_use = 'template01.html'
   template = _ReadFile(os.path.join(corpus_path, template_to_use))
 
   file_extension = 'html'
diff --git a/test/test_suite.cc b/test/test_suite.cc
index ac3f3a2..81913fd 100644
--- a/test/test_suite.cc
+++ b/test/test_suite.cc
@@ -9,13 +9,16 @@
  */
 
 #include "test/test_suite.h"
+#include "test/testsupport/fileutils.h"
 
 #include "gmock/gmock.h"
 #include "gtest/gtest.h"
 
 namespace webrtc {
 namespace test {
+
 TestSuite::TestSuite(int argc, char** argv) {
+  SetExecutablePath(argv[0]);
   testing::InitGoogleMock(&argc, argv);  // Runs InitGoogleTest() internally.
 }
 
diff --git a/test/testsupport/fileutils.cc b/test/testsupport/fileutils.cc
index 0679b1f..399d8a6 100644
--- a/test/testsupport/fileutils.cc
+++ b/test/testsupport/fileutils.cc
@@ -24,6 +24,7 @@
 #endif
 
 #include <cstdio>
+#include <cstring>
 
 #include "typedefs.h"  // For architecture defines
 
@@ -35,53 +36,83 @@
 #else
 static const char* kPathDelimiter = "/";
 #endif
+
+#ifdef WEBRTC_ANDROID
+static const char* kRootDirName = "/sdcard/";
+static const char* kResourcesDirName = "resources";
+#else
 // The file we're looking for to identify the project root dir.
 static const char* kProjectRootFileName = "DEPS";
 static const char* kOutputDirName = "out";
 static const char* kFallbackPath = "./";
-#ifdef WEBRTC_ANDROID
-static const char* kResourcesDirName = "/sdcard/";
-#else
 static const char* kResourcesDirName = "resources";
 #endif
 const char* kCannotFindProjectRootDir = "ERROR_CANNOT_FIND_PROJECT_ROOT_DIR";
 
-std::string ProjectRootPath() {
+namespace {
+char relative_dir_path[FILENAME_MAX];
+bool relative_dir_path_set = false;
+}
+
+void SetExecutablePath(const std::string& path) {
   std::string working_dir = WorkingDir();
-  if (working_dir == kFallbackPath) {
+  std::string temp_path = path;
+
+  // Handle absolute paths; convert them to paths relative to the working dir.
+  if (path.find(working_dir) != std::string::npos) {
+    temp_path = path.substr(working_dir.length() + 1);
+  }
+  // Trim away the executable name; only store the relative dir path.
+  temp_path = temp_path.substr(0, temp_path.find_last_of(kPathDelimiter));
+  strncpy(relative_dir_path, temp_path.c_str(), FILENAME_MAX);
+  relative_dir_path_set = true;
+}
+
+bool FileExists(std::string& file_name) {
+  struct stat file_info = {0};
+  return stat(file_name.c_str(), &file_info) == 0;
+}
+
+#ifdef WEBRTC_ANDROID
+
+std::string ProjectRootPath() {
+  return kRootDirName;
+}
+
+std::string OutputPath() {
+  return kRootDirName;
+}
+
+std::string WorkingDir() {
+  return kRootDirName;
+}
+
+#else  // WEBRTC_ANDROID
+
+std::string ProjectRootPath() {
+  std::string path = WorkingDir();
+  if (path == kFallbackPath) {
     return kCannotFindProjectRootDir;
   }
+  if (relative_dir_path_set) {
+    path = path + kPathDelimiter + relative_dir_path;
+  }
   // Check for our file that verifies the root dir.
-  std::string current_path(working_dir);
-  FILE* file = NULL;
-  int path_delimiter_index = current_path.find_last_of(kPathDelimiter);
-  while (path_delimiter_index > -1) {
-    std::string root_filename = current_path + kPathDelimiter +
-        kProjectRootFileName;
-    file = fopen(root_filename.c_str(), "r");
-    if (file != NULL) {
-      fclose(file);
-      return current_path + kPathDelimiter;
+  size_t path_delimiter_index = path.find_last_of(kPathDelimiter);
+  while (path_delimiter_index != std::string::npos) {
+    std::string root_filename = path + kPathDelimiter + kProjectRootFileName;
+    if (FileExists(root_filename)) {
+      return path + kPathDelimiter;
     }
     // Move up one directory in the directory tree.
-    current_path = current_path.substr(0, path_delimiter_index);
-    path_delimiter_index = current_path.find_last_of(kPathDelimiter);
+    path = path.substr(0, path_delimiter_index);
+    path_delimiter_index = path.find_last_of(kPathDelimiter);
   }
   // Reached the root directory.
   fprintf(stderr, "Cannot find project root directory!\n");
   return kCannotFindProjectRootDir;
 }
 
-#ifdef WEBRTC_ANDROID
-
-std::string OutputPath() {
-  // We need to touch this variable so it doesn't get flagged as unused.
-  (void)kOutputDirName;
-  return "/sdcard/";
-}
-
-#else  // WEBRTC_ANDROID
-
 std::string OutputPath() {
   std::string path = ProjectRootPath();
   if (path == kCannotFindProjectRootDir) {
@@ -94,8 +125,6 @@
   return path + kPathDelimiter;
 }
 
-#endif  // !WEBRTC_ANDROID
-
 std::string WorkingDir() {
   char path_buffer[FILENAME_MAX];
   if (!GET_CURRENT_DIR(path_buffer, sizeof(path_buffer))) {
@@ -106,6 +135,8 @@
   }
 }
 
+#endif  // !WEBRTC_ANDROID
+
 bool CreateDirectory(std::string directory_name) {
   struct stat path_info = {0};
   // Check if the path exists already:
@@ -126,11 +157,6 @@
   return true;
 }
 
-bool FileExists(std::string file_name) {
-  struct stat file_info = {0};
-  return stat(file_name.c_str(), &file_info) == 0;
-}
-
 std::string ResourcePath(std::string name, std::string extension) {
   std::string platform = "win";
 #ifdef WEBRTC_LINUX
@@ -146,9 +172,6 @@
   std::string architecture = "32";
 #endif  // WEBRTC_ARCH_64_BITS
 
-#ifdef WEBRTC_ANDROID
-  std::string resources_path = kResourcesDirName;
-#else
   std::string resources_path = ProjectRootPath() + kResourcesDirName +
       kPathDelimiter;
   std::string resource_file = resources_path + name + "_" + platform + "_" +
@@ -166,7 +189,7 @@
   if (FileExists(resource_file)) {
     return resource_file;
   }
-#endif
+
   // Fall back on name without architecture or platform.
   return resources_path + name + "." + extension;
 }
diff --git a/test/testsupport/fileutils.h b/test/testsupport/fileutils.h
index b6c1346..e642a5f 100644
--- a/test/testsupport/fileutils.h
+++ b/test/testsupport/fileutils.h
@@ -138,6 +138,14 @@
 // empty or if the file does not exist/is readable.
 size_t GetFileSize(std::string filename);
 
+// Sets the executable path, i.e. the path to the executable that was used
+// when launching it. This is usually a path relative to the working directory
+// but can also be an absolute path. The intention of this function is to pass
+// the argv[0] given to the main function, making it possible for fileutils.h
+// to find the correct project paths even when the working directory is
+// outside the project tree (which happens in some cases).
+void SetExecutablePath(const std::string& path_to_executable);
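
test_suite.cc above already forwards argv[0]; as a standalone illustration of the intended call pattern (paths are example values only):

```cpp
#include "test/testsupport/fileutils.h"

// Sketch of the intended usage: forward argv[0] before anything queries
// project paths. With a working dir of /home/user/webrtc and argv[0] of
// out/Debug/my_test, the stored relative dir path becomes "out/Debug"
// (example values only).
int main(int argc, char** argv) {
  webrtc::test::SetExecutablePath(argv[0]);
  // ProjectRootPath() and OutputPath() now resolve correctly even when the
  // working directory is outside the project tree.
  return 0;
}
```
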
+
 }  // namespace test
 }  // namespace webrtc
 
diff --git a/test/testsupport/fileutils_unittest.cc b/test/testsupport/fileutils_unittest.cc
index 1b76b3c..940b070 100644
--- a/test/testsupport/fileutils_unittest.cc
+++ b/test/testsupport/fileutils_unittest.cc
@@ -23,7 +23,6 @@
 static const char* kPathDelimiter = "/";
 #endif
 
-static const std::string kDummyDir = "file_utils_unittest_dummy_dir";
 static const std::string kResourcesDir = "resources";
 static const std::string kTestName = "fileutils_unittest";
 static const std::string kExtension = "tmp";
@@ -68,9 +67,6 @@
       ASSERT_GT(fprintf(file, "%s",  "Dummy data"), 0);
       fclose(file);
     }
-    // Create a dummy subdir that can be chdir'ed into for testing purposes.
-    empty_dummy_dir_ = original_working_dir_ + kPathDelimiter + kDummyDir;
-    webrtc::test::CreateDirectory(empty_dummy_dir_);
   }
   static void TearDownTestCase() {
     // Clean up all resource files written
@@ -78,7 +74,6 @@
             file_it != files_.end(); ++file_it) {
       remove(file_it->c_str());
     }
-    std::remove(empty_dummy_dir_.c_str());
   }
   void SetUp() {
     ASSERT_EQ(chdir(original_working_dir_.c_str()), 0);
@@ -88,14 +83,12 @@
   }
  protected:
   static FileList files_;
-  static std::string empty_dummy_dir_;
  private:
   static std::string original_working_dir_;
 };
 
 FileList FileUtilsTest::files_;
 std::string FileUtilsTest::original_working_dir_ = "";
-std::string FileUtilsTest::empty_dummy_dir_ = "";
 
 // Tests that the project root path is returned for the default working
 // directory that is automatically set when the test executable is launched.
@@ -117,37 +110,8 @@
   ASSERT_EQ(path.length() - expected_end.length(), path.find(expected_end));
 }
 
-// Tests setting the current working directory to a directory three levels
-// deeper from the current one. Then testing that the project path returned
-// is still the same, when the function under test is called again.
-TEST_F(FileUtilsTest, ProjectRootPathFromDeeperWorkingDir) {
-  std::string path = webrtc::test::ProjectRootPath();
-  std::string original_working_dir = path;  // This is the correct project root
-  // Change to a subdirectory path.
-  ASSERT_EQ(0, chdir(empty_dummy_dir_.c_str()));
-  ASSERT_EQ(original_working_dir, webrtc::test::ProjectRootPath());
-}
-
-// Similar to the above test, but for the output dir
-TEST_F(FileUtilsTest, OutputPathFromDeeperWorkingDir) {
-  std::string path = webrtc::test::OutputPath();
-  std::string original_working_dir = path;
-  ASSERT_EQ(0, chdir(empty_dummy_dir_.c_str()));
-  ASSERT_EQ(original_working_dir, webrtc::test::OutputPath());
-}
-
 // Tests with current working directory set to a directory higher up in the
-// directory tree than the project root dir. This case shall return a specified
-// error string as a directory (which will be an invalid path).
-TEST_F(FileUtilsTest, ProjectRootPathFromRootWorkingDir) {
-  // Change current working dir to the root of the current file system
-  // (this will always be "above" our project root dir).
-  ASSERT_EQ(0, chdir(kPathDelimiter));
-  ASSERT_EQ(webrtc::test::kCannotFindProjectRootDir,
-            webrtc::test::ProjectRootPath());
-}
-
-// Similar to the above test, but for the output dir
+// directory tree than the project root dir.
 TEST_F(FileUtilsTest, OutputPathFromRootWorkingDir) {
   ASSERT_EQ(0, chdir(kPathDelimiter));
   ASSERT_EQ("./", webrtc::test::OutputPath());
@@ -177,8 +141,14 @@
   std::string resource = webrtc::test::ResourcePath(kTestName, kExtension);
   ASSERT_GT(resource.find(kTestName), 0u);
   ASSERT_GT(resource.find(kExtension), 0u);
+}
+
+TEST_F(FileUtilsTest, ResourcePathFromRootWorkingDir) {
   ASSERT_EQ(0, chdir(kPathDelimiter));
-  ASSERT_EQ("./", webrtc::test::OutputPath());
+  std::string resource = webrtc::test::ResourcePath(kTestName, kExtension);
+  ASSERT_NE(resource.find("resources"), std::string::npos);
+  ASSERT_GT(resource.find(kTestName), 0u);
+  ASSERT_GT(resource.find(kExtension), 0u);
 }
 
 TEST_F(FileUtilsTest, GetFileSizeExistingFile) {
diff --git a/video_engine/test/android/jni/vie_android_java_api.cc b/video_engine/test/android/jni/vie_android_java_api.cc
index 3443a3e..5273d90 100644
--- a/video_engine/test/android/jni/vie_android_java_api.cc
+++ b/video_engine/test/android/jni/vie_android_java_api.cc
@@ -215,7 +215,7 @@
       _frameRateO(0), _bitRateO(0) {
     _callbackCls = _env->GetObjectClass(_callbackObj);
     _callbackId
-        = _env->GetMethodID(_callbackCls, "UpdateStats", "(IIIII)I");
+        = _env->GetMethodID(_callbackCls, "updateStats", "(IIIII)I");
     if (_callbackId == NULL) {
       __android_log_print(ANDROID_LOG_ERROR, WEBRTC_LOG_TAG,
                           "Failed to get jid");
diff --git a/video_engine/test/android/src/org/webrtc/videoengineapp/IViEAndroidCallback.java b/video_engine/test/android/src/org/webrtc/videoengineapp/IViEAndroidCallback.java
index 5a26190..711500b 100644
--- a/video_engine/test/android/src/org/webrtc/videoengineapp/IViEAndroidCallback.java
+++ b/video_engine/test/android/src/org/webrtc/videoengineapp/IViEAndroidCallback.java
@@ -11,7 +11,7 @@
 package org.webrtc.videoengineapp;
 
 public interface IViEAndroidCallback {
-    public int UpdateStats(int frameRateI, int bitRateI,
+    public int updateStats(int frameRateI, int bitRateI,
         int packetLoss, int frameRateO,
         int bitRateO);
 }
diff --git a/video_engine/test/android/src/org/webrtc/videoengineapp/WebRTCDemo.java b/video_engine/test/android/src/org/webrtc/videoengineapp/WebRTCDemo.java
index 9d94726..55431ff 100644
--- a/video_engine/test/android/src/org/webrtc/videoengineapp/WebRTCDemo.java
+++ b/video_engine/test/android/src/org/webrtc/videoengineapp/WebRTCDemo.java
@@ -10,32 +10,18 @@
 
 package org.webrtc.videoengineapp;
 
-import java.io.File;
-import java.io.IOException;
-import java.net.InetAddress;
-import java.net.NetworkInterface;
-import java.net.SocketException;
-import java.util.Enumeration;
-
-import org.webrtc.videoengine.ViERenderer;
-
-import android.app.TabActivity;
 import android.app.AlertDialog;
-import android.app.Dialog;
+import android.app.TabActivity;
 import android.content.BroadcastReceiver;
 import android.content.Context;
 import android.content.DialogInterface;
 import android.content.Intent;
 import android.content.IntentFilter;
-import android.content.res.Configuration;
 import android.content.pm.ActivityInfo;
+import android.content.res.Configuration;
 import android.graphics.Canvas;
-import android.graphics.Color;
-import android.graphics.DashPathEffect;
 import android.graphics.Paint;
-import android.graphics.Path;
 import android.graphics.PixelFormat;
-import android.graphics.Rect;
 import android.hardware.SensorManager;
 import android.media.AudioManager;
 import android.media.MediaPlayer;
@@ -45,19 +31,19 @@
 import android.os.PowerManager;
 import android.os.PowerManager.WakeLock;
 import android.util.Log;
+import android.view.Display;
 import android.view.Gravity;
 import android.view.KeyEvent;
 import android.view.LayoutInflater;
+import android.view.OrientationEventListener;
 import android.view.Surface;
 import android.view.SurfaceView;
 import android.view.View;
 import android.view.ViewGroup;
-import android.view.Display;
-import android.view.OrientationEventListener;
 import android.view.Window;
 import android.view.WindowManager;
-import android.view.WindowManager.LayoutParams;
 import android.widget.AdapterView;
+import android.widget.AdapterView.OnItemSelectedListener;
 import android.widget.ArrayAdapter;
 import android.widget.Button;
 import android.widget.CheckBox;
@@ -66,14 +52,23 @@
 import android.widget.RadioGroup;
 import android.widget.Spinner;
 import android.widget.TabHost;
-import android.widget.TextView;
-import android.widget.AdapterView.OnItemSelectedListener;
 import android.widget.TabHost.TabSpec;
+import android.widget.TextView;
 
+import org.webrtc.videoengine.ViERenderer;
+
+import java.io.File;
+import java.io.IOException;
+import java.net.InetAddress;
+import java.net.NetworkInterface;
+import java.net.SocketException;
+import java.util.Enumeration;
+
+/** Main activity of the WebRTC video engine demo application. */
 public class WebRTCDemo extends TabActivity implements IViEAndroidCallback,
                                                 View.OnClickListener,
                                                 OnItemSelectedListener {
-    private ViEAndroidJavaAPI ViEAndroidAPI = null;
+    private ViEAndroidJavaAPI vieAndroidAPI = null;
 
     // remote renderer
     private SurfaceView remoteSurfaceView = null;
@@ -195,7 +190,7 @@
 
     private BroadcastReceiver receiver;
 
-    public int GetCameraOrientation(int cameraOrientation) {
+    public int getCameraOrientation(int cameraOrientation) {
         Display display = this.getWindowManager().getDefaultDisplay();
         int displatyRotation = display.getRotation();
         int degrees = 0;
@@ -205,21 +200,20 @@
             case Surface.ROTATION_180: degrees = 180; break;
             case Surface.ROTATION_270: degrees = 270; break;
         }
-        int result=0;
-        if(cameraOrientation>180) {
-            result=(cameraOrientation + degrees) % 360;
-        }
-        else {
-            result=(cameraOrientation - degrees+360) % 360;
+        int result = 0;
+        if (cameraOrientation > 180) {
+            result = (cameraOrientation + degrees) % 360;
+        } else {
+            result = (cameraOrientation - degrees + 360) % 360;
         }
         return result;
     }
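
The two branches exist because front-facing sensors (orientation typically 270) are mirrored relative to back-facing ones (typically 90), and the `> 180` test is a heuristic for telling them apart. The same arithmetic, re-stated in C++ with worked values (the 90/270 orientations are common Android defaults, used here only as example inputs):

```cpp
#include <stdio.h>

// Re-statement of getCameraOrientation()'s arithmetic so the two branches
// can be checked with concrete values; this is not Android API code.
int CompensatedRotation(int camera_orientation, int display_degrees) {
  if (camera_orientation > 180)  // Heuristic for a front-facing sensor.
    return (camera_orientation + display_degrees) % 360;
  return (camera_orientation - display_degrees + 360) % 360;
}

int main() {
  // Back camera (90), display rotated 90: (90 - 90 + 360) % 360 == 0.
  printf("%d\n", CompensatedRotation(90, 90));
  // Front camera (270), same rotation, mirrored: (270 + 90) % 360 == 0.
  printf("%d\n", CompensatedRotation(270, 90));
  return 0;
}
```
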
 
     public void onConfigurationChanged(Configuration newConfig) {
         super.onConfigurationChanged(newConfig);
-        int newRotation = GetCameraOrientation(currentCameraOrientation);
-        if (viERunning){
-            ViEAndroidAPI.SetRotation(cameraId,newRotation);
+        int newRotation = getCameraOrientation(currentCameraOrientation);
+        if (viERunning) {
+            vieAndroidAPI.SetRotation(cameraId, newRotation);
         }
     }
 
@@ -235,7 +229,7 @@
         // Set screen orientation
         setRequestedOrientation (ActivityInfo.SCREEN_ORIENTATION_LANDSCAPE);
 
-        PowerManager pm = (PowerManager)this.getSystemService(
+        PowerManager pm = (PowerManager) this.getSystemService(
             Context.POWER_SERVICE);
         wakeLock = pm.newWakeLock(
             PowerManager.SCREEN_DIM_WAKE_LOCK, TAG);
@@ -255,16 +249,15 @@
                         if (voERunning) {
                             if (state == 1) {
                                 enableSpeaker = true;
-                            }
-                            else {
+                            } else {
                                 enableSpeaker = false;
                             }
-                            RouteAudio(enableSpeaker);
+                            routeAudio(enableSpeaker);
                         }
                     }
                 }
             };
-        registerReceiver(receiver, receiverFilter );
+        registerReceiver(receiver, receiverFilter);
 
         mTabHost = getTabHost();
 
@@ -293,11 +286,11 @@
         mTabHost.addTab(mTaba);
 
         int childCount = mTabHost.getTabWidget().getChildCount();
-        for (int i=0; i<childCount; i++)
+        for (int i = 0; i < childCount; i++) {
             mTabHost.getTabWidget().getChildAt(i).getLayoutParams().height = 50;
-
+        }
         orientationListener =
-                new OrientationEventListener(this,SensorManager.SENSOR_DELAY_UI) {
+                new OrientationEventListener(this, SensorManager.SENSOR_DELAY_UI) {
                     public void onOrientationChanged (int orientation) {
                         if (orientation != ORIENTATION_UNKNOWN) {
                             currentOrientation = orientation;
@@ -312,13 +305,12 @@
         File webrtcDir = new File(webrtcDebugDir);
         if (!webrtcDir.exists() && webrtcDir.mkdir() == false) {
             Log.v(TAG, "Failed to create " + webrtcDebugDir);
-        }
-        else if (!webrtcDir.isDirectory()) {
+        } else if (!webrtcDir.isDirectory()) {
             Log.v(TAG, webrtcDebugDir + " exists but not a folder");
             webrtcDebugDir = null;
         }
 
-        StartMain();
+        startMain();
         return;
     }
 
@@ -357,7 +349,7 @@
         }
     }
 
-    private String GetLocalIpAddress() {
+    private String getLocalIpAddress() {
         String localIPs = "";
         try {
             for (Enumeration<NetworkInterface> en = NetworkInterface
@@ -386,8 +378,8 @@
     public boolean onKeyDown(int keyCode, KeyEvent event) {
         if (keyCode == KeyEvent.KEYCODE_BACK) {
             if (viERunning) {
-                StopAll();
-                StartMain();
+                stopAll();
+                startMain();
             }
             finish();
             return true;
@@ -395,26 +387,26 @@
         return super.onKeyDown(keyCode, event);
     }
 
-    private void StopAll() {
-        Log.d(TAG, "StopAll");
+    private void stopAll() {
+        Log.d(TAG, "stopAll");
 
-        if (ViEAndroidAPI != null) {
+        if (vieAndroidAPI != null) {
 
-            StopCPULoad();
+            stopCPULoad();
 
             if (voERunning) {
                 voERunning = false;
-                StopVoiceEngine();
+                stopVoiceEngine();
             }
 
             if (viERunning) {
                 viERunning = false;
-                ViEAndroidAPI.StopRender(channel);
-                ViEAndroidAPI.StopReceive(channel);
-                ViEAndroidAPI.StopSend(channel);
-                ViEAndroidAPI.RemoveRemoteRenderer(channel);
-                ViEAndroidAPI.StopCamera(cameraId);
-                ViEAndroidAPI.Terminate();
+                vieAndroidAPI.StopRender(channel);
+                vieAndroidAPI.StopReceive(channel);
+                vieAndroidAPI.StopSend(channel);
+                vieAndroidAPI.RemoveRemoteRenderer(channel);
+                vieAndroidAPI.StopCamera(cameraId);
+                vieAndroidAPI.Terminate();
                 mLlRemoteSurface.removeView(remoteSurfaceView);
                 mLlLocalSurface.removeView(svLocal);
                 remoteSurfaceView = null;
@@ -423,6 +415,7 @@
         }
     }
 
+    /** Adapter that backs the codec spinners; see {@link ArrayAdapter}. */
     public class SpinnerAdapter extends ArrayAdapter<String> {
         private String[] mCodecString = null;
         public SpinnerAdapter(Context context, int textViewResourceId, String[] objects) {
@@ -441,23 +434,23 @@
         public View getCustomView(int position, View convertView, ViewGroup parent) {
             LayoutInflater inflater = getLayoutInflater();
             View row = inflater.inflate(R.layout.row, parent, false);
-            TextView label = (TextView)row.findViewById(R.id.spinner_row);
+            TextView label = (TextView) row.findViewById(R.id.spinner_row);
             label.setText(mCodecString[position]);
             return row;
         }
     }
 
-    private void StartMain() {
+    private void startMain() {
         mTabHost.setCurrentTab(0);
 
         mLlRemoteSurface = (LinearLayout) findViewById(R.id.llRemoteView);
         mLlLocalSurface = (LinearLayout) findViewById(R.id.llLocalView);
 
-        if (null == ViEAndroidAPI)
-            ViEAndroidAPI = new ViEAndroidJavaAPI(this);
-
-        if (0 > SetupVoE() || 0 > ViEAndroidAPI.GetVideoEngine() ||
-                0 > ViEAndroidAPI.Init(enableTrace) ) {
+        if (null == vieAndroidAPI) {
+            vieAndroidAPI = new ViEAndroidJavaAPI(this);
+        }
+        if (0 > setupVoE() || 0 > vieAndroidAPI.GetVideoEngine() ||
+                0 > vieAndroidAPI.Init(enableTrace)) {
             // Show dialog
             AlertDialog alertDialog = new AlertDialog.Builder(this).create();
             alertDialog.setTitle("WebRTC Error");
@@ -469,9 +462,9 @@
             alertDialog.show();
         }
 
-        btSwitchCamera = (Button)findViewById(R.id.btSwitchCamera);
+        btSwitchCamera = (Button) findViewById(R.id.btSwitchCamera);
         btSwitchCamera.setOnClickListener(this);
-        btStartStopCall = (Button)findViewById(R.id.btStartStopCall);
+        btStartStopCall = (Button) findViewById(R.id.btStartStopCall);
         btStartStopCall.setOnClickListener(this);
         findViewById(R.id.btExit).setOnClickListener(this);
 
@@ -480,8 +473,8 @@
         svLocal = null;
 
         // Video codec
-        mVideoCodecsStrings = ViEAndroidAPI.GetCodecs();
-        spCodecType = (Spinner)findViewById(R.id.spCodecType);
+        mVideoCodecsStrings = vieAndroidAPI.GetCodecs();
+        spCodecType = (Spinner) findViewById(R.id.spCodecType);
         spCodecType.setOnItemSelectedListener(this);
         spCodecType.setAdapter(new SpinnerAdapter(this,
                         R.layout.row,
@@ -497,8 +490,8 @@
         spCodecSize.setSelection(0);
 
         // Voice codec
-        mVoiceCodecsStrings = ViEAndroidAPI.VoE_GetCodecs();
-        spVoiceCodecType = (Spinner)findViewById(R.id.spVoiceCodecType);
+        mVoiceCodecsStrings = vieAndroidAPI.VoE_GetCodecs();
+        spVoiceCodecType = (Spinner) findViewById(R.id.spVoiceCodecType);
         spVoiceCodecType.setOnItemSelectedListener(this);
         spVoiceCodecType.setAdapter(new SpinnerAdapter(this,
                         R.layout.row,
@@ -512,12 +505,11 @@
             }
         }
 
-        RadioGroup radioGroup = (RadioGroup)findViewById(R.id.radio_group1);
+        RadioGroup radioGroup = (RadioGroup) findViewById(R.id.radio_group1);
         radioGroup.clearCheck();
         if (useOpenGLRender == true) {
             radioGroup.check(R.id.radio_opengl);
-        }
-        else {
+        } else {
             radioGroup.check(R.id.radio_surface);
         }
 
@@ -590,21 +582,20 @@
         if (loopbackMode) {
             remoteIp = LOOPBACK_IP;
             etRemoteIp.setText(remoteIp);
-        }
-        else {
-            GetLocalIpAddress();
+        } else {
+            getLocalIpAddress();
             etRemoteIp.setText(remoteIp);
         }
 
         // Read settings to refresh each configuration
-        ReadSettings();
+        readSettings();
     }
 
-    private String GetRemoteIPString() {
+    private String getRemoteIPString() {
         return etRemoteIp.getText().toString();
     }
 
-    private void StartPlayingRingtone() {
+    private void startPlayingRingtone() {
         MediaPlayer mMediaPlayer = new MediaPlayer();
         try {
             mMediaPlayer.setDataSource(this, Uri.parse(RINGTONE_URL));
@@ -616,13 +607,13 @@
         }
     }
 
-    private void StartCall() {
+    private void startCall() {
         int ret = 0;
 
-        StartPlayingRingtone();
+        startPlayingRingtone();
 
         if (enableVoice) {
-            StartVoiceEngine();
+            startVoiceEngine();
         }
 
         if (enableVideo) {
@@ -631,61 +622,60 @@
                 svLocal = ViERenderer.CreateLocalRenderer(this);
             }
 
-            channel = ViEAndroidAPI.CreateChannel(voiceChannel);
-            ret = ViEAndroidAPI.SetLocalReceiver(channel,
+            channel = vieAndroidAPI.CreateChannel(voiceChannel);
+            ret = vieAndroidAPI.SetLocalReceiver(channel,
                                                  receivePortVideo);
-            ret = ViEAndroidAPI.SetSendDestination(channel,
+            ret = vieAndroidAPI.SetSendDestination(channel,
                                                    destinationPortVideo,
-                                                   GetRemoteIPString());
+                                                   getRemoteIPString());
 
             if (enableVideoReceive) {
-                if(useOpenGLRender) {
+                if (useOpenGLRender) {
                     Log.v(TAG, "Create OpenGL Render");
                     remoteSurfaceView = ViERenderer.CreateRenderer(this, true);
-                    ret = ViEAndroidAPI.AddRemoteRenderer(channel, remoteSurfaceView);
-                }
-                else {
+                    ret = vieAndroidAPI.AddRemoteRenderer(channel, remoteSurfaceView);
+                } else {
                     Log.v(TAG, "Create SurfaceView Render");
                     remoteSurfaceView = ViERenderer.CreateRenderer(this, false);
-                    ret = ViEAndroidAPI.AddRemoteRenderer(channel, remoteSurfaceView);
+                    ret = vieAndroidAPI.AddRemoteRenderer(channel, remoteSurfaceView);
                 }
 
-                ret = ViEAndroidAPI.SetReceiveCodec(channel,
+                ret = vieAndroidAPI.SetReceiveCodec(channel,
                         codecType,
                         INIT_BITRATE,
                         codecSizeWidth,
                         codecSizeHeight,
                         RECEIVE_CODEC_FRAMERATE);
-                ret = ViEAndroidAPI.StartRender(channel);
-                ret = ViEAndroidAPI.StartReceive(channel);
+                ret = vieAndroidAPI.StartRender(channel);
+                ret = vieAndroidAPI.StartReceive(channel);
             }
 
             if (enableVideoSend) {
                 currentCameraOrientation =
-                        ViEAndroidAPI.GetCameraOrientation(usingFrontCamera ? 1 : 0);
-                ret = ViEAndroidAPI.SetSendCodec(channel, codecType, INIT_BITRATE,
+                        vieAndroidAPI.GetCameraOrientation(usingFrontCamera ? 1 : 0);
+                ret = vieAndroidAPI.SetSendCodec(channel, codecType, INIT_BITRATE,
                         codecSizeWidth, codecSizeHeight, SEND_CODEC_FRAMERATE);
-                int camId = ViEAndroidAPI.StartCamera(channel, usingFrontCamera ? 1 : 0);
+                int camId = vieAndroidAPI.StartCamera(channel, usingFrontCamera ? 1 : 0);
 
-                if(camId > 0) {
+                if (camId > 0) {
                     cameraId = camId;
-                    int neededRotation = GetCameraOrientation(currentCameraOrientation);
-                    ViEAndroidAPI.SetRotation(cameraId, neededRotation);
-                }
-                else {
+                    int neededRotation = getCameraOrientation(currentCameraOrientation);
+                    vieAndroidAPI.SetRotation(cameraId, neededRotation);
+                } else {
                     ret = camId;
                 }
-                ret = ViEAndroidAPI.StartSend(channel);
+                ret = vieAndroidAPI.StartSend(channel);
             }
 
             // TODO(leozwang): Add more options besides PLI; currently PLI is
             // used as the default. Also check return values.
-            ret = ViEAndroidAPI.EnablePLI(channel, true);
-            ret = ViEAndroidAPI.SetCallback(channel, this);
+            ret = vieAndroidAPI.EnablePLI(channel, true);
+            ret = vieAndroidAPI.SetCallback(channel, this);
 
             if (enableVideoSend) {
-                if (mLlLocalSurface != null)
+                if (mLlLocalSurface != null) {
                     mLlLocalSurface.addView(svLocal);
+                }
             }
 
             if (enableVideoReceive) {
@@ -696,64 +686,62 @@
 
             isStatsOn = cbStats.isChecked();
             if (isStatsOn) {
-                AddStatsView();
-            }
-            else {
-                RemoveSatsView();
+                addStatusView();
+            } else {
+                removeStatusView();
             }
 
             isCPULoadOn = cbCPULoad.isChecked();
             if (isCPULoadOn) {
-                StartCPULoad();
-            }
-            else {
-                StopCPULoad();
+                startCPULoad();
+            } else {
+                stopCPULoad();
             }
 
             viERunning = true;
         }
     }
 
-    private void StopVoiceEngine() {
+    private void stopVoiceEngine() {
         // Stop send
-        if (0 != ViEAndroidAPI.VoE_StopSend(voiceChannel)) {
+        if (0 != vieAndroidAPI.VoE_StopSend(voiceChannel)) {
             Log.d(TAG, "VoE stop send failed");
         }
 
         // Stop listen
-        if (0 != ViEAndroidAPI.VoE_StopListen(voiceChannel)) {
+        if (0 != vieAndroidAPI.VoE_StopListen(voiceChannel)) {
             Log.d(TAG, "VoE stop listen failed");
         }
 
         // Stop playout
-        if (0 != ViEAndroidAPI.VoE_StopPlayout(voiceChannel)) {
+        if (0 != vieAndroidAPI.VoE_StopPlayout(voiceChannel)) {
             Log.d(TAG, "VoE stop playout failed");
         }
 
-        if (0 != ViEAndroidAPI.VoE_DeleteChannel(voiceChannel)) {
+        if (0 != vieAndroidAPI.VoE_DeleteChannel(voiceChannel)) {
             Log.d(TAG, "VoE delete channel failed");
         }
-        voiceChannel=-1;
+        voiceChannel = -1;
 
         // Terminate
-        if (0 != ViEAndroidAPI.VoE_Terminate()) {
+        if (0 != vieAndroidAPI.VoE_Terminate()) {
             Log.d(TAG, "VoE terminate failed");
         }
     }
 
-    private int SetupVoE() {
+    private int setupVoE() {
         // Create VoiceEngine
         // Error logging is done in native API wrapper
-        ViEAndroidAPI.VoE_Create(getApplicationContext());
+        vieAndroidAPI.VoE_Create(getApplicationContext());
 
         // Initialize
-        if (0 != ViEAndroidAPI.VoE_Init(enableTrace)) {
+        if (0 != vieAndroidAPI.VoE_Init(enableTrace)) {
             Log.d(TAG, "VoE init failed");
             return -1;
         }
 
         // Create channel
-        voiceChannel = ViEAndroidAPI.VoE_CreateChannel();
+        voiceChannel = vieAndroidAPI.VoE_CreateChannel();
         if (0 > voiceChannel) {
             Log.d(TAG, "VoE create channel failed");
             return -1;
@@ -764,53 +752,53 @@
         return 0;
     }
 
-    private int StartVoiceEngine() {
+    private int startVoiceEngine() {
         // Set local receiver
-        if (0 != ViEAndroidAPI.VoE_SetLocalReceiver(voiceChannel,
+        if (0 != vieAndroidAPI.VoE_SetLocalReceiver(voiceChannel,
                         receivePortVoice)) {
             Log.d(TAG, "VoE set local receiver failed");
         }
 
-        if (0 != ViEAndroidAPI.VoE_StartListen(voiceChannel)) {
+        if (0 != vieAndroidAPI.VoE_StartListen(voiceChannel)) {
             Log.d(TAG, "VoE start listen failed");
         }
 
         // Route audio
-        RouteAudio(enableSpeaker);
+        routeAudio(enableSpeaker);
 
         // set volume to default value
-        if (0 != ViEAndroidAPI.VoE_SetSpeakerVolume(volumeLevel)) {
+        if (0 != vieAndroidAPI.VoE_SetSpeakerVolume(volumeLevel)) {
             Log.d(TAG, "VoE set speaker volume failed");
         }
 
         // Start playout
-        if (0 != ViEAndroidAPI.VoE_StartPlayout(voiceChannel)) {
+        if (0 != vieAndroidAPI.VoE_StartPlayout(voiceChannel)) {
             Log.d(TAG, "VoE start playout failed");
         }
 
-        if (0 != ViEAndroidAPI.VoE_SetSendDestination(voiceChannel,
+        if (0 != vieAndroidAPI.VoE_SetSendDestination(voiceChannel,
                                                       destinationPortVoice,
-                                                      GetRemoteIPString())) {
+                                                      getRemoteIPString())) {
             Log.d(TAG, "VoE set send  destination failed");
         }
 
-        if (0 != ViEAndroidAPI.VoE_SetSendCodec(voiceChannel, voiceCodecType)) {
+        if (0 != vieAndroidAPI.VoE_SetSendCodec(voiceChannel, voiceCodecType)) {
             Log.d(TAG, "VoE set send codec failed");
         }
 
-        if (0 != ViEAndroidAPI.VoE_SetECStatus(enableAECM)) {
+        if (0 != vieAndroidAPI.VoE_SetECStatus(enableAECM)) {
             Log.d(TAG, "VoE set EC Status failed");
         }
 
-        if (0 != ViEAndroidAPI.VoE_SetAGCStatus(enableAGC)) {
+        if (0 != vieAndroidAPI.VoE_SetAGCStatus(enableAGC)) {
             Log.d(TAG, "VoE set AGC Status failed");
         }
 
-        if (0 != ViEAndroidAPI.VoE_SetNSStatus(enableNS)) {
+        if (0 != vieAndroidAPI.VoE_SetNSStatus(enableNS)) {
             Log.d(TAG, "VoE set NS Status failed");
         }
 
-        if (0 != ViEAndroidAPI.VoE_StartSend(voiceChannel)) {
+        if (0 != vieAndroidAPI.VoE_StartSend(voiceChannel)) {
             Log.d(TAG, "VoE start send failed");
         }
 
@@ -818,8 +806,8 @@
         return 0;
     }
 
-    private void RouteAudio(boolean enableSpeaker) {
-        if (0 != ViEAndroidAPI.VoE_SetLoudspeakerStatus(enableSpeaker)) {
+    private void routeAudio(boolean enableSpeaker) {
+        if (0 != vieAndroidAPI.VoE_SetLoudspeakerStatus(enableSpeaker)) {
             Log.d(TAG, "VoE set louspeaker status failed");
         }
     }
@@ -827,41 +815,39 @@
     public void onClick(View arg0) {
         switch (arg0.getId()) {
             case R.id.btSwitchCamera:
-                if (usingFrontCamera ){
+                if (usingFrontCamera) {
                     btSwitchCamera.setText(R.string.frontCamera);
-                }
-                else {
+                } else {
                     btSwitchCamera.setText(R.string.backCamera);
                 }
                 usingFrontCamera = !usingFrontCamera;
 
                 if (viERunning) {
                     currentCameraOrientation =
-                            ViEAndroidAPI.GetCameraOrientation(usingFrontCamera?1:0);
-                    ViEAndroidAPI.StopCamera(cameraId);
+                            vieAndroidAPI.GetCameraOrientation(usingFrontCamera ? 1 : 0);
+                    vieAndroidAPI.StopCamera(cameraId);
                     mLlLocalSurface.removeView(svLocal);
 
-                    ViEAndroidAPI.StartCamera(channel,usingFrontCamera?1:0);
+                    vieAndroidAPI.StartCamera(channel, usingFrontCamera ? 1 : 0);
                     mLlLocalSurface.addView(svLocal);
-                    int neededRotation = GetCameraOrientation(currentCameraOrientation);
-                    ViEAndroidAPI.SetRotation(cameraId, neededRotation);
+                    int neededRotation = getCameraOrientation(currentCameraOrientation);
+                    vieAndroidAPI.SetRotation(cameraId, neededRotation);
                 }
                 break;
             case R.id.btStartStopCall:
-                ReadSettings();
+                readSettings();
                 if (viERunning || voERunning) {
-                    StopAll();
+                    stopAll();
                     wakeLock.release(); // release the wake lock
                     btStartStopCall.setText(R.string.startCall);
-                }
-                else if (enableVoice || enableVideo){
-                    StartCall();
+                } else if (enableVoice || enableVideo) {
+                    startCall();
+                    wakeLock.acquire(); // keep the screen on during the call
                     btStartStopCall.setText(R.string.stopCall);
                 }
                 break;
             case R.id.btExit:
-                StopAll();
+                stopAll();
                 finish();
                 break;
             case R.id.cbLoopback:
@@ -869,9 +855,8 @@
                 if (loopbackMode) {
                     remoteIp = LOOPBACK_IP;
                     etRemoteIp.setText(LOOPBACK_IP);
-                }
-                else {
-                    GetLocalIpAddress();
+                } else {
+                    getLocalIpAddress();
                     etRemoteIp.setText(remoteIp);
                 }
                 break;
@@ -881,19 +866,17 @@
             case R.id.cbStats:
                 isStatsOn = cbStats.isChecked();
                 if (isStatsOn) {
-                    AddStatsView();
-                }
-                else {
-                    RemoveSatsView();
+                    addStatusView();
+                } else {
+                    removeStatusView();
                 }
                 break;
             case R.id.cbCPULoad:
                 isCPULoadOn = cbCPULoad.isChecked();
                 if (isCPULoadOn) {
-                    StartCPULoad();
-                }
-                else {
-                    StopCPULoad();
+                    startCPULoad();
+                } else {
+                    stopCPULoad();
                 }
                 break;
             case R.id.radio_surface:
@@ -905,73 +888,70 @@
             case R.id.cbNack:
                 enableNack = cbEnableNack.isChecked();
                 if (viERunning) {
-                    ViEAndroidAPI.EnableNACK(channel, enableNack);
+                    vieAndroidAPI.EnableNACK(channel, enableNack);
                 }
                 break;
             case R.id.cbSpeaker:
                 enableSpeaker = cbEnableSpeaker.isChecked();
-                if (voERunning){
-                    RouteAudio(enableSpeaker);
+                if (voERunning) {
+                    routeAudio(enableSpeaker);
                 }
                 break;
             case R.id.cbDebugRecording:
-                if(voERunning && webrtcDebugDir != null) {
-                    if (cbEnableDebugAPM.isChecked() ) {
-                        ViEAndroidAPI.VoE_StartDebugRecording(
+                if (voERunning && webrtcDebugDir != null) {
+                    if (cbEnableDebugAPM.isChecked()) {
+                        vieAndroidAPI.VoE_StartDebugRecording(
                             webrtcDebugDir + String.format("/apm_%d.dat",
                                     System.currentTimeMillis()));
-                    }
-                    else {
-                        ViEAndroidAPI.VoE_StopDebugRecording();
+                    } else {
+                        vieAndroidAPI.VoE_StopDebugRecording();
                     }
                 }
                 break;
             case R.id.cbVoiceRTPDump:
-                if(voERunning && webrtcDebugDir != null) {
-                    if (cbEnableVoiceRTPDump.isChecked() ) {
-                        ViEAndroidAPI.VoE_StartIncomingRTPDump(channel,
+                if (voERunning && webrtcDebugDir != null) {
+                    if (cbEnableVoiceRTPDump.isChecked()) {
+                        vieAndroidAPI.VoE_StartIncomingRTPDump(channel,
                                 webrtcDebugDir + String.format("/voe_%d.rtp",
                                         System.currentTimeMillis()));
-                    }
-                    else {
-                        ViEAndroidAPI.VoE_StopIncomingRTPDump(channel);
+                    } else {
+                        vieAndroidAPI.VoE_StopIncomingRTPDump(channel);
                     }
                 }
                 break;
             case R.id.cbVideoRTPDump:
-                if(viERunning && webrtcDebugDir != null) {
-                    if (cbEnableVideoRTPDump.isChecked() ) {
-                        ViEAndroidAPI.StartIncomingRTPDump(channel,
+                if (viERunning && webrtcDebugDir != null) {
+                    if (cbEnableVideoRTPDump.isChecked()) {
+                        vieAndroidAPI.StartIncomingRTPDump(channel,
                                 webrtcDebugDir + String.format("/vie_%d.rtp",
                                         System.currentTimeMillis()));
-                    }
-                    else {
-                        ViEAndroidAPI.StopIncomingRTPDump(channel);
+                    } else {
+                        vieAndroidAPI.StopIncomingRTPDump(channel);
                     }
                 }
                 break;
             case R.id.cbAutoGainControl:
-                enableAGC=cbEnableAGC.isChecked();
-                if(voERunning) {
-                    ViEAndroidAPI.VoE_SetAGCStatus(enableAGC);
+                enableAGC = cbEnableAGC.isChecked();
+                if (voERunning) {
+                    vieAndroidAPI.VoE_SetAGCStatus(enableAGC);
                 }
                 break;
             case R.id.cbNoiseSuppression:
-                enableNS=cbEnableNS.isChecked();
-                if(voERunning) {
-                    ViEAndroidAPI.VoE_SetNSStatus(enableNS);
+                enableNS = cbEnableNS.isChecked();
+                if (voERunning) {
+                    vieAndroidAPI.VoE_SetNSStatus(enableNS);
                 }
                 break;
             case R.id.cbAECM:
                 enableAECM = cbEnableAECM.isChecked();
                 if (voERunning) {
-                    ViEAndroidAPI.VoE_SetECStatus(enableAECM);
+                    vieAndroidAPI.VoE_SetECStatus(enableAECM);
                 }
                 break;
         }
     }
 
-    private void ReadSettings() {
+    private void readSettings() {
         codecType = spCodecType.getSelectedItemPosition();
         voiceCodecType = spVoiceCodecType.getSelectedItemPosition();
 
@@ -1006,26 +986,27 @@
             int position, long id) {
         if ((adapterView == spCodecType || adapterView == spCodecSize) &&
                 viERunning) {
-            ReadSettings();
+            readSettings();
             // change the codectype
             if (enableVideoReceive) {
-                if (0 != ViEAndroidAPI.SetReceiveCodec(channel, codecType,
+                if (0 != vieAndroidAPI.SetReceiveCodec(channel, codecType,
                                 INIT_BITRATE, codecSizeWidth,
                                 codecSizeHeight,
-                                RECEIVE_CODEC_FRAMERATE))
+                                RECEIVE_CODEC_FRAMERATE)) {
                     Log.d(TAG, "ViE set receive codec failed");
+                }
             }
             if (enableVideoSend) {
-                if (0 != ViEAndroidAPI.SetSendCodec(channel, codecType,
+                if (0 != vieAndroidAPI.SetSendCodec(channel, codecType,
                                 INIT_BITRATE, codecSizeWidth, codecSizeHeight,
-                                SEND_CODEC_FRAMERATE))
+                                SEND_CODEC_FRAMERATE)) {
                     Log.d(TAG, "ViE set send codec failed");
+                }
             }
-        }
-        else if ((adapterView == spVoiceCodecType) && voERunning) {
+        } else if ((adapterView == spVoiceCodecType) && voERunning) {
             // change voice engine codec
-            ReadSettings();
-            if (0 != ViEAndroidAPI.VoE_SetSendCodec(voiceChannel, voiceCodecType)) {
+            readSettings();
+            if (0 != vieAndroidAPI.VoE_SetSendCodec(voiceChannel, voiceCodecType)) {
                 Log.d(TAG, "VoE set send codec failed");
             }
         }
@@ -1035,17 +1016,17 @@
         Log.d(TAG, "No setting selected");
     }
 
-    public int UpdateStats(int in_frameRateI, int in_bitRateI, int in_packetLoss,
-            int in_frameRateO, int in_bitRateO) {
-        frameRateI = in_frameRateI;
-        bitRateI = in_bitRateI;
-        packetLoss = in_packetLoss;
-        frameRateO = in_frameRateO;
-        bitRateO = in_bitRateO;
+    public int updateStats(int inFrameRateI, int inBitRateI,
+            int inPacketLoss, int inFrameRateO, int inBitRateO) {
+        frameRateI = inFrameRateI;
+        bitRateI = inBitRateI;
+        packetLoss = inPacketLoss;
+        frameRateO = inFrameRateO;
+        bitRateO = inBitRateO;
         return 0;
     }
 
-    private void AddStatsView() {
+    private void addStatusView() {
         if (statsView != null) {
             return;
         }
@@ -1063,45 +1044,42 @@
         statsView.setBackgroundColor(0);
     }
 
-    private void RemoveSatsView() {
+    private void removeStatusView() {
         mTabHost.removeView(statsView);
         statsView = null;
     }
 
-    private void StartCPULoad() {
+    private void startCPULoad() {
         if (null == mBackgroundLoad) {
             mBackgroundLoad = new Thread(new Runnable() {
                     public void run() {
                         Log.v(TAG, "Background load started");
                         mIsBackgroudLoadRunning = true;
-                        try{
+                        try {
                             while (mIsBackgroudLoadRunning) {
-                                // This while simulates cpu load.
+                                // This while loop simulates CPU load.
                                 // Log.v(TAG, "Runnable!!!");
                             }
-                        }
-                        catch(Throwable t) {
-                            Log.v(TAG, "StartCPULoad failed");
+                        } catch (Throwable t) {
+                            Log.v(TAG, "startCPULoad failed");
                         }
                     }
                 });
             mBackgroundLoad.start();
-        }
-        else {
+        } else {
             if (mBackgroundLoad.getState() == Thread.State.TERMINATED) {
                 mBackgroundLoad.start();
             }
         }
     }
 
-    private void StopCPULoad() {
+    private void stopCPULoad() {
         if (null != mBackgroundLoad) {
             mIsBackgroudLoadRunning = false;
-            try{
+            try {
                 mBackgroundLoad.join();
-            }
-            catch(Throwable t) {
-                Log.v(TAG, "StopCPULoad failed");
+            } catch (Throwable t) {
+                Log.v(TAG, "stopCPULoad failed");
             }
         }
     }
diff --git a/video_engine/test/auto_test/source/vie_autotest_codec.cc b/video_engine/test/auto_test/source/vie_autotest_codec.cc
index 0812e03..4cb9c04 100644
--- a/video_engine/test/auto_test/source/vie_autotest_codec.cc
+++ b/video_engine/test/auto_test/source/vie_autotest_codec.cc
@@ -526,8 +526,8 @@
     number_of_errors += ViETest::TestError(vie_external_codec != NULL,
                                            "ERROR: %s at line %d",
                                            __FUNCTION__, __LINE__);
-    webrtc::VideoCodec codec_struct;
-    error = ViE.codec->GetSendCodec(channel.videoChannel, codecStruct);
+    webrtc::VideoCodec codec;
+    error = ViE.codec->GetSendCodec(channel.videoChannel, codec);
     number_of_errors += ViETest::TestError(error == 0,
                                            "ERROR: %s at line %d",
                                            __FUNCTION__, __LINE__);
@@ -538,7 +538,7 @@
 
       // Test to register on wrong channel.
       error = vie_external_codec->RegisterExternalSendCodec(
-          channel.videoChannel + 5, codecStruct.plType, &ext_encoder);
+          channel.videoChannel + 5, codec.plType, &ext_encoder);
       number_of_errors += ViETest::TestError(error == -1,
                                              "ERROR: %s at line %d",
                                              __FUNCTION__, __LINE__);
@@ -547,22 +547,22 @@
           "ERROR: %s at line %d", __FUNCTION__, __LINE__);
 
       error = vie_external_codec->RegisterExternalSendCodec(
-                channel.videoChannel, codecStruct.plType, &ext_encoder);
+                channel.videoChannel, codec.plType, &ext_encoder);
       number_of_errors += ViETest::TestError(error == 0, "ERROR: %s at line %d",
                                              __FUNCTION__, __LINE__);
 
       // Use new external encoder
-      error = ViE.codec->SetSendCodec(channel.videoChannel, codecStruct);
+      error = ViE.codec->SetSendCodec(channel.videoChannel, codec);
       number_of_errors += ViETest::TestError(error == 0, "ERROR: %s at line %d",
                                              __FUNCTION__, __LINE__);
 
       TbI420Decoder ext_decoder;
       error = vie_external_codec->RegisterExternalReceiveCodec(
-          channel.videoChannel, codecStruct.plType, &ext_decoder);
+          channel.videoChannel, codec.plType, &ext_decoder);
       number_of_errors += ViETest::TestError(error == 0, "ERROR: %s at line %d",
                                              __FUNCTION__, __LINE__);
 
-      error = ViE.codec->SetReceiveCodec(channel.videoChannel, codec_struct);
+      error = ViE.codec->SetReceiveCodec(channel.videoChannel, codec);
       number_of_errors += ViETest::TestError(error == 0, "ERROR: %s at line %d",
                                              __FUNCTION__, __LINE__);
 
@@ -571,7 +571,7 @@
 
       // Test to deregister on wrong channel
       error = vie_external_codec->DeRegisterExternalSendCodec(
-          channel.videoChannel + 5, codecStruct.plType);
+          channel.videoChannel + 5, codec.plType);
       number_of_errors += ViETest::TestError(error == -1,
                                              "ERROR: %s at line %d",
                                              __FUNCTION__, __LINE__);
@@ -581,19 +581,19 @@
 
       // Test to deregister wrong payload type.
       error = vie_external_codec->DeRegisterExternalSendCodec(
-          channel.videoChannel, codecStruct.plType - 1);
+          channel.videoChannel, codec.plType - 1);
       number_of_errors += ViETest::TestError(error == -1,
                                              "ERROR: %s at line %d",
                                              __FUNCTION__, __LINE__);
 
       // Deregister external send codec
       error = vie_external_codec->DeRegisterExternalSendCodec(
-          channel.videoChannel, codecStruct.plType);
+          channel.videoChannel, codec.plType);
       number_of_errors += ViETest::TestError(error == 0, "ERROR: %s at line %d",
                                              __FUNCTION__, __LINE__);
 
       error = vie_external_codec->DeRegisterExternalReceiveCodec(
-          channel.videoChannel, codecStruct.plType);
+          channel.videoChannel, codec.plType);
       number_of_errors += ViETest::TestError(error == 0, "ERROR: %s at line %d",
                                              __FUNCTION__, __LINE__);
 
@@ -612,12 +612,12 @@
       number_of_errors += ViETest::TestError(
           encode_calls.RegisterEncodeCompleteCallback == 1,
           "ERROR: %s at line %d", __FUNCTION__, __LINE__);
+      number_of_errors += ViETest::TestError(
+          encode_calls.SetChannelParameters > 1, "ERROR: %s at line %d",
+          __FUNCTION__, __LINE__);
       number_of_errors += ViETest::TestError(encode_calls.SetRates > 1,
                                              "ERROR: %s at line %d",
                                              __FUNCTION__, __LINE__);
-      number_of_errors += ViETest::TestError(encode_calls.SetPacketLoss > 1,
-                                             "ERROR: %s at line %d",
-                                             __FUNCTION__, __LINE__);
 
       TbI420Decoder::FunctionCalls decode_calls =
           ext_decoder.GetFunctionCalls();
@@ -636,26 +636,26 @@
 
       ViETest::Log("Changing payload type Using external I420 codec");
 
-      codec_struct.plType = codecStruct.plType - 1;
+      codec.plType = codec.plType - 1;
       error = vie_external_codec->RegisterExternalReceiveCodec(
-          channel.videoChannel, codec_struct.plType, &ext_decoder);
+          channel.videoChannel, codec.plType, &ext_decoder);
       number_of_errors += ViETest::TestError(error == 0,
                                              "ERROR: %s at line %d",
                                              __FUNCTION__, __LINE__);
 
       error = ViE.codec->SetReceiveCodec(channel.videoChannel,
-                                         codec_struct);
+                                         codec);
       number_of_errors += ViETest::TestError(error == 0, "ERROR: %s at line %d",
                                              __FUNCTION__, __LINE__);
 
       error = vie_external_codec->RegisterExternalSendCodec(
-                channel.videoChannel, codec_struct.plType, &ext_encoder);
+                channel.videoChannel, codec.plType, &ext_encoder);
       number_of_errors += ViETest::TestError(error == 0, "ERROR: %s at line %d",
                                              __FUNCTION__, __LINE__);
 
       // Use new external encoder
       error = ViE.codec->SetSendCodec(channel.videoChannel,
-                                      codec_struct);
+                                      codec);
       number_of_errors += ViETest::TestError(error == 0, "ERROR: %s at line %d",
                                              __FUNCTION__, __LINE__);
 
@@ -666,11 +666,11 @@
       /// **************************************************************
 
       error = vie_external_codec->DeRegisterExternalSendCodec(
-                channel.videoChannel, codecStruct.plType);
+                channel.videoChannel, codec.plType);
       number_of_errors += ViETest::TestError(error == 0, "ERROR: %s at line %d",
                                              __FUNCTION__, __LINE__);
       error = vie_external_codec->DeRegisterExternalReceiveCodec(
-                channel.videoChannel, codecStruct.plType);
+                channel.videoChannel, codec.plType);
       number_of_errors += ViETest::TestError(error == 0, "ERROR: %s at line %d",
                                              __FUNCTION__, __LINE__);
 
@@ -688,13 +688,12 @@
       number_of_errors += ViETest::TestError(
           encode_calls.RegisterEncodeCompleteCallback == 2,
           "ERROR: %s at line %d", __FUNCTION__, __LINE__);
+      number_of_errors += ViETest::TestError(
+          encode_calls.SetChannelParameters > 1, "ERROR: %s at line %d",
+          __FUNCTION__, __LINE__);
       number_of_errors += ViETest::TestError(encode_calls.SetRates > 1,
                                              "ERROR: %s at line %d",
                                              __FUNCTION__, __LINE__);
-      number_of_errors += ViETest::TestError(encode_calls.SetPacketLoss > 1,
-                                             "ERROR: %s at line %d",
-                                             __FUNCTION__, __LINE__);
-
       decode_calls = ext_decoder.GetFunctionCalls();
       number_of_errors += ViETest::TestError(decode_calls.InitDecode == 2,
                                              "ERROR: %s at line %d",
diff --git a/video_engine/test/auto_test/source/vie_autotest_encryption.cc b/video_engine/test/auto_test/source/vie_autotest_encryption.cc
index 0c06f32..1d7a835 100644
--- a/video_engine/test/auto_test/source/vie_autotest_encryption.cc
+++ b/video_engine/test/auto_test/source/vie_autotest_encryption.cc
@@ -31,45 +31,53 @@
     {
     }
 
-    virtual void encrypt(int channel_no, unsigned char * in_data,
-                         unsigned char * out_data, int bytes_in, int* bytes_out)
+    virtual void encrypt(int channel_no, unsigned char* in_data,
+                         unsigned char* out_data, int bytes_in, int* bytes_out)
     {
         for (int i = 0; i < bytes_in; i++)
         {
             out_data[i] = ~in_data[i];
         }
+        assert(*bytes_out >= bytes_in + 2);
         *bytes_out = bytes_in + 2;
+        out_data[bytes_in] = 'a';
+        out_data[bytes_in + 1] = 'b';
     }
 
-    virtual void decrypt(int channel_no, unsigned char * in_data,
-                         unsigned char * out_data, int bytes_in, int* bytes_out)
+    virtual void decrypt(int channel_no, unsigned char* in_data,
+                         unsigned char* out_data, int bytes_in, int* bytes_out)
     {
         for (int i = 0; i < bytes_in - 2; i++)
         {
             out_data[i] = ~in_data[i];
         }
+        assert(*bytes_out >= bytes_in - 2);
         *bytes_out = bytes_in - 2;
     }
 
-    virtual void encrypt_rtcp(int channel_no, unsigned char * in_data,
-                              unsigned char * out_data, int bytes_in,
+    virtual void encrypt_rtcp(int channel_no, unsigned char* in_data,
+                              unsigned char* out_data, int bytes_in,
                               int* bytes_out)
     {
         for (int i = 0; i < bytes_in; i++)
         {
             out_data[i] = ~in_data[i];
         }
+        assert(*bytes_out >= bytes_in + 2);
         *bytes_out = bytes_in + 2;
+        out_data[bytes_in] = 'a';
+        out_data[bytes_in + 1] = 'b';
     }
 
-    virtual void decrypt_rtcp(int channel_no, unsigned char * in_data,
-                              unsigned char * out_data, int bytes_in,
+    virtual void decrypt_rtcp(int channel_no, unsigned char* in_data,
+                              unsigned char* out_data, int bytes_in,
                               int* bytes_out)
     {
         for (int i = 0; i < bytes_in - 2; i++)
         {
             out_data[i] = ~in_data[i];
         }
+        assert(*bytes_out >= bytes_in - 2);
         *bytes_out = bytes_in - 2;
     }
 };
diff --git a/video_engine/test/libvietest/include/tb_I420_codec.h b/video_engine/test/libvietest/include/tb_I420_codec.h
index 0d15212..721a5ec 100644
--- a/video_engine/test/libvietest/include/tb_I420_codec.h
+++ b/video_engine/test/libvietest/include/tb_I420_codec.h
@@ -48,8 +48,6 @@
     virtual WebRtc_Word32 SetChannelParameters(WebRtc_UWord32 packetLoss,
                                                int rtt);
 
-    virtual WebRtc_Word32 SetPacketLoss(WebRtc_UWord32 packetLoss);
-
     virtual WebRtc_Word32 SetRates(WebRtc_UWord32 newBitRate,
                                    WebRtc_UWord32 frameRate);
 
@@ -65,8 +63,8 @@
         WebRtc_Word32 RegisterEncodeCompleteCallback;
         WebRtc_Word32 Release;
         WebRtc_Word32 Reset;
+        WebRtc_Word32 SetChannelParameters;
         WebRtc_Word32 SetRates;
-        WebRtc_Word32 SetPacketLoss;
         WebRtc_Word32 SetPeriodicKeyFrames;
         WebRtc_Word32 CodecConfigParameters;
 
diff --git a/video_engine/test/libvietest/testbed/tb_I420_codec.cc b/video_engine/test/libvietest/testbed/tb_I420_codec.cc
index 2782f93..cb9e2dc 100644
--- a/video_engine/test/libvietest/testbed/tb_I420_codec.cc
+++ b/video_engine/test/libvietest/testbed/tb_I420_codec.cc
@@ -79,7 +79,8 @@
 
 WebRtc_Word32 TbI420Encoder::SetChannelParameters(WebRtc_UWord32 packetLoss,
                                                   int rtt) {
-  return 0;
+  _functionCalls.SetChannelParameters++;
+  return WEBRTC_VIDEO_CODEC_OK;
 }
 
 WebRtc_Word32 TbI420Encoder::InitEncode(const webrtc::VideoCodec* inst,
@@ -171,12 +172,6 @@
     return WEBRTC_VIDEO_CODEC_OK;
 }
 
-WebRtc_Word32 TbI420Encoder::SetPacketLoss(WebRtc_UWord32 packetLoss)
-{
-    _functionCalls.SetPacketLoss++;
-    return WEBRTC_VIDEO_CODEC_OK;
-}
-
 WebRtc_Word32 TbI420Encoder::SetRates(WebRtc_UWord32 newBitRate,
                                       WebRtc_UWord32 frameRate)
 {
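
TbI420Encoder is a call-counting stub: every interface method bumps a counter
in FunctionCalls and the autotest asserts on those counters afterwards, which
is why the packet-loss/RTT path needs both the new SetChannelParameters field
and the increment above. The pattern in miniature (a sketch, not the
testbed's actual class):

    // Minimal call-counting stub in the style of TbI420Encoder.
    struct FunctionCalls {
      int SetChannelParameters = 0;
    };

    class CountingEncoder {
     public:
      int SetChannelParameters(unsigned int packet_loss, int rtt) {
        ++calls_.SetChannelParameters;  // Record the call for the test.
        return 0;                       // Stands in for WEBRTC_VIDEO_CODEC_OK.
      }
      FunctionCalls GetFunctionCalls() const { return calls_; }
     private:
      FunctionCalls calls_;
    };
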
diff --git a/video_engine/vie_file_image.cc b/video_engine/vie_file_image.cc
index 772642c..1f224fe 100644
--- a/video_engine/vie_file_image.cc
+++ b/video_engine/vie_file_image.cc
@@ -69,8 +69,7 @@
   }
   fclose(image_file);
 
-  JpegDecoder decoder;
-  int ret = decoder.Decode(image_buffer, *video_frame);
+  int ret = ConvertJpegToI420(image_buffer, video_frame);
 
   delete [] image_buffer._buffer;
   image_buffer._buffer = NULL;
diff --git a/video_engine/vie_receiver.cc b/video_engine/vie_receiver.cc
index f3c6a6a..a32c813 100644
--- a/video_engine/vie_receiver.cc
+++ b/video_engine/vie_receiver.cc
@@ -164,7 +164,7 @@
     CriticalSectionScoped cs(receive_cs_.get());
 
     if (external_decryption_) {
-      int decrypted_length = 0;
+      int decrypted_length = kViEMaxMtu;
       external_decryption_->decrypt(channel_id_, received_packet,
                                     decryption_buffer_, received_packet_length,
                                     &decrypted_length);
@@ -202,7 +202,7 @@
     CriticalSectionScoped cs(receive_cs_.get());
 
     if (external_decryption_) {
-      int decrypted_length = 0;
+      int decrypted_length = kViEMaxMtu;
       external_decryption_->decrypt_rtcp(channel_id_, received_packet,
                                          decryption_buffer_,
                                          received_packet_length,
diff --git a/video_engine/vie_remb.cc b/video_engine/vie_remb.cc
index 496e6ad..07c4f8a 100644
--- a/video_engine/vie_remb.cc
+++ b/video_engine/vie_remb.cc
@@ -21,8 +21,8 @@
 
 namespace webrtc {
 
-const int kRembSendIntervallMs = 1000;
 const int kRembTimeOutThresholdMs = 2000;
+const int kRembSendIntervallMs = 1000;
 const unsigned int kRembMinimumBitrateKbps = 50;
 
 // Percentage threshold for sending a new REMB asap.
@@ -32,7 +32,9 @@
     : process_thread_(process_thread),
       list_crit_(CriticalSectionWrapper::CreateCriticalSection()),
       last_remb_time_(TickTime::MillisecondTimestamp()),
-      last_send_bitrate_(0) {
+      last_send_bitrate_(0),
+      bitrate_(0),
+      bitrate_update_time_ms_(-1) {
   process_thread->RegisterModule(this);
 }
 
@@ -62,7 +64,6 @@
                "VieRemb::RemoveReceiveChannel(%p)", rtp_rtcp);
 
   CriticalSectionScoped cs(list_crit_.get());
-  unsigned int ssrc = rtp_rtcp->RemoteSSRC();
   for (RtpModules::iterator it = receive_modules_.begin();
        it != receive_modules_.end(); ++it) {
     if ((*it) == rtp_rtcp) {
@@ -70,7 +71,6 @@
       break;
     }
   }
-  update_time_bitrates_.erase(ssrc);
 }
 
 void VieRemb::AddRembSender(RtpRtcp* rtp_rtcp) {
@@ -110,23 +110,14 @@
     return true;
 }
 
-void VieRemb::OnReceiveBitrateChanged(unsigned int ssrc, unsigned int bitrate) {
+void VieRemb::OnReceiveBitrateChanged(unsigned int bitrate) {
   WEBRTC_TRACE(kTraceStream, kTraceVideo, -1,
-               "VieRemb::UpdateBitrateEstimate(ssrc: %u, bitrate: %u)",
-               ssrc, bitrate);
+               "VieRemb::UpdateBitrateEstimate(bitrate: %u)", bitrate);
   CriticalSectionScoped cs(list_crit_.get());
-
-  // Check if this is a new ssrc and add it to the map if it is.
-  if (update_time_bitrates_.find(ssrc) == update_time_bitrates_.end()) {
-    update_time_bitrates_[ssrc] = std::make_pair(
-        TickTime::MillisecondTimestamp(), bitrate);
-  }
-
   // If we already have an estimate, check if the new total estimate is below
   // kSendThresholdPercent of the previous estimate.
   if (last_send_bitrate_ > 0) {
-    unsigned int new_remb_bitrate = last_send_bitrate_ -
-        update_time_bitrates_[ssrc].second + bitrate;
+    unsigned int new_remb_bitrate = last_send_bitrate_ - bitrate_ + bitrate;
 
     if (new_remb_bitrate < kSendThresholdPercent * last_send_bitrate_ / 100) {
       // The new bitrate estimate is less than kSendThresholdPercent % of the
@@ -134,8 +125,8 @@
       last_remb_time_ = TickTime::MillisecondTimestamp() - kRembSendIntervallMs;
     }
   }
-  update_time_bitrates_[ssrc] = std::make_pair(
-      TickTime::MillisecondTimestamp(), bitrate);
+  bitrate_ = bitrate;
+  bitrate_update_time_ms_ = TickTime::MillisecondTimestamp();
 }
 
 WebRtc_Word32 VieRemb::ChangeUniqueId(const WebRtc_Word32 id) {
@@ -157,20 +148,14 @@
   // Calculate total receive bitrate estimate.
   list_crit_->Enter();
 
-  // Remove any timed out estimates.
-  SsrcTimeBitrate::iterator it = update_time_bitrates_.begin();
-  while (it != update_time_bitrates_.end()) {
-    if (TickTime::MillisecondTimestamp() - it->second.first >
+  // Reset the estimate if it has timed out.
+  if (TickTime::MillisecondTimestamp() - bitrate_update_time_ms_ >
       kRembTimeOutThresholdMs) {
-      update_time_bitrates_.erase(it++);
-    } else {
-      ++it;
-    }
+    bitrate_ = 0;
+    bitrate_update_time_ms_ = -1;
   }
 
-  int num_bitrates = update_time_bitrates_.size();
-
-  if (num_bitrates == 0 || receive_modules_.empty()) {
+  if (bitrate_update_time_ms_ == -1 || receive_modules_.empty()) {
     list_crit_->Leave();
     return 0;
   }
@@ -178,12 +163,6 @@
   // TODO(mflodman) Use std::vector and change RTP module API.
   unsigned int* ssrcs = new unsigned int[receive_modules_.size()];
 
-  unsigned int total_bitrate = 0;
-  for (it = update_time_bitrates_.begin(); it != update_time_bitrates_.end();
-      ++it) {
-    total_bitrate += it->second.second;
-  }
-
   int idx = 0;
   RtpModules::iterator rtp_it;
   for (rtp_it = receive_modules_.begin(); rtp_it != receive_modules_.end();
@@ -198,7 +177,7 @@
   } else {
     sender = receive_modules_.front();
   }
-  last_send_bitrate_ = total_bitrate;
+  last_send_bitrate_ = bitrate_;
 
   // Never send a REMB lower than last_send_bitrate_.
   if (last_send_bitrate_ < kRembMinimumBitrateKbps) {
@@ -207,7 +186,7 @@
   list_crit_->Leave();
 
   if (sender) {
-    sender->SetREMBData(total_bitrate, num_bitrates, ssrcs);
+    sender->SetREMBData(bitrate_, receive_modules_.size(), ssrcs);
   }
   delete [] ssrcs;
   return 0;
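
With the per-SSRC map gone, the early-send rule in OnReceiveBitrateChanged()
reduces to swapping the previous aggregate for the new estimate and comparing
against the last REMB actually sent. A sketch of that decision, where 97 for
kSendThresholdPercent is an assumption taken from the "less than 3%" comment
in vie_remb_unittest.cc:

    bool ShouldSendRembEarly(unsigned int last_send_bitrate,
                             unsigned int old_bitrate,
                             unsigned int new_bitrate) {
      if (last_send_bitrate == 0)
        return false;  // Nothing sent yet; the periodic path handles it.
      // Replace the previous contribution with the new estimate.
      unsigned int new_remb = last_send_bitrate - old_bitrate + new_bitrate;
      // E.g. 456 -> 356 kbps: 356 < 97 * 456 / 100 == 442, so send now.
      return new_remb < 97u * last_send_bitrate / 100;
    }
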
diff --git a/video_engine/vie_remb.h b/video_engine/vie_remb.h
index 7fe12f8..43f373e 100644
--- a/video_engine/vie_remb.h
+++ b/video_engine/vie_remb.h
@@ -53,12 +53,12 @@
   // Returns true if the instance is in use, false otherwise.
   bool InUse() const;
 
-  // Called every time there is a new bitrate estimate for the received stream
-  // with given SSRC. This call will trigger a new RTCP REMB packet if the
-  // bitrate estimate has decreased or if no RTCP REMB packet has been sent for
+  // Called every time there is a new bitrate estimate for a receive channel
+  // group. This call will trigger a new RTCP REMB packet if the bitrate
+  // estimate has decreased or if no RTCP REMB packet has been sent for
   // a certain time interval.
   // Implements RtpReceiveBitrateUpdate.
-  virtual void OnReceiveBitrateChanged(unsigned int ssrc, unsigned int bitrate);
+  virtual void OnReceiveBitrateChanged(unsigned int bitrate);
 
   // Implements Module.
   virtual WebRtc_Word32 ChangeUniqueId(const WebRtc_Word32 id);
@@ -67,8 +67,6 @@
 
  private:
   typedef std::list<RtpRtcp*> RtpModules;
-  typedef std::map<unsigned int, std::pair<int64_t, unsigned int> >
-      SsrcTimeBitrate;
 
   ProcessThread* process_thread_;
   scoped_ptr<CriticalSectionWrapper> list_crit_;
@@ -83,8 +81,9 @@
   // All modules that can send REMB RTCP.
   RtpModules rtcp_sender_;
 
-  // The last bitrate update for each SSRC.
-  SsrcTimeBitrate update_time_bitrates_;
+  // The last bitrate update.
+  unsigned int bitrate_;
+  int64_t bitrate_update_time_ms_;
 };
 
 }  // namespace webrtc
diff --git a/video_engine/vie_remb_unittest.cc b/video_engine/vie_remb_unittest.cc
index b024877..3a7f205 100644
--- a/video_engine/vie_remb_unittest.cc
+++ b/video_engine/vie_remb_unittest.cc
@@ -55,11 +55,11 @@
   vie_remb_->AddRembSender(&rtp);
 
   const unsigned int bitrate_estimate = 456;
-  unsigned int ssrc[] = { 1234 };
+  unsigned int ssrc = 1234;
 
-  vie_remb_->OnReceiveBitrateChanged(ssrc[0], bitrate_estimate);
+  vie_remb_->OnReceiveBitrateChanged(bitrate_estimate);
   EXPECT_CALL(rtp, RemoteSSRC())
-      .WillRepeatedly(Return(ssrc[0]));
+      .WillRepeatedly(Return(ssrc));
 
   // TODO(mflodman) Add fake clock and remove the lowered bitrate below.
   SleepMs(1010);
@@ -68,7 +68,7 @@
   vie_remb_->Process();
 
   // Lower bitrate to send another REMB packet.
-  vie_remb_->OnReceiveBitrateChanged(ssrc[0], bitrate_estimate - 100);
+  vie_remb_->OnReceiveBitrateChanged(bitrate_estimate - 100);
   EXPECT_CALL(rtp, SetREMBData(bitrate_estimate - 100, 1, _))
         .Times(1);
   vie_remb_->Process();
@@ -83,11 +83,11 @@
   vie_remb_->AddRembSender(&rtp);
 
   unsigned int bitrate_estimate = 456;
-  unsigned int ssrc[] = { 1234 };
+  unsigned int ssrc = 1234;
 
-  vie_remb_->OnReceiveBitrateChanged(ssrc[0], bitrate_estimate);
+  vie_remb_->OnReceiveBitrateChanged(bitrate_estimate);
   EXPECT_CALL(rtp, RemoteSSRC())
-      .WillRepeatedly(Return(ssrc[0]));
+      .WillRepeatedly(Return(ssrc));
   // Call process to get a first estimate.
   SleepMs(1010);
   EXPECT_CALL(rtp, SetREMBData(bitrate_estimate, 1, _))
@@ -99,11 +99,11 @@
   bitrate_estimate = bitrate_estimate - 100;
   EXPECT_CALL(rtp, SetREMBData(bitrate_estimate, 1, _))
       .Times(1);
-  vie_remb_->OnReceiveBitrateChanged(ssrc[0], bitrate_estimate);
+  vie_remb_->OnReceiveBitrateChanged(bitrate_estimate);
   vie_remb_->Process();
 }
 
-TEST_F(ViERembTest, VerifyCombinedBitrateEstimate) {
+TEST_F(ViERembTest, VerifyIncreasingAndDecreasing) {
   MockRtpRtcp rtp_0;
   MockRtpRtcp rtp_1;
   vie_remb_->AddReceiveChannel(&rtp_0);
@@ -113,27 +113,32 @@
   unsigned int bitrate_estimate[] = { 456, 789 };
   unsigned int ssrc[] = { 1234, 5678 };
 
-  vie_remb_->OnReceiveBitrateChanged(ssrc[0], bitrate_estimate[0]);
+  vie_remb_->OnReceiveBitrateChanged(bitrate_estimate[0]);
   EXPECT_CALL(rtp_0, RemoteSSRC())
       .Times(AnyNumber())
       .WillRepeatedly(Return(ssrc[0]));
+  EXPECT_CALL(rtp_1, RemoteSSRC())
+      .Times(AnyNumber())
+      .WillRepeatedly(Return(ssrc[1]));
 
   // Call process to get a first estimate.
-  EXPECT_CALL(rtp_0, SetREMBData(bitrate_estimate[0], 1, _))
+  EXPECT_CALL(rtp_0, SetREMBData(bitrate_estimate[0], 2, _))
         .Times(1);
   SleepMs(1010);
   vie_remb_->Process();
 
-  vie_remb_->OnReceiveBitrateChanged(ssrc[1], bitrate_estimate[1] + 100);
+  vie_remb_->OnReceiveBitrateChanged(bitrate_estimate[1] + 100);
+  EXPECT_CALL(rtp_0, RemoteSSRC())
+      .Times(AnyNumber())
+      .WillRepeatedly(Return(ssrc[0]));
   EXPECT_CALL(rtp_1, RemoteSSRC())
       .Times(AnyNumber())
       .WillRepeatedly(Return(ssrc[1]));
 
   // Lower the estimate to trigger a callback.
-  int total_bitrate = bitrate_estimate[0] + bitrate_estimate[1];
-  EXPECT_CALL(rtp_0, SetREMBData(total_bitrate, 2, _))
+  EXPECT_CALL(rtp_0, SetREMBData(bitrate_estimate[1], 2, _))
       .Times(1);
-  vie_remb_->OnReceiveBitrateChanged(ssrc[1], bitrate_estimate[1]);
+  vie_remb_->OnReceiveBitrateChanged(bitrate_estimate[1]);
   vie_remb_->Process();
 
   vie_remb_->RemoveReceiveChannel(&rtp_0);
@@ -148,15 +153,13 @@
   vie_remb_->AddRembSender(&rtp_0);
   vie_remb_->AddReceiveChannel(&rtp_1);
 
-  unsigned int bitrate_estimate[] = { 456, 789 };
+  unsigned int bitrate_estimate = 456;
   unsigned int ssrc[] = { 1234, 5678 };
 
-  vie_remb_->OnReceiveBitrateChanged(ssrc[0], bitrate_estimate[0]);
+  vie_remb_->OnReceiveBitrateChanged(bitrate_estimate);
   EXPECT_CALL(rtp_0, RemoteSSRC())
       .Times(AnyNumber())
       .WillRepeatedly(Return(ssrc[0]));
-
-  vie_remb_->OnReceiveBitrateChanged(ssrc[1], bitrate_estimate[1]);
   EXPECT_CALL(rtp_1, RemoteSSRC())
       .Times(AnyNumber())
       .WillRepeatedly(Return(ssrc[1]));
@@ -164,19 +167,18 @@
   // Trigger a first call to have a running state.
   // TODO(mflodman) Add fake clock.
   SleepMs(1010);
-  EXPECT_CALL(rtp_0,
-              SetREMBData(bitrate_estimate[0] + bitrate_estimate[1], 2, _))
+  EXPECT_CALL(rtp_0, SetREMBData(bitrate_estimate, 2, _))
       .Times(1);
   vie_remb_->Process();
 
   // Increased estimate shouldn't trigger a callback right away.
-  vie_remb_->OnReceiveBitrateChanged(ssrc[0], bitrate_estimate[0] + 1);
+  vie_remb_->OnReceiveBitrateChanged(bitrate_estimate + 1);
   EXPECT_CALL(rtp_0, SetREMBData(_, _, _))
       .Times(0);
 
-  // Decresing the estimate less than 3% shouldn't trigger a new callback.
-  int lower_estimate = bitrate_estimate[0] * 98 / 100;
-  vie_remb_->OnReceiveBitrateChanged(ssrc[0], lower_estimate);
+  // Decreasing the estimate less than 3% shouldn't trigger a new callback.
+  int lower_estimate = bitrate_estimate * 98 / 100;
+  vie_remb_->OnReceiveBitrateChanged(lower_estimate);
   EXPECT_CALL(rtp_0, SetREMBData(_, _, _))
       .Times(0);
 
@@ -193,45 +195,40 @@
   vie_remb_->AddRembSender(&rtp_0);
   vie_remb_->AddReceiveChannel(&rtp_1);
 
-  unsigned int bitrate_estimate[] = { 456, 789 };
+  unsigned int bitrate_estimate = 456;
   unsigned int ssrc[] = { 1234, 5678 };
 
-  vie_remb_->OnReceiveBitrateChanged(ssrc[0], bitrate_estimate[0]);
+  vie_remb_->OnReceiveBitrateChanged(bitrate_estimate);
   EXPECT_CALL(rtp_0, RemoteSSRC())
       .Times(AnyNumber())
       .WillRepeatedly(Return(ssrc[0]));
-
-  vie_remb_->OnReceiveBitrateChanged(ssrc[1], bitrate_estimate[1]);
   EXPECT_CALL(rtp_1, RemoteSSRC())
       .Times(AnyNumber())
       .WillRepeatedly(Return(ssrc[1]));
 
   // Call process to get a first estimate.
   SleepMs(1010);
-  EXPECT_CALL(rtp_0, SetREMBData(bitrate_estimate[0] + bitrate_estimate[1], 2,
-                                 _))
+  EXPECT_CALL(rtp_0, SetREMBData(bitrate_estimate, 2, _))
       .Times(1);
   vie_remb_->Process();
 
   // Decrease estimate to trigger a REMB.
-  bitrate_estimate[0] = bitrate_estimate[0] - 100;
-  EXPECT_CALL(rtp_0, SetREMBData(bitrate_estimate[0] + bitrate_estimate[1], 2,
-                                 _))
+  bitrate_estimate = bitrate_estimate - 100;
+  EXPECT_CALL(rtp_0, SetREMBData(bitrate_estimate, 2, _))
       .Times(1);
-  vie_remb_->OnReceiveBitrateChanged(ssrc[0], bitrate_estimate[0]);
+  vie_remb_->OnReceiveBitrateChanged(bitrate_estimate);
   vie_remb_->Process();
 
   // Remove the sending module, add it again -> should get remb on the second
   // module.
   vie_remb_->RemoveRembSender(&rtp_0);
   vie_remb_->AddRembSender(&rtp_1);
-  vie_remb_->OnReceiveBitrateChanged(ssrc[0], bitrate_estimate[0]);
+  vie_remb_->OnReceiveBitrateChanged(bitrate_estimate);
 
-  bitrate_estimate[1] = bitrate_estimate[1] - 100;
-  EXPECT_CALL(rtp_1, SetREMBData(bitrate_estimate[0] + bitrate_estimate[1], 2,
-                                 _))
+  bitrate_estimate = bitrate_estimate - 100;
+  EXPECT_CALL(rtp_1, SetREMBData(bitrate_estimate, 2, _))
         .Times(1);
-  vie_remb_->OnReceiveBitrateChanged(ssrc[1], bitrate_estimate[1]);
+  vie_remb_->OnReceiveBitrateChanged(bitrate_estimate);
   vie_remb_->Process();
 
   vie_remb_->RemoveReceiveChannel(&rtp_0);
@@ -241,13 +238,13 @@
 TEST_F(ViERembTest, OnlyOneRembForDoubleProcess) {
   MockRtpRtcp rtp;
   unsigned int bitrate_estimate = 456;
-  unsigned int ssrc[] = { 1234 };
+  unsigned int ssrc = 1234;
 
   vie_remb_->AddReceiveChannel(&rtp);
   vie_remb_->AddRembSender(&rtp);
-  vie_remb_->OnReceiveBitrateChanged(ssrc[0], bitrate_estimate);
+  vie_remb_->OnReceiveBitrateChanged(bitrate_estimate);
   EXPECT_CALL(rtp, RemoteSSRC())
-      .WillRepeatedly(Return(ssrc[0]));
+      .WillRepeatedly(Return(ssrc));
 
   // Call process to get a first estimate.
   SleepMs(1010);
@@ -259,7 +256,7 @@
   bitrate_estimate = bitrate_estimate - 100;
   EXPECT_CALL(rtp, SetREMBData(bitrate_estimate, 1, _))
       .Times(1);
-  vie_remb_->OnReceiveBitrateChanged(ssrc[0], bitrate_estimate);
+  vie_remb_->OnReceiveBitrateChanged(bitrate_estimate);
   vie_remb_->Process();
 
   // Call Process again, this should not trigger a new callback.
@@ -295,11 +292,11 @@
   vie_remb_->AddReceiveChannel(&rtp);
 
   unsigned int bitrate_estimate = 456;
-  unsigned int ssrc[] = { 1234 };
+  unsigned int ssrc = 1234;
 
-  vie_remb_->OnReceiveBitrateChanged(ssrc[0], bitrate_estimate);
+  vie_remb_->OnReceiveBitrateChanged(bitrate_estimate);
   EXPECT_CALL(rtp, RemoteSSRC())
-      .WillRepeatedly(Return(ssrc[0]));
+      .WillRepeatedly(Return(ssrc));
 
   // Call process to get a first estimate.
   SleepMs(1010);
@@ -311,7 +308,7 @@
   bitrate_estimate = bitrate_estimate - 100;
   EXPECT_CALL(rtp, SetREMBData(_, _, _))
       .Times(1);
-  vie_remb_->OnReceiveBitrateChanged(ssrc[0], bitrate_estimate);
+  vie_remb_->OnReceiveBitrateChanged(bitrate_estimate);
   vie_remb_->Process();
 }
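
The tests above exercise the new OnReceiveBitrateChanged(unsigned int bitrate)
signature, which drops the per-SSRC argument, together with the rule that an
estimate less than 3% below the last sent value is ignored. A minimal sketch of
that comparison, with hypothetical names and the Process()-driven scheduling
left out:

// Sketch only; ShouldSendRemb(), |new_estimate| and |last_sent| are
// illustrative names, not taken from this patch.
bool ShouldSendRemb(unsigned int new_estimate, unsigned int last_sent) {
  // A drop of 3% or more below the last sent REMB bitrate warrants a new
  // packet; the 98% value used in the test above is suppressed.
  return new_estimate * 100 < last_sent * 97;
}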
 
diff --git a/video_engine/vie_sender.cc b/video_engine/vie_sender.cc
index 6f682c1..4e6dcae 100644
--- a/video_engine/vie_sender.cc
+++ b/video_engine/vie_sender.cc
@@ -137,6 +137,8 @@
   // TODO(mflodman) Change decrypt to get rid of this cast.
   void* tmp_ptr = const_cast<void*>(data);
   unsigned char* send_packet = static_cast<unsigned char*>(tmp_ptr);
+
+  // Length of the packet data handed to optional encryption and the transport.
   int send_packet_length = len;
 
   if (rtp_dump_) {
@@ -144,10 +146,13 @@
   }
 
   if (external_encryption_) {
-    external_encryption_->encrypt(channel_id_, send_packet,
-                                  encryption_buffer_, send_packet_length,
-                                  static_cast<int*>(&send_packet_length));
+    // Out-parameter for encrypt(), seeded with the buffer capacity.
+    int encrypted_packet_length = kViEMaxMtu;
+
+    external_encryption_->encrypt(channel_id_, send_packet, encryption_buffer_,
+                                  send_packet_length, &encrypted_packet_length);
     send_packet = encryption_buffer_;
+    send_packet_length = encrypted_packet_length;
   }
   const int bytes_sent = transport_->SendPacket(channel_id_, send_packet,
                                                 send_packet_length);
@@ -171,6 +176,8 @@
   // TODO(mflodman) Change decrypt to get rid of this cast.
   void* tmp_ptr = const_cast<void*>(data);
   unsigned char* send_packet = static_cast<unsigned char*>(tmp_ptr);
+
+  // Length of the packet data handed to optional encryption and the transport.
   int send_packet_length = len;
 
   if (rtp_dump_) {
@@ -178,10 +185,14 @@
   }
 
   if (external_encryption_) {
+    // Out-parameter for encrypt_rtcp(), seeded with the buffer capacity.
+    int encrypted_packet_length = kViEMaxMtu;
+
     external_encryption_->encrypt_rtcp(
         channel_id_, send_packet, encryption_buffer_, send_packet_length,
-        static_cast<int*>(&send_packet_length));
+        &encrypted_packet_length);
     send_packet = encryption_buffer_;
+    send_packet_length = encrypted_packet_length;
   }
 
   const int bytes_sent = transport_->SendRTCPPacket(channel_id_, send_packet,
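
The two vie_sender.cc hunks stop reusing send_packet_length as the
out-parameter of the encryption call: a dedicated encrypted_packet_length is
seeded with the capacity of encryption_buffer_ (kViEMaxMtu) and copied back
into send_packet_length only after encrypt()/encrypt_rtcp() returns. Reduced
to its core, with Encrypt() standing in for the external Encryption interface:

// Sketch of the corrected in/out length handling; Encrypt() is a stand-in
// for the external Encryption interface, not an identifier from this patch.
unsigned char encryption_buffer[kViEMaxMtu];
int send_packet_length = len;              // In: plaintext length.
int encrypted_packet_length = kViEMaxMtu;  // Out: seeded with the capacity.

Encrypt(send_packet, encryption_buffer, send_packet_length,
        &encrypted_packet_length);
send_packet = encryption_buffer;
send_packet_length = encrypted_packet_length;  // Update only after the call.
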
diff --git a/voice_engine/channel.cc b/voice_engine/channel.cc
index b4889e2..4370b74 100644
--- a/voice_engine/channel.cc
+++ b/voice_engine/channel.cc
@@ -6403,12 +6403,12 @@
     }
 
     WebRtc_Word32 playoutFrequency = _audioCodingModule.PlayoutFrequency();
-    if (_audioCodingModule.ReceiveCodec(currRecCodec) == 0)
-    {
-        if (STR_CASE_CMP("G722", currRecCodec.plname) == 0)
-        {
-            playoutFrequency = 8000;
-        }
+    if (_audioCodingModule.ReceiveCodec(currRecCodec) == 0) {
+      if (STR_CASE_CMP("G722", currRecCodec.plname) == 0) {
+        playoutFrequency = 8000;
+      } else if (STR_CASE_CMP("opus", currRecCodec.plname) == 0) {
+        playoutFrequency = 48000;
+      }
     }
     timestamp -= (delayMS * (playoutFrequency/1000));
 
@@ -6482,16 +6482,20 @@
     rtpReceiveFrequency = _audioCodingModule.ReceiveFrequency();
 
     CodecInst currRecCodec;
-    if (_audioCodingModule.ReceiveCodec(currRecCodec) == 0)
-    {
-        if (STR_CASE_CMP("G722", currRecCodec.plname) == 0)
-        {
-            // Even though the actual sampling rate for G.722 audio is
-            // 16,000 Hz, the RTP clock rate for the G722 payload format is
-            // 8,000 Hz because that value was erroneously assigned in
-            // RFC 1890 and must remain unchanged for backward compatibility.
-            rtpReceiveFrequency = 8000;
-        }
+    if (_audioCodingModule.ReceiveCodec(currRecCodec) == 0) {
+      if (STR_CASE_CMP("G722", currRecCodec.plname) == 0) {
+        // Even though the actual sampling rate for G.722 audio is
+        // 16,000 Hz, the RTP clock rate for the G722 payload format is
+        // 8,000 Hz because that value was erroneously assigned in
+        // RFC 1890 and must remain unchanged for backward compatibility.
+        rtpReceiveFrequency = 8000;
+      } else if (STR_CASE_CMP("opus", currRecCodec.plname) == 0) {
+        // We are resampling Opus internally to 32,000 Hz until all our
+        // DSP routines can operate at 48,000 Hz, but the RTP clock
+        // rate for the Opus payload format is standardized to 48,000 Hz,
+        // because that is the maximum supported decoding sampling rate.
+        rtpReceiveFrequency = 48000;
+      }
     }
 
     const WebRtc_UWord32 timeStampDiff = timestamp - _playoutTimeStampRTP;
@@ -6499,24 +6503,25 @@
 
     if (timeStampDiff > 0)
     {
-        switch (rtpReceiveFrequency)
-        {
-            case 8000:
-                timeStampDiffMs = timeStampDiff >> 3;
-                break;
-            case 16000:
-                timeStampDiffMs = timeStampDiff >> 4;
-                break;
-            case 32000:
-                timeStampDiffMs = timeStampDiff >> 5;
-                break;
-            default:
-                WEBRTC_TRACE(kTraceWarning, kTraceVoice,
-                             VoEId(_instanceId, _channelId),
-                             "Channel::UpdatePacketDelay() invalid sample "
-                             "rate");
-                timeStampDiffMs = 0;
-                return -1;
+        switch (rtpReceiveFrequency) {
+          case 8000:
+            timeStampDiffMs = static_cast<WebRtc_UWord32>(timeStampDiff >> 3);
+            break;
+          case 16000:
+            timeStampDiffMs = static_cast<WebRtc_UWord32>(timeStampDiff >> 4);
+            break;
+          case 32000:
+            timeStampDiffMs = static_cast<WebRtc_UWord32>(timeStampDiff >> 5);
+            break;
+          case 48000:
+            timeStampDiffMs = static_cast<WebRtc_UWord32>(timeStampDiff / 48);
+            break;
+          default:
+            WEBRTC_TRACE(kTraceWarning, kTraceVoice,
+                         VoEId(_instanceId, _channelId),
+                         "Channel::UpdatePacketDelay() invalid sample rate");
+            timeStampDiffMs = 0;
+            return -1;
         }
         if (timeStampDiffMs > 5000)
         {
@@ -6539,20 +6544,23 @@
         if (sequenceNumber - _previousSequenceNumber == 1)
         {
             WebRtc_UWord16 packetDelayMs = 0;
-            switch (rtpReceiveFrequency)
-            {
-            case 8000:
-                packetDelayMs = (WebRtc_UWord16)(
+            switch (rtpReceiveFrequency) {
+              case 8000:
+                packetDelayMs = static_cast<WebRtc_UWord16>(
                     (timestamp - _previousTimestamp) >> 3);
                 break;
-            case 16000:
-                packetDelayMs = (WebRtc_UWord16)(
+              case 16000:
+                packetDelayMs = static_cast<WebRtc_UWord16>(
                     (timestamp - _previousTimestamp) >> 4);
                 break;
-            case 32000:
-                packetDelayMs = (WebRtc_UWord16)(
+              case 32000:
+                packetDelayMs = static_cast<WebRtc_UWord16>(
                     (timestamp - _previousTimestamp) >> 5);
                 break;
+              case 48000:
+                packetDelayMs = static_cast<WebRtc_UWord16>(
+                    (timestamp - _previousTimestamp) / 48);
+                break;
             }
 
             if (packetDelayMs >= 10 && packetDelayMs <= 60)
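
Each case in channel.cc converts an RTP timestamp difference to milliseconds
as ticks / (clock rate / 1000). At 8, 16 and 32 kHz the per-millisecond tick
counts (8, 16, 32) are powers of two, so the existing cases shift; 48 is not,
so the new case divides. For example, a 20 ms Opus frame advances the
timestamp by 960 ticks, and 960 / 48 = 20. As a standalone sketch:

// Sketch of the ticks-to-milliseconds conversion used above; not part of
// the patch itself.
WebRtc_UWord32 TicksToMs(WebRtc_UWord32 ticks, int rtp_clock_rate_hz) {
  return ticks / static_cast<WebRtc_UWord32>(rtp_clock_rate_hz / 1000);
}
// TicksToMs(960, 48000) == 20, matching the division by 48 above.
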
diff --git a/voice_engine/include/voe_external_media.h b/voice_engine/include/voe_external_media.h
index 50d2d38..a4ef7e2 100644
--- a/voice_engine/include/voe_external_media.h
+++ b/voice_engine/include/voe_external_media.h
@@ -47,9 +47,9 @@
     // given by the |type| parameter. The function should modify the
     // original data and ensure that it is copied back to the |audio10ms|
     // array. The number of samples in the frame cannot be changed.
-    // The sampling frequency will depend upon the codec used. 
+    // The sampling frequency will depend upon the codec used.
     // If |isStereo| is true, audio10ms will contain 16-bit PCM data
-    // samples in interleaved stereo format (L0,R0,L1,R1,…):
+    // samples in interleaved stereo format (L0,R0,L1,R1,...).
     virtual void Process(const int channel, const ProcessingTypes type,
                          WebRtc_Word16 audio10ms[], const int length,
                          const int samplingFreq, const bool isStereo) = 0;
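
A minimal callback honoring the interleaved layout documented above, assuming
|length| counts samples per channel so a stereo frame holds 2 * length values;
the class name is hypothetical and the sketch assumes voe_external_media.h is
included:

// Hypothetical VoEMediaProcess implementation; attenuates in place, since
// the comment above requires modified data to end up back in |audio10ms|.
class HalveGainProcessor : public webrtc::VoEMediaProcess {
 public:
  virtual void Process(const int channel, const webrtc::ProcessingTypes type,
                       WebRtc_Word16 audio10ms[], const int length,
                       const int samplingFreq, const bool isStereo) {
    const int total = isStereo ? 2 * length : length;  // L0,R0,L1,R1,...
    for (int i = 0; i < total; ++i)
      audio10ms[i] = static_cast<WebRtc_Word16>(audio10ms[i] / 2);
  }
};
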
diff --git a/voice_engine/test/auto_test/voe_extended_test.cc b/voice_engine/test/auto_test/voe_extended_test.cc
index 2fcd642..a9d1e29 100644
--- a/voice_engine/test/auto_test/voe_extended_test.cc
+++ b/voice_engine/test/auto_test/voe_extended_test.cc
@@ -2244,7 +2244,7 @@
   ANL();
   ANL();
 
-#ifdef WEBRTC_CODEC_GSMAMR
+#ifdef WEBRTC_CODEC_AMR
   //////////////////////////
   // SetAMREncFormat
 
@@ -2308,8 +2308,8 @@
   ANL();
   AOK();
   ANL();
-#endif // #ifdef WEBRTC_CODEC_GSMAMR
-#ifdef WEBRTC_CODEC_GSMAMRWB
+#endif // #ifdef WEBRTC_CODEC_AMR
+#ifdef WEBRTC_CODEC_AMRWB
   //////////////////////////
   // SetAMRWbEncFormat
 
@@ -2373,7 +2373,7 @@
   ANL();
   AOK();
   ANL();
-#endif // #ifdef WEBRTC_CODEC_GSMAMRWB
+#endif // #ifdef WEBRTC_CODEC_AMRWB
   ///////////////////////////////
   // SetSendCNPayloadType
   TEST(SetSendCNPayloadType);
diff --git a/voice_engine/transmit_mixer.cc b/voice_engine/transmit_mixer.cc
index 6153c5b..d987c4e 100644
--- a/voice_engine/transmit_mixer.cc
+++ b/voice_engine/transmit_mixer.cc
@@ -322,8 +322,14 @@
 
       if (codec.channels == 2)
         stereo_codec_ = true;
-      if (codec.plfreq > _mixingFrequency)
+
+      // TODO(tlegrand): Remove once we have full 48 kHz support in the
+      // Audio Coding Module.
+      if (codec.plfreq > 32000) {
+        _mixingFrequency = 32000;
+      } else if (codec.plfreq > _mixingFrequency) {
         _mixingFrequency = codec.plfreq;
+      }
     }
     channel = sc.GetNextChannel(iterator);
   }
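
The transmit_mixer.cc loop keeps _mixingFrequency at the highest codec rate
seen, but temporarily caps it at 32 kHz so 48 kHz codecs such as Opus do not
push the mixer past what the Audio Coding Module handles today. Assuming the
mixing frequency never starts above 32 kHz, the loop body reduces to this
closed form:

#include <algorithm>

// Closed-form equivalent of the capped update above; sketch only, not code
// from this patch.
int UpdateMixingFrequency(int mixing_hz, int codec_plfreq_hz) {
  // Cap at 32 kHz until the Audio Coding Module supports 48 kHz end to end.
  return std::max(mixing_hz, std::min(codec_plfreq_hz, 32000));
}
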
diff --git a/voice_engine/voe_codec_impl.cc b/voice_engine/voe_codec_impl.cc
index 588ffe6..6414efc 100644
--- a/voice_engine/voe_codec_impl.cc
+++ b/voice_engine/voe_codec_impl.cc
@@ -217,7 +217,7 @@
 {
     WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
                  "SetAMREncFormat(channel=%d, mode=%d)", channel, mode);
-#ifdef WEBRTC_CODEC_GSMAMR
+#ifdef WEBRTC_CODEC_AMR
     if (!_shared->statistics().Initialized())
     {
         _shared->SetLastError(VE_NOT_INITED, kTraceError);
@@ -243,7 +243,7 @@
 {
     WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
                  "SetAMRDecFormat(channel=%i, mode=%i)", channel, mode);
-#ifdef WEBRTC_CODEC_GSMAMR
+#ifdef WEBRTC_CODEC_AMR
     if (!_shared->statistics().Initialized())
     {
         _shared->SetLastError(VE_NOT_INITED, kTraceError);
@@ -271,7 +271,7 @@
                  "SetAMRWbEncFormat(channel=%d, mode=%d)", channel, mode);
     ANDROID_NOT_SUPPORTED(_shared->statistics());
     IPHONE_NOT_SUPPORTED(_shared->statistics());
-#ifdef WEBRTC_CODEC_GSMAMRWB
+#ifdef WEBRTC_CODEC_AMRWB
     if (!_shared->statistics().Initialized())
     {
         _shared->SetLastError(VE_NOT_INITED, kTraceError);
@@ -299,7 +299,7 @@
                  "SetAMRWbDecFormat(channel=%i, mode=%i)", channel, mode);
     ANDROID_NOT_SUPPORTED(_shared->statistics());
     IPHONE_NOT_SUPPORTED(_shared->statistics());
-#ifdef WEBRTC_CODEC_GSMAMRWB
+#ifdef WEBRTC_CODEC_AMRWB
     if (!_shared->statistics().Initialized())
     {
         _shared->SetLastError(VE_NOT_INITED, kTraceError);