H.264 video codec support using OpenH264 (http://www.openh264.org/) for encoding and FFmpeg (https://www.ffmpeg.org/) for decoding.

It works on all platforms except Android and iOS (FFmpeg limitation).

Implemented behind compile time flags, off by default.
The plan is to have it enabled in Chrome (see bug), but not in Chromium/webrtc by default.

Flags to turn it on:
- rtc_use_h264 = true
- ffmpeg_branding = "Chrome" (or other brand that includes H.264 decoder)

Tests using H264:
- video_loopback --codec=H264
- screenshare_loopback --codec=H264
- video_engine_tests (EndToEndTest.SendsAndReceivesH264)

NOTRY=True
BUG=500605, 468365
BUG=https://bugs.chromium.org/p/webrtc/issues/detail?id=5424

Review URL: https://codereview.webrtc.org/1306813009

Cr-Original-Commit-Position: refs/heads/master@{#11390}
Cr-Mirrored-From: https://chromium.googlesource.com/external/webrtc
Cr-Mirrored-Commit: bab934bffe5c4b7e5cd6e8fee2c9c2682002d59b
diff --git a/build/common.gypi b/build/common.gypi
index d483f24..a77420c 100644
--- a/build/common.gypi
+++ b/build/common.gypi
@@ -144,7 +144,7 @@
     # compilation succeeds but |H264DecoderImpl| fails to initialize.
     # CHECK THE OPENH264, FFMPEG AND H.264 LICENSES/PATENTS BEFORE BUILDING.
     # http://www.openh264.org, https://www.ffmpeg.org/
-    'rtc_use_h264%': 0,  # TODO(hbos): enc/dec in follow up CL(s).
+    'rtc_use_h264%': 0,
 
     'conditions': [
       ['build_with_chromium==1', {
diff --git a/build/webrtc.gni b/build/webrtc.gni
index f76aa36..f784ab1 100644
--- a/build/webrtc.gni
+++ b/build/webrtc.gni
@@ -98,7 +98,7 @@
   # compilation succeeds but |H264DecoderImpl| fails to initialize.
   # CHECK THE OPENH264, FFMPEG AND H.264 LICENSES/PATENTS BEFORE BUILDING.
   # http://www.openh264.org, https://www.ffmpeg.org/
-  rtc_use_h264 = false  # TODO(hbos): enc/dec in follow up CL(s).
+  rtc_use_h264 = false
 }
 
 # A second declare_args block, so that declarations within it can
diff --git a/common_video/BUILD.gn b/common_video/BUILD.gn
index 4ef968d..a1686a7 100644
--- a/common_video/BUILD.gn
+++ b/common_video/BUILD.gn
@@ -48,6 +48,7 @@
 
   deps = [
     "..:webrtc_common",
+    "../modules/video_coding:webrtc_h264",
     "../system_wrappers",
   ]
 
diff --git a/common_video/common_video.gyp b/common_video/common_video.gyp
index fe14da1..c40885e 100644
--- a/common_video/common_video.gyp
+++ b/common_video/common_video.gyp
@@ -19,6 +19,7 @@
       ],
       'dependencies': [
         '<(webrtc_root)/common.gyp:webrtc_common',
+        '<(webrtc_root)/modules/modules.gyp:webrtc_h264',
         '<(webrtc_root)/system_wrappers/system_wrappers.gyp:system_wrappers',
       ],
       'direct_dependent_settings': {
diff --git a/common_video/video_frame.cc b/common_video/video_frame.cc
index 3bf59a7..86de823 100644
--- a/common_video/video_frame.cc
+++ b/common_video/video_frame.cc
@@ -19,9 +19,9 @@
 
 namespace webrtc {
 
-// TODO(hbos): The FFmpeg video decoder will require up to 8 bytes, update this
-// when the FFmpeg decoder is added.
-const size_t EncodedImage::kBufferPaddingBytesH264 = 0;
+// FFmpeg's decoder, used by H264DecoderImpl, requires up to 8 bytes padding due
+// to optimized bitstream readers. See avcodec_decode_video2.
+const size_t EncodedImage::kBufferPaddingBytesH264 = 8;
 
 bool EqualPlane(const uint8_t* data1,
                 const uint8_t* data2,
diff --git a/modules/modules.gyp b/modules/modules.gyp
index 9c50d26..b0b3711 100644
--- a/modules/modules.gyp
+++ b/modules/modules.gyp
@@ -45,6 +45,13 @@
           }, {
             'desktop_capture_supported%': 0,
           }],
+          ['rtc_use_h264==1', {
+            'videoprocessor_defines': [
+              'WEBRTC_VIDEOPROCESSOR_H264_TESTS',
+            ],
+          }, {
+            'videoprocessor_defines': [],
+          }],
         ],
       },
       'targets': [
@@ -69,6 +76,7 @@
           ],
           'defines': [
             '<@(audio_coding_defines)',
+            '<@(videoprocessor_defines)',
           ],
           'sources': [
             'audio_coding/test/APITest.cc',
diff --git a/modules/video_coding/BUILD.gn b/modules/video_coding/BUILD.gn
index f321fbc..115ed35 100644
--- a/modules/video_coding/BUILD.gn
+++ b/modules/video_coding/BUILD.gn
@@ -133,15 +133,19 @@
     configs -= [ "//build/config/clang:find_bad_constructs" ]
   }
 
+  defines = []
   deps = [
     "../../system_wrappers",
   ]
 
   if (rtc_use_h264) {
-    # Dependency for sake of compiling and so that variables use_openh264 and
-    # ffmpeg_branding are recognized build arguments (avoid "Build argument has
-    # no effect" error). The variables and dependencies will be used for real as
-    # soon as https://codereview.webrtc.org/1306813009/ lands.
+    defines += [ "WEBRTC_THIRD_PARTY_H264" ]
+    sources += [
+      "codecs/h264/h264_decoder_impl.cc",
+      "codecs/h264/h264_decoder_impl.h",
+      "codecs/h264/h264_encoder_impl.cc",
+      "codecs/h264/h264_encoder_impl.h",
+    ]
     deps += [
       "//third_party/ffmpeg:ffmpeg",
       "//third_party/openh264:encoder",
diff --git a/modules/video_coding/codecs/h264/h264.cc b/modules/video_coding/codecs/h264/h264.cc
index 645ed2c..6f7316b 100644
--- a/modules/video_coding/codecs/h264/h264.cc
+++ b/modules/video_coding/codecs/h264/h264.cc
@@ -11,12 +11,17 @@
 
 #include "webrtc/modules/video_coding/codecs/h264/include/h264.h"
 
+#if defined(WEBRTC_THIRD_PARTY_H264)
+#include "webrtc/modules/video_coding/codecs/h264/h264_encoder_impl.h"
+#include "webrtc/modules/video_coding/codecs/h264/h264_decoder_impl.h"
+#endif
 #if defined(WEBRTC_IOS)
 #include "webrtc/modules/video_coding/codecs/h264/h264_video_toolbox_decoder.h"
 #include "webrtc/modules/video_coding/codecs/h264/h264_video_toolbox_encoder.h"
 #endif
 
 #include "webrtc/base/checks.h"
+#include "webrtc/base/logging.h"
 
 namespace webrtc {
 
@@ -27,9 +32,15 @@
 extern bool IsH264CodecSupportedObjC();
 #endif
 
+// If any H.264 codec is supported (iOS HW or OpenH264/FFmpeg).
 bool IsH264CodecSupported() {
 #if defined(WEBRTC_IOS)
-  return IsH264CodecSupportedObjC();
+  if (IsH264CodecSupportedObjC()) {
+    return true;
+  }
+#endif
+#if defined(WEBRTC_THIRD_PARTY_H264)
+  return true;
 #else
   return false;
 #endif
@@ -38,7 +49,14 @@
 H264Encoder* H264Encoder::Create() {
   RTC_DCHECK(H264Encoder::IsSupported());
 #if defined(WEBRTC_IOS) && defined(WEBRTC_VIDEO_TOOLBOX_SUPPORTED)
-  return new H264VideoToolboxEncoder();
+  if (IsH264CodecSupportedObjC()) {
+    LOG(LS_INFO) << "Creating H264VideoToolboxEncoder.";
+    return new H264VideoToolboxEncoder();
+  }
+#endif
+#if defined(WEBRTC_THIRD_PARTY_H264)
+  LOG(LS_INFO) << "Creating H264EncoderImpl.";
+  return new H264EncoderImpl();
 #else
   RTC_NOTREACHED();
   return nullptr;
@@ -52,7 +70,14 @@
 H264Decoder* H264Decoder::Create() {
   RTC_DCHECK(H264Decoder::IsSupported());
 #if defined(WEBRTC_IOS) && defined(WEBRTC_VIDEO_TOOLBOX_SUPPORTED)
-  return new H264VideoToolboxDecoder();
+  if (IsH264CodecSupportedObjC()) {
+    LOG(LS_INFO) << "Creating H264VideoToolboxDecoder.";
+    return new H264VideoToolboxDecoder();
+  }
+#endif
+#if defined(WEBRTC_THIRD_PARTY_H264)
+  LOG(LS_INFO) << "Creating H264DecoderImpl.";
+  return new H264DecoderImpl();
 #else
   RTC_NOTREACHED();
   return nullptr;
diff --git a/modules/video_coding/codecs/h264/h264.gypi b/modules/video_coding/codecs/h264/h264.gypi
index 78f0be3..0e80561 100644
--- a/modules/video_coding/codecs/h264/h264.gypi
+++ b/modules/video_coding/codecs/h264/h264.gypi
@@ -24,12 +24,19 @@
           ],
         }],
         ['rtc_use_h264==1', {
-          # Dependency for sake of compiling The dependencies will be used for
-          # real as soon as https://codereview.webrtc.org/1306813009/ lands.
+          'defines': [
+            'WEBRTC_THIRD_PARTY_H264',
+          ],
           'dependencies': [
             '<(DEPTH)/third_party/ffmpeg/ffmpeg.gyp:ffmpeg',
             '<(DEPTH)/third_party/openh264/openh264.gyp:openh264_encoder',
           ],
+          'sources': [
+            'h264_decoder_impl.cc',
+            'h264_decoder_impl.h',
+            'h264_encoder_impl.cc',
+            'h264_encoder_impl.h',
+          ],
         }],
       ],
       'sources': [
diff --git a/modules/video_coding/codecs/h264/h264_decoder_impl.cc b/modules/video_coding/codecs/h264/h264_decoder_impl.cc
new file mode 100644
index 0000000..75d2bfa
--- /dev/null
+++ b/modules/video_coding/codecs/h264/h264_decoder_impl.cc
@@ -0,0 +1,362 @@
+/*
+ *  Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ *
+ */
+
+#include "webrtc/modules/video_coding/codecs/h264/h264_decoder_impl.h"
+
+#include <algorithm>
+#include <limits>
+
+extern "C" {
+#include "third_party/ffmpeg/libavcodec/avcodec.h"
+#include "third_party/ffmpeg/libavformat/avformat.h"
+#include "third_party/ffmpeg/libavutil/imgutils.h"
+}  // extern "C"
+
+#include "webrtc/base/checks.h"
+#include "webrtc/base/criticalsection.h"
+#include "webrtc/base/keep_ref_until_done.h"
+#include "webrtc/base/logging.h"
+
+namespace webrtc {
+
+namespace {
+
+const AVPixelFormat kPixelFormat = AV_PIX_FMT_YUV420P;
+const size_t kYPlaneIndex = 0;
+const size_t kUPlaneIndex = 1;
+const size_t kVPlaneIndex = 2;
+
+#if !defined(WEBRTC_CHROMIUM_BUILD)
+
+bool ffmpeg_initialized = false;
+
+// Called by FFmpeg to do mutex operations if initialized using
+// |InitializeFFmpeg|.
+int LockManagerOperation(void** lock, AVLockOp op)
+    EXCLUSIVE_LOCK_FUNCTION() UNLOCK_FUNCTION() {
+  switch (op) {
+    case AV_LOCK_CREATE:
+      *lock = new rtc::CriticalSection();
+      return 0;
+    case AV_LOCK_OBTAIN:
+      static_cast<rtc::CriticalSection*>(*lock)->Enter();
+      return 0;
+    case AV_LOCK_RELEASE:
+      static_cast<rtc::CriticalSection*>(*lock)->Leave();
+      return 0;
+    case AV_LOCK_DESTROY:
+      delete static_cast<rtc::CriticalSection*>(*lock);
+      *lock = nullptr;
+      return 0;
+  }
+  RTC_NOTREACHED() << "Unrecognized AVLockOp.";
+  return -1;
+}
+
+// TODO(hbos): Assumed to be called on a single thread. Should DCHECK that
+// InitializeFFmpeg is only called on one thread or make it thread safe.
+// See https://bugs.chromium.org/p/webrtc/issues/detail?id=5427.
+void InitializeFFmpeg() {
+  if (!ffmpeg_initialized) {
+    if (av_lockmgr_register(LockManagerOperation) < 0) {
+      RTC_NOTREACHED() << "av_lockmgr_register failed.";
+      return;
+    }
+    av_register_all();
+    ffmpeg_initialized = true;
+  }
+}
+
+#endif  // !defined(WEBRTC_CHROMIUM_BUILD)
+
+// Called by FFmpeg when it is done with a frame buffer, see AVGetBuffer2.
+void AVFreeBuffer2(void* opaque, uint8_t* data) {
+  VideoFrame* video_frame = static_cast<VideoFrame*>(opaque);
+  delete video_frame;
+}
+
+// Called by FFmpeg when it needs a frame buffer to store decoded frames in.
+// The VideoFrames returned by FFmpeg at |Decode| originate from here. They are
+// reference counted and freed by FFmpeg using |AVFreeBuffer2|.
+// TODO(hbos): Use a frame pool for better performance instead of create/free.
+// Could be owned by decoder, |static_cast<H264DecoderImpl*>(context->opaque)|.
+// Consider verifying that the buffer was allocated by us to avoid unsafe type
+// cast. See https://bugs.chromium.org/p/webrtc/issues/detail?id=5428.
+int AVGetBuffer2(AVCodecContext* context, AVFrame* av_frame, int flags) {
+  RTC_CHECK_EQ(context->pix_fmt, kPixelFormat);  // Same as in InitDecode.
+  // Necessary capability to be allowed to provide our own buffers.
+  RTC_CHECK(context->codec->capabilities & AV_CODEC_CAP_DR1);
+  // |av_frame->width| and |av_frame->height| are set by FFmpeg. These are the
+  // actual image's dimensions and may be different from |context->width| and
+  // |context->coded_width| due to reordering.
+  int width = av_frame->width;
+  int height = av_frame->height;
+  // See |lowres|, if used the decoder scales the image by 1/2^(lowres). This
+  // has implications on which resolutions are valid, but we don't use it.
+  RTC_CHECK_EQ(context->lowres, 0);
+  // Adjust the |width| and |height| to values acceptable by the decoder.
+  // Without this, FFmpeg may overflow the buffer. If modified, |width| and/or
+  // |height| are larger than the actual image and the image has to be cropped
+  // (top-left corner) after decoding to avoid visible borders to the right and
+  // bottom of the actual image.
+  avcodec_align_dimensions(context, &width, &height);
+
+  RTC_CHECK_GE(width, 0);
+  RTC_CHECK_GE(height, 0);
+  int ret = av_image_check_size(static_cast<unsigned int>(width),
+                                static_cast<unsigned int>(height), 0, nullptr);
+  if (ret < 0) {
+    LOG(LS_ERROR) << "Invalid picture size " << width << "x" << height;
+    return ret;
+  }
+
+  // The video frame is stored in |video_frame|. |av_frame| is FFmpeg's version
+  // of a video frame and will be set up to reference |video_frame|'s buffers.
+  VideoFrame* video_frame = new VideoFrame();
+  int stride_y = width;
+  int stride_uv = (width + 1) / 2;
+  RTC_CHECK_EQ(0, video_frame->CreateEmptyFrame(
+      width, height, stride_y, stride_uv, stride_uv));
+  int total_size = video_frame->allocated_size(kYPlane) +
+                   video_frame->allocated_size(kUPlane) +
+                   video_frame->allocated_size(kVPlane);
+  RTC_DCHECK_EQ(total_size, stride_y * height +
+                (stride_uv + stride_uv) * ((height + 1) / 2));
+
+  // FFmpeg expects the initial allocation to be zero-initialized according to
+  // http://crbug.com/390941.
+  // Using a single |av_frame->buf| - YUV is required to be a continuous blob of
+  // memory. We can zero-initialize with one memset operation for all planes.
+  RTC_DCHECK_EQ(video_frame->buffer(kUPlane),
+      video_frame->buffer(kYPlane) + video_frame->allocated_size(kYPlane));
+  RTC_DCHECK_EQ(video_frame->buffer(kVPlane),
+      video_frame->buffer(kUPlane) + video_frame->allocated_size(kUPlane));
+  memset(video_frame->buffer(kYPlane), 0, total_size);
+
+  av_frame->format = context->pix_fmt;
+  av_frame->reordered_opaque = context->reordered_opaque;
+
+  // Set |av_frame| members as required by FFmpeg.
+  av_frame->data[kYPlaneIndex] = video_frame->buffer(kYPlane);
+  av_frame->linesize[kYPlaneIndex] = video_frame->stride(kYPlane);
+  av_frame->data[kUPlaneIndex] = video_frame->buffer(kUPlane);
+  av_frame->linesize[kUPlaneIndex] = video_frame->stride(kUPlane);
+  av_frame->data[kVPlaneIndex] = video_frame->buffer(kVPlane);
+  av_frame->linesize[kVPlaneIndex] = video_frame->stride(kVPlane);
+  RTC_DCHECK_EQ(av_frame->extended_data, av_frame->data);
+
+  av_frame->buf[0] = av_buffer_create(av_frame->data[kYPlaneIndex],
+                                      total_size,
+                                      AVFreeBuffer2,
+                                      static_cast<void*>(video_frame),
+                                      0);
+  RTC_CHECK(av_frame->buf[0]);
+  return 0;
+}
+
+}  // namespace
+
+H264DecoderImpl::H264DecoderImpl()
+    : decoded_image_callback_(nullptr) {
+}
+
+H264DecoderImpl::~H264DecoderImpl() {
+  Release();
+}
+
+int32_t H264DecoderImpl::InitDecode(const VideoCodec* codec_settings,
+                                    int32_t number_of_cores) {
+  if (codec_settings &&
+      codec_settings->codecType != kVideoCodecH264) {
+    return WEBRTC_VIDEO_CODEC_ERR_PARAMETER;
+  }
+
+  // In Chromium FFmpeg will be initialized outside of WebRTC and we should not
+  // attempt to do so ourselves or it will be initialized twice.
+  // TODO(hbos): Put behind a different flag in case non-chromium project wants
+  // to initialize externally.
+  // See https://bugs.chromium.org/p/webrtc/issues/detail?id=5427.
+#if !defined(WEBRTC_CHROMIUM_BUILD)
+  // Make sure FFmpeg has been initialized.
+  InitializeFFmpeg();
+#endif
+
+  // Release necessary in case of re-initializing.
+  int32_t ret = Release();
+  if (ret != WEBRTC_VIDEO_CODEC_OK)
+    return ret;
+  RTC_DCHECK(!av_context_);
+
+  // Initialize AVCodecContext.
+  av_context_.reset(avcodec_alloc_context3(nullptr));
+
+  av_context_->codec_type = AVMEDIA_TYPE_VIDEO;
+  av_context_->codec_id = AV_CODEC_ID_H264;
+  if (codec_settings) {
+    av_context_->coded_width = codec_settings->width;
+    av_context_->coded_height = codec_settings->height;
+  }
+  av_context_->pix_fmt = kPixelFormat;
+  av_context_->extradata = nullptr;
+  av_context_->extradata_size = 0;
+
+  av_context_->thread_count = 1;
+  av_context_->thread_type = FF_THREAD_SLICE;
+
+  // FFmpeg will get video buffers from our AVGetBuffer2, memory managed by us.
+  av_context_->get_buffer2 = AVGetBuffer2;
+  // get_buffer2 is called with the context, there |opaque| can be used to get a
+  // pointer |this|.
+  av_context_->opaque = this;
+  // Use ref counted frames (av_frame_unref).
+  av_context_->refcounted_frames = 1;  // true
+
+  AVCodec* codec = avcodec_find_decoder(av_context_->codec_id);
+  if (!codec) {
+    // This is an indication that FFmpeg has not been initialized or it has not
+    // been compiled/initialized with the correct set of codecs.
+    LOG(LS_ERROR) << "FFmpeg H.264 decoder not found.";
+    Release();
+    return WEBRTC_VIDEO_CODEC_ERROR;
+  }
+  int res = avcodec_open2(av_context_.get(), codec, nullptr);
+  if (res < 0) {
+    LOG(LS_ERROR) << "avcodec_open2 error: " << res;
+    Release();
+    return WEBRTC_VIDEO_CODEC_ERROR;
+  }
+
+  av_frame_.reset(av_frame_alloc());
+  return WEBRTC_VIDEO_CODEC_OK;
+}
+
+int32_t H264DecoderImpl::Release() {
+  av_context_.reset();
+  av_frame_.reset();
+  return WEBRTC_VIDEO_CODEC_OK;
+}
+
+int32_t H264DecoderImpl::Reset() {
+  if (!IsInitialized())
+    return WEBRTC_VIDEO_CODEC_UNINITIALIZED;
+  InitDecode(nullptr, 1);
+  return WEBRTC_VIDEO_CODEC_OK;
+}
+
+int32_t H264DecoderImpl::RegisterDecodeCompleteCallback(
+    DecodedImageCallback* callback) {
+  decoded_image_callback_ = callback;
+  return WEBRTC_VIDEO_CODEC_OK;
+}
+
+int32_t H264DecoderImpl::Decode(const EncodedImage& input_image,
+                                bool /*missing_frames*/,
+                                const RTPFragmentationHeader* /*fragmentation*/,
+                                const CodecSpecificInfo* codec_specific_info,
+                                int64_t /*render_time_ms*/) {
+  if (!IsInitialized())
+    return WEBRTC_VIDEO_CODEC_UNINITIALIZED;
+  if (!decoded_image_callback_) {
+    LOG(LS_WARNING) << "InitDecode() has been called, but a callback function "
+        "has not been set with RegisterDecodeCompleteCallback()";
+    return WEBRTC_VIDEO_CODEC_UNINITIALIZED;
+  }
+  if (!input_image._buffer || !input_image._length)
+    return WEBRTC_VIDEO_CODEC_ERR_PARAMETER;
+  if (codec_specific_info &&
+      codec_specific_info->codecType != kVideoCodecH264) {
+    return WEBRTC_VIDEO_CODEC_ERR_PARAMETER;
+  }
+
+  // FFmpeg requires padding due to some optimized bitstream readers reading 32
+  // or 64 bits at once and could read over the end. See avcodec_decode_video2.
+  RTC_CHECK_GE(input_image._size, input_image._length +
+                   EncodedImage::GetBufferPaddingBytes(kVideoCodecH264));
+  // "If the first 23 bits of the additional bytes are not 0, then damaged MPEG
+  // bitstreams could cause overread and segfault." See
+  // AV_INPUT_BUFFER_PADDING_SIZE. We'll zero the entire padding just in case.
+  memset(input_image._buffer + input_image._length,
+         0,
+         EncodedImage::GetBufferPaddingBytes(kVideoCodecH264));
+
+  AVPacket packet;
+  av_init_packet(&packet);
+  packet.data = input_image._buffer;
+  if (input_image._length >
+      static_cast<size_t>(std::numeric_limits<int>::max())) {
+    return WEBRTC_VIDEO_CODEC_ERROR;
+  }
+  packet.size = static_cast<int>(input_image._length);
+  av_context_->reordered_opaque = input_image.ntp_time_ms_ * 1000;  // ms -> μs
+
+  int frame_decoded = 0;
+  int result = avcodec_decode_video2(av_context_.get(),
+                                     av_frame_.get(),
+                                     &frame_decoded,
+                                     &packet);
+  if (result < 0) {
+    LOG(LS_ERROR) << "avcodec_decode_video2 error: " << result;
+    return WEBRTC_VIDEO_CODEC_ERROR;
+  }
+  // |result| is number of bytes used, which should be all of them.
+  if (result != packet.size) {
+    LOG(LS_ERROR) << "avcodec_decode_video2 consumed " << result << " bytes "
+        "when " << packet.size << " bytes were expected.";
+    return WEBRTC_VIDEO_CODEC_ERROR;
+  }
+
+  if (!frame_decoded) {
+    LOG(LS_WARNING) << "avcodec_decode_video2 successful but no frame was "
+        "decoded.";
+    return WEBRTC_VIDEO_CODEC_OK;
+  }
+
+  // Obtain the |video_frame| containing the decoded image.
+  VideoFrame* video_frame = static_cast<VideoFrame*>(
+      av_buffer_get_opaque(av_frame_->buf[0]));
+  RTC_DCHECK(video_frame);
+  RTC_CHECK_EQ(av_frame_->data[kYPlane], video_frame->buffer(kYPlane));
+  RTC_CHECK_EQ(av_frame_->data[kUPlane], video_frame->buffer(kUPlane));
+  RTC_CHECK_EQ(av_frame_->data[kVPlane], video_frame->buffer(kVPlane));
+  video_frame->set_timestamp(input_image._timeStamp);
+
+  // The decoded image may be larger than what is supposed to be visible, see
+  // |AVGetBuffer2|'s use of |avcodec_align_dimensions|. This crops the image
+  // without copying the underlying buffer.
+  rtc::scoped_refptr<VideoFrameBuffer> buf = video_frame->video_frame_buffer();
+  if (av_frame_->width != buf->width() || av_frame_->height != buf->height()) {
+    video_frame->set_video_frame_buffer(
+        new rtc::RefCountedObject<WrappedI420Buffer>(
+            av_frame_->width, av_frame_->height,
+            buf->data(kYPlane), buf->stride(kYPlane),
+            buf->data(kUPlane), buf->stride(kUPlane),
+            buf->data(kVPlane), buf->stride(kVPlane),
+            rtc::KeepRefUntilDone(buf)));
+  }
+
+  // Return decoded frame.
+  int32_t ret = decoded_image_callback_->Decoded(*video_frame);
+  // Stop referencing it, possibly freeing |video_frame|.
+  av_frame_unref(av_frame_.get());
+  video_frame = nullptr;
+
+  if (ret) {
+    LOG(LS_WARNING) << "DecodedImageCallback::Decoded returned " << ret;
+    return ret;
+  }
+  return WEBRTC_VIDEO_CODEC_OK;
+}
+
+bool H264DecoderImpl::IsInitialized() const {
+  return av_context_ != nullptr;
+}
+
+}  // namespace webrtc
diff --git a/modules/video_coding/codecs/h264/h264_decoder_impl.h b/modules/video_coding/codecs/h264/h264_decoder_impl.h
new file mode 100644
index 0000000..f5924fc
--- /dev/null
+++ b/modules/video_coding/codecs/h264/h264_decoder_impl.h
@@ -0,0 +1,65 @@
+/*
+ *  Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ *
+ */
+
+#ifndef WEBRTC_MODULES_VIDEO_CODING_CODECS_H264_H264_DECODER_IMPL_H_
+#define WEBRTC_MODULES_VIDEO_CODING_CODECS_H264_H264_DECODER_IMPL_H_
+
+#include "webrtc/modules/video_coding/codecs/h264/include/h264.h"
+
+extern "C" {
+#include "third_party/ffmpeg/libavcodec/avcodec.h"
+}  // extern "C"
+
+#include "webrtc/base/scoped_ptr.h"
+
+namespace webrtc {
+
+struct AVCodecContextDeleter {
+  void operator()(AVCodecContext* ptr) const { avcodec_free_context(&ptr); }
+};
+struct AVFrameDeleter {
+  void operator()(AVFrame* ptr) const { av_frame_free(&ptr); }
+};
+
+class H264DecoderImpl : public H264Decoder {
+ public:
+  H264DecoderImpl();
+  ~H264DecoderImpl() override;
+
+  // If |codec_settings| is NULL it is ignored. If it is not NULL,
+  // |codec_settings->codecType| must be |kVideoCodecH264|.
+  int32_t InitDecode(const VideoCodec* codec_settings,
+                     int32_t number_of_cores) override;
+  int32_t Release() override;
+  int32_t Reset() override;
+
+  int32_t RegisterDecodeCompleteCallback(
+      DecodedImageCallback* callback) override;
+
+  // |missing_frames|, |fragmentation| and |render_time_ms| are ignored.
+  int32_t Decode(const EncodedImage& input_image,
+                 bool /*missing_frames*/,
+                 const RTPFragmentationHeader* /*fragmentation*/,
+                 const CodecSpecificInfo* codec_specific_info = nullptr,
+                 int64_t render_time_ms = -1) override;
+
+ private:
+  bool IsInitialized() const;
+
+  rtc::scoped_ptr<AVCodecContext, AVCodecContextDeleter> av_context_;
+  rtc::scoped_ptr<AVFrame, AVFrameDeleter> av_frame_;
+
+  DecodedImageCallback* decoded_image_callback_;
+};
+
+}  // namespace webrtc
+
+#endif  // WEBRTC_MODULES_VIDEO_CODING_CODECS_H264_H264_DECODER_IMPL_H_
diff --git a/modules/video_coding/codecs/h264/h264_encoder_impl.cc b/modules/video_coding/codecs/h264/h264_encoder_impl.cc
new file mode 100644
index 0000000..281eb3d
--- /dev/null
+++ b/modules/video_coding/codecs/h264/h264_encoder_impl.cc
@@ -0,0 +1,401 @@
+/*
+ *  Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ *
+ */
+
+#include "webrtc/modules/video_coding/codecs/h264/h264_encoder_impl.h"
+
+#include <limits>
+
+#include "third_party/openh264/src/codec/api/svc/codec_api.h"
+#include "third_party/openh264/src/codec/api/svc/codec_app_def.h"
+#include "third_party/openh264/src/codec/api/svc/codec_def.h"
+
+#include "webrtc/base/checks.h"
+#include "webrtc/base/logging.h"
+#include "webrtc/common_video/libyuv/include/webrtc_libyuv.h"
+
+namespace webrtc {
+
+namespace {
+
+// If true, OpenH264 tracing is raised to WELS_LOG_DETAIL in InitEncode.
+const bool kOpenH264EncoderDetailedLogging = false;
+
+// Chooses the OpenH264 threading level from the resolution and the number of
+// available cores. Note the interplay of the branches: 1080p only gets 8
+// threads on machines with more than 8 cores; with 6-8 cores it falls through
+// to the 3-thread branch below.
+int NumberOfThreads(int width, int height, int number_of_cores) {
+  if (width * height >= 1920 * 1080 && number_of_cores > 8) {
+    return 8;  // 8 threads for 1080p on high perf machines.
+  } else if (width * height > 1280 * 960 && number_of_cores >= 6) {
+    return 3;  // 3 threads for 1080p.
+  } else if (width * height > 640 * 480 && number_of_cores >= 3) {
+    return 2;  // 2 threads for qHD/HD.
+  } else {
+    return 1;  // 1 thread for VGA or less.
+  }
+}
+
+}  // namespace
+
+// Maps OpenH264's output frame type to the WebRTC FrameType.
+// |videoFrameTypeSkip| (frame dropped by the encoder) is reported as a delta
+// frame; the caller detects actual drops via a zero-length output instead.
+static FrameType EVideoFrameType_to_FrameType(EVideoFrameType type) {
+  switch (type) {
+    case videoFrameTypeInvalid:
+      return kEmptyFrame;
+    case videoFrameTypeIDR:
+      return kVideoFrameKey;
+    case videoFrameTypeSkip:
+    case videoFrameTypeI:
+    case videoFrameTypeP:
+    case videoFrameTypeIPMixed:
+      return kVideoFrameDelta;
+    default:
+      LOG(LS_WARNING) << "Unknown EVideoFrameType: " << type;
+      return kVideoFrameDelta;
+  }
+}
+
+// Helper method used by H264EncoderImpl::Encode.
+// Copies the encoded bytes from |info| to |encoded_image| and updates the
+// fragmentation information of |frag_header|. The |encoded_image->_buffer| may
+// be deleted and reallocated if a bigger buffer is required.
+//
+// After OpenH264 encoding, the encoded bytes are stored in |info| spread out
+// over a number of layers and "NAL units". Each NAL unit is a fragment starting
+// with the four-byte start code {0,0,0,1}. All of this data (including the
+// start codes) is copied to the |encoded_image->_buffer| and the |frag_header|
+// is updated to point to each fragment, with offsets and lengths set as to
+// exclude the start codes.
+static void RtpFragmentize(EncodedImage* encoded_image,
+                           rtc::scoped_ptr<uint8_t[]>* encoded_image_buffer,
+                           const VideoFrame& frame,
+                           SFrameBSInfo* info,
+                           RTPFragmentationHeader* frag_header) {
+  // Calculate minimum buffer size required to hold encoded data.
+  size_t required_size = 0;
+  size_t fragments_count = 0;
+  for (int layer = 0; layer < info->iLayerNum; ++layer) {
+    const SLayerBSInfo& layerInfo = info->sLayerInfo[layer];
+    for (int nal = 0; nal < layerInfo.iNalCount; ++nal, ++fragments_count) {
+      RTC_CHECK_GE(layerInfo.pNalLengthInByte[nal], 0);
+      // Ensure |required_size| will not overflow.
+      RTC_CHECK_LE(static_cast<size_t>(layerInfo.pNalLengthInByte[nal]),
+                   std::numeric_limits<size_t>::max() - required_size);
+      required_size += layerInfo.pNalLengthInByte[nal];
+    }
+  }
+  if (encoded_image->_size < required_size) {
+    // Increase buffer size. Allocate enough to hold an unencoded image, this
+    // should be more than enough to hold any encoded data of future frames of
+    // the same size (avoiding possible future reallocation due to variations in
+    // required size).
+    encoded_image->_size = CalcBufferSize(kI420, frame.width(), frame.height());
+    if (encoded_image->_size < required_size) {
+      // Encoded data > unencoded data. Allocate required bytes.
+      LOG(LS_WARNING) << "Encoding produced more bytes than the original image "
+                      << "data! Original bytes: " << encoded_image->_size
+                      << ", encoded bytes: " << required_size << ".";
+      encoded_image->_size = required_size;
+    }
+    // |encoded_image_buffer| owns the allocation; resetting it to the new
+    // buffer also frees the previously owned one, if any.
+    encoded_image->_buffer = new uint8_t[encoded_image->_size];
+    encoded_image_buffer->reset(encoded_image->_buffer);
+  }
+
+  // Iterate layers and NAL units, note each NAL unit as a fragment and copy
+  // the data to |encoded_image->_buffer|.
+  const uint8_t start_code[4] = {0, 0, 0, 1};
+  frag_header->VerifyAndAllocateFragmentationHeader(fragments_count);
+  size_t frag = 0;
+  encoded_image->_length = 0;
+  for (int layer = 0; layer < info->iLayerNum; ++layer) {
+    const SLayerBSInfo& layerInfo = info->sLayerInfo[layer];
+    // Iterate NAL units making up this layer, noting fragments.
+    size_t layer_len = 0;
+    for (int nal = 0; nal < layerInfo.iNalCount; ++nal, ++frag) {
+      // Because the sum of all layer lengths, |required_size|, fits in a
+      // |size_t|, we know that any indices in-between will not overflow.
+      RTC_DCHECK_GE(layerInfo.pNalLengthInByte[nal], 4);
+      RTC_DCHECK_EQ(layerInfo.pBsBuf[layer_len+0], start_code[0]);
+      RTC_DCHECK_EQ(layerInfo.pBsBuf[layer_len+1], start_code[1]);
+      RTC_DCHECK_EQ(layerInfo.pBsBuf[layer_len+2], start_code[2]);
+      RTC_DCHECK_EQ(layerInfo.pBsBuf[layer_len+3], start_code[3]);
+      // Offsets/lengths are relative to the start of |_buffer| and exclude
+      // each NAL unit's four-byte start code.
+      frag_header->fragmentationOffset[frag] =
+          encoded_image->_length + layer_len + sizeof(start_code);
+      frag_header->fragmentationLength[frag] =
+          layerInfo.pNalLengthInByte[nal] - sizeof(start_code);
+      layer_len += layerInfo.pNalLengthInByte[nal];
+    }
+    // Copy the entire layer's data (including start codes).
+    memcpy(encoded_image->_buffer + encoded_image->_length,
+           layerInfo.pBsBuf,
+           layer_len);
+    encoded_image->_length += layer_len;
+  }
+}
+
+H264EncoderImpl::H264EncoderImpl()
+    : openh264_encoder_(nullptr),
+      encoded_image_callback_(nullptr) {
+}
+
+// Release() frees the OpenH264 encoder and the encoded-image buffer.
+H264EncoderImpl::~H264EncoderImpl() {
+  Release();
+}
+
+// Creates and initializes an OpenH264 encoder for |codec_settings|. Any
+// previously created encoder is released first, so InitEncode may be called
+// again to reconfigure. See the class declaration for which members of
+// |codec_settings| are used.
+int32_t H264EncoderImpl::InitEncode(const VideoCodec* codec_settings,
+                                    int32_t number_of_cores,
+                                    size_t /*max_payload_size*/) {
+  if (!codec_settings ||
+      codec_settings->codecType != kVideoCodecH264) {
+    return WEBRTC_VIDEO_CODEC_ERR_PARAMETER;
+  }
+  if (codec_settings->maxFramerate == 0)
+    return WEBRTC_VIDEO_CODEC_ERR_PARAMETER;
+  if (codec_settings->width < 1 || codec_settings->height < 1)
+    return WEBRTC_VIDEO_CODEC_ERR_PARAMETER;
+
+  int32_t release_ret = Release();
+  if (release_ret != WEBRTC_VIDEO_CODEC_OK)
+    return release_ret;
+  RTC_DCHECK(!openh264_encoder_);
+
+  // Create encoder.
+  if (WelsCreateSVCEncoder(&openh264_encoder_) != 0) {
+    // Failed to create encoder.
+    LOG(LS_ERROR) << "Failed to create OpenH264 encoder";
+    RTC_DCHECK(!openh264_encoder_);
+    return WEBRTC_VIDEO_CODEC_ERROR;
+  }
+  RTC_DCHECK(openh264_encoder_);
+  if (kOpenH264EncoderDetailedLogging) {
+    int trace_level = WELS_LOG_DETAIL;
+    openh264_encoder_->SetOption(ENCODER_OPTION_TRACE_LEVEL,
+                                 &trace_level);
+  }
+  // else WELS_LOG_DEFAULT is used by default.
+
+  // Keep a copy of the settings; fall back to |startBitrate| when no target
+  // bitrate was provided.
+  codec_settings_ = *codec_settings;
+  if (codec_settings_.targetBitrate == 0)
+    codec_settings_.targetBitrate = codec_settings_.startBitrate;
+
+  // Initialization parameters.
+  // There are two ways to initialize. There is SEncParamBase (cleared with
+  // memset(&p, 0, sizeof(SEncParamBase)) used in Initialize, and SEncParamExt
+  // which is a superset of SEncParamBase (cleared with GetDefaultParams) used
+  // in InitializeExt.
+  SEncParamExt init_params;
+  openh264_encoder_->GetDefaultParams(&init_params);
+  if (codec_settings_.mode == kRealtimeVideo) {
+    init_params.iUsageType = CAMERA_VIDEO_REAL_TIME;
+  } else if (codec_settings_.mode == kScreensharing) {
+    init_params.iUsageType = SCREEN_CONTENT_REAL_TIME;
+  } else {
+    return WEBRTC_VIDEO_CODEC_ERR_PARAMETER;
+  }
+  init_params.iPicWidth = codec_settings_.width;
+  init_params.iPicHeight = codec_settings_.height;
+  // |init_params| uses bit/s, |codec_settings_| uses kbit/s.
+  init_params.iTargetBitrate = codec_settings_.targetBitrate * 1000;
+  init_params.iMaxBitrate = codec_settings_.maxBitrate * 1000;
+  // Rate Control mode
+  init_params.iRCMode = RC_BITRATE_MODE;
+  init_params.fMaxFrameRate = static_cast<float>(codec_settings_.maxFramerate);
+
+  // The following parameters are extension parameters (they're in SEncParamExt,
+  // not in SEncParamBase).
+  init_params.bEnableFrameSkip =
+      codec_settings_.codecSpecific.H264.frameDroppingOn;
+  // |uiIntraPeriod|    - multiple of GOP size
+  // |keyFrameInterval| - number of frames
+  init_params.uiIntraPeriod =
+      codec_settings_.codecSpecific.H264.keyFrameInterval;
+  init_params.uiMaxNalSize = 0;
+  // Threading model: use auto.
+  //  0: auto (dynamic imp. internal encoder)
+  //  1: single thread (default value)
+  // >1: number of threads
+  init_params.iMultipleThreadIdc = NumberOfThreads(init_params.iPicWidth,
+                                                   init_params.iPicHeight,
+                                                   number_of_cores);
+  // The base spatial layer 0 is the only one we use.
+  init_params.sSpatialLayers[0].iVideoWidth        = init_params.iPicWidth;
+  init_params.sSpatialLayers[0].iVideoHeight       = init_params.iPicHeight;
+  init_params.sSpatialLayers[0].fFrameRate         = init_params.fMaxFrameRate;
+  init_params.sSpatialLayers[0].iSpatialBitrate    = init_params.iTargetBitrate;
+  init_params.sSpatialLayers[0].iMaxSpatialBitrate = init_params.iMaxBitrate;
+  // Slice num according to number of threads.
+  init_params.sSpatialLayers[0].sSliceCfg.uiSliceMode = SM_AUTO_SLICE;
+
+  // Initialize.
+  if (openh264_encoder_->InitializeExt(&init_params) != 0) {
+    LOG(LS_ERROR) << "Failed to initialize OpenH264 encoder";
+    Release();
+    return WEBRTC_VIDEO_CODEC_ERROR;
+  }
+  int video_format = EVideoFormatType::videoFormatI420;
+  openh264_encoder_->SetOption(ENCODER_OPTION_DATAFORMAT,
+                               &video_format);
+
+  // Initialize encoded image. Default buffer size: size of unencoded data.
+  // |encoded_image_buffer_| takes ownership of the allocation (see Release).
+  encoded_image_._size = CalcBufferSize(
+      kI420, codec_settings_.width, codec_settings_.height);
+  encoded_image_._buffer = new uint8_t[encoded_image_._size];
+  encoded_image_buffer_.reset(encoded_image_._buffer);
+  encoded_image_._completeFrame = true;
+  encoded_image_._encodedWidth = 0;
+  encoded_image_._encodedHeight = 0;
+  encoded_image_._length = 0;
+  return WEBRTC_VIDEO_CODEC_OK;
+}
+
+// Releases the OpenH264 encoder and the encoded-image buffer. Safe to call
+// repeatedly and on a never-initialized encoder.
+int32_t H264EncoderImpl::Release() {
+  if (openh264_encoder_) {
+    // An Uninitialize failure is only logged; the encoder is destroyed
+    // regardless so that we never leak it.
+    int uninit_ret = openh264_encoder_->Uninitialize();
+    if (uninit_ret != 0) {
+      LOG(LS_WARNING) << "OpenH264 encoder's Uninitialize() returned "
+                      << "unsuccessful: " << uninit_ret;
+    }
+    WelsDestroySVCEncoder(openh264_encoder_);
+    openh264_encoder_ = nullptr;
+  }
+  if (encoded_image_._buffer != nullptr) {
+    // |encoded_image_buffer_| owns the allocation that |encoded_image_._buffer|
+    // points to; resetting it frees the memory.
+    encoded_image_._buffer = nullptr;
+    encoded_image_buffer_.reset();
+  }
+  return WEBRTC_VIDEO_CODEC_OK;
+}
+
+// Sets the sink (not owned) that Encode() delivers encoded images to.
+int32_t H264EncoderImpl::RegisterEncodeCompleteCallback(
+    EncodedImageCallback* callback) {
+  encoded_image_callback_ = callback;
+  return WEBRTC_VIDEO_CODEC_OK;
+}
+
+// Updates the target bitrate (kbit/s) and max framerate on the running
+// encoder. |bitrate| and |framerate| must be non-zero.
+int32_t H264EncoderImpl::SetRates(uint32_t bitrate, uint32_t framerate) {
+  // Parameters are unsigned, so zero is the only invalid value ("<= 0" would
+  // be a tautological comparison).
+  if (bitrate == 0 || framerate == 0) {
+    return WEBRTC_VIDEO_CODEC_ERR_PARAMETER;
+  }
+  codec_settings_.target_bitrate = bitrate;
+  codec_settings_.maxFramerate = framerate;
+
+  // Apply the new bitrate to all spatial layers.
+  // |codec_settings_| uses kbit/s, OpenH264 uses bit/s.
+  SBitrateInfo target_bitrate;
+  memset(&target_bitrate, 0, sizeof(SBitrateInfo));
+  target_bitrate.iLayer = SPATIAL_LAYER_ALL;
+  target_bitrate.iBitrate = codec_settings_.targetBitrate * 1000;
+  openh264_encoder_->SetOption(ENCODER_OPTION_BITRATE,
+                               &target_bitrate);
+  float max_framerate = static_cast<float>(codec_settings_.maxFramerate);
+  openh264_encoder_->SetOption(ENCODER_OPTION_FRAME_RATE,
+                               &max_framerate);
+  return WEBRTC_VIDEO_CODEC_OK;
+}
+
+// Encodes |frame| and delivers the fragmented result to
+// |encoded_image_callback_|. |codec_specific_info| is unused. Returns OK
+// without delivering anything if the encoder skipped the frame (zero-length
+// output) or the caller requested an empty frame.
+int32_t H264EncoderImpl::Encode(
+    const VideoFrame& frame, const CodecSpecificInfo* codec_specific_info,
+    const std::vector<FrameType>* frame_types) {
+  if (!IsInitialized())
+    return WEBRTC_VIDEO_CODEC_UNINITIALIZED;
+  if (frame.IsZeroSize())
+    return WEBRTC_VIDEO_CODEC_ERR_PARAMETER;
+  if (!encoded_image_callback_) {
+    LOG(LS_WARNING) << "InitEncode() has been called, but a callback function "
+                    << "has not been set with RegisterEncodeCompleteCallback()";
+    return WEBRTC_VIDEO_CODEC_UNINITIALIZED;
+  }
+  // Frame size must match what InitEncode configured; dynamic resize is not
+  // supported here.
+  if (frame.width()  != codec_settings_.width ||
+      frame.height() != codec_settings_.height) {
+    LOG(LS_WARNING) << "Encoder initialized for " << codec_settings_.width
+                    << "x" << codec_settings_.height << " but trying to encode "
+                    << frame.width() << "x" << frame.height() << " frame.";
+    return WEBRTC_VIDEO_CODEC_ERR_SIZE;
+  }
+
+  bool force_key_frame = false;
+  if (frame_types != nullptr) {
+    // We only support a single stream.
+    RTC_DCHECK_EQ(frame_types->size(), static_cast<size_t>(1));
+    // Skip frame?
+    if ((*frame_types)[0] == kEmptyFrame) {
+      return WEBRTC_VIDEO_CODEC_OK;
+    }
+    // Force key frame?
+    force_key_frame = (*frame_types)[0] == kVideoFrameKey;
+  }
+  if (force_key_frame) {
+    // API doc says ForceIntraFrame(false) does nothing, but calling this
+    // function forces a key frame regardless of the |bIDR| argument's value.
+    // (If every frame is a key frame we get lag/delays.)
+    openh264_encoder_->ForceIntraFrame(true);
+  }
+
+  // EncodeFrame input.
+  SSourcePicture picture;
+  memset(&picture, 0, sizeof(SSourcePicture));
+  picture.iPicWidth = frame.width();
+  picture.iPicHeight = frame.height();
+  picture.iColorFormat = EVideoFormatType::videoFormatI420;
+  picture.uiTimeStamp = frame.ntp_time_ms();
+  picture.iStride[0] = frame.stride(kYPlane);
+  picture.iStride[1] = frame.stride(kUPlane);
+  picture.iStride[2] = frame.stride(kVPlane);
+  // OpenH264's API is not const-correct; it does not modify the input planes.
+  picture.pData[0] = const_cast<uint8_t*>(frame.buffer(kYPlane));
+  picture.pData[1] = const_cast<uint8_t*>(frame.buffer(kUPlane));
+  picture.pData[2] = const_cast<uint8_t*>(frame.buffer(kVPlane));
+
+  // EncodeFrame output.
+  SFrameBSInfo info;
+  memset(&info, 0, sizeof(SFrameBSInfo));
+
+  // Encode!
+  int enc_ret = openh264_encoder_->EncodeFrame(&picture, &info);
+  if (enc_ret != 0) {
+    LOG(LS_ERROR) << "OpenH264 frame encoding failed, EncodeFrame returned "
+                  << enc_ret << ".";
+    return WEBRTC_VIDEO_CODEC_ERROR;
+  }
+
+  encoded_image_._encodedWidth = frame.width();
+  encoded_image_._encodedHeight = frame.height();
+  encoded_image_._timeStamp = frame.timestamp();
+  encoded_image_.ntp_time_ms_ = frame.ntp_time_ms();
+  encoded_image_.capture_time_ms_ = frame.render_time_ms();
+  encoded_image_._frameType = EVideoFrameType_to_FrameType(info.eFrameType);
+
+  // Split encoded image up into fragments. This also updates |encoded_image_|.
+  RTPFragmentationHeader frag_header;
+  RtpFragmentize(&encoded_image_, &encoded_image_buffer_, frame, &info,
+                 &frag_header);
+
+  // Encoder can skip frames to save bandwidth in which case
+  // |encoded_image_._length| == 0.
+  if (encoded_image_._length > 0) {
+    // Deliver encoded image.
+    CodecSpecificInfo codec_specific;
+    codec_specific.codecType = kVideoCodecH264;
+    encoded_image_callback_->Encoded(encoded_image_,
+                                     &codec_specific,
+                                     &frag_header);
+  }
+  return WEBRTC_VIDEO_CODEC_OK;
+}
+
+// True iff WelsCreateSVCEncoder succeeded and Release() has not since been
+// called.
+bool H264EncoderImpl::IsInitialized() const {
+  return openh264_encoder_ != nullptr;
+}
+
+// Channel parameters (loss/RTT) are not used by this implementation.
+int32_t H264EncoderImpl::SetChannelParameters(
+    uint32_t packet_loss, int64_t rtt) {
+  return WEBRTC_VIDEO_CODEC_OK;
+}
+
+// Not supported; the key frame interval is fixed via |uiIntraPeriod| at
+// InitEncode time. The call is accepted and ignored.
+int32_t H264EncoderImpl::SetPeriodicKeyFrames(bool enable) {
+  return WEBRTC_VIDEO_CODEC_OK;
+}
+
+// Intentionally a no-op.
+void H264EncoderImpl::OnDroppedFrame() {
+}
+
+}  // namespace webrtc
diff --git a/modules/video_coding/codecs/h264/h264_encoder_impl.h b/modules/video_coding/codecs/h264/h264_encoder_impl.h
new file mode 100644
index 0000000..0bf06ac
--- /dev/null
+++ b/modules/video_coding/codecs/h264/h264_encoder_impl.h
@@ -0,0 +1,70 @@
+/*
+ *  Copyright (c) 2015 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ *
+ */
+
+#ifndef WEBRTC_MODULES_VIDEO_CODING_CODECS_H264_H264_ENCODER_IMPL_H_
+#define WEBRTC_MODULES_VIDEO_CODING_CODECS_H264_H264_ENCODER_IMPL_H_
+
+#include "webrtc/modules/video_coding/codecs/h264/include/h264.h"
+
+#include <vector>
+
+#include "webrtc/base/scoped_ptr.h"
+
+// Forward declaration of OpenH264's encoder interface, so this header does
+// not have to include the OpenH264 API headers.
+class ISVCEncoder;
+
+namespace webrtc {
+
+// H.264 encoder implementation backed by OpenH264.
+class H264EncoderImpl : public H264Encoder {
+ public:
+  H264EncoderImpl();
+  ~H264EncoderImpl() override;
+
+  // |max_payload_size| is ignored.
+  // The following members of |codec_settings| are used. The rest are ignored.
+  // - codecType (must be kVideoCodecH264)
+  // - targetBitrate
+  // - maxFramerate
+  // - width
+  // - height
+  int32_t InitEncode(const VideoCodec* codec_settings,
+                     int32_t number_of_cores,
+                     size_t /*max_payload_size*/) override;
+  int32_t Release() override;
+
+  int32_t RegisterEncodeCompleteCallback(
+      EncodedImageCallback* callback) override;
+  int32_t SetRates(uint32_t bitrate, uint32_t framerate) override;
+
+  // The result of encoding - an EncodedImage and RTPFragmentationHeader - are
+  // passed to the encode complete callback.
+  int32_t Encode(const VideoFrame& frame,
+                 const CodecSpecificInfo* codec_specific_info,
+                 const std::vector<FrameType>* frame_types) override;
+
+  // Unsupported / Do nothing.
+  int32_t SetChannelParameters(uint32_t packet_loss, int64_t rtt) override;
+  int32_t SetPeriodicKeyFrames(bool enable) override;
+  void OnDroppedFrame() override;
+
+ private:
+  bool IsInitialized() const;
+
+  // Owned; created with WelsCreateSVCEncoder in InitEncode and destroyed
+  // with WelsDestroySVCEncoder in Release.
+  ISVCEncoder* openh264_encoder_;
+  VideoCodec codec_settings_;
+
+  // |encoded_image_buffer_| owns the memory that |encoded_image_._buffer|
+  // points to.
+  EncodedImage encoded_image_;
+  rtc::scoped_ptr<uint8_t[]> encoded_image_buffer_;
+  // Not owned.
+  EncodedImageCallback* encoded_image_callback_;
+};
+
+}  // namespace webrtc
+
+#endif  // WEBRTC_MODULES_VIDEO_CODING_CODECS_H264_H264_ENCODER_IMPL_H_
diff --git a/modules/video_coding/codecs/h264/include/h264.h b/modules/video_coding/codecs/h264/include/h264.h
index 50ca57c..7f0bbf0 100644
--- a/modules/video_coding/codecs/h264/include/h264.h
+++ b/modules/video_coding/codecs/h264/include/h264.h
@@ -30,6 +30,7 @@
 class H264Encoder : public VideoEncoder {
  public:
   static H264Encoder* Create();
+  // If H.264 is supported (any implementation).
   static bool IsSupported();
 
   ~H264Encoder() override {}
diff --git a/modules/video_coding/codecs/test/videoprocessor.cc b/modules/video_coding/codecs/test/videoprocessor.cc
index c047c51..5b33e82 100644
--- a/modules/video_coding/codecs/test/videoprocessor.cc
+++ b/modules/video_coding/codecs/test/videoprocessor.cc
@@ -225,8 +225,10 @@
   }
 }
 
-void VideoProcessorImpl::FrameEncoded(webrtc::VideoCodecType codec,
-                                      const EncodedImage& encoded_image) {
+void VideoProcessorImpl::FrameEncoded(
+    webrtc::VideoCodecType codec,
+    const EncodedImage& encoded_image,
+    const webrtc::RTPFragmentationHeader* fragmentation) {
   // Timestamp is frame number, so this gives us #dropped frames.
   int num_dropped_from_prev_encode =
       encoded_image._timeStamp - prev_time_stamp_ - 1;
@@ -277,14 +279,18 @@
         assert(false);
     }
   }
+
+  // Make a raw copy of the |encoded_image| buffer.
   size_t copied_buffer_size = encoded_image._length +
                               EncodedImage::GetBufferPaddingBytes(codec);
   rtc::scoped_ptr<uint8_t[]> copied_buffer(new uint8_t[copied_buffer_size]);
   memcpy(copied_buffer.get(), encoded_image._buffer, encoded_image._length);
+  // The image to feed to the decoder.
   EncodedImage copied_image;
   memcpy(&copied_image, &encoded_image, sizeof(copied_image));
   copied_image._size = copied_buffer_size;
   copied_image._buffer = copied_buffer.get();
+
   if (!exclude_this_frame) {
     stat.packets_dropped =
         packet_manipulator_->ManipulatePackets(&copied_image);
@@ -415,14 +421,17 @@
     const EncodedImage& encoded_image,
     const webrtc::CodecSpecificInfo* codec_specific_info,
     const webrtc::RTPFragmentationHeader* fragmentation) {
+  // Forward to parent class.
   RTC_CHECK(codec_specific_info);
   video_processor_->FrameEncoded(codec_specific_info->codecType,
-                                 encoded_image);  // Forward to parent class.
+                                 encoded_image,
+                                 fragmentation);
   return 0;
 }
 int32_t VideoProcessorImpl::VideoProcessorDecodeCompleteCallback::Decoded(
     VideoFrame& image) {
-  video_processor_->FrameDecoded(image);  // Forward to parent class.
+  // Forward to parent class.
+  video_processor_->FrameDecoded(image);
   return 0;
 }
 
diff --git a/modules/video_coding/codecs/test/videoprocessor.h b/modules/video_coding/codecs/test/videoprocessor.h
index 7c83de2..cd1c7b9 100644
--- a/modules/video_coding/codecs/test/videoprocessor.h
+++ b/modules/video_coding/codecs/test/videoprocessor.h
@@ -173,7 +173,8 @@
  private:
   // Invoked by the callback when a frame has completed encoding.
   void FrameEncoded(webrtc::VideoCodecType codec,
-                    const webrtc::EncodedImage& encodedImage);
+                    const webrtc::EncodedImage& encodedImage,
+                    const webrtc::RTPFragmentationHeader* fragmentation);
   // Invoked by the callback when a frame has completed decoding.
   void FrameDecoded(const webrtc::VideoFrame& image);
   // Used for getting a 32-bit integer representing time
diff --git a/modules/video_coding/codecs/test/videoprocessor_integrationtest.cc b/modules/video_coding/codecs/test/videoprocessor_integrationtest.cc
index 7b92616..79d75c9 100644
--- a/modules/video_coding/codecs/test/videoprocessor_integrationtest.cc
+++ b/modules/video_coding/codecs/test/videoprocessor_integrationtest.cc
@@ -15,9 +15,10 @@
 #include "webrtc/modules/video_coding/include/video_codec_interface.h"
 #include "webrtc/modules/video_coding/codecs/test/packet_manipulator.h"
 #include "webrtc/modules/video_coding/codecs/test/videoprocessor.h"
+#include "webrtc/modules/video_coding/codecs/h264/include/h264.h"
 #include "webrtc/modules/video_coding/codecs/vp8/include/vp8.h"
-#include "webrtc/modules/video_coding/codecs/vp9/include/vp9.h"
 #include "webrtc/modules/video_coding/codecs/vp8/include/vp8_common_types.h"
+#include "webrtc/modules/video_coding/codecs/vp9/include/vp9.h"
 #include "webrtc/modules/video_coding/include/video_coding.h"
 #include "webrtc/test/testsupport/fileutils.h"
 #include "webrtc/test/testsupport/frame_reader.h"
@@ -150,7 +151,11 @@
   virtual ~VideoProcessorIntegrationTest() {}
 
   void SetUpCodecConfig() {
-    if (codec_type_ == kVideoCodecVP8) {
+    if (codec_type_ == kVideoCodecH264) {
+      encoder_ = H264Encoder::Create();
+      decoder_ = H264Decoder::Create();
+      VideoCodingModule::Codec(kVideoCodecH264, &codec_settings_);
+    } else if (codec_type_ == kVideoCodecVP8) {
       encoder_ = VP8Encoder::Create();
       decoder_ = VP8Decoder::Create();
       VideoCodingModule::Codec(kVideoCodecVP8, &codec_settings_);
@@ -184,6 +189,12 @@
 
     // These features may be set depending on the test.
     switch (config_.codec_settings->codecType) {
+      case kVideoCodecH264:
+        config_.codec_settings->codecSpecific.H264.frameDroppingOn =
+            frame_dropper_on_;
+        config_.codec_settings->codecSpecific.H264.keyFrameInterval =
+            kBaseKeyFrameInterval;
+        break;
       case kVideoCodecVP8:
         config_.codec_settings->codecSpecific.VP8.errorConcealmentOn =
             error_concealment_on_;
@@ -579,6 +590,37 @@
   rc_metrics[update_index].num_key_frames = num_key_frames;
 }
 
+#if defined(WEBRTC_VIDEOPROCESSOR_H264_TESTS)
+
+// H264: Run with no packet loss and fixed bitrate. Quality should be very high.
+// Note(hbos): The PacketManipulatorImpl code used to simulate packet loss in
+// these unittests appears to drop "packets" in a way that is not compatible
+// with H264. Therefore ProcessXPercentPacketLossH264, X != 0, unittests have
+// not been added.
+TEST_F(VideoProcessorIntegrationTest, Process0PercentPacketLossH264) {
+  // Bitrate and frame rate profile.
+  RateProfile rate_profile;
+  SetRateProfilePars(&rate_profile, 0, 500, 30, 0);
+  rate_profile.frame_index_rate_update[1] = kNbrFramesShort + 1;
+  rate_profile.num_frames = kNbrFramesShort;
+  // Codec/network settings.
+  CodecConfigPars process_settings;
+  SetCodecParameters(&process_settings, kVideoCodecH264, 0.0f, -1, 1, false,
+                     false, true, false);
+  // Metrics for expected quality.
+  QualityMetrics quality_metrics;
+  SetQualityMetrics(&quality_metrics, 35.0, 25.0, 0.93, 0.70);
+  // Metrics for rate control.
+  RateControlMetrics rc_metrics[1];
+  SetRateControlMetrics(rc_metrics, 0, 2, 60, 20, 10, 20, 0, 1);
+  // Run the encode/decode loop and verify quality and rate-control metrics.
+  ProcessFramesAndVerify(quality_metrics,
+                         rate_profile,
+                         process_settings,
+                         rc_metrics);
+}
+
+#endif  // defined(WEBRTC_VIDEOPROCESSOR_H264_TESTS)
+
 // VP9: Run with no packet loss and fixed bitrate. Quality should be very high.
 // One key frame (first frame only) in sequence. Setting |key_frame_interval|
 // to -1 below means no periodic key frames in test.
diff --git a/test/encoder_settings.cc b/test/encoder_settings.cc
index bae1350..64339df 100644
--- a/test/encoder_settings.cc
+++ b/test/encoder_settings.cc
@@ -58,7 +58,9 @@
   VideoReceiveStream::Decoder decoder;
   decoder.payload_type = encoder_settings.payload_type;
   decoder.payload_name = encoder_settings.payload_name;
-  if (encoder_settings.payload_name == "VP8") {
+  if (encoder_settings.payload_name == "H264") {
+    decoder.decoder = VideoDecoder::Create(VideoDecoder::kH264);
+  } else if (encoder_settings.payload_name == "VP8") {
     decoder.decoder = VideoDecoder::Create(VideoDecoder::kVp8);
   } else if (encoder_settings.payload_name == "VP9") {
     decoder.decoder = VideoDecoder::Create(VideoDecoder::kVp9);
diff --git a/video/end_to_end_tests.cc b/video/end_to_end_tests.cc
index d036474..6201e92 100644
--- a/video/end_to_end_tests.cc
+++ b/video/end_to_end_tests.cc
@@ -23,6 +23,7 @@
 #include "webrtc/modules/rtp_rtcp/include/rtp_rtcp.h"
 #include "webrtc/modules/rtp_rtcp/source/byte_io.h"
 #include "webrtc/modules/rtp_rtcp/source/rtcp_utility.h"
+#include "webrtc/modules/video_coding/codecs/h264/include/h264.h"
 #include "webrtc/modules/video_coding/codecs/vp8/include/vp8.h"
 #include "webrtc/modules/video_coding/codecs/vp9/include/vp9.h"
 #include "webrtc/modules/video_coding/include/video_coding_defines.h"
@@ -286,12 +287,15 @@
   RunBaseTest(&test);
 }
 
+#if defined(WEBRTC_END_TO_END_H264_TESTS)
+
 TEST_F(EndToEndTest, SendsAndReceivesH264) {
   class H264Observer : public test::EndToEndTest, public VideoRenderer {
    public:
     H264Observer()
         : EndToEndTest(2 * kDefaultTimeoutMs),
-          fake_encoder_(Clock::GetRealTimeClock()),
+          encoder_(VideoEncoder::Create(VideoEncoder::kH264)),
+          decoder_(H264Decoder::Create()),
           frame_counter_(0) {}
 
     void PerformTest() override {
@@ -305,9 +309,9 @@
         VideoEncoderConfig* encoder_config) override {
       send_config->rtp.nack.rtp_history_ms =
           (*receive_configs)[0].rtp.nack.rtp_history_ms = kNackRtpHistoryMs;
-      send_config->encoder_settings.encoder = &fake_encoder_;
+      send_config->encoder_settings.encoder = encoder_.get();
       send_config->encoder_settings.payload_name = "H264";
-      send_config->encoder_settings.payload_type = kFakeVideoSendPayloadType;
+      send_config->encoder_settings.payload_type = 126;
       encoder_config->streams[0].min_bitrate_bps = 50000;
       encoder_config->streams[0].target_bitrate_bps =
           encoder_config->streams[0].max_bitrate_bps = 2000000;
@@ -318,7 +322,7 @@
           send_config->encoder_settings.payload_type;
       (*receive_configs)[0].decoders[0].payload_name =
           send_config->encoder_settings.payload_name;
-      (*receive_configs)[0].decoders[0].decoder = &fake_decoder_;
+      (*receive_configs)[0].decoders[0].decoder = decoder_.get();
     }
 
     void RenderFrame(const VideoFrame& video_frame,
@@ -331,14 +335,16 @@
     bool IsTextureSupported() const override { return false; }
 
    private:
-    test::FakeH264Decoder fake_decoder_;
-    test::FakeH264Encoder fake_encoder_;
+    rtc::scoped_ptr<webrtc::VideoEncoder> encoder_;
+    rtc::scoped_ptr<webrtc::VideoDecoder> decoder_;
     int frame_counter_;
   } test;
 
   RunBaseTest(&test);
 }
 
+#endif  // defined(WEBRTC_END_TO_END_H264_TESTS)
+
 TEST_F(EndToEndTest, ReceiverUsesLocalSsrc) {
   class SyncRtcpObserver : public test::EndToEndTest {
    public:
diff --git a/video/video_quality_test.cc b/video/video_quality_test.cc
index 6e8c990..5a8c4db 100644
--- a/video/video_quality_test.cc
+++ b/video/video_quality_test.cc
@@ -37,6 +37,7 @@
 namespace webrtc {
 
 static const int kSendStatsPollingIntervalMs = 1000;
+static const int kPayloadTypeH264 = 122;
 static const int kPayloadTypeVP8 = 123;
 static const int kPayloadTypeVP9 = 124;
 
@@ -780,7 +781,10 @@
   CreateSendConfig(num_streams, 0, send_transport);
 
   int payload_type;
-  if (params_.common.codec == "VP8") {
+  if (params_.common.codec == "H264") {
+    encoder_.reset(VideoEncoder::Create(VideoEncoder::kH264));
+    payload_type = kPayloadTypeH264;
+  } else if (params_.common.codec == "VP8") {
     encoder_.reset(VideoEncoder::Create(VideoEncoder::kVp8));
     payload_type = kPayloadTypeVP8;
   } else if (params_.common.codec == "VP9") {
diff --git a/webrtc_tests.gypi b/webrtc_tests.gypi
index 998c4e0..94c9731 100644
--- a/webrtc_tests.gypi
+++ b/webrtc_tests.gypi
@@ -194,6 +194,11 @@
         'webrtc',
       ],
       'conditions': [
+        ['rtc_use_h264==1', {
+          'defines': [
+            'WEBRTC_END_TO_END_H264_TESTS',
+          ],
+        }],
         ['OS=="android"', {
           'dependencies': [
             '<(DEPTH)/testing/android/native_test.gyp:native_test_native_code',