diff --git a/PRESUBMIT.py b/PRESUBMIT.py
index 34fb5e6..6334c05 100644
--- a/PRESUBMIT.py
+++ b/PRESUBMIT.py
@@ -1191,6 +1191,7 @@
                  r"^remoting[\\\/]host[\\\/].*",
                  r"^sandbox[\\\/]linux[\\\/].*",
                  r"^tools[\\\/]",
+                 r"^ui[\\\/]base[\\\/]resource[\\\/]data_pack\.cc$",
                  r"^ui[\\\/]aura[\\\/]bench[\\\/]bench_main\.cc$",
                  r"^ui[\\\/]ozone[\\\/]platform[\\\/]cast[\\\/]",
                  r"^storage[\\\/]browser[\\\/]fileapi[\\\/]" +
diff --git a/cc/scheduler/begin_frame_source.cc b/cc/scheduler/begin_frame_source.cc
index e8cda0e..a5aa4bd 100644
--- a/cc/scheduler/begin_frame_source.cc
+++ b/cc/scheduler/begin_frame_source.cc
@@ -261,7 +261,7 @@
 
   // Send a MISSED begin frame if necessary.
   if (missed_begin_frame_args_.IsValid()) {
-    BeginFrameArgs last_args = obs->LastUsedBeginFrameArgs();
+    const BeginFrameArgs& last_args = obs->LastUsedBeginFrameArgs();
     if (!last_args.IsValid() ||
         (missed_begin_frame_args_.frame_time > last_args.frame_time)) {
       DCHECK((missed_begin_frame_args_.source_id != last_args.source_id) ||
@@ -302,8 +302,19 @@
   missed_begin_frame_args_ = args;
   missed_begin_frame_args_.type = BeginFrameArgs::MISSED;
   std::unordered_set<BeginFrameObserver*> observers(observers_);
-  for (auto* obs : observers)
-    obs->OnBeginFrame(args);
+  for (auto* obs : observers) {
+    // It is possible that the source in which |args| originate changes, or that
+    // our hookup to this source changes, so we have to check for continuity.
+    // See also https://crbug.com/690127 for what may happen without this check.
+    const BeginFrameArgs& last_args = obs->LastUsedBeginFrameArgs();
+    if (!last_args.IsValid() || (args.frame_time > last_args.frame_time)) {
+      DCHECK((args.source_id != last_args.source_id) ||
+             (args.sequence_number > last_args.sequence_number))
+          << "current " << args.AsValue()->ToString() << ", last "
+          << last_args.AsValue()->ToString();
+      obs->OnBeginFrame(args);
+    }
+  }
 }
 
 }  // namespace cc
diff --git a/cc/surfaces/compositor_frame_sink_support.cc b/cc/surfaces/compositor_frame_sink_support.cc
index 0edb96e..6cb2414b 100644
--- a/cc/surfaces/compositor_frame_sink_support.cc
+++ b/cc/surfaces/compositor_frame_sink_support.cc
@@ -169,6 +169,10 @@
   child_frame_sinks_.erase(it);
 }
 
+void CompositorFrameSinkSupport::ForceReclaimResources() {
+  surface_factory_.ClearSurface();
+}
+
 void CompositorFrameSinkSupport::ReferencedSurfacesChanged(
     const LocalSurfaceId& local_surface_id,
     const std::vector<SurfaceId>* active_referenced_surfaces,
diff --git a/cc/surfaces/compositor_frame_sink_support.h b/cc/surfaces/compositor_frame_sink_support.h
index 406c51e..5feef00 100644
--- a/cc/surfaces/compositor_frame_sink_support.h
+++ b/cc/surfaces/compositor_frame_sink_support.h
@@ -56,6 +56,7 @@
   void AddChildFrameSink(const FrameSinkId& child_frame_sink_id);
   void RemoveChildFrameSink(const FrameSinkId& child_frame_sink_id);
   void RequestCopyOfSurface(std::unique_ptr<CopyOutputRequest> request);
+  void ForceReclaimResources();
 
  private:
   // Update surface references with SurfaceManager for current CompositorFrame
diff --git a/cc/surfaces/direct_compositor_frame_sink.cc b/cc/surfaces/direct_compositor_frame_sink.cc
index 10c37ee7..01cb8b1 100644
--- a/cc/surfaces/direct_compositor_frame_sink.cc
+++ b/cc/surfaces/direct_compositor_frame_sink.cc
@@ -29,14 +29,12 @@
                           shared_bitmap_manager),
       frame_sink_id_(frame_sink_id),
       surface_manager_(surface_manager),
-      display_(display),
-      factory_(frame_sink_id, surface_manager, this) {
+      display_(display) {
   DCHECK(thread_checker_.CalledOnValidThread());
   capabilities_.can_force_reclaim_resources = true;
   // Display and DirectCompositorFrameSink share a GL context, so sync
   // points aren't needed when passing resources between them.
   capabilities_.delegated_sync_points_required = false;
-  factory_.set_needs_sync_points(false);
 }
 
 DirectCompositorFrameSink::DirectCompositorFrameSink(
@@ -47,8 +45,7 @@
     : CompositorFrameSink(std::move(vulkan_context_provider)),
       frame_sink_id_(frame_sink_id),
       surface_manager_(surface_manager),
-      display_(display),
-      factory_(frame_sink_id_, surface_manager, this) {
+      display_(display) {
   DCHECK(thread_checker_.CalledOnValidThread());
   capabilities_.can_force_reclaim_resources = true;
 }
@@ -64,14 +61,21 @@
   if (!CompositorFrameSink::BindToClient(client))
     return false;
 
-  surface_manager_->RegisterSurfaceFactoryClient(frame_sink_id_, this);
-
   // We want the Display's output surface to hear about lost context, and since
   // this shares a context with it, we should not be listening for lost context
   // callbacks on the context here.
   if (auto* cp = context_provider())
     cp->SetLostContextCallback(base::Closure());
 
+  constexpr bool is_root = true;
+  constexpr bool handles_frame_sink_id_invalidation = false;
+  support_ = base::MakeUnique<CompositorFrameSinkSupport>(
+      this, surface_manager_, frame_sink_id_, is_root,
+      handles_frame_sink_id_invalidation,
+      capabilities_.delegated_sync_points_required);
+  begin_frame_source_ = base::MakeUnique<ExternalBeginFrameSource>(this);
+  client_->SetBeginFrameSource(begin_frame_source_.get());
+
   // Avoid initializing GL context here, as this should be sharing the
   // Display's context.
   display_->Initialize(this, surface_manager_);
@@ -79,10 +83,12 @@
 }
 
 void DirectCompositorFrameSink::DetachFromClient() {
+  client_->SetBeginFrameSource(nullptr);
+  begin_frame_source_.reset();
+
   // Unregister the SurfaceFactoryClient here instead of the dtor so that only
   // one client is alive for this namespace at any given time.
-  surface_manager_->UnregisterSurfaceFactoryClient(frame_sink_id_);
-  factory_.EvictSurface();
+  support_.reset();
 
   CompositorFrameSink::DetachFromClient();
 }
@@ -96,27 +102,12 @@
   display_->SetLocalSurfaceId(delegated_local_surface_id_,
                               frame.metadata.device_scale_factor);
 
-  factory_.SubmitCompositorFrame(
-      delegated_local_surface_id_, std::move(frame),
-      base::Bind(&DirectCompositorFrameSink::DidDrawCallback,
-                 base::Unretained(this)));
+  support_->SubmitCompositorFrame(delegated_local_surface_id_,
+                                  std::move(frame));
 }
 
 void DirectCompositorFrameSink::ForceReclaimResources() {
-  if (delegated_local_surface_id_.is_valid())
-    factory_.ClearSurface();
-}
-
-void DirectCompositorFrameSink::ReturnResources(
-    const ReturnedResourceArray& resources) {
-  if (client_)
-    client_->ReclaimResources(resources);
-}
-
-void DirectCompositorFrameSink::SetBeginFrameSource(
-    BeginFrameSource* begin_frame_source) {
-  DCHECK(client_);
-  client_->SetBeginFrameSource(begin_frame_source);
+  support_->ForceReclaimResources();
 }
 
 void DirectCompositorFrameSink::DisplayOutputSurfaceLost() {
@@ -136,8 +127,27 @@
   // be drawn.
 }
 
-void DirectCompositorFrameSink::DidDrawCallback() {
+void DirectCompositorFrameSink::DidReceiveCompositorFrameAck() {
   client_->DidReceiveCompositorFrameAck();
 }
 
+void DirectCompositorFrameSink::OnBeginFrame(const BeginFrameArgs& args) {
+  begin_frame_source_->OnBeginFrame(args);
+}
+
+void DirectCompositorFrameSink::ReclaimResources(
+    const ReturnedResourceArray& resources) {
+  client_->ReclaimResources(resources);
+}
+
+void DirectCompositorFrameSink::WillDrawSurface(
+    const LocalSurfaceId& local_surface_id,
+    const gfx::Rect& damage_rect) {
+  // TODO(staraz): Implement this.
+}
+
+void DirectCompositorFrameSink::OnNeedsBeginFrames(bool needs_begin_frame) {
+  support_->SetNeedsBeginFrame(needs_begin_frame);
+}
+
 }  // namespace cc
diff --git a/cc/surfaces/direct_compositor_frame_sink.h b/cc/surfaces/direct_compositor_frame_sink.h
index 609f381..ddc4d1b4 100644
--- a/cc/surfaces/direct_compositor_frame_sink.h
+++ b/cc/surfaces/direct_compositor_frame_sink.h
@@ -8,6 +8,9 @@
 #include "base/macros.h"
 #include "base/threading/thread_checker.h"
 #include "cc/output/compositor_frame_sink.h"
+#include "cc/scheduler/begin_frame_source.h"
+#include "cc/surfaces/compositor_frame_sink_support.h"
+#include "cc/surfaces/compositor_frame_sink_support_client.h"
 #include "cc/surfaces/display_client.h"
 #include "cc/surfaces/surface_factory.h"
 #include "cc/surfaces/surface_factory_client.h"
@@ -23,7 +26,8 @@
 // client's frame being the root surface of the Display.
 class CC_SURFACES_EXPORT DirectCompositorFrameSink
     : public CompositorFrameSink,
-      public SurfaceFactoryClient,
+      public NON_EXPORTED_BASE(CompositorFrameSinkSupportClient),
+      public ExternalBeginFrameSourceClient,
       public NON_EXPORTED_BASE(DisplayClient) {
  public:
   // The underlying Display, SurfaceManager, and SurfaceIdAllocator must outlive
@@ -49,10 +53,6 @@
   void SubmitCompositorFrame(CompositorFrame frame) override;
   void ForceReclaimResources() override;
 
-  // SurfaceFactoryClient implementation.
-  void ReturnResources(const ReturnedResourceArray& resources) override;
-  void SetBeginFrameSource(BeginFrameSource* begin_frame_source) override;
-
   // DisplayClient implementation.
   void DisplayOutputSurfaceLost() override;
   void DisplayWillDrawAndSwap(bool will_draw_and_swap,
@@ -60,7 +60,15 @@
   void DisplayDidDrawAndSwap() override;
 
  private:
-  void DidDrawCallback();
+  // CompositorFrameSinkSupportClient implementation:
+  void DidReceiveCompositorFrameAck() override;
+  void OnBeginFrame(const BeginFrameArgs& args) override;
+  void ReclaimResources(const ReturnedResourceArray& resources) override;
+  void WillDrawSurface(const LocalSurfaceId& local_surface_id,
+                       const gfx::Rect& damage_rect) override;
+
+  // ExternalBeginFrameSourceClient implementation:
+  void OnNeedsBeginFrames(bool needs_begin_frame) override;
 
   // This class is only meant to be used on a single thread.
   base::ThreadChecker thread_checker_;
@@ -70,9 +78,10 @@
   SurfaceManager* surface_manager_;
   SurfaceIdAllocator surface_id_allocator_;
   Display* display_;
-  SurfaceFactory factory_;
   gfx::Size last_swap_frame_size_;
   bool is_lost_ = false;
+  std::unique_ptr<CompositorFrameSinkSupport> support_;
+  std::unique_ptr<ExternalBeginFrameSource> begin_frame_source_;
 
   DISALLOW_COPY_AND_ASSIGN(DirectCompositorFrameSink);
 };
diff --git a/media/audio/win/audio_low_latency_input_win.cc b/media/audio/win/audio_low_latency_input_win.cc
index 22355580..df491b1 100644
--- a/media/audio/win/audio_low_latency_input_win.cc
+++ b/media/audio/win/audio_low_latency_input_win.cc
@@ -4,6 +4,7 @@
 
 #include "media/audio/win/audio_low_latency_input_win.h"
 
+#include <cmath>
 #include <memory>
 
 #include "base/logging.h"
@@ -14,19 +15,44 @@
 #include "media/audio/win/audio_manager_win.h"
 #include "media/audio/win/avrt_wrapper_win.h"
 #include "media/audio/win/core_audio_util_win.h"
+#include "media/base/audio_block_fifo.h"
 #include "media/base/audio_bus.h"
+#include "media/base/channel_layout.h"
+#include "media/base/limits.h"
 
 using base::win::ScopedComPtr;
 using base::win::ScopedCOMInitializer;
 
 namespace media {
+namespace {
+bool IsSupportedFormatForConversion(const WAVEFORMATEX& format) {
+  if (format.nSamplesPerSec < limits::kMinSampleRate ||
+      format.nSamplesPerSec > limits::kMaxSampleRate) {
+    return false;
+  }
+
+  switch (format.wBitsPerSample) {
+    case 8:
+    case 16:
+    case 32:
+      break;
+    default:
+      return false;
+  }
+
+  if (GuessChannelLayout(format.nChannels) == CHANNEL_LAYOUT_UNSUPPORTED) {
+    LOG(ERROR) << "Hardware configuration not supported for audio conversion";
+    return false;
+  }
+
+  return true;
+}
+}
 
 WASAPIAudioInputStream::WASAPIAudioInputStream(AudioManagerWin* manager,
                                                const AudioParameters& params,
                                                const std::string& device_id)
-    : manager_(manager),
-      device_id_(device_id),
-      audio_bus_(media::AudioBus::Create(params)) {
+    : manager_(manager), device_id_(device_id) {
   DCHECK(manager_);
   DCHECK(!device_id_.empty());
 
@@ -123,9 +149,10 @@
   // Initialize the audio stream between the client and the device using
   // shared mode and a lowest possible glitch-free latency.
   hr = InitializeAudioEngine();
+  if (SUCCEEDED(hr) && converter_)
+    open_result_ = OPEN_RESULT_OK_WITH_RESAMPLING;
   ReportOpenResult();  // Report before we assign a value to |opened_|.
   opened_ = SUCCEEDED(hr);
-  DCHECK(open_result_ == OPEN_RESULT_OK || !opened_);
 
   return opened_;
 }
@@ -227,6 +254,9 @@
   // It is also valid to call Close() after Start() has been called.
   Stop();
 
+  if (converter_)
+    converter_->RemoveInput(this);
+
   // Inform the audio manager that we have been closed. This will cause our
   // destruction.
   manager_->ReleaseInputStream(this);
@@ -320,11 +350,19 @@
   //    the selected packet size used in each callback.
   // 2) The selected buffer size is larger than the recorded buffer size in
   //    each event.
-  size_t buffer_frame_index = 0;
-  size_t capture_buffer_size =
-      std::max(2 * endpoint_buffer_size_frames_ * frame_size_,
-               2 * packet_size_frames_ * frame_size_);
-  std::unique_ptr<uint8_t[]> capture_buffer(new uint8_t[capture_buffer_size]);
+  // In the case where no resampling is required, a single buffer should be
+  // enough but in case we get buffers that don't match exactly, we'll go with
+  // two. Same applies if we need to resample and the buffer ratio is perfect.
+  // However if the buffer ratio is imperfect, we will need 3 buffers to safely
+  // be able to buffer up data in cases where a conversion requires two audio
+  // buffers (and we need to be able to write to the third one).
+  DCHECK(!fifo_);
+  const int buffers_required =
+      converter_ && imperfect_buffer_size_conversion_ ? 3 : 2;
+  fifo_.reset(new AudioBlockFifo(format_.nChannels, packet_size_frames_,
+                                 buffers_required));
+
+  DVLOG(1) << "AudioBlockFifo needs " << buffers_required << " buffers";
 
   LARGE_INTEGER now_count = {};
   bool recording = true;
@@ -379,19 +417,12 @@
         }
 
         if (num_frames_to_read != 0) {
-          size_t pos = buffer_frame_index * frame_size_;
-          size_t num_bytes = num_frames_to_read * frame_size_;
-          DCHECK_GE(capture_buffer_size, pos + num_bytes);
-
           if (flags & AUDCLNT_BUFFERFLAGS_SILENT) {
-            // Clear out the local buffer since silence is reported.
-            memset(&capture_buffer[pos], 0, num_bytes);
+            fifo_->PushSilence(num_frames_to_read);
           } else {
-            // Copy captured data from audio engine buffer to local buffer.
-            memcpy(&capture_buffer[pos], data_ptr, num_bytes);
+            fifo_->Push(data_ptr, num_frames_to_read,
+                        format_.wBitsPerSample / 8);
           }
-
-          buffer_frame_index += num_frames_to_read;
         }
 
         hr = audio_capture_client_->ReleaseBuffer(num_frames_to_read);
@@ -410,7 +441,7 @@
                     first_audio_frame_timestamp) /
                    10000.0) *
                           ms_to_frame_count_ +
-                      buffer_frame_index - num_frames_to_read;
+                      fifo_->GetAvailableFrames() - num_frames_to_read;
 
         // Get a cached AGC volume level which is updated once every second
         // on the audio manager thread. Note that, |volume| is also updated
@@ -420,31 +451,22 @@
         // Deliver captured data to the registered consumer using a packet
         // size which was specified at construction.
         uint32_t delay_frames = static_cast<uint32_t>(audio_delay_frames + 0.5);
-        while (buffer_frame_index >= packet_size_frames_) {
-          // Copy data to audio bus to match the OnData interface.
-          uint8_t* audio_data =
-              reinterpret_cast<uint8_t*>(capture_buffer.get());
-          audio_bus_->FromInterleaved(audio_data, audio_bus_->frames(),
-                                      format_.wBitsPerSample / 8);
+        while (fifo_->available_blocks()) {
+          if (converter_) {
+            if (imperfect_buffer_size_conversion_ &&
+                fifo_->available_blocks() == 1) {
+              // Special case. We need to buffer up more audio before we can
+              // convert or else we'll suffer an underrun.
+              break;
+            }
+            converter_->ConvertWithDelay(delay_frames, convert_bus_.get());
+            sink_->OnData(this, convert_bus_.get(), delay_frames * frame_size_,
+                          volume);
+          } else {
+            sink_->OnData(this, fifo_->Consume(), delay_frames * frame_size_,
+                          volume);
+          }
 
-          // Deliver data packet, delay estimation and volume level to
-          // the user.
-          sink_->OnData(this, audio_bus_.get(), delay_frames * frame_size_,
-                        volume);
-
-          // Store parts of the recorded data which can't be delivered
-          // using the current packet size. The stored section will be used
-          // either in the next while-loop iteration or in the next
-          // capture event.
-          // TODO(tommi): If this data will be used in the next capture
-          // event, we will report incorrect delay estimates because
-          // we'll use the one for the captured data that time around
-          // (i.e. in the future).
-          memmove(&capture_buffer[0], &capture_buffer[packet_size_bytes_],
-                  (buffer_frame_index - packet_size_frames_) * frame_size_);
-
-          DCHECK_GE(buffer_frame_index, packet_size_frames_);
-          buffer_frame_index -= packet_size_frames_;
           if (delay_frames > packet_size_frames_) {
             delay_frames -= packet_size_frames_;
           } else {
@@ -469,6 +491,8 @@
   if (mm_task && !avrt::AvRevertMmThreadCharacteristics(mm_task)) {
     PLOG(WARNING) << "Failed to disable MMCSS";
   }
+
+  fifo_.reset();
 }
 
 void WASAPIAudioInputStream::HandleError(HRESULT err) {
@@ -587,14 +611,74 @@
   // application and the floating-point samples that the engine uses for its
   // internal processing. However, the format for an application stream
   // typically must have the same number of channels and the same sample
   // rate as the stream format used by the device.
   // Many audio devices support both PCM and non-PCM stream formats. However,
   // the audio engine can mix only PCM streams.
   base::win::ScopedCoMem<WAVEFORMATEX> closest_match;
   HRESULT hr = audio_client_->IsFormatSupported(AUDCLNT_SHAREMODE_SHARED,
                                                 &format_, &closest_match);
-  DLOG_IF(ERROR, hr == S_FALSE) << "Format is not supported "
-                                << "but a closest match exists.";
+  DLOG_IF(ERROR, hr == S_FALSE)
+      << "Format is not supported but a closest match exists.";
+
+  if (hr == S_FALSE && IsSupportedFormatForConversion(*closest_match.get())) {
+    DVLOG(1) << "Audio capture data conversion needed.";
+    // Ideally, we want a 1:1 ratio between the buffers we get and the buffers
+    // we give to OnData so that each buffer we receive from the OS can be
+    // directly converted to a buffer that matches with what was asked for.
+    const double buffer_ratio =
+        format_.nSamplesPerSec / static_cast<double>(packet_size_frames_);
+    double new_frames_per_buffer = closest_match->nSamplesPerSec / buffer_ratio;
+
+    const auto input_layout = GuessChannelLayout(closest_match->nChannels);
+    DCHECK_NE(CHANNEL_LAYOUT_UNSUPPORTED, input_layout);
+    const auto output_layout = GuessChannelLayout(format_.nChannels);
+    DCHECK_NE(CHANNEL_LAYOUT_UNSUPPORTED, output_layout);
+
+    const AudioParameters input(AudioParameters::AUDIO_PCM_LOW_LATENCY,
+                                input_layout, closest_match->nSamplesPerSec,
+                                closest_match->wBitsPerSample,
+                                static_cast<int>(new_frames_per_buffer));
+
+    const AudioParameters output(AudioParameters::AUDIO_PCM_LOW_LATENCY,
+                                 output_layout, format_.nSamplesPerSec,
+                                 format_.wBitsPerSample, packet_size_frames_);
+
+    converter_.reset(new AudioConverter(input, output, false));
+    converter_->AddInput(this);
+    converter_->PrimeWithSilence();
+    convert_bus_ = AudioBus::Create(output);
+
+    // Now change the format we're going to ask for to better match with what
+    // the OS can provide.  If we succeed in opening the stream with these
+    // params, we can take care of the required resampling.
+    format_.wBitsPerSample = closest_match->wBitsPerSample;
+    format_.nSamplesPerSec = closest_match->nSamplesPerSec;
+    format_.nChannels = closest_match->nChannels;
+    format_.nBlockAlign = (format_.wBitsPerSample / 8) * format_.nChannels;
+    format_.nAvgBytesPerSec = format_.nSamplesPerSec * format_.nBlockAlign;
+    DVLOG(1) << "Will convert audio from: \nbits: " << format_.wBitsPerSample
+             << "\nsample rate: " << format_.nSamplesPerSec
+             << "\nchannels: " << format_.nChannels
+             << "\nblock align: " << format_.nBlockAlign
+             << "\navg bytes per sec: " << format_.nAvgBytesPerSec;
+
+    // Update our packet size assumptions based on the new format.
+    const auto new_bytes_per_buffer =
+        static_cast<int>(new_frames_per_buffer) * format_.nBlockAlign;
+    packet_size_frames_ = new_bytes_per_buffer / format_.nBlockAlign;
+    packet_size_bytes_ = new_bytes_per_buffer;
+    frame_size_ = format_.nBlockAlign;
+    ms_to_frame_count_ = static_cast<double>(format_.nSamplesPerSec) / 1000.0;
+
+    imperfect_buffer_size_conversion_ =
+        std::modf(new_frames_per_buffer, &new_frames_per_buffer) != 0.0;
+    DVLOG_IF(1, imperfect_buffer_size_conversion_)
+        << "Audio capture data conversion: Need to inject fifo";
+
+    // Indicate that we're good to go with a close match.
+    hr = S_OK;
+  }
+
   return (hr == S_OK);
 }
 
@@ -738,4 +822,10 @@
                             OPEN_RESULT_MAX + 1);
 }
 
+double WASAPIAudioInputStream::ProvideInput(AudioBus* audio_bus,
+                                            uint32_t frames_delayed) {
+  fifo_->Consume()->CopyTo(audio_bus);
+  return 1.0;
+}
+
 }  // namespace media
diff --git a/media/audio/win/audio_low_latency_input_win.h b/media/audio/win/audio_low_latency_input_win.h
index 09aeaf99..c371beda 100644
--- a/media/audio/win/audio_low_latency_input_win.h
+++ b/media/audio/win/audio_low_latency_input_win.h
@@ -75,11 +75,13 @@
 #include "base/win/scoped_comptr.h"
 #include "base/win/scoped_handle.h"
 #include "media/audio/agc_audio_stream.h"
+#include "media/base/audio_converter.h"
 #include "media/base/audio_parameters.h"
 #include "media/base/media_export.h"
 
 namespace media {
 
+class AudioBlockFifo;
 class AudioBus;
 class AudioManagerWin;
 
@@ -87,6 +89,7 @@
 class MEDIA_EXPORT WASAPIAudioInputStream
     : public AgcAudioStream<AudioInputStream>,
       public base::DelegateSimpleThread::Delegate,
+      public AudioConverter::InputCallback,
       NON_EXPORTED_BASE(public base::NonThreadSafe) {
  public:
   // The ctor takes all the usual parameters, plus |manager| which is the
@@ -125,6 +128,9 @@
   HRESULT InitializeAudioEngine();
   void ReportOpenResult() const;
 
+  // AudioConverter::InputCallback implementation.
+  double ProvideInput(AudioBus* audio_bus, uint32_t frames_delayed) override;
+
   // Used to track down where we fail during initialization which at the
   // moment seems to be happening frequently and we're not sure why.
   // The reason might be expected (e.g. trying to open "default" on a machine
@@ -146,7 +152,8 @@
     OPEN_RESULT_SET_EVENT_HANDLE = 11,
     OPEN_RESULT_NO_CAPTURE_CLIENT = 12,
     OPEN_RESULT_NO_AUDIO_VOLUME = 13,
-    OPEN_RESULT_MAX = OPEN_RESULT_NO_AUDIO_VOLUME
+    OPEN_RESULT_OK_WITH_RESAMPLING = 14,
+    OPEN_RESULT_MAX = OPEN_RESULT_OK_WITH_RESAMPLING
   };
 
   // Our creator, the audio manager needs to be notified when we close.
@@ -233,16 +240,21 @@
   // This event will be signaled when capturing shall stop.
   base::win::ScopedHandle stop_capture_event_;
 
-  // Extra audio bus used for storage of deinterleaved data for the OnData
-  // callback.
-  std::unique_ptr<media::AudioBus> audio_bus_;
-
   // Never set it through external API. Only used when |device_id_| ==
   // kLoopbackWithMuteDeviceId.
   // True, if we have muted the system audio for the stream capturing, and
   // indicates that we need to unmute the system audio when stopping capturing.
   bool mute_done_ = false;
 
+  // Used for the captured audio on the callback thread.
+  std::unique_ptr<AudioBlockFifo> fifo_;
+
+  // If the caller requires resampling (should only be in exceptional cases and
+  // ideally, never), we support using an AudioConverter.
+  std::unique_ptr<AudioConverter> converter_;
+  std::unique_ptr<AudioBus> convert_bus_;
+  bool imperfect_buffer_size_conversion_ = false;
+
   DISALLOW_COPY_AND_ASSIGN(WASAPIAudioInputStream);
 };
 
diff --git a/media/audio/win/audio_low_latency_input_win_unittest.cc b/media/audio/win/audio_low_latency_input_win_unittest.cc
index 7e16bae23..f546ad3 100644
--- a/media/audio/win/audio_low_latency_input_win_unittest.cc
+++ b/media/audio/win/audio_low_latency_input_win_unittest.cc
@@ -4,10 +4,10 @@
 
 #include "media/audio/win/audio_low_latency_input_win.h"
 
-#include <windows.h>
 #include <mmsystem.h>
 #include <stddef.h>
 #include <stdint.h>
+#include <windows.h>
 
 #include <memory>
 
@@ -18,6 +18,7 @@
 #include "base/path_service.h"
 #include "base/run_loop.h"
 #include "base/single_thread_task_runner.h"
+#include "base/strings/stringprintf.h"
 #include "base/test/test_timeouts.h"
 #include "base/win/scoped_com_initializer.h"
 #include "media/audio/audio_device_description.h"
@@ -64,9 +65,7 @@
   int num_received_audio_frames() const { return num_received_audio_frames_; }
 
   // Waits until OnData() is called on another thread.
-  void WaitForData() {
-    data_event_.Wait();
-  }
+  void WaitForData() { data_event_.Wait(); }
 
   void OnData(AudioInputStream* stream,
               const AudioBus* src,
@@ -78,9 +77,7 @@
     data_event_.Signal();
   }
 
-  void OnError(AudioInputStream* stream) override {
-    error_ = true;
-  }
+  void OnError(AudioInputStream* stream) override { error_ = true; }
 
  private:
   int num_received_audio_frames_;
@@ -108,7 +105,7 @@
     binary_file_ = base::OpenFile(file_path, "wb");
     DLOG_IF(ERROR, !binary_file_) << "Failed to open binary PCM data file.";
     VLOG(0) << ">> Output file: " << file_path.value() << " has been created.";
-    VLOG(0) << "bits_per_sample_:" << bits_per_sample_;
+    VLOG(0) << ">> bits_per_sample_:" << bits_per_sample_;
   }
 
   ~WriteToFileAudioSink() override {
@@ -177,12 +174,17 @@
     frames_per_buffer_ = default_params_.frames_per_buffer();
   }
 
+  AudioInputStreamWrapper(AudioManager* audio_manager,
+                          const AudioParameters& default_params)
+      : audio_man_(audio_manager), default_params_(default_params) {
+    EXPECT_EQ(format(), AudioParameters::AUDIO_PCM_LOW_LATENCY);
+    frames_per_buffer_ = default_params_.frames_per_buffer();
+  }
+
   ~AudioInputStreamWrapper() {}
 
   // Creates AudioInputStream object using default parameters.
-  AudioInputStream* Create() {
-    return CreateInputStream();
-  }
+  AudioInputStream* Create() { return CreateInputStream(); }
 
   // Creates AudioInputStream object using non-default parameters where the
   // frame size is modified.
@@ -225,8 +227,7 @@
 
 class ScopedAudioInputStream {
  public:
-  explicit ScopedAudioInputStream(AudioInputStream* stream)
-      : stream_(stream) {}
+  explicit ScopedAudioInputStream(AudioInputStream* stream) : stream_(stream) {}
 
   ~ScopedAudioInputStream() {
     if (stream_)
@@ -239,9 +240,7 @@
     stream_ = NULL;
   }
 
-  AudioInputStream* operator->() {
-    return stream_;
-  }
+  AudioInputStream* operator->() { return stream_; }
 
   AudioInputStream* get() const { return stream_; }
 
@@ -404,8 +403,8 @@
   count = 0;
   ais.Reset(aisw.Create(2 * frames_per_buffer_10ms));
   EXPECT_TRUE(ais->Open());
-  bytes_per_packet = aisw.channels() * aisw.frames_per_buffer() *
-      (aisw.bits_per_sample() / 8);
+  bytes_per_packet =
+      aisw.channels() * aisw.frames_per_buffer() * (aisw.bits_per_sample() / 8);
 
   {
     base::RunLoop run_loop;
@@ -425,8 +424,8 @@
   count = 0;
   ais.Reset(aisw.Create(frames_per_buffer_10ms / 2));
   EXPECT_TRUE(ais->Open());
-  bytes_per_packet = aisw.channels() * aisw.frames_per_buffer() *
-    (aisw.bits_per_sample() / 8);
+  bytes_per_packet =
+      aisw.channels() * aisw.frames_per_buffer() * (aisw.bits_per_sample() / 8);
 
   {
     base::RunLoop run_loop;
@@ -483,11 +482,11 @@
   // Name of the output PCM file containing captured data. The output file
   // will be stored in the directory containing 'media_unittests.exe'.
   // Example of full name: \src\build\Debug\out_stereo_10sec.pcm.
-  const char* file_name = "out_stereo_10sec.pcm";
+  const char* file_name = "out_10sec.pcm";
 
   AudioInputStreamWrapper aisw(audio_manager_.get());
   ScopedAudioInputStream ais(aisw.Create());
-  EXPECT_TRUE(ais->Open());
+  ASSERT_TRUE(ais->Open());
 
   VLOG(0) << ">> Sample rate: " << aisw.sample_rate() << " [Hz]";
   WriteToFileAudioSink file_sink(file_name, aisw.bits_per_sample());
@@ -499,4 +498,63 @@
   ais.Close();
 }
 
+TEST_F(WinAudioInputTest, DISABLED_WASAPIAudioInputStreamResampleToFile) {
+  ABORT_AUDIO_TEST_IF_NOT(HasCoreAudioAndInputDevices(audio_manager_.get()));
+
+  // This is basically the same test as WASAPIAudioInputStreamRecordToFile
+  // except it forces use of a different sample rate than is preferred by
+  // the hardware.  This functionality is offered while we still have code
+  // that doesn't ask the lower levels for what the preferred audio parameters
+  // are (and previously depended on the old Wave API to do this automatically).
+
+  struct TestData {
+    const int rate;
+    const int frames;
+    ChannelLayout layout;
+  } tests[] = {
+      {8000, 80, CHANNEL_LAYOUT_MONO},
+      {8000, 80, CHANNEL_LAYOUT_STEREO},
+      {44100, 441, CHANNEL_LAYOUT_MONO},
+      {44100, 1024, CHANNEL_LAYOUT_STEREO},
+  };
+
+  for (const auto& test : tests) {
+    AudioParameters params;
+    ASSERT_TRUE(SUCCEEDED(CoreAudioUtil::GetPreferredAudioParameters(
+        AudioDeviceDescription::kDefaultDeviceId, false, &params)));
+
+    VLOG(0) << ">> Hardware sample rate: " << params.sample_rate() << " [Hz]";
+    VLOG(0) << ">> Hardware channel layout: "
+            << ChannelLayoutToString(params.channel_layout());
+
+    // Pick a somewhat difficult sample rate to convert to.
+    // If the sample rate is 8kHz, 16kHz, 32kHz, 48kHz etc, we convert to
+    // 44.1kHz.
+    // Otherwise (e.g. 44.1kHz, 22.05kHz etc) we convert to 48kHz.
+    const int hw_sample_rate = params.sample_rate();
+    params.Reset(params.format(), test.layout, test.rate,
+                 params.bits_per_sample(), test.frames);
+
+    std::string file_name(base::StringPrintf(
+        "resampled_10sec_%i_to_%i_%s.pcm", hw_sample_rate, params.sample_rate(),
+        ChannelLayoutToString(params.channel_layout())));
+
+    AudioInputStreamWrapper aisw(audio_manager_.get(), params);
+    ScopedAudioInputStream ais(aisw.Create());
+    ASSERT_TRUE(ais->Open());
+
+    VLOG(0) << ">> Resampled rate will be: " << aisw.sample_rate() << " [Hz]";
+    VLOG(0) << ">> New layout will be: "
+            << ChannelLayoutToString(params.channel_layout());
+    WriteToFileAudioSink file_sink(file_name.c_str(), aisw.bits_per_sample());
+    VLOG(0) << ">> Speak into the default microphone while recording.";
+    ais->Start(&file_sink);
+    base::PlatformThread::Sleep(TestTimeouts::action_timeout());
+    // base::PlatformThread::Sleep(base::TimeDelta::FromMinutes(10));
+    ais->Stop();
+    VLOG(0) << ">> Recording has stopped.";
+    ais.Close();
+  }
+}
+
 }  // namespace media
diff --git a/media/base/audio_block_fifo.cc b/media/base/audio_block_fifo.cc
index 4313cad7..be15fb11 100644
--- a/media/base/audio_block_fifo.cc
+++ b/media/base/audio_block_fifo.cc
@@ -27,38 +27,11 @@
 void AudioBlockFifo::Push(const void* source,
                           int frames,
                           int bytes_per_sample) {
-  DCHECK(source);
-  DCHECK_GT(frames, 0);
-  DCHECK_GT(bytes_per_sample, 0);
-  DCHECK_LT(available_blocks_, static_cast<int>(audio_blocks_.size()));
-  CHECK_LE(frames, GetUnfilledFrames());
+  PushInternal(source, frames, bytes_per_sample);
+}
 
-  const uint8_t* source_ptr = static_cast<const uint8_t*>(source);
-  int frames_to_push = frames;
-  while (frames_to_push) {
-    // Get the current write block.
-    AudioBus* current_block = audio_blocks_[write_block_];
-
-    // Figure out what segment sizes we need when adding the new content to
-    // the FIFO.
-    const int push_frames =
-        std::min(block_frames_ - write_pos_, frames_to_push);
-
-    // Deinterleave the content to the FIFO and update the |write_pos_|.
-    current_block->FromInterleavedPartial(
-        source_ptr, write_pos_, push_frames, bytes_per_sample);
-    write_pos_ = (write_pos_ + push_frames) % block_frames_;
-    if (!write_pos_) {
-      // The current block is completely filled, increment |write_block_| and
-      // |available_blocks_|.
-      write_block_ = (write_block_ + 1) % audio_blocks_.size();
-      ++available_blocks_;
-    }
-
-    source_ptr += push_frames * bytes_per_sample * channels_;
-    frames_to_push -= push_frames;
-    DCHECK_GE(frames_to_push, 0);
-  }
+void AudioBlockFifo::PushSilence(int frames) {
+  PushInternal(nullptr, frames, 0);
 }
 
 const AudioBus* AudioBlockFifo::Consume() {
@@ -103,8 +76,7 @@
     return;
 
   std::rotate(audio_blocks_.begin() + read_block_,
-              audio_blocks_.begin() + original_size,
-              audio_blocks_.end());
+              audio_blocks_.begin() + original_size, audio_blocks_.end());
 
   // Update the write pointer if it is on top of the new inserted blocks.
   if (write_block_ >= read_block_)
@@ -117,4 +89,47 @@
   DCHECK_LT(write_block_, static_cast<int>(audio_blocks_.size()));
 }
 
+void AudioBlockFifo::PushInternal(const void* source,
+                                  int frames,
+                                  int bytes_per_sample) {
+  // |source| may be nullptr if bytes_per_sample is 0. In that case,
+  // we inject silence.
+  DCHECK((source && bytes_per_sample > 0) || (!source && !bytes_per_sample));
+  DCHECK_GT(frames, 0);
+  DCHECK_LT(available_blocks_, static_cast<int>(audio_blocks_.size()));
+  CHECK_LE(frames, GetUnfilledFrames());
+
+  const uint8_t* source_ptr = static_cast<const uint8_t*>(source);
+  int frames_to_push = frames;
+  while (frames_to_push) {
+    // Get the current write block.
+    AudioBus* current_block = audio_blocks_[write_block_];
+
+    // Figure out what segment sizes we need when adding the new content to
+    // the FIFO.
+    const int push_frames =
+        std::min(block_frames_ - write_pos_, frames_to_push);
+
+    if (source) {
+      // Deinterleave the content to the FIFO and update the |write_pos_|.
+      current_block->FromInterleavedPartial(source_ptr, write_pos_, push_frames,
+                                            bytes_per_sample);
+    } else {
+      current_block->ZeroFramesPartial(write_pos_, push_frames);
+    }
+    write_pos_ = (write_pos_ + push_frames) % block_frames_;
+    if (!write_pos_) {
+      // The current block is completely filled, increment |write_block_| and
+      // |available_blocks_|.
+      write_block_ = (write_block_ + 1) % audio_blocks_.size();
+      ++available_blocks_;
+    }
+
+    if (source_ptr)
+      source_ptr += push_frames * bytes_per_sample * channels_;
+    frames_to_push -= push_frames;
+    DCHECK_GE(frames_to_push, 0);
+  }
+}
+
 }  // namespace media
diff --git a/media/base/audio_block_fifo.h b/media/base/audio_block_fifo.h
index a90a3df..8c0d5f5c 100644
--- a/media/base/audio_block_fifo.h
+++ b/media/base/audio_block_fifo.h
@@ -29,6 +29,9 @@
   // Push() will crash if the allocated space is insufficient.
   void Push(const void* source, int frames, int bytes_per_sample);
 
+  // Pushes zeroed out frames to the FIFO.
+  void PushSilence(int frames);
+
   // Consumes a block of audio from the FIFO.  Returns an AudioBus which
   // contains the consumed audio data to avoid copying.
   // Consume() will crash if the FIFO does not contain a block of data.
@@ -50,6 +53,11 @@
   void IncreaseCapacity(int blocks);
 
  private:
+  // Common implementation for Push() and PushSilence(). If |source| is
+  // nullptr, silence will be pushed. To push silence, set |source| and
+  // |bytes_per_sample| to nullptr and 0 respectively.
+  void PushInternal(const void* source, int frames, int bytes_per_sample);
+
   // The actual FIFO is a vector of audio buses.
   ScopedVector<AudioBus> audio_blocks_;
 
diff --git a/media/base/audio_block_fifo_unittest.cc b/media/base/audio_block_fifo_unittest.cc
index d671e836..63b09ce 100644
--- a/media/base/audio_block_fifo_unittest.cc
+++ b/media/base/audio_block_fifo_unittest.cc
@@ -18,8 +18,11 @@
   AudioBlockFifoTest() {}
   ~AudioBlockFifoTest() override {}
 
-  void PushAndVerify(AudioBlockFifo* fifo, int frames_to_push,
-                     int channels, int block_frames, int max_frames) {
+  void PushAndVerify(AudioBlockFifo* fifo,
+                     int frames_to_push,
+                     int channels,
+                     int block_frames,
+                     int max_frames) {
     for (int filled_frames = max_frames - fifo->GetUnfilledFrames();
          filled_frames + frames_to_push <= max_frames;) {
       Push(fifo, frames_to_push, channels);
@@ -39,7 +42,8 @@
     fifo->Push(data.get(), frames_to_push, bytes_per_sample);
   }
 
-  void ConsumeAndVerify(AudioBlockFifo* fifo, int expected_unfilled_frames,
+  void ConsumeAndVerify(AudioBlockFifo* fifo,
+                        int expected_unfilled_frames,
                         int expected_available_blocks) {
     const AudioBus* bus = fifo->Consume();
     EXPECT_EQ(fifo->GetUnfilledFrames(), expected_unfilled_frames);
@@ -123,10 +127,9 @@
   fifo.Clear();
   int new_push_frames = 128;
   // Change the input frame and try to fill up the FIFO.
-  PushAndVerify(&fifo, new_push_frames, channels, frames,
-                frames * blocks);
+  PushAndVerify(&fifo, new_push_frames, channels, frames, frames * blocks);
   EXPECT_TRUE(fifo.GetUnfilledFrames() != 0);
-  EXPECT_TRUE(fifo.available_blocks() == blocks -1);
+  EXPECT_TRUE(fifo.available_blocks() == blocks - 1);
 
   // Consume all the existing filled blocks of data.
   while (fifo.available_blocks()) {
@@ -145,8 +148,7 @@
 
   // Completely fill up the buffer again.
   new_push_frames = frames * blocks - remain_frames;
-  PushAndVerify(&fifo, new_push_frames, channels, frames,
-                frames * blocks);
+  PushAndVerify(&fifo, new_push_frames, channels, frames, frames * blocks);
   EXPECT_TRUE(fifo.GetUnfilledFrames() == 0);
   EXPECT_TRUE(fifo.available_blocks() == blocks);
 }
@@ -169,6 +171,24 @@
   EXPECT_TRUE(fifo.GetUnfilledFrames() == frames);
 }
 
+TEST_F(AudioBlockFifoTest, PushAndConsumeSilence) {
+  static const int channels = 2;
+  static const int frames = 441;
+  static const int blocks = 2;
+  AudioBlockFifo fifo(channels, frames, blocks);
+  // First push non-zero data.
+  Push(&fifo, frames, channels);
+  // Then push silence.
+  fifo.PushSilence(frames);
+  EXPECT_TRUE(fifo.GetUnfilledFrames() == 0);
+  EXPECT_TRUE(fifo.available_blocks() == blocks);
+
+  // Consume two blocks of data. The first should not be zero, but the second
+  // should be.
+  EXPECT_FALSE(fifo.Consume()->AreFramesZero());
+  EXPECT_TRUE(fifo.Consume()->AreFramesZero());
+}
+
 // Dynamically increase the capacity of FIFO and verify buffers are correct.
 TEST_F(AudioBlockFifoTest, DynamicallyIncreaseCapacity) {
   // Create a FIFO with default blocks of buffers.
diff --git a/tools/grit/grit/format/gen_predetermined_ids.py b/tools/grit/grit/format/gen_predetermined_ids.py
new file mode 100755
index 0000000..eb6afa8
--- /dev/null
+++ b/tools/grit/grit/format/gen_predetermined_ids.py
@@ -0,0 +1,157 @@
+#!/usr/bin/env python
+# Copyright 2017 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+A tool to generate a predetermined resource ids file that can be used as an
+input to grit via the -p option. This is meant to be run manually every once in
+a while and its output checked in. See tools/gritsettings/README.md for details.
+"""
+
+import fnmatch
+import os
+import re
+import sys
+
+# Regular expressions for parsing the #define macro format. Separate regular
+# expressions are used for parsing lines with pragma (for builds with
+# enable_resource_whitelist_generation flag) on Windows and non-Windows, and
+# for lines without pragma. For example:
+# Without generate whitelist flag:
+#   #define IDS_FOO_MESSAGE 1234
+# With generate whitelist flag in non-windows:
+#   #define IDS_FOO_MESSAGE _Pragma("whitelisted_resource_1234") 1234
+# With generate whitelist flag in windows:
+#   #define IDS_FOO_MESSAGE __pragma(message("whitelisted_resource_1234")) 1234
+RESOURCE_EXTRACT_REGEX = re.compile('^#define (\S*) (\d*)$', re.MULTILINE)
+RESOURCE_EXTRACT_REGEX_PRAGMA = re.compile(
+      '^#define (\S*) _Pragma\("whitelisted_resource_\d*"\) (\d*)$',
+      re.MULTILINE)
+RESOURCE_EXTRACT_REGEX_PRAGMA_WINDOWS = re.compile(
+      '^#define (\S*) __pragma\(message\("whitelisted_resource_\d*"\)\) (\d*)$',
+      re.MULTILINE)
+
+ORDERED_RESOURCE_IDS_REGEX = re.compile('^Resource=(\d*)$', re.MULTILINE)
+
+
+def _GetResourceNameIdPairsIter(string_to_scan):
+  """Gets an iterator of the resource name and id pairs of the given string.
+
+  Scans the input string for lines of the form "#define NAME ID" and returns
+  an iterator over all matching (NAME, ID) pairs.
+
+  Args:
+    string_to_scan: The input string to scan.
+
+  Yields:
+    A tuple of name and id.
+  """
+  for match in RESOURCE_EXTRACT_REGEX.finditer(string_to_scan):
+    yield match.group(1, 2)
+  for match in RESOURCE_EXTRACT_REGEX_PRAGMA.finditer(string_to_scan):
+    yield match.group(1, 2)
+  for match in RESOURCE_EXTRACT_REGEX_PRAGMA_WINDOWS.finditer(string_to_scan):
+    yield match.group(1, 2)
+
+
+def _ReadOrderedResourceIds(path):
+  """Reads ordered resource ids from the given file.
+
+  The resources are expected to be of the format produced by running Chrome
+  with --print-resource-ids command line.
+
+  Args:
+    path: File path to read resource ids from.
+
+  Returns:
+    An array of ordered resource ids.
+  """
+  ordered_resource_ids = []
+  with open(path, "r") as f:
+    for match in ORDERED_RESOURCE_IDS_REGEX.finditer(f.read()):
+      ordered_resource_ids.append(int(match.group(1)))
+  return ordered_resource_ids
+
+
+def GenerateResourceMapping(original_resources, ordered_resource_ids):
+  """Generates a resource mapping from the ordered ids and the original mapping.
+
+  The returned dict will assign new ids to ordered_resource_ids numerically
+  increasing from 101.
+
+  Args:
+    original_resources: A dict of original resource ids to resource names.
+    ordered_resource_ids: An array of ordered resource ids.
+
+  Returns:
+    A dict of resource ids to resource names.
+  """
+  output_resource_map = {}
+  # 101 is used as the starting value since other parts of GRIT require it to be
+  # the minimum (e.g. rc_header.py) based on Windows resource numbering.
+  next_id = 101
+  for original_id in ordered_resource_ids:
+    resource_name = original_resources[original_id]
+    output_resource_map[next_id] = resource_name
+    next_id += 1
+  return output_resource_map
+
+
+def ReadResourceIdsFromFile(file, original_resources):
+  """Reads resource ids from a GRIT-produced header file.
+
+  Args:
+    file: File to a GRIT-produced header file to read from.
+    original_resources: Dict of resource ids to resource names to add to.
+  """
+  for resource_name, resource_id in _GetResourceNameIdPairsIter(file.read()):
+    original_resources[int(resource_id)] = resource_name
+
+
+def _ReadOriginalResourceIds(out_dir):
+  """Reads resource ids from GRIT header files in the specified directory.
+
+  Args:
+    out_dir: A Chrome build output directory (e.g. out/gn) to scan.
+
+  Returns:
+    A dict of resource ids to resource names.
+  """
+  original_resources = {}
+  for root, dirnames, filenames in os.walk(out_dir + '/gen'):
+    for filename in filenames:
+      if filename.endswith(('_resources.h', '_settings.h', '_strings.h')):
+        with open(os.path.join(root, filename), "r") as f:
+          ReadResourceIdsFromFile(f, original_resources)
+  return original_resources
+
+
+def _GeneratePredeterminedIdsFile(ordered_resources_file, out_dir):
+  """Generates a predetermined ids file.
+
+  Args:
+    ordered_resources_file: File path to read ordered resource ids from.
+    out_dir: A Chrome build output directory (e.g. out/gn) to scan.
+
+  Returns:
+    A dict of resource ids to resource names.
+  """
+  original_resources = _ReadOriginalResourceIds(out_dir)
+  ordered_resource_ids = _ReadOrderedResourceIds(ordered_resources_file)
+  output_resource_map = GenerateResourceMapping(original_resources,
+                                                ordered_resource_ids)
+  for res_id in sorted(output_resource_map.keys()):
+    print "{} {}".format(output_resource_map[res_id], res_id)
+
+
+def main(argv):
+  if len(argv) != 2:
+    print("usage: gen_predetermined_ids.py <ordered_resources_file> <out_dir>")
+    sys.exit(1)
+  ordered_resources_file, out_dir = argv[0], argv[1]
+  _GeneratePredeterminedIdsFile(ordered_resources_file, out_dir)
+
+
+if '__main__' == __name__:
+  main(sys.argv[1:])
diff --git a/tools/grit/grit/format/gen_predetermined_ids_unittest.py b/tools/grit/grit/format/gen_predetermined_ids_unittest.py
new file mode 100755
index 0000000..472a09d3
--- /dev/null
+++ b/tools/grit/grit/format/gen_predetermined_ids_unittest.py
@@ -0,0 +1,43 @@
+#!/usr/bin/env python
+# Copyright 2017 The Chromium Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+'''Unit tests for the gen_predetermined_ids module.'''
+
+import os
+import sys
+if __name__ == '__main__':
+  sys.path.append(os.path.join(os.path.dirname(__file__), '../..'))
+
+import StringIO
+import unittest
+
+from grit.format import gen_predetermined_ids
+
+class GenPredeterminedIdsUnittest(unittest.TestCase):
+  def testGenerateResourceMapping(self):
+    original_resources = {200: 'A', 201: 'B', 300: 'C', 350: 'D', 370: 'E'}
+    ordered_resource_ids = [300, 201, 370]
+    mapping = gen_predetermined_ids.GenerateResourceMapping(
+        original_resources, ordered_resource_ids)
+    self.assertEqual({101: 'C', 102: 'B', 103: 'E'}, mapping)
+
+  def testReadResourceIdsFromFile(self):
+    f = StringIO.StringIO('''
+// This file is automatically generated by GRIT. Do not edit.
+
+#pragma once
+
+#define IDS_BOOKMARKS_NO_ITEMS 12500
+#define IDS_BOOKMARK_BAR_IMPORT_LINK _Pragma("whitelisted_resource_12501") 12501
+#define IDS_BOOKMARK_X __pragma(message("whitelisted_resource_12502")) 12502
+''')
+    resources = {}
+    gen_predetermined_ids.ReadResourceIdsFromFile(f, resources)
+    self.assertEqual({12500: 'IDS_BOOKMARKS_NO_ITEMS',
+                      12501: 'IDS_BOOKMARK_BAR_IMPORT_LINK',
+                      12502: 'IDS_BOOKMARK_X'}, resources)
+
+if __name__ == '__main__':
+  unittest.main()
diff --git a/tools/gritsettings/README.md b/tools/gritsettings/README.md
new file mode 100644
index 0000000..344fef09
--- /dev/null
+++ b/tools/gritsettings/README.md
@@ -0,0 +1,36 @@
+### tools/gritsettings README
+
+This directory contains several files that apply globally to the Chrome resource
+generation system (which uses GRIT - see tools/grit).
+
+**resource_ids**: This file is used to assign starting resource ids for
+resources and strings used by Chromium. This is done to ensure that resource ids
+are unique across all the grd files. If you are adding a new grd file, please
+add a new entry to this file.
+
+**translation_expectations.pyl**: Specifies which grd files should be translated
+and into which languages they should be translated. Used by the internal
+translation process.
+
+**startup_resources_[platform].txt**: These files provide a pre-determined
+resource id ordering that will be used by GRIT when assigning resource ids. The
+goal is to have the resources loaded during Chrome startup be ordered first in
+the .pak files, so that fewer page faults are suffered during Chrome startup.
+To update or generate one of these files, follow these instructions:
+
+  1. Build a Chrome official release build and launch it with command line:
+     `--print-resource-ids` and save the output to a file (e.g. res_ids.txt).
+
+  2. Generate the startup_resources_[platform].txt via the following command
+     (you can redirect its output to the new file location):
+
+     `
+     tools/grit/grit/format/gen_predetermined_ids.py res_ids.txt out/gn
+     `
+
+     In the above command, res_ids.txt is the file produced in step 1 and out/gn
+     is your Chrome build directory where you compiled Chrome. The output of the
+     command can be saved as a new startup_resources_[platform].txt file.
+
+  3. If this is a new file, modify `tools/grit/grit_rule.gni` to set its path
+     via `grit_predetermined_resource_ids_file` for the given platform.
diff --git a/tools/metrics/histograms/histograms.xml b/tools/metrics/histograms/histograms.xml
index 2026b258..ac5d116 100644
--- a/tools/metrics/histograms/histograms.xml
+++ b/tools/metrics/histograms/histograms.xml
@@ -79676,6 +79676,7 @@
   <int value="11" label="SET_EVENT_HANDLE"/>
   <int value="12" label="NO_CAPTURE_CLIENT"/>
   <int value="13" label="NO_AUDIO_VOLUME"/>
+  <int value="14" label="OK_WITH_RESAMPLING"/>
 </enum>
 
 <enum name="AudioThreadStatus" type="int">
diff --git a/ui/base/ime/input_method_win.cc b/ui/base/ime/input_method_win.cc
index 49efa466..e2bb5286 100644
--- a/ui/base/ime/input_method_win.cc
+++ b/ui/base/ime/input_method_win.cc
@@ -57,6 +57,11 @@
 
 InputMethodWin::~InputMethodWin() {}
 
+void InputMethodWin::OnFocus() {
+  InputMethodBase::OnFocus();
+  RefreshInputLanguage();
+}
+
 bool InputMethodWin::OnUntranslatedIMEMessage(
     const base::NativeEvent& event,
     InputMethod::NativeEventResult* result) {
@@ -275,8 +280,7 @@
   // which is known to be incompatible with TSF.
   // TODO(shuchen): Use ITfLanguageProfileNotifySink instead.
   OnInputMethodChanged();
-  imm32_manager_.SetInputLanguage();
-  UpdateIMEState();
+  RefreshInputLanguage();
 }
 
 bool InputMethodWin::IsInputLocaleCJK() const {
@@ -656,6 +660,20 @@
   return 1;  // returns non-zero value when succeeded.
 }
 
+void InputMethodWin::RefreshInputLanguage() {
+  TextInputType type_original = GetTextInputType();
+  imm32_manager_.SetInputLanguage();
+  if (type_original != GetTextInputType()) {
+    // Only update the IME state when necessary.
+    // It's unnecessary to report IME state when:
+    // 1) Switching between 2 top-level windows, and the switched-away window
+    //    receives OnInputLocaleChanged.
+    // 2) The text input type is not changed by |SetInputLanguage|.
+    // Please refer to crbug.com/679564.
+    UpdateIMEState();
+  }
+}
+
 bool InputMethodWin::IsWindowFocused(const TextInputClient* client) const {
   if (!client)
     return false;
diff --git a/ui/base/ime/input_method_win.h b/ui/base/ime/input_method_win.h
index 9478ac2..1eab715f 100644
--- a/ui/base/ime/input_method_win.h
+++ b/ui/base/ime/input_method_win.h
@@ -23,6 +23,9 @@
                  HWND toplevel_window_handle);
   ~InputMethodWin() override;
 
+  // Overridden from InputMethodBase:
+  void OnFocus() override;
+
   // Overridden from InputMethod:
   bool OnUntranslatedIMEMessage(const base::NativeEvent& event,
                                 NativeEventResult* result) override;
@@ -88,6 +91,8 @@
   LRESULT OnReconvertString(RECONVERTSTRING* reconv);
   LRESULT OnQueryCharPosition(IMECHARPOSITION* char_positon);
 
+  void RefreshInputLanguage();
+
   // Returns true if the Win32 native window bound to |client| is considered
   // to be ready for receiving keyboard input.
   bool IsWindowFocused(const TextInputClient* client) const;
diff --git a/ui/base/resource/data_pack.cc b/ui/base/resource/data_pack.cc
index c7def6da..3d07d985 100644
--- a/ui/base/resource/data_pack.cc
+++ b/ui/base/resource/data_pack.cc
@@ -5,15 +5,19 @@
 #include "ui/base/resource/data_pack.h"
 
 #include <errno.h>
+#include <set>
 #include <utility>
 
+#include "base/command_line.h"
 #include "base/files/file_util.h"
 #include "base/files/memory_mapped_file.h"
 #include "base/logging.h"
 #include "base/memory/ptr_util.h"
 #include "base/memory/ref_counted_memory.h"
 #include "base/metrics/histogram_macros.h"
+#include "base/stl_util.h"
 #include "base/strings/string_piece.h"
+#include "base/synchronization/lock.h"
 
 // For details of the file layout, see
 // http://dev.chromium.org/developers/design-documents/linuxresourcesandlocalizedstrings
@@ -24,7 +28,7 @@
 // Length of file header: version, entry count and text encoding type.
 static const size_t kHeaderLength = 2 * sizeof(uint32_t) + sizeof(uint8_t);
 
-#pragma pack(push,2)
+#pragma pack(push, 2)
 struct DataPackEntry {
   uint16_t resource_id;
   uint32_t file_offset;
@@ -65,6 +69,35 @@
   UMA_HISTOGRAM_ENUMERATION("DataPack.Load", error, LOAD_ERRORS_COUNT);
 }
 
+// Prints the given resource id the first time it's loaded if Chrome has been
+// started with --print-resource-ids. This output is then used to generate a
+// more optimal resource renumbering to improve startup speed. See
+// tools/gritsettings/README.md for more info.
+void MaybePrintResourceId(uint16_t resource_id) {
+  // This code is run in other binaries than Chrome which do not initialize the
+  // CommandLine object. Early return in those cases.
+  if (!base::CommandLine::InitializedForCurrentProcess())
+    return;
+
+  // Note: This switch isn't in ui/base/ui_base_switches.h because ui/base
+  // depends on ui/base/resource and thus it would cause a circular dependency.
+  static bool print_resource_ids =
+      base::CommandLine::ForCurrentProcess()->HasSwitch("print-resource-ids");
+  if (!print_resource_ids)
+    return;
+
+  // Note: These are leaked intentionally. However, it's only allocated if the
+  // above command line is specified, so it shouldn't affect regular users.
+  static std::set<uint16_t>* resource_ids_logged = new std::set<uint16_t>();
+  // DataPack doesn't require single-threaded access, so use a lock.
+  static base::Lock* lock = new base::Lock;
+  base::AutoLock auto_lock(*lock);
+  if (!base::ContainsKey(*resource_ids_logged, resource_id)) {
+    printf("Resource=%d\n", resource_id);
+    resource_ids_logged->insert(resource_id);
+  }
+}
+
 }  // namespace
 
 namespace ui {
@@ -258,6 +291,7 @@
     return false;
   }
 
+  MaybePrintResourceId(resource_id);
   size_t length = next_entry->file_offset - target->file_offset;
   data->set(reinterpret_cast<const char*>(data_source_->GetData() +
                                           target->file_offset),