Update libchrome to r381699

TEST=Build all

Change-Id: I962a83a044a102515c316d04bb65e7c9302e0f0c
diff --git a/Android.mk b/Android.mk
index 2904098..203426b 100644
--- a/Android.mk
+++ b/Android.mk
@@ -65,10 +65,13 @@
 	base/files/file_util.cc \
 	base/files/file_util_posix.cc \
 	base/files/important_file_writer.cc \
+	base/files/memory_mapped_file.cc \
+	base/files/memory_mapped_file_posix.cc \
 	base/files/scoped_file.cc \
 	base/files/scoped_temp_dir.cc \
 	base/guid.cc \
 	base/guid_posix.cc \
+	base/hash.cc \
 	base/json/json_file_value_serializer.cc \
 	base/json/json_parser.cc \
 	base/json/json_reader.cc \
@@ -98,6 +101,9 @@
 	base/metrics/histogram.cc \
 	base/metrics/histogram_samples.cc \
 	base/metrics/histogram_snapshot_manager.cc \
+	base/metrics/persistent_histogram_allocator.cc \
+	base/metrics/persistent_memory_allocator.cc \
+	base/metrics/persistent_sample_map.cc \
 	base/metrics/sample_map.cc \
 	base/metrics/sample_vector.cc \
 	base/metrics/sparse_histogram.cc \
@@ -116,7 +122,6 @@
 	base/process/process_metrics.cc \
 	base/process/process_metrics_posix.cc \
 	base/process/process_posix.cc \
-	base/profiler/alternate_timer.cc \
 	base/profiler/scoped_profile.cc \
 	base/profiler/scoped_tracker.cc \
 	base/profiler/tracked_time.cc \
@@ -147,6 +152,11 @@
 	base/sys_info_posix.cc \
 	base/task/cancelable_task_tracker.cc \
 	base/task_runner.cc \
+	base/task_scheduler/scheduler_lock_impl.cc \
+	base/task_scheduler/sequence.cc \
+	base/task_scheduler/sequence_sort_key.cc \
+	base/task_scheduler/task.cc \
+	base/task_scheduler/task_traits.cc \
 	base/third_party/icu/icu_utf.cc \
 	base/third_party/nspr/prtime.cc \
 	base/threading/non_thread_safe_impl.cc \
@@ -185,9 +195,7 @@
 	base/trace_event/memory_dump_session_state.cc \
 	base/trace_event/process_memory_dump.cc \
 	base/trace_event/process_memory_maps.cc \
-	base/trace_event/process_memory_maps_dump_provider.cc \
 	base/trace_event/process_memory_totals.cc \
-	base/trace_event/process_memory_totals_dump_provider.cc \
 	base/trace_event/trace_buffer.cc \
 	base/trace_event/trace_config.cc \
 	base/trace_event/trace_event_argument.cc \
@@ -205,6 +213,7 @@
 libchromeLinuxSrc := \
 	base/files/file_path_watcher_linux.cc \
 	base/files/file_util_linux.cc \
+	base/memory/shared_memory_posix.cc \
 	base/posix/unix_domain_socket_linux.cc \
 	base/process/internal_linux.cc \
 	base/process/process_handle_linux.cc \
@@ -227,6 +236,7 @@
 	base/mac/libdispatch_task_runner.cc \
 	base/mac/scoped_mach_port.cc \
 	base/mac/scoped_nsautorelease_pool.mm \
+	base/memory/shared_memory_mac.cc \
 	base/message_loop/message_pump_mac.mm \
 	base/process/launch_mac.cc \
 	base/process/port_provider_mac.cc \
@@ -279,7 +289,6 @@
 	base/memory/linked_ptr_unittest.cc \
 	base/memory/ref_counted_memory_unittest.cc \
 	base/memory/ref_counted_unittest.cc \
-	base/memory/scoped_ptr_unittest.cc \
 	base/memory/scoped_vector_unittest.cc \
 	base/memory/singleton_unittest.cc \
 	base/memory/weak_ptr_unittest.cc \
@@ -293,6 +302,9 @@
 	base/metrics/histogram_macros_unittest.cc \
 	base/metrics/histogram_snapshot_manager_unittest.cc \
 	base/metrics/histogram_unittest.cc \
+	base/metrics/persistent_histogram_allocator_unittest.cc \
+	base/metrics/persistent_memory_allocator_unittest.cc \
+	base/metrics/persistent_sample_map_unittest.cc \
 	base/metrics/sample_map_unittest.cc \
 	base/metrics/sample_vector_unittest.cc \
 	base/metrics/sparse_histogram_unittest.cc \
@@ -328,6 +340,10 @@
 	base/sys_info_unittest.cc \
 	base/task/cancelable_task_tracker_unittest.cc \
 	base/task_runner_util_unittest.cc \
+	base/task_scheduler/scheduler_lock_unittest.cc \
+	base/task_scheduler/sequence_sort_key_unittest.cc \
+	base/task_scheduler/sequence_unittest.cc \
+	base/task_scheduler/task_traits.cc \
 	base/template_util_unittest.cc \
 	base/test/multiprocess_test.cc \
 	base/test/multiprocess_test_android.cc \
@@ -364,8 +380,6 @@
 	base/trace_event/memory_allocator_dump_unittest.cc \
 	base/trace_event/memory_dump_manager_unittest.cc \
 	base/trace_event/process_memory_dump_unittest.cc \
-	base/trace_event/process_memory_maps_dump_provider_unittest.cc \
-	base/trace_event/process_memory_totals_dump_provider_unittest.cc \
 	base/trace_event/trace_config_unittest.cc \
 	base/trace_event/trace_event_argument_unittest.cc \
 	base/trace_event/trace_event_synthetic_delay_unittest.cc \
@@ -402,7 +416,12 @@
 # ========================================================
 include $(CLEAR_VARS)
 LOCAL_MODULE := libchrome
-LOCAL_SRC_FILES := $(libchromeCommonSrc) $(libchromeLinuxSrc) base/sys_info_chromeos.cc
+LOCAL_SRC_FILES := \
+	$(libchromeCommonSrc) \
+	$(libchromeLinuxSrc) \
+	base/memory/shared_memory_android.cc \
+	base/sys_info_chromeos.cc \
+
 LOCAL_CPP_EXTENSION := $(libchromeCommonCppExtension)
 LOCAL_CFLAGS := $(libchromeCommonCFlags)
 LOCAL_CLANG := $(libchromeUseClang)
@@ -469,6 +488,7 @@
 LOCAL_MODULE := libchrome-crypto
 LOCAL_SRC_FILES := \
 	crypto/openssl_util.cc \
+	crypto/random.cc \
 	crypto/secure_hash_openssl.cc \
 	crypto/secure_util.cc \
 	crypto/sha2.cc \
diff --git a/SConstruct b/SConstruct
index a8cb148..ade614d 100644
--- a/SConstruct
+++ b/SConstruct
@@ -63,6 +63,7 @@
                 files/scoped_temp_dir.cc
                 guid.cc
                 guid_posix.cc
+                hash.cc
                 json/json_file_value_serializer.cc
                 json/json_parser.cc
                 json/json_reader.cc
@@ -76,6 +77,7 @@
                 md5.cc
                 memory/ref_counted.cc
                 memory/ref_counted_memory.cc
+                memory/shared_memory_posix.cc
                 memory/singleton.cc
                 memory/weak_ptr.cc
                 message_loop/incoming_task_queue.cc
@@ -92,6 +94,9 @@
                 metrics/histogram.cc
                 metrics/histogram_samples.cc
                 metrics/histogram_snapshot_manager.cc
+                metrics/persistent_histogram_allocator.cc
+                metrics/persistent_memory_allocator.cc
+                metrics/persistent_sample_map.cc
                 metrics/sample_map.cc
                 metrics/sample_vector.cc
                 metrics/sparse_histogram.cc
@@ -115,7 +120,6 @@
                 process/process_metrics_linux.cc
                 process/process_metrics_posix.cc
                 process/process_posix.cc
-                profiler/alternate_timer.cc
                 profiler/scoped_profile.cc
                 profiler/scoped_tracker.cc
                 profiler/tracked_time.cc
@@ -150,6 +154,11 @@
                 sys_info_posix.cc
                 task_runner.cc
                 task/cancelable_task_tracker.cc
+                task_scheduler/scheduler_lock_impl.cc
+                task_scheduler/sequence.cc
+                task_scheduler/sequence_sort_key.cc
+                task_scheduler/task.cc
+                task_scheduler/task_traits.cc
                 third_party/icu/icu_utf.cc
                 third_party/nspr/prtime.cc
                 threading/non_thread_safe_impl.cc
@@ -190,9 +199,7 @@
                 trace_event/memory_dump_session_state.cc
                 trace_event/process_memory_dump.cc
                 trace_event/process_memory_maps.cc
-                trace_event/process_memory_maps_dump_provider.cc
                 trace_event/process_memory_totals.cc
-                trace_event/process_memory_totals_dump_provider.cc
                 trace_event/trace_buffer.cc
                 trace_event/trace_config.cc
                 trace_event/trace_event_argument.cc
@@ -335,6 +342,7 @@
 env['CCFLAGS'] += ['-DOS_CHROMEOS',
                    '-DUSE_NSS_CERTS',
                    '-DUSE_SYSTEM_LIBEVENT',
+                   '-DNO_TCMALLOC',
                    '-fPIC',
                    '-fno-exceptions',
                    '-Wall',
diff --git a/base/BUILD.gn b/base/BUILD.gn
index 5d8510f..1cbc063 100644
--- a/base/BUILD.gn
+++ b/base/BUILD.gn
@@ -18,16 +18,28 @@
 # huge sequence of random-looking conditionals.
 
 import("//build/buildflag_header.gni")
+import("//build/config/allocator.gni")
 import("//build/config/compiler/compiler.gni")
 import("//build/config/nacl/config.gni")
+import("//build/config/sysroot.gni")
 import("//build/config/ui.gni")
 import("//build/nocompile.gni")
 import("//testing/test.gni")
 
+declare_args() {
+  # Override this value to give a specific build date.
+  # See //base/build_time.cc for more details.
+  override_build_date = "N/A"
+}
+
 if (is_android) {
   import("//build/config/android/rules.gni")
 }
 
+if (is_win) {
+  import("//build/config/win/visual_studio_version.gni")
+}
+
 config("base_flags") {
   if (is_clang) {
     cflags = [
@@ -52,6 +64,15 @@
       "/DELAYLOAD:setupapi.dll",
     ]
   }
+
+  copy("copy_dbghelp.dll") {
+    sources = [
+      "../build/win/dbghelp_xp/dbghelp.dll",
+    ]
+    outputs = [
+      "$root_out_dir/{{source_file_part}}",
+    ]
+  }
 }
 
 if (is_nacl_nonsfi) {
@@ -391,12 +412,16 @@
     "mac/launchd.h",
     "mac/libdispatch_task_runner.cc",
     "mac/libdispatch_task_runner.h",
-    "mac/mac_logging.cc",
     "mac/mac_logging.h",
+    "mac/mac_logging.mm",
     "mac/mac_util.h",
     "mac/mac_util.mm",
     "mac/mach_logging.cc",
     "mac/mach_logging.h",
+    "mac/mach_port_broker.h",
+    "mac/mach_port_broker.mm",
+    "mac/mach_port_util.cc",
+    "mac/mach_port_util.h",
     "mac/objc_property_releaser.h",
     "mac/objc_property_releaser.mm",
     "mac/os_crash_dumps.cc",
@@ -507,6 +532,12 @@
     "metrics/histogram_snapshot_manager.h",
     "metrics/metrics_hashes.cc",
     "metrics/metrics_hashes.h",
+    "metrics/persistent_histogram_allocator.cc",
+    "metrics/persistent_histogram_allocator.h",
+    "metrics/persistent_memory_allocator.cc",
+    "metrics/persistent_memory_allocator.h",
+    "metrics/persistent_sample_map.cc",
+    "metrics/persistent_sample_map.h",
     "metrics/sample_map.cc",
     "metrics/sample_map.h",
     "metrics/sample_vector.cc",
@@ -623,8 +654,6 @@
     "process/process_metrics_win.cc",
     "process/process_posix.cc",
     "process/process_win.cc",
-    "profiler/alternate_timer.cc",
-    "profiler/alternate_timer.h",
     "profiler/native_stack_sampler.cc",
     "profiler/native_stack_sampler.h",
     "profiler/native_stack_sampler_posix.cc",
@@ -737,6 +766,17 @@
     "task_runner.cc",
     "task_runner.h",
     "task_runner_util.h",
+    "task_scheduler/scheduler_lock.h",
+    "task_scheduler/scheduler_lock_impl.cc",
+    "task_scheduler/scheduler_lock_impl.h",
+    "task_scheduler/sequence.cc",
+    "task_scheduler/sequence.h",
+    "task_scheduler/sequence_sort_key.cc",
+    "task_scheduler/sequence_sort_key.h",
+    "task_scheduler/task.cc",
+    "task_scheduler/task.h",
+    "task_scheduler/task_traits.cc",
+    "task_scheduler/task_traits.h",
     "template_util.h",
     "third_party/dmg_fp/dmg_fp.h",
     "third_party/dmg_fp/dtoa_wrapper.cc",
@@ -847,11 +887,8 @@
     "trace_event/process_memory_dump.h",
     "trace_event/process_memory_maps.cc",
     "trace_event/process_memory_maps.h",
-    "trace_event/process_memory_maps_dump_provider.h",
     "trace_event/process_memory_totals.cc",
     "trace_event/process_memory_totals.h",
-    "trace_event/process_memory_totals_dump_provider.cc",
-    "trace_event/process_memory_totals_dump_provider.h",
     "trace_event/trace_buffer.cc",
     "trace_event/trace_buffer.h",
     "trace_event/trace_config.cc",
@@ -954,6 +991,8 @@
   ]
 
   deps = [
+    "//base/allocator",
+    "//base/allocator:features",
     "//base/third_party/dynamic_annotations",
     "//third_party/modp_b64",
   ]
@@ -961,9 +1000,21 @@
   public_deps = [
     ":base_paths",
     ":base_static",
+    ":build_date",
     ":debugging_flags",
   ]
 
+  # Needed for <atomic> if using newer C++ library than sysroot
+  if (!use_sysroot && (is_android || is_linux)) {
+    libs = [ "atomic" ]
+  }
+
+  if (use_experimental_allocator_shim) {
+    # The allocator shim is part of the base API. This is to allow clients of
+    # base to install hooks into the allocator path.
+    public_deps += [ "//base/allocator:unified_allocator_shim" ]
+  }
+
   # Allow more direct string conversions on platforms with native utf8
   # strings
   if (is_mac || is_ios || is_chromeos) {
@@ -991,7 +1042,6 @@
       "sys_info_linux.cc",
       "trace_event/malloc_dump_provider.cc",
       "trace_event/malloc_dump_provider.h",
-      "trace_event/process_memory_maps_dump_provider.cc",
     ]
     set_sources_assignment_filter(sources_assignment_filter)
 
@@ -1058,7 +1108,6 @@
       "sync_socket_posix.cc",
       "sys_info.cc",
       "sys_info_posix.cc",
-      "trace_event/process_memory_totals_dump_provider.cc",
       "trace_event/trace_event_system_stats_monitor.cc",
     ]
 
@@ -1114,6 +1163,9 @@
     data += [ "$root_build_dir/dbghelp.dll" ]
 
     deps += [ "//base/trace_event/etw_manifest:chrome_events_win" ]
+    if (current_toolchain == default_toolchain) {
+      deps += [ ":copy_dbghelp.dll" ]
+    }
 
     if (is_component_build) {
       # Copy the VS runtime DLLs into the isolate so that they don't have to be
@@ -1127,12 +1179,63 @@
 
       # These runtime files are copied to the output directory by the
       # vs_toolchain script that runs as part of toolchain configuration.
-      data += [
-        "$root_out_dir/msvcp120${vcrt_suffix}.dll",
-        "$root_out_dir/msvcr120${vcrt_suffix}.dll",
-      ]
+      if (visual_studio_version == "2015") {
+        data += [
+          "$root_out_dir/msvcp140${vcrt_suffix}.dll",
+          "$root_out_dir/vccorlib140${vcrt_suffix}.dll",
+          "$root_out_dir/vcruntime140${vcrt_suffix}.dll",
+
+          # Universal Windows 10 CRT files
+          "$root_out_dir/api-ms-win-core-console-l1-1-0.dll",
+          "$root_out_dir/api-ms-win-core-datetime-l1-1-0.dll",
+          "$root_out_dir/api-ms-win-core-debug-l1-1-0.dll",
+          "$root_out_dir/api-ms-win-core-errorhandling-l1-1-0.dll",
+          "$root_out_dir/api-ms-win-core-file-l1-1-0.dll",
+          "$root_out_dir/api-ms-win-core-file-l1-2-0.dll",
+          "$root_out_dir/api-ms-win-core-file-l2-1-0.dll",
+          "$root_out_dir/api-ms-win-core-handle-l1-1-0.dll",
+          "$root_out_dir/api-ms-win-core-heap-l1-1-0.dll",
+          "$root_out_dir/api-ms-win-core-interlocked-l1-1-0.dll",
+          "$root_out_dir/api-ms-win-core-libraryloader-l1-1-0.dll",
+          "$root_out_dir/api-ms-win-core-localization-l1-2-0.dll",
+          "$root_out_dir/api-ms-win-core-memory-l1-1-0.dll",
+          "$root_out_dir/api-ms-win-core-namedpipe-l1-1-0.dll",
+          "$root_out_dir/api-ms-win-core-processenvironment-l1-1-0.dll",
+          "$root_out_dir/api-ms-win-core-processthreads-l1-1-0.dll",
+          "$root_out_dir/api-ms-win-core-processthreads-l1-1-1.dll",
+          "$root_out_dir/api-ms-win-core-profile-l1-1-0.dll",
+          "$root_out_dir/api-ms-win-core-rtlsupport-l1-1-0.dll",
+          "$root_out_dir/api-ms-win-core-string-l1-1-0.dll",
+          "$root_out_dir/api-ms-win-core-synch-l1-1-0.dll",
+          "$root_out_dir/api-ms-win-core-synch-l1-2-0.dll",
+          "$root_out_dir/api-ms-win-core-sysinfo-l1-1-0.dll",
+          "$root_out_dir/api-ms-win-core-timezone-l1-1-0.dll",
+          "$root_out_dir/api-ms-win-core-util-l1-1-0.dll",
+          "$root_out_dir/api-ms-win-crt-conio-l1-1-0.dll",
+          "$root_out_dir/api-ms-win-crt-convert-l1-1-0.dll",
+          "$root_out_dir/api-ms-win-crt-environment-l1-1-0.dll",
+          "$root_out_dir/api-ms-win-crt-filesystem-l1-1-0.dll",
+          "$root_out_dir/api-ms-win-crt-heap-l1-1-0.dll",
+          "$root_out_dir/api-ms-win-crt-locale-l1-1-0.dll",
+          "$root_out_dir/api-ms-win-crt-math-l1-1-0.dll",
+          "$root_out_dir/api-ms-win-crt-multibyte-l1-1-0.dll",
+          "$root_out_dir/api-ms-win-crt-private-l1-1-0.dll",
+          "$root_out_dir/api-ms-win-crt-process-l1-1-0.dll",
+          "$root_out_dir/api-ms-win-crt-runtime-l1-1-0.dll",
+          "$root_out_dir/api-ms-win-crt-stdio-l1-1-0.dll",
+          "$root_out_dir/api-ms-win-crt-string-l1-1-0.dll",
+          "$root_out_dir/api-ms-win-crt-time-l1-1-0.dll",
+          "$root_out_dir/api-ms-win-crt-utility-l1-1-0.dll",
+          "$root_out_dir/ucrtbase${vcrt_suffix}.dll",
+        ]
+      } else {
+        data += [
+          "$root_out_dir/msvcp120${vcrt_suffix}.dll",
+          "$root_out_dir/msvcr120${vcrt_suffix}.dll",
+        ]
+      }
       if (is_asan) {
-        data += [ "//third_party/llvm-build/Release+Asserts/lib/clang/3.8.0/lib/windows/clang_rt.asan_dynamic-i386.dll" ]
+        data += [ "//third_party/llvm-build/Release+Asserts/lib/clang/3.9.0/lib/windows/clang_rt.asan_dynamic-i386.dll" ]
       }
     }
 
@@ -1157,6 +1260,7 @@
       "trace_event/malloc_dump_provider.cc",
       "trace_event/malloc_dump_provider.h",
     ]
+    libs = [ "bsm" ]
   }
 
   # Mac or iOS.
@@ -1187,7 +1291,6 @@
     sources += [
       "trace_event/malloc_dump_provider.cc",
       "trace_event/malloc_dump_provider.h",
-      "trace_event/process_memory_maps_dump_provider.cc",
     ]
 
     if (is_asan || is_lsan || is_msan || is_tsan) {
@@ -1272,8 +1375,8 @@
       "mac/call_with_eh_frame.h",
       "mac/foundation_util.h",
       "mac/foundation_util.mm",
-      "mac/mac_logging.cc",
       "mac/mac_logging.h",
+      "mac/mac_logging.mm",
       "mac/mach_logging.cc",
       "mac/mach_logging.h",
       "mac/objc_property_releaser.h",
@@ -1287,6 +1390,7 @@
       "mac/scoped_nsobject.h",
       "mac/scoped_objc_class_swizzler.h",
       "mac/scoped_objc_class_swizzler.mm",
+      "memory/shared_memory_posix.cc",
       "message_loop/message_pump_mac.h",
       "message_loop/message_pump_mac.mm",
       "process/memory_stubs.cc",
@@ -1468,85 +1572,6 @@
   }
 }
 
-component("prefs") {
-  sources = [
-    "prefs/default_pref_store.cc",
-    "prefs/default_pref_store.h",
-    "prefs/json_pref_store.cc",
-    "prefs/json_pref_store.h",
-    "prefs/overlay_user_pref_store.cc",
-    "prefs/overlay_user_pref_store.h",
-    "prefs/pref_change_registrar.cc",
-    "prefs/pref_change_registrar.h",
-    "prefs/pref_member.cc",
-    "prefs/pref_member.h",
-    "prefs/pref_notifier_impl.cc",
-    "prefs/pref_notifier_impl.h",
-    "prefs/pref_registry.cc",
-    "prefs/pref_registry.h",
-    "prefs/pref_registry_simple.cc",
-    "prefs/pref_registry_simple.h",
-    "prefs/pref_service.cc",
-    "prefs/pref_service.h",
-    "prefs/pref_service_factory.cc",
-    "prefs/pref_service_factory.h",
-    "prefs/pref_store.cc",
-    "prefs/pref_store.h",
-    "prefs/pref_value_map.cc",
-    "prefs/pref_value_map.h",
-    "prefs/pref_value_store.cc",
-    "prefs/pref_value_store.h",
-    "prefs/scoped_user_pref_update.cc",
-    "prefs/scoped_user_pref_update.h",
-    "prefs/value_map_pref_store.cc",
-    "prefs/value_map_pref_store.h",
-  ]
-  if (!is_ios) {
-    sources += [
-      "prefs/base_prefs_export.h",
-      "prefs/persistent_pref_store.h",
-      "prefs/pref_filter.h",
-      "prefs/pref_notifier.h",
-      "prefs/pref_observer.h",
-      "prefs/writeable_pref_store.h",
-    ]
-  }
-
-  defines = [ "BASE_PREFS_IMPLEMENTATION" ]
-
-  deps = [
-    ":base",
-  ]
-
-  if (!is_debug) {
-    configs -= [ "//build/config/compiler:default_optimization" ]
-    configs += [ "//build/config/compiler:optimize_max" ]
-  }
-}
-
-source_set("prefs_test_support") {
-  testonly = true
-  sources = [
-    "prefs/mock_pref_change_callback.cc",
-    "prefs/mock_pref_change_callback.h",
-    "prefs/pref_store_observer_mock.cc",
-    "prefs/pref_store_observer_mock.h",
-    "prefs/testing_pref_service.cc",
-    "prefs/testing_pref_service.h",
-    "prefs/testing_pref_store.cc",
-    "prefs/testing_pref_store.h",
-  ]
-
-  public_deps = [
-    ":prefs",
-  ]
-  deps = [
-    ":base",
-    "//testing/gmock",
-    "//testing/gtest",
-  ]
-}
-
 source_set("message_loop_tests") {
   testonly = true
   sources = [
@@ -1581,6 +1606,15 @@
     ]
   }
 
+  loadable_module("scoped_handle_test_dll") {
+    sources = [
+      "win/scoped_handle_test_dll.cc",
+    ]
+    deps = [
+      ":base",
+    ]
+  }
+
   if (target_cpu == "x64") {
     # Must be a shared library so that it can be unloaded during testing.
     shared_library("base_profiler_test_support_library") {
@@ -1594,12 +1628,13 @@
   }
 }
 
-# TODO(GYP): Delete this after we've converted everything to GN.
-# The _run targets exist only for compatibility w/ GYP.
-group("base_unittests_run") {
-  testonly = true
-  deps = [
-    ":base_unittests",
+bundle_data("base_unittests_bundle_data") {
+  sources = [
+    "test/data",
+  ]
+  outputs = [
+    "{{bundle_resources_dir}}/" +
+        "{{source_root_relative_dir}}/{{source_file_part}}",
   ]
 }
 
@@ -1689,6 +1724,7 @@
     "mac/foundation_util_unittest.mm",
     "mac/libdispatch_task_runner_unittest.cc",
     "mac/mac_util_unittest.mm",
+    "mac/mach_port_broker_unittest.cc",
     "mac/objc_property_releaser_unittest.mm",
     "mac/scoped_nsobject_unittest.mm",
     "mac/scoped_objc_class_swizzler_unittest.mm",
@@ -1702,10 +1738,10 @@
     "memory/ptr_util_unittest.cc",
     "memory/ref_counted_memory_unittest.cc",
     "memory/ref_counted_unittest.cc",
-    "memory/scoped_ptr_unittest.cc",
     "memory/scoped_vector_unittest.cc",
     "memory/shared_memory_mac_unittest.cc",
     "memory/shared_memory_unittest.cc",
+    "memory/shared_memory_win_unittest.cc",
     "memory/singleton_unittest.cc",
     "memory/weak_ptr_unittest.cc",
     "message_loop/message_loop_task_runner_unittest.cc",
@@ -1720,6 +1756,9 @@
     "metrics/histogram_snapshot_manager_unittest.cc",
     "metrics/histogram_unittest.cc",
     "metrics/metrics_hashes_unittest.cc",
+    "metrics/persistent_histogram_allocator_unittest.cc",
+    "metrics/persistent_memory_allocator_unittest.cc",
+    "metrics/persistent_sample_map_unittest.cc",
     "metrics/sample_map_unittest.cc",
     "metrics/sample_vector_unittest.cc",
     "metrics/sparse_histogram_unittest.cc",
@@ -1733,16 +1772,6 @@
     "posix/file_descriptor_shuffle_unittest.cc",
     "posix/unix_domain_socket_linux_unittest.cc",
     "power_monitor/power_monitor_unittest.cc",
-    "prefs/default_pref_store_unittest.cc",
-    "prefs/json_pref_store_unittest.cc",
-    "prefs/overlay_user_pref_store_unittest.cc",
-    "prefs/pref_change_registrar_unittest.cc",
-    "prefs/pref_member_unittest.cc",
-    "prefs/pref_notifier_impl_unittest.cc",
-    "prefs/pref_service_unittest.cc",
-    "prefs/pref_value_map_unittest.cc",
-    "prefs/pref_value_store_unittest.cc",
-    "prefs/scoped_user_pref_update_unittest.cc",
     "process/memory_unittest.cc",
     "process/memory_unittest_mac.h",
     "process/memory_unittest_mac.mm",
@@ -1786,6 +1815,9 @@
     "system_monitor/system_monitor_unittest.cc",
     "task/cancelable_task_tracker_unittest.cc",
     "task_runner_util_unittest.cc",
+    "task_scheduler/scheduler_lock_unittest.cc",
+    "task_scheduler/sequence_sort_key_unittest.cc",
+    "task_scheduler/sequence_unittest.cc",
     "template_util_unittest.cc",
     "test/histogram_tester_unittest.cc",
     "test/icu_test_util.cc",
@@ -1824,7 +1856,6 @@
     "trace_event/memory_allocator_dump_unittest.cc",
     "trace_event/memory_dump_manager_unittest.cc",
     "trace_event/process_memory_dump_unittest.cc",
-    "trace_event/process_memory_totals_dump_provider_unittest.cc",
     "trace_event/trace_config_memory_test_util.h",
     "trace_event/trace_config_unittest.cc",
     "trace_event/trace_event_argument_unittest.cc",
@@ -1856,6 +1887,7 @@
     "win/shortcut_unittest.cc",
     "win/startup_information_unittest.cc",
     "win/win_util_unittest.cc",
+    "win/windows_version_unittest.cc",
     "win/wrapped_window_proc_unittest.cc",
   ]
 
@@ -1863,9 +1895,6 @@
     ":base",
     ":i18n",
     ":message_loop_tests",
-    ":prefs",
-    ":prefs_test_support",
-    "//base/allocator",
     "//base/test:run_all_unittests",
     "//base/test:test_support",
     "//base/third_party/dynamic_annotations",
@@ -1874,6 +1903,13 @@
     "//third_party/icu",
   ]
 
+  public_deps = [
+    ":base_unittests_bundle_data",
+  ]
+
+  # Some unittests depend on the ALLOCATOR_SHIM macro.
+  configs += [ "//base/allocator:allocator_shim_define" ]
+
   data = [
     "test/data/",
   ]
@@ -1925,7 +1961,10 @@
 
   if (is_linux) {
     sources -= [ "file_version_info_unittest.cc" ]
-    sources += [ "nix/xdg_util_unittest.cc" ]
+
+    if (is_desktop_linux) {
+      sources += [ "nix/xdg_util_unittest.cc" ]
+    }
 
     deps += [ "//base/test:malloc_wrapper" ]
 
@@ -1939,11 +1978,7 @@
     }
   }
 
-  if (is_linux || is_android) {
-    sources += [ "trace_event/process_memory_maps_dump_provider_unittest.cc" ]
-  }
-
-  if (!is_linux || use_ozone) {
+  if (!use_glib) {
     sources -= [ "message_loop/message_pump_glib_unittest.cc" ]
   }
 
@@ -1962,9 +1997,16 @@
     set_sources_assignment_filter(sources_assignment_filter)
   }
 
-  if (is_win && target_cpu == "x64") {
-    sources += [ "profiler/win32_stack_frame_unwinder_unittest.cc" ]
-    deps += [ ":base_profiler_test_support_library" ]
+  if (is_win) {
+    deps += [ "//base:scoped_handle_test_dll" ]
+    if (target_cpu == "x64") {
+      sources += [ "profiler/win32_stack_frame_unwinder_unittest.cc" ]
+      deps += [ ":base_profiler_test_support_library" ]
+    }
+  }
+
+  if (use_experimental_allocator_shim) {
+    sources += [ "allocator/allocator_shim_unittest.cc" ]
   }
 
   # TODO(jschuh): crbug.com/167187 fix size_t to int truncations.
@@ -1980,13 +2022,37 @@
   }
 }
 
+action("build_date") {
+  script = "//build/write_build_date_header.py"
+
+  # Force recalculation if there's been a change.
+  inputs = [
+    "//build/util/LASTCHANGE",
+  ]
+  outputs = [
+    "$target_gen_dir/generated_build_date.h",
+  ]
+
+  args =
+      [ rebase_path("$target_gen_dir/generated_build_date.h", root_build_dir) ]
+
+  if (is_official_build) {
+    args += [ "official" ]
+  } else {
+    args += [ "default" ]
+  }
+
+  if (override_build_date != "N/A") {
+    args += [ override_build_date ]
+  }
+}
+
 if (enable_nocompile_tests) {
   nocompile_test("base_nocompile_tests") {
     sources = [
       "bind_unittest.nc",
       "callback_list_unittest.nc",
       "callback_unittest.nc",
-      "memory/scoped_ptr_unittest.nc",
       "memory/weak_ptr_unittest.nc",
     ]
 
@@ -2046,7 +2112,7 @@
   android_library("base_java") {
     srcjar_deps = [
       ":base_android_java_enums_srcjar",
-      ":base_multidex_gen",
+      ":base_build_config_gen",
       ":base_native_libraries_gen",
     ]
 
@@ -2057,9 +2123,10 @@
 
     DEPRECATED_java_in_dir = "android/java/src"
 
-    # A new version of NativeLibraries.java (with the actual correct values)
-    # will be created when creating an apk.
+    # New versions of BuildConfig.java and NativeLibraries.java
+    # (with the actual correct values) will be created when creating an apk.
     jar_excluded_patterns = [
+      "*/BuildConfig.class",
       "*/NativeLibraries.class",
       "*/NativeLibraries##*.class",
     ]
@@ -2094,6 +2161,7 @@
       "//third_party/robolectric:android-all-4.3_r2-robolectric-0",
       "//third_party/robolectric:robolectric_java",
     ]
+    srcjar_deps = [ ":base_build_config_gen" ]
   }
 
   # GYP: //base.gyp:base_junit_tests
@@ -2102,6 +2170,9 @@
       "android/junit/src/org/chromium/base/BaseChromiumApplicationTest.java",
       "android/junit/src/org/chromium/base/LogTest.java",
       "test/android/junit/src/org/chromium/base/test/util/DisableIfTest.java",
+      "test/android/junit/src/org/chromium/base/test/util/MinAndroidSdkLevelSkipCheckTest.java",
+      "test/android/junit/src/org/chromium/base/test/util/RestrictionSkipCheckTest.java",
+      "test/android/junit/src/org/chromium/base/test/util/SkipCheckTest.java",
     ]
     deps = [
       ":base_java",
@@ -2123,15 +2194,12 @@
     ]
   }
 
-  # GYP: //base/base.gyp:base_multidex_gen
-  java_cpp_template("base_multidex_gen") {
+  # GYP: //base/base.gyp:base_build_config_gen
+  java_cpp_template("base_build_config_gen") {
     sources = [
-      "android/java/templates/ChromiumMultiDex.template",
+      "android/java/templates/BuildConfig.template",
     ]
-    if (is_debug) {
-      defines = [ "MULTIDEX_CONFIGURATION_Debug" ]
-    }
-    package_name = "org/chromium/base/multidex"
+    package_name = "org/chromium/base"
   }
 
   # GYP: //base/base.gyp:base_native_libraries_gen
diff --git a/base/allocator/BUILD.gn b/base/allocator/BUILD.gn
index 9d09a35..887d544 100644
--- a/base/allocator/BUILD.gn
+++ b/base/allocator/BUILD.gn
@@ -2,6 +2,7 @@
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.
 
+import("//build/buildflag_header.gni")
 import("//build/config/allocator.gni")
 import("//build/config/compiler/compiler.gni")
 
@@ -16,20 +17,22 @@
   enable_debugallocation = is_debug
 }
 
-# Only executables and not libraries should depend on the allocator target;
-# only the application (the final executable) knows what allocator makes sense.
 # This "allocator" meta-target will forward to the default allocator according
 # to the build settings.
 group("allocator") {
-  public_deps = []
-  if (use_allocator == "tcmalloc") {
-    public_deps += [ ":tcmalloc" ]
-  }
+  if (!is_nacl) {
+    deps = []
 
-  # This condition expresses the win_use_allocator_shim in the GYP build.
-  if (is_win && !is_component_build && visual_studio_version != "2015") {
-    public_deps += [ ":allocator_shim" ]
-  }
+    if (use_allocator == "tcmalloc") {
+      deps += [ ":tcmalloc" ]
+    }
+
+    # This condition expresses the win_use_allocator_shim in the GYP build.
+    if (is_win && !is_component_build && visual_studio_version != "2015") {
+      deps += [ ":allocator_shim" ]
+      all_dependent_configs = [ ":nocmt" ]
+    }
+  }  # !is_nacl
 }
 
 # This config defines ALLOCATOR_SHIM in the same conditions that the allocator
@@ -45,13 +48,17 @@
 }
 
 config("tcmalloc_flags") {
+  defines = []
   if (enable_debugallocation) {
-    defines = [
+    defines += [
       # Use debugallocation for Debug builds to catch problems early
       # and cleanly, http://crbug.com/30715 .
       "TCMALLOC_FOR_DEBUGALLOCATION",
     ]
   }
+  if (use_experimental_allocator_shim) {
+    defines += [ "TCMALLOC_DONT_REPLACE_SYSTEM_ALLOC" ]
+  }
   if (is_clang) {
     cflags = [
       # tcmalloc initializes some fields in the wrong order.
@@ -107,10 +114,8 @@
       configs -= [ "//build/config/compiler:chromium_code" ]
       configs += [ "//build/config/compiler:no_chromium_code" ]
 
-      public_configs = [ ":nocmt" ]
       deps = [
         ":prep_libc",
-        "//base",
       ]
     }
   }
@@ -290,3 +295,35 @@
     deps += [ "//base/third_party/dynamic_annotations" ]
   }
 }  # use_allocator == "tcmalloc"
+
+buildflag_header("features") {
+  header = "features.h"
+  flags = [ "USE_EXPERIMENTAL_ALLOCATOR_SHIM=$use_experimental_allocator_shim" ]
+}
+
+if (use_experimental_allocator_shim) {
+  source_set("unified_allocator_shim") {
+    # TODO(primiano): support other platforms, currently this works only on
+    # Linux/CrOS. http://crbug.com/550886 .
+    configs += [ "//base:base_implementation" ]  # for BASE_EXPORT
+    visibility = [ "//base:base" ]
+    sources = [
+      "allocator_shim.cc",
+      "allocator_shim.h",
+      "allocator_shim_internals.h",
+      "allocator_shim_override_cpp_symbols.h",
+      "allocator_shim_override_libc_symbols.h",
+    ]
+    if (is_linux && use_allocator == "tcmalloc") {
+      sources += [
+        "allocator_shim_default_dispatch_to_tcmalloc.cc",
+        "allocator_shim_override_glibc_weak_symbols.h",
+      ]
+      deps = [
+        ":tcmalloc",
+      ]
+    } else if (is_linux && use_allocator == "none") {
+      sources += [ "allocator_shim_default_dispatch_to_glibc.cc" ]
+    }
+  }
+}
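
A note on the new `features` target above: `buildflag_header()` generates
`base/allocator/features.h`, which surfaces the GN flag to C++ code as
`BUILDFLAG(USE_EXPERIMENTAL_ALLOCATOR_SHIM)`. A minimal consumer sketch,
assuming the standard `build/buildflag.h` macro; the function here is
illustrative, not part of this patch:

```cpp
// Hypothetical consumer of the generated buildflag header.
#include "base/allocator/features.h"

void MaybeInstallAllocatorHooks() {
#if BUILDFLAG(USE_EXPERIMENTAL_ALLOCATOR_SHIM)
  // The unified shim is compiled in (currently Linux/CrOS only), so it is
  // safe to intercept malloc/free from here.
#else
  // Shim not built (e.g. NaCl, other platforms): keep this a no-op.
#endif
}
```
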
diff --git a/base/allocator/OWNERS b/base/allocator/OWNERS
index 5d9997b..f26394a 100644
--- a/base/allocator/OWNERS
+++ b/base/allocator/OWNERS
@@ -1,3 +1,4 @@
+primiano@chromium.org
 wfh@chromium.org
 
 # For changes to tcmalloc it is advisable to ask jar@chromium.org
diff --git a/base/allocator/README b/base/allocator/README
deleted file mode 100644
index 8a5595f..0000000
--- a/base/allocator/README
+++ /dev/null
@@ -1,56 +0,0 @@
-Notes about the Chrome memory allocator.
-
-Background
-----------
-We use this library as a generic way to fork into any of several allocators.
-Currently we can, at runtime, switch between:
-   the default windows allocator
-   the windows low-fragmentation-heap
-   tcmalloc
-
-The mechanism for hooking LIBCMT in windows is rather tricky.  The core
-problem is that by default, the windows library does not declare malloc and
-free as weak symbols.  Because of this, they cannot be overriden.  To work
-around this, we start with the LIBCMT.LIB, and manually remove all allocator
-related functions from it using the visual studio library tool.  Once removed,
-we can now link against the library and provide custom versions of the 
-allocator related functionality.
-
-
-Source code
------------
-This directory contains just the allocator (i.e. shim) layer that switches
-between the different underlying memory allocation implementations.
-
-The tcmalloc library originates outside of Chromium and exists in
-../../third_party/tcmalloc (currently, the actual location is defined in the
-allocator.gyp file). The third party sources use a vendor-branch SCM pattern to
-track Chromium-specific changes independently from upstream changes.
-
-The general intent is to push local changes upstream so that over
-time we no longer need any forked files.
-
-
-Adding a new allocator
-----------------------
-Adding a new allocator requires definition of the following five functions:
-
-  extern "C" {
-    bool init();
-    void* malloc(size_t s);
-    void* realloc(void* p, size_t s);
-    void free(void* s);
-    size_t msize(void* p);
-  }
-
-All other allocation related functions (new/delete/calloc/etc) have been
-implemented generically to work across all allocators.
-
-
-Usage
------
-You can use the different allocators by setting the environment variable
-CHROME_ALLOCATOR to:
-   "tcmalloc"  - TC Malloc (default)
-   "winheap"   - Windows default heap
-   "winlfh"    - Windows Low-Fragmentation heap
diff --git a/base/allocator/README.md b/base/allocator/README.md
new file mode 100644
index 0000000..968e6b0
--- /dev/null
+++ b/base/allocator/README.md
@@ -0,0 +1,106 @@
+This document describes how malloc / new calls are routed on the various
+Chrome platforms.
+
+Bear in mind that the Chromium codebase does not always just use `malloc()`.
+Some examples:
+ - Large parts of the renderer (Blink) use two home-brewed allocators,
+   PartitionAlloc and BlinkGC (Oilpan).
+ - Some subsystems, such as the V8 JavaScript engine, handle memory management
+   autonomously.
+ - Various parts of the codebase use abstractions such as `SharedMemory` or
+   `DiscardableMemory` which, similarly to the above, have their own page-level
+   memory management.
+
+Background
+----------
+The `allocator` target defines at compile-time the platform-specific choice of
+the allocator and extra hooks which service calls to malloc/new. The relevant
+build-time flags involved are `use_allocator` and `win_use_allocator_shim`.
+
+The default choices are as follows:
+
+**Windows**  
+`use_allocator: winheap`, the default Windows heap.
+Additionally, `static_library` (i.e. non-component) builds have a shim
+layer wrapping malloc/new, which is controlled by `win_use_allocator_shim`.  
+The shim layer provides extra security features, such as preventing large
+allocations that can hit signed vs. unsigned bugs in third_party code.
+
+**Linux Desktop / CrOS**  
+`use_allocator: tcmalloc`, a forked copy of tcmalloc which resides in
+`third_party/tcmalloc/chromium`. Setting `use_allocator: none` causes the build
+to fall back to the system (Glibc) symbols.
+
+**Android**  
+`use_allocator: none`, always use the allocator symbols coming from Android's
+libc (Bionic). As it is developed as part of the OS, it is considered to be
+optimized for small devices and more memory-efficient than other choices.  
+The actual implementation backing malloc symbols in Bionic is up to the board
+config and can vary (typically *dlmalloc* or *jemalloc* on most Nexus devices).
+
+**Mac/iOS**  
+`use_allocator: none`, we always use the system's allocator implementation.
+
+In addition, when building for `asan` / `msan` / `syzyasan` / `valgrind`,
+both the allocator and the shim layer are disabled.
+
+Layering and build deps
+-----------------------
+The `allocator` target provides both the source files for tcmalloc (where
+applicable) and the linker flags required for the Windows shim layer.
+The `base` target is (almost) the only one depending on `allocator`. No other
+targets should depend on it, with the exception of the very few executables /
+dynamic libraries that don't depend, either directly or indirectly, on `base`
+within the scope of a linker unit.
+
+More importantly, **no other place outside of `/base` should depend on the
+specific allocator** (e.g., directly include `third_party/tcmalloc`).
+If such a functional dependency is required, it should be achieved using
+abstractions in `base` (see `/base/allocator/allocator_extension.h` and
+`/base/memory/`).
+
+**Why does `base` depend on `allocator`?**  
+Because it needs to provide services that depend on the actual allocator
+implementation. In the past `base` used to pretend to be allocator-agnostic
+and get the dependencies injected by other layers. This ended up being an
+inconsistent mess.
+See the [allocator cleanup doc][url-allocator-cleanup] for more context.
+
+Linker unit targets (executables and shared libraries) that depend in some way
+on `base` (most of the targets in the codebase) automatically get the correct
+set of linker flags to pull in tcmalloc or the Windows shim layer.
+
+
+Source code
+-----------
+This directory contains just the allocator (i.e. shim) layer that switches
+between the different underlying memory allocation implementations.
+
+The tcmalloc library originates outside of Chromium and exists in
+`../../third_party/tcmalloc` (currently, the actual location is defined in the
+allocator.gyp file). The third party sources use a vendor-branch SCM pattern to
+track Chromium-specific changes independently from upstream changes.
+
+The general intent is to push local changes upstream so that over
+time we no longer need any forked files.
+
+
+Appendixes
+----------
+**How does the Windows shim layer replace the malloc symbols?**  
+The mechanism for hooking LIBCMT in Windows is rather tricky.  The core
+problem is that by default, the Windows library does not declare malloc and
+free as weak symbols.  Because of this, they cannot be overridden.  To work
+around this, we start with the LIBCMT.LIB, and manually remove all allocator
+related functions from it using the visual studio library tool.  Once removed,
+we can now link against the library and provide custom versions of the
+allocator related functionality.
+See the script `prep_libc.py` in this folder.
+
+Related links
+-------------
+- [Allocator Cleanup Doc - Jan 2016][url-allocator-cleanup]
+- [Proposal to use PartitionAlloc as default allocator](https://crbug.com/339604)
+- [Memory-Infra: Tools to profile memory usage in Chrome](components/tracing/docs/memory_infra.md)
+
+[url-allocator-cleanup]: https://docs.google.com/document/d/1V77Kgp_4tfaaWPEZVxNevoD02wXiatnAv7Ssgr0hmjg/edit?usp=sharing
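
The layering rule in this README ("no other place outside of `/base` should
depend on the specific allocator") is exactly what the allocator_extension
changes later in this patch enforce: callers go through `base` and stay
allocator-agnostic. A hedged sketch of the sanctioned pattern; the function
name and the tcmalloc property string are illustrative:

```cpp
#include "base/allocator/allocator_extension.h"

// Allowed: the base abstraction forwards to tcmalloc when that is the
// configured allocator and degrades to a no-op / false otherwise.
void TrimMemoryInBackground() {
  base::allocator::ReleaseFreeMemory();

  size_t heap_size = 0;
  if (base::allocator::GetNumericProperty("generic.heap_size", &heap_size)) {
    // ... report |heap_size| to a memory dashboard ...
  }
}

// Not allowed outside //base: including tcmalloc headers directly, e.g.
// "third_party/tcmalloc/chromium/src/gperftools/malloc_extension.h".
```
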
diff --git a/base/allocator/allocator.gyp b/base/allocator/allocator.gyp
index 45a95bb..d906eea 100644
--- a/base/allocator/allocator.gyp
+++ b/base/allocator/allocator.gyp
@@ -22,15 +22,22 @@
     'disable_debugallocation%': 0,
   },
   'targets': [
-    # Only executables and not libraries should depend on the
-    # allocator target; only the application (the final executable)
-    # knows what allocator makes sense.
+    # The only targets that should depend on allocator are 'base' and
+    # executables that don't depend, directly or indirectly, on base (a few).
+    # All the other targets get a transitive dependency on this target via base.
     {
       'target_name': 'allocator',
-      # TODO(primiano): This should be type: none for the noop cases (an empty
-      # static lib can confuse some gyp generators). Fix it once the refactoring
-      # (crbug.com/564618) bring this file to a saner state (fewer conditions).
-      'type': 'static_library',
+      'variables': {
+        'conditions': [
+          ['use_allocator!="none" or (OS=="win" and win_use_allocator_shim==1)', {
+            'allocator_target_type%': 'static_library',
+          }, {
+            'allocator_target_type%': 'none',
+          }],
+        ],
+      },
+      'type': '<(allocator_target_type)',
+      'toolsets': ['host', 'target'],
       'conditions': [
         ['OS=="win" and win_use_allocator_shim==1', {
           'msvs_settings': {
@@ -51,6 +58,16 @@
           'sources': [
             'allocator_shim_win.cc',
           ],
+          'link_settings': {
+            'msvs_settings': {
+              'VCLinkerTool': {
+                'IgnoreDefaultLibraryNames': ['libcmtd.lib', 'libcmt.lib'],
+                'AdditionalDependencies': [
+                  '<(SHARED_INTERMEDIATE_DIR)/allocator/libcmt.lib'
+                ],
+              },
+            },
+          },
           'configurations': {
             'Debug_Base': {
               'msvs_settings': {
@@ -60,20 +77,6 @@
               },
             },
           },
-          'direct_dependent_settings': {
-            'configurations': {
-              'Common_Base': {
-                'msvs_settings': {
-                  'VCLinkerTool': {
-                    'IgnoreDefaultLibraryNames': ['libcmtd.lib', 'libcmt.lib'],
-                    'AdditionalDependencies': [
-                      '<(SHARED_INTERMEDIATE_DIR)/allocator/libcmt.lib'
-                    ],
-                  },
-                },
-              },
-            },
-          },
         }],  # OS=="win"
         ['use_allocator=="tcmalloc"', {
           # Disable the heap checker in tcmalloc.
@@ -337,6 +340,11 @@
                 '<(tcmalloc_dir)/src/profiler.cc',
               ],
             }],
+            ['use_experimental_allocator_shim==1', {
+              'defines': [
+                'TCMALLOC_DONT_REPLACE_SYSTEM_ALLOC',
+              ],
+            }]
           ],
           'configurations': {
             'Debug_Base': {
@@ -370,12 +378,26 @@
         }],
       ],  # conditions of 'allocator' target.
     },  # 'allocator' target.
+    {
+      # GN: //base/allocator:features
+      # When referenced from a target that might be compiled in the host
+      # toolchain, always refer to 'allocator_features#target'.
+      'target_name': 'allocator_features',
+      'includes': [ '../../build/buildflag_header.gypi' ],
+      'variables': {
+        'buildflag_header_path': 'base/allocator/features.h',
+        'buildflag_flags': [
+          'USE_EXPERIMENTAL_ALLOCATOR_SHIM=<(use_experimental_allocator_shim)',
+        ],
+      },
+    },  # 'allocator_features' target.
   ],  # targets.
   'conditions': [
-    ['OS=="win" and component!="shared_library"', {
+    ['OS=="win" and win_use_allocator_shim==1', {
       'targets': [
         {
           'target_name': 'libcmt',
+          'toolsets': ['host', 'target'],
           'type': 'none',
           'actions': [
             {
@@ -398,5 +420,39 @@
         },
       ],
     }],
+    ['use_experimental_allocator_shim==1', {
+      'targets': [
+        {
+          # GN: //base/allocator:unified_allocator_shim
+          'target_name': 'unified_allocator_shim',
+          'toolsets': ['host', 'target'],
+          'type': 'static_library',
+          'defines': [ 'BASE_IMPLEMENTATION' ],
+          'sources': [
+            'allocator_shim.cc',
+            'allocator_shim.h',
+            'allocator_shim_internals.h',
+            'allocator_shim_override_cpp_symbols.h',
+            'allocator_shim_override_libc_symbols.h',
+          ],
+          'include_dirs': [
+            '../..',
+          ],
+          'conditions': [
+            ['OS=="linux" and use_allocator=="tcmalloc"', {
+              'sources': [
+                'allocator_shim_default_dispatch_to_tcmalloc.cc',
+                'allocator_shim_override_glibc_weak_symbols.h',
+              ],
+            }],
+            ['OS=="linux" and use_allocator=="none"', {
+              'sources': [
+                'allocator_shim_default_dispatch_to_glibc.cc',
+              ],
+            }],
+          ]
+        },  # 'unified_allocator_shim' target.
+      ],
+    }]
   ],
 }
diff --git a/base/allocator/allocator_extension.cc b/base/allocator/allocator_extension.cc
index 4f0b3a9..9a3d114 100644
--- a/base/allocator/allocator_extension.cc
+++ b/base/allocator/allocator_extension.cc
@@ -6,34 +6,54 @@
 
 #include "base/logging.h"
 
+#if defined(USE_TCMALLOC)
+#include "third_party/tcmalloc/chromium/src/gperftools/heap-profiler.h"
+#include "third_party/tcmalloc/chromium/src/gperftools/malloc_extension.h"
+#include "third_party/tcmalloc/chromium/src/gperftools/malloc_hook.h"
+#endif
+
 namespace base {
 namespace allocator {
 
-namespace {
-ReleaseFreeMemoryFunction g_release_free_memory_function = nullptr;
-GetNumericPropertyFunction g_get_numeric_property_function = nullptr;
-}
-
 void ReleaseFreeMemory() {
-  if (g_release_free_memory_function)
-    g_release_free_memory_function();
+#if defined(USE_TCMALLOC)
+  ::MallocExtension::instance()->ReleaseFreeMemory();
+#endif
 }
 
 bool GetNumericProperty(const char* name, size_t* value) {
-  return g_get_numeric_property_function &&
-         g_get_numeric_property_function(name, value);
+#if defined(USE_TCMALLOC)
+  return ::MallocExtension::instance()->GetNumericProperty(name, value);
+#endif
+  return false;
 }
 
-void SetReleaseFreeMemoryFunction(
-    ReleaseFreeMemoryFunction release_free_memory_function) {
-  DCHECK(!g_release_free_memory_function);
-  g_release_free_memory_function = release_free_memory_function;
+bool IsHeapProfilerRunning() {
+#if defined(USE_TCMALLOC)
+  return ::IsHeapProfilerRunning();
+#endif
+  return false;
 }
 
-void SetGetNumericPropertyFunction(
-    GetNumericPropertyFunction get_numeric_property_function) {
-  DCHECK(!g_get_numeric_property_function);
-  g_get_numeric_property_function = get_numeric_property_function;
+void SetHooks(AllocHookFunc alloc_hook, FreeHookFunc free_hook) {
+// TODO(sque): Use allocator shim layer instead.
+#if defined(USE_TCMALLOC)
+  // Make sure no hooks get overwritten.
+  auto prev_alloc_hook = MallocHook::SetNewHook(alloc_hook);
+  if (alloc_hook)
+    DCHECK(!prev_alloc_hook);
+
+  auto prev_free_hook = MallocHook::SetDeleteHook(free_hook);
+  if (free_hook)
+    DCHECK(!prev_free_hook);
+#endif
+}
+
+int GetCallStack(void** stack, int max_stack_size) {
+#if defined(USE_TCMALLOC)
+  return MallocHook::GetCallerStackTrace(stack, max_stack_size, 0);
+#endif
+  return 0;
 }
 
 }  // namespace allocator
diff --git a/base/allocator/allocator_extension.h b/base/allocator/allocator_extension.h
index 3be2cea..9f2775a 100644
--- a/base/allocator/allocator_extension.h
+++ b/base/allocator/allocator_extension.h
@@ -13,8 +13,9 @@
 namespace base {
 namespace allocator {
 
-typedef void (*ReleaseFreeMemoryFunction)();
-typedef bool (*GetNumericPropertyFunction)(const char* name, size_t* value);
+// Callback types for alloc and free.
+using AllocHookFunc = void (*)(const void*, size_t);
+using FreeHookFunc = void (*)(const void*);
 
 // Request that the allocator release any free memory it knows about to the
 // system.
@@ -26,20 +27,23 @@
 // |name| or |value| cannot be NULL
 BASE_EXPORT bool GetNumericProperty(const char* name, size_t* value);
 
-// These settings allow specifying a callback used to implement the allocator
-// extension functions.  These are optional, but if set they must only be set
-// once.  These will typically called in an allocator-specific initialization
-// routine.
+BASE_EXPORT bool IsHeapProfilerRunning();
+
+// Register callbacks for alloc and free. Can only store one callback at a time
+// for each of alloc and free.
+BASE_EXPORT void SetHooks(AllocHookFunc alloc_hook, FreeHookFunc free_hook);
+
+// Attempts to unwind the call stack from the current location where this
+// function is being called. Must be called from a hook function registered
+// by calling SetHooks(), directly or indirectly.
 //
-// No threading promises are made.  The caller is responsible for making sure
-// these pointers are set before any other threads attempt to call the above
-// functions.
-
-BASE_EXPORT void SetReleaseFreeMemoryFunction(
-    ReleaseFreeMemoryFunction release_free_memory_function);
-
-BASE_EXPORT void SetGetNumericPropertyFunction(
-    GetNumericPropertyFunction get_numeric_property_function);
+// Arguments:
+//   stack:          pointer to a pre-allocated array of void*'s.
+//   max_stack_size: indicates the size of the array in |stack|.
+//
+// Returns the number of call stack frames stored in |stack|, or 0 if no call
+// stack information is available.
+BASE_EXPORT int GetCallStack(void** stack, int max_stack_size);
 
 }  // namespace allocator
 }  // namespace base
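
The reworked extension API above replaces the injected function pointers with
direct tcmalloc calls plus a single alloc/free hook pair. A usage sketch based
only on the declarations in this header; the `Record*` helpers and the
profiling-table comments are illustrative, not part of the patch:

```cpp
#include <cstddef>

#include "base/allocator/allocator_extension.h"

namespace {

// Illustrative alloc hook: capture the allocating call stack.
void RecordAlloc(const void* address, size_t size) {
  void* frames[32];
  int depth = base::allocator::GetCallStack(frames, 32);
  if (depth == 0)
    return;  // No unwind information available (e.g. tcmalloc not in use).
  // ... store (address, size, frames[0..depth)) in a profiling table ...
}

// Illustrative free hook: drop the address again.
void RecordFree(const void* address) {
  // ... erase |address| from the profiling table ...
}

}  // namespace

void StartHeapProfiling() {
  // Only one hook of each kind may be installed at a time; SetHooks()
  // DCHECKs when a non-null hook would overwrite an existing one.
  base::allocator::SetHooks(&RecordAlloc, &RecordFree);
}

void StopHeapProfiling() {
  base::allocator::SetHooks(nullptr, nullptr);
}
```
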
diff --git a/base/at_exit.cc b/base/at_exit.cc
index 0fba355..f9aa2d1 100644
--- a/base/at_exit.cc
+++ b/base/at_exit.cc
@@ -21,7 +21,8 @@
 // this for thread-safe access, since it will only be modified in testing.
 static AtExitManager* g_top_manager = NULL;
 
-AtExitManager::AtExitManager() : next_manager_(g_top_manager) {
+AtExitManager::AtExitManager()
+    : processing_callbacks_(false), next_manager_(g_top_manager) {
 // If multiple modules instantiate AtExitManagers they'll end up living in this
 // module... they have to coexist.
 #if !defined(COMPONENT_BUILD)
@@ -55,6 +56,7 @@
   }
 
   AutoLock lock(g_top_manager->lock_);
+  DCHECK(!g_top_manager->processing_callbacks_);
   g_top_manager->stack_.push(task);
 }
 
@@ -65,16 +67,28 @@
     return;
   }
 
-  AutoLock lock(g_top_manager->lock_);
-
-  while (!g_top_manager->stack_.empty()) {
-    base::Closure task = g_top_manager->stack_.top();
-    task.Run();
-    g_top_manager->stack_.pop();
+  // Callbacks may try to add new callbacks, so run them without holding
+  // |lock_|. Doing so is an error, caught by the DCHECK in RegisterTask(),
+  // but we handle it gracefully in release builds so we don't deadlock.
+  std::stack<base::Closure> tasks;
+  {
+    AutoLock lock(g_top_manager->lock_);
+    tasks.swap(g_top_manager->stack_);
+    g_top_manager->processing_callbacks_ = true;
   }
+
+  while (!tasks.empty()) {
+    base::Closure task = tasks.top();
+    task.Run();
+    tasks.pop();
+  }
+
+  // Expect that all callbacks have been run.
+  DCHECK(g_top_manager->stack_.empty());
 }
 
-AtExitManager::AtExitManager(bool shadow) : next_manager_(g_top_manager) {
+AtExitManager::AtExitManager(bool shadow)
+    : processing_callbacks_(false), next_manager_(g_top_manager) {
   DCHECK(shadow || !g_top_manager);
   g_top_manager = this;
 }
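
The ProcessCallbacksNow() change above exists because an at-exit callback may
itself call RegisterCallback(); running callbacks while holding |lock_| would
then self-deadlock on re-entry. A sketch of the hazardous pattern, assuming
the usual `AtExitManager::RegisterCallback(void (*)(void*), void*)` entry
point; the callback names are illustrative:

```cpp
#include "base/at_exit.h"

void InnerCleanup(void* /* param */) {}

void OuterCleanup(void* /* param */) {
  // Re-entrant registration: previously this deadlocked because |lock_| was
  // held while callbacks ran. Now the stack is swapped out and run without
  // the lock; the DCHECKs in RegisterTask() and ProcessCallbacksNow() flag
  // the late registration in debug builds, while in release builds the late
  // task is accepted but never runs.
  base::AtExitManager::RegisterCallback(&InnerCleanup, nullptr);
}

int main() {
  base::AtExitManager exit_manager;
  base::AtExitManager::RegisterCallback(&OuterCleanup, nullptr);
  return 0;  // ~AtExitManager runs OuterCleanup outside |lock_|.
}
```
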
diff --git a/base/at_exit.h b/base/at_exit.h
index 04e3f76..02e18ed 100644
--- a/base/at_exit.h
+++ b/base/at_exit.h
@@ -59,6 +59,7 @@
  private:
   base::Lock lock_;
   std::stack<base::Closure> stack_;
+  bool processing_callbacks_;
   AtExitManager* next_manager_;  // Stack of managers to allow shadowing.
 
   DISALLOW_COPY_AND_ASSIGN(AtExitManager);
diff --git a/base/base.gyp b/base/base.gyp
index dc484f4..d7f3519 100644
--- a/base/base.gyp
+++ b/base/base.gyp
@@ -21,8 +21,11 @@
         'optimize': 'max',
       },
       'dependencies': [
+        'allocator/allocator.gyp:allocator',
+        'allocator/allocator.gyp:allocator_features#target',
         'base_debugging_flags#target',
         'base_static',
+        'base_build_date#target',
         '../testing/gtest.gyp:gtest_prod',
         '../third_party/modp_b64/modp_b64.gyp:modp_b64',
         'third_party/dynamic_annotations/dynamic_annotations.gyp:dynamic_annotations',
@@ -137,6 +140,14 @@
             }],
           ],
         }],
+        ['use_sysroot==0 and (OS == "android" or OS == "linux")', {
+          'link_settings': {
+            'libraries': [
+              # Needed for <atomic> when building with newer C++ library.
+              '-latomic',
+            ],
+          },
+        }],
         ['OS == "win"', {
           # Specify delayload for base.dll.
           'msvs_settings': {
@@ -192,6 +203,7 @@
               '$(SDKROOT)/System/Library/Frameworks/Foundation.framework',
               '$(SDKROOT)/System/Library/Frameworks/IOKit.framework',
               '$(SDKROOT)/System/Library/Frameworks/Security.framework',
+              '$(SDKROOT)/usr/lib/libbsm.dylib',
             ],
           },
         }],
@@ -224,6 +236,9 @@
             'sync_socket_posix.cc',
           ]
         }],
+        ['use_experimental_allocator_shim==1', {
+          'dependencies': [ 'allocator/allocator.gyp:unified_allocator_shim']
+        }],
       ],
       'sources': [
         'auto_reset.h',
@@ -311,82 +326,6 @@
       ],
     },
     {
-      'target_name': 'base_prefs',
-      'type': '<(component)',
-      'variables': {
-        'enable_wexit_time_destructors': 1,
-        'optimize': 'max',
-      },
-      'dependencies': [
-        'base',
-      ],
-      'export_dependent_settings': [
-        'base',
-      ],
-      'defines': [
-        'BASE_PREFS_IMPLEMENTATION',
-      ],
-      'sources': [
-        'prefs/base_prefs_export.h',
-        'prefs/default_pref_store.cc',
-        'prefs/default_pref_store.h',
-        'prefs/json_pref_store.cc',
-        'prefs/json_pref_store.h',
-        'prefs/overlay_user_pref_store.cc',
-        'prefs/overlay_user_pref_store.h',
-        'prefs/persistent_pref_store.h',
-        'prefs/pref_change_registrar.cc',
-        'prefs/pref_change_registrar.h',
-        'prefs/pref_filter.h',
-        'prefs/pref_member.cc',
-        'prefs/pref_member.h',
-        'prefs/pref_notifier.h',
-        'prefs/pref_notifier_impl.cc',
-        'prefs/pref_notifier_impl.h',
-        'prefs/pref_observer.h',
-        'prefs/pref_registry.cc',
-        'prefs/pref_registry.h',
-        'prefs/pref_registry_simple.cc',
-        'prefs/pref_registry_simple.h',
-        'prefs/pref_service.cc',
-        'prefs/pref_service.h',
-        'prefs/pref_service_factory.cc',
-        'prefs/pref_service_factory.h',
-        'prefs/pref_store.cc',
-        'prefs/pref_store.h',
-        'prefs/pref_value_map.cc',
-        'prefs/pref_value_map.h',
-        'prefs/pref_value_store.cc',
-        'prefs/pref_value_store.h',
-        'prefs/scoped_user_pref_update.cc',
-        'prefs/scoped_user_pref_update.h',
-        'prefs/value_map_pref_store.cc',
-        'prefs/value_map_pref_store.h',
-        'prefs/writeable_pref_store.h',
-      ],
-      'includes': [
-        '../build/android/increase_size_for_speed.gypi',
-      ],
-    },
-    {
-      'target_name': 'base_prefs_test_support',
-      'type': 'static_library',
-      'dependencies': [
-        'base',
-        'base_prefs',
-        '../testing/gmock.gyp:gmock',
-      ],
-      'sources': [
-        'prefs/mock_pref_change_callback.cc',
-        'prefs/pref_store_observer_mock.cc',
-        'prefs/pref_store_observer_mock.h',
-        'prefs/testing_pref_service.cc',
-        'prefs/testing_pref_service.h',
-        'prefs/testing_pref_store.cc',
-        'prefs/testing_pref_store.h',
-      ],
-    },
-    {
       # This is the subset of files from base that should not be used with a
       # dynamic library. Note that this library cannot depend on base because
       # base depends on base_static.
@@ -515,6 +454,7 @@
         'mac/foundation_util_unittest.mm',
         'mac/libdispatch_task_runner_unittest.cc',
         'mac/mac_util_unittest.mm',
+        'mac/mach_port_broker_unittest.cc',
         'mac/objc_property_releaser_unittest.mm',
         'mac/scoped_nsobject_unittest.mm',
         'mac/scoped_objc_class_swizzler_unittest.mm',
@@ -530,11 +470,10 @@
         'memory/ptr_util_unittest.cc',
         'memory/ref_counted_memory_unittest.cc',
         'memory/ref_counted_unittest.cc',
-        'memory/scoped_ptr_unittest.cc',
-        'memory/scoped_ptr_unittest.nc',
         'memory/scoped_vector_unittest.cc',
-        'memory/shared_memory_unittest.cc',
         'memory/shared_memory_mac_unittest.cc',
+        'memory/shared_memory_unittest.cc',
+        'memory/shared_memory_win_unittest.cc',
         'memory/singleton_unittest.cc',
         'memory/weak_ptr_unittest.cc',
         'memory/weak_ptr_unittest.nc',
@@ -551,6 +490,9 @@
         'metrics/histogram_snapshot_manager_unittest.cc',
         'metrics/histogram_unittest.cc',
         'metrics/metrics_hashes_unittest.cc',
+        'metrics/persistent_histogram_allocator_unittest.cc',
+        'metrics/persistent_memory_allocator_unittest.cc',
+        'metrics/persistent_sample_map_unittest.cc',
         'metrics/sample_map_unittest.cc',
         'metrics/sample_vector_unittest.cc',
         'metrics/sparse_histogram_unittest.cc',
@@ -564,17 +506,6 @@
         'posix/file_descriptor_shuffle_unittest.cc',
         'posix/unix_domain_socket_linux_unittest.cc',
         'power_monitor/power_monitor_unittest.cc',
-        'prefs/default_pref_store_unittest.cc',
-        'prefs/json_pref_store_unittest.cc',
-        'prefs/mock_pref_change_callback.h',
-        'prefs/overlay_user_pref_store_unittest.cc',
-        'prefs/pref_change_registrar_unittest.cc',
-        'prefs/pref_member_unittest.cc',
-        'prefs/pref_notifier_impl_unittest.cc',
-        'prefs/pref_service_unittest.cc',
-        'prefs/pref_value_map_unittest.cc',
-        'prefs/pref_value_store_unittest.cc',
-        'prefs/scoped_user_pref_update_unittest.cc',
         'process/memory_unittest.cc',
         'process/memory_unittest_mac.h',
         'process/memory_unittest_mac.mm',
@@ -618,6 +549,9 @@
         'system_monitor/system_monitor_unittest.cc',
         'task/cancelable_task_tracker_unittest.cc',
         'task_runner_util_unittest.cc',
+        'task_scheduler/scheduler_lock_unittest.cc',
+        'task_scheduler/sequence_sort_key_unittest.cc',
+        'task_scheduler/sequence_unittest.cc',
         'template_util_unittest.cc',
         'test/histogram_tester_unittest.cc',
         'test/test_pending_task_unittest.cc',
@@ -669,6 +603,7 @@
         'win/shortcut_unittest.cc',
         'win/startup_information_unittest.cc',
         'win/win_util_unittest.cc',
+        'win/windows_version_unittest.cc',
         'win/wrapped_window_proc_unittest.cc',
         '<@(trace_event_test_sources)',
       ],
@@ -676,8 +611,6 @@
         'base',
         'base_i18n',
         'base_message_loop_tests',
-        'base_prefs',
-        'base_prefs_test_support',
         'base_static',
         'run_all_unittests',
         'test_support_base',
@@ -761,14 +694,7 @@
           'dependencies': [
             'malloc_wrapper',
           ],
-          'conditions': [
-            ['use_allocator!="none"', {
-              'dependencies': [
-                'allocator/allocator.gyp:allocator',
-              ],
-            }],
-          ]},
-        ],
+        }],
         [ 'OS == "win" and target_arch == "x64"', {
           'sources': [
             'profiler/win32_stack_frame_unwinder_unittest.cc',
@@ -778,6 +704,9 @@
           ],
         }],
         ['OS == "win"', {
+          'dependencies': [
+            'scoped_handle_test_dll'
+          ],
           'sources!': [
             'file_descriptor_shuffle_unittest.cc',
             'files/dir_reader_posix_unittest.cc',
@@ -789,16 +718,6 @@
             4267,
           ],
           'conditions': [
-            # This is needed so base_unittests uses the allocator shim, as
-            # SecurityTest.MemoryAllocationRestriction* tests are dependent
-            # on tcmalloc.
-            # TODO(wfh): crbug.com/246278 Move tcmalloc specific tests into
-            # their own test suite.
-            ['win_use_allocator_shim==1', {
-              'dependencies': [
-                'allocator/allocator.gyp:allocator',
-              ],
-            }],
             ['icu_use_data_file_flag==0', {
               # This is needed to trigger the dll copy step on windows.
               # TODO(mark): This should not be necessary.
@@ -812,6 +731,9 @@
             'third_party/libevent/libevent.gyp:libevent'
           ],
         }],
+        ['use_experimental_allocator_shim==1', {
+          'sources': [ 'allocator/allocator_shim_unittest.cc']
+        }],
       ],  # conditions
       'target_conditions': [
         ['OS == "ios" and _toolset != "host"', {
@@ -1096,6 +1018,35 @@
         ],
       },
     },
+    {
+      'type': 'none',
+      'target_name': 'base_build_date',
+      'hard_dependency': 1,
+      'actions': [{
+        'action_name': 'generate_build_date_headers',
+        'inputs': [
+          '<(DEPTH)/build/write_build_date_header.py',
+          '<(DEPTH)/build/util/LASTCHANGE'
+        ],
+        'outputs': [ '<(SHARED_INTERMEDIATE_DIR)/base/generated_build_date.h' ],
+        'action': [
+          'python', '<(DEPTH)/build/write_build_date_header.py',
+          '<(SHARED_INTERMEDIATE_DIR)/base/generated_build_date.h',
+          '<(build_type)'
+        ]
+      }],
+      'conditions': [
+        [ 'buildtype == "Official"', {
+          'variables': {
+            'build_type': 'official'
+          }
+        }, {
+          'variables': {
+            'build_type': 'default'
+          }
+        }],
+      ]
+    },
   ],
   'conditions': [
     ['OS=="ios" and "<(GENERATOR)"=="ninja"', {
@@ -1151,6 +1102,7 @@
             'base_target': 1,
           },
           'dependencies': [
+            'base_build_date',
             'base_debugging_flags#target',
             'base_static_win64',
             '../third_party/modp_b64/modp_b64.gyp:modp_b64_win64',
@@ -1480,18 +1432,15 @@
           'includes': [ '../build/android/java_cpp_template.gypi' ],
         },
         {
-          # GN: //base:base_multidex_gen
-          'target_name': 'base_multidex_gen',
+          # GN: //base:base_build_config_gen
+          'target_name': 'base_build_config_gen',
           'type': 'none',
           'sources': [
-            'android/java/templates/ChromiumMultiDex.template',
+            'android/java/templates/BuildConfig.template',
           ],
           'variables': {
-            'package_name': 'org/chromium/base/multidex',
+            'package_name': 'org/chromium/base',
             'template_deps': [],
-            'additional_gcc_preprocess_options': [
-              '--defines', 'MULTIDEX_CONFIGURATION_<(CONFIGURATION_NAME)',
-            ],
           },
           'includes': ['../build/android/java_cpp_template.gypi'],
         },
@@ -1510,18 +1459,26 @@
           'type': 'none',
           'variables': {
             'java_in_dir': 'android/java',
-            'jar_excluded_classes': [ '*/NativeLibraries.class' ],
+            'jar_excluded_classes': [
+              '*/BuildConfig.class',
+              '*/NativeLibraries.class',
+            ],
           },
           'dependencies': [
             'base_java_application_state',
             'base_java_library_load_from_apk_status_codes',
             'base_java_library_process_type',
             'base_java_memory_pressure_level',
-            'base_multidex_gen',
+            'base_build_config_gen',
             'base_native_libraries_gen',
             '../third_party/android_tools/android_tools.gyp:android_support_multidex_javalib',
             '../third_party/jsr-305/jsr-305.gyp:jsr_305_javalib',
           ],
+          'all_dependent_settings': {
+            'variables': {
+              'generate_build_config': 1,
+            },
+          },
           'includes': [ '../build/java.gypi' ],
         },
         {
@@ -1583,6 +1540,7 @@
           'target_name': 'base_junit_test_support',
           'type': 'none',
           'dependencies': [
+            'base_build_config_gen',
             '../testing/android/junit/junit_test.gyp:junit_test_support',
             '../third_party/android_tools/android_tools.gyp:android_support_multidex_javalib',
           ],
@@ -1604,13 +1562,21 @@
             '../testing/android/junit/junit_test.gyp:junit_test_support',
           ],
           'variables': {
-             'main_class': 'org.chromium.testing.local.JunitTestMain',
-             'src_paths': [
-               '../base/android/junit/',
-               '../base/test/android/junit/src/org/chromium/base/test/util/DisableIfTest.java',
-             ],
-           },
-          'includes': [ '../build/host_jar.gypi' ],
+            'main_class': 'org.chromium.testing.local.JunitTestMain',
+            'src_paths': [
+              '../base/android/junit/',
+              '../base/test/android/junit/src/org/chromium/base/test/util/DisableIfTest.java',
+              '../base/test/android/junit/src/org/chromium/base/test/util/MinAndroidSdkLevelSkipCheckTest.java',
+              '../base/test/android/junit/src/org/chromium/base/test/util/RestrictionSkipCheckTest.java',
+              '../base/test/android/junit/src/org/chromium/base/test/util/SkipCheckTest.java',
+            ],
+            'test_type': 'junit',
+            'wrapper_script_name': 'helper/<(_target_name)',
+          },
+          'includes': [
+            '../build/android/test_runner.gypi',
+            '../build/host_jar.gypi',
+          ],
         },
         {
           # GN: //base:base_javatests
@@ -1722,6 +1688,16 @@
             },
           },
         },
+        {
+          'target_name': 'scoped_handle_test_dll',
+          'type': 'loadable_module',
+          'dependencies': [
+            'base',
+          ],
+          'sources': [
+            'win/scoped_handle_test_dll.cc',
+          ],
+        },
       ],
     }],
     ['test_isolation_mode != "noop"', {
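
For context on the base_build_date target added above: a sketch of the header its action emits. The exact contents are an assumption inferred from how base::GetBuildTime() is documented (see the base/build_time.h hunk below), not something visible in this diff:

    // <(SHARED_INTERMEDIATE_DIR)/base/generated_build_date.h (illustrative).
    // Generated by build/write_build_date_header.py; regenerated whenever
    // //build/util/LASTCHANGE changes, so it does not dirty every build.
    #ifndef BUILD_DATE
    #define BUILD_DATE "Mar 06 2016 05:00:00"  // "default" builds round the date.
    #endif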
diff --git a/base/base.gypi b/base/base.gypi
index bb028bd..5d7693f 100644
--- a/base/base.gypi
+++ b/base/base.gypi
@@ -300,12 +300,16 @@
           'mac/launchd.h',
           'mac/libdispatch_task_runner.cc',
           'mac/libdispatch_task_runner.h',
-          'mac/mac_logging.cc',
           'mac/mac_logging.h',
+          'mac/mac_logging.mm',
           'mac/mac_util.h',
           'mac/mac_util.mm',
           'mac/mach_logging.cc',
           'mac/mach_logging.h',
+          'mac/mach_port_broker.h',
+          'mac/mach_port_broker.mm',
+          'mac/mach_port_util.cc',
+          'mac/mach_port_util.h',
           'mac/objc_property_releaser.h',
           'mac/objc_property_releaser.mm',
           'mac/os_crash_dumps.cc',
@@ -408,6 +412,12 @@
           'metrics/histogram_snapshot_manager.h',
           'metrics/metrics_hashes.cc',
           'metrics/metrics_hashes.h',
+          'metrics/persistent_histogram_allocator.cc',
+          'metrics/persistent_histogram_allocator.h',
+          'metrics/persistent_memory_allocator.cc',
+          'metrics/persistent_memory_allocator.h',
+          'metrics/persistent_sample_map.cc',
+          'metrics/persistent_sample_map.h',
           'metrics/sample_map.cc',
           'metrics/sample_map.h',
           'metrics/sample_vector.cc',
@@ -518,8 +528,6 @@
           'process/process_metrics_win.cc',
           'process/process_posix.cc',
           'process/process_win.cc',
-          'profiler/alternate_timer.cc',
-          'profiler/alternate_timer.h',
           'profiler/native_stack_sampler.cc',
           'profiler/native_stack_sampler.h',
           'profiler/native_stack_sampler_posix.cc',
@@ -628,6 +636,17 @@
           'task_runner.cc',
           'task_runner.h',
           'task_runner_util.h',
+          'task_scheduler/scheduler_lock.h',
+          'task_scheduler/scheduler_lock_impl.cc',
+          'task_scheduler/scheduler_lock_impl.h',
+          'task_scheduler/sequence.cc',
+          'task_scheduler/sequence.h',
+          'task_scheduler/sequence_sort_key.cc',
+          'task_scheduler/sequence_sort_key.h',
+          'task_scheduler/task.cc',
+          'task_scheduler/task.h',
+          'task_scheduler/task_traits.cc',
+          'task_scheduler/task_traits.h',
           'template_util.h',
           'third_party/dmg_fp/dmg_fp.h',
           'third_party/dmg_fp/dtoa_wrapper.cc',
@@ -914,6 +933,7 @@
               # Exclude unsupported features on iOS.
               ['exclude', '^files/file_path_watcher.*'],
               ['exclude', '^threading/platform_thread_internal_posix\\.(h|cc)'],
+              ['exclude', '^trace_event/malloc_dump_provider\\.(h|cc)$'],
             ],
             'sources': [
               'process/memory_stubs.cc',
diff --git a/base/base_nacl.gyp b/base/base_nacl.gyp
index 675cbd6..2713565 100644
--- a/base/base_nacl.gyp
+++ b/base/base_nacl.gyp
@@ -41,6 +41,7 @@
           },
           'dependencies': [
             'base.gyp:base_debugging_flags',
+            'base.gyp:base_build_date',
           ],
         },
         {
@@ -63,6 +64,7 @@
             ],
           },
           'dependencies': [
+            'base.gyp:base_build_date',
             '../third_party/icu/icu_nacl.gyp:icudata_nacl',
             '../third_party/icu/icu_nacl.gyp:icui18n_nacl',
             '../third_party/icu/icu_nacl.gyp:icuuc_nacl',
@@ -118,6 +120,7 @@
           ],
           'dependencies': [
             'base.gyp:base_debugging_flags',
+            'base.gyp:base_build_date',
             'third_party/libevent/libevent_nacl_nonsfi.gyp:event_nacl_nonsfi',
           ],
         },
@@ -141,6 +144,7 @@
             ],
           },
           'dependencies': [
+            'base.gyp:base_build_date',
             'base_nacl_nonsfi',
             '../testing/gtest_nacl.gyp:gtest_nacl',
           ],
diff --git a/base/base_switches.cc b/base/base_switches.cc
index 02b2229..d1a38e4 100644
--- a/base/base_switches.cc
+++ b/base/base_switches.cc
@@ -46,6 +46,11 @@
 // to the test framework that the current process is a child process.
 const char kTestChildProcess[]              = "test-child-process";
 
+// When running certain tests that spawn child processes, this switch indicates
+// to the test framework that the current process should not initialize ICU to
+// avoid creating any scoped handles too early in startup.
+const char kTestDoNotInitializeIcu[]        = "test-do-not-initialize-icu";
+
 // Gives the default maximal active V-logging level; 0 is the default.
 // Normally positive values are used for V-logging levels.
 const char kV[]                             = "v";
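
The new switch is declared here and in base_switches.h below. As a usage sketch only (the InitializeTestChildProcess() helper is hypothetical; the CommandLine and icu_util calls are existing base APIs), a spawned test child process would consult it before touching ICU:

    #include "base/base_switches.h"
    #include "base/command_line.h"
    #include "base/i18n/icu_util.h"

    // Hypothetical early-startup hook for a test child process.
    void InitializeTestChildProcess() {
      const base::CommandLine& cmd = *base::CommandLine::ForCurrentProcess();
      // Handle-tracking tests pass this switch so ICU initialization does
      // not create scoped handles before the test takes over.
      if (!cmd.HasSwitch(switches::kTestDoNotInitializeIcu))
        base::i18n::InitializeICU();
    }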
diff --git a/base/base_switches.h b/base/base_switches.h
index c97a629..300c5f7 100644
--- a/base/base_switches.h
+++ b/base/base_switches.h
@@ -22,6 +22,7 @@
 extern const char kProfilerTiming[];
 extern const char kProfilerTimingDisabledValue[];
 extern const char kTestChildProcess[];
+extern const char kTestDoNotInitializeIcu[];
 extern const char kTraceToFile[];
 extern const char kTraceToFileName[];
 extern const char kV[];
diff --git a/base/bind.h b/base/bind.h
index 770e457..46dbb91 100644
--- a/base/bind.h
+++ b/base/bind.h
@@ -6,7 +6,6 @@
 #define BASE_BIND_H_
 
 #include "base/bind_internal.h"
-#include "base/callback_internal.h"
 
 // -----------------------------------------------------------------------------
 // Usage documentation
@@ -47,14 +46,27 @@
 
 namespace base {
 
+namespace internal {
+
+// Don't use an alias template directly here to avoid a compile error on MSVC 2013.
 template <typename Functor, typename... Args>
-base::Callback<
-    typename internal::BindState<
-        typename internal::FunctorTraits<Functor>::RunnableType,
-        typename internal::FunctorTraits<Functor>::RunType,
-        typename internal::CallbackParamTraits<Args>::StorageType...>
-            ::UnboundRunType>
-Bind(Functor functor, const Args&... args) {
+struct MakeUnboundRunTypeImpl {
+  using Type =
+      typename BindState<
+          typename FunctorTraits<Functor>::RunnableType,
+          typename FunctorTraits<Functor>::RunType,
+          Args...>::UnboundRunType;
+};
+
+}  // namespace internal
+
+template <typename Functor, typename... Args>
+using MakeUnboundRunType =
+    typename internal::MakeUnboundRunTypeImpl<Functor, Args...>::Type;
+
+template <typename Functor, typename... Args>
+base::Callback<MakeUnboundRunType<Functor, Args...>>
+Bind(Functor functor, Args&&... args) {
   // Type aliases for how to store and run the functor.
   using RunnableType = typename internal::FunctorTraits<Functor>::RunnableType;
   using RunType = typename internal::FunctorTraits<Functor>::RunType;
@@ -88,12 +100,11 @@
       !internal::HasRefCountedParamAsRawPtr<is_method, Args...>::value,
       "a parameter is a refcounted type and needs scoped_refptr");
 
-  using BindState = internal::BindState<
-      RunnableType, RunType,
-      typename internal::CallbackParamTraits<Args>::StorageType...>;
+  using BindState = internal::BindState<RunnableType, RunType, Args...>;
 
   return Callback<typename BindState::UnboundRunType>(
-      new BindState(internal::MakeRunnable(functor), args...));
+      new BindState(internal::MakeRunnable(functor),
+                    std::forward<Args>(args)...));
 }
 
 }  // namespace base
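
A minimal sketch of what the reworked Bind() enables (Consume() and BindMoveExample() are illustrative, not from this change): bound rvalues are now forwarded into BindState and move-constructed, where the old CallbackParamTraits<Args>::StorageType path always copied:

    #include <string>
    #include <utility>

    #include "base/bind.h"
    #include "base/callback.h"

    void Consume(const std::string& s) { /* use |s| */ }

    void BindMoveExample() {
      std::string payload(1024, 'x');
      // |payload| is moved into the callback's storage, not copied.
      base::Closure cb = base::Bind(&Consume, std::move(payload));
      cb.Run();
    }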
diff --git a/base/bind_helpers.h b/base/bind_helpers.h
index b97558c..117fc68 100644
--- a/base/bind_helpers.h
+++ b/base/bind_helpers.h
@@ -145,15 +145,11 @@
 
 #include <stddef.h>
 
-#include <map>
-#include <memory>
 #include <type_traits>
 #include <utility>
-#include <vector>
 
 #include "base/callback.h"
 #include "base/memory/weak_ptr.h"
-#include "base/template_util.h"
 #include "build/build_config.h"
 
 namespace base {
@@ -265,21 +261,21 @@
 // Helpers to assert that arguments of a refcounted type are bound with a
 // scoped_refptr.
 template <bool IsClasstype, typename T>
-struct UnsafeBindtoRefCountedArgHelper : false_type {
+struct UnsafeBindtoRefCountedArgHelper : std::false_type {
 };
 
 template <typename T>
 struct UnsafeBindtoRefCountedArgHelper<true, T>
-    : integral_constant<bool, SupportsAddRefAndRelease<T>::value> {
+    : std::integral_constant<bool, SupportsAddRefAndRelease<T>::value> {
 };
 
 template <typename T>
-struct UnsafeBindtoRefCountedArg : false_type {
+struct UnsafeBindtoRefCountedArg : std::false_type {
 };
 
 template <typename T>
 struct UnsafeBindtoRefCountedArg<T*>
-    : UnsafeBindtoRefCountedArgHelper<is_class<T>::value, T> {
+    : UnsafeBindtoRefCountedArgHelper<std::is_class<T>::value, T> {
 };
 
 template <typename T>
@@ -381,7 +377,7 @@
       : is_valid_(true), scoper_(std::move(scoper)) {}
   PassedWrapper(const PassedWrapper& other)
       : is_valid_(other.is_valid_), scoper_(std::move(other.scoper_)) {}
-  T Pass() const {
+  T Take() const {
     CHECK(is_valid_);
     is_valid_ = false;
     return std::move(scoper_);
@@ -392,161 +388,41 @@
   mutable T scoper_;
 };
 
-// Specialize PassedWrapper for std::unique_ptr used by base::Passed().
-// Use std::move() to transfer the data from one storage to another.
-template <typename T, typename D>
-class PassedWrapper<std::unique_ptr<T, D>> {
- public:
-  explicit PassedWrapper(std::unique_ptr<T, D> scoper)
-      : is_valid_(true), scoper_(std::move(scoper)) {}
-  PassedWrapper(const PassedWrapper& other)
-      : is_valid_(other.is_valid_), scoper_(std::move(other.scoper_)) {}
-
-  std::unique_ptr<T, D> Pass() const {
-    CHECK(is_valid_);
-    is_valid_ = false;
-    return std::move(scoper_);
-  }
-
- private:
-  mutable bool is_valid_;
-  mutable std::unique_ptr<T, D> scoper_;
-};
-
-// Specialize PassedWrapper for std::vector<std::unique_ptr<T>>.
-template <typename T, typename D, typename A>
-class PassedWrapper<std::vector<std::unique_ptr<T, D>, A>> {
- public:
-  explicit PassedWrapper(std::vector<std::unique_ptr<T, D>, A> scoper)
-      : is_valid_(true), scoper_(std::move(scoper)) {}
-  PassedWrapper(const PassedWrapper& other)
-      : is_valid_(other.is_valid_), scoper_(std::move(other.scoper_)) {}
-
-  std::vector<std::unique_ptr<T, D>, A> Pass() const {
-    CHECK(is_valid_);
-    is_valid_ = false;
-    return std::move(scoper_);
-  }
-
- private:
-  mutable bool is_valid_;
-  mutable std::vector<std::unique_ptr<T, D>, A> scoper_;
-};
-
-// Specialize PassedWrapper for std::map<K, std::unique_ptr<T>>.
-template <typename K, typename T, typename D, typename C, typename A>
-class PassedWrapper<std::map<K, std::unique_ptr<T, D>, C, A>> {
- public:
-  explicit PassedWrapper(std::map<K, std::unique_ptr<T, D>, C, A> scoper)
-      : is_valid_(true), scoper_(std::move(scoper)) {}
-  PassedWrapper(const PassedWrapper& other)
-      : is_valid_(other.is_valid_), scoper_(std::move(other.scoper_)) {}
-
-  std::map<K, std::unique_ptr<T, D>, C, A> Pass() const {
-    CHECK(is_valid_);
-    is_valid_ = false;
-    return std::move(scoper_);
-  }
-
- private:
-  mutable bool is_valid_;
-  mutable std::map<K, std::unique_ptr<T, D>, C, A> scoper_;
-};
-
 // Unwrap the stored parameters for the wrappers above.
 template <typename T>
-struct UnwrapTraits {
-  using ForwardType = const T&;
-  static ForwardType Unwrap(const T& o) { return o; }
-};
+const T& Unwrap(const T& o) {
+  return o;
+}
 
 template <typename T>
-struct UnwrapTraits<UnretainedWrapper<T> > {
-  using ForwardType = T*;
-  static ForwardType Unwrap(UnretainedWrapper<T> unretained) {
-    return unretained.get();
-  }
-};
+T* Unwrap(UnretainedWrapper<T> unretained) {
+  return unretained.get();
+}
 
 template <typename T>
-struct UnwrapTraits<ConstRefWrapper<T> > {
-  using ForwardType = const T&;
-  static ForwardType Unwrap(ConstRefWrapper<T> const_ref) {
-    return const_ref.get();
-  }
-};
+const T& Unwrap(ConstRefWrapper<T> const_ref) {
+  return const_ref.get();
+}
 
 template <typename T>
-struct UnwrapTraits<scoped_refptr<T> > {
-  using ForwardType = T*;
-  static ForwardType Unwrap(const scoped_refptr<T>& o) { return o.get(); }
-};
+T* Unwrap(const scoped_refptr<T>& o) {
+  return o.get();
+}
 
 template <typename T>
-struct UnwrapTraits<WeakPtr<T> > {
-  using ForwardType = const WeakPtr<T>&;
-  static ForwardType Unwrap(const WeakPtr<T>& o) { return o; }
-};
+const WeakPtr<T>& Unwrap(const WeakPtr<T>& o) {
+  return o;
+}
 
 template <typename T>
-struct UnwrapTraits<OwnedWrapper<T> > {
-  using ForwardType = T*;
-  static ForwardType Unwrap(const OwnedWrapper<T>& o) {
-    return o.get();
-  }
-};
+T* Unwrap(const OwnedWrapper<T>& o) {
+  return o.get();
+}
 
 template <typename T>
-struct UnwrapTraits<PassedWrapper<T> > {
-  using ForwardType = T;
-  static T Unwrap(PassedWrapper<T>& o) {
-    return o.Pass();
-  }
-};
-
-// Utility for handling different refcounting semantics in the Bind()
-// function.
-template <bool is_method, typename... T>
-struct MaybeScopedRefPtr;
-
-template <bool is_method>
-struct MaybeScopedRefPtr<is_method> {
-  MaybeScopedRefPtr() {}
-};
-
-template <typename T, typename... Rest>
-struct MaybeScopedRefPtr<false, T, Rest...> {
-  MaybeScopedRefPtr(const T&, const Rest&...) {}
-};
-
-template <typename T, size_t n, typename... Rest>
-struct MaybeScopedRefPtr<false, T[n], Rest...> {
-  MaybeScopedRefPtr(const T*, const Rest&...) {}
-};
-
-template <typename T, typename... Rest>
-struct MaybeScopedRefPtr<true, T, Rest...> {
-  MaybeScopedRefPtr(const T& /* o */, const Rest&...) {}
-};
-
-template <typename T, typename... Rest>
-struct MaybeScopedRefPtr<true, T*, Rest...> {
-  MaybeScopedRefPtr(T* o, const Rest&...) : ref_(o) {}
-  scoped_refptr<T> ref_;
-};
-
-// No need to additionally AddRef() and Release() since we are storing a
-// scoped_refptr<> inside the storage object already.
-template <typename T, typename... Rest>
-struct MaybeScopedRefPtr<true, scoped_refptr<T>, Rest...> {
-  MaybeScopedRefPtr(const scoped_refptr<T>&, const Rest&...) {}
-};
-
-template <typename T, typename... Rest>
-struct MaybeScopedRefPtr<true, const T*, Rest...> {
-  MaybeScopedRefPtr(const T* o, const Rest&...) : ref_(o) {}
-  scoped_refptr<const T> ref_;
-};
+T Unwrap(PassedWrapper<T>& o) {
+  return o.Take();
+}
 
 // IsWeakMethod is a helper that determines if we are binding a WeakPtr<> to a
 // method.  It is used internally by Bind() to select the correct
@@ -556,14 +432,14 @@
 // The first argument should be the type of the object that will be received by
 // the method.
 template <bool IsMethod, typename... Args>
-struct IsWeakMethod : public false_type {};
+struct IsWeakMethod : public std::false_type {};
 
 template <typename T, typename... Args>
-struct IsWeakMethod<true, WeakPtr<T>, Args...> : public true_type {};
+struct IsWeakMethod<true, WeakPtr<T>, Args...> : public std::true_type {};
 
 template <typename T, typename... Args>
 struct IsWeakMethod<true, ConstRefWrapper<WeakPtr<T>>, Args...>
-    : public true_type {};
+    : public std::true_type {};
 
 
 // Packs a list of types to hold them in a single type.
@@ -686,15 +562,12 @@
 // Both versions of Passed() prevent T from being an lvalue reference. The first
 // via use of enable_if, and the second takes a T* which will not bind to T&.
 template <typename T,
-          typename std::enable_if<internal::IsMoveOnlyType<T>::value &&
-                                  !std::is_lvalue_reference<T>::value>::type* =
+          typename std::enable_if<!std::is_lvalue_reference<T>::value>::type* =
               nullptr>
 static inline internal::PassedWrapper<T> Passed(T&& scoper) {
   return internal::PassedWrapper<T>(std::move(scoper));
 }
-template <typename T,
-          typename std::enable_if<internal::IsMoveOnlyType<T>::value>::type* =
-              nullptr>
+template <typename T>
 static inline internal::PassedWrapper<T> Passed(T* scoper) {
   return internal::PassedWrapper<T>(std::move(*scoper));
 }
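
With the per-container PassedWrapper specializations deleted, the generic wrapper's Take() now covers any movable type. A sketch under that assumption (Sink() and PassedExample() are illustrative, not part of the patch):

    #include <memory>
    #include <vector>

    #include "base/bind.h"
    #include "base/callback.h"

    void Sink(std::vector<std::unique_ptr<int>> v) { /* takes ownership */ }

    void PassedExample() {
      std::vector<std::unique_ptr<int>> v;
      v.push_back(std::unique_ptr<int>(new int(42)));
      // Passed() hands the whole vector to the callback; the deleted
      // std::vector<std::unique_ptr<T>> specialization is no longer needed.
      base::Closure cb = base::Bind(&Sink, base::Passed(&v));
      cb.Run();  // Ownership transfers to Sink() on the first Run().
    }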
diff --git a/base/bind_internal.h b/base/bind_internal.h
index ac7cd00..199467c 100644
--- a/base/bind_internal.h
+++ b/base/bind_internal.h
@@ -68,7 +68,7 @@
 // Implementation note: This non-specialized case handles zero-arity case only.
 // Non-zero-arity cases should be handled by the specialization below.
 template <typename List>
-struct HasNonConstReferenceItem : false_type {};
+struct HasNonConstReferenceItem : std::false_type {};
 
 // Implementation note: Select true_type if the first parameter is a non-const
 // reference.  Otherwise, skip the first parameter and check rest of parameters
@@ -76,7 +76,7 @@
 template <typename T, typename... Args>
 struct HasNonConstReferenceItem<TypeList<T, Args...>>
     : std::conditional<is_non_const_reference<T>::value,
-                       true_type,
+                       std::true_type,
                        HasNonConstReferenceItem<TypeList<Args...>>>::type {};
 
 // HasRefCountedTypeAsRawPtr selects true_type when any of the |Args| is a raw
@@ -84,7 +84,7 @@
 // Implementation note: This non-specialized case handles zero-arity case only.
 // Non-zero-arity cases should be handled by the specialization below.
 template <typename... Args>
-struct HasRefCountedTypeAsRawPtr : false_type {};
+struct HasRefCountedTypeAsRawPtr : std::false_type {};
 
 // Implementation note: Select true_type if the first parameter is a raw pointer
 // to a RefCounted type. Otherwise, skip the first parameter and check rest of
@@ -92,7 +92,7 @@
 template <typename T, typename... Args>
 struct HasRefCountedTypeAsRawPtr<T, Args...>
     : std::conditional<NeedsScopedRefptrButGetsRawPtr<T>::value,
-                       true_type,
+                       std::true_type,
                        HasRefCountedTypeAsRawPtr<Args...>>::type {};
 
 // BindsArrayToFirstArg selects true_type when |is_method| is true and the first
@@ -101,10 +101,11 @@
 // zero-arity case only.  Other cases should be handled by the specialization
 // below.
 template <bool is_method, typename... Args>
-struct BindsArrayToFirstArg : false_type {};
+struct BindsArrayToFirstArg : std::false_type {};
 
 template <typename T, typename... Args>
-struct BindsArrayToFirstArg<true, T, Args...> : is_array<T> {};
+struct BindsArrayToFirstArg<true, T, Args...>
+    : std::is_array<typename std::remove_reference<T>::type> {};
 
 // HasRefCountedParamAsRawPtr is the same to HasRefCountedTypeAsRawPtr except
 // when |is_method| is true HasRefCountedParamAsRawPtr skips the first argument.
@@ -153,8 +154,9 @@
       : function_(function) {
   }
 
-  R Run(typename CallbackParamTraits<Args>::ForwardType... args) {
-    return function_(CallbackForward(args)...);
+  template <typename... RunArgs>
+  R Run(RunArgs&&... args) {
+    return function_(std::forward<RunArgs>(args)...);
   }
 
  private:
@@ -168,14 +170,15 @@
   // MSVC 2013 doesn't support Type Alias of function types.
   // Revisit this after we update it to a newer version.
   typedef R RunType(T*, Args...);
-  using IsMethod = true_type;
+  using IsMethod = std::true_type;
 
   explicit RunnableAdapter(R(T::*method)(Args...))
       : method_(method) {
   }
 
-  R Run(T* object, typename CallbackParamTraits<Args>::ForwardType... args) {
-    return (object->*method_)(CallbackForward(args)...);
+  template <typename... RunArgs>
+  R Run(T* object, RunArgs&&... args) {
+    return (object->*method_)(std::forward<RunArgs>(args)...);
   }
 
  private:
@@ -187,15 +190,15 @@
 class RunnableAdapter<R(T::*)(Args...) const> {
  public:
   using RunType = R(const T*, Args...);
-  using IsMethod = true_type;
+  using IsMethod = std::true_type;
 
   explicit RunnableAdapter(R(T::*method)(Args...) const)
       : method_(method) {
   }
 
-  R Run(const T* object,
-        typename CallbackParamTraits<Args>::ForwardType... args) {
-    return (object->*method_)(CallbackForward(args)...);
+  template <typename... RunArgs>
+  R Run(const T* object, RunArgs&&... args) {
+    return (object->*method_)(std::forward<RunArgs>(args)...);
   }
 
  private:
@@ -280,42 +283,46 @@
 //
 // WeakCalls similarly need special syntax that is applied to the first
 // argument to check if they should no-op themselves.
-template <bool IsWeakCall, typename ReturnType, typename Runnable,
-          typename ArgsType>
+template <bool IsWeakCall, typename ReturnType, typename Runnable>
 struct InvokeHelper;
 
-template <typename ReturnType, typename Runnable, typename... Args>
-struct InvokeHelper<false, ReturnType, Runnable, TypeList<Args...>> {
-  static ReturnType MakeItSo(Runnable runnable, Args... args) {
-    return runnable.Run(CallbackForward(args)...);
+template <typename ReturnType, typename Runnable>
+struct InvokeHelper<false, ReturnType, Runnable> {
+  template <typename... RunArgs>
+  static ReturnType MakeItSo(Runnable runnable, RunArgs&&... args) {
+    return runnable.Run(std::forward<RunArgs>(args)...);
   }
 };
 
-template <typename Runnable, typename... Args>
-struct InvokeHelper<false, void, Runnable, TypeList<Args...>> {
-  static void MakeItSo(Runnable runnable, Args... args) {
-    runnable.Run(CallbackForward(args)...);
+template <typename Runnable>
+struct InvokeHelper<false, void, Runnable> {
+  template <typename... RunArgs>
+  static void MakeItSo(Runnable runnable, RunArgs&&... args) {
+    runnable.Run(std::forward<RunArgs>(args)...);
   }
 };
 
-template <typename Runnable, typename BoundWeakPtr, typename... Args>
-struct InvokeHelper<true, void, Runnable, TypeList<BoundWeakPtr, Args...>> {
-  static void MakeItSo(Runnable runnable, BoundWeakPtr weak_ptr, Args... args) {
+template <typename Runnable>
+struct InvokeHelper<true, void, Runnable> {
+  template <typename BoundWeakPtr, typename... RunArgs>
+  static void MakeItSo(Runnable runnable,
+                       BoundWeakPtr weak_ptr,
+                       RunArgs&&... args) {
     if (!weak_ptr.get()) {
       return;
     }
-    runnable.Run(weak_ptr.get(), CallbackForward(args)...);
+    runnable.Run(weak_ptr.get(), std::forward<RunArgs>(args)...);
   }
 };
 
 #if !defined(_MSC_VER)
 
-template <typename ReturnType, typename Runnable, typename ArgsType>
-struct InvokeHelper<true, ReturnType, Runnable, ArgsType> {
+template <typename ReturnType, typename Runnable>
+struct InvokeHelper<true, ReturnType, Runnable> {
   // WeakCalls are only supported for functions with a void return type.
   // Otherwise, the function result would be undefined if the WeakPtr<>
   // is invalidated.
-  static_assert(is_void<ReturnType>::value,
+  static_assert(std::is_void<ReturnType>::value,
                 "weak_ptrs can only bind to methods without return values");
 };
 
@@ -324,33 +331,48 @@
 // Invoker<>
 //
 // See description at the top of the file.
-template <typename BoundIndices,
-          typename StorageType, typename Unwrappers,
+template <typename BoundIndices, typename StorageType,
           typename InvokeHelperType, typename UnboundForwardRunType>
 struct Invoker;
 
 template <size_t... bound_indices,
           typename StorageType,
-          typename... Unwrappers,
           typename InvokeHelperType,
           typename R,
-          typename... UnboundForwardArgs>
+          typename... UnboundArgs>
 struct Invoker<IndexSequence<bound_indices...>,
-               StorageType, TypeList<Unwrappers...>,
-               InvokeHelperType, R(UnboundForwardArgs...)> {
-  static R Run(BindStateBase* base,
-               UnboundForwardArgs... unbound_args) {
+               StorageType,
+               InvokeHelperType,
+               R(UnboundArgs...)> {
+  static R Run(BindStateBase* base, UnboundArgs&&... unbound_args) {
     StorageType* storage = static_cast<StorageType*>(base);
     // Local references to make debugger stepping easier. If in a debugger,
     // you really want to warp ahead and step through the
     // InvokeHelper<>::MakeItSo() call below.
     return InvokeHelperType::MakeItSo(
-        storage->runnable_,
-        Unwrappers::Unwrap(get<bound_indices>(storage->bound_args_))...,
-        CallbackForward(unbound_args)...);
+        storage->runnable_, Unwrap(get<bound_indices>(storage->bound_args_))...,
+        std::forward<UnboundArgs>(unbound_args)...);
   }
 };
 
+// Used to implement MakeArgsStorage.
+template <bool is_method, typename... BoundArgs>
+struct MakeArgsStorageImpl {
+  using Type = std::tuple<BoundArgs...>;
+};
+
+template <typename Obj, typename... BoundArgs>
+struct MakeArgsStorageImpl<true, Obj*, BoundArgs...> {
+  using Type = std::tuple<scoped_refptr<Obj>, BoundArgs...>;
+};
+
+// Constructs a tuple type to store BoundArgs into BindState.
+// This wraps the first argument into a scoped_refptr if |is_method| is true and
+// the first argument is a raw pointer.
+// Other arguments are adjusted for storage and packed into a tuple.
+template <bool is_method, typename... BoundArgs>
+using MakeArgsStorage = typename MakeArgsStorageImpl<
+  is_method, typename std::decay<BoundArgs>::type...>::Type;
 
 // BindState<>
 //
@@ -376,40 +398,31 @@
   using StorageType = BindState<Runnable, R(Args...), BoundArgs...>;
   using RunnableType = Runnable;
 
+  enum { is_method = HasIsMethodTag<Runnable>::value };
+
   // true_type if Runnable is a method invocation and the first bound argument
   // is a WeakPtr.
   using IsWeakCall =
-      IsWeakMethod<HasIsMethodTag<Runnable>::value, BoundArgs...>;
+      IsWeakMethod<is_method, typename std::decay<BoundArgs>::type...>;
 
   using BoundIndices = MakeIndexSequence<sizeof...(BoundArgs)>;
-  using Unwrappers = TypeList<UnwrapTraits<BoundArgs>...>;
-  using UnboundForwardArgs = DropTypeListItem<
-      sizeof...(BoundArgs),
-      TypeList<typename CallbackParamTraits<Args>::ForwardType...>>;
-  using UnboundForwardRunType = MakeFunctionType<R, UnboundForwardArgs>;
-
-  using InvokeHelperArgs = ConcatTypeLists<
-      TypeList<typename UnwrapTraits<BoundArgs>::ForwardType...>,
-      UnboundForwardArgs>;
-  using InvokeHelperType =
-      InvokeHelper<IsWeakCall::value, R, Runnable, InvokeHelperArgs>;
+  using InvokeHelperType = InvokeHelper<IsWeakCall::value, R, Runnable>;
 
   using UnboundArgs = DropTypeListItem<sizeof...(BoundArgs), TypeList<Args...>>;
 
  public:
-  using InvokerType = Invoker<BoundIndices, StorageType, Unwrappers,
-                              InvokeHelperType, UnboundForwardRunType>;
   using UnboundRunType = MakeFunctionType<R, UnboundArgs>;
+  using InvokerType =
+      Invoker<BoundIndices, StorageType, InvokeHelperType, UnboundRunType>;
 
-  BindState(const Runnable& runnable, const BoundArgs&... bound_args)
+  template <typename... ForwardArgs>
+  BindState(const Runnable& runnable, ForwardArgs&&... bound_args)
       : BindStateBase(&Destroy),
         runnable_(runnable),
-        ref_(bound_args...),
-        bound_args_(bound_args...) {}
+        bound_args_(std::forward<ForwardArgs>(bound_args)...) {}
 
   RunnableType runnable_;
-  MaybeScopedRefPtr<HasIsMethodTag<Runnable>::value, BoundArgs...> ref_;
-  Tuple<BoundArgs...> bound_args_;
+  MakeArgsStorage<is_method, BoundArgs...> bound_args_;
 
  private:
   ~BindState() {}
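
A type-level sketch of what the new MakeArgsStorage computes (Foo is a hypothetical refcounted class): when a method is bound through a raw receiver pointer, the stored tuple holds a scoped_refptr, which is what lets the MaybeScopedRefPtr member be deleted above:

    #include <tuple>
    #include <type_traits>

    #include "base/bind_internal.h"
    #include "base/memory/ref_counted.h"

    class Foo : public base::RefCounted<Foo> {
     private:
      friend class base::RefCounted<Foo>;
      ~Foo() {}
    };

    using Storage =
        base::internal::MakeArgsStorage<true /* is_method */, Foo*, int>;
    static_assert(
        std::is_same<Storage, std::tuple<scoped_refptr<Foo>, int>>::value,
        "the raw receiver pointer is promoted to a scoped_refptr");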
diff --git a/base/bind_unittest.cc b/base/bind_unittest.cc
index 25b4a10..405dde8 100644
--- a/base/bind_unittest.cc
+++ b/base/bind_unittest.cc
@@ -6,6 +6,7 @@
 
 #include <memory>
 #include <utility>
+#include <vector>
 
 #include "base/callback.h"
 #include "base/macros.h"
@@ -92,43 +93,87 @@
   void NonVirtualSet() { value = kChildValue; }
 };
 
-// Used for probing the number of copies that occur if a type must be coerced
-// during argument forwarding in the Run() methods.
-struct DerivedCopyCounter {
-  DerivedCopyCounter(int* copies, int* assigns)
-      : copies_(copies), assigns_(assigns) {
-  }
+// Used for probing the number of copies and moves that occur if a type must be
+// coerced during argument forwarding in the Run() methods.
+struct DerivedCopyMoveCounter {
+  DerivedCopyMoveCounter(int* copies,
+                         int* assigns,
+                         int* move_constructs,
+                         int* move_assigns)
+      : copies_(copies),
+        assigns_(assigns),
+        move_constructs_(move_constructs),
+        move_assigns_(move_assigns) {}
   int* copies_;
   int* assigns_;
+  int* move_constructs_;
+  int* move_assigns_;
 };
 
-// Used for probing the number of copies in an argument.
-class CopyCounter {
+// Used for probing the number of copies and moves in an argument.
+class CopyMoveCounter {
  public:
-  CopyCounter(int* copies, int* assigns)
-      : copies_(copies), assigns_(assigns) {
+  CopyMoveCounter(int* copies,
+                  int* assigns,
+                  int* move_constructs,
+                  int* move_assigns)
+      : copies_(copies),
+        assigns_(assigns),
+        move_constructs_(move_constructs),
+        move_assigns_(move_assigns) {}
+
+  CopyMoveCounter(const CopyMoveCounter& other)
+      : copies_(other.copies_),
+        assigns_(other.assigns_),
+        move_constructs_(other.move_constructs_),
+        move_assigns_(other.move_assigns_) {
+    (*copies_)++;
   }
 
-  CopyCounter(const CopyCounter& other)
+  CopyMoveCounter(CopyMoveCounter&& other)
       : copies_(other.copies_),
-        assigns_(other.assigns_) {
-    (*copies_)++;
+        assigns_(other.assigns_),
+        move_constructs_(other.move_constructs_),
+        move_assigns_(other.move_assigns_) {
+    (*move_constructs_)++;
   }
 
   // Probing for copies from coercion.
-  explicit CopyCounter(const DerivedCopyCounter& other)
+  explicit CopyMoveCounter(const DerivedCopyMoveCounter& other)
       : copies_(other.copies_),
-        assigns_(other.assigns_) {
+        assigns_(other.assigns_),
+        move_constructs_(other.move_constructs_),
+        move_assigns_(other.move_assigns_) {
     (*copies_)++;
   }
 
-  const CopyCounter& operator=(const CopyCounter& rhs) {
+  // Probing for moves from coercion.
+  explicit CopyMoveCounter(DerivedCopyMoveCounter&& other)
+      : copies_(other.copies_),
+        assigns_(other.assigns_),
+        move_constructs_(other.move_constructs_),
+        move_assigns_(other.move_assigns_) {
+    (*move_constructs_)++;
+  }
+
+  const CopyMoveCounter& operator=(const CopyMoveCounter& rhs) {
     copies_ = rhs.copies_;
     assigns_ = rhs.assigns_;
+    move_constructs_ = rhs.move_constructs_;
+    move_assigns_ = rhs.move_assigns_;
 
-    if (assigns_) {
-      (*assigns_)++;
-    }
+    (*assigns_)++;
+
+    return *this;
+  }
+
+  const CopyMoveCounter& operator=(CopyMoveCounter&& rhs) {
+    copies_ = rhs.copies_;
+    assigns_ = rhs.assigns_;
+    move_constructs_ = rhs.move_constructs_;
+    move_assigns_ = rhs.move_assigns_;
+
+    (*move_assigns_)++;
 
     return *this;
   }
@@ -140,6 +185,47 @@
  private:
   int* copies_;
   int* assigns_;
+  int* move_constructs_;
+  int* move_assigns_;
+};
+
+// Used for probing the number of copies in an argument. The instance is a
+// copyable and non-movable type.
+class CopyCounter {
+ public:
+  CopyCounter(int* copies, int* assigns)
+      : counter_(copies, assigns, nullptr, nullptr) {}
+  CopyCounter(const CopyCounter& other) : counter_(other.counter_) {}
+  CopyCounter& operator=(const CopyCounter& other) {
+    counter_ = other.counter_;
+    return *this;
+  }
+
+  explicit CopyCounter(const DerivedCopyMoveCounter& other) : counter_(other) {}
+
+  int copies() const { return counter_.copies(); }
+
+ private:
+  CopyMoveCounter counter_;
+};
+
+// Used for probing the number of moves in an argument. The instance is a
+// non-copyable and movable type.
+class MoveCounter {
+ public:
+  MoveCounter(int* move_constructs, int* move_assigns)
+      : counter_(nullptr, nullptr, move_constructs, move_assigns) {}
+  MoveCounter(MoveCounter&& other) : counter_(std::move(other.counter_)) {}
+  MoveCounter& operator=(MoveCounter&& other) {
+    counter_ = std::move(other.counter_);
+    return *this;
+  }
+
+  explicit MoveCounter(DerivedCopyMoveCounter&& other)
+      : counter_(std::move(other)) {}
+
+ private:
+  CopyMoveCounter counter_;
 };
 
 class DeleteCounter {
@@ -190,7 +276,7 @@
   return s;
 }
 
-int GetCopies(const CopyCounter& counter) {
+int GetCopies(const CopyMoveCounter& counter) {
   return counter.copies();
 }
 
@@ -338,8 +424,8 @@
 //     preserve virtual dispatch).
 TEST_F(BindTest, FunctionTypeSupport) {
   EXPECT_CALL(static_func_mock_, VoidMethod0());
-  EXPECT_CALL(has_ref_, AddRef()).Times(5);
-  EXPECT_CALL(has_ref_, Release()).Times(5);
+  EXPECT_CALL(has_ref_, AddRef()).Times(4);
+  EXPECT_CALL(has_ref_, Release()).Times(4);
   EXPECT_CALL(has_ref_, VoidMethod0()).Times(2);
   EXPECT_CALL(has_ref_, VoidConstMethod0()).Times(2);
 
@@ -669,12 +755,16 @@
 
   int copies = 0;
   int assigns = 0;
-  CopyCounter counter(&copies, &assigns);
+  int move_constructs = 0;
+  int move_assigns = 0;
+  CopyMoveCounter counter(&copies, &assigns, &move_constructs, &move_assigns);
   Callback<int()> all_const_ref_cb =
       Bind(&GetCopies, ConstRef(counter));
   EXPECT_EQ(0, all_const_ref_cb.Run());
   EXPECT_EQ(0, copies);
   EXPECT_EQ(0, assigns);
+  EXPECT_EQ(0, move_constructs);
+  EXPECT_EQ(0, move_assigns);
 }
 
 TEST_F(BindTest, ScopedRefptr) {
@@ -717,37 +807,57 @@
   EXPECT_EQ(1, deletes);
 }
 
-// Passed() wrapper support.
+// Tests for Passed() wrapper support:
 //   - Passed() can be constructed from a pointer to scoper.
 //   - Passed() can be constructed from a scoper rvalue.
 //   - Using Passed() gives Callback Ownership.
 //   - Ownership is transferred from Callback to callee on the first Run().
 //   - Callback supports unbound arguments.
-TEST_F(BindTest, ScopedPtr) {
+template <typename T>
+class BindMoveOnlyTypeTest : public ::testing::Test {
+};
+
+struct CustomDeleter {
+  void operator()(DeleteCounter* c) { delete c; }
+};
+
+using MoveOnlyTypesToTest =
+    ::testing::Types<scoped_ptr<DeleteCounter>,
+                     std::unique_ptr<DeleteCounter>,
+                     std::unique_ptr<DeleteCounter, CustomDeleter>>;
+TYPED_TEST_CASE(BindMoveOnlyTypeTest, MoveOnlyTypesToTest);
+
+TYPED_TEST(BindMoveOnlyTypeTest, PassedToBoundCallback) {
   int deletes = 0;
 
-  // Tests the Passed() function's support for pointers.
-  scoped_ptr<DeleteCounter> ptr(new DeleteCounter(&deletes));
-  Callback<scoped_ptr<DeleteCounter>()> unused_callback =
-      Bind(&PassThru<scoped_ptr<DeleteCounter> >, Passed(&ptr));
+  TypeParam ptr(new DeleteCounter(&deletes));
+  Callback<TypeParam()> callback = Bind(&PassThru<TypeParam>, Passed(&ptr));
   EXPECT_FALSE(ptr.get());
   EXPECT_EQ(0, deletes);
 
   // If we never invoke the Callback, it retains ownership and deletes.
-  unused_callback.Reset();
+  callback.Reset();
   EXPECT_EQ(1, deletes);
+}
 
-  // Tests the Passed() function's support for rvalues.
-  deletes = 0;
-  DeleteCounter* counter = new DeleteCounter(&deletes);
-  Callback<scoped_ptr<DeleteCounter>()> callback =
-      Bind(&PassThru<scoped_ptr<DeleteCounter> >,
-           Passed(scoped_ptr<DeleteCounter>(counter)));
-  EXPECT_FALSE(ptr.get());
+TYPED_TEST(BindMoveOnlyTypeTest, PassedWithRvalue) {
+  int deletes = 0;
+  Callback<TypeParam()> callback = Bind(
+      &PassThru<TypeParam>, Passed(TypeParam(new DeleteCounter(&deletes))));
   EXPECT_EQ(0, deletes);
 
-  // Check that ownership can be transferred back out.
-  scoped_ptr<DeleteCounter> result = callback.Run();
+  // If we never invoke the Callback, it retains ownership and deletes.
+  callback.Reset();
+  EXPECT_EQ(1, deletes);
+}
+
+// Check that ownership can be transferred back out.
+TYPED_TEST(BindMoveOnlyTypeTest, ReturnMoveOnlyType) {
+  int deletes = 0;
+  DeleteCounter* counter = new DeleteCounter(&deletes);
+  Callback<TypeParam()> callback =
+      Bind(&PassThru<TypeParam>, Passed(TypeParam(counter)));
+  TypeParam result = callback.Run();
   ASSERT_EQ(counter, result.get());
   EXPECT_EQ(0, deletes);
 
@@ -758,58 +868,49 @@
   // Ensure that we actually did get ownership.
   result.reset();
   EXPECT_EQ(1, deletes);
-
-  // Test unbound argument forwarding.
-  Callback<scoped_ptr<DeleteCounter>(scoped_ptr<DeleteCounter>)> cb_unbound =
-      Bind(&PassThru<scoped_ptr<DeleteCounter> >);
-  ptr.reset(new DeleteCounter(&deletes));
-  cb_unbound.Run(std::move(ptr));
 }
 
-TEST_F(BindTest, UniquePtr) {
+TYPED_TEST(BindMoveOnlyTypeTest, UnboundForwarding) {
   int deletes = 0;
-
-  // Tests the Passed() function's support for pointers.
-  std::unique_ptr<DeleteCounter> ptr(new DeleteCounter(&deletes));
-  Callback<std::unique_ptr<DeleteCounter>()> unused_callback =
-      Bind(&PassThru<std::unique_ptr<DeleteCounter>>, Passed(&ptr));
-  EXPECT_FALSE(ptr.get());
-  EXPECT_EQ(0, deletes);
-
-  // If we never invoke the Callback, it retains ownership and deletes.
-  unused_callback.Reset();
-  EXPECT_EQ(1, deletes);
-
-  // Tests the Passed() function's support for rvalues.
-  deletes = 0;
-  DeleteCounter* counter = new DeleteCounter(&deletes);
-  Callback<std::unique_ptr<DeleteCounter>()> callback =
-      Bind(&PassThru<std::unique_ptr<DeleteCounter>>,
-           Passed(std::unique_ptr<DeleteCounter>(counter)));
-  EXPECT_FALSE(ptr.get());
-  EXPECT_EQ(0, deletes);
-
-  // Check that ownership can be transferred back out.
-  std::unique_ptr<DeleteCounter> result = callback.Run();
-  ASSERT_EQ(counter, result.get());
-  EXPECT_EQ(0, deletes);
-
-  // Resetting does not delete since ownership was transferred.
-  callback.Reset();
-  EXPECT_EQ(0, deletes);
-
-  // Ensure that we actually did get ownership.
-  result.reset();
-  EXPECT_EQ(1, deletes);
-
+  TypeParam ptr(new DeleteCounter(&deletes));
   // Test unbound argument forwarding.
-  Callback<std::unique_ptr<DeleteCounter>(std::unique_ptr<DeleteCounter>)>
-      cb_unbound = Bind(&PassThru<std::unique_ptr<DeleteCounter>>);
-  ptr.reset(new DeleteCounter(&deletes));
+  Callback<TypeParam(TypeParam)> cb_unbound = Bind(&PassThru<TypeParam>);
   cb_unbound.Run(std::move(ptr));
+  EXPECT_EQ(1, deletes);
 }
 
-// Argument Copy-constructor usage for non-reference parameters.
+void VerifyVector(const std::vector<scoped_ptr<int>>& v) {
+  ASSERT_EQ(1u, v.size());
+  EXPECT_EQ(12345, *v[0]);
+}
+
+std::vector<scoped_ptr<int>> AcceptAndReturnMoveOnlyVector(
+    std::vector<scoped_ptr<int>> v) {
+  VerifyVector(v);
+  return v;
+}
+
+// Test that a vector containing move-only types can be used with Callback.
+TEST_F(BindTest, BindMoveOnlyVector) {
+  using MoveOnlyVector = std::vector<scoped_ptr<int>>;
+
+  MoveOnlyVector v;
+  v.push_back(make_scoped_ptr(new int(12345)));
+
+  // Early binding should work:
+  base::Callback<MoveOnlyVector()> bound_cb =
+      base::Bind(&AcceptAndReturnMoveOnlyVector, Passed(&v));
+  MoveOnlyVector intermediate_result = bound_cb.Run();
+  VerifyVector(intermediate_result);
+
+  // As should passing it as an argument to Run():
+  base::Callback<MoveOnlyVector(MoveOnlyVector)> unbound_cb =
+      base::Bind(&AcceptAndReturnMoveOnlyVector);
+  MoveOnlyVector final_result = unbound_cb.Run(std::move(intermediate_result));
+  VerifyVector(final_result);
+}
+
+// Argument copy-constructor usage for non-reference copy-only parameters.
 //   - Bound arguments are only copied once.
 //   - Forwarded arguments are only copied once.
 //   - Forwarded arguments with coercions are only copied twice (once for the
@@ -819,28 +920,148 @@
   int assigns = 0;
 
   CopyCounter counter(&copies, &assigns);
-
-  Callback<void()> copy_cb =
-      Bind(&VoidPolymorphic<CopyCounter>::Run, counter);
-  EXPECT_GE(1, copies);
+  Bind(&VoidPolymorphic<CopyCounter>::Run, counter);
+  EXPECT_EQ(1, copies);
   EXPECT_EQ(0, assigns);
 
   copies = 0;
   assigns = 0;
-  Callback<void(CopyCounter)> forward_cb =
-      Bind(&VoidPolymorphic<CopyCounter>::Run);
-  forward_cb.Run(counter);
-  EXPECT_GE(1, copies);
+  Bind(&VoidPolymorphic<CopyCounter>::Run, CopyCounter(&copies, &assigns));
+  EXPECT_EQ(1, copies);
   EXPECT_EQ(0, assigns);
 
   copies = 0;
   assigns = 0;
-  DerivedCopyCounter derived(&copies, &assigns);
-  Callback<void(CopyCounter)> coerce_cb =
-      Bind(&VoidPolymorphic<CopyCounter>::Run);
-  coerce_cb.Run(CopyCounter(derived));
-  EXPECT_GE(2, copies);
+  Bind(&VoidPolymorphic<CopyCounter>::Run).Run(counter);
+  EXPECT_EQ(2, copies);
   EXPECT_EQ(0, assigns);
+
+  copies = 0;
+  assigns = 0;
+  Bind(&VoidPolymorphic<CopyCounter>::Run).Run(CopyCounter(&copies, &assigns));
+  EXPECT_EQ(1, copies);
+  EXPECT_EQ(0, assigns);
+
+  copies = 0;
+  assigns = 0;
+  DerivedCopyMoveCounter derived(&copies, &assigns, nullptr, nullptr);
+  Bind(&VoidPolymorphic<CopyCounter>::Run).Run(CopyCounter(derived));
+  EXPECT_EQ(2, copies);
+  EXPECT_EQ(0, assigns);
+
+  copies = 0;
+  assigns = 0;
+  Bind(&VoidPolymorphic<CopyCounter>::Run)
+      .Run(CopyCounter(
+          DerivedCopyMoveCounter(&copies, &assigns, nullptr, nullptr)));
+  EXPECT_EQ(2, copies);
+  EXPECT_EQ(0, assigns);
+}
+
+// Argument move-constructor usage for move-only parameters.
+//   - Bound arguments passed by move are not copied.
+TEST_F(BindTest, ArgumentMoves) {
+  int move_constructs = 0;
+  int move_assigns = 0;
+
+  Bind(&VoidPolymorphic<const MoveCounter&>::Run,
+       MoveCounter(&move_constructs, &move_assigns));
+  EXPECT_EQ(1, move_constructs);
+  EXPECT_EQ(0, move_assigns);
+
+  // TODO(tzik): Support binding move-only type into a non-reference parameter
+  // of a variant of Callback.
+
+  move_constructs = 0;
+  move_assigns = 0;
+  Bind(&VoidPolymorphic<MoveCounter>::Run)
+      .Run(MoveCounter(&move_constructs, &move_assigns));
+  EXPECT_EQ(1, move_constructs);
+  EXPECT_EQ(0, move_assigns);
+
+  move_constructs = 0;
+  move_assigns = 0;
+  Bind(&VoidPolymorphic<MoveCounter>::Run)
+      .Run(MoveCounter(DerivedCopyMoveCounter(
+          nullptr, nullptr, &move_constructs, &move_assigns)));
+  EXPECT_EQ(2, move_constructs);
+  EXPECT_EQ(0, move_assigns);
+}
+
+// Argument constructor usage for non-reference movable-copyable
+// parameters.
+//   - Bound arguments passed by move are not copied.
+//   - Forwarded arguments are only copied once.
+//   - Forwarded arguments with coercions are only copied once and moved once.
+TEST_F(BindTest, ArgumentCopiesAndMoves) {
+  int copies = 0;
+  int assigns = 0;
+  int move_constructs = 0;
+  int move_assigns = 0;
+
+  CopyMoveCounter counter(&copies, &assigns, &move_constructs, &move_assigns);
+  Bind(&VoidPolymorphic<CopyMoveCounter>::Run, counter);
+  EXPECT_EQ(1, copies);
+  EXPECT_EQ(0, assigns);
+  EXPECT_EQ(0, move_constructs);
+  EXPECT_EQ(0, move_assigns);
+
+  copies = 0;
+  assigns = 0;
+  move_constructs = 0;
+  move_assigns = 0;
+  Bind(&VoidPolymorphic<CopyMoveCounter>::Run,
+       CopyMoveCounter(&copies, &assigns, &move_constructs, &move_assigns));
+  EXPECT_EQ(0, copies);
+  EXPECT_EQ(0, assigns);
+  EXPECT_EQ(1, move_constructs);
+  EXPECT_EQ(0, move_assigns);
+
+  copies = 0;
+  assigns = 0;
+  move_constructs = 0;
+  move_assigns = 0;
+  Bind(&VoidPolymorphic<CopyMoveCounter>::Run).Run(counter);
+  EXPECT_EQ(1, copies);
+  EXPECT_EQ(0, assigns);
+  EXPECT_EQ(1, move_constructs);
+  EXPECT_EQ(0, move_assigns);
+
+  copies = 0;
+  assigns = 0;
+  move_constructs = 0;
+  move_assigns = 0;
+  Bind(&VoidPolymorphic<CopyMoveCounter>::Run)
+      .Run(CopyMoveCounter(&copies, &assigns, &move_constructs, &move_assigns));
+  EXPECT_EQ(0, copies);
+  EXPECT_EQ(0, assigns);
+  EXPECT_EQ(1, move_constructs);
+  EXPECT_EQ(0, move_assigns);
+
+  DerivedCopyMoveCounter derived_counter(&copies, &assigns, &move_constructs,
+                                         &move_assigns);
+  copies = 0;
+  assigns = 0;
+  move_constructs = 0;
+  move_assigns = 0;
+  Bind(&VoidPolymorphic<CopyMoveCounter>::Run)
+      .Run(CopyMoveCounter(derived_counter));
+  EXPECT_EQ(1, copies);
+  EXPECT_EQ(0, assigns);
+  EXPECT_EQ(1, move_constructs);
+  EXPECT_EQ(0, move_assigns);
+
+  copies = 0;
+  assigns = 0;
+  move_constructs = 0;
+  move_assigns = 0;
+  Bind(&VoidPolymorphic<CopyMoveCounter>::Run)
+      .Run(CopyMoveCounter(DerivedCopyMoveCounter(
+          &copies, &assigns, &move_constructs, &move_assigns)));
+  EXPECT_EQ(0, copies);
+  EXPECT_EQ(0, assigns);
+  EXPECT_EQ(2, move_constructs);
+  EXPECT_EQ(0, move_assigns);
 }
 
 // Callback construction and assignment tests.
diff --git a/base/build_time.h b/base/build_time.h
index 4f0abc3..83c9875 100644
--- a/base/build_time.h
+++ b/base/build_time.h
@@ -10,17 +10,19 @@
 
 namespace base {
 
-// GetBuildTime returns the time at which the current binary was built.
+// GetBuildTime returns the time at which the current binary was built,
+// rounded down to 5:00:00am at the start of the day in UTC.
 //
-// This uses the __DATE__ and __TIME__ macros, which don't trigger a rebuild
-// when they change. However, official builds will always be rebuilt from
-// scratch.
+// This uses a generated file, which doesn't trigger a rebuild when the time
+// changes. It will, however, be updated whenever //build/util/LASTCHANGE
+// changes.
 //
-// Also, since __TIME__ doesn't include a timezone, this value should only be
-// considered accurate to a day.
+// This value should only be considered accurate to within a day.
+// It will always be in the past.
 //
-// NOTE: This function is disabled except for the official builds, by default
-// the date returned is "Sep 02 2008 08:00:00 PST".
+// Note: If the build is not official (i.e. is_official_build = false)
+// this time will be set to 5:00:00am on the most recent first Sunday
+// of a month.
 Time BASE_EXPORT GetBuildTime();
 
 }  // namespace base
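
A small consumption sketch (BuildIsStale() and its 30-day threshold are arbitrary illustrations): because the value now comes from a generated header, it stays stable across incremental builds:

    #include "base/build_time.h"
    #include "base/time/time.h"

    // Returns true if this binary was built more than 30 days ago.
    bool BuildIsStale() {
      return base::Time::Now() - base::GetBuildTime() >
             base::TimeDelta::FromDays(30);
    }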
diff --git a/base/callback.h b/base/callback.h
index 3bf0008..c04e90d 100644
--- a/base/callback.h
+++ b/base/callback.h
@@ -7,7 +7,6 @@
 
 #include "base/callback_forward.h"
 #include "base/callback_internal.h"
-#include "base/template_util.h"
 
 // NOTE: Header files that do not require the full definition of Callback or
 // Closure should #include "base/callback_forward.h" instead of this file.
@@ -341,63 +340,65 @@
 //      void Bar(char* ptr);
 //      Bind(&Foo, "test");
 //      Bind(&Bar, "test");  // This fails because ptr is not const.
-
-namespace base {
-
-// First, we forward declare the Callback class template. This informs the
-// compiler that the template only has 1 type parameter which is the function
-// signature that the Callback is representing.
-//
-// After this, create template specializations for 0-7 parameters. Note that
-// even though the template typelist grows, the specialization still
-// only has one type: the function signature.
 //
 // If you are thinking of forward declaring Callback in your own header file,
 // please include "base/callback_forward.h" instead.
 
+namespace base {
 namespace internal {
 template <typename Runnable, typename RunType, typename... BoundArgsType>
 struct BindState;
 }  // namespace internal
 
-template <typename R, typename... Args>
-class Callback<R(Args...)> : public internal::CallbackBase {
+template <typename R, typename... Args, internal::CopyMode copy_mode>
+class Callback<R(Args...), copy_mode>
+    : public internal::CallbackBase<copy_mode> {
  public:
   // MSVC 2013 doesn't support Type Alias of function types.
   // Revisit this after we update it to a newer version.
   typedef R RunType(Args...);
 
-  Callback() : CallbackBase(nullptr) { }
+  Callback() : internal::CallbackBase<copy_mode>(nullptr) {}
 
-  template <typename Runnable, typename BindRunType, typename... BoundArgsType>
+  template <typename Runnable, typename BindRunType, typename... BoundArgs>
   explicit Callback(
-      internal::BindState<Runnable, BindRunType, BoundArgsType...>* bind_state)
-      : CallbackBase(bind_state) {
+      internal::BindState<Runnable, BindRunType, BoundArgs...>* bind_state)
+      : internal::CallbackBase<copy_mode>(bind_state) {
     // Force the assignment to a local variable of PolymorphicInvoke
     // so the compiler will typecheck that the passed in Run() method has
     // the correct type.
     PolymorphicInvoke invoke_func =
-        &internal::BindState<Runnable, BindRunType, BoundArgsType...>
+        &internal::BindState<Runnable, BindRunType, BoundArgs...>
             ::InvokerType::Run;
-    polymorphic_invoke_ = reinterpret_cast<InvokeFuncStorage>(invoke_func);
+    using InvokeFuncStorage =
+        typename internal::CallbackBase<copy_mode>::InvokeFuncStorage;
+    this->polymorphic_invoke_ =
+        reinterpret_cast<InvokeFuncStorage>(invoke_func);
   }
 
   bool Equals(const Callback& other) const {
-    return CallbackBase::Equals(other);
+    return this->EqualsInternal(other);
   }
 
-  R Run(typename internal::CallbackParamTraits<Args>::ForwardType... args)
-      const {
+  // Run() makes an extra copy compared to directly calling the bound function
+  // if an argument is passed by value and is copyable but not movable; i.e.,
+  // the example below copies CopyableNonMovableType twice:
+  //   void F(CopyableNonMovableType) {}
+  //   Bind(&F).Run(CopyableNonMovableType());
+  //
+  // We cannot fully apply the perfect-forwarding idiom to the call chain from
+  // Callback::Run() to the target function. Perfect forwarding requires
+  // knowing how the caller will pass the arguments. However, the signature of
+  // InvokerType::Run() needs to be fixed in the callback constructor, so Run()
+  // cannot template its arguments based on how it is called.
+  R Run(Args... args) const {
     PolymorphicInvoke f =
-        reinterpret_cast<PolymorphicInvoke>(polymorphic_invoke_);
-
-    return f(bind_state_.get(), internal::CallbackForward(args)...);
+        reinterpret_cast<PolymorphicInvoke>(this->polymorphic_invoke_);
+    return f(this->bind_state_.get(), std::forward<Args>(args)...);
   }
 
  private:
-  using PolymorphicInvoke =
-      R(*)(internal::BindStateBase*,
-           typename internal::CallbackParamTraits<Args>::ForwardType...);
+  using PolymorphicInvoke = R (*)(internal::BindStateBase*, Args&&...);
 };
 
 }  // namespace base
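
To make the Run() copy-count comment above concrete, a minimal sketch (illustrative only; CopyableNonMovable and TakeByValue are hypothetical names):

    #include "base/bind.h"
    #include "base/callback.h"

    // Copyable but not movable: declaring a copy constructor suppresses
    // the implicit move constructor.
    struct CopyableNonMovable {
      CopyableNonMovable() = default;
      CopyableNonMovable(const CopyableNonMovable&) = default;
    };

    void TakeByValue(CopyableNonMovable) {}

    void Demo() {
      TakeByValue(CopyableNonMovable());  // Direct call.
      // Per the comment in callback.h, going through Run() costs one
      // extra copy of the argument relative to the direct call above.
      base::Bind(&TakeByValue).Run(CopyableNonMovable());
    }
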
diff --git a/base/callback_forward.h b/base/callback_forward.h
index a9a263a..8b9b89c 100644
--- a/base/callback_forward.h
+++ b/base/callback_forward.h
@@ -6,8 +6,19 @@
 #define BASE_CALLBACK_FORWARD_H_
 
 namespace base {
+namespace internal {
 
-template <typename Sig>
+// CopyMode is used to control the copyability of a Callback.
+// MoveOnly indicates the Callback is not copyable but movable, and Copyable
+// indicates it is copyable and movable.
+enum class CopyMode {
+  MoveOnly, Copyable,
+};
+
+}  // namespace internal
+
+template <typename Signature,
+          internal::CopyMode copy_mode = internal::CopyMode::Copyable>
 class Callback;
 
 // Syntactic sugar to make Callback<void()> easier to declare since it
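
An illustrative sketch of what the new copy_mode parameter enables (not part of this change, and production code would not normally name base::internal directly):

    #include <utility>
    #include "base/callback.h"

    // Hypothetical alias for a move-only closure.
    using MoveOnlyClosure =
        base::Callback<void(), base::internal::CopyMode::MoveOnly>;

    void Demo(MoveOnlyClosure cb) {
      MoveOnlyClosure moved = std::move(cb);  // Movable.
      // MoveOnlyClosure copy = moved;  // Would not compile: not copyable.
      moved.Run();
    }
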
diff --git a/base/callback_internal.cc b/base/callback_internal.cc
index 2553fe7..4c8ccae 100644
--- a/base/callback_internal.cc
+++ b/base/callback_internal.cc
@@ -18,29 +18,66 @@
     destructor_(this);
 }
 
-CallbackBase::CallbackBase(const CallbackBase& c) = default;
-CallbackBase& CallbackBase::operator=(const CallbackBase& c) = default;
-
-void CallbackBase::Reset() {
-  polymorphic_invoke_ = NULL;
-  // NULL the bind_state_ last, since it may be holding the last ref to whatever
-  // object owns us, and we may be deleted after that.
-  bind_state_ = NULL;
+CallbackBase<CopyMode::MoveOnly>::CallbackBase(CallbackBase&& c)
+    : bind_state_(std::move(c.bind_state_)),
+      polymorphic_invoke_(c.polymorphic_invoke_) {
+  c.polymorphic_invoke_ = nullptr;
 }
 
-bool CallbackBase::Equals(const CallbackBase& other) const {
+CallbackBase<CopyMode::MoveOnly>&
+CallbackBase<CopyMode::MoveOnly>::operator=(CallbackBase&& c) {
+  bind_state_ = std::move(c.bind_state_);
+  polymorphic_invoke_ = c.polymorphic_invoke_;
+  c.polymorphic_invoke_ = nullptr;
+  return *this;
+}
+
+void CallbackBase<CopyMode::MoveOnly>::Reset() {
+  polymorphic_invoke_ = nullptr;
+  // NULL the bind_state_ last, since it may be holding the last ref to whatever
+  // object owns us, and we may be deleted after that.
+  bind_state_ = nullptr;
+}
+
+bool CallbackBase<CopyMode::MoveOnly>::EqualsInternal(
+    const CallbackBase& other) const {
   return bind_state_.get() == other.bind_state_.get() &&
          polymorphic_invoke_ == other.polymorphic_invoke_;
 }
 
-CallbackBase::CallbackBase(BindStateBase* bind_state)
-    : bind_state_(bind_state),
-      polymorphic_invoke_(NULL) {
+CallbackBase<CopyMode::MoveOnly>::CallbackBase(
+    BindStateBase* bind_state)
+    : bind_state_(bind_state) {
   DCHECK(!bind_state_.get() || bind_state_->ref_count_ == 1);
 }
 
-CallbackBase::~CallbackBase() {
+CallbackBase<CopyMode::MoveOnly>::~CallbackBase() {}
+
+CallbackBase<CopyMode::Copyable>::CallbackBase(
+    const CallbackBase& c)
+    : CallbackBase<CopyMode::MoveOnly>(nullptr) {
+  bind_state_ = c.bind_state_;
+  polymorphic_invoke_ = c.polymorphic_invoke_;
 }
 
+CallbackBase<CopyMode::Copyable>::CallbackBase(CallbackBase&& c)
+    : CallbackBase<CopyMode::MoveOnly>(std::move(c)) {}
+
+CallbackBase<CopyMode::Copyable>&
+CallbackBase<CopyMode::Copyable>::operator=(const CallbackBase& c) {
+  bind_state_ = c.bind_state_;
+  polymorphic_invoke_ = c.polymorphic_invoke_;
+  return *this;
+}
+
+CallbackBase<CopyMode::Copyable>&
+CallbackBase<CopyMode::Copyable>::operator=(CallbackBase&& c) {
+  *static_cast<CallbackBase<CopyMode::MoveOnly>*>(this) = std::move(c);
+  return *this;
+}
+
+template class CallbackBase<CopyMode::MoveOnly>;
+template class CallbackBase<CopyMode::Copyable>;
+
 }  // namespace internal
 }  // namespace base
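
A sketch of the move semantics defined above, as observed through the public API (SomeFunction is a hypothetical free function):

    #include <utility>
    #include "base/bind.h"
    #include "base/callback.h"
    #include "base/logging.h"

    void SomeFunction() {}

    void Demo() {
      base::Closure a = base::Bind(&SomeFunction);
      base::Closure b = std::move(a);  // Uses the move path defined above.
      DCHECK(a.is_null());   // |a|'s bind_state_ was moved away.
      DCHECK(!b.is_null());  // |b| now owns the bind state.
      b.Run();
    }
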
diff --git a/base/callback_internal.h b/base/callback_internal.h
index 630a5c4..439ce6d 100644
--- a/base/callback_internal.h
+++ b/base/callback_internal.h
@@ -16,13 +16,14 @@
 
 #include "base/atomic_ref_count.h"
 #include "base/base_export.h"
+#include "base/callback_forward.h"
 #include "base/macros.h"
 #include "base/memory/ref_counted.h"
 #include "base/memory/scoped_ptr.h"
-#include "base/template_util.h"
 
 namespace base {
 namespace internal {
+template <CopyMode copy_mode>
 class CallbackBase;
 
 // BindStateBase is used to provide an opaque handle that the Callback
@@ -44,6 +45,7 @@
 
  private:
   friend class scoped_refptr<BindStateBase>;
+  template <CopyMode copy_mode>
   friend class CallbackBase;
 
   void AddRef();
@@ -59,10 +61,13 @@
 
 // Holds the Callback methods that don't require specialization to reduce
 // template bloat.
-class BASE_EXPORT CallbackBase {
+// CallbackBase<MoveOnly> is a direct base class of MoveOnly callbacks, and
+// CallbackBase<Copyable> uses CallbackBase<MoveOnly> for its implementation.
+template <>
+class BASE_EXPORT CallbackBase<CopyMode::MoveOnly> {
  public:
-  CallbackBase(const CallbackBase& c);
-  CallbackBase& operator=(const CallbackBase& c);
+  CallbackBase(CallbackBase&& c);
+  CallbackBase& operator=(CallbackBase&& c);
 
   // Returns true if Callback is null (doesn't refer to anything).
   bool is_null() const { return bind_state_.get() == NULL; }
@@ -78,7 +83,7 @@
   using InvokeFuncStorage = void(*)();
 
   // Returns true if this callback equals |other|. |other| may be null.
-  bool Equals(const CallbackBase& other) const;
+  bool EqualsInternal(const CallbackBase& other) const;
 
   // Allow initializing of |bind_state_| via the constructor to avoid default
   // initialization of the scoped_refptr.  We do not also initialize
@@ -92,9 +97,27 @@
   ~CallbackBase();
 
   scoped_refptr<BindStateBase> bind_state_;
-  InvokeFuncStorage polymorphic_invoke_;
+  InvokeFuncStorage polymorphic_invoke_ = nullptr;
 };
 
+// CallbackBase<Copyable> is a direct base class of Copyable Callbacks.
+template <>
+class BASE_EXPORT CallbackBase<CopyMode::Copyable>
+    : public CallbackBase<CopyMode::MoveOnly> {
+ public:
+  CallbackBase(const CallbackBase& c);
+  CallbackBase(CallbackBase&& c);
+  CallbackBase& operator=(const CallbackBase& c);
+  CallbackBase& operator=(CallbackBase&& c);
+ protected:
+  explicit CallbackBase(BindStateBase* bind_state)
+      : CallbackBase<CopyMode::MoveOnly>(bind_state) {}
+  ~CallbackBase() {}
+};
+
+extern template class CallbackBase<CopyMode::MoveOnly>;
+extern template class CallbackBase<CopyMode::Copyable>;
+
 // A helper template to determine if given type is non-const move-only-type,
 // i.e. if a value of the given type should be passed via std::move() in a
 // destructive way. Types are considered to be move-only if they have a
@@ -104,7 +127,13 @@
 // confuses template deduction in VS2013 with certain types such as
 // std::unique_ptr.
 // TODO(dcheng): Revisit this when Windows switches to VS2015 by default.
+
 template <typename T> struct IsMoveOnlyType {
+  // Types YesType and NoType are guaranteed such that sizeof(YesType) <
+  // sizeof(NoType).
+  using YesType = char;
+  struct NoType { YesType dummy[2]; };
+
   template <typename U>
   static YesType Test(const typename U::MoveOnlyTypeForCPP03*);
 
@@ -112,13 +141,19 @@
   static NoType Test(...);
 
   static const bool value = sizeof((Test<T>(0))) == sizeof(YesType) &&
-                            !is_const<T>::value;
+                            !std::is_const<T>::value;
 };
 
 // Specialization of IsMoveOnlyType so that std::unique_ptr is still considered
 // move-only, even without the sentinel member.
-template <typename T>
-struct IsMoveOnlyType<std::unique_ptr<T>> : std::true_type {};
+template <typename T, typename D>
+struct IsMoveOnlyType<std::unique_ptr<T, D>> : std::true_type {};
+
+// Specialization for std::vector, so that it's considered move-only if the
+// element type is move-only. Allocator is explicitly ignored when determining
+// move-only status of the std::vector.
+template <typename T, typename Allocator>
+struct IsMoveOnlyType<std::vector<T, Allocator>> : IsMoveOnlyType<T> {};
 
 template <typename>
 struct CallbackParamTraitsForMoveOnlyType;
@@ -131,16 +166,7 @@
 // http://connect.microsoft.com/VisualStudio/feedbackdetail/view/957801/compilation-error-with-variadic-templates
 //
 // This is a typetraits object that's used to take an argument type, and
-// extract a suitable type for storing and forwarding arguments.
-//
-// In particular, it strips off references, and converts arrays to
-// pointers for storage; and it avoids accidentally trying to create a
-// "reference of a reference" if the argument is a reference type.
-//
-// This array type becomes an issue for storage because we are passing bound
-// parameters by const reference. In this case, we end up passing an actual
-// array type in the initializer list which C++ does not allow.  This will
-// break passing of C-string literals.
+// extract a suitable type for forwarding arguments.
 template <typename T>
 struct CallbackParamTraits
     : std::conditional<IsMoveOnlyType<T>::value,
@@ -151,18 +177,6 @@
 template <typename T>
 struct CallbackParamTraitsForNonMoveOnlyType {
   using ForwardType = const T&;
-  using StorageType = T;
-};
-
-// The Storage should almost be impossible to trigger unless someone manually
-// specifies type of the bind parameters.  However, in case they do,
-// this will guard against us accidentally storing a reference parameter.
-//
-// The ForwardType should only be used for unbound arguments.
-template <typename T>
-struct CallbackParamTraitsForNonMoveOnlyType<T&> {
-  using ForwardType = T&;
-  using StorageType = T;
 };
 
 // Note that for array types, we implicitly add a const in the conversion. This
@@ -173,14 +187,12 @@
 template <typename T, size_t n>
 struct CallbackParamTraitsForNonMoveOnlyType<T[n]> {
   using ForwardType = const T*;
-  using StorageType = const T*;
 };
 
 // See comment for CallbackParamTraits<T[n]>.
 template <typename T>
 struct CallbackParamTraitsForNonMoveOnlyType<T[]> {
   using ForwardType = const T*;
-  using StorageType = const T*;
 };
 
 // Parameter traits for movable-but-not-copyable scopers.
@@ -199,7 +211,6 @@
 template <typename T>
 struct CallbackParamTraitsForMoveOnlyType {
   using ForwardType = T;
-  using StorageType = T;
 };
 
 // CallbackForward() is a very limited simulation of C++11's std::forward()
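
The new IsMoveOnlyType specializations can be sanity-checked with a few static_asserts (an illustrative sketch, not code in this change):

    #include <memory>
    #include <vector>
    #include "base/callback_internal.h"

    // unique_ptr counts as move-only regardless of its deleter, a vector
    // inherits move-only-ness from its element type, and plain ints are
    // still treated as copyable.
    static_assert(base::internal::IsMoveOnlyType<std::unique_ptr<int>>::value,
                  "unique_ptr is move-only");
    static_assert(base::internal::IsMoveOnlyType<
                      std::vector<std::unique_ptr<int>>>::value,
                  "vector of move-only elements is move-only");
    static_assert(!base::internal::IsMoveOnlyType<int>::value,
                  "int is not move-only");
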
diff --git a/base/containers/hash_tables.h b/base/containers/hash_tables.h
index c421ddd..8da7b67 100644
--- a/base/containers/hash_tables.h
+++ b/base/containers/hash_tables.h
@@ -1,281 +1,75 @@
 // Copyright (c) 2011 The Chromium Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
-//
-
-//
-// Deal with the differences between Microsoft and GNU implemenations
-// of hash_map. Allows all platforms to use |base::hash_map| and
-// |base::hash_set|.
-//  eg:
-//   base::hash_map<int> my_map;
-//   base::hash_set<int> my_set;
-//
-// NOTE: It is an explicit non-goal of this class to provide a generic hash
-// function for pointers.  If you want to hash a pointers to a particular class,
-// please define the template specialization elsewhere (for example, in its
-// header file) and keep it specific to just pointers to that class.  This is
-// because identity hashes are not desirable for all types that might show up
-// in containers as pointers.
 
 #ifndef BASE_CONTAINERS_HASH_TABLES_H_
 #define BASE_CONTAINERS_HASH_TABLES_H_
 
-#include <stddef.h>
-#include <stdint.h>
-
-#include <utility>
-
-#include "base/strings/string16.h"
-#include "build/build_config.h"
-
-#if defined(COMPILER_MSVC)
+#include <cstddef>
 #include <unordered_map>
 #include <unordered_set>
+#include <utility>
 
-#define BASE_HASH_NAMESPACE std
+#include "base/hash.h"
 
-#elif defined(COMPILER_GCC)
+// This header file is deprecated. Use the corresponding C++11 type
+// instead. https://crbug.com/576864
 
+// Use a custom hasher instead.
 #define BASE_HASH_NAMESPACE base_hash
 
-// This is a hack to disable the gcc 4.4 warning about hash_map and hash_set
-// being deprecated.  We can get rid of this when we upgrade to VS2008 and we
-// can use <tr1/unordered_map> and <tr1/unordered_set>.
-#ifdef __DEPRECATED
-#define CHROME_OLD__DEPRECATED __DEPRECATED
-#undef __DEPRECATED
-#endif
-
-#include <ext/hash_map>
-#include <ext/hash_set>
-#define BASE_HASH_IMPL_NAMESPACE __gnu_cxx
-
-#include <string>
-
-#ifdef CHROME_OLD__DEPRECATED
-#define __DEPRECATED CHROME_OLD__DEPRECATED
-#undef CHROME_OLD__DEPRECATED
-#endif
-
 namespace BASE_HASH_NAMESPACE {
 
-// The pre-standard hash behaves like C++11's std::hash, except around pointers.
-// const char* is specialized to hash the C string and hash functions for
-// general T* are missing. Define a BASE_HASH_NAMESPACE::hash which aligns with
-// the C++11 behavior.
-
+// A separate hasher which, by default, forwards to std::hash. This is so legacy
+// uses of BASE_HASH_NAMESPACE with base::hash_map do not interfere with
+// std::hash mid-transition.
 template<typename T>
 struct hash {
-  std::size_t operator()(const T& value) const {
-    return BASE_HASH_IMPL_NAMESPACE::hash<T>()(value);
-  }
+  std::size_t operator()(const T& value) const { return std::hash<T>()(value); }
 };
 
-template<typename T>
-struct hash<T*> {
-  std::size_t operator()(T* value) const {
-    return BASE_HASH_IMPL_NAMESPACE::hash<uintptr_t>()(
-        reinterpret_cast<uintptr_t>(value));
+// Use base::IntPairHash from base/hash.h as a custom hasher instead.
+template <typename Type1, typename Type2>
+struct hash<std::pair<Type1, Type2>> {
+  std::size_t operator()(std::pair<Type1, Type2> value) const {
+    return base::HashInts(value.first, value.second);
   }
 };
 
-// The GNU C++ library provides identity hash functions for many integral types,
-// but not for |long long|.  This hash function will truncate if |size_t| is
-// narrower than |long long|.  This is probably good enough for what we will
-// use it for.
-
-#define DEFINE_TRIVIAL_HASH(integral_type) \
-    template<> \
-    struct hash<integral_type> { \
-      std::size_t operator()(integral_type value) const { \
-        return static_cast<std::size_t>(value); \
-      } \
-    }
-
-DEFINE_TRIVIAL_HASH(long long);
-DEFINE_TRIVIAL_HASH(unsigned long long);
-
-#undef DEFINE_TRIVIAL_HASH
-
-// Implement string hash functions so that strings of various flavors can
-// be used as keys in STL maps and sets.  The hash algorithm comes from the
-// GNU C++ library, in <tr1/functional>.  It is duplicated here because GCC
-// versions prior to 4.3.2 are unable to compile <tr1/functional> when RTTI
-// is disabled, as it is in our build.
-
-#define DEFINE_STRING_HASH(string_type) \
-    template<> \
-    struct hash<string_type> { \
-      std::size_t operator()(const string_type& s) const { \
-        std::size_t result = 0; \
-        for (string_type::const_iterator i = s.begin(); i != s.end(); ++i) \
-          result = (result * 131) + *i; \
-        return result; \
-      } \
-    }
-
-DEFINE_STRING_HASH(std::string);
-DEFINE_STRING_HASH(base::string16);
-
-#undef DEFINE_STRING_HASH
-
 }  // namespace BASE_HASH_NAMESPACE
 
-#else  // COMPILER
-#error define BASE_HASH_NAMESPACE for your compiler
-#endif  // COMPILER
-
 namespace base {
 
-// On MSVC, use the C++11 containers.
-#if defined(COMPILER_MSVC)
-
-template<class Key, class T,
-         class Hash = std::hash<Key>,
-         class Pred = std::equal_to<Key>,
-         class Alloc = std::allocator<std::pair<const Key, T>>>
+// Use std::unordered_map instead.
+template <class Key,
+          class T,
+          class Hash = BASE_HASH_NAMESPACE::hash<Key>,
+          class Pred = std::equal_to<Key>,
+          class Alloc = std::allocator<std::pair<const Key, T>>>
 using hash_map = std::unordered_map<Key, T, Hash, Pred, Alloc>;
 
-template<class Key, class T,
-         class Hash = std::hash<Key>,
-         class Pred = std::equal_to<Key>,
-         class Alloc = std::allocator<std::pair<const Key, T>>>
+// Use std::unordered_multimap instead.
+template <class Key,
+          class T,
+          class Hash = BASE_HASH_NAMESPACE::hash<Key>,
+          class Pred = std::equal_to<Key>,
+          class Alloc = std::allocator<std::pair<const Key, T>>>
 using hash_multimap = std::unordered_multimap<Key, T, Hash, Pred, Alloc>;
 
-template<class Key,
-         class Hash = std::hash<Key>,
-         class Pred = std::equal_to<Key>,
-         class Alloc = std::allocator<Key>>
+// Use std::unordered_multiset instead.
+template <class Key,
+          class Hash = BASE_HASH_NAMESPACE::hash<Key>,
+          class Pred = std::equal_to<Key>,
+          class Alloc = std::allocator<Key>>
 using hash_multiset = std::unordered_multiset<Key, Hash, Pred, Alloc>;
 
-template<class Key,
-         class Hash = std::hash<Key>,
-         class Pred = std::equal_to<Key>,
-         class Alloc = std::allocator<Key>>
+// Use std::unordered_set instead.
+template <class Key,
+          class Hash = BASE_HASH_NAMESPACE::hash<Key>,
+          class Pred = std::equal_to<Key>,
+          class Alloc = std::allocator<Key>>
 using hash_set = std::unordered_set<Key, Hash, Pred, Alloc>;
 
-#else  // !COMPILER_MSVC
-
-// Otherwise, use the pre-standard ones, but override the default hash to match
-// C++11.
-template<class Key, class T,
-         class Hash = BASE_HASH_NAMESPACE::hash<Key>,
-         class Pred = std::equal_to<Key>,
-         class Alloc = std::allocator<std::pair<const Key, T>>>
-using hash_map = BASE_HASH_IMPL_NAMESPACE::hash_map<Key, T, Hash, Pred, Alloc>;
-
-template<class Key, class T,
-         class Hash = BASE_HASH_NAMESPACE::hash<Key>,
-         class Pred = std::equal_to<Key>,
-         class Alloc = std::allocator<std::pair<const Key, T>>>
-using hash_multimap =
-    BASE_HASH_IMPL_NAMESPACE::hash_multimap<Key, T, Hash, Pred, Alloc>;
-
-template<class Key,
-         class Hash = BASE_HASH_NAMESPACE::hash<Key>,
-         class Pred = std::equal_to<Key>,
-         class Alloc = std::allocator<Key>>
-using hash_multiset =
-    BASE_HASH_IMPL_NAMESPACE::hash_multiset<Key, Hash, Pred, Alloc>;
-
-template<class Key,
-         class Hash = BASE_HASH_NAMESPACE::hash<Key>,
-         class Pred = std::equal_to<Key>,
-         class Alloc = std::allocator<Key>>
-using hash_set = BASE_HASH_IMPL_NAMESPACE::hash_set<Key, Hash, Pred, Alloc>;
-
-#undef BASE_HASH_IMPL_NAMESPACE
-
-#endif  // COMPILER_MSVC
-
-// Implement hashing for pairs of at-most 32 bit integer values.
-// When size_t is 32 bits, we turn the 64-bit hash code into 32 bits by using
-// multiply-add hashing. This algorithm, as described in
-// Theorem 4.3.3 of the thesis "Über die Komplexität der Multiplikation in
-// eingeschränkten Branchingprogrammmodellen" by Woelfel, is:
-//
-//   h32(x32, y32) = (h64(x32, y32) * rand_odd64 + rand16 * 2^16) % 2^64 / 2^32
-//
-// Contact danakj@chromium.org for any questions.
-inline std::size_t HashInts32(uint32_t value1, uint32_t value2) {
-  uint64_t value1_64 = value1;
-  uint64_t hash64 = (value1_64 << 32) | value2;
-
-  if (sizeof(std::size_t) >= sizeof(uint64_t))
-    return static_cast<std::size_t>(hash64);
-
-  uint64_t odd_random = 481046412LL << 32 | 1025306955LL;
-  uint32_t shift_random = 10121U << 16;
-
-  hash64 = hash64 * odd_random + shift_random;
-  std::size_t high_bits = static_cast<std::size_t>(
-      hash64 >> (8 * (sizeof(uint64_t) - sizeof(std::size_t))));
-  return high_bits;
-}
-
-// Implement hashing for pairs of up-to 64-bit integer values.
-// We use the compound integer hash method to produce a 64-bit hash code, by
-// breaking the two 64-bit inputs into 4 32-bit values:
-// http://opendatastructures.org/versions/edition-0.1d/ods-java/node33.html#SECTION00832000000000000000
-// Then we reduce our result to 32 bits if required, similar to above.
-inline std::size_t HashInts64(uint64_t value1, uint64_t value2) {
-  uint32_t short_random1 = 842304669U;
-  uint32_t short_random2 = 619063811U;
-  uint32_t short_random3 = 937041849U;
-  uint32_t short_random4 = 3309708029U;
-
-  uint32_t value1a = static_cast<uint32_t>(value1 & 0xffffffff);
-  uint32_t value1b = static_cast<uint32_t>((value1 >> 32) & 0xffffffff);
-  uint32_t value2a = static_cast<uint32_t>(value2 & 0xffffffff);
-  uint32_t value2b = static_cast<uint32_t>((value2 >> 32) & 0xffffffff);
-
-  uint64_t product1 = static_cast<uint64_t>(value1a) * short_random1;
-  uint64_t product2 = static_cast<uint64_t>(value1b) * short_random2;
-  uint64_t product3 = static_cast<uint64_t>(value2a) * short_random3;
-  uint64_t product4 = static_cast<uint64_t>(value2b) * short_random4;
-
-  uint64_t hash64 = product1 + product2 + product3 + product4;
-
-  if (sizeof(std::size_t) >= sizeof(uint64_t))
-    return static_cast<std::size_t>(hash64);
-
-  uint64_t odd_random = 1578233944LL << 32 | 194370989LL;
-  uint32_t shift_random = 20591U << 16;
-
-  hash64 = hash64 * odd_random + shift_random;
-  std::size_t high_bits = static_cast<std::size_t>(
-      hash64 >> (8 * (sizeof(uint64_t) - sizeof(std::size_t))));
-  return high_bits;
-}
-
-template<typename T1, typename T2>
-inline std::size_t HashPair(T1 value1, T2 value2) {
-  // This condition is expected to be compile-time evaluated and optimised away
-  // in release builds.
-  if (sizeof(T1) > sizeof(uint32_t) || (sizeof(T2) > sizeof(uint32_t)))
-    return HashInts64(value1, value2);
-
-  return HashInts32(value1, value2);
-}
-
 }  // namespace base
 
-namespace BASE_HASH_NAMESPACE {
-
-// Implement methods for hashing a pair of integers, so they can be used as
-// keys in STL containers.
-
-template<typename Type1, typename Type2>
-struct hash<std::pair<Type1, Type2> > {
-  std::size_t operator()(std::pair<Type1, Type2> value) const {
-    return base::HashPair(value.first, value.second);
-  }
-};
-
-}  // namespace BASE_HASH_NAMESPACE
-
-#undef DEFINE_PAIR_HASH_FUNCTION_START
-#undef DEFINE_PAIR_HASH_FUNCTION_END
-
 #endif  // BASE_CONTAINERS_HASH_TABLES_H_
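
A sketch of what the slimmed-down header still supports: pair keys keep working because the default hasher picks up the HashInts()-based specialization above (illustrative only):

    #include <string>
    #include <utility>
    #include "base/containers/hash_tables.h"

    void Demo() {
      // The default Hash parameter is BASE_HASH_NAMESPACE::hash<Key>, which
      // forwards std::pair keys to base::HashInts().
      base::hash_map<std::pair<int, int>, std::string> grid;
      grid[std::make_pair(2, 3)] = "cell";
    }
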
diff --git a/base/containers/mru_cache.h b/base/containers/mru_cache.h
index 272a773..6c1d626 100644
--- a/base/containers/mru_cache.h
+++ b/base/containers/mru_cache.h
@@ -19,11 +19,12 @@
 #include <stddef.h>
 
 #include <algorithm>
+#include <functional>
 #include <list>
 #include <map>
+#include <unordered_map>
 #include <utility>
 
-#include "base/containers/hash_tables.h"
 #include "base/logging.h"
 #include "base/macros.h"
 
@@ -34,16 +35,17 @@
 // This template is used to standardize map type containers that can be used
 // by MRUCacheBase. This level of indirection is necessary because of the way
 // that template template params and default template params interact.
-template <class KeyType, class ValueType>
+template <class KeyType, class ValueType, class CompareType>
 struct MRUCacheStandardMap {
-  typedef std::map<KeyType, ValueType> Type;
+  typedef std::map<KeyType, ValueType, CompareType> Type;
 };
 
 // Base class for the MRU cache specializations defined below.
-// The deletor will get called on all payloads that are being removed or
-// replaced.
-template <class KeyType, class PayloadType, class DeletorType,
-          template <typename, typename> class MapType = MRUCacheStandardMap>
+template <class KeyType,
+          class PayloadType,
+          class HashOrCompareType,
+          template <typename, typename, typename> class MapType =
+              MRUCacheStandardMap>
 class MRUCacheBase {
  public:
   // The payload of the list. This maintains a copy of the key so we can
@@ -53,7 +55,8 @@
  private:
   typedef std::list<value_type> PayloadList;
   typedef typename MapType<KeyType,
-                           typename PayloadList::iterator>::Type KeyIndex;
+                           typename PayloadList::iterator,
+                           HashOrCompareType>::Type KeyIndex;
 
  public:
   typedef typename PayloadList::size_type size_type;
@@ -69,18 +72,9 @@
   // a new item is inserted. If the caller wants to manage this itself (for
   // example, maybe it has special work to do when something is evicted), it
   // can pass NO_AUTO_EVICT to not restrict the cache size.
-  explicit MRUCacheBase(size_type max_size) : max_size_(max_size) {
-  }
+  explicit MRUCacheBase(size_type max_size) : max_size_(max_size) {}
 
-  MRUCacheBase(size_type max_size, const DeletorType& deletor)
-      : max_size_(max_size), deletor_(deletor) {
-  }
-
-  virtual ~MRUCacheBase() {
-    iterator i = begin();
-    while (i != end())
-      i = Erase(i);
-  }
+  virtual ~MRUCacheBase() {}
 
   size_type max_size() const { return max_size_; }
 
@@ -88,14 +82,14 @@
   // the same key, it is removed prior to insertion. An iterator indicating the
   // inserted item will be returned (this will always be the front of the list).
   //
-  // The payload will be copied. In the case of an OwningMRUCache, this function
-  // will take ownership of the pointer.
-  iterator Put(const KeyType& key, const PayloadType& payload) {
+  // The payload will be forwarded.
+  template <typename Payload>
+  iterator Put(const KeyType& key, Payload&& payload) {
     // Remove any existing payload with that key.
     typename KeyIndex::iterator index_iter = index_.find(key);
     if (index_iter != index_.end()) {
-      // Erase the reference to it. This will call the deletor on the removed
-      // element. The index reference will be replaced in the code below.
+      // Erase the reference to it. The index reference will be replaced in the
+      // code below.
       Erase(index_iter->second);
     } else if (max_size_ != NO_AUTO_EVICT) {
       // New item is being inserted which might make it larger than the maximum
@@ -103,7 +97,7 @@
       ShrinkToSize(max_size_ - 1);
     }
 
-    ordering_.push_front(value_type(key, payload));
+    ordering_.push_front(value_type(key, std::forward<Payload>(payload)));
     index_.insert(std::make_pair(key, ordering_.begin()));
     return ordering_.begin();
   }
@@ -144,14 +138,12 @@
   void Swap(MRUCacheBase& other) {
     ordering_.swap(other.ordering_);
     index_.swap(other.index_);
-    std::swap(deletor_, other.deletor_);
     std::swap(max_size_, other.max_size_);
   }
 
   // Erases the item referenced by the given iterator. An iterator to the item
   // following it will be returned. The iterator must be valid.
   iterator Erase(iterator pos) {
-    deletor_(pos->second);
     index_.erase(pos->first);
     return ordering_.erase(pos);
   }
@@ -174,9 +166,6 @@
 
   // Deletes everything from the cache.
   void Clear() {
-    for (typename PayloadList::iterator i(ordering_.begin());
-         i != ordering_.end(); ++i)
-      deletor_(i->second);
     index_.clear();
     ordering_.clear();
   }
@@ -213,101 +202,50 @@
 
   size_type max_size_;
 
-  DeletorType deletor_;
-
   DISALLOW_COPY_AND_ASSIGN(MRUCacheBase);
 };
 
 // MRUCache --------------------------------------------------------------------
 
-// A functor that does nothing. Used by the MRUCache.
-template<class PayloadType>
-class MRUCacheNullDeletor {
- public:
-  void operator()(const PayloadType& payload) {}
-};
-
 // A container that does not do anything to free its data. Use this when storing
 // value types (as opposed to pointers) in the list.
 template <class KeyType, class PayloadType>
-class MRUCache : public MRUCacheBase<KeyType,
-                                     PayloadType,
-                                     MRUCacheNullDeletor<PayloadType> > {
+class MRUCache : public MRUCacheBase<KeyType, PayloadType, std::less<KeyType>> {
  private:
-  typedef MRUCacheBase<KeyType, PayloadType,
-      MRUCacheNullDeletor<PayloadType> > ParentType;
+  using ParentType = MRUCacheBase<KeyType, PayloadType, std::less<KeyType>>;
 
  public:
   // See MRUCacheBase, noting the possibility of using NO_AUTO_EVICT.
   explicit MRUCache(typename ParentType::size_type max_size)
-      : ParentType(max_size) {
-  }
-  virtual ~MRUCache() {
-  }
+      : ParentType(max_size) {}
+  virtual ~MRUCache() {}
 
  private:
   DISALLOW_COPY_AND_ASSIGN(MRUCache);
 };
 
-// OwningMRUCache --------------------------------------------------------------
-
-template<class PayloadType>
-class MRUCachePointerDeletor {
- public:
-  void operator()(const PayloadType& payload) { delete payload; }
-};
-
-// A cache that owns the payload type, which must be a non-const pointer type.
-// The pointers will be deleted when they are removed, replaced, or when the
-// cache is destroyed.
-template <class KeyType, class PayloadType>
-class OwningMRUCache
-    : public MRUCacheBase<KeyType,
-                          PayloadType,
-                          MRUCachePointerDeletor<PayloadType> > {
- private:
-  typedef MRUCacheBase<KeyType, PayloadType,
-      MRUCachePointerDeletor<PayloadType> > ParentType;
-
- public:
-  // See MRUCacheBase, noting the possibility of using NO_AUTO_EVICT.
-  explicit OwningMRUCache(typename ParentType::size_type max_size)
-      : ParentType(max_size) {
-  }
-  virtual ~OwningMRUCache() {
-  }
-
- private:
-  DISALLOW_COPY_AND_ASSIGN(OwningMRUCache);
-};
-
 // HashingMRUCache ------------------------------------------------------------
 
-template <class KeyType, class ValueType>
+template <class KeyType, class ValueType, class HashType>
 struct MRUCacheHashMap {
-  typedef base::hash_map<KeyType, ValueType> Type;
+  typedef std::unordered_map<KeyType, ValueType, HashType> Type;
 };
 
-// This class is similar to MRUCache, except that it uses base::hash_map as
-// the map type instead of std::map. Note that your KeyType must be hashable
-// to use this cache.
-template <class KeyType, class PayloadType>
-class HashingMRUCache : public MRUCacheBase<KeyType,
-                                            PayloadType,
-                                            MRUCacheNullDeletor<PayloadType>,
-                                            MRUCacheHashMap> {
+// This class is similar to MRUCache, except that it uses std::unordered_map as
+// the map type instead of std::map. Note that your KeyType must be hashable,
+// or you must provide a hashing class, to use this cache.
+template <class KeyType, class PayloadType, class HashType = std::hash<KeyType>>
+class HashingMRUCache
+    : public MRUCacheBase<KeyType, PayloadType, HashType, MRUCacheHashMap> {
  private:
-  typedef MRUCacheBase<KeyType, PayloadType,
-                       MRUCacheNullDeletor<PayloadType>,
-                       MRUCacheHashMap> ParentType;
+  using ParentType =
+      MRUCacheBase<KeyType, PayloadType, HashType, MRUCacheHashMap>;
 
  public:
   // See MRUCacheBase, noting the possibility of using NO_AUTO_EVICT.
   explicit HashingMRUCache(typename ParentType::size_type max_size)
-      : ParentType(max_size) {
-  }
-  virtual ~HashingMRUCache() {
-  }
+      : ParentType(max_size) {}
+  virtual ~HashingMRUCache() {}
 
  private:
   DISALLOW_COPY_AND_ASSIGN(HashingMRUCache);
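
With the deletor machinery removed, ownership now lives in the payload type itself. A rough sketch of the replacement for the deleted OwningMRUCache (illustrative; the Get()/end() calls assume the existing MRUCacheBase API):

    #include <memory>
    #include <string>
    #include "base/containers/mru_cache.h"

    void Demo() {
      // Put() forwards its payload, so a move-only smart pointer replaces
      // the old raw-pointer-plus-deletor pattern.
      base::MRUCache<int, std::unique_ptr<std::string>> cache(2);
      cache.Put(1, std::unique_ptr<std::string>(new std::string("one")));
      auto it = cache.Get(1);
      if (it != cache.end())
        it->second->append("!");
    }
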
diff --git a/base/containers/scoped_ptr_hash_map.h b/base/containers/scoped_ptr_hash_map.h
index 189c314..dd100c6 100644
--- a/base/containers/scoped_ptr_hash_map.h
+++ b/base/containers/scoped_ptr_hash_map.h
@@ -18,6 +18,8 @@
 
 namespace base {
 
+// Deprecated. Use std::unordered_map instead. https://crbug.com/579229
+//
 // This type acts like a hash_map<K, scoped_ptr<V, D> >, based on top of
 // base::hash_map. The ScopedPtrHashMap has ownership of all values in the data
 // structure.
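
A minimal sketch of the migration the deprecation note asks for (Widget is a hypothetical value type):

    #include <memory>
    #include <unordered_map>

    struct Widget { int id = 0; };

    void Demo() {
      // unique_ptr values give the same whole-map ownership semantics that
      // ScopedPtrHashMap provided, without a custom container.
      std::unordered_map<int, std::unique_ptr<Widget>> widgets;
      widgets[1] = std::unique_ptr<Widget>(new Widget());
      widgets.erase(1);  // Destroys the owned Widget.
    }
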
diff --git a/base/containers/small_map.h b/base/containers/small_map.h
index 427736c..82ed6c5 100644
--- a/base/containers/small_map.h
+++ b/base/containers/small_map.h
@@ -517,7 +517,7 @@
       array_[i].Destroy();
       --size_;
       if (i != size_) {
-        array_[i].Init(*array_[size_]);
+        array_[i].InitFromMove(std::move(array_[size_]));
         array_[size_].Destroy();
       }
     } else {
@@ -594,7 +594,7 @@
     ManualConstructor<value_type> temp_array[kArraySize];
 
     for (int i = 0; i < kArraySize; i++) {
-      temp_array[i].Init(*array_[i]);
+      temp_array[i].InitFromMove(std::move(array_[i]));
       array_[i].Destroy();
     }
 
@@ -604,7 +604,7 @@
 
     // Insert elements into it.
     for (int i = 0; i < kArraySize; i++) {
-      map_->insert(*temp_array[i]);
+      map_->insert(std::move(*temp_array[i]));
       temp_array[i].Destroy();
     }
   }
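
All three hunks swap copy-based re-insertion for move-based re-insertion. The pattern in isolation (a generic sketch, not small_map's actual types):

    #include <utility>

    // Fill the gap at |i| by moving the last element down instead of
    // copying it; this mirrors the Init() -> InitFromMove() change above.
    template <typename T>
    void RemoveAt(T* array, int* size, int i) {
      if (i != *size - 1)
        array[i] = std::move(array[*size - 1]);  // Move, never copy.
      --*size;
      // The moved-from slot stays valid-but-unspecified; small_map also
      // calls Destroy() because it manages element lifetimes manually.
    }
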
diff --git a/base/debug/OWNERS b/base/debug/OWNERS
deleted file mode 100644
index 4976ab1..0000000
--- a/base/debug/OWNERS
+++ /dev/null
@@ -1,3 +0,0 @@
-per-file trace_event*=nduca@chromium.org
-per-file trace_event*=dsinclair@chromium.org
-per-file trace_event_android.cc=wangxianzhu@chromium.org
diff --git a/base/debug/stack_trace_posix.cc b/base/debug/stack_trace_posix.cc
index 98e6c2e..d6a03f3 100644
--- a/base/debug/stack_trace_posix.cc
+++ b/base/debug/stack_trace_posix.cc
@@ -177,7 +177,7 @@
 
     handler->HandleOutput("\n");
   }
-#elif !defined(__UCLIBC__)
+#else
   bool printed = false;
 
   // Below part is async-signal unsafe (uses malloc), so execute it only
diff --git a/base/feature_list.cc b/base/feature_list.cc
new file mode 100644
index 0000000..d10c60b
--- /dev/null
+++ b/base/feature_list.cc
@@ -0,0 +1,240 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/feature_list.h"
+
+#include <stddef.h>
+
+#include <utility>
+#include <vector>
+
+#include "base/logging.h"
+#include "base/metrics/field_trial.h"
+#include "base/strings/string_split.h"
+#include "base/strings/string_util.h"
+
+namespace base {
+
+namespace {
+
+// Pointer to the FeatureList instance singleton that was set via
+// FeatureList::SetInstance(). Does not use base/memory/singleton.h in order to
+// have more control over initialization timing. Leaky.
+FeatureList* g_instance = nullptr;
+
+// Some characters are not allowed to appear in feature names or the associated
+// field trial names, as they are used as special characters for command-line
+// serialization. This function checks that the strings are ASCII (since they
+// are used in command-line API functions that require ASCII) and whether there
+// are any reserved characters present, returning true if the string is valid.
+// Only called in DCHECKs.
+bool IsValidFeatureOrFieldTrialName(const std::string& name) {
+  return IsStringASCII(name) && name.find_first_of(",<") == std::string::npos;
+}
+
+}  // namespace
+
+FeatureList::FeatureList() : initialized_(false) {}
+
+FeatureList::~FeatureList() {}
+
+void FeatureList::InitializeFromCommandLine(
+    const std::string& enable_features,
+    const std::string& disable_features) {
+  DCHECK(!initialized_);
+
+  // Process disabled features first, so that disabled ones take precedence over
+  // enabled ones (since RegisterOverride() uses insert()).
+  RegisterOverridesFromCommandLine(disable_features, OVERRIDE_DISABLE_FEATURE);
+  RegisterOverridesFromCommandLine(enable_features, OVERRIDE_ENABLE_FEATURE);
+}
+
+bool FeatureList::IsFeatureOverriddenFromCommandLine(
+    const std::string& feature_name,
+    OverrideState state) const {
+  auto it = overrides_.find(feature_name);
+  return it != overrides_.end() && it->second.overridden_state == state &&
+         !it->second.overridden_by_field_trial;
+}
+
+void FeatureList::AssociateReportingFieldTrial(
+    const std::string& feature_name,
+    OverrideState for_overridden_state,
+    FieldTrial* field_trial) {
+  DCHECK(
+      IsFeatureOverriddenFromCommandLine(feature_name, for_overridden_state));
+
+  // Only one associated field trial is supported per feature. This is generally
+  // enforced server-side.
+  OverrideEntry* entry = &overrides_.find(feature_name)->second;
+  if (entry->field_trial) {
+    NOTREACHED() << "Feature " << feature_name
+                 << " already has trial: " << entry->field_trial->trial_name()
+                 << ", associating trial: " << field_trial->trial_name();
+    return;
+  }
+
+  entry->field_trial = field_trial;
+}
+
+void FeatureList::RegisterFieldTrialOverride(const std::string& feature_name,
+                                             OverrideState override_state,
+                                             FieldTrial* field_trial) {
+  DCHECK(field_trial);
+  DCHECK(!ContainsKey(overrides_, feature_name) ||
+         !overrides_.find(feature_name)->second.field_trial)
+      << "Feature " << feature_name
+      << " has conflicting field trial overrides: "
+      << overrides_.find(feature_name)->second.field_trial->trial_name()
+      << " / " << field_trial->trial_name();
+
+  RegisterOverride(feature_name, override_state, field_trial);
+}
+
+void FeatureList::GetFeatureOverrides(std::string* enable_overrides,
+                                      std::string* disable_overrides) {
+  DCHECK(initialized_);
+
+  enable_overrides->clear();
+  disable_overrides->clear();
+
+  for (const auto& entry : overrides_) {
+    std::string* target_list = nullptr;
+    switch (entry.second.overridden_state) {
+      case OVERRIDE_ENABLE_FEATURE:
+        target_list = enable_overrides;
+        break;
+      case OVERRIDE_DISABLE_FEATURE:
+        target_list = disable_overrides;
+        break;
+    }
+
+    if (!target_list->empty())
+      target_list->push_back(',');
+    target_list->append(entry.first);
+    if (entry.second.field_trial) {
+      target_list->push_back('<');
+      target_list->append(entry.second.field_trial->trial_name());
+    }
+  }
+}
+
+// static
+bool FeatureList::IsEnabled(const Feature& feature) {
+  return GetInstance()->IsFeatureEnabled(feature);
+}
+
+// static
+std::vector<std::string> FeatureList::SplitFeatureListString(
+    const std::string& input) {
+  return SplitString(input, ",", TRIM_WHITESPACE, SPLIT_WANT_NONEMPTY);
+}
+
+// static
+void FeatureList::InitializeInstance() {
+  if (g_instance)
+    return;
+  SetInstance(make_scoped_ptr(new FeatureList));
+}
+
+// static
+FeatureList* FeatureList::GetInstance() {
+  return g_instance;
+}
+
+// static
+void FeatureList::SetInstance(scoped_ptr<FeatureList> instance) {
+  DCHECK(!g_instance);
+  instance->FinalizeInitialization();
+
+  // Note: Intentional leak of global singleton.
+  g_instance = instance.release();
+}
+
+// static
+void FeatureList::ClearInstanceForTesting() {
+  delete g_instance;
+  g_instance = nullptr;
+}
+
+void FeatureList::FinalizeInitialization() {
+  DCHECK(!initialized_);
+  initialized_ = true;
+}
+
+bool FeatureList::IsFeatureEnabled(const Feature& feature) {
+  DCHECK(initialized_);
+  DCHECK(IsValidFeatureOrFieldTrialName(feature.name)) << feature.name;
+  DCHECK(CheckFeatureIdentity(feature)) << feature.name;
+
+  auto it = overrides_.find(feature.name);
+  if (it != overrides_.end()) {
+    const OverrideEntry& entry = it->second;
+
+    // Activate the corresponding field trial, if necessary.
+    if (entry.field_trial)
+      entry.field_trial->group();
+
+    // TODO(asvitkine): Expand this section as more support is added.
+    return entry.overridden_state == OVERRIDE_ENABLE_FEATURE;
+  }
+  // Otherwise, return the default state.
+  return feature.default_state == FEATURE_ENABLED_BY_DEFAULT;
+}
+
+void FeatureList::RegisterOverridesFromCommandLine(
+    const std::string& feature_list,
+    OverrideState overridden_state) {
+  for (const auto& value : SplitFeatureListString(feature_list)) {
+    StringPiece feature_name(value);
+    base::FieldTrial* trial = nullptr;
+
+    // The entry may be of the form FeatureName<FieldTrialName, in which case
+    // this splits off the field trial name and associates it with the override.
+    std::string::size_type pos = feature_name.find('<');
+    if (pos != std::string::npos) {
+      feature_name.set(value.data(), pos);
+      trial = base::FieldTrialList::Find(value.substr(pos + 1));
+    }
+
+    RegisterOverride(feature_name, overridden_state, trial);
+  }
+}
+
+void FeatureList::RegisterOverride(StringPiece feature_name,
+                                   OverrideState overridden_state,
+                                   FieldTrial* field_trial) {
+  DCHECK(!initialized_);
+  if (field_trial) {
+    DCHECK(IsValidFeatureOrFieldTrialName(field_trial->trial_name()))
+        << field_trial->trial_name();
+  }
+
+  // Note: The semantics of insert() is that it does not overwrite the entry if
+  // one already exists for the key. Thus, only the first override for a given
+  // feature name takes effect.
+  overrides_.insert(std::make_pair(
+      feature_name.as_string(), OverrideEntry(overridden_state, field_trial)));
+}
+
+bool FeatureList::CheckFeatureIdentity(const Feature& feature) {
+  AutoLock auto_lock(feature_identity_tracker_lock_);
+
+  auto it = feature_identity_tracker_.find(feature.name);
+  if (it == feature_identity_tracker_.end()) {
+    // If it's not tracked yet, register it.
+    feature_identity_tracker_[feature.name] = &feature;
+    return true;
+  }
+  // Compare address of |feature| to the existing tracked entry.
+  return it->second == &feature;
+}
+
+FeatureList::OverrideEntry::OverrideEntry(OverrideState overridden_state,
+                                          FieldTrial* field_trial)
+    : overridden_state(overridden_state),
+      field_trial(field_trial),
+      overridden_by_field_trial(field_trial != nullptr) {}
+
+}  // namespace base
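
A sketch of the command-line format this file parses (feature and trial names are hypothetical):

    #include <utility>
    #include "base/feature_list.h"

    void InitFeatures() {
      scoped_ptr<base::FeatureList> feature_list(new base::FeatureList);
      // Disabled entries are registered first, so "SlowPath" stays off even
      // if it also appears in the enable list. "NewUI<UITrial" additionally
      // associates the override with the field trial named "UITrial".
      feature_list->InitializeFromCommandLine(
          "NewUI<UITrial,FastCache",  // --enable-features value.
          "SlowPath");                // --disable-features value.
      base::FeatureList::SetInstance(std::move(feature_list));
    }
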
diff --git a/base/feature_list.h b/base/feature_list.h
new file mode 100644
index 0000000..875d3b5
--- /dev/null
+++ b/base/feature_list.h
@@ -0,0 +1,238 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_FEATURE_LIST_H_
+#define BASE_FEATURE_LIST_H_
+
+#include <map>
+#include <string>
+#include <vector>
+
+#include "base/base_export.h"
+#include "base/gtest_prod_util.h"
+#include "base/macros.h"
+#include "base/memory/scoped_ptr.h"
+#include "base/strings/string_piece.h"
+#include "base/synchronization/lock.h"
+
+namespace base {
+
+class FieldTrial;
+
+// Specifies whether a given feature is enabled or disabled by default.
+enum FeatureState {
+  FEATURE_DISABLED_BY_DEFAULT,
+  FEATURE_ENABLED_BY_DEFAULT,
+};
+
+// The Feature struct is used to define the default state for a feature. See
+// comment below for more details. There must only ever be one struct instance
+// for a given feature name - generally defined as a constant global variable or
+// file static.
+struct BASE_EXPORT Feature {
+  // The name of the feature. This should be unique to each feature and is used
+  // for enabling/disabling features via command line flags and experiments.
+  const char* const name;
+
+  // The default state (i.e. enabled or disabled) for this feature.
+  const FeatureState default_state;
+};
+
+// The FeatureList class is used to determine whether a given feature is on or
+// off. It provides an authoritative answer, taking into account command-line
+// overrides and experimental control.
+//
+// The basic use case is for any feature that can be toggled (e.g. through
+// command-line or an experiment) to have a defined Feature struct, e.g.:
+//
+//   const base::Feature kMyGreatFeature {
+//     "MyGreatFeature", base::FEATURE_ENABLED_BY_DEFAULT
+//   };
+//
+// Then, client code that wishes to query the state of the feature would check:
+//
+//   if (base::FeatureList::IsEnabled(kMyGreatFeature)) {
+//     // Feature code goes here.
+//   }
+//
+// Behind the scenes, the above call would take into account any command-line
+// flags to enable or disable the feature, any experiments that may control it
+// and finally its default state (in that order of priority), to determine
+// whether the feature is on.
+//
+// Features can be explicitly forced on or off by specifying a list of comma-
+// separated feature names via the following command-line flags:
+//
+//   --enable-features=Feature5,Feature7
+//   --disable-features=Feature1,Feature2,Feature3
+//
+// After initialization (which should be done single-threaded), the FeatureList
+// API is thread safe.
+//
+// Note: This class is a singleton, but does not use base/memory/singleton.h in
+// order to have control over its initialization sequence. Specifically, the
+// intended use is to create an instance of this class and fully initialize it,
+// before setting it as the singleton for a process, via SetInstance().
+class BASE_EXPORT FeatureList {
+ public:
+  FeatureList();
+  ~FeatureList();
+
+  // Initializes feature overrides via command-line flags |enable_features| and
+  // |disable_features|, each of which is a comma-separated list of features to
+  // enable or disable, respectively. If a feature appears on both lists, then
+  // it will be disabled. If a list entry has the format "FeatureName<TrialName"
+  // then this initialization will also associate the feature state override
+  // with the named field trial, if it exists. Must only be invoked during the
+  // initialization phase (before FinalizeInitialization() has been called).
+  void InitializeFromCommandLine(const std::string& enable_features,
+                                 const std::string& disable_features);
+
+  // Specifies whether a feature override enables or disables the feature.
+  enum OverrideState {
+    OVERRIDE_DISABLE_FEATURE,
+    OVERRIDE_ENABLE_FEATURE,
+  };
+
+  // Returns true if the state of |feature_name| has been overridden via
+  // |InitializeFromCommandLine()|.
+  bool IsFeatureOverriddenFromCommandLine(const std::string& feature_name,
+                                          OverrideState state) const;
+
+  // Associates a field trial for reporting purposes corresponding to the
+  // command-line setting the feature state to |for_overridden_state|. The trial
+  // will be activated when the state of the feature is first queried. This
+  // should be called during registration, after InitializeFromCommandLine() has
+  // been called but before the instance is registered via SetInstance().
+  void AssociateReportingFieldTrial(const std::string& feature_name,
+                                    OverrideState for_overridden_state,
+                                    FieldTrial* field_trial);
+
+  // Registers a field trial to override the enabled state of the specified
+  // feature to |override_state|. Command-line overrides still take precedence
+  // over field trials, so this will have no effect if the feature is being
+  // overridden from the command-line. The associated field trial will be
+  // activated when the feature state for this feature is queried. This should
+  // be called during registration, after InitializeFromCommandLine() has been
+  // called but before the instance is registered via SetInstance().
+  void RegisterFieldTrialOverride(const std::string& feature_name,
+                                  OverrideState override_state,
+                                  FieldTrial* field_trial);
+
+  // Returns comma-separated lists of feature names (in the same format that is
+  // accepted by InitializeFromCommandLine()) corresponding to features that
+  // have been overridden - either through command-line or via FieldTrials. For
+  // those features that have an associated FieldTrial, the output entry will be
+  // of the format "FeatureName<TrialName", where "TrialName" is the name of the
+  // FieldTrial. Must be called only after the instance has been initialized and
+  // registered.
+  void GetFeatureOverrides(std::string* enable_overrides,
+                           std::string* disable_overrides);
+
+  // Returns whether the given |feature| is enabled. Must only be called after
+  // the singleton instance has been registered via SetInstance(). Additionally,
+  // a feature with a given name must only have a single corresponding Feature
+  // struct, which is checked in builds with DCHECKs enabled.
+  static bool IsEnabled(const Feature& feature);
+
+  // Splits a comma-separated string containing feature names into a vector.
+  static std::vector<std::string> SplitFeatureListString(
+      const std::string& input);
+
+  // Initializes and sets a default instance of FeatureList if one has not
+  // already been set. No-op otherwise.
+  static void InitializeInstance();
+
+  // Returns the singleton instance of FeatureList. Will return null until an
+  // instance is registered via SetInstance().
+  static FeatureList* GetInstance();
+
+  // Registers the given |instance| to be the singleton feature list for this
+  // process. This should only be called once and |instance| must not be null.
+  static void SetInstance(scoped_ptr<FeatureList> instance);
+
+  // Clears the previously-registered singleton instance for tests.
+  static void ClearInstanceForTesting();
+
+ private:
+  FRIEND_TEST_ALL_PREFIXES(FeatureListTest, CheckFeatureIdentity);
+
+  struct OverrideEntry {
+    // The overridden enable (on/off) state of the feature.
+    const OverrideState overridden_state;
+
+    // An optional associated field trial, which will be activated when the
+    // state of the feature is queried for the first time. Weak pointer to the
+    // FieldTrial object that is owned by the FieldTrialList singleton.
+    base::FieldTrial* field_trial;
+
+    // Specifies whether the feature's state is overridden by |field_trial|.
+    // If it's not, and |field_trial| is not null, it means it is simply an
+    // associated field trial for reporting purposes (and |overridden_state|
+    // came from the command-line).
+    const bool overridden_by_field_trial;
+
+    // TODO(asvitkine): Expand this as more support is added.
+
+    // Constructs an OverrideEntry for the given |overridden_state|. If
+    // |field_trial| is not null, it implies that |overridden_state| comes from
+    // the trial, so |overridden_by_field_trial| will be set to true.
+    OverrideEntry(OverrideState overridden_state, FieldTrial* field_trial);
+  };
+
+  // Finalizes the initialization state of the FeatureList, so that no further
+  // overrides can be registered. This is called by SetInstance() on the
+  // singleton feature list that is being registered.
+  void FinalizeInitialization();
+
+  // Returns whether the given |feature| is enabled. This is invoked by the
+  // public FeatureList::IsEnabled() static function on the global singleton.
+  // Requires the FeatureList to have already been fully initialized.
+  bool IsFeatureEnabled(const Feature& feature);
+
+  // For each feature name in comma-separated list of strings |feature_list|,
+  // registers an override with the specified |overridden_state|. Also, will
+  // associate an optional named field trial if the entry is of the format
+  // "FeatureName<TrialName".
+  void RegisterOverridesFromCommandLine(const std::string& feature_list,
+                                        OverrideState overridden_state);
+
+  // Registers an override for feature |feature_name|. The override specifies
+  // whether the feature should be on or off (via |overridden_state|), which
+  // will take precedence over the feature's default state. If |field_trial| is
+  // not null, registers the specified field trial object to be associated with
+  // the feature, which will activate the field trial when the feature state is
+  // queried. If an override is already registered for the given feature, it
+  // will not be changed.
+  void RegisterOverride(StringPiece feature_name,
+                        OverrideState overridden_state,
+                        FieldTrial* field_trial);
+
+  // Verifies that there's only a single definition of a Feature struct for a
+  // given feature name. Keeps track of the first seen Feature struct for each
+  // feature. Returns false when called on a Feature struct with a different
+  // address than the first one it saw for that feature name. Used only from
+  // DCHECKs and tests.
+  bool CheckFeatureIdentity(const Feature& feature);
+
+  // Map from feature name to an OverrideEntry struct for the feature, if it
+  // exists.
+  std::map<std::string, OverrideEntry> overrides_;
+
+  // Locked map that keeps track of seen features, to ensure a single feature is
+  // only defined once. This verification is only done in builds with DCHECKs
+  // enabled.
+  Lock feature_identity_tracker_lock_;
+  std::map<std::string, const Feature*> feature_identity_tracker_;
+
+  // Whether this object has been fully initialized. This gets set to true as a
+  // result of FinalizeInitialization().
+  bool initialized_;
+
+  DISALLOW_COPY_AND_ASSIGN(FeatureList);
+};
+
+}  // namespace base
+
+#endif  // BASE_FEATURE_LIST_H_
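
Mirroring the header's own usage comment, a compact end-to-end sketch (feature name hypothetical):

    #include "base/feature_list.h"

    // Exactly one Feature struct per feature name, typically a file-level
    // constant.
    const base::Feature kMyGreatFeature{"MyGreatFeature",
                                        base::FEATURE_DISABLED_BY_DEFAULT};

    void MaybeDoGreatThing() {
      // Consults command-line overrides, then field trials, then the
      // default state, in that priority order.
      if (base::FeatureList::IsEnabled(kMyGreatFeature)) {
        // Feature-gated code goes here.
      }
    }
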
diff --git a/base/feature_list_unittest.cc b/base/feature_list_unittest.cc
new file mode 100644
index 0000000..11cf179
--- /dev/null
+++ b/base/feature_list_unittest.cc
@@ -0,0 +1,358 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/feature_list.h"
+
+#include <stddef.h>
+
+#include <utility>
+
+#include "base/format_macros.h"
+#include "base/macros.h"
+#include "base/metrics/field_trial.h"
+#include "base/strings/string_util.h"
+#include "base/strings/stringprintf.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace base {
+
+namespace {
+
+const char kFeatureOnByDefaultName[] = "OnByDefault";
+struct Feature kFeatureOnByDefault {
+  kFeatureOnByDefaultName, FEATURE_ENABLED_BY_DEFAULT
+};
+
+const char kFeatureOffByDefaultName[] = "OffByDefault";
+struct Feature kFeatureOffByDefault {
+  kFeatureOffByDefaultName, FEATURE_DISABLED_BY_DEFAULT
+};
+
+std::string SortFeatureListString(const std::string& feature_list) {
+  std::vector<std::string> features =
+      FeatureList::SplitFeatureListString(feature_list);
+  std::sort(features.begin(), features.end());
+  return JoinString(features, ",");
+}
+
+}  // namespace
+
+class FeatureListTest : public testing::Test {
+ public:
+  FeatureListTest() : feature_list_(nullptr) {
+    RegisterFeatureListInstance(make_scoped_ptr(new FeatureList));
+  }
+  ~FeatureListTest() override { ClearFeatureListInstance(); }
+
+  void RegisterFeatureListInstance(scoped_ptr<FeatureList> feature_list) {
+    FeatureList::ClearInstanceForTesting();
+    feature_list_ = feature_list.get();
+    FeatureList::SetInstance(std::move(feature_list));
+  }
+  void ClearFeatureListInstance() {
+    FeatureList::ClearInstanceForTesting();
+    feature_list_ = nullptr;
+  }
+
+  FeatureList* feature_list() { return feature_list_; }
+
+ private:
+  // Weak. Owned by FeatureList::SetInstance().
+  FeatureList* feature_list_;
+
+  DISALLOW_COPY_AND_ASSIGN(FeatureListTest);
+};
+
+TEST_F(FeatureListTest, DefaultStates) {
+  EXPECT_TRUE(FeatureList::IsEnabled(kFeatureOnByDefault));
+  EXPECT_FALSE(FeatureList::IsEnabled(kFeatureOffByDefault));
+}
+
+TEST_F(FeatureListTest, InitializeFromCommandLine) {
+  struct {
+    const char* enable_features;
+    const char* disable_features;
+    bool expected_feature_on_state;
+    bool expected_feature_off_state;
+  } test_cases[] = {
+      {"", "", true, false},
+      {"OffByDefault", "", true, true},
+      {"OffByDefault", "OnByDefault", false, true},
+      {"OnByDefault,OffByDefault", "", true, true},
+      {"", "OnByDefault,OffByDefault", false, false},
+      // If an entry is in both lists, disable takes precedence.
+      {"OnByDefault", "OnByDefault,OffByDefault", false, false},
+  };
+
+  for (size_t i = 0; i < arraysize(test_cases); ++i) {
+    const auto& test_case = test_cases[i];
+    SCOPED_TRACE(base::StringPrintf("Test[%" PRIuS "]: [%s] [%s]", i,
+                                    test_case.enable_features,
+                                    test_case.disable_features));
+
+    ClearFeatureListInstance();
+    scoped_ptr<FeatureList> feature_list(new FeatureList);
+    feature_list->InitializeFromCommandLine(test_case.enable_features,
+                                            test_case.disable_features);
+    RegisterFeatureListInstance(std::move(feature_list));
+
+    EXPECT_EQ(test_case.expected_feature_on_state,
+              FeatureList::IsEnabled(kFeatureOnByDefault))
+        << i;
+    EXPECT_EQ(test_case.expected_feature_off_state,
+              FeatureList::IsEnabled(kFeatureOffByDefault))
+        << i;
+  }
+}
+
+TEST_F(FeatureListTest, CheckFeatureIdentity) {
+  // Tests that CheckFeatureIdentity() correctly detects when two different
+  // structs with the same feature name are passed to it.
+
+  // Call it twice for each feature at the top of the file, since the first call
+  // makes it remember the entry and the second call will verify it.
+  EXPECT_TRUE(feature_list()->CheckFeatureIdentity(kFeatureOnByDefault));
+  EXPECT_TRUE(feature_list()->CheckFeatureIdentity(kFeatureOnByDefault));
+  EXPECT_TRUE(feature_list()->CheckFeatureIdentity(kFeatureOffByDefault));
+  EXPECT_TRUE(feature_list()->CheckFeatureIdentity(kFeatureOffByDefault));
+
+  // Now, call it with a distinct struct for |kFeatureOnByDefaultName|, which
+  // should return false.
+  struct Feature kFeatureOnByDefault2 {
+    kFeatureOnByDefaultName, FEATURE_ENABLED_BY_DEFAULT
+  };
+  EXPECT_FALSE(feature_list()->CheckFeatureIdentity(kFeatureOnByDefault2));
+}
+
+TEST_F(FeatureListTest, FieldTrialOverrides) {
+  struct {
+    FeatureList::OverrideState trial1_state;
+    FeatureList::OverrideState trial2_state;
+  } test_cases[] = {
+      {FeatureList::OVERRIDE_DISABLE_FEATURE,
+       FeatureList::OVERRIDE_DISABLE_FEATURE},
+      {FeatureList::OVERRIDE_DISABLE_FEATURE,
+       FeatureList::OVERRIDE_ENABLE_FEATURE},
+      {FeatureList::OVERRIDE_ENABLE_FEATURE,
+       FeatureList::OVERRIDE_DISABLE_FEATURE},
+      {FeatureList::OVERRIDE_ENABLE_FEATURE,
+       FeatureList::OVERRIDE_ENABLE_FEATURE},
+  };
+
+  FieldTrial::ActiveGroup active_group;
+  for (size_t i = 0; i < arraysize(test_cases); ++i) {
+    const auto& test_case = test_cases[i];
+    SCOPED_TRACE(base::StringPrintf("Test[%" PRIuS "]", i));
+
+    ClearFeatureListInstance();
+
+    FieldTrialList field_trial_list(nullptr);
+    scoped_ptr<FeatureList> feature_list(new FeatureList);
+
+    FieldTrial* trial1 = FieldTrialList::CreateFieldTrial("TrialExample1", "A");
+    FieldTrial* trial2 = FieldTrialList::CreateFieldTrial("TrialExample2", "B");
+    feature_list->RegisterFieldTrialOverride(kFeatureOnByDefaultName,
+                                             test_case.trial1_state, trial1);
+    feature_list->RegisterFieldTrialOverride(kFeatureOffByDefaultName,
+                                             test_case.trial2_state, trial2);
+    RegisterFeatureListInstance(std::move(feature_list));
+
+    // Initially, neither trial should be active.
+    EXPECT_FALSE(FieldTrialList::IsTrialActive(trial1->trial_name()));
+    EXPECT_FALSE(FieldTrialList::IsTrialActive(trial2->trial_name()));
+
+    const bool expected_enabled_1 =
+        (test_case.trial1_state == FeatureList::OVERRIDE_ENABLE_FEATURE);
+    EXPECT_EQ(expected_enabled_1, FeatureList::IsEnabled(kFeatureOnByDefault));
+    // The above should have activated |trial1|.
+    EXPECT_TRUE(FieldTrialList::IsTrialActive(trial1->trial_name()));
+    EXPECT_FALSE(FieldTrialList::IsTrialActive(trial2->trial_name()));
+
+    const bool expected_enabled_2 =
+        (test_case.trial2_state == FeatureList::OVERRIDE_ENABLE_FEATURE);
+    EXPECT_EQ(expected_enabled_2, FeatureList::IsEnabled(kFeatureOffByDefault));
+    // The above should have activated |trial2|.
+    EXPECT_TRUE(FieldTrialList::IsTrialActive(trial1->trial_name()));
+    EXPECT_TRUE(FieldTrialList::IsTrialActive(trial2->trial_name()));
+  }
+}
+
+TEST_F(FeatureListTest, CommandLineTakesPrecedenceOverFieldTrial) {
+  ClearFeatureListInstance();
+
+  FieldTrialList field_trial_list(nullptr);
+  scoped_ptr<FeatureList> feature_list(new FeatureList);
+
+  // The feature is explicitly enabled on the command-line.
+  feature_list->InitializeFromCommandLine(kFeatureOffByDefaultName, "");
+
+  // But the FieldTrial would set the feature to disabled.
+  FieldTrial* trial = FieldTrialList::CreateFieldTrial("TrialExample2", "A");
+  feature_list->RegisterFieldTrialOverride(
+      kFeatureOffByDefaultName, FeatureList::OVERRIDE_DISABLE_FEATURE, trial);
+  RegisterFeatureListInstance(std::move(feature_list));
+
+  EXPECT_FALSE(FieldTrialList::IsTrialActive(trial->trial_name()));
+  // Command-line should take precedence.
+  EXPECT_TRUE(FeatureList::IsEnabled(kFeatureOffByDefault));
+  // Since the feature is on due to the command-line, and not as a result of the
+  // field trial, the field trial should not be activated (since the Associate*
+  // API wasn't used).
+  EXPECT_FALSE(FieldTrialList::IsTrialActive(trial->trial_name()));
+}
+
+TEST_F(FeatureListTest, IsFeatureOverriddenFromCommandLine) {
+  ClearFeatureListInstance();
+
+  FieldTrialList field_trial_list(nullptr);
+  scoped_ptr<FeatureList> feature_list(new FeatureList);
+
+  // No features are overridden from the command line yet
+  EXPECT_FALSE(feature_list->IsFeatureOverriddenFromCommandLine(
+      kFeatureOnByDefaultName, FeatureList::OVERRIDE_DISABLE_FEATURE));
+  EXPECT_FALSE(feature_list->IsFeatureOverriddenFromCommandLine(
+      kFeatureOnByDefaultName, FeatureList::OVERRIDE_ENABLE_FEATURE));
+  EXPECT_FALSE(feature_list->IsFeatureOverriddenFromCommandLine(
+      kFeatureOffByDefaultName, FeatureList::OVERRIDE_DISABLE_FEATURE));
+  EXPECT_FALSE(feature_list->IsFeatureOverriddenFromCommandLine(
+      kFeatureOffByDefaultName, FeatureList::OVERRIDE_ENABLE_FEATURE));
+
+  // Now, enable |kFeatureOffByDefaultName| via the command-line.
+  feature_list->InitializeFromCommandLine(kFeatureOffByDefaultName, "");
+
+  // It should now be overridden for the enabled group.
+  EXPECT_FALSE(feature_list->IsFeatureOverriddenFromCommandLine(
+      kFeatureOffByDefaultName, FeatureList::OVERRIDE_DISABLE_FEATURE));
+  EXPECT_TRUE(feature_list->IsFeatureOverriddenFromCommandLine(
+      kFeatureOffByDefaultName, FeatureList::OVERRIDE_ENABLE_FEATURE));
+
+  // Register a field trial to associate with the feature and ensure that the
+  // results are still the same.
+  feature_list->AssociateReportingFieldTrial(
+      kFeatureOffByDefaultName, FeatureList::OVERRIDE_ENABLE_FEATURE,
+      FieldTrialList::CreateFieldTrial("Trial1", "A"));
+  EXPECT_FALSE(feature_list->IsFeatureOverriddenFromCommandLine(
+      kFeatureOffByDefaultName, FeatureList::OVERRIDE_DISABLE_FEATURE));
+  EXPECT_TRUE(feature_list->IsFeatureOverriddenFromCommandLine(
+      kFeatureOffByDefaultName, FeatureList::OVERRIDE_ENABLE_FEATURE));
+
+  // Now, register a field trial to override |kFeatureOnByDefaultName| state
+  // and check that the function still returns false for that feature.
+  feature_list->RegisterFieldTrialOverride(
+      kFeatureOnByDefaultName, FeatureList::OVERRIDE_DISABLE_FEATURE,
+      FieldTrialList::CreateFieldTrial("Trial2", "A"));
+  EXPECT_FALSE(feature_list->IsFeatureOverriddenFromCommandLine(
+      kFeatureOnByDefaultName, FeatureList::OVERRIDE_DISABLE_FEATURE));
+  EXPECT_FALSE(feature_list->IsFeatureOverriddenFromCommandLine(
+      kFeatureOnByDefaultName, FeatureList::OVERRIDE_ENABLE_FEATURE));
+  RegisterFeatureListInstance(std::move(feature_list));
+
+  // Check the expected feature states for good measure.
+  EXPECT_TRUE(FeatureList::IsEnabled(kFeatureOffByDefault));
+  EXPECT_FALSE(FeatureList::IsEnabled(kFeatureOnByDefault));
+}
+
+TEST_F(FeatureListTest, AssociateReportingFieldTrial) {
+  struct {
+    const char* enable_features;
+    const char* disable_features;
+    bool expected_enable_trial_created;
+    bool expected_disable_trial_created;
+  } test_cases[] = {
+      // If no enable/disable flags are specified, no trials should be created.
+      {"", "", false, false},
+      // Enabling the feature should result in the enable trial created.
+      {kFeatureOffByDefaultName, "", true, false},
+      // Disabling the feature should result in the disable trial created.
+      {"", kFeatureOffByDefaultName, false, true},
+  };
+
+  const char kTrialName[] = "ForcingTrial";
+  const char kForcedOnGroupName[] = "ForcedOn";
+  const char kForcedOffGroupName[] = "ForcedOff";
+
+  for (size_t i = 0; i < arraysize(test_cases); ++i) {
+    const auto& test_case = test_cases[i];
+    SCOPED_TRACE(base::StringPrintf("Test[%" PRIuS "]: [%s] [%s]", i,
+                                    test_case.enable_features,
+                                    test_case.disable_features));
+
+    ClearFeatureListInstance();
+
+    FieldTrialList field_trial_list(nullptr);
+    scoped_ptr<FeatureList> feature_list(new FeatureList);
+    feature_list->InitializeFromCommandLine(test_case.enable_features,
+                                            test_case.disable_features);
+
+    FieldTrial* enable_trial = nullptr;
+    if (feature_list->IsFeatureOverriddenFromCommandLine(
+            kFeatureOffByDefaultName, FeatureList::OVERRIDE_ENABLE_FEATURE)) {
+      enable_trial = base::FieldTrialList::CreateFieldTrial(kTrialName,
+                                                            kForcedOnGroupName);
+      feature_list->AssociateReportingFieldTrial(
+          kFeatureOffByDefaultName, FeatureList::OVERRIDE_ENABLE_FEATURE,
+          enable_trial);
+    }
+    FieldTrial* disable_trial = nullptr;
+    if (feature_list->IsFeatureOverriddenFromCommandLine(
+            kFeatureOffByDefaultName, FeatureList::OVERRIDE_DISABLE_FEATURE)) {
+      disable_trial = base::FieldTrialList::CreateFieldTrial(
+          kTrialName, kForcedOffGroupName);
+      feature_list->AssociateReportingFieldTrial(
+          kFeatureOffByDefaultName, FeatureList::OVERRIDE_DISABLE_FEATURE,
+          disable_trial);
+    }
+    EXPECT_EQ(test_case.expected_enable_trial_created, enable_trial != nullptr);
+    EXPECT_EQ(test_case.expected_disable_trial_created,
+              disable_trial != nullptr);
+    RegisterFeatureListInstance(std::move(feature_list));
+
+    EXPECT_FALSE(FieldTrialList::IsTrialActive(kTrialName));
+    if (disable_trial) {
+      EXPECT_FALSE(FeatureList::IsEnabled(kFeatureOffByDefault));
+      EXPECT_TRUE(FieldTrialList::IsTrialActive(kTrialName));
+      EXPECT_EQ(kForcedOffGroupName, disable_trial->group_name());
+    } else if (enable_trial) {
+      EXPECT_TRUE(FeatureList::IsEnabled(kFeatureOffByDefault));
+      EXPECT_TRUE(FieldTrialList::IsTrialActive(kTrialName));
+      EXPECT_EQ(kForcedOnGroupName, enable_trial->group_name());
+    }
+  }
+}
+
+TEST_F(FeatureListTest, GetFeatureOverrides) {
+  ClearFeatureListInstance();
+  FieldTrialList field_trial_list(nullptr);
+  scoped_ptr<FeatureList> feature_list(new FeatureList);
+  feature_list->InitializeFromCommandLine("A,X", "D");
+
+  FieldTrial* trial = FieldTrialList::CreateFieldTrial("Trial", "Group");
+  feature_list->RegisterFieldTrialOverride(kFeatureOffByDefaultName,
+                                           FeatureList::OVERRIDE_ENABLE_FEATURE,
+                                           trial);
+
+  RegisterFeatureListInstance(std::move(feature_list));
+
+  std::string enable_features;
+  std::string disable_features;
+  FeatureList::GetInstance()->GetFeatureOverrides(&enable_features,
+                                                  &disable_features);
+  EXPECT_EQ("A,OffByDefault<Trial,X", SortFeatureListString(enable_features));
+  EXPECT_EQ("D", SortFeatureListString(disable_features));
+}
+
+TEST_F(FeatureListTest, InitializeFromCommandLine_WithFieldTrials) {
+  ClearFeatureListInstance();
+  FieldTrialList field_trial_list(nullptr);
+  FieldTrialList::CreateFieldTrial("Trial", "Group");
+  scoped_ptr<FeatureList> feature_list(new FeatureList);
+  feature_list->InitializeFromCommandLine("A,OffByDefault<Trial,X", "D");
+  RegisterFeatureListInstance(std::move(feature_list));
+
+  EXPECT_FALSE(FieldTrialList::IsTrialActive("Trial"));
+  EXPECT_TRUE(FeatureList::IsEnabled(kFeatureOffByDefault));
+  EXPECT_TRUE(FieldTrialList::IsTrialActive("Trial"));
+}
+
+}  // namespace base
diff --git a/base/files/file_path.cc b/base/files/file_path.cc
index 2677258..d3cb53d 100644
--- a/base/files/file_path.cc
+++ b/base/files/file_path.cc
@@ -502,10 +502,10 @@
   // Don't append a separator if the path is empty (indicating the current
   // directory) or if the path component is empty (indicating nothing to
   // append).
-  if (appended.length() > 0 && new_path.path_.length() > 0) {
+  if (!appended.empty() && !new_path.path_.empty()) {
     // Don't append a separator if the path still ends with a trailing
     // separator after stripping (indicating the root directory).
-    if (!IsSeparator(new_path.path_[new_path.path_.length() - 1])) {
+    if (!IsSeparator(new_path.path_.back())) {
       // Don't append a separator if the path is just a drive letter.
       if (FindDriveLetter(new_path.path_) + 1 != new_path.path_.length()) {
         new_path.path_.append(1, kSeparators[0]);
@@ -610,7 +610,7 @@
 }
 
 // static
-FilePath FilePath::FromUTF8Unsafe(const std::string& utf8) {
+FilePath FilePath::FromUTF8Unsafe(StringPiece utf8) {
 #if defined(SYSTEM_NATIVE_UTF8)
   return FilePath(utf8);
 #else
@@ -619,11 +619,11 @@
 }
 
 // static
-FilePath FilePath::FromUTF16Unsafe(const string16& utf16) {
+FilePath FilePath::FromUTF16Unsafe(StringPiece16 utf16) {
 #if defined(SYSTEM_NATIVE_UTF8)
   return FilePath(UTF16ToUTF8(utf16));
 #else
-  return FilePath(SysWideToNativeMB(UTF16ToWide(utf16)));
+  return FilePath(SysWideToNativeMB(UTF16ToWide(utf16.as_string())));
 #endif
 }
 
@@ -647,16 +647,24 @@
 }
 
 // static
-FilePath FilePath::FromUTF8Unsafe(const std::string& utf8) {
+FilePath FilePath::FromUTF8Unsafe(StringPiece utf8) {
   return FilePath(UTF8ToWide(utf8));
 }
 
 // static
-FilePath FilePath::FromUTF16Unsafe(const string16& utf16) {
+FilePath FilePath::FromUTF16Unsafe(StringPiece16 utf16) {
   return FilePath(utf16);
 }
 #endif
 
+void FilePath::GetSizeForPickle(PickleSizer* sizer) const {
+#if defined(OS_WIN)
+  sizer->AddString16(path_);
+#else
+  sizer->AddString(path_);
+#endif
+}
+
 void FilePath::WriteToPickle(Pickle* pickle) const {
 #if defined(OS_WIN)
   pickle->WriteString16(path_);
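A hedged sketch of how the new GetSizeForPickle() pairs with WriteToPickle(); this assumes base::PickleSizer exposes a payload_size() accessor, which is not shown in this CL:

    base::PickleSizer sizer;
    path.GetSizeForPickle(&sizer);                // |path| is a base::FilePath
    size_t payload_bytes = sizer.payload_size();  // assumed accessor; bytes
                                                  // WriteToPickle() will append
    base::Pickle pickle;
    path.WriteToPickle(&pickle);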
diff --git a/base/files/file_path.h b/base/files/file_path.h
index 89e9cbf..3234df7 100644
--- a/base/files/file_path.h
+++ b/base/files/file_path.h
@@ -138,6 +138,7 @@
 
 class Pickle;
 class PickleIterator;
+class PickleSizer;
 
 // An abstraction to isolate users from the differences between native
 // pathnames on different platforms.
@@ -372,11 +373,12 @@
   // internally calls SysWideToNativeMB() on POSIX systems other than Mac
   // and Chrome OS, to mitigate the encoding issue. See the comment at
   // AsUTF8Unsafe() for details.
-  static FilePath FromUTF8Unsafe(const std::string& utf8);
+  static FilePath FromUTF8Unsafe(StringPiece utf8);
 
   // Similar to FromUTF8Unsafe, but accepts UTF-16 instead.
-  static FilePath FromUTF16Unsafe(const string16& utf16);
+  static FilePath FromUTF16Unsafe(StringPiece16 utf16);
 
+  void GetSizeForPickle(PickleSizer* sizer) const;
   void WriteToPickle(Pickle* pickle) const;
   bool ReadFromPickle(PickleIterator* iter);
 
diff --git a/base/files/file_util.cc b/base/files/file_util.cc
index 9e35b67..3169370 100644
--- a/base/files/file_util.cc
+++ b/base/files/file_util.cc
@@ -124,9 +124,9 @@
 }
 #endif  // !defined(OS_NACL_NONSFI)
 
-bool ReadFileToString(const FilePath& path,
-                      std::string* contents,
-                      size_t max_size) {
+bool ReadFileToStringWithMaxSize(const FilePath& path,
+                                 std::string* contents,
+                                 size_t max_size) {
   if (contents)
     contents->clear();
   if (path.ReferencesParent())
@@ -162,7 +162,8 @@
 }
 
 bool ReadFileToString(const FilePath& path, std::string* contents) {
-  return ReadFileToString(path, contents, std::numeric_limits<size_t>::max());
+  return ReadFileToStringWithMaxSize(path, contents,
+                                     std::numeric_limits<size_t>::max());
 }
 
 #if !defined(OS_NACL_NONSFI)
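A short usage sketch of the renamed function (the path and size cap are illustrative):

    std::string contents;
    // Read at most 1 MiB; per the header comment below, the call fails once
    // the file exceeds |max_size|.
    bool ok = base::ReadFileToStringWithMaxSize(
        base::FilePath(FILE_PATH_LITERAL("/tmp/example.txt")), &contents,
        1024 * 1024);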
diff --git a/base/files/file_util.h b/base/files/file_util.h
index dfc10a3..05b3cbf 100644
--- a/base/files/file_util.h
+++ b/base/files/file_util.h
@@ -154,9 +154,9 @@
 // |max_size|.
 // |contents| may be NULL, in which case this function is useful for its side
 // effect of priming the disk cache (could be used for unit tests).
-BASE_EXPORT bool ReadFileToString(const FilePath& path,
-                                  std::string* contents,
-                                  size_t max_size);
+BASE_EXPORT bool ReadFileToStringWithMaxSize(const FilePath& path,
+                                             std::string* contents,
+                                             size_t max_size);
 
 #if defined(OS_POSIX)
 
diff --git a/base/files/file_util_posix.cc b/base/files/file_util_posix.cc
index e2e4446..3f5baa0 100644
--- a/base/files/file_util_posix.cc
+++ b/base/files/file_util_posix.cc
@@ -351,12 +351,12 @@
 #endif  // !defined(OS_NACL_NONSFI)
 
 bool SetNonBlocking(int fd) {
-  int flags = fcntl(fd, F_GETFL, 0);
+  const int flags = fcntl(fd, F_GETFL);
   if (flags == -1)
     return false;
   if (flags & O_NONBLOCK)
     return true;
-  if (fcntl(fd, F_SETFL, flags | O_NONBLOCK) == -1)
+  if (HANDLE_EINTR(fcntl(fd, F_SETFL, flags | O_NONBLOCK)) == -1)
     return false;
   return true;
 }
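A usage sketch for SetNonBlocking() with the hardened fcntl() handling above (the pipe is illustrative):

    int fds[2];
    if (pipe(fds) == 0) {
      // Reads on fds[0] will now fail with EAGAIN instead of blocking when no
      // data is available; the call is a no-op if O_NONBLOCK is already set.
      bool ok = base::SetNonBlocking(fds[0]);
      DCHECK(ok);
    }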
diff --git a/base/hash.cc b/base/hash.cc
new file mode 100644
index 0000000..4274772
--- /dev/null
+++ b/base/hash.cc
@@ -0,0 +1,16 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/hash.h"
+
+#include <functional>
+
+namespace base {
+
+uint32_t SuperFastHash(const char* data, int len) {
+  std::hash<std::string> hash_fn;
+  return hash_fn(std::string(data, len));
+}
+
+}  // namespace base
diff --git a/base/hash.h b/base/hash.h
index 9c0e7a5..97e251c 100644
--- a/base/hash.h
+++ b/base/hash.h
@@ -5,21 +5,118 @@
 #ifndef BASE_HASH_H_
 #define BASE_HASH_H_
 
-#include <cstdint>
-#include <functional>
+#include <stddef.h>
+#include <stdint.h>
+
+#include <limits>
 #include <string>
+#include <utility>
+
+#include "base/base_export.h"
+#include "base/logging.h"
 
 namespace base {
 
-// Deprecated: just use std::hash directly
-//
+// WARNING: This hash function should not be used for any cryptographic purpose.
+BASE_EXPORT uint32_t SuperFastHash(const char* data, int len);
+
+// Computes a hash of a memory buffer |data| of a given |length|.
+// WARNING: This hash function should not be used for any cryptographic purpose.
+inline uint32_t Hash(const char* data, size_t length) {
+  if (length > static_cast<size_t>(std::numeric_limits<int>::max())) {
+    NOTREACHED();
+    return 0;
+  }
+  return SuperFastHash(data, static_cast<int>(length));
+}
+
 // Computes a hash of a string |str|.
 // WARNING: This hash function should not be used for any cryptographic purpose.
 inline uint32_t Hash(const std::string& str) {
-  std::hash<std::string> hash_fn;
-  return hash_fn(str);
+  return Hash(str.data(), str.size());
 }
 
+// Implement hashing for pairs of at-most 32 bit integer values.
+// When size_t is 32 bits, we turn the 64-bit hash code into 32 bits by using
+// multiply-add hashing. This algorithm, as described in
+// Theorem 4.3.3 of the thesis "Über die Komplexität der Multiplikation in
+// eingeschränkten Branchingprogrammmodellen" by Woelfel, is:
+//
+//   h32(x32, y32) = (h64(x32, y32) * rand_odd64 + rand16 * 2^16) % 2^64 / 2^32
+//
+// Contact danakj@chromium.org for any questions.
+inline size_t HashInts32(uint32_t value1, uint32_t value2) {
+  uint64_t value1_64 = value1;
+  uint64_t hash64 = (value1_64 << 32) | value2;
+
+  if (sizeof(size_t) >= sizeof(uint64_t))
+    return static_cast<size_t>(hash64);
+
+  uint64_t odd_random = 481046412LL << 32 | 1025306955LL;
+  uint32_t shift_random = 10121U << 16;
+
+  hash64 = hash64 * odd_random + shift_random;
+  size_t high_bits =
+      static_cast<size_t>(hash64 >> (8 * (sizeof(uint64_t) - sizeof(size_t))));
+  return high_bits;
+}
+
+// Implement hashing for pairs of up-to 64-bit integer values.
+// We use the compound integer hash method to produce a 64-bit hash code, by
+// breaking the two 64-bit inputs into 4 32-bit values:
+// http://opendatastructures.org/versions/edition-0.1d/ods-java/node33.html#SECTION00832000000000000000
+// Then we reduce our result to 32 bits if required, similar to above.
+inline size_t HashInts64(uint64_t value1, uint64_t value2) {
+  uint32_t short_random1 = 842304669U;
+  uint32_t short_random2 = 619063811U;
+  uint32_t short_random3 = 937041849U;
+  uint32_t short_random4 = 3309708029U;
+
+  uint32_t value1a = static_cast<uint32_t>(value1 & 0xffffffff);
+  uint32_t value1b = static_cast<uint32_t>((value1 >> 32) & 0xffffffff);
+  uint32_t value2a = static_cast<uint32_t>(value2 & 0xffffffff);
+  uint32_t value2b = static_cast<uint32_t>((value2 >> 32) & 0xffffffff);
+
+  uint64_t product1 = static_cast<uint64_t>(value1a) * short_random1;
+  uint64_t product2 = static_cast<uint64_t>(value1b) * short_random2;
+  uint64_t product3 = static_cast<uint64_t>(value2a) * short_random3;
+  uint64_t product4 = static_cast<uint64_t>(value2b) * short_random4;
+
+  uint64_t hash64 = product1 + product2 + product3 + product4;
+
+  if (sizeof(size_t) >= sizeof(uint64_t))
+    return static_cast<size_t>(hash64);
+
+  uint64_t odd_random = 1578233944LL << 32 | 194370989LL;
+  uint32_t shift_random = 20591U << 16;
+
+  hash64 = hash64 * odd_random + shift_random;
+  size_t high_bits =
+      static_cast<size_t>(hash64 >> (8 * (sizeof(uint64_t) - sizeof(size_t))));
+  return high_bits;
+}
+
+template <typename T1, typename T2>
+inline size_t HashInts(T1 value1, T2 value2) {
+  // This condition is expected to be compile-time evaluated and optimized away
+  // in release builds.
+  if (sizeof(T1) > sizeof(uint32_t) || (sizeof(T2) > sizeof(uint32_t)))
+    return HashInts64(value1, value2);
+
+  return HashInts32(value1, value2);
+}
+
+// A templated hasher for pairs of integer types.
+template <typename T>
+struct IntPairHash;
+
+template <typename Type1, typename Type2>
+struct IntPairHash<std::pair<Type1, Type2>> {
+  size_t operator()(std::pair<Type1, Type2> value) const {
+    return HashInts(value.first, value.second);
+  }
+};
+
 }  // namespace base
 
 #endif  // BASE_HASH_H_
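A usage sketch for the pair-hashing helpers added above (the container and values are illustrative):

    #include <unordered_map>

    using IdPair = std::pair<uint32_t, uint32_t>;
    std::unordered_map<IdPair, int, base::IntPairHash<IdPair>> counts;
    counts[std::make_pair(7u, 9u)] = 1;

    // HashInts() picks the variant at compile time: HashInts32() when both
    // arguments fit in 32 bits, HashInts64() otherwise.
    size_t h1 = base::HashInts(7u, 9u);                 // 32-bit path
    size_t h2 = base::HashInts(7u, uint64_t{1} << 40);  // 64-bit path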
diff --git a/base/id_map.h b/base/id_map.h
index 15c6662..ef6b156 100644
--- a/base/id_map.h
+++ b/base/id_map.h
@@ -160,7 +160,7 @@
   template<class ReturnType>
   class Iterator {
    public:
-    Iterator(IDMap<T, OS>* map)
+    Iterator(IDMap<T, OS, K>* map)
         : map_(map),
           iter_(map_->data_.begin()) {
       Init();
@@ -226,7 +226,7 @@
       }
     }
 
-    IDMap<T, OS>* map_;
+    IDMap<T, OS, K>* map_;
     typename HashTable::const_iterator iter_;
   };
 
diff --git a/base/id_map_unittest.cc b/base/id_map_unittest.cc
index 7a07a28..a3f0808 100644
--- a/base/id_map_unittest.cc
+++ b/base/id_map_unittest.cc
@@ -365,6 +365,13 @@
   map.AddWithID(&obj1, kId1);
   EXPECT_EQ(&obj1, map.Lookup(kId1));
 
+  IDMap<TestObject, IDMapExternalPointer, int64_t>::const_iterator iter(&map);
+  ASSERT_FALSE(iter.IsAtEnd());
+  EXPECT_EQ(kId1, iter.GetCurrentKey());
+  EXPECT_EQ(&obj1, iter.GetCurrentValue());
+  iter.Advance();
+  ASSERT_TRUE(iter.IsAtEnd());
+
   map.Remove(kId1);
   EXPECT_TRUE(map.IsEmpty());
 }
diff --git a/base/ios/OWNERS b/base/ios/OWNERS
index dc0be62..06f5ff1 100644
--- a/base/ios/OWNERS
+++ b/base/ios/OWNERS
@@ -1,4 +1,3 @@
 droger@chromium.org
 qsr@chromium.org
 rohitrao@chromium.org
-stuartmorgan@chromium.org
diff --git a/base/location.h b/base/location.h
index d3bb23c..21e270c 100644
--- a/base/location.h
+++ b/base/location.h
@@ -11,7 +11,7 @@
 #include <string>
 
 #include "base/base_export.h"
-#include "base/containers/hash_tables.h"
+#include "base/hash.h"
 
 namespace tracked_objects {
 
@@ -59,7 +59,7 @@
       // it comes from __FILE__, so no need to check the contents of the string.
       // See the definition of FROM_HERE in location.h, and how it is used
       // elsewhere.
-      return base::HashPair(reinterpret_cast<uintptr_t>(location.file_name()),
+      return base::HashInts(reinterpret_cast<uintptr_t>(location.file_name()),
                             location.line_number());
     }
   };
diff --git a/base/logging.cc b/base/logging.cc
index 3450b9a..1ebb84f 100644
--- a/base/logging.cc
+++ b/base/logging.cc
@@ -877,7 +877,7 @@
 }
 
 void RawLog(int level, const char* message) {
-  if (level >= g_min_log_level) {
+  if (level >= g_min_log_level && message) {
     size_t bytes_written = 0;
     const size_t message_len = strlen(message);
     int rv;
@@ -930,5 +930,5 @@
 }  // namespace logging
 
 std::ostream& std::operator<<(std::ostream& out, const wchar_t* wstr) {
-  return out << base::WideToUTF8(wstr);
+  return out << (wstr ? base::WideToUTF8(wstr) : std::string());
 }
diff --git a/base/logging.h b/base/logging.h
index c79c84c..06f38f4 100644
--- a/base/logging.h
+++ b/base/logging.h
@@ -402,9 +402,6 @@
 #define LOG_IF(severity, condition) \
   LAZY_STREAM(LOG_STREAM(severity), LOG_IS_ON(severity) && (condition))
 
-#define SYSLOG(severity) LOG(severity)
-#define SYSLOG_IF(severity, condition) LOG_IF(severity, condition)
-
 // The VLOG macros log with negative verbosities.
 #define VLOG_STREAM(verbose_level) \
   logging::LogMessage(__FILE__, __LINE__, -verbose_level).stream()
@@ -437,8 +434,6 @@
 
 #define LOG_ASSERT(condition)  \
   LOG_IF(FATAL, !(condition)) << "Assert failed: " #condition ". "
-#define SYSLOG_ASSERT(condition) \
-  SYSLOG_IF(FATAL, !(condition)) << "Assert failed: " #condition ". "
 
 #if defined(OS_WIN)
 #define PLOG_STREAM(severity) \
@@ -836,12 +831,6 @@
   DISALLOW_COPY_AND_ASSIGN(LogMessage);
 };
 
-// A non-macro interface to the log facility; (useful
-// when the logging level is not a compile-time constant).
-inline void LogAtLevel(int log_level, const std::string& msg) {
-  LogMessage(__FILE__, __LINE__, log_level).stream() << msg;
-}
-
 // This class is used to explicitly ignore values in the conditional
 // logging macros.  This avoids compiler warnings like "value computed
 // is not used" and "statement has no effect".
diff --git a/base/mac/foundation_util.mm b/base/mac/foundation_util.mm
index 524f17c..6ae5df3 100644
--- a/base/mac/foundation_util.mm
+++ b/base/mac/foundation_util.mm
@@ -165,7 +165,7 @@
 
   // The first component may be "/" or "//", etc. Only append '/' if it doesn't
   // already end in '/'.
-  if (bundle_name[bundle_name.length() - 1] != '/')
+  if (bundle_name.back() != '/')
     bundle_name += '/';
 
   // Go through the remaining components.
diff --git a/base/mac/mac_logging.h b/base/mac/mac_logging.h
index f558902..30e43ea 100644
--- a/base/mac/mac_logging.h
+++ b/base/mac/mac_logging.h
@@ -29,6 +29,9 @@
 
 namespace logging {
 
+// Returns a UTF8 description from an OS X Status error.
+BASE_EXPORT std::string DescriptionFromOSStatus(OSStatus err);
+
 class BASE_EXPORT OSStatusLogMessage : public logging::LogMessage {
  public:
   OSStatusLogMessage(const char* file_path,
diff --git a/base/mac/mac_logging.mm b/base/mac/mac_logging.mm
new file mode 100644
index 0000000..381ad30
--- /dev/null
+++ b/base/mac/mac_logging.mm
@@ -0,0 +1,47 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/mac/mac_logging.h"
+
+#import <Foundation/Foundation.h>
+
+#include <iomanip>
+
+#include "build/build_config.h"
+
+#if !defined(OS_IOS)
+#include <CoreServices/CoreServices.h>
+#endif
+
+namespace logging {
+
+std::string DescriptionFromOSStatus(OSStatus err) {
+  NSError* error =
+      [NSError errorWithDomain:NSOSStatusErrorDomain code:err userInfo:nil];
+  return error.description.UTF8String;
+}
+
+OSStatusLogMessage::OSStatusLogMessage(const char* file_path,
+                                       int line,
+                                       LogSeverity severity,
+                                       OSStatus status)
+    : LogMessage(file_path, line, severity),
+      status_(status) {
+}
+
+OSStatusLogMessage::~OSStatusLogMessage() {
+#if defined(OS_IOS)
+  // TODO(ios): Consider using NSError with NSOSStatusErrorDomain to try to
+  // get a description of the failure.
+  stream() << ": " << status_;
+#else
+  stream() << ": "
+           << DescriptionFromOSStatus(status_)
+           << " ("
+           << status_
+           << ")";
+#endif
+}
+
+}  // namespace logging
diff --git a/base/mac/mach_port_broker.h b/base/mac/mach_port_broker.h
new file mode 100644
index 0000000..ba08b6f
--- /dev/null
+++ b/base/mac/mach_port_broker.h
@@ -0,0 +1,108 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_MAC_MACH_PORT_BROKER_H_
+#define BASE_MAC_MACH_PORT_BROKER_H_
+
+#include <mach/mach.h>
+
+#include <map>
+#include <string>
+
+#include "base/base_export.h"
+#include "base/mac/dispatch_source_mach.h"
+#include "base/mac/scoped_mach_port.h"
+#include "base/macros.h"
+#include "base/memory/scoped_ptr.h"
+#include "base/process/port_provider_mac.h"
+#include "base/process/process_handle.h"
+#include "base/synchronization/lock.h"
+
+namespace base {
+
+// On OS X, the task port of a process is required to collect metrics about the
+// process, and to insert Mach ports into the process. Running |task_for_pid()|
+// is only allowed for privileged code. However, a process has port rights to
+// all its subprocesses, so let the child processes send their Mach port to the
+// parent over IPC.
+//
+// Mach ports can only be sent over Mach IPC, not over the |socketpair()| that
+// the regular IPC system uses. Hence, each child process opens a Mach
+// connection shortly after launching and sends its Mach port data to the
+// parent process. A single |MachPortBroker| with a given name is expected to
+// exist in the parent process.
+//
+// Since this data arrives over a separate channel, it is not available
+// immediately after a child process has been started.
+class BASE_EXPORT MachPortBroker : public base::PortProvider {
+ public:
+  // For use in child processes. This will send the task port of the current
+  // process over Mach IPC to the port registered by name (via this class) in
+  // the parent process. Returns true if the message was sent successfully,
+  // and false otherwise.
+  static bool ChildSendTaskPortToParent(const std::string& name);
+
+  // Returns the Mach port name to use when sending or receiving messages.
+  // Does the Right Thing in the browser and in child processes.
+  static std::string GetMachPortName(const std::string& name, bool is_child);
+
+  MachPortBroker(const std::string& name);
+  ~MachPortBroker() override;
+
+  // Performs any initialization work.
+  bool Init();
+
+  // Adds a placeholder to the map for the given pid with MACH_PORT_NULL.
+  // Callers are expected to later update the port with FinalizePid(). Callers
+  // MUST acquire the lock given by GetLock() before calling this method (and
+  // release the lock afterwards).
+  void AddPlaceholderForPid(base::ProcessHandle pid);
+
+  // Removes |pid| from the task port map. Callers MUST acquire the lock given
+  // by GetLock() before calling this method (and release the lock afterwards).
+  void InvalidatePid(base::ProcessHandle pid);
+
+  // The lock that protects this MachPortBroker object. Callers MUST acquire
+  // and release this lock around calls to AddPlaceholderForPid(),
+  // InvalidatePid(), and FinalizePid().
+  base::Lock& GetLock() { return lock_; }
+
+  // Implement |base::PortProvider|.
+  mach_port_t TaskForPid(base::ProcessHandle process) const override;
+
+ private:
+  friend class MachPortBrokerTest;
+
+  // Message handler that is invoked on |dispatch_source_| when an
+  // incoming message needs to be received.
+  void HandleRequest();
+
+  // Updates the mapping for |pid| to include the given |task_port|. Does
+  // nothing if AddPlaceholderForPid() has not already been called for the given
+  // |pid|. Callers MUST acquire the lock given by GetLock() before calling
+  // this method (and release the lock afterwards).
+  void FinalizePid(base::ProcessHandle pid, mach_port_t task_port);
+
+  // Name used to identify a particular port broker.
+  const std::string name_;
+
+  // The Mach port on which the server listens.
+  base::mac::ScopedMachReceiveRight server_port_;
+
+  // The dispatch source and queue on which Mach messages will be received.
+  scoped_ptr<base::DispatchSourceMach> dispatch_source_;
+
+  // Stores mach info for every process in the broker.
+  typedef std::map<base::ProcessHandle, mach_port_t> MachMap;
+  MachMap mach_map_;
+
+  // Mutex that guards |mach_map_|.
+  mutable base::Lock lock_;
+
+  DISALLOW_COPY_AND_ASSIGN(MachPortBroker);
+};
+
+}  // namespace base
+
+#endif  // BASE_MAC_MACH_PORT_BROKER_H_
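A usage sketch tying the parent and child halves together; it follows the MachPortBrokerTest.ReceivePortFromChild test later in this CL, with |child_pid| hypothetical:

    // Parent process: publish the broker, then register the child under the
    // broker lock.
    base::MachPortBroker broker("my_broker");
    CHECK(broker.Init());
    {
      base::AutoLock lock(broker.GetLock());
      broker.AddPlaceholderForPid(child_pid);
    }
    // Once the child checks in, broker.TaskForPid(child_pid) returns its task
    // port instead of MACH_PORT_NULL.

    // Child process: send the task port to the parent over Mach IPC.
    base::MachPortBroker::ChildSendTaskPortToParent("my_broker");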
diff --git a/base/mac/mach_port_broker.mm b/base/mac/mach_port_broker.mm
new file mode 100644
index 0000000..bd47017
--- /dev/null
+++ b/base/mac/mach_port_broker.mm
@@ -0,0 +1,189 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/mac/mach_port_broker.h"
+
+#include <bsm/libbsm.h>
+#include <servers/bootstrap.h>
+
+#include "base/logging.h"
+#include "base/mac/foundation_util.h"
+#include "base/mac/mach_logging.h"
+#include "base/strings/string_util.h"
+#include "base/strings/stringprintf.h"
+
+namespace base {
+
+namespace {
+
+// Mach message structure used in the child as a sending message.
+struct MachPortBroker_ChildSendMsg {
+  mach_msg_header_t header;
+  mach_msg_body_t body;
+  mach_msg_port_descriptor_t child_task_port;
+};
+
+// Complement to the ChildSendMsg, this is used in the parent for receiving
+// a message. Contains a message trailer with audit information.
+struct MachPortBroker_ParentRecvMsg : public MachPortBroker_ChildSendMsg {
+  mach_msg_audit_trailer_t trailer;
+};
+
+}  // namespace
+
+// static
+bool MachPortBroker::ChildSendTaskPortToParent(const std::string& name) {
+  // Look up the named MachPortBroker port that's been registered with the
+  // bootstrap server.
+  mach_port_t parent_port;
+  kern_return_t kr = bootstrap_look_up(bootstrap_port,
+      const_cast<char*>(GetMachPortName(name, true).c_str()), &parent_port);
+  if (kr != KERN_SUCCESS) {
+    BOOTSTRAP_LOG(ERROR, kr) << "bootstrap_look_up";
+    return false;
+  }
+  base::mac::ScopedMachSendRight scoped_right(parent_port);
+
+  // Create the check-in message. This will copy a send right on this process'
+  // (the child's) task port and send it to the parent.
+  MachPortBroker_ChildSendMsg msg;
+  bzero(&msg, sizeof(msg));
+  msg.header.msgh_bits = MACH_MSGH_BITS_REMOTE(MACH_MSG_TYPE_COPY_SEND) |
+                         MACH_MSGH_BITS_COMPLEX;
+  msg.header.msgh_remote_port = parent_port;
+  msg.header.msgh_size = sizeof(msg);
+  msg.body.msgh_descriptor_count = 1;
+  msg.child_task_port.name = mach_task_self();
+  msg.child_task_port.disposition = MACH_MSG_TYPE_PORT_SEND;
+  msg.child_task_port.type = MACH_MSG_PORT_DESCRIPTOR;
+
+  kr = mach_msg(&msg.header, MACH_SEND_MSG | MACH_SEND_TIMEOUT, sizeof(msg),
+      0, MACH_PORT_NULL, 100 /*milliseconds*/, MACH_PORT_NULL);
+  if (kr != KERN_SUCCESS) {
+    MACH_LOG(ERROR, kr) << "mach_msg";
+    return false;
+  }
+
+  return true;
+}
+
+// static
+std::string MachPortBroker::GetMachPortName(const std::string& name,
+                                            bool is_child) {
+  // In child processes, use the parent's pid.
+  const pid_t pid = is_child ? getppid() : getpid();
+  return base::StringPrintf(
+      "%s.%s.%d", base::mac::BaseBundleID(), name.c_str(), pid);
+}
+
+mach_port_t MachPortBroker::TaskForPid(base::ProcessHandle pid) const {
+  base::AutoLock lock(lock_);
+  MachPortBroker::MachMap::const_iterator it = mach_map_.find(pid);
+  if (it == mach_map_.end())
+    return MACH_PORT_NULL;
+  return it->second;
+}
+
+MachPortBroker::MachPortBroker(const std::string& name) : name_(name) {}
+
+MachPortBroker::~MachPortBroker() {}
+
+bool MachPortBroker::Init() {
+  DCHECK(server_port_.get() == MACH_PORT_NULL);
+
+  // Check in with launchd and publish the service name.
+  mach_port_t port;
+  kern_return_t kr = bootstrap_check_in(
+      bootstrap_port, GetMachPortName(name_, false).c_str(), &port);
+  if (kr != KERN_SUCCESS) {
+    BOOTSTRAP_LOG(ERROR, kr) << "bootstrap_check_in";
+    return false;
+  }
+  server_port_.reset(port);
+
+  // Start the dispatch source.
+  std::string queue_name =
+      base::StringPrintf("%s.MachPortBroker", base::mac::BaseBundleID());
+  dispatch_source_.reset(new base::DispatchSourceMach(
+      queue_name.c_str(), server_port_.get(), ^{ HandleRequest(); }));
+  dispatch_source_->Resume();
+
+  return true;
+}
+
+void MachPortBroker::AddPlaceholderForPid(base::ProcessHandle pid) {
+  lock_.AssertAcquired();
+  DCHECK_EQ(0u, mach_map_.count(pid));
+  mach_map_[pid] = MACH_PORT_NULL;
+}
+
+void MachPortBroker::InvalidatePid(base::ProcessHandle pid) {
+  lock_.AssertAcquired();
+
+  MachMap::iterator mach_it = mach_map_.find(pid);
+  if (mach_it != mach_map_.end()) {
+    kern_return_t kr = mach_port_deallocate(mach_task_self(), mach_it->second);
+    MACH_LOG_IF(WARNING, kr != KERN_SUCCESS, kr) << "mach_port_deallocate";
+    mach_map_.erase(mach_it);
+  }
+}
+
+void MachPortBroker::HandleRequest() {
+  MachPortBroker_ParentRecvMsg msg;
+  bzero(&msg, sizeof(msg));
+  msg.header.msgh_size = sizeof(msg);
+  msg.header.msgh_local_port = server_port_.get();
+
+  const mach_msg_option_t options = MACH_RCV_MSG |
+      MACH_RCV_TRAILER_TYPE(MACH_RCV_TRAILER_AUDIT) |
+      MACH_RCV_TRAILER_ELEMENTS(MACH_RCV_TRAILER_AUDIT);
+
+  kern_return_t kr = mach_msg(&msg.header,
+                              options,
+                              0,
+                              sizeof(msg),
+                              server_port_.get(),
+                              MACH_MSG_TIMEOUT_NONE,
+                              MACH_PORT_NULL);
+  if (kr != KERN_SUCCESS) {
+    MACH_LOG(ERROR, kr) << "mach_msg";
+    return;
+  }
+
+  // Use the kernel audit information to make sure this message is from
+  // a task that this process spawned. The kernel audit token contains the
+  // unspoofable pid of the task that sent the message.
+  //
+  // TODO(rsesek): In the 10.7 SDK, there's audit_token_to_pid().
+  pid_t child_pid;
+  audit_token_to_au32(msg.trailer.msgh_audit,
+      NULL, NULL, NULL, NULL, NULL, &child_pid, NULL, NULL);
+
+  mach_port_t child_task_port = msg.child_task_port.name;
+
+  // Take the lock and update the broker information.
+  {
+    base::AutoLock lock(lock_);
+    FinalizePid(child_pid, child_task_port);
+  }
+  NotifyObservers(child_pid);
+}
+
+void MachPortBroker::FinalizePid(base::ProcessHandle pid,
+                                 mach_port_t task_port) {
+  lock_.AssertAcquired();
+
+  MachMap::iterator it = mach_map_.find(pid);
+  if (it == mach_map_.end()) {
+    // Do nothing for unknown pids.
+    LOG(ERROR) << "Unknown process " << pid << " is sending Mach IPC messages!";
+    return;
+  }
+
+  DCHECK(it->second == MACH_PORT_NULL);
+  if (it->second == MACH_PORT_NULL)
+    it->second = task_port;
+}
+
+}  // namespace base
diff --git a/base/mac/mach_port_broker_unittest.cc b/base/mac/mach_port_broker_unittest.cc
new file mode 100644
index 0000000..c15afb6
--- /dev/null
+++ b/base/mac/mach_port_broker_unittest.cc
@@ -0,0 +1,132 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/mac/mach_port_broker.h"
+
+#include "base/command_line.h"
+#include "base/synchronization/lock.h"
+#include "base/synchronization/waitable_event.h"
+#include "base/test/multiprocess_test.h"
+#include "base/test/test_timeouts.h"
+#include "testing/gtest/include/gtest/gtest.h"
+#include "testing/multiprocess_func_list.h"
+
+namespace base {
+
+namespace {
+const char kBootstrapPortName[] = "thisisatest";
+}
+
+class MachPortBrokerTest : public testing::Test,
+                           public base::PortProvider::Observer {
+ public:
+  MachPortBrokerTest()
+      : broker_(kBootstrapPortName),
+        event_(true, false),
+        received_process_(kNullProcessHandle) {
+    broker_.AddObserver(this);
+  }
+  ~MachPortBrokerTest() override {
+    broker_.RemoveObserver(this);
+  }
+
+  // Helper function to acquire/release locks and call |PlaceholderForPid()|.
+  void AddPlaceholderForPid(base::ProcessHandle pid) {
+    base::AutoLock lock(broker_.GetLock());
+    broker_.AddPlaceholderForPid(pid);
+  }
+
+  // Helper function to acquire/release locks and call |FinalizePid()|.
+  void FinalizePid(base::ProcessHandle pid,
+                   mach_port_t task_port) {
+    base::AutoLock lock(broker_.GetLock());
+    broker_.FinalizePid(pid, task_port);
+  }
+
+  void WaitForTaskPort() {
+    event_.Wait();
+  }
+
+  // base::PortProvider::Observer:
+  void OnReceivedTaskPort(ProcessHandle process) override {
+    received_process_ = process;
+    event_.Signal();
+  }
+
+ protected:
+  MachPortBroker broker_;
+  WaitableEvent event_;
+  ProcessHandle received_process_;
+};
+
+TEST_F(MachPortBrokerTest, Locks) {
+  // Acquire and release the locks.  Nothing bad should happen.
+  base::AutoLock lock(broker_.GetLock());
+}
+
+TEST_F(MachPortBrokerTest, AddPlaceholderAndFinalize) {
+  // Add a placeholder for PID 1.
+  AddPlaceholderForPid(1);
+  EXPECT_EQ(0u, broker_.TaskForPid(1));
+
+  // Finalize PID 1.
+  FinalizePid(1, 100u);
+  EXPECT_EQ(100u, broker_.TaskForPid(1));
+
+  // Should be no entry for PID 2.
+  EXPECT_EQ(0u, broker_.TaskForPid(2));
+}
+
+TEST_F(MachPortBrokerTest, FinalizeUnknownPid) {
+  // Finalizing an entry for an unknown pid should not add it to the map.
+  FinalizePid(1u, 100u);
+  EXPECT_EQ(0u, broker_.TaskForPid(1u));
+}
+
+MULTIPROCESS_TEST_MAIN(MachPortBrokerTestChild) {
+  CHECK(base::MachPortBroker::ChildSendTaskPortToParent(kBootstrapPortName));
+  return 0;
+}
+
+TEST_F(MachPortBrokerTest, ReceivePortFromChild) {
+  ASSERT_TRUE(broker_.Init());
+  CommandLine command_line(
+      base::GetMultiProcessTestChildBaseCommandLine());
+  broker_.GetLock().Acquire();
+  base::Process test_child_process = base::SpawnMultiProcessTestChild(
+      "MachPortBrokerTestChild", command_line, LaunchOptions());
+  broker_.AddPlaceholderForPid(test_child_process.Handle());
+  broker_.GetLock().Release();
+
+  WaitForTaskPort();
+  EXPECT_EQ(test_child_process.Handle(), received_process_);
+
+  int rv = -1;
+  ASSERT_TRUE(test_child_process.WaitForExitWithTimeout(
+      TestTimeouts::action_timeout(), &rv));
+  EXPECT_EQ(0, rv);
+
+  EXPECT_NE(static_cast<mach_port_t>(MACH_PORT_NULL),
+            broker_.TaskForPid(test_child_process.Handle()));
+}
+
+TEST_F(MachPortBrokerTest, ReceivePortFromChildWithoutAdding) {
+  ASSERT_TRUE(broker_.Init());
+  CommandLine command_line(
+      base::GetMultiProcessTestChildBaseCommandLine());
+  broker_.GetLock().Acquire();
+  base::Process test_child_process = base::SpawnMultiProcessTestChild(
+      "MachPortBrokerTestChild", command_line, LaunchOptions());
+  broker_.GetLock().Release();
+
+  int rv = -1;
+  ASSERT_TRUE(test_child_process.WaitForExitWithTimeout(
+      TestTimeouts::action_timeout(), &rv));
+  EXPECT_EQ(0, rv);
+
+  EXPECT_EQ(static_cast<mach_port_t>(MACH_PORT_NULL),
+            broker_.TaskForPid(test_child_process.Handle()));
+}
+
+}  // namespace base
diff --git a/base/mac/mach_port_util.cc b/base/mac/mach_port_util.cc
new file mode 100644
index 0000000..0eee210
--- /dev/null
+++ b/base/mac/mach_port_util.cc
@@ -0,0 +1,136 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/mac/mach_port_util.h"
+
+#include "base/logging.h"
+
+namespace base {
+
+namespace {
+
+// Struct for sending a complex Mach message.
+struct MachSendComplexMessage {
+  mach_msg_header_t header;
+  mach_msg_body_t body;
+  mach_msg_port_descriptor_t data;
+};
+
+// Struct for receiving a complex message.
+struct MachReceiveComplexMessage {
+  mach_msg_header_t header;
+  mach_msg_body_t body;
+  mach_msg_port_descriptor_t data;
+  mach_msg_trailer_t trailer;
+};
+
+}  // namespace
+
+kern_return_t SendMachPort(mach_port_t endpoint,
+                           mach_port_t port_to_send,
+                           int disposition) {
+  MachSendComplexMessage send_msg;
+  send_msg.header.msgh_bits =
+      MACH_MSGH_BITS(MACH_MSG_TYPE_MOVE_SEND_ONCE, 0) | MACH_MSGH_BITS_COMPLEX;
+  send_msg.header.msgh_size = sizeof(send_msg);
+  send_msg.header.msgh_remote_port = endpoint;
+  send_msg.header.msgh_local_port = MACH_PORT_NULL;
+  send_msg.header.msgh_reserved = 0;
+  send_msg.header.msgh_id = 0;
+  send_msg.body.msgh_descriptor_count = 1;
+  send_msg.data.name = port_to_send;
+  send_msg.data.disposition = disposition;
+  send_msg.data.type = MACH_MSG_PORT_DESCRIPTOR;
+
+  kern_return_t kr =
+      mach_msg(&send_msg.header, MACH_SEND_MSG | MACH_SEND_TIMEOUT,
+               send_msg.header.msgh_size,
+               0,                // receive limit
+               MACH_PORT_NULL,   // receive name
+               0,                // timeout
+               MACH_PORT_NULL);  // notification port
+
+  if (kr != KERN_SUCCESS)
+    mach_port_deallocate(mach_task_self(), endpoint);
+
+  return kr;
+}
+
+base::mac::ScopedMachSendRight ReceiveMachPort(mach_port_t port_to_listen_on) {
+  MachReceiveComplexMessage recv_msg;
+  mach_msg_header_t* recv_hdr = &recv_msg.header;
+  recv_hdr->msgh_local_port = port_to_listen_on;
+  recv_hdr->msgh_size = sizeof(recv_msg);
+
+  kern_return_t kr =
+      mach_msg(recv_hdr, MACH_RCV_MSG | MACH_RCV_TIMEOUT, 0,
+               recv_hdr->msgh_size, port_to_listen_on, 0, MACH_PORT_NULL);
+  if (kr != KERN_SUCCESS)
+    return base::mac::ScopedMachSendRight(MACH_PORT_NULL);
+  if (recv_msg.header.msgh_id != 0)
+    return base::mac::ScopedMachSendRight(MACH_PORT_NULL);
+  return base::mac::ScopedMachSendRight(recv_msg.data.name);
+}
+
+mach_port_name_t CreateIntermediateMachPort(
+    mach_port_t task_port,
+    base::mac::ScopedMachSendRight port_to_insert,
+    MachCreateError* error_code) {
+  DCHECK_NE(mach_task_self(), task_port);
+  DCHECK_NE(static_cast<mach_port_name_t>(MACH_PORT_NULL), task_port);
+
+  // Make a port with receive rights in the destination task.
+  mach_port_name_t endpoint;
+  kern_return_t kr =
+      mach_port_allocate(task_port, MACH_PORT_RIGHT_RECEIVE, &endpoint);
+  if (kr != KERN_SUCCESS) {
+    if (error_code)
+      *error_code = MachCreateError::ERROR_MAKE_RECEIVE_PORT;
+    return MACH_PORT_NULL;
+  }
+
+  // Change its message queue limit so that it accepts one message.
+  mach_port_limits limits = {};
+  limits.mpl_qlimit = 1;
+  kr = mach_port_set_attributes(task_port, endpoint, MACH_PORT_LIMITS_INFO,
+                                reinterpret_cast<mach_port_info_t>(&limits),
+                                MACH_PORT_LIMITS_INFO_COUNT);
+  if (kr != KERN_SUCCESS) {
+    if (error_code)
+      *error_code = MachCreateError::ERROR_SET_ATTRIBUTES;
+    mach_port_deallocate(task_port, endpoint);
+    return MACH_PORT_NULL;
+  }
+
+  // Get a send right.
+  mach_port_t send_once_right;
+  mach_msg_type_name_t send_right_type;
+  kr =
+      mach_port_extract_right(task_port, endpoint, MACH_MSG_TYPE_MAKE_SEND_ONCE,
+                              &send_once_right, &send_right_type);
+  if (kr != KERN_SUCCESS) {
+    if (error_code)
+      *error_code = MachCreateError::ERROR_EXTRACT_DEST_RIGHT;
+    mach_port_deallocate(task_port, endpoint);
+    return MACH_PORT_NULL;
+  }
+  DCHECK_EQ(static_cast<mach_msg_type_name_t>(MACH_MSG_TYPE_PORT_SEND_ONCE),
+            send_right_type);
+
+  // This call takes ownership of |send_once_right|.
+  kr = base::SendMachPort(
+      send_once_right, port_to_insert.get(), MACH_MSG_TYPE_COPY_SEND);
+  if (kr != KERN_SUCCESS) {
+    if (error_code)
+      *error_code = MachCreateError::ERROR_SEND_MACH_PORT;
+    mach_port_deallocate(task_port, endpoint);
+    return MACH_PORT_NULL;
+  }
+
+  // Endpoint is intentionally leaked into the destination task. An IPC must be
+  // sent to the destination task so that it can clean up this port.
+  return endpoint;
+}
+
+}  // namespace base
diff --git a/base/mac/mach_port_util.h b/base/mac/mach_port_util.h
new file mode 100644
index 0000000..f7a7f32
--- /dev/null
+++ b/base/mac/mach_port_util.h
@@ -0,0 +1,48 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_MAC_MACH_PORT_UTIL_H_
+#define BASE_MAC_MACH_PORT_UTIL_H_
+
+#include <mach/mach.h>
+
+#include "base/base_export.h"
+#include "base/mac/scoped_mach_port.h"
+
+namespace base {
+
+enum class MachCreateError {
+    ERROR_MAKE_RECEIVE_PORT,
+    ERROR_SET_ATTRIBUTES,
+    ERROR_EXTRACT_DEST_RIGHT,
+    ERROR_SEND_MACH_PORT,
+};
+
+// Sends a Mach port to |dest_port|. Assumes that |dest_port| is a send once
+// right. Takes ownership of |dest_port|.
+BASE_EXPORT kern_return_t SendMachPort(mach_port_t dest_port,
+                                       mach_port_t port_to_send,
+                                       int disposition);
+
+// Receives a Mach port from |port_to_listen_on|, which should have exactly one
+// queued message. Returns |MACH_PORT_NULL| on any error.
+BASE_EXPORT base::mac::ScopedMachSendRight ReceiveMachPort(
+    mach_port_t port_to_listen_on);
+
+// Creates an intermediate Mach port in |task_port| and sends |port_to_insert|
+// as a mach_msg to the intermediate Mach port.
+// |task_port| is the task port of another process.
+// |port_to_insert| must be a send right in the current task's name space.
+// Returns the intermediate port on success, and MACH_PORT_NULL on failure.
+// On failure, |error_code| is set if not null.
+// This method takes ownership of |port_to_insert|. On success, ownership is
+// passed to the intermediate Mach port.
+BASE_EXPORT mach_port_name_t CreateIntermediateMachPort(
+    mach_port_t task_port,
+    base::mac::ScopedMachSendRight port_to_insert,
+    MachCreateError* error_code);
+
+}  // namespace base
+
+#endif  // BASE_MAC_MACH_PORT_UTIL_H_
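A hedged sketch of the intended flow, with |child_task_port| and |right| assumed to be obtained elsewhere:

    // Privileged process: park |right| in an intermediate port inside the
    // target task.
    base::MachCreateError error;
    mach_port_name_t endpoint = base::CreateIntermediateMachPort(
        child_task_port, std::move(right), &error);
    if (endpoint == MACH_PORT_NULL) {
      // |error| identifies which of the four steps failed.
    }

    // Target task (after learning |endpoint| via IPC): dequeue the single
    // queued message to recover the send right.
    base::mac::ScopedMachSendRight recovered = base::ReceiveMachPort(endpoint);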
diff --git a/base/memory/manual_constructor.h b/base/memory/manual_constructor.h
index 56081a1..f401f62 100644
--- a/base/memory/manual_constructor.h
+++ b/base/memory/manual_constructor.h
@@ -54,8 +54,12 @@
   inline const Type& operator*() const { return *get(); }
 
   template <typename... Ts>
-  inline void Init(const Ts&... params) {
-    new(space_.void_data()) Type(params...);
+  inline void Init(Ts&&... params) {
+    new(space_.void_data()) Type(std::forward<Ts>(params)...);
+  }
+
+  inline void InitFromMove(ManualConstructor<Type>&& o) {
+    Init(std::move(*o));
   }
 
   inline void Destroy() {
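The switch to perfect forwarding above lets Init() pass rvalues through to the constructor without copies; a small sketch:

    base::ManualConstructor<std::vector<int>> storage;
    storage.Init(std::vector<int>{1, 2, 3});  // forwards the rvalue to the ctor
    storage.get()->push_back(4);
    storage.Destroy();  // must be called explicitly before the storage dies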
diff --git a/base/memory/raw_scoped_refptr_mismatch_checker.h b/base/memory/raw_scoped_refptr_mismatch_checker.h
index 09f982b..5dbc183 100644
--- a/base/memory/raw_scoped_refptr_mismatch_checker.h
+++ b/base/memory/raw_scoped_refptr_mismatch_checker.h
@@ -5,10 +5,10 @@
 #ifndef BASE_MEMORY_RAW_SCOPED_REFPTR_MISMATCH_CHECKER_H_
 #define BASE_MEMORY_RAW_SCOPED_REFPTR_MISMATCH_CHECKER_H_
 
+#include <tuple>
+#include <type_traits>
+
 #include "base/memory/ref_counted.h"
-#include "base/template_util.h"
-#include "base/tuple.h"
-#include "build/build_config.h"
 
 // It is dangerous to post a task with a T* argument where T is a subtype of
 // RefCounted(Base|ThreadSafeBase), since by the time the parameter is used, the
@@ -25,20 +25,14 @@
 
 template <typename T>
 struct NeedsScopedRefptrButGetsRawPtr {
-#if defined(OS_WIN)
-  enum {
-    value = base::false_type::value
-  };
-#else
   enum {
     // Human readable translation: you needed to be a scoped_refptr if you are a
     // raw pointer type and are convertible to a RefCounted(Base|ThreadSafeBase)
     // type.
-    value = (is_pointer<T>::value &&
-             (is_convertible<T, subtle::RefCountedBase*>::value ||
-              is_convertible<T, subtle::RefCountedThreadSafeBase*>::value))
+    value = (std::is_pointer<T>::value &&
+             (std::is_convertible<T, subtle::RefCountedBase*>::value ||
+              std::is_convertible<T, subtle::RefCountedThreadSafeBase*>::value))
   };
-#endif
 };
 
 template <typename Params>
@@ -47,14 +41,14 @@
 };
 
 template <>
-struct ParamsUseScopedRefptrCorrectly<Tuple<>> {
+struct ParamsUseScopedRefptrCorrectly<std::tuple<>> {
   enum { value = 1 };
 };
 
 template <typename Head, typename... Tail>
-struct ParamsUseScopedRefptrCorrectly<Tuple<Head, Tail...>> {
+struct ParamsUseScopedRefptrCorrectly<std::tuple<Head, Tail...>> {
   enum { value = !NeedsScopedRefptrButGetsRawPtr<Head>::value &&
-                 ParamsUseScopedRefptrCorrectly<Tuple<Tail...>>::value };
+                  ParamsUseScopedRefptrCorrectly<std::tuple<Tail...>>::value };
 };
 
 }  // namespace internal
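A compile-time sketch of the checker with the std::tuple specializations above (the Foo type is illustrative):

    class Foo : public base::RefCounted<Foo> {
     private:
      friend class base::RefCounted<Foo>;
      ~Foo() = default;
    };

    // A raw Foo* in the bound-parameter tuple is flagged...
    static_assert(
        !base::internal::ParamsUseScopedRefptrCorrectly<std::tuple<Foo*>>::value,
        "raw pointer to a RefCounted type must be rejected");
    // ...while a scoped_refptr<Foo> passes.
    static_assert(base::internal::ParamsUseScopedRefptrCorrectly<
                      std::tuple<scoped_refptr<Foo>>>::value,
                  "scoped_refptr is accepted");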
diff --git a/base/memory/ref_counted.h b/base/memory/ref_counted.h
index a1c1269..e739514 100644
--- a/base/memory/ref_counted.h
+++ b/base/memory/ref_counted.h
@@ -360,15 +360,25 @@
  private:
   template <typename U> friend class scoped_refptr;
 
-  // Allow scoped_refptr<T> to be used in boolean expressions, but not
-  // implicitly convertible to a real bool (which is dangerous).
+  // Implement "Safe Bool Idiom"
+  // https://en.wikibooks.org/wiki/More_C%2B%2B_Idioms/Safe_bool
   //
-  // Note that this trick is only safe when the == and != operators
-  // are declared explicitly, as otherwise "refptr1 == refptr2"
-  // will compile but do the wrong thing (i.e., convert to Testable
-  // and then do the comparison).
+  // Allow scoped_refptr<T> to be used in boolean expressions such as
+  //   if (ref_ptr_instance)
+  // But do not become convertible to a real bool (which is dangerous).
+  //   Implementation requires:
+  //     typedef Testable
+  //     operator Testable() const
+  //     operator==
+  //     operator!=
+  //
+  // == and != operators must be declared explicitly or disallowed, as
+  // otherwise "ptr1 == ptr2" will compile but do the wrong thing (i.e., convert
+  // to Testable and then do the comparison).
+  //
+  // C++11 provides "explicit operator bool()"; however, it is currently
+  // banned due to MSVS2013. https://chromium-cpp.appspot.com/#core-blacklist
   typedef T* scoped_refptr::*Testable;
-
  public:
   operator Testable() const { return ptr_ ? &scoped_refptr::ptr_ : nullptr; }
 
@@ -416,8 +426,6 @@
   return scoped_refptr<T>(t);
 }
 
-// Temporary operator overloads to facilitate the transition. See
-// https://crbug.com/110610.
 template <typename T, typename U>
 bool operator==(const scoped_refptr<T>& lhs, const U* rhs) {
   return lhs.get() == rhs;
diff --git a/base/memory/ref_counted_memory.cc b/base/memory/ref_counted_memory.cc
index 7bbd317..26b78f3 100644
--- a/base/memory/ref_counted_memory.cc
+++ b/base/memory/ref_counted_memory.cc
@@ -38,9 +38,9 @@
 RefCountedBytes::RefCountedBytes(const unsigned char* p, size_t size)
     : data_(p, p + size) {}
 
-RefCountedBytes* RefCountedBytes::TakeVector(
+scoped_refptr<RefCountedBytes> RefCountedBytes::TakeVector(
     std::vector<unsigned char>* to_destroy) {
-  RefCountedBytes* bytes = new RefCountedBytes;
+  scoped_refptr<RefCountedBytes> bytes(new RefCountedBytes);
   bytes->data_.swap(*to_destroy);
   return bytes;
 }
diff --git a/base/memory/ref_counted_memory.h b/base/memory/ref_counted_memory.h
index f37a860..aa22c9e 100644
--- a/base/memory/ref_counted_memory.h
+++ b/base/memory/ref_counted_memory.h
@@ -81,7 +81,8 @@
   // Constructs a RefCountedBytes object by performing a swap. (To
   // non-destructively build a RefCountedBytes, use the constructor that
   // takes a vector.)
-  static RefCountedBytes* TakeVector(std::vector<unsigned char>* to_destroy);
+  static scoped_refptr<RefCountedBytes> TakeVector(
+      std::vector<unsigned char>* to_destroy);
 
   // Overridden from RefCountedMemory:
   const unsigned char* front() const override;
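
Callers of the new signature receive ownership directly; a usage sketch (assumed caller code, not from this change):

std::vector<unsigned char> buffer(1024, 0xff);  // bytes produced elsewhere
scoped_refptr<base::RefCountedBytes> owned =
    base::RefCountedBytes::TakeVector(&buffer);
// The swap leaves |buffer| empty. Because the result is a scoped_refptr
// rather than a raw pointer, discarding the return value can no longer
// leak the allocation.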
diff --git a/base/memory/ref_counted_unittest.cc b/base/memory/ref_counted_unittest.cc
index dbc6f33..3f56b4a 100644
--- a/base/memory/ref_counted_unittest.cc
+++ b/base/memory/ref_counted_unittest.cc
@@ -150,10 +150,31 @@
 }
 
 TEST(RefCountedUnitTest, BooleanTesting) {
-  scoped_refptr<SelfAssign> p;
-  EXPECT_FALSE(p);
-  p = new SelfAssign;
-  EXPECT_TRUE(p);
+  scoped_refptr<SelfAssign> ptr_to_an_instance = new SelfAssign;
+  EXPECT_TRUE(ptr_to_an_instance);
+  EXPECT_FALSE(!ptr_to_an_instance);
+
+  if (ptr_to_an_instance) {
+  } else {
+    ADD_FAILURE() << "Pointer to an instance should result in true.";
+  }
+
+  if (!ptr_to_an_instance) {  // check for operator!().
+    ADD_FAILURE() << "Pointer to an instance should result in !x being false.";
+  }
+
+  scoped_refptr<SelfAssign> null_ptr;
+  EXPECT_FALSE(null_ptr);
+  EXPECT_TRUE(!null_ptr);
+
+  if (null_ptr) {
+    ADD_FAILURE() << "Null pointer should result in false.";
+  }
+
+  if (!null_ptr) {  // check for operator!().
+  } else {
+    ADD_FAILURE() << "Null pointer should result in !x being true.";
+  }
 }
 
 TEST(RefCountedUnitTest, Equality) {
diff --git a/base/memory/scoped_ptr.h b/base/memory/scoped_ptr.h
index 89b90ac..2d2c0ec 100644
--- a/base/memory/scoped_ptr.h
+++ b/base/memory/scoped_ptr.h
@@ -33,6 +33,15 @@
 //     foo[10].Method();     // Foo::Method on the 10th element.
 //   }
 //
+// Scopers are testable as booleans:
+//   {
+//     scoped_ptr<Foo> foo;
+//     if (!foo)
+//       foo.reset(new Foo());
+//     if (foo)
+//       LOG(INFO) << "This code is reached";
+//   }
+//
 // These scopers also implement part of the functionality of C++11 unique_ptr
 // in that they are "movable but not copyable."  You can use the scopers in
 // the parameter and return types of functions to signify ownership transfer
@@ -81,7 +90,8 @@
 // This is an implementation designed to match the anticipated future TR2
 // implementation of the scoped_ptr class.
 
-#include <assert.h>
+// TODO(dcheng): Clean up these headers, but there are likely lots of existing
+// IWYU violations.
 #include <stddef.h>
 #include <stdlib.h>
 
@@ -91,17 +101,13 @@
 #include <utility>
 
 #include "base/compiler_specific.h"
+#include "base/logging.h"
 #include "base/macros.h"
 #include "base/move.h"
-#include "base/template_util.h"
+#include "build/build_config.h"
 
 namespace base {
 
-namespace subtle {
-class RefCountedBase;
-class RefCountedThreadSafeBase;
-}  // namespace subtle
-
 // Function object which invokes 'free' on its parameter, which must be
 // a pointer. Can be used to store malloc-allocated pointers in scoped_ptr:
 //
@@ -113,485 +119,10 @@
   }
 };
 
-namespace internal {
-
-template <typename T> struct IsNotRefCounted {
-  enum {
-    value = !base::is_convertible<T*, base::subtle::RefCountedBase*>::value &&
-        !base::is_convertible<T*, base::subtle::RefCountedThreadSafeBase*>::
-            value
-  };
-};
-
-// Minimal implementation of the core logic of scoped_ptr, suitable for
-// reuse in both scoped_ptr and its specializations.
-template <class T, class D>
-class scoped_ptr_impl {
- public:
-  explicit scoped_ptr_impl(T* p) : data_(p) {}
-
-  // Initializer for deleters that have data parameters.
-  scoped_ptr_impl(T* p, const D& d) : data_(p, d) {}
-
-  // Templated constructor that destructively takes the value from another
-  // scoped_ptr_impl.
-  template <typename U, typename V>
-  scoped_ptr_impl(scoped_ptr_impl<U, V>* other)
-      : data_(other->release(), other->get_deleter()) {
-    // We do not support move-only deleters.  We could modify our move
-    // emulation to have base::subtle::move() and base::subtle::forward()
-    // functions that are imperfect emulations of their C++11 equivalents,
-    // but until there's a requirement, just assume deleters are copyable.
-  }
-
-  template <typename U, typename V>
-  void TakeState(scoped_ptr_impl<U, V>* other) {
-    // See comment in templated constructor above regarding lack of support
-    // for move-only deleters.
-    reset(other->release());
-    get_deleter() = other->get_deleter();
-  }
-
-  ~scoped_ptr_impl() {
-    // Match libc++, which calls reset() in its destructor.
-    // Use nullptr as the new value for three reasons:
-    // 1. libc++ does it.
-    // 2. Avoids infinitely recursing into destructors if two classes are owned
-    //    in a reference cycle (see ScopedPtrTest.ReferenceCycle).
-    // 3. If |this| is accessed in the future, in a use-after-free bug, attempts
-    //    to dereference |this|'s pointer should cause either a failure or a
-    //    segfault closer to the problem. If |this| wasn't reset to nullptr,
-    //    the access would cause the deleted memory to be read or written
-    //    leading to other more subtle issues.
-    reset(nullptr);
-  }
-
-  void reset(T* p) {
-    // Match C++11's definition of unique_ptr::reset(), which requires changing
-    // the pointer before invoking the deleter on the old pointer. This prevents
-    // |this| from being accessed after the deleter is run, which may destroy
-    // |this|.
-    T* old = data_.ptr;
-    data_.ptr = p;
-    if (old != nullptr)
-      static_cast<D&>(data_)(old);
-  }
-
-  T* get() const { return data_.ptr; }
-
-  D& get_deleter() { return data_; }
-  const D& get_deleter() const { return data_; }
-
-  void swap(scoped_ptr_impl& p2) {
-    // Standard swap idiom: 'using std::swap' ensures that std::swap is
-    // present in the overload set, but we call swap unqualified so that
-    // any more-specific overloads can be used, if available.
-    using std::swap;
-    swap(static_cast<D&>(data_), static_cast<D&>(p2.data_));
-    swap(data_.ptr, p2.data_.ptr);
-  }
-
-  T* release() {
-    T* old_ptr = data_.ptr;
-    data_.ptr = nullptr;
-    return old_ptr;
-  }
-
- private:
-  // Needed to allow type-converting constructor.
-  template <typename U, typename V> friend class scoped_ptr_impl;
-
-  // Use the empty base class optimization to allow us to have a D
-  // member, while avoiding any space overhead for it when D is an
-  // empty class.  See e.g. http://www.cantrip.org/emptyopt.html for a good
-  // discussion of this technique.
-  struct Data : public D {
-    explicit Data(T* ptr_in) : ptr(ptr_in) {}
-    Data(T* ptr_in, const D& other) : D(other), ptr(ptr_in) {}
-    T* ptr;
-  };
-
-  Data data_;
-
-  DISALLOW_COPY_AND_ASSIGN(scoped_ptr_impl);
-};
-
-}  // namespace internal
-
 }  // namespace base
 
-// A scoped_ptr<T> is like a T*, except that the destructor of scoped_ptr<T>
-// automatically deletes the pointer it holds (if any).
-// That is, scoped_ptr<T> owns the T object that it points to.
-// Like a T*, a scoped_ptr<T> may hold either nullptr or a pointer to a T
-// object. Also like T*, scoped_ptr<T> is thread-compatible, and once you
-// dereference it, you get the thread safety guarantees of T.
-//
-// The size of scoped_ptr is small. On most compilers, when using the
-// std::default_delete, sizeof(scoped_ptr<T>) == sizeof(T*). Custom deleters
-// will increase the size proportional to whatever state they need to have. See
-// comments inside scoped_ptr_impl<> for details.
-//
-// Current implementation targets having a strict subset of  C++11's
-// unique_ptr<> features. Known deficiencies include not supporting move-only
-// deleteres, function pointers as deleters, and deleters with reference
-// types.
-template <class T, class D = std::default_delete<T>>
-class scoped_ptr {
-  DISALLOW_COPY_AND_ASSIGN_WITH_MOVE_FOR_BIND(scoped_ptr)
-
-  static_assert(!std::is_array<T>::value,
-                "scoped_ptr doesn't support array with size");
-  static_assert(base::internal::IsNotRefCounted<T>::value,
-                "T is a refcounted type and needs a scoped_refptr");
-
- public:
-  // The element and deleter types.
-  using element_type = T;
-  using deleter_type = D;
-
-  // Constructor.  Defaults to initializing with nullptr.
-  scoped_ptr() : impl_(nullptr) {}
-
-  // Constructor.  Takes ownership of p.
-  explicit scoped_ptr(element_type* p) : impl_(p) {}
-
-  // Constructor.  Allows initialization of a stateful deleter.
-  scoped_ptr(element_type* p, const D& d) : impl_(p, d) {}
-
-  // Constructor.  Allows construction from a nullptr.
-  scoped_ptr(std::nullptr_t) : impl_(nullptr) {}
-
-  // Move constructor.
-  //
-  // IMPLEMENTATION NOTE: Clang requires a move constructor to be defined (and
-  // not just the conversion constructor) in order to warn on pessimizing moves.
-  // The requirements for the move constructor are specified in C++11
-  // 20.7.1.2.1.15-17, which has some subtleties around reference deleters. As
-  // we don't support reference (or move-only) deleters, the post conditions are
-  // trivially true: we always copy construct the deleter from other's deleter.
-  scoped_ptr(scoped_ptr&& other) : impl_(&other.impl_) {}
-
-  // Conversion constructor.  Allows construction from a scoped_ptr rvalue for a
-  // convertible type and deleter.
-  //
-  // IMPLEMENTATION NOTE: C++ 20.7.1.2.1.19 requires this constructor to only
-  // participate in overload resolution if all the following are true:
-  // - U is implicitly convertible to T: this is important for 2 reasons:
-  //     1. So type traits don't incorrectly return true, e.g.
-  //          std::is_convertible<scoped_ptr<Base>, scoped_ptr<Derived>>::value
-  //        should be false.
-  //     2. To make sure code like this compiles:
-  //        void F(scoped_ptr<int>);
-  //        void F(scoped_ptr<Base>);
-  //        // Ambiguous since both conversion constructors match.
-  //        F(scoped_ptr<Derived>());
-  // - U is not an array type: to prevent conversions from scoped_ptr<T[]> to
-  //   scoped_ptr<T>.
-  // - D is a reference type and E is the same type, or D is not a reference
-  //   type and E is implicitly convertible to D: again, we don't support
-  //   reference deleters, so we only worry about the latter requirement.
-  template <typename U,
-            typename E,
-            typename std::enable_if<!std::is_array<U>::value &&
-                                    std::is_convertible<U*, T*>::value &&
-                                    std::is_convertible<E, D>::value>::type* =
-                nullptr>
-  scoped_ptr(scoped_ptr<U, E>&& other)
-      : impl_(&other.impl_) {}
-
-  // operator=.
-  //
-  // IMPLEMENTATION NOTE: Unlike the move constructor, Clang does not appear to
-  // require a move assignment operator to trigger the pessimizing move warning:
-  // in this case, the warning triggers when moving a temporary. For consistency
-  // with the move constructor, we define it anyway. C++11 20.7.1.2.3.1-3
-  // defines several requirements around this: like the move constructor, the
-  // requirements are simplified by the fact that we don't support move-only or
-  // reference deleters.
-  scoped_ptr& operator=(scoped_ptr&& rhs) {
-    impl_.TakeState(&rhs.impl_);
-    return *this;
-  }
-
-  // operator=.  Allows assignment from a scoped_ptr rvalue for a convertible
-  // type and deleter.
-  //
-  // IMPLEMENTATION NOTE: C++11 unique_ptr<> keeps this operator= distinct from
-  // the normal move assignment operator. C++11 20.7.1.2.3.4-7 contains the
-  // requirement for this operator, but like the conversion constructor, the
-  // requirements are greatly simplified by not supporting move-only or
-  // reference deleters.
-  template <typename U,
-            typename E,
-            typename std::enable_if<!std::is_array<U>::value &&
-                                    std::is_convertible<U*, T*>::value &&
-                                    // Note that this really should be
-                                    // std::is_assignable, but <type_traits>
-                                    // appears to be missing this on some
-                                    // platforms. This is close enough (though
-                                    // it's not the same).
-                                    std::is_convertible<D, E>::value>::type* =
-                nullptr>
-  scoped_ptr& operator=(scoped_ptr<U, E>&& rhs) {
-    impl_.TakeState(&rhs.impl_);
-    return *this;
-  }
-
-  // operator=.  Allows assignment from a nullptr. Deletes the currently owned
-  // object, if any.
-  scoped_ptr& operator=(std::nullptr_t) {
-    reset();
-    return *this;
-  }
-
-  // Reset.  Deletes the currently owned object, if any.
-  // Then takes ownership of a new object, if given.
-  void reset(element_type* p = nullptr) { impl_.reset(p); }
-
-  // Accessors to get the owned object.
-  // operator* and operator-> will assert() if there is no current object.
-  element_type& operator*() const {
-    assert(impl_.get() != nullptr);
-    return *impl_.get();
-  }
-  element_type* operator->() const  {
-    assert(impl_.get() != nullptr);
-    return impl_.get();
-  }
-  element_type* get() const { return impl_.get(); }
-
-  // Access to the deleter.
-  deleter_type& get_deleter() { return impl_.get_deleter(); }
-  const deleter_type& get_deleter() const { return impl_.get_deleter(); }
-
-  // Allow scoped_ptr<element_type> to be used in boolean expressions, but not
-  // implicitly convertible to a real bool (which is dangerous).
-  //
-  // Note that this trick is only safe when the == and != operators
-  // are declared explicitly, as otherwise "scoped_ptr1 ==
-  // scoped_ptr2" will compile but do the wrong thing (i.e., convert
-  // to Testable and then do the comparison).
- private:
-  typedef base::internal::scoped_ptr_impl<element_type, deleter_type>
-      scoped_ptr::*Testable;
-
- public:
-  operator Testable() const {
-    return impl_.get() ? &scoped_ptr::impl_ : nullptr;
-  }
-
-  // Swap two scoped pointers.
-  void swap(scoped_ptr& p2) {
-    impl_.swap(p2.impl_);
-  }
-
-  // Release a pointer.
-  // The return value is the current pointer held by this object. If this object
-  // holds a nullptr, the return value is nullptr. After this operation, this
-  // object will hold a nullptr, and will not own the object any more.
-  element_type* release() WARN_UNUSED_RESULT {
-    return impl_.release();
-  }
-
- private:
-  // Needed to reach into |impl_| in the constructor.
-  template <typename U, typename V> friend class scoped_ptr;
-  base::internal::scoped_ptr_impl<element_type, deleter_type> impl_;
-
-  // Forbidden for API compatibility with std::unique_ptr.
-  explicit scoped_ptr(int disallow_construction_from_null);
-};
-
-template <class T, class D>
-class scoped_ptr<T[], D> {
-  DISALLOW_COPY_AND_ASSIGN_WITH_MOVE_FOR_BIND(scoped_ptr)
-
- public:
-  // The element and deleter types.
-  using element_type = T;
-  using deleter_type = D;
-
-  // Constructor.  Defaults to initializing with nullptr.
-  scoped_ptr() : impl_(nullptr) {}
-
-  // Constructor. Stores the given array. Note that the argument's type
-  // must exactly match T*. In particular:
-  // - it cannot be a pointer to a type derived from T, because it is
-  //   inherently unsafe in the general case to access an array through a
-  //   pointer whose dynamic type does not match its static type (eg., if
-  //   T and the derived types had different sizes access would be
-  //   incorrectly calculated). Deletion is also always undefined
-  //   (C++98 [expr.delete]p3). If you're doing this, fix your code.
-  // - it cannot be const-qualified differently from T per unique_ptr spec
-  //   (http://cplusplus.github.com/LWG/lwg-active.html#2118). Users wanting
-  //   to work around this may use const_cast<const T*>().
-  explicit scoped_ptr(element_type* array) : impl_(array) {}
-
-  // Constructor.  Allows construction from a nullptr.
-  scoped_ptr(std::nullptr_t) : impl_(nullptr) {}
-
-  // Constructor.  Allows construction from a scoped_ptr rvalue.
-  scoped_ptr(scoped_ptr&& other) : impl_(&other.impl_) {}
-
-  // operator=.  Allows assignment from a scoped_ptr rvalue.
-  scoped_ptr& operator=(scoped_ptr&& rhs) {
-    impl_.TakeState(&rhs.impl_);
-    return *this;
-  }
-
-  // operator=.  Allows assignment from a nullptr. Deletes the currently owned
-  // array, if any.
-  scoped_ptr& operator=(std::nullptr_t) {
-    reset();
-    return *this;
-  }
-
-  // Reset.  Deletes the currently owned array, if any.
-  // Then takes ownership of a new object, if given.
-  void reset(element_type* array = nullptr) { impl_.reset(array); }
-
-  // Accessors to get the owned array.
-  element_type& operator[](size_t i) const {
-    assert(impl_.get() != nullptr);
-    return impl_.get()[i];
-  }
-  element_type* get() const { return impl_.get(); }
-
-  // Access to the deleter.
-  deleter_type& get_deleter() { return impl_.get_deleter(); }
-  const deleter_type& get_deleter() const { return impl_.get_deleter(); }
-
-  // Allow scoped_ptr<element_type> to be used in boolean expressions, but not
-  // implicitly convertible to a real bool (which is dangerous).
- private:
-  typedef base::internal::scoped_ptr_impl<element_type, deleter_type>
-      scoped_ptr::*Testable;
-
- public:
-  operator Testable() const {
-    return impl_.get() ? &scoped_ptr::impl_ : nullptr;
-  }
-
-  // Swap two scoped pointers.
-  void swap(scoped_ptr& p2) {
-    impl_.swap(p2.impl_);
-  }
-
-  // Release a pointer.
-  // The return value is the current pointer held by this object. If this object
-  // holds a nullptr, the return value is nullptr. After this operation, this
-  // object will hold a nullptr, and will not own the object any more.
-  element_type* release() WARN_UNUSED_RESULT {
-    return impl_.release();
-  }
-
- private:
-  // Force element_type to be a complete type.
-  enum { type_must_be_complete = sizeof(element_type) };
-
-  // Actually hold the data.
-  base::internal::scoped_ptr_impl<element_type, deleter_type> impl_;
-
-  // Disable initialization from any type other than element_type*, by
-  // providing a constructor that matches such an initialization, but is
-  // private and has no definition. This is disabled because it is not safe to
-  // call delete[] on an array whose static type does not match its dynamic
-  // type.
-  template <typename U> explicit scoped_ptr(U* array);
-  explicit scoped_ptr(int disallow_construction_from_null);
-
-  // Disable reset() from any type other than element_type*, for the same
-  // reasons as the constructor above.
-  template <typename U> void reset(U* array);
-  void reset(int disallow_reset_from_null);
-};
-
-// Free functions
-template <class T, class D>
-void swap(scoped_ptr<T, D>& p1, scoped_ptr<T, D>& p2) {
-  p1.swap(p2);
-}
-
-template <class T1, class D1, class T2, class D2>
-bool operator==(const scoped_ptr<T1, D1>& p1, const scoped_ptr<T2, D2>& p2) {
-  return p1.get() == p2.get();
-}
-template <class T, class D>
-bool operator==(const scoped_ptr<T, D>& p, std::nullptr_t) {
-  return p.get() == nullptr;
-}
-template <class T, class D>
-bool operator==(std::nullptr_t, const scoped_ptr<T, D>& p) {
-  return p.get() == nullptr;
-}
-
-template <class T1, class D1, class T2, class D2>
-bool operator!=(const scoped_ptr<T1, D1>& p1, const scoped_ptr<T2, D2>& p2) {
-  return !(p1 == p2);
-}
-template <class T, class D>
-bool operator!=(const scoped_ptr<T, D>& p, std::nullptr_t) {
-  return !(p == nullptr);
-}
-template <class T, class D>
-bool operator!=(std::nullptr_t, const scoped_ptr<T, D>& p) {
-  return !(p == nullptr);
-}
-
-template <class T1, class D1, class T2, class D2>
-bool operator<(const scoped_ptr<T1, D1>& p1, const scoped_ptr<T2, D2>& p2) {
-  return p1.get() < p2.get();
-}
-template <class T, class D>
-bool operator<(const scoped_ptr<T, D>& p, std::nullptr_t) {
-  auto* ptr = p.get();
-  return ptr < static_cast<decltype(ptr)>(nullptr);
-}
-template <class T, class D>
-bool operator<(std::nullptr_t, const scoped_ptr<T, D>& p) {
-  auto* ptr = p.get();
-  return static_cast<decltype(ptr)>(nullptr) < ptr;
-}
-
-template <class T1, class D1, class T2, class D2>
-bool operator>(const scoped_ptr<T1, D1>& p1, const scoped_ptr<T2, D2>& p2) {
-  return p2 < p1;
-}
-template <class T, class D>
-bool operator>(const scoped_ptr<T, D>& p, std::nullptr_t) {
-  return nullptr < p;
-}
-template <class T, class D>
-bool operator>(std::nullptr_t, const scoped_ptr<T, D>& p) {
-  return p < nullptr;
-}
-
-template <class T1, class D1, class T2, class D2>
-bool operator<=(const scoped_ptr<T1, D1>& p1, const scoped_ptr<T2, D2>& p2) {
-  return !(p1 > p2);
-}
-template <class T, class D>
-bool operator<=(const scoped_ptr<T, D>& p, std::nullptr_t) {
-  return !(p > nullptr);
-}
-template <class T, class D>
-bool operator<=(std::nullptr_t, const scoped_ptr<T, D>& p) {
-  return !(nullptr > p);
-}
-
-template <class T1, class D1, class T2, class D2>
-bool operator>=(const scoped_ptr<T1, D1>& p1, const scoped_ptr<T2, D2>& p2) {
-  return !(p1 < p2);
-}
-template <class T, class D>
-bool operator>=(const scoped_ptr<T, D>& p, std::nullptr_t) {
-  return !(p < nullptr);
-}
-template <class T, class D>
-bool operator>=(std::nullptr_t, const scoped_ptr<T, D>& p) {
-  return !(nullptr < p);
-}
+template <typename T, typename D = std::default_delete<T>>
+using scoped_ptr = std::unique_ptr<T, D>;
 
 // A function to convert T* into scoped_ptr<T>
 // Doing e.g. make_scoped_ptr(new FooBarBaz<type>(arg)) is a shorter notation
@@ -601,9 +132,4 @@
   return scoped_ptr<T>(ptr);
 }
 
-template <typename T>
-std::ostream& operator<<(std::ostream& out, const scoped_ptr<T>& p) {
-  return out << p.get();
-}
-
 #endif  // BASE_MEMORY_SCOPED_PTR_H_
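
The net effect of the rewrite above: scoped_ptr<T, D> is now only another spelling of std::unique_ptr<T, D>. A minimal sketch (assumes this header is included):

#include <memory>
#include <type_traits>
#include <utility>

static_assert(std::is_same<scoped_ptr<int>, std::unique_ptr<int>>::value,
              "scoped_ptr<T> and std::unique_ptr<T> are the same type");

void AliasSketch() {
  scoped_ptr<int> a = make_scoped_ptr(new int(42));
  std::unique_ptr<int> b = std::move(a);  // Same type, so a plain move.
}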
diff --git a/base/memory/scoped_ptr_unittest.cc b/base/memory/scoped_ptr_unittest.cc
deleted file mode 100644
index 4f0e784..0000000
--- a/base/memory/scoped_ptr_unittest.cc
+++ /dev/null
@@ -1,842 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "base/memory/scoped_ptr.h"
-
-#include <stddef.h>
-
-#include <sstream>
-
-#include "base/bind.h"
-#include "base/callback.h"
-#include "base/macros.h"
-#include "build/build_config.h"
-#include "testing/gtest/include/gtest/gtest.h"
-
-namespace {
-
-// Used to test depth subtyping.
-class ConDecLoggerParent {
- public:
-  virtual ~ConDecLoggerParent() {}
-
-  virtual void SetPtr(int* ptr) = 0;
-
-  virtual int SomeMeth(int x) const = 0;
-};
-
-class ConDecLogger : public ConDecLoggerParent {
- public:
-  ConDecLogger() : ptr_(NULL) { }
-  explicit ConDecLogger(int* ptr) { SetPtr(ptr); }
-  ~ConDecLogger() override { --*ptr_; }
-
-  void SetPtr(int* ptr) override {
-    ptr_ = ptr;
-    ++*ptr_;
-  }
-
-  int SomeMeth(int x) const override { return x; }
-
- private:
-  int* ptr_;
-
-  DISALLOW_COPY_AND_ASSIGN(ConDecLogger);
-};
-
-struct CountingDeleter {
-  explicit CountingDeleter(int* count) : count_(count) {}
-  inline void operator()(double* ptr) const {
-    (*count_)++;
-  }
-  int* count_;
-};
-
-// Used to test assignment of convertible deleters.
-struct CountingDeleterChild : public CountingDeleter {
-  explicit CountingDeleterChild(int* count) : CountingDeleter(count) {}
-};
-
-class OverloadedNewAndDelete {
- public:
-  void* operator new(size_t size) {
-    g_new_count++;
-    return malloc(size);
-  }
-
-  void operator delete(void* ptr) {
-    g_delete_count++;
-    free(ptr);
-  }
-
-  static void ResetCounters() {
-    g_new_count = 0;
-    g_delete_count = 0;
-  }
-
-  static int new_count() { return g_new_count; }
-  static int delete_count() { return g_delete_count; }
-
- private:
-  static int g_new_count;
-  static int g_delete_count;
-};
-
-int OverloadedNewAndDelete::g_new_count = 0;
-int OverloadedNewAndDelete::g_delete_count = 0;
-
-scoped_ptr<ConDecLogger> PassThru(scoped_ptr<ConDecLogger> logger) {
-  return logger;
-}
-
-void GrabAndDrop(scoped_ptr<ConDecLogger> logger) {
-}
-
-// Do not delete this function!  It's existence is to test that you can
-// return a temporarily constructed version of the scoper.
-scoped_ptr<ConDecLogger> TestReturnOfType(int* constructed) {
-  return scoped_ptr<ConDecLogger>(new ConDecLogger(constructed));
-}
-
-}  // namespace
-
-TEST(ScopedPtrTest, ScopedPtr) {
-  int constructed = 0;
-
-  // Ensure size of scoped_ptr<> doesn't increase unexpectedly.
-  static_assert(sizeof(int*) >= sizeof(scoped_ptr<int>),
-                "scoped_ptr shouldn't be larger than the raw pointer");
-
-  {
-    scoped_ptr<ConDecLogger> scoper(new ConDecLogger(&constructed));
-    EXPECT_EQ(1, constructed);
-    EXPECT_TRUE(scoper.get());
-
-    EXPECT_EQ(10, scoper->SomeMeth(10));
-    EXPECT_EQ(10, scoper.get()->SomeMeth(10));
-    EXPECT_EQ(10, (*scoper).SomeMeth(10));
-  }
-  EXPECT_EQ(0, constructed);
-
-  // Test reset() and release()
-  {
-    scoped_ptr<ConDecLogger> scoper(new ConDecLogger(&constructed));
-    EXPECT_EQ(1, constructed);
-    EXPECT_TRUE(scoper.get());
-
-    scoper.reset(new ConDecLogger(&constructed));
-    EXPECT_EQ(1, constructed);
-    EXPECT_TRUE(scoper.get());
-
-    scoper.reset();
-    EXPECT_EQ(0, constructed);
-    EXPECT_FALSE(scoper.get());
-
-    scoper.reset(new ConDecLogger(&constructed));
-    EXPECT_EQ(1, constructed);
-    EXPECT_TRUE(scoper.get());
-
-    ConDecLogger* take = scoper.release();
-    EXPECT_EQ(1, constructed);
-    EXPECT_FALSE(scoper.get());
-    delete take;
-    EXPECT_EQ(0, constructed);
-
-    scoper.reset(new ConDecLogger(&constructed));
-    EXPECT_EQ(1, constructed);
-    EXPECT_TRUE(scoper.get());
-  }
-  EXPECT_EQ(0, constructed);
-
-  // Test swap().
-  {
-    scoped_ptr<ConDecLogger> scoper1;
-    scoped_ptr<ConDecLogger> scoper2;
-    EXPECT_TRUE(scoper1.get() == scoper2.get());
-    EXPECT_FALSE(scoper1.get() != scoper2.get());
-
-    ConDecLogger* logger = new ConDecLogger(&constructed);
-    scoper1.reset(logger);
-    EXPECT_EQ(logger, scoper1.get());
-    EXPECT_FALSE(scoper2.get());
-    EXPECT_FALSE(scoper1.get() == scoper2.get());
-    EXPECT_TRUE(scoper1.get() != scoper2.get());
-
-    scoper2.swap(scoper1);
-    EXPECT_EQ(logger, scoper2.get());
-    EXPECT_FALSE(scoper1.get());
-    EXPECT_FALSE(scoper1.get() == scoper2.get());
-    EXPECT_TRUE(scoper1.get() != scoper2.get());
-  }
-  EXPECT_EQ(0, constructed);
-}
-
-TEST(ScopedPtrTest, ScopedPtrDepthSubtyping) {
-  int constructed = 0;
-
-  // Test construction from a scoped_ptr to a derived class.
-  {
-    scoped_ptr<ConDecLogger> scoper(new ConDecLogger(&constructed));
-    EXPECT_EQ(1, constructed);
-    EXPECT_TRUE(scoper.get());
-
-    scoped_ptr<ConDecLoggerParent> scoper_parent(std::move(scoper));
-    EXPECT_EQ(1, constructed);
-    EXPECT_TRUE(scoper_parent.get());
-    EXPECT_FALSE(scoper.get());
-
-    EXPECT_EQ(10, scoper_parent->SomeMeth(10));
-    EXPECT_EQ(10, scoper_parent.get()->SomeMeth(10));
-    EXPECT_EQ(10, (*scoper_parent).SomeMeth(10));
-  }
-  EXPECT_EQ(0, constructed);
-
-  // Test assignment from a scoped_ptr to a derived class.
-  {
-    scoped_ptr<ConDecLogger> scoper(new ConDecLogger(&constructed));
-    EXPECT_EQ(1, constructed);
-    EXPECT_TRUE(scoper.get());
-
-    scoped_ptr<ConDecLoggerParent> scoper_parent;
-    scoper_parent = std::move(scoper);
-    EXPECT_EQ(1, constructed);
-    EXPECT_TRUE(scoper_parent.get());
-    EXPECT_FALSE(scoper.get());
-  }
-  EXPECT_EQ(0, constructed);
-
-  // Test construction of a scoped_ptr with an additional const annotation.
-  {
-    scoped_ptr<ConDecLogger> scoper(new ConDecLogger(&constructed));
-    EXPECT_EQ(1, constructed);
-    EXPECT_TRUE(scoper.get());
-
-    scoped_ptr<const ConDecLogger> scoper_const(std::move(scoper));
-    EXPECT_EQ(1, constructed);
-    EXPECT_TRUE(scoper_const.get());
-    EXPECT_FALSE(scoper.get());
-
-    EXPECT_EQ(10, scoper_const->SomeMeth(10));
-    EXPECT_EQ(10, scoper_const.get()->SomeMeth(10));
-    EXPECT_EQ(10, (*scoper_const).SomeMeth(10));
-  }
-  EXPECT_EQ(0, constructed);
-
-  // Test assignment to a scoped_ptr with an additional const annotation.
-  {
-    scoped_ptr<ConDecLogger> scoper(new ConDecLogger(&constructed));
-    EXPECT_EQ(1, constructed);
-    EXPECT_TRUE(scoper.get());
-
-    scoped_ptr<const ConDecLogger> scoper_const;
-    scoper_const = std::move(scoper);
-    EXPECT_EQ(1, constructed);
-    EXPECT_TRUE(scoper_const.get());
-    EXPECT_FALSE(scoper.get());
-  }
-  EXPECT_EQ(0, constructed);
-
-  // Test assignment to a scoped_ptr deleter of parent type.
-  {
-    // Custom deleters never touch these value.
-    double dummy_value, dummy_value2;
-    int deletes = 0;
-    int alternate_deletes = 0;
-    scoped_ptr<double, CountingDeleter> scoper(&dummy_value,
-                                               CountingDeleter(&deletes));
-    scoped_ptr<double, CountingDeleterChild> scoper_child(
-        &dummy_value2, CountingDeleterChild(&alternate_deletes));
-
-    EXPECT_TRUE(scoper);
-    EXPECT_TRUE(scoper_child);
-    EXPECT_EQ(0, deletes);
-    EXPECT_EQ(0, alternate_deletes);
-
-    // Test this compiles and correctly overwrites the deleter state.
-    scoper = std::move(scoper_child);
-    EXPECT_TRUE(scoper);
-    EXPECT_FALSE(scoper_child);
-    EXPECT_EQ(1, deletes);
-    EXPECT_EQ(0, alternate_deletes);
-
-    scoper.reset();
-    EXPECT_FALSE(scoper);
-    EXPECT_FALSE(scoper_child);
-    EXPECT_EQ(1, deletes);
-    EXPECT_EQ(1, alternate_deletes);
-
-    scoper_child.reset(&dummy_value);
-    EXPECT_TRUE(scoper_child);
-    EXPECT_EQ(1, deletes);
-    EXPECT_EQ(1, alternate_deletes);
-    scoped_ptr<double, CountingDeleter> scoper_construct(
-        std::move(scoper_child));
-    EXPECT_TRUE(scoper_construct);
-    EXPECT_FALSE(scoper_child);
-    EXPECT_EQ(1, deletes);
-    EXPECT_EQ(1, alternate_deletes);
-
-    scoper_construct.reset();
-    EXPECT_EQ(1, deletes);
-    EXPECT_EQ(2, alternate_deletes);
-  }
-}
-
-TEST(ScopedPtrTest, ScopedPtrWithArray) {
-  static const int kNumLoggers = 12;
-
-  int constructed = 0;
-
-  {
-    scoped_ptr<ConDecLogger[]> scoper(new ConDecLogger[kNumLoggers]);
-    EXPECT_TRUE(scoper);
-    EXPECT_EQ(&scoper[0], scoper.get());
-    for (int i = 0; i < kNumLoggers; ++i) {
-      scoper[i].SetPtr(&constructed);
-    }
-    EXPECT_EQ(12, constructed);
-
-    EXPECT_EQ(10, scoper.get()->SomeMeth(10));
-    EXPECT_EQ(10, scoper[2].SomeMeth(10));
-  }
-  EXPECT_EQ(0, constructed);
-
-  // Test reset() and release()
-  {
-    scoped_ptr<ConDecLogger[]> scoper;
-    EXPECT_FALSE(scoper.get());
-    EXPECT_FALSE(scoper.release());
-    EXPECT_FALSE(scoper.get());
-    scoper.reset();
-    EXPECT_FALSE(scoper.get());
-
-    scoper.reset(new ConDecLogger[kNumLoggers]);
-    for (int i = 0; i < kNumLoggers; ++i) {
-      scoper[i].SetPtr(&constructed);
-    }
-    EXPECT_EQ(12, constructed);
-    scoper.reset();
-    EXPECT_EQ(0, constructed);
-
-    scoper.reset(new ConDecLogger[kNumLoggers]);
-    for (int i = 0; i < kNumLoggers; ++i) {
-      scoper[i].SetPtr(&constructed);
-    }
-    EXPECT_EQ(12, constructed);
-    ConDecLogger* ptr = scoper.release();
-    EXPECT_EQ(12, constructed);
-    delete[] ptr;
-    EXPECT_EQ(0, constructed);
-  }
-  EXPECT_EQ(0, constructed);
-
-  // Test swap() and type-safe Boolean.
-  {
-    scoped_ptr<ConDecLogger[]> scoper1;
-    scoped_ptr<ConDecLogger[]> scoper2;
-    EXPECT_TRUE(scoper1.get() == scoper2.get());
-    EXPECT_FALSE(scoper1.get() != scoper2.get());
-
-    ConDecLogger* loggers = new ConDecLogger[kNumLoggers];
-    for (int i = 0; i < kNumLoggers; ++i) {
-      loggers[i].SetPtr(&constructed);
-    }
-    scoper1.reset(loggers);
-    EXPECT_TRUE(scoper1);
-    EXPECT_EQ(loggers, scoper1.get());
-    EXPECT_FALSE(scoper2);
-    EXPECT_FALSE(scoper2.get());
-    EXPECT_FALSE(scoper1.get() == scoper2.get());
-    EXPECT_TRUE(scoper1.get() != scoper2.get());
-
-    scoper2.swap(scoper1);
-    EXPECT_EQ(loggers, scoper2.get());
-    EXPECT_FALSE(scoper1.get());
-    EXPECT_FALSE(scoper1.get() == scoper2.get());
-    EXPECT_TRUE(scoper1.get() != scoper2.get());
-  }
-  EXPECT_EQ(0, constructed);
-
-  {
-    ConDecLogger* loggers = new ConDecLogger[kNumLoggers];
-    scoped_ptr<ConDecLogger[]> scoper(loggers);
-    EXPECT_TRUE(scoper);
-    for (int i = 0; i < kNumLoggers; ++i) {
-      scoper[i].SetPtr(&constructed);
-    }
-    EXPECT_EQ(kNumLoggers, constructed);
-
-    // Test moving with constructor;
-    scoped_ptr<ConDecLogger[]> scoper2(std::move(scoper));
-    EXPECT_EQ(kNumLoggers, constructed);
-
-    // Test moving with assignment;
-    scoped_ptr<ConDecLogger[]> scoper3;
-    scoper3 = std::move(scoper2);
-    EXPECT_EQ(kNumLoggers, constructed);
-    EXPECT_FALSE(scoper);
-    EXPECT_FALSE(scoper2);
-    EXPECT_TRUE(scoper3);
-  }
-  EXPECT_EQ(0, constructed);
-}
-
-TEST(ScopedPtrTest, MoveBehavior) {
-  int constructed = 0;
-  {
-    ConDecLogger* logger = new ConDecLogger(&constructed);
-    scoped_ptr<ConDecLogger> scoper(logger);
-    EXPECT_EQ(1, constructed);
-
-    // Test moving with constructor;
-    scoped_ptr<ConDecLogger> scoper2(std::move(scoper));
-    EXPECT_EQ(1, constructed);
-
-    // Test moving with assignment;
-    scoped_ptr<ConDecLogger> scoper3;
-    scoper3 = std::move(scoper2);
-    EXPECT_EQ(1, constructed);
-    EXPECT_FALSE(scoper.get());
-    EXPECT_FALSE(scoper2.get());
-    EXPECT_TRUE(scoper3.get());
-  }
-
-#if !defined(OS_ANDROID) && !defined(OS_LINUX)
-  // Test uncaught Pass() does not have side effects, because Pass()
-  // is implemented by std::move().
-  // TODO(danakj): Remove this test case when we remove Pass().
-  {
-    ConDecLogger* logger = new ConDecLogger(&constructed);
-    scoped_ptr<ConDecLogger> scoper(logger);
-    EXPECT_EQ(1, constructed);
-
-    // Should auto-destruct logger by end of scope.
-    scoped_ptr<ConDecLogger>&& rvalue = scoper.Pass();
-    // The Pass() function mimics std::move(), which does not have side-effects.
-    EXPECT_TRUE(scoper.get());
-    EXPECT_TRUE(rvalue);
-  }
-  EXPECT_EQ(0, constructed);
-#endif
-
-  // Test that passing to function which does nothing does not leak.
-  {
-    ConDecLogger* logger = new ConDecLogger(&constructed);
-    scoped_ptr<ConDecLogger> scoper(logger);
-    EXPECT_EQ(1, constructed);
-
-    // Should auto-destruct logger by end of scope.
-    GrabAndDrop(std::move(scoper));
-    EXPECT_FALSE(scoper.get());
-  }
-  EXPECT_EQ(0, constructed);
-}
-
-TEST(ScopedPtrTest, ReturnTypeBehavior) {
-  int constructed = 0;
-
-  // Test that we can return a scoped_ptr.
-  {
-    ConDecLogger* logger = new ConDecLogger(&constructed);
-    scoped_ptr<ConDecLogger> scoper(logger);
-    EXPECT_EQ(1, constructed);
-
-    PassThru(std::move(scoper));
-    EXPECT_FALSE(scoper.get());
-  }
-  EXPECT_EQ(0, constructed);
-
-  // Test uncaught return type not leak.
-  {
-    ConDecLogger* logger = new ConDecLogger(&constructed);
-    scoped_ptr<ConDecLogger> scoper(logger);
-    EXPECT_EQ(1, constructed);
-
-    // Should auto-destruct logger by end of scope.
-    PassThru(std::move(scoper));
-    EXPECT_FALSE(scoper.get());
-  }
-  EXPECT_EQ(0, constructed);
-
-  // Call TestReturnOfType() so the compiler doesn't warn for an unused
-  // function.
-  {
-    TestReturnOfType(&constructed);
-  }
-  EXPECT_EQ(0, constructed);
-}
-
-TEST(ScopedPtrTest, CustomDeleter) {
-  double dummy_value;  // Custom deleter never touches this value.
-  int deletes = 0;
-  int alternate_deletes = 0;
-
-  // Normal delete support.
-  {
-    deletes = 0;
-    scoped_ptr<double, CountingDeleter> scoper(&dummy_value,
-                                               CountingDeleter(&deletes));
-    EXPECT_EQ(0, deletes);
-    EXPECT_TRUE(scoper.get());
-  }
-  EXPECT_EQ(1, deletes);
-
-  // Test reset() and release().
-  deletes = 0;
-  {
-    scoped_ptr<double, CountingDeleter> scoper(NULL,
-                                               CountingDeleter(&deletes));
-    EXPECT_FALSE(scoper.get());
-    EXPECT_FALSE(scoper.release());
-    EXPECT_FALSE(scoper.get());
-    scoper.reset();
-    EXPECT_FALSE(scoper.get());
-    EXPECT_EQ(0, deletes);
-
-    scoper.reset(&dummy_value);
-    scoper.reset();
-    EXPECT_EQ(1, deletes);
-
-    scoper.reset(&dummy_value);
-    EXPECT_EQ(&dummy_value, scoper.release());
-  }
-  EXPECT_EQ(1, deletes);
-
-  // Test get_deleter().
-  deletes = 0;
-  alternate_deletes = 0;
-  {
-    scoped_ptr<double, CountingDeleter> scoper(&dummy_value,
-                                               CountingDeleter(&deletes));
-    // Call deleter manually.
-    EXPECT_EQ(0, deletes);
-    scoper.get_deleter()(&dummy_value);
-    EXPECT_EQ(1, deletes);
-
-    // Deleter is still there after reset.
-    scoper.reset();
-    EXPECT_EQ(2, deletes);
-    scoper.get_deleter()(&dummy_value);
-    EXPECT_EQ(3, deletes);
-
-    // Deleter can be assigned into (matches C++11 unique_ptr<> spec).
-    scoper.get_deleter() = CountingDeleter(&alternate_deletes);
-    scoper.reset(&dummy_value);
-    EXPECT_EQ(0, alternate_deletes);
-
-  }
-  EXPECT_EQ(3, deletes);
-  EXPECT_EQ(1, alternate_deletes);
-
-  // Test operator= deleter support.
-  deletes = 0;
-  alternate_deletes = 0;
-  {
-    double dummy_value2;
-    scoped_ptr<double, CountingDeleter> scoper(&dummy_value,
-                                               CountingDeleter(&deletes));
-    scoped_ptr<double, CountingDeleter> scoper2(
-        &dummy_value2,
-        CountingDeleter(&alternate_deletes));
-    EXPECT_EQ(0, deletes);
-    EXPECT_EQ(0, alternate_deletes);
-
-    // Pass the second deleter through a constructor and an operator=. Then
-    // reinitialize the empty scopers to ensure that each one is deleting
-    // properly.
-    scoped_ptr<double, CountingDeleter> scoper3(std::move(scoper2));
-    scoper = std::move(scoper3);
-    EXPECT_EQ(1, deletes);
-
-    scoper2.reset(&dummy_value2);
-    scoper3.reset(&dummy_value2);
-    EXPECT_EQ(0, alternate_deletes);
-
-  }
-  EXPECT_EQ(1, deletes);
-  EXPECT_EQ(3, alternate_deletes);
-
-  // Test swap(), and type-safe Boolean.
-  {
-    scoped_ptr<double, CountingDeleter> scoper1(NULL,
-                                                CountingDeleter(&deletes));
-    scoped_ptr<double, CountingDeleter> scoper2(NULL,
-                                                CountingDeleter(&deletes));
-    EXPECT_TRUE(scoper1.get() == scoper2.get());
-    EXPECT_FALSE(scoper1.get() != scoper2.get());
-
-    scoper1.reset(&dummy_value);
-    EXPECT_TRUE(scoper1);
-    EXPECT_EQ(&dummy_value, scoper1.get());
-    EXPECT_FALSE(scoper2);
-    EXPECT_FALSE(scoper2.get());
-    EXPECT_FALSE(scoper1.get() == scoper2.get());
-    EXPECT_TRUE(scoper1.get() != scoper2.get());
-
-    scoper2.swap(scoper1);
-    EXPECT_EQ(&dummy_value, scoper2.get());
-    EXPECT_FALSE(scoper1.get());
-    EXPECT_FALSE(scoper1.get() == scoper2.get());
-    EXPECT_TRUE(scoper1.get() != scoper2.get());
-  }
-}
-
-// Sanity check test for overloaded new and delete operators. Does not do full
-// coverage of reset/release/move operations as that is redundant with the
-// above.
-TEST(ScopedPtrTest, OverloadedNewAndDelete) {
-  {
-    OverloadedNewAndDelete::ResetCounters();
-    scoped_ptr<OverloadedNewAndDelete> scoper(new OverloadedNewAndDelete());
-    EXPECT_TRUE(scoper.get());
-
-    scoped_ptr<OverloadedNewAndDelete> scoper2(std::move(scoper));
-  }
-  EXPECT_EQ(1, OverloadedNewAndDelete::delete_count());
-  EXPECT_EQ(1, OverloadedNewAndDelete::new_count());
-}
-
-scoped_ptr<int> NullIntReturn() {
-  return nullptr;
-}
-
-TEST(ScopedPtrTest, Nullptr) {
-  scoped_ptr<int> scoper1(nullptr);
-  scoped_ptr<int> scoper2(new int);
-  scoper2 = nullptr;
-  scoped_ptr<int> scoper3(NullIntReturn());
-  scoped_ptr<int> scoper4 = NullIntReturn();
-  EXPECT_EQ(nullptr, scoper1.get());
-  EXPECT_EQ(nullptr, scoper2.get());
-  EXPECT_EQ(nullptr, scoper3.get());
-  EXPECT_EQ(nullptr, scoper4.get());
-}
-
-scoped_ptr<int[]> NullIntArrayReturn() {
-  return nullptr;
-}
-
-TEST(ScopedPtrTest, NullptrArray) {
-  scoped_ptr<int[]> scoper1(nullptr);
-  scoped_ptr<int[]> scoper2(new int[3]);
-  scoper2 = nullptr;
-  scoped_ptr<int[]> scoper3(NullIntArrayReturn());
-  scoped_ptr<int[]> scoper4 = NullIntArrayReturn();
-  EXPECT_EQ(nullptr, scoper1.get());
-  EXPECT_EQ(nullptr, scoper2.get());
-  EXPECT_EQ(nullptr, scoper3.get());
-  EXPECT_EQ(nullptr, scoper4.get());
-}
-
-class Super {};
-class Sub : public Super {};
-
-scoped_ptr<Sub> SubClassReturn() {
-  return make_scoped_ptr(new Sub);
-}
-
-TEST(ScopedPtrTest, Conversion) {
-  scoped_ptr<Sub> sub1(new Sub);
-  scoped_ptr<Sub> sub2(new Sub);
-
-  // Upcast with move works.
-  scoped_ptr<Super> super1 = std::move(sub1);
-  super1 = std::move(sub2);
-
-  // Upcast with an rvalue works.
-  scoped_ptr<Super> super2 = SubClassReturn();
-  super2 = SubClassReturn();
-}
-
-// Logging a scoped_ptr<T> to an ostream shouldn't convert it to a boolean
-// value first.
-TEST(ScopedPtrTest, LoggingDoesntConvertToBoolean) {
-  scoped_ptr<int> x(new int);
-  std::stringstream s1;
-  s1 << x;
-
-  std::stringstream s2;
-  s2 << x.get();
-
-  EXPECT_EQ(s2.str(), s1.str());
-}
-
-TEST(ScopedPtrTest, ReferenceCycle) {
-  struct StructB;
-  struct StructA {
-    scoped_ptr<StructB> b;
-  };
-
-  struct StructB {
-    scoped_ptr<StructA> a;
-  };
-
-  // Create a reference cycle.
-  StructA* a = new StructA;
-  a->b.reset(new StructB);
-  a->b->a.reset(a);
-
-  // Break the cycle by calling reset(). This will cause |a| (and hence, |a->b|)
-  // to be deleted before the call to reset() returns. This tests that the
-  // implementation of scoped_ptr::reset() doesn't access |this| after it
-  // deletes the underlying pointer. This behaviour is consistent with the
-  // definition of unique_ptr::reset in C++11.
-  a->b.reset();
-
-  // Go again, but this time, break the cycle by invoking |a|'s destructor. This
-  // tests that the implementation of ~scoped_ptr doesn't infinitely recurse
-  // into the destructors of |a| and |a->b|. Note, deleting |a| instead will
-  // cause |a| to be double-free'd because |a->b| owns |a| and deletes it via
-  // its destructor.
-  a = new StructA;
-  a->b.reset(new StructB);
-  a->b->a.reset(a);
-  a->~StructA();
-}
-
-TEST(ScopedPtrTest, Operators) {
-  struct Parent {};
-  struct Child : public Parent {};
-
-  scoped_ptr<Parent> p(new Parent);
-  scoped_ptr<Parent> p2(new Parent);
-  scoped_ptr<Child> c(new Child);
-  scoped_ptr<Parent> pnull;
-
-  // Operator==.
-  EXPECT_TRUE(p == p);
-  EXPECT_FALSE(p == c);
-  EXPECT_FALSE(p == p2);
-  EXPECT_FALSE(p == pnull);
-
-  EXPECT_FALSE(p == nullptr);
-  EXPECT_FALSE(nullptr == p);
-  EXPECT_TRUE(pnull == nullptr);
-  EXPECT_TRUE(nullptr == pnull);
-
-  // Operator!=.
-  EXPECT_FALSE(p != p);
-  EXPECT_TRUE(p != c);
-  EXPECT_TRUE(p != p2);
-  EXPECT_TRUE(p != pnull);
-
-  EXPECT_TRUE(p != nullptr);
-  EXPECT_TRUE(nullptr != p);
-  EXPECT_FALSE(pnull != nullptr);
-  EXPECT_FALSE(nullptr != pnull);
-
-  // Compare two scoped_ptr<T>.
-  EXPECT_EQ(p.get() < p2.get(), p < p2);
-  EXPECT_EQ(p.get() <= p2.get(), p <= p2);
-  EXPECT_EQ(p.get() > p2.get(), p > p2);
-  EXPECT_EQ(p.get() >= p2.get(), p >= p2);
-  EXPECT_EQ(p2.get() < p.get(), p2 < p);
-  EXPECT_EQ(p2.get() <= p.get(), p2 <= p);
-  EXPECT_EQ(p2.get() > p.get(), p2 > p);
-  EXPECT_EQ(p2.get() >= p.get(), p2 >= p);
-
-  // And convertible scoped_ptr<T> and scoped_ptr<U>.
-  EXPECT_EQ(p.get() < c.get(), p < c);
-  EXPECT_EQ(p.get() <= c.get(), p <= c);
-  EXPECT_EQ(p.get() > c.get(), p > c);
-  EXPECT_EQ(p.get() >= c.get(), p >= c);
-  EXPECT_EQ(c.get() < p.get(), c < p);
-  EXPECT_EQ(c.get() <= p.get(), c <= p);
-  EXPECT_EQ(c.get() > p.get(), c > p);
-  EXPECT_EQ(c.get() >= p.get(), c >= p);
-
-  // Compare to nullptr.
-  EXPECT_TRUE(p > nullptr);
-  EXPECT_FALSE(nullptr > p);
-  EXPECT_FALSE(pnull > nullptr);
-  EXPECT_FALSE(nullptr > pnull);
-
-  EXPECT_TRUE(p >= nullptr);
-  EXPECT_FALSE(nullptr >= p);
-  EXPECT_TRUE(pnull >= nullptr);
-  EXPECT_TRUE(nullptr >= pnull);
-
-  EXPECT_FALSE(p < nullptr);
-  EXPECT_TRUE(nullptr < p);
-  EXPECT_FALSE(pnull < nullptr);
-  EXPECT_FALSE(nullptr < pnull);
-
-  EXPECT_FALSE(p <= nullptr);
-  EXPECT_TRUE(nullptr <= p);
-  EXPECT_TRUE(pnull <= nullptr);
-  EXPECT_TRUE(nullptr <= pnull);
-};
-
-TEST(ScopedPtrTest, ArrayOperators) {
-  struct Parent {};
-  struct Child : public Parent {};
-
-  scoped_ptr<Parent[]> p(new Parent[1]);
-  scoped_ptr<Parent[]> p2(new Parent[1]);
-  scoped_ptr<Child[]> c(new Child[1]);
-  scoped_ptr<Parent[]> pnull;
-
-  // Operator==.
-  EXPECT_TRUE(p == p);
-  EXPECT_FALSE(p == c);
-  EXPECT_FALSE(p == p2);
-  EXPECT_FALSE(p == pnull);
-
-  EXPECT_FALSE(p == nullptr);
-  EXPECT_FALSE(nullptr == p);
-  EXPECT_TRUE(pnull == nullptr);
-  EXPECT_TRUE(nullptr == pnull);
-
-  // Operator!=.
-  EXPECT_FALSE(p != p);
-  EXPECT_TRUE(p != c);
-  EXPECT_TRUE(p != p2);
-  EXPECT_TRUE(p != pnull);
-
-  EXPECT_TRUE(p != nullptr);
-  EXPECT_TRUE(nullptr != p);
-  EXPECT_FALSE(pnull != nullptr);
-  EXPECT_FALSE(nullptr != pnull);
-
-  // Compare two scoped_ptr<T>.
-  EXPECT_EQ(p.get() < p2.get(), p < p2);
-  EXPECT_EQ(p.get() <= p2.get(), p <= p2);
-  EXPECT_EQ(p.get() > p2.get(), p > p2);
-  EXPECT_EQ(p.get() >= p2.get(), p >= p2);
-  EXPECT_EQ(p2.get() < p.get(), p2 < p);
-  EXPECT_EQ(p2.get() <= p.get(), p2 <= p);
-  EXPECT_EQ(p2.get() > p.get(), p2 > p);
-  EXPECT_EQ(p2.get() >= p.get(), p2 >= p);
-
-  // And convertible scoped_ptr<T> and scoped_ptr<U>.
-  EXPECT_EQ(p.get() < c.get(), p < c);
-  EXPECT_EQ(p.get() <= c.get(), p <= c);
-  EXPECT_EQ(p.get() > c.get(), p > c);
-  EXPECT_EQ(p.get() >= c.get(), p >= c);
-  EXPECT_EQ(c.get() < p.get(), c < p);
-  EXPECT_EQ(c.get() <= p.get(), c <= p);
-  EXPECT_EQ(c.get() > p.get(), c > p);
-  EXPECT_EQ(c.get() >= p.get(), c >= p);
-
-  // Compare to nullptr.
-  EXPECT_TRUE(p > nullptr);
-  EXPECT_FALSE(nullptr > p);
-  EXPECT_FALSE(pnull > nullptr);
-  EXPECT_FALSE(nullptr > pnull);
-
-  EXPECT_TRUE(p >= nullptr);
-  EXPECT_FALSE(nullptr >= p);
-  EXPECT_TRUE(pnull >= nullptr);
-  EXPECT_TRUE(nullptr >= pnull);
-
-  EXPECT_FALSE(p < nullptr);
-  EXPECT_TRUE(nullptr < p);
-  EXPECT_FALSE(pnull < nullptr);
-  EXPECT_FALSE(nullptr < pnull);
-
-  EXPECT_FALSE(p <= nullptr);
-  EXPECT_TRUE(nullptr <= p);
-  EXPECT_TRUE(pnull <= nullptr);
-  EXPECT_TRUE(nullptr <= pnull);
-}
diff --git a/base/memory/scoped_ptr_unittest.nc b/base/memory/scoped_ptr_unittest.nc
deleted file mode 100644
index 10b45a1..0000000
--- a/base/memory/scoped_ptr_unittest.nc
+++ /dev/null
@@ -1,119 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// This is a "No Compile Test" suite.
-// http://dev.chromium.org/developers/testing/no-compile-tests
-
-#include "base/memory/scoped_ptr.h"
-
-#include <utility>
-
-#include "base/macros.h"
-#include "base/memory/ref_counted.h"
-
-namespace {
-
-class Parent {
-};
-
-class Child : public Parent {
-};
-
-class RefCountedClass : public base::RefCountedThreadSafe<RefCountedClass> {
-};
-
-}  // namespace
-
-#if defined(NCTEST_NO_PASS_DOWNCAST)  // [r"fatal error: no viable conversion from returned value of type 'scoped_ptr<\(anonymous namespace\)::Parent>' to function return type 'scoped_ptr<\(anonymous namespace\)::Child>'"]
-
-scoped_ptr<Child> DowncastUsingPassAs(scoped_ptr<Parent> object) {
-  return object;
-}
-
-#elif defined(NCTEST_NO_REF_COUNTED_SCOPED_PTR)  // [r"fatal error: static_assert failed \"T is a refcounted type and needs a scoped_refptr\""]
-
-// scoped_ptr<> should not work for ref-counted objects.
-void WontCompile() {
-  scoped_ptr<RefCountedClass> x;
-}
-
-#elif defined(NCTEST_NO_ARRAY_WITH_SIZE)  // [r"fatal error: static_assert failed \"scoped_ptr doesn't support array with size\""]
-
-void WontCompile() {
-  scoped_ptr<int[10]> x;
-}
-
-#elif defined(NCTEST_NO_PASS_FROM_ARRAY)  // [r"fatal error: no viable overloaded '='"]
-
-void WontCompile() {
-  scoped_ptr<int[]> a;
-  scoped_ptr<int*> b;
-  b = std::move(a);
-}
-
-#elif defined(NCTEST_NO_PASS_TO_ARRAY)  // [r"fatal error: no viable overloaded '='"]
-
-void WontCompile() {
-  scoped_ptr<int*> a;
-  scoped_ptr<int[]> b;
-  b = std::move(a);
-}
-
-#elif defined(NCTEST_NO_CONSTRUCT_FROM_ARRAY)  // [r"fatal error: no matching constructor for initialization of 'scoped_ptr<int \*>'"]
-
-void WontCompile() {
-  scoped_ptr<int[]> a;
-  scoped_ptr<int*> b(std::move(a));
-}
-
-#elif defined(NCTEST_NO_CONSTRUCT_TO_ARRAY)  // [r"fatal error: no matching constructor for initialization of 'scoped_ptr<int \[\]>'"]
-
-void WontCompile() {
-  scoped_ptr<int*> a;
-  scoped_ptr<int[]> b(std::move(a));
-}
-
-#elif defined(NCTEST_NO_CONSTRUCT_SCOPED_PTR_ARRAY_FROM_NULL)  // [r"is ambiguous"]
-
-void WontCompile() {
-  scoped_ptr<int[]> x(NULL);
-}
-
-#elif defined(NCTEST_NO_CONSTRUCT_SCOPED_PTR_ARRAY_FROM_DERIVED)  // [r"fatal error: calling a private constructor of class 'scoped_ptr<\(anonymous namespace\)::Parent \[\], std::default_delete<\(anonymous namespace\)::Parent \[\]> >'"]
-
-void WontCompile() {
-  scoped_ptr<Parent[]> x(new Child[1]);
-}
-
-#elif defined(NCTEST_NO_RESET_SCOPED_PTR_ARRAY_FROM_NULL)  // [r"is ambiguous"]
-
-void WontCompile() {
-  scoped_ptr<int[]> x;
-  x.reset(NULL);
-}
-
-#elif defined(NCTEST_NO_RESET_SCOPED_PTR_ARRAY_FROM_DERIVED)  // [r"fatal error: 'reset' is a private member of 'scoped_ptr<\(anonymous namespace\)::Parent \[\], std::default_delete<\(anonymous namespace\)::Parent \[\]> >'"]
-
-void WontCompile() {
-  scoped_ptr<Parent[]> x;
-  x.reset(new Child[1]);
-}
-
-#elif defined(NCTEST_NO_DELETER_REFERENCE)  // [r"fatal error: base specifier must name a class"]
-
-struct Deleter {
-  void operator()(int*) {}
-};
-
-// Current implementation doesn't support Deleter Reference types. Enabling
-// support would require changes to the behavior of the constructors to match
-// including the use of SFINAE to discard the type-converting constructor
-// as per C++11 20.7.1.2.1.19.
-void WontCompile() {
-  Deleter d;
-  int n;
-  scoped_ptr<int*, Deleter&> a(&n, d);
-}
-
-#endif
diff --git a/base/memory/shared_memory.h b/base/memory/shared_memory.h
index a94b399..13238aa 100644
--- a/base/memory/shared_memory.h
+++ b/base/memory/shared_memory.h
@@ -82,15 +82,6 @@
   // that |read_only| matches the permissions of the handle.
   SharedMemory(const SharedMemoryHandle& handle, bool read_only);
 
-#if defined(OS_WIN)
-  // Create a new SharedMemory object from an existing, open
-  // shared memory file that was created by a remote process and not shared
-  // to the current process.
-  SharedMemory(const SharedMemoryHandle& handle,
-               bool read_only,
-               ProcessHandle process);
-#endif
-
   // Closes any open files.
   ~SharedMemory();
 
diff --git a/base/memory/shared_memory_android.cc b/base/memory/shared_memory_android.cc
new file mode 100644
index 0000000..dfc8e6f
--- /dev/null
+++ b/base/memory/shared_memory_android.cc
@@ -0,0 +1,72 @@
+// Copyright (c) 2011 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/memory/shared_memory.h"
+
+#include <stddef.h>
+#include <sys/mman.h>
+#include <unistd.h>
+
+#include <limits>
+
+#include "base/logging.h"
+
+#if defined(__ANDROID__)
+#include <cutils/ashmem.h>
+#else
+#include "third_party/ashmem/ashmem.h"
+#endif
+
+namespace base {
+
+// For Android, we use ashmem to implement SharedMemory. ashmem_create_region
+// will automatically pin the region. We never explicitly call pin/unpin. When
+// all the file descriptors from different processes associated with the region
+// are closed, the memory buffer will go away.
+
+bool SharedMemory::Create(const SharedMemoryCreateOptions& options) {
+  DCHECK_EQ(-1, mapped_file_);
+
+  if (options.size > static_cast<size_t>(std::numeric_limits<int>::max()))
+    return false;
+
+  // "name" is just a label in ashmem. It is visible in /proc/pid/maps.
+  mapped_file_ = ashmem_create_region(
+      options.name_deprecated == NULL ? "" : options.name_deprecated->c_str(),
+      options.size);
+  if (-1 == mapped_file_) {
+    DLOG(ERROR) << "Shared memory creation failed";
+    return false;
+  }
+
+  int err = ashmem_set_prot_region(mapped_file_,
+                                   PROT_READ | PROT_WRITE | PROT_EXEC);
+  if (err < 0) {
+    DLOG(ERROR) << "Error " << err << " when setting protection of ashmem";
+    return false;
+  }
+
+  // Android doesn't appear to have a way to drop write access on an ashmem
+  // segment for a single descriptor.  http://crbug.com/320865
+  readonly_mapped_file_ = dup(mapped_file_);
+  if (-1 == readonly_mapped_file_) {
+    DPLOG(ERROR) << "dup() failed";
+    return false;
+  }
+
+  requested_size_ = options.size;
+
+  return true;
+}
+
+bool SharedMemory::Delete(const std::string& /* name */) {
+  // Like on Windows, this is intentionally returning true, as ashmem will
+  // automatically release the resource when all FDs on it are closed.
+  return true;
+}
+
+bool SharedMemory::Open(const std::string& /* name */, bool /* read_only */) {
+  // ashmem doesn't support name mapping.
+  NOTIMPLEMENTED();
+  return false;
+}
+
+}  // namespace base
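
A rough caller-side sketch of the ashmem path (illustrative; the calls are the standard base::SharedMemory API):

#include <string.h>

#include "base/memory/shared_memory.h"

void AshmemSketch() {
  base::SharedMemory shm;
  base::SharedMemoryCreateOptions options;
  options.size = 4096;
  if (shm.Create(options) && shm.Map(options.size)) {
    // memory() points at a 4 KiB ashmem-backed region; the label shown in
    // /proc/<pid>/maps is "" because name_deprecated is left unset.
    memset(shm.memory(), 0, options.size);
  }
  // When the last FD on the region is closed (here, in ~SharedMemory), the
  // kernel releases the buffer; no explicit Delete() call is required.
}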
diff --git a/base/memory/shared_memory_handle.h b/base/memory/shared_memory_handle.h
new file mode 100644
index 0000000..5befcdd
--- /dev/null
+++ b/base/memory/shared_memory_handle.h
@@ -0,0 +1,211 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_MEMORY_SHARED_MEMORY_HANDLE_H_
+#define BASE_MEMORY_SHARED_MEMORY_HANDLE_H_
+
+#include <stddef.h>
+
+#include "build/build_config.h"
+
+#if defined(OS_WIN)
+#include <windows.h>
+#include "base/process/process_handle.h"
+#elif defined(OS_MACOSX) && !defined(OS_IOS)
+#include <mach/mach.h>
+#include <sys/types.h>
+#include "base/base_export.h"
+#include "base/file_descriptor_posix.h"
+#include "base/macros.h"
+#include "base/process/process_handle.h"
+#elif defined(OS_POSIX)
+#include <sys/types.h>
+#include "base/file_descriptor_posix.h"
+#endif
+
+namespace base {
+
+class Pickle;
+
+// SharedMemoryHandle is a platform specific type which represents
+// the underlying OS handle to a shared memory segment.
+#if defined(OS_POSIX) && !(defined(OS_MACOSX) && !defined(OS_IOS))
+typedef FileDescriptor SharedMemoryHandle;
+#elif defined(OS_WIN)
+class BASE_EXPORT SharedMemoryHandle {
+ public:
+  // The default constructor returns an invalid SharedMemoryHandle.
+  SharedMemoryHandle();
+  SharedMemoryHandle(HANDLE h, base::ProcessId pid);
+
+  // Standard copy constructor. The new instance shares the underlying OS
+  // primitives.
+  SharedMemoryHandle(const SharedMemoryHandle& handle);
+
+  // Standard assignment operator. The updated instance shares the underlying
+  // OS primitives.
+  SharedMemoryHandle& operator=(const SharedMemoryHandle& handle);
+
+  // Comparison operators.
+  bool operator==(const SharedMemoryHandle& handle) const;
+  bool operator!=(const SharedMemoryHandle& handle) const;
+
+  // Closes the underlying OS resources.
+  void Close() const;
+
+  // Whether the underlying OS primitive is valid.
+  bool IsValid() const;
+
+  // Whether |pid_| is the same as the current process's id.
+  bool BelongsToCurrentProcess() const;
+
+  // Whether handle_ needs to be duplicated into the destination process when
+  // an instance of this class is passed over a Chrome IPC channel.
+  bool NeedsBrokering() const;
+
+  void SetOwnershipPassesToIPC(bool ownership_passes);
+  bool OwnershipPassesToIPC() const;
+
+  HANDLE GetHandle() const;
+  base::ProcessId GetPID() const;
+
+ private:
+  HANDLE handle_;
+
+  // The process in which |handle_| is valid and can be used. If |handle_| is
+  // invalid, this will be kNullProcessId.
+  base::ProcessId pid_;
+
+  // Whether passing this object as a parameter to an IPC message passes
+  // ownership of |handle_| to the IPC stack. This is meant to mimic the
+  // behavior of the |auto_close| parameter of FileDescriptor. This member only
+  // affects attachment-brokered SharedMemoryHandles.
+  // Defaults to |false|.
+  bool ownership_passes_to_ipc_;
+};
+#else
+class BASE_EXPORT SharedMemoryHandle {
+ public:
+  // The values of these enums must not change, as they are used by the
+  // histogram OSX.SharedMemory.Mechanism.
+  enum Type {
+    // The SharedMemoryHandle is backed by a POSIX fd.
+    POSIX,
+    // The SharedMemoryHandle is backed by the Mach primitive "memory object".
+    MACH,
+  };
+  static const int TypeMax = 2;
+
+  // The format that should be used to transmit |Type| over the wire.
+  typedef int TypeWireFormat;
+
+  // The default constructor returns an invalid SharedMemoryHandle.
+  SharedMemoryHandle();
+
+  // Constructs a SharedMemoryHandle backed by the components of a
+  // FileDescriptor. The newly created instance has the same ownership semantics
+  // as base::FileDescriptor. This typically means that the SharedMemoryHandle
+  // takes ownership of the |fd| if |auto_close| is true. Unfortunately, it's
+  // common for existing code to make shallow copies of SharedMemoryHandle, and
+  // the one that is finally passed into a base::SharedMemory is the one that
+  // "consumes" the fd.
+  explicit SharedMemoryHandle(const base::FileDescriptor& file_descriptor);
+  SharedMemoryHandle(int fd, bool auto_close);
+
+  // Makes a Mach-based SharedMemoryHandle of the given size. On error,
+  // subsequent calls to IsValid() return false.
+  explicit SharedMemoryHandle(mach_vm_size_t size);
+
+  // Makes a Mach-based SharedMemoryHandle from |memory_object|, a named entry
+  // in the task with process id |pid|. The memory region has size |size|.
+  SharedMemoryHandle(mach_port_t memory_object,
+                     mach_vm_size_t size,
+                     base::ProcessId pid);
+
+  // Standard copy constructor. The new instance shares the underlying OS
+  // primitives.
+  SharedMemoryHandle(const SharedMemoryHandle& handle);
+
+  // Standard assignment operator. The updated instance shares the underlying
+  // OS primitives.
+  SharedMemoryHandle& operator=(const SharedMemoryHandle& handle);
+
+  // Duplicates the underlying OS resources.
+  SharedMemoryHandle Duplicate() const;
+
+  // Comparison operators.
+  bool operator==(const SharedMemoryHandle& handle) const;
+  bool operator!=(const SharedMemoryHandle& handle) const;
+
+  // Returns the type.
+  Type GetType() const;
+
+  // Whether the underlying OS primitive is valid. Once the SharedMemoryHandle
+  // is backed by a valid OS primitive, it becomes immutable.
+  bool IsValid() const;
+
+  // Sets the POSIX fd backing the SharedMemoryHandle. Requires that the
+  // SharedMemoryHandle be backed by a POSIX fd.
+  void SetFileHandle(int fd, bool auto_close);
+
+  // This method assumes that the SharedMemoryHandle is backed by a POSIX fd.
+  // This will eventually no longer be true, so please avoid adding new
+  // uses of this method.
+  const FileDescriptor GetFileDescriptor() const;
+
+  // Exposed so that the SharedMemoryHandle can be transported between
+  // processes.
+  mach_port_t GetMemoryObject() const;
+
+  // Returns false on a failure to determine the size. On success, populates the
+  // output variable |size|.
+  bool GetSize(size_t* size) const;
+
+  // The SharedMemoryHandle must be valid.
+  // Returns whether the SharedMemoryHandle was successfully mapped into memory.
+  // On success, |memory| is an output variable that contains the start of the
+  // mapped memory.
+  bool MapAt(off_t offset, size_t bytes, void** memory, bool read_only);
+
+  // Closes the underlying OS primitive.
+  void Close() const;
+
+  void SetOwnershipPassesToIPC(bool ownership_passes);
+  bool OwnershipPassesToIPC() const;
+
+ private:
+  // Shared code between copy constructor and operator=.
+  void CopyRelevantData(const SharedMemoryHandle& handle);
+
+  Type type_;
+
+  // Each instance of a SharedMemoryHandle is backed either by a POSIX fd or a
+  // mach port. |type_| determines the backing member.
+  union {
+    FileDescriptor file_descriptor_;
+
+    struct {
+      mach_port_t memory_object_;
+
+      // The size of the shared memory region when |type_| is MACH. Only
+      // relevant if |memory_object_| is not |MACH_PORT_NULL|.
+      mach_vm_size_t size_;
+
+      // The pid of the process in which |memory_object_| is usable. Only
+      // relevant if |memory_object_| is not |MACH_PORT_NULL|.
+      base::ProcessId pid_;
+
+      // Whether passing this object as a parameter to an IPC message passes
+      // ownership of |memory_object_| to the IPC stack. This is meant to mimic
+      // the behavior of the |auto_close| parameter of FileDescriptor.
+      // Defaults to |false|.
+      bool ownership_passes_to_ipc_;
+    };
+  };
+};
+#endif
+
+}  // namespace base
+
+#endif  // BASE_MEMORY_SHARED_MEMORY_HANDLE_H_
diff --git a/base/memory/shared_memory_mac.cc b/base/memory/shared_memory_mac.cc
new file mode 100644
index 0000000..6fc299d
--- /dev/null
+++ b/base/memory/shared_memory_mac.cc
@@ -0,0 +1,488 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/memory/shared_memory.h"
+
+#include <errno.h>
+#include <fcntl.h>
+#include <mach/mach_vm.h>
+#include <stddef.h>
+#include <sys/mman.h>
+#include <sys/stat.h>
+#include <unistd.h>
+
+#include "base/files/file_util.h"
+#include "base/files/scoped_file.h"
+#include "base/logging.h"
+#include "base/mac/mac_util.h"
+#include "base/mac/scoped_mach_vm.h"
+#include "base/metrics/field_trial.h"
+#include "base/metrics/histogram_macros.h"
+#include "base/posix/eintr_wrapper.h"
+#include "base/posix/safe_strerror.h"
+#include "base/process/process_metrics.h"
+#include "base/profiler/scoped_tracker.h"
+#include "base/scoped_generic.h"
+#include "base/strings/utf_string_conversions.h"
+#include "build/build_config.h"
+
+#if defined(OS_MACOSX)
+#include "base/mac/foundation_util.h"
+#endif  // OS_MACOSX
+
+namespace base {
+
+namespace {
+
+const char kTrialName[] = "MacMemoryMechanism";
+const char kTrialMach[] = "Mach";
+const char kTrialPosix[] = "Posix";
+
+SharedMemoryHandle::Type GetABTestMechanism() {
+  static bool found_group = false;
+  static SharedMemoryHandle::Type group = SharedMemoryHandle::MACH;
+
+  if (found_group)
+    return group;
+
+  const std::string group_name =
+      base::FieldTrialList::FindFullName(kTrialName);
+  if (group_name == kTrialMach) {
+    group = SharedMemoryHandle::MACH;
+    found_group = true;
+  } else if (group_name == kTrialPosix) {
+    group = SharedMemoryHandle::POSIX;
+    found_group = true;
+  } else {
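+    // The trial group isn't registered yet. Default to MACH, but leave
+    // |found_group| false so the trial is re-checked on the next call.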
+    group = SharedMemoryHandle::MACH;
+  }
+
+  return group;
+}
+
+// Emits a histogram entry indicating which type of SharedMemory was created.
+void EmitMechanism(SharedMemoryHandle::Type type) {
+  UMA_HISTOGRAM_ENUMERATION("OSX.SharedMemory.Mechanism", type,
+                            SharedMemoryHandle::TypeMax);
+}
+
+// Returns whether the operation succeeded.
+// |new_handle| is an output variable, populated on success. The caller takes
+// ownership of the underlying memory object.
+// |handle| is the handle to copy.
+// If |handle| is already mapped, |mapped_addr| is its mapped location.
+// Otherwise, |mapped_addr| should be |nullptr|.
+bool MakeMachSharedMemoryHandleReadOnly(SharedMemoryHandle* new_handle,
+                                        SharedMemoryHandle handle,
+                                        void* mapped_addr) {
+  if (!handle.IsValid())
+    return false;
+
+  size_t size;
+  CHECK(handle.GetSize(&size));
+
+  // Map if necessary.
+  void* temp_addr = mapped_addr;
+  base::mac::ScopedMachVM scoper;
+  if (!temp_addr) {
+    // Intentionally lower current prot and max prot to |VM_PROT_READ|.
+    kern_return_t kr = mach_vm_map(
+        mach_task_self(), reinterpret_cast<mach_vm_address_t*>(&temp_addr),
+        size, 0, VM_FLAGS_ANYWHERE, handle.GetMemoryObject(), 0, FALSE,
+        VM_PROT_READ, VM_PROT_READ, VM_INHERIT_NONE);
+    if (kr != KERN_SUCCESS)
+      return false;
+    scoper.reset(reinterpret_cast<vm_address_t>(temp_addr),
+                 mach_vm_round_page(size));
+  }
+
+  // Make new memory object.
+  mach_port_t named_right;
+  kern_return_t kr = mach_make_memory_entry_64(
+      mach_task_self(), reinterpret_cast<memory_object_size_t*>(&size),
+      reinterpret_cast<memory_object_offset_t>(temp_addr), VM_PROT_READ,
+      &named_right, MACH_PORT_NULL);
+  if (kr != KERN_SUCCESS)
+    return false;
+
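+  // |named_right| is a send right to a read-only memory entry covering the
+  // same pages; ownership transfers to the caller via |new_handle|.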
+  *new_handle = SharedMemoryHandle(named_right, size, base::GetCurrentProcId());
+  return true;
+}
+
+struct ScopedPathUnlinkerTraits {
+  static FilePath* InvalidValue() { return nullptr; }
+
+  static void Free(FilePath* path) {
+    // TODO(erikchen): Remove ScopedTracker below once http://crbug.com/466437
+    // is fixed.
+    tracked_objects::ScopedTracker tracking_profile(
+        FROM_HERE_WITH_EXPLICIT_FUNCTION(
+            "466437 SharedMemory::Create::Unlink"));
+    if (unlink(path->value().c_str()))
+      PLOG(WARNING) << "unlink";
+  }
+};
+
+// Unlinks the FilePath when the object is destroyed.
+typedef ScopedGeneric<FilePath*, ScopedPathUnlinkerTraits> ScopedPathUnlinker;
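+//
+// A minimal usage sketch (illustrative only):
+//   FilePath path = ...;
+//   ScopedPathUnlinker unlinker(&path);
+//   // unlink(path) runs when |unlinker| goes out of scope.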
+
+// Makes a temporary file, fdopens it, and then unlinks it. |fp| is populated
+// with the fdopened FILE. |readonly_fd| is populated with the opened fd if
+// options.share_read_only is true. |path| is populated with the location of
+// the file before it was unlinked.
+// Returns false if there's an unhandled failure.
+bool CreateAnonymousSharedMemory(const SharedMemoryCreateOptions& options,
+                                 ScopedFILE* fp,
+                                 ScopedFD* readonly_fd,
+                                 FilePath* path) {
+  // Q: Why not use the shm_open() etc. APIs?
+  // A: Because they're limited to 4mb on OS X.  FFFFFFFUUUUUUUUUUU
+  FilePath directory;
+  ScopedPathUnlinker path_unlinker;
+  if (GetShmemTempDir(options.executable, &directory)) {
+    // TODO(erikchen): Remove ScopedTracker below once http://crbug.com/466437
+    // is fixed.
+    tracked_objects::ScopedTracker tracking_profile(
+        FROM_HERE_WITH_EXPLICIT_FUNCTION(
+            "466437 SharedMemory::Create::OpenTemporaryFile"));
+    fp->reset(CreateAndOpenTemporaryFileInDir(directory, path));
+
+    // Deleting the file prevents anyone else from mapping it in (making it
+    // private), and prevents the need for cleanup (once the last fd is
+    // closed, it is truly freed).
+    if (*fp)
+      path_unlinker.reset(path);
+  }
+
+  if (*fp) {
+    if (options.share_read_only) {
+      // TODO(erikchen): Remove ScopedTracker below once
+      // http://crbug.com/466437 is fixed.
+      tracked_objects::ScopedTracker tracking_profile(
+          FROM_HERE_WITH_EXPLICIT_FUNCTION(
+              "466437 SharedMemory::Create::OpenReadonly"));
+      // Also open as readonly so that we can ShareReadOnlyToProcess.
+      readonly_fd->reset(HANDLE_EINTR(open(path->value().c_str(), O_RDONLY)));
+      if (!readonly_fd->is_valid()) {
+        DPLOG(ERROR) << "open(\"" << path->value() << "\", O_RDONLY) failed";
+        fp->reset();
+        return false;
+      }
+    }
+  }
+  return true;
+}
+
+}  // namespace
+
+SharedMemoryCreateOptions::SharedMemoryCreateOptions()
+    : type(SharedMemoryHandle::MACH),
+      size(0),
+      executable(false),
+      share_read_only(false) {
+  if (mac::IsOSLionOrLater()) {
+    // A/B test the mechanism. Once the experiment is over, this will always be
+    // set to SharedMemoryHandle::MACH.
+    // http://crbug.com/547261
+    type = GetABTestMechanism();
+  } else {
+    // Mach shared memory isn't supported on OSX 10.6 or older.
+    type = SharedMemoryHandle::POSIX;
+  }
+}
+
+SharedMemory::SharedMemory()
+    : mapped_memory_mechanism_(SharedMemoryHandle::POSIX),
+      readonly_mapped_file_(-1),
+      mapped_size_(0),
+      memory_(NULL),
+      read_only_(false),
+      requested_size_(0) {}
+
+SharedMemory::SharedMemory(const SharedMemoryHandle& handle, bool read_only)
+    : shm_(handle),
+      mapped_memory_mechanism_(SharedMemoryHandle::POSIX),
+      readonly_mapped_file_(-1),
+      mapped_size_(0),
+      memory_(NULL),
+      read_only_(read_only),
+      requested_size_(0) {}
+
+SharedMemory::~SharedMemory() {
+  Unmap();
+  Close();
+}
+
+// static
+bool SharedMemory::IsHandleValid(const SharedMemoryHandle& handle) {
+  return handle.IsValid();
+}
+
+// static
+SharedMemoryHandle SharedMemory::NULLHandle() {
+  return SharedMemoryHandle();
+}
+
+// static
+void SharedMemory::CloseHandle(const SharedMemoryHandle& handle) {
+  handle.Close();
+}
+
+// static
+size_t SharedMemory::GetHandleLimit() {
+  return GetMaxFds();
+}
+
+// static
+SharedMemoryHandle SharedMemory::DuplicateHandle(
+    const SharedMemoryHandle& handle) {
+  return handle.Duplicate();
+}
+
+// static
+int SharedMemory::GetFdFromSharedMemoryHandle(
+    const SharedMemoryHandle& handle) {
+  return handle.GetFileDescriptor().fd;
+}
+
+bool SharedMemory::CreateAndMapAnonymous(size_t size) {
+  return CreateAnonymous(size) && Map(size);
+}
+
+bool SharedMemory::CreateAndMapAnonymousPosix(size_t size) {
+  return CreateAnonymousPosix(size) && Map(size);
+}
+
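+// Forces the POSIX mechanism regardless of the A/B trial, for callers that
+// require an fd-backed region.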
+bool SharedMemory::CreateAnonymousPosix(size_t size) {
+  SharedMemoryCreateOptions options;
+  options.type = SharedMemoryHandle::POSIX;
+  options.size = size;
+  return Create(options);
+}
+
+// static
+bool SharedMemory::GetSizeFromSharedMemoryHandle(
+    const SharedMemoryHandle& handle,
+    size_t* size) {
+  return handle.GetSize(size);
+}
+
+// Chromium mostly only uses unique/private shmem, as specified by an empty
+// name. The exception is in the StatsTable.
+bool SharedMemory::Create(const SharedMemoryCreateOptions& options) {
+  // TODO(erikchen): Remove ScopedTracker below once http://crbug.com/466437
+  // is fixed.
+  tracked_objects::ScopedTracker tracking_profile1(
+      FROM_HERE_WITH_EXPLICIT_FUNCTION(
+          "466437 SharedMemory::Create::Start"));
+  DCHECK(!shm_.IsValid());
+  if (options.size == 0) return false;
+
+  if (options.size > static_cast<size_t>(std::numeric_limits<int>::max()))
+    return false;
+
+  EmitMechanism(options.type);
+
+  if (options.type == SharedMemoryHandle::MACH) {
+    shm_ = SharedMemoryHandle(options.size);
+    requested_size_ = options.size;
+    return shm_.IsValid();
+  }
+
+  // This function theoretically can block on the disk. Both profiling of real
+  // users and local instrumentation shows that this is a real problem.
+  // https://code.google.com/p/chromium/issues/detail?id=466437
+  base::ThreadRestrictions::ScopedAllowIO allow_io;
+
+  ScopedFILE fp;
+  ScopedFD readonly_fd;
+
+  FilePath path;
+  bool result = CreateAnonymousSharedMemory(options, &fp, &readonly_fd, &path);
+  if (!result)
+    return false;
+
+  if (!fp) {
+    PLOG(ERROR) << "Creating shared memory in " << path.value() << " failed";
+    return false;
+  }
+
+  // Get current size.
+  struct stat stat;
+  if (fstat(fileno(fp.get()), &stat) != 0)
+    return false;
+  const size_t current_size = stat.st_size;
+  if (current_size != options.size) {
+    if (HANDLE_EINTR(ftruncate(fileno(fp.get()), options.size)) != 0)
+      return false;
+  }
+  requested_size_ = options.size;
+
+  return PrepareMapFile(std::move(fp), std::move(readonly_fd));
+}
+
+bool SharedMemory::MapAt(off_t offset, size_t bytes) {
+  if (!shm_.IsValid())
+    return false;
+  if (bytes > static_cast<size_t>(std::numeric_limits<int>::max()))
+    return false;
+  if (memory_)
+    return false;
+
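+  // Delegate to the handle, which maps with mmap() or mach_vm_map()
+  // depending on its backing type.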
+  bool success = shm_.MapAt(offset, bytes, &memory_, read_only_);
+  if (success) {
+    mapped_size_ = bytes;
+    DCHECK_EQ(0U, reinterpret_cast<uintptr_t>(memory_) &
+                      (SharedMemory::MAP_MINIMUM_ALIGNMENT - 1));
+    mapped_memory_mechanism_ = shm_.GetType();
+  } else {
+    memory_ = NULL;
+  }
+
+  return success;
+}
+
+bool SharedMemory::Unmap() {
+  if (memory_ == NULL)
+    return false;
+
+  switch (mapped_memory_mechanism_) {
+    case SharedMemoryHandle::POSIX:
+      munmap(memory_, mapped_size_);
+      break;
+    case SharedMemoryHandle::MACH:
+      mach_vm_deallocate(mach_task_self(),
+                         reinterpret_cast<mach_vm_address_t>(memory_),
+                         mapped_size_);
+      break;
+  }
+
+  memory_ = NULL;
+  mapped_size_ = 0;
+  return true;
+}
+
+SharedMemoryHandle SharedMemory::handle() const {
+  switch (shm_.GetType()) {
+    case SharedMemoryHandle::POSIX:
+      return SharedMemoryHandle(shm_.GetFileDescriptor().fd, false);
+    case SharedMemoryHandle::MACH:
+      return shm_;
+  }
+}
+
+void SharedMemory::Close() {
+  // Check the handle's type before it is reset; a default-constructed
+  // handle would report POSIX regardless of the original backing.
+  if (shm_.GetType() == SharedMemoryHandle::POSIX &&
+      readonly_mapped_file_ > 0) {
+    if (IGNORE_EINTR(close(readonly_mapped_file_)) < 0)
+      PLOG(ERROR) << "close";
+    readonly_mapped_file_ = -1;
+  }
+  shm_.Close();
+  shm_ = SharedMemoryHandle();
+}
+
+bool SharedMemory::PrepareMapFile(ScopedFILE fp, ScopedFD readonly_fd) {
+  DCHECK(!shm_.IsValid());
+  DCHECK_EQ(-1, readonly_mapped_file_);
+  if (fp == NULL)
+    return false;
+
+  // This function theoretically can block on the disk, but realistically
+  // the temporary files we create will just go into the buffer cache
+  // and be deleted before they ever make it out to disk.
+  base::ThreadRestrictions::ScopedAllowIO allow_io;
+
+  struct stat st = {};
+  if (fstat(fileno(fp.get()), &st))
+    NOTREACHED();
+  if (readonly_fd.is_valid()) {
+    struct stat readonly_st = {};
+    if (fstat(readonly_fd.get(), &readonly_st))
+      NOTREACHED();
+    if (st.st_dev != readonly_st.st_dev || st.st_ino != readonly_st.st_ino) {
+      LOG(ERROR) << "writable and read-only inodes don't match; bailing";
+      return false;
+    }
+  }
+
+  int mapped_file = HANDLE_EINTR(dup(fileno(fp.get())));
+  if (mapped_file == -1) {
+    if (errno == EMFILE) {
+      LOG(WARNING) << "Shared memory creation failed; out of file descriptors";
+      return false;
+    } else {
+      NOTREACHED() << "Call to dup failed, errno=" << errno;
+    }
+  }
+  shm_ = SharedMemoryHandle(mapped_file, false);
+  readonly_mapped_file_ = readonly_fd.release();
+
+  return true;
+}
+
+bool SharedMemory::ShareToProcessCommon(ProcessHandle process,
+                                        SharedMemoryHandle* new_handle,
+                                        bool close_self,
+                                        ShareMode share_mode) {
+  if (shm_.GetType() == SharedMemoryHandle::MACH) {
+    DCHECK(shm_.IsValid());
+
+    bool success = false;
+    switch (share_mode) {
+      case SHARE_CURRENT_MODE:
+        *new_handle = shm_.Duplicate();
+        success = true;
+        break;
+      case SHARE_READONLY:
+        success = MakeMachSharedMemoryHandleReadOnly(new_handle, shm_, memory_);
+        break;
+    }
+
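+    // Hand ownership of the new right to the IPC layer, mirroring the
+    // |auto_close| semantics of FileDescriptor.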
+    if (success)
+      new_handle->SetOwnershipPassesToIPC(true);
+
+    if (close_self) {
+      Unmap();
+      Close();
+    }
+
+    return success;
+  }
+
+  int handle_to_dup = -1;
+  switch (share_mode) {
+    case SHARE_CURRENT_MODE:
+      handle_to_dup = shm_.GetFileDescriptor().fd;
+      break;
+    case SHARE_READONLY:
+      // We could imagine re-opening the file from /dev/fd, but that can't make
+      // it readonly on Mac: https://codereview.chromium.org/27265002/#msg10
+      CHECK_GE(readonly_mapped_file_, 0);
+      handle_to_dup = readonly_mapped_file_;
+      break;
+  }
+
+  const int new_fd = HANDLE_EINTR(dup(handle_to_dup));
+  if (new_fd < 0) {
+    if (close_self) {
+      Unmap();
+      Close();
+    }
+    DPLOG(ERROR) << "dup() failed.";
+    return false;
+  }
+
+  new_handle->SetFileHandle(new_fd, true);
+
+  if (close_self) {
+    Unmap();
+    Close();
+  }
+
+  return true;
+}
+
+}  // namespace base
diff --git a/base/memory/shared_memory_mac_unittest.cc b/base/memory/shared_memory_mac_unittest.cc
new file mode 100644
index 0000000..bcb1f2b
--- /dev/null
+++ b/base/memory/shared_memory_mac_unittest.cc
@@ -0,0 +1,489 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <mach/mach.h>
+#include <mach/mach_vm.h>
+#include <servers/bootstrap.h>
+#include <stddef.h>
+#include <stdint.h>
+
+#include "base/command_line.h"
+#include "base/mac/mac_util.h"
+#include "base/mac/mach_logging.h"
+#include "base/mac/scoped_mach_port.h"
+#include "base/macros.h"
+#include "base/memory/shared_memory.h"
+#include "base/process/process_handle.h"
+#include "base/rand_util.h"
+#include "base/strings/stringprintf.h"
+#include "base/sys_info.h"
+#include "base/test/multiprocess_test.h"
+#include "base/test/test_timeouts.h"
+#include "testing/multiprocess_func_list.h"
+
+namespace base {
+
+namespace {
+
+// Gets the current and maximum protection levels of the memory region.
+// Returns whether the operation was successful.
+// |current| and |max| are output variables only populated on success.
+bool GetProtections(void* address, size_t size, int* current, int* max) {
+  vm_region_info_t region_info;
+  mach_vm_address_t mem_address = reinterpret_cast<mach_vm_address_t>(address);
+  mach_vm_size_t mem_size = size;
+  vm_region_basic_info_64 basic_info;
+
+  region_info = reinterpret_cast<vm_region_recurse_info_t>(&basic_info);
+  vm_region_flavor_t flavor = VM_REGION_BASIC_INFO_64;
+  memory_object_name_t memory_object;
+  mach_msg_type_number_t count = VM_REGION_BASIC_INFO_COUNT_64;
+
+  kern_return_t kr =
+      mach_vm_region(mach_task_self(), &mem_address, &mem_size, flavor,
+                     region_info, &count, &memory_object);
+  if (kr != KERN_SUCCESS) {
+    MACH_LOG(ERROR, kr) << "Failed to get region info.";
+    return false;
+  }
+
+  *current = basic_info.protection;
+  *max = basic_info.max_protection;
+  return true;
+}
+
+// Creates a new SharedMemory with the given |size|, filled with 'a'.
+scoped_ptr<SharedMemory> CreateSharedMemory(int size) {
+  SharedMemoryHandle shm(size);
+  if (!shm.IsValid()) {
+    LOG(ERROR) << "Failed to make SharedMemoryHandle";
+    return nullptr;
+  }
+  scoped_ptr<SharedMemory> shared_memory(new SharedMemory(shm, false));
+  shared_memory->Map(size);
+  memset(shared_memory->memory(), 'a', size);
+  return shared_memory;
+}
+
+static const std::string g_service_switch_name = "service_name";
+
+// Structs used to pass a mach port from client to server.
+struct MachSendPortMessage {
+  mach_msg_header_t header;
+  mach_msg_body_t body;
+  mach_msg_port_descriptor_t data;
+};
+struct MachReceivePortMessage {
+  mach_msg_header_t header;
+  mach_msg_body_t body;
+  mach_msg_port_descriptor_t data;
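+  // The kernel appends a trailer to received messages, so the receive buffer
+  // must reserve space for it; the send-side struct has no trailer.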
+  mach_msg_trailer_t trailer;
+};
+
+// Makes the current process into a Mach Server with the given |service_name|.
+mach_port_t BecomeMachServer(const char* service_name) {
+  mach_port_t port;
+  kern_return_t kr = bootstrap_check_in(bootstrap_port, service_name, &port);
+  MACH_CHECK(kr == KERN_SUCCESS, kr) << "BecomeMachServer";
+  return port;
+}
+
+// Returns the mach port for the Mach Server with the given |service_name|.
+mach_port_t LookupServer(const char* service_name) {
+  mach_port_t server_port;
+  kern_return_t kr =
+      bootstrap_look_up(bootstrap_port, service_name, &server_port);
+  MACH_CHECK(kr == KERN_SUCCESS, kr) << "LookupServer";
+  return server_port;
+}
+
+mach_port_t MakeReceivingPort() {
+  mach_port_t client_port;
+  kern_return_t kr =
+      mach_port_allocate(mach_task_self(),         // our task is acquiring
+                         MACH_PORT_RIGHT_RECEIVE,  // a new receive right
+                         &client_port);            // with this name
+  MACH_CHECK(kr == KERN_SUCCESS, kr) << "MakeReceivingPort";
+  return client_port;
+}
+
+// Blocks until a mach message is sent to |server_port|. This mach message
+// must contain a mach port. Returns that mach port.
+mach_port_t ReceiveMachPort(mach_port_t port_to_listen_on) {
+  MachReceivePortMessage recv_msg;
+  mach_msg_header_t* recv_hdr = &(recv_msg.header);
+  recv_hdr->msgh_local_port = port_to_listen_on;
+  recv_hdr->msgh_size = sizeof(recv_msg);
+  kern_return_t kr =
+      mach_msg(recv_hdr,               // message buffer
+               MACH_RCV_MSG,           // option indicating service
+               0,                      // send size
+               recv_hdr->msgh_size,    // size of header + body
+               port_to_listen_on,      // receive name
+               MACH_MSG_TIMEOUT_NONE,  // no timeout, wait forever
+               MACH_PORT_NULL);        // no notification port
+  MACH_CHECK(kr == KERN_SUCCESS, kr) << "ReceiveMachPort";
+  mach_port_t other_task_port = recv_msg.data.name;
+  return other_task_port;
+}
+
+// Passes a copy of the send right of |port_to_send| to |receiving_port|.
+void SendMachPort(mach_port_t receiving_port,
+                  mach_port_t port_to_send,
+                  int disposition) {
+  MachSendPortMessage send_msg;
+  mach_msg_header_t* send_hdr;
+  send_hdr = &(send_msg.header);
+  send_hdr->msgh_bits =
+      MACH_MSGH_BITS(MACH_MSG_TYPE_COPY_SEND, 0) | MACH_MSGH_BITS_COMPLEX;
+  send_hdr->msgh_size = sizeof(send_msg);
+  send_hdr->msgh_remote_port = receiving_port;
+  send_hdr->msgh_local_port = MACH_PORT_NULL;
+  send_hdr->msgh_reserved = 0;
+  send_hdr->msgh_id = 0;
+  send_msg.body.msgh_descriptor_count = 1;
+  send_msg.data.name = port_to_send;
+  send_msg.data.disposition = disposition;
+  send_msg.data.type = MACH_MSG_PORT_DESCRIPTOR;
+  int kr = mach_msg(send_hdr,               // message buffer
+                    MACH_SEND_MSG,          // option indicating send
+                    send_hdr->msgh_size,    // size of header + body
+                    0,                      // receive limit
+                    MACH_PORT_NULL,         // receive name
+                    MACH_MSG_TIMEOUT_NONE,  // no timeout, wait forever
+                    MACH_PORT_NULL);        // no notification port
+  MACH_CHECK(kr == KERN_SUCCESS, kr) << "SendMachPort";
+}
+
+std::string CreateRandomServiceName() {
+  return StringPrintf("SharedMemoryMacMultiProcessTest.%llu", RandUint64());
+}
+
+// Sets up the mach communication ports with the server. Returns a port to which
+// the server will send mach objects.
+mach_port_t CommonChildProcessSetUp() {
+  CommandLine cmd_line = *CommandLine::ForCurrentProcess();
+  std::string service_name =
+      cmd_line.GetSwitchValueASCII(g_service_switch_name);
+  mac::ScopedMachSendRight server_port(LookupServer(service_name.c_str()));
+  mach_port_t client_port = MakeReceivingPort();
+
+  // Send the port that this process is listening on to the server.
+  SendMachPort(server_port.get(), client_port, MACH_MSG_TYPE_MAKE_SEND);
+  return client_port;
+}
+
+// The number of active names in the current task's port name space.
+mach_msg_type_number_t GetActiveNameCount() {
+  mach_port_name_array_t name_array;
+  mach_msg_type_number_t names_count;
+  mach_port_type_array_t type_array;
+  mach_msg_type_number_t types_count;
+  kern_return_t kr = mach_port_names(mach_task_self(), &name_array,
+                                     &names_count, &type_array, &types_count);
+  MACH_CHECK(kr == KERN_SUCCESS, kr) << "GetActiveNameCount";
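+  // The out-arrays allocated by mach_port_names() are not deallocated here;
+  // the leak is negligible for a short-lived test process.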
+  return names_count;
+}
+
+}  // namespace
+
+class SharedMemoryMacMultiProcessTest : public MultiProcessTest {
+ public:
+  SharedMemoryMacMultiProcessTest() {}
+
+  CommandLine MakeCmdLine(const std::string& procname) override {
+    CommandLine command_line = MultiProcessTest::MakeCmdLine(procname);
+    // Pass the service name to the child process.
+    command_line.AppendSwitchASCII(g_service_switch_name, service_name_);
+    return command_line;
+  }
+
+  void SetUpChild(const std::string& name) {
+    // Make a random service name so that this test doesn't conflict with other
+    // similar tests.
+    service_name_ = CreateRandomServiceName();
+    server_port_.reset(BecomeMachServer(service_name_.c_str()));
+    child_process_ = SpawnChild(name);
+    client_port_.reset(ReceiveMachPort(server_port_.get()));
+  }
+
+  static const int s_memory_size = 99999;
+
+ protected:
+  std::string service_name_;
+
+  // A port on which the main process listens for mach messages from the child
+  // process.
+  mac::ScopedMachReceiveRight server_port_;
+
+  // A port on which the child process listens for mach messages from the main
+  // process.
+  mac::ScopedMachSendRight client_port_;
+
+  base::Process child_process_;
+  DISALLOW_COPY_AND_ASSIGN(SharedMemoryMacMultiProcessTest);
+};
+
+// Tests that content written to shared memory in the server process can be read
+// by the child process.
+TEST_F(SharedMemoryMacMultiProcessTest, MachBasedSharedMemory) {
+  // Mach-based SharedMemory isn't supported on OSX 10.6.
+  if (mac::IsOSSnowLeopard())
+    return;
+
+  SetUpChild("MachBasedSharedMemoryClient");
+
+  scoped_ptr<SharedMemory> shared_memory(CreateSharedMemory(s_memory_size));
+
+  // Send the underlying memory object to the client process.
+  SendMachPort(client_port_.get(), shared_memory->handle().GetMemoryObject(),
+               MACH_MSG_TYPE_COPY_SEND);
+  int rv = -1;
+  ASSERT_TRUE(child_process_.WaitForExitWithTimeout(
+      TestTimeouts::action_timeout(), &rv));
+  EXPECT_EQ(0, rv);
+}
+
+MULTIPROCESS_TEST_MAIN(MachBasedSharedMemoryClient) {
+  mac::ScopedMachReceiveRight client_port(CommonChildProcessSetUp());
+  // The next mach port should be for a memory object.
+  mach_port_t memory_object = ReceiveMachPort(client_port.get());
+  SharedMemoryHandle shm(memory_object,
+                         SharedMemoryMacMultiProcessTest::s_memory_size,
+                         GetCurrentProcId());
+  SharedMemory shared_memory(shm, false);
+  shared_memory.Map(SharedMemoryMacMultiProcessTest::s_memory_size);
+  const char* start = static_cast<const char*>(shared_memory.memory());
+  for (int i = 0; i < SharedMemoryMacMultiProcessTest::s_memory_size; ++i) {
+    DCHECK_EQ(start[i], 'a');
+  }
+  return 0;
+}
+
+// Tests that mapping shared memory with an offset works correctly.
+TEST_F(SharedMemoryMacMultiProcessTest, MachBasedSharedMemoryWithOffset) {
+  // Mach-based SharedMemory isn't supported on OSX 10.6.
+  if (mac::IsOSSnowLeopard())
+    return;
+
+  SetUpChild("MachBasedSharedMemoryWithOffsetClient");
+
+  SharedMemoryHandle shm(s_memory_size);
+  ASSERT_TRUE(shm.IsValid());
+  SharedMemory shared_memory(shm, false);
+  shared_memory.Map(s_memory_size);
+
+  size_t page_size = SysInfo::VMAllocationGranularity();
+  char* start = static_cast<char*>(shared_memory.memory());
+  memset(start, 'a', page_size);
+  memset(start + page_size, 'b', page_size);
+  memset(start + 2 * page_size, 'c', page_size);
+
+  // Send the underlying memory object to the client process.
+  SendMachPort(
+      client_port_.get(), shm.GetMemoryObject(), MACH_MSG_TYPE_COPY_SEND);
+  int rv = -1;
+  ASSERT_TRUE(child_process_.WaitForExitWithTimeout(
+      TestTimeouts::action_timeout(), &rv));
+  EXPECT_EQ(0, rv);
+}
+
+MULTIPROCESS_TEST_MAIN(MachBasedSharedMemoryWithOffsetClient) {
+  mac::ScopedMachReceiveRight client_port(CommonChildProcessSetUp());
+  // The next mach port should be for a memory object.
+  mach_port_t memory_object = ReceiveMachPort(client_port.get());
+  SharedMemoryHandle shm(memory_object,
+                         SharedMemoryMacMultiProcessTest::s_memory_size,
+                         GetCurrentProcId());
+  SharedMemory shared_memory(shm, false);
+  size_t page_size = SysInfo::VMAllocationGranularity();
+  shared_memory.MapAt(page_size, 2 * page_size);
+  const char* start = static_cast<const char*>(shared_memory.memory());
+  for (size_t i = 0; i < page_size; ++i) {
+    DCHECK_EQ(start[i], 'b');
+  }
+  for (size_t i = page_size; i < 2 * page_size; ++i) {
+    DCHECK_EQ(start[i], 'c');
+  }
+  return 0;
+}
+
+// Tests that duplication and closing have the right effect on Mach reference
+// counts.
+TEST_F(SharedMemoryMacMultiProcessTest, MachDuplicateAndClose) {
+  // Mach-based SharedMemory isn't supported on OSX 10.6.
+  if (mac::IsOSSnowLeopard())
+    return;
+
+  mach_msg_type_number_t active_name_count = GetActiveNameCount();
+
+  // Making a new SharedMemoryHandle increments the name count.
+  SharedMemoryHandle shm(s_memory_size);
+  ASSERT_TRUE(shm.IsValid());
+  EXPECT_EQ(active_name_count + 1, GetActiveNameCount());
+
+  // Duplicating the SharedMemoryHandle increments the ref count, but doesn't
+  // make a new name.
+  shm.Duplicate();
+  EXPECT_EQ(active_name_count + 1, GetActiveNameCount());
+
+  // Closing the SharedMemoryHandle decrements the ref count. The first close
+  // doesn't destroy the port, so the name count is unchanged.
+  shm.Close();
+  EXPECT_EQ(active_name_count + 1, GetActiveNameCount());
+
+  // Closing the SharedMemoryHandle decrements the ref count. The second time
+  // destroys the port.
+  shm.Close();
+  EXPECT_EQ(active_name_count, GetActiveNameCount());
+}
+
+// Tests that Mach shared memory can be mapped and unmapped.
+TEST_F(SharedMemoryMacMultiProcessTest, MachUnmapMap) {
+  // Mach-based SharedMemory isn't supported on OSX 10.6.
+  if (mac::IsOSSnowLeopard())
+    return;
+
+  mach_msg_type_number_t active_name_count = GetActiveNameCount();
+
+  scoped_ptr<SharedMemory> shared_memory = CreateSharedMemory(s_memory_size);
+  ASSERT_TRUE(shared_memory->Unmap());
+  ASSERT_TRUE(shared_memory->Map(s_memory_size));
+  shared_memory.reset();
+  EXPECT_EQ(active_name_count, GetActiveNameCount());
+}
+
+// Tests that passing a SharedMemoryHandle to a SharedMemory object also passes
+// ownership, and that destroying the SharedMemory closes the SharedMemoryHandle
+// as well.
+TEST_F(SharedMemoryMacMultiProcessTest, MachSharedMemoryTakesOwnership) {
+  // Mach-based SharedMemory isn't supported on OSX 10.6.
+  if (mac::IsOSSnowLeopard())
+    return;
+
+  mach_msg_type_number_t active_name_count = GetActiveNameCount();
+
+  // Making a new SharedMemoryHandle increments the name count.
+  SharedMemoryHandle shm(s_memory_size);
+  ASSERT_TRUE(shm.IsValid());
+  EXPECT_EQ(active_name_count + 1, GetActiveNameCount());
+
+  // Name count doesn't change when mapping the memory.
+  scoped_ptr<SharedMemory> shared_memory(new SharedMemory(shm, false));
+  shared_memory->Map(s_memory_size);
+  EXPECT_EQ(active_name_count + 1, GetActiveNameCount());
+
+  // Destroying the SharedMemory object frees the resource.
+  shared_memory.reset();
+  EXPECT_EQ(active_name_count, GetActiveNameCount());
+}
+
+// Tests that the read-only flag works.
+TEST_F(SharedMemoryMacMultiProcessTest, MachReadOnly) {
+  // Mach-based SharedMemory isn't supported on OSX 10.6.
+  if (mac::IsOSSnowLeopard())
+    return;
+
+  scoped_ptr<SharedMemory> shared_memory(CreateSharedMemory(s_memory_size));
+
+  SharedMemoryHandle shm2 = shared_memory->handle().Duplicate();
+  ASSERT_TRUE(shm2.IsValid());
+  SharedMemory shared_memory2(shm2, true);
+  shared_memory2.Map(s_memory_size);
+  ASSERT_DEATH(memset(shared_memory2.memory(), 'b', s_memory_size), "");
+}
+
+// Tests that the method ShareToProcess() works.
+TEST_F(SharedMemoryMacMultiProcessTest, MachShareToProcess) {
+  // Mach-based SharedMemory isn't supported on OSX 10.6.
+  if (mac::IsOSSnowLeopard())
+    return;
+
+  mach_msg_type_number_t active_name_count = GetActiveNameCount();
+
+  {
+    scoped_ptr<SharedMemory> shared_memory(CreateSharedMemory(s_memory_size));
+
+    SharedMemoryHandle shm2;
+    ASSERT_TRUE(shared_memory->ShareToProcess(GetCurrentProcId(), &shm2));
+    ASSERT_TRUE(shm2.IsValid());
+    SharedMemory shared_memory2(shm2, true);
+    shared_memory2.Map(s_memory_size);
+
+    ASSERT_EQ(0, memcmp(shared_memory->memory(), shared_memory2.memory(),
+                        s_memory_size));
+  }
+
+  EXPECT_EQ(active_name_count, GetActiveNameCount());
+}
+
+// Tests that the method ShareReadOnlyToProcess() creates a memory object that
+// is read-only.
+TEST_F(SharedMemoryMacMultiProcessTest, MachShareToProcessReadonly) {
+  // Mach-based SharedMemory isn't supported on OSX 10.6.
+  if (mac::IsOSSnowLeopard())
+    return;
+
+  scoped_ptr<SharedMemory> shared_memory(CreateSharedMemory(s_memory_size));
+
+  // Check the protection levels.
+  int current_prot, max_prot;
+  ASSERT_TRUE(GetProtections(shared_memory->memory(),
+                             shared_memory->mapped_size(), &current_prot,
+                             &max_prot));
+  ASSERT_EQ(VM_PROT_READ | VM_PROT_WRITE, current_prot);
+  ASSERT_EQ(VM_PROT_READ | VM_PROT_WRITE, max_prot);
+
+  // Make a new memory object.
+  SharedMemoryHandle shm2;
+  ASSERT_TRUE(shared_memory->ShareReadOnlyToProcess(GetCurrentProcId(), &shm2));
+  ASSERT_TRUE(shm2.IsValid());
+
+  // Mapping with |readonly| set to |false| should fail.
+  SharedMemory shared_memory2(shm2, false);
+  shared_memory2.Map(s_memory_size);
+  ASSERT_EQ(nullptr, shared_memory2.memory());
+
+  // Now try mapping with |readonly| set to |true|.
+  SharedMemory shared_memory3(shm2.Duplicate(), true);
+  shared_memory3.Map(s_memory_size);
+  ASSERT_NE(nullptr, shared_memory3.memory());
+
+  // Check the protection levels.
+  ASSERT_TRUE(GetProtections(shared_memory3.memory(),
+                             shared_memory3.mapped_size(), &current_prot,
+                             &max_prot));
+  ASSERT_EQ(VM_PROT_READ, current_prot);
+  ASSERT_EQ(VM_PROT_READ, max_prot);
+
+  // The memory should still be readonly, since the underlying memory object
+  // is readonly.
+  ASSERT_DEATH(memset(shared_memory2.memory(), 'b', s_memory_size), "");
+}
+
+// Tests that the method ShareReadOnlyToProcess() doesn't leak.
+TEST_F(SharedMemoryMacMultiProcessTest, MachShareToProcessReadonlyLeak) {
+  // Mach-based SharedMemory isn't supported on OSX 10.6.
+  if (mac::IsOSSnowLeopard())
+    return;
+
+  mach_msg_type_number_t active_name_count = GetActiveNameCount();
+
+  {
+    scoped_ptr<SharedMemory> shared_memory(CreateSharedMemory(s_memory_size));
+
+    SharedMemoryHandle shm2;
+    ASSERT_TRUE(
+        shared_memory->ShareReadOnlyToProcess(GetCurrentProcId(), &shm2));
+    ASSERT_TRUE(shm2.IsValid());
+
+    // Intentionally map with |readonly| set to |false|.
+    SharedMemory shared_memory2(shm2, false);
+    shared_memory2.Map(s_memory_size);
+  }
+
+  EXPECT_EQ(active_name_count, GetActiveNameCount());
+}
+
+}  // namespace base
diff --git a/base/memory/shared_memory_posix.cc b/base/memory/shared_memory_posix.cc
new file mode 100644
index 0000000..a05e866
--- /dev/null
+++ b/base/memory/shared_memory_posix.cc
@@ -0,0 +1,505 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/memory/shared_memory.h"
+
+#include <errno.h>
+#include <fcntl.h>
+#include <stddef.h>
+#include <sys/mman.h>
+#include <sys/stat.h>
+#include <unistd.h>
+
+#include "base/files/file_util.h"
+#include "base/files/scoped_file.h"
+#include "base/logging.h"
+#include "base/posix/eintr_wrapper.h"
+#include "base/posix/safe_strerror.h"
+#include "base/process/process_metrics.h"
+#include "base/profiler/scoped_tracker.h"
+#include "base/scoped_generic.h"
+#include "base/strings/utf_string_conversions.h"
+#include "build/build_config.h"
+
+#if defined(OS_ANDROID)
+#include "base/os_compat_android.h"
+#include "third_party/ashmem/ashmem.h"
+#elif defined(__ANDROID__)
+#include <cutils/ashmem.h>
+#endif
+
+namespace base {
+
+namespace {
+
+struct ScopedPathUnlinkerTraits {
+  static FilePath* InvalidValue() { return nullptr; }
+
+  static void Free(FilePath* path) {
+    // TODO(erikchen): Remove ScopedTracker below once http://crbug.com/466437
+    // is fixed.
+    tracked_objects::ScopedTracker tracking_profile(
+        FROM_HERE_WITH_EXPLICIT_FUNCTION(
+            "466437 SharedMemory::Create::Unlink"));
+    if (unlink(path->value().c_str()))
+      PLOG(WARNING) << "unlink";
+  }
+};
+
+// Unlinks the FilePath when the object is destroyed.
+typedef ScopedGeneric<FilePath*, ScopedPathUnlinkerTraits> ScopedPathUnlinker;
+
+#if !defined(OS_ANDROID) && !defined(__ANDROID__)
+// Makes a temporary file, fdopens it, and then unlinks it. |fp| is populated
+// with the fdopened FILE. |readonly_fd| is populated with the opened fd if
+// options.share_read_only is true. |path| is populated with the location of
+// the file before it was unlinked.
+// Returns false if there's an unhandled failure.
+bool CreateAnonymousSharedMemory(const SharedMemoryCreateOptions& options,
+                                 ScopedFILE* fp,
+                                 ScopedFD* readonly_fd,
+                                 FilePath* path) {
+  // It doesn't make sense to have an open-existing private piece of shmem.
+  DCHECK(!options.open_existing_deprecated);
+  // Q: Why not use the shm_open() etc. APIs?
+  // A: Because they're limited to 4mb on OS X.  FFFFFFFUUUUUUUUUUU
+  FilePath directory;
+  ScopedPathUnlinker path_unlinker;
+  if (GetShmemTempDir(options.executable, &directory)) {
+    // TODO(erikchen): Remove ScopedTracker below once http://crbug.com/466437
+    // is fixed.
+    tracked_objects::ScopedTracker tracking_profile(
+        FROM_HERE_WITH_EXPLICIT_FUNCTION(
+            "466437 SharedMemory::Create::OpenTemporaryFile"));
+    fp->reset(base::CreateAndOpenTemporaryFileInDir(directory, path));
+
+    // Deleting the file prevents anyone else from mapping it in (making it
+    // private), and prevents the need for cleanup (once the last fd is
+    // closed, it is truly freed).
+    if (*fp)
+      path_unlinker.reset(path);
+  }
+
+  if (*fp) {
+    if (options.share_read_only) {
+      // TODO(erikchen): Remove ScopedTracker below once
+      // http://crbug.com/466437 is fixed.
+      tracked_objects::ScopedTracker tracking_profile(
+          FROM_HERE_WITH_EXPLICIT_FUNCTION(
+              "466437 SharedMemory::Create::OpenReadonly"));
+      // Also open as readonly so that we can ShareReadOnlyToProcess.
+      readonly_fd->reset(HANDLE_EINTR(open(path->value().c_str(), O_RDONLY)));
+      if (!readonly_fd->is_valid()) {
+        DPLOG(ERROR) << "open(\"" << path->value() << "\", O_RDONLY) failed";
+        fp->reset();
+        return false;
+      }
+    }
+  }
+  return true;
+}
+#endif  // !defined(OS_ANDROID) && !defined(__ANDROID__)
+}  // namespace
+
+SharedMemoryCreateOptions::SharedMemoryCreateOptions()
+    : name_deprecated(nullptr),
+      open_existing_deprecated(false),
+      size(0),
+      executable(false),
+      share_read_only(false) {}
+
+SharedMemory::SharedMemory()
+    : mapped_file_(-1),
+      readonly_mapped_file_(-1),
+      mapped_size_(0),
+      memory_(NULL),
+      read_only_(false),
+      requested_size_(0) {
+}
+
+SharedMemory::SharedMemory(const SharedMemoryHandle& handle, bool read_only)
+    : mapped_file_(handle.fd),
+      readonly_mapped_file_(-1),
+      mapped_size_(0),
+      memory_(NULL),
+      read_only_(read_only),
+      requested_size_(0) {
+}
+
+SharedMemory::~SharedMemory() {
+  Unmap();
+  Close();
+}
+
+// static
+bool SharedMemory::IsHandleValid(const SharedMemoryHandle& handle) {
+  return handle.fd >= 0;
+}
+
+// static
+SharedMemoryHandle SharedMemory::NULLHandle() {
+  return SharedMemoryHandle();
+}
+
+// static
+void SharedMemory::CloseHandle(const SharedMemoryHandle& handle) {
+  DCHECK_GE(handle.fd, 0);
+  if (IGNORE_EINTR(close(handle.fd)) < 0)
+    DPLOG(ERROR) << "close";
+}
+
+// static
+size_t SharedMemory::GetHandleLimit() {
+  return base::GetMaxFds();
+}
+
+// static
+SharedMemoryHandle SharedMemory::DuplicateHandle(
+    const SharedMemoryHandle& handle) {
+  int duped_handle = HANDLE_EINTR(dup(handle.fd));
+  if (duped_handle < 0)
+    return base::SharedMemory::NULLHandle();
+  return base::FileDescriptor(duped_handle, true);
+}
+
+// static
+int SharedMemory::GetFdFromSharedMemoryHandle(
+    const SharedMemoryHandle& handle) {
+  return handle.fd;
+}
+
+bool SharedMemory::CreateAndMapAnonymous(size_t size) {
+  return CreateAnonymous(size) && Map(size);
+}
+
+#if !defined(OS_ANDROID) && !defined(__ANDROID__)
+// static
+bool SharedMemory::GetSizeFromSharedMemoryHandle(
+    const SharedMemoryHandle& handle,
+    size_t* size) {
+  struct stat st;
+  if (fstat(handle.fd, &st) != 0)
+    return false;
+  if (st.st_size < 0)
+    return false;
+  *size = st.st_size;
+  return true;
+}
+
+// Chromium mostly only uses unique/private shmem, as specified by an empty
+// name. The exception is in the StatsTable.
+// TODO(jrg): there is no way to "clean up" all unused named shmem if
+// we restart from a crash.  (That isn't a new problem, but it is a problem.)
+// In case we want to delete it later, it may be useful to save the value
+// of mem_filename after FilePathForMemoryName().
+bool SharedMemory::Create(const SharedMemoryCreateOptions& options) {
+  // TODO(erikchen): Remove ScopedTracker below once http://crbug.com/466437
+  // is fixed.
+  tracked_objects::ScopedTracker tracking_profile1(
+      FROM_HERE_WITH_EXPLICIT_FUNCTION(
+          "466437 SharedMemory::Create::Start"));
+  DCHECK_EQ(-1, mapped_file_);
+  if (options.size == 0) return false;
+
+  if (options.size > static_cast<size_t>(std::numeric_limits<int>::max()))
+    return false;
+
+  // This function theoretically can block on the disk, but realistically
+  // the temporary files we create will just go into the buffer cache
+  // and be deleted before they ever make it out to disk.
+  base::ThreadRestrictions::ScopedAllowIO allow_io;
+
+  ScopedFILE fp;
+  bool fix_size = true;
+  ScopedFD readonly_fd;
+
+  FilePath path;
+  if (options.name_deprecated == NULL || options.name_deprecated->empty()) {
+    bool result =
+        CreateAnonymousSharedMemory(options, &fp, &readonly_fd, &path);
+    if (!result)
+      return false;
+  } else {
+    if (!FilePathForMemoryName(*options.name_deprecated, &path))
+      return false;
+
+    // Make sure that the file is opened without any permission
+    // to other users on the system.
+    const mode_t kOwnerOnly = S_IRUSR | S_IWUSR;
+
+    // First, try to create the file.
+    int fd = HANDLE_EINTR(
+        open(path.value().c_str(), O_RDWR | O_CREAT | O_EXCL, kOwnerOnly));
+    if (fd == -1 && options.open_existing_deprecated) {
+      // If this doesn't work, try to open an existing file in append mode.
+      // Opening an existing file in a world writable directory has two main
+      // security implications:
+      // - Attackers could plant a file under their control, so ownership of
+      //   the file is checked below.
+      // - Attackers could plant a symbolic link so that an unexpected file
+      //   is opened, so O_NOFOLLOW is passed to open().
+      fd = HANDLE_EINTR(
+          open(path.value().c_str(), O_RDWR | O_APPEND | O_NOFOLLOW));
+
+      // Check that the current user owns the file.
+      // If uid != euid, then a more complex permission model is used and this
+      // API is not appropriate.
+      const uid_t real_uid = getuid();
+      const uid_t effective_uid = geteuid();
+      struct stat sb;
+      if (fd >= 0 &&
+          (fstat(fd, &sb) != 0 || sb.st_uid != real_uid ||
+           sb.st_uid != effective_uid)) {
+        LOG(ERROR) <<
+            "Invalid owner when opening existing shared memory file.";
+        close(fd);
+        return false;
+      }
+
+      // An existing file was opened, so its size should not be fixed.
+      fix_size = false;
+    }
+
+    if (options.share_read_only) {
+      // Also open as readonly so that we can ShareReadOnlyToProcess.
+      readonly_fd.reset(HANDLE_EINTR(open(path.value().c_str(), O_RDONLY)));
+      if (!readonly_fd.is_valid()) {
+        DPLOG(ERROR) << "open(\"" << path.value() << "\", O_RDONLY) failed";
+        close(fd);
+        fd = -1;
+        return false;
+      }
+    }
+    if (fd >= 0) {
+      // "a+" is always appropriate: if it's a new file, a+ is similar to w+.
+      fp.reset(fdopen(fd, "a+"));
+    }
+  }
+  if (fp && fix_size) {
+    // Get current size.
+    struct stat stat;
+    if (fstat(fileno(fp.get()), &stat) != 0)
+      return false;
+    const size_t current_size = stat.st_size;
+    if (current_size != options.size) {
+      if (HANDLE_EINTR(ftruncate(fileno(fp.get()), options.size)) != 0)
+        return false;
+    }
+    requested_size_ = options.size;
+  }
+  if (fp == NULL) {
+    PLOG(ERROR) << "Creating shared memory in " << path.value() << " failed";
+    FilePath dir = path.DirName();
+    if (access(dir.value().c_str(), W_OK | X_OK) < 0) {
+      PLOG(ERROR) << "Unable to access(W_OK|X_OK) " << dir.value();
+      if (dir.value() == "/dev/shm") {
+        LOG(FATAL) << "This is frequently caused by incorrect permissions on "
+                   << "/dev/shm.  Try 'sudo chmod 1777 /dev/shm' to fix.";
+      }
+    }
+    return false;
+  }
+
+  return PrepareMapFile(std::move(fp), std::move(readonly_fd));
+}
+
+// Our current shmem implementation is based on mmap()ed files, which must
+// be deleted explicitly.
+// In practice this call is only needed for unit tests.
+bool SharedMemory::Delete(const std::string& name) {
+  FilePath path;
+  if (!FilePathForMemoryName(name, &path))
+    return false;
+
+  if (PathExists(path))
+    return base::DeleteFile(path, false);
+
+  // Doesn't exist, so success.
+  return true;
+}
+
+bool SharedMemory::Open(const std::string& name, bool read_only) {
+  FilePath path;
+  if (!FilePathForMemoryName(name, &path))
+    return false;
+
+  read_only_ = read_only;
+
+  const char* mode = read_only ? "r" : "r+";
+  ScopedFILE fp(base::OpenFile(path, mode));
+  ScopedFD readonly_fd(HANDLE_EINTR(open(path.value().c_str(), O_RDONLY)));
+  if (!readonly_fd.is_valid()) {
+    DPLOG(ERROR) << "open(\"" << path.value() << "\", O_RDONLY) failed";
+    return false;
+  }
+  return PrepareMapFile(std::move(fp), std::move(readonly_fd));
+}
+#endif  // !defined(OS_ANDROID) && !defined(__ANDROID__)
+
+bool SharedMemory::MapAt(off_t offset, size_t bytes) {
+  if (mapped_file_ == -1)
+    return false;
+
+  if (bytes > static_cast<size_t>(std::numeric_limits<int>::max()))
+    return false;
+
+  if (memory_)
+    return false;
+
+#if defined(OS_ANDROID) || defined(__ANDROID__)
+  // On Android, Map can be called with a size and offset of zero to use the
+  // ashmem-determined size.
+  if (bytes == 0) {
+    DCHECK_EQ(0, offset);
+    int ashmem_bytes = ashmem_get_size_region(mapped_file_);
+    if (ashmem_bytes < 0)
+      return false;
+    bytes = ashmem_bytes;
+  }
+#endif
+
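+  // Read-only mappings drop PROT_WRITE; MAP_SHARED makes writes visible to
+  // every process mapping the same region.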
+  memory_ = mmap(NULL, bytes, PROT_READ | (read_only_ ? 0 : PROT_WRITE),
+                 MAP_SHARED, mapped_file_, offset);
+
+  bool mmap_succeeded = memory_ != MAP_FAILED && memory_ != NULL;
+  if (mmap_succeeded) {
+    mapped_size_ = bytes;
+    DCHECK_EQ(0U, reinterpret_cast<uintptr_t>(memory_) &
+        (SharedMemory::MAP_MINIMUM_ALIGNMENT - 1));
+  } else {
+    memory_ = NULL;
+  }
+
+  return mmap_succeeded;
+}
+
+bool SharedMemory::Unmap() {
+  if (memory_ == NULL)
+    return false;
+
+  munmap(memory_, mapped_size_);
+  memory_ = NULL;
+  mapped_size_ = 0;
+  return true;
+}
+
+SharedMemoryHandle SharedMemory::handle() const {
+  return FileDescriptor(mapped_file_, false);
+}
+
+void SharedMemory::Close() {
+  if (mapped_file_ > 0) {
+    if (IGNORE_EINTR(close(mapped_file_)) < 0)
+      PLOG(ERROR) << "close";
+    mapped_file_ = -1;
+  }
+  if (readonly_mapped_file_ > 0) {
+    if (IGNORE_EINTR(close(readonly_mapped_file_)) < 0)
+      PLOG(ERROR) << "close";
+    readonly_mapped_file_ = -1;
+  }
+}
+
+#if !defined(OS_ANDROID) && !defined(__ANDROID__)
+bool SharedMemory::PrepareMapFile(ScopedFILE fp, ScopedFD readonly_fd) {
+  DCHECK_EQ(-1, mapped_file_);
+  DCHECK_EQ(-1, readonly_mapped_file_);
+  if (fp == NULL)
+    return false;
+
+  // This function theoretically can block on the disk, but realistically
+  // the temporary files we create will just go into the buffer cache
+  // and be deleted before they ever make it out to disk.
+  base::ThreadRestrictions::ScopedAllowIO allow_io;
+
+  struct stat st = {};
+  if (fstat(fileno(fp.get()), &st))
+    NOTREACHED();
+  if (readonly_fd.is_valid()) {
+    struct stat readonly_st = {};
+    if (fstat(readonly_fd.get(), &readonly_st))
+      NOTREACHED();
+    if (st.st_dev != readonly_st.st_dev || st.st_ino != readonly_st.st_ino) {
+      LOG(ERROR) << "writable and read-only inodes don't match; bailing";
+      return false;
+    }
+  }
+
+  mapped_file_ = HANDLE_EINTR(dup(fileno(fp.get())));
+  if (mapped_file_ == -1) {
+    if (errno == EMFILE) {
+      LOG(WARNING) << "Shared memory creation failed; out of file descriptors";
+      return false;
+    } else {
+      NOTREACHED() << "Call to dup failed, errno=" << errno;
+    }
+  }
+  readonly_mapped_file_ = readonly_fd.release();
+
+  return true;
+}
+
+// For the given shmem named |mem_name|, returns a filename to mmap()
+// (and possibly create). Modifies |path|. Returns false on error, or
+// true on success.
+bool SharedMemory::FilePathForMemoryName(const std::string& mem_name,
+                                         FilePath* path) {
+  // mem_name will be used for a filename; make sure it doesn't
+  // contain anything which will confuse us.
+  DCHECK_EQ(std::string::npos, mem_name.find('/'));
+  DCHECK_EQ(std::string::npos, mem_name.find('\0'));
+
+  FilePath temp_dir;
+  if (!GetShmemTempDir(false, &temp_dir))
+    return false;
+
+#if defined(GOOGLE_CHROME_BUILD)
+  std::string name_base = std::string("com.google.Chrome");
+#else
+  std::string name_base = std::string("org.chromium.Chromium");
+#endif
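+  // e.g. a |mem_name| of "foo" yields <shmem dir>/org.chromium.Chromium.shmem.foo
+  // (with the com.google.Chrome prefix in branded builds).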
+  *path = temp_dir.AppendASCII(name_base + ".shmem." + mem_name);
+  return true;
+}
+#endif  // !defined(OS_ANDROID) && !defined(__ANDROID__)
+
+bool SharedMemory::ShareToProcessCommon(ProcessHandle /* process */,
+                                        SharedMemoryHandle* new_handle,
+                                        bool close_self,
+                                        ShareMode share_mode) {
+  int handle_to_dup = -1;
+  switch (share_mode) {
+    case SHARE_CURRENT_MODE:
+      handle_to_dup = mapped_file_;
+      break;
+    case SHARE_READONLY:
+      // We could imagine re-opening the file from /dev/fd, but that can't make
+      // it readonly on Mac: https://codereview.chromium.org/27265002/#msg10
+      CHECK_GE(readonly_mapped_file_, 0);
+      handle_to_dup = readonly_mapped_file_;
+      break;
+  }
+
+  const int new_fd = HANDLE_EINTR(dup(handle_to_dup));
+  if (new_fd < 0) {
+    if (close_self) {
+      Unmap();
+      Close();
+    }
+    DPLOG(ERROR) << "dup() failed.";
+    return false;
+  }
+
+  new_handle->fd = new_fd;
+  new_handle->auto_close = true;
+
+  if (close_self) {
+    Unmap();
+    Close();
+  }
+
+  return true;
+}
+
+}  // namespace base
diff --git a/base/memory/shared_memory_unittest.cc b/base/memory/shared_memory_unittest.cc
new file mode 100644
index 0000000..cfb0b32
--- /dev/null
+++ b/base/memory/shared_memory_unittest.cc
@@ -0,0 +1,713 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include "base/atomicops.h"
+#include "base/macros.h"
+#include "base/memory/scoped_ptr.h"
+#include "base/memory/shared_memory.h"
+#include "base/memory/shared_memory_handle.h"
+#include "base/process/kill.h"
+#include "base/rand_util.h"
+#include "base/strings/string_number_conversions.h"
+#include "base/sys_info.h"
+#include "base/test/multiprocess_test.h"
+#include "base/threading/platform_thread.h"
+#include "base/time/time.h"
+#include "build/build_config.h"
+#include "testing/gtest/include/gtest/gtest.h"
+#include "testing/multiprocess_func_list.h"
+
+#if defined(OS_POSIX)
+#include <errno.h>
+#include <fcntl.h>
+#include <sys/mman.h>
+#include <sys/stat.h>
+#include <sys/types.h>
+#include <unistd.h>
+#endif
+
+#if defined(OS_WIN)
+#include "base/win/scoped_handle.h"
+#endif
+
+namespace base {
+
+namespace {
+
+#if !defined(OS_MACOSX)
+// Each thread will open the shared memory. Each thread will take a different
+// 4-byte int pointer, and keep changing it, with some small pauses in between.
+// Verify that each thread's value in the shared memory is always correct.
+class MultipleThreadMain : public PlatformThread::Delegate {
+ public:
+  explicit MultipleThreadMain(int16_t id) : id_(id) {}
+  ~MultipleThreadMain() override {}
+
+  static void CleanUp() {
+    SharedMemory memory;
+    memory.Delete(s_test_name_);
+  }
+
+  // PlatformThread::Delegate interface.
+  void ThreadMain() override {
+    const uint32_t kDataSize = 1024;
+    SharedMemory memory;
+    bool rv = memory.CreateNamedDeprecated(s_test_name_, true, kDataSize);
+    EXPECT_TRUE(rv);
+    rv = memory.Map(kDataSize);
+    EXPECT_TRUE(rv);
+    int* ptr = static_cast<int*>(memory.memory()) + id_;
+    EXPECT_EQ(0, *ptr);
+
+    for (int idx = 0; idx < 100; idx++) {
+      *ptr = idx;
+      PlatformThread::Sleep(TimeDelta::FromMilliseconds(1));
+      EXPECT_EQ(*ptr, idx);
+    }
+    // Reset back to 0 for the next test that uses the same name.
+    *ptr = 0;
+
+    memory.Close();
+  }
+
+ private:
+  int16_t id_;
+
+  static const char s_test_name_[];
+
+  DISALLOW_COPY_AND_ASSIGN(MultipleThreadMain);
+};
+
+const char MultipleThreadMain::s_test_name_[] =
+    "SharedMemoryOpenThreadTest";
+#endif  // !defined(OS_MACOSX)
+
+}  // namespace
+
+// Android and Mac don't support SharedMemory::Open/Delete/
+// CreateNamedDeprecated(openExisting=true).
+#if !defined(OS_ANDROID) && !defined(OS_MACOSX)
+TEST(SharedMemoryTest, OpenClose) {
+  const uint32_t kDataSize = 1024;
+  std::string test_name = "SharedMemoryOpenCloseTest";
+
+  // Open two handles to a memory segment, confirm that they are mapped
+  // separately yet point to the same space.
+  SharedMemory memory1;
+  bool rv = memory1.Delete(test_name);
+  EXPECT_TRUE(rv);
+  rv = memory1.Delete(test_name);
+  EXPECT_TRUE(rv);
+  rv = memory1.Open(test_name, false);
+  EXPECT_FALSE(rv);
+  rv = memory1.CreateNamedDeprecated(test_name, false, kDataSize);
+  EXPECT_TRUE(rv);
+  rv = memory1.Map(kDataSize);
+  EXPECT_TRUE(rv);
+  SharedMemory memory2;
+  rv = memory2.Open(test_name, false);
+  EXPECT_TRUE(rv);
+  rv = memory2.Map(kDataSize);
+  EXPECT_TRUE(rv);
+  EXPECT_NE(memory1.memory(), memory2.memory());  // Compare the pointers.
+
+  // Make sure we don't segfault. (it actually happened!)
+  ASSERT_NE(memory1.memory(), static_cast<void*>(NULL));
+  ASSERT_NE(memory2.memory(), static_cast<void*>(NULL));
+
+  // Write data to the first memory segment, verify contents of second.
+  memset(memory1.memory(), '1', kDataSize);
+  EXPECT_EQ(memcmp(memory1.memory(), memory2.memory(), kDataSize), 0);
+
+  // Close the first memory segment, and verify the second has the right data.
+  memory1.Close();
+  char* start_ptr = static_cast<char*>(memory2.memory());
+  char* end_ptr = start_ptr + kDataSize;
+  for (char* ptr = start_ptr; ptr < end_ptr; ptr++)
+    EXPECT_EQ(*ptr, '1');
+
+  // Close the second memory segment.
+  memory2.Close();
+
+  rv = memory1.Delete(test_name);
+  EXPECT_TRUE(rv);
+  rv = memory2.Delete(test_name);
+  EXPECT_TRUE(rv);
+}
+
+TEST(SharedMemoryTest, OpenExclusive) {
+  const uint32_t kDataSize = 1024;
+  const uint32_t kDataSize2 = 2048;
+  std::ostringstream test_name_stream;
+  test_name_stream << "SharedMemoryOpenExclusiveTest."
+                   << Time::Now().ToDoubleT();
+  std::string test_name = test_name_stream.str();
+
+  // Open two handles to a memory segment and check that
+  // open_existing_deprecated works as expected.
+  SharedMemory memory1;
+  bool rv = memory1.CreateNamedDeprecated(test_name, false, kDataSize);
+  EXPECT_TRUE(rv);
+
+  // Memory1 knows its size because it created it.
+  EXPECT_EQ(memory1.requested_size(), kDataSize);
+
+  rv = memory1.Map(kDataSize);
+  EXPECT_TRUE(rv);
+
+  // The mapped memory1 must be at least the size we asked for.
+  EXPECT_GE(memory1.mapped_size(), kDataSize);
+
+  // The mapped size of memory1 shouldn't exceed the requested size by more
+  // than the allocation granularity.
+  EXPECT_LT(memory1.mapped_size(),
+            kDataSize + SysInfo::VMAllocationGranularity());
+
+  memset(memory1.memory(), 'G', kDataSize);
+
+  SharedMemory memory2;
+  // Should not be able to create if openExisting is false.
+  rv = memory2.CreateNamedDeprecated(test_name, false, kDataSize2);
+  EXPECT_FALSE(rv);
+
+  // Should be able to create with openExisting true.
+  rv = memory2.CreateNamedDeprecated(test_name, true, kDataSize2);
+  EXPECT_TRUE(rv);
+
+  // Memory2 shouldn't know the size because we didn't create it.
+  EXPECT_EQ(memory2.requested_size(), 0U);
+
+  // We should be able to map the original size.
+  rv = memory2.Map(kDataSize);
+  EXPECT_TRUE(rv);
+
+  // The mapped memory2 must be at least the size of the original.
+  EXPECT_GE(memory2.mapped_size(), kDataSize);
+
+  // The mapped size of memory2 shouldn't exceed the requested size by more
+  // than the allocation granularity.
+  EXPECT_LT(memory2.mapped_size(),
+            kDataSize2 + SysInfo::VMAllocationGranularity());
+
+  // Verify that opening memory2 didn't truncate or delete memory 1.
+  char* start_ptr = static_cast<char*>(memory2.memory());
+  char* end_ptr = start_ptr + kDataSize;
+  for (char* ptr = start_ptr; ptr < end_ptr; ptr++) {
+    EXPECT_EQ(*ptr, 'G');
+  }
+
+  memory1.Close();
+  memory2.Close();
+
+  rv = memory1.Delete(test_name);
+  EXPECT_TRUE(rv);
+}
+#endif  // !defined(OS_ANDROID) && !defined(OS_MACOSX)
+
+// Check that memory is still mapped after it's closed.
+TEST(SharedMemoryTest, CloseNoUnmap) {
+  const size_t kDataSize = 4096;
+
+  SharedMemory memory;
+  ASSERT_TRUE(memory.CreateAndMapAnonymous(kDataSize));
+  char* ptr = static_cast<char*>(memory.memory());
+  ASSERT_NE(ptr, static_cast<void*>(NULL));
+  memset(ptr, 'G', kDataSize);
+
+  memory.Close();
+
+  EXPECT_EQ(ptr, memory.memory());
+  EXPECT_EQ(SharedMemory::NULLHandle(), memory.handle());
+
+  for (size_t i = 0; i < kDataSize; i++) {
+    EXPECT_EQ('G', ptr[i]);
+  }
+
+  memory.Unmap();
+  EXPECT_EQ(nullptr, memory.memory());
+}
+
+#if !defined(OS_MACOSX)
+// Create a set of N threads to each open a shared memory segment and write to
+// it. Verify that they are always reading/writing consistent data.
+TEST(SharedMemoryTest, MultipleThreads) {
+  const int kNumThreads = 5;
+
+  MultipleThreadMain::CleanUp();
+  // On POSIX we have a problem when two threads try to create the shmem
+  // (a file) at exactly the same time, since creation both creates the
+  // file and zero-fills it.  We solve the problem for this unit test
+  // (make it not flaky) by starting with one thread, then
+  // intentionally not cleaning up its shmem before running with
+  // kNumThreads.
+
+  int threadcounts[] = { 1, kNumThreads };
+  for (size_t i = 0; i < arraysize(threadcounts); i++) {
+    int numthreads = threadcounts[i];
+    scoped_ptr<PlatformThreadHandle[]> thread_handles;
+    scoped_ptr<MultipleThreadMain*[]> thread_delegates;
+
+    thread_handles.reset(new PlatformThreadHandle[numthreads]);
+    thread_delegates.reset(new MultipleThreadMain*[numthreads]);
+
+    // Spawn the threads.
+    for (int16_t index = 0; index < numthreads; index++) {
+      PlatformThreadHandle pth;
+      thread_delegates[index] = new MultipleThreadMain(index);
+      EXPECT_TRUE(PlatformThread::Create(0, thread_delegates[index], &pth));
+      thread_handles[index] = pth;
+    }
+
+    // Wait for the threads to finish.
+    for (int index = 0; index < numthreads; index++) {
+      PlatformThread::Join(thread_handles[index]);
+      delete thread_delegates[index];
+    }
+  }
+  MultipleThreadMain::CleanUp();
+}
+#endif
+
+// Allocate private (unique) shared memory with an empty string for a
+// name.  Make sure several of them don't point to the same thing as
+// we might expect if the names are equal.
+TEST(SharedMemoryTest, AnonymousPrivate) {
+  int i, j;
+  int count = 4;
+  bool rv;
+  const uint32_t kDataSize = 8192;
+
+  scoped_ptr<SharedMemory[]> memories(new SharedMemory[count]);
+  scoped_ptr<int*[]> pointers(new int*[count]);
+  ASSERT_TRUE(memories.get());
+  ASSERT_TRUE(pointers.get());
+
+  for (i = 0; i < count; i++) {
+    rv = memories[i].CreateAndMapAnonymous(kDataSize);
+    EXPECT_TRUE(rv);
+    int* ptr = static_cast<int*>(memories[i].memory());
+    EXPECT_TRUE(ptr);
+    pointers[i] = ptr;
+  }
+
+  for (i = 0; i < count; i++) {
+    // zero out the first int in each except for i; for that one, make it 100.
+    for (j = 0; j < count; j++) {
+      if (i == j)
+        pointers[j][0] = 100;
+      else
+        pointers[j][0] = 0;
+    }
+    // make sure there is no bleeding of the 100 into the other pointers
+    for (j = 0; j < count; j++) {
+      if (i == j)
+        EXPECT_EQ(100, pointers[j][0]);
+      else
+        EXPECT_EQ(0, pointers[j][0]);
+    }
+  }
+
+  for (int i = 0; i < count; i++) {
+    memories[i].Close();
+  }
+}
+
+TEST(SharedMemoryTest, ShareReadOnly) {
+  StringPiece contents = "Hello World";
+
+  SharedMemory writable_shmem;
+  SharedMemoryCreateOptions options;
+  options.size = contents.size();
+  options.share_read_only = true;
+#if defined(OS_MACOSX) && !defined(OS_IOS)
+  // The Mach functionality is tested in shared_memory_mac_unittest.cc.
+  options.type = SharedMemoryHandle::POSIX;
+#endif
+  ASSERT_TRUE(writable_shmem.Create(options));
+  ASSERT_TRUE(writable_shmem.Map(options.size));
+  memcpy(writable_shmem.memory(), contents.data(), contents.size());
+  EXPECT_TRUE(writable_shmem.Unmap());
+
+  SharedMemoryHandle readonly_handle;
+  ASSERT_TRUE(writable_shmem.ShareReadOnlyToProcess(GetCurrentProcessHandle(),
+                                                    &readonly_handle));
+  SharedMemory readonly_shmem(readonly_handle, /*readonly=*/true);
+
+  ASSERT_TRUE(readonly_shmem.Map(contents.size()));
+  EXPECT_EQ(contents,
+            StringPiece(static_cast<const char*>(readonly_shmem.memory()),
+                        contents.size()));
+  EXPECT_TRUE(readonly_shmem.Unmap());
+
+  // Make sure the writable instance is still writable.
+  ASSERT_TRUE(writable_shmem.Map(contents.size()));
+  StringPiece new_contents = "Goodbye";
+  memcpy(writable_shmem.memory(), new_contents.data(), new_contents.size());
+  EXPECT_EQ(new_contents,
+            StringPiece(static_cast<const char*>(writable_shmem.memory()),
+                        new_contents.size()));
+
+  // We'd like to check that if we send the read-only segment to another
+  // process, then that other process can't reopen it read/write.  (Since that
+  // would be a security hole.)  Setting up multiple processes is hard in a
+  // unittest, so this test checks that the *current* process can't reopen the
+  // segment read/write.  I think the test here is stronger than we actually
+  // care about, but there's a remote possibility that sending a file over a
+  // pipe would transform it into read/write.
+  SharedMemoryHandle handle = readonly_shmem.handle();
+
+#if defined(OS_ANDROID)
+  // The "read-only" handle is still writable on Android:
+  // http://crbug.com/320865
+  (void)handle;
+#elif defined(OS_POSIX)
+  int handle_fd = SharedMemory::GetFdFromSharedMemoryHandle(handle);
+  EXPECT_EQ(O_RDONLY, fcntl(handle_fd, F_GETFL) & O_ACCMODE)
+      << "The descriptor itself should be read-only.";
+
+  errno = 0;
+  void* writable = mmap(NULL, contents.size(), PROT_READ | PROT_WRITE,
+                        MAP_SHARED, handle_fd, 0);
+  int mmap_errno = errno;
+  EXPECT_EQ(MAP_FAILED, writable)
+      << "It shouldn't be possible to re-mmap the descriptor writable.";
+  EXPECT_EQ(EACCES, mmap_errno) << strerror(mmap_errno);
+  if (writable != MAP_FAILED)
+    EXPECT_EQ(0, munmap(writable, readonly_shmem.mapped_size()));
+
+#elif defined(OS_WIN)
+  EXPECT_EQ(NULL, MapViewOfFile(handle.GetHandle(), FILE_MAP_WRITE, 0, 0, 0))
+      << "Shouldn't be able to map memory writable.";
+
+  HANDLE temp_handle;
+  BOOL rv = ::DuplicateHandle(GetCurrentProcess(), handle.GetHandle(),
+                              GetCurrentProcess(), &temp_handle,
+                              FILE_MAP_ALL_ACCESS, false, 0);
+  EXPECT_EQ(FALSE, rv)
+      << "Shouldn't be able to duplicate the handle into a writable one.";
+  if (rv)
+    win::ScopedHandle writable_handle(temp_handle);
+  rv = ::DuplicateHandle(GetCurrentProcess(), handle.GetHandle(),
+                         GetCurrentProcess(), &temp_handle, FILE_MAP_READ,
+                         false, 0);
+  EXPECT_EQ(TRUE, rv)
+      << "Should be able to duplicate the handle into a readable one.";
+  if (rv)
+    win::ScopedHandle readable_handle(temp_handle);
+#else
+#error Unexpected platform; write a test that tries to make 'handle' writable.
+#endif  // defined(OS_POSIX) || defined(OS_WIN)
+}
+
+TEST(SharedMemoryTest, ShareToSelf) {
+  StringPiece contents = "Hello World";
+
+  SharedMemory shmem;
+  ASSERT_TRUE(shmem.CreateAndMapAnonymous(contents.size()));
+  memcpy(shmem.memory(), contents.data(), contents.size());
+  EXPECT_TRUE(shmem.Unmap());
+
+  SharedMemoryHandle shared_handle;
+  ASSERT_TRUE(shmem.ShareToProcess(GetCurrentProcessHandle(), &shared_handle));
+#if defined(OS_WIN)
+  ASSERT_TRUE(shared_handle.OwnershipPassesToIPC());
+#endif
+  SharedMemory shared(shared_handle, /*readonly=*/false);
+
+  ASSERT_TRUE(shared.Map(contents.size()));
+  EXPECT_EQ(
+      contents,
+      StringPiece(static_cast<const char*>(shared.memory()), contents.size()));
+
+  shared_handle = SharedMemoryHandle();
+  ASSERT_TRUE(shmem.ShareToProcess(GetCurrentProcessHandle(), &shared_handle));
+#if defined(OS_WIN)
+  ASSERT_TRUE(shared_handle.OwnershipPassesToIPC());
+#endif
+  SharedMemory readonly(shared_handle, /*readonly=*/true);
+
+  ASSERT_TRUE(readonly.Map(contents.size()));
+  EXPECT_EQ(contents,
+            StringPiece(static_cast<const char*>(readonly.memory()),
+                        contents.size()));
+}
+
+TEST(SharedMemoryTest, MapAt) {
+  ASSERT_TRUE(SysInfo::VMAllocationGranularity() >= sizeof(uint32_t));
+  const size_t kCount = SysInfo::VMAllocationGranularity();
+  const size_t kDataSize = kCount * sizeof(uint32_t);
+
+  SharedMemory memory;
+  ASSERT_TRUE(memory.CreateAndMapAnonymous(kDataSize));
+  uint32_t* ptr = static_cast<uint32_t*>(memory.memory());
+  ASSERT_NE(ptr, static_cast<void*>(NULL));
+
+  for (size_t i = 0; i < kCount; ++i) {
+    ptr[i] = i;
+  }
+
+  memory.Unmap();
+
+  off_t offset = SysInfo::VMAllocationGranularity();
+  ASSERT_TRUE(memory.MapAt(offset, kDataSize - offset));
+  offset /= sizeof(uint32_t);
+  ptr = static_cast<uint32_t*>(memory.memory());
+  ASSERT_NE(ptr, static_cast<void*>(NULL));
+  for (size_t i = offset; i < kCount; ++i) {
+    EXPECT_EQ(ptr[i - offset], i);
+  }
+}
+
+TEST(SharedMemoryTest, MapTwice) {
+  const uint32_t kDataSize = 1024;
+  SharedMemory memory;
+  bool rv = memory.CreateAndMapAnonymous(kDataSize);
+  EXPECT_TRUE(rv);
+
+  void* old_address = memory.memory();
+
+  rv = memory.Map(kDataSize);
+  EXPECT_FALSE(rv);
+  EXPECT_EQ(old_address, memory.memory());
+}
+
+#if defined(OS_POSIX)
+// This test is not applicable for iOS (crbug.com/399384).
+#if !defined(OS_IOS)
+// Create a shared memory object, mmap it, and mprotect it to PROT_EXEC.
+TEST(SharedMemoryTest, AnonymousExecutable) {
+  const uint32_t kTestSize = 1 << 16;
+
+  SharedMemory shared_memory;
+  SharedMemoryCreateOptions options;
+  options.size = kTestSize;
+  options.executable = true;
+#if defined(OS_MACOSX) && !defined(OS_IOS)
+  // The Mach functionality is tested in shared_memory_mac_unittest.cc.
+  options.type = SharedMemoryHandle::POSIX;
+#endif
+
+  EXPECT_TRUE(shared_memory.Create(options));
+  EXPECT_TRUE(shared_memory.Map(shared_memory.requested_size()));
+
+  EXPECT_EQ(0, mprotect(shared_memory.memory(), shared_memory.requested_size(),
+                        PROT_READ | PROT_EXEC));
+}
+#endif  // !defined(OS_IOS)
+
+// Android supports a different permission model than POSIX for its "ashmem"
+// shared memory implementation, so the tests about file permissions are not
+// included on Android.
+#if !defined(OS_ANDROID)
+
+// Set a umask and restore the old mask on destruction.
+class ScopedUmaskSetter {
+ public:
+  explicit ScopedUmaskSetter(mode_t target_mask) {
+    old_umask_ = umask(target_mask);
+  }
+  ~ScopedUmaskSetter() { umask(old_umask_); }
+ private:
+  mode_t old_umask_;
+  DISALLOW_IMPLICIT_CONSTRUCTORS(ScopedUmaskSetter);
+};
+
+// Create a shared memory object, check its permissions.
+TEST(SharedMemoryTest, FilePermissionsAnonymous) {
+  const uint32_t kTestSize = 1 << 8;
+
+  SharedMemory shared_memory;
+  SharedMemoryCreateOptions options;
+  options.size = kTestSize;
+#if defined(OS_MACOSX) && !defined(OS_IOS)
+  // The Mach functionality is tested in shared_memory_mac_unittest.cc.
+  options.type = SharedMemoryHandle::POSIX;
+#endif
+  // Set a file mode creation mask that gives all permissions.
+  ScopedUmaskSetter permissive_mask(S_IWGRP | S_IWOTH);
+
+  EXPECT_TRUE(shared_memory.Create(options));
+
+  int shm_fd =
+      SharedMemory::GetFdFromSharedMemoryHandle(shared_memory.handle());
+  struct stat shm_stat;
+  EXPECT_EQ(0, fstat(shm_fd, &shm_stat));
+  // Neither the group, nor others should be able to read the shared memory
+  // file.
+  EXPECT_FALSE(shm_stat.st_mode & S_IRWXO);
+  EXPECT_FALSE(shm_stat.st_mode & S_IRWXG);
+}
+
+// Create a shared memory object, check its permissions.
+TEST(SharedMemoryTest, FilePermissionsNamed) {
+  const uint32_t kTestSize = 1 << 8;
+
+  SharedMemory shared_memory;
+  SharedMemoryCreateOptions options;
+  options.size = kTestSize;
+#if defined(OS_MACOSX) && !defined(OS_IOS)
+  // The Mach functionality is tested in shared_memory_mac_unittest.cc.
+  options.type = SharedMemoryHandle::POSIX;
+#endif
+
+  // Set a file mode creation mask that gives all permissions.
+  ScopedUmaskSetter permissive_mask(S_IWGRP | S_IWOTH);
+
+  EXPECT_TRUE(shared_memory.Create(options));
+
+  int fd = SharedMemory::GetFdFromSharedMemoryHandle(shared_memory.handle());
+  struct stat shm_stat;
+  EXPECT_EQ(0, fstat(fd, &shm_stat));
+  // Neither the group, nor others should have been able to open the shared
+  // memory file while its name existed.
+  EXPECT_FALSE(shm_stat.st_mode & S_IRWXO);
+  EXPECT_FALSE(shm_stat.st_mode & S_IRWXG);
+}
+#endif  // !defined(OS_ANDROID)
+
+#endif  // defined(OS_POSIX)
+
+// Map() will return addresses which are aligned to the platform page size;
+// this varies from platform to platform, though.  Since we'd like to
+// advertise a minimum alignment that callers can count on, test for it here.
+TEST(SharedMemoryTest, MapMinimumAlignment) {
+  static const int kDataSize = 8192;
+
+  SharedMemory shared_memory;
+  ASSERT_TRUE(shared_memory.CreateAndMapAnonymous(kDataSize));
+  EXPECT_EQ(0U, reinterpret_cast<uintptr_t>(
+      shared_memory.memory()) & (SharedMemory::MAP_MINIMUM_ALIGNMENT - 1));
+  shared_memory.Close();
+}
+
+#if defined(OS_WIN)
+TEST(SharedMemoryTest, UnsafeImageSection) {
+  const char kTestSectionName[] = "UnsafeImageSection";
+  wchar_t path[MAX_PATH];
+  EXPECT_GT(::GetModuleFileName(nullptr, path, arraysize(path)), 0U);
+
+  // Map the current executable image to save us creating a new PE file on disk.
+  base::win::ScopedHandle file_handle(
+      ::CreateFile(path, GENERIC_READ, 0, nullptr, OPEN_EXISTING, 0, nullptr));
+  EXPECT_TRUE(file_handle.IsValid());
+  base::win::ScopedHandle section_handle(
+      ::CreateFileMappingA(file_handle.Get(), nullptr,
+                           PAGE_READONLY | SEC_IMAGE, 0, 0, kTestSectionName));
+  EXPECT_TRUE(section_handle.IsValid());
+
+  // Check direct opening by name, from handle and duplicated from handle.
+  SharedMemory shared_memory_open;
+  EXPECT_TRUE(shared_memory_open.Open(kTestSectionName, true));
+  EXPECT_FALSE(shared_memory_open.Map(1));
+  EXPECT_EQ(nullptr, shared_memory_open.memory());
+
+  SharedMemory shared_memory_handle_dup(
+      SharedMemoryHandle(section_handle.Get(), ::GetCurrentProcessId()), true);
+  EXPECT_FALSE(shared_memory_handle_dup.Map(1));
+  EXPECT_EQ(nullptr, shared_memory_handle_dup.memory());
+
+  SharedMemory shared_memory_handle_local(
+      SharedMemoryHandle(section_handle.Take(), ::GetCurrentProcessId()), true);
+  EXPECT_FALSE(shared_memory_handle_local.Map(1));
+  EXPECT_EQ(nullptr, shared_memory_handle_local.memory());
+
+  // Check that a handle without SECTION_QUERY also can't be mapped as it can't
+  // be checked.
+  SharedMemory shared_memory_handle_dummy;
+  SharedMemoryCreateOptions options;
+  options.size = 0x1000;
+  EXPECT_TRUE(shared_memory_handle_dummy.Create(options));
+  HANDLE handle_no_query;
+  EXPECT_TRUE(::DuplicateHandle(
+      ::GetCurrentProcess(), shared_memory_handle_dummy.handle().GetHandle(),
+      ::GetCurrentProcess(), &handle_no_query, FILE_MAP_READ, FALSE, 0));
+  SharedMemory shared_memory_handle_no_query(
+      SharedMemoryHandle(handle_no_query, ::GetCurrentProcessId()), true);
+  EXPECT_FALSE(shared_memory_handle_no_query.Map(1));
+  EXPECT_EQ(nullptr, shared_memory_handle_no_query.memory());
+}
+#endif  // defined(OS_WIN)
+
+// iOS does not allow multiple processes.
+// Android ashmem does not support named shared memory.
+// Mac SharedMemory does not support named shared memory. crbug.com/345734
+#if !defined(OS_IOS) && !defined(OS_ANDROID) && !defined(OS_MACOSX)
+// On POSIX it is especially important we test shmem across processes,
+// not just across threads.  But the test is enabled on all platforms
+// that support named shared memory and multiple processes.
+class SharedMemoryProcessTest : public MultiProcessTest {
+ public:
+  static void CleanUp() {
+    SharedMemory memory;
+    memory.Delete(s_test_name_);
+  }
+
+  static int TaskTestMain() {
+    int errors = 0;
+    SharedMemory memory;
+    bool rv = memory.CreateNamedDeprecated(s_test_name_, true, s_data_size_);
+    EXPECT_TRUE(rv);
+    if (rv != true)
+      errors++;
+    rv = memory.Map(s_data_size_);
+    EXPECT_TRUE(rv);
+    if (rv != true)
+      errors++;
+    int* ptr = static_cast<int*>(memory.memory());
+
+    // This runs concurrently in multiple processes. Writes need to be atomic.
+    subtle::Barrier_AtomicIncrement(ptr, 1);
+    memory.Close();
+    return errors;
+  }
+
+  static const char s_test_name_[];
+  static const uint32_t s_data_size_;
+};
+
+const char SharedMemoryProcessTest::s_test_name_[] = "MPMem";
+const uint32_t SharedMemoryProcessTest::s_data_size_ = 1024;
+
+TEST_F(SharedMemoryProcessTest, SharedMemoryAcrossProcesses) {
+  const int kNumTasks = 5;
+
+  SharedMemoryProcessTest::CleanUp();
+
+  // Create a shared memory region. Set the first word to 0.
+  SharedMemory memory;
+  bool rv = memory.CreateNamedDeprecated(s_test_name_, true, s_data_size_);
+  ASSERT_TRUE(rv);
+  rv = memory.Map(s_data_size_);
+  ASSERT_TRUE(rv);
+  int* ptr = static_cast<int*>(memory.memory());
+  *ptr = 0;
+
+  // Start |kNumTasks| processes, each of which atomically increments the first
+  // word by 1.
+  Process processes[kNumTasks];
+  for (int index = 0; index < kNumTasks; ++index) {
+    processes[index] = SpawnChild("SharedMemoryTestMain");
+    ASSERT_TRUE(processes[index].IsValid());
+  }
+
+  // Check that each process exited correctly.
+  int exit_code = 0;
+  for (int index = 0; index < kNumTasks; ++index) {
+    EXPECT_TRUE(processes[index].WaitForExit(&exit_code));
+    EXPECT_EQ(0, exit_code);
+  }
+
+  // Check that the shared memory region reflects |kNumTasks| increments.
+  ASSERT_EQ(kNumTasks, *ptr);
+
+  memory.Close();
+  SharedMemoryProcessTest::CleanUp();
+}
+
+MULTIPROCESS_TEST_MAIN(SharedMemoryTestMain) {
+  return SharedMemoryProcessTest::TaskTestMain();
+}
+#endif  // !defined(OS_IOS) && !defined(OS_ANDROID) && !defined(OS_MACOSX)
+
+}  // namespace base
diff --git a/base/memory/weak_ptr.cc b/base/memory/weak_ptr.cc
index d9ce86a..16d3dff 100644
--- a/base/memory/weak_ptr.cc
+++ b/base/memory/weak_ptr.cc
@@ -34,6 +34,8 @@
 WeakReference::WeakReference() {
 }
 
+WeakReference::WeakReference(const WeakReference& other) = default;
+
 WeakReference::WeakReference(const Flag* flag) : flag_(flag) {
 }
 
diff --git a/base/memory/weak_ptr.h b/base/memory/weak_ptr.h
index 33d1e47..006e1fd 100644
--- a/base/memory/weak_ptr.h
+++ b/base/memory/weak_ptr.h
@@ -70,12 +70,13 @@
 #ifndef BASE_MEMORY_WEAK_PTR_H_
 #define BASE_MEMORY_WEAK_PTR_H_
 
+#include <type_traits>
+
 #include "base/base_export.h"
 #include "base/logging.h"
 #include "base/macros.h"
 #include "base/memory/ref_counted.h"
 #include "base/sequence_checker.h"
-#include "base/template_util.h"
 
 namespace base {
 
@@ -107,6 +108,7 @@
   };
 
   WeakReference();
+  WeakReference(const WeakReference& other);
   explicit WeakReference(const Flag* flag);
   ~WeakReference();
 
@@ -159,10 +161,9 @@
   // function that makes calling this easier.
   template<typename Derived>
   static WeakPtr<Derived> StaticAsWeakPtr(Derived* t) {
-    typedef
-        is_convertible<Derived, internal::SupportsWeakPtrBase&> convertible;
-    static_assert(convertible::value,
-                  "AsWeakPtr argument must inherit from SupportsWeakPtr");
+    static_assert(
+        std::is_base_of<internal::SupportsWeakPtrBase, Derived>::value,
+        "AsWeakPtr argument must inherit from SupportsWeakPtr");
     return AsWeakPtrImpl<Derived>(t, *t);
   }
 
@@ -218,27 +219,38 @@
     return get();
   }
 
-  // Allow WeakPtr<element_type> to be used in boolean expressions, but not
-  // implicitly convertible to a real bool (which is dangerous).
+  void reset() {
+    ref_ = internal::WeakReference();
+    ptr_ = NULL;
+  }
+
+  // Implement "Safe Bool Idiom"
+  // https://en.wikibooks.org/wiki/More_C%2B%2B_Idioms/Safe_bool
   //
-  // Note that this trick is only safe when the == and != operators
-  // are declared explicitly, as otherwise "weak_ptr1 == weak_ptr2"
-  // will compile but do the wrong thing (i.e., convert to Testable
-  // and then do the comparison).
+  // Allow WeakPtr<element_type> to be used in boolean expressions such as
+  //   if (weak_ptr_instance)
+  // But do not become convertible to a real bool (which is dangerous).
+  //   Implementation requires:
+  //     typedef Testable
+  //     operator Testable() const
+  //     operator==
+  //     operator!=
+  //
+  // == and != operators must be declared explicitly or disallowed, as
+  // otherwise "ptr1 == ptr2" will compile but do the wrong thing (i.e.,
+  // convert to Testable and then do the comparison).
+  //
+  // C++11 provides "explicit operator bool()"; however, it is currently
+  // banned due to MSVS2013. https://chromium-cpp.appspot.com/#core-blacklist
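+  //
+  // Usage sketch (illustrative; Foo and DoWork are hypothetical stand-ins):
+  //   WeakPtr<Foo> ptr = foo->AsWeakPtr();
+  //   if (ptr)            // OK: contextual conversion via Testable.
+  //     ptr->DoWork();
+  //   int n = ptr;        // Does not compile: no conversion to int.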
  private:
   typedef T* WeakPtr::*Testable;
 
  public:
   operator Testable() const { return get() ? &WeakPtr::ptr_ : NULL; }
 
-  void reset() {
-    ref_ = internal::WeakReference();
-    ptr_ = NULL;
-  }
-
  private:
-  // Explicitly declare comparison operators as required by the bool
-  // trick, but keep them private.
+  // Explicitly declare comparison operators as required by the "Safe Bool
+  // Idiom", but keep them private.
   template <class U> bool operator==(WeakPtr<U> const&) const;
   template <class U> bool operator!=(WeakPtr<U> const&) const;
 
diff --git a/base/memory/weak_ptr_unittest.cc b/base/memory/weak_ptr_unittest.cc
index 2c475f7..d4fb969 100644
--- a/base/memory/weak_ptr_unittest.cc
+++ b/base/memory/weak_ptr_unittest.cc
@@ -260,6 +260,37 @@
   EXPECT_EQ(&target, ptr.get());
 }
 
+TEST(WeakPtrFactoryTest, BooleanTesting) {
+  int data;
+  WeakPtrFactory<int> factory(&data);
+
+  WeakPtr<int> ptr_to_an_instance = factory.GetWeakPtr();
+  EXPECT_TRUE(ptr_to_an_instance);
+  EXPECT_FALSE(!ptr_to_an_instance);
+
+  if (ptr_to_an_instance) {
+  } else {
+    ADD_FAILURE() << "Pointer to an instance should result in true.";
+  }
+
+  if (!ptr_to_an_instance) {  // check for operator!().
+    ADD_FAILURE() << "Pointer to an instance should result in !x being false.";
+  }
+
+  WeakPtr<int> null_ptr;
+  EXPECT_FALSE(null_ptr);
+  EXPECT_TRUE(!null_ptr);
+
+  if (null_ptr) {
+    ADD_FAILURE() << "Null pointer should result in false.";
+  }
+
+  if (!null_ptr) {  // check for operator!().
+  } else {
+    ADD_FAILURE() << "Null pointer should result in !x being true.";
+  }
+}
+
 TEST(WeakPtrTest, InvalidateWeakPtrs) {
   int data;
   WeakPtrFactory<int> factory(&data);
diff --git a/base/memory/weak_ptr_unittest.nc b/base/memory/weak_ptr_unittest.nc
index bad1c97..32deca9 100644
--- a/base/memory/weak_ptr_unittest.nc
+++ b/base/memory/weak_ptr_unittest.nc
@@ -129,7 +129,7 @@
   WeakPtr<Unrelated> ptr = AsWeakPtr(&f);
 }
 
-#elif defined(NCTEST_AMBIGUOUS_ANCESTORS)  // [r"fatal error: ambiguous conversion from derived class 'base::MultiplyDerivedProducer' to base class 'base::internal::SupportsWeakPtrBase':"]
+#elif defined(NCTEST_AMBIGUOUS_ANCESTORS)  // [r"fatal error: member 'AsWeakPtr' found in multiple base classes of different types"]
 
 void WontCompile() {
   MultiplyDerivedProducer f;
diff --git a/base/message_loop/message_loop.cc b/base/message_loop/message_loop.cc
index e2b8bca..eeed76a 100644
--- a/base/message_loop/message_loop.cc
+++ b/base/message_loop/message_loop.cc
@@ -397,7 +397,7 @@
           new internal::MessageLoopTaskRunner(incoming_task_queue_)),
       task_runner_(unbound_task_runner_) {
   // If type is TYPE_CUSTOM non-null pump_factory must be given.
-  DCHECK_EQ(type_ == TYPE_CUSTOM, !pump_factory_.is_null());
+  DCHECK(type_ != TYPE_CUSTOM || !pump_factory_.is_null());
 }
 
 void MessageLoop::BindToCurrentThread() {
@@ -435,17 +435,7 @@
 
 void MessageLoop::RunHandler() {
   DCHECK_EQ(this, current());
-
   StartHistogrammer();
-
-#if defined(OS_WIN)
-  if (run_loop_->dispatcher_ && type() == TYPE_UI) {
-    static_cast<MessagePumpForUI*>(pump_.get())->
-        RunWithDispatcher(this, run_loop_->dispatcher_);
-    return;
-  }
-#endif
-
   pump_->Run(this);
 }
 
@@ -683,6 +673,10 @@
 //------------------------------------------------------------------------------
 // MessageLoopForUI
 
+MessageLoopForUI::MessageLoopForUI(scoped_ptr<MessagePump> pump)
+    : MessageLoop(TYPE_UI, Bind(&ReturnPump, Passed(&pump))) {
+}
+
 #if defined(OS_ANDROID)
 void MessageLoopForUI::Start() {
   // No Histogram support for UI message loop as it is managed by Java side
diff --git a/base/message_loop/message_loop.h b/base/message_loop/message_loop.h
index d827fb1..c569aae 100644
--- a/base/message_loop/message_loop.h
+++ b/base/message_loop/message_loop.h
@@ -399,6 +399,19 @@
  protected:
   scoped_ptr<MessagePump> pump_;
 
+  using MessagePumpFactoryCallback = Callback<scoped_ptr<MessagePump>()>;
+
+  // Common protected constructor. Other constructors delegate the
+  // initialization to this constructor.
+  // A subclass can invoke this constructor to create a message loop of a
+  // specific type with a custom pump. The implementation does not call
+  // BindToCurrentThread. If this constructor is invoked directly by a
+  // subclass, then the subclass must subsequently bind the message loop.
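+  // For an example of this pattern, see MessageLoopForUI's pump-taking
+  // constructor, which delegates here with TYPE_UI and a factory callback
+  // that simply returns the supplied pump.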
+  MessageLoop(Type type, MessagePumpFactoryCallback pump_factory);
+
+  // Configure various members and bind this message loop to the current thread.
+  void BindToCurrentThread();
+
  private:
   friend class RunLoop;
   friend class internal::IncomingTaskQueue;
@@ -406,8 +419,6 @@
   friend class Thread;
   FRIEND_TEST_ALL_PREFIXES(MessageLoopTest, DeleteUnboundLoop);
 
-  using MessagePumpFactoryCallback = Callback<scoped_ptr<MessagePump>()>;
-
   // Creates a MessageLoop without binding to a thread.
   // If |type| is TYPE_CUSTOM non-null |pump_factory| must be also given
   // to create a message pump for this message loop.  Otherwise a default
@@ -423,13 +434,6 @@
       Type type,
       MessagePumpFactoryCallback pump_factory);
 
-  // Common private constructor. Other constructors delegate the initialization
-  // to this constructor.
-  MessageLoop(Type type, MessagePumpFactoryCallback pump_factory);
-
-  // Configure various members and bind this message loop to the current thread.
-  void BindToCurrentThread();
-
   // Sets the ThreadTaskRunnerHandle for the current thread to point to the
   // task runner for this message loop.
   void SetThreadTaskRunnerHandle();
@@ -563,17 +567,19 @@
   MessageLoopForUI() : MessageLoop(TYPE_UI) {
   }
 
+  explicit MessageLoopForUI(scoped_ptr<MessagePump> pump);
+
   // Returns the MessageLoopForUI of the current thread.
   static MessageLoopForUI* current() {
     MessageLoop* loop = MessageLoop::current();
     DCHECK(loop);
-    DCHECK_EQ(MessageLoop::TYPE_UI, loop->type());
+    DCHECK(loop->IsType(MessageLoop::TYPE_UI));
     return static_cast<MessageLoopForUI*>(loop);
   }
 
   static bool IsCurrent() {
     MessageLoop* loop = MessageLoop::current();
-    return loop && loop->type() == MessageLoop::TYPE_UI;
+    return loop && loop->IsType(MessageLoop::TYPE_UI);
   }
 
 #if defined(OS_IOS)
diff --git a/base/message_loop/message_loop_unittest.cc b/base/message_loop/message_loop_unittest.cc
index 1a3a925..a06ba91 100644
--- a/base/message_loop/message_loop_unittest.cc
+++ b/base/message_loop/message_loop_unittest.cc
@@ -27,7 +27,6 @@
 #include "testing/gtest/include/gtest/gtest.h"
 
 #if defined(OS_WIN)
-#include "base/message_loop/message_pump_dispatcher.h"
 #include "base/message_loop/message_pump_win.h"
 #include "base/process/memory.h"
 #include "base/strings/string16.h"
@@ -426,70 +425,6 @@
 
 #if defined(OS_WIN)
 
-class DispatcherImpl : public MessagePumpDispatcher {
- public:
-  DispatcherImpl() : dispatch_count_(0) {}
-
-  uint32_t Dispatch(const NativeEvent& msg) override {
-    ::TranslateMessage(&msg);
-    ::DispatchMessage(&msg);
-    // Do not count WM_TIMER since it is not what we post and it will cause
-    // flakiness.
-    if (msg.message != WM_TIMER)
-      ++dispatch_count_;
-    // We treat WM_LBUTTONUP as the last message.
-    return msg.message == WM_LBUTTONUP ? POST_DISPATCH_QUIT_LOOP
-                                       : POST_DISPATCH_NONE;
-  }
-
-  int dispatch_count_;
-};
-
-void MouseDownUp() {
-  PostMessage(NULL, WM_LBUTTONDOWN, 0, 0);
-  PostMessage(NULL, WM_LBUTTONUP, 'A', 0);
-}
-
-void RunTest_Dispatcher(MessageLoop::Type message_loop_type) {
-  MessageLoop loop(message_loop_type);
-
-  MessageLoop::current()->PostDelayedTask(
-      FROM_HERE,
-      Bind(&MouseDownUp),
-      TimeDelta::FromMilliseconds(100));
-  DispatcherImpl dispatcher;
-  RunLoop run_loop(&dispatcher);
-  run_loop.Run();
-  ASSERT_EQ(2, dispatcher.dispatch_count_);
-}
-
-LRESULT CALLBACK MsgFilterProc(int code, WPARAM wparam, LPARAM lparam) {
-  if (code == MessagePumpForUI::kMessageFilterCode) {
-    MSG* msg = reinterpret_cast<MSG*>(lparam);
-    if (msg->message == WM_LBUTTONDOWN)
-      return TRUE;
-  }
-  return FALSE;
-}
-
-void RunTest_DispatcherWithMessageHook(MessageLoop::Type message_loop_type) {
-  MessageLoop loop(message_loop_type);
-
-  MessageLoop::current()->PostDelayedTask(
-      FROM_HERE,
-      Bind(&MouseDownUp),
-      TimeDelta::FromMilliseconds(100));
-  HHOOK msg_hook = SetWindowsHookEx(WH_MSGFILTER,
-                                    MsgFilterProc,
-                                    NULL,
-                                    GetCurrentThreadId());
-  DispatcherImpl dispatcher;
-  RunLoop run_loop(&dispatcher);
-  run_loop.Run();
-  ASSERT_EQ(1, dispatcher.dispatch_count_);
-  UnhookWindowsHookEx(msg_hook);
-}
-
 class TestIOHandler : public MessageLoopForIO::IOHandler {
  public:
   TestIOHandler(const wchar_t* name, HANDLE signal, bool wait);
@@ -711,16 +646,6 @@
 }
 
 #if defined(OS_WIN)
-TEST(MessageLoopTest, Dispatcher) {
-  // This test requires a UI loop
-  RunTest_Dispatcher(MessageLoop::TYPE_UI);
-}
-
-TEST(MessageLoopTest, DispatcherWithMessageHook) {
-  // This test requires a UI loop
-  RunTest_DispatcherWithMessageHook(MessageLoop::TYPE_UI);
-}
-
 TEST(MessageLoopTest, IOHandler) {
   RunTest_IOHandler();
 }
diff --git a/base/message_loop/message_pump_dispatcher.h b/base/message_loop/message_pump_dispatcher.h
deleted file mode 100644
index 5b1bd55..0000000
--- a/base/message_loop/message_pump_dispatcher.h
+++ /dev/null
@@ -1,43 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef BASE_MESSAGE_LOOP_MESSAGE_PUMP_DISPATCHER_H_
-#define BASE_MESSAGE_LOOP_MESSAGE_PUMP_DISPATCHER_H_
-
-#include <stdint.h>
-
-#include "base/base_export.h"
-#include "base/event_types.h"
-
-namespace base {
-
-// Dispatcher is used during a nested invocation of Run to dispatch events when
-// |RunLoop(dispatcher).Run()| is used.  If |RunLoop().Run()| is invoked,
-// MessageLoop does not dispatch events (or invoke TranslateMessage), rather
-// every message is passed to Dispatcher's Dispatch method for dispatch. It is
-// up to the Dispatcher whether or not to dispatch the event.
-//
-// The nested loop is exited by either posting a quit, or setting the
-// POST_DISPATCH_QUIT_LOOP flag on the return value from Dispatch.
-class BASE_EXPORT MessagePumpDispatcher {
- public:
-  enum PostDispatchAction {
-    POST_DISPATCH_NONE = 0x0,
-    POST_DISPATCH_QUIT_LOOP = 0x1,
-    POST_DISPATCH_PERFORM_DEFAULT = 0x2,
-  };
-
-  virtual ~MessagePumpDispatcher() {}
-
-  // Dispatches the event. The return value can have more than one
-  // PostDispatchAction flags OR'ed together. If POST_DISPATCH_PERFORM_DEFAULT
-  // is set in the returned value, then the message-pump performs the default
-  // action. If POST_DISPATCH_QUIT_LOOP is set, in the return value, then the
-  // nested loop exits immediately.
-  virtual uint32_t Dispatch(const NativeEvent& event) = 0;
-};
-
-}  // namespace base
-
-#endif  // BASE_MESSAGE_LOOP_MESSAGE_PUMP_DISPATCHER_H_
diff --git a/base/message_loop/message_pump_win.h b/base/message_loop/message_pump_win.h
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/base/message_loop/message_pump_win.h
diff --git a/base/metrics/field_trial.cc b/base/metrics/field_trial.cc
index b417b05..78862fa 100644
--- a/base/metrics/field_trial.cc
+++ b/base/metrics/field_trial.cc
@@ -105,6 +105,16 @@
   return true;
 }
 
+void CheckTrialGroup(const std::string& trial_name,
+                     const std::string& trial_group,
+                     std::map<std::string, std::string>* seen_states) {
+  if (ContainsKey(*seen_states, trial_name)) {
+    CHECK_EQ((*seen_states)[trial_name], trial_group) << trial_name;
+  } else {
+    (*seen_states)[trial_name] = trial_group;
+  }
+}
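+// For example (illustrative): after CheckTrialGroup("Abc", "def", &states),
+// a later CheckTrialGroup("Abc", "ghi", &states) would fail the CHECK_EQ,
+// flagging an inconsistent group assignment for the same trial.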
+
 }  // namespace
 
 // statics
@@ -122,6 +132,8 @@
 
 FieldTrial::State::State() : activated(false) {}
 
+FieldTrial::State::State(const State& other) = default;
+
 FieldTrial::State::~State() {}
 
 void FieldTrial::Disable() {
@@ -303,7 +315,8 @@
     : entropy_provider_(entropy_provider),
       observer_list_(new ObserverListThreadSafe<FieldTrialList::Observer>(
           ObserverListBase<FieldTrialList::Observer>::NOTIFY_EXISTING_ONLY)) {
-  DCHECK(!global_);
+  // TODO(asvitkine): Turn into a DCHECK after http://crbug.com/359406 is fixed.
+  CHECK(!global_);
   DCHECK(!used_without_global_);
   global_ = this;
 
@@ -475,6 +488,9 @@
     output->append(1, kPersistentStringSeparator);
     trial.group_name.AppendToString(output);
     output->append(1, kPersistentStringSeparator);
+
+    CheckTrialGroup(trial.trial_name.as_string(), trial.group_name.as_string(),
+                    &global_->seen_states_);
   }
 }
 
@@ -515,7 +531,6 @@
 // static
 bool FieldTrialList::CreateTrialsFromString(
     const std::string& trials_string,
-    FieldTrialActivationMode mode,
     const std::set<std::string>& ignored_trial_names) {
   DCHECK(global_);
   if (trials_string.empty() || !global_)
@@ -535,7 +550,7 @@
     FieldTrial* trial = CreateFieldTrial(trial_name, group_name);
     if (!trial)
       return false;
-    if (mode == ACTIVATE_TRIALS || entry.activated) {
+    if (entry.activated) {
       // Call |group()| to mark the trial as "used" and notify observers, if
       // any. This is useful to ensure that field trials created in child
       // processes are properly reported in crash reports.
@@ -600,6 +615,11 @@
   if (!field_trial->enable_field_trial_)
     return;
 
+  {
+    AutoLock auto_lock(global_->lock_);
+    CheckTrialGroup(field_trial->trial_name(),
+                    field_trial->group_name_internal(), &global_->seen_states_);
+  }
   global_->observer_list_->Notify(
       FROM_HERE, &FieldTrialList::Observer::OnFieldTrialGroupFinalized,
       field_trial->trial_name(), field_trial->group_name_internal());
@@ -638,7 +658,7 @@
     return;
   }
   AutoLock auto_lock(global_->lock_);
-  DCHECK(!global_->PreLockedFind(trial->trial_name()));
+  CHECK(!global_->PreLockedFind(trial->trial_name())) << trial->trial_name();
   trial->AddRef();
   trial->SetTrialRegistered();
   global_->registered_[trial->trial_name()] = trial;
diff --git a/base/metrics/field_trial.h b/base/metrics/field_trial.h
index 7bfc1de..95cf504 100644
--- a/base/metrics/field_trial.h
+++ b/base/metrics/field_trial.h
@@ -119,6 +119,7 @@
     bool activated;
 
     State();
+    State(const State& other);
     ~State();
   };
 
@@ -320,14 +321,6 @@
 // Only one instance of this class exists.
 class BASE_EXPORT FieldTrialList {
  public:
-  // Specifies whether field trials should be activated (marked as "used"), when
-  // created using |CreateTrialsFromString()|. Has no effect on trials that are
-  // prefixed with |kActivationMarker|, which will always be activated."
-  enum FieldTrialActivationMode {
-    DONT_ACTIVATE_TRIALS,
-    ACTIVATE_TRIALS,
-  };
-
   // Year that is guaranteed to not be expired when instantiating a field trial
   // via |FactoryGetFieldTrial()|.  Set to two years from the build date.
   static int kNoExpirationYear;
@@ -457,14 +450,12 @@
   // for each trial, force them to have the same group string. This is commonly
   // used in a non-browser process, to carry randomly selected state in a
   // browser process into this non-browser process, but could also be invoked
-  // through a command line argument to the browser process. The created field
-  // trials are all marked as "used" for the purposes of active trial reporting
-  // if |mode| is ACTIVATE_TRIALS, otherwise each trial will be marked as "used"
-  // if it is prefixed with |kActivationMarker|. Trial names in
+  // through a command line argument to the browser process. Created field
+  // trials will be marked "used" for the purposes of active trial reporting
+  // if they are prefixed with |kActivationMarker|. Trial names in
   // |ignored_trial_names| are ignored when parsing |trials_string|.
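+  // Example (illustrative): "*Abc/def/Xyz/zyx/" creates trial "Abc" with
+  // group "def" (activated, due to the '*' marker) and trial "Xyz" with
+  // group "zyx" (not activated).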
   static bool CreateTrialsFromString(
       const std::string& trials_string,
-      FieldTrialActivationMode mode,
       const std::set<std::string>& ignored_trial_names);
 
   // Create a FieldTrial with the given |name| and using 100% probability for
@@ -519,6 +510,8 @@
   base::Lock lock_;
   RegistrationMap registered_;
 
+  std::map<std::string, std::string> seen_states_;
+
   // Entropy provider to be used for one-time randomized field trials. If NULL,
   // one-time randomization is not supported.
   scoped_ptr<const FieldTrial::EntropyProvider> entropy_provider_;
diff --git a/base/metrics/field_trial_unittest.cc b/base/metrics/field_trial_unittest.cc
index 555d7fa..00f351f 100644
--- a/base/metrics/field_trial_unittest.cc
+++ b/base/metrics/field_trial_unittest.cc
@@ -504,7 +504,6 @@
   ASSERT_FALSE(FieldTrialList::TrialExists("xxx"));
 
   FieldTrialList::CreateTrialsFromString("Some_name/Winner/xxx/yyyy/",
-                                         FieldTrialList::DONT_ACTIVATE_TRIALS,
                                          std::set<std::string>());
 
   FieldTrial* trial = FieldTrialList::Find("Some_name");
@@ -519,9 +518,8 @@
 }
 
 TEST_F(FieldTrialTest, RestoreNotEndingWithSlash) {
-  EXPECT_TRUE(FieldTrialList::CreateTrialsFromString(
-      "tname/gname", FieldTrialList::DONT_ACTIVATE_TRIALS,
-      std::set<std::string>()));
+  EXPECT_TRUE(FieldTrialList::CreateTrialsFromString("tname/gname",
+                                                     std::set<std::string>()));
 
   FieldTrial* trial = FieldTrialList::Find("tname");
   ASSERT_NE(static_cast<FieldTrial*>(NULL), trial);
@@ -530,21 +528,16 @@
 }
 
 TEST_F(FieldTrialTest, BogusRestore) {
-  EXPECT_FALSE(FieldTrialList::CreateTrialsFromString(
-      "MissingSlash", FieldTrialList::DONT_ACTIVATE_TRIALS,
-      std::set<std::string>()));
-  EXPECT_FALSE(FieldTrialList::CreateTrialsFromString(
-      "MissingGroupName/", FieldTrialList::DONT_ACTIVATE_TRIALS,
-      std::set<std::string>()));
-  EXPECT_FALSE(FieldTrialList::CreateTrialsFromString(
-      "noname, only group/", FieldTrialList::DONT_ACTIVATE_TRIALS,
-      std::set<std::string>()));
-  EXPECT_FALSE(FieldTrialList::CreateTrialsFromString(
-      "/emptyname", FieldTrialList::DONT_ACTIVATE_TRIALS,
-      std::set<std::string>()));
-  EXPECT_FALSE(FieldTrialList::CreateTrialsFromString(
-      "*/emptyname", FieldTrialList::DONT_ACTIVATE_TRIALS,
-      std::set<std::string>()));
+  EXPECT_FALSE(FieldTrialList::CreateTrialsFromString("MissingSlash",
+                                                      std::set<std::string>()));
+  EXPECT_FALSE(FieldTrialList::CreateTrialsFromString("MissingGroupName/",
+                                                      std::set<std::string>()));
+  EXPECT_FALSE(FieldTrialList::CreateTrialsFromString("noname, only group/",
+                                                      std::set<std::string>()));
+  EXPECT_FALSE(FieldTrialList::CreateTrialsFromString("/emptyname",
+                                                      std::set<std::string>()));
+  EXPECT_FALSE(FieldTrialList::CreateTrialsFromString("*/emptyname",
+                                                      std::set<std::string>()));
 }
 
 TEST_F(FieldTrialTest, DuplicateRestore) {
@@ -558,38 +551,19 @@
   EXPECT_EQ("Some name/Winner/", save_string);
 
   // It is OK if we redundantly specify a winner.
-  EXPECT_TRUE(FieldTrialList::CreateTrialsFromString(
-      save_string, FieldTrialList::DONT_ACTIVATE_TRIALS,
-      std::set<std::string>()));
+  EXPECT_TRUE(FieldTrialList::CreateTrialsFromString(save_string,
+                                                     std::set<std::string>()));
 
   // But it is an error to try to change to a different winner.
-  EXPECT_FALSE(FieldTrialList::CreateTrialsFromString(
-      "Some name/Loser/", FieldTrialList::DONT_ACTIVATE_TRIALS,
-      std::set<std::string>()));
-}
-
-TEST_F(FieldTrialTest, CreateTrialsFromStringActive) {
-  ASSERT_FALSE(FieldTrialList::TrialExists("Abc"));
-  ASSERT_FALSE(FieldTrialList::TrialExists("Xyz"));
-  ASSERT_TRUE(FieldTrialList::CreateTrialsFromString(
-      "Abc/def/Xyz/zyx/", FieldTrialList::ACTIVATE_TRIALS,
-      std::set<std::string>()));
-
-  FieldTrial::ActiveGroups active_groups;
-  FieldTrialList::GetActiveFieldTrialGroups(&active_groups);
-  ASSERT_EQ(2U, active_groups.size());
-  EXPECT_EQ("Abc", active_groups[0].trial_name);
-  EXPECT_EQ("def", active_groups[0].group_name);
-  EXPECT_EQ("Xyz", active_groups[1].trial_name);
-  EXPECT_EQ("zyx", active_groups[1].group_name);
+  EXPECT_FALSE(FieldTrialList::CreateTrialsFromString("Some name/Loser/",
+                                                      std::set<std::string>()));
 }
 
 TEST_F(FieldTrialTest, CreateTrialsFromStringNotActive) {
   ASSERT_FALSE(FieldTrialList::TrialExists("Abc"));
   ASSERT_FALSE(FieldTrialList::TrialExists("Xyz"));
-  ASSERT_TRUE(FieldTrialList::CreateTrialsFromString(
-      "Abc/def/Xyz/zyx/", FieldTrialList::DONT_ACTIVATE_TRIALS,
-      std::set<std::string>()));
+  ASSERT_TRUE(FieldTrialList::CreateTrialsFromString("Abc/def/Xyz/zyx/",
+                                                     std::set<std::string>()));
 
   FieldTrial::ActiveGroups active_groups;
   FieldTrialList::GetActiveFieldTrialGroups(&active_groups);
@@ -612,8 +586,7 @@
   ASSERT_FALSE(FieldTrialList::TrialExists("def"));
   ASSERT_FALSE(FieldTrialList::TrialExists("Xyz"));
   ASSERT_TRUE(FieldTrialList::CreateTrialsFromString(
-      "*Abc/cba/def/fed/*Xyz/zyx/", FieldTrialList::DONT_ACTIVATE_TRIALS,
-      std::set<std::string>()));
+      "*Abc/cba/def/fed/*Xyz/zyx/", std::set<std::string>()));
 
   FieldTrial::ActiveGroups active_groups;
   FieldTrialList::GetActiveFieldTrialGroups(&active_groups);
@@ -624,25 +597,12 @@
   EXPECT_EQ("zyx", active_groups[1].group_name);
 }
 
-TEST_F(FieldTrialTest, CreateTrialsFromStringActiveObserver) {
-  ASSERT_FALSE(FieldTrialList::TrialExists("Abc"));
-
-  TestFieldTrialObserver observer;
-  ASSERT_TRUE(FieldTrialList::CreateTrialsFromString(
-      "Abc/def/", FieldTrialList::ACTIVATE_TRIALS, std::set<std::string>()));
-
-  RunLoop().RunUntilIdle();
-  EXPECT_EQ("Abc", observer.trial_name());
-  EXPECT_EQ("def", observer.group_name());
-}
-
 TEST_F(FieldTrialTest, CreateTrialsFromStringNotActiveObserver) {
   ASSERT_FALSE(FieldTrialList::TrialExists("Abc"));
 
   TestFieldTrialObserver observer;
-  ASSERT_TRUE(FieldTrialList::CreateTrialsFromString(
-      "Abc/def/", FieldTrialList::DONT_ACTIVATE_TRIALS,
-      std::set<std::string>()));
+  ASSERT_TRUE(FieldTrialList::CreateTrialsFromString("Abc/def/",
+                                                     std::set<std::string>()));
   RunLoop().RunUntilIdle();
   // Observer shouldn't be notified.
   EXPECT_TRUE(observer.trial_name().empty());
@@ -673,7 +633,6 @@
       "Unaccepted2/Unaccepted2_name/"
       "Bar/Bar_name/"
       "Unaccepted3/Unaccepted3_name/",
-      FieldTrialList::DONT_ACTIVATE_TRIALS,
       ignored_trial_names);
 
   EXPECT_FALSE(FieldTrialList::TrialExists("Unaccepted1"));
@@ -1148,9 +1107,8 @@
 
   // Starting with a new blank FieldTrialList.
   FieldTrialList field_trial_list(NULL);
-  ASSERT_TRUE(field_trial_list.CreateTrialsFromString(
-      save_string, FieldTrialList::DONT_ACTIVATE_TRIALS,
-      std::set<std::string>()));
+  ASSERT_TRUE(field_trial_list.CreateTrialsFromString(save_string,
+                                                      std::set<std::string>()));
 
   FieldTrial::ActiveGroups active_groups;
   field_trial_list.GetActiveFieldTrialGroups(&active_groups);
diff --git a/base/metrics/histogram.cc b/base/metrics/histogram.cc
index 9b9f99d..713ca06 100644
--- a/base/metrics/histogram.cc
+++ b/base/metrics/histogram.cc
@@ -20,6 +20,8 @@
 #include "base/logging.h"
 #include "base/metrics/histogram_macros.h"
 #include "base/metrics/metrics_hashes.h"
+#include "base/metrics/persistent_histogram_allocator.h"
+#include "base/metrics/persistent_memory_allocator.h"
 #include "base/metrics/sample_vector.h"
 #include "base/metrics/statistics_recorder.h"
 #include "base/pickle.h"
@@ -37,13 +39,13 @@
                             int* flags,
                             int* declared_min,
                             int* declared_max,
-                            size_t* bucket_count,
+                            uint32_t* bucket_count,
                             uint32_t* range_checksum) {
   if (!iter->ReadString(histogram_name) ||
       !iter->ReadInt(flags) ||
       !iter->ReadInt(declared_min) ||
       !iter->ReadInt(declared_max) ||
-      !iter->ReadSizeT(bucket_count) ||
+      !iter->ReadUInt32(bucket_count) ||
       !iter->ReadUInt32(range_checksum)) {
     DLOG(ERROR) << "Pickle error decoding Histogram: " << *histogram_name;
     return false;
@@ -82,51 +84,178 @@
 typedef HistogramBase::Sample Sample;
 
 // static
-const size_t Histogram::kBucketCount_MAX = 16384u;
+const uint32_t Histogram::kBucketCount_MAX = 16384u;
 
-HistogramBase* Histogram::FactoryGet(const std::string& name,
-                                     Sample minimum,
-                                     Sample maximum,
-                                     size_t bucket_count,
-                                     int32_t flags) {
-  bool valid_arguments =
-      InspectConstructionArguments(name, &minimum, &maximum, &bucket_count);
-  DCHECK(valid_arguments);
+class Histogram::Factory {
+ public:
+  Factory(const std::string& name,
+          HistogramBase::Sample minimum,
+          HistogramBase::Sample maximum,
+          uint32_t bucket_count,
+          int32_t flags)
+    : Factory(name, HISTOGRAM, minimum, maximum, bucket_count, flags) {}
 
-  HistogramBase* histogram = StatisticsRecorder::FindHistogram(name);
-  if (!histogram) {
-    // To avoid racy destruction at shutdown, the following will be leaked.
-    BucketRanges* ranges = new BucketRanges(bucket_count + 1);
-    InitializeBucketRanges(minimum, maximum, ranges);
-    const BucketRanges* registered_ranges =
-        StatisticsRecorder::RegisterOrDeleteDuplicateRanges(ranges);
+  virtual ~Factory() = default;
 
-    Histogram* tentative_histogram =
-        new Histogram(name, minimum, maximum, registered_ranges);
+  // Create histogram based on construction parameters. Caller takes
+  // ownership of the returned object.
+  HistogramBase* Build();
 
-    tentative_histogram->SetFlags(flags);
-    histogram =
-        StatisticsRecorder::RegisterOrDeleteDuplicate(tentative_histogram);
+ protected:
+  Factory(const std::string& name,
+          HistogramType histogram_type,
+          HistogramBase::Sample minimum,
+          HistogramBase::Sample maximum,
+          uint32_t bucket_count,
+          int32_t flags)
+    : name_(name),
+      histogram_type_(histogram_type),
+      minimum_(minimum),
+      maximum_(maximum),
+      bucket_count_(bucket_count),
+      flags_(flags) {}
+
+  // Create a BucketRanges structure appropriate for this histogram.
+  virtual BucketRanges* CreateRanges() {
+    BucketRanges* ranges = new BucketRanges(bucket_count_ + 1);
+    Histogram::InitializeBucketRanges(minimum_, maximum_, ranges);
+    return ranges;
   }
 
-  DCHECK_EQ(HISTOGRAM, histogram->GetHistogramType());
-  if (!histogram->HasConstructionArguments(minimum, maximum, bucket_count)) {
+  // Allocate the correct Histogram object off the heap (in case persistent
+  // memory is not available).
+  virtual scoped_ptr<HistogramBase> HeapAlloc(const BucketRanges* ranges) {
+    return make_scoped_ptr(new Histogram(name_, minimum_, maximum_, ranges));
+  }
+
+  // Perform any required datafill on the just-created histogram.  If
+  // overridden, be sure to call the "super" version -- this method may not
+  // always remain empty.
+  virtual void FillHistogram(HistogramBase* /* histogram */) {}
+
+  // These values are protected (instead of private) because they need to
+  // be accessible to methods of sub-classes in order to avoid passing
+  // unnecessary parameters everywhere.
+  const std::string& name_;
+  const HistogramType histogram_type_;
+  HistogramBase::Sample minimum_;
+  HistogramBase::Sample maximum_;
+  uint32_t bucket_count_;
+  int32_t flags_;
+
+ private:
+  DISALLOW_COPY_AND_ASSIGN(Factory);
+};
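+// Illustrative subclass sketch (hypothetical, not part of this change): a
+// linear-histogram factory would only need to override CreateRanges():
+//
+//   class LinearFactory : public Histogram::Factory {
+//    protected:
+//     BucketRanges* CreateRanges() override {
+//       BucketRanges* ranges = new BucketRanges(bucket_count_ + 1);
+//       // Fill |ranges| with evenly spaced values in [minimum_, maximum_].
+//       return ranges;
+//     }
+//   };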
+
+HistogramBase* Histogram::Factory::Build() {
+  // Import histograms from known persistent storage. Histograms could have
+  // been added by other processes and they must be fetched and recognized
+  // locally in order to be found by FindHistograms() below. If the persistent
+  // memory segment is not shared between processes, this call does nothing.
+  PersistentHistogramAllocator::ImportGlobalHistograms();
+
+  HistogramBase* histogram = StatisticsRecorder::FindHistogram(name_);
+  if (!histogram) {
+    // To avoid racy destruction at shutdown, the following will be leaked.
+    const BucketRanges* created_ranges = CreateRanges();
+    const BucketRanges* registered_ranges =
+        StatisticsRecorder::RegisterOrDeleteDuplicateRanges(created_ranges);
+
+    // In most cases, the bucket-count, minimum, and maximum values are known
+    // when the code is written and so are passed in explicitly. In other
+    // cases (such as with a CustomHistogram), they are calculated dynamically
+    // at run-time. In the latter case, those ctor parameters are zero and
+    // the actual values are extracted from the BucketRanges returned by
+    // CreateRanges().
+    if (bucket_count_ == 0) {
+      bucket_count_ = static_cast<uint32_t>(registered_ranges->bucket_count());
+      minimum_ = registered_ranges->range(1);
+      maximum_ = registered_ranges->range(bucket_count_ - 1);
+    }
+
+    // Try to create the histogram using a "persistent" allocator. As of
+    // 2016-02-25, the availability of such is controlled by a base::Feature
+    // that is off by default. If the allocator doesn't exist or if
+    // allocating from it fails, code below will allocate the histogram from
+    // the process heap.
+    PersistentHistogramAllocator::Reference histogram_ref = 0;
+    scoped_ptr<HistogramBase> tentative_histogram;
+    PersistentHistogramAllocator* allocator =
+        PersistentHistogramAllocator::GetGlobalAllocator();
+    if (allocator) {
+      tentative_histogram = allocator->AllocateHistogram(
+          histogram_type_,
+          name_,
+          minimum_,
+          maximum_,
+          registered_ranges,
+          flags_,
+          &histogram_ref);
+    }
+
+    // Handle the case where no persistent allocator is present or the
+    // persistent allocation fails (perhaps because it is full).
+    if (!tentative_histogram) {
+      DCHECK(!histogram_ref);  // Should never have been set.
+      DCHECK(!allocator);  // Shouldn't have failed.
+      flags_ &= ~HistogramBase::kIsPersistent;
+      tentative_histogram = HeapAlloc(registered_ranges);
+      tentative_histogram->SetFlags(flags_);
+    }
+
+    FillHistogram(tentative_histogram.get());
+
+    // Register this histogram with the StatisticsRecorder. Keep a copy of
+    // the pointer value to tell later whether the locally created histogram
+    // was registered or deleted. The type is "void" because it could point
+    // to released memory after the following line.
+    const void* tentative_histogram_ptr = tentative_histogram.get();
+    histogram = StatisticsRecorder::RegisterOrDeleteDuplicate(
+        tentative_histogram.release());
+
+    // Persistent histograms need some follow-up processing.
+    if (histogram_ref) {
+      allocator->FinalizeHistogram(histogram_ref,
+                                   histogram == tentative_histogram_ptr);
+    }
+
+    // Update report on created histograms.
+    ReportHistogramActivity(*histogram, HISTOGRAM_CREATED);
+  } else {
+    // Update report on lookup histograms.
+    ReportHistogramActivity(*histogram, HISTOGRAM_LOOKUP);
+  }
+
+  DCHECK_EQ(histogram_type_, histogram->GetHistogramType()) << name_;
+  if (bucket_count_ != 0 &&
+      !histogram->HasConstructionArguments(minimum_, maximum_, bucket_count_)) {
     // The construction arguments do not match the existing histogram.  This can
     // come about if an extension updates in the middle of a Chrome run and has
     // changed one of them, or simply by bad code within Chrome itself.  We
     // return nullptr here with the expectation that bad code in Chrome will
     // crash on dereference, but extension/Pepper APIs will guard against null
     // and not crash.
-    LOG(ERROR) << "Histogram " << name << " has bad construction arguments";
-    return NULL;
+    LOG(ERROR) << "Histogram " << name_ << " has bad construction arguments";
+    return nullptr;
   }
   return histogram;
 }
 
+HistogramBase* Histogram::FactoryGet(const std::string& name,
+                                     Sample minimum,
+                                     Sample maximum,
+                                     uint32_t bucket_count,
+                                     int32_t flags) {
+  bool valid_arguments =
+      InspectConstructionArguments(name, &minimum, &maximum, &bucket_count);
+  DCHECK(valid_arguments);
+
+  return Factory(name, minimum, maximum, bucket_count, flags).Build();
+}
+
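
(For reference, a minimal caller-side sketch of the factory API after the
size_t -> uint32_t change; the histogram name, range, and flag choice below
are illustrative, not taken from this patch.)

    #include "base/metrics/histogram.h"

    void RecordExampleLatency(int latency_ms) {
      // 50 exponentially spaced buckets covering [1, 10000).
      base::HistogramBase* h = base::Histogram::FactoryGet(
          "Example.Latency", 1, 10000, 50,
          base::HistogramBase::kUmaTargetedHistogramFlag);
      h->Add(latency_ms);
    }
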
 HistogramBase* Histogram::FactoryTimeGet(const std::string& name,
                                          TimeDelta minimum,
                                          TimeDelta maximum,
-                                         size_t bucket_count,
+                                         uint32_t bucket_count,
                                          int32_t flags) {
   return FactoryGet(name, static_cast<Sample>(minimum.InMilliseconds()),
                     static_cast<Sample>(maximum.InMilliseconds()), bucket_count,
@@ -136,7 +265,7 @@
 HistogramBase* Histogram::FactoryGet(const char* name,
                                      Sample minimum,
                                      Sample maximum,
-                                     size_t bucket_count,
+                                     uint32_t bucket_count,
                                      int32_t flags) {
   return FactoryGet(std::string(name), minimum, maximum, bucket_count, flags);
 }
@@ -144,12 +273,27 @@
 HistogramBase* Histogram::FactoryTimeGet(const char* name,
                                          TimeDelta minimum,
                                          TimeDelta maximum,
-                                         size_t bucket_count,
+                                         uint32_t bucket_count,
                                          int32_t flags) {
   return FactoryTimeGet(std::string(name), minimum, maximum, bucket_count,
                         flags);
 }
 
+scoped_ptr<HistogramBase> Histogram::PersistentCreate(
+    const std::string& name,
+    Sample minimum,
+    Sample maximum,
+    const BucketRanges* ranges,
+    HistogramBase::AtomicCount* counts,
+    HistogramBase::AtomicCount* logged_counts,
+    uint32_t counts_size,
+    HistogramSamples::Metadata* meta,
+    HistogramSamples::Metadata* logged_meta) {
+  return make_scoped_ptr(new Histogram(
+      name, minimum, maximum, ranges, counts, logged_counts, counts_size,
+      meta, logged_meta));
+}
+
 // Calculate what range of values are held in each bucket.
 // We have to be careful that we don't pick a ratio between starting points in
 // consecutive buckets that is so small that the integer bounds are the same
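
(The loop that does this is elided by the hunk below. As a rough standalone
sketch of the scheme -- an approximation, not the upstream code -- geometric
spacing with a fallback to width-1 buckets when rounding would collapse two
bounds:)

    #include <cmath>
    #include <vector>

    std::vector<int> ExponentialBucketStarts(int minimum, int maximum,
                                             int bucket_count) {
      std::vector<int> starts;
      double current = minimum;
      for (int i = 1; i < bucket_count; ++i) {
        starts.push_back(static_cast<int>(current));
        // Take the remaining-bucket'th root of the remaining range.
        double ratio = std::pow(static_cast<double>(maximum) / current,
                                1.0 / (bucket_count - i));
        double next = current * ratio;
        // If the integer bounds would be equal, fall back to a narrow bucket.
        current = static_cast<int>(next) > static_cast<int>(current)
                      ? next : current + 1;
      }
      return starts;
    }
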
@@ -192,10 +336,10 @@
 // static
 const int Histogram::kCommonRaceBasedCountMismatch = 5;
 
-int Histogram::FindCorruption(const HistogramSamples& samples) const {
+uint32_t Histogram::FindCorruption(const HistogramSamples& samples) const {
   int inconsistencies = NO_INCONSISTENCIES;
   Sample previous_range = -1;  // Bottom range is always 0.
-  for (size_t index = 0; index < bucket_count(); ++index) {
+  for (uint32_t index = 0; index < bucket_count(); ++index) {
     int new_range = ranges(index);
     if (previous_range >= new_range)
       inconsistencies |= BUCKET_ORDER_ERROR;
@@ -224,19 +368,19 @@
   return inconsistencies;
 }
 
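(With the return type now an explicit uint32_t bit mask, a caller can test
individual Inconsistency bits, defined in base/metrics/histogram_base.h later
in this patch. Illustrative sketch:)

    scoped_ptr<base::HistogramSamples> samples = histogram->SnapshotSamples();
    uint32_t problems = histogram->FindCorruption(*samples);
    if (problems & base::HistogramBase::BUCKET_ORDER_ERROR) {
      // Bucket ranges were not strictly increasing.
    }
    if (problems & (base::HistogramBase::COUNT_HIGH_ERROR |
                    base::HistogramBase::COUNT_LOW_ERROR)) {
      // Sample counts disagree with the redundant total beyond the
      // race-tolerated mismatch.
    }
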
-Sample Histogram::ranges(size_t i) const {
+Sample Histogram::ranges(uint32_t i) const {
   return bucket_ranges_->range(i);
 }
 
-size_t Histogram::bucket_count() const {
-  return bucket_ranges_->bucket_count();
+uint32_t Histogram::bucket_count() const {
+  return static_cast<uint32_t>(bucket_ranges_->bucket_count());
 }
 
 // static
 bool Histogram::InspectConstructionArguments(const std::string& name,
                                              Sample* minimum,
                                              Sample* maximum,
-                                             size_t* bucket_count) {
+                                             uint32_t* bucket_count) {
   // Defensive code for backward compatibility.
   if (*minimum < 1) {
     DVLOG(1) << "Histogram: " << name << " has bad minimum: " << *minimum;
@@ -256,7 +400,7 @@
     return false;
   if (*bucket_count < 3)
     return false;
-  if (*bucket_count > static_cast<size_t>(*maximum - *minimum + 2))
+  if (*bucket_count > static_cast<uint32_t>(*maximum - *minimum + 2))
     return false;
   return true;
 }
@@ -271,7 +415,7 @@
 
 bool Histogram::HasConstructionArguments(Sample expected_minimum,
                                          Sample expected_maximum,
-                                         size_t expected_bucket_count) const {
+                                         uint32_t expected_bucket_count) const {
   return ((expected_minimum == declared_min_) &&
           (expected_maximum == declared_max_) &&
           (expected_bucket_count == bucket_count()));
@@ -302,6 +446,21 @@
   return SnapshotSampleVector();
 }
 
+scoped_ptr<HistogramSamples> Histogram::SnapshotDelta() {
+  scoped_ptr<HistogramSamples> snapshot = SnapshotSampleVector();
+  if (!logged_samples_) {
+    // If nothing has been previously logged, save this one as
+    // |logged_samples_| and gather another snapshot to return.
+    logged_samples_.swap(snapshot);
+    return SnapshotSampleVector();
+  }
+
+  // Subtract what was previously logged and update that information.
+  snapshot->Subtract(*logged_samples_);
+  logged_samples_->Add(*snapshot);
+  return snapshot;
+}
+
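
(Delta semantics in brief: the first SnapshotDelta() call returns everything
recorded so far; each later call returns only what arrived since the previous
call. Illustrative sequence:)

    histogram->Add(1);
    histogram->Add(2);
    scoped_ptr<base::HistogramSamples> d1 = histogram->SnapshotDelta();
    // d1 holds both samples.

    histogram->Add(3);
    scoped_ptr<base::HistogramSamples> d2 = histogram->SnapshotDelta();
    // d2 holds only the sample for 3; the earlier counts were folded into
    // logged_samples_ and are not reported again.
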
 void Histogram::AddSamples(const HistogramSamples& samples) {
   samples_->Add(samples);
 }
@@ -328,7 +487,7 @@
       pickle->WriteInt(flags()) &&
       pickle->WriteInt(declared_min()) &&
       pickle->WriteInt(declared_max()) &&
-      pickle->WriteSizeT(bucket_count()) &&
+      pickle->WriteUInt32(bucket_count()) &&
       pickle->WriteUInt32(bucket_ranges()->checksum());
 }
 
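(After this change, the pickled histogram info reads back through
ReadHistogramArguments() in this order:)

    // string   histogram name (written by elided code above this hunk)
    // int32    flags
    // int32    declared minimum
    // int32    declared maximum
    // uint32   bucket count (previously size_t via WriteSizeT)
    // uint32   bucket-ranges checksum
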
@@ -344,10 +503,31 @@
     samples_.reset(new SampleVector(HashMetricName(name), ranges));
 }
 
+Histogram::Histogram(const std::string& name,
+                     Sample minimum,
+                     Sample maximum,
+                     const BucketRanges* ranges,
+                     HistogramBase::AtomicCount* counts,
+                     HistogramBase::AtomicCount* logged_counts,
+                     uint32_t counts_size,
+                     HistogramSamples::Metadata* meta,
+                     HistogramSamples::Metadata* logged_meta)
+  : HistogramBase(name),
+    bucket_ranges_(ranges),
+    declared_min_(minimum),
+    declared_max_(maximum) {
+  if (ranges) {
+    samples_.reset(new SampleVector(HashMetricName(name),
+                                    counts, counts_size, meta, ranges));
+    logged_samples_.reset(new SampleVector(samples_->id(), logged_counts,
+                                           counts_size, logged_meta, ranges));
+  }
+}
+
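
(A hedged sketch of feeding this constructor through PersistentCreate();
the local vectors stand in for memory that would really come from a shared
persistent segment, and the name and range values are illustrative:)

    const uint32_t counts_size = static_cast<uint32_t>(ranges->bucket_count());
    std::vector<base::HistogramBase::AtomicCount> counts(counts_size);
    std::vector<base::HistogramBase::AtomicCount> logged(counts_size);
    base::HistogramSamples::Metadata meta;
    base::HistogramSamples::Metadata logged_meta;

    scoped_ptr<base::HistogramBase> h = base::Histogram::PersistentCreate(
        "Example.Persistent", 1, 100, ranges, counts.data(), logged.data(),
        counts_size, &meta, &logged_meta);
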
 Histogram::~Histogram() {
 }
 
-bool Histogram::PrintEmptyBucket(size_t /* index */) const {
+bool Histogram::PrintEmptyBucket(uint32_t /* index */) const {
   return true;
 }
 
@@ -356,7 +536,7 @@
 // get so big so fast (and we don't expect to see a lot of entries in the large
 // buckets), so we need this to make it possible to see what is going on and
 // not have 0-graphical-height buckets.
-double Histogram::GetBucketSize(Count current, size_t i) const {
+double Histogram::GetBucketSize(Count current, uint32_t i) const {
   DCHECK_GT(ranges(i + 1), ranges(i));
   static const double kTransitionWidth = 5;
   double denominator = ranges(i + 1) - ranges(i);
@@ -365,7 +545,7 @@
   return current/denominator;
 }
 
-const std::string Histogram::GetAsciiBucketRange(size_t i) const {
+const std::string Histogram::GetAsciiBucketRange(uint32_t i) const {
   return GetSimpleAsciiBucketRange(ranges(i));
 }
 
@@ -378,7 +558,7 @@
   int flags;
   int declared_min;
   int declared_max;
-  size_t bucket_count;
+  uint32_t bucket_count;
   uint32_t range_checksum;
 
   if (!ReadHistogramArguments(iter, &histogram_name, &flags, &declared_min,
@@ -422,7 +602,7 @@
 
   // Calculate space needed to print bucket range numbers.  Leave room to print
   // nearly the largest bucket range without sliding over the histogram.
-  size_t largest_non_empty_bucket = bucket_count() - 1;
+  uint32_t largest_non_empty_bucket = bucket_count() - 1;
   while (0 == snapshot->GetCountAtIndex(largest_non_empty_bucket)) {
     if (0 == largest_non_empty_bucket)
       break;  // All buckets are empty.
@@ -431,7 +611,7 @@
 
   // Calculate largest print width needed for any of our bucket range displays.
   size_t print_width = 1;
-  for (size_t i = 0; i < bucket_count(); ++i) {
+  for (uint32_t i = 0; i < bucket_count(); ++i) {
     if (snapshot->GetCountAtIndex(i)) {
       size_t width = GetAsciiBucketRange(i).size() + 1;
       if (width > print_width)
@@ -442,7 +622,7 @@
   int64_t remaining = sample_count;
   int64_t past = 0;
   // Output the actual histogram graph.
-  for (size_t i = 0; i < bucket_count(); ++i) {
+  for (uint32_t i = 0; i < bucket_count(); ++i) {
     Count current = snapshot->GetCountAtIndex(i);
     if (!current && !PrintEmptyBucket(i))
       continue;
@@ -473,7 +653,7 @@
 
 double Histogram::GetPeakBucketSize(const SampleVector& samples) const {
   double max = 0;
-  for (size_t i = 0; i < bucket_count() ; ++i) {
+  for (uint32_t i = 0; i < bucket_count(); ++i) {
     double current_size = GetBucketSize(samples.GetCountAtIndex(i), i);
     if (current_size > max)
       max = current_size;
@@ -502,7 +682,7 @@
 void Histogram::WriteAsciiBucketContext(const int64_t past,
                                         const Count current,
                                         const int64_t remaining,
-                                        const size_t i,
+                                        const uint32_t i,
                                         std::string* output) const {
   double scaled_sum = (past + current + remaining) / 100.0;
   WriteAsciiBucketValue(current, scaled_sum, output);
@@ -525,8 +705,8 @@
   scoped_ptr<SampleVector> snapshot = SnapshotSampleVector();
   *count = snapshot->TotalCount();
   *sum = snapshot->sum();
-  size_t index = 0;
-  for (size_t i = 0; i < bucket_count(); ++i) {
+  uint32_t index = 0;
+  for (uint32_t i = 0; i < bucket_count(); ++i) {
     Sample count_at_index = snapshot->GetCountAtIndex(i);
     if (count_at_index > 0) {
       scoped_ptr<DictionaryValue> bucket_value(new DictionaryValue());
@@ -545,12 +725,55 @@
 // buckets.
 //------------------------------------------------------------------------------
 
+class LinearHistogram::Factory : public Histogram::Factory {
+ public:
+  Factory(const std::string& name,
+          HistogramBase::Sample minimum,
+          HistogramBase::Sample maximum,
+          uint32_t bucket_count,
+          int32_t flags,
+          const DescriptionPair* descriptions)
+    : Histogram::Factory(name, LINEAR_HISTOGRAM, minimum, maximum,
+                         bucket_count, flags) {
+    descriptions_ = descriptions;
+  }
+
+ protected:
+  BucketRanges* CreateRanges() override {
+    BucketRanges* ranges = new BucketRanges(bucket_count_ + 1);
+    LinearHistogram::InitializeBucketRanges(minimum_, maximum_, ranges);
+    return ranges;
+  }
+
+  scoped_ptr<HistogramBase> HeapAlloc(const BucketRanges* ranges) override {
+    return make_scoped_ptr(
+        new LinearHistogram(name_, minimum_, maximum_, ranges));
+  }
+
+  void FillHistogram(HistogramBase* base_histogram) override {
+    Histogram::Factory::FillHistogram(base_histogram);
+    LinearHistogram* histogram = static_cast<LinearHistogram*>(base_histogram);
+    // Set range descriptions.
+    if (descriptions_) {
+      for (int i = 0; descriptions_[i].description; ++i) {
+        histogram->bucket_description_[descriptions_[i].sample] =
+            descriptions_[i].description;
+      }
+    }
+  }
+
+ private:
+  const DescriptionPair* descriptions_;
+
+  DISALLOW_COPY_AND_ASSIGN(Factory);
+};
+
 LinearHistogram::~LinearHistogram() {}
 
 HistogramBase* LinearHistogram::FactoryGet(const std::string& name,
                                            Sample minimum,
                                            Sample maximum,
-                                           size_t bucket_count,
+                                           uint32_t bucket_count,
                                            int32_t flags) {
   return FactoryGetWithRangeDescription(
       name, minimum, maximum, bucket_count, flags, NULL);
@@ -559,7 +782,7 @@
 HistogramBase* LinearHistogram::FactoryTimeGet(const std::string& name,
                                                TimeDelta minimum,
                                                TimeDelta maximum,
-                                               size_t bucket_count,
+                                               uint32_t bucket_count,
                                                int32_t flags) {
   return FactoryGet(name, static_cast<Sample>(minimum.InMilliseconds()),
                     static_cast<Sample>(maximum.InMilliseconds()), bucket_count,
@@ -569,7 +792,7 @@
 HistogramBase* LinearHistogram::FactoryGet(const char* name,
                                            Sample minimum,
                                            Sample maximum,
-                                           size_t bucket_count,
+                                           uint32_t bucket_count,
                                            int32_t flags) {
   return FactoryGet(std::string(name), minimum, maximum, bucket_count, flags);
 }
@@ -577,59 +800,40 @@
 HistogramBase* LinearHistogram::FactoryTimeGet(const char* name,
                                                TimeDelta minimum,
                                                TimeDelta maximum,
-                                               size_t bucket_count,
+                                               uint32_t bucket_count,
                                                int32_t flags) {
   return FactoryTimeGet(std::string(name), minimum, maximum, bucket_count,
                         flags);
 }
 
+scoped_ptr<HistogramBase> LinearHistogram::PersistentCreate(
+    const std::string& name,
+    Sample minimum,
+    Sample maximum,
+    const BucketRanges* ranges,
+    HistogramBase::AtomicCount* counts,
+    HistogramBase::AtomicCount* logged_counts,
+    uint32_t counts_size,
+    HistogramSamples::Metadata* meta,
+    HistogramSamples::Metadata* logged_meta) {
+  return make_scoped_ptr(new LinearHistogram(
+      name, minimum, maximum, ranges, counts, logged_counts, counts_size,
+      meta, logged_meta));
+}
+
 HistogramBase* LinearHistogram::FactoryGetWithRangeDescription(
     const std::string& name,
     Sample minimum,
     Sample maximum,
-    size_t bucket_count,
+    uint32_t bucket_count,
     int32_t flags,
     const DescriptionPair descriptions[]) {
   bool valid_arguments = Histogram::InspectConstructionArguments(
       name, &minimum, &maximum, &bucket_count);
   DCHECK(valid_arguments);
 
-  HistogramBase* histogram = StatisticsRecorder::FindHistogram(name);
-  if (!histogram) {
-    // To avoid racy destruction at shutdown, the following will be leaked.
-    BucketRanges* ranges = new BucketRanges(bucket_count + 1);
-    InitializeBucketRanges(minimum, maximum, ranges);
-    const BucketRanges* registered_ranges =
-        StatisticsRecorder::RegisterOrDeleteDuplicateRanges(ranges);
-
-    LinearHistogram* tentative_histogram =
-        new LinearHistogram(name, minimum, maximum, registered_ranges);
-
-    // Set range descriptions.
-    if (descriptions) {
-      for (int i = 0; descriptions[i].description; ++i) {
-        tentative_histogram->bucket_description_[descriptions[i].sample] =
-            descriptions[i].description;
-      }
-    }
-
-    tentative_histogram->SetFlags(flags);
-    histogram =
-        StatisticsRecorder::RegisterOrDeleteDuplicate(tentative_histogram);
-  }
-
-  DCHECK_EQ(LINEAR_HISTOGRAM, histogram->GetHistogramType());
-  if (!histogram->HasConstructionArguments(minimum, maximum, bucket_count)) {
-    // The construction arguments do not match the existing histogram.  This can
-    // come about if an extension updates in the middle of a chrome run and has
-    // changed one of them, or simply by bad code within Chrome itself.  We
-    // return NULL here with the expectation that bad code in Chrome will crash
-    // on dereference, but extension/Pepper APIs will guard against NULL and not
-    // crash.
-    LOG(ERROR) << "Histogram " << name << " has bad construction arguments";
-    return NULL;
-  }
-  return histogram;
+  return Factory(name, minimum, maximum, bucket_count, flags, descriptions)
+      .Build();
 }
 
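(Usage sketch for the description-pair path now routed through the Factory;
the values are illustrative, and the array is terminated by a null
description as the DescriptionPair declaration in the header requires:)

    static const base::LinearHistogram::DescriptionPair kStates[] = {
        {1, "connecting"},
        {2, "connected"},
        {3, "closed"},
        {0, nullptr},  // End of list.
    };
    base::HistogramBase* h =
        base::LinearHistogram::FactoryGetWithRangeDescription(
            "Example.SocketState", 1, 3, 4,
            base::HistogramBase::kUmaTargetedHistogramFlag, kStates);
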
 HistogramType LinearHistogram::GetHistogramType() const {
@@ -643,7 +847,19 @@
     : Histogram(name, minimum, maximum, ranges) {
 }
 
-double LinearHistogram::GetBucketSize(Count current, size_t i) const {
+LinearHistogram::LinearHistogram(const std::string& name,
+                                 Sample minimum,
+                                 Sample maximum,
+                                 const BucketRanges* ranges,
+                                 HistogramBase::AtomicCount* counts,
+                                 HistogramBase::AtomicCount* logged_counts,
+                                 uint32_t counts_size,
+                                 HistogramSamples::Metadata* meta,
+                                 HistogramSamples::Metadata* logged_meta)
+    : Histogram(name, minimum, maximum, ranges, counts, logged_counts,
+                counts_size, meta, logged_meta) {}
+
+double LinearHistogram::GetBucketSize(Count current, uint32_t i) const {
   DCHECK_GT(ranges(i + 1), ranges(i));
   // Adjacent buckets with different widths would have "surprisingly" many (few)
   // samples in a histogram if we didn't normalize this way.
@@ -651,7 +867,7 @@
   return current/denominator;
 }
 
-const std::string LinearHistogram::GetAsciiBucketRange(size_t i) const {
+const std::string LinearHistogram::GetAsciiBucketRange(uint32_t i) const {
   int range = ranges(i);
   BucketDescriptionMap::const_iterator it = bucket_description_.find(range);
   if (it == bucket_description_.end())
@@ -659,7 +875,7 @@
   return it->second;
 }
 
-bool LinearHistogram::PrintEmptyBucket(size_t index) const {
+bool LinearHistogram::PrintEmptyBucket(uint32_t index) const {
   return bucket_description_.find(ranges(index)) == bucket_description_.end();
 }
 
@@ -674,6 +890,8 @@
     double linear_range =
         (min * (bucket_count - 1 - i) + max * (i - 1)) / (bucket_count - 2);
     ranges->set_range(i, static_cast<Sample>(linear_range + 0.5));
+    // TODO(bcwhite): Remove once crbug/586622 is fixed.
+    base::debug::Alias(&linear_range);
   }
   ranges->set_range(ranges->bucket_count(), HistogramBase::kSampleType_MAX);
   ranges->ResetChecksum();
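(A worked instance of the interpolation above: with min = 1, max = 10 and
bucket_count = 5, the loop produces

    range(1) = (1*3 + 10*0) / 3 = 1
    range(2) = (1*2 + 10*1) / 3 = 4
    range(3) = (1*1 + 10*2) / 3 = 7
    range(4) = (1*0 + 10*3) / 3 = 10

with range(0) = 0 as the underflow bound and range(5) set to
kSampleType_MAX by the line above.)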
@@ -685,7 +903,7 @@
   int flags;
   int declared_min;
   int declared_max;
-  size_t bucket_count;
+  uint32_t bucket_count;
   uint32_t range_checksum;
 
   if (!ReadHistogramArguments(iter, &histogram_name, &flags, &declared_min,
@@ -706,32 +924,46 @@
 // This section provides implementation for BooleanHistogram.
 //------------------------------------------------------------------------------
 
-HistogramBase* BooleanHistogram::FactoryGet(const std::string& name,
-                                            int32_t flags) {
-  HistogramBase* histogram = StatisticsRecorder::FindHistogram(name);
-  if (!histogram) {
-    // To avoid racy destruction at shutdown, the following will be leaked.
-    BucketRanges* ranges = new BucketRanges(4);
+class BooleanHistogram::Factory : public Histogram::Factory {
+ public:
+  Factory(const std::string& name, int32_t flags)
+    : Histogram::Factory(name, BOOLEAN_HISTOGRAM, 1, 2, 3, flags) {}
+
+ protected:
+  BucketRanges* CreateRanges() override {
+    BucketRanges* ranges = new BucketRanges(3 + 1);
     LinearHistogram::InitializeBucketRanges(1, 2, ranges);
-    const BucketRanges* registered_ranges =
-        StatisticsRecorder::RegisterOrDeleteDuplicateRanges(ranges);
-
-    BooleanHistogram* tentative_histogram =
-        new BooleanHistogram(name, registered_ranges);
-
-    tentative_histogram->SetFlags(flags);
-    histogram =
-        StatisticsRecorder::RegisterOrDeleteDuplicate(tentative_histogram);
+    return ranges;
   }
 
-  DCHECK_EQ(BOOLEAN_HISTOGRAM, histogram->GetHistogramType());
-  return histogram;
+  scoped_ptr<HistogramBase> HeapAlloc(const BucketRanges* ranges) override {
+    return make_scoped_ptr(new BooleanHistogram(name_, ranges));
+  }
+
+ private:
+  DISALLOW_COPY_AND_ASSIGN(Factory);
+};
+
+HistogramBase* BooleanHistogram::FactoryGet(const std::string& name,
+                                            int32_t flags) {
+  return Factory(name, flags).Build();
 }
 
 HistogramBase* BooleanHistogram::FactoryGet(const char* name, int32_t flags) {
   return FactoryGet(std::string(name), flags);
 }
 
+scoped_ptr<HistogramBase> BooleanHistogram::PersistentCreate(
+    const std::string& name,
+    const BucketRanges* ranges,
+    HistogramBase::AtomicCount* counts,
+    HistogramBase::AtomicCount* logged_counts,
+    HistogramSamples::Metadata* meta,
+    HistogramSamples::Metadata* logged_meta) {
+  return make_scoped_ptr(new BooleanHistogram(
+      name, ranges, counts, logged_counts, meta, logged_meta));
+}
+
 HistogramType BooleanHistogram::GetHistogramType() const {
   return BOOLEAN_HISTOGRAM;
 }
@@ -740,12 +972,21 @@
                                    const BucketRanges* ranges)
     : LinearHistogram(name, 1, 2, ranges) {}
 
+BooleanHistogram::BooleanHistogram(const std::string& name,
+                                   const BucketRanges* ranges,
+                                   HistogramBase::AtomicCount* counts,
+                                   HistogramBase::AtomicCount* logged_counts,
+                                   HistogramSamples::Metadata* meta,
+                                   HistogramSamples::Metadata* logged_meta)
+    : LinearHistogram(name, 1, 2, ranges, counts, logged_counts, 2, meta,
+                      logged_meta) {}
+
 HistogramBase* BooleanHistogram::DeserializeInfoImpl(PickleIterator* iter) {
   std::string histogram_name;
   int flags;
   int declared_min;
   int declared_max;
-  size_t bucket_count;
+  uint32_t bucket_count;
   uint32_t range_checksum;
 
   if (!ReadHistogramArguments(iter, &histogram_name, &flags, &declared_min,
@@ -766,30 +1007,49 @@
 // CustomHistogram:
 //------------------------------------------------------------------------------
 
+class CustomHistogram::Factory : public Histogram::Factory {
+ public:
+  Factory(const std::string& name,
+          const std::vector<Sample>* custom_ranges,
+          int32_t flags)
+    : Histogram::Factory(name, CUSTOM_HISTOGRAM, 0, 0, 0, flags) {
+    custom_ranges_ = custom_ranges;
+  }
+
+ protected:
+  BucketRanges* CreateRanges() override {
+    // Remove the duplicates in the custom ranges array.
+    std::vector<int> ranges = *custom_ranges_;
+    ranges.push_back(0);  // Ensure we have a zero value.
+    ranges.push_back(HistogramBase::kSampleType_MAX);
+    std::sort(ranges.begin(), ranges.end());
+    ranges.erase(std::unique(ranges.begin(), ranges.end()), ranges.end());
+
+    BucketRanges* bucket_ranges = new BucketRanges(ranges.size());
+    for (uint32_t i = 0; i < ranges.size(); i++) {
+      bucket_ranges->set_range(i, ranges[i]);
+    }
+    bucket_ranges->ResetChecksum();
+    return bucket_ranges;
+  }
+
+  scoped_ptr<HistogramBase> HeapAlloc(const BucketRanges* ranges) override {
+    return make_scoped_ptr(new CustomHistogram(name_, ranges));
+  }
+
+ private:
+  const std::vector<Sample>* custom_ranges_;
+
+  DISALLOW_COPY_AND_ASSIGN(Factory);
+};
+
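
(A worked instance of CreateRanges(): an illustrative input of {5, 10, 10, 3}
becomes, after appending the 0 and kSampleType_MAX sentinels, sorting, and
de-duplicating,

    {0, 3, 5, 10, kSampleType_MAX}

which yields four buckets: [0,3), [3,5), [5,10), [10,MAX).)
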
 HistogramBase* CustomHistogram::FactoryGet(
     const std::string& name,
     const std::vector<Sample>& custom_ranges,
     int32_t flags) {
   CHECK(ValidateCustomRanges(custom_ranges));
 
-  HistogramBase* histogram = StatisticsRecorder::FindHistogram(name);
-  if (!histogram) {
-    BucketRanges* ranges = CreateBucketRangesFromCustomRanges(custom_ranges);
-    const BucketRanges* registered_ranges =
-        StatisticsRecorder::RegisterOrDeleteDuplicateRanges(ranges);
-
-    // To avoid racy destruction at shutdown, the following will be leaked.
-    CustomHistogram* tentative_histogram =
-        new CustomHistogram(name, registered_ranges);
-
-    tentative_histogram->SetFlags(flags);
-
-    histogram =
-        StatisticsRecorder::RegisterOrDeleteDuplicate(tentative_histogram);
-  }
-
-  DCHECK_EQ(histogram->GetHistogramType(), CUSTOM_HISTOGRAM);
-  return histogram;
+  return Factory(name, &custom_ranges, flags).Build();
 }
 
 HistogramBase* CustomHistogram::FactoryGet(
@@ -799,15 +1059,27 @@
   return FactoryGet(std::string(name), custom_ranges, flags);
 }
 
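(Caller-side sketch with illustrative ranges, which pass
ValidateCustomRanges():)

    std::vector<base::HistogramBase::Sample> ranges;
    ranges.push_back(3);
    ranges.push_back(5);
    ranges.push_back(10);
    base::HistogramBase* h = base::CustomHistogram::FactoryGet(
        "Example.Custom", ranges,
        base::HistogramBase::kUmaTargetedHistogramFlag);
    h->Add(7);  // Falls into the [5, 10) bucket.
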
+scoped_ptr<HistogramBase> CustomHistogram::PersistentCreate(
+    const std::string& name,
+    const BucketRanges* ranges,
+    HistogramBase::AtomicCount* counts,
+    HistogramBase::AtomicCount* logged_counts,
+    uint32_t counts_size,
+    HistogramSamples::Metadata* meta,
+    HistogramSamples::Metadata* logged_meta) {
+  return make_scoped_ptr(new CustomHistogram(
+      name, ranges, counts, logged_counts, counts_size, meta, logged_meta));
+}
+
 HistogramType CustomHistogram::GetHistogramType() const {
   return CUSTOM_HISTOGRAM;
 }
 
 // static
 std::vector<Sample> CustomHistogram::ArrayToCustomRanges(
-    const Sample* values, size_t num_values) {
+    const Sample* values, uint32_t num_values) {
   std::vector<Sample> all_values;
-  for (size_t i = 0; i < num_values; ++i) {
+  for (uint32_t i = 0; i < num_values; ++i) {
     Sample value = values[i];
     all_values.push_back(value);
 
@@ -825,13 +1097,30 @@
                 ranges->range(ranges->bucket_count() - 1),
                 ranges) {}
 
+CustomHistogram::CustomHistogram(const std::string& name,
+                                 const BucketRanges* ranges,
+                                 HistogramBase::AtomicCount* counts,
+                                 HistogramBase::AtomicCount* logged_counts,
+                                 uint32_t counts_size,
+                                 HistogramSamples::Metadata* meta,
+                                 HistogramSamples::Metadata* logged_meta)
+    : Histogram(name,
+                ranges->range(1),
+                ranges->range(ranges->bucket_count() - 1),
+                ranges,
+                counts,
+                logged_counts,
+                counts_size,
+                meta,
+                logged_meta) {}
+
 bool CustomHistogram::SerializeInfoImpl(Pickle* pickle) const {
   if (!Histogram::SerializeInfoImpl(pickle))
     return false;
 
   // Serialize ranges. First and last ranges are always 0 and INT_MAX, so don't
   // write them.
-  for (size_t i = 1; i < bucket_ranges()->bucket_count(); ++i) {
+  for (uint32_t i = 1; i < bucket_ranges()->bucket_count(); ++i) {
     if (!pickle->WriteInt(bucket_ranges()->range(i)))
       return false;
   }
@@ -839,7 +1128,7 @@
 }
 
 double CustomHistogram::GetBucketSize(Count /* current */,
-                                      size_t /* i */) const {
+                                      uint32_t /* i */) const {
   return 1;
 }
 
@@ -849,7 +1138,7 @@
   int flags;
   int declared_min;
   int declared_max;
-  size_t bucket_count;
+  uint32_t bucket_count;
   uint32_t range_checksum;
 
   if (!ReadHistogramArguments(iter, &histogram_name, &flags, &declared_min,
@@ -860,7 +1149,7 @@
   // First and last ranges are not serialized.
   std::vector<Sample> sample_ranges(bucket_count - 1);
 
-  for (size_t i = 0; i < sample_ranges.size(); ++i) {
+  for (uint32_t i = 0; i < sample_ranges.size(); ++i) {
     if (!iter->ReadInt(&sample_ranges[i]))
       return NULL;
   }
@@ -878,7 +1167,7 @@
 bool CustomHistogram::ValidateCustomRanges(
     const std::vector<Sample>& custom_ranges) {
   bool has_valid_range = false;
-  for (size_t i = 0; i < custom_ranges.size(); i++) {
+  for (uint32_t i = 0; i < custom_ranges.size(); i++) {
     Sample sample = custom_ranges[i];
     if (sample < 0 || sample > HistogramBase::kSampleType_MAX - 1)
       return false;
@@ -888,22 +1177,4 @@
   return has_valid_range;
 }
 
-// static
-BucketRanges* CustomHistogram::CreateBucketRangesFromCustomRanges(
-      const std::vector<Sample>& custom_ranges) {
-  // Remove the duplicates in the custom ranges array.
-  std::vector<int> ranges = custom_ranges;
-  ranges.push_back(0);  // Ensure we have a zero value.
-  ranges.push_back(HistogramBase::kSampleType_MAX);
-  std::sort(ranges.begin(), ranges.end());
-  ranges.erase(std::unique(ranges.begin(), ranges.end()), ranges.end());
-
-  BucketRanges* bucket_ranges = new BucketRanges(ranges.size());
-  for (size_t i = 0; i < ranges.size(); i++) {
-    bucket_ranges->set_range(i, ranges[i]);
-  }
-  bucket_ranges->ResetChecksum();
-  return bucket_ranges;
-}
-
 }  // namespace base
diff --git a/base/metrics/histogram.h b/base/metrics/histogram.h
index 28bb29b..5111b8f 100644
--- a/base/metrics/histogram.h
+++ b/base/metrics/histogram.h
@@ -92,6 +92,7 @@
 class CustomHistogram;
 class Histogram;
 class LinearHistogram;
+class PersistentMemoryAllocator;
 class Pickle;
 class PickleIterator;
 class SampleVector;
@@ -99,10 +100,12 @@
 class BASE_EXPORT Histogram : public HistogramBase {
  public:
   // Initialize maximum number of buckets in histograms as 16,384.
-  static const size_t kBucketCount_MAX;
+  static const uint32_t kBucketCount_MAX;
 
   typedef std::vector<Count> Counts;
 
+  ~Histogram() override;
+
   //----------------------------------------------------------------------------
   // For a valid histogram, input should follow these restrictions:
   // minimum > 0 (if a minimum below 1 is specified, it will implicitly be
@@ -116,12 +119,12 @@
   static HistogramBase* FactoryGet(const std::string& name,
                                    Sample minimum,
                                    Sample maximum,
-                                   size_t bucket_count,
+                                   uint32_t bucket_count,
                                    int32_t flags);
   static HistogramBase* FactoryTimeGet(const std::string& name,
                                        base::TimeDelta minimum,
                                        base::TimeDelta maximum,
-                                       size_t bucket_count,
+                                       uint32_t bucket_count,
                                        int32_t flags);
 
   // Overloads of the above two functions that take a const char* |name| param,
@@ -130,14 +133,26 @@
   static HistogramBase* FactoryGet(const char* name,
                                    Sample minimum,
                                    Sample maximum,
-                                   size_t bucket_count,
+                                   uint32_t bucket_count,
                                    int32_t flags);
   static HistogramBase* FactoryTimeGet(const char* name,
                                        base::TimeDelta minimum,
                                        base::TimeDelta maximum,
-                                       size_t bucket_count,
+                                       uint32_t bucket_count,
                                        int32_t flags);
 
+  // Create a histogram using data in persistent storage.
+  static scoped_ptr<HistogramBase> PersistentCreate(
+      const std::string& name,
+      Sample minimum,
+      Sample maximum,
+      const BucketRanges* ranges,
+      HistogramBase::AtomicCount* counts,
+      HistogramBase::AtomicCount* logged_counts,
+      uint32_t counts_size,
+      HistogramSamples::Metadata* meta,
+      HistogramSamples::Metadata* logged_meta);
+
   static void InitializeBucketRanges(Sample minimum,
                                      Sample maximum,
                                      BucketRanges* ranges);
@@ -155,16 +170,17 @@
   // consistent with the bucket ranges and checksums in our histogram.  This can
   // produce a false-alarm if a race occurred in the reading of the data during
   // a SnapShot process, but should otherwise be false at all times (unless we
-  // have memory over-writes, or DRAM failures).
-  int FindCorruption(const HistogramSamples& samples) const override;
+  // have memory over-writes, or DRAM failures). Flag definitions are located
+  // under "enum Inconsistency" in base/metrics/histogram_base.h.
+  uint32_t FindCorruption(const HistogramSamples& samples) const override;
 
   //----------------------------------------------------------------------------
   // Accessors for factory construction, serialization and testing.
   //----------------------------------------------------------------------------
   Sample declared_min() const { return declared_min_; }
   Sample declared_max() const { return declared_max_; }
-  virtual Sample ranges(size_t i) const;
-  virtual size_t bucket_count() const;
+  virtual Sample ranges(uint32_t i) const;
+  virtual uint32_t bucket_count() const;
   const BucketRanges* bucket_ranges() const { return bucket_ranges_; }
 
   // This function validates histogram construction arguments. It returns false
@@ -176,23 +192,31 @@
   static bool InspectConstructionArguments(const std::string& name,
                                            Sample* minimum,
                                            Sample* maximum,
-                                           size_t* bucket_count);
+                                           uint32_t* bucket_count);
 
   // HistogramBase implementation:
   uint64_t name_hash() const override;
   HistogramType GetHistogramType() const override;
   bool HasConstructionArguments(Sample expected_minimum,
                                 Sample expected_maximum,
-                                size_t expected_bucket_count) const override;
+                                uint32_t expected_bucket_count) const override;
   void Add(Sample value) override;
   void AddCount(Sample value, int count) override;
   scoped_ptr<HistogramSamples> SnapshotSamples() const override;
+  scoped_ptr<HistogramSamples> SnapshotDelta() override;
   void AddSamples(const HistogramSamples& samples) override;
   bool AddSamplesFromPickle(base::PickleIterator* iter) override;
   void WriteHTMLGraph(std::string* output) const override;
   void WriteAscii(std::string* output) const override;
 
  protected:
+  // This class, defined entirely within the .cc file, contains all the
+  // common logic for building a Histogram and can be overridden by more
+  // specific types to alter details of how the creation is done. It is
+  // defined as an embedded class (rather than an anonymous one) so it
+  // can access the protected constructors.
+  class Factory;
+
   // |ranges| should contain the underflow and overflow buckets. See top
   // comments for example.
   Histogram(const std::string& name,
@@ -200,30 +224,41 @@
             Sample maximum,
             const BucketRanges* ranges);
 
-  ~Histogram() override;
+  // Traditionally, histograms allocate their own memory for the bucket
+  // vector but "shared" histograms use memory regions allocated from a
+  // special memory segment that is passed in here.  It is assumed that
+  // the life of this memory is managed externally and exceeds the lifetime
+  // of this object. Practically, this memory is never released until the
+  // process exits and the OS cleans it up.
+  Histogram(const std::string& name,
+            Sample minimum,
+            Sample maximum,
+            const BucketRanges* ranges,
+            HistogramBase::AtomicCount* counts,
+            HistogramBase::AtomicCount* logged_counts,
+            uint32_t counts_size,
+            HistogramSamples::Metadata* meta,
+            HistogramSamples::Metadata* logged_meta);
 
   // HistogramBase implementation:
   bool SerializeInfoImpl(base::Pickle* pickle) const override;
 
   // Method to override to skip the display of the i'th bucket if it's empty.
-  virtual bool PrintEmptyBucket(size_t index) const;
+  virtual bool PrintEmptyBucket(uint32_t index) const;
 
   // Get normalized size, relative to the ranges(i).
-  virtual double GetBucketSize(Count current, size_t i) const;
+  virtual double GetBucketSize(Count current, uint32_t i) const;
 
   // Return a string description of what goes in a given bucket.
   // Most commonly this is the numeric value, but in derived classes it may
   // be a name (or string description) given to the bucket.
-  virtual const std::string GetAsciiBucketRange(size_t it) const;
+  virtual const std::string GetAsciiBucketRange(uint32_t it) const;
 
  private:
   // Allow tests to corrupt our innards for testing purposes.
   FRIEND_TEST_ALL_PREFIXES(HistogramTest, BoundsTest);
   FRIEND_TEST_ALL_PREFIXES(HistogramTest, BucketPlacementTest);
-  FRIEND_TEST_ALL_PREFIXES(HistogramTest, CorruptBucketBounds);
   FRIEND_TEST_ALL_PREFIXES(HistogramTest, CorruptSampleCounts);
-  FRIEND_TEST_ALL_PREFIXES(HistogramTest, NameMatchTest);
-  FRIEND_TEST_ALL_PREFIXES(HistogramTest, AddCountTest);
 
   friend class StatisticsRecorder;  // To allow it to delete duplicates.
   friend class StatisticsRecorderTest;
@@ -255,7 +290,7 @@
   void WriteAsciiBucketContext(const int64_t past,
                                const Count current,
                                const int64_t remaining,
-                               const size_t i,
+                               const uint32_t i,
                                std::string* output) const;
 
   // WriteJSON calls these.
@@ -275,6 +310,9 @@
   // sample.
   scoped_ptr<SampleVector> samples_;
 
+  // Also keep a previous uploaded state for calculating deltas.
+  scoped_ptr<HistogramSamples> logged_samples_;
+
   DISALLOW_COPY_AND_ASSIGN(Histogram);
 };
 
@@ -291,12 +329,12 @@
   static HistogramBase* FactoryGet(const std::string& name,
                                    Sample minimum,
                                    Sample maximum,
-                                   size_t bucket_count,
+                                   uint32_t bucket_count,
                                    int32_t flags);
   static HistogramBase* FactoryTimeGet(const std::string& name,
                                        TimeDelta minimum,
                                        TimeDelta maximum,
-                                       size_t bucket_count,
+                                       uint32_t bucket_count,
                                        int32_t flags);
 
   // Overloads of the above two functions that take a const char* |name| param,
@@ -305,14 +343,26 @@
   static HistogramBase* FactoryGet(const char* name,
                                    Sample minimum,
                                    Sample maximum,
-                                   size_t bucket_count,
+                                   uint32_t bucket_count,
                                    int32_t flags);
   static HistogramBase* FactoryTimeGet(const char* name,
                                        TimeDelta minimum,
                                        TimeDelta maximum,
-                                       size_t bucket_count,
+                                       uint32_t bucket_count,
                                        int32_t flags);
 
+  // Create a histogram using data in persistent storage.
+  static scoped_ptr<HistogramBase> PersistentCreate(
+      const std::string& name,
+      Sample minimum,
+      Sample maximum,
+      const BucketRanges* ranges,
+      HistogramBase::AtomicCount* counts,
+      HistogramBase::AtomicCount* logged_counts,
+      uint32_t counts_size,
+      HistogramSamples::Metadata* meta,
+      HistogramSamples::Metadata* logged_meta);
+
   struct DescriptionPair {
     Sample sample;
     const char* description;  // Null means end of a list of pairs.
@@ -327,7 +377,7 @@
       const std::string& name,
       Sample minimum,
       Sample maximum,
-      size_t bucket_count,
+      uint32_t bucket_count,
       int32_t flags,
       const DescriptionPair descriptions[]);
 
@@ -339,20 +389,32 @@
   HistogramType GetHistogramType() const override;
 
  protected:
+  class Factory;
+
   LinearHistogram(const std::string& name,
                   Sample minimum,
                   Sample maximum,
                   const BucketRanges* ranges);
 
-  double GetBucketSize(Count current, size_t i) const override;
+  LinearHistogram(const std::string& name,
+                  Sample minimum,
+                  Sample maximum,
+                  const BucketRanges* ranges,
+                  HistogramBase::AtomicCount* counts,
+                  HistogramBase::AtomicCount* logged_counts,
+                  uint32_t counts_size,
+                  HistogramSamples::Metadata* meta,
+                  HistogramSamples::Metadata* logged_meta);
+
+  double GetBucketSize(Count current, uint32_t i) const override;
 
   // If we have a description for a bucket, then return that.  Otherwise
   // let parent class provide a (numeric) description.
-  const std::string GetAsciiBucketRange(size_t i) const override;
+  const std::string GetAsciiBucketRange(uint32_t i) const override;
 
   // Skip printing of name for numeric range if we have a name (and if this is
   // an empty bucket).
-  bool PrintEmptyBucket(size_t index) const override;
+  bool PrintEmptyBucket(uint32_t index) const override;
 
  private:
   friend BASE_EXPORT HistogramBase* DeserializeHistogramInfo(
@@ -380,10 +442,28 @@
   // call sites.
   static HistogramBase* FactoryGet(const char* name, int32_t flags);
 
+  // Create a histogram using data in persistent storage.
+  static scoped_ptr<HistogramBase> PersistentCreate(
+      const std::string& name,
+      const BucketRanges* ranges,
+      HistogramBase::AtomicCount* counts,
+      HistogramBase::AtomicCount* logged_counts,
+      HistogramSamples::Metadata* meta,
+      HistogramSamples::Metadata* logged_meta);
+
   HistogramType GetHistogramType() const override;
 
+ protected:
+  class Factory;
+
  private:
   BooleanHistogram(const std::string& name, const BucketRanges* ranges);
+  BooleanHistogram(const std::string& name,
+                   const BucketRanges* ranges,
+                   HistogramBase::AtomicCount* counts,
+                   HistogramBase::AtomicCount* logged_counts,
+                   HistogramSamples::Metadata* meta,
+                   HistogramSamples::Metadata* logged_meta);
 
   friend BASE_EXPORT HistogramBase* DeserializeHistogramInfo(
       base::PickleIterator* iter);
@@ -412,6 +492,16 @@
                                    const std::vector<Sample>& custom_ranges,
                                    int32_t flags);
 
+  // Create a histogram using data in persistent storage.
+  static scoped_ptr<HistogramBase> PersistentCreate(
+      const std::string& name,
+      const BucketRanges* ranges,
+      HistogramBase::AtomicCount* counts,
+      HistogramBase::AtomicCount* logged_counts,
+      uint32_t counts_size,
+      HistogramSamples::Metadata* meta,
+      HistogramSamples::Metadata* logged_meta);
+
   // Overridden from Histogram:
   HistogramType GetHistogramType() const override;
 
@@ -422,15 +512,25 @@
   // so that invalid samples never fall into the same bucket as valid samples.
   // TODO(kaiwang): Change name to ArrayToCustomEnumRanges.
   static std::vector<Sample> ArrayToCustomRanges(const Sample* values,
-                                                 size_t num_values);
+                                                 uint32_t num_values);
  protected:
+  class Factory;
+
   CustomHistogram(const std::string& name,
                   const BucketRanges* ranges);
 
+  CustomHistogram(const std::string& name,
+                  const BucketRanges* ranges,
+                  HistogramBase::AtomicCount* counts,
+                  HistogramBase::AtomicCount* logged_counts,
+                  uint32_t counts_size,
+                  HistogramSamples::Metadata* meta,
+                  HistogramSamples::Metadata* logged_meta);
+
   // HistogramBase implementation:
   bool SerializeInfoImpl(base::Pickle* pickle) const override;
 
-  double GetBucketSize(Count current, size_t i) const override;
+  double GetBucketSize(Count current, uint32_t i) const override;
 
  private:
   friend BASE_EXPORT HistogramBase* DeserializeHistogramInfo(
@@ -438,8 +538,6 @@
   static HistogramBase* DeserializeInfoImpl(base::PickleIterator* iter);
 
   static bool ValidateCustomRanges(const std::vector<Sample>& custom_ranges);
-  static BucketRanges* CreateBucketRangesFromCustomRanges(
-      const std::vector<Sample>& custom_ranges);
 
   DISALLOW_COPY_AND_ASSIGN(CustomHistogram);
 };
diff --git a/base/metrics/histogram_base.cc b/base/metrics/histogram_base.cc
index d8aefb1..0152bf7 100644
--- a/base/metrics/histogram_base.cc
+++ b/base/metrics/histogram_base.cc
@@ -34,9 +34,8 @@
       return "CUSTOM_HISTOGRAM";
     case SPARSE_HISTOGRAM:
       return "SPARSE_HISTOGRAM";
-    default:
-      NOTREACHED();
   }
+  NOTREACHED();
   return "UNKNOWN";
 }
 
@@ -62,6 +61,7 @@
 }
 
 const HistogramBase::Sample HistogramBase::kSampleType_MAX = INT_MAX;
+HistogramBase* HistogramBase::report_histogram_ = nullptr;
 
 HistogramBase::HistogramBase(const std::string& name)
     : histogram_name_(name),
@@ -97,7 +97,8 @@
   return SerializeInfoImpl(pickle);
 }
 
-int HistogramBase::FindCorruption(const HistogramSamples& /* samples */) const {
+uint32_t HistogramBase::FindCorruption(
+    const HistogramSamples& /* samples */) const {
   // Not supported by default.
   return NO_INCONSISTENCIES;
 }
@@ -122,6 +123,30 @@
   serializer.Serialize(root);
 }
 
+// static
+void HistogramBase::EnableActivityReportHistogram(
+    const std::string& process_type) {
+  DCHECK(!report_histogram_);
+  size_t existing = StatisticsRecorder::GetHistogramCount();
+  if (existing != 0) {
+    DLOG(WARNING) << existing
+                  << " histograms were created before reporting was enabled.";
+  }
+
+  std::string name =
+      "UMA.Histograms.Activity" +
+      (process_type.empty() ? process_type : "." + process_type);
+
+  // Calling FactoryGet() here rather than using a histogram macro works
+  // around some problems with tests that could end up seeing the report
+  // histogram when not expected, due to a bad interaction between
+  // HistogramTester and StatisticsRecorder.
+  report_histogram_ = LinearHistogram::FactoryGet(
+      name, 1, HISTOGRAM_REPORT_MAX, HISTOGRAM_REPORT_MAX + 1,
+      kUmaTargetedHistogramFlag);
+  report_histogram_->Add(HISTOGRAM_REPORT_CREATED);
+}
+
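
(Caller-side sketch; "browser" is a hypothetical process type, and real
startup code would do this before creating any other histograms:)

    #include "base/metrics/histogram_base.h"
    #include "base/metrics/statistics_recorder.h"

    int main(int argc, char** argv) {
      base::StatisticsRecorder recorder;
      base::HistogramBase::EnableActivityReportHistogram("browser");
      // Every histogram created or looked up from here on is counted in
      // "UMA.Histograms.Activity.browser".
      return 0;
    }
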
 void HistogramBase::FindAndRunCallback(HistogramBase::Sample sample) const {
   if ((flags() & kCallbackExists) == 0)
     return;
@@ -163,4 +188,47 @@
   StringAppendF(output, " (%d = %3.1f%%)", current, current/scaled_sum);
 }
 
+// static
+void HistogramBase::ReportHistogramActivity(const HistogramBase& histogram,
+                                            ReportActivity activity) {
+  if (!report_histogram_)
+    return;
+
+  const int32_t flags = histogram.flags_;
+  HistogramReport report_type = HISTOGRAM_REPORT_MAX;
+  switch (activity) {
+    case HISTOGRAM_CREATED:
+      report_histogram_->Add(HISTOGRAM_REPORT_HISTOGRAM_CREATED);
+      switch (histogram.GetHistogramType()) {
+        case HISTOGRAM:
+          report_type = HISTOGRAM_REPORT_TYPE_LOGARITHMIC;
+          break;
+        case LINEAR_HISTOGRAM:
+          report_type = HISTOGRAM_REPORT_TYPE_LINEAR;
+          break;
+        case BOOLEAN_HISTOGRAM:
+          report_type = HISTOGRAM_REPORT_TYPE_BOOLEAN;
+          break;
+        case CUSTOM_HISTOGRAM:
+          report_type = HISTOGRAM_REPORT_TYPE_CUSTOM;
+          break;
+        case SPARSE_HISTOGRAM:
+          report_type = HISTOGRAM_REPORT_TYPE_SPARSE;
+          break;
+      }
+      report_histogram_->Add(report_type);
+      if (flags & kIsPersistent)
+        report_histogram_->Add(HISTOGRAM_REPORT_FLAG_PERSISTENT);
+      if ((flags & kUmaStabilityHistogramFlag) == kUmaStabilityHistogramFlag)
+        report_histogram_->Add(HISTOGRAM_REPORT_FLAG_UMA_STABILITY);
+      else if (flags & kUmaTargetedHistogramFlag)
+        report_histogram_->Add(HISTOGRAM_REPORT_FLAG_UMA_TARGETED);
+      break;
+
+    case HISTOGRAM_LOOKUP:
+      report_histogram_->Add(HISTOGRAM_REPORT_HISTOGRAM_LOOKUP);
+      break;
+  }
+}
+
 }  // namespace base
diff --git a/base/metrics/histogram_base.h b/base/metrics/histogram_base.h
index 4fa07c6..f11befd 100644
--- a/base/metrics/histogram_base.h
+++ b/base/metrics/histogram_base.h
@@ -21,6 +21,7 @@
 
 namespace base {
 
+class BucketRanges;
 class DictionaryValue;
 class HistogramBase;
 class HistogramSamples;
@@ -29,7 +30,7 @@
 class PickleIterator;
 
 ////////////////////////////////////////////////////////////////////////////////
-// These enums are used to facilitate deserialization of histograms from other
+// This enum is used to facilitate deserialization of histograms from other
 // processes into the browser. If you create another class that inherits from
 // HistogramBase, add new histogram types and names below.
 
@@ -43,6 +44,39 @@
 
 std::string HistogramTypeToString(HistogramType type);
 
+// This enum is used for reporting how many histograms and of what types and
+// variations are being created. It has to be in the main .h file so it is
+// visible to files that define the various histogram types.
+enum HistogramReport {
+  // Count the number of reports created. The other counts divided by this
+  // number will give the average per run of the program.
+  HISTOGRAM_REPORT_CREATED = 0,
+
+  // Count the total number of histograms created. It is the limit against
+  // which all others are compared.
+  HISTOGRAM_REPORT_HISTOGRAM_CREATED = 1,
+
+  // Count the total number of histograms looked up. It's better to cache
+  // the result of a single lookup rather than do it repeatedly.
+  HISTOGRAM_REPORT_HISTOGRAM_LOOKUP = 2,
+
+  // These count the individual histogram types. This must follow the order
+  // of HistogramType above.
+  HISTOGRAM_REPORT_TYPE_LOGARITHMIC = 3,
+  HISTOGRAM_REPORT_TYPE_LINEAR = 4,
+  HISTOGRAM_REPORT_TYPE_BOOLEAN = 5,
+  HISTOGRAM_REPORT_TYPE_CUSTOM = 6,
+  HISTOGRAM_REPORT_TYPE_SPARSE = 7,
+
+  // These indicate the individual flags that were set.
+  HISTOGRAM_REPORT_FLAG_UMA_TARGETED = 8,
+  HISTOGRAM_REPORT_FLAG_UMA_STABILITY = 9,
+  HISTOGRAM_REPORT_FLAG_PERSISTENT = 10,
+
+  // This must be last.
+  HISTOGRAM_REPORT_MAX = 11
+};
+
 // Create or find existing histogram that matches the pickled info.
 // Returns NULL if the pickled data has problems.
 BASE_EXPORT HistogramBase* DeserializeHistogramInfo(base::PickleIterator* iter);
@@ -81,19 +115,30 @@
     // to shortcut looking up the callback if it doesn't exist.
     kCallbackExists = 0x20,
 
+    // Indicates that the histogram is held in "persistent" memory and may
+    // be accessible between processes. This is only possible if such a
+    // memory segment has been created/attached, used to create a
+    // PersistentMemoryAllocator, and that allocator made known to the
+    // histogram module before this histogram is created.
+    kIsPersistent = 0x40,
+
     // Only for Histogram and its sub classes: fancy bucket-naming support.
     kHexRangePrintingFlag = 0x8000,
   };
 
   // Histogram data inconsistency types.
-  enum Inconsistency {
+  enum Inconsistency : uint32_t {
     NO_INCONSISTENCIES = 0x0,
     RANGE_CHECKSUM_ERROR = 0x1,
     BUCKET_ORDER_ERROR = 0x2,
     COUNT_HIGH_ERROR = 0x4,
     COUNT_LOW_ERROR = 0x8,
 
-    NEVER_EXCEEDED_VALUE = 0x10
+    NEVER_EXCEEDED_VALUE = 0x10,
+
+    // This value is used only in HistogramSnapshotManager for marking
+    // internally when new inconsistencies are found.
+    NEW_INCONSISTENCY_FOUND = 0x8000000
   };
 
   explicit HistogramBase(const std::string& name);
@@ -119,9 +164,10 @@
   // Whether the histogram has construction arguments as parameters specified.
   // For histograms that don't have the concept of minimum, maximum or
   // bucket_count, this function always returns false.
-  virtual bool HasConstructionArguments(Sample expected_minimum,
-                                        Sample expected_maximum,
-                                        size_t expected_bucket_count) const = 0;
+  virtual bool HasConstructionArguments(
+      Sample expected_minimum,
+      Sample expected_maximum,
+      uint32_t expected_bucket_count) const = 0;
 
   virtual void Add(Sample value) = 0;
 
@@ -145,12 +191,17 @@
 
   // Try to find out data corruption from histogram and the samples.
   // The returned value is a combination of Inconsistency enum.
-  virtual int FindCorruption(const HistogramSamples& samples) const;
+  virtual uint32_t FindCorruption(const HistogramSamples& samples) const;
 
   // Snapshot the current complete set of sample data.
   // Override with atomic/locked snapshot if needed.
   virtual scoped_ptr<HistogramSamples> SnapshotSamples() const = 0;
 
+  // Calculate the change (delta) in histogram counts since the previous call
+  // to this method. Each successive call will return only those counts
+  // changed since the last call.
+  virtual scoped_ptr<HistogramSamples> SnapshotDelta() = 0;
+
   // The following methods provide graphical histogram displays.
   virtual void WriteHTMLGraph(std::string* output) const = 0;
   virtual void WriteAscii(std::string* output) const = 0;
@@ -160,7 +211,17 @@
   // customize the output.
   void WriteJSON(std::string* output) const;
 
+  // This enables a histogram that reports what types of histograms are
+  // created and their flags. It must be called while still single-threaded.
+  //
+  // IMPORTANT: Callers must update tools/metrics/histograms/histograms.xml
+  // with the following histogram:
+  //    UMA.Histograms.Activity.<process_type>
+  static void EnableActivityReportHistogram(const std::string& process_type);
+
  protected:
+  enum ReportActivity { HISTOGRAM_CREATED, HISTOGRAM_LOOKUP };
+
   // Subclasses should implement this function to make SerializeInfo work.
   virtual bool SerializeInfoImpl(base::Pickle* pickle) const = 0;
 
@@ -192,7 +253,16 @@
   // passing |sample| as the parameter.
   void FindAndRunCallback(Sample sample) const;
 
+  // Update report with an |activity| that occurred for |histogram|.
+  static void ReportHistogramActivity(const HistogramBase& histogram,
+                                      ReportActivity activity);
+
+  // The global histogram reporting what histograms are created.
+  static HistogramBase* report_histogram_;
+
  private:
+  friend class HistogramBaseTest;
+
   const std::string histogram_name_;
   AtomicCount flags_;
 
diff --git a/base/metrics/histogram_base_unittest.cc b/base/metrics/histogram_base_unittest.cc
index 2d6b6df..6b41597 100644
--- a/base/metrics/histogram_base_unittest.cc
+++ b/base/metrics/histogram_base_unittest.cc
@@ -18,19 +18,29 @@
   HistogramBaseTest() {
     // Each test will have a clean state (no Histogram / BucketRanges
     // registered).
-    statistics_recorder_ = NULL;
     ResetStatisticsRecorder();
   }
 
-  ~HistogramBaseTest() override { delete statistics_recorder_; }
+  ~HistogramBaseTest() override {
+    HistogramBase::report_histogram_ = nullptr;
+  }
 
   void ResetStatisticsRecorder() {
-    delete statistics_recorder_;
-    statistics_recorder_ = new StatisticsRecorder();
+    // It is necessary to fully destruct any existing StatisticsRecorder
+    // before creating a new one.
+    statistics_recorder_.reset();
+    statistics_recorder_.reset(new StatisticsRecorder());
+  }
+
+  HistogramBase* GetCreationReportHistogram(const std::string& name) {
+    HistogramBase::EnableActivityReportHistogram(name);
+    return HistogramBase::report_histogram_;
   }
 
  private:
-  StatisticsRecorder* statistics_recorder_;
+  scoped_ptr<StatisticsRecorder> statistics_recorder_;
+
+  DISALLOW_COPY_AND_ASSIGN(HistogramBaseTest);
 };
 
 TEST_F(HistogramBaseTest, DeserializeHistogram) {
@@ -152,4 +162,61 @@
   EXPECT_EQ(0, deserialized->flags());
 }
 
+TEST_F(HistogramBaseTest, CreationReportHistogram) {
+  // Enabled creation report. Itself is not included in the report.
+  HistogramBase* report = GetCreationReportHistogram("CreationReportTest");
+  ASSERT_TRUE(report);
+
+  std::vector<HistogramBase::Sample> ranges;
+  ranges.push_back(1);
+  ranges.push_back(2);
+  ranges.push_back(4);
+  ranges.push_back(8);
+  ranges.push_back(10);
+
+  // Create all histogram types and verify counts.
+  Histogram::FactoryGet("CRH-Histogram", 1, 10, 5, 0);
+  LinearHistogram::FactoryGet("CRH-Linear", 1, 10, 5, 0);
+  BooleanHistogram::FactoryGet("CRH-Boolean", 0);
+  CustomHistogram::FactoryGet("CRH-Custom", ranges, 0);
+  SparseHistogram::FactoryGet("CRH-Sparse", 0);
+
+  scoped_ptr<HistogramSamples> samples = report->SnapshotSamples();
+  EXPECT_EQ(1, samples->GetCount(HISTOGRAM_REPORT_CREATED));
+  EXPECT_EQ(5, samples->GetCount(HISTOGRAM_REPORT_HISTOGRAM_CREATED));
+  EXPECT_EQ(0, samples->GetCount(HISTOGRAM_REPORT_HISTOGRAM_LOOKUP));
+  EXPECT_EQ(1, samples->GetCount(HISTOGRAM_REPORT_TYPE_LOGARITHMIC));
+  EXPECT_EQ(1, samples->GetCount(HISTOGRAM_REPORT_TYPE_LINEAR));
+  EXPECT_EQ(1, samples->GetCount(HISTOGRAM_REPORT_TYPE_BOOLEAN));
+  EXPECT_EQ(1, samples->GetCount(HISTOGRAM_REPORT_TYPE_CUSTOM));
+  EXPECT_EQ(1, samples->GetCount(HISTOGRAM_REPORT_TYPE_SPARSE));
+
+  // Create all flag types and verify counts.
+  Histogram::FactoryGet("CRH-Histogram-UMA-Targeted", 1, 10, 5,
+                        HistogramBase::kUmaTargetedHistogramFlag);
+  Histogram::FactoryGet("CRH-Histogram-UMA-Stability", 1, 10, 5,
+                        HistogramBase::kUmaStabilityHistogramFlag);
+  SparseHistogram::FactoryGet("CRH-Sparse-UMA-Targeted",
+                              HistogramBase::kUmaTargetedHistogramFlag);
+  SparseHistogram::FactoryGet("CRH-Sparse-UMA-Stability",
+                              HistogramBase::kUmaStabilityHistogramFlag);
+  samples = report->SnapshotSamples();
+  EXPECT_EQ(1, samples->GetCount(HISTOGRAM_REPORT_CREATED));
+  EXPECT_EQ(9, samples->GetCount(HISTOGRAM_REPORT_HISTOGRAM_CREATED));
+  EXPECT_EQ(0, samples->GetCount(HISTOGRAM_REPORT_HISTOGRAM_LOOKUP));
+  EXPECT_EQ(2, samples->GetCount(HISTOGRAM_REPORT_FLAG_UMA_TARGETED));
+  EXPECT_EQ(2, samples->GetCount(HISTOGRAM_REPORT_FLAG_UMA_STABILITY));
+
+  // Do lookup of existing histograms and verify counts.
+  Histogram::FactoryGet("CRH-Histogram", 1, 10, 5, 0);
+  LinearHistogram::FactoryGet("CRH-Linear", 1, 10, 5, 0);
+  BooleanHistogram::FactoryGet("CRH-Boolean", 0);
+  CustomHistogram::FactoryGet("CRH-Custom", ranges, 0);
+  SparseHistogram::FactoryGet("CRH-Sparse", 0);
+  samples = report->SnapshotSamples();
+  EXPECT_EQ(1, samples->GetCount(HISTOGRAM_REPORT_CREATED));
+  EXPECT_EQ(9, samples->GetCount(HISTOGRAM_REPORT_HISTOGRAM_CREATED));
+  EXPECT_EQ(5, samples->GetCount(HISTOGRAM_REPORT_HISTOGRAM_LOOKUP));
+}
+
 }  // namespace base
diff --git a/base/metrics/histogram_delta_serialization.cc b/base/metrics/histogram_delta_serialization.cc
new file mode 100644
index 0000000..3e5d154
--- /dev/null
+++ b/base/metrics/histogram_delta_serialization.cc
@@ -0,0 +1,123 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/metrics/histogram_delta_serialization.h"
+
+#include "base/logging.h"
+#include "base/metrics/histogram_base.h"
+#include "base/metrics/histogram_snapshot_manager.h"
+#include "base/metrics/statistics_recorder.h"
+#include "base/numerics/safe_conversions.h"
+#include "base/pickle.h"
+#include "base/values.h"
+
+namespace base {
+
+namespace {
+
+// Create or find existing histogram and add the samples from pickle.
+// Silently returns when seeing any data problem in the pickle.
+void DeserializeHistogramAndAddSamples(PickleIterator* iter) {
+  HistogramBase* histogram = DeserializeHistogramInfo(iter);
+  if (!histogram)
+    return;
+
+  if (histogram->flags() & HistogramBase::kIPCSerializationSourceFlag) {
+    DVLOG(1) << "Single process mode, histogram observed and not copied: "
+             << histogram->histogram_name();
+    return;
+  }
+  histogram->AddSamplesFromPickle(iter);
+}
+
+}  // namespace
+
+HistogramDeltaSerialization::HistogramDeltaSerialization(
+    const std::string& caller_name)
+    : histogram_snapshot_manager_(this),
+      serialized_deltas_(NULL) {
+  inconsistencies_histogram_ =
+      LinearHistogram::FactoryGet(
+          "Histogram.Inconsistencies" + caller_name, 1,
+          HistogramBase::NEVER_EXCEEDED_VALUE,
+          HistogramBase::NEVER_EXCEEDED_VALUE + 1,
+          HistogramBase::kUmaTargetedHistogramFlag);
+
+  inconsistencies_unique_histogram_ =
+      LinearHistogram::FactoryGet(
+          "Histogram.Inconsistencies" + caller_name + "Unique", 1,
+          HistogramBase::NEVER_EXCEEDED_VALUE,
+          HistogramBase::NEVER_EXCEEDED_VALUE + 1,
+          HistogramBase::kUmaTargetedHistogramFlag);
+
+  inconsistent_snapshot_histogram_ =
+      Histogram::FactoryGet(
+          "Histogram.InconsistentSnapshot" + caller_name, 1, 1000000, 50,
+          HistogramBase::kUmaTargetedHistogramFlag);
+}
+
+HistogramDeltaSerialization::~HistogramDeltaSerialization() {
+}
+
+void HistogramDeltaSerialization::PrepareAndSerializeDeltas(
+    std::vector<std::string>* serialized_deltas,
+    bool include_persistent) {
+  DCHECK(thread_checker_.CalledOnValidThread());
+
+  serialized_deltas_ = serialized_deltas;
+  // Note: Before serializing, we set the kIPCSerializationSourceFlag for all
+  // the histograms, so that the receiving process can distinguish them from the
+  // local histograms.
+  histogram_snapshot_manager_.PrepareDeltas(
+      StatisticsRecorder::begin(include_persistent), StatisticsRecorder::end(),
+      Histogram::kIPCSerializationSourceFlag, Histogram::kNoFlags);
+  serialized_deltas_ = NULL;
+}
+
+// static
+void HistogramDeltaSerialization::DeserializeAndAddSamples(
+    const std::vector<std::string>& serialized_deltas) {
+  for (std::vector<std::string>::const_iterator it = serialized_deltas.begin();
+       it != serialized_deltas.end(); ++it) {
+    Pickle pickle(it->data(), checked_cast<int>(it->size()));
+    PickleIterator iter(pickle);
+    DeserializeHistogramAndAddSamples(&iter);
+  }
+}
+
+void HistogramDeltaSerialization::RecordDelta(
+    const HistogramBase& histogram,
+    const HistogramSamples& snapshot) {
+  DCHECK(thread_checker_.CalledOnValidThread());
+  DCHECK_NE(0, snapshot.TotalCount());
+
+  Pickle pickle;
+  histogram.SerializeInfo(&pickle);
+  snapshot.Serialize(&pickle);
+  serialized_deltas_->push_back(
+      std::string(static_cast<const char*>(pickle.data()), pickle.size()));
+}
+
+void HistogramDeltaSerialization::InconsistencyDetected(
+    HistogramBase::Inconsistency problem) {
+  DCHECK(thread_checker_.CalledOnValidThread());
+
+  inconsistencies_histogram_->Add(problem);
+}
+
+void HistogramDeltaSerialization::UniqueInconsistencyDetected(
+    HistogramBase::Inconsistency problem) {
+  DCHECK(thread_checker_.CalledOnValidThread());
+
+  inconsistencies_unique_histogram_->Add(problem);
+}
+
+void HistogramDeltaSerialization::InconsistencyDetectedInLoggedCount(
+    int amount) {
+  DCHECK(thread_checker_.CalledOnValidThread());
+
+  inconsistent_snapshot_histogram_->Add(std::abs(amount));
+}
+
+}  // namespace base
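
A typical round trip through HistogramDeltaSerialization, as a minimal
sketch (illustrative; assumes a live StatisticsRecorder on both sides):

    // Sender: snapshot deltas for all histograms, including persistent ones.
    base::HistogramDeltaSerialization serializer("Example");
    std::vector<std::string> deltas;
    serializer.PrepareAndSerializeDeltas(&deltas, true /* include_persistent */);
    // ... transport |deltas|, e.g. over IPC ...

    // Receiver: recreate histograms as needed and merge the samples.
    base::HistogramDeltaSerialization::DeserializeAndAddSamples(deltas);
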
diff --git a/base/metrics/histogram_delta_serialization.h b/base/metrics/histogram_delta_serialization.h
index 0a3983f..a05a1a7 100644
--- a/base/metrics/histogram_delta_serialization.h
+++ b/base/metrics/histogram_delta_serialization.h
@@ -28,9 +28,12 @@
 
   // Computes deltas in histogram bucket counts relative to the previous call to
   // this method. Stores the deltas in serialized form into |serialized_deltas|.
-  // If |serialized_deltas| is NULL, no data is serialized, though the next call
-  // will compute the deltas relative to this one.
-  void PrepareAndSerializeDeltas(std::vector<std::string>* serialized_deltas);
+  // If |serialized_deltas| is null, no data is serialized, though the next call
+  // will compute the deltas relative to this one. Setting |include_persistent|
+  // will include histograms held in persistent memory (and thus may be reported
+  // elsewhere); otherwise only histograms local to this process are serialized.
+  void PrepareAndSerializeDeltas(std::vector<std::string>* serialized_deltas,
+                                 bool include_persistent);
 
   // Deserialize deltas and add samples to corresponding histograms, creating
   // them if necessary. Silently ignores errors in |serialized_deltas|.
diff --git a/base/metrics/histogram_delta_serialization_unittest.cc b/base/metrics/histogram_delta_serialization_unittest.cc
new file mode 100644
index 0000000..93f7198
--- /dev/null
+++ b/base/metrics/histogram_delta_serialization_unittest.cc
@@ -0,0 +1,54 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/metrics/histogram_delta_serialization.h"
+
+#include <vector>
+
+#include "base/metrics/histogram.h"
+#include "base/metrics/histogram_base.h"
+#include "base/metrics/statistics_recorder.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace base {
+
+TEST(HistogramDeltaSerializationTest, DeserializeHistogramAndAddSamples) {
+  StatisticsRecorder statistic_recorder;
+  HistogramDeltaSerialization serializer("HistogramDeltaSerializationTest");
+  std::vector<std::string> deltas;
+  // Nothing was changed yet.
+  serializer.PrepareAndSerializeDeltas(&deltas, true);
+  EXPECT_TRUE(deltas.empty());
+
+  HistogramBase* histogram = Histogram::FactoryGet(
+      "TestHistogram", 1, 1000, 10, HistogramBase::kIPCSerializationSourceFlag);
+  histogram->Add(1);
+  histogram->Add(10);
+  histogram->Add(100);
+  histogram->Add(1000);
+
+  serializer.PrepareAndSerializeDeltas(&deltas, true);
+  EXPECT_FALSE(deltas.empty());
+
+  HistogramDeltaSerialization::DeserializeAndAddSamples(deltas);
+
+  // The histogram has kIPCSerializationSourceFlag. So samples will be ignored.
+  scoped_ptr<HistogramSamples> snapshot(histogram->SnapshotSamples());
+  EXPECT_EQ(1, snapshot->GetCount(1));
+  EXPECT_EQ(1, snapshot->GetCount(10));
+  EXPECT_EQ(1, snapshot->GetCount(100));
+  EXPECT_EQ(1, snapshot->GetCount(1000));
+
+  // Clear kIPCSerializationSourceFlag to emulate multi-process usage.
+  histogram->ClearFlags(HistogramBase::kIPCSerializationSourceFlag);
+  HistogramDeltaSerialization::DeserializeAndAddSamples(deltas);
+
+  scoped_ptr<HistogramSamples> snapshot2(histogram->SnapshotSamples());
+  EXPECT_EQ(2, snapshot2->GetCount(1));
+  EXPECT_EQ(2, snapshot2->GetCount(10));
+  EXPECT_EQ(2, snapshot2->GetCount(100));
+  EXPECT_EQ(2, snapshot2->GetCount(1000));
+}
+
+}  // namespace base
diff --git a/base/metrics/histogram_macros.h b/base/metrics/histogram_macros.h
index 0492f0c..fa23bea 100644
--- a/base/metrics/histogram_macros.h
+++ b/base/metrics/histogram_macros.h
@@ -10,6 +10,11 @@
 #include "base/metrics/histogram.h"
 #include "base/time/time.h"
 
+// Macros for efficient use of histograms. See documentation in histogram.h.
+//
+// UMA_HISTOGRAM_SPARSE_SLOWLY is defined in sparse_histogram.h as it has
+// different #include dependencies.
+
 //------------------------------------------------------------------------------
 // Histograms are often put in areas where they are called many many times, and
 // performance is critical.  As a result, they are designed to have a very low
@@ -67,18 +72,24 @@
 // a macro argument here.  The name is only used in a DCHECK, to assure that
 // callers don't try to vary the name of the histogram (which would tend to be
 // ignored by the one-time initialization of the histogram_pointer).
-#define STATIC_HISTOGRAM_POINTER_BLOCK(constant_histogram_name,           \
-                                       histogram_add_method_invocation,   \
-                                       histogram_factory_get_invocation)  \
+
+// In some cases (integration into 3rd party code), it's useful to separate the
+// definition of |atomic_histogram_pointer| from its use. To achieve this we
+// define HISTOGRAM_POINTER_USE, which uses an |atomic_histogram_pointer|, and
+// STATIC_HISTOGRAM_POINTER_BLOCK, which defines an |atomic_histogram_pointer|
+// and forwards to HISTOGRAM_POINTER_USE.
+#define HISTOGRAM_POINTER_USE(atomic_histogram_pointer,                   \
+                              constant_histogram_name,                    \
+                              histogram_add_method_invocation,            \
+                              histogram_factory_get_invocation)           \
   do {                                                                    \
-    static base::subtle::AtomicWord atomic_histogram_pointer = 0;         \
     base::HistogramBase* histogram_pointer(                               \
         reinterpret_cast<base::HistogramBase*>(                           \
-            base::subtle::Acquire_Load(&atomic_histogram_pointer)));      \
+            base::subtle::Acquire_Load(atomic_histogram_pointer)));       \
     if (!histogram_pointer) {                                             \
       histogram_pointer = histogram_factory_get_invocation;               \
       base::subtle::Release_Store(                                        \
-          &atomic_histogram_pointer,                                      \
+          atomic_histogram_pointer,                                       \
           reinterpret_cast<base::subtle::AtomicWord>(histogram_pointer)); \
     }                                                                     \
     if (DCHECK_IS_ON())                                                   \
@@ -86,6 +97,18 @@
     histogram_pointer->histogram_add_method_invocation;                   \
   } while (0)
 
+// Defines the static |atomic_histogram_pointer| and forwards to
+// HISTOGRAM_POINTER_USE.
+#define STATIC_HISTOGRAM_POINTER_BLOCK(constant_histogram_name,               \
+                                       histogram_add_method_invocation,       \
+                                       histogram_factory_get_invocation)      \
+  do {                                                                        \
+    static base::subtle::AtomicWord atomic_histogram_pointer = 0;             \
+    HISTOGRAM_POINTER_USE(&atomic_histogram_pointer, constant_histogram_name, \
+                          histogram_add_method_invocation,                    \
+                          histogram_factory_get_invocation);                  \
+  } while (0)
+
 //------------------------------------------------------------------------------
 // Provide easy general purpose histogram in a macro, just like stats counters.
 // The first four macros use 50 buckets.
diff --git a/base/metrics/histogram_samples.cc b/base/metrics/histogram_samples.cc
index 61efd23..e3a4013 100644
--- a/base/metrics/histogram_samples.cc
+++ b/base/metrics/histogram_samples.cc
@@ -73,7 +73,11 @@
 HistogramSamples::HistogramSamples(uint64_t id, Metadata* meta)
     : meta_(meta) {
   DCHECK(meta_->id == 0 || meta_->id == id);
-  meta_->id = id;
+
+  // It's possible that |meta| is contained in initialized, read-only memory
+  // so it's essential that no write be done in that case.
+  if (!meta_->id)
+    meta_->id = id;
 }
 
 HistogramSamples::~HistogramSamples() {}
diff --git a/base/metrics/histogram_samples.h b/base/metrics/histogram_samples.h
index 3da3e2d..30bff84 100644
--- a/base/metrics/histogram_samples.h
+++ b/base/metrics/histogram_samples.h
@@ -19,7 +19,10 @@
 class PickleIterator;
 class SampleCountIterator;
 
-// HistogramSamples is a container storing all samples of a histogram.
+// HistogramSamples is a container storing all samples of a histogram. All
+// elements must be of a fixed width to ensure 32/64-bit interoperability.
+// If this structure changes, bump the version number for kTypeIdHistogram
+// in persistent_histogram_allocator.cc.
 class BASE_EXPORT HistogramSamples {
  public:
   struct Metadata {
diff --git a/base/metrics/histogram_snapshot_manager.cc b/base/metrics/histogram_snapshot_manager.cc
index 02f87f0..32dd4e6 100644
--- a/base/metrics/histogram_snapshot_manager.cc
+++ b/base/metrics/histogram_snapshot_manager.cc
@@ -14,82 +14,138 @@
 
 HistogramSnapshotManager::HistogramSnapshotManager(
     HistogramFlattener* histogram_flattener)
-    : histogram_flattener_(histogram_flattener) {
+    : preparing_deltas_(false),
+      histogram_flattener_(histogram_flattener) {
   DCHECK(histogram_flattener_);
 }
 
 HistogramSnapshotManager::~HistogramSnapshotManager() {
-  STLDeleteValues(&logged_samples_);
 }
 
-void HistogramSnapshotManager::PrepareDeltas(
-    HistogramBase::Flags flag_to_set,
-    HistogramBase::Flags required_flags) {
-  StatisticsRecorder::Histograms histograms;
-  StatisticsRecorder::GetHistograms(&histograms);
-  for (StatisticsRecorder::Histograms::const_iterator it = histograms.begin();
-       histograms.end() != it;
-       ++it) {
-    (*it)->SetFlags(flag_to_set);
-    if (((*it)->flags() & required_flags) == required_flags)
-      PrepareDelta(**it);
+void HistogramSnapshotManager::StartDeltas() {
+  // Ensure that start/finish calls do not get nested.
+  DCHECK(!preparing_deltas_);
+  preparing_deltas_ = true;
+
+  DCHECK(owned_histograms_.empty());
+
+#ifdef DEBUG
+  for (auto iter = known_histograms_.begin();
+       iter != known_histograms_.end(); ++iter) {
+    CHECK(!iter->second.histogram);
+    CHECK(!iter->second.accumulated_samples);
+    CHECK(!(iter->second.inconsistencies &
+            HistogramBase::NEW_INCONSISTENCY_FOUND));
   }
+#endif
 }
 
-void HistogramSnapshotManager::PrepareDelta(const HistogramBase& histogram) {
+void HistogramSnapshotManager::PrepareDelta(HistogramBase* histogram) {
+  PrepareSamples(histogram, histogram->SnapshotDelta());
+}
+
+void HistogramSnapshotManager::PrepareDeltaTakingOwnership(
+    scoped_ptr<HistogramBase> histogram) {
+  PrepareSamples(histogram.get(), histogram->SnapshotDelta());
+  owned_histograms_.push_back(std::move(histogram));
+}
+
+void HistogramSnapshotManager::PrepareAbsolute(const HistogramBase* histogram) {
+  PrepareSamples(histogram, histogram->SnapshotSamples());
+}
+
+void HistogramSnapshotManager::PrepareAbsoluteTakingOwnership(
+    scoped_ptr<const HistogramBase> histogram) {
+  PrepareSamples(histogram.get(), histogram->SnapshotSamples());
+  owned_histograms_.push_back(std::move(histogram));
+}
+
+void HistogramSnapshotManager::FinishDeltas() {
+  DCHECK(preparing_deltas_);
+
+  // Iterate over all known histograms to see what should be recorded.
+  for (auto& hash_and_info : known_histograms_) {
+    SampleInfo* sample_info = &hash_and_info.second;
+
+    // First, record any histograms in which corruption was detected.
+    if (sample_info->inconsistencies & HistogramBase::NEW_INCONSISTENCY_FOUND) {
+      sample_info->inconsistencies &= ~HistogramBase::NEW_INCONSISTENCY_FOUND;
+      histogram_flattener_->UniqueInconsistencyDetected(
+          static_cast<HistogramBase::Inconsistency>(
+              sample_info->inconsistencies));
+    }
+
+    // Second, record actual accumulated deltas.
+    if (sample_info->accumulated_samples) {
+      // TODO(bcwhite): Investigate using redundant_count() below to avoid
+      // additional pass through all the samples to calculate real total.
+      if (sample_info->accumulated_samples->TotalCount() > 0) {
+        histogram_flattener_->RecordDelta(*sample_info->histogram,
+                                          *sample_info->accumulated_samples);
+      }
+      delete sample_info->accumulated_samples;
+      sample_info->accumulated_samples = nullptr;
+    }
+
+    // The Histogram pointer must be cleared at this point because the owner
+    // is only required to keep it alive until FinishDeltas() completes.
+    sample_info->histogram = nullptr;
+  }
+
+  owned_histograms_.clear();
+  preparing_deltas_ = false;
+}
+
+void HistogramSnapshotManager::PrepareSamples(
+    const HistogramBase* histogram,
+    scoped_ptr<HistogramSamples> samples) {
   DCHECK(histogram_flattener_);
 
-  // Get up-to-date snapshot of sample stats.
-  scoped_ptr<HistogramSamples> snapshot(histogram.SnapshotSamples());
+  // Get information known about this histogram.
+  SampleInfo* sample_info = &known_histograms_[histogram->name_hash()];
+  if (sample_info->histogram) {
+    DCHECK_EQ(sample_info->histogram->histogram_name(),
+              histogram->histogram_name()) << "hash collision";
+  } else {
+    // First time this histogram has been seen; fill in its entry.
+    sample_info->histogram = histogram;
+  }
 
   // Crash if we detect that our histograms have been overwritten.  This may be
   // a fair distance from the memory smasher, but we hope to correlate these
   // crashes with other events, such as plugins, or usage patterns, etc.
-  int corruption = histogram.FindCorruption(*snapshot);
+  uint32_t corruption = histogram->FindCorruption(*samples);
   if (HistogramBase::BUCKET_ORDER_ERROR & corruption) {
     // The checksum should have caught this, so crash separately if it didn't.
-    CHECK_NE(0, HistogramBase::RANGE_CHECKSUM_ERROR & corruption);
+    CHECK_NE(0U, HistogramBase::RANGE_CHECKSUM_ERROR & corruption);
     CHECK(false);  // Crash for the bucket order corruption.
   }
   // Checksum corruption might not have caused order corruption.
-  CHECK_EQ(0, HistogramBase::RANGE_CHECKSUM_ERROR & corruption);
+  CHECK_EQ(0U, HistogramBase::RANGE_CHECKSUM_ERROR & corruption);
 
   // Note, at this point corruption can only be COUNT_HIGH_ERROR or
   // COUNT_LOW_ERROR and they never arise together, so we don't need to extract
   // bits from corruption.
-  const uint64_t histogram_hash = histogram.name_hash();
   if (corruption) {
-    DLOG(ERROR) << "Histogram: " << histogram.histogram_name()
-                << " has data corruption: " << corruption;
+    DLOG(ERROR) << "Histogram: \"" << histogram->histogram_name()
+                << "\" has data corruption: " << corruption;
     histogram_flattener_->InconsistencyDetected(
         static_cast<HistogramBase::Inconsistency>(corruption));
     // Don't record corrupt data to metrics services.
-    int old_corruption = inconsistencies_[histogram_hash];
+    const uint32_t old_corruption = sample_info->inconsistencies;
     if (old_corruption == (corruption | old_corruption))
       return;  // We've already seen this corruption for this histogram.
-    inconsistencies_[histogram_hash] |= corruption;
-    histogram_flattener_->UniqueInconsistencyDetected(
-        static_cast<HistogramBase::Inconsistency>(corruption));
+    sample_info->inconsistencies |=
+        corruption | HistogramBase::NEW_INCONSISTENCY_FOUND;
+    // TODO(bcwhite): Can we clear the inconsistency for future collection?
     return;
   }
 
-  HistogramSamples* to_log;
-  auto it = logged_samples_.find(histogram_hash);
-  if (it == logged_samples_.end()) {
-    to_log = snapshot.release();
-
-    // This histogram has not been logged before, add a new entry.
-    logged_samples_[histogram_hash] = to_log;
+  if (!sample_info->accumulated_samples) {
+    // This histogram has not been seen before; add it as a new entry.
+    sample_info->accumulated_samples = samples.release();
   } else {
-    HistogramSamples* already_logged = it->second;
-    InspectLoggedSamplesInconsistency(*snapshot, already_logged);
-    snapshot->Subtract(*already_logged);
-    already_logged->Add(*snapshot);
-    to_log = snapshot.get();
+    // There are previous values from this histogram; add them together.
+    sample_info->accumulated_samples->Add(*samples);
   }
-
-  if (to_log->TotalCount() > 0)
-    histogram_flattener_->RecordDelta(histogram, *to_log);
 }
 
 void HistogramSnapshotManager::InspectLoggedSamplesInconsistency(
diff --git a/base/metrics/histogram_snapshot_manager.h b/base/metrics/histogram_snapshot_manager.h
index bad4668..d44db19 100644
--- a/base/metrics/histogram_snapshot_manager.h
+++ b/base/metrics/histogram_snapshot_manager.h
@@ -9,7 +9,9 @@
 
 #include <map>
 #include <string>
+#include <vector>
 
+#include "base/gtest_prod_util.h"
 #include "base/macros.h"
 #include "base/metrics/histogram_base.h"
 
@@ -35,27 +37,86 @@
   // |required_flags| is used to select histograms to be recorded.
   // Only histograms that have all the flags specified by the argument will be
   // chosen. If all histograms should be recorded, set it to
-  // |Histogram::kNoFlags|.
-  void PrepareDeltas(HistogramBase::Flags flags_to_set,
-                     HistogramBase::Flags required_flags);
+  // |Histogram::kNoFlags|. Though any "forward" iterator will work, the
+  // histograms over which it iterates *must* remain valid until this method
+  // returns; the iterator cannot deallocate histograms once it iterates past
+  // them.
+  template <class ForwardHistogramIterator>
+  void PrepareDeltas(ForwardHistogramIterator begin,
+                     ForwardHistogramIterator end,
+                     HistogramBase::Flags flags_to_set,
+                     HistogramBase::Flags required_flags) {
+    StartDeltas();
+    for (ForwardHistogramIterator it = begin; it != end; ++it) {
+      (*it)->SetFlags(flags_to_set);
+      if (((*it)->flags() & required_flags) == required_flags)
+        PrepareDelta(*it);
+    }
+    FinishDeltas();
+  }
+
+  // When the collection cannot be done with a single iterator pass, the
+  // steps can be performed separately. Call PrepareDelta()
+  // as many times as necessary with a single StartDeltas() before and
+  // a single FinishDeltas() after. All passed histograms must live
+  // until FinishDeltas() completes. PrepareAbsolute() works the same
+  // but assumes there were no previous logged values and no future deltas
+  // will be created (and thus can work on read-only histograms).
+  // Use Prepare*TakingOwnership() if it is desirable to have this class
+  // automatically delete the histogram once it is "finished".
+  void StartDeltas();
+  void PrepareDelta(HistogramBase* histogram);
+  void PrepareDeltaTakingOwnership(scoped_ptr<HistogramBase> histogram);
+  void PrepareAbsolute(const HistogramBase* histogram);
+  void PrepareAbsoluteTakingOwnership(
+      scoped_ptr<const HistogramBase> histogram);
+  void FinishDeltas();
 
  private:
-  // Snapshot this histogram, and record the delta.
-  void PrepareDelta(const HistogramBase& histogram);
+  FRIEND_TEST_ALL_PREFIXES(HistogramSnapshotManagerTest, CheckMerge);
+
+  // During a snapshot, samples are acquired and aggregated. This structure
+  // contains all the information collected for a given histogram. Once a
+  // snapshot operation is finished, it is generally emptied except for
+  // information that must persist from one report to the next, such as
+  // the "inconsistencies".
+  struct SampleInfo {
+    // A histogram associated with this sample; it may be one of many if
+    // several have been aggregated into the same "accumulated" sample set.
+    // Ownership of the histogram remains elsewhere and this pointer is
+    // cleared by FinishDeltas().
+    const HistogramBase* histogram = nullptr;
+
+    // The current snapshot-delta values being accumulated.
+    // TODO(bcwhite): Change this to a scoped_ptr once all build architectures
+    // support scoped_ptr as the value of a std::map.
+    HistogramSamples* accumulated_samples = nullptr;
+
+    // The set of inconsistencies (flags) already seen for the histogram.
+    // See HistogramBase::Inconsistency for values.
+    uint32_t inconsistencies = 0;
+  };
+
+  // Capture and hold samples from a histogram. This does all the heavy
+  // lifting for PrepareDelta() and PrepareAbsolute().
+  void PrepareSamples(const HistogramBase* histogram,
+                      scoped_ptr<HistogramSamples> samples);
 
   // Try to detect and fix count inconsistency of logged samples.
   void InspectLoggedSamplesInconsistency(
       const HistogramSamples& new_snapshot,
       HistogramSamples* logged_samples);
 
-  // For histograms, track what we've already recorded (as a sample for
-  // each histogram) so that we can record only the delta with the next log.
-  // The information is indexed by the hash of the histogram name.
-  std::map<uint64_t, HistogramSamples*> logged_samples_;
-
-  // Set of histograms found to be corrupt and their problems, indexed
+  // For histograms, track what has been previously seen, indexed
   // by the hash of the histogram name.
-  std::map<uint64_t, int> inconsistencies_;
+  std::map<uint64_t, SampleInfo> known_histograms_;
+
+  // Collection of histograms of which ownership has been passed to this
+  // object. They will be deleted by FinishDeltas().
+  std::vector<scoped_ptr<const HistogramBase>> owned_histograms_;
+
+  // Indicates if deltas are currently being prepared.
+  bool preparing_deltas_;
 
   // |histogram_flattener_| handles the logistics of recording the histogram
   // deltas.
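
The multi-step form composes as in this sketch (illustrative; |flattener|
is any caller-owned HistogramFlattener and the histograms must remain
alive until FinishDeltas() returns):

    base::HistogramSnapshotManager manager(&flattener);
    manager.StartDeltas();
    manager.PrepareDelta(writable_histogram);      // delta since last snapshot
    manager.PrepareAbsolute(read_only_histogram);  // full counts, no prior log
    manager.FinishDeltas();  // records accumulated samples via |flattener|
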
diff --git a/base/metrics/histogram_snapshot_manager_unittest.cc b/base/metrics/histogram_snapshot_manager_unittest.cc
index b6a367a..8ec03da 100644
--- a/base/metrics/histogram_snapshot_manager_unittest.cc
+++ b/base/metrics/histogram_snapshot_manager_unittest.cc
@@ -10,7 +10,9 @@
 #include "base/macros.h"
 #include "base/metrics/histogram_delta_serialization.h"
 #include "base/metrics/histogram_macros.h"
+#include "base/metrics/sample_vector.h"
 #include "base/metrics/statistics_recorder.h"
+#include "base/stl_util.h"
 #include "testing/gtest/include/gtest/gtest.h"
 
 namespace base {
@@ -22,6 +24,11 @@
   void RecordDelta(const HistogramBase& histogram,
                    const HistogramSamples& snapshot) override {
     recorded_delta_histogram_names_.push_back(histogram.histogram_name());
+    ASSERT_FALSE(ContainsKey(recorded_delta_histogram_sum_,
+                             histogram.histogram_name()));
+    // Record the snapshot's sum for testing. Storing the sum rather than
+    // the snapshot itself avoids relying on how long the snapshot-manager
+    // keeps its snapshots alive.
+    recorded_delta_histogram_sum_[histogram.histogram_name()] = snapshot.sum();
   }
 
   void InconsistencyDetected(HistogramBase::Inconsistency problem) override {
@@ -37,12 +44,23 @@
     ASSERT_TRUE(false);
   }
 
+  void Reset() {
+    recorded_delta_histogram_names_.clear();
+    recorded_delta_histogram_sum_.clear();
+  }
+
   std::vector<std::string> GetRecordedDeltaHistogramNames() {
     return recorded_delta_histogram_names_;
   }
 
+  int64_t GetRecordedDeltaHistogramSum(const std::string& name) {
+    EXPECT_TRUE(ContainsKey(recorded_delta_histogram_sum_, name));
+    return recorded_delta_histogram_sum_[name];
+  }
+
  private:
   std::vector<std::string> recorded_delta_histogram_names_;
+  std::map<std::string, int64_t> recorded_delta_histogram_sum_;
 
   DISALLOW_COPY_AND_ASSIGN(HistogramFlattenerDeltaRecorder);
 };
@@ -61,11 +79,12 @@
 
 TEST_F(HistogramSnapshotManagerTest, PrepareDeltasNoFlagsFilter) {
   // kNoFlags filter should record all histograms.
-  UMA_HISTOGRAM_ENUMERATION("UmaHistogram", 1, 2);
+  UMA_HISTOGRAM_ENUMERATION("UmaHistogram", 1, 4);
   UMA_STABILITY_HISTOGRAM_ENUMERATION("UmaStabilityHistogram", 1, 2);
 
-  histogram_snapshot_manager_.PrepareDeltas(HistogramBase::kNoFlags,
-                                            HistogramBase::kNoFlags);
+  histogram_snapshot_manager_.PrepareDeltas(
+      StatisticsRecorder::begin(false), StatisticsRecorder::end(),
+      HistogramBase::kNoFlags, HistogramBase::kNoFlags);
 
   const std::vector<std::string>& histograms =
       histogram_flattener_delta_recorder_.GetRecordedDeltaHistogramNames();
@@ -76,10 +95,11 @@
 
 TEST_F(HistogramSnapshotManagerTest, PrepareDeltasUmaHistogramFlagFilter) {
   // Note that kUmaStabilityHistogramFlag includes kUmaTargetedHistogramFlag.
-  UMA_HISTOGRAM_ENUMERATION("UmaHistogram", 1, 2);
+  UMA_HISTOGRAM_ENUMERATION("UmaHistogram", 1, 4);
   UMA_STABILITY_HISTOGRAM_ENUMERATION("UmaStabilityHistogram", 1, 2);
 
   histogram_snapshot_manager_.PrepareDeltas(
+      StatisticsRecorder::begin(false), StatisticsRecorder::end(),
       HistogramBase::kNoFlags, HistogramBase::kUmaTargetedHistogramFlag);
 
   const std::vector<std::string>& histograms =
@@ -91,10 +111,11 @@
 
 TEST_F(HistogramSnapshotManagerTest,
        PrepareDeltasUmaStabilityHistogramFlagFilter) {
-  UMA_HISTOGRAM_ENUMERATION("UmaHistogram", 1, 2);
+  UMA_HISTOGRAM_ENUMERATION("UmaHistogram", 1, 4);
   UMA_STABILITY_HISTOGRAM_ENUMERATION("UmaStabilityHistogram", 1, 2);
 
   histogram_snapshot_manager_.PrepareDeltas(
+      StatisticsRecorder::begin(false), StatisticsRecorder::end(),
       HistogramBase::kNoFlags, HistogramBase::kUmaStabilityHistogramFlag);
 
   const std::vector<std::string>& histograms =
@@ -103,4 +124,35 @@
   EXPECT_EQ("UmaStabilityHistogram", histograms[0]);
 }
 
+TEST_F(HistogramSnapshotManagerTest, CheckMerge) {
+  UMA_HISTOGRAM_ENUMERATION("UmaHistogram", 1, 4);
+  UMA_STABILITY_HISTOGRAM_ENUMERATION("UmaStabilityHistogram", 1, 2);
+
+  base::HistogramBase* h1 = base::LinearHistogram::FactoryGet(
+      "UmaHistogram", 1, 4, 5, 0);
+  ASSERT_TRUE(h1);
+  base::HistogramBase* h2 = base::LinearHistogram::FactoryGet(
+      "UmaStabilityHistogram", 1, 2, 3, 0);
+  ASSERT_TRUE(h2);
+
+  histogram_snapshot_manager_.StartDeltas();
+  histogram_snapshot_manager_.PrepareDelta(h1);
+  histogram_snapshot_manager_.PrepareDelta(h1);  // Delta will be zero.
+  histogram_snapshot_manager_.PrepareDelta(h2);
+  h1->Add(2);
+  h2->Add(1);
+  histogram_snapshot_manager_.PrepareDelta(h2);
+  histogram_snapshot_manager_.PrepareDelta(h1);
+  histogram_snapshot_manager_.FinishDeltas();
+  {
+    const std::vector<std::string> histograms =
+        histogram_flattener_delta_recorder_.GetRecordedDeltaHistogramNames();
+    EXPECT_EQ(2U, histograms.size());
+    EXPECT_EQ(3, histogram_flattener_delta_recorder_.
+                     GetRecordedDeltaHistogramSum("UmaHistogram"));
+    EXPECT_EQ(2, histogram_flattener_delta_recorder_.
+                     GetRecordedDeltaHistogramSum("UmaStabilityHistogram"));
+  }
+}
+
 }  // namespace base
diff --git a/base/metrics/histogram_unittest.cc b/base/metrics/histogram_unittest.cc
index 2fadc30..03dc7bd 100644
--- a/base/metrics/histogram_unittest.cc
+++ b/base/metrics/histogram_unittest.cc
@@ -8,33 +8,54 @@
 #include <stddef.h>
 #include <stdint.h>
 
-#include <algorithm>
 #include <climits>
+#include <string>
 #include <vector>
 
 #include "base/logging.h"
 #include "base/memory/scoped_ptr.h"
 #include "base/metrics/bucket_ranges.h"
 #include "base/metrics/histogram_macros.h"
+#include "base/metrics/persistent_histogram_allocator.h"
+#include "base/metrics/persistent_memory_allocator.h"
 #include "base/metrics/sample_vector.h"
 #include "base/metrics/statistics_recorder.h"
 #include "base/pickle.h"
+#include "base/strings/stringprintf.h"
 #include "base/time/time.h"
 #include "testing/gtest/include/gtest/gtest.h"
 
 namespace base {
 
-class HistogramTest : public testing::Test {
+// Test parameter indicates if a persistent memory allocator should be used
+// for histogram allocation. False will allocate histograms from the process
+// heap.
+class HistogramTest : public testing::TestWithParam<bool> {
  protected:
+  const int32_t kAllocatorMemorySize = 8 << 20;  // 8 MiB
+
+  HistogramTest() : use_persistent_histogram_allocator_(GetParam()) {}
+
   void SetUp() override {
+    if (use_persistent_histogram_allocator_)
+      CreatePersistentHistogramAllocator();
+
     // Each test will have a clean state (no Histogram / BucketRanges
     // registered).
     InitializeStatisticsRecorder();
   }
 
-  void TearDown() override { UninitializeStatisticsRecorder(); }
+  void TearDown() override {
+    if (allocator_) {
+      ASSERT_FALSE(allocator_->IsFull());
+      ASSERT_FALSE(allocator_->IsCorrupt());
+    }
+    UninitializeStatisticsRecorder();
+    DestroyPersistentHistogramAllocator();
+  }
 
   void InitializeStatisticsRecorder() {
+    StatisticsRecorder::ResetForTesting();
     statistics_recorder_ = new StatisticsRecorder();
   }
 
@@ -43,11 +64,39 @@
     statistics_recorder_ = NULL;
   }
 
-  StatisticsRecorder* statistics_recorder_;
+  void CreatePersistentHistogramAllocator() {
+    // By getting the results-histogram before any persistent allocator
+    // is attached, that histogram is guaranteed not to be stored in
+    // any persistent memory segment (which simplifies some tests).
+    PersistentHistogramAllocator::GetCreateHistogramResultHistogram();
+
+    PersistentHistogramAllocator::CreateGlobalAllocatorOnLocalMemory(
+        kAllocatorMemorySize, 0, "HistogramAllocatorTest");
+    allocator_ =
+        PersistentHistogramAllocator::GetGlobalAllocator()->memory_allocator();
+  }
+
+  void DestroyPersistentHistogramAllocator() {
+    allocator_ = nullptr;
+    PersistentHistogramAllocator::ReleaseGlobalAllocatorForTesting();
+  }
+
+  const bool use_persistent_histogram_allocator_;
+
+  StatisticsRecorder* statistics_recorder_ = nullptr;
+  scoped_ptr<char[]> allocator_memory_;
+  PersistentMemoryAllocator* allocator_ = nullptr;
+
+ private:
+  DISALLOW_COPY_AND_ASSIGN(HistogramTest);
 };
 
+// Run all HistogramTest cases with both heap and persistent memory.
+INSTANTIATE_TEST_CASE_P(HeapAndPersistent, HistogramTest, testing::Bool());
+
 // Check for basic syntax and use.
-TEST_F(HistogramTest, BasicTest) {
+TEST_P(HistogramTest, BasicTest) {
   // Try basic construction
   HistogramBase* histogram = Histogram::FactoryGet(
       "TestHistogram", 1, 1000, 10, HistogramBase::kNoFlags);
@@ -73,7 +122,15 @@
 
 // Check that the macro correctly matches histograms by name and records their
 // data together.
-TEST_F(HistogramTest, NameMatchTest) {
+TEST_P(HistogramTest, NameMatchTest) {
+  // Macros that create histograms have an internal static variable which will
+  // continue to point to those from the very first run of this method even
+  // during subsequent runs.
+  static bool already_run = false;
+  if (already_run)
+    return;
+  already_run = true;
+
   LOCAL_HISTOGRAM_PERCENTAGE("DuplicatedHistogram", 10);
   LOCAL_HISTOGRAM_PERCENTAGE("DuplicatedHistogram", 10);
   HistogramBase* histogram = LinearHistogram::FactoryGet(
@@ -84,7 +141,36 @@
   EXPECT_EQ(2, samples->GetCount(10));
 }
 
-TEST_F(HistogramTest, ExponentialRangesTest) {
+// Check that delta calculations work correctly.
+TEST_P(HistogramTest, DeltaTest) {
+  HistogramBase* histogram =
+      Histogram::FactoryGet("DeltaHistogram", 1, 64, 8,
+                            HistogramBase::kNoFlags);
+  histogram->Add(1);
+  histogram->Add(10);
+  histogram->Add(50);
+
+  scoped_ptr<HistogramSamples> samples = histogram->SnapshotDelta();
+  EXPECT_EQ(3, samples->TotalCount());
+  EXPECT_EQ(1, samples->GetCount(1));
+  EXPECT_EQ(1, samples->GetCount(10));
+  EXPECT_EQ(1, samples->GetCount(50));
+  EXPECT_EQ(samples->TotalCount(), samples->redundant_count());
+
+  samples = histogram->SnapshotDelta();
+  EXPECT_EQ(0, samples->TotalCount());
+
+  histogram->Add(10);
+  histogram->Add(10);
+  samples = histogram->SnapshotDelta();
+  EXPECT_EQ(2, samples->TotalCount());
+  EXPECT_EQ(2, samples->GetCount(10));
+
+  samples = histogram->SnapshotDelta();
+  EXPECT_EQ(0, samples->TotalCount());
+}
+
+TEST_P(HistogramTest, ExponentialRangesTest) {
   // Check that we got a nice exponential when there was enough room.
   BucketRanges ranges(9);
   Histogram::InitializeBucketRanges(1, 64, &ranges);
@@ -129,7 +215,7 @@
   EXPECT_TRUE(ranges2.Equals(histogram2->bucket_ranges()));
 }
 
-TEST_F(HistogramTest, LinearRangesTest) {
+TEST_P(HistogramTest, LinearRangesTest) {
   BucketRanges ranges(9);
   LinearHistogram::InitializeBucketRanges(1, 7, &ranges);
   // Gets a nice linear set of bucket ranges.
@@ -157,7 +243,7 @@
   EXPECT_TRUE(ranges2.Equals(histogram2->bucket_ranges()));
 }
 
-TEST_F(HistogramTest, ArrayToCustomRangesTest) {
+TEST_P(HistogramTest, ArrayToCustomRangesTest) {
   const HistogramBase::Sample ranges[3] = {5, 10, 20};
   std::vector<HistogramBase::Sample> ranges_vec =
       CustomHistogram::ArrayToCustomRanges(ranges, 3);
@@ -170,7 +256,7 @@
   EXPECT_EQ(21, ranges_vec[5]);
 }
 
-TEST_F(HistogramTest, CustomHistogramTest) {
+TEST_P(HistogramTest, CustomHistogramTest) {
   // A well prepared custom ranges.
   std::vector<HistogramBase::Sample> custom_ranges;
   custom_ranges.push_back(1);
@@ -216,7 +302,7 @@
   EXPECT_EQ(HistogramBase::kSampleType_MAX, ranges->range(3));
 }
 
-TEST_F(HistogramTest, CustomHistogramWithOnly2Buckets) {
+TEST_P(HistogramTest, CustomHistogramWithOnly2Buckets) {
   // This test exploits the fact that the CustomHistogram can have 2 buckets,
   // while the base class Histogram is *supposed* to have at least 3 buckets.
   // We should probably change the restriction on the base class (or not inherit
@@ -235,8 +321,7 @@
   EXPECT_EQ(HistogramBase::kSampleType_MAX, ranges->range(2));
 }
 
-// Test the AddCount function.
-TEST_F(HistogramTest, AddCountTest) {
+TEST_P(HistogramTest, AddCountTest) {
   const size_t kBucketCount = 50;
   Histogram* histogram = static_cast<Histogram*>(
       Histogram::FactoryGet("AddCountHistogram", 10, 100, kBucketCount,
@@ -245,7 +330,7 @@
   histogram->AddCount(20, 15);
   histogram->AddCount(30, 14);
 
-  scoped_ptr<SampleVector> samples = histogram->SnapshotSampleVector();
+  scoped_ptr<HistogramSamples> samples = histogram->SnapshotSamples();
   EXPECT_EQ(29, samples->TotalCount());
   EXPECT_EQ(15, samples->GetCount(20));
   EXPECT_EQ(14, samples->GetCount(30));
@@ -253,14 +338,38 @@
   histogram->AddCount(20, 25);
   histogram->AddCount(30, 24);
 
-  scoped_ptr<SampleVector> samples2 = histogram->SnapshotSampleVector();
+  scoped_ptr<HistogramSamples> samples2 = histogram->SnapshotSamples();
   EXPECT_EQ(78, samples2->TotalCount());
   EXPECT_EQ(40, samples2->GetCount(20));
   EXPECT_EQ(38, samples2->GetCount(30));
 }
 
+TEST_P(HistogramTest, AddCount_LargeValuesDontOverflow) {
+  const size_t kBucketCount = 50;
+  Histogram* histogram = static_cast<Histogram*>(
+      Histogram::FactoryGet("AddCountHistogram", 10, 1000000000, kBucketCount,
+                            HistogramBase::kNoFlags));
+
+  histogram->AddCount(200000000, 15);
+  histogram->AddCount(300000000, 14);
+
+  scoped_ptr<HistogramSamples> samples = histogram->SnapshotSamples();
+  EXPECT_EQ(29, samples->TotalCount());
+  EXPECT_EQ(15, samples->GetCount(200000000));
+  EXPECT_EQ(14, samples->GetCount(300000000));
+
+  histogram->AddCount(200000000, 25);
+  histogram->AddCount(300000000, 24);
+
+  scoped_ptr<HistogramSamples> samples2 = histogram->SnapshotSamples();
+  EXPECT_EQ(78, samples2->TotalCount());
+  EXPECT_EQ(40, samples2->GetCount(200000000));
+  EXPECT_EQ(38, samples2->GetCount(300000000));
+  EXPECT_EQ(19400000000LL, samples2->sum());
+}
+
 // Make sure histogram handles out-of-bounds data gracefully.
-TEST_F(HistogramTest, BoundsTest) {
+TEST_P(HistogramTest, BoundsTest) {
   const size_t kBucketCount = 50;
   Histogram* histogram = static_cast<Histogram*>(
       Histogram::FactoryGet("Bounded", 10, 100, kBucketCount,
@@ -308,7 +417,7 @@
 }
 
 // Check to be sure samples land as expected in "correct" buckets.
-TEST_F(HistogramTest, BucketPlacementTest) {
+TEST_P(HistogramTest, BucketPlacementTest) {
   Histogram* histogram = static_cast<Histogram*>(
       Histogram::FactoryGet("Histogram", 1, 64, 8, HistogramBase::kNoFlags));
 
@@ -327,7 +436,7 @@
     EXPECT_EQ(i + 1, samples->GetCountAtIndex(i));
 }
 
-TEST_F(HistogramTest, CorruptSampleCounts) {
+TEST_P(HistogramTest, CorruptSampleCounts) {
   Histogram* histogram = static_cast<Histogram*>(
       Histogram::FactoryGet("Histogram", 1, 64, 8, HistogramBase::kNoFlags));
 
@@ -354,11 +463,11 @@
             histogram->FindCorruption(*snapshot));
 }
 
-TEST_F(HistogramTest, CorruptBucketBounds) {
+TEST_P(HistogramTest, CorruptBucketBounds) {
   Histogram* histogram = static_cast<Histogram*>(
       Histogram::FactoryGet("Histogram", 1, 64, 8, HistogramBase::kNoFlags));
 
-  scoped_ptr<SampleVector> snapshot = histogram->SnapshotSampleVector();
+  scoped_ptr<HistogramSamples> snapshot = histogram->SnapshotSamples();
   EXPECT_EQ(HistogramBase::NO_INCONSISTENCIES,
             histogram->FindCorruption(*snapshot));
 
@@ -373,7 +482,7 @@
 
   bucket_ranges->set_range(2, bucket_ranges->range(1));
   bucket_ranges->set_range(1, tmp);
-  EXPECT_EQ(0, histogram->FindCorruption(*snapshot));
+  EXPECT_EQ(0U, histogram->FindCorruption(*snapshot));
 
   // Show that two simple changes don't offset each other
   bucket_ranges->set_range(3, bucket_ranges->range(3) + 1);
@@ -389,7 +498,7 @@
   bucket_ranges->set_range(4, bucket_ranges->range(4) + 1);
 }
 
-TEST_F(HistogramTest, HistogramSerializeInfo) {
+TEST_P(HistogramTest, HistogramSerializeInfo) {
   Histogram* histogram = static_cast<Histogram*>(
       Histogram::FactoryGet("Histogram", 1, 64, 8,
                             HistogramBase::kIPCSerializationSourceFlag));
@@ -408,7 +517,8 @@
 
   int flag;
   EXPECT_TRUE(iter.ReadInt(&flag));
-  EXPECT_EQ(HistogramBase::kIPCSerializationSourceFlag, flag);
+  EXPECT_EQ(HistogramBase::kIPCSerializationSourceFlag,
+            flag & ~HistogramBase::kIsPersistent);
 
   int min;
   EXPECT_TRUE(iter.ReadInt(&min));
@@ -418,9 +528,9 @@
   EXPECT_TRUE(iter.ReadInt(&max));
   EXPECT_EQ(64, max);
 
-  int64_t bucket_count;
-  EXPECT_TRUE(iter.ReadInt64(&bucket_count));
-  EXPECT_EQ(8, bucket_count);
+  uint32_t bucket_count;
+  EXPECT_TRUE(iter.ReadUInt32(&bucket_count));
+  EXPECT_EQ(8u, bucket_count);
 
   uint32_t checksum;
   EXPECT_TRUE(iter.ReadUInt32(&checksum));
@@ -430,7 +540,7 @@
   EXPECT_FALSE(iter.SkipBytes(1));
 }
 
-TEST_F(HistogramTest, CustomHistogramSerializeInfo) {
+TEST_P(HistogramTest, CustomHistogramSerializeInfo) {
   std::vector<int> custom_ranges;
   custom_ranges.push_back(10);
   custom_ranges.push_back(100);
@@ -447,12 +557,12 @@
 
   int i;
   std::string s;
-  int64_t bucket_count;
+  uint32_t bucket_count;
   uint32_t ui32;
   EXPECT_TRUE(iter.ReadInt(&i) && iter.ReadString(&s) && iter.ReadInt(&i) &&
               iter.ReadInt(&i) && iter.ReadInt(&i) &&
-              iter.ReadInt64(&bucket_count) && iter.ReadUInt32(&ui32));
-  EXPECT_EQ(3, bucket_count);
+              iter.ReadUInt32(&bucket_count) && iter.ReadUInt32(&ui32));
+  EXPECT_EQ(3u, bucket_count);
 
   int range;
   EXPECT_TRUE(iter.ReadInt(&range));
@@ -464,7 +574,7 @@
   EXPECT_FALSE(iter.SkipBytes(1));
 }
 
-TEST_F(HistogramTest, BadConstruction) {
+TEST_P(HistogramTest, BadConstruction) {
   HistogramBase* histogram = Histogram::FactoryGet(
       "BadConstruction", 0, 100, 8, HistogramBase::kNoFlags);
   EXPECT_TRUE(histogram->HasConstructionArguments(1, 100, 8));
@@ -490,6 +600,68 @@
   EXPECT_EQ(NULL, bad_histogram);
 }
 
+TEST_P(HistogramTest, FactoryTime) {
+  const int kTestCreateCount = 1 << 14;  // Must be power-of-2.
+  const int kTestLookupCount = 100000;
+  const int kTestAddCount = 1000000;
+
+  // Create all histogram names in advance for accurate timing below.
+  std::vector<std::string> histogram_names;
+  for (int i = 0; i < kTestCreateCount; ++i) {
+    histogram_names.push_back(
+        StringPrintf("TestHistogram.%d", i % kTestCreateCount));
+  }
+
+  // Calculate cost of creating histograms.
+  TimeTicks create_start = TimeTicks::Now();
+  for (int i = 0; i < kTestCreateCount; ++i) {
+    Histogram::FactoryGet(histogram_names[i], 0, 100, 10,
+                          HistogramBase::kNoFlags);
+  }
+  TimeDelta create_ticks = TimeTicks::Now() - create_start;
+  int64_t create_ms = create_ticks.InMilliseconds();
+
+  VLOG(1) << kTestCreateCount << " histogram creations took " << create_ms
+          << "ms or about "
+          << (create_ms * 1000000) / kTestCreateCount
+          << "ns each.";
+
+  // Calculate cost of looking up existing histograms.
+  TimeTicks lookup_start = TimeTicks::Now();
+  for (int i = 0; i < kTestLookupCount; ++i) {
+    // 6007 is co-prime with kTestCreateCount and so will do lookups in an
+    // order less likely to be cacheable (but still hit them all) should the
+    // underlying storage use the exact histogram name as the key.
+    const int i_mult = 6007;
+    static_assert(i_mult < INT_MAX / kTestCreateCount, "Multiplier too big");
+    int index = (i * i_mult) & (kTestCreateCount - 1);
+    Histogram::FactoryGet(histogram_names[index], 0, 100, 10,
+                          HistogramBase::kNoFlags);
+  }
+  TimeDelta lookup_ticks = TimeTicks::Now() - lookup_start;
+  int64_t lookup_ms = lookup_ticks.InMilliseconds();
+
+  VLOG(1) << kTestLookupCount << " histogram lookups took " << lookup_ms
+          << "ms or about "
+          << (lookup_ms * 1000000) / kTestLookupCount
+          << "ns each.";
+
+  // Calculate cost of accessing histograms.
+  HistogramBase* histogram = Histogram::FactoryGet(
+      histogram_names[0], 0, 100, 10, HistogramBase::kNoFlags);
+  ASSERT_TRUE(histogram);
+  TimeTicks add_start = TimeTicks::Now();
+  for (int i = 0; i < kTestAddCount; ++i)
+    histogram->Add(i & 127);
+  TimeDelta add_ticks = TimeTicks::Now() - add_start;
+  int64_t add_ms = add_ticks.InMilliseconds();
+
+  VLOG(1) << kTestAddCount << " histogram adds took " << add_ms
+          << "ms or about "
+          << (add_ms * 1000000) / kTestAddCount
+          << "ns each.";
+}
+
 #if GTEST_HAS_DEATH_TEST
 // For Histogram, LinearHistogram and CustomHistogram, the minimum for a
 // declared range is 1, while the maximum is (HistogramBase::kSampleType_MAX -
diff --git a/base/metrics/metrics_hashes.cc b/base/metrics/metrics_hashes.cc
index 73bce2e..5672b06 100644
--- a/base/metrics/metrics_hashes.cc
+++ b/base/metrics/metrics_hashes.cc
@@ -22,9 +22,9 @@
 
 }  // namespace
 
-uint64_t HashMetricName(const std::string& name) {
+uint64_t HashMetricName(base::StringPiece name) {
   base::MD5Digest digest;
-  base::MD5Sum(name.c_str(), name.size(), &digest);
+  base::MD5Sum(name.data(), name.size(), &digest);
   return DigestToUInt64(digest);
 }
 
diff --git a/base/metrics/metrics_hashes.h b/base/metrics/metrics_hashes.h
index bd04017..d05c4ba 100644
--- a/base/metrics/metrics_hashes.h
+++ b/base/metrics/metrics_hashes.h
@@ -6,15 +6,15 @@
 #define BASE_METRICS_METRICS_HASHES_H_
 
 #include <stdint.h>
-#include <string>
 
 #include "base/base_export.h"
+#include "base/strings/string_piece.h"
 
 namespace base {
 
 // Computes a uint64_t hash of a given string based on its MD5 hash. Suitable
 // for metric names.
-BASE_EXPORT uint64_t HashMetricName(const std::string& name);
+BASE_EXPORT uint64_t HashMetricName(base::StringPiece name);
 
 }  // namespace base
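
With the StringPiece signature, callers can hash a literal without first
constructing a std::string (illustrative):

    const uint64_t name_hash = base::HashMetricName("Example.Histogram");
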
 
diff --git a/base/metrics/persistent_histogram_allocator.cc b/base/metrics/persistent_histogram_allocator.cc
new file mode 100644
index 0000000..6006d31
--- /dev/null
+++ b/base/metrics/persistent_histogram_allocator.cc
@@ -0,0 +1,587 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/metrics/persistent_histogram_allocator.h"
+
+#include "base/lazy_instance.h"
+#include "base/logging.h"
+#include "base/memory/scoped_ptr.h"
+#include "base/metrics/histogram.h"
+#include "base/metrics/histogram_base.h"
+#include "base/metrics/histogram_samples.h"
+#include "base/metrics/sparse_histogram.h"
+#include "base/metrics/statistics_recorder.h"
+#include "base/synchronization/lock.h"
+
+// TODO(bcwhite): Order these methods to match the header file. The current
+// order is only temporary in order to aid review of the transition from
+// a non-class implementation.
+
+namespace base {
+
+namespace {
+
+// Name of histogram for storing results of local operations.
+const char kResultHistogram[] = "UMA.CreatePersistentHistogram.Result";
+
+// Type identifiers used when storing in persistent memory so they can be
+// identified during extraction; the first 4 bytes of the SHA1 of the name
+// are used as a unique integer. A "version number" is added to the base
+// so that, if the structure of that object changes, stored older versions
+// will be safely ignored.
+enum : uint32_t {
+  kTypeIdHistogram   = 0xF1645910 + 2,  // SHA1(Histogram)   v2
+  kTypeIdRangesArray = 0xBCEA225A + 1,  // SHA1(RangesArray) v1
+  kTypeIdCountsArray = 0x53215530 + 1,  // SHA1(CountsArray) v1
+};
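+// For example: SHA1("Histogram") begins with 0xF1645910, and adding the
+// current version (2) yields the stored identifier 0xF1645912. Bumping the
+// version makes previously stored objects unmatchable, hence ignored.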
+
+// The current globally-active persistent allocator for all new histograms.
+// The object held here will not be destructed at process exit, but that's
+// best since PersistentMemoryAllocator objects (which underlie
+// PersistentHistogramAllocator objects) are explicitly forbidden from doing
+// anything essential at exit anyway, because they depend on data managed
+// elsewhere that could be destructed first.
+PersistentHistogramAllocator* g_allocator;
+
+// Take an array of range boundaries and create a proper BucketRanges object
+// which is returned to the caller. A return of nullptr indicates that the
+// passed boundaries are invalid.
+scoped_ptr<BucketRanges> CreateRangesFromData(
+    HistogramBase::Sample* ranges_data,
+    uint32_t ranges_checksum,
+    size_t count) {
+  // To avoid racy destruction at shutdown, the following may be leaked.
+  scoped_ptr<BucketRanges> ranges(new BucketRanges(count));
+  DCHECK_EQ(count, ranges->size());
+  for (size_t i = 0; i < count; ++i) {
+    if (i > 0 && ranges_data[i] <= ranges_data[i - 1])
+      return nullptr;
+    ranges->set_range(i, ranges_data[i]);
+  }
+
+  ranges->ResetChecksum();
+  if (ranges->checksum() != ranges_checksum)
+    return nullptr;
+
+  return ranges;
+}
+
+// Calculate the number of bytes required to store all of a histogram's
+// "counts". This will return zero (0) if |bucket_count| is not valid.
+size_t CalculateRequiredCountsBytes(size_t bucket_count) {
+  // 2 because each "sample count" also requires a backup "logged count"
+  // used for calculating the delta during snapshot operations.
+  const size_t kBytesPerBucket = 2 * sizeof(HistogramBase::AtomicCount);
+
+  // If the |bucket_count| is such that it would overflow the return type,
+  // perhaps as the result of a malicious actor, then return zero to
+  // indicate the problem to the caller.
+  if (bucket_count > std::numeric_limits<size_t>::max() / kBytesPerBucket)
+    return 0;
+
+  return bucket_count * kBytesPerBucket;
+}
+
+}  // namespace
+
+const Feature kPersistentHistogramsFeature{
+  "PersistentHistograms", FEATURE_DISABLED_BY_DEFAULT
+};
+
+// This data will be held in persistent memory in order for processes to
+// locate and use histograms created elsewhere.
+struct PersistentHistogramAllocator::PersistentHistogramData {
+  int32_t histogram_type;
+  int32_t flags;
+  int32_t minimum;
+  int32_t maximum;
+  uint32_t bucket_count;
+  PersistentMemoryAllocator::Reference ranges_ref;
+  uint32_t ranges_checksum;
+  PersistentMemoryAllocator::Reference counts_ref;
+  HistogramSamples::Metadata samples_metadata;
+  HistogramSamples::Metadata logged_metadata;
+
+  // Space for the histogram name will be added during the actual allocation
+  // request. This must be the last field of the structure. A zero-size array
+  // or a "flexible" array would be preferred but is not (yet) valid C++.
+  char name[1];
+};
+
+PersistentHistogramAllocator::PersistentHistogramAllocator(
+    scoped_ptr<PersistentMemoryAllocator> memory)
+    : memory_allocator_(std::move(memory)) {}
+
+PersistentHistogramAllocator::~PersistentHistogramAllocator() {}
+
+void PersistentHistogramAllocator::CreateIterator(Iterator* iter) {
+  memory_allocator_->CreateIterator(&iter->memory_iter);
+}
+
+void PersistentHistogramAllocator::CreateTrackingHistograms(StringPiece name) {
+  memory_allocator_->CreateTrackingHistograms(name);
+}
+
+void PersistentHistogramAllocator::UpdateTrackingHistograms() {
+  memory_allocator_->UpdateTrackingHistograms();
+}
+
+// static
+HistogramBase*
+PersistentHistogramAllocator::GetCreateHistogramResultHistogram() {
+  // Get the histogram in which create-results are stored. This is copied
+  // almost exactly from the STATIC_HISTOGRAM_POINTER_BLOCK macro but with
+  // added code to prevent recursion (a likely occurrence because the creation
+  // of a new histogram can end up calling this).
+  static base::subtle::AtomicWord atomic_histogram_pointer = 0;
+  HistogramBase* histogram_pointer =
+      reinterpret_cast<HistogramBase*>(
+          base::subtle::Acquire_Load(&atomic_histogram_pointer));
+  if (!histogram_pointer) {
+    // It's possible for multiple threads to make it here in parallel but
+    // they'll always return the same result as there is a mutex in the Get.
+    // The purpose of the "initialized" variable is just to ensure that
+    // the same thread doesn't recurse, which is also why it doesn't have
+    // to be atomic.
+    static bool initialized = false;
+    if (!initialized) {
+      initialized = true;
+      if (g_allocator) {
+        DLOG(WARNING) << "Creating the results-histogram inside persistent"
+                      << " memory can cause future allocations to crash if"
+                      << " that memory is ever released (for testing).";
+      }
+
+      histogram_pointer = LinearHistogram::FactoryGet(
+          kResultHistogram, 1, CREATE_HISTOGRAM_MAX, CREATE_HISTOGRAM_MAX + 1,
+          HistogramBase::kUmaTargetedHistogramFlag);
+      base::subtle::Release_Store(
+          &atomic_histogram_pointer,
+          reinterpret_cast<base::subtle::AtomicWord>(histogram_pointer));
+    }
+  }
+  return histogram_pointer;
+}
+
+// static
+void PersistentHistogramAllocator::RecordCreateHistogramResult(
+    CreateHistogramResultType result) {
+  HistogramBase* result_histogram = GetCreateHistogramResultHistogram();
+  if (result_histogram)
+    result_histogram->Add(result);
+}
+
+// static
+void PersistentHistogramAllocator::SetGlobalAllocator(
+    scoped_ptr<PersistentHistogramAllocator> allocator) {
+  // Releasing or changing an allocator is extremely dangerous because it
+  // likely has histograms stored within it. If the backing memory is also
+  // released, future accesses to those histograms will seg-fault.
+  CHECK(!g_allocator);
+  g_allocator = allocator.release();
+
+  size_t existing = StatisticsRecorder::GetHistogramCount();
+  DLOG_IF(WARNING, existing)
+      << existing
+      << " histograms were created before persistence was enabled.";
+}
+
+// static
+PersistentHistogramAllocator*
+PersistentHistogramAllocator::GetGlobalAllocator() {
+  return g_allocator;
+}
+
+// static
+scoped_ptr<PersistentHistogramAllocator>
+PersistentHistogramAllocator::ReleaseGlobalAllocatorForTesting() {
+  PersistentHistogramAllocator* histogram_allocator = g_allocator;
+  if (!histogram_allocator)
+    return nullptr;
+  PersistentMemoryAllocator* memory_allocator =
+      histogram_allocator->memory_allocator();
+
+  // Before releasing the memory, it's necessary to have the Statistics-
+  // Recorder forget about the histograms contained therein; otherwise,
+  // some operations will try to access them and the released memory.
+  PersistentMemoryAllocator::Iterator iter;
+  PersistentMemoryAllocator::Reference ref;
+  uint32_t type_id;
+  memory_allocator->CreateIterator(&iter);
+  while ((ref = memory_allocator->GetNextIterable(&iter, &type_id)) != 0) {
+    if (type_id == kTypeIdHistogram) {
+      PersistentHistogramData* histogram_data =
+          memory_allocator->GetAsObject<PersistentHistogramData>(
+              ref, kTypeIdHistogram);
+      DCHECK(histogram_data);
+      StatisticsRecorder::ForgetHistogramForTesting(histogram_data->name);
+
+      // If a test breaks here then a memory region containing a histogram
+      // actively used by this code is being released back to the test.
+      // If that memory segment were to be deleted, future calls to create
+      // persistent histograms would crash. To avoid this, have the test call
+      // the method GetCreateHistogramResultHistogram() *before* setting
+      // the (temporary) memory allocator via SetGlobalAllocator() so that
+      // histogram is instead allocated from the process heap.
+      DCHECK_NE(kResultHistogram, histogram_data->name);
+    }
+  }
+
+  g_allocator = nullptr;
+  return make_scoped_ptr(histogram_allocator);
+}
+
+// static
+void PersistentHistogramAllocator::CreateGlobalAllocatorOnPersistentMemory(
+    void* base,
+    size_t size,
+    size_t page_size,
+    uint64_t id,
+    StringPiece name) {
+  SetGlobalAllocator(make_scoped_ptr(new PersistentHistogramAllocator(
+      make_scoped_ptr(new PersistentMemoryAllocator(
+          base, size, page_size, id, name, false)))));
+}
+
+// static
+void PersistentHistogramAllocator::CreateGlobalAllocatorOnLocalMemory(
+    size_t size,
+    uint64_t id,
+    StringPiece name) {
+  SetGlobalAllocator(make_scoped_ptr(new PersistentHistogramAllocator(
+      make_scoped_ptr(new LocalPersistentMemoryAllocator(size, id, name)))));
+}
+
+// static
+void PersistentHistogramAllocator::CreateGlobalAllocatorOnSharedMemory(
+    size_t size,
+    const SharedMemoryHandle& handle) {
+  scoped_ptr<SharedMemory> shm(new SharedMemory(handle, /*readonly=*/false));
+  if (!shm->Map(size)) {
+    NOTREACHED();
+    return;
+  }
+
+  SetGlobalAllocator(make_scoped_ptr(new PersistentHistogramAllocator(
+      make_scoped_ptr(new SharedPersistentMemoryAllocator(
+          std::move(shm), 0, StringPiece(), /*readonly=*/false)))));
+}
+
+// static
+scoped_ptr<HistogramBase> PersistentHistogramAllocator::CreateHistogram(
+    PersistentHistogramData* histogram_data_ptr) {
+  if (!histogram_data_ptr) {
+    RecordCreateHistogramResult(CREATE_HISTOGRAM_INVALID_METADATA_POINTER);
+    NOTREACHED();
+    return nullptr;
+  }
+
+  // Sparse histograms are quite different so handle them as a special case.
+  if (histogram_data_ptr->histogram_type == SPARSE_HISTOGRAM) {
+    scoped_ptr<HistogramBase> histogram = SparseHistogram::PersistentCreate(
+        memory_allocator(), histogram_data_ptr->name,
+        &histogram_data_ptr->samples_metadata,
+        &histogram_data_ptr->logged_metadata);
+    DCHECK(histogram);
+    histogram->SetFlags(histogram_data_ptr->flags);
+    RecordCreateHistogramResult(CREATE_HISTOGRAM_SUCCESS);
+    return histogram;
+  }
+
+  // Copy the histogram_data to local storage because anything in persistent
+  // memory cannot be trusted as it could be changed at any moment by a
+  // malicious actor that shares access. The contents of histogram_data are
+  // validated below; the local copy is to ensure that the contents cannot
+  // be externally changed between validation and use.
+  PersistentHistogramData histogram_data = *histogram_data_ptr;
+
+  HistogramBase::Sample* ranges_data =
+      memory_allocator_->GetAsObject<HistogramBase::Sample>(
+          histogram_data.ranges_ref, kTypeIdRangesArray);
+
+  const uint32_t max_buckets =
+      std::numeric_limits<uint32_t>::max() / sizeof(HistogramBase::Sample);
+  size_t required_bytes =
+      (histogram_data.bucket_count + 1) * sizeof(HistogramBase::Sample);
+  size_t allocated_bytes =
+      memory_allocator_->GetAllocSize(histogram_data.ranges_ref);
+  if (!ranges_data || histogram_data.bucket_count < 2 ||
+      histogram_data.bucket_count >= max_buckets ||
+      allocated_bytes < required_bytes) {
+    RecordCreateHistogramResult(CREATE_HISTOGRAM_INVALID_RANGES_ARRAY);
+    NOTREACHED();
+    return nullptr;
+  }
+
+  scoped_ptr<const BucketRanges> created_ranges =
+      CreateRangesFromData(ranges_data, histogram_data.ranges_checksum,
+                           histogram_data.bucket_count + 1);
+  if (!created_ranges) {
+    RecordCreateHistogramResult(CREATE_HISTOGRAM_INVALID_RANGES_ARRAY);
+    NOTREACHED();
+    return nullptr;
+  }
+  const BucketRanges* ranges =
+      StatisticsRecorder::RegisterOrDeleteDuplicateRanges(
+          created_ranges.release());
+
+  HistogramBase::AtomicCount* counts_data =
+      memory_allocator_->GetAsObject<HistogramBase::AtomicCount>(
+          histogram_data.counts_ref, kTypeIdCountsArray);
+  size_t counts_bytes =
+      CalculateRequiredCountsBytes(histogram_data.bucket_count);
+  if (!counts_data || counts_bytes == 0 ||
+      memory_allocator_->GetAllocSize(histogram_data.counts_ref) <
+          counts_bytes) {
+    RecordCreateHistogramResult(CREATE_HISTOGRAM_INVALID_COUNTS_ARRAY);
+    NOTREACHED();
+    return nullptr;
+  }
+
+  // After the main "counts" array is a second array used for storing what
+  // was previously logged. This is used to calculate the "delta" during
+  // snapshot operations.
+  HistogramBase::AtomicCount* logged_data =
+      counts_data + histogram_data.bucket_count;
+
+  std::string name(histogram_data_ptr->name);
+  scoped_ptr<HistogramBase> histogram;
+  switch (histogram_data.histogram_type) {
+    case HISTOGRAM:
+      histogram = Histogram::PersistentCreate(
+          name, histogram_data.minimum, histogram_data.maximum, ranges,
+          counts_data, logged_data, histogram_data.bucket_count,
+          &histogram_data_ptr->samples_metadata,
+          &histogram_data_ptr->logged_metadata);
+      DCHECK(histogram);
+      break;
+    case LINEAR_HISTOGRAM:
+      histogram = LinearHistogram::PersistentCreate(
+          name, histogram_data.minimum, histogram_data.maximum, ranges,
+          counts_data, logged_data, histogram_data.bucket_count,
+          &histogram_data_ptr->samples_metadata,
+          &histogram_data_ptr->logged_metadata);
+      DCHECK(histogram);
+      break;
+    case BOOLEAN_HISTOGRAM:
+      histogram = BooleanHistogram::PersistentCreate(
+          name, ranges, counts_data, logged_data,
+          &histogram_data_ptr->samples_metadata,
+          &histogram_data_ptr->logged_metadata);
+      DCHECK(histogram);
+      break;
+    case CUSTOM_HISTOGRAM:
+      histogram = CustomHistogram::PersistentCreate(
+          name, ranges, counts_data, logged_data, histogram_data.bucket_count,
+          &histogram_data_ptr->samples_metadata,
+          &histogram_data_ptr->logged_metadata);
+      DCHECK(histogram);
+      break;
+    default:
+      NOTREACHED();
+  }
+
+  if (histogram) {
+    DCHECK_EQ(histogram_data.histogram_type, histogram->GetHistogramType());
+    histogram->SetFlags(histogram_data.flags);
+    RecordCreateHistogramResult(CREATE_HISTOGRAM_SUCCESS);
+  } else {
+    RecordCreateHistogramResult(CREATE_HISTOGRAM_UNKNOWN_TYPE);
+  }
+
+  return histogram;
+}
+
+scoped_ptr<HistogramBase> PersistentHistogramAllocator::GetHistogram(
+    Reference ref) {
+  // Unfortunately, the histogram "pickle" methods cannot be used as part of
+  // the persistence because the deserialization methods always create local
+  // count data (while these must reference the persistent counts) and always
+  // add it to the local list of known histograms (while these may be simple
+  // references to histograms in other processes).
+  PersistentHistogramData* histogram_data =
+      memory_allocator_->GetAsObject<PersistentHistogramData>(
+          ref, kTypeIdHistogram);
+  size_t length = memory_allocator_->GetAllocSize(ref);
+  if (!histogram_data ||
+      reinterpret_cast<char*>(histogram_data)[length - 1] != '\0') {
+    RecordCreateHistogramResult(CREATE_HISTOGRAM_INVALID_METADATA);
+    NOTREACHED();
+    return nullptr;
+  }
+  return CreateHistogram(histogram_data);
+}
+
+scoped_ptr<HistogramBase>
+PersistentHistogramAllocator::GetNextHistogramWithIgnore(Iterator* iter,
+                                                         Reference ignore) {
+  PersistentMemoryAllocator::Reference ref;
+  uint32_t type_id;
+  while ((ref = memory_allocator_->GetNextIterable(&iter->memory_iter,
+                                                   &type_id)) != 0) {
+    if (ref == ignore)
+      continue;
+    if (type_id == kTypeIdHistogram)
+      return GetHistogram(ref);
+  }
+  return nullptr;
+}
+
+void PersistentHistogramAllocator::FinalizeHistogram(Reference ref,
+                                                     bool registered) {
+  // If the created persistent histogram was registered then it needs to
+  // be marked as "iterable" in order to be found by other processes.
+  if (registered)
+    memory_allocator_->MakeIterable(ref);
+  // If it wasn't registered then a race condition must have caused
+  // two to be created. The allocator does not support releasing the
+  // acquired memory so just change the type to be empty.
+  else
+    memory_allocator_->SetType(ref, 0);
+}
+
+scoped_ptr<HistogramBase> PersistentHistogramAllocator::AllocateHistogram(
+    HistogramType histogram_type,
+    const std::string& name,
+    int minimum,
+    int maximum,
+    const BucketRanges* bucket_ranges,
+    int32_t flags,
+    Reference* ref_ptr) {
+  // If the allocator is corrupt, don't waste time trying anything else.
+  // This also allows differentiating on the dashboard between allocations
+  // failed due to a corrupt allocator and the number of process instances
+  // with one, the latter being indicated by "newly corrupt", below.
+  if (memory_allocator_->IsCorrupt()) {
+    RecordCreateHistogramResult(CREATE_HISTOGRAM_ALLOCATOR_CORRUPT);
+    return nullptr;
+  }
+
+  // Create the metadata necessary for a persistent sparse histogram. This
+  // is done first because it is a small subset of what is required for
+  // other histograms.
+  PersistentMemoryAllocator::Reference histogram_ref =
+      memory_allocator_->Allocate(
+          offsetof(PersistentHistogramData, name) + name.length() + 1,
+          kTypeIdHistogram);
+  PersistentHistogramData* histogram_data =
+      memory_allocator_->GetAsObject<PersistentHistogramData>(histogram_ref,
+                                                              kTypeIdHistogram);
+  if (histogram_data) {
+    memcpy(histogram_data->name, name.c_str(), name.size() + 1);
+    histogram_data->histogram_type = histogram_type;
+    histogram_data->flags = flags | HistogramBase::kIsPersistent;
+  }
+
+  // Create the remaining metadata necessary for regular histograms.
+  if (histogram_type != SPARSE_HISTOGRAM) {
+    size_t bucket_count = bucket_ranges->bucket_count();
+    size_t counts_bytes = CalculateRequiredCountsBytes(bucket_count);
+    if (counts_bytes == 0) {
+      // |bucket_count| was out-of-range.
+      NOTREACHED();
+      return nullptr;
+    }
+
+    size_t ranges_bytes = (bucket_count + 1) * sizeof(HistogramBase::Sample);
+    PersistentMemoryAllocator::Reference counts_ref =
+        memory_allocator_->Allocate(counts_bytes, kTypeIdCountsArray);
+    PersistentMemoryAllocator::Reference ranges_ref =
+        memory_allocator_->Allocate(ranges_bytes, kTypeIdRangesArray);
+    HistogramBase::Sample* ranges_data =
+        memory_allocator_->GetAsObject<HistogramBase::Sample>(
+            ranges_ref, kTypeIdRangesArray);
+
+    // Only continue here if all allocations were successful. If they weren't,
+    // there is no way to free the space but that's not really a problem since
+    // the allocations only fail because the space is full or corrupt and so
+    // any future attempts will also fail.
+    if (counts_ref && ranges_data && histogram_data) {
+      for (size_t i = 0; i < bucket_ranges->size(); ++i)
+        ranges_data[i] = bucket_ranges->range(i);
+
+      histogram_data->minimum = minimum;
+      histogram_data->maximum = maximum;
+      // |bucket_count| must fit within 32-bits or the allocation of the counts
+      // array would have failed for being too large; the allocator supports
+      // less than 4GB total size.
+      histogram_data->bucket_count = static_cast<uint32_t>(bucket_count);
+      histogram_data->ranges_ref = ranges_ref;
+      histogram_data->ranges_checksum = bucket_ranges->checksum();
+      histogram_data->counts_ref = counts_ref;
+    } else {
+      histogram_data = nullptr;  // Clear this for proper handling below.
+    }
+  }
+
+  if (histogram_data) {
+    // Create the histogram using resources in persistent memory. This ends up
+    // resolving the "ref" values stored in histogram_data instad of just
+    // using what is already known above but avoids duplicating the switch
+    // statement here and serves as a double-check that everything is
+    // correct before commiting the new histogram to persistent space.
+    scoped_ptr<HistogramBase> histogram = CreateHistogram(histogram_data);
+    DCHECK(histogram);
+    if (ref_ptr != nullptr)
+      *ref_ptr = histogram_ref;
+
+    // By storing the reference within the allocator to this histogram, the
+    // next import (which will happen before the next histogram creation)
+    // will know to skip it. See also the comment in ImportGlobalHistograms().
+    subtle::NoBarrier_Store(&last_created_, histogram_ref);
+    return histogram;
+  }
+
+  CreateHistogramResultType result;
+  if (memory_allocator_->IsCorrupt()) {
+    RecordCreateHistogramResult(CREATE_HISTOGRAM_ALLOCATOR_NEWLY_CORRUPT);
+    result = CREATE_HISTOGRAM_ALLOCATOR_CORRUPT;
+  } else if (memory_allocator_->IsFull()) {
+    result = CREATE_HISTOGRAM_ALLOCATOR_FULL;
+  } else {
+    result = CREATE_HISTOGRAM_ALLOCATOR_ERROR;
+  }
+  RecordCreateHistogramResult(result);
+  NOTREACHED() << "error=" << result;
+
+  return nullptr;
+}
+
+// static
+void PersistentHistogramAllocator::ImportGlobalHistograms() {
+  // The lock protects against concurrent access to the iterator and is created
+  // in a thread-safe manner when needed.
+  static base::LazyInstance<base::Lock>::Leaky lock = LAZY_INSTANCE_INITIALIZER;
+
+  if (g_allocator) {
+    // TODO(bcwhite): Investigate a lock-free, thread-safe iterator.
+    base::AutoLock auto_lock(lock.Get());
+
+    // Each call resumes from where it last left off so a persistent iterator
+    // is needed. This class has a constructor so even the definition has to
+    // be protected by the lock in order to be thread-safe.
+    static Iterator iter;
+    if (iter.is_clear())
+      g_allocator->CreateIterator(&iter);
+
+    // Skip the import if it's the histogram that was last created. Should a
+    // race condition cause the "last created" to be overwritten before it
+    // is recognized here then the histogram will be created and be ignored
+    // when it is detected as a duplicate by the statistics-recorder. This
+    // simple check reduces the time of creating persistent histograms by
+    // about 40%.
+    Reference last_created =
+        subtle::NoBarrier_Load(&g_allocator->last_created_);
+
+    while (true) {
+      scoped_ptr<HistogramBase> histogram =
+          g_allocator->GetNextHistogramWithIgnore(&iter, last_created);
+      if (!histogram)
+        break;
+      StatisticsRecorder::RegisterOrDeleteDuplicate(histogram.release());
+    }
+  }
+}
+
+}  // namespace base
diff --git a/base/metrics/persistent_histogram_allocator.h b/base/metrics/persistent_histogram_allocator.h
new file mode 100644
index 0000000..cc8d023
--- /dev/null
+++ b/base/metrics/persistent_histogram_allocator.h
@@ -0,0 +1,212 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_METRICS_HISTOGRAM_PERSISTENCE_H_
+#define BASE_METRICS_HISTOGRAM_PERSISTENCE_H_
+
+#include "base/atomicops.h"
+#include "base/base_export.h"
+#include "base/feature_list.h"
+#include "base/memory/scoped_ptr.h"
+#include "base/memory/shared_memory.h"
+#include "base/metrics/histogram_base.h"
+#include "base/metrics/persistent_memory_allocator.h"
+#include "base/strings/string_piece.h"
+
+namespace base {
+
+// Feature definition for enabling histogram persistence.
+BASE_EXPORT extern const Feature kPersistentHistogramsFeature;
+
+// This class manages histograms created within a PersistentMemoryAllocator.
+class BASE_EXPORT PersistentHistogramAllocator {
+ public:
+  // This iterator is used for fetching persistent histograms from an
+  // allocator.
+  class Iterator {
+   public:
+    bool is_clear() { return memory_iter.is_clear(); }
+
+   private:
+    friend class PersistentHistogramAllocator;
+
+    // The iterator used for stepping through persistent memory iterables.
+    PersistentMemoryAllocator::Iterator memory_iter;
+  };
+
+  using Reference = PersistentMemoryAllocator::Reference;
+
+  // A PersistentHistogramAllocator is constructed from a PersistentMemory-
+  // Allocator object of which it takes ownership.
+  PersistentHistogramAllocator(scoped_ptr<PersistentMemoryAllocator> memory);
+  ~PersistentHistogramAllocator();
+
+  // Direct access to underlying memory allocator. If the segment is shared
+  // across threads or processes, reading data through these values does
+  // not guarantee consistency. Use with care. Do not write.
+  PersistentMemoryAllocator* memory_allocator() {
+    return memory_allocator_.get();
+  }
+
+  // Implement the "metadata" API of a PersistentMemoryAllocator, forwarding
+  // those requests to the real one.
+  uint64_t Id() const { return memory_allocator_->Id(); }
+  const char* Name() const { return memory_allocator_->Name(); }
+  const void* data() const { return memory_allocator_->data(); }
+  size_t length() const { return memory_allocator_->length(); }
+  size_t used() const { return memory_allocator_->used(); }
+
+  // Recreate a Histogram from data held in persistent memory. Though this
+  // object will be local to the current process, the sample data will be
+  // shared with all other threads referencing it. This method takes a |ref|
+  // to where the top-level histogram data may be found in this allocator.
+  // This method will return null if any problem is detected with the data.
+  scoped_ptr<HistogramBase> GetHistogram(Reference ref);
+
+  // Get the next histogram in persistent data based on iterator.
+  scoped_ptr<HistogramBase> GetNextHistogram(Iterator* iter) {
+    return GetNextHistogramWithIgnore(iter, 0);
+  }
+
+  // Create an iterator for going through all histograms in an allocator.
+  void CreateIterator(Iterator* iter);
+
+  // Allocate a new persistent histogram. The returned histogram will not
+  // be able to be located by other allocators until it is "finalized".
+  scoped_ptr<HistogramBase> AllocateHistogram(
+      HistogramType histogram_type,
+      const std::string& name,
+      int minimum,
+      int maximum,
+      const BucketRanges* bucket_ranges,
+      int32_t flags,
+      Reference* ref_ptr);
+
+  // Finalize the creation of the histogram, making it available to other
+  // processes if |registered| (as in: added to the StatisticsRecorder) is
+  // true, forgetting it otherwise.
+  void FinalizeHistogram(Reference ref, bool registered);
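+
+  // A minimal allocate-then-finalize sketch (illustrative; |ranges| and the
+  // registration outcome are assumed to come from the caller):
+  //   Reference ref = 0;
+  //   scoped_ptr<HistogramBase> histogram = allocator->AllocateHistogram(
+  //       HISTOGRAM, "Example", 1, 100, ranges, /*flags=*/0, &ref);
+  //   if (histogram)
+  //     allocator->FinalizeHistogram(ref, /*registered=*/true);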
+
+  // Create internal histograms for tracking memory use and allocation sizes
+  // for allocator of |name| (which can simply be the result of Name()). This
+  // is done separately from construction for situations such as when the
+  // histograms will be backed by memory provided by this very allocator.
+  //
+  // IMPORTANT: Callers must update tools/metrics/histograms/histograms.xml
+  // with the following histograms:
+  //    UMA.PersistentAllocator.name.Allocs
+  //    UMA.PersistentAllocator.name.UsedPct
+  void CreateTrackingHistograms(StringPiece name);
+  void UpdateTrackingHistograms();
+
+  // Manage a PersistentHistogramAllocator for globally storing histograms in
+  // a space that can be persisted or shared between processes. There is only
+  // ever one allocator for all such histograms created by a single process.
+  // This takes ownership of the object and should be called as soon as
+  // possible during startup to capture as many histograms as possible, and
+  // while operating single-threaded so there are no race conditions.
+  static void SetGlobalAllocator(
+      scoped_ptr<PersistentHistogramAllocator> allocator);
+  static PersistentHistogramAllocator* GetGlobalAllocator();
+
+  // This access to the persistent allocator is only for testing; it extracts
+  // the current allocator completely. This allows easy creation of histograms
+  // within persistent memory segments which can then be extracted and used
+  // in other ways.
+  static scoped_ptr<PersistentHistogramAllocator>
+  ReleaseGlobalAllocatorForTesting();
+
+  // These helper methods perform SetGlobalAllocator() calls with allocators
+  // of the specified type and parameters.
+  static void CreateGlobalAllocatorOnPersistentMemory(
+      void* base,
+      size_t size,
+      size_t page_size,
+      uint64_t id,
+      StringPiece name);
+  static void CreateGlobalAllocatorOnLocalMemory(
+      size_t size,
+      uint64_t id,
+      StringPiece name);
+  static void CreateGlobalAllocatorOnSharedMemory(
+      size_t size,
+      const SharedMemoryHandle& handle);
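+
+  // A setup sketch for the persistent-memory variant (illustrative; the
+  // mapped |base| pointer and sizes are assumed to come from the caller):
+  //   PersistentHistogramAllocator::GetCreateHistogramResultHistogram();
+  //   PersistentHistogramAllocator::CreateGlobalAllocatorOnPersistentMemory(
+  //       base, size, /*page_size=*/0, /*id=*/0, "ExampleMetrics");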
+
+  // Import new histograms from the global PersistentHistogramAllocator. It's
+  // possible for other processes to create histograms in the active memory
+  // segment; this adds those to the internal list of known histograms to
+  // avoid creating duplicates that would have to be merged during reporting.
+  // Every call to this method resumes from the last entry it saw; it costs
+  // nothing if nothing new has been added.
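+  //
+  // A reading-side sketch (illustrative; assumes the caller then inspects
+  // the StatisticsRecorder):
+  //   PersistentHistogramAllocator::ImportGlobalHistograms();
+  //   StatisticsRecorder::Histograms histograms;
+  //   StatisticsRecorder::GetHistograms(&histograms);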
+  static void ImportGlobalHistograms();
+
+  // Histogram containing creation results. Visible for testing.
+  static HistogramBase* GetCreateHistogramResultHistogram();
+
+ private:
+  // Enumerate possible creation results for reporting.
+  enum CreateHistogramResultType {
+    // Everything was fine.
+    CREATE_HISTOGRAM_SUCCESS = 0,
+
+    // Pointer to metadata was not valid.
+    CREATE_HISTOGRAM_INVALID_METADATA_POINTER,
+
+    // Histogram metadata was not valid.
+    CREATE_HISTOGRAM_INVALID_METADATA,
+
+    // Ranges information was not valid.
+    CREATE_HISTOGRAM_INVALID_RANGES_ARRAY,
+
+    // Counts information was not valid.
+    CREATE_HISTOGRAM_INVALID_COUNTS_ARRAY,
+
+    // Could not allocate histogram memory due to corruption.
+    CREATE_HISTOGRAM_ALLOCATOR_CORRUPT,
+
+    // Could not allocate histogram memory due to lack of space.
+    CREATE_HISTOGRAM_ALLOCATOR_FULL,
+
+    // Could not allocate histogram memory due to unknown error.
+    CREATE_HISTOGRAM_ALLOCATOR_ERROR,
+
+    // Histogram was of unknown type.
+    CREATE_HISTOGRAM_UNKNOWN_TYPE,
+
+    // Instance has detected a corrupt allocator (recorded only once).
+    CREATE_HISTOGRAM_ALLOCATOR_NEWLY_CORRUPT,
+
+    // Always keep this at the end.
+    CREATE_HISTOGRAM_MAX
+  };
+
+  // The structure used to hold histogram data in persistent memory. It is
+  // defined and used entirely within the .cc file.
+  struct PersistentHistogramData;
+
+  // Get the next histogram in persistent data based on iterator while
+  // ignoring a particular reference if it is found.
+  scoped_ptr<HistogramBase> GetNextHistogramWithIgnore(
+      Iterator* iter,
+      Reference ignore);
+
+  // Create a histogram based on saved (persistent) information about it.
+  scoped_ptr<HistogramBase> CreateHistogram(
+      PersistentHistogramData* histogram_data_ptr);
+
+  // Record the result of a histogram creation.
+  static void RecordCreateHistogramResult(CreateHistogramResultType result);
+
+  // The memory allocator that provides the actual histogram storage.
+  scoped_ptr<PersistentMemoryAllocator> memory_allocator_;
+
+  // A reference to the last-created histogram in the allocator, used to avoid
+  // trying to import what was just created.
+  subtle::AtomicWord last_created_ = 0;
+
+  DISALLOW_COPY_AND_ASSIGN(PersistentHistogramAllocator);
+};
+
+}  // namespace base
+
+#endif  // BASE_METRICS_HISTOGRAM_PERSISTENCE_H_
diff --git a/base/metrics/persistent_histogram_allocator_unittest.cc b/base/metrics/persistent_histogram_allocator_unittest.cc
new file mode 100644
index 0000000..c65eade
--- /dev/null
+++ b/base/metrics/persistent_histogram_allocator_unittest.cc
@@ -0,0 +1,126 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/metrics/persistent_histogram_allocator.h"
+
+#include "base/logging.h"
+#include "base/metrics/bucket_ranges.h"
+#include "base/metrics/histogram_macros.h"
+#include "base/metrics/persistent_memory_allocator.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace base {
+
+class PersistentHistogramAllocatorTest : public testing::Test {
+ protected:
+  const int32_t kAllocatorMemorySize = 64 << 10;  // 64 KiB
+
+  PersistentHistogramAllocatorTest() { CreatePersistentHistogramAllocator(); }
+  ~PersistentHistogramAllocatorTest() override {
+    DestroyPersistentHistogramAllocator();
+  }
+
+  void CreatePersistentHistogramAllocator() {
+    allocator_memory_.reset(new char[kAllocatorMemorySize]);
+
+    PersistentHistogramAllocator::ReleaseGlobalAllocatorForTesting();
+    memset(allocator_memory_.get(), 0, kAllocatorMemorySize);
+    PersistentHistogramAllocator::GetCreateHistogramResultHistogram();
+    PersistentHistogramAllocator::CreateGlobalAllocatorOnPersistentMemory(
+        allocator_memory_.get(), kAllocatorMemorySize, 0, 0,
+        "PersistentHistogramAllocatorTest");
+    allocator_ =
+        PersistentHistogramAllocator::GetGlobalAllocator()->memory_allocator();
+  }
+
+  void DestroyPersistentHistogramAllocator() {
+    allocator_ = nullptr;
+    PersistentHistogramAllocator::ReleaseGlobalAllocatorForTesting();
+  }
+
+  scoped_ptr<char[]> allocator_memory_;
+  PersistentMemoryAllocator* allocator_ = nullptr;
+
+ private:
+  DISALLOW_COPY_AND_ASSIGN(PersistentHistogramAllocatorTest);
+};
+
+TEST_F(PersistentHistogramAllocatorTest, CreateAndIterateTest) {
+  PersistentMemoryAllocator::MemoryInfo meminfo0;
+  allocator_->GetMemoryInfo(&meminfo0);
+
+  // Try basic construction.
+  HistogramBase* histogram = Histogram::FactoryGet(
+      "TestHistogram", 1, 1000, 10, HistogramBase::kIsPersistent);
+  EXPECT_TRUE(histogram);
+  histogram->CheckName("TestHistogram");
+  PersistentMemoryAllocator::MemoryInfo meminfo1;
+  allocator_->GetMemoryInfo(&meminfo1);
+  EXPECT_GT(meminfo0.free, meminfo1.free);
+
+  HistogramBase* linear_histogram = LinearHistogram::FactoryGet(
+      "TestLinearHistogram", 1, 1000, 10, HistogramBase::kIsPersistent);
+  EXPECT_TRUE(linear_histogram);
+  linear_histogram->CheckName("TestLinearHistogram");
+  PersistentMemoryAllocator::MemoryInfo meminfo2;
+  allocator_->GetMemoryInfo(&meminfo2);
+  EXPECT_GT(meminfo1.free, meminfo2.free);
+
+  HistogramBase* boolean_histogram = BooleanHistogram::FactoryGet(
+      "TestBooleanHistogram", HistogramBase::kIsPersistent);
+  EXPECT_TRUE(boolean_histogram);
+  boolean_histogram->CheckName("TestBooleanHistogram");
+  PersistentMemoryAllocator::MemoryInfo meminfo3;
+  allocator_->GetMemoryInfo(&meminfo3);
+  EXPECT_GT(meminfo2.free, meminfo3.free);
+
+  std::vector<int> custom_ranges;
+  custom_ranges.push_back(1);
+  custom_ranges.push_back(5);
+  HistogramBase* custom_histogram = CustomHistogram::FactoryGet(
+      "TestCustomHistogram", custom_ranges, HistogramBase::kIsPersistent);
+  EXPECT_TRUE(custom_histogram);
+  custom_histogram->CheckName("TestCustomHistogram");
+  PersistentMemoryAllocator::MemoryInfo meminfo4;
+  allocator_->GetMemoryInfo(&meminfo4);
+  EXPECT_GT(meminfo3.free, meminfo4.free);
+
+  PersistentMemoryAllocator::Iterator iter;
+  uint32_t type;
+  allocator_->CreateIterator(&iter);
+  EXPECT_NE(0U, allocator_->GetNextIterable(&iter, &type));  // Histogram
+  EXPECT_NE(0U, allocator_->GetNextIterable(&iter, &type));  // LinearHistogram
+  EXPECT_NE(0U, allocator_->GetNextIterable(&iter, &type));  // BooleanHistogram
+  EXPECT_NE(0U, allocator_->GetNextIterable(&iter, &type));  // CustomHistogram
+  EXPECT_EQ(0U, allocator_->GetNextIterable(&iter, &type));
+
+  // Create a second allocator and have it access the memory of the first.
+  scoped_ptr<HistogramBase> recovered;
+  PersistentHistogramAllocator recovery(
+      make_scoped_ptr(new PersistentMemoryAllocator(
+          allocator_memory_.get(), kAllocatorMemorySize, 0, 0, "", false)));
+  PersistentHistogramAllocator::Iterator histogram_iter;
+  recovery.CreateIterator(&histogram_iter);
+
+  recovered = recovery.GetNextHistogram(&histogram_iter);
+  ASSERT_TRUE(recovered.get());
+  recovered->CheckName("TestHistogram");
+
+  recovered = recovery.GetNextHistogram(&histogram_iter);
+  ASSERT_TRUE(recovered.get());
+  recovered->CheckName("TestLinearHistogram");
+
+  recovered = recovery.GetNextHistogram(&histogram_iter);
+  ASSERT_TRUE(recovered.get());
+  recovered->CheckName("TestBooleanHistogram");
+
+  recovered = recovery.GetNextHistogram(&histogram_iter);
+  ASSERT_TRUE(recovered.get());
+  recovered->CheckName("TestCustomHistogram");
+
+  recovered = recovery.GetNextHistogram(&histogram_iter);
+  EXPECT_FALSE(recovered.get());
+}
+
+}  // namespace base
diff --git a/base/metrics/persistent_memory_allocator.cc b/base/metrics/persistent_memory_allocator.cc
new file mode 100644
index 0000000..a1a960c
--- /dev/null
+++ b/base/metrics/persistent_memory_allocator.cc
@@ -0,0 +1,706 @@
+// Copyright (c) 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/metrics/persistent_memory_allocator.h"
+
+#include <assert.h>
+#include <algorithm>
+
+#include "base/files/memory_mapped_file.h"
+#include "base/logging.h"
+#include "base/memory/shared_memory.h"
+#include "base/metrics/histogram_macros.h"
+
+namespace {
+
+// Required range of memory segment sizes. It has to fit in an unsigned 32-bit
+// number and should be a power of 2 in order to accommodate almost any page
+// size.
+const uint32_t kSegmentMinSize = 1 << 10;  // 1 KiB
+const uint32_t kSegmentMaxSize = 1 << 30;  // 1 GiB
+
+// A constant (random) value placed in the shared metadata to identify
+// an already initialized memory segment.
+const uint32_t kGlobalCookie = 0x408305DC;
+
+// The current version of the metadata. If updates are made that change
+// the metadata, the version number can be queried to operate in a backward-
+// compatible manner until the memory segment is completely re-initialized.
+const uint32_t kGlobalVersion = 1;
+
+// Constant values placed in the block headers to indicate its state.
+const uint32_t kBlockCookieFree = 0;
+const uint32_t kBlockCookieQueue = 1;
+const uint32_t kBlockCookieWasted = (uint32_t)-1;
+const uint32_t kBlockCookieAllocated = 0xC8799269;
+
+// TODO(bcwhite): When acceptable, consider moving flags to std::atomic<char>
+// types rather than combined bitfield.
+
+// Flags stored in the flags_ field of the SharedMetaData structure below.
+enum : int {
+  kFlagCorrupt = 1 << 0,
+  kFlagFull    = 1 << 1
+};
+
+bool CheckFlag(const volatile std::atomic<uint32_t>* flags, int flag) {
+  uint32_t loaded_flags = flags->load();
+  return (loaded_flags & flag) != 0;
+}
+
+void SetFlag(volatile std::atomic<uint32_t>* flags, int flag) {
+  uint32_t loaded_flags = flags->load();
+  for (;;) {
+    uint32_t new_flags = (loaded_flags & ~flag) | flag;
+    // In the failure case, the actual "flags" value is stored in loaded_flags.
+    if (flags->compare_exchange_weak(loaded_flags, new_flags))
+      break;
+  }
+}
+
+}  // namespace
+
+namespace base {
+
+// All allocations and data-structures must be aligned to this byte boundary.
+// Alignment as large as the physical bus between CPU and RAM is _required_
+// for some architectures, is simply more efficient on other CPUs, and
+// generally a Good Idea(tm) for all platforms as it reduces/eliminates the
+// chance that a type will span cache lines. Alignment mustn't be less
+// than 8 to ensure proper alignment for all types. The rest is a balance
+// between reducing spans across multiple cache lines and wasted space spent
+// padding out allocations. An alignment of 16 would ensure that the block
+// header structure always sits in a single cache line. An average of about
+// 1/2 this value will be wasted with every allocation.
+const uint32_t PersistentMemoryAllocator::kAllocAlignment = 8;
+
+// The block-header is placed at the top of every allocation within the
+// segment to describe the data that follows it.
+struct PersistentMemoryAllocator::BlockHeader {
+  uint32_t size;       // Number of bytes in this block, including header.
+  uint32_t cookie;     // Constant value indicating completed allocation.
+  uint32_t type_id;    // A number provided by caller indicating data type.
+  std::atomic<uint32_t> next;  // Pointer to the next block when iterating.
+};
+
+// The shared metadata exists once at the top of the memory segment to
+// describe the state of the allocator to all processes.
+struct PersistentMemoryAllocator::SharedMetadata {
+  uint32_t cookie;     // Some value that indicates complete initialization.
+  uint32_t size;       // Total size of memory segment.
+  uint32_t page_size;  // Paging size within memory segment.
+  uint32_t version;    // Version code so upgrades don't break.
+  uint64_t id;         // Arbitrary ID number given by creator.
+  uint32_t name;       // Reference to stored name string.
+
+  // Above is read-only after first construction. Below may be changed and
+  // so must be marked "volatile" to provide correct inter-process behavior.
+
+  // Bitfield of information flags. Access to this should be done through
+  // the CheckFlag() and SetFlag() methods defined above.
+  volatile std::atomic<uint32_t> flags;
+
+  // Offset/reference to first free space in segment.
+  volatile std::atomic<uint32_t> freeptr;
+
+  // The "iterable" queue is an M&S Queue as described here, append-only:
+  // https://www.research.ibm.com/people/m/michael/podc-1996.pdf
+  volatile std::atomic<uint32_t> tailptr;  // Last block of iteration queue.
+  volatile BlockHeader queue;   // Empty block for linked-list head/tail.
+};
+
+// The "queue" block header is used to detect "last node" so that zero/null
+// can be used to indicate that it hasn't been added at all. It is part of
+// the SharedMetadata structure which itself is always located at offset zero.
+const PersistentMemoryAllocator::Reference
+    PersistentMemoryAllocator::kReferenceQueue =
+        offsetof(SharedMetadata, queue);
+const PersistentMemoryAllocator::Reference
+    PersistentMemoryAllocator::kReferenceNull = 0;
+
+
+// static
+bool PersistentMemoryAllocator::IsMemoryAcceptable(const void* base,
+                                                   size_t size,
+                                                   size_t page_size,
+                                                   bool readonly) {
+  return ((base && reinterpret_cast<uintptr_t>(base) % kAllocAlignment == 0) &&
+          (size >= sizeof(SharedMetadata) && size <= kSegmentMaxSize) &&
+          (size >= kSegmentMinSize || readonly) &&
+          (size % kAllocAlignment == 0 || readonly) &&
+          (page_size == 0 || size % page_size == 0 || readonly));
+}
+
+PersistentMemoryAllocator::PersistentMemoryAllocator(
+    void* base,
+    size_t size,
+    size_t page_size,
+    uint64_t id,
+    base::StringPiece name,
+    bool readonly)
+    : mem_base_(static_cast<char*>(base)),
+      mem_size_(static_cast<uint32_t>(size)),
+      mem_page_(static_cast<uint32_t>((page_size ? page_size : size))),
+      readonly_(readonly),
+      corrupt_(0),
+      allocs_histogram_(nullptr),
+      used_histogram_(nullptr) {
+  static_assert(sizeof(BlockHeader) % kAllocAlignment == 0,
+                "BlockHeader is not a multiple of kAllocAlignment");
+  static_assert(sizeof(SharedMetadata) % kAllocAlignment == 0,
+                "SharedMetadata is not a multiple of kAllocAlignment");
+  static_assert(kReferenceQueue % kAllocAlignment == 0,
+                "\"queue\" is not aligned properly; must be at end of struct");
+
+  // Ensure that memory segment is of acceptable size.
+  CHECK(IsMemoryAcceptable(base, size, page_size, readonly));
+
+  // These atomics operate inter-process and so must be lock-free. The local
+  // casts are to make sure it can be evaluated at compile time to a constant.
+  CHECK(((SharedMetadata*)0)->freeptr.is_lock_free());
+  CHECK(((SharedMetadata*)0)->flags.is_lock_free());
+  CHECK(((BlockHeader*)0)->next.is_lock_free());
+  CHECK(corrupt_.is_lock_free());
+
+  if (shared_meta()->cookie != kGlobalCookie) {
+    if (readonly) {
+      SetCorrupt();
+      return;
+    }
+
+    // This block is only executed when a completely new memory segment is
+    // being initialized. It's unshared and single-threaded...
+    volatile BlockHeader* const first_block =
+        reinterpret_cast<volatile BlockHeader*>(mem_base_ +
+                                                sizeof(SharedMetadata));
+    if (shared_meta()->cookie != 0 ||
+        shared_meta()->size != 0 ||
+        shared_meta()->version != 0 ||
+        shared_meta()->freeptr.load() != 0 ||
+        shared_meta()->flags.load() != 0 ||
+        shared_meta()->id != 0 ||
+        shared_meta()->name != 0 ||
+        shared_meta()->tailptr != 0 ||
+        shared_meta()->queue.cookie != 0 ||
+        shared_meta()->queue.next.load() != 0 ||
+        first_block->size != 0 ||
+        first_block->cookie != 0 ||
+        first_block->type_id != 0 ||
+        first_block->next != 0) {
+      // ...or something malicious has been playing with the metadata.
+      NOTREACHED();
+      SetCorrupt();
+    }
+
+    // This is still safe to do even if corruption has been detected.
+    shared_meta()->cookie = kGlobalCookie;
+    shared_meta()->size = mem_size_;
+    shared_meta()->page_size = mem_page_;
+    shared_meta()->version = kGlobalVersion;
+    shared_meta()->id = id;
+    shared_meta()->freeptr.store(sizeof(SharedMetadata));
+
+    // Set up the queue of iterable allocations.
+    shared_meta()->queue.size = sizeof(BlockHeader);
+    shared_meta()->queue.cookie = kBlockCookieQueue;
+    shared_meta()->queue.next.store(kReferenceQueue);
+    shared_meta()->tailptr.store(kReferenceQueue);
+
+    // Allocate space for the name so other processes can learn it.
+    if (!name.empty()) {
+      const size_t name_length = name.length() + 1;
+      shared_meta()->name = Allocate(name_length, 0);
+      char* name_cstr = GetAsObject<char>(shared_meta()->name, 0);
+      if (name_cstr)
+        memcpy(name_cstr, name.data(), name.length());
+    }
+  } else {
+    if (shared_meta()->size == 0 ||
+        shared_meta()->version == 0 ||
+        shared_meta()->freeptr.load() == 0 ||
+        shared_meta()->tailptr == 0 ||
+        shared_meta()->queue.cookie == 0 ||
+        shared_meta()->queue.next.load() == 0) {
+      SetCorrupt();
+    }
+    if (!readonly) {
+      // The allocator is attaching to a previously initialized segment of
+      // memory. Make sure the embedded data matches what has been passed.
+      if (shared_meta()->size != mem_size_ ||
+          shared_meta()->page_size != mem_page_) {
+        NOTREACHED();
+        SetCorrupt();
+      }
+    }
+  }
+}
+
+PersistentMemoryAllocator::~PersistentMemoryAllocator() {
+  // It's strictly forbidden to do any memory access here in case there is
+  // some issue with the underlying memory segment. The "Local" allocator
+  // makes use of this to allow deletion of the segment on the heap from
+  // within its destructor.
+}
+
+uint64_t PersistentMemoryAllocator::Id() const {
+  return shared_meta()->id;
+}
+
+const char* PersistentMemoryAllocator::Name() const {
+  Reference name_ref = shared_meta()->name;
+  const char* name_cstr = GetAsObject<char>(name_ref, 0);
+  if (!name_cstr)
+    return "";
+
+  size_t name_length = GetAllocSize(name_ref);
+  if (name_cstr[name_length - 1] != '\0') {
+    NOTREACHED();
+    SetCorrupt();
+    return "";
+  }
+
+  return name_cstr;
+}
+
+void PersistentMemoryAllocator::CreateTrackingHistograms(
+    base::StringPiece name) {
+  if (name.empty() || readonly_)
+    return;
+
+  std::string name_string = name.as_string();
+  DCHECK(!used_histogram_);
+  used_histogram_ = LinearHistogram::FactoryGet(
+      "UMA.PersistentAllocator." + name_string + ".UsedPct", 1, 101, 21,
+      HistogramBase::kUmaTargetedHistogramFlag);
+
+  DCHECK(!allocs_histogram_);
+  allocs_histogram_ = Histogram::FactoryGet(
+      "UMA.PersistentAllocator." + name_string + ".Allocs", 1, 10000, 50,
+      HistogramBase::kUmaTargetedHistogramFlag);
+}
+
+size_t PersistentMemoryAllocator::used() const {
+  return std::min(shared_meta()->freeptr.load(), mem_size_);
+}
+
+size_t PersistentMemoryAllocator::GetAllocSize(Reference ref) const {
+  const volatile BlockHeader* const block = GetBlock(ref, 0, 0, false, false);
+  if (!block)
+    return 0;
+  uint32_t size = block->size;
+  // Header was verified by GetBlock() but a malicious actor could change
+  // the value between there and here. Check it again.
+  if (size <= sizeof(BlockHeader) || ref + size > mem_size_) {
+    SetCorrupt();
+    return 0;
+  }
+  return size - sizeof(BlockHeader);
+}
+
+uint32_t PersistentMemoryAllocator::GetType(Reference ref) const {
+  const volatile BlockHeader* const block = GetBlock(ref, 0, 0, false, false);
+  if (!block)
+    return 0;
+  return block->type_id;
+}
+
+void PersistentMemoryAllocator::SetType(Reference ref, uint32_t type_id) {
+  DCHECK(!readonly_);
+  volatile BlockHeader* const block = GetBlock(ref, 0, 0, false, false);
+  if (!block)
+    return;
+  block->type_id = type_id;
+}
+
+PersistentMemoryAllocator::Reference PersistentMemoryAllocator::Allocate(
+    size_t req_size,
+    uint32_t type_id) {
+  Reference ref = AllocateImpl(req_size, type_id);
+  if (ref) {
+    // Success: Record this allocation in usage stats (if active).
+    if (allocs_histogram_)
+      allocs_histogram_->Add(static_cast<HistogramBase::Sample>(req_size));
+  } else {
+    // Failure: Record an allocation of zero for tracking.
+    if (allocs_histogram_)
+      allocs_histogram_->Add(0);
+  }
+  return ref;
+}
+
+PersistentMemoryAllocator::Reference PersistentMemoryAllocator::AllocateImpl(
+    size_t req_size,
+    uint32_t type_id) {
+  DCHECK(!readonly_);
+
+  // Validate req_size to ensure it won't overflow when used as 32-bit value.
+  if (req_size > kSegmentMaxSize - sizeof(BlockHeader)) {
+    NOTREACHED();
+    return kReferenceNull;
+  }
+
+  // Round up the requested size, plus header, to the next allocation alignment.
+  uint32_t size = static_cast<uint32_t>(req_size + sizeof(BlockHeader));
+  size = (size + (kAllocAlignment - 1)) & ~(kAllocAlignment - 1);
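+  // For example, with the 16-byte BlockHeader above and 8-byte alignment, a
+  // |req_size| of 10 becomes 10 + 16 = 26, which rounds up to 32.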
+  if (size <= sizeof(BlockHeader) || size > mem_page_) {
+    NOTREACHED();
+    return kReferenceNull;
+  }
+
+  // Get the current start of unallocated memory. Other threads may
+  // update this at any time and cause us to retry these operations.
+  // This value should be treated as "const" to avoid confusion through
+  // the code below but recognize that any failed compare-exchange operation
+  // involving it will cause it to be loaded with a more recent value. The
+  // code should either exit or restart the loop in that case.
+  /* const */ uint32_t freeptr = shared_meta()->freeptr.load();
+
+  // Allocation is lockless so we do all our calculation and then, if saving
+  // indicates a change has occurred since we started, scrap everything and
+  // start over.
+  for (;;) {
+    if (IsCorrupt())
+      return kReferenceNull;
+
+    if (freeptr + size > mem_size_) {
+      SetFlag(&shared_meta()->flags, kFlagFull);
+      return kReferenceNull;
+    }
+
+    // Get pointer to the "free" block. If something has been allocated since
+    // the load of freeptr above, it is still safe as nothing will be written
+    // to that location until after the compare-exchange below.
+    volatile BlockHeader* const block = GetBlock(freeptr, 0, 0, false, true);
+    if (!block) {
+      SetCorrupt();
+      return kReferenceNull;
+    }
+
+    // An allocation cannot cross page boundaries. If it would, create a
+    // "wasted" block and begin again at the top of the next page. This
+    // area could just be left empty but we fill in the block header just
+    // for completeness' sake.
+    const uint32_t page_free = mem_page_ - freeptr % mem_page_;
+    if (size > page_free) {
+      if (page_free <= sizeof(BlockHeader)) {
+        SetCorrupt();
+        return kReferenceNull;
+      }
+      const uint32_t new_freeptr = freeptr + page_free;
+      if (shared_meta()->freeptr.compare_exchange_strong(freeptr,
+                                                         new_freeptr)) {
+        block->size = page_free;
+        block->cookie = kBlockCookieWasted;
+      }
+      continue;
+    }
+
+    // Don't leave a slice at the end of a page too small for anything. This
+    // can result in an allocation up to two alignment-sizes greater than the
+    // minimum required by requested-size + header + alignment.
+    if (page_free - size < sizeof(BlockHeader) + kAllocAlignment)
+      size = page_free;
+
+    const uint32_t new_freeptr = freeptr + size;
+    if (new_freeptr > mem_size_) {
+      SetCorrupt();
+      return kReferenceNull;
+    }
+
+    // Save our work. Try again if another thread has completed an allocation
+    // while we were processing. A "weak" exchange would be permissible here
+    // because the code will just loop and try again but the above processing
+    // is significant so make the extra effort of a "strong" exchange.
+    if (!shared_meta()->freeptr.compare_exchange_strong(freeptr, new_freeptr))
+      continue;
+
+    // Given that all memory was zeroed before ever being given to an instance
+    // of this class and given that we only allocate in a monotonic fashion
+    // going forward, it must be that the newly allocated block is completely
+    // full of zeros. If we find anything in the block header that is NOT a
+    // zero then something must have previously run amok through memory,
+    // writing beyond the allocated space and into unallocated space.
+    if (block->size != 0 ||
+        block->cookie != kBlockCookieFree ||
+        block->type_id != 0 ||
+        block->next.load() != 0) {
+      SetCorrupt();
+      return kReferenceNull;
+    }
+
+    block->size = size;
+    block->cookie = kBlockCookieAllocated;
+    block->type_id = type_id;
+    return freeptr;
+  }
+}
+
+void PersistentMemoryAllocator::GetMemoryInfo(MemoryInfo* meminfo) const {
+  uint32_t remaining = std::max(mem_size_ - shared_meta()->freeptr.load(),
+                                (uint32_t)sizeof(BlockHeader));
+  meminfo->total = mem_size_;
+  meminfo->free = IsCorrupt() ? 0 : remaining - sizeof(BlockHeader);
+}
+
+void PersistentMemoryAllocator::MakeIterable(Reference ref) {
+  DCHECK(!readonly_);
+  if (IsCorrupt())
+    return;
+  volatile BlockHeader* block = GetBlock(ref, 0, 0, false, false);
+  if (!block)  // invalid reference
+    return;
+  if (block->next.load(std::memory_order_acquire) != 0)  // Already iterable.
+    return;
+  block->next.store(kReferenceQueue, std::memory_order_release);  // New tail.
+
+  // Try to add this block to the tail of the queue. May take multiple tries.
+  // If so, tail will be automatically updated with a more recent value during
+  // compare-exchange operations.
+  uint32_t tail = shared_meta()->tailptr.load(std::memory_order_acquire);
+  for (;;) {
+    // Acquire the current tail-pointer released by previous call to this
+    // method and validate it.
+    block = GetBlock(tail, 0, 0, true, false);
+    if (!block) {
+      SetCorrupt();
+      return;
+    }
+
+    // Try to insert the block at the tail of the queue. The tail node always
+    // has an existing value of kReferenceQueue; if that is somehow not the
+    // existing value then another thread has acted in the meantime. A "strong"
+    // exchange is necessary so the "else" block does not get executed when
+    // that is not actually the case (which can happen with a "weak" exchange).
+    uint32_t next = kReferenceQueue;  // Will get replaced with existing value.
+    if (block->next.compare_exchange_strong(next, ref,
+                                            std::memory_order_acq_rel,
+                                            std::memory_order_acquire)) {
+      // Update the tail pointer to the new offset. If the "else" clause did
+      // not exist, then this could be a simple Release_Store to set the new
+      // value but because it does, it's possible that other threads could add
+      // one or more nodes at the tail before reaching this point. We don't
+      // have to check the return value because it either operates correctly
+      // or the exact same operation has already been done (by the "else"
+      // clause) on some other thread.
+      shared_meta()->tailptr.compare_exchange_strong(tail, ref,
+                                                     std::memory_order_release,
+                                                     std::memory_order_relaxed);
+      return;
+    } else {
+      // In the unlikely case that a thread crashed or was killed between the
+      // update of "next" and the update of "tailptr", it is necessary to
+      // perform the operation that would have been done. There's no explicit
+      // check for crash/kill which means that this operation may also happen
+      // even when the other thread is in perfect working order which is what
+      // necessitates the CompareAndSwap above.
+      shared_meta()->tailptr.compare_exchange_strong(tail, next,
+                                                     std::memory_order_acq_rel,
+                                                     std::memory_order_acquire);
+    }
+  }
+}
+
+void PersistentMemoryAllocator::CreateIterator(Iterator* state,
+                                               Reference starting_after) const {
+  if (starting_after) {
+    // Ensure that the starting point is a valid, iterable block.
+    const volatile BlockHeader* block =
+        GetBlock(starting_after, 0, 0, false, false);
+    if (!block || !block->next.load()) {
+      NOTREACHED();
+      starting_after = kReferenceQueue;
+    }
+  } else {
+    // A zero beginning is really the Queue reference.
+    starting_after = kReferenceQueue;
+  }
+
+  state->last = starting_after;
+  state->niter = 0;
+}
+
+PersistentMemoryAllocator::Reference PersistentMemoryAllocator::GetNextIterable(
+    Iterator* state,
+    uint32_t* type_id) const {
+  const volatile BlockHeader* block = GetBlock(state->last, 0, 0, true, false);
+  if (!block)  // invalid iterator state
+    return kReferenceNull;
+
+  // The compiler and CPU can freely reorder all memory accesses on which
+  // there are no dependencies. It could, for example, move the load of
+  // "freeptr" above this point because there are no explicit dependencies
+  // between it and "next". If it did, however, then another block could
+  // be queued after that but before the following load meaning there is
+  // one more queued block than the future "detect loop by having more
+  // blocks that could fit before freeptr" will allow.
+  //
+  // By "acquiring" the "next" value here, it's synchronized to the enqueue
+  // of the node which in turn is synchronized to the allocation (which sets
+  // freeptr). Thus, the scenario above cannot happen.
+  uint32_t next = block->next.load(std::memory_order_acquire);
+  block = GetBlock(next, 0, 0, false, false);
+  if (!block)  // no next allocation in queue
+    return kReferenceNull;
+
+  // Memory corruption could cause a loop in the list. We need to detect
+  // that so as to not cause an infinite loop in the caller. We do this
+  // simply by making sure we don't iterate more than the absolute maximum
+  // number of allocations that could have been made. Callers are likely
+  // to loop multiple times before it is detected but at least it stops.
+  uint32_t freeptr = std::min(
+      shared_meta()->freeptr.load(std::memory_order_acquire),
+      mem_size_);
+  if (state->niter > freeptr / (sizeof(BlockHeader) + kAllocAlignment)) {
+    SetCorrupt();
+    return kReferenceNull;
+  }
+
+  state->last = next;
+  state->niter++;
+  *type_id = block->type_id;
+
+  return next;
+}
+
+// The "corrupted" state is held both locally and globally (shared). The
+// shared flag can't be trusted since a malicious actor could overwrite it.
+// Because corruption can be detected during read-only operations such as
+// iteration, this method may be called by other "const" methods. In this
+// case, it's safe to discard the constness and modify the local flag and
+// maybe even the shared flag if the underlying data isn't actually read-only.
+void PersistentMemoryAllocator::SetCorrupt() const {
+  LOG(ERROR) << "Corruption detected in shared-memory segment.";
+  const_cast<std::atomic<bool>*>(&corrupt_)->store(true);
+  if (!readonly_) {
+    SetFlag(const_cast<volatile std::atomic<uint32_t>*>(&shared_meta()->flags),
+            kFlagCorrupt);
+  }
+}
+
+bool PersistentMemoryAllocator::IsCorrupt() const {
+  if (corrupt_.load() || CheckFlag(&shared_meta()->flags, kFlagCorrupt)) {
+    SetCorrupt();  // Make sure all indicators are set.
+    return true;
+  }
+  return false;
+}
+
+bool PersistentMemoryAllocator::IsFull() const {
+  return CheckFlag(&shared_meta()->flags, kFlagFull);
+}
+
+// Dereference a block |ref| and ensure that it's valid for the desired
+// |type_id| and |size|. |special| indicates that we may try to access block
+// headers not available to callers but still accessed by this module. By
+// having internal dereferences go through this same function, the allocator
+// is hardened against corruption.
+const volatile PersistentMemoryAllocator::BlockHeader*
+PersistentMemoryAllocator::GetBlock(Reference ref, uint32_t type_id,
+                                    uint32_t size, bool queue_ok,
+                                    bool free_ok) const {
+  // Validation of parameters.
+  if (ref % kAllocAlignment != 0)
+    return nullptr;
+  if (ref < (queue_ok ? kReferenceQueue : sizeof(SharedMetadata)))
+    return nullptr;
+  size += sizeof(BlockHeader);
+  if (ref + size > mem_size_)
+    return nullptr;
+
+  // Validation of referenced block-header.
+  if (!free_ok) {
+    uint32_t freeptr = shared_meta()->freeptr.load();
+    if (ref + size > freeptr)
+      return nullptr;
+    const volatile BlockHeader* const block =
+        reinterpret_cast<volatile BlockHeader*>(mem_base_ + ref);
+    if (block->size < size)
+      return nullptr;
+    if (ref != kReferenceQueue && block->cookie != kBlockCookieAllocated)
+      return nullptr;
+    if (type_id != 0 && block->type_id != type_id)
+      return nullptr;
+  }
+
+  // Return pointer to block data.
+  return reinterpret_cast<const volatile BlockHeader*>(mem_base_ + ref);
+}
+
+const volatile void* PersistentMemoryAllocator::GetBlockData(
+    Reference ref,
+    uint32_t type_id,
+    uint32_t size) const {
+  DCHECK(size > 0);
+  const volatile BlockHeader* block =
+      GetBlock(ref, type_id, size, false, false);
+  if (!block)
+    return nullptr;
+  return reinterpret_cast<const volatile char*>(block) + sizeof(BlockHeader);
+}
+
+void PersistentMemoryAllocator::UpdateTrackingHistograms() {
+  DCHECK(!readonly_);
+  if (used_histogram_) {
+    MemoryInfo meminfo;
+    GetMemoryInfo(&meminfo);
+    HistogramBase::Sample used_percent = static_cast<HistogramBase::Sample>(
+        ((meminfo.total - meminfo.free) * 100ULL / meminfo.total));
+    used_histogram_->Add(used_percent);
+  }
+}
+
+
+//----- LocalPersistentMemoryAllocator -----------------------------------------
+
+LocalPersistentMemoryAllocator::LocalPersistentMemoryAllocator(
+    size_t size,
+    uint64_t id,
+    base::StringPiece name)
+    : PersistentMemoryAllocator(memset(new char[size], 0, size),
+                                size, 0, id, name, false) {}
+
+LocalPersistentMemoryAllocator::~LocalPersistentMemoryAllocator() {
+  delete [] mem_base_;
+}
+
+
+//----- SharedPersistentMemoryAllocator ----------------------------------------
+
+SharedPersistentMemoryAllocator::SharedPersistentMemoryAllocator(
+    scoped_ptr<SharedMemory> memory,
+    uint64_t id,
+    base::StringPiece name,
+    bool read_only)
+    : PersistentMemoryAllocator(static_cast<uint8_t*>(memory->memory()),
+                                memory->mapped_size(), 0, id, name, read_only),
+      shared_memory_(std::move(memory)) {}
+
+SharedPersistentMemoryAllocator::~SharedPersistentMemoryAllocator() {}
+
+// static
+bool SharedPersistentMemoryAllocator::IsSharedMemoryAcceptable(
+    const SharedMemory& memory) {
+  return IsMemoryAcceptable(memory.memory(), memory.mapped_size(), 0, true);
+}
+
+
+//----- FilePersistentMemoryAllocator ------------------------------------------
+
+FilePersistentMemoryAllocator::FilePersistentMemoryAllocator(
+    scoped_ptr<MemoryMappedFile> file,
+    uint64_t id,
+    base::StringPiece name)
+    : PersistentMemoryAllocator(const_cast<uint8_t*>(file->data()),
+                                file->length(), 0, id, name, true),
+      mapped_file_(std::move(file)) {}
+
+FilePersistentMemoryAllocator::~FilePersistentMemoryAllocator() {}
+
+// static
+bool FilePersistentMemoryAllocator::IsFileAcceptable(
+    const MemoryMappedFile& file) {
+  return IsMemoryAcceptable(file.data(), file.length(), 0, true);
+}
+
+}  // namespace base
diff --git a/base/metrics/persistent_memory_allocator.h b/base/metrics/persistent_memory_allocator.h
new file mode 100644
index 0000000..f75b1c0
--- /dev/null
+++ b/base/metrics/persistent_memory_allocator.h
@@ -0,0 +1,367 @@
+// Copyright (c) 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_METRICS_PERSISTENT_MEMORY_ALLOCATOR_H_
+#define BASE_METRICS_PERSISTENT_MEMORY_ALLOCATOR_H_
+
+#include <stdint.h>
+#include <atomic>
+#include <type_traits>
+
+#include "base/atomicops.h"
+#include "base/base_export.h"
+#include "base/gtest_prod_util.h"
+#include "base/macros.h"
+#include "base/memory/scoped_ptr.h"
+#include "base/strings/string_piece.h"
+
+namespace base {
+
+class HistogramBase;
+class MemoryMappedFile;
+class SharedMemory;
+
+// Simple allocator for pieces of a memory block that may be persistent
+// to some storage or shared across multiple processes. This class resides
+// under base/metrics because it was written for that purpose. It is,
+// however, fully general-purpose and can be freely moved to base/memory
+// if other uses are found.
+//
+// This class provides for thread-secure (i.e. safe against other threads
+// or processes that may be compromised and thus have malicious intent)
+// allocation of memory within a designated block and also a mechanism by
+// which other threads can learn of these allocations.
+//
+// There is (currently) no way to release an allocated block of data because
+// doing so would risk invalidating pointers held by other processes and
+// greatly complicate the allocation algorithm.
+//
+// Construction of this object can accept new, clean (i.e. zeroed) memory
+// or previously initialized memory. In the first case, construction must
+// be allowed to complete before letting other allocators attach to the same
+// segment. In other words, don't share the segment until at least one
+// allocator has been attached to it.
+//
+// Note that memory not in active use is not accessed so it is possible to
+// use virtual memory, including memory-mapped files, as backing storage with
+// the OS "pinning" new (zeroed) physical RAM pages only as they are needed.
+class BASE_EXPORT PersistentMemoryAllocator {
+ public:
+  typedef uint32_t Reference;
+
+  // Internal state information when iterating over memory allocations.
+  class Iterator {
+   public:
+    Iterator() : last(0) {}
+
+    bool operator==(const Iterator& rhs) const { return last == rhs.last; }
+    bool operator!=(const Iterator& rhs) const { return last != rhs.last; }
+
+    void clear() { last = 0; }
+    bool is_clear() const { return last == 0; }
+
+   private:
+    friend class PersistentMemoryAllocator;
+
+    Reference last;
+    uint32_t niter;
+  };
+
+  // Returned information about the internal state of the heap.
+  struct MemoryInfo {
+    size_t total;
+    size_t free;
+  };
+
+  enum : uint32_t {
+    kTypeIdAny = 0  // Match any type-id inside GetAsObject().
+  };
+
+  // The allocator operates on any arbitrary block of memory. Creation and
+  // persisting or sharing of that block with another process is the
+  // responsibility of the caller. The allocator needs to know only the
+  // block's |base| address, the total |size| of the block, and any internal
+  // |page| size (zero if not paged) across which allocations should not span.
+  // The |id| is an arbitrary value the caller can use to identify a
+  // particular memory segment. It will only be loaded during the initial
+  // creation of the segment and can be checked by the caller for consistency.
+  // The |name|, if provided, is used to distinguish histograms for this
+  // allocator. Only the primary owner of the segment should define this value;
+  // other processes can learn it from the shared state. If the underlying
+  // memory is |readonly| then no changes will be made to it. The resulting
+  // object should be stored as a "const" pointer.
+  //
+  // PersistentMemoryAllocator does NOT take ownership of the memory block.
+  // The caller must manage it and ensure it stays available throughout the
+  // lifetime of this object.
+  //
+  // Memory segments for sharing must have had an allocator attached to them
+  // before actually being shared. If the memory segment was just created, it
+  // should be zeroed before being passed here. If it was an existing segment,
+  // the values here will be compared to copies stored in the shared segment
+  // as a guard against corruption.
+  //
+  // Make sure that the memory segment is acceptable (see IsMemoryAcceptable()
+  // method below) before construction if the definition of the segment can
+  // vary in any way at run-time. Invalid memory segments will cause a crash.
+  PersistentMemoryAllocator(void* base, size_t size, size_t page_size,
+                            uint64_t id, base::StringPiece name,
+                            bool readonly);
+  virtual ~PersistentMemoryAllocator();
+
+  // Check if memory segment is acceptable for creation of an Allocator. This
+  // doesn't do any analysis of the data and so doesn't guarantee that the
+  // contents are valid, just that the parameters won't cause the program to
+  // abort. The IsCorrupt() method will report detection of data problems
+  // found during construction and general operation.
+  static bool IsMemoryAcceptable(const void* data, size_t size,
+                                 size_t page_size, bool readonly);
+
+  // Get the internal identifier for this persistent memory segment.
+  uint64_t Id() const;
+
+  // Get the internal name of this allocator (possibly an empty string).
+  const char* Name() const;
+
+  // Is this segment open only for read?
+  bool IsReadonly() { return readonly_; }
+
+  // Create internal histograms for tracking memory use and allocation sizes
+  // for allocator of |name| (which can simply be the result of Name()). This
+  // is done separately from construction for situations such as when the
+  // histograms will be backed by memory provided by this very allocator.
+  //
+  // IMPORTANT: Callers must update tools/metrics/histograms/histograms.xml
+  // with the following histograms:
+  //    UMA.PersistentAllocator.name.Allocs
+  //    UMA.PersistentAllocator.name.UsedPct
+  void CreateTrackingHistograms(base::StringPiece name);
+
+  // Direct access to underlying memory segment. If the segment is shared
+  // across threads or processes, reading data through these values does
+  // not guarantee consistency. Use with care. Do not write.
+  const void* data() const { return const_cast<const char*>(mem_base_); }
+  size_t length() const { return mem_size_; }
+  size_t used() const;
+
+  // Get an object referenced by a |ref|. For safety reasons, the |type_id|
+  // code and size-of(|T|) are compared to ensure the reference is valid
+  // and cannot return an object outside of the memory segment. A |type_id| of
+  // kTypeIdAny (zero) will match any though the size is still checked. NULL is
+  // returned if any problem is detected, such as corrupted storage or incorrect
+  // parameters. Callers MUST check that the returned value is not-null EVERY
+  // TIME before accessing it or risk crashing! Once dereferenced, the pointer
+  // is safe to reuse forever.
+  //
+  // NOTE: Though this method will guarantee that an object of the specified
+  // type can be accessed without going outside the bounds of the memory
+  // segment, it makes no guarantees of the validity of the data within the
+  // object itself. If it is expected that the contents of the segment could
+  // be compromised with malicious intent, the object must be hardened as well.
+  //
+  // Though the persistent data may be "volatile" if it is shared with
+  // other processes, such is not necessarily the case. The internal
+  // "volatile" designation is discarded so as to not propagate the viral
+  // nature of that keyword to the caller. It can add it back, if necessary,
+  // based on knowledge of how the allocator is being used.
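+  //
+  // For example (a sketch; |kMyTypeId| is hypothetical):
+  //
+  //   MyStruct* obj = allocator.GetAsObject<MyStruct>(ref, kMyTypeId);
+  //   if (!obj)
+  //     return;  // Invalid reference, wrong type/size, or corrupt storage.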
+  template <typename T>
+  T* GetAsObject(Reference ref, uint32_t type_id) {
+    static_assert(!std::is_polymorphic<T>::value, "no polymorphic objects");
+    return const_cast<T*>(
+        reinterpret_cast<volatile T*>(GetBlockData(ref, type_id, sizeof(T))));
+  }
+  template <typename T>
+  const T* GetAsObject(Reference ref, uint32_t type_id) const {
+    static_assert(!std::is_polymorphic<T>::value, "no polymorphic objects");
+    return const_cast<const T*>(
+        reinterpret_cast<const volatile T*>(GetBlockData(
+            ref, type_id, sizeof(T))));
+  }
+
+  // Get the number of bytes allocated to a block. This is useful when storing
+  // arrays in order to validate the ending boundary. The returned value will
+  // include any padding added to achieve the required alignment and so could
+  // be larger than given in the original Allocate() request.
+  size_t GetAllocSize(Reference ref) const;
+
+  // Access the internal "type" of an object. This generally isn't necessary
+  // but can be used to "clear" the type and so effectively mark it as deleted
+  // even though the memory stays valid and allocated.
+  uint32_t GetType(Reference ref) const;
+  void SetType(Reference ref, uint32_t type_id);
+
+  // Reserve space in the memory segment of the desired |size| and |type_id|.
+  // A return value of zero indicates the allocation failed, otherwise the
+  // returned reference can be used by any process to get a real pointer via
+  // the GetAsObject() call.
+  Reference Allocate(size_t size, uint32_t type_id);
+
+  // Allocated objects can be added to an internal list that can then be
+  // iterated over by other processes. If an allocated object can be found
+  // another way, such as by having its reference within a different object
+  // that will be made iterable, then this call is not necessary. This always
+  // succeeds unless corruption is detected; check IsCorrupted() to find out.
+  // Once an object is made iterable, its position in iteration can never
+  // change; new iterable objects will always be added after it in the series.
+  void MakeIterable(Reference ref);
+
+  // Get the information about the amount of free space in the allocator. The
+  // amount of free space should be treated as approximate due to extras from
+  // alignment and metadata. Concurrent allocations from other threads will
+  // also make the true amount less than what is reported.
+  void GetMemoryInfo(MemoryInfo* meminfo) const;
+
+  // Iterating uses a |state| structure (initialized by CreateIterator) and
+  // returns both the reference to the object as well as the |type_id| of
+  // that object. A zero return value indicates there are currently no more
+  // objects to be found but future attempts can be made without having to
+  // reset the iterator to "first". Creating an iterator |starting_after|
+  // a known iterable object allows "resume" from that point with the next
+  // call to GetNextIterable returning the object after it.
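+  //
+  // A typical loop looks like this (a sketch; |ProcessObject| is a
+  // hypothetical callback):
+  //
+  //   PersistentMemoryAllocator::Iterator iter;
+  //   uint32_t type_id;
+  //   Reference ref;
+  //   allocator.CreateIterator(&iter);
+  //   while ((ref = allocator.GetNextIterable(&iter, &type_id)) != 0)
+  //     ProcessObject(ref, type_id);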
+  void CreateIterator(Iterator* state) const { CreateIterator(state, 0); }
+  void CreateIterator(Iterator* state, Reference starting_after) const;
+  Reference GetNextIterable(Iterator* state, uint32_t* type_id) const;
+
+  // If there is some indication that the memory has become corrupted,
+  // calling this will attempt to prevent further damage by indicating to
+  // all processes that something is not as expected.
+  void SetCorrupt() const;
+
+  // This can be called to determine if corruption has been detected in the
+  // segment, possibly by a malicious actor. Once detected, future allocations
+  // will fail and iteration may not locate all objects.
+  bool IsCorrupt() const;
+
+  // Flag set if an allocation has failed because the memory segment was full.
+  bool IsFull() const;
+
+  // Update those "tracking" histograms which do not get updates during regular
+  // operation, such as how much memory is currently used. This should be
+  // called before such information is to be displayed or uploaded.
+  void UpdateTrackingHistograms();
+
+ protected:
+  volatile char* const mem_base_;  // Memory base. (char so sizeof guaranteed 1)
+  const uint32_t mem_size_;        // Size of entire memory segment.
+  const uint32_t mem_page_;        // Page size allocations shouldn't cross.
+
+ private:
+  struct SharedMetadata;
+  struct BlockHeader;
+  static const uint32_t kAllocAlignment;
+  static const Reference kReferenceQueue;
+  static const Reference kReferenceNull;
+
+  // The shared metadata is always located at the top of the memory segment.
+  // These convenience functions eliminate constant casting of the base
+  // pointer within the code.
+  const SharedMetadata* shared_meta() const {
+    return reinterpret_cast<const SharedMetadata*>(
+        const_cast<const char*>(mem_base_));
+  }
+  SharedMetadata* shared_meta() {
+    return reinterpret_cast<SharedMetadata*>(const_cast<char*>(mem_base_));
+  }
+
+  // Actual method for doing the allocation.
+  Reference AllocateImpl(size_t size, uint32_t type_id);
+
+  // Get the block header associated with a specific reference.
+  const volatile BlockHeader* GetBlock(Reference ref, uint32_t type_id,
+                                       uint32_t size, bool queue_ok,
+                                       bool free_ok) const;
+  volatile BlockHeader* GetBlock(Reference ref, uint32_t type_id, uint32_t size,
+                                 bool queue_ok, bool free_ok) {
+      return const_cast<volatile BlockHeader*>(
+          const_cast<const PersistentMemoryAllocator*>(this)->GetBlock(
+              ref, type_id, size, queue_ok, free_ok));
+  }
+
+  // Get the actual data within a block associated with a specific reference.
+  const volatile void* GetBlockData(Reference ref, uint32_t type_id,
+                                    uint32_t size) const;
+  volatile void* GetBlockData(Reference ref, uint32_t type_id,
+                              uint32_t size) {
+      return const_cast<volatile void*>(
+          const_cast<const PersistentMemoryAllocator*>(this)->GetBlockData(
+              ref, type_id, size));
+  }
+
+  const bool readonly_;              // Indicates access to read-only memory.
+  std::atomic<bool> corrupt_;        // Local version of "corrupted" flag.
+
+  HistogramBase* allocs_histogram_;  // Histogram recording allocs.
+  HistogramBase* used_histogram_;    // Histogram recording used space.
+
+  friend class PersistentMemoryAllocatorTest;
+  FRIEND_TEST_ALL_PREFIXES(PersistentMemoryAllocatorTest, AllocateAndIterate);
+  DISALLOW_COPY_AND_ASSIGN(PersistentMemoryAllocator);
+};
+
+
+// This allocator uses a local memory block it allocates from the general
+// heap. It is generally used when some kind of "death rattle" handler will
+// save the contents to persistent storage during process shutdown. It is
+// also useful for testing.
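+//
+// For example (a sketch; the id and name are arbitrary):
+//
+//   LocalPersistentMemoryAllocator allocator(1 << 20, /*id=*/42, "MyAlloc");
+//   PersistentMemoryAllocator::Reference ref = allocator.Allocate(100, 1);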
+class BASE_EXPORT LocalPersistentMemoryAllocator
+    : public PersistentMemoryAllocator {
+ public:
+  LocalPersistentMemoryAllocator(size_t size, uint64_t id,
+                                 base::StringPiece name);
+  ~LocalPersistentMemoryAllocator() override;
+
+ private:
+  DISALLOW_COPY_AND_ASSIGN(LocalPersistentMemoryAllocator);
+};
+
+
+// This allocator takes a shared-memory object and performs allocation from
+// it. The memory must be previously mapped via Map() or MapAt(). The allocator
+// takes ownership of the memory object.
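+//
+// For example (a sketch; error handling omitted):
+//
+//   scoped_ptr<SharedMemory> shmem(new SharedMemory());
+//   shmem->CreateAndMapAnonymous(1 << 20);
+//   SharedPersistentMemoryAllocator allocator(std::move(shmem), /*id=*/0, "",
+//                                             /*read_only=*/false);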
+class BASE_EXPORT SharedPersistentMemoryAllocator
+    : public PersistentMemoryAllocator {
+ public:
+  SharedPersistentMemoryAllocator(scoped_ptr<SharedMemory> memory, uint64_t id,
+                                  base::StringPiece name, bool read_only);
+  ~SharedPersistentMemoryAllocator() override;
+
+  SharedMemory* shared_memory() { return shared_memory_.get(); }
+
+  // Ensure that the memory isn't so invalid that it would crash when passed
+  // to the allocator. This doesn't guarantee the data is valid, just that it
+  // won't cause the program to abort. The existing IsCorrupt() call will handle
+  // the rest.
+  static bool IsSharedMemoryAcceptable(const SharedMemory& memory);
+
+ private:
+  scoped_ptr<SharedMemory> shared_memory_;
+
+  DISALLOW_COPY_AND_ASSIGN(SharedPersistentMemoryAllocator);
+};
+
+
+// This allocator takes a memory-mapped file object and performs allocation
+// from it. The allocator takes ownership of the file object. Only read access
+// is provided due to limitations of the MemoryMappedFile class.
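+//
+// For example (a sketch; |path| names a file previously written from another
+// allocator's data, and error handling is omitted):
+//
+//   scoped_ptr<MemoryMappedFile> mmfile(new MemoryMappedFile());
+//   mmfile->Initialize(path);
+//   if (!FilePersistentMemoryAllocator::IsFileAcceptable(*mmfile))
+//     return;
+//   FilePersistentMemoryAllocator allocator(std::move(mmfile), 0, "");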
+class BASE_EXPORT FilePersistentMemoryAllocator
+    : public PersistentMemoryAllocator {
+ public:
+  FilePersistentMemoryAllocator(scoped_ptr<MemoryMappedFile> file, uint64_t id,
+                                base::StringPiece name);
+  ~FilePersistentMemoryAllocator() override;
+
+  // Ensure that the file isn't so invalid that it would crash when passed
+  // to the allocator. This doesn't guarantee the file is valid, just that it
+  // won't cause the program to abort. The existing IsCorrupt() call will handle
+  // the rest.
+  static bool IsFileAcceptable(const MemoryMappedFile& file);
+
+ private:
+  scoped_ptr<MemoryMappedFile> mapped_file_;
+
+  DISALLOW_COPY_AND_ASSIGN(FilePersistentMemoryAllocator);
+};
+
+}  // namespace base
+
+#endif  // BASE_METRICS_PERSISTENT_MEMORY_ALLOCATOR_H_
diff --git a/base/metrics/persistent_memory_allocator_unittest.cc b/base/metrics/persistent_memory_allocator_unittest.cc
new file mode 100644
index 0000000..c79d0c1
--- /dev/null
+++ b/base/metrics/persistent_memory_allocator_unittest.cc
@@ -0,0 +1,620 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/metrics/persistent_memory_allocator.h"
+
+#include "base/files/file.h"
+#include "base/files/file_util.h"
+#include "base/files/memory_mapped_file.h"
+#include "base/files/scoped_temp_dir.h"
+#include "base/memory/scoped_ptr.h"
+#include "base/memory/shared_memory.h"
+#include "base/metrics/histogram.h"
+#include "base/rand_util.h"
+#include "base/strings/safe_sprintf.h"
+#include "base/threading/simple_thread.h"
+#include "testing/gmock/include/gmock/gmock.h"
+
+namespace {
+
+const uint32_t TEST_MEMORY_SIZE = 1 << 20;   // 1 MiB
+const uint32_t TEST_MEMORY_PAGE = 64 << 10;  // 64 KiB
+const uint32_t TEST_ID = 12345;
+const char TEST_NAME[] = "TestAllocator";
+
+}  // namespace
+
+namespace base {
+
+typedef PersistentMemoryAllocator::Reference Reference;
+
+class PersistentMemoryAllocatorTest : public testing::Test {
+ public:
+  // This can't be statically initialized because its value isn't defined
+  // in the PersistentMemoryAllocator header file. Instead, it's simply set
+  // in the constructor.
+  uint32_t kAllocAlignment;
+
+  struct TestObject1 {
+    int onething;
+    char oranother;
+  };
+
+  struct TestObject2 {
+    int thiis;
+    long that;
+    float andthe;
+    char other;
+    double thing;
+  };
+
+  PersistentMemoryAllocatorTest() {
+    kAllocAlignment = PersistentMemoryAllocator::kAllocAlignment;
+    mem_segment_.reset(new char[TEST_MEMORY_SIZE]);
+  }
+
+  void SetUp() override {
+    allocator_.reset();
+    ::memset(mem_segment_.get(), 0, TEST_MEMORY_SIZE);
+    allocator_.reset(new PersistentMemoryAllocator(
+        mem_segment_.get(), TEST_MEMORY_SIZE, TEST_MEMORY_PAGE,
+        TEST_ID, TEST_NAME, false));
+    allocator_->CreateTrackingHistograms(allocator_->Name());
+  }
+
+  void TearDown() override {
+    allocator_.reset();
+  }
+
+  unsigned CountIterables() {
+    PersistentMemoryAllocator::Iterator iter;
+    uint32_t type;
+    unsigned count = 0;
+    for (allocator_->CreateIterator(&iter);
+         allocator_->GetNextIterable(&iter, &type) != 0;) {
+      count++;
+    }
+    return count;
+  }
+
+ protected:
+  scoped_ptr<char[]> mem_segment_;
+  scoped_ptr<PersistentMemoryAllocator> allocator_;
+};
+
+TEST_F(PersistentMemoryAllocatorTest, AllocateAndIterate) {
+  std::string base_name(TEST_NAME);
+  EXPECT_EQ(TEST_ID, allocator_->Id());
+  EXPECT_TRUE(allocator_->used_histogram_);
+  EXPECT_EQ("UMA.PersistentAllocator." + base_name + ".UsedPct",
+            allocator_->used_histogram_->histogram_name());
+  EXPECT_TRUE(allocator_->allocs_histogram_);
+  EXPECT_EQ("UMA.PersistentAllocator." + base_name + ".Allocs",
+            allocator_->allocs_histogram_->histogram_name());
+
+  // Get base memory info for later comparison.
+  PersistentMemoryAllocator::MemoryInfo meminfo0;
+  allocator_->GetMemoryInfo(&meminfo0);
+  EXPECT_EQ(TEST_MEMORY_SIZE, meminfo0.total);
+  EXPECT_GT(meminfo0.total, meminfo0.free);
+
+  // Validate allocation of test object and make sure it can be referenced
+  // and all metadata looks correct.
+  Reference block1 = allocator_->Allocate(sizeof(TestObject1), 1);
+  EXPECT_NE(0U, block1);
+  EXPECT_NE(nullptr, allocator_->GetAsObject<TestObject1>(block1, 1));
+  EXPECT_EQ(nullptr, allocator_->GetAsObject<TestObject2>(block1, 1));
+  EXPECT_LE(sizeof(TestObject1), allocator_->GetAllocSize(block1));
+  EXPECT_GT(sizeof(TestObject1) + kAllocAlignment,
+            allocator_->GetAllocSize(block1));
+  PersistentMemoryAllocator::MemoryInfo meminfo1;
+  allocator_->GetMemoryInfo(&meminfo1);
+  EXPECT_EQ(meminfo0.total, meminfo1.total);
+  EXPECT_GT(meminfo0.free, meminfo1.free);
+
+  // Ensure that the test-object can be made iterable.
+  PersistentMemoryAllocator::Iterator iter;
+  uint32_t type;
+  allocator_->CreateIterator(&iter);
+  EXPECT_EQ(0U, allocator_->GetNextIterable(&iter, &type));
+  allocator_->MakeIterable(block1);
+  EXPECT_EQ(block1, allocator_->GetNextIterable(&iter, &type));
+  EXPECT_EQ(1U, type);
+  EXPECT_EQ(0U, allocator_->GetNextIterable(&iter, &type));
+
+  // Create second test-object and ensure everything is good and it cannot
+  // be confused with test-object of another type.
+  Reference block2 = allocator_->Allocate(sizeof(TestObject2), 2);
+  EXPECT_NE(0U, block2);
+  EXPECT_NE(nullptr, allocator_->GetAsObject<TestObject2>(block2, 2));
+  EXPECT_EQ(nullptr, allocator_->GetAsObject<TestObject2>(block2, 1));
+  EXPECT_LE(sizeof(TestObject2), allocator_->GetAllocSize(block2));
+  EXPECT_GT(sizeof(TestObject2) + kAllocAlignment,
+            allocator_->GetAllocSize(block2));
+  PersistentMemoryAllocator::MemoryInfo meminfo2;
+  allocator_->GetMemoryInfo(&meminfo2);
+  EXPECT_EQ(meminfo1.total, meminfo2.total);
+  EXPECT_GT(meminfo1.free, meminfo2.free);
+
+  // Ensure that second test-object can also be made iterable.
+  allocator_->MakeIterable(block2);
+  EXPECT_EQ(block2, allocator_->GetNextIterable(&iter, &type));
+  EXPECT_EQ(2U, type);
+  EXPECT_EQ(0U, allocator_->GetNextIterable(&iter, &type));
+
+  // Check that iteration can begin after an arbitrary location.
+  allocator_->CreateIterator(&iter, block1);
+  EXPECT_EQ(block2, allocator_->GetNextIterable(&iter, &type));
+  EXPECT_EQ(0U, allocator_->GetNextIterable(&iter, &type));
+
+  // Ensure nothing has gone noticeably wrong.
+  EXPECT_FALSE(allocator_->IsFull());
+  EXPECT_FALSE(allocator_->IsCorrupt());
+
+  // Check the internal histogram record of used memory.
+  allocator_->UpdateTrackingHistograms();
+  scoped_ptr<HistogramSamples> used_samples(
+      allocator_->used_histogram_->SnapshotSamples());
+  EXPECT_TRUE(used_samples.get());
+  EXPECT_EQ(1, used_samples->TotalCount());
+
+  // Check the internal histogram record of allocation requests.
+  scoped_ptr<HistogramSamples> allocs_samples(
+      allocator_->allocs_histogram_->SnapshotSamples());
+  EXPECT_TRUE(allocs_samples.get());
+  EXPECT_EQ(2, allocs_samples->TotalCount());
+  EXPECT_EQ(0, allocs_samples->GetCount(0));
+  EXPECT_EQ(1, allocs_samples->GetCount(sizeof(TestObject1)));
+  EXPECT_EQ(1, allocs_samples->GetCount(sizeof(TestObject2)));
+#if !DCHECK_IS_ON()  // DCHECK builds will die at a NOTREACHED().
+  EXPECT_EQ(0U, allocator_->Allocate(TEST_MEMORY_SIZE + 1, 0));
+  allocs_samples = allocator_->allocs_histogram_->SnapshotSamples();
+  EXPECT_EQ(3, allocs_samples->TotalCount());
+  EXPECT_EQ(1, allocs_samples->GetCount(0));
+#endif
+
+  // Check that an object's type can be changed.
+  EXPECT_EQ(2U, allocator_->GetType(block2));
+  allocator_->SetType(block2, 3);
+  EXPECT_EQ(3U, allocator_->GetType(block2));
+  allocator_->SetType(block2, 2);
+  EXPECT_EQ(2U, allocator_->GetType(block2));
+
+  // Create second allocator (read/write) using the same memory segment.
+  scoped_ptr<PersistentMemoryAllocator> allocator2(
+      new PersistentMemoryAllocator(
+          mem_segment_.get(), TEST_MEMORY_SIZE, TEST_MEMORY_PAGE, 0, "",
+          false));
+  EXPECT_EQ(TEST_ID, allocator2->Id());
+  EXPECT_FALSE(allocator2->used_histogram_);
+  EXPECT_FALSE(allocator2->allocs_histogram_);
+  EXPECT_NE(allocator2->allocs_histogram_, allocator_->allocs_histogram_);
+
+  // Ensure that iteration and access through second allocator works.
+  allocator2->CreateIterator(&iter);
+  EXPECT_EQ(block1, allocator2->GetNextIterable(&iter, &type));
+  EXPECT_EQ(block2, allocator2->GetNextIterable(&iter, &type));
+  EXPECT_EQ(0U, allocator2->GetNextIterable(&iter, &type));
+  EXPECT_NE(nullptr, allocator2->GetAsObject<TestObject1>(block1, 1));
+  EXPECT_NE(nullptr, allocator2->GetAsObject<TestObject2>(block2, 2));
+
+  // Create a third allocator (read-only) using the same memory segment.
+  scoped_ptr<const PersistentMemoryAllocator> allocator3(
+      new PersistentMemoryAllocator(
+          mem_segment_.get(), TEST_MEMORY_SIZE, TEST_MEMORY_PAGE, 0, "", true));
+  EXPECT_EQ(TEST_ID, allocator3->Id());
+  EXPECT_FALSE(allocator3->used_histogram_);
+  EXPECT_FALSE(allocator3->allocs_histogram_);
+
+  // Ensure that iteration and access through third allocator works.
+  allocator3->CreateIterator(&iter);
+  EXPECT_EQ(block1, allocator3->GetNextIterable(&iter, &type));
+  EXPECT_EQ(block2, allocator3->GetNextIterable(&iter, &type));
+  EXPECT_EQ(0U, allocator3->GetNextIterable(&iter, &type));
+  EXPECT_NE(nullptr, allocator3->GetAsObject<TestObject1>(block1, 1));
+  EXPECT_NE(nullptr, allocator3->GetAsObject<TestObject2>(block2, 2));
+}
+
+TEST_F(PersistentMemoryAllocatorTest, PageTest) {
+  // This allocation will go into the first memory page.
+  Reference block1 = allocator_->Allocate(TEST_MEMORY_PAGE / 2, 1);
+  EXPECT_LT(0U, block1);
+  EXPECT_GT(TEST_MEMORY_PAGE, block1);
+
+  // This allocation won't fit in same page as previous block.
+  Reference block2 =
+      allocator_->Allocate(TEST_MEMORY_PAGE - 2 * kAllocAlignment, 2);
+  EXPECT_EQ(TEST_MEMORY_PAGE, block2);
+
+  // This allocation will also require a new page.
+  Reference block3 = allocator_->Allocate(2 * kAllocAlignment + 99, 3);
+  EXPECT_EQ(2U * TEST_MEMORY_PAGE, block3);
+}
+
+// A simple thread that takes an allocator and repeatedly allocates random-
+// sized chunks from it until no more can be done.
+class AllocatorThread : public SimpleThread {
+ public:
+  AllocatorThread(const std::string& name,
+                  void* base,
+                  uint32_t size,
+                  uint32_t page_size)
+      : SimpleThread(name, Options()),
+        count_(0),
+        iterable_(0),
+        allocator_(base, size, page_size, 0, std::string(), false) {}
+
+  void Run() override {
+    for (;;) {
+      uint32_t size = RandInt(1, 99);
+      uint32_t type = RandInt(100, 999);
+      Reference block = allocator_.Allocate(size, type);
+      if (!block)
+        break;
+
+      count_++;
+      if (RandInt(0, 1)) {
+        allocator_.MakeIterable(block);
+        iterable_++;
+      }
+    }
+  }
+
+  unsigned iterable() { return iterable_; }
+  unsigned count() { return count_; }
+
+ private:
+  unsigned count_;
+  unsigned iterable_;
+  PersistentMemoryAllocator allocator_;
+};
+
+// Test parallel allocation/iteration and ensure consistency across all
+// instances.
+TEST_F(PersistentMemoryAllocatorTest, ParallelismTest) {
+  void* memory = mem_segment_.get();
+  AllocatorThread t1("t1", memory, TEST_MEMORY_SIZE, TEST_MEMORY_PAGE);
+  AllocatorThread t2("t2", memory, TEST_MEMORY_SIZE, TEST_MEMORY_PAGE);
+  AllocatorThread t3("t3", memory, TEST_MEMORY_SIZE, TEST_MEMORY_PAGE);
+  AllocatorThread t4("t4", memory, TEST_MEMORY_SIZE, TEST_MEMORY_PAGE);
+  AllocatorThread t5("t5", memory, TEST_MEMORY_SIZE, TEST_MEMORY_PAGE);
+
+  t1.Start();
+  t2.Start();
+  t3.Start();
+  t4.Start();
+  t5.Start();
+
+  unsigned last_count = 0;
+  do {
+    unsigned count = CountIterables();
+    EXPECT_LE(last_count, count);
+    last_count = count;
+  } while (!allocator_->IsCorrupt() && !allocator_->IsFull());
+
+  t1.Join();
+  t2.Join();
+  t3.Join();
+  t4.Join();
+  t5.Join();
+
+  EXPECT_FALSE(allocator_->IsCorrupt());
+  EXPECT_TRUE(allocator_->IsFull());
+  EXPECT_EQ(CountIterables(),
+            t1.iterable() + t2.iterable() + t3.iterable() + t4.iterable() +
+            t5.iterable());
+}
+
+// This test doesn't verify anything other than that it doesn't crash. Its
+// goal is to find coding errors that aren't otherwise tested for, much like
+// a "fuzzer" would.
+// This test is known to fail under ThreadSanitizer (crbug.com/579867), so
+// it is disabled on TSAN builds below.
+#if defined(THREAD_SANITIZER)
+#define MAYBE_CorruptionTest DISABLED_CorruptionTest
+#else
+#define MAYBE_CorruptionTest CorruptionTest
+#endif
+TEST_F(PersistentMemoryAllocatorTest, MAYBE_CorruptionTest) {
+  char* memory = mem_segment_.get();
+  AllocatorThread t1("t1", memory, TEST_MEMORY_SIZE, TEST_MEMORY_PAGE);
+  AllocatorThread t2("t2", memory, TEST_MEMORY_SIZE, TEST_MEMORY_PAGE);
+  AllocatorThread t3("t3", memory, TEST_MEMORY_SIZE, TEST_MEMORY_PAGE);
+  AllocatorThread t4("t4", memory, TEST_MEMORY_SIZE, TEST_MEMORY_PAGE);
+  AllocatorThread t5("t5", memory, TEST_MEMORY_SIZE, TEST_MEMORY_PAGE);
+
+  t1.Start();
+  t2.Start();
+  t3.Start();
+  t4.Start();
+  t5.Start();
+
+  do {
+    size_t offset = RandInt(0, TEST_MEMORY_SIZE - 1);
+    char value = RandInt(0, 255);
+    memory[offset] = value;
+  } while (!allocator_->IsCorrupt() && !allocator_->IsFull());
+
+  t1.Join();
+  t2.Join();
+  t3.Join();
+  t4.Join();
+  t5.Join();
+
+  CountIterables();
+}
+
+// Attempt to cause crashes or loops by expressly creating dangerous conditions.
+TEST_F(PersistentMemoryAllocatorTest, MaliciousTest) {
+  Reference block1 = allocator_->Allocate(sizeof(TestObject1), 1);
+  Reference block2 = allocator_->Allocate(sizeof(TestObject1), 2);
+  Reference block3 = allocator_->Allocate(sizeof(TestObject1), 3);
+  Reference block4 = allocator_->Allocate(sizeof(TestObject1), 3);
+  Reference block5 = allocator_->Allocate(sizeof(TestObject1), 3);
+  allocator_->MakeIterable(block1);
+  allocator_->MakeIterable(block2);
+  allocator_->MakeIterable(block3);
+  allocator_->MakeIterable(block4);
+  allocator_->MakeIterable(block5);
+  EXPECT_EQ(5U, CountIterables());
+  EXPECT_FALSE(allocator_->IsCorrupt());
+
+  // Create loop in iterable list and ensure it doesn't hang. The return value
+  // from CountIterables() in these cases is unpredictable. If there is a
+  // failure, the call will hang and the test killed for taking too long.
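+  // (The BlockHeader is four 32-bit words: size, cookie, type_id, next; the
+  // "next" link is therefore header4[3] below.)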
+  uint32_t* header4 = reinterpret_cast<uint32_t*>(mem_segment_.get() + block4);
+  EXPECT_EQ(block5, header4[3]);
+  header4[3] = block4;
+  CountIterables();  // loop: 1-2-3-4-4
+  EXPECT_TRUE(allocator_->IsCorrupt());
+
+  // Test where loop goes back to previous block.
+  header4[3] = block3;
+  CountIterables();  // loop: 1-2-3-4-3
+
+  // Test where loop goes back to the beginning.
+  header4[3] = block1;
+  CountIterables();  // loop: 1-2-3-4-1
+}
+
+
+//----- LocalPersistentMemoryAllocator -----------------------------------------
+
+TEST(LocalPersistentMemoryAllocatorTest, CreationTest) {
+  LocalPersistentMemoryAllocator allocator(TEST_MEMORY_SIZE, 42, "");
+  EXPECT_EQ(42U, allocator.Id());
+  EXPECT_NE(0U, allocator.Allocate(24, 1));
+  EXPECT_FALSE(allocator.IsFull());
+  EXPECT_FALSE(allocator.IsCorrupt());
+}
+
+
+//----- SharedPersistentMemoryAllocator ----------------------------------------
+
+TEST(SharedPersistentMemoryAllocatorTest, CreationTest) {
+  SharedMemoryHandle shared_handle;
+
+  PersistentMemoryAllocator::MemoryInfo meminfo1;
+  Reference r123, r456, r789;
+  {
+    scoped_ptr<SharedMemory> shmem1(new SharedMemory());
+    ASSERT_TRUE(shmem1->CreateAndMapAnonymous(TEST_MEMORY_SIZE));
+    SharedPersistentMemoryAllocator local(std::move(shmem1), TEST_ID, "",
+                                          false);
+    EXPECT_FALSE(local.IsReadonly());
+    r123 = local.Allocate(123, 123);
+    r456 = local.Allocate(456, 456);
+    r789 = local.Allocate(789, 789);
+    local.MakeIterable(r123);
+    local.SetType(r456, 654);
+    local.MakeIterable(r789);
+    local.GetMemoryInfo(&meminfo1);
+    EXPECT_FALSE(local.IsFull());
+    EXPECT_FALSE(local.IsCorrupt());
+
+    ASSERT_TRUE(local.shared_memory()->ShareToProcess(
+                    GetCurrentProcessHandle(),
+                    &shared_handle));
+  }
+
+  // Read-only test.
+  scoped_ptr<SharedMemory> shmem2(new SharedMemory(shared_handle,
+                                                   /*readonly=*/true));
+  ASSERT_TRUE(shmem2->Map(TEST_MEMORY_SIZE));
+
+  SharedPersistentMemoryAllocator shalloc2(std::move(shmem2), 0, "", true);
+  EXPECT_TRUE(shalloc2.IsReadonly());
+  EXPECT_EQ(TEST_ID, shalloc2.Id());
+  EXPECT_FALSE(shalloc2.IsFull());
+  EXPECT_FALSE(shalloc2.IsCorrupt());
+
+  PersistentMemoryAllocator::Iterator iter2;
+  uint32_t type;
+  shalloc2.CreateIterator(&iter2);
+  EXPECT_EQ(r123, shalloc2.GetNextIterable(&iter2, &type));
+  EXPECT_EQ(r789, shalloc2.GetNextIterable(&iter2, &type));
+  EXPECT_EQ(0U, shalloc2.GetNextIterable(&iter2, &type));
+
+  EXPECT_EQ(123U, shalloc2.GetType(r123));
+  EXPECT_EQ(654U, shalloc2.GetType(r456));
+  EXPECT_EQ(789U, shalloc2.GetType(r789));
+
+  PersistentMemoryAllocator::MemoryInfo meminfo2;
+  shalloc2.GetMemoryInfo(&meminfo2);
+  EXPECT_EQ(meminfo1.total, meminfo2.total);
+  EXPECT_EQ(meminfo1.free, meminfo2.free);
+
+  // Read/write test.
+  scoped_ptr<SharedMemory> shmem3(new SharedMemory(shared_handle,
+                                                   /*readonly=*/false));
+  ASSERT_TRUE(shmem3->Map(TEST_MEMORY_SIZE));
+
+  SharedPersistentMemoryAllocator shalloc3(std::move(shmem3), 0, "", false);
+  EXPECT_FALSE(shalloc3.IsReadonly());
+  EXPECT_EQ(TEST_ID, shalloc3.Id());
+  EXPECT_FALSE(shalloc3.IsFull());
+  EXPECT_FALSE(shalloc3.IsCorrupt());
+
+  PersistentMemoryAllocator::Iterator iter3;
+  shalloc3.CreateIterator(&iter3);
+  EXPECT_EQ(r123, shalloc3.GetNextIterable(&iter3, &type));
+  EXPECT_EQ(r789, shalloc3.GetNextIterable(&iter3, &type));
+  EXPECT_EQ(0U, shalloc3.GetNextIterable(&iter3, &type));
+
+  EXPECT_EQ(123U, shalloc3.GetType(r123));
+  EXPECT_EQ(654U, shalloc3.GetType(r456));
+  EXPECT_EQ(789U, shalloc3.GetType(r789));
+
+  PersistentMemoryAllocator::MemoryInfo meminfo3;
+  shalloc3.GetMemoryInfo(&meminfo3);
+  EXPECT_EQ(meminfo1.total, meminfo3.total);
+  EXPECT_EQ(meminfo1.free, meminfo3.free);
+
+  // Interconnectivity test.
+  Reference obj = shalloc3.Allocate(42, 42);
+  ASSERT_TRUE(obj);
+  shalloc3.MakeIterable(obj);
+  EXPECT_EQ(obj, shalloc2.GetNextIterable(&iter2, &type));
+  EXPECT_EQ(42U, type);
+}
+
+
+//----- FilePersistentMemoryAllocator ------------------------------------------
+
+TEST(FilePersistentMemoryAllocatorTest, CreationTest) {
+  ScopedTempDir temp_dir;
+  ASSERT_TRUE(temp_dir.CreateUniqueTempDir());
+  FilePath file_path = temp_dir.path().AppendASCII("persistent_memory");
+
+  PersistentMemoryAllocator::MemoryInfo meminfo1;
+  Reference r123, r456, r789;
+  {
+    LocalPersistentMemoryAllocator local(TEST_MEMORY_SIZE, TEST_ID, "");
+    EXPECT_FALSE(local.IsReadonly());
+    r123 = local.Allocate(123, 123);
+    r456 = local.Allocate(456, 456);
+    r789 = local.Allocate(789, 789);
+    local.MakeIterable(r123);
+    local.SetType(r456, 654);
+    local.MakeIterable(r789);
+    local.GetMemoryInfo(&meminfo1);
+    EXPECT_FALSE(local.IsFull());
+    EXPECT_FALSE(local.IsCorrupt());
+
+    File writer(file_path, File::FLAG_CREATE | File::FLAG_WRITE);
+    ASSERT_TRUE(writer.IsValid());
+    writer.Write(0, static_cast<const char*>(local.data()), local.used());
+  }
+
+  scoped_ptr<MemoryMappedFile> mmfile(new MemoryMappedFile());
+  mmfile->Initialize(file_path);
+  EXPECT_TRUE(mmfile->IsValid());
+  const size_t mmlength = mmfile->length();
+  EXPECT_GE(meminfo1.total, mmlength);
+
+  FilePersistentMemoryAllocator file(std::move(mmfile), 0, "");
+  EXPECT_TRUE(file.IsReadonly());
+  EXPECT_EQ(TEST_ID, file.Id());
+  EXPECT_FALSE(file.IsFull());
+  EXPECT_FALSE(file.IsCorrupt());
+
+  PersistentMemoryAllocator::Iterator iter;
+  uint32_t type;
+  file.CreateIterator(&iter);
+  EXPECT_EQ(r123, file.GetNextIterable(&iter, &type));
+  EXPECT_EQ(r789, file.GetNextIterable(&iter, &type));
+  EXPECT_EQ(0U, file.GetNextIterable(&iter, &type));
+
+  EXPECT_EQ(123U, file.GetType(r123));
+  EXPECT_EQ(654U, file.GetType(r456));
+  EXPECT_EQ(789U, file.GetType(r789));
+
+  PersistentMemoryAllocator::MemoryInfo meminfo2;
+  file.GetMemoryInfo(&meminfo2);
+  EXPECT_GE(meminfo1.total, meminfo2.total);
+  EXPECT_GE(meminfo1.free, meminfo2.free);
+  EXPECT_EQ(mmlength, meminfo2.total);
+  EXPECT_EQ(0U, meminfo2.free);
+}
+
+TEST(FilePersistentMemoryAllocatorTest, AcceptableTest) {
+  ScopedTempDir temp_dir;
+  ASSERT_TRUE(temp_dir.CreateUniqueTempDir());
+  FilePath file_path_base = temp_dir.path().AppendASCII("persistent_memory_");
+
+  LocalPersistentMemoryAllocator local(TEST_MEMORY_SIZE, TEST_ID, "");
+  local.Allocate(1, 1);
+  local.Allocate(11, 11);
+  const size_t minsize = local.used();
+  scoped_ptr<char[]> garbage(new char[minsize]);
+  RandBytes(garbage.get(), minsize);
+
+  scoped_ptr<MemoryMappedFile> mmfile;
+  char filename[100];
+  for (size_t filesize = minsize; filesize > 0; --filesize) {
+    strings::SafeSPrintf(filename, "memory_%d_A", filesize);
+    FilePath file_path = temp_dir.path().AppendASCII(filename);
+    ASSERT_FALSE(PathExists(file_path));
+    {
+      File writer(file_path, File::FLAG_CREATE | File::FLAG_WRITE);
+      ASSERT_TRUE(writer.IsValid());
+      writer.Write(0, static_cast<const char*>(local.data()), filesize);
+    }
+    ASSERT_TRUE(PathExists(file_path));
+
+    mmfile.reset(new MemoryMappedFile());
+    mmfile->Initialize(file_path);
+    EXPECT_EQ(filesize, mmfile->length());
+    if (FilePersistentMemoryAllocator::IsFileAcceptable(*mmfile)) {
+      // Make sure construction doesn't crash.
+      FilePersistentMemoryAllocator allocator(std::move(mmfile), 0, "");
+      // Also make sure that iteration doesn't crash.
+      PersistentMemoryAllocator::Iterator iter;
+      allocator.CreateIterator(&iter);
+      for (;;) {
+        uint32_t type_id;
+        Reference ref = allocator.GetNextIterable(&iter, &type_id);
+        if (!ref)
+          break;
+        const char* data = allocator.GetAsObject<char>(ref, 0);
+        uint32_t type = allocator.GetType(ref);
+        size_t size = allocator.GetAllocSize(ref);
+        // Ensure compiler can't optimize-out above variables.
+        (void)data;
+        (void)type;
+        (void)size;
+        // Ensure that corruption-detected flag gets properly set.
+        EXPECT_EQ(filesize != minsize, allocator.IsCorrupt());
+      }
+    } else {
+      // For filesize >= minsize, the file must be acceptable. This
+      // else clause (file-not-acceptable) should be reached only if
+      // filesize < minsize.
+      EXPECT_LT(filesize, minsize);
+    }
+
+    strings::SafeSPrintf(filename, "memory_%d_B", filesize);
+    file_path = temp_dir.path().AppendASCII(filename);
+    ASSERT_FALSE(PathExists(file_path));
+    {
+      File writer(file_path, File::FLAG_CREATE | File::FLAG_WRITE);
+      ASSERT_TRUE(writer.IsValid());
+      writer.Write(0, garbage.get(), filesize);
+    }
+    ASSERT_TRUE(PathExists(file_path));
+
+    mmfile.reset(new MemoryMappedFile());
+    mmfile->Initialize(file_path);
+    EXPECT_EQ(filesize, mmfile->length());
+    if (FilePersistentMemoryAllocator::IsFileAcceptable(*mmfile)) {
+      // Just need to make sure it doesn't crash.
+      FilePersistentMemoryAllocator allocator(std::move(mmfile), 0, "");
+      EXPECT_TRUE(allocator.IsCorrupt());  // Garbage data so it should be.
+    } else {
+      // For filesize >= minsize, the file must be acceptable. This
+      // else clause (file-not-acceptable) should be reached only if
+      // filesize < minsize.
+      EXPECT_GT(minsize, filesize);
+    }
+  }
+}
+
+}  // namespace base
diff --git a/base/metrics/persistent_sample_map.cc b/base/metrics/persistent_sample_map.cc
new file mode 100644
index 0000000..014a865
--- /dev/null
+++ b/base/metrics/persistent_sample_map.cc
@@ -0,0 +1,267 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/metrics/persistent_sample_map.h"
+
+#include "base/logging.h"
+#include "base/stl_util.h"
+
+namespace base {
+
+typedef HistogramBase::Count Count;
+typedef HistogramBase::Sample Sample;
+
+namespace {
+
+// An iterator for going through a PersistentSampleMap. The logic here is
+// identical to that of SampleMapIterator but with different data structures.
+// Changes here likely need to be duplicated there.
+class PersistentSampleMapIterator : public SampleCountIterator {
+ public:
+  typedef std::map<HistogramBase::Sample, HistogramBase::Count*>
+      SampleToCountMap;
+
+  explicit PersistentSampleMapIterator(const SampleToCountMap& sample_counts);
+  ~PersistentSampleMapIterator() override;
+
+  // SampleCountIterator:
+  bool Done() const override;
+  void Next() override;
+  void Get(HistogramBase::Sample* min,
+           HistogramBase::Sample* max,
+           HistogramBase::Count* count) const override;
+
+ private:
+  void SkipEmptyBuckets();
+
+  SampleToCountMap::const_iterator iter_;
+  const SampleToCountMap::const_iterator end_;
+};
+
+PersistentSampleMapIterator::PersistentSampleMapIterator(
+    const SampleToCountMap& sample_counts)
+    : iter_(sample_counts.begin()),
+      end_(sample_counts.end()) {
+  SkipEmptyBuckets();
+}
+
+PersistentSampleMapIterator::~PersistentSampleMapIterator() {}
+
+bool PersistentSampleMapIterator::Done() const {
+  return iter_ == end_;
+}
+
+void PersistentSampleMapIterator::Next() {
+  DCHECK(!Done());
+  ++iter_;
+  SkipEmptyBuckets();
+}
+
+void PersistentSampleMapIterator::Get(Sample* min,
+                                      Sample* max,
+                                      Count* count) const {
+  DCHECK(!Done());
+  if (min)
+    *min = iter_->first;
+  if (max)
+    *max = iter_->first + 1;
+  if (count)
+    *count = *iter_->second;
+}
+
+void PersistentSampleMapIterator::SkipEmptyBuckets() {
+  while (!Done() && *iter_->second == 0) {
+    ++iter_;
+  }
+}
+
+// This structure holds an entry for a PersistentSampleMap within a persistent
+// memory allocator. The "id" must be unique across all maps held by an
+// allocator or they will get attached to the wrong sample map.
+struct SampleRecord {
+  uint64_t id;   // Unique identifier of owner.
+  Sample value;  // The value for which this record holds a count.
+  Count count;   // The count associated with the above value.
+};
+
+// The type-id used to identify sample records inside an allocator.
+const uint32_t kTypeIdSampleRecord = 0x8FE6A69F + 1;  // SHA1(SampleRecord) v1
+
+}  // namespace
+
+PersistentSampleMap::PersistentSampleMap(
+    uint64_t id,
+    PersistentMemoryAllocator* allocator,
+    Metadata* meta)
+    : HistogramSamples(id, meta),
+      allocator_(allocator) {
+  // This is created once but will continue to return new iterables even when
+  // it has previously reached the end.
+  allocator->CreateIterator(&sample_iter_);
+
+  // Load all existing samples during construction. It's no worse to do it
+  // here than at some point in the future and could be better if construction
+  // takes place on some background thread. New samples could be created at
+  // any time by parallel threads; if so, they'll get loaded when needed.
+  ImportSamples(kAllSamples);
+}
+
+PersistentSampleMap::~PersistentSampleMap() {}
+
+void PersistentSampleMap::Accumulate(Sample value, Count count) {
+  *GetOrCreateSampleCountStorage(value) += count;
+  IncreaseSum(static_cast<int64_t>(count) * value);
+  IncreaseRedundantCount(count);
+}
+
+Count PersistentSampleMap::GetCount(Sample value) const {
+  // Have to override "const" to make sure all samples have been loaded before
+  // being able to know what value to return.
+  Count* count_pointer =
+      const_cast<PersistentSampleMap*>(this)->GetSampleCountStorage(value);
+  return count_pointer ? *count_pointer : 0;
+}
+
+Count PersistentSampleMap::TotalCount() const {
+  // Have to override "const" in order to make sure all samples have been
+  // loaded before trying to iterate over the map.
+  const_cast<PersistentSampleMap*>(this)->ImportSamples(kAllSamples);
+
+  Count count = 0;
+  for (const auto& entry : sample_counts_) {
+    count += *entry.second;
+  }
+  return count;
+}
+
+scoped_ptr<SampleCountIterator> PersistentSampleMap::Iterator() const {
+  // Have to override "const" in order to make sure all samples have been
+  // loaded before trying to iterate over the map.
+  const_cast<PersistentSampleMap*>(this)->ImportSamples(kAllSamples);
+  return make_scoped_ptr(new PersistentSampleMapIterator(sample_counts_));
+}
+
+bool PersistentSampleMap::AddSubtractImpl(SampleCountIterator* iter,
+                                          Operator op) {
+  Sample min;
+  Sample max;
+  Count count;
+  for (; !iter->Done(); iter->Next()) {
+    iter->Get(&min, &max, &count);
+    if (min + 1 != max)
+      return false;  // SparseHistogram only supports buckets of size 1.
+
+    *GetOrCreateSampleCountStorage(min) +=
+        (op == HistogramSamples::ADD) ? count : -count;
+  }
+  return true;
+}
+
+Count* PersistentSampleMap::GetSampleCountStorage(Sample value) {
+  DCHECK_LE(0, value);
+
+  // If |value| is already in the map, just return that.
+  auto it = sample_counts_.find(value);
+  if (it != sample_counts_.end())
+    return it->second;
+
+  // Import any new samples from persistent memory looking for the value.
+  return ImportSamples(value);
+}
+
+Count* PersistentSampleMap::GetOrCreateSampleCountStorage(Sample value) {
+  // Get any existing count storage.
+  Count* count_pointer = GetSampleCountStorage(value);
+  if (count_pointer)
+    return count_pointer;
+
+  // Create a new record in persistent memory for the value.
+  PersistentMemoryAllocator::Reference ref =
+      allocator_->Allocate(sizeof(SampleRecord), kTypeIdSampleRecord);
+  SampleRecord* record =
+      allocator_->GetAsObject<SampleRecord>(ref, kTypeIdSampleRecord);
+  if (!record) {
+    // If the allocator was unable to create a record then it is full or
+    // corrupt. Instead, allocate the counter from the heap. This sample will
+    // not be persistent, will not be shared, and will leak but it's better
+    // than crashing.
+    NOTREACHED() << "full=" << allocator_->IsFull()
+                 << ", corrupt=" << allocator_->IsCorrupt();
+    count_pointer = new Count(0);
+    sample_counts_[value] = count_pointer;
+    return count_pointer;
+  }
+  record->id = id();
+  record->value = value;
+  record->count = 0;  // Should already be zero but don't trust other processes.
+  allocator_->MakeIterable(ref);
+
+  // A race condition between two independent processes (i.e. two independent
+  // histogram objects sharing the same sample data) could cause two of the
+  // above records to be created. The allocator, however, forces a strict
+  // ordering on iterable objects so use the import method to actually add the
+  // just-created record. This ensures that all PersistentSampleMap objects
+  // will always use the same record, whichever was first made iterable.
+  // Thread-safety within a process where multiple threads use the same
+  // histogram object is delegated to the controlling histogram object which,
+  // for sparse histograms, is a lock object.
+  count_pointer = ImportSamples(value);
+  DCHECK(count_pointer);
+  return count_pointer;
+}
+
+Count* PersistentSampleMap::ImportSamples(Sample until_value) {
+  // TODO(bcwhite): This import operates in O(V+N) total time per sparse
+  // histogram where V is the number of values for this object and N is
+  // the number of other iterable objects in the allocator. This becomes
+  // O(S*(SV+N)) or O(S^2*V + SN) overall where S is the number of sparse
+  // histograms.
+  //
+  // This is actually okay when histograms are expected to exist for the
+  // lifetime of the program, spreading the cost out, and S and V are
+  // relatively small, as is the current case.
+  //
+// However, it is not so good for objects that are created, destroyed, and
+  // recreated on a periodic basis, such as when making a snapshot of
+  // sparse histograms owned by another, ongoing process. In that case, the
+  // entire cost is compressed into a single sequential operation... on the
+  // UI thread no less.
+  //
+  // This will be addressed in a future CL.
+
+  uint32_t type_id;
+  PersistentMemoryAllocator::Reference ref;
+  while ((ref = allocator_->GetNextIterable(&sample_iter_, &type_id)) != 0) {
+    if (type_id == kTypeIdSampleRecord) {
+      SampleRecord* record =
+          allocator_->GetAsObject<SampleRecord>(ref, kTypeIdSampleRecord);
+      if (!record)
+        continue;
+
+      // A sample record has been found but may not be for this histogram.
+      if (record->id != id())
+        continue;
+
+      // Check if the record's value is already known.
+      if (!ContainsKey(sample_counts_, record->value)) {
+        // No: Add it to map of known values if the value is valid.
+        if (record->value >= 0)
+          sample_counts_[record->value] = &record->count;
+      } else {
+        // Yes: Ignore it; it's a duplicate caused by a race condition -- see
+        // code & comment in GetOrCreateSampleCountStorage() for details.
+        // Check that nothing ever operated on the duplicate record.
+        DCHECK_EQ(0, record->count);
+      }
+
+      // Stop if it's the value being searched for.
+      if (record->value == until_value)
+        return &record->count;
+    }
+  }
+
+  return nullptr;
+}
+
+}  // namespace base
diff --git a/base/metrics/persistent_sample_map.h b/base/metrics/persistent_sample_map.h
new file mode 100644
index 0000000..a23b751
--- /dev/null
+++ b/base/metrics/persistent_sample_map.h
@@ -0,0 +1,78 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// PersistentSampleMap implements HistogramSamples interface. It is used
+// by the SparseHistogram class to store samples in persistent memory which
+// allows it to be shared between processes or live across restarts.
+
+#ifndef BASE_METRICS_PERSISTENT_SAMPLE_MAP_H_
+#define BASE_METRICS_PERSISTENT_SAMPLE_MAP_H_
+
+#include <stdint.h>
+
+#include <map>
+
+#include "base/compiler_specific.h"
+#include "base/macros.h"
+#include "base/memory/scoped_ptr.h"
+#include "base/metrics/histogram_base.h"
+#include "base/metrics/histogram_samples.h"
+#include "base/metrics/persistent_memory_allocator.h"
+
+namespace base {
+
+// The logic here is similar to that of SampleMap but with different data
+// structures. Changes here likely need to be duplicated there.
+class BASE_EXPORT PersistentSampleMap : public HistogramSamples {
+ public:
+  PersistentSampleMap(uint64_t id,
+                      PersistentMemoryAllocator* allocator,
+                      Metadata* meta);
+  ~PersistentSampleMap() override;
+
+  // HistogramSamples:
+  void Accumulate(HistogramBase::Sample value,
+                  HistogramBase::Count count) override;
+  HistogramBase::Count GetCount(HistogramBase::Sample value) const override;
+  HistogramBase::Count TotalCount() const override;
+  scoped_ptr<SampleCountIterator> Iterator() const override;
+
+ protected:
+  // Performs arithmetic. |op| is ADD or SUBTRACT.
+  bool AddSubtractImpl(SampleCountIterator* iter, Operator op) override;
+
+  // Gets a pointer to a "count" corresponding to a given |value|. Returns NULL
+  // if the sample does not exist.
+  HistogramBase::Count* GetSampleCountStorage(HistogramBase::Sample value);
+
+  // Gets a pointer to a "count" corresponding to a given |value|, creating
+  // the sample (initialized to zero) if it does not already exist.
+  HistogramBase::Count* GetOrCreateSampleCountStorage(
+      HistogramBase::Sample value);
+
+ private:
+  enum : HistogramBase::Sample { kAllSamples = -1 };
+
+  // Imports samples from persistent memory by iterating over all sample
+  // records found therein, adding them to the sample_counts_ map. If a
+  // count for the sample |until_value| is found, stop the import and return
+  // a pointer to that counter. If that value is not found, null will be
+  // returned after all currently available samples have been loaded. Pass
+  // kAllSamples to force the importing of all available samples.
+  HistogramBase::Count* ImportSamples(HistogramBase::Sample until_value);
+
+  // All created/loaded sample values and their associated counts. The storage
+  // for the actual Count numbers is owned by the |allocator_|.
+  std::map<HistogramBase::Sample, HistogramBase::Count*> sample_counts_;
+
+  // The persistent memory allocator holding samples and an iterator through it.
+  PersistentMemoryAllocator* allocator_;
+  PersistentMemoryAllocator::Iterator sample_iter_;
+
+  DISALLOW_COPY_AND_ASSIGN(PersistentSampleMap);
+};
+
+}  // namespace base
+
+#endif  // BASE_METRICS_PERSISTENT_SAMPLE_MAP_H_
diff --git a/base/metrics/persistent_sample_map_unittest.cc b/base/metrics/persistent_sample_map_unittest.cc
new file mode 100644
index 0000000..c735f8f
--- /dev/null
+++ b/base/metrics/persistent_sample_map_unittest.cc
@@ -0,0 +1,243 @@
+// Copyright (c) 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/metrics/persistent_sample_map.h"
+
+#include "base/memory/scoped_ptr.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace base {
+namespace {
+
+TEST(PersistentSampleMapTest, AccumulateTest) {
+  LocalPersistentMemoryAllocator allocator(64 << 10, 0, "");  // 64 KiB
+
+  HistogramSamples::Metadata* meta =
+      allocator.GetAsObject<HistogramSamples::Metadata>(
+          allocator.Allocate(sizeof(HistogramSamples::Metadata), 0), 0);
+  PersistentSampleMap samples(1, &allocator, meta);
+
+  samples.Accumulate(1, 100);
+  samples.Accumulate(2, 200);
+  samples.Accumulate(1, -200);
+  EXPECT_EQ(-100, samples.GetCount(1));
+  EXPECT_EQ(200, samples.GetCount(2));
+
+  EXPECT_EQ(300, samples.sum());
+  EXPECT_EQ(100, samples.TotalCount());
+  EXPECT_EQ(samples.redundant_count(), samples.TotalCount());
+}
+
+TEST(PersistentSampleMapTest, Accumulate_LargeValuesDontOverflow) {
+  LocalPersistentMemoryAllocator allocator(64 << 10, 0, "");  // 64 KiB
+
+  HistogramSamples::Metadata* meta =
+      allocator.GetAsObject<HistogramSamples::Metadata>(
+          allocator.Allocate(sizeof(HistogramSamples::Metadata), 0), 0);
+  PersistentSampleMap samples(1, &allocator, meta);
+
+  samples.Accumulate(250000000, 100);
+  samples.Accumulate(500000000, 200);
+  samples.Accumulate(250000000, -200);
+  EXPECT_EQ(-100, samples.GetCount(250000000));
+  EXPECT_EQ(200, samples.GetCount(500000000));
+
+  EXPECT_EQ(75000000000LL, samples.sum());
+  EXPECT_EQ(100, samples.TotalCount());
+  EXPECT_EQ(samples.redundant_count(), samples.TotalCount());
+}
+
+TEST(PersistentSampleMapTest, AddSubtractTest) {
+  LocalPersistentMemoryAllocator allocator(64 << 10, 0, "");  // 64 KiB
+
+  HistogramSamples::Metadata* meta1 =
+      allocator.GetAsObject<HistogramSamples::Metadata>(
+          allocator.Allocate(sizeof(HistogramSamples::Metadata), 0), 0);
+  HistogramSamples::Metadata* meta2 =
+      allocator.GetAsObject<HistogramSamples::Metadata>(
+          allocator.Allocate(sizeof(HistogramSamples::Metadata), 0), 0);
+  PersistentSampleMap samples1(1, &allocator, meta1);
+  PersistentSampleMap samples2(2, &allocator, meta2);
+
+  samples1.Accumulate(1, 100);
+  samples1.Accumulate(2, 100);
+  samples1.Accumulate(3, 100);
+
+  samples2.Accumulate(1, 200);
+  samples2.Accumulate(2, 200);
+  samples2.Accumulate(4, 200);
+
+  samples1.Add(samples2);
+  EXPECT_EQ(300, samples1.GetCount(1));
+  EXPECT_EQ(300, samples1.GetCount(2));
+  EXPECT_EQ(100, samples1.GetCount(3));
+  EXPECT_EQ(200, samples1.GetCount(4));
+  EXPECT_EQ(2000, samples1.sum());
+  EXPECT_EQ(900, samples1.TotalCount());
+  EXPECT_EQ(samples1.redundant_count(), samples1.TotalCount());
+
+  samples1.Subtract(samples2);
+  EXPECT_EQ(100, samples1.GetCount(1));
+  EXPECT_EQ(100, samples1.GetCount(2));
+  EXPECT_EQ(100, samples1.GetCount(3));
+  EXPECT_EQ(0, samples1.GetCount(4));
+  EXPECT_EQ(600, samples1.sum());
+  EXPECT_EQ(300, samples1.TotalCount());
+  EXPECT_EQ(samples1.redundant_count(), samples1.TotalCount());
+}
+
+TEST(PersistentSampleMapTest, PersistenceTest) {
+  LocalPersistentMemoryAllocator allocator(64 << 10, 0, "");  // 64 KiB
+
+  HistogramSamples::Metadata* meta12 =
+      allocator.GetAsObject<HistogramSamples::Metadata>(
+          allocator.Allocate(sizeof(HistogramSamples::Metadata), 0), 0);
+  PersistentSampleMap samples1(12, &allocator, meta12);
+  samples1.Accumulate(1, 100);
+  samples1.Accumulate(2, 200);
+  samples1.Accumulate(1, -200);
+  EXPECT_EQ(-100, samples1.GetCount(1));
+  EXPECT_EQ(200, samples1.GetCount(2));
+  EXPECT_EQ(300, samples1.sum());
+  EXPECT_EQ(100, samples1.TotalCount());
+  EXPECT_EQ(samples1.redundant_count(), samples1.TotalCount());
+
+  PersistentSampleMap samples2(12, &allocator, meta12);
+  EXPECT_EQ(samples1.id(), samples2.id());
+  EXPECT_EQ(samples1.sum(), samples2.sum());
+  EXPECT_EQ(samples1.redundant_count(), samples2.redundant_count());
+  EXPECT_EQ(samples1.TotalCount(), samples2.TotalCount());
+  EXPECT_EQ(-100, samples2.GetCount(1));
+  EXPECT_EQ(200, samples2.GetCount(2));
+  EXPECT_EQ(300, samples2.sum());
+  EXPECT_EQ(100, samples2.TotalCount());
+  EXPECT_EQ(samples2.redundant_count(), samples2.TotalCount());
+
+  EXPECT_EQ(0, samples2.GetCount(3));
+  EXPECT_EQ(0, samples1.GetCount(3));
+  samples2.Accumulate(3, 300);
+  EXPECT_EQ(300, samples2.GetCount(3));
+  EXPECT_EQ(300, samples1.GetCount(3));
+  EXPECT_EQ(samples1.sum(), samples2.sum());
+  EXPECT_EQ(samples1.redundant_count(), samples2.redundant_count());
+  EXPECT_EQ(samples1.TotalCount(), samples2.TotalCount());
+}
+
+TEST(PersistentSampleMapIteratorTest, IterateTest) {
+  LocalPersistentMemoryAllocator allocator(64 << 10, 0, "");  // 64 KiB
+
+  HistogramSamples::Metadata* meta =
+      allocator.GetAsObject<HistogramSamples::Metadata>(
+          allocator.Allocate(sizeof(HistogramSamples::Metadata), 0), 0);
+  PersistentSampleMap samples(1, &allocator, meta);
+  samples.Accumulate(1, 100);
+  samples.Accumulate(2, 200);
+  samples.Accumulate(4, -300);
+  samples.Accumulate(5, 0);
+
+  scoped_ptr<SampleCountIterator> it = samples.Iterator();
+
+  HistogramBase::Sample min;
+  HistogramBase::Sample max;
+  HistogramBase::Count count;
+
+  it->Get(&min, &max, &count);
+  EXPECT_EQ(1, min);
+  EXPECT_EQ(2, max);
+  EXPECT_EQ(100, count);
+  EXPECT_FALSE(it->GetBucketIndex(NULL));
+
+  it->Next();
+  it->Get(&min, &max, &count);
+  EXPECT_EQ(2, min);
+  EXPECT_EQ(3, max);
+  EXPECT_EQ(200, count);
+
+  it->Next();
+  it->Get(&min, &max, &count);
+  EXPECT_EQ(4, min);
+  EXPECT_EQ(5, max);
+  EXPECT_EQ(-300, count);
+
+  it->Next();
+  EXPECT_TRUE(it->Done());
+}
+
+TEST(PersistentSampleMapIteratorTest, SkipEmptyRanges) {
+  LocalPersistentMemoryAllocator allocator(64 << 10, 0, "");  // 64 KiB
+
+  HistogramSamples::Metadata* meta =
+      allocator.GetAsObject<HistogramSamples::Metadata>(
+          allocator.Allocate(sizeof(HistogramSamples::Metadata), 0), 0);
+  PersistentSampleMap samples(1, &allocator, meta);
+  samples.Accumulate(5, 1);
+  samples.Accumulate(10, 2);
+  samples.Accumulate(15, 3);
+  samples.Accumulate(20, 4);
+  samples.Accumulate(25, 5);
+
+  HistogramSamples::Metadata* meta2 =
+      allocator.GetAsObject<HistogramSamples::Metadata>(
+          allocator.Allocate(sizeof(HistogramSamples::Metadata), 0), 0);
+  PersistentSampleMap samples2(2, &allocator, meta2);
+  samples2.Accumulate(5, 1);
+  samples2.Accumulate(20, 4);
+  samples2.Accumulate(25, 5);
+
+  samples.Subtract(samples2);
+
+  scoped_ptr<SampleCountIterator> it = samples.Iterator();
+  EXPECT_FALSE(it->Done());
+
+  HistogramBase::Sample min;
+  HistogramBase::Sample max;
+  HistogramBase::Count count;
+
+  it->Get(&min, &max, &count);
+  EXPECT_EQ(10, min);
+  EXPECT_EQ(11, max);
+  EXPECT_EQ(2, count);
+
+  it->Next();
+  EXPECT_FALSE(it->Done());
+
+  it->Get(&min, &max, &count);
+  EXPECT_EQ(15, min);
+  EXPECT_EQ(16, max);
+  EXPECT_EQ(3, count);
+
+  it->Next();
+  EXPECT_TRUE(it->Done());
+}
+
+// Only run this test on builds that support catching a DCHECK crash.
+#if (!defined(NDEBUG) || defined(DCHECK_ALWAYS_ON)) && GTEST_HAS_DEATH_TEST
+TEST(PersistentSampleMapIteratorDeathTest, IterateDoneTest) {
+  LocalPersistentMemoryAllocator allocator(64 << 10, 0, "");  // 64 KiB
+
+  HistogramSamples::Metadata* meta =
+      allocator.GetAsObject<HistogramSamples::Metadata>(
+          allocator.Allocate(sizeof(HistogramSamples::Metadata), 0), 0);
+  PersistentSampleMap samples(1, &allocator, meta);
+
+  scoped_ptr<SampleCountIterator> it = samples.Iterator();
+
+  EXPECT_TRUE(it->Done());
+
+  HistogramBase::Sample min;
+  HistogramBase::Sample max;
+  HistogramBase::Count count;
+  EXPECT_DEATH(it->Get(&min, &max, &count), "");
+
+  EXPECT_DEATH(it->Next(), "");
+
+  samples.Accumulate(1, 100);
+  it = samples.Iterator();
+  EXPECT_FALSE(it->Done());
+}
+#endif
+// (!defined(NDEBUG) || defined(DCHECK_ALWAYS_ON)) && GTEST_HAS_DEATH_TEST
+
+}  // namespace
+}  // namespace base
diff --git a/base/metrics/sample_map.cc b/base/metrics/sample_map.cc
index a691243..21a4e35 100644
--- a/base/metrics/sample_map.cc
+++ b/base/metrics/sample_map.cc
@@ -5,57 +5,39 @@
 #include "base/metrics/sample_map.h"
 
 #include "base/logging.h"
+#include "base/stl_util.h"
 
 namespace base {
 
 typedef HistogramBase::Count Count;
 typedef HistogramBase::Sample Sample;
 
-SampleMap::SampleMap() : SampleMap(0) {}
+namespace {
 
-SampleMap::SampleMap(uint64_t id) : HistogramSamples(id) {}
+// An iterator for going through a SampleMap. The logic here is identical
+// to that of PersistentSampleMapIterator but with different data structures.
+// Changes here likely need to be duplicated there.
+class SampleMapIterator : public SampleCountIterator {
+ public:
+  typedef std::map<HistogramBase::Sample, HistogramBase::Count>
+      SampleToCountMap;
 
-SampleMap::~SampleMap() {}
+  explicit SampleMapIterator(const SampleToCountMap& sample_counts);
+  ~SampleMapIterator() override;
 
-void SampleMap::Accumulate(Sample value, Count count) {
-  sample_counts_[value] += count;
-  IncreaseSum(count * value);
-  IncreaseRedundantCount(count);
-}
+  // SampleCountIterator:
+  bool Done() const override;
+  void Next() override;
+  void Get(HistogramBase::Sample* min,
+           HistogramBase::Sample* max,
+           HistogramBase::Count* count) const override;
 
-Count SampleMap::GetCount(Sample value) const {
-  std::map<Sample, Count>::const_iterator it = sample_counts_.find(value);
-  if (it == sample_counts_.end())
-    return 0;
-  return it->second;
-}
+ private:
+  void SkipEmptyBuckets();
 
-Count SampleMap::TotalCount() const {
-  Count count = 0;
-  for (const auto& entry : sample_counts_) {
-    count += entry.second;
-  }
-  return count;
-}
-
-scoped_ptr<SampleCountIterator> SampleMap::Iterator() const {
-  return scoped_ptr<SampleCountIterator>(new SampleMapIterator(sample_counts_));
-}
-
-bool SampleMap::AddSubtractImpl(SampleCountIterator* iter,
-                                HistogramSamples::Operator op) {
-  Sample min;
-  Sample max;
-  Count count;
-  for (; !iter->Done(); iter->Next()) {
-    iter->Get(&min, &max, &count);
-    if (min + 1 != max)
-      return false;  // SparseHistogram only supports bucket with size 1.
-
-    sample_counts_[min] += (op == HistogramSamples::ADD) ? count : -count;
-  }
-  return true;
-}
+  SampleToCountMap::const_iterator iter_;
+  const SampleToCountMap::const_iterator end_;
+};
 
 SampleMapIterator::SampleMapIterator(const SampleToCountMap& sample_counts)
     : iter_(sample_counts.begin()),
@@ -77,11 +59,11 @@
 
 void SampleMapIterator::Get(Sample* min, Sample* max, Count* count) const {
   DCHECK(!Done());
-  if (min != NULL)
+  if (min)
     *min = iter_->first;
-  if (max != NULL)
+  if (max)
     *max = iter_->first + 1;
-  if (count != NULL)
+  if (count)
     *count = iter_->second;
 }
 
@@ -91,4 +73,51 @@
   }
 }
 
+}  // namespace
+
+SampleMap::SampleMap() : SampleMap(0) {}
+
+SampleMap::SampleMap(uint64_t id) : HistogramSamples(id) {}
+
+SampleMap::~SampleMap() {}
+
+void SampleMap::Accumulate(Sample value, Count count) {
+  sample_counts_[value] += count;
+  IncreaseSum(static_cast<int64_t>(count) * value);
+  IncreaseRedundantCount(count);
+}
+
+Count SampleMap::GetCount(Sample value) const {
+  std::map<Sample, Count>::const_iterator it = sample_counts_.find(value);
+  if (it == sample_counts_.end())
+    return 0;
+  return it->second;
+}
+
+Count SampleMap::TotalCount() const {
+  Count count = 0;
+  for (const auto& entry : sample_counts_) {
+    count += entry.second;
+  }
+  return count;
+}
+
+scoped_ptr<SampleCountIterator> SampleMap::Iterator() const {
+  return make_scoped_ptr(new SampleMapIterator(sample_counts_));
+}
+
+bool SampleMap::AddSubtractImpl(SampleCountIterator* iter, Operator op) {
+  Sample min;
+  Sample max;
+  Count count;
+  for (; !iter->Done(); iter->Next()) {
+    iter->Get(&min, &max, &count);
+    if (min + 1 != max)
+      return false;  // SparseHistogram only supports buckets of size 1.
+
+    sample_counts_[min] += (op == HistogramSamples::ADD) ? count : -count;
+  }
+  return true;
+}
+
 }  // namespace base
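
The static_cast<int64_t> added to IncreaseSum() above fixes a real overflow: without widening one operand first, count * value is evaluated in 32-bit arithmetic and wraps (undefined behavior for signed int) before ever reaching the 64-bit sum. A minimal illustration, using the same magnitudes as the new Accumulate_LargeValuesDontOverflow tests:

#include <cstdint>
#include <iostream>

int main() {
  int32_t count = 200;
  int32_t value = 500000000;
  // int64_t sum = count * value;  // Bug: the product is computed in 32 bits
  //                               // first and overflows (undefined behavior).
  int64_t sum = static_cast<int64_t>(count) * value;  // Widen, then multiply.
  std::cout << sum << "\n";  // Prints 100000000000.
  return 0;
}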
diff --git a/base/metrics/sample_map.h b/base/metrics/sample_map.h
index da536e3..2f24e1f 100644
--- a/base/metrics/sample_map.h
+++ b/base/metrics/sample_map.h
@@ -20,13 +20,15 @@
 
 namespace base {
 
+// The logic here is similar to that of PersistentSampleMap but with different
+// data structures. Changes here likely need to be duplicated there.
 class BASE_EXPORT SampleMap : public HistogramSamples {
  public:
   SampleMap();
   explicit SampleMap(uint64_t id);
   ~SampleMap() override;
 
-  // HistogramSamples implementation:
+  // HistogramSamples:
   void Accumulate(HistogramBase::Sample value,
                   HistogramBase::Count count) override;
   HistogramBase::Count GetCount(HistogramBase::Sample value) const override;
@@ -34,9 +36,8 @@
   scoped_ptr<SampleCountIterator> Iterator() const override;
 
  protected:
-  bool AddSubtractImpl(
-      SampleCountIterator* iter,
-      HistogramSamples::Operator op) override;  // |op| is ADD or SUBTRACT.
+  // Performs arithmetic. |op| is ADD or SUBTRACT.
+  bool AddSubtractImpl(SampleCountIterator* iter, Operator op) override;
 
  private:
   std::map<HistogramBase::Sample, HistogramBase::Count> sample_counts_;
@@ -44,28 +45,6 @@
   DISALLOW_COPY_AND_ASSIGN(SampleMap);
 };
 
-class BASE_EXPORT SampleMapIterator : public SampleCountIterator {
- public:
-  typedef std::map<HistogramBase::Sample, HistogramBase::Count>
-      SampleToCountMap;
-
-  explicit SampleMapIterator(const SampleToCountMap& sample_counts);
-  ~SampleMapIterator() override;
-
-  // SampleCountIterator implementation:
-  bool Done() const override;
-  void Next() override;
-  void Get(HistogramBase::Sample* min,
-           HistogramBase::Sample* max,
-           HistogramBase::Count* count) const override;
-
- private:
-  void SkipEmptyBuckets();
-
-  SampleToCountMap::const_iterator iter_;
-  const SampleToCountMap::const_iterator end_;
-};
-
 }  // namespace base
 
 #endif  // BASE_METRICS_SAMPLE_MAP_H_
diff --git a/base/metrics/sample_map_unittest.cc b/base/metrics/sample_map_unittest.cc
index c941d65..3626bd0 100644
--- a/base/metrics/sample_map_unittest.cc
+++ b/base/metrics/sample_map_unittest.cc
@@ -24,6 +24,20 @@
   EXPECT_EQ(samples.redundant_count(), samples.TotalCount());
 }
 
+TEST(SampleMapTest, Accumulate_LargeValuesDontOverflow) {
+  SampleMap samples(1);
+
+  samples.Accumulate(250000000, 100);
+  samples.Accumulate(500000000, 200);
+  samples.Accumulate(250000000, -200);
+  EXPECT_EQ(-100, samples.GetCount(250000000));
+  EXPECT_EQ(200, samples.GetCount(500000000));
+
+  EXPECT_EQ(75000000000LL, samples.sum());
+  EXPECT_EQ(100, samples.TotalCount());
+  EXPECT_EQ(samples.redundant_count(), samples.TotalCount());
+}
+
 TEST(SampleMapTest, AddSubtractTest) {
   SampleMap samples1(1);
   SampleMap samples2(2);
diff --git a/base/metrics/sample_vector.cc b/base/metrics/sample_vector.cc
index 6120c50..e1603d9 100644
--- a/base/metrics/sample_vector.cc
+++ b/base/metrics/sample_vector.cc
@@ -43,7 +43,7 @@
   size_t bucket_index = GetBucketIndex(value);
   subtle::NoBarrier_Store(&counts_[bucket_index],
       subtle::NoBarrier_Load(&counts_[bucket_index]) + count);
-  IncreaseSum(count * value);
+  IncreaseSum(static_cast<int64_t>(count) * value);
   IncreaseRedundantCount(count);
 }
 
diff --git a/base/metrics/sample_vector.h b/base/metrics/sample_vector.h
index 0317869..86319ea 100644
--- a/base/metrics/sample_vector.h
+++ b/base/metrics/sample_vector.h
@@ -54,6 +54,7 @@
 
  private:
   FRIEND_TEST_ALL_PREFIXES(HistogramTest, CorruptSampleCounts);
+  FRIEND_TEST_ALL_PREFIXES(SharedHistogramTest, CorruptSampleCounts);
 
   // In the case where this class manages the memory, here it is.
   std::vector<HistogramBase::AtomicCount> local_counts_;
diff --git a/base/metrics/sample_vector_unittest.cc b/base/metrics/sample_vector_unittest.cc
index 744cbfa..434def7 100644
--- a/base/metrics/sample_vector_unittest.cc
+++ b/base/metrics/sample_vector_unittest.cc
@@ -44,6 +44,33 @@
   EXPECT_EQ(samples.TotalCount(), samples.redundant_count());
 }
 
+TEST(SampleVectorTest, Accumulate_LargeValuesDontOverflow) {
+  // Custom buckets: [1, 250000000) [250000000, 500000000)
+  BucketRanges ranges(3);
+  ranges.set_range(0, 1);
+  ranges.set_range(1, 250000000);
+  ranges.set_range(2, 500000000);
+  SampleVector samples(1, &ranges);
+
+  samples.Accumulate(240000000, 200);
+  samples.Accumulate(249999999, -300);
+  EXPECT_EQ(-100, samples.GetCountAtIndex(0));
+
+  samples.Accumulate(250000000, 200);
+  EXPECT_EQ(200, samples.GetCountAtIndex(1));
+
+  EXPECT_EQ(23000000300LL, samples.sum());
+  EXPECT_EQ(100, samples.redundant_count());
+  EXPECT_EQ(samples.TotalCount(), samples.redundant_count());
+
+  samples.Accumulate(250000000, -100);
+  EXPECT_EQ(100, samples.GetCountAtIndex(1));
+
+  EXPECT_EQ(-1999999700LL, samples.sum());
+  EXPECT_EQ(0, samples.redundant_count());
+  EXPECT_EQ(samples.TotalCount(), samples.redundant_count());
+}
+
 TEST(SampleVectorTest, AddSubtractTest) {
   // Custom buckets: [0, 1) [1, 2) [2, 3) [3, INT_MAX)
   BucketRanges ranges(5);
diff --git a/base/metrics/sparse_histogram.cc b/base/metrics/sparse_histogram.cc
index 37ea5e7..491fff0 100644
--- a/base/metrics/sparse_histogram.cc
+++ b/base/metrics/sparse_histogram.cc
@@ -7,6 +7,8 @@
 #include <utility>
 
 #include "base/metrics/metrics_hashes.h"
+#include "base/metrics/persistent_histogram_allocator.h"
+#include "base/metrics/persistent_sample_map.h"
 #include "base/metrics/sample_map.h"
 #include "base/metrics/statistics_recorder.h"
 #include "base/pickle.h"
@@ -21,23 +23,75 @@
 // static
 HistogramBase* SparseHistogram::FactoryGet(const std::string& name,
                                            int32_t flags) {
-  HistogramBase* histogram = StatisticsRecorder::FindHistogram(name);
+  // Import histograms from known persistent storage. Histograms could have
+  // been added by other processes and they must be fetched and recognized
+  // locally in order to be found by FindHistogram() below. If the persistent
+  // memory segment is not shared between processes, this call does nothing.
+  PersistentHistogramAllocator::ImportGlobalHistograms();
 
+  HistogramBase* histogram = StatisticsRecorder::FindHistogram(name);
   if (!histogram) {
-    // To avoid racy destruction at shutdown, the following will be leaked.
-    HistogramBase* tentative_histogram = new SparseHistogram(name);
-    tentative_histogram->SetFlags(flags);
-    histogram =
-        StatisticsRecorder::RegisterOrDeleteDuplicate(tentative_histogram);
+    // Try to create the histogram using a "persistent" allocator. As of
+    // 2016-02-25, the availability of such an allocator is controlled by a
+    // base::Feature that is off by default. If the allocator doesn't exist
+    // or if allocating from it fails, the code below will allocate the
+    // histogram from the process heap.
+    PersistentMemoryAllocator::Reference histogram_ref = 0;
+    scoped_ptr<HistogramBase> tentative_histogram;
+    PersistentHistogramAllocator* allocator =
+        PersistentHistogramAllocator::GetGlobalAllocator();
+    if (allocator) {
+      tentative_histogram = allocator->AllocateHistogram(
+          SPARSE_HISTOGRAM, name, 0, 0, nullptr, flags, &histogram_ref);
+    }
+
+    // Handle the case where no persistent allocator is present or the
+    // persistent allocation fails (perhaps because it is full).
+    if (!tentative_histogram) {
+      DCHECK(!histogram_ref);  // Should never have been set.
+      DCHECK(!allocator);      // Shouldn't have failed.
+      flags &= ~HistogramBase::kIsPersistent;
+      tentative_histogram.reset(new SparseHistogram(name));
+      tentative_histogram->SetFlags(flags);
+    }
+
+    // Register this histogram with the StatisticsRecorder. Keep a copy of
+    // the pointer value to tell later whether the locally created histogram
+    // was registered or deleted. The type is "void" because it could point
+    // to released memory after the following line.
+    const void* tentative_histogram_ptr = tentative_histogram.get();
+    histogram = StatisticsRecorder::RegisterOrDeleteDuplicate(
+        tentative_histogram.release());
+
+    // Persistent histograms need some follow-up processing.
+    if (histogram_ref) {
+      allocator->FinalizeHistogram(histogram_ref,
+                                   histogram == tentative_histogram_ptr);
+    }
+
+    ReportHistogramActivity(*histogram, HISTOGRAM_CREATED);
+  } else {
+    ReportHistogramActivity(*histogram, HISTOGRAM_LOOKUP);
   }
+
   DCHECK_EQ(SPARSE_HISTOGRAM, histogram->GetHistogramType());
   return histogram;
 }
 
+// static
+scoped_ptr<HistogramBase> SparseHistogram::PersistentCreate(
+    PersistentMemoryAllocator* allocator,
+    const std::string& name,
+    HistogramSamples::Metadata* meta,
+    HistogramSamples::Metadata* logged_meta) {
+  return make_scoped_ptr(
+      new SparseHistogram(allocator, name, meta, logged_meta));
+}
+
 SparseHistogram::~SparseHistogram() {}
 
 uint64_t SparseHistogram::name_hash() const {
-  return samples_.id();
+  return samples_->id();
 }
 
 HistogramType SparseHistogram::GetHistogramType() const {
@@ -47,7 +101,7 @@
 bool SparseHistogram::HasConstructionArguments(
     Sample /* expected_minimum */,
     Sample /* expected_maximum */,
-    size_t /* expected_bucket_count */) const {
+    uint32_t /* expected_bucket_count */) const {
   // SparseHistogram never has min/max/bucket_count limit.
   return false;
 }
@@ -63,7 +117,7 @@
   }
   {
     base::AutoLock auto_lock(lock_);
-    samples_.Accumulate(value, count);
+    samples_->Accumulate(value, count);
   }
 
   FindAndRunCallback(value);
@@ -73,18 +127,29 @@
   scoped_ptr<SampleMap> snapshot(new SampleMap(name_hash()));
 
   base::AutoLock auto_lock(lock_);
-  snapshot->Add(samples_);
+  snapshot->Add(*samples_);
+  return std::move(snapshot);
+}
+
+scoped_ptr<HistogramSamples> SparseHistogram::SnapshotDelta() {
+  scoped_ptr<SampleMap> snapshot(new SampleMap(name_hash()));
+  base::AutoLock auto_lock(lock_);
+  snapshot->Add(*samples_);
+
+  // Subtract what was previously logged and update that information.
+  snapshot->Subtract(*logged_samples_);
+  logged_samples_->Add(*snapshot);
   return std::move(snapshot);
 }
 
 void SparseHistogram::AddSamples(const HistogramSamples& samples) {
   base::AutoLock auto_lock(lock_);
-  samples_.Add(samples);
+  samples_->Add(samples);
 }
 
 bool SparseHistogram::AddSamplesFromPickle(PickleIterator* iter) {
   base::AutoLock auto_lock(lock_);
-  return samples_.AddFromPickle(iter);
+  return samples_->AddFromPickle(iter);
 }
 
 void SparseHistogram::WriteHTMLGraph(std::string* output) const {
@@ -103,7 +168,28 @@
 
 SparseHistogram::SparseHistogram(const std::string& name)
     : HistogramBase(name),
-      samples_(HashMetricName(name)) {}
+      samples_(new SampleMap(HashMetricName(name))),
+      logged_samples_(new SampleMap(samples_->id())) {}
+
+SparseHistogram::SparseHistogram(PersistentMemoryAllocator* allocator,
+                                 const std::string& name,
+                                 HistogramSamples::Metadata* meta,
+                                 HistogramSamples::Metadata* logged_meta)
+    : HistogramBase(name),
+      // While other histogram types maintain a static vector of values with
+      // sufficient space for both "active" and "logged" samples, with each
+      // SampleVector being given the appropriate half, sparse histograms
+      // have no such initial allocation. Each sample has its own record
+      // attached to a single PersistentSampleMap by a common 64-bit identifier.
+      // Since a sparse histogram has two sample maps (active and logged),
+      // there must be two sets of sample records with different IDs. For
+      // convenience, the "active" samples use an ID matching that of the
+      // histogram while the "logged" samples use that number plus 1.
+      samples_(new PersistentSampleMap(HashMetricName(name), allocator, meta)),
+      logged_samples_(
+          new PersistentSampleMap(samples_->id() + 1, allocator, logged_meta)) {
+}
 
 HistogramBase* SparseHistogram::DeserializeInfoImpl(PickleIterator* iter) {
   std::string histogram_name;
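
The new SnapshotDelta() above follows a simple report-what-changed protocol: copy the current samples, subtract everything previously logged, then fold the delta back into the logged set so the next call starts from here. A standalone model of that bookkeeping, with a plain std::map standing in for HistogramSamples:

#include <iostream>
#include <map>

using Samples = std::map<int, int>;  // value -> count; stand-in type.

Samples g_samples;  // Everything ever accumulated (samples_).
Samples g_logged;   // Everything reported so far (logged_samples_).

Samples SnapshotDelta() {
  Samples delta = g_samples;                 // snapshot->Add(*samples_)
  for (const auto& entry : g_logged)
    delta[entry.first] -= entry.second;      // snapshot->Subtract(*logged_samples_)
  for (const auto& entry : delta)
    g_logged[entry.first] += entry.second;   // logged_samples_->Add(*snapshot)
  return delta;
}

int main() {
  g_samples[5] += 3;
  std::cout << SnapshotDelta()[5] << "\n";  // 3: first delta sees everything.
  g_samples[5] += 2;
  std::cout << SnapshotDelta()[5] << "\n";  // 2: only what was added since.
  return 0;
}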
diff --git a/base/metrics/sparse_histogram.h b/base/metrics/sparse_histogram.h
index a77c020..b876737 100644
--- a/base/metrics/sparse_histogram.h
+++ b/base/metrics/sparse_histogram.h
@@ -22,6 +22,27 @@
 
 namespace base {
 
+// Sparse histograms are well suited for recording counts of exact sample values
+// that are sparsely distributed over a large range.
+//
+// The implementation uses a lock and a map, whereas other histogram types use a
+// vector and no lock. It is thus more costly to add values to, and each value
+// stored has more overhead, compared to the other histogram types. However, it
+// may be more efficient in memory if the total number of sample values is small
+// compared to the range of their values.
+//
+// UMA_HISTOGRAM_ENUMERATION would be better suited for a smaller range of
+// enumerations that are (nearly) contiguous, and for code that is expected to
+// run often or in a tight loop.
+//
+// UMA_HISTOGRAM_SPARSE_SLOWLY is good for sparsely distributed and/or
+// infrequently recorded values.
+//
+// For instance, Sqlite.Version.* are SPARSE because for any given database,
+// there's going to be exactly one version logged, meaning no gain to having a
+// pre-allocated vector of slots once the fleet gets to version 4 or 5 or 10.
+// Likewise Sqlite.Error.* are SPARSE, because most databases generate few or no
+// errors and there are large gaps in the set of possible errors.
 #define UMA_HISTOGRAM_SPARSE_SLOWLY(name, sample) \
     do { \
       base::HistogramBase* histogram = base::SparseHistogram::FactoryGet( \
@@ -30,6 +51,7 @@
     } while (0)
 
 class HistogramSamples;
+class PersistentMemoryAllocator;
 
 class BASE_EXPORT SparseHistogram : public HistogramBase {
  public:
@@ -37,6 +59,13 @@
   // new one.
   static HistogramBase* FactoryGet(const std::string& name, int32_t flags);
 
+  // Create a histogram using data in persistent storage.
+  static scoped_ptr<HistogramBase> PersistentCreate(
+      PersistentMemoryAllocator* allocator,
+      const std::string& name,
+      HistogramSamples::Metadata* meta,
+      HistogramSamples::Metadata* logged_meta);
+
   ~SparseHistogram() override;
 
   // HistogramBase implementation:
@@ -44,12 +73,13 @@
   HistogramType GetHistogramType() const override;
   bool HasConstructionArguments(Sample expected_minimum,
                                 Sample expected_maximum,
-                                size_t expected_bucket_count) const override;
+                                uint32_t expected_bucket_count) const override;
   void Add(Sample value) override;
   void AddCount(Sample value, int count) override;
   void AddSamples(const HistogramSamples& samples) override;
   bool AddSamplesFromPickle(base::PickleIterator* iter) override;
   scoped_ptr<HistogramSamples> SnapshotSamples() const override;
+  scoped_ptr<HistogramSamples> SnapshotDelta() override;
   void WriteHTMLGraph(std::string* output) const override;
   void WriteAscii(std::string* output) const override;
 
@@ -61,6 +91,11 @@
   // Clients should always use FactoryGet to create SparseHistogram.
   explicit SparseHistogram(const std::string& name);
 
+  SparseHistogram(PersistentMemoryAllocator* allocator,
+                  const std::string& name,
+                  HistogramSamples::Metadata* meta,
+                  HistogramSamples::Metadata* logged_meta);
+
   friend BASE_EXPORT HistogramBase* DeserializeHistogramInfo(
       base::PickleIterator* iter);
   static HistogramBase* DeserializeInfoImpl(base::PickleIterator* iter);
@@ -85,7 +120,8 @@
   // Protects access to |samples_|.
   mutable base::Lock lock_;
 
-  SampleMap samples_;
+  scoped_ptr<HistogramSamples> samples_;
+  scoped_ptr<HistogramSamples> logged_samples_;
 
   DISALLOW_COPY_AND_ASSIGN(SparseHistogram);
 };
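
A hypothetical call site for the macro documented above, mirroring the Sqlite.Version example from the comment. The function name and variable are illustrative, and this only builds inside a Chromium checkout:

#include "base/metrics/sparse_histogram.h"

void LogDatabaseVersion(int sqlite_version) {
  // One exact value per database, sparsely spread over a wide range -- the
  // case the comment above calls out as a good fit for a sparse histogram.
  UMA_HISTOGRAM_SPARSE_SLOWLY("Sqlite.Version", sqlite_version);
}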
diff --git a/base/metrics/sparse_histogram_unittest.cc b/base/metrics/sparse_histogram_unittest.cc
index 83cf5d3..5d5dbcb 100644
--- a/base/metrics/sparse_histogram_unittest.cc
+++ b/base/metrics/sparse_histogram_unittest.cc
@@ -9,6 +9,8 @@
 #include "base/memory/scoped_ptr.h"
 #include "base/metrics/histogram_base.h"
 #include "base/metrics/histogram_samples.h"
+#include "base/metrics/persistent_histogram_allocator.h"
+#include "base/metrics/persistent_memory_allocator.h"
 #include "base/metrics/sample_map.h"
 #include "base/metrics/statistics_recorder.h"
 #include "base/pickle.h"
@@ -17,17 +19,35 @@
 
 namespace base {
 
-class SparseHistogramTest : public testing::Test {
+// Test parameter indicates if a persistent memory allocator should be used
+// for histogram allocation. False will allocate histograms from the process
+// heap.
+class SparseHistogramTest : public testing::TestWithParam<bool> {
  protected:
+  const int32_t kAllocatorMemorySize = 8 << 20;  // 8 MiB
+
+  SparseHistogramTest() : use_persistent_histogram_allocator_(GetParam()) {}
+
   void SetUp() override {
+    if (use_persistent_histogram_allocator_)
+      CreatePersistentMemoryAllocator();
+
     // Each test will have a clean state (no Histogram / BucketRanges
     // registered).
     InitializeStatisticsRecorder();
   }
 
-  void TearDown() override { UninitializeStatisticsRecorder(); }
+  void TearDown() override {
+    if (allocator_) {
+      ASSERT_FALSE(allocator_->IsFull());
+      ASSERT_FALSE(allocator_->IsCorrupt());
+    }
+    UninitializeStatisticsRecorder();
+    DestroyPersistentMemoryAllocator();
+  }
 
   void InitializeStatisticsRecorder() {
+    StatisticsRecorder::ResetForTesting();
     statistics_recorder_ = new StatisticsRecorder();
   }
 
@@ -36,14 +56,44 @@
     statistics_recorder_ = NULL;
   }
 
+  void CreatePersistentMemoryAllocator() {
+    // By getting the results-histogram before any persistent allocator
+    // is attached, that histogram is guaranteed not to be stored in
+    // any persistent memory segment (which simplifies some tests).
+    PersistentHistogramAllocator::GetCreateHistogramResultHistogram();
+
+    PersistentHistogramAllocator::CreateGlobalAllocatorOnLocalMemory(
+        kAllocatorMemorySize, 0, "SparseHistogramAllocatorTest");
+    allocator_ =
+        PersistentHistogramAllocator::GetGlobalAllocator()->memory_allocator();
+  }
+
+  void DestroyPersistentMemoryAllocator() {
+    allocator_ = nullptr;
+    PersistentHistogramAllocator::ReleaseGlobalAllocatorForTesting();
+  }
+
   scoped_ptr<SparseHistogram> NewSparseHistogram(const std::string& name) {
     return scoped_ptr<SparseHistogram>(new SparseHistogram(name));
   }
 
+  const bool use_persistent_histogram_allocator_;
+
   StatisticsRecorder* statistics_recorder_;
+  scoped_ptr<char[]> allocator_memory_;
+  PersistentMemoryAllocator* allocator_ = nullptr;
+
+ private:
+  DISALLOW_COPY_AND_ASSIGN(SparseHistogramTest);
 };
 
-TEST_F(SparseHistogramTest, BasicTest) {
+// Run all SparseHistogramTest cases with both heap and persistent memory.
+INSTANTIATE_TEST_CASE_P(HeapAndPersistent,
+                        SparseHistogramTest,
+                        testing::Bool());
+
+TEST_P(SparseHistogramTest, BasicTest) {
   scoped_ptr<SparseHistogram> histogram(NewSparseHistogram("Sparse"));
   scoped_ptr<HistogramSamples> snapshot(histogram->SnapshotSamples());
   EXPECT_EQ(0, snapshot->TotalCount());
@@ -62,7 +112,7 @@
   EXPECT_EQ(1, snapshot2->GetCount(101));
 }
 
-TEST_F(SparseHistogramTest, BasicTestAddCount) {
+TEST_P(SparseHistogramTest, BasicTestAddCount) {
   scoped_ptr<SparseHistogram> histogram(NewSparseHistogram("Sparse"));
   scoped_ptr<HistogramSamples> snapshot(histogram->SnapshotSamples());
   EXPECT_EQ(0, snapshot->TotalCount());
@@ -81,7 +131,27 @@
   EXPECT_EQ(25, snapshot2->GetCount(101));
 }
 
-TEST_F(SparseHistogramTest, MacroBasicTest) {
+TEST_P(SparseHistogramTest, AddCount_LargeValuesDontOverflow) {
+  scoped_ptr<SparseHistogram> histogram(NewSparseHistogram("Sparse"));
+  scoped_ptr<HistogramSamples> snapshot(histogram->SnapshotSamples());
+  EXPECT_EQ(0, snapshot->TotalCount());
+  EXPECT_EQ(0, snapshot->sum());
+
+  histogram->AddCount(1000000000, 15);
+  scoped_ptr<HistogramSamples> snapshot1(histogram->SnapshotSamples());
+  EXPECT_EQ(15, snapshot1->TotalCount());
+  EXPECT_EQ(15, snapshot1->GetCount(1000000000));
+
+  histogram->AddCount(1000000000, 15);
+  histogram->AddCount(1010000000, 25);
+  scoped_ptr<HistogramSamples> snapshot2(histogram->SnapshotSamples());
+  EXPECT_EQ(55, snapshot2->TotalCount());
+  EXPECT_EQ(30, snapshot2->GetCount(1000000000));
+  EXPECT_EQ(25, snapshot2->GetCount(1010000000));
+  EXPECT_EQ(55250000000LL, snapshot2->sum());
+}
+
+TEST_P(SparseHistogramTest, MacroBasicTest) {
   UMA_HISTOGRAM_SPARSE_SLOWLY("Sparse", 100);
   UMA_HISTOGRAM_SPARSE_SLOWLY("Sparse", 200);
   UMA_HISTOGRAM_SPARSE_SLOWLY("Sparse", 100);
@@ -94,8 +164,11 @@
 
   EXPECT_EQ(SPARSE_HISTOGRAM, sparse_histogram->GetHistogramType());
   EXPECT_EQ("Sparse", sparse_histogram->histogram_name());
-  EXPECT_EQ(HistogramBase::kUmaTargetedHistogramFlag,
-            sparse_histogram->flags());
+  EXPECT_EQ(
+      HistogramBase::kUmaTargetedHistogramFlag |
+          (use_persistent_histogram_allocator_ ? HistogramBase::kIsPersistent
+                                               : 0),
+      sparse_histogram->flags());
 
   scoped_ptr<HistogramSamples> samples = sparse_histogram->SnapshotSamples();
   EXPECT_EQ(3, samples->TotalCount());
@@ -103,7 +176,7 @@
   EXPECT_EQ(1, samples->GetCount(200));
 }
 
-TEST_F(SparseHistogramTest, MacroInLoopTest) {
+TEST_P(SparseHistogramTest, MacroInLoopTest) {
   // Unlike the macros in histogram.h, SparseHistogram macros can have a
   // variable as histogram name.
   for (int i = 0; i < 2; i++) {
@@ -121,7 +194,7 @@
               ("Sparse2" == name1 && "Sparse1" == name2));
 }
 
-TEST_F(SparseHistogramTest, Serialize) {
+TEST_P(SparseHistogramTest, Serialize) {
   scoped_ptr<SparseHistogram> histogram(NewSparseHistogram("Sparse"));
   histogram->SetFlags(HistogramBase::kIPCSerializationSourceFlag);
 
@@ -146,4 +219,64 @@
   EXPECT_FALSE(iter.SkipBytes(1));
 }
 
+TEST_P(SparseHistogramTest, FactoryTime) {
+  const int kTestCreateCount = 1 << 10;  // Must be power-of-2.
+  const int kTestLookupCount = 100000;
+  const int kTestAddCount = 100000;
+
+  // Create all histogram names in advance for accurate timing below.
+  std::vector<std::string> histogram_names;
+  for (int i = 0; i < kTestCreateCount; ++i) {
+    histogram_names.push_back(
+        StringPrintf("TestHistogram.%d", i % kTestCreateCount));
+  }
+
+  // Calculate cost of creating histograms.
+  TimeTicks create_start = TimeTicks::Now();
+  for (int i = 0; i < kTestCreateCount; ++i)
+    SparseHistogram::FactoryGet(histogram_names[i], HistogramBase::kNoFlags);
+  TimeDelta create_ticks = TimeTicks::Now() - create_start;
+  int64_t create_ms = create_ticks.InMilliseconds();
+
+  VLOG(1) << kTestCreateCount << " histogram creations took " << create_ms
+          << "ms or about "
+          << (create_ms * 1000000) / kTestCreateCount
+          << "ns each.";
+
+  // Calculate cost of looking up existing histograms.
+  TimeTicks lookup_start = TimeTicks::Now();
+  for (int i = 0; i < kTestLookupCount; ++i) {
+    // 6007 is co-prime with kTestCreateCount and so will do lookups in an
+    // order less likely to be cacheable (but still hit them all) should the
+    // underlying storage use the exact histogram name as the key.
+    const int i_mult = 6007;
+    static_assert(i_mult < INT_MAX / kTestCreateCount, "Multiplier too big");
+    int index = (i * i_mult) & (kTestCreateCount - 1);
+    SparseHistogram::FactoryGet(histogram_names[index],
+                                HistogramBase::kNoFlags);
+  }
+  TimeDelta lookup_ticks = TimeTicks::Now() - lookup_start;
+  int64_t lookup_ms = lookup_ticks.InMilliseconds();
+
+  VLOG(1) << kTestLookupCount << " histogram lookups took " << lookup_ms
+          << "ms or about "
+          << (lookup_ms * 1000000) / kTestLookupCount
+          << "ns each.";
+
+  // Calculate cost of accessing histograms.
+  HistogramBase* histogram =
+      SparseHistogram::FactoryGet(histogram_names[0], HistogramBase::kNoFlags);
+  ASSERT_TRUE(histogram);
+  TimeTicks add_start = TimeTicks::Now();
+  for (int i = 0; i < kTestAddCount; ++i)
+    histogram->Add(i & 127);
+  TimeDelta add_ticks = TimeTicks::Now() - add_start;
+  int64_t add_ms = add_ticks.InMilliseconds();
+
+  VLOG(1) << kTestAddCount << " histogram adds took " << add_ms
+          << "ms or about "
+          << (add_ms * 1000000) / kTestAddCount
+          << "ns each.";
+}
+
 }  // namespace base
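
The 6007 multiplier in FactoryTime above deserves a second look: because kTestCreateCount is a power of two and 6007 is odd, (i * 6007) & (kTestCreateCount - 1) is a bijection, so the lookup loop still touches every histogram exactly once, just in a cache-hostile order. A standalone check of that claim:

#include <iostream>
#include <vector>

int main() {
  const int kCount = 1 << 10;  // Power of two, as in the test.
  const int kMult = 6007;      // Odd, hence co-prime with any power of two.
  std::vector<bool> seen(kCount, false);
  for (int i = 0; i < kCount; ++i)
    seen[(i * kMult) & (kCount - 1)] = true;  // Same indexing as the test.
  int visited = 0;
  for (bool s : seen)
    visited += s;
  std::cout << visited << "/" << kCount << "\n";  // Prints 1024/1024.
  return 0;
}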
diff --git a/base/metrics/statistics_recorder.cc b/base/metrics/statistics_recorder.cc
index f8257f4..6156e72 100644
--- a/base/metrics/statistics_recorder.cc
+++ b/base/metrics/statistics_recorder.cc
@@ -16,13 +16,65 @@
 #include "base/values.h"
 
 namespace {
+
 // Initialize histogram statistics gathering system.
 base::LazyInstance<base::StatisticsRecorder>::Leaky g_statistics_recorder_ =
     LAZY_INSTANCE_INITIALIZER;
+
+bool HistogramNameLesser(const base::HistogramBase* a,
+                         const base::HistogramBase* b) {
+  return a->histogram_name() < b->histogram_name();
+}
+
 }  // namespace
 
 namespace base {
 
+StatisticsRecorder::HistogramIterator::HistogramIterator(
+    const HistogramMap::iterator& iter, bool include_persistent)
+    : iter_(iter),
+      include_persistent_(include_persistent) {
+}
+
+StatisticsRecorder::HistogramIterator::HistogramIterator(
+    const HistogramIterator& rhs)
+    : iter_(rhs.iter_),
+      include_persistent_(rhs.include_persistent_) {
+}
+
+StatisticsRecorder::HistogramIterator::~HistogramIterator() {}
+
+StatisticsRecorder::HistogramIterator&
+StatisticsRecorder::HistogramIterator::operator++() {
+  const HistogramMap::iterator histograms_end = histograms_->end();
+  if (iter_ == histograms_end || lock_ == NULL)
+    return *this;
+
+  base::AutoLock auto_lock(*lock_);
+
+  for (;;) {
+    ++iter_;
+    if (iter_ == histograms_end)
+      break;
+    if (!include_persistent_ && (iter_->second->flags() &
+                                 HistogramBase::kIsPersistent)) {
+      continue;
+    }
+    break;
+  }
+
+  return *this;
+}
+
+StatisticsRecorder::~StatisticsRecorder() {
+  DCHECK(lock_);
+  DCHECK(histograms_);
+  DCHECK(ranges_);
+
+  // Global clean up.
+  Reset();
+}
+
 // static
 void StatisticsRecorder::Initialize() {
   // Ensure that an instance of the StatisticsRecorder object is created.
@@ -53,7 +105,8 @@
       histogram_to_return = histogram;
     } else {
       const std::string& name = histogram->histogram_name();
-      uint64_t name_hash = histogram->name_hash();
+      const uint64_t name_hash = histogram->name_hash();
+      DCHECK_NE(0U, name_hash);
       HistogramMap::iterator it = histograms_->find(name_hash);
       if (histograms_->end() == it) {
         (*histograms_)[name_hash] = histogram;
@@ -132,6 +185,7 @@
 
   Histograms snapshot;
   GetSnapshot(query, &snapshot);
+  std::sort(snapshot.begin(), snapshot.end(), &HistogramNameLesser);
   for (const HistogramBase* histogram : snapshot) {
     histogram->WriteHTMLGraph(output);
     output->append("<br><hr><br>");
@@ -150,6 +204,7 @@
 
   Histograms snapshot;
   GetSnapshot(query, &snapshot);
+  std::sort(snapshot.begin(), snapshot.end(), &HistogramNameLesser);
   for (const HistogramBase* histogram : snapshot) {
     histogram->WriteAscii(output);
     output->append("\n");
@@ -216,7 +271,7 @@
 }
 
 // static
-HistogramBase* StatisticsRecorder::FindHistogram(const std::string& name) {
+HistogramBase* StatisticsRecorder::FindHistogram(base::StringPiece name) {
   if (lock_ == NULL)
     return NULL;
   base::AutoLock auto_lock(*lock_);
@@ -231,6 +286,32 @@
 }
 
 // static
+StatisticsRecorder::HistogramIterator StatisticsRecorder::begin(
+    bool include_persistent) {
+  return HistogramIterator(histograms_->begin(), include_persistent);
+}
+
+// static
+StatisticsRecorder::HistogramIterator StatisticsRecorder::end() {
+  return HistogramIterator(histograms_->end(), true);
+}
+
+// static
+void StatisticsRecorder::GetSnapshot(const std::string& query,
+                                     Histograms* snapshot) {
+  if (lock_ == NULL)
+    return;
+  base::AutoLock auto_lock(*lock_);
+  if (histograms_ == NULL)
+    return;
+
+  for (const auto& entry : *histograms_) {
+    if (entry.second->histogram_name().find(query) != std::string::npos)
+      snapshot->push_back(entry.second);
+  }
+}
+
+// static
 bool StatisticsRecorder::SetCallback(
     const std::string& name,
     const StatisticsRecorder::OnSampleCallback& cb) {
@@ -245,7 +326,7 @@
     return false;
   callbacks_->insert(std::make_pair(name, cb));
 
-  HistogramMap::iterator it = histograms_->find(HashMetricName(name));
+  auto it = histograms_->find(HashMetricName(name));
   if (it != histograms_->end()) {
     DCHECK_EQ(name, it->second->histogram_name()) << "hash collision";
     it->second->SetFlags(HistogramBase::kCallbackExists);
@@ -265,7 +346,7 @@
   callbacks_->erase(name);
 
   // We also clear the flag from the histogram (if it exists).
-  HistogramMap::iterator it = histograms_->find(HashMetricName(name));
+  auto it = histograms_->find(HashMetricName(name));
   if (it != histograms_->end()) {
     DCHECK_EQ(name, it->second->histogram_name()) << "hash collision";
     it->second->ClearFlags(HistogramBase::kCallbackExists);
@@ -286,19 +367,27 @@
                                                 : OnSampleCallback();
 }
 
-// private static
-void StatisticsRecorder::GetSnapshot(const std::string& query,
-                                     Histograms* snapshot) {
-  if (lock_ == NULL)
-    return;
-  base::AutoLock auto_lock(*lock_);
-  if (histograms_ == NULL)
-    return;
+// static
+size_t StatisticsRecorder::GetHistogramCount() {
+  if (!lock_)
+    return 0;
 
-  for (const auto& entry : *histograms_) {
-    if (entry.second->histogram_name().find(query) != std::string::npos)
-      snapshot->push_back(entry.second);
-  }
+  base::AutoLock auto_lock(*lock_);
+  if (!histograms_)
+    return 0;
+  return histograms_->size();
+}
+
+// static
+void StatisticsRecorder::ResetForTesting() {
+  // Just call the private version that is also used by the destructor.
+  Reset();
+}
+
+// static
+void StatisticsRecorder::ForgetHistogramForTesting(base::StringPiece name) {
+  if (histograms_)
+    histograms_->erase(HashMetricName(name.as_string()));
 }
 
 // This singleton instance should be started during the single threaded portion
@@ -325,16 +414,11 @@
 }
 
 // static
-void StatisticsRecorder::DumpHistogramsToVlog(void* /* instance */) {
-  std::string output;
-  StatisticsRecorder::WriteGraph(std::string(), &output);
-  VLOG(1) << output;
-}
+void StatisticsRecorder::Reset() {
+  // If there's no lock then there is nothing to reset.
+  if (!lock_)
+    return;
 
-StatisticsRecorder::~StatisticsRecorder() {
-  DCHECK(histograms_ && ranges_ && lock_);
-
-  // Clean up.
   scoped_ptr<HistogramMap> histograms_deleter;
   scoped_ptr<CallbackMap> callbacks_deleter;
   scoped_ptr<RangesMap> ranges_deleter;
@@ -352,6 +436,13 @@
   // We are going to leak the histograms and the ranges.
 }
 
+// static
+void StatisticsRecorder::DumpHistogramsToVlog(void* /* instance */) {
+  std::string output;
+  StatisticsRecorder::WriteGraph(std::string(), &output);
+  VLOG(1) << output;
+}
 
 // static
 StatisticsRecorder::HistogramMap* StatisticsRecorder::histograms_ = NULL;
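
HistogramIterator::operator++ above advances past entries flagged kIsPersistent when include_persistent is false. Here is the filtering idea in isolation; the Entry type and Next helper are illustrative stand-ins, not the real classes:

#include <cstdint>
#include <iostream>
#include <map>
#include <string>

struct Entry {
  std::string name;
  bool persistent;  // Models HistogramBase::kIsPersistent.
};
using EntryMap = std::map<uint64_t, Entry>;

// Advance, skipping persistent entries when they are excluded. As with the
// real operator++, filtering happens only while advancing.
EntryMap::iterator Next(EntryMap& map,
                        EntryMap::iterator it,
                        bool include_persistent) {
  do {
    ++it;
  } while (it != map.end() && !include_persistent && it->second.persistent);
  return it;
}

int main() {
  EntryMap map = {{1, {"Heap1", false}},
                  {2, {"Persist", true}},
                  {3, {"Heap2", false}}};
  for (auto it = map.begin(); it != map.end(); it = Next(map, it, false))
    std::cout << it->second.name << "\n";  // Prints Heap1 then Heap2.
  return 0;
}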
diff --git a/base/metrics/statistics_recorder.h b/base/metrics/statistics_recorder.h
index b1d182e..6eaf079 100644
--- a/base/metrics/statistics_recorder.h
+++ b/base/metrics/statistics_recorder.h
@@ -23,6 +23,7 @@
 #include "base/lazy_instance.h"
 #include "base/macros.h"
 #include "base/metrics/histogram_base.h"
+#include "base/strings/string_piece.h"
 
 namespace base {
 
@@ -31,8 +32,39 @@
 
 class BASE_EXPORT StatisticsRecorder {
  public:
+  typedef std::map<uint64_t, HistogramBase*> HistogramMap;  // Key is name-hash.
   typedef std::vector<HistogramBase*> Histograms;
 
+  // A class for iterating over the histograms held within this global resource.
+  class BASE_EXPORT HistogramIterator {
+   public:
+    HistogramIterator(const HistogramMap::iterator& iter,
+                      bool include_persistent);
+    HistogramIterator(const HistogramIterator& rhs);  // Must be copyable.
+    ~HistogramIterator();
+
+    HistogramIterator& operator++();
+    HistogramIterator operator++(int) {
+      HistogramIterator tmp(*this);
+      operator++();
+      return tmp;
+    }
+
+    bool operator==(const HistogramIterator& rhs) const {
+      return iter_ == rhs.iter_;
+    }
+    bool operator!=(const HistogramIterator& rhs) const {
+      return iter_ != rhs.iter_;
+    }
+    HistogramBase* operator*() { return iter_->second; }
+
+   private:
+    HistogramMap::iterator iter_;
+    const bool include_persistent_;
+  };
+
+  ~StatisticsRecorder();
+
   // Initializes the StatisticsRecorder system. Safe to call multiple times.
   static void Initialize();
 
@@ -70,7 +102,11 @@
 
   // Find a histogram by name. It matches the exact name. This method is thread
   // safe.  It returns NULL if a matching histogram is not found.
-  static HistogramBase* FindHistogram(const std::string& name);
+  static HistogramBase* FindHistogram(base::StringPiece name);
+
+  // Support for iterating over known histograms.
+  static HistogramIterator begin(bool include_persistent);
+  static HistogramIterator end();
 
   // GetSnapshot copies some of the pointers to registered histograms into the
   // caller supplied vector (Histograms). Only histograms which have |query| as
@@ -96,11 +132,19 @@
   // histogram. This method is thread safe.
   static OnSampleCallback FindCallback(const std::string& histogram_name);
 
- private:
-  // We keep all registered histograms in a map, indexed by the hash of the
-  // name of the histogram.
-  typedef std::map<uint64_t, HistogramBase*> HistogramMap;
+  // Returns the number of known histograms.
+  static size_t GetHistogramCount();
 
+  // Clears all of the known histograms and resets static variables to a
+  // state that allows a new initialization.
+  static void ResetForTesting();
+
+  // Removes a histogram from the internal set of known ones. This can be
+  // necessary during testing persistent histograms where the underlying
+  // memory is being released.
+  static void ForgetHistogramForTesting(base::StringPiece name);
+
+ private:
   // We keep a map of callbacks to histograms, so that as histograms are
   // created, we can set the callback properly.
   typedef std::map<std::string, OnSampleCallback> CallbackMap;
@@ -115,6 +159,7 @@
   friend class HistogramSnapshotManagerTest;
   friend class HistogramTest;
   friend class JsonPrefStoreTest;
+  friend class SharedHistogramTest;
   friend class SparseHistogramTest;
   friend class StatisticsRecorderTest;
   FRIEND_TEST_ALL_PREFIXES(HistogramDeltaSerializationTest,
@@ -122,10 +167,10 @@
 
   // The constructor just initializes static members. Usually client code should
   // use Initialize to do this. But in test code, you can friend this class and
-  // call destructor/constructor to get a clean StatisticsRecorder.
+  // call the constructor to get a clean StatisticsRecorder.
   StatisticsRecorder();
-  ~StatisticsRecorder();
 
+  static void Reset();
   static void DumpHistogramsToVlog(void* instance);
 
   static HistogramMap* histograms_;
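
The HistogramMap typedef made public above keys histograms by a 64-bit hash of the name rather than by the name itself, which is why the lookups in the .cc file DCHECK the stored name against the query to catch hash collisions. A toy model of that scheme, with std::hash standing in for the real metrics hash:

#include <cstdint>
#include <functional>
#include <iostream>
#include <map>
#include <string>

// Stand-in for base::HashMetricName; the real one is an MD5-derived digest.
uint64_t HashName(const std::string& name) {
  return std::hash<std::string>()(name);
}

std::map<uint64_t, std::string> g_registry;  // name-hash -> histogram name.

const std::string& RegisterOrFind(const std::string& name) {
  const uint64_t hash = HashName(name);
  auto it = g_registry.find(hash);
  if (it == g_registry.end())
    it = g_registry.emplace(hash, name).first;
  else if (it->second != name)
    std::cerr << "hash collision\n";  // Mirrors the DCHECK_EQ in the .cc file.
  return it->second;
}

int main() {
  const std::string& a = RegisterOrFind("Sparse");
  const std::string& b = RegisterOrFind("Sparse");  // Found, not re-created.
  std::cout << (&a == &b) << "\n";  // Prints 1: one entry per name-hash.
  return 0;
}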
diff --git a/base/metrics/statistics_recorder_unittest.cc b/base/metrics/statistics_recorder_unittest.cc
index af5c1e7..073cbb1 100644
--- a/base/metrics/statistics_recorder_unittest.cc
+++ b/base/metrics/statistics_recorder_unittest.cc
@@ -10,6 +10,7 @@
 #include "base/json/json_reader.h"
 #include "base/memory/scoped_ptr.h"
 #include "base/metrics/histogram_macros.h"
+#include "base/metrics/persistent_histogram_allocator.h"
 #include "base/metrics/sparse_histogram.h"
 #include "base/metrics/statistics_recorder.h"
 #include "base/values.h"
@@ -20,12 +21,18 @@
 class StatisticsRecorderTest : public testing::Test {
  protected:
   void SetUp() override {
+    // Get this first so it never gets created in persistent storage and will
+    // not appear in the StatisticsRecorder after it is re-initialized.
+    PersistentHistogramAllocator::GetCreateHistogramResultHistogram();
     // Each test will have a clean state (no Histogram / BucketRanges
     // registered).
     InitializeStatisticsRecorder();
   }
 
-  void TearDown() override { UninitializeStatisticsRecorder(); }
+  void TearDown() override {
+    UninitializeStatisticsRecorder();
+    PersistentHistogramAllocator::ReleaseGlobalAllocatorForTesting();
+  }
 
   void InitializeStatisticsRecorder() {
     statistics_recorder_ = new StatisticsRecorder();
@@ -315,6 +322,23 @@
   EXPECT_TRUE(json.empty());
 }
 
+TEST_F(StatisticsRecorderTest, IterationTest) {
+  StatisticsRecorder::Histograms registered_histograms;
+  LOCAL_HISTOGRAM_COUNTS("TestHistogram.IterationTest1", 30);
+  PersistentHistogramAllocator::CreateGlobalAllocatorOnLocalMemory(
+      64 << 10 /* 64 KiB */, 0, "");
+  LOCAL_HISTOGRAM_COUNTS("TestHistogram.IterationTest2", 30);
+
+  StatisticsRecorder::HistogramIterator i1 = StatisticsRecorder::begin(true);
+  EXPECT_NE(StatisticsRecorder::end(), i1);
+  EXPECT_NE(StatisticsRecorder::end(), ++i1);
+  EXPECT_EQ(StatisticsRecorder::end(), ++i1);
+
+  StatisticsRecorder::HistogramIterator i2 = StatisticsRecorder::begin(false);
+  EXPECT_NE(StatisticsRecorder::end(), i2);
+  EXPECT_EQ(StatisticsRecorder::end(), ++i2);
+}
+
 namespace {
 
 // CallbackCheckWrapper is simply a convenient way to check and store that
diff --git a/base/metrics/user_metrics.cc b/base/metrics/user_metrics.cc
new file mode 100644
index 0000000..55467e6
--- /dev/null
+++ b/base/metrics/user_metrics.cc
@@ -0,0 +1,77 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/metrics/user_metrics.h"
+
+#include <stddef.h>
+
+#include <vector>
+
+#include "base/lazy_instance.h"
+#include "base/macros.h"
+#include "base/threading/thread_checker.h"
+
+namespace base {
+namespace {
+
+// A helper class for tracking callbacks and ensuring thread-safety.
+class Callbacks {
+ public:
+  Callbacks() {}
+
+  // Records the |action|.
+  void Record(const std::string& action) {
+    DCHECK(thread_checker_.CalledOnValidThread());
+    for (size_t i = 0; i < callbacks_.size(); ++i) {
+      callbacks_[i].Run(action);
+    }
+  }
+
+  // Adds |callback| to the list of |callbacks_|.
+  void AddCallback(const ActionCallback& callback) {
+    DCHECK(thread_checker_.CalledOnValidThread());
+    callbacks_.push_back(callback);
+  }
+
+  // Removes the first instance of |callback| from the list of |callbacks_|, if
+  // there is one.
+  void RemoveCallback(const ActionCallback& callback) {
+    DCHECK(thread_checker_.CalledOnValidThread());
+    for (size_t i = 0; i < callbacks_.size(); ++i) {
+      if (callbacks_[i].Equals(callback)) {
+        callbacks_.erase(callbacks_.begin() + i);
+        return;
+      }
+    }
+  }
+
+ private:
+  base::ThreadChecker thread_checker_;
+  std::vector<ActionCallback> callbacks_;
+
+  DISALLOW_COPY_AND_ASSIGN(Callbacks);
+};
+
+base::LazyInstance<Callbacks> g_callbacks = LAZY_INSTANCE_INITIALIZER;
+
+}  // namespace
+
+void RecordAction(const UserMetricsAction& action) {
+  g_callbacks.Get().Record(action.str_);
+}
+
+void RecordComputedAction(const std::string& action) {
+  g_callbacks.Get().Record(action);
+}
+
+void AddActionCallback(const ActionCallback& callback) {
+  g_callbacks.Get().AddCallback(callback);
+}
+
+void RemoveActionCallback(const ActionCallback& callback) {
+  g_callbacks.Get().RemoveCallback(callback);
+}
+
+}  // namespace base
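
A hypothetical call site for the helpers above; LogAction is an illustrative name, and this compiles only inside a Chromium checkout. Note that RecordAction must run on the thread that owns the callback list, per the ThreadChecker in Callbacks:

#include <string>

#include "base/bind.h"
#include "base/logging.h"
#include "base/metrics/user_metrics.h"

void LogAction(const std::string& action) {
  VLOG(1) << "user action: " << action;
}

void Example() {
  base::ActionCallback callback = base::Bind(&LogAction);
  base::AddActionCallback(callback);
  // The action name must be an inline string literal so the metrics
  // processing scripts can find it.
  base::RecordAction(base::UserMetricsAction("Reload"));  // Calls LogAction.
  base::RemoveActionCallback(callback);
}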
diff --git a/base/metrics/user_metrics.h b/base/metrics/user_metrics.h
new file mode 100644
index 0000000..bcfefb8
--- /dev/null
+++ b/base/metrics/user_metrics.h
@@ -0,0 +1,60 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_METRICS_USER_METRICS_H_
+#define BASE_METRICS_USER_METRICS_H_
+
+#include <string>
+
+#include "base/base_export.h"
+#include "base/callback.h"
+#include "base/metrics/user_metrics_action.h"
+
+namespace base {
+
+// This module provides some helper functions for logging actions tracked by
+// the user metrics system.
+
+// Record that the user performed an action.
+// This method *must* be called from the main thread.
+//
+// "Action" here means a user-generated event:
+//   good: "Reload", "CloseTab", and "IMEInvoked"
+//   not good: "SSLDialogShown", "PageLoaded", "DiskFull"
+// We use this to gather anonymized information about how users are
+// interacting with the browser.
+// WARNING: In calls to this function, UserMetricsAction and a
+// string literal parameter must be on the same line, e.g.
+//   RecordAction(UserMetricsAction("my extremely long action name"));
+// This ensures that our processing scripts can associate this action's hash
+// with its metric name. Therefore, it will be possible to retrieve the metric
+// name from the hash later on.
+//
+// Once a new recorded action is added, run
+//   tools/metrics/actions/extract_actions.py
+// to add the metric to actions.xml, then update the <owner>s and <description>
+// sections. Make sure to include the actions.xml file when you upload your code
+// for review!
+//
+// For more complicated situations (like when there are many different
+// possible actions), see RecordComputedAction.
+BASE_EXPORT void RecordAction(const UserMetricsAction& action);
+
+// This function has identical input and behavior to RecordAction, but is
+// not automatically found by the action-processing scripts.  It can be used
+// when it's a pain to enumerate all possible actions, but if you use this
+// you need to also update the rules for extracting known actions in
+// tools/metrics/actions/extract_actions.py.
+BASE_EXPORT void RecordComputedAction(const std::string& action);
+
+// Called with the action string.
+typedef base::Callback<void(const std::string&)> ActionCallback;
+
+// Add/remove action callbacks (see above).
+BASE_EXPORT void AddActionCallback(const ActionCallback& callback);
+BASE_EXPORT void RemoveActionCallback(const ActionCallback& callback);
+
+}  // namespace base
+
+#endif  // BASE_METRICS_USER_METRICS_H_
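For reference, a minimal sketch of how a client might consume this API; the
callback and the action name below are illustrative, not part of the change:

  // Hypothetical consumer of base/metrics/user_metrics.h (illustrative only).
  #include "base/bind.h"
  #include "base/metrics/user_metrics.h"
  #include "base/metrics/user_metrics_action.h"

  namespace {

  // Receives every recorded action string on the main thread.
  void OnUserAction(const std::string& action) {
    // Forward |action| to a metrics backend here.
  }

  }  // namespace

  void Example() {
    base::ActionCallback callback = base::Bind(&OnUserAction);
    base::AddActionCallback(callback);
    // UserMetricsAction and its string literal stay on one line, as required.
    base::RecordAction(base::UserMetricsAction("CloseTab"));
    base::RemoveActionCallback(callback);
  }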
diff --git a/base/move.h b/base/move.h
index 24bf9d7..42242b4 100644
--- a/base/move.h
+++ b/base/move.h
@@ -5,6 +5,7 @@
 #ifndef BASE_MOVE_H_
 #define BASE_MOVE_H_
 
+// TODO(dcheng): Remove this header.
 #include <utility>
 
 #include "base/compiler_specific.h"
@@ -25,13 +26,11 @@
 // into a scoped_ptr.  The class must define a move constructor and move
 // assignment operator to make this work.
 //
-// This version of the macro adds a Pass() function and a cryptic
-// MoveOnlyTypeForCPP03 typedef for the base::Callback implementation to use.
-// See IsMoveOnlyType template and its usage in base/callback_internal.h
-// for more details.
+// This version of the macro adds a cryptic MoveOnlyTypeForCPP03 typedef for the
+// base::Callback implementation to use. See IsMoveOnlyType template and its
+// usage in base/callback_internal.h for more details.
 // TODO(crbug.com/566182): Remove this macro and use DISALLOW_COPY_AND_ASSIGN
 // everywhere instead.
-#if defined(OS_ANDROID) || defined(OS_LINUX)
 #define DISALLOW_COPY_AND_ASSIGN_WITH_MOVE_FOR_BIND(type)       \
  private:                                                       \
   type(const type&) = delete;                                   \
@@ -41,17 +40,5 @@
   typedef void MoveOnlyTypeForCPP03;                            \
                                                                 \
  private:
-#else
-#define DISALLOW_COPY_AND_ASSIGN_WITH_MOVE_FOR_BIND(type)       \
- private:                                                       \
-  type(const type&) = delete;                                   \
-  void operator=(const type&) = delete;                         \
-                                                                \
- public:                                                        \
-  type&& Pass() WARN_UNUSED_RESULT { return std::move(*this); } \
-  typedef void MoveOnlyTypeForCPP03;                            \
-                                                                \
- private:
-#endif
 
 #endif  // BASE_MOVE_H_
diff --git a/base/numerics/safe_conversions_impl.h b/base/numerics/safe_conversions_impl.h
index 181dd7e..03e7ee6 100644
--- a/base/numerics/safe_conversions_impl.h
+++ b/base/numerics/safe_conversions_impl.h
@@ -8,10 +8,6 @@
 #include <limits.h>
 #include <stdint.h>
 
-#include <limits>
-
-#include "base/template_util.h"
-
 namespace base {
 namespace internal {
 
diff --git a/base/numerics/safe_math.h b/base/numerics/safe_math.h
index d169690..9757f1c 100644
--- a/base/numerics/safe_math.h
+++ b/base/numerics/safe_math.h
@@ -6,6 +6,7 @@
 #define BASE_NUMERICS_SAFE_MATH_H_
 
 #include <stddef.h>
+#include <type_traits>
 
 #include "base/numerics/safe_math_impl.h"
 
@@ -190,7 +191,7 @@
   template <typename Src>
   static CheckedNumeric<T> cast(
       const CheckedNumeric<Src>& u,
-      typename std::enable_if<!is_same<Src, T>::value, int>::type = 0) {
+      typename std::enable_if<!std::is_same<Src, T>::value, int>::type = 0) {
     return u;
   }
 
diff --git a/base/numerics/safe_math_impl.h b/base/numerics/safe_math_impl.h
index f5ec2b8..487b3bc 100644
--- a/base/numerics/safe_math_impl.h
+++ b/base/numerics/safe_math_impl.h
@@ -14,7 +14,6 @@
 #include <type_traits>
 
 #include "base/numerics/safe_conversions.h"
-#include "base/template_util.h"
 
 namespace base {
 namespace internal {
diff --git a/base/numerics/safe_numerics_unittest.cc b/base/numerics/safe_numerics_unittest.cc
index cb63ad0..861f515 100644
--- a/base/numerics/safe_numerics_unittest.cc
+++ b/base/numerics/safe_numerics_unittest.cc
@@ -11,7 +11,6 @@
 #include "base/compiler_specific.h"
 #include "base/numerics/safe_conversions.h"
 #include "base/numerics/safe_math.h"
-#include "base/template_util.h"
 #include "build/build_config.h"
 #include "testing/gtest/include/gtest/gtest.h"
 
diff --git a/base/os_compat_android.cc b/base/os_compat_android.cc
new file mode 100644
index 0000000..1eb6536
--- /dev/null
+++ b/base/os_compat_android.cc
@@ -0,0 +1,177 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/os_compat_android.h"
+
+#include <asm/unistd.h>
+#include <errno.h>
+#include <limits.h>
+#include <math.h>
+#include <sys/stat.h>
+#include <sys/syscall.h>
+
+#if !defined(__LP64__)
+#include <time64.h>
+#endif
+
+#include "base/rand_util.h"
+#include "base/strings/string_piece.h"
+#include "base/strings/stringprintf.h"
+
+extern "C" {
+// There is no futimes() available in Bionic, so we provide our own
+// implementation until it is there.
+int futimes(int fd, const struct timeval tv[2]) {
+  if (tv == NULL)
+    return syscall(__NR_utimensat, fd, NULL, NULL, 0);
+
+  if (tv[0].tv_usec < 0 || tv[0].tv_usec >= 1000000 ||
+      tv[1].tv_usec < 0 || tv[1].tv_usec >= 1000000) {
+    errno = EINVAL;
+    return -1;
+  }
+
+  // Convert timeval to timespec.
+  struct timespec ts[2];
+  ts[0].tv_sec = tv[0].tv_sec;
+  ts[0].tv_nsec = tv[0].tv_usec * 1000;
+  ts[1].tv_sec = tv[1].tv_sec;
+  ts[1].tv_nsec = tv[1].tv_usec * 1000;
+  return syscall(__NR_utimensat, fd, NULL, ts, 0);
+}
+
+#if !defined(__LP64__)
+// 32-bit Android has only timegm64() and not timegm().
+// We replicate the behavior of timegm() when the result overflows time_t.
+time_t timegm(struct tm* const t) {
+  // time_t is signed on Android.
+  static const time_t kTimeMax = ~(1L << (sizeof(time_t) * CHAR_BIT - 1));
+  static const time_t kTimeMin = (1L << (sizeof(time_t) * CHAR_BIT - 1));
+  time64_t result = timegm64(t);
+  if (result < kTimeMin || result > kTimeMax)
+    return -1;
+  return result;
+}
+#endif
+
+// The following is only needed when building with GCC 4.6 or higher
+// (i.e. not with Android GCC 4.4.3, nor with Clang).
+//
+// GCC is now capable of optimizing successive calls to sin() and cos() into
+// a single call to sincos(). This means that source code that looks like:
+//
+//     double c, s;
+//     c = cos(angle);
+//     s = sin(angle);
+//
+// Will generate machine code that looks like:
+//
+//     double c, s;
+//     sincos(angle, &s, &c);
+//
+// Unfortunately, sincos() and friends are not part of the Android libm.so
+// library provided by the NDK for API level 9. When the optimization kicks
+// in, it makes the final build fail with a puzzling message (puzzling
+// because 'sincos' doesn't appear anywhere in the sources!).
+//
+// To solve this, we provide our own implementation of the sincos() function
+// and related friends. Note that we must also explicitly tell GCC to disable
+// optimizations when generating these. Otherwise, the generated machine code
+// for each function would simply end up calling itself, resulting in a
+// runtime crash due to stack overflow.
+//
+#if defined(__GNUC__) && !defined(__clang__) && \
+    !defined(ANDROID_SINCOS_PROVIDED)
+
+// For the record, Clang does not support the 'optimize' attribute.
+// In the unlikely event that it begins performing this optimization too,
+// we'll have to find a different way to achieve this. NOTE: Tested with -O1,
+// which still performs the optimization.
+//
+#define GCC_NO_OPTIMIZE  __attribute__((optimize("O0")))
+
+GCC_NO_OPTIMIZE
+void sincos(double angle, double* s, double *c) {
+  *c = cos(angle);
+  *s = sin(angle);
+}
+
+GCC_NO_OPTIMIZE
+void sincosf(float angle, float* s, float* c) {
+  *c = cosf(angle);
+  *s = sinf(angle);
+}
+
+#endif // __GNUC__ && !__clang__
+
+// An implementation of mkdtemp, since it is not exposed by the NDK
+// for native API level 9 that we target.
+//
+// For any changes to the mkdtemp function, you should manually run the
+// unittest OsCompatAndroidTest.DISABLED_TestMkdTemp on your local machine to
+// check that it passes. Please don't enable it, since it creates a directory
+// and may be a source of flakiness.
+char* mkdtemp(char* path) {
+  if (path == NULL) {
+    errno = EINVAL;
+    return NULL;
+  }
+
+  const int path_len = strlen(path);
+
+  // The last six characters of 'path' must be XXXXXX.
+  const base::StringPiece kSuffix("XXXXXX");
+  const int kSuffixLen = kSuffix.length();
+  if (!base::StringPiece(path, path_len).ends_with(kSuffix)) {
+    errno = EINVAL;
+    return NULL;
+  }
+
+  // If the path contains a directory, as in /tmp/foo/XXXXXX, make sure that
+  // /tmp/foo exists; otherwise the loop below would spin for a long time for
+  // nothing.
+  char* dirsep = strrchr(path, '/');
+  if (dirsep != NULL) {
+    struct stat st;
+    int ret;
+
+    *dirsep = '\0';  // Temporarily terminate the directory path.
+
+    ret = stat(path, &st);
+
+    *dirsep = '/';  // Restore the directory separator.
+    if (ret < 0)  // Directory probably does not exist
+      return NULL;
+    if (!S_ISDIR(st.st_mode)) {  // Not a directory
+      errno = ENOTDIR;
+      return NULL;
+    }
+  }
+
+  // Max number of tries using different random suffixes.
+  const int kMaxTries = 100;
+
+  // Now loop until we CAN create a directory by that name or we reach the max
+  // number of tries.
+  for (int i = 0; i < kMaxTries; ++i) {
+    // Fill the suffix XXXXXX with a random string composed of a-z chars.
+    for (int pos = 0; pos < kSuffixLen; ++pos) {
+      char rand_char = static_cast<char>(base::RandInt('a', 'z'));
+      path[path_len - kSuffixLen + pos] = rand_char;
+    }
+    if (mkdir(path, 0700) == 0) {
+      // We just created the directory successfully.
+      return path;
+    }
+    if (errno != EEXIST) {
+      // mkdir() failed with an error other than "already exists"; give up.
+      return NULL;
+    }
+  }
+
+  // We reached the max number of tries.
+  return NULL;
+}
+
+}  // extern "C"
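The futimes() shim above forwards to utimensat(2) after converting timeval to
timespec; a minimal illustrative caller (the descriptor and timestamps are
made up):

  // Illustrative use of the futimes() shim (not part of the change).
  #include <sys/time.h>
  #include "base/os_compat_android.h"

  void SetFileTimes(int fd) {
    struct timeval times[2];
    times[0].tv_sec = 1400000000;  // New access time, seconds since the epoch.
    times[0].tv_usec = 500000;     // Must stay in [0, 1000000) or EINVAL.
    times[1] = times[0];           // New modification time.
    if (futimes(fd, times) != 0) {
      // errno describes the failure, e.g. EINVAL for an out-of-range tv_usec.
    }
  }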
diff --git a/base/os_compat_android.h b/base/os_compat_android.h
new file mode 100644
index 0000000..0f25444
--- /dev/null
+++ b/base/os_compat_android.h
@@ -0,0 +1,28 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_OS_COMPAT_ANDROID_H_
+#define BASE_OS_COMPAT_ANDROID_H_
+
+#include <fcntl.h>
+#include <sys/types.h>
+#include <utime.h>
+
+// Not implemented in Bionic.
+extern "C" int futimes(int fd, const struct timeval tv[2]);
+
+// Not exposed or implemented in Bionic.
+extern "C" char* mkdtemp(char* path);
+
+// Android has no timegm().
+extern "C" time_t timegm(struct tm* const t);
+
+// The lockf() function is not available on Android; we translate to flock().
+#define F_LOCK LOCK_EX
+#define F_ULOCK LOCK_UN
+inline int lockf(int fd, int cmd, off_t ignored_len) {
+  return flock(fd, cmd);
+}
+
+#endif  // BASE_OS_COMPAT_ANDROID_H_
diff --git a/base/os_compat_android_unittest.cc b/base/os_compat_android_unittest.cc
new file mode 100644
index 0000000..7fbdc6d
--- /dev/null
+++ b/base/os_compat_android_unittest.cc
@@ -0,0 +1,41 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/os_compat_android.h"
+
+#include "base/files/file_util.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace base {
+
+typedef testing::Test OsCompatAndroidTest;
+
+// Keep this unittest DISABLED_, because it actually creates a directory on the
+// device and may be a source of flakiness. For any changes to the mkdtemp
+// function, you should run this unittest on your local machine to check that
+// it passes.
+TEST_F(OsCompatAndroidTest, DISABLED_TestMkdTemp) {
+  FilePath tmp_dir;
+  EXPECT_TRUE(base::GetTempDir(&tmp_dir));
+
+  // The path does not end with the six-character XXXXXX suffix.
+  FilePath sub_dir = tmp_dir.Append("XX");
+  std::string sub_dir_string = sub_dir.value();
+  // This should be OK since mkdtemp just replaces characters in place.
+  char* buffer = const_cast<char*>(sub_dir_string.c_str());
+  EXPECT_EQ(NULL, mkdtemp(buffer));
+
+  // The directory component of the path does not exist.
+  char invalid_path2[] = "doesntoexist/foobarXXXXXX";
+  EXPECT_EQ(NULL, mkdtemp(invalid_path2));
+
+  // Successfully create a tmp dir.
+  FilePath sub_dir2 = tmp_dir.Append("XXXXXX");
+  std::string sub_dir2_string = sub_dir2.value();
+  // This should be OK since mkdtemp just replaces characters in place.
+  char* buffer2 = const_cast<char*>(sub_dir2_string.c_str());
+  EXPECT_TRUE(mkdtemp(buffer2) != NULL);
+}
+
+}  // namespace base
diff --git a/base/pending_task.cc b/base/pending_task.cc
index 3d78914..d21f7c7 100644
--- a/base/pending_task.cc
+++ b/base/pending_task.cc
@@ -30,6 +30,8 @@
       is_high_res(false) {
 }
 
+PendingTask::PendingTask(const PendingTask& other) = default;
+
 PendingTask::~PendingTask() {
 }
 
diff --git a/base/pending_task.h b/base/pending_task.h
index fddfc86..fd0b883 100644
--- a/base/pending_task.h
+++ b/base/pending_task.h
@@ -24,6 +24,7 @@
               const Closure& task,
               TimeTicks delayed_run_time,
               bool nestable);
+  PendingTask(const PendingTask& other);
   ~PendingTask();
 
   // Used to support sorting.
diff --git a/base/pickle.cc b/base/pickle.cc
index d83391b..016e934 100644
--- a/base/pickle.cc
+++ b/base/pickle.cc
@@ -11,6 +11,7 @@
 
 #include "base/bits.h"
 #include "base/macros.h"
+#include "base/numerics/safe_conversions.h"
 #include "build/build_config.h"
 
 namespace base {
@@ -89,7 +90,15 @@
 }
 
 bool PickleIterator::ReadLong(long* result) {
-  return ReadBuiltinType(result);
+  // Always read long as a 64-bit value to ensure compatibility between 32-bit
+  // and 64-bit processes.
+  int64_t result_int64 = 0;
+  if (!ReadBuiltinType(&result_int64))
+    return false;
+  // CHECK if the cast truncates the value so that we know to change this IPC
+  // parameter to use int64_t.
+  *result = base::checked_cast<long>(result_int64);
+  return true;
 }
 
 bool PickleIterator::ReadUInt16(uint16_t* result) {
@@ -108,16 +117,6 @@
   return ReadBuiltinType(result);
 }
 
-bool PickleIterator::ReadSizeT(size_t* result) {
-  // Always read size_t as a 64-bit value to ensure compatibility between 32-bit
-  // and 64-bit processes.
-  uint64_t result_uint64 = 0;
-  bool success = ReadBuiltinType(&result_uint64);
-  *result = static_cast<size_t>(result_uint64);
-  // Fail if the cast above truncates the value.
-  return success && (*result == result_uint64);
-}
-
 bool PickleIterator::ReadFloat(float* result) {
   // crbug.com/315213
   // The source data may not be properly aligned, and unaligned float reads
@@ -208,6 +207,43 @@
   return true;
 }
 
+PickleSizer::PickleSizer() {}
+
+PickleSizer::~PickleSizer() {}
+
+void PickleSizer::AddString(const StringPiece& value) {
+  AddInt();
+  AddBytes(static_cast<int>(value.size()));
+}
+
+void PickleSizer::AddString16(const StringPiece16& value) {
+  AddInt();
+  AddBytes(static_cast<int>(value.size() * sizeof(char16)));
+}
+
+void PickleSizer::AddData(int length) {
+  CHECK_GE(length, 0);
+  AddInt();
+  AddBytes(length);
+}
+
+void PickleSizer::AddBytes(int length) {
+  payload_size_ += bits::Align(length, sizeof(uint32_t));
+}
+
+template <size_t length> void PickleSizer::AddBytesStatic() {
+  DCHECK_LE(length, static_cast<size_t>(std::numeric_limits<int>::max()));
+  AddBytes(length);
+}
+
+template void PickleSizer::AddBytesStatic<2>();
+template void PickleSizer::AddBytesStatic<4>();
+template void PickleSizer::AddBytesStatic<8>();
+
+Pickle::Attachment::Attachment() {}
+
+Pickle::Attachment::~Attachment() {}
+
 // Payload is uint32_t aligned.
 
 Pickle::Pickle()
@@ -322,6 +358,19 @@
     Resize(capacity_after_header_ * 2 + new_size);
 }
 
+bool Pickle::WriteAttachment(scoped_refptr<Attachment> /* attachment */) {
+  return false;
+}
+
+bool Pickle::ReadAttachment(base::PickleIterator* /* iter */,
+                            scoped_refptr<Attachment>* /* attachment */) const {
+  return false;
+}
+
+bool Pickle::HasAttachments() const {
+  return false;
+}
+
 void Pickle::Resize(size_t new_capacity) {
   CHECK_NE(capacity_after_header_, kCapacityReadOnly);
   capacity_after_header_ = bits::Align(new_capacity, kPayloadUnit);
diff --git a/base/pickle.h b/base/pickle.h
index 02bc432..eb4888a 100644
--- a/base/pickle.h
+++ b/base/pickle.h
@@ -14,9 +14,14 @@
 #include "base/compiler_specific.h"
 #include "base/gtest_prod_util.h"
 #include "base/logging.h"
+#include "base/memory/ref_counted.h"
 #include "base/strings/string16.h"
 #include "base/strings/string_piece.h"
 
+#if defined(OS_POSIX)
+#include "base/files/file.h"
+#endif
+
 namespace base {
 
 class Pickle;
@@ -40,7 +45,6 @@
   bool ReadUInt32(uint32_t* result) WARN_UNUSED_RESULT;
   bool ReadInt64(int64_t* result) WARN_UNUSED_RESULT;
   bool ReadUInt64(uint64_t* result) WARN_UNUSED_RESULT;
-  bool ReadSizeT(size_t* result) WARN_UNUSED_RESULT;
   bool ReadFloat(float* result) WARN_UNUSED_RESULT;
   bool ReadDouble(double* result) WARN_UNUSED_RESULT;
   bool ReadString(std::string* result) WARN_UNUSED_RESULT;
@@ -104,6 +108,41 @@
   FRIEND_TEST_ALL_PREFIXES(PickleTest, GetReadPointerAndAdvance);
 };
 
+// This class provides an interface analogous to base::Pickle's WriteFoo()
+// methods and can be used to accurately compute the size of a hypothetical
+// Pickle's payload without having to reference the Pickle implementation.
+class BASE_EXPORT PickleSizer {
+ public:
+  PickleSizer();
+  ~PickleSizer();
+
+  // Returns the computed size of the payload.
+  size_t payload_size() const { return payload_size_; }
+
+  void AddBool() { return AddInt(); }
+  void AddInt() { AddPOD<int>(); }
+  void AddLong() { AddPOD<uint64_t>(); }
+  void AddUInt16() { return AddPOD<uint16_t>(); }
+  void AddUInt32() { return AddPOD<uint32_t>(); }
+  void AddInt64() { return AddPOD<int64_t>(); }
+  void AddUInt64() { return AddPOD<uint64_t>(); }
+  void AddFloat() { return AddPOD<float>(); }
+  void AddDouble() { return AddPOD<double>(); }
+  void AddString(const StringPiece& value);
+  void AddString16(const StringPiece16& value);
+  void AddData(int length);
+  void AddBytes(int length);
+
+ private:
+  // Just like AddBytes() but with a compile-time size for performance.
+  template<size_t length> void BASE_EXPORT AddBytesStatic();
+
+  template <typename T>
+  void AddPOD() { AddBytesStatic<sizeof(T)>(); }
+
+  size_t payload_size_ = 0;
+};
+
 // This class provides facilities for basic binary value packing and unpacking.
 //
 // The Pickle class supports appending primitive values (ints, strings, etc.)
@@ -123,6 +162,21 @@
 //
 class BASE_EXPORT Pickle {
  public:
+  // Auxiliary data attached to a Pickle. Pickle must be subclassed along with
+  // this interface in order to provide a concrete implementation of support
+  // for attachments. The base Pickle implementation does not accept
+  // attachments.
+  class BASE_EXPORT Attachment : public RefCountedThreadSafe<Attachment> {
+   public:
+    Attachment();
+
+   protected:
+    friend class RefCountedThreadSafe<Attachment>;
+    virtual ~Attachment();
+
+    DISALLOW_COPY_AND_ASSIGN(Attachment);
+  };
+
   // Initialize a Pickle object using the default header size.
   Pickle();
 
@@ -173,23 +227,15 @@
   bool WriteInt(int value) {
     return WritePOD(value);
   }
-  // WARNING: DO NOT USE THIS METHOD IF PICKLES ARE PERSISTED IN ANY WAY.
-  // It will write whatever a "long" is on this architecture. On 32-bit
-  // platforms, it is 32 bits. On 64-bit platforms, it is 64 bits. If persisted
-  // pickles are still around after upgrading to 64-bit, or if they are copied
-  // between dissimilar systems, YOUR PICKLES WILL HAVE GONE BAD.
-  bool WriteLongUsingDangerousNonPortableLessPersistableForm(long value) {
-    return WritePOD(value);
+  bool WriteLong(long value) {
+    // Always write long as a 64-bit value to ensure compatibility between
+    // 32-bit and 64-bit processes.
+    return WritePOD(static_cast<int64_t>(value));
   }
   bool WriteUInt16(uint16_t value) { return WritePOD(value); }
   bool WriteUInt32(uint32_t value) { return WritePOD(value); }
   bool WriteInt64(int64_t value) { return WritePOD(value); }
   bool WriteUInt64(uint64_t value) { return WritePOD(value); }
-  bool WriteSizeT(size_t value) {
-    // Always write size_t as a 64-bit value to ensure compatibility between
-    // 32-bit and 64-bit processes.
-    return WritePOD(static_cast<uint64_t>(value));
-  }
   bool WriteFloat(float value) {
     return WritePOD(value);
   }
@@ -206,6 +252,19 @@
   // known size. See also WriteData.
   bool WriteBytes(const void* data, int length);
 
+  // WriteAttachment appends |attachment| to the pickle. It returns
+  // false iff the set is full or if the Pickle implementation does not support
+  // attachments.
+  virtual bool WriteAttachment(scoped_refptr<Attachment> attachment);
+
+  // ReadAttachment parses an attachment given the parsing state |iter| and
+  // writes it to |*attachment|. It returns true on success.
+  virtual bool ReadAttachment(base::PickleIterator* iter,
+                              scoped_refptr<Attachment>* attachment) const;
+
+  // Indicates whether the pickle has any attachments.
+  virtual bool HasAttachments() const;
+
   // Reserves space for upcoming writes when multiple writes will be made and
   // their sizes are computed in advance. It can be significantly faster to call
   // Reserve() before calling WriteFoo() multiple times.
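A short sketch of how the new pieces fit together; this mirrors the unit tests
below and is illustrative only:

  // Round-trip with the 64-bit long encoding, sized ahead of time.
  #include "base/pickle.h"

  void PickleLongExample() {
    base::Pickle pickle;
    pickle.WriteLong(42L);  // Encoded as an int64_t on every platform.

    // PickleSizer computes the same payload size without building a Pickle.
    base::PickleSizer sizer;
    sizer.AddLong();
    DCHECK_EQ(sizer.payload_size(), pickle.payload_size());

    base::PickleIterator iter(pickle);
    long value = 0;
    if (iter.ReadLong(&value)) {
      // |value| is 42. ReadLong() CHECKs, rather than returning false, when
      // the stored int64_t does not fit in this platform's long.
    }
  }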
diff --git a/base/pickle_unittest.cc b/base/pickle_unittest.cc
index b195a81..307cb51 100644
--- a/base/pickle_unittest.cc
+++ b/base/pickle_unittest.cc
@@ -27,7 +27,6 @@
 const uint32_t testuint32 = 1593847192;
 const int64_t testint64 = -0x7E8CA9253104BDFCLL;
 const uint64_t testuint64 = 0xCE8CA9253104BDF7ULL;
-const size_t testsizet = 0xFEDC7654;
 const float testfloat = 3.1415926935f;
 const double testdouble = 2.71828182845904523;
 const std::string teststring("Hello world");  // note non-aligned string length
@@ -73,10 +72,6 @@
   EXPECT_TRUE(iter.ReadUInt64(&outuint64));
   EXPECT_EQ(testuint64, outuint64);
 
-  size_t outsizet;
-  EXPECT_TRUE(iter.ReadSizeT(&outsizet));
-  EXPECT_EQ(testsizet, outsizet);
-
   float outfloat;
   EXPECT_TRUE(iter.ReadFloat(&outfloat));
   EXPECT_EQ(testfloat, outfloat);
@@ -119,13 +114,11 @@
   EXPECT_TRUE(pickle.WriteBool(testbool1));
   EXPECT_TRUE(pickle.WriteBool(testbool2));
   EXPECT_TRUE(pickle.WriteInt(testint));
-  EXPECT_TRUE(
-      pickle.WriteLongUsingDangerousNonPortableLessPersistableForm(testlong));
+  EXPECT_TRUE(pickle.WriteLong(testlong));
   EXPECT_TRUE(pickle.WriteUInt16(testuint16));
   EXPECT_TRUE(pickle.WriteUInt32(testuint32));
   EXPECT_TRUE(pickle.WriteInt64(testint64));
   EXPECT_TRUE(pickle.WriteUInt64(testuint64));
-  EXPECT_TRUE(pickle.WriteSizeT(testsizet));
   EXPECT_TRUE(pickle.WriteFloat(testfloat));
   EXPECT_TRUE(pickle.WriteDouble(testdouble));
   EXPECT_TRUE(pickle.WriteString(teststring));
@@ -145,25 +138,26 @@
   VerifyResult(pickle3);
 }
 
-// Tests that reading/writing a size_t works correctly when the source process
+// Tests that reading/writing a long works correctly when the source process
 // is 64-bit.  We rely on having both 32- and 64-bit trybots to validate both
 // arms of the conditional in this test.
-TEST(PickleTest, SizeTFrom64Bit) {
+TEST(PickleTest, LongFrom64Bit) {
   Pickle pickle;
-  // Under the hood size_t is always written as a 64-bit value, so simulate a
-  // 64-bit size_t even on 32-bit architectures by explicitly writing a
-  // uint64_t.
-  EXPECT_TRUE(pickle.WriteUInt64(testuint64));
+  // Under the hood long is always written as a 64-bit value, so simulate a
+  // 64-bit long even on 32-bit architectures by explicitly writing an int64_t.
+  EXPECT_TRUE(pickle.WriteInt64(testint64));
 
   PickleIterator iter(pickle);
-  size_t outsizet;
-  if (sizeof(size_t) < sizeof(uint64_t)) {
-    // ReadSizeT() should return false when the original written value can't be
-    // represented as a size_t.
-    EXPECT_FALSE(iter.ReadSizeT(&outsizet));
+  long outlong;
+  if (sizeof(long) < sizeof(int64_t)) {
+    // ReadLong() should return false when the original written value can't be
+    // represented as a long.
+#if GTEST_HAS_DEATH_TEST
+    EXPECT_DEATH(ignore_result(iter.ReadLong(&outlong)), "");
+#endif
   } else {
-    EXPECT_TRUE(iter.ReadSizeT(&outsizet));
-    EXPECT_EQ(testuint64, outsizet);
+    EXPECT_TRUE(iter.ReadLong(&outlong));
+    EXPECT_EQ(testint64, outlong);
   }
 }
 
@@ -556,14 +550,14 @@
   std::string data("Hello, world!");
 
   TestingPickle pickle;
-  pickle.WriteSizeT(data.size());
+  pickle.WriteUInt32(data.size());
   void* bytes = pickle.ClaimBytes(data.size());
   pickle.WriteInt(42);
   memcpy(bytes, data.data(), data.size());
 
   PickleIterator iter(pickle);
-  size_t out_data_length;
-  EXPECT_TRUE(iter.ReadSizeT(&out_data_length));
+  uint32_t out_data_length;
+  EXPECT_TRUE(iter.ReadUInt32(&out_data_length));
   EXPECT_EQ(data.size(), out_data_length);
 
   const char* out_data = nullptr;
@@ -575,4 +569,99 @@
   EXPECT_EQ(42, out_value);
 }
 
+// Checks that PickleSizer and Pickle agree on the size of things.
+TEST(PickleTest, PickleSizer) {
+  {
+    TestingPickle pickle;
+    base::PickleSizer sizer;
+    pickle.WriteBool(true);
+    sizer.AddBool();
+    EXPECT_EQ(sizer.payload_size(), pickle.payload_size());
+  }
+  {
+    TestingPickle pickle;
+    base::PickleSizer sizer;
+    pickle.WriteInt(42);
+    sizer.AddInt();
+    EXPECT_EQ(sizer.payload_size(), pickle.payload_size());
+  }
+  {
+    TestingPickle pickle;
+    base::PickleSizer sizer;
+    pickle.WriteLong(42);
+    sizer.AddLong();
+    EXPECT_EQ(sizer.payload_size(), pickle.payload_size());
+  }
+  {
+    TestingPickle pickle;
+    base::PickleSizer sizer;
+    pickle.WriteUInt16(42);
+    sizer.AddUInt16();
+    EXPECT_EQ(sizer.payload_size(), pickle.payload_size());
+  }
+  {
+    TestingPickle pickle;
+    base::PickleSizer sizer;
+    pickle.WriteUInt32(42);
+    sizer.AddUInt32();
+    EXPECT_EQ(sizer.payload_size(), pickle.payload_size());
+  }
+  {
+    TestingPickle pickle;
+    base::PickleSizer sizer;
+    pickle.WriteInt64(42);
+    sizer.AddInt64();
+    EXPECT_EQ(sizer.payload_size(), pickle.payload_size());
+  }
+  {
+    TestingPickle pickle;
+    base::PickleSizer sizer;
+    pickle.WriteUInt64(42);
+    sizer.AddUInt64();
+    EXPECT_EQ(sizer.payload_size(), pickle.payload_size());
+  }
+  {
+    TestingPickle pickle;
+    base::PickleSizer sizer;
+    pickle.WriteFloat(42.0f);
+    sizer.AddFloat();
+    EXPECT_EQ(sizer.payload_size(), pickle.payload_size());
+  }
+  {
+    TestingPickle pickle;
+    base::PickleSizer sizer;
+    pickle.WriteDouble(42.0);
+    sizer.AddDouble();
+    EXPECT_EQ(sizer.payload_size(), pickle.payload_size());
+  }
+  {
+    TestingPickle pickle;
+    base::PickleSizer sizer;
+    pickle.WriteString(teststring);
+    sizer.AddString(teststring);
+    EXPECT_EQ(sizer.payload_size(), pickle.payload_size());
+  }
+  {
+    TestingPickle pickle;
+    base::PickleSizer sizer;
+    pickle.WriteString16(teststring16);
+    sizer.AddString16(teststring16);
+    EXPECT_EQ(sizer.payload_size(), pickle.payload_size());
+  }
+  {
+    TestingPickle pickle;
+    base::PickleSizer sizer;
+    pickle.WriteData(testdata, testdatalen);
+    sizer.AddData(testdatalen);
+    EXPECT_EQ(sizer.payload_size(), pickle.payload_size());
+  }
+  {
+    TestingPickle pickle;
+    base::PickleSizer sizer;
+    pickle.WriteBytes(testdata, testdatalen);
+    sizer.AddBytes(testdatalen);
+    EXPECT_EQ(sizer.payload_size(), pickle.payload_size());
+  }
+}
+
 }  // namespace base
diff --git a/base/prefs/OWNERS b/base/prefs/OWNERS
deleted file mode 100644
index 2d87038..0000000
--- a/base/prefs/OWNERS
+++ /dev/null
@@ -1,4 +0,0 @@
-battre@chromium.org
-bauerb@chromium.org
-gab@chromium.org
-pam@chromium.org
diff --git a/base/prefs/README b/base/prefs/README
deleted file mode 100644
index 52d9c43..0000000
--- a/base/prefs/README
+++ /dev/null
@@ -1,6 +0,0 @@
-Prefs is a general-purpose key-value store for application preferences.
-
-The Prefs code lives in base/prefs but is not part of the
-'base/base.gyp:base' library because of a desire to keep its use
-optional. If you use Prefs, you should add a GYP dependency on
-base/base.gyp:base_prefs.
diff --git a/base/prefs/base_prefs_export.h b/base/prefs/base_prefs_export.h
deleted file mode 100644
index 3d207db..0000000
--- a/base/prefs/base_prefs_export.h
+++ /dev/null
@@ -1,29 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef BASE_PREFS_BASE_PREFS_EXPORT_H_
-#define BASE_PREFS_BASE_PREFS_EXPORT_H_
-
-#if defined(COMPONENT_BUILD)
-#if defined(WIN32)
-
-#if defined(BASE_PREFS_IMPLEMENTATION)
-#define BASE_PREFS_EXPORT __declspec(dllexport)
-#else
-#define BASE_PREFS_EXPORT __declspec(dllimport)
-#endif  // defined(BASE_PREFS_IMPLEMENTATION)
-
-#else  // defined(WIN32)
-#if defined(BASE_PREFS_IMPLEMENTATION)
-#define BASE_PREFS_EXPORT __attribute__((visibility("default")))
-#else
-#define BASE_PREFS_EXPORT
-#endif
-#endif
-
-#else  // defined(COMPONENT_BUILD)
-#define BASE_PREFS_EXPORT
-#endif
-
-#endif  // BASE_PREFS_BASE_PREFS_EXPORT_H_
diff --git a/base/prefs/persistent_pref_store.h b/base/prefs/persistent_pref_store.h
deleted file mode 100644
index 89c7a71..0000000
--- a/base/prefs/persistent_pref_store.h
+++ /dev/null
@@ -1,77 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef BASE_PREFS_PERSISTENT_PREF_STORE_H_
-#define BASE_PREFS_PERSISTENT_PREF_STORE_H_
-
-#include <string>
-
-#include "base/prefs/base_prefs_export.h"
-#include "base/prefs/writeable_pref_store.h"
-
-// This interface is complementary to the PrefStore interface, declaring
-// additional functionality that adds support for setting values and persisting
-// the data to some backing store.
-class BASE_PREFS_EXPORT PersistentPrefStore : public WriteablePrefStore {
- public:
-  // Unique integer code for each type of error so we can report them
-  // distinctly in a histogram.
-  // NOTE: Don't change the explicit values of the enums as it will change the
-  // server's meaning of the histogram.
-  enum PrefReadError {
-    PREF_READ_ERROR_NONE = 0,
-    PREF_READ_ERROR_JSON_PARSE = 1,
-    PREF_READ_ERROR_JSON_TYPE = 2,
-    PREF_READ_ERROR_ACCESS_DENIED = 3,
-    PREF_READ_ERROR_FILE_OTHER = 4,
-    PREF_READ_ERROR_FILE_LOCKED = 5,
-    PREF_READ_ERROR_NO_FILE = 6,
-    PREF_READ_ERROR_JSON_REPEAT = 7,
-    // PREF_READ_ERROR_OTHER = 8,  // Deprecated.
-    PREF_READ_ERROR_FILE_NOT_SPECIFIED = 9,
-    // Indicates that ReadPrefs() couldn't complete synchronously and is waiting
-    // for an asynchronous task to complete first.
-    PREF_READ_ERROR_ASYNCHRONOUS_TASK_INCOMPLETE = 10,
-    PREF_READ_ERROR_MAX_ENUM
-  };
-
-  class ReadErrorDelegate {
-   public:
-    virtual ~ReadErrorDelegate() {}
-
-    virtual void OnError(PrefReadError error) = 0;
-  };
-
-  // Whether the store is in a pseudo-read-only mode where changes are not
-  // actually persisted to disk.  This happens in some cases when there are
-  // read errors during startup.
-  virtual bool ReadOnly() const = 0;
-
-  // Gets the read error. Only valid if IsInitializationComplete() returns true.
-  virtual PrefReadError GetReadError() const = 0;
-
-  // Reads the preferences from disk. Notifies observers via
-  // "PrefStore::OnInitializationCompleted" when done.
-  virtual PrefReadError ReadPrefs() = 0;
-
-  // Reads the preferences from disk asynchronously. Notifies observers via
-  // "PrefStore::OnInitializationCompleted" when done. Also it fires
-  // |error_delegate| if it is not NULL and reading error has occurred.
-  // Owns |error_delegate|.
-  virtual void ReadPrefsAsync(ReadErrorDelegate* error_delegate) = 0;
-
-  // Lands any pending writes to disk.
-  virtual void CommitPendingWrite() = 0;
-
-  // Schedule a write if there is any lossy data pending. Unlike
-  // CommitPendingWrite() this does not immediately sync to disk, instead it
-  // triggers an eventual write if there is lossy data pending and if there
-  // isn't one scheduled already.
-  virtual void SchedulePendingLossyWrites() = 0;
-
- protected:
-  ~PersistentPrefStore() override {}
-};
-
-#endif  // BASE_PREFS_PERSISTENT_PREF_STORE_H_
diff --git a/base/prefs/pref_filter.h b/base/prefs/pref_filter.h
deleted file mode 100644
index 82a44c6..0000000
--- a/base/prefs/pref_filter.h
+++ /dev/null
@@ -1,55 +0,0 @@
-// Copyright 2013 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef BASE_PREFS_PREF_FILTER_H_
-#define BASE_PREFS_PREF_FILTER_H_
-
-#include <string>
-
-#include "base/callback_forward.h"
-#include "base/memory/scoped_ptr.h"
-#include "base/prefs/base_prefs_export.h"
-
-namespace base {
-class DictionaryValue;
-class Value;
-}  // namespace base
-
-// Filters preferences as they are loaded from disk or updated at runtime.
-// Currently supported only by JsonPrefStore.
-class BASE_PREFS_EXPORT PrefFilter {
- public:
-  // A callback to be invoked when |prefs| have been read (and possibly
-  // pre-modified) and are now ready to be handed back to this callback's
-  // builder. |schedule_write| indicates whether a write should be immediately
-  // scheduled (typically because the |prefs| were pre-modified).
-  typedef base::Callback<void(scoped_ptr<base::DictionaryValue> prefs,
-                              bool schedule_write)> PostFilterOnLoadCallback;
-
-  virtual ~PrefFilter() {}
-
-  // This method is given ownership of the |pref_store_contents| read from disk
-  // before the underlying PersistentPrefStore gets to use them. It must hand
-  // them back via |post_filter_on_load_callback|, but may modify them first.
-  // Note: This method is asynchronous, which may make calls like
-  // PersistentPrefStore::ReadPrefs() asynchronous. The owner of filtered
-  // PersistentPrefStores should handle this to make the reads look synchronous
-  // to external users (see SegregatedPrefStore::ReadPrefs() for an example).
-  virtual void FilterOnLoad(
-      const PostFilterOnLoadCallback& post_filter_on_load_callback,
-      scoped_ptr<base::DictionaryValue> pref_store_contents) = 0;
-
-  // Receives notification when a pref store value is changed, before Observers
-  // are notified.
-  virtual void FilterUpdate(const std::string& path) = 0;
-
-  // Receives notification when the pref store is about to serialize data
-  // contained in |pref_store_contents| to a string. Modifications to
-  // |pref_store_contents| will be persisted to disk and also affect the
-  // in-memory state.
-  virtual void FilterSerializeData(
-      base::DictionaryValue* pref_store_contents) = 0;
-};
-
-#endif  // BASE_PREFS_PREF_FILTER_H_
diff --git a/base/prefs/pref_notifier.h b/base/prefs/pref_notifier.h
deleted file mode 100644
index e0df260..0000000
--- a/base/prefs/pref_notifier.h
+++ /dev/null
@@ -1,26 +0,0 @@
-// Copyright (c) 2011 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef BASE_PREFS_PREF_NOTIFIER_H_
-#define BASE_PREFS_PREF_NOTIFIER_H_
-
-#include <string>
-
-// Delegate interface used by PrefValueStore to notify its owner about changes
-// to the preference values.
-// TODO(mnissler, danno): Move this declaration to pref_value_store.h once we've
-// cleaned up all public uses of this interface.
-class PrefNotifier {
- public:
-  virtual ~PrefNotifier() {}
-
-  // Sends out a change notification for the preference identified by
-  // |pref_name|.
-  virtual void OnPreferenceChanged(const std::string& pref_name) = 0;
-
-  // Broadcasts the intialization completed notification.
-  virtual void OnInitializationCompleted(bool succeeded) = 0;
-};
-
-#endif  // BASE_PREFS_PREF_NOTIFIER_H_
diff --git a/base/prefs/pref_observer.h b/base/prefs/pref_observer.h
deleted file mode 100644
index 5d8f5b6..0000000
--- a/base/prefs/pref_observer.h
+++ /dev/null
@@ -1,21 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef BASE_PREFS_PREF_OBSERVER_H_
-#define BASE_PREFS_PREF_OBSERVER_H_
-
-#include <string>
-
-class PrefService;
-
-// Used internally to the Prefs subsystem to pass preference change
-// notifications between PrefService, PrefNotifierImpl and
-// PrefChangeRegistrar.
-class PrefObserver {
- public:
-  virtual void OnPreferenceChanged(PrefService* service,
-                                   const std::string& pref_name) = 0;
-};
-
-#endif  // BASE_PREFS_PREF_OBSERVER_H_
diff --git a/base/prefs/writeable_pref_store.h b/base/prefs/writeable_pref_store.h
deleted file mode 100644
index f7da279..0000000
--- a/base/prefs/writeable_pref_store.h
+++ /dev/null
@@ -1,72 +0,0 @@
-// Copyright 2014 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef BASE_PREFS_WRITEABLE_PREF_STORE_H_
-#define BASE_PREFS_WRITEABLE_PREF_STORE_H_
-
-#include <stdint.h>
-
-#include <string>
-
-#include "base/macros.h"
-#include "base/memory/scoped_ptr.h"
-#include "base/prefs/pref_store.h"
-
-namespace base {
-class Value;
-}
-
-// A pref store that can be written to as well as read from.
-class BASE_PREFS_EXPORT WriteablePrefStore : public PrefStore {
- public:
-  // PrefWriteFlags can be used to change the way a pref will be written to
-  // storage.
-  enum PrefWriteFlags : uint32_t {
-    // No flags are specified.
-    DEFAULT_PREF_WRITE_FLAGS = 0,
-
-    // This marks the pref as "lossy". There is no strict time guarantee on when
-    // a lossy pref will be persisted to permanent storage when it is modified.
-    LOSSY_PREF_WRITE_FLAG = 1 << 1
-  };
-
-  WriteablePrefStore() {}
-
-  // Sets a |value| for |key| in the store. |value| must be non-NULL. |flags| is
-  // a bitmask of PrefWriteFlags.
-  virtual void SetValue(const std::string& key,
-                        scoped_ptr<base::Value> value,
-                        uint32_t flags) = 0;
-
-  // Removes the value for |key|.
-  virtual void RemoveValue(const std::string& key, uint32_t flags) = 0;
-
-  // Equivalent to PrefStore::GetValue but returns a mutable value.
-  virtual bool GetMutableValue(const std::string& key,
-                               base::Value** result) = 0;
-
-  // Triggers a value changed notification. This function needs to be called
-  // if one retrieves a list or dictionary with GetMutableValue and change its
-  // value. SetValue takes care of notifications itself. Note that
-  // ReportValueChanged will trigger notifications even if nothing has changed.
-  // |flags| is a bitmask of PrefWriteFlags.
-  virtual void ReportValueChanged(const std::string& key, uint32_t flags) = 0;
-
-  // Same as SetValue, but doesn't generate notifications. This is used by
-  // PrefService::GetMutableUserPref() in order to put empty entries
-  // into the user pref store. Using SetValue is not an option since existing
-  // tests rely on the number of notifications generated. |flags| is a bitmask
-  // of PrefWriteFlags.
-  virtual void SetValueSilently(const std::string& key,
-                                scoped_ptr<base::Value> value,
-                                uint32_t flags) = 0;
-
- protected:
-  ~WriteablePrefStore() override {}
-
- private:
-  DISALLOW_COPY_AND_ASSIGN(WriteablePrefStore);
-};
-
-#endif  // BASE_PREFS_WRITEABLE_PREF_STORE_H_
diff --git a/base/process/launch.cc b/base/process/launch.cc
index f09317d..3ca5155 100644
--- a/base/process/launch.cc
+++ b/base/process/launch.cc
@@ -40,6 +40,8 @@
     {
 }
 
+LaunchOptions::LaunchOptions(const LaunchOptions& other) = default;
+
 LaunchOptions::~LaunchOptions() {
 }
 
diff --git a/base/process/launch.h b/base/process/launch.h
index 9a76e20..b1811d4 100644
--- a/base/process/launch.h
+++ b/base/process/launch.h
@@ -59,6 +59,7 @@
 #endif  // defined(OS_POSIX)
 
   LaunchOptions();
+  LaunchOptions(const LaunchOptions&);
   ~LaunchOptions();
 
   // If true, wait for the process to complete.
diff --git a/base/process/launch_posix.cc b/base/process/launch_posix.cc
index 6a2f5ce..f1318ca 100644
--- a/base/process/launch_posix.cc
+++ b/base/process/launch_posix.cc
@@ -736,7 +736,7 @@
   // internal pid cache. The libc interface unfortunately requires
   // specifying a new stack, so we use setjmp/longjmp to emulate
   // fork-like behavior.
-  char stack_buf[PTHREAD_STACK_MIN];
+  char stack_buf[PTHREAD_STACK_MIN] ALIGNAS(16);
 #if defined(ARCH_CPU_X86_FAMILY) || defined(ARCH_CPU_ARM_FAMILY) || \
     defined(ARCH_CPU_MIPS64_FAMILY) || defined(ARCH_CPU_MIPS_FAMILY)
   // The stack grows downward.
diff --git a/base/process/process_iterator.cc b/base/process/process_iterator.cc
index 94f53b6..d4024d9 100644
--- a/base/process/process_iterator.cc
+++ b/base/process/process_iterator.cc
@@ -9,6 +9,7 @@
 
 #if defined(OS_POSIX)
 ProcessEntry::ProcessEntry() : pid_(0), ppid_(0), gid_(0) {}
+ProcessEntry::ProcessEntry(const ProcessEntry& other) = default;
 ProcessEntry::~ProcessEntry() {}
 #endif
 
diff --git a/base/process/process_iterator.h b/base/process/process_iterator.h
index 26fe690..0d1f1a6 100644
--- a/base/process/process_iterator.h
+++ b/base/process/process_iterator.h
@@ -41,6 +41,7 @@
 #elif defined(OS_POSIX)
 struct BASE_EXPORT ProcessEntry {
   ProcessEntry();
+  ProcessEntry(const ProcessEntry& other);
   ~ProcessEntry();
 
   ProcessId pid() const { return pid_; }
diff --git a/base/process/process_iterator_linux.cc b/base/process/process_iterator_linux.cc
index 94a3576..421565f 100644
--- a/base/process/process_iterator_linux.cc
+++ b/base/process/process_iterator_linux.cc
@@ -61,18 +61,28 @@
 ProcessIterator::ProcessIterator(const ProcessFilter* filter)
     : filter_(filter) {
   procfs_dir_ = opendir(internal::kProcDir);
+  if (!procfs_dir_) {
+    // On Android, SELinux may prevent reading /proc. See
+    // https://crbug.com/581517 for details.
+    PLOG(ERROR) << "opendir " << internal::kProcDir;
+  }
 }
 
 ProcessIterator::~ProcessIterator() {
   if (procfs_dir_) {
     closedir(procfs_dir_);
-    procfs_dir_ = NULL;
+    procfs_dir_ = nullptr;
   }
 }
 
 bool ProcessIterator::CheckForNextProcess() {
   // TODO(port): skip processes owned by different UID
 
+  if (!procfs_dir_) {
+    DLOG(ERROR) << "Skipping CheckForNextProcess(), no procfs_dir_";
+    return false;
+  }
+
   pid_t pid = kNullProcessId;
   std::vector<std::string> cmd_line_args;
   std::string stats_data;
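With this change an iteration over processes degrades gracefully when /proc
cannot be opened; a minimal sketch (illustrative only):

  // Illustrative only: counting processes with the hardened iterator.
  #include "base/process/process_iterator.h"

  int CountVisibleProcesses() {
    base::ProcessIterator it(nullptr);  // nullptr: no ProcessFilter.
    int count = 0;
    // If opendir("/proc") failed (e.g. under SELinux on Android), the first
    // NextProcessEntry() call now returns null instead of misbehaving.
    while (it.NextProcessEntry())
      ++count;
    return count;
  }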
diff --git a/base/process/process_metrics.h b/base/process/process_metrics.h
index 8d8f7fc..0d4d04a 100644
--- a/base/process/process_metrics.h
+++ b/base/process/process_metrics.h
@@ -261,6 +261,7 @@
 // Linux/Android/Chrome OS. Shmem/slab/gem_objects/gem_size are Chrome OS only.
 struct BASE_EXPORT SystemMemoryInfoKB {
   SystemMemoryInfoKB();
+  SystemMemoryInfoKB(const SystemMemoryInfoKB& other);
 
   // Serializes the platform specific fields to value.
   scoped_ptr<Value> ToValue() const;
@@ -336,6 +337,7 @@
 // Data from /proc/diskstats about system-wide disk I/O.
 struct BASE_EXPORT SystemDiskInfo {
   SystemDiskInfo();
+  SystemDiskInfo(const SystemDiskInfo& other);
 
   // Serializes the platform specific fields to value.
   scoped_ptr<Value> ToValue() const;
diff --git a/base/process/process_metrics_linux.cc b/base/process/process_metrics_linux.cc
index bcebcf5..c6aff3e 100644
--- a/base/process/process_metrics_linux.cc
+++ b/base/process/process_metrics_linux.cc
@@ -86,7 +86,8 @@
       return value;
     }
   }
-  NOTREACHED();
+  // This can be reached if the process dies when proc is read -- in that case,
+  // the kernel can return missing fields.
   return 0;
 }
 
@@ -555,6 +556,9 @@
 #endif
 }
 
+SystemMemoryInfoKB::SystemMemoryInfoKB(const SystemMemoryInfoKB& other) =
+    default;
+
 scoped_ptr<Value> SystemMemoryInfoKB::ToValue() const {
   scoped_ptr<DictionaryValue> res(new DictionaryValue());
 
@@ -766,6 +770,8 @@
   weighted_io_time = 0;
 }
 
+SystemDiskInfo::SystemDiskInfo(const SystemDiskInfo& other) = default;
+
 scoped_ptr<Value> SystemDiskInfo::ToValue() const {
   scoped_ptr<DictionaryValue> res(new DictionaryValue());
 
diff --git a/base/process/process_metrics_mac.cc b/base/process/process_metrics_mac.cc
index d947ce7..8b5d564 100644
--- a/base/process/process_metrics_mac.cc
+++ b/base/process/process_metrics_mac.cc
@@ -84,6 +84,9 @@
   free = 0;
 }
 
+SystemMemoryInfoKB::SystemMemoryInfoKB(const SystemMemoryInfoKB& other) =
+    default;
+
 // Getting a mach task from a pid for another process requires permissions in
 // general, so there doesn't really seem to be a way to do these (and spinning
 // up ps to fetch each stats seems dangerous to put in a base api for anyone to
diff --git a/base/profiler/alternate_timer.cc b/base/profiler/alternate_timer.cc
deleted file mode 100644
index b2d2c70..0000000
--- a/base/profiler/alternate_timer.cc
+++ /dev/null
@@ -1,34 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "base/profiler/alternate_timer.h"
-
-namespace {
-
-tracked_objects::NowFunction* g_time_function = nullptr;
-tracked_objects::TimeSourceType g_time_source_type =
-    tracked_objects::TIME_SOURCE_TYPE_WALL_TIME;
-
-}  // anonymous namespace
-
-namespace tracked_objects {
-
-const char kAlternateProfilerTime[] = "CHROME_PROFILER_TIME";
-
-// Set an alternate timer function to replace the OS time function when
-// profiling.
-void SetAlternateTimeSource(NowFunction* now_function, TimeSourceType type) {
-  g_time_function = now_function;
-  g_time_source_type = type;
-}
-
-NowFunction* GetAlternateTimeSource() {
-  return g_time_function;
-}
-
-TimeSourceType GetTimeSourceType() {
-  return g_time_source_type;
-}
-
-}  // namespace tracked_objects
diff --git a/base/profiler/alternate_timer.h b/base/profiler/alternate_timer.h
deleted file mode 100644
index fdc75dc..0000000
--- a/base/profiler/alternate_timer.h
+++ /dev/null
@@ -1,44 +0,0 @@
-// Copyright (c) 2012 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// This is a glue file, which allows third party code to call into our profiler
-// without having to include most any functions from base.
-
-#ifndef BASE_PROFILER_ALTERNATE_TIMER_H_
-#define BASE_PROFILER_ALTERNATE_TIMER_H_
-
-#include "base/base_export.h"
-
-namespace tracked_objects {
-
-enum TimeSourceType {
-  TIME_SOURCE_TYPE_WALL_TIME,
-  TIME_SOURCE_TYPE_TCMALLOC
-};
-
-// Provide type for an alternate timer function.
-typedef unsigned int NowFunction();
-
-// Environment variable name that is used to activate alternate timer profiling
-// (such as using TCMalloc allocations to provide a pseudo-timer) for tasks
-// instead of wall clock profiling.
-BASE_EXPORT extern const char kAlternateProfilerTime[];
-
-// Set an alternate timer function to replace the OS time function when
-// profiling.  Typically this is called by an allocator that is providing a
-// function that indicates how much memory has been allocated on any given
-// thread.
-BASE_EXPORT void SetAlternateTimeSource(NowFunction* now_function,
-                                        TimeSourceType type);
-
-// Gets the pointer to a function that was set via SetAlternateTimeSource().
-// Returns NULL if no set was done prior to calling GetAlternateTimeSource.
-NowFunction* GetAlternateTimeSource();
-
-// Returns the type of the currently set time source.
-BASE_EXPORT TimeSourceType GetTimeSourceType();
-
-}  // namespace tracked_objects
-
-#endif  // BASE_PROFILER_ALTERNATE_TIMER_H_
diff --git a/base/run_loop.cc b/base/run_loop.cc
index b8558db..af2c568 100644
--- a/base/run_loop.cc
+++ b/base/run_loop.cc
@@ -8,10 +8,6 @@
 #include "base/tracked_objects.h"
 #include "build/build_config.h"
 
-#if defined(OS_WIN)
-#include "base/message_loop/message_pump_dispatcher.h"
-#endif
-
 namespace base {
 
 RunLoop::RunLoop()
@@ -23,25 +19,8 @@
       running_(false),
       quit_when_idle_received_(false),
       weak_factory_(this) {
-#if defined(OS_WIN)
-   dispatcher_ = NULL;
-#endif
 }
 
-#if defined(OS_WIN)
-RunLoop::RunLoop(MessagePumpDispatcher* dispatcher)
-    : loop_(MessageLoop::current()),
-      previous_run_loop_(NULL),
-      dispatcher_(dispatcher),
-      run_depth_(0),
-      run_called_(false),
-      quit_called_(false),
-      running_(false),
-      quit_when_idle_received_(false),
-      weak_factory_(this) {
-}
-#endif
-
 RunLoop::~RunLoop() {
 }
 
diff --git a/base/run_loop.h b/base/run_loop.h
index e23d073..61b0fe1 100644
--- a/base/run_loop.h
+++ b/base/run_loop.h
@@ -17,10 +17,6 @@
 class MessagePumpForUI;
 #endif
 
-#if defined(OS_WIN)
-class MessagePumpDispatcher;
-#endif
-
 #if defined(OS_IOS)
 class MessagePumpUIApplication;
 #endif
@@ -33,15 +29,12 @@
 class BASE_EXPORT RunLoop {
  public:
   RunLoop();
-#if defined(OS_WIN)
-  explicit RunLoop(MessagePumpDispatcher* dispatcher);
-#endif
   ~RunLoop();
 
   // Run the current MessageLoop. This blocks until Quit is called. Before
-  // calling Run, be sure to grab an AsWeakPtr or the QuitClosure in order to
-  // stop the MessageLoop asynchronously. MessageLoop::QuitWhenIdle and QuitNow
-  // will also trigger a return from Run, but those are deprecated.
+  // calling Run, be sure to grab the QuitClosure in order to stop the
+  // MessageLoop asynchronously. MessageLoop::QuitWhenIdle and QuitNow will also
+  // trigger a return from Run, but those are deprecated.
   void Run();
 
   // Run the current MessageLoop until it doesn't find any tasks or messages in
@@ -95,10 +88,6 @@
   // Parent RunLoop or NULL if this is the top-most RunLoop.
   RunLoop* previous_run_loop_;
 
-#if defined(OS_WIN)
-  MessagePumpDispatcher* dispatcher_;
-#endif
-
   // Used to count how many nested Run() invocations are on the stack.
   int run_depth_;
 
diff --git a/base/security_unittest.cc b/base/security_unittest.cc
index eebe6e0..a82bc91 100644
--- a/base/security_unittest.cc
+++ b/base/security_unittest.cc
@@ -86,7 +86,7 @@
   }
 }
 
-#if defined(OS_IOS) || defined(OS_WIN) || defined(OS_MACOSX)
+#if defined(OS_IOS) || defined(OS_WIN) || defined(OS_MACOSX) || defined(OS_LINUX)
 #define MAYBE_NewOverflow DISABLED_NewOverflow
 #else
 #define MAYBE_NewOverflow NewOverflow
@@ -95,6 +95,8 @@
 // IOS doesn't honor nothrow, so disable the test there.
 // Crashes on Windows Dbg builds, disable there as well.
 // Fails on Mac 10.8 http://crbug.com/227092
+// Disabled on Linux because it fails on the Linux Valgrind bot, and Valgrind
+// exclusions are not currently read. See http://crbug.com/582398
 TEST(SecurityTest, MAYBE_NewOverflow) {
   const size_t kArraySize = 4096;
   // We want something "dynamic" here, so that the compiler doesn't
diff --git a/base/strings/safe_sprintf_unittest.cc b/base/strings/safe_sprintf_unittest.cc
new file mode 100644
index 0000000..931ace8
--- /dev/null
+++ b/base/strings/safe_sprintf_unittest.cc
@@ -0,0 +1,763 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/strings/safe_sprintf.h"
+
+#include <stddef.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <string.h>
+
+#include <limits>
+
+#include "base/logging.h"
+#include "base/macros.h"
+#include "base/memory/scoped_ptr.h"
+#include "build/build_config.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+// Death tests on Android are currently very flaky. No need to add more flaky
+// tests, as they just make it hard to spot real problems.
+// TODO(markus): See if the restrictions on Android can eventually be lifted.
+#if defined(GTEST_HAS_DEATH_TEST) && !defined(OS_ANDROID)
+#define ALLOW_DEATH_TEST
+#endif
+
+namespace base {
+namespace strings {
+
+TEST(SafeSPrintfTest, Empty) {
+  char buf[2] = { 'X', 'X' };
+
+  // Negative buffer size should always result in an error.
+  EXPECT_EQ(-1, SafeSNPrintf(buf, static_cast<size_t>(-1), ""));
+  EXPECT_EQ('X', buf[0]);
+  EXPECT_EQ('X', buf[1]);
+
+  // Zero buffer size should always result in an error.
+  EXPECT_EQ(-1, SafeSNPrintf(buf, 0, ""));
+  EXPECT_EQ('X', buf[0]);
+  EXPECT_EQ('X', buf[1]);
+
+  // A one-byte buffer should always print a single NUL byte.
+  EXPECT_EQ(0, SafeSNPrintf(buf, 1, ""));
+  EXPECT_EQ(0, buf[0]);
+  EXPECT_EQ('X', buf[1]);
+  buf[0] = 'X';
+
+  // A larger buffer should leave the trailing bytes unchanged.
+  EXPECT_EQ(0, SafeSNPrintf(buf, 2, ""));
+  EXPECT_EQ(0, buf[0]);
+  EXPECT_EQ('X', buf[1]);
+  buf[0] = 'X';
+
+  // The same test using SafeSPrintf() instead of SafeSNPrintf().
+  EXPECT_EQ(0, SafeSPrintf(buf, ""));
+  EXPECT_EQ(0, buf[0]);
+  EXPECT_EQ('X', buf[1]);
+  buf[0] = 'X';
+}
+
+TEST(SafeSPrintfTest, NoArguments) {
+  // Output a text message that doesn't require any substitutions. This
+  // is roughly equivalent to calling strncpy() (but unlike strncpy(), it
+  // always adds a trailing NUL and always deduplicates '%' characters).
+  static const char text[] = "hello world";
+  char ref[20], buf[20];
+  memset(ref, 'X', sizeof(ref));
+  memcpy(buf, ref, sizeof(buf));
+
+  // A negative buffer size should always result in an error.
+  EXPECT_EQ(-1, SafeSNPrintf(buf, static_cast<size_t>(-1), text));
+  EXPECT_TRUE(!memcmp(buf, ref, sizeof(buf)));
+
+  // Zero buffer size should always result in an error.
+  EXPECT_EQ(-1, SafeSNPrintf(buf, 0, text));
+  EXPECT_TRUE(!memcmp(buf, ref, sizeof(buf)));
+
+  // A one-byte buffer should always print a single NUL byte.
+  EXPECT_EQ(static_cast<ssize_t>(sizeof(text))-1, SafeSNPrintf(buf, 1, text));
+  EXPECT_EQ(0, buf[0]);
+  EXPECT_TRUE(!memcmp(buf+1, ref+1, sizeof(buf)-1));
+  memcpy(buf, ref, sizeof(buf));
+
+  // A larger (but limited) buffer should always leave the trailing bytes
+  // unchanged.
+  EXPECT_EQ(static_cast<ssize_t>(sizeof(text))-1, SafeSNPrintf(buf, 2, text));
+  EXPECT_EQ(text[0], buf[0]);
+  EXPECT_EQ(0, buf[1]);
+  EXPECT_TRUE(!memcmp(buf+2, ref+2, sizeof(buf)-2));
+  memcpy(buf, ref, sizeof(buf));
+
+  // An unrestricted buffer length should always leave the trailing bytes
+  // unchanged.
+  EXPECT_EQ(static_cast<ssize_t>(sizeof(text))-1,
+            SafeSNPrintf(buf, sizeof(buf), text));
+  EXPECT_EQ(std::string(text), std::string(buf));
+  EXPECT_TRUE(!memcmp(buf + sizeof(text), ref + sizeof(text),
+                      sizeof(buf) - sizeof(text)));
+  memcpy(buf, ref, sizeof(buf));
+
+  // The same test using SafeSPrintf() instead of SafeSNPrintf().
+  EXPECT_EQ(static_cast<ssize_t>(sizeof(text))-1, SafeSPrintf(buf, text));
+  EXPECT_EQ(std::string(text), std::string(buf));
+  EXPECT_TRUE(!memcmp(buf + sizeof(text), ref + sizeof(text),
+                      sizeof(buf) - sizeof(text)));
+  memcpy(buf, ref, sizeof(buf));
+
+  // Check for deduplication of '%' percent characters.
+  EXPECT_EQ(1, SafeSPrintf(buf, "%%"));
+  EXPECT_EQ(2, SafeSPrintf(buf, "%%%%"));
+  EXPECT_EQ(2, SafeSPrintf(buf, "%%X"));
+  EXPECT_EQ(3, SafeSPrintf(buf, "%%%%X"));
+#if defined(NDEBUG)
+  EXPECT_EQ(1, SafeSPrintf(buf, "%"));
+  EXPECT_EQ(2, SafeSPrintf(buf, "%%%"));
+  EXPECT_EQ(2, SafeSPrintf(buf, "%X"));
+  EXPECT_EQ(3, SafeSPrintf(buf, "%%%X"));
+#elif defined(ALLOW_DEATH_TEST)
+  EXPECT_DEATH(SafeSPrintf(buf, "%"), "src.1. == '%'");
+  EXPECT_DEATH(SafeSPrintf(buf, "%%%"), "src.1. == '%'");
+  EXPECT_DEATH(SafeSPrintf(buf, "%X"), "src.1. == '%'");
+  EXPECT_DEATH(SafeSPrintf(buf, "%%%X"), "src.1. == '%'");
+#endif
+}
+
+TEST(SafeSPrintfTest, OneArgument) {
+  // Test basic single-argument single-character substitution.
+  const char text[] = "hello world";
+  const char fmt[]  = "hello%cworld";
+  char ref[20], buf[20];
+  memset(ref, 'X', sizeof(ref));
+  memcpy(buf, ref, sizeof(buf));
+
+  // A negative buffer size should always result in an error.
+  EXPECT_EQ(-1, SafeSNPrintf(buf, static_cast<size_t>(-1), fmt, ' '));
+  EXPECT_TRUE(!memcmp(buf, ref, sizeof(buf)));
+
+  // Zero buffer size should always result in an error.
+  EXPECT_EQ(-1, SafeSNPrintf(buf, 0, fmt, ' '));
+  EXPECT_TRUE(!memcmp(buf, ref, sizeof(buf)));
+
+  // A one-byte buffer should always print a single NUL byte.
+  EXPECT_EQ(static_cast<ssize_t>(sizeof(text))-1,
+            SafeSNPrintf(buf, 1, fmt, ' '));
+  EXPECT_EQ(0, buf[0]);
+  EXPECT_TRUE(!memcmp(buf+1, ref+1, sizeof(buf)-1));
+  memcpy(buf, ref, sizeof(buf));
+
+  // A larger (but limited) buffer should always leave the trailing bytes
+  // unchanged.
+  EXPECT_EQ(static_cast<ssize_t>(sizeof(text))-1,
+            SafeSNPrintf(buf, 2, fmt, ' '));
+  EXPECT_EQ(text[0], buf[0]);
+  EXPECT_EQ(0, buf[1]);
+  EXPECT_TRUE(!memcmp(buf+2, ref+2, sizeof(buf)-2));
+  memcpy(buf, ref, sizeof(buf));
+
+  // An unrestricted buffer length should always leave the trailing bytes
+  // unchanged.
+  EXPECT_EQ(static_cast<ssize_t>(sizeof(text))-1,
+            SafeSNPrintf(buf, sizeof(buf), fmt, ' '));
+  EXPECT_EQ(std::string(text), std::string(buf));
+  EXPECT_TRUE(!memcmp(buf + sizeof(text), ref + sizeof(text),
+                      sizeof(buf) - sizeof(text)));
+  memcpy(buf, ref, sizeof(buf));
+
+  // The same test using SafeSPrintf() instead of SafeSNPrintf().
+  EXPECT_EQ(static_cast<ssize_t>(sizeof(text))-1, SafeSPrintf(buf, fmt, ' '));
+  EXPECT_EQ(std::string(text), std::string(buf));
+  EXPECT_TRUE(!memcmp(buf + sizeof(text), ref + sizeof(text),
+                      sizeof(buf) - sizeof(text)));
+  memcpy(buf, ref, sizeof(buf));
+
+  // Check for deduplication of '%' percent characters.
+  EXPECT_EQ(1, SafeSPrintf(buf, "%%", 0));
+  EXPECT_EQ(2, SafeSPrintf(buf, "%%%%", 0));
+  EXPECT_EQ(2, SafeSPrintf(buf, "%Y", 0));
+  EXPECT_EQ(2, SafeSPrintf(buf, "%%Y", 0));
+  EXPECT_EQ(3, SafeSPrintf(buf, "%%%Y", 0));
+  EXPECT_EQ(3, SafeSPrintf(buf, "%%%%Y", 0));
+#if defined(NDEBUG)
+  EXPECT_EQ(1, SafeSPrintf(buf, "%", 0));
+  EXPECT_EQ(2, SafeSPrintf(buf, "%%%", 0));
+#elif defined(ALLOW_DEATH_TEST)
+  EXPECT_DEATH(SafeSPrintf(buf, "%", 0), "ch");
+  EXPECT_DEATH(SafeSPrintf(buf, "%%%", 0), "ch");
+#endif
+}
+
+TEST(SafeSPrintfTest, MissingArg) {
+#if defined(NDEBUG)
+  char buf[20];
+  EXPECT_EQ(3, SafeSPrintf(buf, "%c%c", 'A'));
+  EXPECT_EQ("A%c", std::string(buf));
+#elif defined(ALLOW_DEATH_TEST)
+  char buf[20];
+  EXPECT_DEATH(SafeSPrintf(buf, "%c%c", 'A'), "cur_arg < max_args");
+#endif
+}
+
+TEST(SafeSPrintfTest, ASANFriendlyBufferTest) {
+  // Print into a buffer that is sized exactly to fit the output. ASAN can
+  // verify that nobody attempts to write past the end of the buffer.
+  // There is a more complicated test in PrintLongString() that covers many
+  // more edge cases, but it is also harder to debug in case of a failure.
+  const char kTestString[] = "This is a test";
+  scoped_ptr<char[]> buf(new char[sizeof(kTestString)]);
+  EXPECT_EQ(static_cast<ssize_t>(sizeof(kTestString) - 1),
+            SafeSNPrintf(buf.get(), sizeof(kTestString), kTestString));
+  EXPECT_EQ(std::string(kTestString), std::string(buf.get()));
+  EXPECT_EQ(static_cast<ssize_t>(sizeof(kTestString) - 1),
+            SafeSNPrintf(buf.get(), sizeof(kTestString), "%s", kTestString));
+  EXPECT_EQ(std::string(kTestString), std::string(buf.get()));
+}
+
+TEST(SafeSPrintfTest, NArgs) {
+  // Pre-C++11 compilers have a different code path that can only print
+  // up to ten distinct arguments.
+  // We test both SafeSPrintf() and SafeSNPrintf(). This makes sure we don't
+  // have typos in the copy-n-pasted code that is needed to deal with various
+  // numbers of arguments.
+  char buf[12];
+  EXPECT_EQ(1, SafeSPrintf(buf, "%c", 1));
+  EXPECT_EQ("\1", std::string(buf));
+  EXPECT_EQ(2, SafeSPrintf(buf, "%c%c", 1, 2));
+  EXPECT_EQ("\1\2", std::string(buf));
+  EXPECT_EQ(3, SafeSPrintf(buf, "%c%c%c", 1, 2, 3));
+  EXPECT_EQ("\1\2\3", std::string(buf));
+  EXPECT_EQ(4, SafeSPrintf(buf, "%c%c%c%c", 1, 2, 3, 4));
+  EXPECT_EQ("\1\2\3\4", std::string(buf));
+  EXPECT_EQ(5, SafeSPrintf(buf, "%c%c%c%c%c", 1, 2, 3, 4, 5));
+  EXPECT_EQ("\1\2\3\4\5", std::string(buf));
+  EXPECT_EQ(6, SafeSPrintf(buf, "%c%c%c%c%c%c", 1, 2, 3, 4, 5, 6));
+  EXPECT_EQ("\1\2\3\4\5\6", std::string(buf));
+  EXPECT_EQ(7, SafeSPrintf(buf, "%c%c%c%c%c%c%c", 1, 2, 3, 4, 5, 6, 7));
+  EXPECT_EQ("\1\2\3\4\5\6\7", std::string(buf));
+  EXPECT_EQ(8, SafeSPrintf(buf, "%c%c%c%c%c%c%c%c", 1, 2, 3, 4, 5, 6, 7, 8));
+  EXPECT_EQ("\1\2\3\4\5\6\7\10", std::string(buf));
+  EXPECT_EQ(9, SafeSPrintf(buf, "%c%c%c%c%c%c%c%c%c",
+                           1, 2, 3, 4, 5, 6, 7, 8, 9));
+  EXPECT_EQ("\1\2\3\4\5\6\7\10\11", std::string(buf));
+  EXPECT_EQ(10, SafeSPrintf(buf, "%c%c%c%c%c%c%c%c%c%c",
+                            1, 2, 3, 4, 5, 6, 7, 8, 9, 10));
+
+  // Repeat all the tests with SafeSNPrintf() instead of SafeSPrintf().
+  EXPECT_EQ("\1\2\3\4\5\6\7\10\11\12", std::string(buf));
+  EXPECT_EQ(1, SafeSNPrintf(buf, 11, "%c", 1));
+  EXPECT_EQ("\1", std::string(buf));
+  EXPECT_EQ(2, SafeSNPrintf(buf, 11, "%c%c", 1, 2));
+  EXPECT_EQ("\1\2", std::string(buf));
+  EXPECT_EQ(3, SafeSNPrintf(buf, 11, "%c%c%c", 1, 2, 3));
+  EXPECT_EQ("\1\2\3", std::string(buf));
+  EXPECT_EQ(4, SafeSNPrintf(buf, 11, "%c%c%c%c", 1, 2, 3, 4));
+  EXPECT_EQ("\1\2\3\4", std::string(buf));
+  EXPECT_EQ(5, SafeSNPrintf(buf, 11, "%c%c%c%c%c", 1, 2, 3, 4, 5));
+  EXPECT_EQ("\1\2\3\4\5", std::string(buf));
+  EXPECT_EQ(6, SafeSNPrintf(buf, 11, "%c%c%c%c%c%c", 1, 2, 3, 4, 5, 6));
+  EXPECT_EQ("\1\2\3\4\5\6", std::string(buf));
+  EXPECT_EQ(7, SafeSNPrintf(buf, 11, "%c%c%c%c%c%c%c", 1, 2, 3, 4, 5, 6, 7));
+  EXPECT_EQ("\1\2\3\4\5\6\7", std::string(buf));
+  EXPECT_EQ(8, SafeSNPrintf(buf, 11, "%c%c%c%c%c%c%c%c",
+                            1, 2, 3, 4, 5, 6, 7, 8));
+  EXPECT_EQ("\1\2\3\4\5\6\7\10", std::string(buf));
+  EXPECT_EQ(9, SafeSNPrintf(buf, 11, "%c%c%c%c%c%c%c%c%c",
+                            1, 2, 3, 4, 5, 6, 7, 8, 9));
+  EXPECT_EQ("\1\2\3\4\5\6\7\10\11", std::string(buf));
+  EXPECT_EQ(10, SafeSNPrintf(buf, 11, "%c%c%c%c%c%c%c%c%c%c",
+                             1, 2, 3, 4, 5, 6, 7, 8, 9, 10));
+  EXPECT_EQ("\1\2\3\4\5\6\7\10\11\12", std::string(buf));
+
+  EXPECT_EQ(11, SafeSPrintf(buf, "%c%c%c%c%c%c%c%c%c%c%c",
+                            1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11));
+  EXPECT_EQ("\1\2\3\4\5\6\7\10\11\12\13", std::string(buf));
+  EXPECT_EQ(11, SafeSNPrintf(buf, 12, "%c%c%c%c%c%c%c%c%c%c%c",
+                             1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11));
+  EXPECT_EQ("\1\2\3\4\5\6\7\10\11\12\13", std::string(buf));
+}
+
+TEST(SafeSPrintfTest, DataTypes) {
+  char buf[40];
+
+  // Bytes
+  EXPECT_EQ(1, SafeSPrintf(buf, "%d", (uint8_t)1));
+  EXPECT_EQ("1", std::string(buf));
+  EXPECT_EQ(3, SafeSPrintf(buf, "%d", (uint8_t)-1));
+  EXPECT_EQ("255", std::string(buf));
+  EXPECT_EQ(1, SafeSPrintf(buf, "%d", (int8_t)1));
+  EXPECT_EQ("1", std::string(buf));
+  EXPECT_EQ(2, SafeSPrintf(buf, "%d", (int8_t)-1));
+  EXPECT_EQ("-1", std::string(buf));
+  EXPECT_EQ(4, SafeSPrintf(buf, "%d", (int8_t)-128));
+  EXPECT_EQ("-128", std::string(buf));
+
+  // Half-words
+  EXPECT_EQ(1, SafeSPrintf(buf, "%d", (uint16_t)1));
+  EXPECT_EQ("1", std::string(buf));
+  EXPECT_EQ(5, SafeSPrintf(buf, "%d", (uint16_t)-1));
+  EXPECT_EQ("65535", std::string(buf));
+  EXPECT_EQ(1, SafeSPrintf(buf, "%d", (int16_t)1));
+  EXPECT_EQ("1", std::string(buf));
+  EXPECT_EQ(2, SafeSPrintf(buf, "%d", (int16_t)-1));
+  EXPECT_EQ("-1", std::string(buf));
+  EXPECT_EQ(6, SafeSPrintf(buf, "%d", (int16_t)-32768));
+  EXPECT_EQ("-32768", std::string(buf));
+
+  // Words
+  EXPECT_EQ(1, SafeSPrintf(buf, "%d", (uint32_t)1));
+  EXPECT_EQ("1", std::string(buf));
+  EXPECT_EQ(10, SafeSPrintf(buf, "%d", (uint32_t)-1));
+  EXPECT_EQ("4294967295", std::string(buf));
+  EXPECT_EQ(1, SafeSPrintf(buf, "%d", (int32_t)1));
+  EXPECT_EQ("1", std::string(buf));
+  EXPECT_EQ(2, SafeSPrintf(buf, "%d", (int32_t)-1));
+  EXPECT_EQ("-1", std::string(buf));
+  // Work-around for a limitation of C90
+  EXPECT_EQ(11, SafeSPrintf(buf, "%d", (int32_t)-2147483647-1));
+  EXPECT_EQ("-2147483648", std::string(buf));
+
+  // Quads
+  EXPECT_EQ(1, SafeSPrintf(buf, "%d", (uint64_t)1));
+  EXPECT_EQ("1", std::string(buf));
+  EXPECT_EQ(20, SafeSPrintf(buf, "%d", (uint64_t)-1));
+  EXPECT_EQ("18446744073709551615", std::string(buf));
+  EXPECT_EQ(1, SafeSPrintf(buf, "%d", (int64_t)1));
+  EXPECT_EQ("1", std::string(buf));
+  EXPECT_EQ(2, SafeSPrintf(buf, "%d", (int64_t)-1));
+  EXPECT_EQ("-1", std::string(buf));
+  // Work-around for a limitation of C90
+  EXPECT_EQ(20, SafeSPrintf(buf, "%d", (int64_t)-9223372036854775807LL-1));
+  EXPECT_EQ("-9223372036854775808", std::string(buf));
+
+  // Strings (both const and mutable).
+  EXPECT_EQ(4, SafeSPrintf(buf, "test"));
+  EXPECT_EQ("test", std::string(buf));
+  EXPECT_EQ(4, SafeSPrintf(buf, buf));
+  EXPECT_EQ("test", std::string(buf));
+
+  // Pointer
+  char addr[20];
+  sprintf(addr, "0x%llX", (unsigned long long)(uintptr_t)buf);
+  SafeSPrintf(buf, "%p", buf);
+  EXPECT_EQ(std::string(addr), std::string(buf));
+  SafeSPrintf(buf, "%p", (const char *)buf);
+  EXPECT_EQ(std::string(addr), std::string(buf));
+  sprintf(addr, "0x%llX", (unsigned long long)(uintptr_t)sprintf);
+  SafeSPrintf(buf, "%p", sprintf);
+  EXPECT_EQ(std::string(addr), std::string(buf));
+
+  // Padding for pointers is a little more complicated because of the "0x"
+  // prefix. Padding with '0' zeros is relatively straightforward, but
+  // padding with ' ' spaces requires more effort.
+  sprintf(addr, "0x%017llX", (unsigned long long)(uintptr_t)buf);
+  SafeSPrintf(buf, "%019p", buf);
+  EXPECT_EQ(std::string(addr), std::string(buf));
+  sprintf(addr, "0x%llX", (unsigned long long)(uintptr_t)buf);
+  memset(addr, ' ',
+         (char*)memmove(addr + sizeof(addr) - strlen(addr) - 1,
+                        addr, strlen(addr)+1) - addr);
+  SafeSPrintf(buf, "%19p", buf);
+  EXPECT_EQ(std::string(addr), std::string(buf));
+}
+
+namespace {
+void PrintLongString(char* buf, size_t sz) {
+  // Output a reasonably complex expression into a limited-size buffer.
+  // At least one byte is available for writing the NUL character.
+  CHECK_GT(sz, static_cast<size_t>(0));
+
+  // Allocate slightly more space, so that we can verify that SafeSPrintf()
+  // never writes past the end of the buffer.
+  scoped_ptr<char[]> tmp(new char[sz+2]);
+  memset(tmp.get(), 'X', sz+2);
+
+  // Use SafeSPrintf() to output a complex list of arguments:
+  // - test padding and truncating %c single characters.
+  // - test truncating %s simple strings.
+  // - test mismatching arguments and truncating (for %d != %s).
+  // - test zero-padding and truncating %x hexadecimal numbers.
+  // - test outputting and truncating %d MININT.
+  // - test outputting and truncating %p arbitrary pointer values.
+  // - test outputting, padding and truncating NULL-pointer %s strings.
+  char* out = tmp.get();
+  size_t out_sz = sz;
+  size_t len;
+  for (scoped_ptr<char[]> perfect_buf;;) {
+    size_t needed = SafeSNPrintf(out, out_sz,
+#if defined(NDEBUG)
+                            "A%2cong %s: %d %010X %d %p%7s", 'l', "string", "",
+#else
+                            "A%2cong %s: %%d %010X %d %p%7s", 'l', "string",
+#endif
+                            0xDEADBEEF, std::numeric_limits<intptr_t>::min(),
+                            PrintLongString, static_cast<char*>(NULL)) + 1;
+
+    // Various sanity checks:
+    // The number of characters needed to print the full string should always
+    // be greater than or equal to the number of bytes actually output.
+    len = strlen(tmp.get());
+    CHECK_GE(needed, len+1);
+
+    // The number of characters output should always fit into the buffer that
+    // was passed into SafeSPrintf().
+    CHECK_LT(len, out_sz);
+
+    // The output is always terminated with a NUL byte (actually, this test is
+    // always going to pass, as strlen() already verified this)
+    EXPECT_FALSE(tmp[len]);
+
+    // ASAN can check that we are not overwriting buffers, iff we make sure the
+    // buffer is exactly the size that we are expecting to be written. After
+    // running SafeSNPrintf() the first time, it is possible to compute the
+    // correct buffer size for this test. So, allocate a second buffer and run
+    // the exact same SafeSNPrintf() command again.
+    if (!perfect_buf.get()) {
+      out_sz = std::min(needed, sz);
+      out = new char[out_sz];
+      perfect_buf.reset(out);
+    } else {
+      break;
+    }
+  }
+
+  // All trailing bytes are unchanged.
+  for (size_t i = len+1; i < sz+2; ++i)
+    EXPECT_EQ('X', tmp[i]);
+
+  // The text that was generated by SafeSPrintf() should always match the
+  // equivalent text generated by sprintf(). Please note that the format
+  // string for sprintf() is not complicated, as it does not have the
+  // benefit of getting type information from the C++ compiler.
+  //
+  // N.B.: It would be so much cleaner to use snprintf(). But unfortunately,
+  //       Visual Studio doesn't support this function, and the work-arounds
+  //       are all really awkward.
+  char ref[256];
+  CHECK_LE(sz, sizeof(ref));
+  sprintf(ref, "A long string: %%d 00DEADBEEF %lld 0x%llX <NULL>",
+          static_cast<long long>(std::numeric_limits<intptr_t>::min()),
+          static_cast<unsigned long long>(
+            reinterpret_cast<uintptr_t>(PrintLongString)));
+  ref[sz-1] = '\000';
+
+#if defined(NDEBUG)
+  const size_t kSSizeMax = std::numeric_limits<ssize_t>::max();
+#else
+  const size_t kSSizeMax = internal::GetSafeSPrintfSSizeMaxForTest();
+#endif
+
+  // Compare the output from SafeSPrintf() to the one from sprintf().
+  EXPECT_EQ(std::string(ref).substr(0, kSSizeMax-1), std::string(tmp.get()));
+
+  // We allocated a slightly larger buffer, so that we could perform some
+  // extra sanity checks. Now that the tests have all passed, we copy the
+  // data to the output buffer that the caller provided.
+  memcpy(buf, tmp.get(), len+1);
+}
+
+#if !defined(NDEBUG)
+class ScopedSafeSPrintfSSizeMaxSetter {
+ public:
+  ScopedSafeSPrintfSSizeMaxSetter(size_t sz) {
+    old_ssize_max_ = internal::GetSafeSPrintfSSizeMaxForTest();
+    internal::SetSafeSPrintfSSizeMaxForTest(sz);
+  }
+
+  ~ScopedSafeSPrintfSSizeMaxSetter() {
+    internal::SetSafeSPrintfSSizeMaxForTest(old_ssize_max_);
+  }
+
+ private:
+  size_t old_ssize_max_;
+
+  DISALLOW_COPY_AND_ASSIGN(ScopedSafeSPrintfSSizeMaxSetter);
+};
+#endif
+
+}  // anonymous namespace
+
+TEST(SafeSPrintfTest, Truncation) {
+  // We use PrintLongString() to print a complex long string and then
+  // truncate to all possible lengths. This ends up exercising a lot of
+  // different code paths in SafeSPrintf() and IToASCII(), as truncation can
+  // happen in a lot of different states.
+  char ref[256];
+  PrintLongString(ref, sizeof(ref));
+  for (size_t i = strlen(ref)+1; i; --i) {
+    char buf[sizeof(ref)];
+    PrintLongString(buf, i);
+    EXPECT_EQ(std::string(ref, i - 1), std::string(buf));
+  }
+
+  // When compiling in debug mode, we have the ability to fake a small
+  // upper limit for the maximum value that can be stored in an ssize_t.
+  // SafeSPrintf() uses this upper limit to determine how many bytes it will
+  // write to the buffer, even if the caller claimed a bigger buffer size.
+  // Repeat the truncation test and verify that this other code path in
+  // SafeSPrintf() works correctly, too.
+#if !defined(NDEBUG)
+  for (size_t i = strlen(ref)+1; i > 1; --i) {
+    ScopedSafeSPrintfSSizeMaxSetter ssize_max_setter(i);
+    char buf[sizeof(ref)];
+    PrintLongString(buf, sizeof(buf));
+    EXPECT_EQ(std::string(ref, i - 1), std::string(buf));
+  }
+
+  // kSSizeMax is also used to constrain the maximum amount of padding, before
+  // SafeSPrintf() detects an error in the format string.
+  ScopedSafeSPrintfSSizeMaxSetter ssize_max_setter(100);
+  char buf[256];
+  EXPECT_EQ(99, SafeSPrintf(buf, "%99c", ' '));
+  EXPECT_EQ(std::string(99, ' '), std::string(buf));
+  *buf = '\000';
+#if defined(ALLOW_DEATH_TEST)
+  EXPECT_DEATH(SafeSPrintf(buf, "%100c", ' '), "padding <= max_padding");
+#endif
+  EXPECT_EQ(0, *buf);
+#endif
+}
+
+TEST(SafeSPrintfTest, Padding) {
+  char buf[40], fmt[40];
+
+  // Chars %c
+  EXPECT_EQ(1, SafeSPrintf(buf, "%c", 'A'));
+  EXPECT_EQ("A", std::string(buf));
+  EXPECT_EQ(2, SafeSPrintf(buf, "%2c", 'A'));
+  EXPECT_EQ(" A", std::string(buf));
+  EXPECT_EQ(2, SafeSPrintf(buf, "%02c", 'A'));
+  EXPECT_EQ(" A", std::string(buf));
+  EXPECT_EQ(4, SafeSPrintf(buf, "%-2c", 'A'));
+  EXPECT_EQ("%-2c", std::string(buf));
+  SafeSPrintf(fmt, "%%%dc", std::numeric_limits<ssize_t>::max() - 1);
+  EXPECT_EQ(std::numeric_limits<ssize_t>::max()-1, SafeSPrintf(buf, fmt, 'A'));
+  SafeSPrintf(fmt, "%%%dc",
+              static_cast<size_t>(std::numeric_limits<ssize_t>::max()));
+#if defined(NDEBUG)
+  EXPECT_EQ(2, SafeSPrintf(buf, fmt, 'A'));
+  EXPECT_EQ("%c", std::string(buf));
+#elif defined(ALLOW_DEATH_TEST)
+  EXPECT_DEATH(SafeSPrintf(buf, fmt, 'A'), "padding <= max_padding");
+#endif
+
+  // Octal %o
+  EXPECT_EQ(1, SafeSPrintf(buf, "%o", 1));
+  EXPECT_EQ("1", std::string(buf));
+  EXPECT_EQ(2, SafeSPrintf(buf, "%2o", 1));
+  EXPECT_EQ(" 1", std::string(buf));
+  EXPECT_EQ(2, SafeSPrintf(buf, "%02o", 1));
+  EXPECT_EQ("01", std::string(buf));
+  EXPECT_EQ(12, SafeSPrintf(buf, "%12o", -1));
+  EXPECT_EQ(" 37777777777", std::string(buf));
+  EXPECT_EQ(12, SafeSPrintf(buf, "%012o", -1));
+  EXPECT_EQ("037777777777", std::string(buf));
+  EXPECT_EQ(23, SafeSPrintf(buf, "%23o", -1LL));
+  EXPECT_EQ(" 1777777777777777777777", std::string(buf));
+  EXPECT_EQ(23, SafeSPrintf(buf, "%023o", -1LL));
+  EXPECT_EQ("01777777777777777777777", std::string(buf));
+  EXPECT_EQ(3, SafeSPrintf(buf, "%2o", 0111));
+  EXPECT_EQ("111", std::string(buf));
+  EXPECT_EQ(4, SafeSPrintf(buf, "%-2o", 1));
+  EXPECT_EQ("%-2o", std::string(buf));
+  SafeSPrintf(fmt, "%%%do", std::numeric_limits<ssize_t>::max()-1);
+  EXPECT_EQ(std::numeric_limits<ssize_t>::max()-1,
+            SafeSNPrintf(buf, 4, fmt, 1));
+  EXPECT_EQ("   ", std::string(buf));
+  SafeSPrintf(fmt, "%%0%do", std::numeric_limits<ssize_t>::max()-1);
+  EXPECT_EQ(std::numeric_limits<ssize_t>::max()-1,
+            SafeSNPrintf(buf, 4, fmt, 1));
+  EXPECT_EQ("000", std::string(buf));
+  SafeSPrintf(fmt, "%%%do",
+              static_cast<size_t>(std::numeric_limits<ssize_t>::max()));
+#if defined(NDEBUG)
+  EXPECT_EQ(2, SafeSPrintf(buf, fmt, 1));
+  EXPECT_EQ("%o", std::string(buf));
+#elif defined(ALLOW_DEATH_TEST)
+  EXPECT_DEATH(SafeSPrintf(buf, fmt, 1), "padding <= max_padding");
+#endif
+
+  // Decimals %d
+  EXPECT_EQ(1, SafeSPrintf(buf, "%d", 1));
+  EXPECT_EQ("1", std::string(buf));
+  EXPECT_EQ(2, SafeSPrintf(buf, "%2d", 1));
+  EXPECT_EQ(" 1", std::string(buf));
+  EXPECT_EQ(2, SafeSPrintf(buf, "%02d", 1));
+  EXPECT_EQ("01", std::string(buf));
+  EXPECT_EQ(3, SafeSPrintf(buf, "%3d", -1));
+  EXPECT_EQ(" -1", std::string(buf));
+  EXPECT_EQ(3, SafeSPrintf(buf, "%03d", -1));
+  EXPECT_EQ("-01", std::string(buf));
+  EXPECT_EQ(3, SafeSPrintf(buf, "%2d", 111));
+  EXPECT_EQ("111", std::string(buf));
+  EXPECT_EQ(4, SafeSPrintf(buf, "%2d", -111));
+  EXPECT_EQ("-111", std::string(buf));
+  EXPECT_EQ(4, SafeSPrintf(buf, "%-2d", 1));
+  EXPECT_EQ("%-2d", std::string(buf));
+  SafeSPrintf(fmt, "%%%dd", std::numeric_limits<ssize_t>::max()-1);
+  EXPECT_EQ(std::numeric_limits<ssize_t>::max()-1,
+            SafeSNPrintf(buf, 4, fmt, 1));
+  EXPECT_EQ("   ", std::string(buf));
+  SafeSPrintf(fmt, "%%0%dd", std::numeric_limits<ssize_t>::max()-1);
+  EXPECT_EQ(std::numeric_limits<ssize_t>::max()-1,
+            SafeSNPrintf(buf, 4, fmt, 1));
+  EXPECT_EQ("000", std::string(buf));
+  SafeSPrintf(fmt, "%%%dd",
+              static_cast<size_t>(std::numeric_limits<ssize_t>::max()));
+#if defined(NDEBUG)
+  EXPECT_EQ(2, SafeSPrintf(buf, fmt, 1));
+  EXPECT_EQ("%d", std::string(buf));
+#elif defined(ALLOW_DEATH_TEST)
+  EXPECT_DEATH(SafeSPrintf(buf, fmt, 1), "padding <= max_padding");
+#endif
+
+  // Hex %X
+  EXPECT_EQ(1, SafeSPrintf(buf, "%X", 1));
+  EXPECT_EQ("1", std::string(buf));
+  EXPECT_EQ(2, SafeSPrintf(buf, "%2X", 1));
+  EXPECT_EQ(" 1", std::string(buf));
+  EXPECT_EQ(2, SafeSPrintf(buf, "%02X", 1));
+  EXPECT_EQ("01", std::string(buf));
+  EXPECT_EQ(9, SafeSPrintf(buf, "%9X", -1));
+  EXPECT_EQ(" FFFFFFFF", std::string(buf));
+  EXPECT_EQ(9, SafeSPrintf(buf, "%09X", -1));
+  EXPECT_EQ("0FFFFFFFF", std::string(buf));
+  EXPECT_EQ(17, SafeSPrintf(buf, "%17X", -1LL));
+  EXPECT_EQ(" FFFFFFFFFFFFFFFF", std::string(buf));
+  EXPECT_EQ(17, SafeSPrintf(buf, "%017X", -1LL));
+  EXPECT_EQ("0FFFFFFFFFFFFFFFF", std::string(buf));
+  EXPECT_EQ(3, SafeSPrintf(buf, "%2X", 0x111));
+  EXPECT_EQ("111", std::string(buf));
+  EXPECT_EQ(4, SafeSPrintf(buf, "%-2X", 1));
+  EXPECT_EQ("%-2X", std::string(buf));
+  SafeSPrintf(fmt, "%%%dX", std::numeric_limits<ssize_t>::max()-1);
+  EXPECT_EQ(std::numeric_limits<ssize_t>::max()-1,
+            SafeSNPrintf(buf, 4, fmt, 1));
+  EXPECT_EQ("   ", std::string(buf));
+  SafeSPrintf(fmt, "%%0%dX", std::numeric_limits<ssize_t>::max()-1);
+  EXPECT_EQ(std::numeric_limits<ssize_t>::max()-1,
+            SafeSNPrintf(buf, 4, fmt, 1));
+  EXPECT_EQ("000", std::string(buf));
+  SafeSPrintf(fmt, "%%%dX",
+              static_cast<size_t>(std::numeric_limits<ssize_t>::max()));
+#if defined(NDEBUG)
+  EXPECT_EQ(2, SafeSPrintf(buf, fmt, 1));
+  EXPECT_EQ("%X", std::string(buf));
+#elif defined(ALLOW_DEATH_TEST)
+  EXPECT_DEATH(SafeSPrintf(buf, fmt, 1), "padding <= max_padding");
+#endif
+
+  // Pointer %p
+  EXPECT_EQ(3, SafeSPrintf(buf, "%p", (void*)1));
+  EXPECT_EQ("0x1", std::string(buf));
+  EXPECT_EQ(4, SafeSPrintf(buf, "%4p", (void*)1));
+  EXPECT_EQ(" 0x1", std::string(buf));
+  EXPECT_EQ(4, SafeSPrintf(buf, "%04p", (void*)1));
+  EXPECT_EQ("0x01", std::string(buf));
+  EXPECT_EQ(5, SafeSPrintf(buf, "%4p", (void*)0x111));
+  EXPECT_EQ("0x111", std::string(buf));
+  EXPECT_EQ(4, SafeSPrintf(buf, "%-2p", (void*)1));
+  EXPECT_EQ("%-2p", std::string(buf));
+  SafeSPrintf(fmt, "%%%dp", std::numeric_limits<ssize_t>::max()-1);
+  EXPECT_EQ(std::numeric_limits<ssize_t>::max()-1,
+            SafeSNPrintf(buf, 4, fmt, (void*)1));
+  EXPECT_EQ("   ", std::string(buf));
+  SafeSPrintf(fmt, "%%0%dp", std::numeric_limits<ssize_t>::max()-1);
+  EXPECT_EQ(std::numeric_limits<ssize_t>::max()-1,
+            SafeSNPrintf(buf, 4, fmt, (void*)1));
+  EXPECT_EQ("0x0", std::string(buf));
+  SafeSPrintf(fmt, "%%%dp",
+              static_cast<size_t>(std::numeric_limits<ssize_t>::max()));
+#if defined(NDEBUG)
+  EXPECT_EQ(2, SafeSPrintf(buf, fmt, 1));
+  EXPECT_EQ("%p", std::string(buf));
+#elif defined(ALLOW_DEATH_TEST)
+  EXPECT_DEATH(SafeSPrintf(buf, fmt, 1), "padding <= max_padding");
+#endif
+
+  // String
+  EXPECT_EQ(1, SafeSPrintf(buf, "%s", "A"));
+  EXPECT_EQ("A", std::string(buf));
+  EXPECT_EQ(2, SafeSPrintf(buf, "%2s", "A"));
+  EXPECT_EQ(" A", std::string(buf));
+  EXPECT_EQ(2, SafeSPrintf(buf, "%02s", "A"));
+  EXPECT_EQ(" A", std::string(buf));
+  EXPECT_EQ(3, SafeSPrintf(buf, "%2s", "AAA"));
+  EXPECT_EQ("AAA", std::string(buf));
+  EXPECT_EQ(4, SafeSPrintf(buf, "%-2s", "A"));
+  EXPECT_EQ("%-2s", std::string(buf));
+  SafeSPrintf(fmt, "%%%ds", std::numeric_limits<ssize_t>::max()-1);
+  EXPECT_EQ(std::numeric_limits<ssize_t>::max()-1,
+            SafeSNPrintf(buf, 4, fmt, "A"));
+  EXPECT_EQ("   ", std::string(buf));
+  SafeSPrintf(fmt, "%%0%ds", std::numeric_limits<ssize_t>::max()-1);
+  EXPECT_EQ(std::numeric_limits<ssize_t>::max()-1,
+            SafeSNPrintf(buf, 4, fmt, "A"));
+  EXPECT_EQ("   ", std::string(buf));
+  SafeSPrintf(fmt, "%%%ds",
+              static_cast<size_t>(std::numeric_limits<ssize_t>::max()));
+#if defined(NDEBUG)
+  EXPECT_EQ(2, SafeSPrintf(buf, fmt, "A"));
+  EXPECT_EQ("%s", std::string(buf));
+#elif defined(ALLOW_DEATH_TEST)
+  EXPECT_DEATH(SafeSPrintf(buf, fmt, "A"), "padding <= max_padding");
+#endif
+}
+
+TEST(SafeSPrintfTest, EmbeddedNul) {
+  char buf[] = { 'X', 'X', 'X', 'X' };
+  EXPECT_EQ(2, SafeSPrintf(buf, "%3c", 0));
+  EXPECT_EQ(' ', buf[0]);
+  EXPECT_EQ(' ', buf[1]);
+  EXPECT_EQ(0,   buf[2]);
+  EXPECT_EQ('X', buf[3]);
+
+  // Check handling of a NUL format character. N.B. this takes two different
+  // code paths depending on whether we are actually passing arguments. If
+  // we don't have any arguments, we are running in the fast-path code, which
+  // looks (almost) like a strncpy().
+#if defined(NDEBUG)
+  EXPECT_EQ(2, SafeSPrintf(buf, "%%%"));
+  EXPECT_EQ("%%", std::string(buf));
+  EXPECT_EQ(2, SafeSPrintf(buf, "%%%", 0));
+  EXPECT_EQ("%%", std::string(buf));
+#elif defined(ALLOW_DEATH_TEST)
+  EXPECT_DEATH(SafeSPrintf(buf, "%%%"), "src.1. == '%'");
+  EXPECT_DEATH(SafeSPrintf(buf, "%%%", 0), "ch");
+#endif
+}
+
+TEST(SafeSPrintfTest, EmitNULL) {
+  char buf[40];
+#if defined(__GNUC__)
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wconversion-null"
+#endif
+  EXPECT_EQ(1, SafeSPrintf(buf, "%d", NULL));
+  EXPECT_EQ("0", std::string(buf));
+  EXPECT_EQ(3, SafeSPrintf(buf, "%p", NULL));
+  EXPECT_EQ("0x0", std::string(buf));
+  EXPECT_EQ(6, SafeSPrintf(buf, "%s", NULL));
+  EXPECT_EQ("<NULL>", std::string(buf));
+#if defined(__GNUC__)
+#pragma GCC diagnostic pop
+#endif
+}
+
+TEST(SafeSPrintfTest, PointerSize) {
+  // The internal data representation is a 64-bit value, independent of the
+  // native word size. We want to perform sign-extension for signed integers,
+  // but we want to avoid doing so for pointer types. This could be a
+  // problem on systems where pointers are only 32 bits wide. This test
+  // verifies that there is no such problem.
+  char *str = reinterpret_cast<char *>(0x80000000u);
+  void *ptr = str;
+  char buf[40];
+  EXPECT_EQ(10, SafeSPrintf(buf, "%p", str));
+  EXPECT_EQ("0x80000000", std::string(buf));
+  EXPECT_EQ(10, SafeSPrintf(buf, "%p", ptr));
+  EXPECT_EQ("0x80000000", std::string(buf));
+}
+
+}  // namespace strings
+}  // namespace base
diff --git a/base/strings/string16.h b/base/strings/string16.h
index e47669c..82dd0fa 100644
--- a/base/strings/string16.h
+++ b/base/strings/string16.h
@@ -29,6 +29,8 @@
 #include <stddef.h>
 #include <stdint.h>
 #include <stdio.h>
+
+#include <functional>
 #include <string>
 
 #include "base/base_export.h"
@@ -182,6 +184,21 @@
 extern template
 class BASE_EXPORT std::basic_string<base::char16, base::string16_char_traits>;
 
+// Specialize std::hash for base::string16. Although the style guide forbids
+// this in general, it is necessary for consistency with WCHAR_T_IS_UTF16
+// platforms, where base::string16 is a type alias for std::wstring.
+namespace std {
+template <>
+struct hash<base::string16> {
+  std::size_t operator()(const base::string16& s) const {
+    std::size_t result = 0;
+    for (base::char16 c : s)
+      result = (result * 131) + c;
+    return result;
+  }
+};
+}  // namespace std
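+//
+// With this specialization in place, base::string16 can be used directly as
+// a key in unordered containers, e.g. (a sketch):
+//   std::unordered_set<base::string16> strings;
+//   strings.insert(str);  // |str| is any base::string16.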
+
 #endif  // WCHAR_T_IS_UTF32
 
 #endif  // BASE_STRINGS_STRING16_H_
diff --git a/base/strings/string16_unittest.cc b/base/strings/string16_unittest.cc
index 4e58218..0d2ca80 100644
--- a/base/strings/string16_unittest.cc
+++ b/base/strings/string16_unittest.cc
@@ -3,6 +3,7 @@
 // found in the LICENSE file.
 
 #include <sstream>
+#include <unordered_set>
 
 #include "base/strings/string16.h"
 
@@ -11,8 +12,6 @@
 
 namespace base {
 
-#if defined(WCHAR_T_IS_UTF32)
-
 // We define a custom operator<< for string16 so we can use it with logging.
 // This tests that conversion.
 TEST(String16Test, OutputStream) {
@@ -53,6 +52,15 @@
   }
 }
 
-#endif
+TEST(String16Test, Hash) {
+  string16 str1 = ASCIIToUTF16("hello");
+  string16 str2 = ASCIIToUTF16("world");
+
+  std::unordered_set<string16> set;
+
+  set.insert(str1);
+  EXPECT_EQ(1u, set.count(str1));
+  EXPECT_EQ(0u, set.count(str2));
+}
 
 }  // namespace base
diff --git a/base/strings/string_number_conversions.cc b/base/strings/string_number_conversions.cc
index f869b41..fb5633a 100644
--- a/base/strings/string_number_conversions.cc
+++ b/base/strings/string_number_conversions.cc
@@ -144,6 +144,7 @@
 
     if (begin != end && *begin == '-') {
       if (!std::numeric_limits<value_type>::is_signed) {
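+        // A leading '-' is invalid for unsigned types; still write a defined
+        // value so callers never observe an uninitialized |*output|.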
+        *output = 0;
         valid = false;
       } else if (!Negative::Invoke(begin + 1, end, output)) {
         valid = false;
diff --git a/base/strings/string_number_conversions_unittest.cc b/base/strings/string_number_conversions_unittest.cc
index 0ed06a1..2e74419 100644
--- a/base/strings/string_number_conversions_unittest.cc
+++ b/base/strings/string_number_conversions_unittest.cc
@@ -137,12 +137,12 @@
   };
 
   for (size_t i = 0; i < arraysize(cases); ++i) {
-    int output = 0;
+    int output = cases[i].output ^ 1;  // Ensure StringToInt wrote something.
     EXPECT_EQ(cases[i].success, StringToInt(cases[i].input, &output));
     EXPECT_EQ(cases[i].output, output);
 
     string16 utf16_input = UTF8ToUTF16(cases[i].input);
-    output = 0;
+    output = cases[i].output ^ 1;  // Ensure StringToInt wrote something.
     EXPECT_EQ(cases[i].success, StringToInt(utf16_input, &output));
     EXPECT_EQ(cases[i].output, output);
   }
@@ -201,12 +201,13 @@
   };
 
   for (size_t i = 0; i < arraysize(cases); ++i) {
-    unsigned output = 0;
+    unsigned output =
+        cases[i].output ^ 1;  // Ensure StringToUint wrote something.
     EXPECT_EQ(cases[i].success, StringToUint(cases[i].input, &output));
     EXPECT_EQ(cases[i].output, output);
 
     string16 utf16_input = UTF8ToUTF16(cases[i].input);
-    output = 0;
+    output = cases[i].output ^ 1;  // Ensure StringToUint wrote something.
     EXPECT_EQ(cases[i].success, StringToUint(utf16_input, &output));
     EXPECT_EQ(cases[i].output, output);
   }
diff --git a/base/strings/string_piece.h b/base/strings/string_piece.h
index 31e7596..eaec14d 100644
--- a/base/strings/string_piece.h
+++ b/base/strings/string_piece.h
@@ -28,7 +28,6 @@
 #include <string>
 
 #include "base/base_export.h"
-#include "base/containers/hash_tables.h"
 #include "base/logging.h"
 #include "base/strings/string16.h"
 
@@ -224,6 +223,8 @@
   }
 
   value_type operator[](size_type i) const { return ptr_[i]; }
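+  // As with std::string, front() and back() must not be called on an empty
+  // StringPiece.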
+  value_type front() const { return ptr_[0]; }
+  value_type back() const { return ptr_[length_ - 1]; }
 
   void remove_prefix(size_type n) {
     ptr_ += n;
@@ -432,38 +433,32 @@
 BASE_EXPORT std::ostream& operator<<(std::ostream& o,
                                      const StringPiece& piece);
 
-}  // namespace base
-
 // Hashing ---------------------------------------------------------------------
 
 // We provide appropriate hash functions so StringPiece and StringPiece16 can
 // be used as keys in hash sets and maps.
 
-// This hash function is copied from base/containers/hash_tables.h. We don't
-// use the ones already defined for string and string16 directly because it
-// would require the string constructors to be called, which we don't want.
-#define HASH_STRING_PIECE(StringPieceType, string_piece)                \
-  std::size_t result = 0;                                               \
-  for (StringPieceType::const_iterator i = string_piece.begin();        \
-       i != string_piece.end(); ++i)                                    \
-    result = (result * 131) + *i;                                       \
-  return result;                                                        \
+// This hash function is copied from base/strings/string16.h. We don't use the
+// ones already defined for string and string16 directly because it would
+// require the string constructors to be called, which we don't want.
+#define HASH_STRING_PIECE(StringPieceType, string_piece)         \
+  std::size_t result = 0;                                        \
+  for (StringPieceType::const_iterator i = string_piece.begin(); \
+       i != string_piece.end(); ++i)                             \
+    result = (result * 131) + *i;                                \
+  return result;
 
-namespace BASE_HASH_NAMESPACE {
-
-template<>
-struct hash<base::StringPiece> {
-  std::size_t operator()(const base::StringPiece& sp) const {
-    HASH_STRING_PIECE(base::StringPiece, sp);
+struct StringPieceHash {
+  std::size_t operator()(const StringPiece& sp) const {
+    HASH_STRING_PIECE(StringPiece, sp);
   }
 };
-template<>
-struct hash<base::StringPiece16> {
-  std::size_t operator()(const base::StringPiece16& sp16) const {
-    HASH_STRING_PIECE(base::StringPiece16, sp16);
+struct StringPiece16Hash {
+  std::size_t operator()(const StringPiece16& sp16) const {
+    HASH_STRING_PIECE(StringPiece16, sp16);
   }
 };
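+
+// Because these are not std::hash specializations, unordered containers must
+// name the hasher explicitly, e.g. (a sketch):
+//   std::unordered_set<StringPiece, StringPieceHash> pieces;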
 
-}  // namespace BASE_HASH_NAMESPACE
+}  // namespace base
 
 #endif  // BASE_STRINGS_STRING_PIECE_H_
diff --git a/base/strings/string_tokenizer_unittest.cc b/base/strings/string_tokenizer_unittest.cc
new file mode 100644
index 0000000..d391845
--- /dev/null
+++ b/base/strings/string_tokenizer_unittest.cc
@@ -0,0 +1,234 @@
+// Copyright (c) 2006-2008 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/strings/string_tokenizer.h"
+
+#include "testing/gtest/include/gtest/gtest.h"
+
+using std::string;
+
+namespace base {
+
+namespace {
+
+TEST(StringTokenizerTest, Simple) {
+  string input = "this is a test";
+  StringTokenizer t(input, " ");
+
+  EXPECT_TRUE(t.GetNext());
+  EXPECT_EQ(string("this"), t.token());
+
+  EXPECT_TRUE(t.GetNext());
+  EXPECT_EQ(string("is"), t.token());
+
+  EXPECT_TRUE(t.GetNext());
+  EXPECT_EQ(string("a"), t.token());
+
+  EXPECT_TRUE(t.GetNext());
+  EXPECT_EQ(string("test"), t.token());
+
+  EXPECT_FALSE(t.GetNext());
+}
+
+TEST(StringTokenizerTest, Reset) {
+  string input = "this is a test";
+  StringTokenizer t(input, " ");
+
+  for (int i = 0; i < 2; ++i) {
+    EXPECT_TRUE(t.GetNext());
+    EXPECT_EQ(string("this"), t.token());
+
+    EXPECT_TRUE(t.GetNext());
+    EXPECT_EQ(string("is"), t.token());
+
+    EXPECT_TRUE(t.GetNext());
+    EXPECT_EQ(string("a"), t.token());
+
+    EXPECT_TRUE(t.GetNext());
+    EXPECT_EQ(string("test"), t.token());
+
+    EXPECT_FALSE(t.GetNext());
+    t.Reset();
+  }
+}
+
+TEST(StringTokenizerTest, RetDelims) {
+  string input = "this is a test";
+  StringTokenizer t(input, " ");
+  t.set_options(StringTokenizer::RETURN_DELIMS);
+
+  EXPECT_TRUE(t.GetNext());
+  EXPECT_EQ(string("this"), t.token());
+
+  EXPECT_TRUE(t.GetNext());
+  EXPECT_EQ(string(" "), t.token());
+
+  EXPECT_TRUE(t.GetNext());
+  EXPECT_EQ(string("is"), t.token());
+
+  EXPECT_TRUE(t.GetNext());
+  EXPECT_EQ(string(" "), t.token());
+
+  EXPECT_TRUE(t.GetNext());
+  EXPECT_EQ(string("a"), t.token());
+
+  EXPECT_TRUE(t.GetNext());
+  EXPECT_EQ(string(" "), t.token());
+
+  EXPECT_TRUE(t.GetNext());
+  EXPECT_EQ(string("test"), t.token());
+
+  EXPECT_FALSE(t.GetNext());
+}
+
+TEST(StringTokenizerTest, ManyDelims) {
+  string input = "this: is, a-test";
+  StringTokenizer t(input, ": ,-");
+
+  EXPECT_TRUE(t.GetNext());
+  EXPECT_EQ(string("this"), t.token());
+
+  EXPECT_TRUE(t.GetNext());
+  EXPECT_EQ(string("is"), t.token());
+
+  EXPECT_TRUE(t.GetNext());
+  EXPECT_EQ(string("a"), t.token());
+
+  EXPECT_TRUE(t.GetNext());
+  EXPECT_EQ(string("test"), t.token());
+
+  EXPECT_FALSE(t.GetNext());
+}
+
+TEST(StringTokenizerTest, ParseHeader) {
+  string input = "Content-Type: text/html ; charset=UTF-8";
+  StringTokenizer t(input, ": ;=");
+  t.set_options(StringTokenizer::RETURN_DELIMS);
+
+  EXPECT_TRUE(t.GetNext());
+  EXPECT_FALSE(t.token_is_delim());
+  EXPECT_EQ(string("Content-Type"), t.token());
+
+  EXPECT_TRUE(t.GetNext());
+  EXPECT_TRUE(t.token_is_delim());
+  EXPECT_EQ(string(":"), t.token());
+
+  EXPECT_TRUE(t.GetNext());
+  EXPECT_TRUE(t.token_is_delim());
+  EXPECT_EQ(string(" "), t.token());
+
+  EXPECT_TRUE(t.GetNext());
+  EXPECT_FALSE(t.token_is_delim());
+  EXPECT_EQ(string("text/html"), t.token());
+
+  EXPECT_TRUE(t.GetNext());
+  EXPECT_TRUE(t.token_is_delim());
+  EXPECT_EQ(string(" "), t.token());
+
+  EXPECT_TRUE(t.GetNext());
+  EXPECT_TRUE(t.token_is_delim());
+  EXPECT_EQ(string(";"), t.token());
+
+  EXPECT_TRUE(t.GetNext());
+  EXPECT_TRUE(t.token_is_delim());
+  EXPECT_EQ(string(" "), t.token());
+
+  EXPECT_TRUE(t.GetNext());
+  EXPECT_FALSE(t.token_is_delim());
+  EXPECT_EQ(string("charset"), t.token());
+
+  EXPECT_TRUE(t.GetNext());
+  EXPECT_TRUE(t.token_is_delim());
+  EXPECT_EQ(string("="), t.token());
+
+  EXPECT_TRUE(t.GetNext());
+  EXPECT_FALSE(t.token_is_delim());
+  EXPECT_EQ(string("UTF-8"), t.token());
+
+  EXPECT_FALSE(t.GetNext());
+  EXPECT_FALSE(t.token_is_delim());
+}
+
+TEST(StringTokenizerTest, ParseQuotedString) {
+  string input = "foo bar 'hello world' baz";
+  StringTokenizer t(input, " ");
+  t.set_quote_chars("'");
+
+  EXPECT_TRUE(t.GetNext());
+  EXPECT_EQ(string("foo"), t.token());
+
+  EXPECT_TRUE(t.GetNext());
+  EXPECT_EQ(string("bar"), t.token());
+
+  EXPECT_TRUE(t.GetNext());
+  EXPECT_EQ(string("'hello world'"), t.token());
+
+  EXPECT_TRUE(t.GetNext());
+  EXPECT_EQ(string("baz"), t.token());
+
+  EXPECT_FALSE(t.GetNext());
+}
+
+TEST(StringTokenizerTest, ParseQuotedString_Malformed) {
+  string input = "bar 'hello wo";
+  StringTokenizer t(input, " ");
+  t.set_quote_chars("'");
+
+  EXPECT_TRUE(t.GetNext());
+  EXPECT_EQ(string("bar"), t.token());
+
+  EXPECT_TRUE(t.GetNext());
+  EXPECT_EQ(string("'hello wo"), t.token());
+
+  EXPECT_FALSE(t.GetNext());
+}
+
+TEST(StringTokenizerTest, ParseQuotedString_Multiple) {
+  string input = "bar 'hel\"lo\" wo' baz\"";
+  StringTokenizer t(input, " ");
+  t.set_quote_chars("'\"");
+
+  EXPECT_TRUE(t.GetNext());
+  EXPECT_EQ(string("bar"), t.token());
+
+  EXPECT_TRUE(t.GetNext());
+  EXPECT_EQ(string("'hel\"lo\" wo'"), t.token());
+
+  EXPECT_TRUE(t.GetNext());
+  EXPECT_EQ(string("baz\""), t.token());
+
+  EXPECT_FALSE(t.GetNext());
+}
+
+TEST(StringTokenizerTest, ParseQuotedString_EscapedQuotes) {
+  string input = "foo 'don\\'t do that'";
+  StringTokenizer t(input, " ");
+  t.set_quote_chars("'");
+
+  EXPECT_TRUE(t.GetNext());
+  EXPECT_EQ(string("foo"), t.token());
+
+  EXPECT_TRUE(t.GetNext());
+  EXPECT_EQ(string("'don\\'t do that'"), t.token());
+
+  EXPECT_FALSE(t.GetNext());
+}
+
+TEST(StringTokenizerTest, ParseQuotedString_EscapedQuotes2) {
+  string input = "foo='a, b', bar";
+  StringTokenizer t(input, ", ");
+  t.set_quote_chars("'");
+
+  EXPECT_TRUE(t.GetNext());
+  EXPECT_EQ(string("foo='a, b'"), t.token());
+
+  EXPECT_TRUE(t.GetNext());
+  EXPECT_EQ(string("bar"), t.token());
+
+  EXPECT_FALSE(t.GetNext());
+}
+
+}  // namespace
+
+}  // namespace base
diff --git a/base/strings/stringize_macros_unittest.cc b/base/strings/stringize_macros_unittest.cc
new file mode 100644
index 0000000..d7f9e56
--- /dev/null
+++ b/base/strings/stringize_macros_unittest.cc
@@ -0,0 +1,29 @@
+// Copyright (c) 2010 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/strings/stringize_macros.h"
+
+#include "testing/gtest/include/gtest/gtest.h"
+
+// Macros as per documentation in header file.
+#define PREPROCESSOR_UTIL_UNITTEST_A FOO
+#define PREPROCESSOR_UTIL_UNITTEST_B(x) myobj->FunctionCall(x)
+#define PREPROCESSOR_UTIL_UNITTEST_C "foo"
+
+TEST(StringizeTest, Ansi) {
+  EXPECT_STREQ(
+      "PREPROCESSOR_UTIL_UNITTEST_A",
+      STRINGIZE_NO_EXPANSION(PREPROCESSOR_UTIL_UNITTEST_A));
+  EXPECT_STREQ(
+      "PREPROCESSOR_UTIL_UNITTEST_B(y)",
+      STRINGIZE_NO_EXPANSION(PREPROCESSOR_UTIL_UNITTEST_B(y)));
+  EXPECT_STREQ(
+      "PREPROCESSOR_UTIL_UNITTEST_C",
+      STRINGIZE_NO_EXPANSION(PREPROCESSOR_UTIL_UNITTEST_C));
+
+  EXPECT_STREQ("FOO", STRINGIZE(PREPROCESSOR_UTIL_UNITTEST_A));
+  EXPECT_STREQ("myobj->FunctionCall(y)",
+               STRINGIZE(PREPROCESSOR_UTIL_UNITTEST_B(y)));
+  EXPECT_STREQ("\"foo\"", STRINGIZE(PREPROCESSOR_UTIL_UNITTEST_C));
+}
diff --git a/base/sync_socket_posix.cc b/base/sync_socket_posix.cc
index 34fa5cd..923509c 100644
--- a/base/sync_socket_posix.cc
+++ b/base/sync_socket_posix.cc
@@ -223,7 +223,7 @@
   DCHECK_LE(length, kMaxMessageLength);
   DCHECK_NE(handle_, kInvalidHandle);
 
-  const long flags = fcntl(handle_, F_GETFL, NULL);
+  const int flags = fcntl(handle_, F_GETFL);
   if (flags != -1 && (flags & O_NONBLOCK) == 0) {
     // Set the socket to non-blocking mode for sending if its original mode
     // is blocking.
diff --git a/base/sys_info_linux.cc b/base/sys_info_linux.cc
index 8e1f533..300ef2c 100644
--- a/base/sys_info_linux.cc
+++ b/base/sys_info_linux.cc
@@ -37,7 +37,7 @@
   std::string contents;
   base::ReadFileToString(base::FilePath("/proc/sys/kernel/shmmax"), &contents);
   DCHECK(!contents.empty());
-  if (!contents.empty() && contents[contents.length() - 1] == '\n') {
+  if (!contents.empty() && contents.back() == '\n') {
     contents.erase(contents.length() - 1);
   }
 
diff --git a/base/task_scheduler/OWNERS b/base/task_scheduler/OWNERS
new file mode 100644
index 0000000..e4b383c
--- /dev/null
+++ b/base/task_scheduler/OWNERS
@@ -0,0 +1,3 @@
+fdoray@chromium.org
+gab@chromium.org
+robliao@chromium.org
diff --git a/base/task_scheduler/scheduler_lock.h b/base/task_scheduler/scheduler_lock.h
new file mode 100644
index 0000000..be7c71c
--- /dev/null
+++ b/base/task_scheduler/scheduler_lock.h
@@ -0,0 +1,87 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_TASK_SCHEDULER_SCHEDULER_LOCK_H_
+#define BASE_TASK_SCHEDULER_SCHEDULER_LOCK_H_
+
+#include "base/base_export.h"
+#include "base/macros.h"
+#include "base/memory/scoped_ptr.h"
+#include "base/synchronization/condition_variable.h"
+#include "base/synchronization/lock.h"
+#include "base/task_scheduler/scheduler_lock_impl.h"
+
+namespace base {
+namespace internal {
+
+// SchedulerLock should be used anywhere a lock would be used in the scheduler.
+// When DCHECK_IS_ON(), lock checking occurs. Otherwise, SchedulerLock is
+// equivalent to base::Lock.
+//
+// The shape of SchedulerLock is as follows:
+// SchedulerLock()
+//     Default constructor, no predecessor lock.
+//     DCHECKs
+//         On Acquisition if any scheduler lock is acquired on this thread.
+//
+// SchedulerLock(const SchedulerLock* predecessor)
+//     Constructor that specifies an allowed predecessor for that lock.
+//     DCHECKs
+//         On Construction if |predecessor| forms a predecessor lock cycle.
+//         On Acquisition if the previous lock acquired on the thread is not
+//             |predecessor|. Okay if there was no previous lock acquired.
+//
+// void Acquire()
+//     Acquires the lock.
+//
+// void Release()
+//     Releases the lock.
+//
+// void AssertAcquired().
+//     DCHECKs if the lock is not acquired.
+//
+// scoped_ptr<ConditionVariable> CreateConditionVariable()
+//     Creates a condition variable using this as a lock.
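+//
+// Example usage (a sketch; |first| is an allowed predecessor of |second|):
+//   SchedulerLock first;
+//   SchedulerLock second(&first);
+//   {
+//     AutoSchedulerLock auto_first(first);
+//     AutoSchedulerLock auto_second(second);  // OK: |first| is already held.
+//   }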
+
+#if DCHECK_IS_ON()
+class SchedulerLock : public SchedulerLockImpl {
+ public:
+  SchedulerLock() = default;
+  explicit SchedulerLock(const SchedulerLock* predecessor)
+      : SchedulerLockImpl(predecessor) {}
+};
+#else  // DCHECK_IS_ON()
+class SchedulerLock : public Lock {
+ public:
+  SchedulerLock() = default;
+  explicit SchedulerLock(const SchedulerLock*) {}
+
+  scoped_ptr<ConditionVariable> CreateConditionVariable() {
+    return scoped_ptr<ConditionVariable>(new ConditionVariable(this));
+  }
+};
+#endif  // DCHECK_IS_ON()
+
+// Provides the same functionality as base::AutoLock for SchedulerLock.
+class AutoSchedulerLock {
+ public:
+  explicit AutoSchedulerLock(SchedulerLock& lock) : lock_(lock) {
+    lock_.Acquire();
+  }
+
+  ~AutoSchedulerLock() {
+    lock_.AssertAcquired();
+    lock_.Release();
+  }
+
+ private:
+  SchedulerLock& lock_;
+
+  DISALLOW_COPY_AND_ASSIGN(AutoSchedulerLock);
+};
+
+}  // namespace internal
+}  // namespace base
+
+#endif  // BASE_TASK_SCHEDULER_SCHEDULER_LOCK_H_
diff --git a/base/task_scheduler/scheduler_lock_impl.cc b/base/task_scheduler/scheduler_lock_impl.cc
new file mode 100644
index 0000000..609ea22
--- /dev/null
+++ b/base/task_scheduler/scheduler_lock_impl.cc
@@ -0,0 +1,144 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/task_scheduler/scheduler_lock_impl.h"
+
+#include <algorithm>
+#include <unordered_map>
+#include <vector>
+
+#include "base/lazy_instance.h"
+#include "base/logging.h"
+#include "base/synchronization/condition_variable.h"
+#include "base/threading/platform_thread.h"
+#include "base/threading/thread_local_storage.h"
+
+namespace base {
+namespace internal {
+
+namespace {
+
+class SafeAcquisitionTracker {
+ public:
+  SafeAcquisitionTracker() : tls_acquired_locks_(&OnTLSDestroy) {}
+
+  void RegisterLock(
+      const SchedulerLockImpl* const lock,
+      const SchedulerLockImpl* const predecessor) {
+    DCHECK_NE(lock, predecessor) << "Reentrant locks are unsupported.";
+    AutoLock auto_lock(allowed_predecessor_map_lock_);
+    allowed_predecessor_map_[lock] = predecessor;
+    AssertSafePredecessor(lock);
+  }
+
+  void UnregisterLock(const SchedulerLockImpl* const lock) {
+    AutoLock auto_lock(allowed_predecessor_map_lock_);
+    allowed_predecessor_map_.erase(lock);
+  }
+
+  void RecordAcquisition(const SchedulerLockImpl* const lock) {
+    AssertSafeAcquire(lock);
+    GetAcquiredLocksOnCurrentThread()->push_back(lock);
+  }
+
+  void RecordRelease(const SchedulerLockImpl* const lock) {
+    LockVector* acquired_locks = GetAcquiredLocksOnCurrentThread();
+    const auto iter_at_lock =
+        std::find(acquired_locks->begin(), acquired_locks->end(), lock);
+    DCHECK(iter_at_lock != acquired_locks->end());
+    acquired_locks->erase(iter_at_lock);
+  }
+
+ private:
+  using LockVector = std::vector<const SchedulerLockImpl*>;
+  using PredecessorMap = std::unordered_map<
+      const SchedulerLockImpl*, const SchedulerLockImpl*>;
+
+  // This asserts that the lock is safe to acquire, so it must be run before
+  // the acquisition is actually recorded.
+  void AssertSafeAcquire(const SchedulerLockImpl* const lock) {
+    const LockVector* acquired_locks = GetAcquiredLocksOnCurrentThread();
+
+    // If the thread currently holds no locks, this is inherently safe.
+    if (acquired_locks->empty())
+      return;
+
+    // Otherwise, make sure that the previous lock acquired is an allowed
+    // predecessor.
+    AutoLock auto_lock(allowed_predecessor_map_lock_);
+    const SchedulerLockImpl* allowed_predecessor =
+        allowed_predecessor_map_.at(lock);
+    DCHECK_EQ(acquired_locks->back(), allowed_predecessor);
+  }
+
+  void AssertSafePredecessor(const SchedulerLockImpl* lock) const {
+    allowed_predecessor_map_lock_.AssertAcquired();
+    for (const SchedulerLockImpl* predecessor =
+             allowed_predecessor_map_.at(lock);
+         predecessor != nullptr;
+         predecessor = allowed_predecessor_map_.at(predecessor)) {
+      DCHECK_NE(predecessor, lock) <<
+          "Scheduler lock predecessor cycle detected.";
+    }
+  }
+
+  LockVector* GetAcquiredLocksOnCurrentThread() {
+    if (!tls_acquired_locks_.Get())
+      tls_acquired_locks_.Set(new LockVector);
+
+    return reinterpret_cast<LockVector*>(tls_acquired_locks_.Get());
+  }
+
+  static void OnTLSDestroy(void* value) {
+    delete reinterpret_cast<LockVector*>(value);
+  }
+
+  // Synchronizes access to |allowed_predecessor_map_|.
+  Lock allowed_predecessor_map_lock_;
+
+  // A map of allowed predecessors.
+  PredecessorMap allowed_predecessor_map_;
+
+  // A thread-local slot holding a vector of locks currently acquired on the
+  // current thread.
+  ThreadLocalStorage::Slot tls_acquired_locks_;
+
+  DISALLOW_COPY_AND_ASSIGN(SafeAcquisitionTracker);
+};
+
+LazyInstance<SafeAcquisitionTracker>::Leaky g_safe_acquisition_tracker =
+    LAZY_INSTANCE_INITIALIZER;
+
+}  // namespace
+
+SchedulerLockImpl::SchedulerLockImpl() : SchedulerLockImpl(nullptr) {}
+
+SchedulerLockImpl::SchedulerLockImpl(const SchedulerLockImpl* predecessor) {
+  g_safe_acquisition_tracker.Get().RegisterLock(this, predecessor);
+}
+
+SchedulerLockImpl::~SchedulerLockImpl() {
+  g_safe_acquisition_tracker.Get().UnregisterLock(this);
+}
+
+void SchedulerLockImpl::Acquire() {
+  lock_.Acquire();
+  g_safe_acquisition_tracker.Get().RecordAcquisition(this);
+}
+
+void SchedulerLockImpl::Release() {
+  lock_.Release();
+  g_safe_acquisition_tracker.Get().RecordRelease(this);
+}
+
+void SchedulerLockImpl::AssertAcquired() const {
+  lock_.AssertAcquired();
+}
+
+scoped_ptr<ConditionVariable> SchedulerLockImpl::CreateConditionVariable() {
+  return scoped_ptr<ConditionVariable>(new ConditionVariable(&lock_));
+}
+
+}  // namespace internal
+}  // namespace base
diff --git a/base/task_scheduler/scheduler_lock_impl.h b/base/task_scheduler/scheduler_lock_impl.h
new file mode 100644
index 0000000..51826fc
--- /dev/null
+++ b/base/task_scheduler/scheduler_lock_impl.h
@@ -0,0 +1,45 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_TASK_SCHEDULER_SCHEDULER_LOCK_IMPL_H_
+#define BASE_TASK_SCHEDULER_SCHEDULER_LOCK_IMPL_H_
+
+#include "base/base_export.h"
+#include "base/macros.h"
+#include "base/memory/scoped_ptr.h"
+#include "base/synchronization/lock.h"
+
+namespace base {
+
+class ConditionVariable;
+
+namespace internal {
+
+// A regular lock with simple deadlock correctness checking.
+// This lock tracks all registered locks to make sure that they are acquired
+// in the expected order.
+// See scheduler_lock.h for details.
+class BASE_EXPORT SchedulerLockImpl {
+ public:
+  SchedulerLockImpl();
+  explicit SchedulerLockImpl(const SchedulerLockImpl* predecessor);
+  ~SchedulerLockImpl();
+
+  void Acquire();
+  void Release();
+
+  void AssertAcquired() const;
+
+  scoped_ptr<ConditionVariable> CreateConditionVariable();
+
+ private:
+  Lock lock_;
+
+  DISALLOW_COPY_AND_ASSIGN(SchedulerLockImpl);
+};
+
+}  // namespace internal
+}  // namespace base
+
+#endif  // BASE_TASK_SCHEDULER_SCHEDULER_LOCK_IMPL_H_
diff --git a/base/task_scheduler/scheduler_lock_unittest.cc b/base/task_scheduler/scheduler_lock_unittest.cc
new file mode 100644
index 0000000..48b8b08
--- /dev/null
+++ b/base/task_scheduler/scheduler_lock_unittest.cc
@@ -0,0 +1,299 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/task_scheduler/scheduler_lock.h"
+
+#include <stdlib.h>
+
+#include "base/compiler_specific.h"
+#include "base/macros.h"
+#include "base/rand_util.h"
+#include "base/synchronization/waitable_event.h"
+#include "base/threading/platform_thread.h"
+#include "base/threading/simple_thread.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace base {
+namespace internal {
+namespace {
+
+// Death tests misbehave on Android.
+#if DCHECK_IS_ON() && defined(GTEST_HAS_DEATH_TEST) && !defined(OS_ANDROID)
+#define EXPECT_DCHECK_DEATH(statement, regex) EXPECT_DEATH(statement, regex)
+#else
+#define EXPECT_DCHECK_DEATH(statement, regex)
+#endif
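+// Note: when the macro expands to nothing, |statement| is not executed at
+// all, so the EXPECT_DCHECK_DEATH() cases below exercise the lock-checking
+// logic only in DCHECK-enabled death-test builds.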
+
+// Adapted from base::Lock's BasicLockTestThread to make sure
+// Acquire()/Release() don't crash.
+class BasicLockTestThread : public SimpleThread {
+ public:
+  explicit BasicLockTestThread(SchedulerLock* lock)
+      : SimpleThread("BasicLockTestThread"),
+        lock_(lock),
+        acquired_(0) {}
+
+  int acquired() const { return acquired_; }
+
+ private:
+  void Run() override {
+    for (int i = 0; i < 10; i++) {
+      lock_->Acquire();
+      acquired_++;
+      lock_->Release();
+    }
+    for (int i = 0; i < 10; i++) {
+      lock_->Acquire();
+      acquired_++;
+      PlatformThread::Sleep(TimeDelta::FromMilliseconds(base::RandInt(0, 19)));
+      lock_->Release();
+    }
+  }
+
+  SchedulerLock* const lock_;
+  int acquired_;
+
+  DISALLOW_COPY_AND_ASSIGN(BasicLockTestThread);
+};
+
+class BasicLockAcquireAndWaitThread : public SimpleThread {
+ public:
+  explicit BasicLockAcquireAndWaitThread(SchedulerLock* lock)
+      : SimpleThread("BasicLockAcquireAndWaitThread"),
+        lock_(lock),
+        lock_acquire_event_(false, false),
+        main_thread_continue_event_(false, false) {}
+
+  void WaitForLockAcquisition() {
+    lock_acquire_event_.Wait();
+  }
+
+  void ContinueMain() {
+    main_thread_continue_event_.Signal();
+  }
+
+ private:
+  void Run() override {
+    lock_->Acquire();
+    lock_acquire_event_.Signal();
+    main_thread_continue_event_.Wait();
+    lock_->Release();
+  }
+
+  SchedulerLock* const lock_;
+  WaitableEvent lock_acquire_event_;
+  WaitableEvent main_thread_continue_event_;
+
+  DISALLOW_COPY_AND_ASSIGN(BasicLockAcquireAndWaitThread);
+};
+
+TEST(TaskSchedulerLock, Basic) {
+  SchedulerLock lock;
+  BasicLockTestThread thread(&lock);
+
+  thread.Start();
+
+  int acquired = 0;
+  for (int i = 0; i < 5; i++) {
+    lock.Acquire();
+    acquired++;
+    lock.Release();
+  }
+  for (int i = 0; i < 10; i++) {
+    lock.Acquire();
+    acquired++;
+    PlatformThread::Sleep(TimeDelta::FromMilliseconds(base::RandInt(0, 19)));
+    lock.Release();
+  }
+  for (int i = 0; i < 5; i++) {
+    lock.Acquire();
+    acquired++;
+    PlatformThread::Sleep(TimeDelta::FromMilliseconds(base::RandInt(0, 19)));
+    lock.Release();
+  }
+
+  thread.Join();
+
+  EXPECT_EQ(acquired, 20);
+  EXPECT_EQ(thread.acquired(), 20);
+}
+
+TEST(TaskSchedulerLock, AcquirePredecessor) {
+  SchedulerLock predecessor;
+  SchedulerLock lock(&predecessor);
+  predecessor.Acquire();
+  lock.Acquire();
+  lock.Release();
+  predecessor.Release();
+}
+
+TEST(TaskSchedulerLock, AcquirePredecessorWrongOrder) {
+  SchedulerLock predecessor;
+  SchedulerLock lock(&predecessor);
+  EXPECT_DCHECK_DEATH({
+    lock.Acquire();
+    predecessor.Acquire();
+  }, "");
+}
+
+TEST(TaskSchedulerLock, AcquireNonPredecessor) {
+  SchedulerLock lock1;
+  SchedulerLock lock2;
+  EXPECT_DCHECK_DEATH({
+    lock1.Acquire();
+    lock2.Acquire();
+  }, "");
+}
+
+TEST(TaskSchedulerLock, AcquireMultipleLocksInOrder) {
+  SchedulerLock lock1;
+  SchedulerLock lock2(&lock1);
+  SchedulerLock lock3(&lock2);
+  lock1.Acquire();
+  lock2.Acquire();
+  lock3.Acquire();
+  lock3.Release();
+  lock2.Release();
+  lock1.Release();
+}
+
+TEST(TaskSchedulerLock, AcquireMultipleLocksInTheMiddleOfAChain) {
+  SchedulerLock lock1;
+  SchedulerLock lock2(&lock1);
+  SchedulerLock lock3(&lock2);
+  lock2.Acquire();
+  lock3.Acquire();
+  lock3.Release();
+  lock2.Release();
+}
+
+TEST(TaskSchedulerLock, AcquireMultipleLocksNoTransitivity) {
+  SchedulerLock lock1;
+  SchedulerLock lock2(&lock1);
+  SchedulerLock lock3(&lock2);
+  EXPECT_DCHECK_DEATH({
+    lock1.Acquire();
+    lock3.Acquire();
+  }, "");
+}
+
+TEST(TaskSchedulerLock, AcquireLocksDifferentThreadsSafely) {
+  SchedulerLock lock1;
+  SchedulerLock lock2;
+  BasicLockAcquireAndWaitThread thread(&lock1);
+  thread.Start();
+
+  lock2.Acquire();
+  thread.WaitForLockAcquisition();
+  thread.ContinueMain();
+  thread.Join();
+  lock2.Release();
+}
+
+TEST(TaskSchedulerLock,
+     AcquireLocksWithPredecessorDifferentThreadsSafelyPredecessorFirst) {
+  // A lock and its predecessor may be safely acquired on different threads.
+  // This Thread                Other Thread
+  // predecessor.Acquire()
+  //                            lock.Acquire()
+  // predecessor.Release()
+  //                            lock.Release()
+  SchedulerLock predecessor;
+  SchedulerLock lock(&predecessor);
+  predecessor.Acquire();
+  BasicLockAcquireAndWaitThread thread(&lock);
+  thread.Start();
+  thread.WaitForLockAcquisition();
+  predecessor.Release();
+  thread.ContinueMain();
+  thread.Join();
+}
+
+TEST(TaskSchedulerLock,
+     AcquireLocksWithPredecessorDifferentThreadsSafelyPredecessorLast) {
+  // A lock and its predecessor may be safely acquired on different threads.
+  // This Thread                Other Thread
+  // lock.Acquire()
+  //                            predecessor.Acquire()
+  // lock.Release()
+  //                            predecessor.Release()
+  SchedulerLock predecessor;
+  SchedulerLock lock(&predecessor);
+  lock.Acquire();
+  BasicLockAcquireAndWaitThread thread(&predecessor);
+  thread.Start();
+  thread.WaitForLockAcquisition();
+  lock.Release();
+  thread.ContinueMain();
+  thread.Join();
+}
+
+TEST(TaskSchedulerLock,
+     AcquireLocksWithPredecessorDifferentThreadsSafelyNoInterference) {
+  // Acquisition of an unrelated lock on another thread should not affect a
+  // legal lock acquisition with a predecessor on this thread.
+  // This Thread                Other Thread
+  // predecessor.Acquire()
+  //                            unrelated.Acquire()
+  // lock.Acquire()
+  //                            unrelated.Release()
+  // lock.Release()
+  // predecessor.Release();
+  SchedulerLock predecessor;
+  SchedulerLock lock(&predecessor);
+  predecessor.Acquire();
+  SchedulerLock unrelated;
+  BasicLockAcquireAndWaitThread thread(&unrelated);
+  thread.Start();
+  thread.WaitForLockAcquisition();
+  lock.Acquire();
+  thread.ContinueMain();
+  thread.Join();
+  lock.Release();
+  predecessor.Release();
+}
+
+TEST(TaskSchedulerLock, SelfReferentialLock) {
+  struct SelfReferentialLock {
+    SelfReferentialLock() : lock(&lock) {}
+
+    SchedulerLock lock;
+  };
+
+  EXPECT_DCHECK_DEATH({ SelfReferentialLock lock; }, "");
+}
+
+TEST(TaskSchedulerLock, PredecessorCycle) {
+  struct LockCycle {
+    LockCycle() : lock1(&lock2), lock2(&lock1) {}
+
+    SchedulerLock lock1;
+    SchedulerLock lock2;
+  };
+
+  EXPECT_DCHECK_DEATH({ LockCycle cycle; }, "");
+}
+
+TEST(TaskSchedulerLock, PredecessorLongerCycle) {
+  struct LockCycle {
+    LockCycle()
+        : lock1(&lock5),
+          lock2(&lock1),
+          lock3(&lock2),
+          lock4(&lock3),
+          lock5(&lock4) {}
+
+    SchedulerLock lock1;
+    SchedulerLock lock2;
+    SchedulerLock lock3;
+    SchedulerLock lock4;
+    SchedulerLock lock5;
+  };
+
+  EXPECT_DCHECK_DEATH({ LockCycle cycle; }, "");
+}
+
+}  // namespace
+}  // namespace internal
+}  // namespace base
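
The tests above pin down SchedulerLock's contract: a lock may only be acquired while its declared predecessor is held, two unrelated locks may never nest on one thread, and self-references or predecessor cycles DCHECK at construction. A minimal sketch of the legal pattern, assuming only the SchedulerLock and AutoSchedulerLock types this commit adds in base/task_scheduler/scheduler_lock.h:

    #include "base/task_scheduler/scheduler_lock.h"

    namespace {

    // |g_inner| names |g_outer| as its predecessor, so the only legal nesting
    // on a single thread is g_outer first, then g_inner.
    base::internal::SchedulerLock g_outer;
    base::internal::SchedulerLock g_inner(&g_outer);

    void DoNestedWork() {
      base::internal::AutoSchedulerLock outer(g_outer);  // No predecessor.
      base::internal::AutoSchedulerLock inner(g_inner);  // g_outer is held.
      // Reversing the two acquisitions would DCHECK in debug builds, as
      // TaskSchedulerLock.AcquirePredecessorWrongOrder verifies above.
    }

    }  // namespace
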
diff --git a/base/task_scheduler/sequence.cc b/base/task_scheduler/sequence.cc
new file mode 100644
index 0000000..a05c802
--- /dev/null
+++ b/base/task_scheduler/sequence.cc
@@ -0,0 +1,79 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/task_scheduler/sequence.h"
+
+#include <utility>
+
+#include "base/logging.h"
+#include "base/time/time.h"
+
+namespace base {
+namespace internal {
+
+Sequence::Sequence() = default;
+
+bool Sequence::PushTask(scoped_ptr<Task> task) {
+  DCHECK(task->sequenced_time.is_null());
+  task->sequenced_time = base::TimeTicks::Now();
+
+  AutoSchedulerLock auto_lock(lock_);
+  ++num_tasks_per_priority_[static_cast<int>(task->traits.priority())];
+  queue_.push(std::move(task));
+
+  // Return true if the sequence was empty before the push.
+  return queue_.size() == 1;
+}
+
+const Task* Sequence::PeekTask() const {
+  AutoSchedulerLock auto_lock(lock_);
+
+  if (queue_.empty())
+    return nullptr;
+
+  return queue_.front().get();
+}
+
+bool Sequence::PopTask() {
+  AutoSchedulerLock auto_lock(lock_);
+  DCHECK(!queue_.empty());
+
+  const int priority_index =
+      static_cast<int>(queue_.front()->traits.priority());
+  DCHECK_GT(num_tasks_per_priority_[priority_index], 0U);
+  --num_tasks_per_priority_[priority_index];
+
+  queue_.pop();
+  return queue_.empty();
+}
+
+SequenceSortKey Sequence::GetSortKey() const {
+  TaskPriority priority = TaskPriority::LOWEST;
+  base::TimeTicks next_task_sequenced_time;
+
+  {
+    AutoSchedulerLock auto_lock(lock_);
+    DCHECK(!queue_.empty());
+
+    // Find the highest task priority in the sequence.
+    const int highest_priority_index = static_cast<int>(TaskPriority::HIGHEST);
+    const int lowest_priority_index = static_cast<int>(TaskPriority::LOWEST);
+    for (int i = highest_priority_index; i > lowest_priority_index; --i) {
+      if (num_tasks_per_priority_[i] > 0) {
+        priority = static_cast<TaskPriority>(i);
+        break;
+      }
+    }
+
+    // Save the sequenced time of the next task in the sequence.
+    next_task_sequenced_time = queue_.front()->sequenced_time;
+  }
+
+  return SequenceSortKey(priority, next_task_sequenced_time);
+}
+
+Sequence::~Sequence() = default;
+
+}  // namespace internal
+}  // namespace base
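
Note that PushTask() and PopTask() report empty/non-empty transitions rather than success or failure: true from PushTask() means the sequence just became non-empty and someone must schedule it; true from PopTask() means it just drained. A hedged sketch of the intended call pattern (EnqueueSequence() is a hypothetical scheduler hook, not part of this commit):

    #include <utility>

    #include "base/task_scheduler/sequence.h"

    using base::internal::Sequence;
    using base::internal::Task;

    void PostTaskToSequence(scoped_refptr<Sequence> sequence,
                            scoped_ptr<Task> task) {
      // True means the sequence was empty, i.e. no worker will run it.
      if (sequence->PushTask(std::move(task))) {
        // EnqueueSequence(std::move(sequence));  // Hypothetical hook.
      }
    }

    void RunFrontTask(scoped_refptr<Sequence> sequence) {
      // The PeekTask()/PopTask() split lets a worker run the closure without
      // holding the sequence's internal lock during execution.
      const Task* front = sequence->PeekTask();
      if (!front)
        return;
      front->task.Run();  // PendingTask::task is the bound Closure.
      if (!sequence->PopTask()) {
        // Still non-empty: re-enqueue the sequence using GetSortKey().
      }
    }
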
diff --git a/base/task_scheduler/sequence.h b/base/task_scheduler/sequence.h
new file mode 100644
index 0000000..e86cf59
--- /dev/null
+++ b/base/task_scheduler/sequence.h
@@ -0,0 +1,66 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_TASK_SCHEDULER_SEQUENCE_H_
+#define BASE_TASK_SCHEDULER_SEQUENCE_H_
+
+#include <stddef.h>
+
+#include <queue>
+
+#include "base/base_export.h"
+#include "base/macros.h"
+#include "base/memory/ref_counted.h"
+#include "base/memory/scoped_ptr.h"
+#include "base/task_scheduler/scheduler_lock.h"
+#include "base/task_scheduler/sequence_sort_key.h"
+#include "base/task_scheduler/task.h"
+#include "base/task_scheduler/task_traits.h"
+
+namespace base {
+namespace internal {
+
+// A sequence holds tasks that must be executed in posting order.
+// This class is thread-safe.
+class BASE_EXPORT Sequence : public RefCountedThreadSafe<Sequence> {
+ public:
+  Sequence();
+
+  // Adds |task| at the end of the sequence's queue. Returns true if the
+  // sequence was empty before this operation.
+  bool PushTask(scoped_ptr<Task> task);
+
+  // Returns the task in front of the sequence's queue, if any.
+  const Task* PeekTask() const;
+
+  // Removes the task in front of the sequence's queue. Returns true if the
+  // sequence is empty after this operation. Cannot be called on an empty
+  // sequence.
+  bool PopTask();
+
+  // Returns a SequenceSortKey representing the priority of the sequence. Cannot
+  // be called on an empty sequence.
+  SequenceSortKey GetSortKey() const;
+
+ private:
+  friend class RefCountedThreadSafe<Sequence>;
+  ~Sequence();
+
+  // Synchronizes access to all members.
+  mutable SchedulerLock lock_;
+
+  // Queue of tasks to execute.
+  std::queue<scoped_ptr<Task>> queue_;
+
+  // Number of tasks contained in the sequence for each priority.
+  size_t num_tasks_per_priority_[static_cast<int>(TaskPriority::HIGHEST) + 1] =
+      {};
+
+  DISALLOW_COPY_AND_ASSIGN(Sequence);
+};
+
+}  // namespace internal
+}  // namespace base
+
+#endif  // BASE_TASK_SCHEDULER_SEQUENCE_H_
diff --git a/base/task_scheduler/sequence_sort_key.cc b/base/task_scheduler/sequence_sort_key.cc
new file mode 100644
index 0000000..758a411
--- /dev/null
+++ b/base/task_scheduler/sequence_sort_key.cc
@@ -0,0 +1,28 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/task_scheduler/sequence_sort_key.h"
+
+namespace base {
+namespace internal {
+
+SequenceSortKey::SequenceSortKey(TaskPriority priority,
+                                 TimeTicks next_task_sequenced_time)
+    : priority(priority), next_task_sequenced_time(next_task_sequenced_time) {}
+
+bool SequenceSortKey::operator<(const SequenceSortKey& other) const {
+  // This SequenceSortKey is considered less important than |other| if it has a
+  // lower priority or if it has the same priority but its next task was posted
+  // later than |other|'s.
+  const int priority_diff =
+      static_cast<int>(priority) - static_cast<int>(other.priority);
+  if (priority_diff < 0)
+    return true;
+  if (priority_diff > 0)
+    return false;
+  return next_task_sequenced_time > other.next_task_sequenced_time;
+}
+
+}  // namespace internal
+}  // namespace base
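
A worked example of this ordering: priority dominates, and within a priority the earlier next_task_sequenced_time compares greater, yielding FIFO between equal-priority sequences (the unittest below enumerates every pairing exhaustively):

    #include "base/logging.h"
    #include "base/task_scheduler/sequence_sort_key.h"

    void SortKeyOrderingDemo() {
      using base::internal::SequenceSortKey;
      const SequenceSortKey urgent_late(
          base::TaskPriority::USER_BLOCKING,
          base::TimeTicks::FromInternalValue(2000));
      const SequenceSortKey visible_early(
          base::TaskPriority::USER_VISIBLE,
          base::TimeTicks::FromInternalValue(1000));
      const SequenceSortKey urgent_early(
          base::TaskPriority::USER_BLOCKING,
          base::TimeTicks::FromInternalValue(1000));
      DCHECK(urgent_late > visible_early);  // Priority dominates posting time.
      DCHECK(urgent_early > urgent_late);   // Same priority: earlier task wins.
    }
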
diff --git a/base/task_scheduler/sequence_sort_key.h b/base/task_scheduler/sequence_sort_key.h
new file mode 100644
index 0000000..f2dd561
--- /dev/null
+++ b/base/task_scheduler/sequence_sort_key.h
@@ -0,0 +1,34 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_TASK_SCHEDULER_SEQUENCE_SORT_KEY_H_
+#define BASE_TASK_SCHEDULER_SEQUENCE_SORT_KEY_H_
+
+#include "base/base_export.h"
+#include "base/task_scheduler/task_traits.h"
+#include "base/time/time.h"
+
+namespace base {
+namespace internal {
+
+// An immutable representation of the priority of a Sequence.
+struct BASE_EXPORT SequenceSortKey final {
+  SequenceSortKey(TaskPriority priority, TimeTicks next_task_sequenced_time);
+
+  bool operator<(const SequenceSortKey& other) const;
+  bool operator>(const SequenceSortKey& other) const { return other < *this; }
+
+  // Highest task priority in the sequence at the time this sort key was
+  // created.
+  const TaskPriority priority;
+
+  // Sequenced time of the next task to run in the sequence at the time this
+  // sort key was created.
+  const TimeTicks next_task_sequenced_time;
+};
+
+}  // namespace internal
+}  // namespace base
+
+#endif  // BASE_TASK_SCHEDULER_SEQUENCE_SORT_KEY_H_
diff --git a/base/task_scheduler/sequence_sort_key_unittest.cc b/base/task_scheduler/sequence_sort_key_unittest.cc
new file mode 100644
index 0000000..5c6c917
--- /dev/null
+++ b/base/task_scheduler/sequence_sort_key_unittest.cc
@@ -0,0 +1,129 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/task_scheduler/sequence_sort_key.h"
+
+#include "base/task_scheduler/task_traits.h"
+#include "base/time/time.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace base {
+namespace internal {
+
+TEST(TaskSchedulerSequenceSortKeyTest, OperatorLessThan) {
+  SequenceSortKey key_a(TaskPriority::USER_BLOCKING,
+                        TimeTicks::FromInternalValue(1000));
+  SequenceSortKey key_b(TaskPriority::USER_BLOCKING,
+                        TimeTicks::FromInternalValue(2000));
+  SequenceSortKey key_c(TaskPriority::USER_VISIBLE,
+                        TimeTicks::FromInternalValue(1000));
+  SequenceSortKey key_d(TaskPriority::USER_VISIBLE,
+                        TimeTicks::FromInternalValue(2000));
+  SequenceSortKey key_e(TaskPriority::BACKGROUND,
+                        TimeTicks::FromInternalValue(1000));
+  SequenceSortKey key_f(TaskPriority::BACKGROUND,
+                        TimeTicks::FromInternalValue(2000));
+
+  EXPECT_FALSE(key_a < key_a);
+  EXPECT_LT(key_b, key_a);
+  EXPECT_LT(key_c, key_a);
+  EXPECT_LT(key_d, key_a);
+  EXPECT_LT(key_e, key_a);
+  EXPECT_LT(key_f, key_a);
+
+  EXPECT_FALSE(key_a < key_b);
+  EXPECT_FALSE(key_b < key_b);
+  EXPECT_LT(key_c, key_b);
+  EXPECT_LT(key_d, key_b);
+  EXPECT_LT(key_e, key_b);
+  EXPECT_LT(key_f, key_b);
+
+  EXPECT_FALSE(key_a < key_c);
+  EXPECT_FALSE(key_b < key_c);
+  EXPECT_FALSE(key_c < key_c);
+  EXPECT_LT(key_d, key_c);
+  EXPECT_LT(key_e, key_c);
+  EXPECT_LT(key_f, key_c);
+
+  EXPECT_FALSE(key_a < key_d);
+  EXPECT_FALSE(key_b < key_d);
+  EXPECT_FALSE(key_c < key_d);
+  EXPECT_FALSE(key_d < key_d);
+  EXPECT_LT(key_e, key_d);
+  EXPECT_LT(key_f, key_d);
+
+  EXPECT_FALSE(key_a < key_e);
+  EXPECT_FALSE(key_b < key_e);
+  EXPECT_FALSE(key_c < key_e);
+  EXPECT_FALSE(key_d < key_e);
+  EXPECT_FALSE(key_e < key_e);
+  EXPECT_LT(key_f, key_e);
+
+  EXPECT_FALSE(key_a < key_f);
+  EXPECT_FALSE(key_b < key_f);
+  EXPECT_FALSE(key_c < key_f);
+  EXPECT_FALSE(key_d < key_f);
+  EXPECT_FALSE(key_e < key_f);
+  EXPECT_FALSE(key_f < key_f);
+}
+
+TEST(TaskSchedulerSequenceSortKeyTest, OperatorGreaterThan) {
+  SequenceSortKey key_a(TaskPriority::USER_BLOCKING,
+                        TimeTicks::FromInternalValue(1000));
+  SequenceSortKey key_b(TaskPriority::USER_BLOCKING,
+                        TimeTicks::FromInternalValue(2000));
+  SequenceSortKey key_c(TaskPriority::USER_VISIBLE,
+                        TimeTicks::FromInternalValue(1000));
+  SequenceSortKey key_d(TaskPriority::USER_VISIBLE,
+                        TimeTicks::FromInternalValue(2000));
+  SequenceSortKey key_e(TaskPriority::BACKGROUND,
+                        TimeTicks::FromInternalValue(1000));
+  SequenceSortKey key_f(TaskPriority::BACKGROUND,
+                        TimeTicks::FromInternalValue(2000));
+
+  EXPECT_FALSE(key_a > key_a);
+  EXPECT_FALSE(key_b > key_a);
+  EXPECT_FALSE(key_c > key_a);
+  EXPECT_FALSE(key_d > key_a);
+  EXPECT_FALSE(key_e > key_a);
+  EXPECT_FALSE(key_f > key_a);
+
+  EXPECT_GT(key_a, key_b);
+  EXPECT_FALSE(key_b > key_b);
+  EXPECT_FALSE(key_c > key_b);
+  EXPECT_FALSE(key_d > key_b);
+  EXPECT_FALSE(key_e > key_b);
+  EXPECT_FALSE(key_f > key_b);
+
+  EXPECT_GT(key_a, key_c);
+  EXPECT_GT(key_b, key_c);
+  EXPECT_FALSE(key_c > key_c);
+  EXPECT_FALSE(key_d > key_c);
+  EXPECT_FALSE(key_e > key_c);
+  EXPECT_FALSE(key_f > key_c);
+
+  EXPECT_GT(key_a, key_d);
+  EXPECT_GT(key_b, key_d);
+  EXPECT_GT(key_c, key_d);
+  EXPECT_FALSE(key_d > key_d);
+  EXPECT_FALSE(key_e > key_d);
+  EXPECT_FALSE(key_f > key_d);
+
+  EXPECT_GT(key_a, key_e);
+  EXPECT_GT(key_b, key_e);
+  EXPECT_GT(key_c, key_e);
+  EXPECT_GT(key_d, key_e);
+  EXPECT_FALSE(key_e > key_e);
+  EXPECT_FALSE(key_f > key_e);
+
+  EXPECT_GT(key_a, key_f);
+  EXPECT_GT(key_b, key_f);
+  EXPECT_GT(key_c, key_f);
+  EXPECT_GT(key_d, key_f);
+  EXPECT_GT(key_e, key_f);
+  EXPECT_FALSE(key_f > key_f);
+}
+
+}  // namespace internal
+}  // namespace base
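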
diff --git a/base/task_scheduler/sequence_unittest.cc b/base/task_scheduler/sequence_unittest.cc
new file mode 100644
index 0000000..d81fece
--- /dev/null
+++ b/base/task_scheduler/sequence_unittest.cc
@@ -0,0 +1,184 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/task_scheduler/sequence.h"
+
+#include "base/macros.h"
+#include "base/time/time.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace base {
+namespace internal {
+
+namespace {
+
+class TaskSchedulerSequenceTest : public testing::Test {
+ public:
+  TaskSchedulerSequenceTest()
+      : task_a_owned_(
+            new Task(FROM_HERE,
+                     Closure(),
+                     TaskTraits().WithPriority(TaskPriority::BACKGROUND))),
+        task_b_owned_(
+            new Task(FROM_HERE,
+                     Closure(),
+                     TaskTraits().WithPriority(TaskPriority::USER_VISIBLE))),
+        task_c_owned_(
+            new Task(FROM_HERE,
+                     Closure(),
+                     TaskTraits().WithPriority(TaskPriority::USER_BLOCKING))),
+        task_d_owned_(
+            new Task(FROM_HERE,
+                     Closure(),
+                     TaskTraits().WithPriority(TaskPriority::USER_BLOCKING))),
+        task_e_owned_(
+            new Task(FROM_HERE,
+                     Closure(),
+                     TaskTraits().WithPriority(TaskPriority::BACKGROUND))),
+        task_a_(task_a_owned_.get()),
+        task_b_(task_b_owned_.get()),
+        task_c_(task_c_owned_.get()),
+        task_d_(task_d_owned_.get()),
+        task_e_(task_e_owned_.get()) {}
+
+ protected:
+  // Tasks to be handed off to a Sequence for testing.
+  scoped_ptr<Task> task_a_owned_;
+  scoped_ptr<Task> task_b_owned_;
+  scoped_ptr<Task> task_c_owned_;
+  scoped_ptr<Task> task_d_owned_;
+  scoped_ptr<Task> task_e_owned_;
+
+  // Raw pointers to those same tasks for verification. This is needed because
+  // the scoped_ptrs above no longer point to the tasks once they have been
+  // moved into a Sequence.
+  const Task* task_a_;
+  const Task* task_b_;
+  const Task* task_c_;
+  const Task* task_d_;
+  const Task* task_e_;
+
+ private:
+  DISALLOW_COPY_AND_ASSIGN(TaskSchedulerSequenceTest);
+};
+
+void ExpectSortKey(TaskPriority expected_priority,
+                   TimeTicks expected_sequenced_time,
+                   const SequenceSortKey& actual_sort_key) {
+  EXPECT_EQ(expected_priority, actual_sort_key.priority);
+  EXPECT_EQ(expected_sequenced_time, actual_sort_key.next_task_sequenced_time);
+}
+
+}  // namespace
+
+TEST_F(TaskSchedulerSequenceTest, PushPopPeek) {
+  scoped_refptr<Sequence> sequence(new Sequence);
+
+  // Push task A in the sequence. Its sequenced time should be updated and it
+  // should be in front of the sequence.
+  EXPECT_TRUE(sequence->PushTask(std::move(task_a_owned_)));
+  EXPECT_FALSE(task_a_->sequenced_time.is_null());
+  EXPECT_EQ(task_a_, sequence->PeekTask());
+
+  // Push tasks B, C and D in the sequence. Their sequenced times should be
+  // updated and task A should always remain in front of the sequence.
+  EXPECT_FALSE(sequence->PushTask(std::move(task_b_owned_)));
+  EXPECT_FALSE(task_b_->sequenced_time.is_null());
+  EXPECT_EQ(task_a_, sequence->PeekTask());
+
+  EXPECT_FALSE(sequence->PushTask(std::move(task_c_owned_)));
+  EXPECT_FALSE(task_c_->sequenced_time.is_null());
+  EXPECT_EQ(task_a_, sequence->PeekTask());
+
+  EXPECT_FALSE(sequence->PushTask(std::move(task_d_owned_)));
+  EXPECT_FALSE(task_d_->sequenced_time.is_null());
+  EXPECT_EQ(task_a_, sequence->PeekTask());
+
+  // Pop task A. Task B should now be in front.
+  EXPECT_FALSE(sequence->PopTask());
+  EXPECT_EQ(task_b_, sequence->PeekTask());
+
+  // Pop task B. Task C should now be in front.
+  EXPECT_FALSE(sequence->PopTask());
+  EXPECT_EQ(task_c_, sequence->PeekTask());
+
+  // Pop task C. Task D should now be in front.
+  EXPECT_FALSE(sequence->PopTask());
+  EXPECT_EQ(task_d_, sequence->PeekTask());
+
+  // Push task E in the sequence. Its sequenced time should be updated and
+  // task D should remain in front.
+  EXPECT_FALSE(sequence->PushTask(std::move(task_e_owned_)));
+  EXPECT_FALSE(task_e_->sequenced_time.is_null());
+  EXPECT_EQ(task_d_, sequence->PeekTask());
+
+  // Pop task D. Task E should now be in front.
+  EXPECT_FALSE(sequence->PopTask());
+  EXPECT_EQ(task_e_, sequence->PeekTask());
+
+  // Pop task E. The sequence should now be empty.
+  EXPECT_TRUE(sequence->PopTask());
+  EXPECT_EQ(nullptr, sequence->PeekTask());
+}
+
+TEST_F(TaskSchedulerSequenceTest, GetSortKey) {
+  scoped_refptr<Sequence> sequence(new Sequence);
+
+  // Push task A in the sequence. The highest priority is from task A
+  // (BACKGROUND). Task A is in front of the sequence.
+  sequence->PushTask(std::move(task_a_owned_));
+  ExpectSortKey(TaskPriority::BACKGROUND, task_a_->sequenced_time,
+                sequence->GetSortKey());
+
+  // Push task B in the sequence. The highest priority is from task B
+  // (USER_VISIBLE). Task A is still in front of the sequence.
+  sequence->PushTask(std::move(task_b_owned_));
+  ExpectSortKey(TaskPriority::USER_VISIBLE, task_a_->sequenced_time,
+                sequence->GetSortKey());
+
+  // Push task C in the sequence. The highest priority is from task C
+  // (USER_BLOCKING). Task A is still in front of the sequence.
+  sequence->PushTask(std::move(task_c_owned_));
+  ExpectSortKey(TaskPriority::USER_BLOCKING, task_a_->sequenced_time,
+                sequence->GetSortKey());
+
+  // Push task D in the sequence. The highest priority is from tasks C/D
+  // (USER_BLOCKING). Task A is still in front of the sequence.
+  sequence->PushTask(std::move(task_d_owned_));
+  ExpectSortKey(TaskPriority::USER_BLOCKING, task_a_->sequenced_time,
+                sequence->GetSortKey());
+
+  // Pop task A. The highest priority is still USER_BLOCKING. The task in front
+  // of the sequence is now task B.
+  sequence->PopTask();
+  ExpectSortKey(TaskPriority::USER_BLOCKING, task_b_->sequenced_time,
+                sequence->GetSortKey());
+
+  // Pop task B. The highest priority is still USER_BLOCKING. The task in front
+  // of the sequence is now task C.
+  sequence->PopTask();
+  ExpectSortKey(TaskPriority::USER_BLOCKING, task_c_->sequenced_time,
+                sequence->GetSortKey());
+
+  // Pop task C. The highest priority is still USER_BLOCKING. The task in front
+  // of the sequence is now task D.
+  sequence->PopTask();
+  ExpectSortKey(TaskPriority::USER_BLOCKING, task_d_->sequenced_time,
+                sequence->GetSortKey());
+
+  // Push task E in the sequence. The highest priority is still USER_BLOCKING.
+  // The task in front of the sequence is still task D.
+  sequence->PushTask(std::move(task_e_owned_));
+  ExpectSortKey(TaskPriority::USER_BLOCKING, task_d_->sequenced_time,
+                sequence->GetSortKey());
+
+  // Pop task D. The highest priority is now from task E (BACKGROUND). The
+  // task in front of the sequence is now task E.
+  sequence->PopTask();
+  ExpectSortKey(TaskPriority::BACKGROUND, task_e_->sequenced_time,
+                sequence->GetSortKey());
+}
+
+}  // namespace internal
+}  // namespace base
diff --git a/base/task_scheduler/task.cc b/base/task_scheduler/task.cc
new file mode 100644
index 0000000..ae63403
--- /dev/null
+++ b/base/task_scheduler/task.cc
@@ -0,0 +1,22 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/task_scheduler/task.h"
+
+namespace base {
+namespace internal {
+
+Task::Task(const tracked_objects::Location& posted_from,
+           const Closure& task,
+           const TaskTraits& traits)
+    : PendingTask(posted_from,
+                  task,
+                  TimeTicks(),  // No delayed run time.
+                  false),       // Not nestable.
+      traits(traits) {}
+
+Task::~Task() = default;
+
+}  // namespace internal
+}  // namespace base
diff --git a/base/task_scheduler/task.h b/base/task_scheduler/task.h
new file mode 100644
index 0000000..6ac483d
--- /dev/null
+++ b/base/task_scheduler/task.h
@@ -0,0 +1,39 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_TASK_SCHEDULER_TASK_H_
+#define BASE_TASK_SCHEDULER_TASK_H_
+
+#include "base/base_export.h"
+#include "base/callback_forward.h"
+#include "base/location.h"
+#include "base/pending_task.h"
+#include "base/task_scheduler/task_traits.h"
+#include "base/time/time.h"
+
+namespace base {
+namespace internal {
+
+// A task is a unit of work inside the task scheduler. Support for tracing and
+// profiling is inherited from PendingTask.
+struct BASE_EXPORT Task : public PendingTask {
+  Task(const tracked_objects::Location& posted_from,
+       const Closure& task,
+       const TaskTraits& traits);
+  ~Task();
+
+  // The TaskTraits of this task.
+  const TaskTraits traits;
+
+  // The time at which the task was inserted in its sequence. For an undelayed
+  // task, this happens at post time. For a delayed task, this happens some
+  // time after the task's delay has expired. If the task hasn't been inserted
+  // in a sequence yet, this defaults to a null TimeTicks.
+  TimeTicks sequenced_time;
+};
+
+}  // namespace internal
+}  // namespace base
+
+#endif  // BASE_TASK_SCHEDULER_TASK_H_
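
A minimal construction sketch, mirroring the unittests in this commit; the null base::Closure() stands in for real bound work:

    #include "base/callback.h"
    #include "base/memory/scoped_ptr.h"
    #include "base/task_scheduler/task.h"

    scoped_ptr<base::internal::Task> MakeBackgroundTask() {
      // FROM_HERE records the posting site; the traits carry the priority.
      return scoped_ptr<base::internal::Task>(new base::internal::Task(
          FROM_HERE, base::Closure(),
          base::TaskTraits().WithPriority(base::TaskPriority::BACKGROUND)));
    }
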
diff --git a/base/task_scheduler/task_traits.cc b/base/task_scheduler/task_traits.cc
new file mode 100644
index 0000000..9e5be32
--- /dev/null
+++ b/base/task_scheduler/task_traits.cc
@@ -0,0 +1,51 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/task_scheduler/task_traits.h"
+
+#include <ostream>
+
+namespace base {
+
+// Do not rely on defaults hard-coded below beyond the guarantees described in
+// the header; anything else is subject to change. Tasks should explicitly
+// request defaults if the behavior is critical to the task.
+TaskTraits::TaskTraits()
+    : with_file_io_(false),
+      priority_(TaskPriority::BACKGROUND),
+      shutdown_behavior_(TaskShutdownBehavior::BLOCK_SHUTDOWN) {}
+
+TaskTraits::~TaskTraits() = default;
+
+TaskTraits& TaskTraits::WithFileIO() {
+  with_file_io_ = true;
+  return *this;
+}
+
+TaskTraits& TaskTraits::WithPriority(TaskPriority priority) {
+  priority_ = priority;
+  return *this;
+}
+
+TaskTraits& TaskTraits::WithShutdownBehavior(
+    TaskShutdownBehavior shutdown_behavior) {
+  shutdown_behavior_ = shutdown_behavior;
+  return *this;
+}
+
+void PrintTo(const TaskPriority& task_priority, std::ostream* os) {
+  switch (task_priority) {
+    case TaskPriority::BACKGROUND:
+      *os << "BACKGROUND";
+      break;
+    case TaskPriority::USER_VISIBLE:
+      *os << "USER_VISIBLE";
+      break;
+    case TaskPriority::USER_BLOCKING:
+      *os << "USER_BLOCKING";
+      break;
+  }
+}
+
+}  // namespace base
diff --git a/base/task_scheduler/task_traits.h b/base/task_scheduler/task_traits.h
new file mode 100644
index 0000000..fbd63c5
--- /dev/null
+++ b/base/task_scheduler/task_traits.h
@@ -0,0 +1,134 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef BASE_TASK_SCHEDULER_TASK_TRAITS_H_
+#define BASE_TASK_SCHEDULER_TASK_TRAITS_H_
+
+#include <stdint.h>
+
+#include <iosfwd>
+
+#include "base/base_export.h"
+#include "build/build_config.h"
+
+namespace base {
+
+// Valid priorities supported by the task scheduler. Note: internal algorithms
+// depend on priorities being expressed as a continuous zero-based list from
+// lowest to highest priority. Users of this API shouldn't otherwise care about
+// nor use the underlying values.
+enum class TaskPriority {
+  // This will always be equal to the lowest priority available.
+  LOWEST = 0,
+  // User won't notice if this task takes an arbitrarily long time to complete.
+  BACKGROUND = LOWEST,
+  // This task affects UI or responsiveness of future user interactions. It is
+  // not an immediate response to a user interaction.
+  // Examples:
+  // - Updating the UI to reflect progress on a long task.
+  // - Loading data that might be shown in the UI after a future user
+  //   interaction.
+  USER_VISIBLE,
+  // This task affects UI immediately after a user interaction.
+  // Example: Generating data shown in the UI immediately after a click.
+  USER_BLOCKING,
+  // This will always be equal to the highest priority available.
+  HIGHEST = USER_BLOCKING,
+};
+
+// Valid shutdown behaviors supported by the task scheduler.
+enum class TaskShutdownBehavior {
+  // Tasks posted with this mode that have not started executing before
+  // shutdown is initiated will never run. Tasks with this mode running at
+  // shutdown will be ignored (the worker thread will not be joined).
+  //
+  // This option provides a nice way to post stuff you don't want blocking
+  // shutdown. For example, you might be doing a slow DNS lookup and if it's
+  // blocked on the OS, you may not want to stop shutdown, since the result
+  // doesn't really matter at that point.
+  //
+  // However, you need to be very careful what you do in your callback when you
+  // use this option. Since the thread will continue to run until the OS
+  // terminates the process, the app can be in the process of tearing down when
+  // you're running. This means any singletons or global objects you use may
+  // suddenly become invalid out from under you. For this reason, it's best to
+  // use this only for slow but simple operations like the DNS example.
+  CONTINUE_ON_SHUTDOWN,
+
+  // Tasks posted with this mode that have not started executing at
+  // shutdown will never run. However, any task that has already begun
+  // executing when shutdown is invoked will be allowed to continue and
+  // will block shutdown until completion.
+  //
+  // Note: Because TaskScheduler::Shutdown() may block while these tasks are
+  // executing, care must be taken to ensure that they do not block on the
+  // thread that called TaskScheduler::Shutdown(), as this may lead to deadlock.
+  SKIP_ON_SHUTDOWN,
+
+  // Tasks posted with this mode before shutdown is complete will block shutdown
+  // until they're executed. Generally, this should be used only to save
+  // critical user data.
+  //
+  // Note: Tasks with BACKGROUND priority that block shutdown will be promoted
+  // to USER_VISIBLE priority during shutdown.
+  BLOCK_SHUTDOWN,
+};
+
+// Describes metadata for a single task or a group of tasks.
+class BASE_EXPORT TaskTraits {
+ public:
+  // Constructs a default TaskTraits for tasks with
+  //     (1) no I/O,
+  //     (2) low priority, and
+  //     (3) permission to block shutdown or be skipped on shutdown.
+  // Tasks that require stricter guarantees should highlight those by requesting
+  // explicit traits below.
+  TaskTraits();
+  TaskTraits(const TaskTraits& other) = default;
+  TaskTraits& operator=(const TaskTraits& other) = default;
+  ~TaskTraits();
+
+  // Allows tasks with these traits to do file I/O.
+  TaskTraits& WithFileIO();
+
+  // Applies |priority| to tasks with these traits.
+  TaskTraits& WithPriority(TaskPriority priority);
+
+  // Applies |shutdown_behavior| to tasks with these traits.
+  TaskTraits& WithShutdownBehavior(TaskShutdownBehavior shutdown_behavior);
+
+  // Returns true if file I/O is allowed by these traits.
+  bool with_file_io() const { return with_file_io_; }
+
+  // Returns the priority of tasks with these traits.
+  TaskPriority priority() const { return priority_; }
+
+  // Returns the shutdown behavior of tasks with these traits.
+  TaskShutdownBehavior shutdown_behavior() const { return shutdown_behavior_; }
+
+ private:
+  bool with_file_io_;
+  TaskPriority priority_;
+  TaskShutdownBehavior shutdown_behavior_;
+};
+
+// Describes how tasks are executed by a task runner.
+enum class ExecutionMode {
+  // Can execute multiple tasks at a time in any order.
+  PARALLEL,
+
+  // Executes one task at a time in posting order. The sequence's priority is
+  // equivalent to that of its highest-priority pending task.
+  SEQUENCED,
+
+  // Executes one task at a time on a single thread in posting order.
+  SINGLE_THREADED,
+};
+
+// Pretty Printer for Google Test.
+void BASE_EXPORT PrintTo(const TaskPriority& task_priority, std::ostream* os);
+
+}  // namespace base
+
+#endif  // BASE_TASK_SCHEDULER_TASK_TRAITS_H_
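
The setters return *this, so traits compose as one fluent expression; an illustrative example:

    #include "base/task_scheduler/task_traits.h"

    base::TaskTraits SlowSaveTraits() {
      // File I/O, user-blocking, and must finish before shutdown completes.
      return base::TaskTraits()
          .WithFileIO()
          .WithPriority(base::TaskPriority::USER_BLOCKING)
          .WithShutdownBehavior(base::TaskShutdownBehavior::BLOCK_SHUTDOWN);
    }
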
diff --git a/base/template_util.h b/base/template_util.h
index d58807a..0c3cac2 100644
--- a/base/template_util.h
+++ b/base/template_util.h
@@ -5,117 +5,15 @@
 #ifndef BASE_TEMPLATE_UTIL_H_
 #define BASE_TEMPLATE_UTIL_H_
 
-#include <stddef.h>
+#include <type_traits>
 
 #include "build/build_config.h"
 
 namespace base {
 
-// template definitions from tr1
-
-template<class T, T v>
-struct integral_constant {
-  static const T value = v;
-  typedef T value_type;
-  typedef integral_constant<T, v> type;
-};
-
-template <class T, T v> const T integral_constant<T, v>::value;
-
-typedef integral_constant<bool, true> true_type;
-typedef integral_constant<bool, false> false_type;
-
-template <class T> struct is_pointer : false_type {};
-template <class T> struct is_pointer<T*> : true_type {};
-
-// Member function pointer detection. This is built-in to C++ 11's stdlib, and
-// we can remove this when we switch to it.
-template<typename T>
-struct is_member_function_pointer : false_type {};
-
-template <typename R, typename Z, typename... A>
-struct is_member_function_pointer<R(Z::*)(A...)> : true_type {};
-template <typename R, typename Z, typename... A>
-struct is_member_function_pointer<R(Z::*)(A...) const> : true_type {};
-
-
-template <class T, class U> struct is_same : public false_type {};
-template <class T> struct is_same<T,T> : true_type {};
-
-template<class> struct is_array : public false_type {};
-template<class T, size_t n> struct is_array<T[n]> : public true_type {};
-template<class T> struct is_array<T[]> : public true_type {};
-
-template <class T> struct is_non_const_reference : false_type {};
-template <class T> struct is_non_const_reference<T&> : true_type {};
-template <class T> struct is_non_const_reference<const T&> : false_type {};
-
-template <class T> struct is_const : false_type {};
-template <class T> struct is_const<const T> : true_type {};
-
-template <class T> struct is_void : false_type {};
-template <> struct is_void<void> : true_type {};
-
-namespace internal {
-
-// Types YesType and NoType are guaranteed such that sizeof(YesType) <
-// sizeof(NoType).
-typedef char YesType;
-
-struct NoType {
-  YesType dummy[2];
-};
-
-// This class is an implementation detail for is_convertible, and you
-// don't need to know how it works to use is_convertible. For those
-// who care: we declare two different functions, one whose argument is
-// of type To and one with a variadic argument list. We give them
-// return types of different size, so we can use sizeof to trick the
-// compiler into telling us which function it would have chosen if we
-// had called it with an argument of type From.  See Alexandrescu's
-// _Modern C++ Design_ for more details on this sort of trick.
-
-struct ConvertHelper {
-  template <typename To>
-  static YesType Test(To);
-
-  template <typename To>
-  static NoType Test(...);
-
-  template <typename From>
-  static From& Create();
-};
-
-// Used to determine if a type is a struct/union/class. Inspired by Boost's
-// is_class type_trait implementation.
-struct IsClassHelper {
-  template <typename C>
-  static YesType Test(void(C::*)(void));
-
-  template <typename C>
-  static NoType Test(...);
-};
-
-}  // namespace internal
-
-// Inherits from true_type if From is convertible to To, false_type otherwise.
-//
-// Note that if the type is convertible, this will be a true_type REGARDLESS
-// of whether or not the conversion would emit a warning.
-template <typename From, typename To>
-struct is_convertible
-    : integral_constant<bool,
-                        sizeof(internal::ConvertHelper::Test<To>(
-                                   internal::ConvertHelper::Create<From>())) ==
-                        sizeof(internal::YesType)> {
-};
-
-template <typename T>
-struct is_class
-    : integral_constant<bool,
-                        sizeof(internal::IsClassHelper::Test<T>(0)) ==
-                            sizeof(internal::YesType)> {
-};
+template <class T> struct is_non_const_reference : std::false_type {};
+template <class T> struct is_non_const_reference<T&> : std::true_type {};
+template <class T> struct is_non_const_reference<const T&> : std::false_type {};
 
 }  // namespace base
 
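
With C++11's <type_traits> available, the hand-rolled TR1-era traits are deleted; callers migrate to the std:: equivalents, and only is_non_const_reference survives because the standard library has no direct counterpart. An illustrative mapping:

    #include <type_traits>

    #include "base/template_util.h"

    static_assert(std::is_same<int, int>::value, "replaces base::is_same");
    static_assert(std::is_pointer<int*>::value, "replaces base::is_pointer");
    static_assert(std::is_convertible<char, int>::value,
                  "replaces base::is_convertible");
    static_assert(base::is_non_const_reference<int&>::value,
                  "still provided by base/template_util.h");
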
diff --git a/base/template_util_unittest.cc b/base/template_util_unittest.cc
index b960ab1..25441cd 100644
--- a/base/template_util_unittest.cc
+++ b/base/template_util_unittest.cc
@@ -9,99 +9,11 @@
 namespace base {
 namespace {
 
-struct AStruct {};
-class AClass {};
-enum AnEnum {};
-
-class Parent {};
-class Child : public Parent {};
-
-// is_pointer<Type>
-static_assert(!is_pointer<int>::value, "IsPointer");
-static_assert(!is_pointer<int&>::value, "IsPointer");
-static_assert(is_pointer<int*>::value, "IsPointer");
-static_assert(is_pointer<const int*>::value, "IsPointer");
-
-// is_array<Type>
-static_assert(!is_array<int>::value, "IsArray");
-static_assert(!is_array<int*>::value, "IsArray");
-static_assert(!is_array<int (*)[3]>::value, "IsArray");
-static_assert(is_array<int[]>::value, "IsArray");
-static_assert(is_array<const int[]>::value, "IsArray");
-static_assert(is_array<int[3]>::value, "IsArray");
-
 // is_non_const_reference<Type>
 static_assert(!is_non_const_reference<int>::value, "IsNonConstReference");
 static_assert(!is_non_const_reference<const int&>::value,
               "IsNonConstReference");
 static_assert(is_non_const_reference<int&>::value, "IsNonConstReference");
 
-// is_convertible<From, To>
-
-// Extra parens needed to make preprocessor macro parsing happy. Otherwise,
-// it sees the equivalent of:
-//
-//     (is_convertible < Child), (Parent > ::value)
-//
-// Silly C++.
-static_assert((is_convertible<Child, Parent>::value), "IsConvertible");
-static_assert(!(is_convertible<Parent, Child>::value), "IsConvertible");
-static_assert(!(is_convertible<Parent, AStruct>::value), "IsConvertible");
-static_assert((is_convertible<int, double>::value), "IsConvertible");
-static_assert((is_convertible<int*, void*>::value), "IsConvertible");
-static_assert(!(is_convertible<void*, int*>::value), "IsConvertible");
-
-// Array types are an easy corner case.  Make sure to test that
-// it does indeed compile.
-static_assert(!(is_convertible<int[10], double>::value), "IsConvertible");
-static_assert(!(is_convertible<double, int[10]>::value), "IsConvertible");
-static_assert((is_convertible<int[10], int*>::value), "IsConvertible");
-
-// is_same<Type1, Type2>
-static_assert(!(is_same<Child, Parent>::value), "IsSame");
-static_assert(!(is_same<Parent, Child>::value), "IsSame");
-static_assert((is_same<Parent, Parent>::value), "IsSame");
-static_assert((is_same<int*, int*>::value), "IsSame");
-static_assert((is_same<int, int>::value), "IsSame");
-static_assert((is_same<void, void>::value), "IsSame");
-static_assert(!(is_same<int, double>::value), "IsSame");
-
-// is_class<Type>
-static_assert(is_class<AStruct>::value, "IsClass");
-static_assert(is_class<AClass>::value, "IsClass");
-static_assert(!is_class<AnEnum>::value, "IsClass");
-static_assert(!is_class<int>::value, "IsClass");
-static_assert(!is_class<char*>::value, "IsClass");
-static_assert(!is_class<int&>::value, "IsClass");
-static_assert(!is_class<char[3]>::value, "IsClass");
-
-static_assert(!is_member_function_pointer<int>::value,
-              "IsMemberFunctionPointer");
-static_assert(!is_member_function_pointer<int*>::value,
-              "IsMemberFunctionPointer");
-static_assert(!is_member_function_pointer<void*>::value,
-              "IsMemberFunctionPointer");
-static_assert(!is_member_function_pointer<AStruct>::value,
-              "IsMemberFunctionPointer");
-static_assert(!is_member_function_pointer<AStruct*>::value,
-              "IsMemberFunctionPointer");
-static_assert(!is_member_function_pointer<void (*)()>::value,
-              "IsMemberFunctionPointer");
-static_assert(!is_member_function_pointer<int (*)(int)>::value,
-              "IsMemberFunctionPointer");
-static_assert(!is_member_function_pointer<int (*)(int, int)>::value,
-              "IsMemberFunctionPointer");
-
-static_assert(is_member_function_pointer<void (AStruct::*)()>::value,
-              "IsMemberFunctionPointer");
-static_assert(is_member_function_pointer<void (AStruct::*)(int)>::value,
-              "IsMemberFunctionPointer");
-static_assert(is_member_function_pointer<int (AStruct::*)(int)>::value,
-              "IsMemberFunctionPointer");
-static_assert(is_member_function_pointer<int (AStruct::*)(int) const>::value,
-              "IsMemberFunctionPointer");
-static_assert(is_member_function_pointer<int (AStruct::*)(int, int)>::value,
-              "IsMemberFunctionPointer");
-
 }  // namespace
 }  // namespace base
diff --git a/base/test/ios/OWNERS b/base/test/ios/OWNERS
index 1b3348e..40a68c7 100644
--- a/base/test/ios/OWNERS
+++ b/base/test/ios/OWNERS
@@ -1,2 +1 @@
 rohitrao@chromium.org
-stuartmorgan@chromium.org
diff --git a/base/test/simple_test_tick_clock.cc b/base/test/simple_test_tick_clock.cc
index 1b4696f..c6375bd 100644
--- a/base/test/simple_test_tick_clock.cc
+++ b/base/test/simple_test_tick_clock.cc
@@ -23,4 +23,9 @@
   now_ticks_ += delta;
 }
 
+void SimpleTestTickClock::SetNowTicks(TimeTicks ticks) {
+  AutoLock lock(lock_);
+  now_ticks_ = ticks;
+}
+
 }  // namespace base
diff --git a/base/test/simple_test_tick_clock.h b/base/test/simple_test_tick_clock.h
index aebdebc..f2f7581 100644
--- a/base/test/simple_test_tick_clock.h
+++ b/base/test/simple_test_tick_clock.h
@@ -26,6 +26,9 @@
   // Advances the clock by |delta|, which must not be negative.
   void Advance(TimeDelta delta);
 
+  // Sets the clock to the given time.
+  void SetNowTicks(TimeTicks ticks);
+
  private:
   // Protects |now_ticks_|.
   Lock lock_;
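
Advance() can only move the clock forward by a delta; the new SetNowTicks() lets a test jump to an absolute time, e.g. to replay a recorded timestamp. Illustrative usage:

    #include "base/test/simple_test_tick_clock.h"

    void ClockSetupDemo() {
      base::SimpleTestTickClock clock;
      clock.Advance(base::TimeDelta::FromSeconds(1));             // Relative.
      clock.SetNowTicks(base::TimeTicks::FromInternalValue(42));  // Absolute.
    }
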
diff --git a/base/test/test_pending_task.cc b/base/test/test_pending_task.cc
index 3f2c79d..d912df4 100644
--- a/base/test/test_pending_task.cc
+++ b/base/test/test_pending_task.cc
@@ -22,6 +22,8 @@
       delay(delay),
       nestability(nestability) {}
 
+TestPendingTask::TestPendingTask(const TestPendingTask& other) = default;
+
 TimeTicks TestPendingTask::GetTimeToRun() const {
   return post_time + delay;
 }
@@ -50,12 +52,12 @@
   state->SetInteger("delay", delay.ToInternalValue());
 }
 
-scoped_refptr<base::trace_event::ConvertableToTraceFormat>
+scoped_ptr<base::trace_event::ConvertableToTraceFormat>
 TestPendingTask::AsValue() const {
-  scoped_refptr<base::trace_event::TracedValue> state =
-      new base::trace_event::TracedValue();
+  scoped_ptr<base::trace_event::TracedValue> state(
+      new base::trace_event::TracedValue());
   AsValueInto(state.get());
-  return state;
+  return std::move(state);
 }
 
 std::string TestPendingTask::ToString() const {
diff --git a/base/test/test_pending_task.h b/base/test/test_pending_task.h
index 829baa6..3b29961 100644
--- a/base/test/test_pending_task.h
+++ b/base/test/test_pending_task.h
@@ -21,6 +21,7 @@
   enum TestNestability { NESTABLE, NON_NESTABLE };
 
   TestPendingTask();
+  TestPendingTask(const TestPendingTask& other);
   TestPendingTask(const tracked_objects::Location& location,
                   const Closure& task,
                   TimeTicks post_time,
@@ -58,7 +59,7 @@
   // Functions for using test pending task with tracing, useful in unit
   // testing.
   void AsValueInto(base::trace_event::TracedValue* state) const;
-  scoped_refptr<base::trace_event::ConvertableToTraceFormat> AsValue() const;
+  scoped_ptr<base::trace_event::ConvertableToTraceFormat> AsValue() const;
   std::string ToString() const;
 };
 
diff --git a/base/third_party/nspr/prtime.cc b/base/third_party/nspr/prtime.cc
index 88bd47b..6c07f0d 100644
--- a/base/third_party/nspr/prtime.cc
+++ b/base/third_party/nspr/prtime.cc
@@ -53,8 +53,8 @@
  * PR_NormalizeTime
  * PR_GMTParameters
  * PR_ImplodeTime
- *   This was modified to use the Win32 SYSTEMTIME/FILETIME structures
- *   and the timezone offsets are applied to the FILETIME structure.
+ *   Upstream implementation from
+ *   http://lxr.mozilla.org/nspr/source/pr/src/misc/prtime.c#221
  * All types and macros are defined in the base/third_party/prtime.h file.
  * These have been copied from the following nspr files. We have only copied
  * over the types we need.
@@ -71,136 +71,10 @@
 #include "base/third_party/nspr/prtime.h"
 #include "build/build_config.h"
 
-#if defined(OS_WIN)
-#include <windows.h>
-#elif defined(OS_MACOSX)
-#include <CoreFoundation/CoreFoundation.h>
-#elif defined(OS_ANDROID)
-#include <ctype.h>
-#include "base/os_compat_android.h"  // For timegm()
-#elif defined(OS_NACL)
-#include "base/os_compat_nacl.h"  // For timegm()
-#endif
 #include <errno.h>  /* for EINVAL */
 #include <time.h>
 
-/* Implements the Unix localtime_r() function for windows */
-#if defined(OS_WIN)
-static void localtime_r(const time_t* secs, struct tm* time) {
-  (void) localtime_s(time, secs);
-}
-#endif
-
 /*
- *------------------------------------------------------------------------
- *
- * PR_ImplodeTime --
- *
- *     Cf. time_t mktime(struct tm *tp)
- *     Note that 1 year has < 2^25 seconds.  So an PRInt32 is large enough.
- *
- *------------------------------------------------------------------------
- */
-PRTime
-PR_ImplodeTime(const PRExplodedTime *exploded)
-{
-    // This is important, we want to make sure multiplications are
-    // done with the correct precision.
-    static const PRTime kSecondsToMicroseconds = static_cast<PRTime>(1000000);
-#if defined(OS_WIN)
-   // Create the system struct representing our exploded time.
-    SYSTEMTIME st = {};
-    FILETIME ft = {};
-    ULARGE_INTEGER uli = {};
-
-    st.wYear = exploded->tm_year;
-    st.wMonth = static_cast<WORD>(exploded->tm_month + 1);
-    st.wDayOfWeek = exploded->tm_wday;
-    st.wDay = static_cast<WORD>(exploded->tm_mday);
-    st.wHour = static_cast<WORD>(exploded->tm_hour);
-    st.wMinute = static_cast<WORD>(exploded->tm_min);
-    st.wSecond = static_cast<WORD>(exploded->tm_sec);
-    st.wMilliseconds = static_cast<WORD>(exploded->tm_usec/1000);
-     // Convert to FILETIME.
-    if (!SystemTimeToFileTime(&st, &ft)) {
-      NOTREACHED() << "Unable to convert time";
-      return 0;
-    }
-    // Apply offsets.
-    uli.LowPart = ft.dwLowDateTime;
-    uli.HighPart = ft.dwHighDateTime;
-    // Convert from Windows epoch to NSPR epoch, and 100-nanoseconds units
-    // to microsecond units.
-    PRTime result =
-        static_cast<PRTime>((uli.QuadPart / 10) - 11644473600000000i64);
-    // Adjust for time zone and dst.  Convert from seconds to microseconds.
-    result -= (exploded->tm_params.tp_gmt_offset +
-               exploded->tm_params.tp_dst_offset) * kSecondsToMicroseconds;
-    // Add microseconds that cannot be represented in |st|.
-    result += exploded->tm_usec % 1000;
-    return result;
-#elif defined(OS_MACOSX)
-    // Create the system struct representing our exploded time.
-    CFGregorianDate gregorian_date;
-    gregorian_date.year = exploded->tm_year;
-    gregorian_date.month = exploded->tm_month + 1;
-    gregorian_date.day = exploded->tm_mday;
-    gregorian_date.hour = exploded->tm_hour;
-    gregorian_date.minute = exploded->tm_min;
-    gregorian_date.second = exploded->tm_sec;
-
-    // Compute |absolute_time| in seconds, correct for gmt and dst
-    // (note the combined offset will be negative when we need to add it), then
-    // convert to microseconds which is what PRTime expects.
-    CFAbsoluteTime absolute_time =
-        CFGregorianDateGetAbsoluteTime(gregorian_date, NULL);
-    PRTime result = static_cast<PRTime>(absolute_time);
-    result -= exploded->tm_params.tp_gmt_offset +
-              exploded->tm_params.tp_dst_offset;
-    result += kCFAbsoluteTimeIntervalSince1970;  // PRTime epoch is 1970
-    result *= kSecondsToMicroseconds;
-    result += exploded->tm_usec;
-    return result;
-#elif defined(OS_POSIX)
-    struct tm exp_tm;
-    memset(&exp_tm, 0, sizeof(exp_tm));
-    exp_tm.tm_sec  = exploded->tm_sec;
-    exp_tm.tm_min  = exploded->tm_min;
-    exp_tm.tm_hour = exploded->tm_hour;
-    exp_tm.tm_mday = exploded->tm_mday;
-    exp_tm.tm_mon  = exploded->tm_month;
-    exp_tm.tm_year = exploded->tm_year - 1900;
-
-    time_t absolute_time = timegm(&exp_tm);
-
-    // If timegm returned -1.  Since we don't pass it a time zone, the only
-    // valid case of returning -1 is 1 second before Epoch (Dec 31, 1969).
-    if (absolute_time == -1 &&
-        !(exploded->tm_year == 1969 && exploded->tm_month == 11 &&
-        exploded->tm_mday == 31 && exploded->tm_hour == 23 &&
-        exploded->tm_min == 59 && exploded->tm_sec == 59)) {
-      // If we get here, time_t must be 32 bits.
-      // Date was possibly too far in the future and would overflow.  Return
-      // the most future date possible (year 2038).
-      if (exploded->tm_year >= 1970)
-        return INT_MAX * kSecondsToMicroseconds;
-      // Date was possibly too far in the past and would underflow.  Return
-      // the most past date possible (year 1901).
-      return INT_MIN * kSecondsToMicroseconds;
-    }
-
-    PRTime result = static_cast<PRTime>(absolute_time);
-    result -= exploded->tm_params.tp_gmt_offset +
-              exploded->tm_params.tp_dst_offset;
-    result *= kSecondsToMicroseconds;
-    result += exploded->tm_usec;
-    return result;
-#else
-#error No PR_ImplodeTime implemented on your platform.
-#endif
-}
-
-/* 
  * The COUNT_LEAPS macro counts the number of leap years passed by
  * till the start of the given year Y.  At the start of the year 4
  * A.D. the number of leap years passed by is 0, while at the start of
@@ -215,9 +89,16 @@
  * midnight 00:00:00.
  */
 
-#define COUNT_LEAPS(Y)   ( ((Y)-1)/4 - ((Y)-1)/100 + ((Y)-1)/400 )
-#define COUNT_DAYS(Y)  ( ((Y)-1)*365 + COUNT_LEAPS(Y) )
-#define DAYS_BETWEEN_YEARS(A, B)  (COUNT_DAYS(B) - COUNT_DAYS(A))
+#define COUNT_LEAPS(Y) (((Y)-1) / 4 - ((Y)-1) / 100 + ((Y)-1) / 400)
+#define COUNT_DAYS(Y) (((Y)-1) * 365 + COUNT_LEAPS(Y))
+#define DAYS_BETWEEN_YEARS(A, B) (COUNT_DAYS(B) - COUNT_DAYS(A))
+
+/* Implements the Unix localtime_r() function for windows */
+#if defined(OS_WIN)
+static void localtime_r(const time_t* secs, struct tm* time) {
+  (void) localtime_s(time, secs);
+}
+#endif
 
 /*
  * Static variables used by functions in this file
@@ -243,6 +124,56 @@
 };
 
 /*
+ *------------------------------------------------------------------------
+ *
+ * PR_ImplodeTime --
+ *
+ *     Cf. time_t mktime(struct tm *tp)
+ *     Note that 1 year has < 2^25 seconds.  So an PRInt32 is large enough.
+ *
+ *------------------------------------------------------------------------
+ */
+PRTime
+PR_ImplodeTime(const PRExplodedTime *exploded)
+{
+  PRExplodedTime copy;
+  PRTime retVal;
+  PRInt64 secPerDay, usecPerSec;
+  PRInt64 temp;
+  PRInt64 numSecs64;
+  PRInt32 numDays;
+  PRInt32 numSecs;
+
+  /* Normalize first.  Do this on our copy */
+  copy = *exploded;
+  PR_NormalizeTime(&copy, PR_GMTParameters);
+
+  numDays = DAYS_BETWEEN_YEARS(1970, copy.tm_year);
+
+  numSecs = copy.tm_yday * 86400 + copy.tm_hour * 3600 + copy.tm_min * 60 +
+            copy.tm_sec;
+
+  LL_I2L(temp, numDays);
+  LL_I2L(secPerDay, 86400);
+  LL_MUL(temp, temp, secPerDay);
+  LL_I2L(numSecs64, numSecs);
+  LL_ADD(numSecs64, numSecs64, temp);
+
+  /* apply the GMT and DST offsets */
+  LL_I2L(temp, copy.tm_params.tp_gmt_offset);
+  LL_SUB(numSecs64, numSecs64, temp);
+  LL_I2L(temp, copy.tm_params.tp_dst_offset);
+  LL_SUB(numSecs64, numSecs64, temp);
+
+  LL_I2L(usecPerSec, 1000000L);
+  LL_MUL(temp, numSecs64, usecPerSec);
+  LL_I2L(retVal, copy.tm_usec);
+  LL_ADD(retVal, retVal, temp);
+
+  return retVal;
+}
+
+/*
  *-------------------------------------------------------------------------
  *
  * IsLeapYear --
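
The replacement PR_ImplodeTime() is pure integer arithmetic on a normalized GMT copy of the input. A worked example for 1970-01-02 00:00:00.000005 GMT (assuming PR_NormalizeTime() has filled in tm_yday = 1):

    numDays   = DAYS_BETWEEN_YEARS(1970, 1970)      /* = 0              */
    numSecs   = 1*86400 + 0*3600 + 0*60 + 0         /* = 86400          */
    numSecs64 = 0*86400 + 86400 - 0 - 0             /* GMT: offsets = 0 */
    retVal    = 86400 * 1000000 + 5                 /* = 86400000005 us */
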
diff --git a/base/third_party/nspr/prtime.h b/base/third_party/nspr/prtime.h
index 01a4e54..20bae38 100644
--- a/base/third_party/nspr/prtime.h
+++ b/base/third_party/nspr/prtime.h
@@ -73,6 +73,17 @@
 #define PR_INT16_MAX 32767
 #define NSPR_API(__type) extern __type
 
+/*
+ * Long-long (64-bit signed integer type) support macros used by
+ * PR_ImplodeTime().
+ * See http://lxr.mozilla.org/nspr/source/pr/include/prlong.h
+ */
+
+#define LL_I2L(l, i) ((l) = (PRInt64)(i))
+#define LL_MUL(r, a, b) ((r) = (a) * (b))
+#define LL_ADD(r, a, b) ((r) = (a) + (b))
+#define LL_SUB(r, a, b) ((r) = (a) - (b))
+
 /**********************************************************************/
 /************************* TYPES AND CONSTANTS ************************/
 /**********************************************************************/
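
On any compiler with a native 64-bit integer type these LL_* macros are plain assignments and arithmetic; keeping the macro spelling presumably keeps PR_ImplodeTime() textually close to the upstream NSPR source linked above. The expansion, spelled out:

    PRInt64 temp = 0, numSecs64 = 86400, usecPerSec = 0;
    LL_I2L(usecPerSec, 1000000L);         /* usecPerSec = (PRInt64)1000000L; */
    LL_MUL(temp, numSecs64, usecPerSec);  /* temp = numSecs64 * usecPerSec;  */
    LL_ADD(temp, temp, numSecs64);        /* temp = temp + numSecs64;        */
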
diff --git a/base/threading/platform_thread.h b/base/threading/platform_thread.h
index e2b09bc..72da93b 100644
--- a/base/threading/platform_thread.h
+++ b/base/threading/platform_thread.h
@@ -99,7 +99,7 @@
 
 // Valid values for priority of Thread::Options and SimpleThread::Options, and
 // SetCurrentThreadPriority(), listed in increasing order of importance.
-enum class ThreadPriority {
+enum class ThreadPriority : int {
   // Suitable for threads that shouldn't disrupt high priority work.
   BACKGROUND,
   // Default priority level.
@@ -182,7 +182,8 @@
 
   // Toggles the current thread's priority at runtime. A thread may not be able
   // to raise its priority back up after lowering it if the process does not
-  // have a proper permission, e.g. CAP_SYS_NICE on Linux.
+  // have a proper permission, e.g. CAP_SYS_NICE on Linux. A thread may not be
+  // able to lower its priority back down after raising it to REALTIME_AUDIO.
   // Since changing other threads' priority is not permitted in favor of
   // security, this interface is restricted to change only the current thread
   // priority (https://crbug.com/399473).
diff --git a/base/threading/platform_thread_freebsd.cc b/base/threading/platform_thread_freebsd.cc
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/base/threading/platform_thread_freebsd.cc
diff --git a/base/threading/platform_thread_linux.cc b/base/threading/platform_thread_linux.cc
index 3e7ee68..7e2365c 100644
--- a/base/threading/platform_thread_linux.cc
+++ b/base/threading/platform_thread_linux.cc
@@ -29,30 +29,19 @@
 namespace {
 #if !defined(OS_NACL)
 const struct sched_param kRealTimePrio = {8};
-const struct sched_param kResetPrio = {0};
 #endif
 }  // namespace
 
 const ThreadPriorityToNiceValuePair kThreadPriorityToNiceValueMap[4] = {
     {ThreadPriority::BACKGROUND, 10},
     {ThreadPriority::NORMAL, 0},
-    {ThreadPriority::DISPLAY, -6},
+    {ThreadPriority::DISPLAY, -8},
     {ThreadPriority::REALTIME_AUDIO, -10},
 };
 
 bool SetCurrentThreadPriorityForPlatform(ThreadPriority priority) {
 #if !defined(OS_NACL)
-  ThreadPriority current_priority;
-  if (priority != ThreadPriority::REALTIME_AUDIO &&
-      GetCurrentThreadPriorityForPlatform(&current_priority) &&
-      current_priority == ThreadPriority::REALTIME_AUDIO) {
-    // If the pthread's round-robin scheduler is already enabled, and the new
-    // priority will use setpriority() instead, the pthread scheduler should be
-    // reset to use SCHED_OTHER so that setpriority() just works.
-    pthread_setschedparam(pthread_self(), SCHED_OTHER, &kResetPrio);
-    return false;
-  }
-  return priority == ThreadPriority::REALTIME_AUDIO  &&
+  return priority == ThreadPriority::REALTIME_AUDIO &&
          pthread_setschedparam(pthread_self(), SCHED_RR, &kRealTimePrio) == 0;
 #else
   return false;
@@ -103,8 +92,6 @@
 
 void InitThreading() {}
 
-void InitOnThread() {}
-
 void TerminateOnThread() {}
 
 size_t GetDefaultThreadStackSize(const pthread_attr_t& /* attributes */) {
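
Non-realtime priorities fall through to nice values via the table above (note DISPLAY tightens from -6 to -8). A sketch of the lookup the shared POSIX code performs, assuming the table and its priority/nice_value fields are exposed in base::internal through platform_thread_internal_posix.h (the helper name here is illustrative):

    #include "base/logging.h"
    #include "base/threading/platform_thread.h"
    #include "base/threading/platform_thread_internal_posix.h"

    int NiceValueFor(base::ThreadPriority priority) {
      // The 4-entry table above maps each ThreadPriority to a nice value.
      for (const auto& pair : base::internal::kThreadPriorityToNiceValueMap) {
        if (pair.priority == priority)
          return pair.nice_value;
      }
      NOTREACHED();
      return 0;  // ThreadPriority::NORMAL's nice value.
    }
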
diff --git a/base/threading/platform_thread_mac.mm b/base/threading/platform_thread_mac.mm
index df11f85..51f3621 100644
--- a/base/threading/platform_thread_mac.mm
+++ b/base/threading/platform_thread_mac.mm
@@ -15,6 +15,7 @@
 
 #include "base/lazy_instance.h"
 #include "base/logging.h"
+#include "base/mac/foundation_util.h"
 #include "base/mac/mach_logging.h"
 #include "base/threading/thread_id_name_manager.h"
 #include "base/tracked_objects.h"
@@ -22,6 +23,10 @@
 
 namespace base {
 
+namespace {
+NSString* const kThreadPriorityKey = @"CrThreadPriorityKey";
+}  // namespace
+
 // If Cocoa is to be used on more than one thread, it must know that the
 // application is multithreaded.  Since it's possible to enter Cocoa code
 // from threads created by pthread_thread_create, Cocoa won't necessarily
@@ -164,21 +169,41 @@
 
   switch (priority) {
     case ThreadPriority::NORMAL:
+    case ThreadPriority::BACKGROUND:
+    case ThreadPriority::DISPLAY:
+      // Add support for non-NORMAL thread priorities. https://crbug.com/554651
       SetPriorityNormal(mach_thread_id);
       break;
     case ThreadPriority::REALTIME_AUDIO:
       SetPriorityRealtimeAudio(mach_thread_id);
       break;
-    default:
-      NOTREACHED() << "Unknown priority.";
-      break;
   }
+
+  [[[NSThread currentThread] threadDictionary]
+      setObject:@(static_cast<int>(priority))
+         forKey:kThreadPriorityKey];
 }
 
 // static
 ThreadPriority PlatformThread::GetCurrentThreadPriority() {
-  NOTIMPLEMENTED();
-  return ThreadPriority::NORMAL;
+  NSNumber* priority = base::mac::ObjCCast<NSNumber>([[[NSThread currentThread]
+      threadDictionary] objectForKey:kThreadPriorityKey]);
+
+  if (!priority)
+    return ThreadPriority::NORMAL;
+
+  ThreadPriority thread_priority =
+      static_cast<ThreadPriority>(priority.intValue);
+  switch (thread_priority) {
+    case ThreadPriority::BACKGROUND:
+    case ThreadPriority::NORMAL:
+    case ThreadPriority::DISPLAY:
+    case ThreadPriority::REALTIME_AUDIO:
+      return thread_priority;
+    default:
+      NOTREACHED() << "Unknown priority.";
+      return ThreadPriority::NORMAL;
+  }
 }
 
 size_t GetDefaultThreadStackSize(const pthread_attr_t& attributes) {
@@ -216,9 +241,6 @@
 #endif
 }
 
-void InitOnThread() {
-}
-
 void TerminateOnThread() {
 }
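The Mac change above caches the requested priority in the thread's NSThread threadDictionary so GetCurrentThreadPriority() can read it back. A rough C++ analog of that per-thread bookkeeping, using thread_local as a stand-in for the dictionary (illustrative only, not the actual Mac implementation):

enum class ThreadPriority { BACKGROUND, NORMAL, DISPLAY, REALTIME_AUDIO };

// Stand-in for the [[NSThread currentThread] threadDictionary] entry above.
thread_local ThreadPriority g_cached_priority = ThreadPriority::NORMAL;

void CacheCurrentThreadPriority(ThreadPriority priority) {
  // The real code also applies the priority via Mach APIs; this sketch only
  // models the bookkeeping that the getter reads back.
  g_cached_priority = priority;
}

ThreadPriority GetCachedThreadPriority() {
  return g_cached_priority;
}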
 
diff --git a/base/threading/platform_thread_posix.cc b/base/threading/platform_thread_posix.cc
index 39a0073..bd6ae2d 100644
--- a/base/threading/platform_thread_posix.cc
+++ b/base/threading/platform_thread_posix.cc
@@ -29,7 +29,6 @@
 namespace base {
 
 void InitThreading();
-void InitOnThread();
 void TerminateOnThread();
 size_t GetDefaultThreadStackSize(const pthread_attr_t& attributes);
 
@@ -45,8 +44,6 @@
 };
 
 void* ThreadFunc(void* params) {
-  base::InitOnThread();
-
   PlatformThread::Delegate* delegate = nullptr;
 
   {
@@ -56,8 +53,12 @@
     if (!thread_params->joinable)
       base::ThreadRestrictions::SetSingletonAllowed(false);
 
-    if (thread_params->priority != ThreadPriority::NORMAL)
-      PlatformThread::SetCurrentThreadPriority(thread_params->priority);
+#if !defined(OS_NACL)
+    // Threads on Linux/Android may inherit their priority from the thread
+    // where they were created. This explicitly sets the priority of all new
+    // threads.
+    PlatformThread::SetCurrentThreadPriority(thread_params->priority);
+#endif
   }
 
   ThreadIdNameManager::GetInstance()->RegisterThread(
diff --git a/base/threading/platform_thread_unittest.cc b/base/threading/platform_thread_unittest.cc
index 52f8d1b..6738775 100644
--- a/base/threading/platform_thread_unittest.cc
+++ b/base/threading/platform_thread_unittest.cc
@@ -217,7 +217,8 @@
 
 class ThreadPriorityTestThread : public FunctionTestThread {
  public:
-  ThreadPriorityTestThread() = default;
+  explicit ThreadPriorityTestThread(ThreadPriority priority)
+      : priority_(priority) {}
   ~ThreadPriorityTestThread() override = default;
 
  private:
@@ -226,50 +227,48 @@
     EXPECT_EQ(ThreadPriority::NORMAL,
               PlatformThread::GetCurrentThreadPriority());
 
-    // Toggle each supported priority on the current thread and confirm it
-    // affects it.
-    const bool bumping_priority_allowed = IsBumpingPriorityAllowed();
-    for (size_t i = 0; i < arraysize(kThreadPriorityTestValues); ++i) {
-      SCOPED_TRACE(i);
-      if (!bumping_priority_allowed &&
-          kThreadPriorityTestValues[i] >
-              PlatformThread::GetCurrentThreadPriority()) {
-        continue;
-      }
-
-      // Alter and verify the current thread's priority.
-      PlatformThread::SetCurrentThreadPriority(kThreadPriorityTestValues[i]);
-      EXPECT_EQ(kThreadPriorityTestValues[i],
-                PlatformThread::GetCurrentThreadPriority());
-    }
+    // Alter and verify the current thread's priority.
+    PlatformThread::SetCurrentThreadPriority(priority_);
+    EXPECT_EQ(priority_, PlatformThread::GetCurrentThreadPriority());
   }
 
+  const ThreadPriority priority_;
+
   DISALLOW_COPY_AND_ASSIGN(ThreadPriorityTestThread);
 };
 
 }  // namespace
 
-#if defined(OS_MACOSX)
-// PlatformThread::GetCurrentThreadPriority() is not implemented on OS X.
-#define MAYBE_ThreadPriorityCurrentThread DISABLED_ThreadPriorityCurrentThread
-#else
-#define MAYBE_ThreadPriorityCurrentThread ThreadPriorityCurrentThread
-#endif
-
 // Test changing a created thread's priority (which has different semantics on
 // some platforms).
-TEST(PlatformThreadTest, MAYBE_ThreadPriorityCurrentThread) {
-  ThreadPriorityTestThread thread;
-  PlatformThreadHandle handle;
+TEST(PlatformThreadTest, ThreadPriorityCurrentThread) {
+  const bool bumping_priority_allowed = IsBumpingPriorityAllowed();
+  if (bumping_priority_allowed) {
+    // Bump the priority in order to verify that new threads are started with
+    // normal priority.
+    PlatformThread::SetCurrentThreadPriority(ThreadPriority::DISPLAY);
+  }
 
-  ASSERT_FALSE(thread.IsRunning());
-  ASSERT_TRUE(PlatformThread::Create(0, &thread, &handle));
-  thread.WaitForTerminationReady();
-  ASSERT_TRUE(thread.IsRunning());
+  // Toggle each supported priority on the thread and confirm it affects it.
+  for (size_t i = 0; i < arraysize(kThreadPriorityTestValues); ++i) {
+    if (!bumping_priority_allowed &&
+        kThreadPriorityTestValues[i] >
+            PlatformThread::GetCurrentThreadPriority()) {
+      continue;
+    }
 
-  thread.MarkForTermination();
-  PlatformThread::Join(handle);
-  ASSERT_FALSE(thread.IsRunning());
+    ThreadPriorityTestThread thread(kThreadPriorityTestValues[i]);
+    PlatformThreadHandle handle;
+
+    ASSERT_FALSE(thread.IsRunning());
+    ASSERT_TRUE(PlatformThread::Create(0, &thread, &handle));
+    thread.WaitForTerminationReady();
+    ASSERT_TRUE(thread.IsRunning());
+
+    thread.MarkForTermination();
+    PlatformThread::Join(handle);
+    ASSERT_FALSE(thread.IsRunning());
+  }
 }
 
 }  // namespace base
diff --git a/base/threading/sequenced_worker_pool.cc b/base/threading/sequenced_worker_pool.cc
index 3cc50f4..081a49f 100644
--- a/base/threading/sequenced_worker_pool.cc
+++ b/base/threading/sequenced_worker_pool.cc
@@ -19,7 +19,7 @@
 #include "base/lazy_instance.h"
 #include "base/logging.h"
 #include "base/macros.h"
-#include "base/memory/linked_ptr.h"
+#include "base/memory/scoped_ptr.h"
 #include "base/stl_util.h"
 #include "base/strings/stringprintf.h"
 #include "base/synchronization/condition_variable.h"
@@ -448,7 +448,7 @@
   // Owning pointers to all threads we've created so far, indexed by
   // ID. Since we lazily create threads, this may be less than
   // max_threads_ and will be initially empty.
-  typedef std::map<PlatformThreadId, linked_ptr<Worker> > ThreadMap;
+  using ThreadMap = std::map<PlatformThreadId, scoped_ptr<Worker>>;
   ThreadMap threads_;
 
   // Set to true when we're in the process of creating another thread.
@@ -788,9 +788,8 @@
     AutoLock lock(lock_);
     DCHECK(thread_being_created_);
     thread_being_created_ = false;
-    std::pair<ThreadMap::iterator, bool> result =
-        threads_.insert(
-            std::make_pair(this_worker->tid(), make_linked_ptr(this_worker)));
+    auto result = threads_.insert(
+        std::make_pair(this_worker->tid(), make_scoped_ptr(this_worker)));
     DCHECK(result.second);
 
     while (true) {
diff --git a/base/threading/thread.cc b/base/threading/thread.cc
index 783add8..2a27608 100644
--- a/base/threading/thread.cc
+++ b/base/threading/thread.cc
@@ -51,6 +51,8 @@
       priority(ThreadPriority::NORMAL) {
 }
 
+Thread::Options::Options(const Options& other) = default;
+
 Thread::Options::~Options() {
 }
 
@@ -67,7 +69,7 @@
       message_loop_(nullptr),
       message_loop_timer_slack_(TIMER_SLACK_NONE),
       name_(name),
-      start_event_(false, false) {
+      start_event_(true, false) {
 }
 
 Thread::~Thread() {
diff --git a/base/threading/thread.h b/base/threading/thread.h
index da985da..ec19722 100644
--- a/base/threading/thread.h
+++ b/base/threading/thread.h
@@ -45,6 +45,7 @@
 
     Options();
     Options(MessageLoop::Type type, size_t size);
+    Options(const Options& other);
     ~Options();
 
     // Specifies the type of message loop that will be allocated on the thread.
diff --git a/base/threading/thread_restrictions.h b/base/threading/thread_restrictions.h
index fb536ec..5c1fb13 100644
--- a/base/threading/thread_restrictions.h
+++ b/base/threading/thread_restrictions.h
@@ -81,7 +81,7 @@
 }
 
 namespace views {
-class WindowManagerConnection;
+class ScreenMus;
 }
 
 namespace base {
@@ -233,7 +233,7 @@
 #if !defined(OFFICIAL_BUILD)
   friend class content::SoftwareOutputDeviceMus;  // Interim non-production code
 #endif
-  friend class views::WindowManagerConnection;
+  friend class views::ScreenMus;
 // END USAGE THAT NEEDS TO BE FIXED.
 
 #if ENABLE_THREAD_RESTRICTIONS
diff --git a/base/threading/thread_unittest.cc b/base/threading/thread_unittest.cc
index f6ecbe6..20d031f 100644
--- a/base/threading/thread_unittest.cc
+++ b/base/threading/thread_unittest.cc
@@ -142,8 +142,8 @@
   // Ensure that the thread can work with only 12 kb and still process a
   // message.
   Thread::Options options;
-#if defined(ADDRESS_SANITIZER) && defined(OS_MACOSX)
-  // ASan bloats the stack variables and overflows the 12 kb stack on OSX.
+#if defined(ADDRESS_SANITIZER)
+  // ASan bloats the stack variables and overflows the 12 kb stack.
   options.stack_size = 24*1024;
 #else
   options.stack_size = 12*1024;
@@ -288,3 +288,11 @@
   Thread a("Inert");
   EXPECT_EQ(nullptr, a.task_runner());
 }
+
+TEST_F(ThreadTest, MultipleWaitUntilThreadStarted) {
+  Thread a("MultipleWaitUntilThreadStarted");
+  EXPECT_TRUE(a.Start());
+  // It's OK to call WaitUntilThreadStarted() multiple times.
+  EXPECT_TRUE(a.WaitUntilThreadStarted());
+  EXPECT_TRUE(a.WaitUntilThreadStarted());
+}
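The new test depends on the start_event_ change in thread.cc above: the first constructor argument now makes the event manual-reset, so it stays signaled and repeated waits succeed. A small sketch of the distinction, assuming the WaitableEvent(bool manual_reset, bool initially_signaled) constructor of this base revision:

#include "base/synchronization/waitable_event.h"

void WaitableEventResetSemantics() {
  base::WaitableEvent manual(true /* manual_reset */, false /* signaled */);
  manual.Signal();
  manual.Wait();  // Returns immediately.
  manual.Wait();  // Still returns immediately: manual-reset events stay
                  // signaled until Reset() is called.

  base::WaitableEvent automatic(false /* manual_reset */, false /* signaled */);
  automatic.Signal();
  automatic.Wait();  // Returns immediately and consumes the signal, so a
                     // second Wait() here would block until the next Signal().
}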
diff --git a/base/time/time_mac.cc b/base/time/time_mac.cc
index f2bc5ed..f0c7804 100644
--- a/base/time/time_mac.cc
+++ b/base/time/time_mac.cc
@@ -167,19 +167,21 @@
 
 // static
 Time Time::FromExploded(bool is_local, const Exploded& exploded) {
-  CFGregorianDate date;
-  date.second = exploded.second +
-      exploded.millisecond / static_cast<double>(kMillisecondsPerSecond);
-  date.minute = exploded.minute;
-  date.hour = exploded.hour;
-  date.day = exploded.day_of_month;
-  date.month = exploded.month;
-  date.year = exploded.year;
-
   base::ScopedCFTypeRef<CFTimeZoneRef> time_zone(
-      is_local ? CFTimeZoneCopySystem() : NULL);
-  CFAbsoluteTime seconds = CFGregorianDateGetAbsoluteTime(date, time_zone) +
-      kCFAbsoluteTimeIntervalSince1970;
+      is_local
+          ? CFTimeZoneCopySystem()
+          : CFTimeZoneCreateWithTimeIntervalFromGMT(kCFAllocatorDefault, 0));
+  base::ScopedCFTypeRef<CFCalendarRef> gregorian(CFCalendarCreateWithIdentifier(
+      kCFAllocatorDefault, kCFGregorianCalendar));
+  CFCalendarSetTimeZone(gregorian, time_zone);
+  CFAbsoluteTime absolute_time;
+  // 'S' is not defined in componentDesc in Apple documentation, but can be
+  // found at http://www.opensource.apple.com/source/CF/CF-855.17/CFCalendar.c
+  CFCalendarComposeAbsoluteTime(
+      gregorian, &absolute_time, "yMdHmsS", exploded.year, exploded.month,
+      exploded.day_of_month, exploded.hour, exploded.minute, exploded.second,
+      exploded.millisecond);
+  CFAbsoluteTime seconds = absolute_time + kCFAbsoluteTimeIntervalSince1970;
   return Time(static_cast<int64_t>(seconds * kMicrosecondsPerSecond) +
               kWindowsEpochDeltaMicroseconds);
 }
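A standalone sketch of the CFCalendar composition used in FromExploded() above; the component values are made up, and 'S' consumes the milliseconds field as noted in the comment:

#include <CoreFoundation/CoreFoundation.h>

CFAbsoluteTime ComposeExampleTime() {
  CFTimeZoneRef utc =
      CFTimeZoneCreateWithTimeIntervalFromGMT(kCFAllocatorDefault, 0);
  CFCalendarRef gregorian = CFCalendarCreateWithIdentifier(
      kCFAllocatorDefault, kCFGregorianCalendar);
  CFCalendarSetTimeZone(gregorian, utc);
  CFAbsoluteTime at = 0;
  // Compose 2016-03-01 12:30:45.250 in UTC.
  CFCalendarComposeAbsoluteTime(gregorian, &at, "yMdHmsS",
                                2016, 3, 1, 12, 30, 45, 250);
  CFRelease(gregorian);
  CFRelease(utc);
  return at;  // Seconds since the CF epoch (2001-01-01 00:00:00 UTC).
}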
@@ -195,19 +197,25 @@
                            kCFAbsoluteTimeIntervalSince1970;
 
   base::ScopedCFTypeRef<CFTimeZoneRef> time_zone(
-      is_local ? CFTimeZoneCopySystem() : NULL);
-  CFGregorianDate date = CFAbsoluteTimeGetGregorianDate(seconds, time_zone);
-  // 1 = Monday, ..., 7 = Sunday.
-  int cf_day_of_week = CFAbsoluteTimeGetDayOfWeek(seconds, time_zone);
-
-  exploded->year = date.year;
-  exploded->month = date.month;
-  exploded->day_of_week = cf_day_of_week % 7;
-  exploded->day_of_month = date.day;
-  exploded->hour = date.hour;
-  exploded->minute = date.minute;
+      is_local
+          ? CFTimeZoneCopySystem()
+          : CFTimeZoneCreateWithTimeIntervalFromGMT(kCFAllocatorDefault, 0));
+  base::ScopedCFTypeRef<CFCalendarRef> gregorian(CFCalendarCreateWithIdentifier(
+      kCFAllocatorDefault, kCFGregorianCalendar));
+  CFCalendarSetTimeZone(gregorian, time_zone);
+  int second, day_of_week;
+  // 'E' sets the day of week, but is not defined in componentDesc in Apple
+  // documentation. It can be found in open source code here:
+  // http://www.opensource.apple.com/source/CF/CF-855.17/CFCalendar.c
+  CFCalendarDecomposeAbsoluteTime(gregorian, seconds, "yMdHmsE",
+                                  &exploded->year, &exploded->month,
+                                  &exploded->day_of_month, &exploded->hour,
+                                  &exploded->minute, &second, &day_of_week);
   // Make sure seconds are rounded down towards -infinity.
-  exploded->second = floor(date.second);
+  exploded->second = floor(second);
+  // |Exploded|'s convention for day of week is 0 = Sunday, i.e. different
+  // from CF's 1 = Sunday.
+  exploded->day_of_week = (day_of_week - 1) % 7;
   // Calculate milliseconds ourselves, since we rounded the |seconds|, making
   // sure to round towards -infinity.
   exploded->millisecond =
diff --git a/base/trace_event/common/trace_event_common.h b/base/trace_event/common/trace_event_common.h
index a266cd5..f65b35b 100644
--- a/base/trace_event/common/trace_event_common.h
+++ b/base/trace_event/common/trace_event_common.h
@@ -156,7 +156,7 @@
 //   };
 //
 //   TRACE_EVENT1("foo", "bar", "data",
-//                scoped_refptr<ConvertableToTraceFormat>(new MyData()));
+//                scoped_ptr<ConvertableToTraceFormat>(new MyData()));
 //
 // The trace framework will take ownership of the passed pointer and it will
 // be free'd when the trace buffer is flushed.
@@ -926,6 +926,11 @@
                                    name, id, TRACE_EVENT_FLAG_COPY, arg1_name, \
                                    arg1_val, arg2_name, arg2_val)
 
+// TRACE_EVENT_METADATA* events are information related to other
+// injected events, not events in their own right.
+#define TRACE_EVENT_METADATA1(category_group, name, arg1_name, arg1_val) \
+  INTERNAL_TRACE_EVENT_METADATA_ADD(category_group, name, arg1_name, arg1_val)
+
 // Records a clock sync event.
 #define TRACE_EVENT_CLOCK_SYNC_RECEIVER(sync_id)                               \
   INTERNAL_TRACE_EVENT_ADD(                                                    \
@@ -962,6 +967,21 @@
       TRACE_EVENT_PHASE_DELETE_OBJECT, category_group, name,         \
       TRACE_ID_DONT_MANGLE(id), TRACE_EVENT_FLAG_NONE)
 
+// Records entering and leaving trace event contexts. |category_group| and
+// |name| specify the context category and type. |context| is a
+// snapshotted context object id.
+#define TRACE_EVENT_ENTER_CONTEXT(category_group, name, context) \
+  INTERNAL_TRACE_EVENT_ADD_WITH_ID(                              \
+      TRACE_EVENT_PHASE_ENTER_CONTEXT, category_group, name,     \
+      TRACE_ID_DONT_MANGLE(context), TRACE_EVENT_FLAG_NONE)
+#define TRACE_EVENT_LEAVE_CONTEXT(category_group, name, context) \
+  INTERNAL_TRACE_EVENT_ADD_WITH_ID(                              \
+      TRACE_EVENT_PHASE_LEAVE_CONTEXT, category_group, name,     \
+      TRACE_ID_DONT_MANGLE(context), TRACE_EVENT_FLAG_NONE)
+#define TRACE_EVENT_SCOPED_CONTEXT(category_group, name, context) \
+  INTERNAL_TRACE_EVENT_SCOPED_CONTEXT(category_group, name,       \
+                                      TRACE_ID_DONT_MANGLE(context))
+
 // Macro to efficiently determine if a given category group is enabled.
 #define TRACE_EVENT_CATEGORY_GROUP_ENABLED(category_group, ret)             \
   do {                                                                      \
@@ -1025,6 +1045,8 @@
 #define TRACE_EVENT_PHASE_MEMORY_DUMP ('v')
 #define TRACE_EVENT_PHASE_MARK ('R')
 #define TRACE_EVENT_PHASE_CLOCK_SYNC ('c')
+#define TRACE_EVENT_PHASE_ENTER_CONTEXT ('(')
+#define TRACE_EVENT_PHASE_LEAVE_CONTEXT (')')
 
 // Flags for changing the behavior of TRACE_EVENT_API_ADD_TRACE_EVENT.
 #define TRACE_EVENT_FLAG_NONE (static_cast<unsigned int>(0))
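A hypothetical usage sketch of the new context macros (category, name, and id values here are invented):

#include <stdint.h>

#include "base/trace_event/trace_event.h"

// Scoped form: emits the enter event immediately and the matching leave
// event when the scope closes.
void HandleFrame(uint64_t context_id) {
  TRACE_EVENT_SCOPED_CONTEXT("example", "FrameContext", context_id);
  // ... work attributed to |context_id| ...
}

// Explicit form, for contexts whose lifetime does not match a C++ scope.
void BeginWork(uint64_t context_id) {
  TRACE_EVENT_ENTER_CONTEXT("example", "FrameContext", context_id);
}

void EndWork(uint64_t context_id) {
  TRACE_EVENT_LEAVE_CONTEXT("example", "FrameContext", context_id);
}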
diff --git a/base/trace_event/heap_profiler_stack_frame_deduplicator.cc b/base/trace_event/heap_profiler_stack_frame_deduplicator.cc
index cf3d198..9568525 100644
--- a/base/trace_event/heap_profiler_stack_frame_deduplicator.cc
+++ b/base/trace_event/heap_profiler_stack_frame_deduplicator.cc
@@ -19,6 +19,7 @@
 StackFrameDeduplicator::FrameNode::FrameNode(StackFrame frame,
                                              int parent_frame_index)
     : frame(frame), parent_frame_index(parent_frame_index) {}
+StackFrameDeduplicator::FrameNode::FrameNode(const FrameNode& other) = default;
 StackFrameDeduplicator::FrameNode::~FrameNode() {}
 
 StackFrameDeduplicator::StackFrameDeduplicator() {}
@@ -76,7 +77,7 @@
     SStringPrintf(&stringify_buffer, "\"%d\":", i);
     out->append(stringify_buffer);
 
-    scoped_refptr<TracedValue> frame_node_value = new TracedValue;
+    scoped_ptr<TracedValue> frame_node_value(new TracedValue);
     frame_node_value->SetString("name", frame_node->frame);
     if (frame_node->parent_frame_index >= 0) {
       SStringPrintf(&stringify_buffer, "%d", frame_node->parent_frame_index);
diff --git a/base/trace_event/heap_profiler_stack_frame_deduplicator.h b/base/trace_event/heap_profiler_stack_frame_deduplicator.h
index 60df1ba..4932534 100644
--- a/base/trace_event/heap_profiler_stack_frame_deduplicator.h
+++ b/base/trace_event/heap_profiler_stack_frame_deduplicator.h
@@ -31,6 +31,7 @@
   // A node in the call tree.
   struct FrameNode {
     FrameNode(StackFrame frame, int parent_frame_index);
+    FrameNode(const FrameNode& other);
     ~FrameNode();
 
     StackFrame frame;
@@ -46,6 +47,7 @@
   using ConstIterator = std::vector<FrameNode>::const_iterator;
 
   StackFrameDeduplicator();
+  ~StackFrameDeduplicator() override;
 
   // Inserts a backtrace where |beginFrame| is a pointer to the bottom frame
   // (e.g. main) and |endFrame| is a pointer past the top frame (most recently
@@ -65,8 +67,6 @@
   void EstimateTraceMemoryOverhead(TraceEventMemoryOverhead* overhead) override;
 
  private:
-  ~StackFrameDeduplicator() override;
-
   std::map<StackFrame, int> roots_;
   std::vector<FrameNode> frames_;
 
diff --git a/base/trace_event/heap_profiler_stack_frame_deduplicator_unittest.cc b/base/trace_event/heap_profiler_stack_frame_deduplicator_unittest.cc
index 433c633..2464036 100644
--- a/base/trace_event/heap_profiler_stack_frame_deduplicator_unittest.cc
+++ b/base/trace_event/heap_profiler_stack_frame_deduplicator_unittest.cc
@@ -5,7 +5,7 @@
 #include <iterator>
 
 #include "base/macros.h"
-#include "base/memory/ref_counted.h"
+#include "base/memory/scoped_ptr.h"
 #include "base/trace_event/heap_profiler_allocation_context.h"
 #include "base/trace_event/heap_profiler_stack_frame_deduplicator.h"
 #include "testing/gtest/include/gtest/gtest.h"
@@ -30,7 +30,7 @@
   //   CreateWidget [1]
   //     malloc [2]
 
-  scoped_refptr<StackFrameDeduplicator> dedup = new StackFrameDeduplicator;
+  scoped_ptr<StackFrameDeduplicator> dedup(new StackFrameDeduplicator);
   ASSERT_EQ(2, dedup->Insert(std::begin(bt), std::end(bt)));
 
   auto iter = dedup->begin();
@@ -63,7 +63,7 @@
   // Note that there will be two instances of CreateWidget,
   // with different parents.
 
-  scoped_refptr<StackFrameDeduplicator> dedup = new StackFrameDeduplicator;
+  scoped_ptr<StackFrameDeduplicator> dedup(new StackFrameDeduplicator);
   ASSERT_EQ(1, dedup->Insert(std::begin(bt0), std::end(bt0)));
   ASSERT_EQ(3, dedup->Insert(std::begin(bt1), std::end(bt1)));
 
@@ -95,7 +95,7 @@
   //
   // Note that BrowserMain will be re-used.
 
-  scoped_refptr<StackFrameDeduplicator> dedup = new StackFrameDeduplicator;
+  scoped_ptr<StackFrameDeduplicator> dedup(new StackFrameDeduplicator);
   ASSERT_EQ(1, dedup->Insert(std::begin(bt0), std::end(bt0)));
   ASSERT_EQ(2, dedup->Insert(std::begin(bt1), std::end(bt1)));
 
@@ -121,7 +121,7 @@
 TEST(StackFrameDeduplicatorTest, NullPaddingIsRemoved) {
   StackFrame bt0[] = {kBrowserMain, nullptr, nullptr, nullptr};
 
-  scoped_refptr<StackFrameDeduplicator> dedup = new StackFrameDeduplicator;
+  scoped_ptr<StackFrameDeduplicator> dedup(new StackFrameDeduplicator);
 
   // There are four frames in the backtrace, but the null pointers should be
   // skipped, so only one frame is inserted, which will have index 0.
diff --git a/base/trace_event/heap_profiler_type_name_deduplicator.h b/base/trace_event/heap_profiler_type_name_deduplicator.h
index 317ea5e..2d26c73 100644
--- a/base/trace_event/heap_profiler_type_name_deduplicator.h
+++ b/base/trace_event/heap_profiler_type_name_deduplicator.h
@@ -21,19 +21,18 @@
 class BASE_EXPORT TypeNameDeduplicator : public ConvertableToTraceFormat {
  public:
   TypeNameDeduplicator();
+  ~TypeNameDeduplicator() override;
 
   // Inserts a type name and returns its ID.
   int Insert(const char* type_name);
 
+  // Writes the type ID -> type name mapping to the trace log.
+  void AppendAsTraceFormat(std::string* out) const override;
+
   // Estimates memory overhead including |sizeof(TypeNameDeduplicator)|.
   void EstimateTraceMemoryOverhead(TraceEventMemoryOverhead* overhead) override;
 
  private:
-  ~TypeNameDeduplicator() override;
-
-  // Writes the type ID -> type name mapping to the trace log.
-  void AppendAsTraceFormat(std::string* out) const override;
-
   // Map from type name to type ID.
   std::map<const char*, int> type_ids_;
 
diff --git a/base/trace_event/heap_profiler_type_name_deduplicator_unittest.cc b/base/trace_event/heap_profiler_type_name_deduplicator_unittest.cc
index 82c8fb5..92ffcf8 100644
--- a/base/trace_event/heap_profiler_type_name_deduplicator_unittest.cc
+++ b/base/trace_event/heap_profiler_type_name_deduplicator_unittest.cc
@@ -5,7 +5,6 @@
 #include <string>
 
 #include "base/json/json_reader.h"
-#include "base/memory/ref_counted.h"
 #include "base/memory/scoped_ptr.h"
 #include "base/trace_event/heap_profiler_type_name_deduplicator.h"
 #include "base/values.h"
@@ -21,9 +20,9 @@
 const char kString[] = "string";
 const char kNeedsEscape[] = "\"quotes\"";
 
-scoped_ptr<Value> DumpAndReadBack(const ConvertableToTraceFormat& convertable) {
+scoped_ptr<Value> DumpAndReadBack(const TypeNameDeduplicator& deduplicator) {
   std::string json;
-  convertable.AppendAsTraceFormat(&json);
+  deduplicator.AppendAsTraceFormat(&json);
   return JSONReader::Read(json);
 }
 
@@ -34,7 +33,7 @@
   // 2: bool
   // 3: string
 
-  scoped_refptr<TypeNameDeduplicator> dedup = new TypeNameDeduplicator;
+  scoped_ptr<TypeNameDeduplicator> dedup(new TypeNameDeduplicator);
   ASSERT_EQ(1, dedup->Insert(kInt));
   ASSERT_EQ(2, dedup->Insert(kBool));
   ASSERT_EQ(3, dedup->Insert(kString));
@@ -49,7 +48,7 @@
 }
 
 TEST(TypeNameDeduplicatorTest, EscapeTypeName) {
-  scoped_refptr<TypeNameDeduplicator> dedup = new TypeNameDeduplicator;
+  scoped_ptr<TypeNameDeduplicator> dedup(new TypeNameDeduplicator);
   ASSERT_EQ(1, dedup->Insert(kNeedsEscape));
 
   // Reading json should not fail, because the type name should have been
diff --git a/base/trace_event/memory_allocator_dump.cc b/base/trace_event/memory_allocator_dump.cc
index 5c5af7e..f9b5799 100644
--- a/base/trace_event/memory_allocator_dump.cc
+++ b/base/trace_event/memory_allocator_dump.cc
@@ -28,7 +28,8 @@
     : absolute_name_(absolute_name),
       process_memory_dump_(process_memory_dump),
       attributes_(new TracedValue),
-      guid_(guid) {
+      guid_(guid),
+      flags_(Flags::DEFAULT) {
   // The |absolute_name| cannot be empty.
   DCHECK(!absolute_name.empty());
 
@@ -90,6 +91,8 @@
   value->BeginDictionaryWithCopiedName(absolute_name_);
   value->SetString("guid", guid_.ToString());
   value->SetValue("attrs", *attributes_);
+  if (flags_)
+    value->SetInteger("flags", flags_);
   value->EndDictionary();  // "allocator_name/heap_subheap": { ... }
 }
 
diff --git a/base/trace_event/memory_allocator_dump.h b/base/trace_event/memory_allocator_dump.h
index 6c514fa..9f91de9 100644
--- a/base/trace_event/memory_allocator_dump.h
+++ b/base/trace_event/memory_allocator_dump.h
@@ -12,7 +12,7 @@
 #include "base/base_export.h"
 #include "base/logging.h"
 #include "base/macros.h"
-#include "base/memory/ref_counted.h"
+#include "base/memory/scoped_ptr.h"
 #include "base/trace_event/memory_allocator_dump_guid.h"
 #include "base/values.h"
 
@@ -26,6 +26,13 @@
 // Data model for user-land memory allocator dumps.
 class BASE_EXPORT MemoryAllocatorDump {
  public:
+  enum Flags {
+    DEFAULT = 0,
+
+    // A dump marked weak will be discarded by TraceViewer.
+    WEAK = 1 << 0,
+  };
+
   // MemoryAllocatorDump is owned by ProcessMemoryDump.
   MemoryAllocatorDump(const std::string& absolute_name,
                       ProcessMemoryDump* process_memory_dump,
@@ -68,6 +75,11 @@
     return process_memory_dump_;
   }
 
+  // Use enum Flags to set values.
+  void set_flags(int flags) { flags_ |= flags; }
+  void clear_flags(int flags) { flags_ &= ~flags; }
+  int flags() { return flags_; }
+
   // |guid| is an optional global dump identifier, unique across all processes
   // within the scope of a global dump. It is only required when using the
   // graph APIs (see TODO_method_name) to express retention / suballocation or
@@ -81,8 +93,9 @@
  private:
   const std::string absolute_name_;
   ProcessMemoryDump* const process_memory_dump_;  // Not owned (PMD owns this).
-  scoped_refptr<TracedValue> attributes_;
+  scoped_ptr<TracedValue> attributes_;
   MemoryAllocatorDumpGuid guid_;
+  int flags_;  // See enum Flags.
 
   // A local buffer for Sprintf conversion on fastpath. Avoids allocating
   // temporary strings on each AddScalar() call.
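The new accessors follow the usual bitmask pattern: set_flags() ORs bits in, clear_flags() masks them out. A brief usage sketch (the dump name and the surrounding function are illustrative):

#include "base/trace_event/memory_allocator_dump.h"
#include "base/trace_event/process_memory_dump.h"

void MarkCacheDumpWeak(base::trace_event::ProcessMemoryDump* pmd) {
  using base::trace_event::MemoryAllocatorDump;
  MemoryAllocatorDump* mad = pmd->CreateAllocatorDump("cache/object_cache");
  mad->set_flags(MemoryAllocatorDump::WEAK);  // Discarded by TraceViewer.
  if (mad->flags() & MemoryAllocatorDump::WEAK)
    mad->clear_flags(MemoryAllocatorDump::WEAK);  // Back to DEFAULT.
}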
diff --git a/base/trace_event/memory_allocator_dump_unittest.cc b/base/trace_event/memory_allocator_dump_unittest.cc
index d1cfe91..649991b 100644
--- a/base/trace_event/memory_allocator_dump_unittest.cc
+++ b/base/trace_event/memory_allocator_dump_unittest.cc
@@ -128,7 +128,7 @@
 
 TEST(MemoryAllocatorDumpTest, DumpIntoProcessMemoryDump) {
   FakeMemoryAllocatorDumpProvider fmadp;
-  ProcessMemoryDump pmd(new MemoryDumpSessionState(nullptr, nullptr));
+  ProcessMemoryDump pmd(new MemoryDumpSessionState);
   MemoryDumpArgs dump_args = {MemoryDumpLevelOfDetail::DETAILED};
 
   fmadp.OnMemoryDump(dump_args, &pmd);
@@ -167,7 +167,7 @@
   ASSERT_FALSE(attrs->HasKey(MemoryAllocatorDump::kNameObjectCount));
 
   // Check that the AsValueInfo doesn't hit any DCHECK.
-  scoped_refptr<TracedValue> traced_value(new TracedValue());
+  scoped_ptr<TracedValue> traced_value(new TracedValue);
   pmd.AsValueInto(traced_value.get());
 }
 
@@ -175,7 +175,7 @@
 #if !defined(NDEBUG) && !defined(OS_ANDROID) && !defined(OS_IOS)
 TEST(MemoryAllocatorDumpTest, ForbidDuplicatesDeathTest) {
   FakeMemoryAllocatorDumpProvider fmadp;
-  ProcessMemoryDump pmd(new MemoryDumpSessionState(nullptr, nullptr));
+  ProcessMemoryDump pmd(new MemoryDumpSessionState);
   pmd.CreateAllocatorDump("foo_allocator");
   pmd.CreateAllocatorDump("bar_allocator/heap");
   ASSERT_DEATH(pmd.CreateAllocatorDump("foo_allocator"), "");
diff --git a/base/trace_event/memory_dump_manager.cc b/base/trace_event/memory_dump_manager.cc
index aa81e00..4ba7fcb 100644
--- a/base/trace_event/memory_dump_manager.cc
+++ b/base/trace_event/memory_dump_manager.cc
@@ -20,17 +20,10 @@
 #include "base/trace_event/memory_dump_provider.h"
 #include "base/trace_event/memory_dump_session_state.h"
 #include "base/trace_event/process_memory_dump.h"
+#include "base/trace_event/trace_event.h"
 #include "base/trace_event/trace_event_argument.h"
 #include "build/build_config.h"
 
-#if !defined(OS_NACL)
-#include "base/trace_event/process_memory_totals_dump_provider.h"
-#endif
-
-#if defined(OS_LINUX) || defined(OS_ANDROID)
-#include "base/trace_event/process_memory_maps_dump_provider.h"
-#endif
-
 #if defined(OS_ANDROID)
 #include "base/trace_event/java_heap_dump_provider_android.h"
 #endif
@@ -85,6 +78,33 @@
   }
 }
 
+// Proxy class which wraps a ConvertableToTraceFormat owned by the
+// |session_state| into a proxy object that can be added to the trace event log.
+// This is to solve the problem that the MemoryDumpSessionState is refcounted
+// but the tracing subsystem wants a scoped_ptr<ConvertableToTraceFormat>.
+template <typename T>
+struct SessionStateConvertableProxy : public ConvertableToTraceFormat {
+  using GetterFunctPtr = T* (MemoryDumpSessionState::*)() const;
+
+  SessionStateConvertableProxy(
+      scoped_refptr<MemoryDumpSessionState> session_state,
+      GetterFunctPtr getter_function)
+      : session_state(session_state), getter_function(getter_function) {}
+
+  void AppendAsTraceFormat(std::string* out) const override {
+    return (session_state.get()->*getter_function)()->AppendAsTraceFormat(out);
+  }
+
+  void EstimateTraceMemoryOverhead(
+      TraceEventMemoryOverhead* overhead) override {
+    return (session_state.get()->*getter_function)()
+        ->EstimateTraceMemoryOverhead(overhead);
+  }
+
+  scoped_refptr<MemoryDumpSessionState> session_state;
+  GetterFunctPtr const getter_function;
+};
+
 }  // namespace
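SessionStateConvertableProxy above relies on C++ pointer-to-member-function dispatch. A self-contained sketch of the same mechanism with invented types (std::shared_ptr stands in for scoped_refptr):

#include <iostream>
#include <memory>

struct State {
  int* counter() const { return &value; }
  mutable int value = 0;
};

// Same shape as GetterFunctPtr above: a pointer to a const member function.
using Getter = int* (State::*)() const;

struct Proxy {
  Proxy(std::shared_ptr<State> state, Getter getter)
      : state(state), getter(getter) {}
  // The (object->*member_fn)() syntax mirrors the AppendAsTraceFormat()
  // forwarding in the proxy above.
  void Bump() { ++*(state.get()->*getter)(); }
  std::shared_ptr<State> state;
  Getter const getter;
};

int main() {
  auto state = std::make_shared<State>();
  Proxy proxy(state, &State::counter);
  proxy.Bump();
  std::cout << state->value << "\n";  // Prints 1.
}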
 
 // static
@@ -126,22 +146,34 @@
       is_coordinator_(false),
       memory_tracing_enabled_(0),
       tracing_process_id_(kInvalidTracingProcessId),
-      dumper_registrations_ignored_for_testing_(false) {
+      dumper_registrations_ignored_for_testing_(false),
+      heap_profiling_enabled_(false) {
   g_next_guid.GetNext();  // Make sure that first guid is not zero.
 
-  heap_profiling_enabled_ = CommandLine::InitializedForCurrentProcess()
-                                ? CommandLine::ForCurrentProcess()->HasSwitch(
-                                      switches::kEnableHeapProfiling)
-                                : false;
-
-  if (heap_profiling_enabled_)
-    AllocationContextTracker::SetCaptureEnabled(true);
+  // At this point the command line may not be initialized but we try to
+  // enable the heap profiler to capture allocations as soon as possible.
+  EnableHeapProfilingIfNeeded();
 }
 
 MemoryDumpManager::~MemoryDumpManager() {
   TraceLog::GetInstance()->RemoveEnabledStateObserver(this);
 }
 
+void MemoryDumpManager::EnableHeapProfilingIfNeeded() {
+  if (heap_profiling_enabled_)
+    return;
+
+  if (!CommandLine::InitializedForCurrentProcess() ||
+      !CommandLine::ForCurrentProcess()->HasSwitch(
+          switches::kEnableHeapProfiling))
+    return;
+
+  AllocationContextTracker::SetCaptureEnabled(true);
+  for (auto mdp : dump_providers_)
+    mdp->dump_provider->OnHeapProfilingEnabled(true);
+  heap_profiling_enabled_ = true;
+}
+
 void MemoryDumpManager::Initialize(MemoryDumpManagerDelegate* delegate,
                                    bool is_coordinator) {
   {
@@ -150,23 +182,14 @@
     DCHECK(!delegate_);
     delegate_ = delegate;
     is_coordinator_ = is_coordinator;
+    EnableHeapProfilingIfNeeded();
   }
 
 // Enable the core dump providers.
-#if !defined(OS_NACL)
-  RegisterDumpProvider(ProcessMemoryTotalsDumpProvider::GetInstance(),
-                       "ProcessMemoryTotals", nullptr);
-#endif
-
 #if defined(MALLOC_MEMORY_TRACING_SUPPORTED)
   RegisterDumpProvider(MallocDumpProvider::GetInstance(), "Malloc", nullptr);
 #endif
 
-#if defined(OS_LINUX) || defined(OS_ANDROID)
-  RegisterDumpProvider(ProcessMemoryMapsDumpProvider::GetInstance(),
-                       "ProcessMemoryMaps", nullptr);
-#endif
-
 #if defined(OS_ANDROID)
   RegisterDumpProvider(JavaHeapDumpProvider::GetInstance(), "JavaHeap",
                        nullptr);
@@ -189,6 +212,36 @@
     MemoryDumpProvider* mdp,
     const char* name,
     const scoped_refptr<SingleThreadTaskRunner>& task_runner,
+    MemoryDumpProvider::Options options) {
+  options.dumps_on_single_thread_task_runner = true;
+  RegisterDumpProviderInternal(mdp, name, task_runner, options);
+}
+
+void MemoryDumpManager::RegisterDumpProvider(
+    MemoryDumpProvider* mdp,
+    const char* name,
+    const scoped_refptr<SingleThreadTaskRunner>& task_runner) {
+  // Set |dumps_on_single_thread_task_runner| to true because all providers
+  // without a task runner are run on the dump thread.
+  MemoryDumpProvider::Options options;
+  options.dumps_on_single_thread_task_runner = true;
+  RegisterDumpProviderInternal(mdp, name, task_runner, options);
+}
+
+void MemoryDumpManager::RegisterDumpProviderWithSequencedTaskRunner(
+    MemoryDumpProvider* mdp,
+    const char* name,
+    const scoped_refptr<SequencedTaskRunner>& task_runner,
+    MemoryDumpProvider::Options options) {
+  DCHECK(task_runner);
+  options.dumps_on_single_thread_task_runner = false;
+  RegisterDumpProviderInternal(mdp, name, task_runner, options);
+}
+
+void MemoryDumpManager::RegisterDumpProviderInternal(
+    MemoryDumpProvider* mdp,
+    const char* name,
+    const scoped_refptr<SequencedTaskRunner>& task_runner,
     const MemoryDumpProvider::Options& options) {
   if (dumper_registrations_ignored_for_testing_)
     return;
@@ -209,13 +262,6 @@
     mdp->OnHeapProfilingEnabled(true);
 }
 
-void MemoryDumpManager::RegisterDumpProvider(
-    MemoryDumpProvider* mdp,
-    const char* name,
-    const scoped_refptr<SingleThreadTaskRunner>& task_runner) {
-  RegisterDumpProvider(mdp, name, task_runner, MemoryDumpProvider::Options());
-}
-
 void MemoryDumpManager::UnregisterDumpProvider(MemoryDumpProvider* mdp) {
   UnregisterDumpProviderInternal(mdp, false /* delete_async */);
 }
@@ -246,28 +292,29 @@
   if (take_mdp_ownership_and_delete_async) {
     // The MDP will be deleted whenever the MDPInfo struct will, that is either:
     // - At the end of this function, if no dump is in progress.
-    // - In the prologue of the ContinueAsyncProcessDump().
+    // - In SetupNextMemoryDump() or InvokeOnMemoryDump(), when the MDPInfo is
+    //   removed from |pending_dump_providers|.
     DCHECK(!(*mdp_iter)->owned_dump_provider);
     (*mdp_iter)->owned_dump_provider = std::move(owned_mdp);
   } else if (subtle::NoBarrier_Load(&memory_tracing_enabled_)) {
     // If you hit this DCHECK, your dump provider has a bug.
     // Unregistration of a MemoryDumpProvider is safe only if:
-    // - The MDP has specified a thread affinity (via task_runner()) AND
-    //   the unregistration happens on the same thread (so the MDP cannot
+    // - The MDP has specified a sequenced task runner affinity AND the
+    //   unregistration happens on the same task runner, so that the MDP cannot
     //   unregister and be in the middle of an OnMemoryDump() at the same time.
-    // - The MDP has NOT specified a thread affinity and its ownership is
+    // - The MDP has NOT specified a task runner affinity and its ownership is
     //   transferred via UnregisterAndDeleteDumpProviderSoon().
     // In all the other cases, it is not possible to guarantee that the
     // unregistration will not race with OnMemoryDump() calls.
     DCHECK((*mdp_iter)->task_runner &&
-           (*mdp_iter)->task_runner->BelongsToCurrentThread())
+           (*mdp_iter)->task_runner->RunsTasksOnCurrentThread())
         << "MemoryDumpProvider \"" << (*mdp_iter)->name << "\" attempted to "
         << "unregister itself in a racy way. Please file a crbug.";
   }
 
   // The MDPInfo instance can still be referenced by the
   // |ProcessMemoryDumpAsyncState.pending_dump_providers|. For this reason
-  // the MDPInfo is flagged as disabled. It will cause ContinueAsyncProcessDump
+  // the MDPInfo is flagged as disabled. It will cause InvokeOnMemoryDump()
   // to just skip it, without actually invoking the |mdp|, which might be
   // destroyed by the caller soon after this method returns.
   (*mdp_iter)->disabled = true;
@@ -326,54 +373,41 @@
   scoped_ptr<ProcessMemoryDumpAsyncState> pmd_async_state;
   {
     AutoLock lock(lock_);
-    pmd_async_state.reset(
-        new ProcessMemoryDumpAsyncState(args, dump_providers_, session_state_,
-                                        callback, dump_thread_->task_runner()));
+    // |dump_thread_| can be nullptr if tracing was disabled before reaching
+    // here. SetupNextMemoryDump() is robust enough to tolerate it and will
+    // NACK the dump.
+    pmd_async_state.reset(new ProcessMemoryDumpAsyncState(
+        args, dump_providers_, session_state_, callback,
+        dump_thread_ ? dump_thread_->task_runner() : nullptr));
   }
 
   TRACE_EVENT_WITH_FLOW0(kTraceCategory, "MemoryDumpManager::CreateProcessDump",
                          TRACE_ID_MANGLE(args.dump_guid),
                          TRACE_EVENT_FLAG_FLOW_OUT);
 
-  // Start the thread hop. |dump_providers_| are kept sorted by thread, so
-  // ContinueAsyncProcessDump will hop at most once per thread (w.r.t. thread
-  // affinity specified by the MemoryDumpProvider(s) in RegisterDumpProvider()).
-  ContinueAsyncProcessDump(pmd_async_state.release());
+  // Start the process dump. This involves task runner hops as specified by the
+  // MemoryDumpProvider(s) in RegisterDumpProvider().
+  SetupNextMemoryDump(std::move(pmd_async_state));
 }
 
-// At most one ContinueAsyncProcessDump() can be active at any time for a given
-// PMD, regardless of status of the |lock_|. |lock_| is used here purely to
-// ensure consistency w.r.t. (un)registrations of |dump_providers_|.
-// The linearization of dump providers' OnMemoryDump invocations is achieved by
-// means of subsequent PostTask(s).
-//
-// 1) Prologue:
-//   - If this was the last hop, create a trace event, add it to the trace
-//     and finalize (invoke callback).
-//   - Check if we are on the right thread. If not hop and continue there.
-//   - Check if the dump provider is disabled, if so skip the dump.
-// 2) Invoke the dump provider's OnMemoryDump() (unless skipped).
-// 3) Epilogue:
-//   - Unregister the dump provider if it failed too many times consecutively.
-//   - Pop() the MDP from the |pending_dump_providers| list, eventually
-//     destroying the MDPInfo if that was unregistered in the meantime.
-void MemoryDumpManager::ContinueAsyncProcessDump(
-    ProcessMemoryDumpAsyncState* owned_pmd_async_state) {
+// Posts InvokeOnMemoryDump() to the dump provider's sequenced task runner. A
+// PostTask is always required for a generic SequencedTaskRunner to ensure that
+// no other task is running on it concurrently. SetupNextMemoryDump() and
+// InvokeOnMemoryDump() are called alternately, which linearizes the dump
+// provider's OnMemoryDump() invocations.
+// At most one of either SetupNextMemoryDump() or InvokeOnMemoryDump() can be
+// active at any time for a given PMD, regardless of status of the |lock_|.
+// |lock_| is used in these functions purely to ensure consistency w.r.t.
+// (un)registrations of |dump_providers_|.
+void MemoryDumpManager::SetupNextMemoryDump(
+    scoped_ptr<ProcessMemoryDumpAsyncState> pmd_async_state) {
   // Initializes the ThreadLocalEventBuffer to guarantee that the TRACE_EVENTs
   // in the PostTask below don't end up registering their own dump providers
   // (for discounting trace memory overhead) while holding the |lock_|.
   TraceLog::GetInstance()->InitializeThreadLocalEventBufferIfSupported();
 
-  // In theory |owned_pmd_async_state| should be a scoped_ptr. The only reason
-  // why it isn't is because of the corner case logic of |did_post_task| below,
-  // which needs to take back the ownership of the |pmd_async_state| when a
-  // thread goes away and consequently the PostTask() fails.
-  // Unfortunately, PostTask() destroys the scoped_ptr arguments upon failure
-  // to prevent accidental leaks. Using a scoped_ptr would prevent us to to
-  // skip the hop and move on. Hence the manual naked -> scoped ptr juggling.
-  auto pmd_async_state = make_scoped_ptr(owned_pmd_async_state);
-  owned_pmd_async_state = nullptr;
-
+  // If this was the last hop, create a trace event, add it to the trace and
+  // finalize process dump (invoke callback).
   if (pmd_async_state->pending_dump_providers.empty())
     return FinalizeDumpAndAddToTrace(std::move(pmd_async_state));
 
@@ -382,60 +416,103 @@
   MemoryDumpProviderInfo* mdpinfo =
       pmd_async_state->pending_dump_providers.back().get();
 
-  // If the dump provider did not specify a thread affinity, dump on
-  // |dump_thread_|. Note that |dump_thread_| might have been Stop()-ed at this
-  // point (if tracing was disabled in the meanwhile). In such case the
-  // PostTask() below will fail, but |task_runner| should always be non-null.
-  SingleThreadTaskRunner* task_runner = mdpinfo->task_runner.get();
-  if (!task_runner)
+  // If the dump provider did not specify a task runner affinity, dump on
+  // |dump_thread_|. Note that |dump_thread_| might have been destroyed in the
+  // meantime.
+  SequencedTaskRunner* task_runner = mdpinfo->task_runner.get();
+  if (!task_runner) {
+    DCHECK(mdpinfo->options.dumps_on_single_thread_task_runner);
     task_runner = pmd_async_state->dump_thread_task_runner.get();
-
-  bool post_task_failed = false;
-  if (!task_runner->BelongsToCurrentThread()) {
-    // It's time to hop onto another thread.
-    post_task_failed = !task_runner->PostTask(
-        FROM_HERE, Bind(&MemoryDumpManager::ContinueAsyncProcessDump,
-                        Unretained(this), Unretained(pmd_async_state.get())));
-    if (!post_task_failed) {
-      // Ownership is tranferred to the next ContinueAsyncProcessDump().
-      ignore_result(pmd_async_state.release());
-      return;
+    if (!task_runner) {
+      // If tracing was disabled before reaching CreateProcessDump(), the
+      // dump_thread_ would already have been torn down. Nack the current dump
+      // and continue.
+      pmd_async_state->dump_successful = false;
+      pmd_async_state->pending_dump_providers.pop_back();
+      return SetupNextMemoryDump(std::move(pmd_async_state));
     }
   }
 
-  // At this point either:
-  // - The MDP has a task runner affinity and we are on the right thread.
-  // - The MDP has a task runner affinity but the underlying thread is gone,
-  //   hence the above |post_task_failed| == true.
-  // - The MDP does NOT have a task runner affinity. A locked access is required
-  //   to R/W |disabled| (for the UnregisterAndDeleteDumpProviderSoon() case).
-  bool should_dump;
-  const char* disabled_reason = nullptr;
-  {
+  if (mdpinfo->options.dumps_on_single_thread_task_runner &&
+      task_runner->RunsTasksOnCurrentThread()) {
+    // If |dumps_on_single_thread_task_runner| is true then no PostTask is
+    // required if we are on the right thread.
+    return InvokeOnMemoryDump(pmd_async_state.release());
+  }
+
+  bool did_post_task = task_runner->PostTask(
+      FROM_HERE, Bind(&MemoryDumpManager::InvokeOnMemoryDump, Unretained(this),
+                      Unretained(pmd_async_state.get())));
+
+  if (did_post_task) {
+    // Ownership is transferred to InvokeOnMemoryDump().
+    ignore_result(pmd_async_state.release());
+    return;
+  }
+
+  // PostTask usually fails only if the process or thread is shutting down, so
+  // the dump provider is disabled here. Unbound dump providers, however, are
+  // not disabled: the utility thread is normally shut down when tracing is
+  // disabled, and getting here in that case is expected.
+  if (mdpinfo->task_runner) {
+    LOG(ERROR) << "Disabling MemoryDumpProvider \"" << mdpinfo->name
+               << "\". Failed to post task on the task runner provided.";
+
+    // A locked access is required to R/W |disabled| (for the
+    // UnregisterAndDeleteDumpProviderSoon() case).
     AutoLock lock(lock_);
-    if (!mdpinfo->disabled) {
-      if (mdpinfo->consecutive_failures >= kMaxConsecutiveFailuresCount) {
-        mdpinfo->disabled = true;
-        disabled_reason =
-            "Dump failure, possibly related with sandboxing (crbug.com/461788)."
-            " Try --no-sandbox.";
-      } else if (post_task_failed) {
-        disabled_reason = "The thread it was meant to dump onto is gone.";
-        mdpinfo->disabled = true;
-      }
+    mdpinfo->disabled = true;
+  }
+
+  // PostTask failed. Ignore the dump provider and continue.
+  pmd_async_state->pending_dump_providers.pop_back();
+  SetupNextMemoryDump(std::move(pmd_async_state));
+}
+
+// This function is called on the right task runner for the current MDP: either
+// the task runner specified by the MDP or |dump_thread_task_runner| if the MDP
+// did not specify one. Invokes the dump provider's OnMemoryDump() (unless
+// disabled).
+void MemoryDumpManager::InvokeOnMemoryDump(
+    ProcessMemoryDumpAsyncState* owned_pmd_async_state) {
+  // In theory |owned_pmd_async_state| should be a scoped_ptr. The only reason
+  // why it isn't is the corner-case logic of |did_post_task| above, which
+  // needs to take back ownership of |pmd_async_state| when the PostTask()
+  // fails. Unfortunately, PostTask() destroys its scoped_ptr arguments upon
+  // failure to prevent accidental leaks, so using a scoped_ptr would prevent
+  // us from skipping the hop and moving on. Hence the manual naked -> scoped
+  // ptr juggling.
+  auto pmd_async_state = make_scoped_ptr(owned_pmd_async_state);
+  owned_pmd_async_state = nullptr;
+
+  // Read MemoryDumpProviderInfo thread safety considerations in
+  // memory_dump_manager.h when accessing |mdpinfo| fields.
+  MemoryDumpProviderInfo* mdpinfo =
+      pmd_async_state->pending_dump_providers.back().get();
+
+  DCHECK(!mdpinfo->task_runner ||
+         mdpinfo->task_runner->RunsTasksOnCurrentThread());
+
+  bool should_dump;
+  {
+    // A locked access is required to R/W |disabled| (for the
+    // UnregisterAndDeleteDumpProviderSoon() case).
+    AutoLock lock(lock_);
+
+    // Unregister the dump provider if it failed too many times consecutively.
+    if (!mdpinfo->disabled &&
+        mdpinfo->consecutive_failures >= kMaxConsecutiveFailuresCount) {
+      mdpinfo->disabled = true;
+      LOG(ERROR) << "Disabling MemoryDumpProvider \"" << mdpinfo->name
+                 << "\". Dump failed multiple times consecutively.";
     }
     should_dump = !mdpinfo->disabled;
-  }
-
-  if (disabled_reason) {
-    LOG(ERROR) << "Disabling MemoryDumpProvider \"" << mdpinfo->name << "\". "
-               << disabled_reason;
-  }
+  }  // AutoLock lock(lock_);
 
   if (should_dump) {
     // Invoke the dump provider.
     TRACE_EVENT_WITH_FLOW1(kTraceCategory,
-                           "MemoryDumpManager::ContinueAsyncProcessDump",
+                           "MemoryDumpManager::InvokeOnMemoryDump",
                            TRACE_ID_MANGLE(pmd_async_state->req_args.dump_guid),
                            TRACE_EVENT_FLAG_FLOW_IN | TRACE_EVENT_FLAG_FLOW_OUT,
                            "dump_provider.name", mdpinfo->name);
@@ -450,10 +527,10 @@
     bool dump_successful = mdpinfo->dump_provider->OnMemoryDump(args, pmd);
     mdpinfo->consecutive_failures =
         dump_successful ? 0 : mdpinfo->consecutive_failures + 1;
-  }  // if (!mdpinfo->disabled)
+  }
 
   pmd_async_state->pending_dump_providers.pop_back();
-  ContinueAsyncProcessDump(pmd_async_state.release());
+  SetupNextMemoryDump(std::move(pmd_async_state));
 }
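A stripped-down sketch of the SetupNextMemoryDump()/InvokeOnMemoryDump() alternation described above. The types are invented, and a direct call stands in for SequencedTaskRunner::PostTask():

#include <functional>
#include <vector>

struct Provider {
  std::function<void()> on_memory_dump;
};

void InvokeNext(std::vector<Provider>* pending);

// Picks the next provider and "hops" to it. In the real code the hop is
// task_runner->PostTask(..., InvokeOnMemoryDump, ...), so no two providers
// ever run concurrently on a SequencedTaskRunner.
void SetupNext(std::vector<Provider>* pending) {
  if (pending->empty())
    return;  // FinalizeDumpAndAddToTrace() would run at this point.
  InvokeNext(pending);
}

// Runs one provider, then alternates back to SetupNext(). Because each step
// is a separately posted task, at most one of the two functions is active at
// a time and all OnMemoryDump() calls are linearized.
void InvokeNext(std::vector<Provider>* pending) {
  pending->back().on_memory_dump();
  pending->pop_back();
  SetupNext(pending);
}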
 
 // static
@@ -477,25 +554,31 @@
   for (const auto& kv : pmd_async_state->process_dumps) {
     ProcessId pid = kv.first;  // kNullProcessId for the current process.
     ProcessMemoryDump* process_memory_dump = kv.second.get();
-    TracedValue* traced_value = new TracedValue();
-    scoped_refptr<ConvertableToTraceFormat> event_value(traced_value);
-    process_memory_dump->AsValueInto(traced_value);
+    scoped_ptr<TracedValue> traced_value(new TracedValue);
+    process_memory_dump->AsValueInto(traced_value.get());
     traced_value->SetString("level_of_detail",
                             MemoryDumpLevelOfDetailToString(
                                 pmd_async_state->req_args.level_of_detail));
     const char* const event_name =
         MemoryDumpTypeToString(pmd_async_state->req_args.dump_type);
 
+    scoped_ptr<ConvertableToTraceFormat> event_value(std::move(traced_value));
     TRACE_EVENT_API_ADD_TRACE_EVENT_WITH_PROCESS_ID(
         TRACE_EVENT_PHASE_MEMORY_DUMP,
         TraceLog::GetCategoryGroupEnabled(kTraceCategory), event_name,
-        dump_guid, pid, kTraceEventNumArgs, kTraceEventArgNames,
+        trace_event_internal::kGlobalScope, dump_guid, pid,
+        kTraceEventNumArgs, kTraceEventArgNames,
         kTraceEventArgTypes, nullptr /* arg_values */, &event_value,
         TRACE_EVENT_FLAG_HAS_ID);
   }
 
+  bool tracing_still_enabled;
+  TRACE_EVENT_CATEGORY_GROUP_ENABLED(kTraceCategory, &tracing_still_enabled);
+  if (!tracing_still_enabled)
+    pmd_async_state->dump_successful = false;
+
   if (!pmd_async_state->callback.is_null()) {
-    pmd_async_state->callback.Run(dump_guid, true /* success */);
+    pmd_async_state->callback.Run(dump_guid, pmd_async_state->dump_successful);
     pmd_async_state->callback.Reset();
   }
 
@@ -524,29 +607,35 @@
   AutoLock lock(lock_);
 
   DCHECK(delegate_);  // At this point we must have a delegate.
-
-  scoped_refptr<StackFrameDeduplicator> stack_frame_deduplicator = nullptr;
-  scoped_refptr<TypeNameDeduplicator> type_name_deduplicator = nullptr;
+  session_state_ = new MemoryDumpSessionState;
 
   if (heap_profiling_enabled_) {
     // If heap profiling is enabled, the stack frame deduplicator and type name
     // deduplicator will be in use. Add metadata events to write the frames
     // and type IDs.
-    stack_frame_deduplicator = new StackFrameDeduplicator;
-    type_name_deduplicator = new TypeNameDeduplicator;
+    session_state_->SetStackFrameDeduplicator(
+        make_scoped_ptr(new StackFrameDeduplicator));
+
+    session_state_->SetTypeNameDeduplicator(
+        make_scoped_ptr(new TypeNameDeduplicator));
+
     TRACE_EVENT_API_ADD_METADATA_EVENT(
-        "stackFrames", "stackFrames",
-        scoped_refptr<ConvertableToTraceFormat>(stack_frame_deduplicator));
+        TraceLog::GetCategoryGroupEnabled("__metadata"), "stackFrames",
+        "stackFrames",
+        make_scoped_ptr(
+            new SessionStateConvertableProxy<StackFrameDeduplicator>(
+                session_state_,
+                &MemoryDumpSessionState::stack_frame_deduplicator)));
+
     TRACE_EVENT_API_ADD_METADATA_EVENT(
-        "typeNames", "typeNames",
-        scoped_refptr<ConvertableToTraceFormat>(type_name_deduplicator));
+        TraceLog::GetCategoryGroupEnabled("__metadata"), "typeNames",
+        "typeNames",
+        make_scoped_ptr(new SessionStateConvertableProxy<TypeNameDeduplicator>(
+            session_state_, &MemoryDumpSessionState::type_name_deduplicator)));
   }
 
   DCHECK(!dump_thread_);
   dump_thread_ = std::move(dump_thread);
-  session_state_ = new MemoryDumpSessionState(stack_frame_deduplicator,
-                                              type_name_deduplicator);
-
   subtle::NoBarrier_Store(&memory_tracing_enabled_, 1);
 
   // TODO(primiano): This is a temporary hack to disable periodic memory dumps
@@ -589,6 +678,9 @@
 }
 
 void MemoryDumpManager::OnTraceLogDisabled() {
+  // There might be a memory dump in progress while this happens. Therefore,
+  // ensure that the MDM state which depends on the tracing enabled / disabled
+  // state is always accessed by the dumping methods while holding the |lock_|.
   subtle::NoBarrier_Store(&memory_tracing_enabled_, 0);
   scoped_ptr<Thread> dump_thread;
   {
@@ -598,7 +690,7 @@
   }
 
   // Thread stops are blocking and must be performed outside of the |lock_|
-  // or will deadlock (e.g., if ContinueAsyncProcessDump() tries to acquire it).
+  // or will deadlock (e.g., if SetupNextMemoryDump() tries to acquire it).
   periodic_dump_timer_.Stop();
   if (dump_thread)
     dump_thread->Stop();
@@ -611,7 +703,7 @@
 MemoryDumpManager::MemoryDumpProviderInfo::MemoryDumpProviderInfo(
     MemoryDumpProvider* dump_provider,
     const char* name,
-    const scoped_refptr<SingleThreadTaskRunner>& task_runner,
+    const scoped_refptr<SequencedTaskRunner>& task_runner,
     const MemoryDumpProvider::Options& options)
     : dump_provider(dump_provider),
       name(name),
@@ -643,6 +735,7 @@
     : req_args(req_args),
       session_state(session_state),
       callback(callback),
+      dump_successful(true),
       callback_task_runner(MessageLoop::current()->task_runner()),
       dump_thread_task_runner(dump_thread_task_runner) {
   pending_dump_providers.reserve(dump_providers.size());
diff --git a/base/trace_event/memory_dump_manager.h b/base/trace_event/memory_dump_manager.h
index b3880af..e9b09f8 100644
--- a/base/trace_event/memory_dump_manager.h
+++ b/base/trace_event/memory_dump_manager.h
@@ -69,9 +69,9 @@
   //  - name: a friendly name (duplicates allowed). Used for debugging and
   //      run-time profiling of memory-infra internals. Must be a long-lived
   //      C string.
-  //  - task_runner: if non-null, all the calls to |mdp| will be
-  //      issued on the given thread. Otherwise, |mdp| should be able to
-  //      handle calls on arbitrary threads.
+  //  - task_runner: either a SingleThreadTaskRunner or SequencedTaskRunner. All
+  //      the calls to |mdp| will be run on the given |task_runner|. If passed
+  //      null, |mdp| should be able to handle calls on arbitrary threads.
   //  - options: extra optional arguments. See memory_dump_provider.h.
   void RegisterDumpProvider(
       MemoryDumpProvider* mdp,
@@ -81,7 +81,12 @@
       MemoryDumpProvider* mdp,
       const char* name,
       const scoped_refptr<SingleThreadTaskRunner>& task_runner,
-      const MemoryDumpProvider::Options& options);
+      MemoryDumpProvider::Options options);
+  void RegisterDumpProviderWithSequencedTaskRunner(
+      MemoryDumpProvider* mdp,
+      const char* name,
+      const scoped_refptr<SequencedTaskRunner>& task_runner,
+      MemoryDumpProvider::Options options);
   void UnregisterDumpProvider(MemoryDumpProvider* mdp);
 
   // Unregisters an unbound dump provider and takes care about its deletion
@@ -153,14 +158,15 @@
   //   inside ProcessMemoryDumpAsyncState is removed.
   // - In most cases, the MDPInfo is destroyed within UnregisterDumpProvider().
   // - If UnregisterDumpProvider() is called while a dump is in progress, the
-  //   MDPInfo is destroyed in the epilogue of ContinueAsyncProcessDump(), when
-  //   the copy inside ProcessMemoryDumpAsyncState is erase()-d.
+  //   MDPInfo is destroyed in SetupNextMemoryDump() or InvokeOnMemoryDump(),
+  //   when the copy inside ProcessMemoryDumpAsyncState is erase()-d.
   // - The non-const fields of MemoryDumpProviderInfo are safe to access only
-  //   in the |task_runner| thread, unless the thread has been destroyed.
+  //   on tasks running in the |task_runner|, unless the thread has been
+  //   destroyed.
   struct MemoryDumpProviderInfo
       : public RefCountedThreadSafe<MemoryDumpProviderInfo> {
-    // Define a total order based on the thread (i.e. |task_runner|) affinity,
-    // so that all MDP belonging to the same thread are adjacent in the set.
+    // Define a total order based on the |task_runner| affinity, so that MDPs
+    // belonging to the same SequencedTaskRunner are adjacent in the set.
     struct Comparator {
       bool operator()(const scoped_refptr<MemoryDumpProviderInfo>& a,
                       const scoped_refptr<MemoryDumpProviderInfo>& b) const;
@@ -171,7 +177,7 @@
     MemoryDumpProviderInfo(
         MemoryDumpProvider* dump_provider,
         const char* name,
-        const scoped_refptr<SingleThreadTaskRunner>& task_runner,
+        const scoped_refptr<SequencedTaskRunner>& task_runner,
         const MemoryDumpProvider::Options& options);
 
     MemoryDumpProvider* const dump_provider;
@@ -183,9 +189,9 @@
     // Human readable name, for debugging and testing. Not necessarily unique.
     const char* const name;
 
-    // The task_runner affinity. Can be nullptr, in which case the dump provider
+    // The task runner affinity. Can be nullptr, in which case the dump provider
     // will be invoked on |dump_thread_|.
-    const scoped_refptr<SingleThreadTaskRunner> task_runner;
+    const scoped_refptr<SequencedTaskRunner> task_runner;
 
     // The |options| arg passed to RegisterDumpProvider().
     const MemoryDumpProvider::Options options;
@@ -204,8 +210,9 @@
   };
 
   // Holds the state of a process memory dump that needs to be carried over
-  // across threads in order to fulfil an asynchronous CreateProcessDump()
-  // request. At any time exactly one thread owns a ProcessMemoryDumpAsyncState.
+  // across task runners in order to fulfil an asynchronous CreateProcessDump()
+  // request. At any time exactly one task runner owns a
+  // ProcessMemoryDumpAsyncState.
   struct ProcessMemoryDumpAsyncState {
     ProcessMemoryDumpAsyncState(
         MemoryDumpRequestArgs req_args,
@@ -238,6 +245,9 @@
     // Callback passed to the initial call to CreateProcessDump().
     MemoryDumpCallback callback;
 
+    // The |success| argument that will be passed to the |callback|.
+    bool dump_successful;
+
     // The thread on which FinalizeDumpAndAddToTrace() (and hence |callback|)
     // should be invoked. This is the thread on which the initial
     // CreateProcessDump() request was called.
@@ -264,6 +274,9 @@
   static void FinalizeDumpAndAddToTrace(
       scoped_ptr<ProcessMemoryDumpAsyncState> pmd_async_state);
 
+  // Enable heap profiling if kEnableHeapProfiling is specified.
+  void EnableHeapProfilingIfNeeded();
+
   // Internal, used only by MemoryDumpManagerDelegate.
   // Creates a memory dump for the current process and appends it to the trace.
   // |callback| will be invoked asynchronously upon completion on the same
@@ -271,17 +284,30 @@
   void CreateProcessDump(const MemoryDumpRequestArgs& args,
                          const MemoryDumpCallback& callback);
 
-  // Continues the ProcessMemoryDump started by CreateProcessDump(), hopping
-  // across threads as needed as specified by MDPs in RegisterDumpProvider().
-  void ContinueAsyncProcessDump(
-      ProcessMemoryDumpAsyncState* owned_pmd_async_state);
+  // Calls InvokeOnMemoryDump() for the next MDP on the task runner specified
+  // by the MDP during registration. If posting the task fails, the MDP is
+  // skipped and the next one is processed.
+  void SetupNextMemoryDump(
+      scoped_ptr<ProcessMemoryDumpAsyncState> pmd_async_state);
+
+  // Invokes OnMemoryDump() of the next MDP and calls SetupNextMemoryDump() at
+  // the end to continue the ProcessMemoryDump. Should be called on the MDP task
+  // runner.
+  void InvokeOnMemoryDump(ProcessMemoryDumpAsyncState* owned_pmd_async_state);
+
+  // Helper for the public RegisterDumpProvider* functions.
+  void RegisterDumpProviderInternal(
+      MemoryDumpProvider* mdp,
+      const char* name,
+      const scoped_refptr<SequencedTaskRunner>& task_runner,
+      const MemoryDumpProvider::Options& options);
 
   // Helper for the public UnregisterDumpProvider* functions.
   void UnregisterDumpProviderInternal(MemoryDumpProvider* mdp,
                                       bool take_mdp_ownership_and_delete_async);
 
-  // An ordererd set of registered MemoryDumpProviderInfo(s), sorted by thread
-  // affinity (MDPs belonging to the same thread are adjacent).
+  // An ordered set of registered MemoryDumpProviderInfo(s), sorted by task
+  // runner affinity (MDPs belonging to the same task runner are adjacent).
   MemoryDumpProviderInfo::OrderedSet dump_providers_;
 
   // Shared among all the PMDs to keep state scoped to the tracing session.
@@ -303,7 +329,8 @@
   // For time-triggered periodic dumps.
   RepeatingTimer periodic_dump_timer_;
 
-  // Thread used for MemoryDumpProviders which don't specify a thread affinity.
+  // Thread used for MemoryDumpProviders which don't specify a task runner
+  // affinity.
   scoped_ptr<Thread> dump_thread_;
 
   // The unique id of the child process. This is created only for tracing and is
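
A usage sketch of the new sequenced-task-runner registration path (illustrative
only; |my_mdp| and |my_sequenced_runner| are placeholders, not part of this
change):

    base::trace_event::MemoryDumpManager* mdm =
        base::trace_event::MemoryDumpManager::GetInstance();
    mdm->RegisterDumpProviderWithSequencedTaskRunner(
        my_mdp, "MyProvider", my_sequenced_runner,
        base::trace_event::MemoryDumpProvider::Options());

During each dump, SetupNextMemoryDump() posts InvokeOnMemoryDump() for
|my_mdp| as an individual task on |my_sequenced_runner|, falling back to
|dump_thread_| when no task runner was given at registration.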
diff --git a/base/trace_event/memory_dump_manager_unittest.cc b/base/trace_event/memory_dump_manager_unittest.cc
index 03b3afa..138ba69 100644
--- a/base/trace_event/memory_dump_manager_unittest.cc
+++ b/base/trace_event/memory_dump_manager_unittest.cc
@@ -9,6 +9,7 @@
 #include <vector>
 
 #include "base/bind_helpers.h"
+#include "base/memory/ref_counted_memory.h"
 #include "base/memory/scoped_ptr.h"
 #include "base/message_loop/message_loop.h"
 #include "base/run_loop.h"
@@ -18,6 +19,7 @@
 #include "base/test/trace_event_analyzer.h"
 #include "base/thread_task_runner_handle.h"
 #include "base/threading/platform_thread.h"
+#include "base/threading/sequenced_worker_pool.h"
 #include "base/threading/thread.h"
 #include "base/trace_event/memory_dump_provider.h"
 #include "base/trace_event/process_memory_dump.h"
@@ -49,18 +51,44 @@
 
 void RegisterDumpProvider(
     MemoryDumpProvider* mdp,
-    const scoped_refptr<base::SingleThreadTaskRunner>& task_runner,
-    const MemoryDumpProvider::Options& options) {
+    const scoped_refptr<base::SequencedTaskRunner>& task_runner,
+    const MemoryDumpProvider::Options& options,
+    bool dumps_on_single_thread_task_runner) {
   MemoryDumpManager* mdm = MemoryDumpManager::GetInstance();
   mdm->set_dumper_registrations_ignored_for_testing(false);
-  mdm->RegisterDumpProvider(mdp, "TestDumpProvider", task_runner, options);
+  const char* kMDPName = "TestDumpProvider";
+  if (dumps_on_single_thread_task_runner) {
+    scoped_refptr<base::SingleThreadTaskRunner> single_thread_task_runner =
+        static_cast<base::SingleThreadTaskRunner*>(task_runner.get());
+    mdm->RegisterDumpProvider(mdp, kMDPName,
+                              std::move(single_thread_task_runner), options);
+  } else {
+    mdm->RegisterDumpProviderWithSequencedTaskRunner(mdp, kMDPName, task_runner,
+                                                     options);
+  }
   mdm->set_dumper_registrations_ignored_for_testing(true);
 }
 
+void RegisterDumpProvider(
+    MemoryDumpProvider* mdp,
+    const scoped_refptr<base::SingleThreadTaskRunner>& task_runner,
+    const MemoryDumpProvider::Options& options) {
+  RegisterDumpProvider(mdp, task_runner, options,
+                       true /* dumps_on_single_thread_task_runner */);
+}
+
 void RegisterDumpProvider(MemoryDumpProvider* mdp) {
   RegisterDumpProvider(mdp, nullptr, MemoryDumpProvider::Options());
 }
 
+void RegisterDumpProviderWithSequencedTaskRunner(
+    MemoryDumpProvider* mdp,
+    const scoped_refptr<base::SequencedTaskRunner>& task_runner,
+    const MemoryDumpProvider::Options& options) {
+  RegisterDumpProvider(mdp, task_runner, options,
+                       false /* dumps_on_single_thread_task_runner */);
+}
+
 void OnTraceDataCollected(Closure quit_closure,
                           trace_event::TraceResultBuffer* buffer,
                           const scoped_refptr<RefCountedString>& json,
@@ -90,6 +118,9 @@
     NOTREACHED();
     return MemoryDumpManager::kInvalidTracingProcessId;
   }
+
+  // Promotes CreateProcessDump to public so it can be used by test fixtures.
+  using MemoryDumpManagerDelegate::CreateProcessDump;
 };
 
 class MockMemoryDumpProvider : public MemoryDumpProvider {
@@ -107,6 +138,46 @@
   bool enable_mock_destructor;
 };
 
+// A SequencedTaskRunner that can be toggled to reject posted tasks, used to
+// simulate PostTask failures; it also counts the number of posted tasks.
+class TestSequencedTaskRunner : public SequencedTaskRunner {
+ public:
+  TestSequencedTaskRunner()
+      : worker_pool_(
+            new SequencedWorkerPool(2 /* max_threads */, "Test Task Runner")),
+        enabled_(true),
+        num_of_post_tasks_(0) {}
+
+  void set_enabled(bool value) { enabled_ = value; }
+  unsigned no_of_post_tasks() const { return num_of_post_tasks_; }
+
+  bool PostNonNestableDelayedTask(const tracked_objects::Location& from_here,
+                                  const Closure& task,
+                                  TimeDelta delay) override {
+    NOTREACHED();
+    return false;
+  }
+
+  bool PostDelayedTask(const tracked_objects::Location& from_here,
+                       const Closure& task,
+                       TimeDelta delay) override {
+    num_of_post_tasks_++;
+    if (enabled_)
+      return worker_pool_->PostSequencedWorkerTask(token_, from_here, task);
+    return false;
+  }
+
+  bool RunsTasksOnCurrentThread() const override {
+    return worker_pool_->IsRunningSequenceOnCurrentThread(token_);
+  }
+
+ private:
+  ~TestSequencedTaskRunner() override {}
+
+  scoped_refptr<SequencedWorkerPool> worker_pool_;
+  const SequencedWorkerPool::SequenceToken token_;
+  bool enabled_;
+  unsigned num_of_post_tasks_;
+};
+
 class MemoryDumpManagerTest : public testing::Test {
  public:
   MemoryDumpManagerTest() : testing::Test(), kDefaultOptions() {}
@@ -438,6 +509,50 @@
   DisableTracing();
 }
 
+// Checks that memory dump calls are always posted on the given task runner in
+// the SequencedTaskRunner case, and that the dump provider gets disabled when
+// PostTask fails while the dump as a whole still succeeds.
+TEST_F(MemoryDumpManagerTest, PostTaskForSequencedTaskRunner) {
+  InitializeMemoryDumpManager(false /* is_coordinator */);
+  std::vector<MockMemoryDumpProvider> mdps(3);
+  scoped_refptr<TestSequencedTaskRunner> task_runner1(
+      make_scoped_refptr(new TestSequencedTaskRunner()));
+  scoped_refptr<TestSequencedTaskRunner> task_runner2(
+      make_scoped_refptr(new TestSequencedTaskRunner()));
+  RegisterDumpProviderWithSequencedTaskRunner(&mdps[0], task_runner1,
+                                              kDefaultOptions);
+  RegisterDumpProviderWithSequencedTaskRunner(&mdps[1], task_runner2,
+                                              kDefaultOptions);
+  RegisterDumpProviderWithSequencedTaskRunner(&mdps[2], task_runner2,
+                                              kDefaultOptions);
+  // |mdps[0]| should be disabled permanently after the first dump.
+  EXPECT_CALL(mdps[0], OnMemoryDump(_, _)).Times(0);
+  EXPECT_CALL(mdps[1], OnMemoryDump(_, _)).Times(2);
+  EXPECT_CALL(mdps[2], OnMemoryDump(_, _)).Times(2);
+  EXPECT_CALL(*delegate_, RequestGlobalMemoryDump(_, _)).Times(2);
+
+  EnableTracingWithLegacyCategories(MemoryDumpManager::kTraceCategory);
+
+  task_runner1->set_enabled(false);
+  last_callback_success_ = false;
+  RequestGlobalDumpAndWait(MemoryDumpType::EXPLICITLY_TRIGGERED,
+                           MemoryDumpLevelOfDetail::DETAILED);
+  // Tasks should be individually posted even if |mdps[1]| and |mdps[2]| belong
+  // to the same task runner.
+  EXPECT_EQ(1u, task_runner1->no_of_post_tasks());
+  EXPECT_EQ(2u, task_runner2->no_of_post_tasks());
+  EXPECT_TRUE(last_callback_success_);
+
+  task_runner1->set_enabled(true);
+  last_callback_success_ = false;
+  RequestGlobalDumpAndWait(MemoryDumpType::EXPLICITLY_TRIGGERED,
+                           MemoryDumpLevelOfDetail::DETAILED);
+  EXPECT_EQ(2u, task_runner1->no_of_post_tasks());
+  EXPECT_EQ(4u, task_runner2->no_of_post_tasks());
+  EXPECT_TRUE(last_callback_success_);
+  DisableTracing();
+}
+
 // Checks that providers get disabled after 3 consecutive failures, but not
 // otherwise (e.g., if interleaved).
 TEST_F(MemoryDumpManagerTest, DisableFailingDumpers) {
@@ -826,9 +941,30 @@
   tracing_disabled_event.Signal();
   run_loop.Run();
 
-  // RequestGlobalMemoryDump() should still suceed even if some threads were
-  // torn down during the dump.
-  EXPECT_TRUE(last_callback_success_);
+  EXPECT_FALSE(last_callback_success_);
+}
+
+// Tests against race conditions that can happen if tracing is disabled before
+// the CreateProcessDump() call. Real-world regression: crbug.com/580295.
+TEST_F(MemoryDumpManagerTest, DisableTracingRightBeforeStartOfDump) {
+  base::WaitableEvent tracing_disabled_event(false, false);
+  InitializeMemoryDumpManager(false /* is_coordinator */);
+
+  MockMemoryDumpProvider mdp;
+  RegisterDumpProvider(&mdp);
+  EnableTracingWithLegacyCategories(MemoryDumpManager::kTraceCategory);
+
+  EXPECT_CALL(*delegate_, RequestGlobalMemoryDump(_, _))
+      .WillOnce(Invoke([this](const MemoryDumpRequestArgs& args,
+                              const MemoryDumpCallback& callback) {
+        DisableTracing();
+        delegate_->CreateProcessDump(args, callback);
+      }));
+
+  last_callback_success_ = true;
+  RequestGlobalDumpAndWait(MemoryDumpType::EXPLICITLY_TRIGGERED,
+                           MemoryDumpLevelOfDetail::DETAILED);
+  EXPECT_FALSE(last_callback_success_);
 }
 
 TEST_F(MemoryDumpManagerTest, DumpOnBehalfOfOtherProcess) {
diff --git a/base/trace_event/memory_dump_provider.h b/base/trace_event/memory_dump_provider.h
index 2ce919d..cf221d3 100644
--- a/base/trace_event/memory_dump_provider.h
+++ b/base/trace_event/memory_dump_provider.h
@@ -26,14 +26,20 @@
  public:
   // Optional arguments for MemoryDumpManager::RegisterDumpProvider().
   struct Options {
-    Options() : target_pid(kNullProcessId) {}
-    explicit Options(ProcessId target_pid) : target_pid(target_pid) {}
+    Options()
+        : target_pid(kNullProcessId),
+          dumps_on_single_thread_task_runner(false) {}
 
     // If the dump provider generates dumps on behalf of another process,
-    // |target_process| contains the pid of that process.
+    // |target_pid| contains the pid of that process.
     // The default value is kNullProcessId, which means that the dump provider
     // generates dumps for the current process.
     ProcessId target_pid;
+
+    // |dumps_on_single_thread_task_runner| is true if the dump provider runs
+    // on a SingleThreadTaskRunner, which is usually the case. It is faster to
+    // run all providers that live on the same thread together, avoiding
+    // thread hops.
   };
 
   virtual ~MemoryDumpProvider() {}
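
A hypothetical registration using the new field (sketch only; |my_provider| and
|my_single_thread_runner| are placeholders):

    base::trace_event::MemoryDumpProvider::Options options;
    options.dumps_on_single_thread_task_runner = true;
    base::trace_event::MemoryDumpManager::GetInstance()->RegisterDumpProvider(
        my_provider, "MyProvider", my_single_thread_runner, options);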
diff --git a/base/trace_event/memory_dump_session_state.cc b/base/trace_event/memory_dump_session_state.cc
index 5aa79b1..576da31 100644
--- a/base/trace_event/memory_dump_session_state.cc
+++ b/base/trace_event/memory_dump_session_state.cc
@@ -7,13 +7,20 @@
 namespace base {
 namespace trace_event {
 
-MemoryDumpSessionState::MemoryDumpSessionState(
-    const scoped_refptr<StackFrameDeduplicator>& stack_frame_deduplicator,
-    const scoped_refptr<TypeNameDeduplicator>& type_name_deduplicator)
-    : stack_frame_deduplicator_(stack_frame_deduplicator),
-      type_name_deduplicator_(type_name_deduplicator) {}
+MemoryDumpSessionState::MemoryDumpSessionState() {}
 
-MemoryDumpSessionState::~MemoryDumpSessionState() {
+MemoryDumpSessionState::~MemoryDumpSessionState() {}
+
+void MemoryDumpSessionState::SetStackFrameDeduplicator(
+    scoped_ptr<StackFrameDeduplicator> stack_frame_deduplicator) {
+  DCHECK(!stack_frame_deduplicator_);
+  stack_frame_deduplicator_ = std::move(stack_frame_deduplicator);
+}
+
+void MemoryDumpSessionState::SetTypeNameDeduplicator(
+    scoped_ptr<TypeNameDeduplicator> type_name_deduplicator) {
+  DCHECK(!type_name_deduplicator_);
+  type_name_deduplicator_ = std::move(type_name_deduplicator);
 }
 
 }  // namespace trace_event
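
A minimal sketch of the resulting two-step initialization (assuming, as in this
revision, that both deduplicators are default-constructible):

    scoped_refptr<MemoryDumpSessionState> session_state =
        new MemoryDumpSessionState();
    session_state->SetStackFrameDeduplicator(
        make_scoped_ptr(new StackFrameDeduplicator()));
    session_state->SetTypeNameDeduplicator(
        make_scoped_ptr(new TypeNameDeduplicator()));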
diff --git a/base/trace_event/memory_dump_session_state.h b/base/trace_event/memory_dump_session_state.h
index 6834471..879545f 100644
--- a/base/trace_event/memory_dump_session_state.h
+++ b/base/trace_event/memory_dump_session_state.h
@@ -6,7 +6,7 @@
 #define BASE_TRACE_EVENT_MEMORY_DUMP_SESSION_STATE_H_
 
 #include "base/base_export.h"
-#include "base/memory/ref_counted.h"
+#include "base/memory/scoped_ptr.h"
 #include "base/trace_event/heap_profiler_stack_frame_deduplicator.h"
 #include "base/trace_event/heap_profiler_type_name_deduplicator.h"
 
@@ -18,33 +18,37 @@
 class BASE_EXPORT MemoryDumpSessionState
     : public RefCountedThreadSafe<MemoryDumpSessionState> {
  public:
-  MemoryDumpSessionState(
-      const scoped_refptr<StackFrameDeduplicator>& stack_frame_deduplicator,
-      const scoped_refptr<TypeNameDeduplicator>& type_name_deduplicator);
+  MemoryDumpSessionState();
 
   // Returns the stack frame deduplicator that should be used by memory dump
   // providers when doing a heap dump.
-  StackFrameDeduplicator* stack_frame_deduplicator() {
+  StackFrameDeduplicator* stack_frame_deduplicator() const {
     return stack_frame_deduplicator_.get();
   }
 
+  void SetStackFrameDeduplicator(
+      scoped_ptr<StackFrameDeduplicator> stack_frame_deduplicator);
+
   // Returns the type name deduplicator that should be used by memory dump
   // providers when doing a heap dump.
-  TypeNameDeduplicator* type_name_deduplicator() {
+  TypeNameDeduplicator* type_name_deduplicator() const {
     return type_name_deduplicator_.get();
   }
 
+  void SetTypeNameDeduplicator(
+      scoped_ptr<TypeNameDeduplicator> type_name_deduplicator);
+
  private:
   friend class RefCountedThreadSafe<MemoryDumpSessionState>;
   ~MemoryDumpSessionState();
 
   // Deduplicates backtraces in heap dumps so they can be written once when the
   // trace is finalized.
-  scoped_refptr<StackFrameDeduplicator> stack_frame_deduplicator_;
+  scoped_ptr<StackFrameDeduplicator> stack_frame_deduplicator_;
 
   // Deduplicates type names in heap dumps so they can be written once when the
   // trace is finalized.
-  scoped_refptr<TypeNameDeduplicator> type_name_deduplicator_;
+  scoped_ptr<TypeNameDeduplicator> type_name_deduplicator_;
 };
 
 }  // namespace trace_event
diff --git a/base/trace_event/process_memory_dump.cc b/base/trace_event/process_memory_dump.cc
index ae60bb0..74cbcc2 100644
--- a/base/trace_event/process_memory_dump.cc
+++ b/base/trace_event/process_memory_dump.cc
@@ -12,10 +12,18 @@
 #include "base/trace_event/trace_event_argument.h"
 #include "build/build_config.h"
 
+#if defined(OS_IOS)
+#include <sys/sysctl.h>
+#endif
+
 #if defined(OS_POSIX)
 #include <sys/mman.h>
 #endif
 
+#if defined(OS_WIN)
+#include <Psapi.h>
+#endif
+
 namespace base {
 namespace trace_event {
 
@@ -28,61 +36,108 @@
   return "global/" + guid.ToString();
 }
 
+#if defined(COUNT_RESIDENT_BYTES_SUPPORTED)
+size_t GetSystemPageCount(size_t mapped_size, size_t page_size) {
+  return (mapped_size + page_size - 1) / page_size;
+}
+#endif
+
 }  // namespace
 
 #if defined(COUNT_RESIDENT_BYTES_SUPPORTED)
 // static
+size_t ProcessMemoryDump::GetSystemPageSize() {
+#if defined(OS_IOS)
+  // On iOS, getpagesize() returns the user page size, but the kernel page
+  // size is needed for the mincore() arrays; use sysctlbyname() to obtain it.
+  // See crbug.com/542671 and Apple rdar://23651782
+  int pagesize;
+  size_t pagesize_len;
+  int status = sysctlbyname("vm.pagesize", nullptr, &pagesize_len, nullptr, 0);
+  if (!status && pagesize_len == sizeof(pagesize)) {
+    if (!sysctlbyname("vm.pagesize", &pagesize, &pagesize_len, nullptr, 0))
+      return pagesize;
+  }
+  LOG(ERROR) << "sysctlbyname(\"vm.pagesize\") failed.";
+  // Fall back to base::GetPageSize(), although it may be wrong in certain
+  // cases.
+#endif  // defined(OS_IOS)
+  return base::GetPageSize();
+}
+
+// static
 size_t ProcessMemoryDump::CountResidentBytes(void* start_address,
                                              size_t mapped_size) {
-  const size_t page_size = GetPageSize();
+  const size_t page_size = GetSystemPageSize();
   const uintptr_t start_pointer = reinterpret_cast<uintptr_t>(start_address);
   DCHECK_EQ(0u, start_pointer % page_size);
 
-  // This function allocates a char vector of size number of pages in the given
-  // mapped_size. To avoid allocating a large array, the memory is split into
-  // chunks. Maximum size of vector allocated, will be
-  // kPageChunkSize / page_size.
-  const size_t kMaxChunkSize = 32 * 1024 * 1024;
   size_t offset = 0;
   size_t total_resident_size = 0;
-  int result = 0;
+  bool failure = false;
+
+  // An array as large as the number of pages in the memory segment needs to be
+  // passed to the query function. To avoid allocating a large array, the given
+  // block of memory is split into chunks of size |kMaxChunkSize|.
+  const size_t kMaxChunkSize = 8 * 1024 * 1024;
+  size_t max_vec_size =
+      GetSystemPageCount(std::min(mapped_size, kMaxChunkSize), page_size);
+#if defined(OS_MACOSX) || defined(OS_IOS)
+  scoped_ptr<char[]> vec(new char[max_vec_size]);
+#elif defined(OS_WIN)
+  scoped_ptr<PSAPI_WORKING_SET_EX_INFORMATION[]> vec(
+      new PSAPI_WORKING_SET_EX_INFORMATION[max_vec_size]);
+#elif defined(OS_POSIX)
+  scoped_ptr<unsigned char[]> vec(new unsigned char[max_vec_size]);
+#endif
+
   while (offset < mapped_size) {
-    void* chunk_start = reinterpret_cast<void*>(start_pointer + offset);
+    uintptr_t chunk_start = (start_pointer + offset);
     const size_t chunk_size = std::min(mapped_size - offset, kMaxChunkSize);
-    const size_t page_count = (chunk_size + page_size - 1) / page_size;
+    const size_t page_count = GetSystemPageCount(chunk_size, page_size);
     size_t resident_page_count = 0;
 
 #if defined(OS_MACOSX) || defined(OS_IOS)
-    std::vector<char> vec(page_count + 1);
     // mincore in MAC does not fail with EAGAIN.
-    result = mincore(chunk_start, chunk_size, vec.data());
-    if (result)
-      break;
-
+    failure =
+        !!mincore(reinterpret_cast<void*>(chunk_start), chunk_size, vec.get());
     for (size_t i = 0; i < page_count; i++)
       resident_page_count += vec[i] & MINCORE_INCORE ? 1 : 0;
-#else   // defined(OS_MACOSX) || defined(OS_IOS)
-    std::vector<unsigned char> vec(page_count + 1);
-    int error_counter = 0;
-    // HANDLE_EINTR tries for 100 times. So following the same pattern.
-    do {
-      result = mincore(chunk_start, chunk_size, vec.data());
-    } while (result == -1 && errno == EAGAIN && error_counter++ < 100);
-    if (result)
-      break;
+#elif defined(OS_WIN)
+    for (size_t i = 0; i < page_count; i++) {
+      vec[i].VirtualAddress =
+          reinterpret_cast<void*>(chunk_start + i * page_size);
+    }
+    DWORD vec_size = static_cast<DWORD>(
+        page_count * sizeof(PSAPI_WORKING_SET_EX_INFORMATION));
+    failure = !QueryWorkingSetEx(GetCurrentProcess(), vec.get(), vec_size);
 
     for (size_t i = 0; i < page_count; i++)
-      resident_page_count += vec[i];
-#endif  // defined(OS_MACOSX) || defined(OS_IOS)
+      resident_page_count += vec[i].VirtualAttributes.Valid;
+#elif defined(OS_POSIX)
+    int error_counter = 0;
+    int result = 0;
+    // HANDLE_EINTR retries up to 100 times, so follow the same pattern here.
+    do {
+      result =
+          mincore(reinterpret_cast<void*>(chunk_start), chunk_size, vec.get());
+    } while (result == -1 && errno == EAGAIN && error_counter++ < 100);
+    failure = !!result;
+
+    for (size_t i = 0; i < page_count; i++)
+      resident_page_count += vec[i] & 1;
+#endif
+
+    if (failure)
+      break;
 
     total_resident_size += resident_page_count * page_size;
     offset += kMaxChunkSize;
   }
 
-  DCHECK_EQ(0, result);
-  if (result) {
+  DCHECK(!failure);
+  if (failure) {
     total_resident_size = 0;
-    LOG(ERROR) << "mincore() call failed. The resident size is invalid";
+    LOG(ERROR) << "CountResidentBytes failed. The resident size is invalid";
   }
   return total_resident_size;
 }
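
A usage sketch mirroring the unit test below; the queried range must be
page-aligned and mmapped by the calling process:

    const size_t page_size = ProcessMemoryDump::GetSystemPageSize();
    const size_t kSize = 5 * page_size;
    scoped_ptr<char, base::AlignedFreeDeleter> memory(
        static_cast<char*>(base::AlignedAlloc(kSize, page_size)));
    memset(memory.get(), 0, kSize);  // Touch the pages to make them resident.
    size_t resident =
        ProcessMemoryDump::CountResidentBytes(memory.get(), kSize);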
@@ -92,37 +147,35 @@
     const scoped_refptr<MemoryDumpSessionState>& session_state)
     : has_process_totals_(false),
       has_process_mmaps_(false),
-      session_state_(session_state) {
-}
+      session_state_(session_state) {}
 
-ProcessMemoryDump::~ProcessMemoryDump() {
-}
+ProcessMemoryDump::~ProcessMemoryDump() {}
 
 MemoryAllocatorDump* ProcessMemoryDump::CreateAllocatorDump(
     const std::string& absolute_name) {
-  MemoryAllocatorDump* mad = new MemoryAllocatorDump(absolute_name, this);
-  AddAllocatorDumpInternal(mad);  // Takes ownership of |mad|.
-  return mad;
+  return AddAllocatorDumpInternal(
+      make_scoped_ptr(new MemoryAllocatorDump(absolute_name, this)));
 }
 
 MemoryAllocatorDump* ProcessMemoryDump::CreateAllocatorDump(
     const std::string& absolute_name,
     const MemoryAllocatorDumpGuid& guid) {
-  MemoryAllocatorDump* mad = new MemoryAllocatorDump(absolute_name, this, guid);
-  AddAllocatorDumpInternal(mad);  // Takes ownership of |mad|.
-  return mad;
+  return AddAllocatorDumpInternal(
+      make_scoped_ptr(new MemoryAllocatorDump(absolute_name, this, guid)));
 }
 
-void ProcessMemoryDump::AddAllocatorDumpInternal(MemoryAllocatorDump* mad) {
-  DCHECK_EQ(0ul, allocator_dumps_.count(mad->absolute_name()));
-  allocator_dumps_storage_.push_back(mad);
-  allocator_dumps_[mad->absolute_name()] = mad;
+MemoryAllocatorDump* ProcessMemoryDump::AddAllocatorDumpInternal(
+    scoped_ptr<MemoryAllocatorDump> mad) {
+  // Copy the name before |mad| is moved into the map; otherwise the DCHECK
+  // message below would dereference the moved-from |mad|.
+  const std::string absolute_name = mad->absolute_name();
+  auto insertion_result = allocator_dumps_.insert(
+      std::make_pair(absolute_name, std::move(mad)));
+  DCHECK(insertion_result.second) << "Duplicate name: " << absolute_name;
+  return insertion_result.first->second.get();
 }
 
 MemoryAllocatorDump* ProcessMemoryDump::GetAllocatorDump(
     const std::string& absolute_name) const {
   auto it = allocator_dumps_.find(absolute_name);
-  return it == allocator_dumps_.end() ? nullptr : it->second;
+  return it == allocator_dumps_.end() ? nullptr : it->second.get();
 }
 
 MemoryAllocatorDump* ProcessMemoryDump::GetOrCreateAllocatorDump(
@@ -135,10 +188,24 @@
     const MemoryAllocatorDumpGuid& guid) {
   // A shared allocator dump can be shared within a process and the guid could
   // have been created already.
-  MemoryAllocatorDump* allocator_dump = GetSharedGlobalAllocatorDump(guid);
-  return allocator_dump ? allocator_dump
-                        : CreateAllocatorDump(
-                              GetSharedGlobalAllocatorDumpName(guid), guid);
+  MemoryAllocatorDump* mad = GetSharedGlobalAllocatorDump(guid);
+  if (mad) {
+    // The weak flag is cleared because this method should create a non-weak
+    // dump.
+    mad->clear_flags(MemoryAllocatorDump::Flags::WEAK);
+    return mad;
+  }
+  return CreateAllocatorDump(GetSharedGlobalAllocatorDumpName(guid), guid);
+}
+
+MemoryAllocatorDump* ProcessMemoryDump::CreateWeakSharedGlobalAllocatorDump(
+    const MemoryAllocatorDumpGuid& guid) {
+  MemoryAllocatorDump* mad = GetSharedGlobalAllocatorDump(guid);
+  if (mad)
+    return mad;
+  mad = CreateAllocatorDump(GetSharedGlobalAllocatorDumpName(guid), guid);
+  mad->set_flags(MemoryAllocatorDump::Flags::WEAK);
+  return mad;
 }
 
 MemoryAllocatorDump* ProcessMemoryDump::GetSharedGlobalAllocatorDump(
@@ -147,9 +214,9 @@
 }
 
 void ProcessMemoryDump::AddHeapDump(const std::string& absolute_name,
-                                    scoped_refptr<TracedValue> heap_dump) {
+                                    scoped_ptr<TracedValue> heap_dump) {
   DCHECK_EQ(0ul, heap_dumps_.count(absolute_name));
-  heap_dumps_[absolute_name] = heap_dump;
+  heap_dumps_[absolute_name] = std::move(heap_dump);
 }
 
 void ProcessMemoryDump::Clear() {
@@ -163,7 +230,6 @@
     has_process_mmaps_ = false;
   }
 
-  allocator_dumps_storage_.clear();
   allocator_dumps_.clear();
   allocator_dumps_edges_.clear();
   heap_dumps_.clear();
@@ -173,14 +239,9 @@
   DCHECK(!other->has_process_totals() && !other->has_process_mmaps());
 
   // Moves the ownership of all MemoryAllocatorDump(s) contained in |other|
-  // into this ProcessMemoryDump.
-  for (MemoryAllocatorDump* mad : other->allocator_dumps_storage_) {
-    // Check that we don't merge duplicates.
-    DCHECK_EQ(0ul, allocator_dumps_.count(mad->absolute_name()));
-    allocator_dumps_storage_.push_back(mad);
-    allocator_dumps_[mad->absolute_name()] = mad;
-  }
-  other->allocator_dumps_storage_.weak_clear();
+  // into this ProcessMemoryDump, checking for duplicates.
+  for (auto& it : other->allocator_dumps_)
+    AddAllocatorDumpInternal(std::move(it.second));
   other->allocator_dumps_.clear();
 
   // Move all the edges.
@@ -189,7 +250,10 @@
                                 other->allocator_dumps_edges_.end());
   other->allocator_dumps_edges_.clear();
 
-  heap_dumps_.insert(other->heap_dumps_.begin(), other->heap_dumps_.end());
+  for (auto& it : other->heap_dumps_) {
+    DCHECK_EQ(0ul, heap_dumps_.count(it.first));
+    heap_dumps_.insert(std::make_pair(it.first, std::move(it.second)));
+  }
   other->heap_dumps_.clear();
 }
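
With the scoped_ptr transfer, a caller now hands ownership of a heap dump to
the ProcessMemoryDump (sketch; "malloc" is an illustrative dump name):

    scoped_ptr<TracedValue> heap_dump(new TracedValue);
    // ... fill |heap_dump| in the expected heap-dump format ...
    pmd->AddHeapDump("malloc", std::move(heap_dump));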
 
@@ -206,10 +270,10 @@
     value->EndDictionary();
   }
 
-  if (allocator_dumps_storage_.size() > 0) {
+  if (allocator_dumps_.size() > 0) {
     value->BeginDictionary("allocators");
-    for (const MemoryAllocatorDump* allocator_dump : allocator_dumps_storage_)
-      allocator_dump->AsValueInto(value);
+    for (const auto& allocator_dump_it : allocator_dumps_)
+      allocator_dump_it.second->AsValueInto(value);
     value->EndDictionary();
   }
 
diff --git a/base/trace_event/process_memory_dump.h b/base/trace_event/process_memory_dump.h
index 5a66402..37c0aa1 100644
--- a/base/trace_event/process_memory_dump.h
+++ b/base/trace_event/process_memory_dump.h
@@ -7,11 +7,10 @@
 
 #include <stddef.h>
 
+#include <unordered_map>
 #include <vector>
 
 #include "base/base_export.h"
-#include "base/containers/hash_tables.h"
-#include "base/containers/small_map.h"
 #include "base/macros.h"
 #include "base/memory/ref_counted.h"
 #include "base/memory/scoped_vector.h"
@@ -24,16 +23,13 @@
 
 // Define COUNT_RESIDENT_BYTES_SUPPORTED if platform supports counting of the
 // resident memory.
-// TODO(crbug.com/542671): COUNT_RESIDENT_BYTES_SUPPORTED is disabled on iOS
-// as it cause memory corruption on iOS 9.0+ devices.
-#if defined(OS_POSIX) && !defined(OS_NACL) && !defined(OS_IOS)
+#if (defined(OS_POSIX) && !defined(OS_NACL)) || defined(OS_WIN)
 #define COUNT_RESIDENT_BYTES_SUPPORTED
 #endif
 
 namespace base {
 namespace trace_event {
 
-class ConvertableToTraceFormat;
 class MemoryDumpManager;
 class MemoryDumpSessionState;
 class TracedValue;
@@ -52,12 +48,17 @@
   // Maps allocator dumps absolute names (allocator_name/heap/subheap) to
   // MemoryAllocatorDump instances.
   using AllocatorDumpsMap =
-      SmallMap<hash_map<std::string, MemoryAllocatorDump*>>;
+      std::unordered_map<std::string, scoped_ptr<MemoryAllocatorDump>>;
 
-  using HeapDumpsMap =
-      SmallMap<hash_map<std::string, scoped_refptr<TracedValue>>>;
+  using HeapDumpsMap = std::unordered_map<std::string, scoped_ptr<TracedValue>>;
 
 #if defined(COUNT_RESIDENT_BYTES_SUPPORTED)
+  // Returns the number of bytes in a kernel memory page. On some platforms the
+  // kernel page size differs from the user page size; the kernel page size
+  // must be used for the resident-bytes calculation. In most cases the two
+  // are the same.
+  static size_t GetSystemPageSize();
+
   // Returns the total bytes resident for a virtual address range, with given
   // |start_address| and |mapped_size|. |mapped_size| is specified in bytes. The
   // value returned is valid only if the given range is currently mmapped by the
@@ -98,6 +99,15 @@
   MemoryAllocatorDump* CreateSharedGlobalAllocatorDump(
       const MemoryAllocatorDumpGuid& guid);
 
+  // Creates a shared MemoryAllocatorDump, as CreateSharedGlobalAllocatorDump
+  // does, but with the WEAK flag set. A weak dump is discarded unless at
+  // least one process creates a non-weak dump with the same GUID via
+  // CreateSharedGlobalAllocatorDump. The WEAK flag does not apply if a
+  // non-weak dump with the same GUID already exists or is created later. All
+  // owners and children of a discarded dump are discarded transitively.
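+  // For example (illustrative): a process that merely references a shared
+  // buffer can create a weak dump for its GUID; the dump survives only if
+  // the owning process creates the non-weak dump for the same GUID.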
+  MemoryAllocatorDump* CreateWeakSharedGlobalAllocatorDump(
+      const MemoryAllocatorDumpGuid& guid);
+
   // Looks up a shared MemoryAllocatorDump given its guid.
   MemoryAllocatorDump* GetSharedGlobalAllocatorDump(
       const MemoryAllocatorDumpGuid& guid) const;
@@ -109,7 +119,7 @@
   // must have the correct format. |trace_event::HeapDumper| will generate such
   // a value from a |trace_event::AllocationRegister|.
   void AddHeapDump(const std::string& absolute_name,
-                   scoped_refptr<TracedValue> heap_dump);
+                   scoped_ptr<TracedValue> heap_dump);
 
   // Adds an ownership relationship between two MemoryAllocatorDump(s) with the
   // semantics: |source| owns |target|, and has the effect of attributing
@@ -161,8 +171,11 @@
   bool has_process_mmaps() const { return has_process_mmaps_; }
   void set_has_process_mmaps() { has_process_mmaps_ = true; }
 
+  const HeapDumpsMap& heap_dumps() const { return heap_dumps_; }
+
  private:
-  void AddAllocatorDumpInternal(MemoryAllocatorDump* mad);
+  MemoryAllocatorDump* AddAllocatorDumpInternal(
+      scoped_ptr<MemoryAllocatorDump> mad);
 
   ProcessMemoryTotals process_totals_;
   bool has_process_totals_;
@@ -173,9 +186,6 @@
   AllocatorDumpsMap allocator_dumps_;
   HeapDumpsMap heap_dumps_;
 
-  // ProcessMemoryDump handles the memory ownership of all its belongings.
-  ScopedVector<MemoryAllocatorDump> allocator_dumps_storage_;
-
   // State shared among all PMDs instances created in a given trace session.
   scoped_refptr<MemoryDumpSessionState> session_state_;
 
diff --git a/base/trace_event/process_memory_dump_unittest.cc b/base/trace_event/process_memory_dump_unittest.cc
index 88984ab..e7fe960 100644
--- a/base/trace_event/process_memory_dump_unittest.cc
+++ b/base/trace_event/process_memory_dump_unittest.cc
@@ -15,6 +15,13 @@
 namespace base {
 namespace trace_event {
 
+namespace {
+TracedValue* GetHeapDump(const ProcessMemoryDump& pmd, const char* name) {
+  auto it = pmd.heap_dumps().find(name);
+  return it == pmd.heap_dumps().end() ? nullptr : it->second.get();
+}
+}  // namespace
+
 TEST(ProcessMemoryDumpTest, Clear) {
   scoped_ptr<ProcessMemoryDump> pmd1(new ProcessMemoryDump(nullptr));
   pmd1->CreateAllocatorDump("mad1");
@@ -30,8 +37,10 @@
   pmd1->AddOwnershipEdge(MemoryAllocatorDumpGuid(42),
                          MemoryAllocatorDumpGuid(4242));
 
-  MemoryAllocatorDumpGuid shared_mad_guid(1);
-  pmd1->CreateSharedGlobalAllocatorDump(shared_mad_guid);
+  MemoryAllocatorDumpGuid shared_mad_guid1(1);
+  MemoryAllocatorDumpGuid shared_mad_guid2(2);
+  pmd1->CreateSharedGlobalAllocatorDump(shared_mad_guid1);
+  pmd1->CreateSharedGlobalAllocatorDump(shared_mad_guid2);
 
   pmd1->Clear();
   ASSERT_TRUE(pmd1->allocator_dumps().empty());
@@ -41,49 +50,73 @@
   ASSERT_FALSE(pmd1->has_process_totals());
   ASSERT_FALSE(pmd1->has_process_mmaps());
   ASSERT_TRUE(pmd1->process_mmaps()->vm_regions().empty());
-  ASSERT_EQ(nullptr, pmd1->GetSharedGlobalAllocatorDump(shared_mad_guid));
+  ASSERT_EQ(nullptr, pmd1->GetSharedGlobalAllocatorDump(shared_mad_guid1));
+  ASSERT_EQ(nullptr, pmd1->GetSharedGlobalAllocatorDump(shared_mad_guid2));
 
   // Check that calling AsValueInto() doesn't cause a crash.
-  scoped_refptr<TracedValue> traced_value(new TracedValue());
+  scoped_ptr<TracedValue> traced_value(new TracedValue);
   pmd1->AsValueInto(traced_value.get());
 
   // Check that the pmd can be reused and behaves as expected.
   auto mad1 = pmd1->CreateAllocatorDump("mad1");
   auto mad3 = pmd1->CreateAllocatorDump("mad3");
-  auto shared_mad = pmd1->CreateSharedGlobalAllocatorDump(shared_mad_guid);
-  ASSERT_EQ(3u, pmd1->allocator_dumps().size());
+  auto shared_mad1 = pmd1->CreateSharedGlobalAllocatorDump(shared_mad_guid1);
+  auto shared_mad2 =
+      pmd1->CreateWeakSharedGlobalAllocatorDump(shared_mad_guid2);
+  ASSERT_EQ(4u, pmd1->allocator_dumps().size());
   ASSERT_EQ(mad1, pmd1->GetAllocatorDump("mad1"));
   ASSERT_EQ(nullptr, pmd1->GetAllocatorDump("mad2"));
   ASSERT_EQ(mad3, pmd1->GetAllocatorDump("mad3"));
-  ASSERT_EQ(shared_mad, pmd1->GetSharedGlobalAllocatorDump(shared_mad_guid));
+  ASSERT_EQ(shared_mad1, pmd1->GetSharedGlobalAllocatorDump(shared_mad_guid1));
+  ASSERT_EQ(MemoryAllocatorDump::Flags::DEFAULT, shared_mad1->flags());
+  ASSERT_EQ(shared_mad2, pmd1->GetSharedGlobalAllocatorDump(shared_mad_guid2));
+  ASSERT_EQ(MemoryAllocatorDump::Flags::WEAK, shared_mad2->flags());
 
-  traced_value = new TracedValue();
+  traced_value.reset(new TracedValue);
   pmd1->AsValueInto(traced_value.get());
 
   pmd1.reset();
 }
 
 TEST(ProcessMemoryDumpTest, TakeAllDumpsFrom) {
-  scoped_refptr<TracedValue> traced_value(new TracedValue());
+  scoped_ptr<TracedValue> traced_value(new TracedValue);
+  TracedValue* heap_dumps_ptr[4];
+  scoped_ptr<TracedValue> heap_dump;
 
   scoped_ptr<ProcessMemoryDump> pmd1(new ProcessMemoryDump(nullptr));
   auto mad1_1 = pmd1->CreateAllocatorDump("pmd1/mad1");
   auto mad1_2 = pmd1->CreateAllocatorDump("pmd1/mad2");
   pmd1->AddOwnershipEdge(mad1_1->guid(), mad1_2->guid());
+  heap_dump.reset(new TracedValue);
+  heap_dumps_ptr[0] = heap_dump.get();
+  pmd1->AddHeapDump("pmd1/heap_dump1", std::move(heap_dump));
+  heap_dump.reset(new TracedValue);
+  heap_dumps_ptr[1] = heap_dump.get();
+  pmd1->AddHeapDump("pmd1/heap_dump2", std::move(heap_dump));
 
   scoped_ptr<ProcessMemoryDump> pmd2(new ProcessMemoryDump(nullptr));
   auto mad2_1 = pmd2->CreateAllocatorDump("pmd2/mad1");
   auto mad2_2 = pmd2->CreateAllocatorDump("pmd2/mad2");
-  pmd1->AddOwnershipEdge(mad2_1->guid(), mad2_2->guid());
+  pmd2->AddOwnershipEdge(mad2_1->guid(), mad2_2->guid());
+  heap_dump.reset(new TracedValue);
+  heap_dumps_ptr[2] = heap_dump.get();
+  pmd2->AddHeapDump("pmd2/heap_dump1", std::move(heap_dump));
+  heap_dump.reset(new TracedValue);
+  heap_dumps_ptr[3] = heap_dump.get();
+  pmd2->AddHeapDump("pmd2/heap_dump2", std::move(heap_dump));
 
-  MemoryAllocatorDumpGuid shared_mad_guid(1);
-  auto shared_mad = pmd2->CreateSharedGlobalAllocatorDump(shared_mad_guid);
+  MemoryAllocatorDumpGuid shared_mad_guid1(1);
+  MemoryAllocatorDumpGuid shared_mad_guid2(2);
+  auto shared_mad1 = pmd2->CreateSharedGlobalAllocatorDump(shared_mad_guid1);
+  auto shared_mad2 =
+      pmd2->CreateWeakSharedGlobalAllocatorDump(shared_mad_guid2);
 
   pmd1->TakeAllDumpsFrom(pmd2.get());
 
   // Make sure that pmd2 is empty but still usable after it has been emptied.
   ASSERT_TRUE(pmd2->allocator_dumps().empty());
   ASSERT_TRUE(pmd2->allocator_dumps_edges().empty());
+  ASSERT_TRUE(pmd2->heap_dumps().empty());
   pmd2->CreateAllocatorDump("pmd2/this_mad_stays_with_pmd2");
   ASSERT_EQ(1u, pmd2->allocator_dumps().size());
   ASSERT_EQ(1u, pmd2->allocator_dumps().count("pmd2/this_mad_stays_with_pmd2"));
@@ -98,16 +131,23 @@
   pmd2.reset();
 
   // Now check that |pmd1| has been effectively merged.
-  ASSERT_EQ(5u, pmd1->allocator_dumps().size());
+  ASSERT_EQ(6u, pmd1->allocator_dumps().size());
   ASSERT_EQ(1u, pmd1->allocator_dumps().count("pmd1/mad1"));
   ASSERT_EQ(1u, pmd1->allocator_dumps().count("pmd1/mad2"));
   ASSERT_EQ(1u, pmd1->allocator_dumps().count("pmd2/mad1"));
   ASSERT_EQ(1u, pmd1->allocator_dumps().count("pmd1/mad2"));
   ASSERT_EQ(2u, pmd1->allocator_dumps_edges().size());
-  ASSERT_EQ(shared_mad, pmd1->GetSharedGlobalAllocatorDump(shared_mad_guid));
+  ASSERT_EQ(shared_mad1, pmd1->GetSharedGlobalAllocatorDump(shared_mad_guid1));
+  ASSERT_EQ(shared_mad2, pmd1->GetSharedGlobalAllocatorDump(shared_mad_guid2));
+  ASSERT_TRUE(MemoryAllocatorDump::Flags::WEAK & shared_mad2->flags());
+  ASSERT_EQ(4u, pmd1->heap_dumps().size());
+  ASSERT_EQ(heap_dumps_ptr[0], GetHeapDump(*pmd1, "pmd1/heap_dump1"));
+  ASSERT_EQ(heap_dumps_ptr[1], GetHeapDump(*pmd1, "pmd1/heap_dump2"));
+  ASSERT_EQ(heap_dumps_ptr[2], GetHeapDump(*pmd1, "pmd2/heap_dump1"));
+  ASSERT_EQ(heap_dumps_ptr[3], GetHeapDump(*pmd1, "pmd2/heap_dump2"));
 
   // Check that calling AsValueInto() doesn't cause a crash.
-  traced_value = new TracedValue();
+  traced_value.reset(new TracedValue);
   pmd1->AsValueInto(traced_value.get());
 
   pmd1.reset();
@@ -151,15 +191,39 @@
   ASSERT_TRUE(found_edge[1]);
 
   // Check that calling AsValueInto() doesn't cause a crash.
-  scoped_refptr<TracedValue> traced_value(new TracedValue());
+  scoped_ptr<TracedValue> traced_value(new TracedValue);
   pmd->AsValueInto(traced_value.get());
 
   pmd.reset();
 }
 
+TEST(ProcessMemoryDumpTest, GlobalAllocatorDumpTest) {
+  scoped_ptr<ProcessMemoryDump> pmd(new ProcessMemoryDump(nullptr));
+  MemoryAllocatorDumpGuid shared_mad_guid(1);
+  auto shared_mad1 = pmd->CreateWeakSharedGlobalAllocatorDump(shared_mad_guid);
+  ASSERT_EQ(shared_mad_guid, shared_mad1->guid());
+  ASSERT_EQ(MemoryAllocatorDump::Flags::WEAK, shared_mad1->flags());
+
+  auto shared_mad2 = pmd->GetSharedGlobalAllocatorDump(shared_mad_guid);
+  ASSERT_EQ(shared_mad1, shared_mad2);
+  ASSERT_EQ(MemoryAllocatorDump::Flags::WEAK, shared_mad1->flags());
+
+  auto shared_mad3 = pmd->CreateWeakSharedGlobalAllocatorDump(shared_mad_guid);
+  ASSERT_EQ(shared_mad1, shared_mad3);
+  ASSERT_EQ(MemoryAllocatorDump::Flags::WEAK, shared_mad1->flags());
+
+  auto shared_mad4 = pmd->CreateSharedGlobalAllocatorDump(shared_mad_guid);
+  ASSERT_EQ(shared_mad1, shared_mad4);
+  ASSERT_EQ(MemoryAllocatorDump::Flags::DEFAULT, shared_mad1->flags());
+
+  auto shared_mad5 = pmd->CreateWeakSharedGlobalAllocatorDump(shared_mad_guid);
+  ASSERT_EQ(shared_mad1, shared_mad5);
+  ASSERT_EQ(MemoryAllocatorDump::Flags::DEFAULT, shared_mad1->flags());
+}
+
 #if defined(COUNT_RESIDENT_BYTES_SUPPORTED)
 TEST(ProcessMemoryDumpTest, CountResidentBytes) {
-  const size_t page_size = base::GetPageSize();
+  const size_t page_size = ProcessMemoryDump::GetSystemPageSize();
 
   // Allocate few page of dirty memory and check if it is resident.
   const size_t size1 = 5 * page_size;
@@ -169,8 +233,8 @@
   size_t res1 = ProcessMemoryDump::CountResidentBytes(memory1.get(), size1);
   ASSERT_EQ(res1, size1);
 
-  // Allocate a large memory segment (>32Mib).
-  const size_t kVeryLargeMemorySize = 34 * 1024 * 1024;
+  // Allocate a large memory segment (> 8 MiB) spanning multiple chunks.
+  const size_t kVeryLargeMemorySize = 15 * 1024 * 1024;
   scoped_ptr<char, base::AlignedFreeDeleter> memory2(
       static_cast<char*>(base::AlignedAlloc(kVeryLargeMemorySize, page_size)));
   memset(memory2.get(), 0, kVeryLargeMemorySize);
diff --git a/base/trace_event/process_memory_maps.cc b/base/trace_event/process_memory_maps.cc
index 31083a8..a121239 100644
--- a/base/trace_event/process_memory_maps.cc
+++ b/base/trace_event/process_memory_maps.cc
@@ -15,6 +15,7 @@
 const uint32_t ProcessMemoryMaps::VMRegion::kProtectionFlagsRead = 4;
 const uint32_t ProcessMemoryMaps::VMRegion::kProtectionFlagsWrite = 2;
 const uint32_t ProcessMemoryMaps::VMRegion::kProtectionFlagsExec = 1;
+const uint32_t ProcessMemoryMaps::VMRegion::kProtectionFlagsMayshare = 128;
 
 ProcessMemoryMaps::VMRegion::VMRegion()
     : start_address(0),
@@ -28,6 +29,8 @@
       byte_stats_proportional_resident(0) {
 }
 
+ProcessMemoryMaps::VMRegion::VMRegion(const VMRegion& other) = default;
+
 ProcessMemoryMaps::ProcessMemoryMaps() {
 }
 
diff --git a/base/trace_event/process_memory_maps.h b/base/trace_event/process_memory_maps.h
index 3dfcc0c..6a73674 100644
--- a/base/trace_event/process_memory_maps.h
+++ b/base/trace_event/process_memory_maps.h
@@ -25,8 +25,10 @@
     static const uint32_t kProtectionFlagsRead;
     static const uint32_t kProtectionFlagsWrite;
     static const uint32_t kProtectionFlagsExec;
+    static const uint32_t kProtectionFlagsMayshare;
 
     VMRegion();
+    VMRegion(const VMRegion& other);
 
     uint64_t start_address;
     uint64_t size_in_bytes;
diff --git a/base/trace_event/process_memory_maps_dump_provider.cc b/base/trace_event/process_memory_maps_dump_provider.cc
deleted file mode 100644
index 4c3959f..0000000
--- a/base/trace_event/process_memory_maps_dump_provider.cc
+++ /dev/null
@@ -1,176 +0,0 @@
-// Copyright 2015 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "base/trace_event/process_memory_maps_dump_provider.h"
-
-#include <stdint.h>
-
-#include "base/files/scoped_file.h"
-#include "base/format_macros.h"
-#include "base/logging.h"
-#include "base/strings/string_util.h"
-#include "base/trace_event/process_memory_dump.h"
-#include "base/trace_event/process_memory_maps.h"
-
-namespace base {
-namespace trace_event {
-
-// static
-FILE* ProcessMemoryMapsDumpProvider::proc_smaps_for_testing = nullptr;
-
-namespace {
-
-const uint32_t kMaxLineSize = 4096;
-
-bool ParseSmapsHeader(const char* header_line,
-                      ProcessMemoryMaps::VMRegion* region) {
-  // e.g., "00400000-00421000 r-xp 00000000 fc:01 1234  /foo.so\n"
-  bool res = true;  // Whether this region should be appended or skipped.
-  uint64_t end_addr = 0;
-  char protection_flags[5] = {0};
-  char mapped_file[kMaxLineSize];
-
-  if (sscanf(header_line, "%" SCNx64 "-%" SCNx64 " %4c %*s %*s %*s%4095[^\n]\n",
-             &region->start_address, &end_addr, protection_flags,
-             mapped_file) != 4)
-    return false;
-
-  if (end_addr > region->start_address) {
-    region->size_in_bytes = end_addr - region->start_address;
-  } else {
-    // This is not just paranoia, it can actually happen (See crbug.com/461237).
-    region->size_in_bytes = 0;
-    res = false;
-  }
-
-  region->protection_flags = 0;
-  if (protection_flags[0] == 'r') {
-    region->protection_flags |=
-        ProcessMemoryMaps::VMRegion::kProtectionFlagsRead;
-  }
-  if (protection_flags[1] == 'w') {
-    region->protection_flags |=
-        ProcessMemoryMaps::VMRegion::kProtectionFlagsWrite;
-  }
-  if (protection_flags[2] == 'x') {
-    region->protection_flags |=
-        ProcessMemoryMaps::VMRegion::kProtectionFlagsExec;
-  }
-
-  region->mapped_file = mapped_file;
-  TrimWhitespaceASCII(region->mapped_file, TRIM_ALL, &region->mapped_file);
-
-  return res;
-}
-
-uint64_t ReadCounterBytes(char* counter_line) {
-  uint64_t counter_value = 0;
-  int res = sscanf(counter_line, "%*s %" SCNu64 " kB", &counter_value);
-  DCHECK_EQ(1, res);
-  return counter_value * 1024;
-}
-
-uint32_t ParseSmapsCounter(char* counter_line,
-                           ProcessMemoryMaps::VMRegion* region) {
-  // A smaps counter lines looks as follows: "RSS:  0 Kb\n"
-  uint32_t res = 1;
-  char counter_name[20];
-  int did_read = sscanf(counter_line, "%19[^\n ]", counter_name);
-  DCHECK_EQ(1, did_read);
-
-  if (strcmp(counter_name, "Pss:") == 0) {
-    region->byte_stats_proportional_resident = ReadCounterBytes(counter_line);
-  } else if (strcmp(counter_name, "Private_Dirty:") == 0) {
-    region->byte_stats_private_dirty_resident = ReadCounterBytes(counter_line);
-  } else if (strcmp(counter_name, "Private_Clean:") == 0) {
-    region->byte_stats_private_clean_resident = ReadCounterBytes(counter_line);
-  } else if (strcmp(counter_name, "Shared_Dirty:") == 0) {
-    region->byte_stats_shared_dirty_resident = ReadCounterBytes(counter_line);
-  } else if (strcmp(counter_name, "Shared_Clean:") == 0) {
-    region->byte_stats_shared_clean_resident = ReadCounterBytes(counter_line);
-  } else if (strcmp(counter_name, "Swap:") == 0) {
-    region->byte_stats_swapped = ReadCounterBytes(counter_line);
-  } else {
-    res = 0;
-  }
-
-  return res;
-}
-
-uint32_t ReadLinuxProcSmapsFile(FILE* smaps_file, ProcessMemoryMaps* pmm) {
-  if (!smaps_file)
-    return 0;
-
-  fseek(smaps_file, 0, SEEK_SET);
-
-  char line[kMaxLineSize];
-  const uint32_t kNumExpectedCountersPerRegion = 6;
-  uint32_t counters_parsed_for_current_region = 0;
-  uint32_t num_valid_regions = 0;
-  ProcessMemoryMaps::VMRegion region;
-  bool should_add_current_region = false;
-  for (;;) {
-    line[0] = '\0';
-    if (fgets(line, kMaxLineSize, smaps_file) == nullptr)
-      break;
-    DCHECK_GT(strlen(line), 0u);
-    if (isxdigit(line[0]) && !isupper(line[0])) {
-      region = ProcessMemoryMaps::VMRegion();
-      counters_parsed_for_current_region = 0;
-      should_add_current_region = ParseSmapsHeader(line, &region);
-    } else {
-      counters_parsed_for_current_region += ParseSmapsCounter(line, &region);
-      DCHECK_LE(counters_parsed_for_current_region,
-                kNumExpectedCountersPerRegion);
-      if (counters_parsed_for_current_region == kNumExpectedCountersPerRegion) {
-        if (should_add_current_region) {
-          pmm->AddVMRegion(region);
-          ++num_valid_regions;
-          should_add_current_region = false;
-        }
-      }
-    }
-  }
-  return num_valid_regions;
-}
-
-}  // namespace
-
-// static
-ProcessMemoryMapsDumpProvider* ProcessMemoryMapsDumpProvider::GetInstance() {
-  return Singleton<ProcessMemoryMapsDumpProvider,
-                   LeakySingletonTraits<ProcessMemoryMapsDumpProvider>>::get();
-}
-
-ProcessMemoryMapsDumpProvider::ProcessMemoryMapsDumpProvider() {
-}
-
-ProcessMemoryMapsDumpProvider::~ProcessMemoryMapsDumpProvider() {
-}
-
-// Called at trace dump point time. Creates a snapshot of the memory maps for
-// the current process.
-bool ProcessMemoryMapsDumpProvider::OnMemoryDump(const MemoryDumpArgs& args,
-                                                 ProcessMemoryDump* pmd) {
-  // Snapshot of memory maps is not taken for light dump requests.
-  if (args.level_of_detail == MemoryDumpLevelOfDetail::LIGHT)
-    return true;
-
-  uint32_t res = 0;
-  if (UNLIKELY(proc_smaps_for_testing)) {
-    res = ReadLinuxProcSmapsFile(proc_smaps_for_testing, pmd->process_mmaps());
-  } else {
-    ScopedFILE smaps_file(fopen("/proc/self/smaps", "r"));
-    res = ReadLinuxProcSmapsFile(smaps_file.get(), pmd->process_mmaps());
-  }
-
-  if (res > 0) {
-    pmd->set_has_process_mmaps();
-    return true;
-  }
-  return false;
-}
-
-}  // namespace trace_event
-}  // namespace base
diff --git a/base/trace_event/process_memory_maps_dump_provider.h b/base/trace_event/process_memory_maps_dump_provider.h
index 9d8b8b9..e69de29 100644
--- a/base/trace_event/process_memory_maps_dump_provider.h
+++ b/base/trace_event/process_memory_maps_dump_provider.h
@@ -1,41 +0,0 @@
-// Copyright 2015 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef BASE_TRACE_EVENT_PROCESS_MEMORY_MAPS_DUMP_PROVIDER_H_
-#define BASE_TRACE_EVENT_PROCESS_MEMORY_MAPS_DUMP_PROVIDER_H_
-
-#include "base/gtest_prod_util.h"
-#include "base/macros.h"
-#include "base/memory/singleton.h"
-#include "base/trace_event/memory_dump_provider.h"
-#include "build/build_config.h"
-
-namespace base {
-namespace trace_event {
-
-// Dump provider which collects process-wide memory stats.
-class BASE_EXPORT ProcessMemoryMapsDumpProvider : public MemoryDumpProvider {
- public:
-  static ProcessMemoryMapsDumpProvider* GetInstance();
-
-  // MemoryDumpProvider implementation.
-  bool OnMemoryDump(const MemoryDumpArgs& args,
-                    ProcessMemoryDump* pmd) override;
-
- private:
-  friend struct DefaultSingletonTraits<ProcessMemoryMapsDumpProvider>;
-  FRIEND_TEST_ALL_PREFIXES(ProcessMemoryMapsDumpProviderTest, ParseProcSmaps);
-
-  static FILE* proc_smaps_for_testing;
-
-  ProcessMemoryMapsDumpProvider();
-  ~ProcessMemoryMapsDumpProvider() override;
-
-  DISALLOW_COPY_AND_ASSIGN(ProcessMemoryMapsDumpProvider);
-};
-
-}  // namespace trace_event
-}  // namespace base
-
-#endif  // BASE_TRACE_EVENT_PROCESS_MEMORY_MAPS_DUMP_PROVIDER_H_
diff --git a/base/trace_event/process_memory_maps_dump_provider_unittest.cc b/base/trace_event/process_memory_maps_dump_provider_unittest.cc
deleted file mode 100644
index 624f96f..0000000
--- a/base/trace_event/process_memory_maps_dump_provider_unittest.cc
+++ /dev/null
@@ -1,190 +0,0 @@
-// Copyright 2015 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "base/trace_event/process_memory_maps_dump_provider.h"
-
-#include <stdint.h>
-
-#include "base/files/file_util.h"
-#include "base/trace_event/process_memory_dump.h"
-#include "base/trace_event/process_memory_maps.h"
-#include "base/trace_event/trace_event_argument.h"
-#include "testing/gtest/include/gtest/gtest.h"
-
-namespace base {
-namespace trace_event {
-
-namespace {
-const char kTestSmaps1[] =
-    "00400000-004be000 r-xp 00000000 fc:01 1234              /file/1\n"
-    "Size:                760 kB\n"
-    "Rss:                 296 kB\n"
-    "Pss:                 162 kB\n"
-    "Shared_Clean:        228 kB\n"
-    "Shared_Dirty:          0 kB\n"
-    "Private_Clean:         0 kB\n"
-    "Private_Dirty:        68 kB\n"
-    "Referenced:          296 kB\n"
-    "Anonymous:            68 kB\n"
-    "AnonHugePages:         0 kB\n"
-    "Swap:                  4 kB\n"
-    "KernelPageSize:        4 kB\n"
-    "MMUPageSize:           4 kB\n"
-    "Locked:                0 kB\n"
-    "VmFlags: rd ex mr mw me dw sd\n"
-    "ff000000-ff800000 -w-p 00001080 fc:01 0            /file/name with space\n"
-    "Size:                  0 kB\n"
-    "Rss:                 192 kB\n"
-    "Pss:                 128 kB\n"
-    "Shared_Clean:        120 kB\n"
-    "Shared_Dirty:          4 kB\n"
-    "Private_Clean:        60 kB\n"
-    "Private_Dirty:         8 kB\n"
-    "Referenced:          296 kB\n"
-    "Anonymous:             0 kB\n"
-    "AnonHugePages:         0 kB\n"
-    "Swap:                  0 kB\n"
-    "KernelPageSize:        4 kB\n"
-    "MMUPageSize:           4 kB\n"
-    "Locked:                0 kB\n"
-    "VmFlags: rd ex mr mw me dw sd";
-
-const char kTestSmaps2[] =
-    // An invalid region, with zero size and overlapping with the last one
-    // (See crbug.com/461237).
-    "7fe7ce79c000-7fe7ce79c000 ---p 00000000 00:00 0 \n"
-    "Size:                  4 kB\n"
-    "Rss:                   0 kB\n"
-    "Pss:                   0 kB\n"
-    "Shared_Clean:          0 kB\n"
-    "Shared_Dirty:          0 kB\n"
-    "Private_Clean:         0 kB\n"
-    "Private_Dirty:         0 kB\n"
-    "Referenced:            0 kB\n"
-    "Anonymous:             0 kB\n"
-    "AnonHugePages:         0 kB\n"
-    "Swap:                  0 kB\n"
-    "KernelPageSize:        4 kB\n"
-    "MMUPageSize:           4 kB\n"
-    "Locked:                0 kB\n"
-    "VmFlags: rd ex mr mw me dw sd\n"
-    // A invalid region with its range going backwards.
-    "00400000-00200000 ---p 00000000 00:00 0 \n"
-    "Size:                  4 kB\n"
-    "Rss:                   0 kB\n"
-    "Pss:                   0 kB\n"
-    "Shared_Clean:          0 kB\n"
-    "Shared_Dirty:          0 kB\n"
-    "Private_Clean:         0 kB\n"
-    "Private_Dirty:         0 kB\n"
-    "Referenced:            0 kB\n"
-    "Anonymous:             0 kB\n"
-    "AnonHugePages:         0 kB\n"
-    "Swap:                  0 kB\n"
-    "KernelPageSize:        4 kB\n"
-    "MMUPageSize:           4 kB\n"
-    "Locked:                0 kB\n"
-    "VmFlags: rd ex mr mw me dw sd\n"
-    // A good anonymous region at the end.
-    "7fe7ce79c000-7fe7ce7a8000 ---p 00000000 00:00 0 \n"
-    "Size:                 48 kB\n"
-    "Rss:                  40 kB\n"
-    "Pss:                  32 kB\n"
-    "Shared_Clean:         16 kB\n"
-    "Shared_Dirty:         12 kB\n"
-    "Private_Clean:         8 kB\n"
-    "Private_Dirty:         4 kB\n"
-    "Referenced:           40 kB\n"
-    "Anonymous:            16 kB\n"
-    "AnonHugePages:         0 kB\n"
-    "Swap:                  0 kB\n"
-    "KernelPageSize:        4 kB\n"
-    "MMUPageSize:           4 kB\n"
-    "Locked:                0 kB\n"
-    "VmFlags: rd wr mr mw me ac sd\n";
-
-void CreateAndSetSmapsFileForTesting(const char* smaps_string,
-                                     ScopedFILE& file) {
-  FilePath temp_path;
-  FILE* temp_file = CreateAndOpenTemporaryFile(&temp_path);
-  file.reset(temp_file);
-  ASSERT_TRUE(temp_file);
-
-  ASSERT_TRUE(base::WriteFileDescriptor(fileno(temp_file), smaps_string,
-                                        strlen(smaps_string)));
-}
-
-}  // namespace
-
-TEST(ProcessMemoryMapsDumpProviderTest, ParseProcSmaps) {
-  const uint32_t kProtR = ProcessMemoryMaps::VMRegion::kProtectionFlagsRead;
-  const uint32_t kProtW = ProcessMemoryMaps::VMRegion::kProtectionFlagsWrite;
-  const uint32_t kProtX = ProcessMemoryMaps::VMRegion::kProtectionFlagsExec;
-  const MemoryDumpArgs dump_args = {MemoryDumpLevelOfDetail::DETAILED};
-
-  auto pmmdp = ProcessMemoryMapsDumpProvider::GetInstance();
-
-  // Emulate an empty /proc/self/smaps.
-  ProcessMemoryDump pmd_invalid(nullptr /* session_state */);
-  ScopedFILE empty_file(OpenFile(FilePath("/dev/null"), "r"));
-  ASSERT_TRUE(empty_file.get());
-  ProcessMemoryMapsDumpProvider::proc_smaps_for_testing = empty_file.get();
-  pmmdp->OnMemoryDump(dump_args, &pmd_invalid);
-  ASSERT_FALSE(pmd_invalid.has_process_mmaps());
-
-  // Parse the 1st smaps file.
-  ProcessMemoryDump pmd_1(nullptr /* session_state */);
-  ScopedFILE temp_file1;
-  CreateAndSetSmapsFileForTesting(kTestSmaps1, temp_file1);
-  ProcessMemoryMapsDumpProvider::proc_smaps_for_testing = temp_file1.get();
-  pmmdp->OnMemoryDump(dump_args, &pmd_1);
-  ASSERT_TRUE(pmd_1.has_process_mmaps());
-  const auto& regions_1 = pmd_1.process_mmaps()->vm_regions();
-  ASSERT_EQ(2UL, regions_1.size());
-
-  EXPECT_EQ(0x00400000UL, regions_1[0].start_address);
-  EXPECT_EQ(0x004be000UL - 0x00400000UL, regions_1[0].size_in_bytes);
-  EXPECT_EQ(kProtR | kProtX, regions_1[0].protection_flags);
-  EXPECT_EQ("/file/1", regions_1[0].mapped_file);
-  EXPECT_EQ(162 * 1024UL, regions_1[0].byte_stats_proportional_resident);
-  EXPECT_EQ(228 * 1024UL, regions_1[0].byte_stats_shared_clean_resident);
-  EXPECT_EQ(0UL, regions_1[0].byte_stats_shared_dirty_resident);
-  EXPECT_EQ(0UL, regions_1[0].byte_stats_private_clean_resident);
-  EXPECT_EQ(68 * 1024UL, regions_1[0].byte_stats_private_dirty_resident);
-  EXPECT_EQ(4 * 1024UL, regions_1[0].byte_stats_swapped);
-
-  EXPECT_EQ(0xff000000UL, regions_1[1].start_address);
-  EXPECT_EQ(0xff800000UL - 0xff000000UL, regions_1[1].size_in_bytes);
-  EXPECT_EQ(kProtW, regions_1[1].protection_flags);
-  EXPECT_EQ("/file/name with space", regions_1[1].mapped_file);
-  EXPECT_EQ(128 * 1024UL, regions_1[1].byte_stats_proportional_resident);
-  EXPECT_EQ(120 * 1024UL, regions_1[1].byte_stats_shared_clean_resident);
-  EXPECT_EQ(4 * 1024UL, regions_1[1].byte_stats_shared_dirty_resident);
-  EXPECT_EQ(60 * 1024UL, regions_1[1].byte_stats_private_clean_resident);
-  EXPECT_EQ(8 * 1024UL, regions_1[1].byte_stats_private_dirty_resident);
-  EXPECT_EQ(0 * 1024UL, regions_1[1].byte_stats_swapped);
-
-  // Parse the 2nd smaps file.
-  ProcessMemoryDump pmd_2(nullptr /* session_state */);
-  ScopedFILE temp_file2;
-  CreateAndSetSmapsFileForTesting(kTestSmaps2, temp_file2);
-  ProcessMemoryMapsDumpProvider::proc_smaps_for_testing = temp_file2.get();
-  pmmdp->OnMemoryDump(dump_args, &pmd_2);
-  ASSERT_TRUE(pmd_2.has_process_mmaps());
-  const auto& regions_2 = pmd_2.process_mmaps()->vm_regions();
-  ASSERT_EQ(1UL, regions_2.size());
-  EXPECT_EQ(0x7fe7ce79c000UL, regions_2[0].start_address);
-  EXPECT_EQ(0x7fe7ce7a8000UL - 0x7fe7ce79c000UL, regions_2[0].size_in_bytes);
-  EXPECT_EQ(0U, regions_2[0].protection_flags);
-  EXPECT_EQ("", regions_2[0].mapped_file);
-  EXPECT_EQ(32 * 1024UL, regions_2[0].byte_stats_proportional_resident);
-  EXPECT_EQ(16 * 1024UL, regions_2[0].byte_stats_shared_clean_resident);
-  EXPECT_EQ(12 * 1024UL, regions_2[0].byte_stats_shared_dirty_resident);
-  EXPECT_EQ(8 * 1024UL, regions_2[0].byte_stats_private_clean_resident);
-  EXPECT_EQ(4 * 1024UL, regions_2[0].byte_stats_private_dirty_resident);
-  EXPECT_EQ(0 * 1024UL, regions_2[0].byte_stats_swapped);
-}
-
-}  // namespace trace_event
-}  // namespace base
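
Note: the deleted test above exercised the provider's /proc/<pid>/smaps parser
against canned fixtures (kTestSmaps1/kTestSmaps2). As a rough illustration of
the header-line format those fixtures encode, a minimal standalone parser might
look like the following; the struct and function names are hypothetical, and
the field layout is assumed from the kernel's documented smaps format:

    #include <cstdio>

    struct SmapsRegion {
      unsigned long long start = 0;
      unsigned long long end = 0;
      char perms[5] = {0};    // e.g. "r-xp"
      char path[256] = {0};   // may contain spaces, may be empty
    };

    // Parses a header line such as
    // "00400000-004be000 r-xp 00000000 fc:01 1234  /file/1".
    bool ParseSmapsHeader(const char* line, SmapsRegion* region) {
      int fields = sscanf(line, "%llx-%llx %4s %*s %*s %*s %255[^\n]",
                          &region->start, &region->end, region->perms,
                          region->path);
      return fields >= 3;  // Anonymous mappings have no path field.
    }

The space before %255[^\n] makes sscanf skip the padding between the inode and
the path, which is how names like "/file/name with space" survive intact.
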
diff --git a/base/trace_event/process_memory_totals_dump_provider.cc b/base/trace_event/process_memory_totals_dump_provider.cc
deleted file mode 100644
index 917dcf0..0000000
--- a/base/trace_event/process_memory_totals_dump_provider.cc
+++ /dev/null
@@ -1,93 +0,0 @@
-// Copyright 2015 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "base/trace_event/process_memory_totals_dump_provider.h"
-
-#include <stddef.h>
-
-#include "base/process/process_metrics.h"
-#include "base/trace_event/process_memory_dump.h"
-#include "base/trace_event/process_memory_totals.h"
-#include "build/build_config.h"
-
-#if defined(OS_LINUX) || defined(OS_ANDROID)
-#include <fcntl.h>
-
-#include "base/files/file_util.h"
-
-namespace {
-bool kernel_supports_rss_peak_reset = true;
-const char kClearPeakRssCommand[] = "5";
-}
-#endif
-
-namespace base {
-namespace trace_event {
-
-// static
-uint64_t ProcessMemoryTotalsDumpProvider::rss_bytes_for_testing = 0;
-
-// static
-ProcessMemoryTotalsDumpProvider*
-ProcessMemoryTotalsDumpProvider::GetInstance() {
-  return Singleton<
-      ProcessMemoryTotalsDumpProvider,
-      LeakySingletonTraits<ProcessMemoryTotalsDumpProvider>>::get();
-}
-
-ProcessMemoryTotalsDumpProvider::ProcessMemoryTotalsDumpProvider()
-    : process_metrics_(ProcessMetrics::CreateCurrentProcessMetrics()) {}
-
-ProcessMemoryTotalsDumpProvider::~ProcessMemoryTotalsDumpProvider() {
-}
-
-// Called at trace dump time. Creates a snapshot of the memory counters for
-// the current process.
-bool ProcessMemoryTotalsDumpProvider::OnMemoryDump(
-    const MemoryDumpArgs& /* args */,
-    ProcessMemoryDump* pmd) {
-  const uint64_t rss_bytes = rss_bytes_for_testing
-                                 ? rss_bytes_for_testing
-                                 : process_metrics_->GetWorkingSetSize();
-
-  uint64_t peak_rss_bytes = 0;
-
-#if !defined(OS_IOS)
-  peak_rss_bytes = process_metrics_->GetPeakWorkingSetSize();
-#if defined(OS_LINUX) || defined(OS_ANDROID)
-  if (kernel_supports_rss_peak_reset) {
-    // TODO(ssid): Fix crbug.com/461788 to write to the file from sandboxed
-    // processes.
-    int clear_refs_fd = open("/proc/self/clear_refs", O_WRONLY);
-    if (clear_refs_fd > 0 &&
-        WriteFileDescriptor(clear_refs_fd, kClearPeakRssCommand,
-                            sizeof(kClearPeakRssCommand))) {
-      pmd->process_totals()->set_is_peak_rss_resetable(true);
-    } else {
-      kernel_supports_rss_peak_reset = false;
-    }
-    close(clear_refs_fd);
-  }
-#elif defined(OS_MACOSX)
-  size_t private_bytes;
-  bool res = process_metrics_->GetMemoryBytes(&private_bytes,
-                                              nullptr /* shared_bytes */);
-  if (res) {
-    pmd->process_totals()->SetExtraFieldInBytes("private_bytes", private_bytes);
-  }
-#endif  // defined(OS_LINUX) || defined(OS_ANDROID)
-#endif  // !defined(OS_IOS)
-
-  if (rss_bytes > 0) {
-    pmd->process_totals()->set_resident_set_bytes(rss_bytes);
-    pmd->process_totals()->set_peak_resident_set_bytes(peak_rss_bytes);
-    pmd->set_has_process_totals();
-    return true;
-  }
-
-  return false;
-}
-
-}  // namespace trace_event
-}  // namespace base
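
Note: the most platform-specific part of the deleted provider is the peak-RSS
reset. A minimal standalone sketch of the same mechanism, assuming a Linux
4.0+ kernel where writing "5" to /proc/self/clear_refs resets the peak
resident-set counter (VmHWM):

    #include <fcntl.h>
    #include <unistd.h>

    bool ResetPeakRss() {
      int fd = open("/proc/self/clear_refs", O_WRONLY);
      if (fd < 0)
        return false;  // Older kernels or sandboxed processes land here.
      // Write only the command character; unlike the deleted code's
      // sizeof(kClearPeakRssCommand), this excludes the trailing '\0'.
      bool ok = write(fd, "5", 1) == 1;
      close(fd);
      return ok;
    }
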
diff --git a/base/trace_event/process_memory_totals_dump_provider.h b/base/trace_event/process_memory_totals_dump_provider.h
deleted file mode 100644
index d9573d3..0000000
--- a/base/trace_event/process_memory_totals_dump_provider.h
+++ /dev/null
@@ -1,48 +0,0 @@
-// Copyright 2015 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef BASE_TRACE_EVENT_PROCESS_MEMORY_TOTALS_DUMP_PROVIDER_H_
-#define BASE_TRACE_EVENT_PROCESS_MEMORY_TOTALS_DUMP_PROVIDER_H_
-
-#include <stdint.h>
-
-#include "base/gtest_prod_util.h"
-#include "base/macros.h"
-#include "base/memory/scoped_ptr.h"
-#include "base/memory/singleton.h"
-#include "base/trace_event/memory_dump_provider.h"
-
-namespace base {
-
-class ProcessMetrics;
-
-namespace trace_event {
-
-// Dump provider which collects process-wide memory stats.
-class BASE_EXPORT ProcessMemoryTotalsDumpProvider : public MemoryDumpProvider {
- public:
-  static ProcessMemoryTotalsDumpProvider* GetInstance();
-
-  // MemoryDumpProvider implementation.
-  bool OnMemoryDump(const MemoryDumpArgs& args,
-                    ProcessMemoryDump* pmd) override;
-
- private:
-  friend struct DefaultSingletonTraits<ProcessMemoryTotalsDumpProvider>;
-  FRIEND_TEST_ALL_PREFIXES(ProcessMemoryTotalsDumpProviderTest, DumpRSS);
-
-  static uint64_t rss_bytes_for_testing;
-
-  ProcessMemoryTotalsDumpProvider();
-  ~ProcessMemoryTotalsDumpProvider() override;
-
-  scoped_ptr<ProcessMetrics> process_metrics_;
-
-  DISALLOW_COPY_AND_ASSIGN(ProcessMemoryTotalsDumpProvider);
-};
-
-}  // namespace trace_event
-}  // namespace base
-
-#endif  // BASE_TRACE_EVENT_PROCESS_MEMORY_TOTALS_DUMP_PROVIDER_H_
diff --git a/base/trace_event/process_memory_totals_dump_provider_unittest.cc b/base/trace_event/process_memory_totals_dump_provider_unittest.cc
deleted file mode 100644
index d3f517e..0000000
--- a/base/trace_event/process_memory_totals_dump_provider_unittest.cc
+++ /dev/null
@@ -1,48 +0,0 @@
-// Copyright 2015 The Chromium Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "base/trace_event/process_memory_totals_dump_provider.h"
-
-#include <stddef.h>
-#include <stdint.h>
-
-#include "base/trace_event/process_memory_dump.h"
-#include "base/trace_event/process_memory_totals.h"
-#include "testing/gtest/include/gtest/gtest.h"
-
-namespace base {
-namespace trace_event {
-
-TEST(ProcessMemoryTotalsDumpProviderTest, DumpRSS) {
-  const MemoryDumpArgs high_detail_args = {MemoryDumpLevelOfDetail::DETAILED};
-  auto pmtdp = ProcessMemoryTotalsDumpProvider::GetInstance();
-  scoped_ptr<ProcessMemoryDump> pmd_before(new ProcessMemoryDump(nullptr));
-  scoped_ptr<ProcessMemoryDump> pmd_after(new ProcessMemoryDump(nullptr));
-
-  ProcessMemoryTotalsDumpProvider::rss_bytes_for_testing = 1024;
-  pmtdp->OnMemoryDump(high_detail_args, pmd_before.get());
-
-  // Pretend that the RSS of the process increased by 1M.
-  const size_t kAllocSize = 1048576;
-  ProcessMemoryTotalsDumpProvider::rss_bytes_for_testing += kAllocSize;
-
-  pmtdp->OnMemoryDump(high_detail_args, pmd_after.get());
-
-  ProcessMemoryTotalsDumpProvider::rss_bytes_for_testing = 0;
-
-  ASSERT_TRUE(pmd_before->has_process_totals());
-  ASSERT_TRUE(pmd_after->has_process_totals());
-
-  const uint64_t rss_before =
-      pmd_before->process_totals()->resident_set_bytes();
-  const uint64_t rss_after = pmd_after->process_totals()->resident_set_bytes();
-
-  EXPECT_NE(0U, rss_before);
-  EXPECT_NE(0U, rss_after);
-
-  EXPECT_EQ(rss_after - rss_before, kAllocSize);
-}
-
-}  // namespace trace_event
-}  // namespace base
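
Note: the deleted DumpRSS test worked by poking the provider's static
rss_bytes_for_testing seam rather than allocating real memory. The same
pattern, reduced to a sketch with hypothetical names:

    #include <cstdint>

    class MetricsSource {
     public:
      // 0 means "use the real platform value"; tests set this directly.
      static uint64_t rss_bytes_for_testing;

      uint64_t GetRssBytes() {
        return rss_bytes_for_testing ? rss_bytes_for_testing
                                     : ReadPlatformRss();
      }

     private:
      uint64_t ReadPlatformRss() { return 0; }  // Platform code elided.
    };

    uint64_t MetricsSource::rss_bytes_for_testing = 0;
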
diff --git a/base/trace_event/trace_buffer.cc b/base/trace_event/trace_buffer.cc
index 3b2069a..9630a7a 100644
--- a/base/trace_event/trace_buffer.cc
+++ b/base/trace_event/trace_buffer.cc
@@ -99,19 +99,6 @@
     return NULL;
   }
 
-  scoped_ptr<TraceBuffer> CloneForIteration() const override {
-    scoped_ptr<ClonedTraceBuffer> cloned_buffer(new ClonedTraceBuffer());
-    for (size_t queue_index = queue_head_; queue_index != queue_tail_;
-         queue_index = NextQueueIndex(queue_index)) {
-      size_t chunk_index = recyclable_chunks_queue_[queue_index];
-      if (chunk_index >= chunks_.size())  // Skip uninitialized chunks.
-        continue;
-      TraceBufferChunk* chunk = chunks_[chunk_index].get();
-      cloned_buffer->chunks_.push_back(chunk ? chunk->Clone() : NULL);
-    }
-    return std::move(cloned_buffer);
-  }
-
   void EstimateTraceMemoryOverhead(
       TraceEventMemoryOverhead* overhead) override {
     overhead->Add("TraceBufferRingBuffer", sizeof(*this));
@@ -125,43 +112,6 @@
   }
 
  private:
-  class ClonedTraceBuffer : public TraceBuffer {
-   public:
-    ClonedTraceBuffer() : current_iteration_index_(0) {}
-
-    // The only implemented method.
-    const TraceBufferChunk* NextChunk() override {
-      return current_iteration_index_ < chunks_.size()
-                 ? chunks_[current_iteration_index_++].get()
-                 : NULL;
-    }
-
-    scoped_ptr<TraceBufferChunk> GetChunk(size_t* /* index */) override {
-      NOTIMPLEMENTED();
-      return scoped_ptr<TraceBufferChunk>();
-    }
-    void ReturnChunk(size_t /*index*/, scoped_ptr<TraceBufferChunk>) override {
-      NOTIMPLEMENTED();
-    }
-    bool IsFull() const override { return false; }
-    size_t Size() const override { return 0; }
-    size_t Capacity() const override { return 0; }
-    TraceEvent* GetEventByHandle(TraceEventHandle /* handle */) override {
-      return NULL;
-    }
-    scoped_ptr<TraceBuffer> CloneForIteration() const override {
-      NOTIMPLEMENTED();
-      return scoped_ptr<TraceBuffer>();
-    }
-    void EstimateTraceMemoryOverhead(
-        TraceEventMemoryOverhead* /* overhead */) override {
-      NOTIMPLEMENTED();
-    }
-
-    size_t current_iteration_index_;
-    std::vector<scoped_ptr<TraceBufferChunk>> chunks_;
-  };
-
   bool QueueIsEmpty() const { return queue_head_ == queue_tail_; }
 
   size_t QueueSize() const {
@@ -257,11 +207,6 @@
     return NULL;
   }
 
-  scoped_ptr<TraceBuffer> CloneForIteration() const override {
-    NOTIMPLEMENTED();
-    return scoped_ptr<TraceBuffer>();
-  }
-
   void EstimateTraceMemoryOverhead(
       TraceEventMemoryOverhead* overhead) override {
     const size_t chunks_ptr_vector_allocated_size =
@@ -308,14 +253,6 @@
   return &chunk_[*event_index];
 }
 
-scoped_ptr<TraceBufferChunk> TraceBufferChunk::Clone() const {
-  scoped_ptr<TraceBufferChunk> cloned_chunk(new TraceBufferChunk(seq_));
-  cloned_chunk->next_free_ = next_free_;
-  for (size_t i = 0; i < next_free_; ++i)
-    cloned_chunk->chunk_[i].CopyFrom(chunk_[i]);
-  return cloned_chunk;
-}
-
 void TraceBufferChunk::EstimateTraceMemoryOverhead(
     TraceEventMemoryOverhead* overhead) {
   if (!cached_overhead_estimate_) {
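
Note: with CloneForIteration and TraceBufferChunk::Clone gone, a flush now
consumes the buffer in place through NextChunk(). A hedged sketch of that
single-pass walk (GetEventAt is assumed from the chunk interface; each
TraceBuffer may only be iterated once, per the header comment in the next
file):

    #include <cstddef>
    #include "base/trace_event/trace_buffer.h"

    void DrainForFlush(base::trace_event::TraceBuffer* buffer) {
      while (const base::trace_event::TraceBufferChunk* chunk =
                 buffer->NextChunk()) {
        for (size_t i = 0; i < chunk->size(); ++i) {
          // The real flush serializes chunk->GetEventAt(i) here.
        }
      }
    }
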
diff --git a/base/trace_event/trace_buffer.h b/base/trace_event/trace_buffer.h
index a7b8059..c4c1c2b 100644
--- a/base/trace_event/trace_buffer.h
+++ b/base/trace_event/trace_buffer.h
@@ -39,8 +39,6 @@
     return &chunk_[index];
   }
 
-  scoped_ptr<TraceBufferChunk> Clone() const;
-
   void EstimateTraceMemoryOverhead(TraceEventMemoryOverhead* overhead);
 
   // These values must be kept consistent with the numbers of bits of
@@ -73,7 +71,6 @@
   // For iteration. Each TraceBuffer can only be iterated once.
   virtual const TraceBufferChunk* NextChunk() = 0;
 
-  virtual scoped_ptr<TraceBuffer> CloneForIteration() const = 0;
 
   // Computes an estimate of the size of the buffer, including all the retained
   // objects.
diff --git a/base/trace_event/trace_config.cc b/base/trace_event/trace_config.cc
index 8e11078..d60c081 100644
--- a/base/trace_event/trace_config.cc
+++ b/base/trace_event/trace_config.cc
@@ -63,13 +63,11 @@
  public:
   explicit ConvertableTraceConfigToTraceFormat(const TraceConfig& trace_config)
       : trace_config_(trace_config) {}
+  ~ConvertableTraceConfigToTraceFormat() override {}
   void AppendAsTraceFormat(std::string* out) const override {
     out->append(trace_config_.ToString());
   }
 
- protected:
-  ~ConvertableTraceConfigToTraceFormat() override {}
-
  private:
   const TraceConfig trace_config_;
 };
@@ -158,9 +156,9 @@
   return json;
 }
 
-scoped_refptr<ConvertableToTraceFormat>
-TraceConfig::AsConvertableToTraceFormat() const {
-  return new ConvertableTraceConfigToTraceFormat(*this);
+scoped_ptr<ConvertableToTraceFormat> TraceConfig::AsConvertableToTraceFormat()
+    const {
+  return make_scoped_ptr(new ConvertableTraceConfigToTraceFormat(*this));
 }
 
 std::string TraceConfig::ToCategoryFilterString() const {
diff --git a/base/trace_event/trace_config.h b/base/trace_event/trace_config.h
index c7d3f4b..81b7d51 100644
--- a/base/trace_event/trace_config.h
+++ b/base/trace_event/trace_config.h
@@ -153,8 +153,8 @@
   // formatted.
   std::string ToString() const;
 
-  // Returns a scoped_refptr and wrap TraceConfig in ConvertableToTraceFormat
-  scoped_refptr<ConvertableToTraceFormat> AsConvertableToTraceFormat() const;
+  // Returns a copy of the TraceConfig wrapped in a ConvertableToTraceFormat
+  scoped_ptr<ConvertableToTraceFormat> AsConvertableToTraceFormat() const;
 
   // Write the string representation of the CategoryFilter part.
   std::string ToCategoryFilterString() const;
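
Note: call sites now receive unique ownership instead of a refcounted handle.
An illustrative use, assuming TraceConfig's usual two-string constructor:

    #include <string>
    #include "base/memory/scoped_ptr.h"
    #include "base/trace_event/trace_config.h"

    void DumpConfig() {
      base::trace_event::TraceConfig config("cat1,cat2",
                                            "record-until-full");
      scoped_ptr<base::trace_event::ConvertableToTraceFormat> convertable =
          config.AsConvertableToTraceFormat();
      std::string json;
      convertable->AppendAsTraceFormat(&json);  // Appends config.ToString().
    }
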
diff --git a/base/trace_event/trace_event.gypi b/base/trace_event/trace_event.gypi
index 6948d7c..d5583fa 100644
--- a/base/trace_event/trace_event.gypi
+++ b/base/trace_event/trace_event.gypi
@@ -36,11 +36,8 @@
       'trace_event/process_memory_dump.h',
       'trace_event/process_memory_maps.cc',
       'trace_event/process_memory_maps.h',
-      'trace_event/process_memory_maps_dump_provider.h',
       'trace_event/process_memory_totals.cc',
       'trace_event/process_memory_totals.h',
-      'trace_event/process_memory_totals_dump_provider.cc',
-      'trace_event/process_memory_totals_dump_provider.h',
       'trace_event/trace_buffer.cc',
       'trace_event/trace_buffer.h',
       'trace_event/trace_config.cc',
@@ -79,7 +76,6 @@
       'trace_event/memory_allocator_dump_unittest.cc',
       'trace_event/memory_dump_manager_unittest.cc',
       'trace_event/process_memory_dump_unittest.cc',
-      'trace_event/process_memory_totals_dump_provider_unittest.cc',
       'trace_event/trace_config_memory_test_util.h',
       'trace_event/trace_config_unittest.cc',
       'trace_event/trace_event_argument_unittest.cc',
@@ -89,20 +85,12 @@
       'trace_event/winheap_dump_provider_win_unittest.cc',
     ],
     'conditions': [
-      ['OS == "linux" or OS=="android" or OS=="mac"', {
+      ['OS == "linux" or OS=="android" or OS=="mac" or OS=="ios"', {
         'trace_event_sources': [
           'trace_event/malloc_dump_provider.cc',
           'trace_event/malloc_dump_provider.h',
         ],
       }],
-      ['OS == "linux" or OS == "android"', {
-          'trace_event_sources': [
-            'trace_event/process_memory_maps_dump_provider.cc',
-          ],
-          'trace_event_test_sources' : [
-            'trace_event/process_memory_maps_dump_provider_unittest.cc',
-          ],
-      }],
       ['OS == "android"', {
         'trace_event_test_sources' : [
           'trace_event/trace_event_android_unittest.cc',
diff --git a/base/trace_event/trace_event.h b/base/trace_event/trace_event.h
index 75bb81b..6255bc0 100644
--- a/base/trace_event/trace_event.h
+++ b/base/trace_event/trace_event.h
@@ -7,7 +7,7 @@
 
 // This header file defines implementation details of how the trace macros in
 // trace_event_common.h collect and store trace events. Anything not
-// implementation-specific should go in trace_macros_common.h instead of here.
+// implementation-specific should go in trace_event_common.h instead of here.
 
 #include <stddef.h>
 #include <stdint.h>
@@ -37,6 +37,11 @@
 #define TRACE_ID_DONT_MANGLE(id) \
     trace_event_internal::TraceID::DontMangle(id)
 
+// By default, trace IDs are eventually converted to a single 64-bit number. Use
+// this macro to add a scope string.
+#define TRACE_ID_WITH_SCOPE(scope, id) \
+    trace_event_internal::TraceID::WithScope(scope, id)
+
 // Sets the current sample state to the given category and name (both must be
 // constant strings). These states are intended for a sampling profiler.
 // Implementation note: we store category and name together because we don't
@@ -99,12 +104,13 @@
 //                    char phase,
 //                    const unsigned char* category_group_enabled,
 //                    const char* name,
+//                    const char* scope,
 //                    unsigned long long id,
 //                    int num_args,
 //                    const char** arg_names,
 //                    const unsigned char* arg_types,
 //                    const unsigned long long* arg_values,
-//                    const scoped_refptr<ConvertableToTraceFormat>*
+//                    scoped_ptr<ConvertableToTraceFormat>*
 //                    convertable_values,
 //                    unsigned int flags)
 #define TRACE_EVENT_API_ADD_TRACE_EVENT \
@@ -116,13 +122,14 @@
 //                    char phase,
 //                    const unsigned char* category_group_enabled,
 //                    const char* name,
+//                    const char* scope,
 //                    unsigned long long id,
 //                    unsigned long long bind_id,
 //                    int num_args,
 //                    const char** arg_names,
 //                    const unsigned char* arg_types,
 //                    const unsigned long long* arg_values,
-//                    const scoped_refptr<ConvertableToTraceFormat>*
+//                    scoped_ptr<ConvertableToTraceFormat>*
 //                    convertable_values,
 //                    unsigned int flags)
 #define TRACE_EVENT_API_ADD_TRACE_EVENT_WITH_BIND_ID \
@@ -135,13 +142,14 @@
 //                    char phase,
 //                    const unsigned char* category_group_enabled,
 //                    const char* name,
+//                    const char* scope,
 //                    unsigned long long id,
 //                    int process_id,
 //                    int num_args,
 //                    const char** arg_names,
 //                    const unsigned char* arg_types,
 //                    const unsigned long long* arg_values,
-//                    const scoped_refptr<ConvertableToTraceFormat>*
+//                    scoped_ptr<ConvertableToTraceFormat>*
 //                    convertable_values,
 //                    unsigned int flags)
 #define TRACE_EVENT_API_ADD_TRACE_EVENT_WITH_PROCESS_ID \
@@ -153,6 +161,7 @@
 //                    char phase,
 //                    const unsigned char* category_group_enabled,
 //                    const char* name,
+//                    const char* scope,
 //                    unsigned long long id,
 //                    int thread_id,
 //                    const TimeTicks& timestamp,
@@ -160,7 +169,7 @@
 //                    const char** arg_names,
 //                    const unsigned char* arg_types,
 //                    const unsigned long long* arg_values,
-//                    const scoped_refptr<ConvertableToTraceFormat>*
+//                    scoped_ptr<ConvertableToTraceFormat>*
 //                    convertable_values,
 //                    unsigned int flags)
 #define TRACE_EVENT_API_ADD_TRACE_EVENT_WITH_THREAD_ID_AND_TIMESTAMP \
@@ -178,9 +187,10 @@
 // Adds a metadata event to the trace log. The |AppendAsTraceFormat| method
 // on the convertable value will be called at flush time.
 // TRACE_EVENT_API_ADD_METADATA_EVENT(
-//   const char* event_name,
-//   const char* arg_name,
-//   scoped_refptr<ConvertableToTraceFormat> arg_value)
+//     const unsigned char* category_group_enabled,
+//     const char* event_name,
+//     const char* arg_name,
+//     scoped_ptr<ConvertableToTraceFormat> arg_value)
 #define TRACE_EVENT_API_ADD_METADATA_EVENT \
     trace_event_internal::AddMetadataEvent
 
@@ -244,8 +254,8 @@
       if (INTERNAL_TRACE_EVENT_CATEGORY_GROUP_ENABLED_FOR_RECORDING_MODE()) { \
         trace_event_internal::AddTraceEvent( \
             phase, INTERNAL_TRACE_EVENT_UID(category_group_enabled), name, \
-            trace_event_internal::kNoId, flags, \
-            trace_event_internal::kNoId, ##__VA_ARGS__); \
+            trace_event_internal::kGlobalScope, trace_event_internal::kNoId, \
+            flags, trace_event_internal::kNoId, ##__VA_ARGS__); \
       } \
     } while (0)
 
@@ -260,8 +270,9 @@
           trace_event_internal::AddTraceEvent( \
               TRACE_EVENT_PHASE_COMPLETE, \
               INTERNAL_TRACE_EVENT_UID(category_group_enabled), name, \
-              trace_event_internal::kNoId, TRACE_EVENT_FLAG_NONE, \
-              trace_event_internal::kNoId, ##__VA_ARGS__); \
+              trace_event_internal::kGlobalScope, trace_event_internal::kNoId, \
+              TRACE_EVENT_FLAG_NONE, trace_event_internal::kNoId, \
+              ##__VA_ARGS__); \
       INTERNAL_TRACE_EVENT_UID(tracer).Initialize( \
           INTERNAL_TRACE_EVENT_UID(category_group_enabled), name, h); \
     }
@@ -278,8 +289,8 @@
         trace_event_internal::AddTraceEvent( \
             TRACE_EVENT_PHASE_COMPLETE, \
             INTERNAL_TRACE_EVENT_UID(category_group_enabled), name, \
-            trace_event_internal::kNoId, trace_event_flags, \
-            trace_event_bind_id.data(), ##__VA_ARGS__); \
+            trace_event_internal::kGlobalScope, trace_event_internal::kNoId, \
+            trace_event_flags, trace_event_bind_id.raw_id(), ##__VA_ARGS__); \
     INTERNAL_TRACE_EVENT_UID(tracer).Initialize( \
         INTERNAL_TRACE_EVENT_UID(category_group_enabled), name, h); \
   }
@@ -296,8 +307,8 @@
             id, &trace_event_flags); \
         trace_event_internal::AddTraceEvent( \
             phase, INTERNAL_TRACE_EVENT_UID(category_group_enabled), \
-            name, trace_event_trace_id.data(), trace_event_flags, \
-            trace_event_internal::kNoId, ##__VA_ARGS__); \
+            name, trace_event_trace_id.scope(), trace_event_trace_id.raw_id(), \
+            trace_event_flags, trace_event_internal::kNoId, ##__VA_ARGS__); \
       } \
     } while (0)
 
@@ -310,7 +321,8 @@
     if (INTERNAL_TRACE_EVENT_CATEGORY_GROUP_ENABLED_FOR_RECORDING_MODE()) {  \
       trace_event_internal::AddTraceEventWithThreadIdAndTimestamp(           \
           phase, INTERNAL_TRACE_EVENT_UID(category_group_enabled), name,     \
-          trace_event_internal::kNoId, TRACE_EVENT_API_CURRENT_THREAD_ID,    \
+          trace_event_internal::kGlobalScope, trace_event_internal::kNoId,   \
+          TRACE_EVENT_API_CURRENT_THREAD_ID,                                 \
           base::TimeTicks::FromInternalValue(timestamp),                     \
           flags | TRACE_EVENT_FLAG_EXPLICIT_TIMESTAMP,                       \
           trace_event_internal::kNoId, ##__VA_ARGS__);                       \
@@ -329,18 +341,53 @@
                                                          &trace_event_flags); \
       trace_event_internal::AddTraceEventWithThreadIdAndTimestamp(            \
           phase, INTERNAL_TRACE_EVENT_UID(category_group_enabled), name,      \
-          trace_event_trace_id.data(), thread_id,                             \
-          base::TimeTicks::FromInternalValue(timestamp),                      \
+          trace_event_trace_id.scope(), trace_event_trace_id.raw_id(),        \
+          thread_id, base::TimeTicks::FromInternalValue(timestamp),           \
           trace_event_flags | TRACE_EVENT_FLAG_EXPLICIT_TIMESTAMP,            \
           trace_event_internal::kNoId, ##__VA_ARGS__);                        \
     }                                                                         \
   } while (0)
 
+// Implementation detail: internal macro to create static category and add
+// metadata event if the category is enabled.
+#define INTERNAL_TRACE_EVENT_METADATA_ADD(category_group, name, ...)        \
+  do {                                                                      \
+    INTERNAL_TRACE_EVENT_GET_CATEGORY_INFO(category_group);                 \
+    if (INTERNAL_TRACE_EVENT_CATEGORY_GROUP_ENABLED_FOR_RECORDING_MODE()) { \
+      TRACE_EVENT_API_ADD_METADATA_EVENT(                                   \
+          INTERNAL_TRACE_EVENT_UID(category_group_enabled), name,           \
+          ##__VA_ARGS__);                                                   \
+    }                                                                       \
+  } while (0)
+
+// Implementation detail: internal macro to enter and leave a
+// context based on the current scope.
+#define INTERNAL_TRACE_EVENT_SCOPED_CONTEXT(category_group, name, context) \
+  struct INTERNAL_TRACE_EVENT_UID(ScopedContext) {                         \
+   public:                                                                 \
+    INTERNAL_TRACE_EVENT_UID(ScopedContext)(uint64_t cid) : cid_(cid) {    \
+      TRACE_EVENT_ENTER_CONTEXT(category_group, name, cid_);               \
+    }                                                                      \
+    ~INTERNAL_TRACE_EVENT_UID(ScopedContext)() {                           \
+      TRACE_EVENT_LEAVE_CONTEXT(category_group, name, cid_);               \
+    }                                                                      \
+                                                                           \
+   private:                                                                \
+    uint64_t cid_;                                                         \
+    /* Local class friendly DISALLOW_COPY_AND_ASSIGN */                    \
+    INTERNAL_TRACE_EVENT_UID(ScopedContext)                                \
+    (const INTERNAL_TRACE_EVENT_UID(ScopedContext)&) {};                   \
+    void operator=(const INTERNAL_TRACE_EVENT_UID(ScopedContext)&) {};     \
+  };                                                                       \
+  INTERNAL_TRACE_EVENT_UID(ScopedContext)                                  \
+  INTERNAL_TRACE_EVENT_UID(scoped_context)(context.raw_id());
+
 namespace trace_event_internal {
 
 // Specify these values when the corresponding argument of AddTraceEvent is not
 // used.
 const int kZeroNumArgs = 0;
+const std::nullptr_t kGlobalScope = nullptr;
 const unsigned long long kNoId = 0;
 
 // TraceID encapsulates an ID that can either be an integer or pointer. Pointers
@@ -348,87 +395,112 @@
 // collide when the same pointer is used on different processes.
 class TraceID {
  public:
+  class WithScope {
+   public:
+    WithScope(const char* scope, unsigned long long raw_id)
+        : scope_(scope), raw_id_(raw_id) {}
+    unsigned long long raw_id() const { return raw_id_; }
+    const char* scope() const { return scope_; }
+   private:
+    const char* scope_ = nullptr;
+    unsigned long long raw_id_;
+  };
+
   class DontMangle {
    public:
-    explicit DontMangle(const void* id)
-        : data_(static_cast<unsigned long long>(
-              reinterpret_cast<uintptr_t>(id))) {}
-    explicit DontMangle(unsigned long long id) : data_(id) {}
-    explicit DontMangle(unsigned long id) : data_(id) {}
-    explicit DontMangle(unsigned int id) : data_(id) {}
-    explicit DontMangle(unsigned short id) : data_(id) {}
-    explicit DontMangle(unsigned char id) : data_(id) {}
-    explicit DontMangle(long long id)
-        : data_(static_cast<unsigned long long>(id)) {}
-    explicit DontMangle(long id)
-        : data_(static_cast<unsigned long long>(id)) {}
-    explicit DontMangle(int id)
-        : data_(static_cast<unsigned long long>(id)) {}
-    explicit DontMangle(short id)
-        : data_(static_cast<unsigned long long>(id)) {}
-    explicit DontMangle(signed char id)
-        : data_(static_cast<unsigned long long>(id)) {}
-    unsigned long long data() const { return data_; }
+    explicit DontMangle(const void* raw_id)
+        : raw_id_(static_cast<unsigned long long>(
+              reinterpret_cast<uintptr_t>(raw_id))) {}
+    explicit DontMangle(unsigned long long raw_id) : raw_id_(raw_id) {}
+    explicit DontMangle(unsigned long raw_id) : raw_id_(raw_id) {}
+    explicit DontMangle(unsigned int raw_id) : raw_id_(raw_id) {}
+    explicit DontMangle(unsigned short raw_id) : raw_id_(raw_id) {}
+    explicit DontMangle(unsigned char raw_id) : raw_id_(raw_id) {}
+    explicit DontMangle(long long raw_id)
+        : raw_id_(static_cast<unsigned long long>(raw_id)) {}
+    explicit DontMangle(long raw_id)
+        : raw_id_(static_cast<unsigned long long>(raw_id)) {}
+    explicit DontMangle(int raw_id)
+        : raw_id_(static_cast<unsigned long long>(raw_id)) {}
+    explicit DontMangle(short raw_id)
+        : raw_id_(static_cast<unsigned long long>(raw_id)) {}
+    explicit DontMangle(signed char raw_id)
+        : raw_id_(static_cast<unsigned long long>(raw_id)) {}
+    explicit DontMangle(WithScope scoped_id)
+        : scope_(scoped_id.scope()), raw_id_(scoped_id.raw_id()) {}
+    const char* scope() const { return scope_; }
+    unsigned long long raw_id() const { return raw_id_; }
    private:
-    unsigned long long data_;
+    const char* scope_ = nullptr;
+    unsigned long long raw_id_;
   };
 
   class ForceMangle {
    public:
-    explicit ForceMangle(unsigned long long id) : data_(id) {}
-    explicit ForceMangle(unsigned long id) : data_(id) {}
-    explicit ForceMangle(unsigned int id) : data_(id) {}
-    explicit ForceMangle(unsigned short id) : data_(id) {}
-    explicit ForceMangle(unsigned char id) : data_(id) {}
-    explicit ForceMangle(long long id)
-        : data_(static_cast<unsigned long long>(id)) {}
-    explicit ForceMangle(long id)
-        : data_(static_cast<unsigned long long>(id)) {}
-    explicit ForceMangle(int id)
-        : data_(static_cast<unsigned long long>(id)) {}
-    explicit ForceMangle(short id)
-        : data_(static_cast<unsigned long long>(id)) {}
-    explicit ForceMangle(signed char id)
-        : data_(static_cast<unsigned long long>(id)) {}
-    unsigned long long data() const { return data_; }
+    explicit ForceMangle(unsigned long long raw_id) : raw_id_(raw_id) {}
+    explicit ForceMangle(unsigned long raw_id) : raw_id_(raw_id) {}
+    explicit ForceMangle(unsigned int raw_id) : raw_id_(raw_id) {}
+    explicit ForceMangle(unsigned short raw_id) : raw_id_(raw_id) {}
+    explicit ForceMangle(unsigned char raw_id) : raw_id_(raw_id) {}
+    explicit ForceMangle(long long raw_id)
+        : raw_id_(static_cast<unsigned long long>(raw_id)) {}
+    explicit ForceMangle(long raw_id)
+        : raw_id_(static_cast<unsigned long long>(raw_id)) {}
+    explicit ForceMangle(int raw_id)
+        : raw_id_(static_cast<unsigned long long>(raw_id)) {}
+    explicit ForceMangle(short raw_id)
+        : raw_id_(static_cast<unsigned long long>(raw_id)) {}
+    explicit ForceMangle(signed char raw_id)
+        : raw_id_(static_cast<unsigned long long>(raw_id)) {}
+    unsigned long long raw_id() const { return raw_id_; }
    private:
-    unsigned long long data_;
+    unsigned long long raw_id_;
   };
-  TraceID(const void* id, unsigned int* flags)
-      : data_(static_cast<unsigned long long>(
-              reinterpret_cast<uintptr_t>(id))) {
+  TraceID(const void* raw_id, unsigned int* flags)
+      : raw_id_(static_cast<unsigned long long>(
+                reinterpret_cast<uintptr_t>(raw_id))) {
     *flags |= TRACE_EVENT_FLAG_MANGLE_ID;
   }
-  TraceID(ForceMangle id, unsigned int* flags) : data_(id.data()) {
+  TraceID(ForceMangle raw_id, unsigned int* flags) : raw_id_(raw_id.raw_id()) {
     *flags |= TRACE_EVENT_FLAG_MANGLE_ID;
   }
-  TraceID(DontMangle id, unsigned int* /* flags */) : data_(id.data()) {
+  TraceID(DontMangle maybe_scoped_id, unsigned int* /* flags */)
+      : scope_(maybe_scoped_id.scope()), raw_id_(maybe_scoped_id.raw_id()) {
   }
-  TraceID(unsigned long long id, unsigned int* flags)
-      : data_(id) { (void)flags; }
-  TraceID(unsigned long id, unsigned int* flags)
-      : data_(id) { (void)flags; }
-  TraceID(unsigned int id, unsigned int* flags)
-      : data_(id) { (void)flags; }
-  TraceID(unsigned short id, unsigned int* flags)
-      : data_(id) { (void)flags; }
-  TraceID(unsigned char id, unsigned int* flags)
-      : data_(id) { (void)flags; }
-  TraceID(long long id, unsigned int* flags)
-      : data_(static_cast<unsigned long long>(id)) { (void)flags; }
-  TraceID(long id, unsigned int* flags)
-      : data_(static_cast<unsigned long long>(id)) { (void)flags; }
-  TraceID(int id, unsigned int* flags)
-      : data_(static_cast<unsigned long long>(id)) { (void)flags; }
-  TraceID(short id, unsigned int* flags)
-      : data_(static_cast<unsigned long long>(id)) { (void)flags; }
-  TraceID(signed char id, unsigned int* flags)
-      : data_(static_cast<unsigned long long>(id)) { (void)flags; }
+  TraceID(unsigned long long raw_id, unsigned int* flags) : raw_id_(raw_id) {
+    (void)flags;
+  }
+  TraceID(unsigned long raw_id, unsigned int* flags) : raw_id_(raw_id) {
+    (void)flags;
+  }
+  TraceID(unsigned int raw_id, unsigned int* flags) : raw_id_(raw_id) {
+    (void)flags;
+  }
+  TraceID(unsigned short raw_id, unsigned int* flags) : raw_id_(raw_id) {
+    (void)flags;
+  }
+  TraceID(unsigned char raw_id, unsigned int* flags) : raw_id_(raw_id) {
+    (void)flags;
+  }
+  TraceID(long long raw_id, unsigned int* flags)
+      : raw_id_(static_cast<unsigned long long>(raw_id)) { (void)flags; }
+  TraceID(long raw_id, unsigned int* flags)
+      : raw_id_(static_cast<unsigned long long>(raw_id)) { (void)flags; }
+  TraceID(int raw_id, unsigned int* flags)
+      : raw_id_(static_cast<unsigned long long>(raw_id)) { (void)flags; }
+  TraceID(short raw_id, unsigned int* flags)
+      : raw_id_(static_cast<unsigned long long>(raw_id)) { (void)flags; }
+  TraceID(signed char raw_id, unsigned int* flags)
+      : raw_id_(static_cast<unsigned long long>(raw_id)) { (void)flags; }
+  TraceID(WithScope scoped_id, unsigned int* /* flags */)
+      : scope_(scoped_id.scope()), raw_id_(scoped_id.raw_id()) {}
 
-  unsigned long long data() const { return data_; }
+  unsigned long long raw_id() const { return raw_id_; }
+  const char* scope() const { return scope_; }
 
  private:
-  unsigned long long data_;
+  const char* scope_ = nullptr;
+  unsigned long long raw_id_;
 };
 
 // Simple union to store various types as unsigned long long.
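
Note: the renamed accessors (data() -> raw_id()) and the new scope_ member let
scoped IDs flow through every wrapper. Two illustrative call sites (category,
name, and ID values are hypothetical):

    #include "base/trace_event/trace_event.h"

    void EmitDownloadEvents() {
      TRACE_EVENT_ASYNC_BEGIN0("net", "Download",
                               TRACE_ID_WITH_SCOPE("net", 0x1000ull));
      // DontMangle forwards the scope too, so the wrappers compose:
      TRACE_EVENT_ASYNC_END0(
          "net", "Download",
          TRACE_ID_DONT_MANGLE(TRACE_ID_WITH_SCOPE("net", 0x1000ull)));
    }
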
@@ -539,32 +611,37 @@
 // pointers to the internal c_str and pass through to the tracing API,
 // the arg_values must live throughout these procedures.
 
+template <class ARG1_CONVERTABLE_TYPE>
 static inline base::trace_event::TraceEventHandle
 AddTraceEventWithThreadIdAndTimestamp(
     char phase,
     const unsigned char* category_group_enabled,
     const char* name,
+    const char* scope,
     unsigned long long id,
     int thread_id,
     const base::TimeTicks& timestamp,
     unsigned int flags,
     unsigned long long bind_id,
     const char* arg1_name,
-    const scoped_refptr<base::trace_event::ConvertableToTraceFormat>&
-        arg1_val) {
+    scoped_ptr<ARG1_CONVERTABLE_TYPE> arg1_val) {
   const int num_args = 1;
   unsigned char arg_types[1] = { TRACE_VALUE_TYPE_CONVERTABLE };
+  scoped_ptr<base::trace_event::ConvertableToTraceFormat>
+      convertable_values[1] = {std::move(arg1_val)};
   return TRACE_EVENT_API_ADD_TRACE_EVENT_WITH_THREAD_ID_AND_TIMESTAMP(
-      phase, category_group_enabled, name, id, bind_id, thread_id, timestamp,
-      num_args, &arg1_name, arg_types, NULL, &arg1_val, flags);
+      phase, category_group_enabled, name, scope, id, bind_id, thread_id,
+      timestamp, num_args, &arg1_name, arg_types, NULL, convertable_values,
+      flags);
 }
 
-template<class ARG1_TYPE>
+template <class ARG1_TYPE, class ARG2_CONVERTABLE_TYPE>
 static inline base::trace_event::TraceEventHandle
 AddTraceEventWithThreadIdAndTimestamp(
     char phase,
     const unsigned char* category_group_enabled,
     const char* name,
+    const char* scope,
     unsigned long long id,
     int thread_id,
     const base::TimeTicks& timestamp,
@@ -573,8 +650,7 @@
     const char* arg1_name,
     const ARG1_TYPE& arg1_val,
     const char* arg2_name,
-    const scoped_refptr<base::trace_event::ConvertableToTraceFormat>&
-        arg2_val) {
+    scoped_ptr<ARG2_CONVERTABLE_TYPE> arg2_val) {
   const int num_args = 2;
   const char* arg_names[2] = { arg1_name, arg2_name };
 
@@ -582,29 +658,28 @@
   unsigned long long arg_values[2];
   SetTraceValue(arg1_val, &arg_types[0], &arg_values[0]);
   arg_types[1] = TRACE_VALUE_TYPE_CONVERTABLE;
-
-  scoped_refptr<base::trace_event::ConvertableToTraceFormat>
-      convertable_values[2];
-  convertable_values[1] = arg2_val;
-
+  scoped_ptr<base::trace_event::ConvertableToTraceFormat>
+      convertable_values[2] = {nullptr, std::move(arg2_val)};
   return TRACE_EVENT_API_ADD_TRACE_EVENT_WITH_THREAD_ID_AND_TIMESTAMP(
-      phase, category_group_enabled, name, id, bind_id, thread_id, timestamp,
-      num_args, arg_names, arg_types, arg_values, convertable_values, flags);
+      phase, category_group_enabled, name, scope, id, bind_id, thread_id,
+      timestamp, num_args, arg_names, arg_types, arg_values, convertable_values,
+      flags);
 }
 
-template<class ARG2_TYPE>
+template <class ARG1_CONVERTABLE_TYPE, class ARG2_TYPE>
 static inline base::trace_event::TraceEventHandle
 AddTraceEventWithThreadIdAndTimestamp(
     char phase,
     const unsigned char* category_group_enabled,
     const char* name,
+    const char* scope,
     unsigned long long id,
     int thread_id,
     const base::TimeTicks& timestamp,
     unsigned int flags,
     unsigned long long bind_id,
     const char* arg1_name,
-    const scoped_refptr<base::trace_event::ConvertableToTraceFormat>& arg1_val,
+    scoped_ptr<ARG1_CONVERTABLE_TYPE> arg1_val,
     const char* arg2_name,
     const ARG2_TYPE& arg2_val) {
   const int num_args = 2;
@@ -615,41 +690,40 @@
   arg_types[0] = TRACE_VALUE_TYPE_CONVERTABLE;
   arg_values[0] = 0;
   SetTraceValue(arg2_val, &arg_types[1], &arg_values[1]);
-
-  scoped_refptr<base::trace_event::ConvertableToTraceFormat>
-      convertable_values[2];
-  convertable_values[0] = arg1_val;
-
+  scoped_ptr<base::trace_event::ConvertableToTraceFormat>
+      convertable_values[2] = {std::move(arg1_val), nullptr};
   return TRACE_EVENT_API_ADD_TRACE_EVENT_WITH_THREAD_ID_AND_TIMESTAMP(
-      phase, category_group_enabled, name, id, bind_id, thread_id, timestamp,
-      num_args, arg_names, arg_types, arg_values, convertable_values, flags);
+      phase, category_group_enabled, name, scope, id, bind_id, thread_id,
+      timestamp, num_args, arg_names, arg_types, arg_values, convertable_values,
+      flags);
 }
 
+template <class ARG1_CONVERTABLE_TYPE, class ARG2_CONVERTABLE_TYPE>
 static inline base::trace_event::TraceEventHandle
 AddTraceEventWithThreadIdAndTimestamp(
     char phase,
     const unsigned char* category_group_enabled,
     const char* name,
+    const char* scope,
     unsigned long long id,
     int thread_id,
     const base::TimeTicks& timestamp,
     unsigned int flags,
     unsigned long long bind_id,
     const char* arg1_name,
-    const scoped_refptr<base::trace_event::ConvertableToTraceFormat>& arg1_val,
+    scoped_ptr<ARG1_CONVERTABLE_TYPE> arg1_val,
     const char* arg2_name,
-    const scoped_refptr<base::trace_event::ConvertableToTraceFormat>&
-        arg2_val) {
+    scoped_ptr<ARG2_CONVERTABLE_TYPE> arg2_val) {
   const int num_args = 2;
   const char* arg_names[2] = { arg1_name, arg2_name };
   unsigned char arg_types[2] =
       { TRACE_VALUE_TYPE_CONVERTABLE, TRACE_VALUE_TYPE_CONVERTABLE };
-  scoped_refptr<base::trace_event::ConvertableToTraceFormat>
-      convertable_values[2] = {arg1_val, arg2_val};
-
+  scoped_ptr<base::trace_event::ConvertableToTraceFormat>
+      convertable_values[2] = {std::move(arg1_val), std::move(arg2_val)};
   return TRACE_EVENT_API_ADD_TRACE_EVENT_WITH_THREAD_ID_AND_TIMESTAMP(
-      phase, category_group_enabled, name, id, bind_id, thread_id, timestamp,
-      num_args, arg_names, arg_types, NULL, convertable_values, flags);
+      phase, category_group_enabled, name, scope, id, bind_id, thread_id,
+      timestamp, num_args, arg_names, arg_types, NULL, convertable_values,
+      flags);
 }
 
 static inline base::trace_event::TraceEventHandle
@@ -657,27 +731,30 @@
     char phase,
     const unsigned char* category_group_enabled,
     const char* name,
+    const char* scope,
     unsigned long long id,
     int thread_id,
     const base::TimeTicks& timestamp,
     unsigned int flags,
     unsigned long long bind_id) {
   return TRACE_EVENT_API_ADD_TRACE_EVENT_WITH_THREAD_ID_AND_TIMESTAMP(
-      phase, category_group_enabled, name, id, bind_id, thread_id, timestamp,
-      kZeroNumArgs, NULL, NULL, NULL, NULL, flags);
+      phase, category_group_enabled, name, scope, id, bind_id, thread_id,
+      timestamp, kZeroNumArgs, NULL, NULL, NULL, NULL, flags);
 }
 
 static inline base::trace_event::TraceEventHandle AddTraceEvent(
     char phase,
     const unsigned char* category_group_enabled,
     const char* name,
+    const char* scope,
     unsigned long long id,
     unsigned int flags,
     unsigned long long bind_id) {
   const int thread_id = static_cast<int>(base::PlatformThread::CurrentId());
   const base::TimeTicks now = base::TimeTicks::Now();
   return AddTraceEventWithThreadIdAndTimestamp(
-      phase, category_group_enabled, name, id, thread_id, now, flags, bind_id);
+      phase, category_group_enabled, name, scope, id, thread_id, now, flags,
+      bind_id);
 }
 
 template<class ARG1_TYPE>
@@ -686,6 +763,7 @@
     char phase,
     const unsigned char* category_group_enabled,
     const char* name,
+    const char* scope,
     unsigned long long id,
     int thread_id,
     const base::TimeTicks& timestamp,
@@ -698,8 +776,8 @@
   unsigned long long arg_values[1];
   SetTraceValue(arg1_val, &arg_types[0], &arg_values[0]);
   return TRACE_EVENT_API_ADD_TRACE_EVENT_WITH_THREAD_ID_AND_TIMESTAMP(
-      phase, category_group_enabled, name, id, bind_id, thread_id, timestamp,
-      num_args, &arg1_name, arg_types, arg_values, NULL, flags);
+      phase, category_group_enabled, name, scope, id, bind_id, thread_id,
+      timestamp, num_args, &arg1_name, arg_types, arg_values, NULL, flags);
 }
 
 template<class ARG1_TYPE>
@@ -707,6 +785,7 @@
     char phase,
     const unsigned char* category_group_enabled,
     const char* name,
+    const char* scope,
     unsigned long long id,
     unsigned int flags,
     unsigned long long bind_id,
@@ -714,9 +793,27 @@
     const ARG1_TYPE& arg1_val) {
   int thread_id = static_cast<int>(base::PlatformThread::CurrentId());
   base::TimeTicks now = base::TimeTicks::Now();
-  return AddTraceEventWithThreadIdAndTimestamp(phase, category_group_enabled,
-                                               name, id, thread_id, now, flags,
-                                               bind_id, arg1_name, arg1_val);
+  return AddTraceEventWithThreadIdAndTimestamp(
+      phase, category_group_enabled, name, scope, id, thread_id, now, flags,
+      bind_id, arg1_name, arg1_val);
+}
+
+template <class ARG1_CONVERTABLE_TYPE>
+static inline base::trace_event::TraceEventHandle AddTraceEvent(
+    char phase,
+    const unsigned char* category_group_enabled,
+    const char* name,
+    const char* scope,
+    unsigned long long id,
+    unsigned int flags,
+    unsigned long long bind_id,
+    const char* arg1_name,
+    scoped_ptr<ARG1_CONVERTABLE_TYPE> arg1_val) {
+  int thread_id = static_cast<int>(base::PlatformThread::CurrentId());
+  base::TimeTicks now = base::TimeTicks::Now();
+  return AddTraceEventWithThreadIdAndTimestamp(
+      phase, category_group_enabled, name, scope, id, thread_id, now, flags,
+      bind_id, arg1_name, std::move(arg1_val));
 }
 
 template<class ARG1_TYPE, class ARG2_TYPE>
@@ -725,6 +822,7 @@
     char phase,
     const unsigned char* category_group_enabled,
     const char* name,
+    const char* scope,
     unsigned long long id,
     int thread_id,
     const base::TimeTicks& timestamp,
@@ -741,8 +839,68 @@
   SetTraceValue(arg1_val, &arg_types[0], &arg_values[0]);
   SetTraceValue(arg2_val, &arg_types[1], &arg_values[1]);
   return TRACE_EVENT_API_ADD_TRACE_EVENT_WITH_THREAD_ID_AND_TIMESTAMP(
-      phase, category_group_enabled, name, id, bind_id, thread_id, timestamp,
-      num_args, arg_names, arg_types, arg_values, NULL, flags);
+      phase, category_group_enabled, name, scope, id, bind_id, thread_id,
+      timestamp, num_args, arg_names, arg_types, arg_values, NULL, flags);
+}
+
+template <class ARG1_CONVERTABLE_TYPE, class ARG2_TYPE>
+static inline base::trace_event::TraceEventHandle AddTraceEvent(
+    char phase,
+    const unsigned char* category_group_enabled,
+    const char* name,
+    const char* scope,
+    unsigned long long id,
+    unsigned int flags,
+    unsigned long long bind_id,
+    const char* arg1_name,
+    scoped_ptr<ARG1_CONVERTABLE_TYPE> arg1_val,
+    const char* arg2_name,
+    const ARG2_TYPE& arg2_val) {
+  int thread_id = static_cast<int>(base::PlatformThread::CurrentId());
+  base::TimeTicks now = base::TimeTicks::Now();
+  return AddTraceEventWithThreadIdAndTimestamp(
+      phase, category_group_enabled, name, scope, id, thread_id, now, flags,
+      bind_id, arg1_name, std::move(arg1_val), arg2_name, arg2_val);
+}
+
+template <class ARG1_TYPE, class ARG2_CONVERTABLE_TYPE>
+static inline base::trace_event::TraceEventHandle AddTraceEvent(
+    char phase,
+    const unsigned char* category_group_enabled,
+    const char* name,
+    const char* scope,
+    unsigned long long id,
+    unsigned int flags,
+    unsigned long long bind_id,
+    const char* arg1_name,
+    const ARG1_TYPE& arg1_val,
+    const char* arg2_name,
+    scoped_ptr<ARG2_CONVERTABLE_TYPE> arg2_val) {
+  int thread_id = static_cast<int>(base::PlatformThread::CurrentId());
+  base::TimeTicks now = base::TimeTicks::Now();
+  return AddTraceEventWithThreadIdAndTimestamp(
+      phase, category_group_enabled, name, scope, id, thread_id, now, flags,
+      bind_id, arg1_name, arg1_val, arg2_name, std::move(arg2_val));
+}
+
+template <class ARG1_CONVERTABLE_TYPE, class ARG2_CONVERTABLE_TYPE>
+static inline base::trace_event::TraceEventHandle AddTraceEvent(
+    char phase,
+    const unsigned char* category_group_enabled,
+    const char* name,
+    const char* scope,
+    unsigned long long id,
+    unsigned int flags,
+    unsigned long long bind_id,
+    const char* arg1_name,
+    scoped_ptr<ARG1_CONVERTABLE_TYPE> arg1_val,
+    const char* arg2_name,
+    scoped_ptr<ARG2_CONVERTABLE_TYPE> arg2_val) {
+  int thread_id = static_cast<int>(base::PlatformThread::CurrentId());
+  base::TimeTicks now = base::TimeTicks::Now();
+  return AddTraceEventWithThreadIdAndTimestamp(
+      phase, category_group_enabled, name, scope, id, thread_id, now, flags,
+      bind_id, arg1_name, std::move(arg1_val), arg2_name, std::move(arg2_val));
 }
 
 template<class ARG1_TYPE, class ARG2_TYPE>
@@ -750,6 +908,7 @@
     char phase,
     const unsigned char* category_group_enabled,
     const char* name,
+    const char* scope,
     unsigned long long id,
     unsigned int flags,
     unsigned long long bind_id,
@@ -760,20 +919,22 @@
   int thread_id = static_cast<int>(base::PlatformThread::CurrentId());
   base::TimeTicks now = base::TimeTicks::Now();
   return AddTraceEventWithThreadIdAndTimestamp(
-      phase, category_group_enabled, name, id, thread_id, now, flags, bind_id,
-      arg1_name, arg1_val, arg2_name, arg2_val);
+      phase, category_group_enabled, name, scope, id, thread_id, now, flags,
+      bind_id, arg1_name, arg1_val, arg2_name, arg2_val);
 }
 
+template <class ARG1_CONVERTABLE_TYPE>
 static inline void AddMetadataEvent(
+    const unsigned char* category_group_enabled,
     const char* event_name,
     const char* arg_name,
-    scoped_refptr<base::trace_event::ConvertableToTraceFormat> arg_value) {
+    scoped_ptr<ARG1_CONVERTABLE_TYPE> arg_value) {
   const char* arg_names[1] = {arg_name};
-  scoped_refptr<base::trace_event::ConvertableToTraceFormat>
-      convertable_values[1] = {arg_value};
   unsigned char arg_types[1] = {TRACE_VALUE_TYPE_CONVERTABLE};
+  scoped_ptr<base::trace_event::ConvertableToTraceFormat>
+      convertable_values[1] = {std::move(arg_value)};
   base::trace_event::TraceLog::GetInstance()->AddMetadataEvent(
-      event_name,
+      category_group_enabled, event_name,
       1,  // num_args
       arg_names, arg_types,
       nullptr,  // arg_values
@@ -781,7 +942,8 @@
 }
 
 template <class ARG1_TYPE>
-static void AddMetadataEvent(const char* event_name,
+static void AddMetadataEvent(const unsigned char* category_group_enabled,
+                             const char* event_name,
                              const char* arg_name,
                              const ARG1_TYPE& arg_val) {
   const int num_args = 1;
@@ -791,8 +953,8 @@
   SetTraceValue(arg_val, &arg_types[0], &arg_values[0]);
 
   base::trace_event::TraceLog::GetInstance()->AddMetadataEvent(
-      event_name, num_args, arg_names, arg_types, arg_values, nullptr,
-      TRACE_EVENT_FLAG_NONE);
+      category_group_enabled, event_name, num_args, arg_names, arg_types,
+      arg_values, nullptr, TRACE_EVENT_FLAG_NONE);
 }
 
 // Used by TRACE_EVENTx macros. Do not use directly.
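
Note: taken together, the new overloads let a call site hand a heap-allocated
convertable argument to the ordinary TRACE_EVENTx macros by moving a
scoped_ptr. TracedValue (from trace_event_argument.h, next) is the stock
ConvertableToTraceFormat implementation; names and values here are
illustrative:

    #include <utility>
    #include "base/trace_event/trace_event.h"
    #include "base/trace_event/trace_event_argument.h"

    void RecordAllocation() {
      scoped_ptr<base::trace_event::TracedValue> details(
          new base::trace_event::TracedValue());
      details->SetInteger("bytes", 1024);
      details->SetString("source", "arena");
      TRACE_EVENT1("memory", "Alloc", "details", std::move(details));
    }
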
diff --git a/base/trace_event/trace_event_argument.h b/base/trace_event/trace_event_argument.h
index a127b0d..d706479 100644
--- a/base/trace_event/trace_event_argument.h
+++ b/base/trace_event/trace_event_argument.h
@@ -26,6 +26,7 @@
  public:
   TracedValue();
   explicit TracedValue(size_t capacity);
+  ~TracedValue() override;
 
   void EndDictionary();
   void EndArray();
@@ -75,8 +76,6 @@
   scoped_ptr<base::Value> ToBaseValue() const;
 
  private:
-  ~TracedValue() override;
-
   Pickle pickle_;
 
 #ifndef NDEBUG
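
Note: moving the destructor to public is what lets TracedValue live in a
scoped_ptr at all; under the old protected/RefCounted arrangement the lines
below would not compile. Illustrative:

    scoped_ptr<base::trace_event::TracedValue> value(
        new base::trace_event::TracedValue());
    value->SetBoolean("ok", true);  // Unique ownership, no AddRef/Release.
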
diff --git a/base/trace_event/trace_event_argument_unittest.cc b/base/trace_event/trace_event_argument_unittest.cc
index 82436ba..644d494 100644
--- a/base/trace_event/trace_event_argument_unittest.cc
+++ b/base/trace_event/trace_event_argument_unittest.cc
@@ -15,7 +15,7 @@
 namespace trace_event {
 
 TEST(TraceEventArgumentTest, FlatDictionary) {
-  scoped_refptr<TracedValue> value = new TracedValue();
+  scoped_ptr<TracedValue> value(new TracedValue());
   value->SetInteger("int", 2014);
   value->SetDouble("double", 0.0);
   value->SetBoolean("bool", true);
@@ -28,7 +28,7 @@
 }
 
 TEST(TraceEventArgumentTest, NoDotPathExpansion) {
-  scoped_refptr<TracedValue> value = new TracedValue();
+  scoped_ptr<TracedValue> value(new TracedValue());
   value->SetInteger("in.t", 2014);
   value->SetDouble("doub.le", 0.0);
   value->SetBoolean("bo.ol", true);
@@ -41,7 +41,7 @@
 }
 
 TEST(TraceEventArgumentTest, Hierarchy) {
-  scoped_refptr<TracedValue> value = new TracedValue();
+  scoped_ptr<TracedValue> value(new TracedValue());
   value->SetInteger("i0", 2014);
   value->BeginDictionary("dict1");
   value->SetInteger("i1", 2014);
@@ -77,7 +77,7 @@
     kLongString3[i] = 'a' + (i % 25);
   kLongString3[sizeof(kLongString3) - 1] = '\0';
 
-  scoped_refptr<TracedValue> value = new TracedValue();
+  scoped_ptr<TracedValue> value(new TracedValue());
   value->SetString("a", "short");
   value->SetString("b", kLongString);
   value->BeginArray("c");
@@ -113,7 +113,7 @@
   list_value->AppendString("in_list");
   list_value->Append(std::move(dict_value));
 
-  scoped_refptr<TracedValue> value = new TracedValue();
+  scoped_ptr<TracedValue> value(new TracedValue());
   value->BeginDictionary("outer_dict");
   value->SetValue("inner_list", std::move(list_value));
   value->EndDictionary();
@@ -130,10 +130,10 @@
 }
 
 TEST(TraceEventArgumentTest, PassTracedValue) {
-  auto dict_value = make_scoped_refptr(new TracedValue);
+  auto dict_value = make_scoped_ptr(new TracedValue());
   dict_value->SetInteger("a", 1);
 
-  auto nested_dict_value = make_scoped_refptr(new TracedValue);
+  auto nested_dict_value = make_scoped_ptr(new TracedValue());
   nested_dict_value->SetInteger("b", 2);
   nested_dict_value->BeginArray("c");
   nested_dict_value->AppendString("foo");
diff --git a/base/trace_event/trace_event_impl.cc b/base/trace_event/trace_event_impl.cc
index 24d6568..c0dc843 100644
--- a/base/trace_event/trace_event_impl.cc
+++ b/base/trace_event/trace_event_impl.cc
@@ -41,6 +41,7 @@
 
 TraceEvent::TraceEvent()
     : duration_(TimeDelta::FromInternalValue(-1)),
+      scope_(trace_event_internal::kGlobalScope),
       id_(0u),
       category_group_enabled_(NULL),
       name_(NULL),
@@ -55,26 +56,27 @@
 TraceEvent::~TraceEvent() {
 }
 
-void TraceEvent::CopyFrom(const TraceEvent& other) {
-  timestamp_ = other.timestamp_;
-  thread_timestamp_ = other.thread_timestamp_;
-  duration_ = other.duration_;
-  id_ = other.id_;
-  category_group_enabled_ = other.category_group_enabled_;
-  name_ = other.name_;
-  if (other.flags_ & TRACE_EVENT_FLAG_HAS_PROCESS_ID)
-    process_id_ = other.process_id_;
+void TraceEvent::MoveFrom(scoped_ptr<TraceEvent> other) {
+  timestamp_ = other->timestamp_;
+  thread_timestamp_ = other->thread_timestamp_;
+  duration_ = other->duration_;
+  scope_ = other->scope_;
+  id_ = other->id_;
+  category_group_enabled_ = other->category_group_enabled_;
+  name_ = other->name_;
+  if (other->flags_ & TRACE_EVENT_FLAG_HAS_PROCESS_ID)
+    process_id_ = other->process_id_;
   else
-    thread_id_ = other.thread_id_;
-  phase_ = other.phase_;
-  flags_ = other.flags_;
-  parameter_copy_storage_ = other.parameter_copy_storage_;
+    thread_id_ = other->thread_id_;
+  phase_ = other->phase_;
+  flags_ = other->flags_;
+  parameter_copy_storage_ = std::move(other->parameter_copy_storage_);
 
   for (int i = 0; i < kTraceMaxNumArgs; ++i) {
-    arg_names_[i] = other.arg_names_[i];
-    arg_types_[i] = other.arg_types_[i];
-    arg_values_[i] = other.arg_values_[i];
-    convertable_values_[i] = other.convertable_values_[i];
+    arg_names_[i] = other->arg_names_[i];
+    arg_types_[i] = other->arg_types_[i];
+    arg_values_[i] = other->arg_values_[i];
+    convertable_values_[i] = std::move(other->convertable_values_[i]);
   }
 }
 
@@ -85,17 +87,19 @@
     char phase,
     const unsigned char* category_group_enabled,
     const char* name,
+    const char* scope,
     unsigned long long id,
     unsigned long long bind_id,
     int num_args,
     const char** arg_names,
     const unsigned char* arg_types,
     const unsigned long long* arg_values,
-    const scoped_refptr<ConvertableToTraceFormat>* convertable_values,
+    scoped_ptr<ConvertableToTraceFormat>* convertable_values,
     unsigned int flags) {
   timestamp_ = timestamp;
   thread_timestamp_ = thread_timestamp;
   duration_ = TimeDelta::FromInternalValue(-1);
+  scope_ = scope;
   id_ = id;
   category_group_enabled_ = category_group_enabled;
   name_ = name;
@@ -111,22 +115,24 @@
     arg_names_[i] = arg_names[i];
     arg_types_[i] = arg_types[i];
 
-    if (arg_types[i] == TRACE_VALUE_TYPE_CONVERTABLE)
-      convertable_values_[i] = convertable_values[i];
-    else
+    if (arg_types[i] == TRACE_VALUE_TYPE_CONVERTABLE) {
+      convertable_values_[i] = std::move(convertable_values[i]);
+    } else {
       arg_values_[i].as_uint = arg_values[i];
+      convertable_values_[i].reset();
+    }
   }
   for (; i < kTraceMaxNumArgs; ++i) {
     arg_names_[i] = NULL;
     arg_values_[i].as_uint = 0u;
-    convertable_values_[i] = NULL;
+    convertable_values_[i].reset();
     arg_types_[i] = TRACE_VALUE_TYPE_UINT;
   }
 
   bool copy = !!(flags & TRACE_EVENT_FLAG_COPY);
   size_t alloc_size = 0;
   if (copy) {
-    alloc_size += GetAllocLength(name);
+    alloc_size += GetAllocLength(name) + GetAllocLength(scope);
     for (i = 0; i < num_args; ++i) {
       alloc_size += GetAllocLength(arg_names_[i]);
       if (arg_types_[i] == TRACE_VALUE_TYPE_STRING)
@@ -147,12 +153,13 @@
   }
 
   if (alloc_size) {
-    parameter_copy_storage_ = new RefCountedString;
-    parameter_copy_storage_->data().resize(alloc_size);
-    char* ptr = string_as_array(&parameter_copy_storage_->data());
+    parameter_copy_storage_.reset(new std::string);
+    parameter_copy_storage_->resize(alloc_size);
+    char* ptr = string_as_array(parameter_copy_storage_.get());
     const char* end = ptr + alloc_size;
     if (copy) {
       CopyTraceEventParameter(&ptr, &name_, end);
+      CopyTraceEventParameter(&ptr, &scope_, end);
       for (i = 0; i < num_args; ++i) {
         CopyTraceEventParameter(&ptr, &arg_names_[i], end);
       }
@@ -171,9 +178,9 @@
   // Only reset fields that won't be initialized in Initialize(), or that may
   // hold references to other objects.
   duration_ = TimeDelta::FromInternalValue(-1);
-  parameter_copy_storage_ = NULL;
+  parameter_copy_storage_.reset();
   for (int i = 0; i < kTraceMaxNumArgs; ++i)
-    convertable_values_[i] = NULL;
+    convertable_values_[i].reset();
 }
 
 void TraceEvent::UpdateDuration(const TimeTicks& now,
@@ -191,11 +198,8 @@
     TraceEventMemoryOverhead* overhead) {
   overhead->Add("TraceEvent", sizeof(*this));
 
-  // TODO(primiano): parameter_copy_storage_ is refcounted and, in theory,
-  // could be shared by several events and we might overcount. In practice
-  // this is unlikely but it's worth checking.
   if (parameter_copy_storage_)
-    overhead->AddRefCountedString(*parameter_copy_storage_.get());
+    overhead->AddString(*parameter_copy_storage_);
 
   for (size_t i = 0; i < kTraceMaxNumArgs; ++i) {
     if (arg_types_[i] == TRACE_VALUE_TYPE_CONVERTABLE)
@@ -354,8 +358,11 @@
 
  // If id_ is set, print it out as a hex string so we don't lose any
   // bits (it might be a 64-bit pointer).
-  if (flags_ & TRACE_EVENT_FLAG_HAS_ID)
+  if (flags_ & TRACE_EVENT_FLAG_HAS_ID) {
+    if (scope_ != trace_event_internal::kGlobalScope)
+      StringAppendF(out, ",\"scope\":\"%s\"", scope_);
     StringAppendF(out, ",\"id\":\"0x%" PRIx64 "\"", static_cast<uint64_t>(id_));
+  }
 
   if (flags_ & TRACE_EVENT_FLAG_BIND_TO_ENCLOSING)
     StringAppendF(out, ",\"bp\":\"e\"");
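
Note: with the new scope_ field, a non-global scope is serialized as its own
"scope" key next to "id". A minimal sketch of the observable behavior
(category and values illustrative, not taken from this patch):

    #include "base/trace_event/trace_event.h"

    void EmitScopedIdEvent() {
      TRACE_EVENT_OBJECT_CREATED_WITH_ID(
          "cat", "MyObject", TRACE_ID_WITH_SCOPE("scope", 0x42));
      // Serializes roughly as: "ph":"N","scope":"scope","id":"0x42".
      // Events using the default kGlobalScope omit the "scope" key.
    }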
diff --git a/base/trace_event/trace_event_impl.h b/base/trace_event/trace_event_impl.h
index 36461e2..df7151a 100644
--- a/base/trace_event/trace_event_impl.h
+++ b/base/trace_event/trace_event_impl.h
@@ -17,7 +17,7 @@
 #include "base/callback.h"
 #include "base/containers/hash_tables.h"
 #include "base/macros.h"
-#include "base/memory/ref_counted_memory.h"
+#include "base/memory/scoped_ptr.h"
 #include "base/observer_list.h"
 #include "base/single_thread_task_runner.h"
 #include "base/strings/string_util.h"
@@ -44,9 +44,11 @@
 
 // For any argument of type TRACE_VALUE_TYPE_CONVERTABLE the provided
 // class must implement this interface.
-class BASE_EXPORT ConvertableToTraceFormat
-    : public RefCounted<ConvertableToTraceFormat> {
+class BASE_EXPORT ConvertableToTraceFormat {
  public:
+  ConvertableToTraceFormat() {}
+  virtual ~ConvertableToTraceFormat() {}
+
   // Append the class info to the provided |out| string. The appended
   // data must be a valid JSON object. Strings must be properly quoted, and
   // escaped. There is no processing applied to the content after it is
@@ -61,11 +63,8 @@
     return result;
   }
 
- protected:
-  virtual ~ConvertableToTraceFormat() {}
-
  private:
-  friend class RefCounted<ConvertableToTraceFormat>;
+  DISALLOW_COPY_AND_ASSIGN(ConvertableToTraceFormat);
 };
 
 const int kTraceMaxNumArgs = 2;
@@ -93,25 +92,23 @@
   TraceEvent();
   ~TraceEvent();
 
-  // We don't need to copy TraceEvent except when TraceEventBuffer is cloned.
-  // Use explicit copy method to avoid accidentally misuse of copy.
-  void CopyFrom(const TraceEvent& other);
+  void MoveFrom(scoped_ptr<TraceEvent> other);
 
-  void Initialize(
-      int thread_id,
-      TimeTicks timestamp,
-      ThreadTicks thread_timestamp,
-      char phase,
-      const unsigned char* category_group_enabled,
-      const char* name,
-      unsigned long long id,
-      unsigned long long bind_id,
-      int num_args,
-      const char** arg_names,
-      const unsigned char* arg_types,
-      const unsigned long long* arg_values,
-      const scoped_refptr<ConvertableToTraceFormat>* convertable_values,
-      unsigned int flags);
+  void Initialize(int thread_id,
+                  TimeTicks timestamp,
+                  ThreadTicks thread_timestamp,
+                  char phase,
+                  const unsigned char* category_group_enabled,
+                  const char* name,
+                  const char* scope,
+                  unsigned long long id,
+                  unsigned long long bind_id,
+                  int num_args,
+                  const char** arg_names,
+                  const unsigned char* arg_types,
+                  const unsigned long long* arg_values,
+                  scoped_ptr<ConvertableToTraceFormat>* convertable_values,
+                  unsigned int flags);
 
   void Reset();
 
@@ -135,12 +132,13 @@
   int thread_id() const { return thread_id_; }
   TimeDelta duration() const { return duration_; }
   TimeDelta thread_duration() const { return thread_duration_; }
+  const char* scope() const { return scope_; }
   unsigned long long id() const { return id_; }
   unsigned int flags() const { return flags_; }
 
   // Exposed for unittesting:
 
-  const base::RefCountedString* parameter_copy_storage() const {
+  const std::string* parameter_copy_storage() const {
     return parameter_copy_storage_.get();
   }
 
@@ -160,14 +158,15 @@
   ThreadTicks thread_timestamp_;
   TimeDelta duration_;
   TimeDelta thread_duration_;
-  // id_ can be used to store phase-specific data.
+  // scope_ and id_ can be used to store phase-specific data.
+  const char* scope_;
   unsigned long long id_;
   TraceValue arg_values_[kTraceMaxNumArgs];
   const char* arg_names_[kTraceMaxNumArgs];
-  scoped_refptr<ConvertableToTraceFormat> convertable_values_[kTraceMaxNumArgs];
+  scoped_ptr<ConvertableToTraceFormat> convertable_values_[kTraceMaxNumArgs];
   const unsigned char* category_group_enabled_;
   const char* name_;
-  scoped_refptr<base::RefCountedString> parameter_copy_storage_;
+  scoped_ptr<std::string> parameter_copy_storage_;
   // Depending on TRACE_EVENT_FLAG_HAS_PROCESS_ID the event will have either:
   //  tid: thread_id_, pid: current_process_id (default case).
   //  tid: -1, pid: process_id_ (when flags_ & TRACE_EVENT_FLAG_HAS_PROCESS_ID).
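
Note: ConvertableToTraceFormat is now uniquely owned instead of refcounted, so
the destructor is public and arguments are handed off by move. A minimal
sketch of a conforming subclass (JsonBlob is hypothetical):

    #include <string>
    #include <utility>

    #include "base/memory/scoped_ptr.h"
    #include "base/trace_event/trace_event.h"

    class JsonBlob : public base::trace_event::ConvertableToTraceFormat {
     public:
      ~JsonBlob() override {}
      void AppendAsTraceFormat(std::string* out) const override {
        out->append("{\"bytes\":1024}");  // must append a valid JSON value
      }
    };

    void RecordBlob() {
      scoped_ptr<JsonBlob> blob(new JsonBlob());
      // Ownership moves into the trace log; |blob| is null afterwards.
      TRACE_EVENT1("cat", "name", "blob", std::move(blob));
    }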
diff --git a/base/trace_event/trace_event_unittest.cc b/base/trace_event/trace_event_unittest.cc
index 09f2a91..c98c698 100644
--- a/base/trace_event/trace_event_unittest.cc
+++ b/base/trace_event/trace_event_unittest.cc
@@ -134,19 +134,6 @@
                    base::Unretained(flush_complete_event)));
   }
 
-  void FlushMonitoring() {
-    WaitableEvent flush_complete_event(false, false);
-    FlushMonitoring(&flush_complete_event);
-    flush_complete_event.Wait();
-  }
-
-  void FlushMonitoring(WaitableEvent* flush_complete_event) {
-    TraceLog::GetInstance()->FlushButLeaveBufferIntact(
-        base::Bind(&TraceEventTestFixture::OnTraceDataCollected,
-                   base::Unretained(static_cast<TraceEventTestFixture*>(this)),
-                   base::Unretained(flush_complete_event)));
-  }
-
   void SetUp() override {
     const char* name = PlatformThread::GetName();
     old_thread_name_ = name ? strdup(name) : NULL;
@@ -501,8 +488,27 @@
                                               0x2128506);
     trackable.snapshot("world");
 
+    TRACE_EVENT_OBJECT_CREATED_WITH_ID(
+        "all", "tracked object 3", TRACE_ID_WITH_SCOPE("scope", 0x42));
+    TRACE_EVENT_OBJECT_SNAPSHOT_WITH_ID(
+        "all", "tracked object 3", TRACE_ID_WITH_SCOPE("scope", 0x42), "hello");
+    TRACE_EVENT_OBJECT_DELETED_WITH_ID(
+        "all", "tracked object 3", TRACE_ID_WITH_SCOPE("scope", 0x42));
+
     TRACE_EVENT1(kControlCharacters, kControlCharacters,
                  kControlCharacters, kControlCharacters);
+
+    uint64_t context_id = 0x20151021;
+
+    TRACE_EVENT_ENTER_CONTEXT("all", "TRACE_EVENT_ENTER_CONTEXT call",
+                              TRACE_ID_WITH_SCOPE("scope", context_id));
+    TRACE_EVENT_LEAVE_CONTEXT("all", "TRACE_EVENT_LEAVE_CONTEXT call",
+                              TRACE_ID_WITH_SCOPE("scope", context_id));
+    TRACE_EVENT_SCOPED_CONTEXT("disabled-by-default-cat",
+                               "TRACE_EVENT_SCOPED_CONTEXT disabled call",
+                               context_id);
+    TRACE_EVENT_SCOPED_CONTEXT("all", "TRACE_EVENT_SCOPED_CONTEXT call",
+                               context_id);
   }  // Scope close causes TRACE_EVENT0 etc to send their END events.
 
   if (task_complete_event)
@@ -800,6 +806,7 @@
 
     EXPECT_TRUE((item && item->GetString("ph", &phase)));
     EXPECT_EQ("N", phase);
+    EXPECT_FALSE((item && item->HasKey("scope")));
     EXPECT_TRUE((item && item->GetString("id", &id)));
     EXPECT_EQ("0x42", id);
 
@@ -807,6 +814,7 @@
     EXPECT_TRUE(item);
     EXPECT_TRUE(item && item->GetString("ph", &phase));
     EXPECT_EQ("O", phase);
+    EXPECT_FALSE((item && item->HasKey("scope")));
     EXPECT_TRUE(item && item->GetString("id", &id));
     EXPECT_EQ("0x42", id);
     EXPECT_TRUE(item && item->GetString("args.snapshot", &snapshot));
@@ -816,6 +824,7 @@
     EXPECT_TRUE(item);
     EXPECT_TRUE(item && item->GetString("ph", &phase));
     EXPECT_EQ("D", phase);
+    EXPECT_FALSE((item && item->HasKey("scope")));
     EXPECT_TRUE(item && item->GetString("id", &id));
     EXPECT_EQ("0x42", id);
   }
@@ -848,8 +857,98 @@
     EXPECT_EQ("0x2128506", id);
   }
 
+  EXPECT_FIND_("tracked object 3");
+  {
+    std::string phase;
+    std::string scope;
+    std::string id;
+    std::string snapshot;
+
+    EXPECT_TRUE((item && item->GetString("ph", &phase)));
+    EXPECT_EQ("N", phase);
+    EXPECT_TRUE((item && item->GetString("scope", &scope)));
+    EXPECT_EQ("scope", scope);
+    EXPECT_TRUE((item && item->GetString("id", &id)));
+    EXPECT_EQ("0x42", id);
+
+    item = FindTraceEntry(trace_parsed, "tracked object 3", item);
+    EXPECT_TRUE(item);
+    EXPECT_TRUE(item && item->GetString("ph", &phase));
+    EXPECT_EQ("O", phase);
+    EXPECT_TRUE((item && item->GetString("scope", &scope)));
+    EXPECT_EQ("scope", scope);
+    EXPECT_TRUE(item && item->GetString("id", &id));
+    EXPECT_EQ("0x42", id);
+    EXPECT_TRUE(item && item->GetString("args.snapshot", &snapshot));
+    EXPECT_EQ("hello", snapshot);
+
+    item = FindTraceEntry(trace_parsed, "tracked object 3", item);
+    EXPECT_TRUE(item);
+    EXPECT_TRUE(item && item->GetString("ph", &phase));
+    EXPECT_EQ("D", phase);
+    EXPECT_TRUE((item && item->GetString("scope", &scope)));
+    EXPECT_EQ("scope", scope);
+    EXPECT_TRUE(item && item->GetString("id", &id));
+    EXPECT_EQ("0x42", id);
+  }
+
   EXPECT_FIND_(kControlCharacters);
   EXPECT_SUB_FIND_(kControlCharacters);
+
+  EXPECT_FIND_("TRACE_EVENT_ENTER_CONTEXT call");
+  {
+    std::string ph;
+    EXPECT_TRUE((item && item->GetString("ph", &ph)));
+    EXPECT_EQ("(", ph);
+
+    std::string scope;
+    std::string id;
+    EXPECT_TRUE((item && item->GetString("scope", &scope)));
+    EXPECT_EQ("scope", scope);
+    EXPECT_TRUE((item && item->GetString("id", &id)));
+    EXPECT_EQ("0x20151021", id);
+  }
+
+  EXPECT_FIND_("TRACE_EVENT_LEAVE_CONTEXT call");
+  {
+    std::string ph;
+    EXPECT_TRUE((item && item->GetString("ph", &ph)));
+    EXPECT_EQ(")", ph);
+
+    std::string scope;
+    std::string id;
+    EXPECT_TRUE((item && item->GetString("scope", &scope)));
+    EXPECT_EQ("scope", scope);
+    EXPECT_TRUE((item && item->GetString("id", &id)));
+    EXPECT_EQ("0x20151021", id);
+  }
+
+  std::vector<const DictionaryValue*> scoped_context_calls =
+      FindTraceEntries(trace_parsed, "TRACE_EVENT_SCOPED_CONTEXT call");
+  EXPECT_EQ(2u, scoped_context_calls.size());
+  {
+    item = scoped_context_calls[0];
+    std::string ph;
+    EXPECT_TRUE((item && item->GetString("ph", &ph)));
+    EXPECT_EQ("(", ph);
+
+    std::string id;
+    EXPECT_FALSE((item && item->HasKey("scope")));
+    EXPECT_TRUE((item && item->GetString("id", &id)));
+    EXPECT_EQ("0x20151021", id);
+  }
+
+  {
+    item = scoped_context_calls[1];
+    std::string ph;
+    EXPECT_TRUE((item && item->GetString("ph", &ph)));
+    EXPECT_EQ(")", ph);
+
+    std::string id;
+    EXPECT_FALSE((item && item->HasKey("scope")));
+    EXPECT_TRUE((item && item->GetString("id", &id)));
+    EXPECT_EQ("0x20151021", id);
+  }
 }
 
 void TraceManyInstantEvents(int thread_id, int num_events,
@@ -1138,55 +1237,41 @@
   class Convertable : public ConvertableToTraceFormat {
    public:
     explicit Convertable(int* num_calls) : num_calls_(num_calls) {}
+    ~Convertable() override {}
     void AppendAsTraceFormat(std::string* out) const override {
       (*num_calls_)++;
       out->append("\"metadata_value\"");
     }
 
    private:
-    ~Convertable() override {}
     int* num_calls_;
   };
 
-  scoped_refptr<ConvertableToTraceFormat> convertable =
-      new Convertable(&num_calls);
+  scoped_ptr<ConvertableToTraceFormat> conv1(new Convertable(&num_calls));
+  scoped_ptr<Convertable> conv2(new Convertable(&num_calls));
 
   BeginTrace();
-  TRACE_EVENT_API_ADD_METADATA_EVENT("metadata_event_name", "metadata_arg_name",
-                                     convertable);
-
+  TRACE_EVENT_API_ADD_METADATA_EVENT(
+      TraceLog::GetCategoryGroupEnabled("__metadata"), "metadata_event_1",
+      "metadata_arg_name", std::move(conv1));
+  TRACE_EVENT_API_ADD_METADATA_EVENT(
+      TraceLog::GetCategoryGroupEnabled("__metadata"), "metadata_event_2",
+      "metadata_arg_name", std::move(conv2));
   // |AppendAsTraceFormat| should only be called on flush, not when the event
   // is added.
   ASSERT_EQ(0, num_calls);
   EndTraceAndFlush();
-  ASSERT_EQ(1, num_calls);
-  EXPECT_TRUE(FindNamePhaseKeyValue("metadata_event_name", "M",
+  ASSERT_EQ(2, num_calls);
+  EXPECT_TRUE(FindNamePhaseKeyValue("metadata_event_1", "M",
+                                    "metadata_arg_name", "metadata_value"));
+  EXPECT_TRUE(FindNamePhaseKeyValue("metadata_event_2", "M",
                                     "metadata_arg_name", "metadata_value"));
 
  // The metadata event should only be added to the current trace. In this new
   // trace, the event should not appear.
   BeginTrace();
   EndTraceAndFlush();
-  ASSERT_EQ(1, num_calls);
-
-  // Flushing should cause |AppendAsTraceFormat| to be called, but if the buffer
-  // is left intact, the flush at the end of the trace should still call it;
-  // the metadata event should not be removed.
-  TraceLog::GetInstance()->SetEnabled(
-      TraceConfig(kRecordAllCategoryFilter,
-                  "record-until-full,enable-sampling"),
-      TraceLog::MONITORING_MODE);
-  TRACE_EVENT_API_ADD_METADATA_EVENT("metadata_event_name", "metadata_arg_name",
-                                     convertable);
-  FlushMonitoring();
   ASSERT_EQ(2, num_calls);
-
-  // Flushing the trace at this point will cause |AppendAsTraceFormat| to be
-  // called twice: once for the event that was added by the monitoring flush,
-  // and once for the end trace flush; the metadata event will be duplicated.
-  // This is consistent with the other metadata events.
-  EndTraceAndFlush();
-  ASSERT_EQ(4, num_calls);
 }
 
 // Test that categories work.
@@ -1460,14 +1545,16 @@
     // Test that string arguments are copied.
     TraceEventHandle handle1 =
         trace_event_internal::AddTraceEvent(
-            TRACE_EVENT_PHASE_INSTANT, category_group_enabled, "name1", 0, 0,
-            trace_event_internal::kNoId,
+            TRACE_EVENT_PHASE_INSTANT, category_group_enabled, "name1",
+            trace_event_internal::kGlobalScope, trace_event_internal::kNoId,
+            0, trace_event_internal::kNoId,
             "arg1", std::string("argval"), "arg2", std::string("argval"));
     // Test that static TRACE_STR_COPY string arguments are copied.
     TraceEventHandle handle2 =
         trace_event_internal::AddTraceEvent(
-            TRACE_EVENT_PHASE_INSTANT, category_group_enabled, "name2", 0, 0,
-            trace_event_internal::kNoId,
+            TRACE_EVENT_PHASE_INSTANT, category_group_enabled, "name2",
+            trace_event_internal::kGlobalScope, trace_event_internal::kNoId,
+            0, trace_event_internal::kNoId,
             "arg1", TRACE_STR_COPY("argval"),
             "arg2", TRACE_STR_COPY("argval"));
     EXPECT_GT(tracer->GetStatus().event_count, 1u);
@@ -1489,16 +1576,18 @@
     // Test that static literal string arguments are not copied.
     TraceEventHandle handle1 =
         trace_event_internal::AddTraceEvent(
-            TRACE_EVENT_PHASE_INSTANT, category_group_enabled, "name1", 0, 0,
-            trace_event_internal::kNoId,
+            TRACE_EVENT_PHASE_INSTANT, category_group_enabled, "name1",
+            trace_event_internal::kGlobalScope, trace_event_internal::kNoId,
+            0, trace_event_internal::kNoId,
             "arg1", "argval", "arg2", "argval");
     // Test that static TRACE_STR_COPY NULL string arguments are not copied.
     const char* str1 = NULL;
     const char* str2 = NULL;
     TraceEventHandle handle2 =
         trace_event_internal::AddTraceEvent(
-            TRACE_EVENT_PHASE_INSTANT, category_group_enabled, "name2", 0, 0,
-            trace_event_internal::kNoId,
+            TRACE_EVENT_PHASE_INSTANT, category_group_enabled, "name2",
+            trace_event_internal::kGlobalScope, trace_event_internal::kNoId,
+            0, trace_event_internal::kNoId,
             "arg1", TRACE_STR_COPY(str1),
             "arg2", TRACE_STR_COPY(str2));
     EXPECT_GT(tracer->GetStatus().event_count, 1u);
@@ -1948,62 +2037,16 @@
   EndTraceAndFlush();
 }
 
-TEST_F(TraceEventTestFixture, TraceContinuousSampling) {
-  TraceLog::GetInstance()->SetEnabled(
-    TraceConfig(kRecordAllCategoryFilter, "record-until-full,enable-sampling"),
-    TraceLog::MONITORING_MODE);
-
-  TRACE_EVENT_SET_SAMPLING_STATE_FOR_BUCKET(1, "category", "AAA");
-  TraceLog::GetInstance()->WaitSamplingEventForTesting();
-  TRACE_EVENT_SET_SAMPLING_STATE_FOR_BUCKET(1, "category", "BBB");
-  TraceLog::GetInstance()->WaitSamplingEventForTesting();
-
-  FlushMonitoring();
-
-  // Make sure we can get the profiled data.
-  EXPECT_TRUE(FindNamePhase("AAA", "P"));
-  EXPECT_TRUE(FindNamePhase("BBB", "P"));
-
-  Clear();
-  TraceLog::GetInstance()->WaitSamplingEventForTesting();
-
-  TRACE_EVENT_SET_SAMPLING_STATE_FOR_BUCKET(1, "category", "CCC");
-  TraceLog::GetInstance()->WaitSamplingEventForTesting();
-  TRACE_EVENT_SET_SAMPLING_STATE_FOR_BUCKET(1, "category", "DDD");
-  TraceLog::GetInstance()->WaitSamplingEventForTesting();
-
-  FlushMonitoring();
-
-  // Make sure the profiled data is accumulated.
-  EXPECT_TRUE(FindNamePhase("AAA", "P"));
-  EXPECT_TRUE(FindNamePhase("BBB", "P"));
-  EXPECT_TRUE(FindNamePhase("CCC", "P"));
-  EXPECT_TRUE(FindNamePhase("DDD", "P"));
-
-  Clear();
-
-  TraceLog::GetInstance()->SetDisabled();
-
-  // Make sure disabling the continuous sampling thread clears
-  // the profiled data.
-  EXPECT_FALSE(FindNamePhase("AAA", "P"));
-  EXPECT_FALSE(FindNamePhase("BBB", "P"));
-  EXPECT_FALSE(FindNamePhase("CCC", "P"));
-  EXPECT_FALSE(FindNamePhase("DDD", "P"));
-
-  Clear();
-}
-
 class MyData : public ConvertableToTraceFormat {
  public:
   MyData() {}
+  ~MyData() override {}
 
   void AppendAsTraceFormat(std::string* out) const override {
     out->append("{\"foo\":1}");
   }
 
  private:
-  ~MyData() override {}
   DISALLOW_COPY_AND_ASSIGN(MyData);
 };
 
@@ -2011,31 +2054,25 @@
   TraceLog::GetInstance()->SetEnabled(TraceConfig(kRecordAllCategoryFilter, ""),
                                       TraceLog::RECORDING_MODE);
 
-  scoped_refptr<ConvertableToTraceFormat> data(new MyData());
-  scoped_refptr<ConvertableToTraceFormat> data1(new MyData());
-  scoped_refptr<ConvertableToTraceFormat> data2(new MyData());
-  TRACE_EVENT1("foo", "bar", "data", data);
-  TRACE_EVENT2("foo", "baz",
-               "data1", data1,
-               "data2", data2);
+  scoped_ptr<ConvertableToTraceFormat> data(new MyData());
+  scoped_ptr<ConvertableToTraceFormat> data1(new MyData());
+  scoped_ptr<ConvertableToTraceFormat> data2(new MyData());
+  TRACE_EVENT1("foo", "bar", "data", std::move(data));
+  TRACE_EVENT2("foo", "baz", "data1", std::move(data1), "data2",
+               std::move(data2));
 
-
-  scoped_refptr<ConvertableToTraceFormat> convertData1(new MyData());
-  scoped_refptr<ConvertableToTraceFormat> convertData2(new MyData());
-  TRACE_EVENT2(
-      "foo",
-      "string_first",
-      "str",
-      "string value 1",
-      "convert",
-      convertData1);
-  TRACE_EVENT2(
-      "foo",
-      "string_second",
-      "convert",
-      convertData2,
-      "str",
-      "string value 2");
+  // Check that scoped_ptr<DerivedClassOfConvertable> arguments are properly
+  // treated as convertable and not accidentally cast to bool.
+  scoped_ptr<MyData> convertData1(new MyData());
+  scoped_ptr<MyData> convertData2(new MyData());
+  scoped_ptr<MyData> convertData3(new MyData());
+  scoped_ptr<MyData> convertData4(new MyData());
+  TRACE_EVENT2("foo", "string_first", "str", "string value 1", "convert",
+               std::move(convertData1));
+  TRACE_EVENT2("foo", "string_second", "convert", std::move(convertData2),
+               "str", "string value 2");
+  TRACE_EVENT2("foo", "both_conv", "convert1", std::move(convertData3),
+               "convert2", std::move(convertData4));
   EndTraceAndFlush();
 
   // One arg version.
@@ -2110,6 +2147,21 @@
   ASSERT_TRUE(value->GetAsDictionary(&convertable_dict));
   EXPECT_TRUE(convertable_dict->GetInteger("foo", &foo_val));
   EXPECT_EQ(1, foo_val);
+
+  dict = FindNamePhase("both_conv", "X");
+  ASSERT_TRUE(dict);
+
+  args_dict = NULL;
+  dict->GetDictionary("args", &args_dict);
+  ASSERT_TRUE(args_dict);
+
+  value = NULL;
+  convertable_dict = NULL;
+  foo_val = 0;
+  EXPECT_TRUE(args_dict->Get("convert1", &value));
+  ASSERT_TRUE(value->GetAsDictionary(&convertable_dict));
+  EXPECT_TRUE(args_dict->Get("convert2", &value));
+  ASSERT_TRUE(value->GetAsDictionary(&convertable_dict));
 }
 
 TEST_F(TraceEventTestFixture, PrimitiveArgs) {
@@ -2403,6 +2455,7 @@
                        char phase,
                        const unsigned char* category_group_enabled,
                        const char* name,
+                       const char* scope,
                        unsigned long long id,
                        int num_args,
                        const char* const arg_names[],
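
Note: the context-event assertions above exercise the macros added in this
revision; "(" marks entering a context and ")" leaving it. A minimal usage
sketch (category and function names illustrative):

    #include <stdint.h>

    #include "base/trace_event/trace_event.h"

    void HandleRequest(uint64_t context_id) {
      // Emits "(" on entry and ")" automatically when the scope closes.
      TRACE_EVENT_SCOPED_CONTEXT("cat", "HandleRequest", context_id);
      // ... work attributed to |context_id| ...
    }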
diff --git a/base/trace_event/trace_log.cc b/base/trace_event/trace_log.cc
index 17f6b66..09d9f96 100644
--- a/base/trace_event/trace_log.cc
+++ b/base/trace_event/trace_log.cc
@@ -14,6 +14,7 @@
 #include "base/lazy_instance.h"
 #include "base/location.h"
 #include "base/macros.h"
+#include "base/memory/ref_counted_memory.h"
 #include "base/memory/scoped_ptr.h"
 #include "base/memory/singleton.h"
 #include "base/process/process_metrics.h"
@@ -76,8 +77,6 @@
     "Too many vector buffer chunks");
 const size_t kTraceEventRingBufferChunks = kTraceEventVectorBufferChunks / 4;
 
-// Can store results for 30 seconds with 1 ms sampling interval.
-const size_t kMonitorTraceEventBufferChunks = 30000 / kTraceBufferChunkSize;
 // ECHO_TO_CONSOLE needs a small buffer to hold the unfinished COMPLETE events.
 const size_t kEchoToConsoleTraceEventBufferChunks = 256;
 
@@ -138,6 +137,7 @@
       TRACE_EVENT_PHASE_METADATA,
       &g_category_group_enabled[g_category_metadata],
       metadata_name,
+      trace_event_internal::kGlobalScope,  // scope
       trace_event_internal::kNoId,  // id
       trace_event_internal::kNoId,  // bind_id
       num_args,
@@ -447,14 +447,15 @@
   unsigned char enabled_flag = 0;
   const char* category_group = g_category_groups[category_index];
   if (mode_ == RECORDING_MODE &&
-      trace_config_.IsCategoryGroupEnabled(category_group))
+      trace_config_.IsCategoryGroupEnabled(category_group)) {
     enabled_flag |= ENABLED_FOR_RECORDING;
-  else if (mode_ == MONITORING_MODE &&
-           trace_config_.IsCategoryGroupEnabled(category_group))
-    enabled_flag |= ENABLED_FOR_MONITORING;
+  }
+
   if (event_callback_ &&
-      event_callback_trace_config_.IsCategoryGroupEnabled(category_group))
+      event_callback_trace_config_.IsCategoryGroupEnabled(category_group)) {
     enabled_flag |= ENABLED_FOR_EVENT_CALLBACK;
+  }
+
 #if defined(OS_WIN)
   if (base::trace_event::TraceEventETWExport::IsCategoryGroupEnabled(
           category_group)) {
@@ -750,8 +751,8 @@
 TraceLogStatus TraceLog::GetStatus() const {
   AutoLock lock(lock_);
   TraceLogStatus result;
-  result.event_capacity = logged_events_->Capacity();
-  result.event_count = logged_events_->Size();
+  result.event_capacity = static_cast<uint32_t>(logged_events_->Capacity());
+  result.event_count = static_cast<uint32_t>(logged_events_->Size());
   return result;
 }
 
@@ -1011,31 +1012,6 @@
   FinishFlush(generation, discard_events);
 }
 
-void TraceLog::FlushButLeaveBufferIntact(
-    const TraceLog::OutputCallback& flush_output_callback) {
-  scoped_ptr<TraceBuffer> previous_logged_events;
-  ArgumentFilterPredicate argument_filter_predicate;
-  {
-    AutoLock lock(lock_);
-    AddMetadataEventsWhileLocked();
-    if (thread_shared_chunk_) {
-      // Return the chunk to the main buffer to flush the sampling data.
-      logged_events_->ReturnChunk(thread_shared_chunk_index_,
-                                  std::move(thread_shared_chunk_));
-    }
-    previous_logged_events = logged_events_->CloneForIteration();
-
-    if (trace_options() & kInternalEnableArgumentFilter) {
-      CHECK(!argument_filter_predicate_.is_null());
-      argument_filter_predicate = argument_filter_predicate_;
-    }
-  }  // release lock
-
-  ConvertTraceEventsToTraceFormat(std::move(previous_logged_events),
-                                  flush_output_callback,
-                                  argument_filter_predicate);
-}
-
 void TraceLog::UseNextTraceBuffer() {
   logged_events_.reset(CreateTraceBuffer());
   subtle::NoBarrier_AtomicIncrement(&generation_, 1);
@@ -1047,12 +1023,13 @@
     char phase,
     const unsigned char* category_group_enabled,
     const char* name,
+    const char* scope,
     unsigned long long id,
     int num_args,
     const char** arg_names,
     const unsigned char* arg_types,
     const unsigned long long* arg_values,
-    const scoped_refptr<ConvertableToTraceFormat>* convertable_values,
+    scoped_ptr<ConvertableToTraceFormat>* convertable_values,
     unsigned int flags) {
   int thread_id = static_cast<int>(base::PlatformThread::CurrentId());
   base::TimeTicks now = base::TimeTicks::Now();
@@ -1060,6 +1037,7 @@
       phase,
       category_group_enabled,
       name,
+      scope,
       id,
       trace_event_internal::kNoId,  // bind_id
       thread_id,
@@ -1076,13 +1054,14 @@
     char phase,
     const unsigned char* category_group_enabled,
     const char* name,
+    const char* scope,
     unsigned long long id,
     unsigned long long bind_id,
     int num_args,
     const char** arg_names,
     const unsigned char* arg_types,
     const unsigned long long* arg_values,
-    const scoped_refptr<ConvertableToTraceFormat>* convertable_values,
+    scoped_ptr<ConvertableToTraceFormat>* convertable_values,
     unsigned int flags) {
   int thread_id = static_cast<int>(base::PlatformThread::CurrentId());
   base::TimeTicks now = base::TimeTicks::Now();
@@ -1090,6 +1069,7 @@
       phase,
       category_group_enabled,
       name,
+      scope,
       id,
       bind_id,
       thread_id,
@@ -1106,19 +1086,21 @@
     char phase,
     const unsigned char* category_group_enabled,
     const char* name,
+    const char* scope,
     unsigned long long id,
     int process_id,
     int num_args,
     const char** arg_names,
     const unsigned char* arg_types,
     const unsigned long long* arg_values,
-    const scoped_refptr<ConvertableToTraceFormat>* convertable_values,
+    scoped_ptr<ConvertableToTraceFormat>* convertable_values,
     unsigned int flags) {
   base::TimeTicks now = base::TimeTicks::Now();
   return AddTraceEventWithThreadIdAndTimestamp(
       phase,
       category_group_enabled,
       name,
+      scope,
       id,
       trace_event_internal::kNoId,  // bind_id
       process_id,
@@ -1137,6 +1119,7 @@
     char phase,
     const unsigned char* category_group_enabled,
     const char* name,
+    const char* scope,
     unsigned long long id,
     int thread_id,
     const TimeTicks& timestamp,
@@ -1144,12 +1127,13 @@
     const char** arg_names,
     const unsigned char* arg_types,
     const unsigned long long* arg_values,
-    const scoped_refptr<ConvertableToTraceFormat>* convertable_values,
+    scoped_ptr<ConvertableToTraceFormat>* convertable_values,
     unsigned int flags) {
   return AddTraceEventWithThreadIdAndTimestamp(
       phase,
       category_group_enabled,
       name,
+      scope,
       id,
       trace_event_internal::kNoId,  // bind_id
       thread_id,
@@ -1166,6 +1150,7 @@
     char phase,
     const unsigned char* category_group_enabled,
     const char* name,
+    const char* scope,
     unsigned long long id,
     unsigned long long bind_id,
     int thread_id,
@@ -1174,7 +1159,7 @@
     const char** arg_names,
     const unsigned char* arg_types,
     const unsigned long long* arg_values,
-    const scoped_refptr<ConvertableToTraceFormat>* convertable_values,
+    scoped_ptr<ConvertableToTraceFormat>* convertable_values,
     unsigned int flags) {
   TraceEventHandle handle = {0, 0, 0};
   if (!*category_group_enabled)
@@ -1253,8 +1238,7 @@
 #endif  // OS_WIN
 
   std::string console_message;
-  if (*category_group_enabled &
-      (ENABLED_FOR_RECORDING | ENABLED_FOR_MONITORING)) {
+  if (*category_group_enabled & ENABLED_FOR_RECORDING) {
     OptionalAutoLock lock(&lock_);
 
     TraceEvent* trace_event = NULL;
@@ -1272,6 +1256,7 @@
                               phase,
                               category_group_enabled,
                               name,
+                              scope,
                               id,
                               bind_id,
                               num_args,
@@ -1318,39 +1303,47 @@
       event_callback(
           offset_event_timestamp,
           phase == TRACE_EVENT_PHASE_COMPLETE ? TRACE_EVENT_PHASE_BEGIN : phase,
-          category_group_enabled, name, id, num_args, arg_names, arg_types,
-          arg_values, flags);
+          category_group_enabled, name, scope, id, num_args, arg_names,
+          arg_types, arg_values, flags);
     }
   }
 
-  if (base::trace_event::AllocationContextTracker::capture_enabled()) {
-    if (phase == TRACE_EVENT_PHASE_BEGIN || phase == TRACE_EVENT_PHASE_COMPLETE)
-      base::trace_event::AllocationContextTracker::PushPseudoStackFrame(name);
-    else if (phase == TRACE_EVENT_PHASE_END)
-      // The pop for |TRACE_EVENT_PHASE_COMPLETE| events
-      // is in |TraceLog::UpdateTraceEventDuration|.
-      base::trace_event::AllocationContextTracker::PopPseudoStackFrame(name);
+  // TODO(primiano): Add support for events with copied name crbug.com/581078
+  if (!(flags & TRACE_EVENT_FLAG_COPY)) {
+    if (AllocationContextTracker::capture_enabled()) {
+      if (phase == TRACE_EVENT_PHASE_BEGIN ||
+          phase == TRACE_EVENT_PHASE_COMPLETE)
+        AllocationContextTracker::PushPseudoStackFrame(name);
+      else if (phase == TRACE_EVENT_PHASE_END)
+        // The pop for |TRACE_EVENT_PHASE_COMPLETE| events
+        // is in |TraceLog::UpdateTraceEventDuration|.
+        AllocationContextTracker::PopPseudoStackFrame(name);
+    }
   }
 
   return handle;
 }
 
 void TraceLog::AddMetadataEvent(
+    const unsigned char* category_group_enabled,
     const char* name,
     int num_args,
     const char** arg_names,
     const unsigned char* arg_types,
     const unsigned long long* arg_values,
-    const scoped_refptr<ConvertableToTraceFormat>* convertable_values,
+    scoped_ptr<ConvertableToTraceFormat>* convertable_values,
     unsigned int flags) {
   scoped_ptr<TraceEvent> trace_event(new TraceEvent);
+  int thread_id = static_cast<int>(base::PlatformThread::CurrentId());
+  ThreadTicks thread_now = ThreadNow();
+  TimeTicks now = OffsetNow();
   AutoLock lock(lock_);
   trace_event->Initialize(
-      0,  // thread_id
-      TimeTicks(), ThreadTicks(), TRACE_EVENT_PHASE_METADATA,
-      &g_category_group_enabled[g_category_metadata], name,
-      trace_event_internal::kNoId,  // id
-      trace_event_internal::kNoId,  // bind_id
+      thread_id, now, thread_now, TRACE_EVENT_PHASE_METADATA,
+      category_group_enabled, name,
+      trace_event_internal::kGlobalScope,  // scope
+      trace_event_internal::kNoId,         // id
+      trace_event_internal::kNoId,         // bind_id
       num_args, arg_names, arg_types, arg_values, convertable_values, flags);
   metadata_events_.push_back(std::move(trace_event));
 }
@@ -1459,9 +1452,10 @@
     EventCallback event_callback = reinterpret_cast<EventCallback>(
         subtle::NoBarrier_Load(&event_callback_));
     if (event_callback) {
-      event_callback(now, TRACE_EVENT_PHASE_END, category_group_enabled, name,
-                     trace_event_internal::kNoId, 0,
-                     nullptr, nullptr, nullptr, TRACE_EVENT_FLAG_NONE);
+      event_callback(
+        now, TRACE_EVENT_PHASE_END, category_group_enabled, name,
+        trace_event_internal::kGlobalScope, trace_event_internal::kNoId, 0,
+        nullptr, nullptr, nullptr, TRACE_EVENT_FLAG_NONE);
     }
   }
 }
@@ -1492,9 +1486,12 @@
 void TraceLog::AddMetadataEventsWhileLocked() {
   lock_.AssertAcquired();
 
-  // Copy metadata added by |AddMetadataEvent| into the trace log.
-  for (const scoped_ptr<TraceEvent>& event : metadata_events_)
-    AddEventToThreadSharedChunkWhileLocked(nullptr, false)->CopyFrom(*event);
+  // Move metadata added by |AddMetadataEvent| into the trace log.
+  while (!metadata_events_.empty()) {
+    TraceEvent* event = AddEventToThreadSharedChunkWhileLocked(nullptr, false);
+    event->MoveFrom(std::move(metadata_events_.back()));
+    metadata_events_.pop_back();
+  }
 
 #if !defined(OS_NACL)  // NaCl shouldn't expose the process id.
   InitializeMetadataEvent(AddEventToThreadSharedChunkWhileLocked(NULL, false),
@@ -1662,9 +1659,6 @@
   if (options & kInternalRecordContinuously)
     return TraceBuffer::CreateTraceBufferRingBuffer(
         kTraceEventRingBufferChunks);
-  else if ((options & kInternalEnableSampling) && mode_ == MONITORING_MODE)
-    return TraceBuffer::CreateTraceBufferRingBuffer(
-        kMonitorTraceEventBufferChunks);
   else if (options & kInternalEchoToConsole)
     return TraceBuffer::CreateTraceBufferRingBuffer(
         kEchoToConsoleTraceEventBufferChunks);
@@ -1719,6 +1713,7 @@
             TRACE_EVENT_PHASE_COMPLETE,
             category_group_enabled_,
             name,
+            trace_event_internal::kGlobalScope,  // scope
             trace_event_internal::kNoId,  // id
             static_cast<int>(base::PlatformThread::CurrentId()),  // thread_id
             base::TimeTicks::Now(),
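
Note: with FlushButLeaveBufferIntact() and MONITORING_MODE removed, Flush() is
the single collection path, and queued metadata events are moved (not copied)
into the buffer as part of it. A minimal sketch of driving a flush (callback
name hypothetical):

    #include "base/bind.h"
    #include "base/memory/ref_counted_memory.h"
    #include "base/trace_event/trace_log.h"

    void OnTraceData(const scoped_refptr<base::RefCountedString>& chunk,
                     bool has_more_events) {
      // Each |chunk| is a JSON fragment; accumulate until
      // |has_more_events| is false.
    }

    void CollectTrace() {
      base::trace_event::TraceLog::GetInstance()->Flush(
          base::Bind(&OnTraceData));
    }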
diff --git a/base/trace_event/trace_log.h b/base/trace_event/trace_log.h
index a079f04..67477c4 100644
--- a/base/trace_event/trace_log.h
+++ b/base/trace_event/trace_log.h
@@ -39,16 +39,15 @@
 struct BASE_EXPORT TraceLogStatus {
   TraceLogStatus();
   ~TraceLogStatus();
-  size_t event_capacity;
-  size_t event_count;
+  uint32_t event_capacity;
+  uint32_t event_count;
 };
 
 class BASE_EXPORT TraceLog : public MemoryDumpProvider {
  public:
   enum Mode {
     DISABLED = 0,
-    RECORDING_MODE,
-    MONITORING_MODE,
+    RECORDING_MODE
   };
 
   // The pointer returned from GetCategoryGroupEnabledInternal() points to a
@@ -58,8 +57,6 @@
   enum CategoryGroupEnabledFlags {
     // Category group enabled for the recording mode.
     ENABLED_FOR_RECORDING = 1 << 0,
-    // Category group enabled for the monitoring mode.
-    ENABLED_FOR_MONITORING = 1 << 1,
     // Category group enabled by SetEventCallbackEnabled().
     ENABLED_FOR_EVENT_CALLBACK = 1 << 2,
     // Category group enabled to export events to ETW.
@@ -142,6 +139,7 @@
                                 char phase,
                                 const unsigned char* category_group_enabled,
                                 const char* name,
+                                const char* scope,
                                 unsigned long long id,
                                 int num_args,
                                 const char* const arg_names[],
@@ -169,7 +167,6 @@
   typedef base::Callback<void(const scoped_refptr<base::RefCountedString>&,
                               bool has_more_events)> OutputCallback;
   void Flush(const OutputCallback& cb, bool use_worker_thread = false);
-  void FlushButLeaveBufferIntact(const OutputCallback& flush_output_callback);
 
   // Cancels tracing and discards collected data.
   void CancelTracing(const OutputCallback& cb);
@@ -188,41 +185,45 @@
       char phase,
       const unsigned char* category_group_enabled,
       const char* name,
+      const char* scope,
       unsigned long long id,
       int num_args,
       const char** arg_names,
       const unsigned char* arg_types,
       const unsigned long long* arg_values,
-      const scoped_refptr<ConvertableToTraceFormat>* convertable_values,
+      scoped_ptr<ConvertableToTraceFormat>* convertable_values,
       unsigned int flags);
   TraceEventHandle AddTraceEventWithBindId(
       char phase,
       const unsigned char* category_group_enabled,
       const char* name,
+      const char* scope,
       unsigned long long id,
       unsigned long long bind_id,
       int num_args,
       const char** arg_names,
       const unsigned char* arg_types,
       const unsigned long long* arg_values,
-      const scoped_refptr<ConvertableToTraceFormat>* convertable_values,
+      scoped_ptr<ConvertableToTraceFormat>* convertable_values,
       unsigned int flags);
   TraceEventHandle AddTraceEventWithProcessId(
       char phase,
       const unsigned char* category_group_enabled,
       const char* name,
+      const char* scope,
       unsigned long long id,
       int process_id,
       int num_args,
       const char** arg_names,
       const unsigned char* arg_types,
       const unsigned long long* arg_values,
-      const scoped_refptr<ConvertableToTraceFormat>* convertable_values,
+      scoped_ptr<ConvertableToTraceFormat>* convertable_values,
       unsigned int flags);
   TraceEventHandle AddTraceEventWithThreadIdAndTimestamp(
       char phase,
       const unsigned char* category_group_enabled,
       const char* name,
+      const char* scope,
       unsigned long long id,
       int thread_id,
       const TimeTicks& timestamp,
@@ -230,12 +231,13 @@
       const char** arg_names,
       const unsigned char* arg_types,
       const unsigned long long* arg_values,
-      const scoped_refptr<ConvertableToTraceFormat>* convertable_values,
+      scoped_ptr<ConvertableToTraceFormat>* convertable_values,
       unsigned int flags);
   TraceEventHandle AddTraceEventWithThreadIdAndTimestamp(
       char phase,
       const unsigned char* category_group_enabled,
       const char* name,
+      const char* scope,
       unsigned long long id,
       unsigned long long bind_id,
       int thread_id,
@@ -244,17 +246,18 @@
       const char** arg_names,
       const unsigned char* arg_types,
       const unsigned long long* arg_values,
-      const scoped_refptr<ConvertableToTraceFormat>* convertable_values,
+      scoped_ptr<ConvertableToTraceFormat>* convertable_values,
       unsigned int flags);
 
   // Adds a metadata event that will be written when the trace log is flushed.
   void AddMetadataEvent(
+      const unsigned char* category_group_enabled,
       const char* name,
       int num_args,
       const char** arg_names,
       const unsigned char* arg_types,
       const unsigned long long* arg_values,
-      const scoped_refptr<ConvertableToTraceFormat>* convertable_values,
+      scoped_ptr<ConvertableToTraceFormat>* convertable_values,
       unsigned int flags);
 
   void UpdateTraceEventDuration(const unsigned char* category_group_enabled,
diff --git a/base/trace_event/trace_sampling_thread.cc b/base/trace_event/trace_sampling_thread.cc
index ec4602c..a8d32d6 100644
--- a/base/trace_event/trace_sampling_thread.cc
+++ b/base/trace_event/trace_sampling_thread.cc
@@ -4,6 +4,7 @@
 
 #include <stddef.h>
 
+#include "base/trace_event/trace_event.h"
 #include "base/trace_event/trace_event_impl.h"
 #include "base/trace_event/trace_log.h"
 #include "base/trace_event/trace_sampling_thread.h"
@@ -54,8 +55,9 @@
   ExtractCategoryAndName(combined, &category_group, &name);
   TRACE_EVENT_API_ADD_TRACE_EVENT(
       TRACE_EVENT_PHASE_SAMPLE,
-      TraceLog::GetCategoryGroupEnabled(category_group), name, 0, 0, NULL, NULL,
-      NULL, NULL, 0);
+      TraceLog::GetCategoryGroupEnabled(category_group), name,
+      trace_event_internal::kGlobalScope, trace_event_internal::kNoId, 0,
+      NULL, NULL, NULL, NULL, 0);
 }
 
 void TraceSamplingThread::GetSamples() {
diff --git a/base/tracked_objects.cc b/base/tracked_objects.cc
index 8953554..5ae1dd0 100644
--- a/base/tracked_objects.cc
+++ b/base/tracked_objects.cc
@@ -13,7 +13,6 @@
 #include "base/compiler_specific.h"
 #include "base/logging.h"
 #include "base/process/process_handle.h"
-#include "base/profiler/alternate_timer.h"
 #include "base/strings/stringprintf.h"
 #include "base/tracking_info.h"
 #include "build/build_config.h"
@@ -36,15 +35,6 @@
 // this state may prevail for much or all of the process lifetime.
 const ThreadData::Status kInitialStartupState = ThreadData::PROFILING_ACTIVE;
 
-// Control whether an alternate time source (Now() function) is supported by
-// the ThreadData class.  This compile time flag should be set to true if we
-// want other modules (such as a memory allocator, or a thread-specific CPU time
-// clock) to be able to provide a thread-specific Now() function.  Without this
-// compile-time flag, the code will only support the wall-clock time.  This flag
-// can be flipped to efficiently disable this path (if there is a performance
-// problem with its presence).
-static const bool kAllowAlternateTimeSourceHandling = true;
-
 // Possible states of the profiler timing enabledness.
 enum {
   UNDEFINED_TIMING,
@@ -284,10 +274,7 @@
 // to them.
 
 // static
-NowFunction* ThreadData::now_function_ = NULL;
-
-// static
-bool ThreadData::now_function_is_time_ = false;
+ThreadData::NowFunction* ThreadData::now_function_for_testing_ = NULL;
 
 // A TLS slot which points to the ThreadData instance for the current thread.
 // We do a fake initialization here (zeroing out data), and then the real
@@ -515,16 +502,6 @@
   random_number_ ^=
       static_cast<uint32_t>(&births - reinterpret_cast<Births*>(0));
 
-  // We don't have queue durations without OS timer.  OS timer is automatically
-  // used for task-post-timing, so the use of an alternate timer implies all
-  // queue times are invalid, unless it was explicitly said that we can trust
-  // the alternate timer.
-  if (kAllowAlternateTimeSourceHandling &&
-      now_function_ &&
-      !now_function_is_time_) {
-    queue_duration = 0;
-  }
-
   DeathMap::iterator it = death_map_.find(&births);
   DeathData* death_data;
   if (it != death_map_.end()) {
@@ -691,12 +668,6 @@
   }
 }
 
-static void OptionallyInitializeAlternateTimer() {
-  NowFunction* alternate_time_source = GetAlternateTimeSource();
-  if (alternate_time_source)
-    ThreadData::SetAlternateTimeSource(alternate_time_source);
-}
-
 void ThreadData::Initialize() {
   if (base::subtle::Acquire_Load(&status_) >= DEACTIVATED)
     return;  // Someone else did the initialization.
@@ -710,13 +681,6 @@
   if (base::subtle::Acquire_Load(&status_) >= DEACTIVATED)
     return;  // Someone raced in here and beat us.
 
-  // Put an alternate timer in place if the environment calls for it, such as
-  // for tracking TCMalloc allocations.  This insertion is idempotent, so we
-  // don't mind if there is a race, and we'd prefer not to be in a lock while
-  // doing this work.
-  if (kAllowAlternateTimeSourceHandling)
-    OptionallyInitializeAlternateTimer();
-
   // Perform the "real" TLS initialization now, and leave it intact through
   // process termination.
   if (!tls_index_.initialized()) {  // Testing may have initialized this.
@@ -762,21 +726,14 @@
 }
 
 // static
-void ThreadData::SetAlternateTimeSource(NowFunction* now_function) {
-  DCHECK(now_function);
-  if (kAllowAlternateTimeSourceHandling)
-    now_function_ = now_function;
-}
-
-// static
 void ThreadData::EnableProfilerTiming() {
   base::subtle::NoBarrier_Store(&g_profiler_timing_enabled, ENABLED_TIMING);
 }
 
 // static
 TrackedTime ThreadData::Now() {
-  if (kAllowAlternateTimeSourceHandling && now_function_)
-    return TrackedTime::FromMilliseconds((*now_function_)());
+  if (now_function_for_testing_)
+    return TrackedTime::FromMilliseconds((*now_function_for_testing_)());
   if (IsProfilerTimingEnabled() && TrackingStatus())
     return TrackedTime::Now();
   return TrackedTime();  // Super fast when disabled, or not compiled.
@@ -993,6 +950,9 @@
 ProcessDataPhaseSnapshot::ProcessDataPhaseSnapshot() {
 }
 
+ProcessDataPhaseSnapshot::ProcessDataPhaseSnapshot(
+    const ProcessDataPhaseSnapshot& other) = default;
+
 ProcessDataPhaseSnapshot::~ProcessDataPhaseSnapshot() {
 }
 
@@ -1007,6 +967,9 @@
 #endif
 }
 
+ProcessDataSnapshot::ProcessDataSnapshot(const ProcessDataSnapshot& other) =
+    default;
+
 ProcessDataSnapshot::~ProcessDataSnapshot() {
 }
 
diff --git a/base/tracked_objects.h b/base/tracked_objects.h
index 1a00ec0..168b17d 100644
--- a/base/tracked_objects.h
+++ b/base/tracked_objects.h
@@ -22,7 +22,6 @@
 #include "base/location.h"
 #include "base/macros.h"
 #include "base/process/process_handle.h"
-#include "base/profiler/alternate_timer.h"
 #include "base/profiler/tracked_time.h"
 #include "base/synchronization/lock.h"
 #include "base/threading/thread_checker.h"
@@ -537,12 +536,6 @@
   // the code).
   static TrackedTime Now();
 
-  // Use the function |now| to provide current times, instead of calling the
-  // TrackedTime::Now() function.  Since this alternate function is being used,
-  // the other time arguments (used for calculating queueing delay) will be
-  // ignored.
-  static void SetAlternateTimeSource(NowFunction* now);
-
   // This function can be called at process termination to validate that thread
   // cleanup routines have been called for at least some number of named
   // threads.
@@ -559,8 +552,10 @@
   FRIEND_TEST_ALL_PREFIXES(TrackedObjectsTest, MinimalStartupShutdown);
   FRIEND_TEST_ALL_PREFIXES(TrackedObjectsTest, TinyStartupShutdown);
 
-  typedef std::map<const BirthOnThread*, int> BirthCountMap;
+  // Type for an alternate timer function (testing only).
+  typedef unsigned int NowFunction();
 
+  typedef std::map<const BirthOnThread*, int> BirthCountMap;
   typedef std::vector<std::pair<const Births*, DeathDataPhaseSnapshot>>
       DeathsSnapshot;
 
@@ -635,11 +630,7 @@
 
  // When non-null, this specifies an external function that supplies
  // monotonically increasing time values.
-  static NowFunction* now_function_;
-
-  // If true, now_function_ returns values that can be used to calculate queue
-  // time.
-  static bool now_function_is_time_;
+  static NowFunction* now_function_for_testing_;
 
   // We use thread local store to identify which ThreadData to interact with.
   static base::ThreadLocalStorage::StaticSlot tls_index_;
@@ -804,6 +795,7 @@
 struct BASE_EXPORT ProcessDataPhaseSnapshot {
  public:
   ProcessDataPhaseSnapshot();
+  ProcessDataPhaseSnapshot(const ProcessDataPhaseSnapshot& other);
   ~ProcessDataPhaseSnapshot();
 
   std::vector<TaskSnapshot> tasks;
@@ -816,6 +808,7 @@
 struct BASE_EXPORT ProcessDataSnapshot {
  public:
   ProcessDataSnapshot();
+  ProcessDataSnapshot(const ProcessDataSnapshot& other);
   ~ProcessDataSnapshot();
 
   PhasedProcessDataSnapshotMap phased_snapshots;
diff --git a/base/tracked_objects_unittest.cc b/base/tracked_objects_unittest.cc
index 69dd85e..be86cbb 100644
--- a/base/tracked_objects_unittest.cc
+++ b/base/tracked_objects_unittest.cc
@@ -31,8 +31,7 @@
     ThreadData::ShutdownSingleThreadedCleanup(true);
 
     test_time_ = 0;
-    ThreadData::SetAlternateTimeSource(&TrackedObjectsTest::GetTestTime);
-    ThreadData::now_function_is_time_ = true;
+    ThreadData::now_function_for_testing_ = &TrackedObjectsTest::GetTestTime;
   }
 
   ~TrackedObjectsTest() override {
diff --git a/base/tuple.h b/base/tuple.h
index e5872cc..78dfd75 100644
--- a/base/tuple.h
+++ b/base/tuple.h
@@ -29,6 +29,7 @@
 #define BASE_TUPLE_H_
 
 #include <stddef.h>
+#include <tuple>
 
 #include "base/bind_helpers.h"
 #include "build/build_config.h"
@@ -109,28 +110,6 @@
 template <size_t N>
 using MakeIndexSequence = typename MakeIndexSequenceImpl<N>::Type;
 
-// Traits ----------------------------------------------------------------------
-//
-// A simple traits class for tuple arguments.
-//
-// ValueType: the bare, nonref version of a type (same as the type for nonrefs).
-// RefType: the ref version of a type (same as the type for refs).
-// ParamType: what type to pass to functions (refs should not be constified).
-
-template <class P>
-struct TupleTraits {
-  typedef P ValueType;
-  typedef P& RefType;
-  typedef const P& ParamType;
-};
-
-template <class P>
-struct TupleTraits<P&> {
-  typedef P ValueType;
-  typedef P& RefType;
-  typedef P& ParamType;
-};
-
 // Tuple -----------------------------------------------------------------------
 //
 // This set of classes is useful for bundling 0 or more heterogeneous data types
@@ -145,75 +124,10 @@
 // want filled by the dispatchee, and the tuple is merely a container for that
 // output (a "tier").  See MakeRefTuple and its usages.
 
-template <typename IxSeq, typename... Ts>
-struct TupleBaseImpl;
 template <typename... Ts>
-using TupleBase = TupleBaseImpl<MakeIndexSequence<sizeof...(Ts)>, Ts...>;
-template <size_t N, typename T>
-struct TupleLeaf;
+using Tuple = std::tuple<Ts...>;
 
-template <typename... Ts>
-struct Tuple final : TupleBase<Ts...> {
-  Tuple() : TupleBase<Ts...>() {}
-  explicit Tuple(typename TupleTraits<Ts>::ParamType... args)
-      : TupleBase<Ts...>(args...) {}
-};
-
-// Avoids ambiguity between Tuple's two constructors.
-template <>
-struct Tuple<> final {};
-
-template <size_t... Ns, typename... Ts>
-struct TupleBaseImpl<IndexSequence<Ns...>, Ts...> : TupleLeaf<Ns, Ts>... {
-  TupleBaseImpl() : TupleLeaf<Ns, Ts>()... {}
-  explicit TupleBaseImpl(typename TupleTraits<Ts>::ParamType... args)
-      : TupleLeaf<Ns, Ts>(args)... {}
-};
-
-template <size_t N, typename T>
-struct TupleLeaf {
-  TupleLeaf() {}
-  explicit TupleLeaf(typename TupleTraits<T>::ParamType x) : x(x) {}
-
-  T& get() { return x; }
-  const T& get() const { return x; }
-
-  T x;
-};
-
-// Tuple getters --------------------------------------------------------------
-//
-// Allows accessing an arbitrary tuple element by index.
-//
-// Example usage:
-//   base::Tuple<int, double> t2;
-//   base::get<0>(t2) = 42;
-//   base::get<1>(t2) = 3.14;
-
-template <size_t I, typename T>
-T& get(TupleLeaf<I, T>& leaf) {
-  return leaf.get();
-}
-
-template <size_t I, typename T>
-const T& get(const TupleLeaf<I, T>& leaf) {
-  return leaf.get();
-}
-
-// Tuple types ----------------------------------------------------------------
-//
-// Allows for selection of ValueTuple/RefTuple/ParamTuple without needing the
-// definitions of class types the tuple takes as parameters.
-
-template <typename T>
-struct TupleTypes;
-
-template <typename... Ts>
-struct TupleTypes<Tuple<Ts...>> {
-  using ValueTuple = Tuple<typename TupleTraits<Ts>::ValueType...>;
-  using RefTuple = Tuple<typename TupleTraits<Ts>::RefType...>;
-  using ParamTuple = Tuple<typename TupleTraits<Ts>::ParamType...>;
-};
+using std::get;
 
 // Tuple creators -------------------------------------------------------------
 //
@@ -245,15 +159,15 @@
 // Non-Static Dispatchers with no out params.
 
 template <typename ObjT, typename Method, typename... Ts, size_t... Ns>
-inline void DispatchToMethodImpl(ObjT* obj,
+inline void DispatchToMethodImpl(const ObjT& obj,
                                  Method method,
                                  const Tuple<Ts...>& arg,
                                  IndexSequence<Ns...>) {
-  (obj->*method)(base::internal::UnwrapTraits<Ts>::Unwrap(get<Ns>(arg))...);
+  (obj->*method)(internal::Unwrap(get<Ns>(arg))...);
 }
 
 template <typename ObjT, typename Method, typename... Ts>
-inline void DispatchToMethod(ObjT* obj,
+inline void DispatchToMethod(const ObjT& obj,
                              Method method,
                              const Tuple<Ts...>& arg) {
   DispatchToMethodImpl(obj, method, arg, MakeIndexSequence<sizeof...(Ts)>());
@@ -265,7 +179,7 @@
 inline void DispatchToFunctionImpl(Function function,
                                    const Tuple<Ts...>& arg,
                                    IndexSequence<Ns...>) {
-  (*function)(base::internal::UnwrapTraits<Ts>::Unwrap(get<Ns>(arg))...);
+  (*function)(internal::Unwrap(get<Ns>(arg))...);
 }
 
 template <typename Function, typename... Ts>
@@ -281,18 +195,17 @@
           typename... OutTs,
           size_t... InNs,
           size_t... OutNs>
-inline void DispatchToMethodImpl(ObjT* obj,
+inline void DispatchToMethodImpl(const ObjT& obj,
                                  Method method,
                                  const Tuple<InTs...>& in,
                                  Tuple<OutTs...>* out,
                                  IndexSequence<InNs...>,
                                  IndexSequence<OutNs...>) {
-  (obj->*method)(base::internal::UnwrapTraits<InTs>::Unwrap(get<InNs>(in))...,
-                 &get<OutNs>(*out)...);
+  (obj->*method)(internal::Unwrap(get<InNs>(in))..., &get<OutNs>(*out)...);
 }
 
 template <typename ObjT, typename Method, typename... InTs, typename... OutTs>
-inline void DispatchToMethod(ObjT* obj,
+inline void DispatchToMethod(const ObjT& obj,
                              Method method,
                              const Tuple<InTs...>& in,
                              Tuple<OutTs...>* out) {
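
Note: base::Tuple is now just an alias for std::tuple, and DispatchToMethod
takes the receiver by const reference, so any pointer-like object (raw
pointer, scoped_refptr, WeakPtr) works. A minimal sketch, assuming MakeTuple
is unchanged by this patch (Adder is hypothetical):

    #include "base/tuple.h"

    struct Adder {
      void Add(int a, int b) { sum = a + b; }
      int sum = 0;
    };

    void Demo() {
      Adder adder;
      base::Tuple<int, int> args = base::MakeTuple(1, 2);
      base::DispatchToMethod(&adder, &Adder::Add, args);
      // adder.sum == 3
    }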
diff --git a/base/values.cc b/base/values.cc
index 3f32b5e..80cc10c 100644
--- a/base/values.cc
+++ b/base/values.cc
@@ -837,6 +837,8 @@
     : target_(target),
       it_(target.dictionary_.begin()) {}
 
+DictionaryValue::Iterator::Iterator(const Iterator& other) = default;
+
 DictionaryValue::Iterator::~Iterator() {}
 
 DictionaryValue* DictionaryValue::DeepCopy() const {
diff --git a/base/values.h b/base/values.h
index 07e5b6c..141ea93 100644
--- a/base/values.h
+++ b/base/values.h
@@ -360,6 +360,7 @@
   class BASE_EXPORT Iterator {
    public:
     explicit Iterator(const DictionaryValue& target);
+    Iterator(const Iterator& other);
     ~Iterator();
 
     bool IsAtEnd() const { return it_ == target_.dictionary_.end(); }
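
Note: the copy constructor is declared explicitly, likely to keep the type
copyable under compilers that deprecate the implicit copy when a destructor is
user-declared. A minimal sketch showing the iterator remains copyable (Walk is
hypothetical):

    #include "base/values.h"

    void Walk(const base::DictionaryValue& dict) {
      for (base::DictionaryValue::Iterator it(dict); !it.IsAtEnd();
           it.Advance()) {
        // it.key() / it.value()
      }
      base::DictionaryValue::Iterator first(dict);
      base::DictionaryValue::Iterator copy(first);  // explicit copy ctor
    }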
diff --git a/base/values_unittest.cc b/base/values_unittest.cc
index 66453e0..175a0d0 100644
--- a/base/values_unittest.cc
+++ b/base/values_unittest.cc
@@ -233,7 +233,7 @@
                              &removed_item));
     EXPECT_FALSE(list.Remove(1, &removed_item));
     EXPECT_TRUE(list.Remove(0, &removed_item));
-    ASSERT_TRUE(removed_item);
+    ASSERT_TRUE(removed_item.get());
     EXPECT_EQ(0U, list.GetSize());
   }
   EXPECT_FALSE(deletion_flag);
@@ -304,7 +304,7 @@
     EXPECT_FALSE(dict.Remove("absent key", &removed_item));
     EXPECT_TRUE(dict.Remove(key, &removed_item));
     EXPECT_FALSE(dict.HasKey(key));
-    ASSERT_TRUE(removed_item);
+    ASSERT_TRUE(removed_item.get());
   }
   EXPECT_FALSE(deletion_flag);
   removed_item.reset();
@@ -374,7 +374,7 @@
 
   scoped_ptr<Value> removed_item;
   EXPECT_TRUE(dict.RemovePath("a.long.way.down", &removed_item));
-  ASSERT_TRUE(removed_item);
+  ASSERT_TRUE(removed_item.get());
   EXPECT_TRUE(removed_item->IsType(base::Value::TYPE_INTEGER));
   EXPECT_FALSE(dict.HasKey("a.long.way.down"));
   EXPECT_FALSE(dict.HasKey("a.long.way"));
@@ -387,7 +387,7 @@
 
   removed_item.reset();
   EXPECT_TRUE(dict.RemovePath("a.long.key.path", &removed_item));
-  ASSERT_TRUE(removed_item);
+  ASSERT_TRUE(removed_item.get());
   EXPECT_TRUE(removed_item->IsType(base::Value::TYPE_BOOLEAN));
   EXPECT_TRUE(dict.empty());
 }
diff --git a/base/win/scoped_handle_test_dll.cc b/base/win/scoped_handle_test_dll.cc
new file mode 100644
index 0000000..e6e1215
--- /dev/null
+++ b/base/win/scoped_handle_test_dll.cc
@@ -0,0 +1,126 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <windows.h>
+
+#include <vector>
+
+#include "base/win/scoped_handle.h"
+
+// http://blogs.msdn.com/oldnewthing/archive/2004/10/25/247180.aspx
+extern "C" IMAGE_DOS_HEADER __ImageBase;
+
+namespace base {
+namespace win {
+namespace testing {
+
+extern "C" bool __declspec(dllexport) RunTest();
+
+namespace {
+
+struct ThreadParams {
+  HANDLE ready_event;
+  HANDLE start_event;
+};
+
+// Note: this must use only native functions to avoid instantiating the
+// ActiveVerifier, e.g. it can't use base::Thread or even base::PlatformThread.
+DWORD __stdcall ThreadFunc(void* params) {
+  ThreadParams* thread_params = reinterpret_cast<ThreadParams*>(params);
+  HANDLE handle = ::CreateMutex(nullptr, false, nullptr);
+
+  ::SetEvent(thread_params->ready_event);
+  ::WaitForSingleObject(thread_params->start_event, INFINITE);
+  ScopedHandle handle_holder(handle);
+  return 0;
+}
+
+bool InternalRunThreadTest() {
+  std::vector<HANDLE> threads_;
+  // From manual testing, the bug fixed by crrev.com/678736a reliably triggers
+  // handle verifier asserts at around 100 threads, so use 200 to be sure to
+  // detect any future regressions.
+  const size_t kNumThreads = 200;
+
+  // bManualReset is set to true to allow signalling multiple threads.
+  HANDLE start_event = ::CreateEvent(nullptr, true, false, nullptr);
+  if (!start_event)
+    return false;
+
+  HANDLE ready_event = ::CreateEvent(nullptr, false, false, nullptr);
+  if (!ready_event)
+    return false;
+
+  ThreadParams thread_params = { ready_event, start_event };
+
+  for (size_t i = 0; i < kNumThreads; i++) {
+    HANDLE thread_handle =
+        ::CreateThread(nullptr, 0, ThreadFunc,
+                       reinterpret_cast<void*>(&thread_params), 0, nullptr);
+    if (!thread_handle)
+      break;
+    ::WaitForSingleObject(ready_event, INFINITE);
+    threads_.push_back(thread_handle);
+  }
+
+  ::CloseHandle(ready_event);
+
+  if (threads_.size() != kNumThreads) {
+    for (const auto& thread : threads_)
+      ::CloseHandle(thread);
+    ::CloseHandle(start_event);
+    return false;
+  }
+
+  ::SetEvent(start_event);
+  ::CloseHandle(start_event);
+  for (const auto& thread : threads_) {
+    ::WaitForSingleObject(thread, INFINITE);
+    ::CloseHandle(thread);
+  }
+
+  return true;
+}
+
+bool InternalRunLocationTest() {
+  // Create a new handle and then set LastError again.
+  HANDLE handle = ::CreateMutex(nullptr, false, nullptr);
+  if (!handle)
+    return false;
+  ScopedHandle handle_holder(handle);
+
+  HMODULE verifier_module = GetHandleVerifierModuleForTesting();
+  if (!verifier_module)
+    return false;
+
+  // Get my module
+  HMODULE my_module = reinterpret_cast<HMODULE>(&__ImageBase);
+  if (!my_module)
+    return false;
+
+  HMODULE main_module = ::GetModuleHandle(NULL);
+
+#if defined(COMPONENT_BUILD)
+  // In a component build ActiveVerifier will always be created inside base.dll
+  // as the code always lives there.
+  if (verifier_module == my_module || verifier_module == main_module)
+    return false;
+#else
+  // In a non-component build, ActiveVerifier should always be created in the
+  // version of base linked with the main executable.
+  if (verifier_module == my_module || verifier_module != main_module)
+    return false;
+#endif
+  return true;
+}
+
+}  // namespace
+
+bool RunTest() {
+  return InternalRunThreadTest() && InternalRunLocationTest();
+}
+
+}  // testing
+}  // win
+}  // base
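
For reference, a hedged sketch of the ScopedHandle pattern this test DLL
exercises; SignalOnce and the event are assumptions, not part of the change:

#include <windows.h>

#include "base/win/scoped_handle.h"

bool SignalOnce() {
  base::win::ScopedHandle event(::CreateEvent(
      nullptr, /*bManualReset=*/FALSE, /*bInitialState=*/FALSE, nullptr));
  if (!event.IsValid())
    return false;
  ::SetEvent(event.Get());
  return true;  // closed (and untracked by the verifier) on scope exit
}
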
diff --git a/base/win/windows_version_unittest.cc b/base/win/windows_version_unittest.cc
new file mode 100644
index 0000000..f0d6d96
--- /dev/null
+++ b/base/win/windows_version_unittest.cc
@@ -0,0 +1,22 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/win/windows_version.h"
+
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace base {
+namespace win {
+namespace {
+
+TEST(WindowsVersion, GetVersionExAndKernelVersionMatch) {
+  // If this fails, we're running in compatibility mode, or need to update the
+  // application manifest.
+  EXPECT_EQ(OSInfo::GetInstance()->version(),
+            OSInfo::GetInstance()->Kernel32Version());
+}
+
+}  // namespace
+}  // namespace win
+}  // namespace base
diff --git a/crypto/BUILD.gn b/crypto/BUILD.gn
index 8d24e60..4e339cc 100644
--- a/crypto/BUILD.gn
+++ b/crypto/BUILD.gn
@@ -13,6 +13,7 @@
     "apple_keychain.h",
     "apple_keychain_ios.mm",
     "apple_keychain_mac.mm",
+    "auto_cbb.h",
     "capi_util.cc",
     "capi_util.h",
     "crypto_export.h",
@@ -34,8 +35,6 @@
     "encryptor.h",
     "encryptor_nss.cc",
     "encryptor_openssl.cc",
-    "ghash.cc",
-    "ghash.h",
     "hkdf.cc",
     "hkdf.h",
     "hmac.cc",
@@ -160,6 +159,7 @@
     sources -= [
       "aead_openssl.cc",
       "aead_openssl.h",
+      "auto_cbb.h",
       "curve25519_openssl.cc",
       "ec_private_key_openssl.cc",
       "ec_signature_creator_openssl.cc",
@@ -228,15 +228,6 @@
   }
 }
 
-# TODO(GYP): Delete this after we've converted everything to GN.
-# The _run targets exist only for compatibility w/ GYP.
-group("crypto_unittests_run") {
-  testonly = true
-  deps = [
-    ":crypto_unittests",
-  ]
-}
-
 test("crypto_unittests") {
   sources = [
     "aead_openssl_unittest.cc",
@@ -244,7 +235,6 @@
     "ec_private_key_unittest.cc",
     "ec_signature_creator_unittest.cc",
     "encryptor_unittest.cc",
-    "ghash_unittest.cc",
     "hkdf_unittest.cc",
     "hmac_unittest.cc",
     "nss_key_util_unittest.cc",
diff --git a/crypto/auto_cbb.h b/crypto/auto_cbb.h
new file mode 100644
index 0000000..5206a21
--- /dev/null
+++ b/crypto/auto_cbb.h
@@ -0,0 +1,35 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef CRYPTO_AUTO_CBB_H_
+#define CRYPTO_AUTO_CBB_H_
+
+#include <openssl/bytestring.h>
+
+#include "base/macros.h"
+
+namespace crypto {
+
+// AutoCBB is a wrapper over OpenSSL's CBB type that automatically releases
+// resources when going out of scope.
+class AutoCBB {
+ public:
+  AutoCBB() { CBB_zero(&cbb_); }
+  ~AutoCBB() { CBB_cleanup(&cbb_); }
+
+  CBB* get() { return &cbb_; }
+
+  void Reset() {
+    CBB_cleanup(&cbb_);
+    CBB_zero(&cbb_);
+  }
+
+ private:
+  CBB cbb_;
+  DISALLOW_COPY_AND_ASSIGN(AutoCBB);
+};
+
+}  // namespace crypto
+
+#endif  // CRYPTO_AUTO_CBB_H_
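
A hedged usage sketch of the new wrapper with BoringSSL's CBB API;
SerializeByte and its payload are assumptions for illustration:

#include <openssl/bytestring.h>
#include <openssl/mem.h>
#include <stdint.h>

#include <vector>

#include "crypto/auto_cbb.h"

bool SerializeByte(uint8_t byte, std::vector<uint8_t>* out) {
  crypto::AutoCBB cbb;  // CBB_zero'd now, CBB_cleanup'd on scope exit
  uint8_t* der;
  size_t der_len;
  if (!CBB_init(cbb.get(), 0) ||
      !CBB_add_u8(cbb.get(), byte) ||
      !CBB_finish(cbb.get(), &der, &der_len)) {
    return false;  // AutoCBB still frees any partially built buffer
  }
  out->assign(der, der + der_len);
  OPENSSL_free(der);
  return true;
}
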
diff --git a/crypto/crypto.gyp b/crypto/crypto.gyp
index 2590c4f..e2472d7 100644
--- a/crypto/crypto.gyp
+++ b/crypto/crypto.gyp
@@ -128,6 +128,7 @@
             'sources!': [
               'aead_openssl.cc',
               'aead_openssl.h',
+              'auto_cbb.h',
               'curve25519_openssl.cc',
               'ec_private_key_openssl.cc',
               'ec_signature_creator_openssl.cc',
@@ -169,7 +170,6 @@
         'ec_private_key_unittest.cc',
         'ec_signature_creator_unittest.cc',
         'encryptor_unittest.cc',
-        'ghash_unittest.cc',
         'hkdf_unittest.cc',
         'hmac_unittest.cc',
         'nss_key_util_unittest.cc',
@@ -196,14 +196,6 @@
       ],
       'conditions': [
         [ 'use_nss_certs == 1', {
-          'conditions': [
-            [ 'use_allocator!="none"', {
-                'dependencies': [
-                  '../base/allocator/allocator.gyp:allocator',
-                ],
-              },
-            ],
-          ],
           'dependencies': [
             '../build/linux/system.gyp:ssl',
           ],
diff --git a/crypto/crypto.gypi b/crypto/crypto.gypi
index e5cc4f4..143d555 100644
--- a/crypto/crypto.gypi
+++ b/crypto/crypto.gypi
@@ -32,6 +32,7 @@
       'apple_keychain.h',
       'apple_keychain_ios.mm',
       'apple_keychain_mac.mm',
+      'auto_cbb.h',
       'capi_util.cc',
       'capi_util.h',
       'crypto_export.h',
@@ -41,8 +42,6 @@
       'curve25519.h',
       'curve25519_nss.cc',
       'curve25519_openssl.cc',
-      'ghash.cc',
-      'ghash.h',
       'ec_private_key.h',
       'ec_private_key_nss.cc',
       'ec_private_key_openssl.cc',
diff --git a/crypto/ec_private_key.h b/crypto/ec_private_key.h
index 9a8a02a..1ee4aca 100644
--- a/crypto/ec_private_key.h
+++ b/crypto/ec_private_key.h
@@ -95,8 +95,7 @@
 
   // Exports private key data for testing. The format of data stored into output
   // doesn't matter other than that it is consistent for the same key.
-  bool ExportValue(std::vector<uint8_t>* output);
-  bool ExportECParams(std::vector<uint8_t>* output);
+  bool ExportValueForTesting(std::vector<uint8_t>* output);
 
  private:
   // Constructor is private. Use one of the Create*() methods above instead.
diff --git a/crypto/openssl_util.cc b/crypto/openssl_util.cc
index 128d7e4..2a31093 100644
--- a/crypto/openssl_util.cc
+++ b/crypto/openssl_util.cc
@@ -5,10 +5,12 @@
 #include "crypto/openssl_util.h"
 
 #include <openssl/err.h>
-#include <openssl/ssl.h>
 #if defined(OPENSSL_IS_BORINGSSL)
 #include <openssl/cpu.h>
+#else
+#include <openssl/ssl.h>
 #endif
+#include <openssl/crypto.h>
 #include <stddef.h>
 #include <stdint.h>
 
@@ -50,16 +52,19 @@
 #if defined(OS_ANDROID) && defined(ARCH_CPU_ARMEL)
     const bool has_neon =
         (android_getCpuFeatures() & ANDROID_CPU_ARM_FEATURE_NEON) != 0;
-    // CRYPTO_set_NEON_capable is called before |SSL_library_init| because this
-    // stops BoringSSL from probing for NEON support via SIGILL in the case
-    // that getauxval isn't present.
-    CRYPTO_set_NEON_capable(has_neon);
-    // See https://code.google.com/p/chromium/issues/detail?id=341598
     base::CPU cpu;
-    CRYPTO_set_NEON_functional(!cpu.has_broken_neon());
+    // CRYPTO_set_NEON_capable is called before |CRYPTO_library_init| because
+    // this stops BoringSSL from probing for NEON support via SIGILL in the case
+    // that getauxval isn't present. Also work around a CPU with broken NEON
+    // support. See https://code.google.com/p/chromium/issues/detail?id=341598
+    CRYPTO_set_NEON_capable(has_neon && !cpu.has_broken_neon());
 #endif
 
+#if defined(OPENSSL_IS_BORINGSSL)
+    CRYPTO_library_init();
+#else
     SSL_library_init();
+#endif
   }
 
   ~OpenSSLInitSingleton() {}
diff --git a/crypto/openssl_util.h b/crypto/openssl_util.h
index 78fa66e..d608cde 100644
--- a/crypto/openssl_util.h
+++ b/crypto/openssl_util.h
@@ -58,12 +58,12 @@
 // multiple times.
 // This function is thread-safe, and OpenSSL will only ever be initialized once.
 // OpenSSL will be properly shut down on program exit.
-void CRYPTO_EXPORT EnsureOpenSSLInit();
+CRYPTO_EXPORT void EnsureOpenSSLInit();
 
 // Drains the OpenSSL ERR_get_error stack. On a debug build the error codes
 // are send to VLOG(1), on a release build they are disregarded. In most
 // cases you should pass FROM_HERE as the |location|.
-void CRYPTO_EXPORT ClearOpenSSLERRStack(
+CRYPTO_EXPORT void ClearOpenSSLERRStack(
     const tracked_objects::Location& location);
 
 // Place an instance of this class on the call stack to automatically clear
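
The header change moves CRYPTO_EXPORT ahead of the return type, the
conventional placement for export attributes. The intended call pattern, as a
hedged sketch (DoCryptoWork is hypothetical):

#include "base/location.h"
#include "crypto/openssl_util.h"

void DoCryptoWork() {
  crypto::EnsureOpenSSLInit();  // thread-safe; initializes the library once
  crypto::OpenSSLErrStackTracer err_tracer(FROM_HERE);
  // ... OpenSSL/BoringSSL calls; queued errors are drained when
  // |err_tracer| leaves scope (VLOG(1) in debug builds, dropped in release).
}
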
diff --git a/crypto/rsa_private_key.h b/crypto/rsa_private_key.h
index 9703334..d4808f5 100644
--- a/crypto/rsa_private_key.h
+++ b/crypto/rsa_private_key.h
@@ -200,7 +200,7 @@
   // Creates a copy of the object.
   RSAPrivateKey* Copy() const;
 
-  // Exports the private key to a PKCS #1 PrivateKey block.
+  // Exports the private key to a PKCS #8 PrivateKeyInfo block.
   bool ExportPrivateKey(std::vector<uint8_t>* output) const;
 
   // Exports the public key to an X509 SubjectPublicKeyInfo block.
diff --git a/crypto/rsa_private_key_openssl.cc b/crypto/rsa_private_key_openssl.cc
index f7fdd9d..3e87a0a 100644
--- a/crypto/rsa_private_key_openssl.cc
+++ b/crypto/rsa_private_key_openssl.cc
@@ -4,55 +4,21 @@
 
 #include "crypto/rsa_private_key.h"
 
-#include <openssl/bio.h>
+#include <openssl/bytestring.h>
 #include <openssl/bn.h>
 #include <openssl/evp.h>
-#include <openssl/pkcs12.h>
+#include <openssl/mem.h>
 #include <openssl/rsa.h>
 #include <stdint.h>
 
 #include "base/logging.h"
 #include "base/memory/scoped_ptr.h"
+#include "crypto/auto_cbb.h"
 #include "crypto/openssl_util.h"
 #include "crypto/scoped_openssl_types.h"
 
 namespace crypto {
 
-namespace {
-
-using ScopedPKCS8_PRIV_KEY_INFO =
-    ScopedOpenSSL<PKCS8_PRIV_KEY_INFO, PKCS8_PRIV_KEY_INFO_free>;
-
-// Function pointer definition, for injecting the required key export function
-// into ExportKey, below. The supplied function should export EVP_PKEY into
-// the supplied BIO, returning 1 on success or 0 on failure.
-using ExportFunction = int (*)(BIO*, EVP_PKEY*);
-
-// Helper to export |key| into |output| via the specified ExportFunction.
-bool ExportKey(EVP_PKEY* key,
-               ExportFunction export_fn,
-               std::vector<uint8_t>* output) {
-  if (!key)
-    return false;
-
-  OpenSSLErrStackTracer err_tracer(FROM_HERE);
-  ScopedBIO bio(BIO_new(BIO_s_mem()));
-
-  int res = export_fn(bio.get(), key);
-  if (!res)
-    return false;
-
-  char* data = NULL;
-  long len = BIO_get_mem_data(bio.get(), &data);
-  if (!data || len < 0)
-    return false;
-
-  output->assign(data, data + len);
-  return true;
-}
-
-}  // namespace
-
 // static
 RSAPrivateKey* RSAPrivateKey::Create(uint16_t num_bits) {
   OpenSSLErrStackTracer err_tracer(FROM_HERE);
@@ -76,25 +42,16 @@
 // static
 RSAPrivateKey* RSAPrivateKey::CreateFromPrivateKeyInfo(
     const std::vector<uint8_t>& input) {
-  if (input.empty())
-    return NULL;
-
   OpenSSLErrStackTracer err_tracer(FROM_HERE);
 
-  // Importing is a little more involved than exporting, as we must first
-  // PKCS#8 decode the input, and then import the EVP_PKEY from Private Key
-  // Info structure returned.
-  const uint8_t* ptr = &input[0];
-  ScopedPKCS8_PRIV_KEY_INFO p8inf(
-      d2i_PKCS8_PRIV_KEY_INFO(nullptr, &ptr, input.size()));
-  if (!p8inf.get() || ptr != &input[0] + input.size())
-    return NULL;
+  CBS cbs;
+  CBS_init(&cbs, input.data(), input.size());
+  ScopedEVP_PKEY pkey(EVP_parse_private_key(&cbs));
+  if (!pkey || CBS_len(&cbs) != 0 || EVP_PKEY_id(pkey.get()) != EVP_PKEY_RSA)
+    return nullptr;
 
   scoped_ptr<RSAPrivateKey> result(new RSAPrivateKey);
-  result->key_ = EVP_PKCS82PKEY(p8inf.get());
-  if (!result->key_ || EVP_PKEY_id(result->key_) != EVP_PKEY_RSA)
-    return NULL;
-
+  result->key_ = pkey.release();
   return result.release();
 }
 
@@ -129,11 +86,31 @@
 }
 
 bool RSAPrivateKey::ExportPrivateKey(std::vector<uint8_t>* output) const {
-  return ExportKey(key_, i2d_PKCS8PrivateKeyInfo_bio, output);
+  uint8_t* der;
+  size_t der_len;
+  AutoCBB cbb;
+  if (!CBB_init(cbb.get(), 0) ||
+      !EVP_marshal_private_key(cbb.get(), key_) ||
+      !CBB_finish(cbb.get(), &der, &der_len)) {
+    return false;
+  }
+  output->assign(der, der + der_len);
+  OPENSSL_free(der);
+  return true;
 }
 
 bool RSAPrivateKey::ExportPublicKey(std::vector<uint8_t>* output) const {
-  return ExportKey(key_, i2d_PUBKEY_bio, output);
+  uint8_t* der;
+  size_t der_len;
+  AutoCBB cbb;
+  if (!CBB_init(cbb.get(), 0) ||
+      !EVP_marshal_public_key(cbb.get(), key_) ||
+      !CBB_finish(cbb.get(), &der, &der_len)) {
+    return false;
+  }
+  output->assign(der, der + der_len);
+  OPENSSL_free(der);
+  return true;
 }
 
 }  // namespace crypto
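
The import path above swaps d2i_PKCS8_PRIV_KEY_INFO for CBS parsing, and the
export path swaps BIO-based i2d for CBB marshalling. A hedged end-to-end
sketch of that round trip; ReencodePrivateKey is hypothetical:

#include <openssl/bytestring.h>
#include <openssl/evp.h>
#include <openssl/mem.h>
#include <stdint.h>

#include <vector>

#include "crypto/auto_cbb.h"
#include "crypto/scoped_openssl_types.h"

bool ReencodePrivateKey(const std::vector<uint8_t>& pkcs8,
                        std::vector<uint8_t>* out) {
  CBS cbs;
  CBS_init(&cbs, pkcs8.data(), pkcs8.size());
  crypto::ScopedEVP_PKEY pkey(EVP_parse_private_key(&cbs));
  if (!pkey || CBS_len(&cbs) != 0)  // reject trailing garbage
    return false;
  uint8_t* der;
  size_t der_len;
  crypto::AutoCBB cbb;
  if (!CBB_init(cbb.get(), 0) ||
      !EVP_marshal_private_key(cbb.get(), pkey.get()) ||
      !CBB_finish(cbb.get(), &der, &der_len)) {
    return false;
  }
  out->assign(der, der + der_len);
  OPENSSL_free(der);
  return true;
}
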
diff --git a/crypto/scoped_test_nss_db.h b/crypto/scoped_test_nss_db.h
index a305b7f..c01653f 100644
--- a/crypto/scoped_test_nss_db.h
+++ b/crypto/scoped_test_nss_db.h
@@ -20,7 +20,7 @@
   ScopedTestNSSDB();
   ~ScopedTestNSSDB();
 
-  bool is_open() const { return slot_; }
+  bool is_open() const { return !!slot_; }
   PK11SlotInfo* slot() const { return slot_.get(); }
 
  private:
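
The !! is needed because the scoped slot type's conversion to bool is
explicit: it no longer converts implicitly in a return statement, but !
performs a contextual conversion and a second ! restores the sense. A minimal
sketch (HasValue is hypothetical):

#include "base/memory/scoped_ptr.h"

bool HasValue(const scoped_ptr<int>& p) {
  // return p;   // ill-formed: the conversion operator is explicit
  return !!p;    // contextual conversion inside !, then negated back
}
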
diff --git a/crypto/secure_hash.h b/crypto/secure_hash.h
index 491a299..a5590e5 100644
--- a/crypto/secure_hash.h
+++ b/crypto/secure_hash.h
@@ -10,11 +10,6 @@
 #include "base/macros.h"
 #include "crypto/crypto_export.h"
 
-namespace base {
-class Pickle;
-class PickleIterator;
-}
-
 namespace crypto {
 
 // A wrapper to calculate secure hashes incrementally, allowing to
@@ -30,17 +25,12 @@
 
   virtual void Update(const void* input, size_t len) = 0;
   virtual void Finish(void* output, size_t len) = 0;
+  virtual size_t GetHashLength() const = 0;
 
-  // Serialize the context, so it can be restored at a later time.
-  // |pickle| will contain the serialized data.
-  // Returns whether or not |pickle| was filled.
-  virtual bool Serialize(base::Pickle* pickle) = 0;
-
-  // Restore the context that was saved earlier.
-  // |data_iterator| allows this to be used as part of a larger pickle.
-  // |pickle| holds the saved data.
-  // Returns success or failure.
-  virtual bool Deserialize(base::PickleIterator* data_iterator) = 0;
+  // Create a clone of this SecureHash. The returned clone and this object
+  // both represent the same hash state, but from this point on calling
+  // Update()/Finish() on either one does not affect the state of the other.
+  virtual SecureHash* Clone() const = 0;
 
  protected:
   SecureHash() {}
diff --git a/crypto/secure_hash_default.cc b/crypto/secure_hash_default.cc
index cec6fb8..b33010f 100644
--- a/crypto/secure_hash_default.cc
+++ b/crypto/secure_hash_default.cc
@@ -15,16 +15,16 @@
 
 namespace {
 
-const char kSHA256Descriptor[] = "NSS";
-
 class SecureHashSHA256NSS : public SecureHash {
  public:
-  static const int kSecureHashVersion = 1;
-
   SecureHashSHA256NSS() {
     SHA256_Begin(&ctx_);
   }
 
+  SecureHashSHA256NSS(const SecureHashSHA256NSS& other) {
+    SHA256_Clone(&ctx_, const_cast<SHA256Context*>(&other.ctx_));
+  }
+
   ~SecureHashSHA256NSS() override { memset(&ctx_, 0, sizeof(ctx_)); }
 
   // SecureHash implementation:
@@ -37,50 +37,14 @@
                static_cast<unsigned int>(len));
   }
 
-  bool Serialize(base::Pickle* pickle) override;
-  bool Deserialize(base::PickleIterator* data_iterator) override;
+  SecureHash* Clone() const override { return new SecureHashSHA256NSS(*this); }
+
+  size_t GetHashLength() const override { return SHA256_LENGTH; }
 
  private:
   SHA256Context ctx_;
 };
 
-bool SecureHashSHA256NSS::Serialize(base::Pickle* pickle) {
-  if (!pickle)
-    return false;
-
-  if (!pickle->WriteInt(kSecureHashVersion) ||
-      !pickle->WriteString(kSHA256Descriptor) ||
-      !pickle->WriteBytes(&ctx_, sizeof(ctx_))) {
-    return false;
-  }
-
-  return true;
-}
-
-bool SecureHashSHA256NSS::Deserialize(base::PickleIterator* data_iterator) {
-  int version;
-  if (!data_iterator->ReadInt(&version))
-    return false;
-
-  if (version > kSecureHashVersion)
-    return false;  // We don't know how to deal with this.
-
-  std::string type;
-  if (!data_iterator->ReadString(&type))
-    return false;
-
-  if (type != kSHA256Descriptor)
-    return false;  // It's the wrong kind.
-
-  const char* data = NULL;
-  if (!data_iterator->ReadBytes(&data, sizeof(ctx_)))
-    return false;
-
-  memcpy(&ctx_, data, sizeof(ctx_));
-
-  return true;
-}
-
 }  // namespace
 
 SecureHash* SecureHash::Create(Algorithm algorithm) {
diff --git a/crypto/secure_hash_openssl.cc b/crypto/secure_hash_openssl.cc
index ec859ff..868300f 100644
--- a/crypto/secure_hash_openssl.cc
+++ b/crypto/secure_hash_openssl.cc
@@ -16,16 +16,16 @@
 
 namespace {
 
-const char kSHA256Descriptor[] = "OpenSSL";
-
 class SecureHashSHA256OpenSSL : public SecureHash {
  public:
-  static const int kSecureHashVersion = 1;
-
   SecureHashSHA256OpenSSL() {
     SHA256_Init(&ctx_);
   }
 
+  SecureHashSHA256OpenSSL(const SecureHashSHA256OpenSSL& other) {
+    memcpy(&ctx_, &other.ctx_, sizeof(ctx_));
+  }
+
   ~SecureHashSHA256OpenSSL() override {
     OPENSSL_cleanse(&ctx_, sizeof(ctx_));
   }
@@ -40,53 +40,16 @@
     SHA256_Final(result.safe_buffer(), &ctx_);
   }
 
-  bool Serialize(base::Pickle* pickle) override;
-  bool Deserialize(base::PickleIterator* data_iterator) override;
+  SecureHash* Clone() const override {
+    return new SecureHashSHA256OpenSSL(*this);
+  }
+
+  size_t GetHashLength() const override { return SHA256_DIGEST_LENGTH; }
 
  private:
   SHA256_CTX ctx_;
 };
 
-bool SecureHashSHA256OpenSSL::Serialize(base::Pickle* pickle) {
-  if (!pickle)
-    return false;
-
-  if (!pickle->WriteInt(kSecureHashVersion) ||
-      !pickle->WriteString(kSHA256Descriptor) ||
-      !pickle->WriteBytes(&ctx_, sizeof(ctx_))) {
-    return false;
-  }
-
-  return true;
-}
-
-bool SecureHashSHA256OpenSSL::Deserialize(base::PickleIterator* data_iterator) {
-  if (!data_iterator)
-    return false;
-
-  int version;
-  if (!data_iterator->ReadInt(&version))
-    return false;
-
-  if (version > kSecureHashVersion)
-    return false;  // We don't know how to deal with this.
-
-  std::string type;
-  if (!data_iterator->ReadString(&type))
-    return false;
-
-  if (type != kSHA256Descriptor)
-    return false;  // It's the wrong kind.
-
-  const char* data = NULL;
-  if (!data_iterator->ReadBytes(&data, sizeof(ctx_)))
-    return false;
-
-  memcpy(&ctx_, data, sizeof(ctx_));
-
-  return true;
-}
-
 }  // namespace
 
 SecureHash* SecureHash::Create(Algorithm algorithm) {
diff --git a/crypto/secure_hash_unittest.cc b/crypto/secure_hash_unittest.cc
index df0afa6..019e86f 100644
--- a/crypto/secure_hash_unittest.cc
+++ b/crypto/secure_hash_unittest.cc
@@ -10,21 +10,16 @@
 #include <string>
 
 #include "base/memory/scoped_ptr.h"
-#include "base/pickle.h"
 #include "crypto/sha2.h"
 #include "testing/gtest/include/gtest/gtest.h"
 
 TEST(SecureHashTest, TestUpdate) {
   // Example B.3 from FIPS 180-2: long message.
   std::string input3(500000, 'a');  // 'a' repeated half a million times
-  int expected3[] = { 0xcd, 0xc7, 0x6e, 0x5c,
-                      0x99, 0x14, 0xfb, 0x92,
-                      0x81, 0xa1, 0xc7, 0xe2,
-                      0x84, 0xd7, 0x3e, 0x67,
-                      0xf1, 0x80, 0x9a, 0x48,
-                      0xa4, 0x97, 0x20, 0x0e,
-                      0x04, 0x6d, 0x39, 0xcc,
-                      0xc7, 0x11, 0x2c, 0xd0 };
+  const int kExpectedHashOfInput3[] = {
+      0xcd, 0xc7, 0x6e, 0x5c, 0x99, 0x14, 0xfb, 0x92, 0x81, 0xa1, 0xc7,
+      0xe2, 0x84, 0xd7, 0x3e, 0x67, 0xf1, 0x80, 0x9a, 0x48, 0xa4, 0x97,
+      0x20, 0x0e, 0x04, 0x6d, 0x39, 0xcc, 0xc7, 0x11, 0x2c, 0xd0};
 
   uint8_t output3[crypto::kSHA256Length];
 
@@ -35,43 +30,53 @@
 
   ctx->Finish(output3, sizeof(output3));
   for (size_t i = 0; i < crypto::kSHA256Length; i++)
-    EXPECT_EQ(expected3[i], static_cast<int>(output3[i]));
+    EXPECT_EQ(kExpectedHashOfInput3[i], static_cast<int>(output3[i]));
 }
 
-// Save the crypto state mid-stream, and create another instance with the
-// saved state.  Then feed the same data afterwards to both.
-// When done, both should have the same hash value.
-TEST(SecureHashTest, TestSerialization) {
+TEST(SecureHashTest, TestClone) {
   std::string input1(10001, 'a');  // 'a' repeated 10001 times
-  std::string input2(10001, 'b');  // 'b' repeated 10001 times
-  std::string input3(10001, 'c');  // 'c' repeated 10001 times
-  std::string input4(10001, 'd');  // 'd' repeated 10001 times
-  std::string input5(10001, 'e');  // 'e' repeated 10001 times
+  std::string input2(10001, 'd');  // 'd' repeated 10001 times
+
+  const uint8_t kExpectedHashOfInput1[crypto::kSHA256Length] = {
+      0x0c, 0xab, 0x99, 0xa0, 0x58, 0x60, 0x0f, 0xfa, 0xad, 0x12, 0x92,
+      0xd0, 0xc5, 0x3c, 0x05, 0x48, 0xeb, 0xaf, 0x88, 0xdd, 0x1d, 0x01,
+      0x03, 0x03, 0x45, 0x70, 0x5f, 0x01, 0x8a, 0x81, 0x39, 0x09};
+  const uint8_t kExpectedHashOfInput1And2[crypto::kSHA256Length] = {
+      0x4c, 0x8e, 0x26, 0x5a, 0xc3, 0x85, 0x1f, 0x1f, 0xa5, 0x04, 0x1c,
+      0xc7, 0x88, 0x53, 0x1c, 0xc7, 0x80, 0x47, 0x15, 0xfb, 0x47, 0xff,
+      0x72, 0xb1, 0x28, 0x37, 0xb0, 0x4d, 0x6e, 0x22, 0x2e, 0x4d};
 
   uint8_t output1[crypto::kSHA256Length];
   uint8_t output2[crypto::kSHA256Length];
+  uint8_t output3[crypto::kSHA256Length];
 
   scoped_ptr<crypto::SecureHash> ctx1(crypto::SecureHash::Create(
       crypto::SecureHash::SHA256));
-  scoped_ptr<crypto::SecureHash> ctx2(crypto::SecureHash::Create(
-      crypto::SecureHash::SHA256));
-  base::Pickle pickle;
   ctx1->Update(input1.data(), input1.size());
+
+  scoped_ptr<crypto::SecureHash> ctx2(ctx1->Clone());
+  scoped_ptr<crypto::SecureHash> ctx3(ctx2->Clone());
+  // At this point, ctx1, ctx2, and ctx3 are all equivalent and represent the
+  // state after hashing input1.
+
+  // Updating ctx1 and ctx2 with input2 should produce equivalent results.
   ctx1->Update(input2.data(), input2.size());
-  ctx1->Update(input3.data(), input3.size());
-
-  EXPECT_TRUE(ctx1->Serialize(&pickle));
-  ctx1->Update(input4.data(), input4.size());
-  ctx1->Update(input5.data(), input5.size());
-
   ctx1->Finish(output1, sizeof(output1));
 
-  base::PickleIterator data_iterator(pickle);
-  EXPECT_TRUE(ctx2->Deserialize(&data_iterator));
-  ctx2->Update(input4.data(), input4.size());
-  ctx2->Update(input5.data(), input5.size());
-
+  ctx2->Update(input2.data(), input2.size());
   ctx2->Finish(output2, sizeof(output2));
 
   EXPECT_EQ(0, memcmp(output1, output2, crypto::kSHA256Length));
+  EXPECT_EQ(0,
+            memcmp(output1, kExpectedHashOfInput1And2, crypto::kSHA256Length));
+
+  // Finish() ctx3, which should produce the hash of input1.
+  ctx3->Finish(&output3, sizeof(output3));
+  EXPECT_EQ(0, memcmp(output3, kExpectedHashOfInput1, crypto::kSHA256Length));
+}
+
+TEST(SecureHashTest, TestLength) {
+  scoped_ptr<crypto::SecureHash> ctx(
+      crypto::SecureHash::Create(crypto::SecureHash::SHA256));
+  EXPECT_EQ(crypto::kSHA256Length, ctx->GetHashLength());
 }
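
A hedged sketch of the Clone()/GetHashLength() interface the rewritten test
exercises; HashPrefixAndWhole and its inputs are assumptions:

#include <stdint.h>

#include <string>
#include <vector>

#include "base/memory/scoped_ptr.h"
#include "crypto/secure_hash.h"

void HashPrefixAndWhole(const std::string& head, const std::string& tail) {
  scoped_ptr<crypto::SecureHash> hash(
      crypto::SecureHash::Create(crypto::SecureHash::SHA256));
  hash->Update(head.data(), head.size());

  // Fork the state: |prefix| finishes over |head| alone while |hash| goes on.
  scoped_ptr<crypto::SecureHash> prefix(hash->Clone());
  std::vector<uint8_t> prefix_digest(prefix->GetHashLength());
  prefix->Finish(prefix_digest.data(), prefix_digest.size());

  hash->Update(tail.data(), tail.size());
  std::vector<uint8_t> full_digest(hash->GetHashLength());
  hash->Finish(full_digest.data(), full_digest.size());
}
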
diff --git a/crypto/signature_creator_unittest.cc b/crypto/signature_creator_unittest.cc
index af1a042..fff065e 100644
--- a/crypto/signature_creator_unittest.cc
+++ b/crypto/signature_creator_unittest.cc
@@ -14,19 +14,6 @@
 #include "crypto/signature_verifier.h"
 #include "testing/gtest/include/gtest/gtest.h"
 
-namespace {
-
-// This is the algorithm ID for SHA-1 with RSA encryption.
-const uint8_t kSHA1WithRSAAlgorithmID[] = {0x30, 0x0d, 0x06, 0x09, 0x2a,
-                                           0x86, 0x48, 0x86, 0xf7, 0x0d,
-                                           0x01, 0x01, 0x05, 0x05, 0x00};
-
-// This is the algorithm ID for SHA-1 with RSA encryption.
-const uint8_t kSHA256WithRSAAlgorithmID[] = {0x30, 0x0d, 0x06, 0x09, 0x2a,
-                                             0x86, 0x48, 0x86, 0xf7, 0x0d,
-                                             0x01, 0x01, 0x0B, 0x05, 0x00};
-}
-
 TEST(SignatureCreatorTest, BasicTest) {
   // Do a verify round trip.
   scoped_ptr<crypto::RSAPrivateKey> key_original(
@@ -56,9 +43,8 @@
 
   crypto::SignatureVerifier verifier;
   ASSERT_TRUE(verifier.VerifyInit(
-      kSHA1WithRSAAlgorithmID, sizeof(kSHA1WithRSAAlgorithmID),
-      &signature.front(), signature.size(),
-      &public_key_info.front(), public_key_info.size()));
+      crypto::SignatureVerifier::RSA_PKCS1_SHA1, &signature.front(),
+      signature.size(), &public_key_info.front(), public_key_info.size()));
 
   verifier.VerifyUpdate(reinterpret_cast<const uint8_t*>(data.c_str()),
                         data.size());
@@ -91,9 +77,8 @@
   // Verify the input data.
   crypto::SignatureVerifier verifier;
   ASSERT_TRUE(verifier.VerifyInit(
-      kSHA1WithRSAAlgorithmID, sizeof(kSHA1WithRSAAlgorithmID),
-      &signature.front(), signature.size(),
-      &public_key_info.front(), public_key_info.size()));
+      crypto::SignatureVerifier::RSA_PKCS1_SHA1, &signature.front(),
+      signature.size(), &public_key_info.front(), public_key_info.size()));
 
   verifier.VerifyUpdate(reinterpret_cast<const uint8_t*>(data.c_str()),
                         data.size());
@@ -127,9 +112,8 @@
   // Verify the input data.
   crypto::SignatureVerifier verifier;
   ASSERT_TRUE(verifier.VerifyInit(
-      kSHA256WithRSAAlgorithmID, sizeof(kSHA256WithRSAAlgorithmID),
-      &signature.front(), signature.size(),
-      &public_key_info.front(), public_key_info.size()));
+      crypto::SignatureVerifier::RSA_PKCS1_SHA256, &signature.front(),
+      signature.size(), &public_key_info.front(), public_key_info.size()));
 
   verifier.VerifyUpdate(reinterpret_cast<const uint8_t*>(data.c_str()),
                         data.size());
diff --git a/crypto/signature_verifier.h b/crypto/signature_verifier.h
index b26a0df..5b7369f 100644
--- a/crypto/signature_verifier.h
+++ b/crypto/signature_verifier.h
@@ -33,6 +33,13 @@
     SHA256,
   };
 
+  // The set of supported signature algorithms. Extend as required.
+  enum SignatureAlgorithm {
+    RSA_PKCS1_SHA1,
+    RSA_PKCS1_SHA256,
+    ECDSA_SHA256,
+  };
+
   SignatureVerifier();
   ~SignatureVerifier();
 
@@ -42,16 +49,7 @@
   // by one or more VerifyUpdate calls and a VerifyFinal call.
   // NOTE: for RSA-PSS signatures, use VerifyInitRSAPSS instead.
   //
-  // The signature algorithm is specified as a DER encoded ASN.1
-  // AlgorithmIdentifier structure:
-  //   AlgorithmIdentifier  ::=  SEQUENCE  {
-  //       algorithm               OBJECT IDENTIFIER,
-  //       parameters              ANY DEFINED BY algorithm OPTIONAL  }
-  //
-  // The signature is encoded according to the signature algorithm, but it
-  // must not be further encoded in an ASN.1 BIT STRING.
-  // Note: An RSA signature is actually a big integer.  It must be in
-  // big-endian byte order.
+  // The signature is encoded according to the signature algorithm.
   //
   // The public key is specified as a DER encoded ASN.1 SubjectPublicKeyInfo
   // structure, which contains not only the public key but also its type
@@ -59,8 +57,7 @@
   //   SubjectPublicKeyInfo  ::=  SEQUENCE  {
   //       algorithm            AlgorithmIdentifier,
   //       subjectPublicKey     BIT STRING  }
-  bool VerifyInit(const uint8_t* signature_algorithm,
-                  int signature_algorithm_len,
+  bool VerifyInit(SignatureAlgorithm signature_algorithm,
                   const uint8_t* signature,
                   int signature_len,
                   const uint8_t* public_key_info,
@@ -98,19 +95,10 @@
   // error occurred.
   bool VerifyFinal();
 
-  // Note: we can provide a one-shot interface if there is interest:
-  //   bool Verify(const uint8_t* data,
-  //               int data_len,
-  //               const uint8_t* signature_algorithm,
-  //               int signature_algorithm_len,
-  //               const uint8_t* signature,
-  //               int signature_len,
-  //               const uint8_t* public_key_info,
-  //               int public_key_info_len);
-
  private:
 #if defined(USE_OPENSSL)
-  bool CommonInit(const EVP_MD* digest,
+  bool CommonInit(int pkey_type,
+                  const EVP_MD* digest,
                   const uint8_t* signature,
                   int signature_len,
                   const uint8_t* public_key_info,
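
With the enum in place, callers name the algorithm directly instead of
handing over DER-encoded AlgorithmIdentifier bytes. A hedged sketch;
CheckRsaSha256 and its buffers are hypothetical:

#include <stdint.h>

#include "crypto/signature_verifier.h"

bool CheckRsaSha256(const uint8_t* signature, int signature_len,
                    const uint8_t* public_key_info, int public_key_info_len,
                    const uint8_t* data, int data_len) {
  crypto::SignatureVerifier verifier;
  if (!verifier.VerifyInit(crypto::SignatureVerifier::RSA_PKCS1_SHA256,
                           signature, signature_len, public_key_info,
                           public_key_info_len)) {
    return false;
  }
  verifier.VerifyUpdate(data, data_len);
  return verifier.VerifyFinal();
}
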
diff --git a/crypto/signature_verifier_nss.cc b/crypto/signature_verifier_nss.cc
index e6cd3e0..edbd3f6 100644
--- a/crypto/signature_verifier_nss.cc
+++ b/crypto/signature_verifier_nss.cc
@@ -30,6 +30,18 @@
   return HASH_AlgNULL;
 }
 
+SECOidTag ToNSSSignatureType(SignatureVerifier::SignatureAlgorithm sig_alg) {
+  switch (sig_alg) {
+    case SignatureVerifier::RSA_PKCS1_SHA1:
+      return SEC_OID_PKCS1_SHA1_WITH_RSA_ENCRYPTION;
+    case SignatureVerifier::RSA_PKCS1_SHA256:
+      return SEC_OID_PKCS1_SHA256_WITH_RSA_ENCRYPTION;
+    case SignatureVerifier::ECDSA_SHA256:
+      return SEC_OID_ANSIX962_ECDSA_SHA256_SIGNATURE;
+  }
+  return SEC_OID_UNKNOWN;
+}
+
 SECStatus VerifyRSAPSS_End(SECKEYPublicKey* public_key,
                            HASHContext* hash_context,
                            HASH_HashType mask_hash_alg,
@@ -74,8 +86,7 @@
   Reset();
 }
 
-bool SignatureVerifier::VerifyInit(const uint8_t* signature_algorithm,
-                                   int signature_algorithm_len,
+bool SignatureVerifier::VerifyInit(SignatureAlgorithm signature_algorithm,
                                    const uint8_t* signature,
                                    int signature_len,
                                    const uint8_t* public_key_info,
@@ -90,37 +101,13 @@
   if (!public_key)
     return false;
 
-  PLArenaPool* arena = PORT_NewArena(DER_DEFAULT_CHUNKSIZE);
-  if (!arena) {
-    SECKEY_DestroyPublicKey(public_key);
-    return false;
-  }
-
-  SECItem sig_alg_der;
-  sig_alg_der.type = siBuffer;
-  sig_alg_der.data = const_cast<uint8_t*>(signature_algorithm);
-  sig_alg_der.len = signature_algorithm_len;
-  SECAlgorithmID sig_alg_id;
-  SECStatus rv;
-  rv = SEC_QuickDERDecodeItem(arena, &sig_alg_id,
-                              SEC_ASN1_GET(SECOID_AlgorithmIDTemplate),
-                              &sig_alg_der);
-  if (rv != SECSuccess) {
-    SECKEY_DestroyPublicKey(public_key);
-    PORT_FreeArena(arena, PR_TRUE);
-    return false;
-  }
-
   SECItem sig;
   sig.type = siBuffer;
   sig.data = const_cast<uint8_t*>(signature);
   sig.len = signature_len;
-  SECOidTag hash_alg_tag;
-  vfy_context_ = VFY_CreateContextWithAlgorithmID(public_key, &sig,
-                                                  &sig_alg_id, &hash_alg_tag,
-                                                  NULL);
+  vfy_context_ = VFY_CreateContext(
+      public_key, &sig, ToNSSSignatureType(signature_algorithm), nullptr);
   SECKEY_DestroyPublicKey(public_key);  // Done with public_key.
-  PORT_FreeArena(arena, PR_TRUE);  // Done with sig_alg_id.
   if (!vfy_context_) {
     // A corrupted RSA signature could be detected without the data, so
     // VFY_CreateContextWithAlgorithmID may fail with SEC_ERROR_BAD_SIGNATURE
@@ -128,8 +115,7 @@
     return false;
   }
 
-  rv = VFY_Begin(vfy_context_);
-  if (rv != SECSuccess) {
+  if (VFY_Begin(vfy_context_) != SECSuccess) {
     NOTREACHED();
     return false;
   }
diff --git a/crypto/signature_verifier_openssl.cc b/crypto/signature_verifier_openssl.cc
index a756149..495abd2 100644
--- a/crypto/signature_verifier_openssl.cc
+++ b/crypto/signature_verifier_openssl.cc
@@ -4,8 +4,9 @@
 
 #include "crypto/signature_verifier.h"
 
+#include <openssl/bytestring.h>
+#include <openssl/digest.h>
 #include <openssl/evp.h>
-#include <openssl/x509.h>
 #include <stdint.h>
 
 #include <vector>
@@ -43,33 +44,32 @@
   Reset();
 }
 
-bool SignatureVerifier::VerifyInit(const uint8_t* signature_algorithm,
-                                   int signature_algorithm_len,
+bool SignatureVerifier::VerifyInit(SignatureAlgorithm signature_algorithm,
                                    const uint8_t* signature,
                                    int signature_len,
                                    const uint8_t* public_key_info,
                                    int public_key_info_len) {
-  OpenSSLErrStackTracer err_tracer(FROM_HERE);
-  ScopedOpenSSL<X509_ALGOR, X509_ALGOR_free> algorithm(
-      d2i_X509_ALGOR(NULL, &signature_algorithm, signature_algorithm_len));
-  if (!algorithm.get())
-    return false;
-  int nid = OBJ_obj2nid(algorithm.get()->algorithm);
-  const EVP_MD* digest;
-  if (nid == NID_ecdsa_with_SHA1) {
-    digest = EVP_sha1();
-  } else if (nid == NID_ecdsa_with_SHA256) {
-    digest = EVP_sha256();
-  } else {
-    // This works for PKCS #1 v1.5 RSA signatures, but not for ECDSA
-    // signatures.
-    digest = EVP_get_digestbyobj(algorithm.get()->algorithm);
+  int pkey_type = EVP_PKEY_NONE;
+  const EVP_MD* digest = nullptr;
+  switch (signature_algorithm) {
+    case RSA_PKCS1_SHA1:
+      pkey_type = EVP_PKEY_RSA;
+      digest = EVP_sha1();
+      break;
+    case RSA_PKCS1_SHA256:
+      pkey_type = EVP_PKEY_RSA;
+      digest = EVP_sha256();
+      break;
+    case ECDSA_SHA256:
+      pkey_type = EVP_PKEY_EC;
+      digest = EVP_sha256();
+      break;
   }
-  if (!digest)
-    return false;
+  DCHECK_NE(EVP_PKEY_NONE, pkey_type);
+  DCHECK(digest);
 
-  return CommonInit(digest, signature, signature_len, public_key_info,
-                    public_key_info_len, NULL);
+  return CommonInit(pkey_type, digest, signature, signature_len,
+                    public_key_info, public_key_info_len, nullptr);
 }
 
 bool SignatureVerifier::VerifyInitRSAPSS(HashAlgorithm hash_alg,
@@ -87,8 +87,8 @@
   }
 
   EVP_PKEY_CTX* pkey_ctx;
-  if (!CommonInit(digest, signature, signature_len, public_key_info,
-                  public_key_info_len, &pkey_ctx)) {
+  if (!CommonInit(EVP_PKEY_RSA, digest, signature, signature_len,
+                  public_key_info, public_key_info_len, &pkey_ctx)) {
     return false;
   }
 
@@ -100,11 +100,8 @@
   if (!mgf_digest) {
     return false;
   }
-  rv = EVP_PKEY_CTX_set_rsa_mgf1_md(pkey_ctx, mgf_digest);
-  if (rv != 1)
-    return false;
-  rv = EVP_PKEY_CTX_set_rsa_pss_saltlen(pkey_ctx, salt_len);
-  return rv == 1;
+  return EVP_PKEY_CTX_set_rsa_mgf1_md(pkey_ctx, mgf_digest) &&
+         EVP_PKEY_CTX_set_rsa_pss_saltlen(pkey_ctx, salt_len);
 }
 
 void SignatureVerifier::VerifyUpdate(const uint8_t* data_part,
@@ -126,7 +123,8 @@
   return rv == 1;
 }
 
-bool SignatureVerifier::CommonInit(const EVP_MD* digest,
+bool SignatureVerifier::CommonInit(int pkey_type,
+                                   const EVP_MD* digest,
                                    const uint8_t* signature,
                                    int signature_len,
                                    const uint8_t* public_key_info,
@@ -139,10 +137,13 @@
 
   signature_.assign(signature, signature + signature_len);
 
-  const uint8_t* ptr = public_key_info;
-  ScopedEVP_PKEY public_key(d2i_PUBKEY(nullptr, &ptr, public_key_info_len));
-  if (!public_key.get() || ptr != public_key_info + public_key_info_len)
+  CBS cbs;
+  CBS_init(&cbs, public_key_info, public_key_info_len);
+  ScopedEVP_PKEY public_key(EVP_parse_public_key(&cbs));
+  if (!public_key || CBS_len(&cbs) != 0 ||
+      EVP_PKEY_id(public_key.get()) != pkey_type) {
     return false;
+  }
 
   verify_context_->ctx.reset(EVP_MD_CTX_create());
   int rv = EVP_DigestVerifyInit(verify_context_->ctx.get(), pkey_ctx,
diff --git a/crypto/signature_verifier_unittest.cc b/crypto/signature_verifier_unittest.cc
index adcc885..d71ea82 100644
--- a/crypto/signature_verifier_unittest.cc
+++ b/crypto/signature_verifier_unittest.cc
@@ -14,9 +14,9 @@
 TEST(SignatureVerifierTest, BasicTest) {
   // The input data in this test comes from real certificates.
   //
-  // tbs_certificate ("to-be-signed certificate", the part of a certificate
-  // that is signed), signature_algorithm, and algorithm come from the
-  // certificate of bugs.webkit.org.
+  // tbs_certificate ("to-be-signed certificate", the part of a certificate that
+  // is signed), signature, and algorithm come from the certificate of
+  // bugs.webkit.org.
   //
   // public_key_info comes from the certificate of the issuer, Go Daddy Secure
   // Certification Authority.
@@ -116,19 +116,6 @@
       0x74, 0x2e, 0x6f, 0x72, 0x67, 0x82, 0x0a, 0x77, 0x65, 0x62, 0x6b, 0x69,
       0x74, 0x2e, 0x6f, 0x72, 0x67};
 
-  // The signature algorithm is specified as the following ASN.1 structure:
-  //    AlgorithmIdentifier  ::=  SEQUENCE  {
-  //        algorithm               OBJECT IDENTIFIER,
-  //        parameters              ANY DEFINED BY algorithm OPTIONAL  }
-  //
-  const uint8_t signature_algorithm[15] = {
-      0x30, 0x0d,  // a SEQUENCE of length 13 (0xd)
-      0x06, 0x09,  // an OBJECT IDENTIFIER of length 9
-      // 1.2.840.113549.1.1.5 - sha1WithRSAEncryption
-      0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x01, 0x01, 0x05, 0x05,
-      0x00,  // a NULL of length 0
-  };
-
   // RSA signature, a big integer in the big-endian byte order.
   const uint8_t signature[256] = {
       0x1e, 0x6a, 0xe7, 0xe0, 0x4f, 0xe7, 0x4d, 0xd0, 0x69, 0x7c, 0xf8, 0x8f,
@@ -202,12 +189,11 @@
   crypto::SignatureVerifier verifier;
   bool ok;
 
-  // Test  1: feed all of the data to the verifier at once (a single
+  // Test 1: feed all of the data to the verifier at once (a single
   // VerifyUpdate call).
-  ok = verifier.VerifyInit(signature_algorithm,
-                           sizeof(signature_algorithm),
-                           signature, sizeof(signature),
-                           public_key_info, sizeof(public_key_info));
+  ok = verifier.VerifyInit(crypto::SignatureVerifier::RSA_PKCS1_SHA1, signature,
+                           sizeof(signature), public_key_info,
+                           sizeof(public_key_info));
   EXPECT_TRUE(ok);
   verifier.VerifyUpdate(tbs_certificate, sizeof(tbs_certificate));
   ok = verifier.VerifyFinal();
@@ -215,12 +201,11 @@
 
   // Test 2: feed the data to the verifier in three parts (three VerifyUpdate
   // calls).
-  ok = verifier.VerifyInit(signature_algorithm,
-                           sizeof(signature_algorithm),
-                           signature, sizeof(signature),
-                           public_key_info, sizeof(public_key_info));
+  ok = verifier.VerifyInit(crypto::SignatureVerifier::RSA_PKCS1_SHA1, signature,
+                           sizeof(signature), public_key_info,
+                           sizeof(public_key_info));
   EXPECT_TRUE(ok);
-  verifier.VerifyUpdate(tbs_certificate,       256);
+  verifier.VerifyUpdate(tbs_certificate, 256);
   verifier.VerifyUpdate(tbs_certificate + 256, 256);
   verifier.VerifyUpdate(tbs_certificate + 512, sizeof(tbs_certificate) - 512);
   ok = verifier.VerifyFinal();
@@ -230,10 +215,9 @@
   uint8_t bad_tbs_certificate[sizeof(tbs_certificate)];
   memcpy(bad_tbs_certificate, tbs_certificate, sizeof(tbs_certificate));
   bad_tbs_certificate[10] += 1;  // Corrupt one byte of the data.
-  ok = verifier.VerifyInit(signature_algorithm,
-                           sizeof(signature_algorithm),
-                           signature, sizeof(signature),
-                           public_key_info, sizeof(public_key_info));
+  ok = verifier.VerifyInit(crypto::SignatureVerifier::RSA_PKCS1_SHA1, signature,
+                           sizeof(signature), public_key_info,
+                           sizeof(public_key_info));
   EXPECT_TRUE(ok);
   verifier.VerifyUpdate(bad_tbs_certificate, sizeof(bad_tbs_certificate));
   ok = verifier.VerifyFinal();
@@ -243,8 +227,7 @@
   uint8_t bad_signature[sizeof(signature)];
   memcpy(bad_signature, signature, sizeof(signature));
   bad_signature[10] += 1;  // Corrupt one byte of the signature.
-  ok = verifier.VerifyInit(signature_algorithm,
-                           sizeof(signature_algorithm),
+  ok = verifier.VerifyInit(crypto::SignatureVerifier::RSA_PKCS1_SHA1,
                            bad_signature, sizeof(bad_signature),
                            public_key_info, sizeof(public_key_info));
 
@@ -260,20 +243,18 @@
   uint8_t bad_public_key_info[sizeof(public_key_info)];
   memcpy(bad_public_key_info, public_key_info, sizeof(public_key_info));
   bad_public_key_info[0] += 1;  // Corrupt part of the SPKI syntax.
-  ok = verifier.VerifyInit(signature_algorithm,
-                           sizeof(signature_algorithm),
-                           signature, sizeof(signature),
-                           bad_public_key_info, sizeof(bad_public_key_info));
+  ok = verifier.VerifyInit(crypto::SignatureVerifier::RSA_PKCS1_SHA1, signature,
+                           sizeof(signature), bad_public_key_info,
+                           sizeof(bad_public_key_info));
   EXPECT_FALSE(ok);
 
   // Test 6: import a key with extra data.
   uint8_t long_public_key_info[sizeof(public_key_info) + 5];
   memset(long_public_key_info, 0, sizeof(long_public_key_info));
   memcpy(long_public_key_info, public_key_info, sizeof(public_key_info));
-  ok = verifier.VerifyInit(signature_algorithm,
-                           sizeof(signature_algorithm),
-                           signature, sizeof(signature),
-                           long_public_key_info, sizeof(long_public_key_info));
+  ok = verifier.VerifyInit(crypto::SignatureVerifier::RSA_PKCS1_SHA1, signature,
+                           sizeof(signature), long_public_key_info,
+                           sizeof(long_public_key_info));
   EXPECT_FALSE(ok);
 }
 
@@ -1022,7 +1003,7 @@
   //       algorithm            AlgorithmIdentifier,
   //       subjectPublicKey     BIT STRING  }
   //
-  // The signature algorithm is specified as the following ASN.1 structure:
+  // The algorithm is specified as the following ASN.1 structure:
   //    AlgorithmIdentifier  ::=  SEQUENCE  {
   //        algorithm               OBJECT IDENTIFIER,
   //        parameters              ANY DEFINED BY algorithm OPTIONAL  }
diff --git a/crypto/third_party/nss/sha512.cc b/crypto/third_party/nss/sha512.cc
index 5ef4e50..78950cb 100644
--- a/crypto/third_party/nss/sha512.cc
+++ b/crypto/third_party/nss/sha512.cc
@@ -471,6 +471,11 @@
 	*digestLen = padLen;
 }
 
+void SHA256_Clone(SHA256Context* dest, SHA256Context* src)
+{
+  memcpy(dest, src, sizeof *dest);
+}
+
 /* Comment out unused code, mostly the SHA384 and SHA512 implementations. */
 #if 0
 SECStatus
@@ -519,12 +524,6 @@
     return ctx;
 }
 
-void SHA256_Clone(SHA256Context *dest, SHA256Context *src)
-{
-    memcpy(dest, src, sizeof *dest);
-}
-
-
 /* ======= SHA512 and SHA384 common constants and defines ================= */
 
 /* common #defines for SHA512 and SHA384 */
diff --git a/dbus/object_proxy.cc b/dbus/object_proxy.cc
index 9fb3ee4..e30c9fd 100644
--- a/dbus/object_proxy.cc
+++ b/dbus/object_proxy.cc
@@ -565,17 +565,19 @@
   if (ignore_service_unknown_errors_ &&
       (error_name == kErrorServiceUnknown || error_name == kErrorObjectUnknown))
     return;
-  logging::LogSeverity severity = logging::LOG_ERROR;
-  // "UnknownObject" indicates that an object or service is no longer available,
-  // e.g. a Shill network service has gone out of range. Treat these as warnings
-  // not errors.
-  if (error_name == kErrorObjectUnknown)
-    severity = logging::LOG_WARNING;
+
   std::ostringstream msg;
   msg << "Failed to call method: " << interface_name << "." << method_name
       << ": object_path= " << object_path_.value()
       << ": " << error_name << ": " << error_message;
-  logging::LogAtLevel(severity, msg.str());
+
+  // "UnknownObject" indicates that an object or service is no longer available,
+  // e.g. a Shill network service has gone out of range. Treat these as warnings
+  // not errors.
+  if (error_name == kErrorObjectUnknown)
+    LOG(WARNING) << msg.str();
+  else
+    LOG(ERROR) << msg.str();
 }
 
 void ObjectProxy::OnCallMethodError(const std::string& interface_name,
diff --git a/sandbox/linux/services/namespace_utils.h b/sandbox/linux/services/namespace_utils.h
index 7231033..ec5d241 100644
--- a/sandbox/linux/services/namespace_utils.h
+++ b/sandbox/linux/services/namespace_utils.h
@@ -7,9 +7,10 @@
 
 #include <sys/types.h>
 
+#include <type_traits>
+
 #include "base/compiler_specific.h"
 #include "base/macros.h"
-#include "base/template_util.h"
 #include "sandbox/sandbox_export.h"
 
 namespace sandbox {
@@ -17,7 +18,7 @@
 // Utility functions for using Linux namepaces.
 class SANDBOX_EXPORT NamespaceUtils {
  public:
-  static_assert((base::is_same<uid_t, gid_t>::value),
+  static_assert(std::is_same<uid_t, gid_t>::value,
                 "uid_t and gid_t must be the same type");
   // generic_id_t can be used for either uid_t or gid_t.
   typedef uid_t generic_id_t;
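
The replacement trait compiles standalone with only <type_traits>; a minimal
sketch of the guarded typedef:

#include <sys/types.h>

#include <type_traits>

static_assert(std::is_same<uid_t, gid_t>::value,
              "uid_t and gid_t must be the same type");
typedef uid_t generic_id_t;  // safe to use for either uid_t or gid_t
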
diff --git a/sandbox/win/src/interceptors.h b/sandbox/win/src/interceptors.h
index a17447a..2391957 100644
--- a/sandbox/win/src/interceptors.h
+++ b/sandbox/win/src/interceptors.h
@@ -19,7 +19,7 @@
   SET_INFORMATION_THREAD_ID,
   OPEN_THREAD_TOKEN_ID,
   OPEN_THREAD_TOKEN_EX_ID,
-  OPEN_TREAD_ID,
+  OPEN_THREAD_ID,
   OPEN_PROCESS_ID,
   OPEN_PROCESS_TOKEN_ID,
   OPEN_PROCESS_TOKEN_EX_ID,
@@ -34,6 +34,7 @@
   // Process-thread dispatcher:
   CREATE_PROCESSW_ID,
   CREATE_PROCESSA_ID,
+  CREATE_THREAD_ID,
   // Registry dispatcher:
   CREATE_KEY_ID,
   OPEN_KEY_ID,
diff --git a/sandbox/win/src/ipc_tags.h b/sandbox/win/src/ipc_tags.h
index d680411..3a1724b 100644
--- a/sandbox/win/src/ipc_tags.h
+++ b/sandbox/win/src/ipc_tags.h
@@ -32,6 +32,7 @@
   IPC_GDI_GDIDLLINITIALIZE_TAG,
   IPC_GDI_GETSTOCKOBJECT_TAG,
   IPC_USER_REGISTERCLASSW_TAG,
+  IPC_CREATETHREAD_TAG,
   IPC_LAST_TAG
 };
 
diff --git a/sandbox/win/src/sandbox_policy.h b/sandbox/win/src/sandbox_policy.h
index cc39c62..df76c36 100644
--- a/sandbox/win/src/sandbox_policy.h
+++ b/sandbox/win/src/sandbox_policy.h
@@ -8,8 +8,6 @@
 #include <stddef.h>
 #include <stdint.h>
 
-#include <string>
-
 #include "base/strings/string16.h"
 #include "sandbox/win/src/sandbox_types.h"
 #include "sandbox/win/src/security_level.h"
@@ -48,17 +46,17 @@
                            // over the resulting process and thread handles.
                            // No other parameters besides the command line are
                            // passed to the child process.
-    PROCESS_ALL_EXEC,      // Allows the creation of a process and return fill
+    PROCESS_ALL_EXEC,      // Allows the creation of a process and return full
                            // access on the returned handles.
                            // This flag can be used only when the main token of
                            // the sandboxed application is at least INTERACTIVE.
     EVENTS_ALLOW_ANY,      // Allows the creation of an event with full access.
-    EVENTS_ALLOW_READONLY, // Allows opening an even with synchronize access.
-    REG_ALLOW_READONLY,    // Allows readonly access to a registry key.
-    REG_ALLOW_ANY,         // Allows read and write access to a registry key.
-    FAKE_USER_GDI_INIT     // Fakes user32 and gdi32 initialization. This can
-                           // be used to allow the DLLs to load and initialize
-                           // even if the process cannot access that subsystem.
+    EVENTS_ALLOW_READONLY,  // Allows opening an event with synchronize access.
+    REG_ALLOW_READONLY,     // Allows readonly access to a registry key.
+    REG_ALLOW_ANY,          // Allows read and write access to a registry key.
+    FAKE_USER_GDI_INIT      // Fakes user32 and gdi32 initialization. This can
+                            // be used to allow the DLLs to load and initialize
+                            // even if the process cannot access that subsystem.
   };
 
   // Increments the reference count of this object. The reference count must
@@ -135,6 +133,9 @@
   virtual ResultCode SetJobLevel(JobLevel job_level,
                                  uint32_t ui_exceptions) = 0;
 
+  // Returns the job level.
+  virtual JobLevel GetJobLevel() const = 0;
+
   // Sets a hard limit on the size of the commit set for the sandboxed process.
   // If the limit is reached, the process will be terminated with
   // SBOX_FATAL_MEMORY_EXCEEDED (7012).
@@ -206,6 +207,10 @@
   // Returns the currently set delayed mitigation flags.
   virtual MitigationFlags GetDelayedProcessMitigations() const = 0;
 
+  // Disconnect the target from CSRSS when TargetServices::LowerToken() is
+  // called inside the target.
+  virtual void SetDisconnectCsrss() = 0;
+
   // Sets the interceptions to operate in strict mode. By default, interceptions
   // are performed in "relaxed" mode, where if something inside NTDLL.DLL is
   // already patched we attempt to intercept it anyway. Setting interceptions
@@ -246,11 +251,9 @@
   virtual ResultCode AddKernelObjectToClose(const wchar_t* handle_type,
                                             const wchar_t* handle_name) = 0;
 
-  // Adds a handle that will be shared with the target process.
-  // Returns the handle which was actually shared with the target. This is
-  // achieved by duplicating the handle to ensure that it is inheritable by
-  // the target. The caller should treat this as an opaque value.
-  virtual void* AddHandleToShare(HANDLE handle) = 0;
+  // Adds a handle that will be shared with the target process. Does not take
+  // ownership of the handle.
+  virtual void AddHandleToShare(HANDLE handle) = 0;
 };
 
 }  // namespace sandbox
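
A hypothetical sketch of driving the revised policy surface; |broker|, the
chosen job level, and |log_handle| are assumptions, not part of this change:

sandbox::TargetPolicy* policy = broker->CreatePolicy();
policy->SetJobLevel(sandbox::JOB_LOCKDOWN, /*ui_exceptions=*/0);
DCHECK_EQ(sandbox::JOB_LOCKDOWN, policy->GetJobLevel());
policy->SetDisconnectCsrss();          // takes effect at LowerToken() time
policy->AddHandleToShare(log_handle);  // now returns void; handle not owned
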
diff --git a/sandbox/win/src/security_level.h b/sandbox/win/src/security_level.h
index 26ec306..87abdeb 100644
--- a/sandbox/win/src/security_level.h
+++ b/sandbox/win/src/security_level.h
@@ -183,8 +183,7 @@
 // PROCESS_CREATION_MITIGATION_POLICY_STRICT_HANDLE_CHECKS_ALWAYS_ON.
 const MitigationFlags MITIGATION_STRICT_HANDLE_CHECKS             = 0x00000100;
 
-// Prevents the process from making Win32k calls. Must be enabled after
-// startup. Corresponds to
+// Prevents the process from making Win32k calls. Corresponds to
 // PROCESS_CREATION_MITIGATION_POLICY_WIN32K_SYSTEM_CALL_DISABLE_ALWAYS_ON.
 const MitigationFlags MITIGATION_WIN32K_DISABLE                   = 0x00000200;
 
@@ -193,6 +192,11 @@
 // PROCESS_CREATION_MITIGATION_POLICY_EXTENSION_POINT_DISABLE_ALWAYS_ON.
 const MitigationFlags MITIGATION_EXTENSION_DLL_DISABLE            = 0x00000400;
 
+// Prevents the process from loading non-system fonts into GDI.
+// Corresponds to
+// PROCESS_CREATION_MITIGATION_POLICY_FONT_DISABLE_ALWAYS_ON.
+const MitigationFlags MITIGATION_NONSYSTEM_FONT_DISABLE = 0x00000800;
+
 // Sets the DLL search order to LOAD_LIBRARY_SEARCH_DEFAULT_DIRS. Additional
 // directories can be added via the Windows AddDllDirectory() function.
 // http://msdn.microsoft.com/en-us/library/windows/desktop/hh310515
@@ -204,6 +208,14 @@
 // opening the process token for impersonate/duplicate/assignment.
 const MitigationFlags MITIGATION_HARDEN_TOKEN_IL_POLICY  = 0x00000001ULL << 33;
 
+// Blocks mapping of images from remote devices. Corresponds to
+// PROCESS_CREATION_MITIGATION_POLICY_IMAGE_LOAD_NO_REMOTE_ALWAYS_ON.
+const MitigationFlags MITIGATION_IMAGE_LOAD_NO_REMOTE = 0x00000001ULL << 52;
+
+// Blocks mapping of images that have the low mandatory label. Corresponds to
+// PROCESS_CREATION_MITIGATION_POLICY_IMAGE_LOAD_NO_LOW_LABEL_ALWAYS_ON.
+const MitigationFlags MITIGATION_IMAGE_LOAD_NO_LOW_LABEL = 0x00000001ULL << 56;
+
 }  // namespace sandbox
 
 #endif  // SANDBOX_SRC_SECURITY_LEVEL_H_
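
The new constants compose bitwise with the existing flags; a hedged sketch,
assuming |policy| is a sandbox::TargetPolicy* and that SetProcessMitigations
accepts the combined mask:

sandbox::MitigationFlags flags = sandbox::MITIGATION_WIN32K_DISABLE |
                                 sandbox::MITIGATION_NONSYSTEM_FONT_DISABLE |
                                 sandbox::MITIGATION_IMAGE_LOAD_NO_REMOTE |
                                 sandbox::MITIGATION_IMAGE_LOAD_NO_LOW_LABEL;
policy->SetProcessMitigations(flags);
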