Version 3.26.26 (based on bleeding_edge revision r21007)

Expose promise value through promise mirror (issue 3093).

Simplified CPU/CpuFeatures: CpuFeatures::Probe() now takes the serializer state explicitly (Chromium issue 359977).
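
        A minimal sketch of the new contract (hypothetical helper;
        V8-internal types, not runnable standalone):

            // Callers pass the serializer state explicitly instead of
            // CpuFeatures querying Serializer::enabled() during probing.
            void InitializeCpuSupport(bool serializer_enabled) {
              // Keep safe defaults when a snapshot may be generated;
              // snapshots must be portable across CPUs.
              CpuFeatures::Probe(serializer_enabled);
            }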

Performance and stability improvements on all platforms.

git-svn-id: http://v8.googlecode.com/svn/trunk@21011 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
diff --git a/ChangeLog b/ChangeLog
index 52b9970..f41f017 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -1,3 +1,12 @@
+2014-04-28: Version 3.26.26
+
+        Expose promise value through promise mirror (issue 3093).
+
+        Simplified CPU/CpuFeatures: CpuFeatures::Probe() now takes the serializer state explicitly (Chromium issue 359977).
+
+        Performance and stability improvements on all platforms.
+
+
 2014-04-28: Version 3.26.25
 
         Add timestamps to CPU profile samples (Chromium issue 363976).
diff --git a/src/accessors.cc b/src/accessors.cc
index 9913943..eb99faa 100644
--- a/src/accessors.cc
+++ b/src/accessors.cc
@@ -947,21 +947,47 @@
 //
 
 
-Object* Accessors::FunctionGetName(Isolate* isolate,
-                                   Object* object,
-                                   void*) {
-  JSFunction* holder = FindInstanceOf<JSFunction>(isolate, object);
-  return holder == NULL
-      ? isolate->heap()->undefined_value()
-      : holder->shared()->name();
+void Accessors::FunctionNameGetter(
+    v8::Local<v8::String> name,
+    const v8::PropertyCallbackInfo<v8::Value>& info) {
+  i::Isolate* isolate = reinterpret_cast<i::Isolate*>(info.GetIsolate());
+  HandleScope scope(isolate);
+  Handle<Object> object = Utils::OpenHandle(*info.This());
+  MaybeHandle<JSFunction> maybe_function;
+
+  {
+    DisallowHeapAllocation no_allocation;
+    JSFunction* function = FindInstanceOf<JSFunction>(isolate, *object);
+    if (function != NULL) maybe_function = Handle<JSFunction>(function);
+  }
+
+  Handle<JSFunction> function;
+  Handle<Object> result;
+  if (maybe_function.ToHandle(&function)) {
+    result = Handle<Object>(function->shared()->name(), isolate);
+  } else {
+    result = isolate->factory()->undefined_value();
+  }
+  info.GetReturnValue().Set(Utils::ToLocal(result));
 }
 
 
-const AccessorDescriptor Accessors::FunctionName = {
-  FunctionGetName,
-  ReadOnlySetAccessor,
-  0
-};
+void Accessors::FunctionNameSetter(
+    v8::Local<v8::String> name,
+    v8::Local<v8::Value> val,
+    const v8::PropertyCallbackInfo<void>& info) {
+  // Do nothing.
+}
+
+
+Handle<AccessorInfo> Accessors::FunctionNameInfo(
+      Isolate* isolate, PropertyAttributes attributes) {
+  return MakeAccessor(isolate,
+                      isolate->factory()->name_string(),
+                      &FunctionNameGetter,
+                      &FunctionNameSetter,
+                      attributes);
+}
 
 
 //
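
For illustration, the same getter/no-op-setter shape expressed against the
public v8 API (hypothetical embedder code, not part of this patch):

    #include <v8.h>

    // A native data property whose getter echoes the property name and
    // whose setter silently ignores writes, mirroring FunctionNameSetter.
    void NameGetter(v8::Local<v8::String> name,
                    const v8::PropertyCallbackInfo<v8::Value>& info) {
      info.GetReturnValue().Set(name);
    }

    void NameSetter(v8::Local<v8::String> name, v8::Local<v8::Value> value,
                    const v8::PropertyCallbackInfo<void>& info) {
      // Do nothing: the property behaves as read-only.
    }
    // Installed via: templ->SetAccessor(name_str, NameGetter, NameSetter);
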
diff --git a/src/accessors.h b/src/accessors.h
index 226597a..db63fda 100644
--- a/src/accessors.h
+++ b/src/accessors.h
@@ -37,12 +37,12 @@
 // The list of accessor descriptors. This is a second-order macro
 // taking a macro to be applied to all accessor descriptor names.
 #define ACCESSOR_DESCRIPTOR_LIST(V) \
-  V(FunctionName)                   \
   V(FunctionArguments)              \
   V(FunctionCaller)                 \
   V(ArrayLength)
 
 #define ACCESSOR_INFO_LIST(V)       \
+  V(FunctionName)                   \
   V(FunctionLength)                 \
   V(FunctionPrototype)              \
   V(ScriptColumnOffset)             \
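
ACCESSOR_INFO_LIST is a second-order ("X") macro; a standalone sketch of the
pattern with hypothetical names:

    // Each V(...) entry is expanded by a caller-supplied macro.
    #define COLOR_LIST(V) V(Red) V(Green) V(Blue)

    #define DECLARE_ENUM(name) k##name,
    enum Color { COLOR_LIST(DECLARE_ENUM) kColorCount };  // kRed, kGreen, ...
    #undef DECLARE_ENUM
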
diff --git a/src/arm/assembler-arm.cc b/src/arm/assembler-arm.cc
index 5c1c311..7767fd2 100644
--- a/src/arm/assembler-arm.cc
+++ b/src/arm/assembler-arm.cc
@@ -48,7 +48,6 @@
 #ifdef DEBUG
 bool CpuFeatures::initialized_ = false;
 #endif
-bool CpuFeatures::hint_creating_snapshot_ = false;
 unsigned CpuFeatures::supported_ = 0;
 unsigned CpuFeatures::found_by_runtime_probing_only_ = 0;
 unsigned CpuFeatures::cross_compile_ = 0;
@@ -101,22 +100,6 @@
 }
 
 
-void CpuFeatures::SetHintCreatingSnapshot() {
-  hint_creating_snapshot_ = true;
-}
-
-
-void CpuFeatures::ProbeWithoutIsolate() {
-  Probe(hint_creating_snapshot_);
-}
-
-
-void CpuFeatures::Probe() {
-  // The Serializer can only be queried after isolate initialization.
-  Probe(Serializer::enabled());
-}
-
-
 void CpuFeatures::Probe(bool serializer_enabled) {
   uint64_t standard_features = static_cast<unsigned>(
       OS::CpuFeaturesImpliedByPlatform()) | CpuFeaturesImpliedByCompiler();
@@ -133,8 +116,6 @@
 
   if (serializer_enabled) {
     // No probing for features if we might serialize (generate snapshot).
-    printf("   ");
-    PrintFeatures();
     return;
   }
 
diff --git a/src/arm/assembler-arm.h b/src/arm/assembler-arm.h
index f64cc5c..df22a20 100644
--- a/src/arm/assembler-arm.h
+++ b/src/arm/assembler-arm.h
@@ -56,12 +56,7 @@
  public:
   // Detect features of the target CPU. Set safe defaults if the serializer
   // is enabled (snapshots must be portable).
-  static void Probe();
-
-  // A special case for printing target and features, which we want to do
-  // before initializing the isolate
-  static void SetHintCreatingSnapshot();
-  static void ProbeWithoutIsolate();
+  static void Probe(bool serializer_enabled);
 
   // Display target use when compiling.
   static void PrintTarget();
@@ -98,10 +93,9 @@
            (cross_compile_ & mask) == mask;
   }
 
- private:
-  static void Probe(bool serializer_enabled);
-  static bool hint_creating_snapshot_;
+  static bool SupportsCrankshaft() { return CpuFeatures::IsSupported(VFP3); }
 
+ private:
   static bool Check(CpuFeature f, unsigned set) {
     return (set & flag2set(f)) != 0;
   }
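
Check() is a plain bitmask membership test; a self-contained sketch (feature
enum values are used as shift counts, as in the header above):

    #include <cstdio>

    enum Feature { VFP3 = 1, NEON = 2 };  // illustrative subset
    static unsigned flag2set(Feature f) { return 1u << f; }
    static bool Check(Feature f, unsigned set) {
      return (set & flag2set(f)) != 0;
    }

    int main() {
      unsigned supported = flag2set(VFP3);
      std::printf("%d %d\n", Check(VFP3, supported),
                  Check(NEON, supported));  // prints "1 0"
      return 0;
    }
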
diff --git a/src/arm/code-stubs-arm.cc b/src/arm/code-stubs-arm.cc
index 7985459..f5d87d5 100644
--- a/src/arm/code-stubs-arm.cc
+++ b/src/arm/code-stubs-arm.cc
@@ -81,11 +81,6 @@
   static Register registers[] = { r3, r2, r1 };
   descriptor->register_param_count_ = 3;
   descriptor->register_params_ = registers;
-  static Representation representations[] = {
-    Representation::Tagged(),
-    Representation::Smi(),
-    Representation::Tagged() };
-  descriptor->register_param_representations_ = representations;
   descriptor->deoptimization_handler_ =
       Runtime::FunctionForId(
           Runtime::kHiddenCreateArrayLiteralStubBailout)->entry;
@@ -229,11 +224,6 @@
     descriptor->stack_parameter_count_ = r0;
     descriptor->register_param_count_ = 3;
     descriptor->register_params_ = registers_variable_args;
-    static Representation representations[] = {
-        Representation::Tagged(),
-        Representation::Tagged(),
-        Representation::Integer32() };
-    descriptor->register_param_representations_ = representations;
   }
 
   descriptor->hint_stack_parameter_count_ = constant_stack_parameter_count;
@@ -261,10 +251,6 @@
     descriptor->stack_parameter_count_ = r0;
     descriptor->register_param_count_ = 2;
     descriptor->register_params_ = registers_variable_args;
-    static Representation representations[] = {
-        Representation::Tagged(),
-        Representation::Integer32() };
-    descriptor->register_param_representations_ = representations;
   }
 
   descriptor->hint_stack_parameter_count_ = constant_stack_parameter_count;
@@ -5107,11 +5093,8 @@
   __ str(ip, MemOperand(r0, 3 * kPointerSize));
 
   const int kStackUnwindSpace = argc + FCA::kArgsLength + 1;
-  Address thunk_address = FUNCTION_ADDR(&InvokeFunctionCallback);
-  ExternalReference::Type thunk_type = ExternalReference::PROFILING_API_CALL;
-  ApiFunction thunk_fun(thunk_address);
-  ExternalReference thunk_ref = ExternalReference(&thunk_fun, thunk_type,
-      isolate());
+  ExternalReference thunk_ref =
+      ExternalReference::invoke_function_callback(isolate());
 
   AllowExternalCallThatCantCauseGC scope(masm);
   MemOperand context_restore_operand(
@@ -5157,12 +5140,8 @@
 
   const int kStackUnwindSpace = PropertyCallbackArguments::kArgsLength + 1;
 
-  Address thunk_address = FUNCTION_ADDR(&InvokeAccessorGetterCallback);
-  ExternalReference::Type thunk_type =
-      ExternalReference::PROFILING_GETTER_CALL;
-  ApiFunction thunk_fun(thunk_address);
-  ExternalReference thunk_ref = ExternalReference(&thunk_fun, thunk_type,
-      isolate());
+  ExternalReference thunk_ref =
+      ExternalReference::invoke_accessor_getter_callback(isolate());
   __ CallApiFunctionAndReturn(api_function_address,
                               thunk_ref,
                               kStackUnwindSpace,
diff --git a/src/arm/cpu-arm.cc b/src/arm/cpu-arm.cc
index 20c6a5d..198a244 100644
--- a/src/arm/cpu-arm.cc
+++ b/src/arm/cpu-arm.cc
@@ -46,16 +46,6 @@
 namespace v8 {
 namespace internal {
 
-void CPU::SetUp() {
-  CpuFeatures::Probe();
-}
-
-
-bool CPU::SupportsCrankshaft() {
-  return CpuFeatures::IsSupported(VFP3);
-}
-
-
 void CPU::FlushICache(void* start, size_t size) {
   // Nothing to do flushing no instructions.
   if (size == 0) {
diff --git a/src/arm/full-codegen-arm.cc b/src/arm/full-codegen-arm.cc
index b76af24..cffab77 100644
--- a/src/arm/full-codegen-arm.cc
+++ b/src/arm/full-codegen-arm.cc
@@ -1821,12 +1821,33 @@
   __ ldr(r3, FieldMemOperand(r3, JSFunction::kLiteralsOffset));
   __ mov(r2, Operand(Smi::FromInt(expr->literal_index())));
   __ mov(r1, Operand(constant_elements));
-  if (expr->depth() > 1) {
+  if (has_fast_elements && constant_elements_values->map() ==
+      isolate()->heap()->fixed_cow_array_map()) {
+    FastCloneShallowArrayStub stub(
+        isolate(),
+        FastCloneShallowArrayStub::COPY_ON_WRITE_ELEMENTS,
+        allocation_site_mode,
+        length);
+    __ CallStub(&stub);
+    __ IncrementCounter(
+        isolate()->counters()->cow_arrays_created_stub(), 1, r1, r2);
+  } else if (expr->depth() > 1 || Serializer::enabled() ||
+             length > FastCloneShallowArrayStub::kMaximumClonedLength) {
     __ mov(r0, Operand(Smi::FromInt(flags)));
     __ Push(r3, r2, r1, r0);
     __ CallRuntime(Runtime::kHiddenCreateArrayLiteral, 4);
   } else {
-    FastCloneShallowArrayStub stub(isolate(), allocation_site_mode);
+    ASSERT(IsFastSmiOrObjectElementsKind(constant_elements_kind) ||
+           FLAG_smi_only_arrays);
+    FastCloneShallowArrayStub::Mode mode =
+        FastCloneShallowArrayStub::CLONE_ANY_ELEMENTS;
+
+    if (has_fast_elements) {
+      mode = FastCloneShallowArrayStub::CLONE_ELEMENTS;
+    }
+
+    FastCloneShallowArrayStub stub(isolate(), mode, allocation_site_mode,
+                                   length);
     __ CallStub(&stub);
   }
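
The dispatch above chooses one of three code paths; restated as a standalone
decision function (hypothetical names mirroring the conditions in the hunk):

    enum Strategy { kRuntimeCall, kCopyOnWriteStub, kCloneStub };

    Strategy ChooseArrayLiteralStrategy(bool has_fast_elements,
                                        bool elements_are_cow,
                                        int depth, int length,
                                        bool serializer_enabled,
                                        int max_cloned_length) {
      if (has_fast_elements && elements_are_cow) return kCopyOnWriteStub;
      if (depth > 1 || serializer_enabled || length > max_cloned_length)
        return kRuntimeCall;  // too complex to clone with a stub
      return kCloneStub;      // CLONE_ELEMENTS or CLONE_ANY_ELEMENTS
    }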
 
diff --git a/src/arm/macro-assembler-arm.cc b/src/arm/macro-assembler-arm.cc
index 2821c32..eade5f1 100644
--- a/src/arm/macro-assembler-arm.cc
+++ b/src/arm/macro-assembler-arm.cc
@@ -2351,10 +2351,7 @@
 
   Label profiler_disabled;
   Label end_profiler_check;
-  bool* is_profiling_flag =
-      isolate()->cpu_profiler()->is_profiling_address();
-  STATIC_ASSERT(sizeof(*is_profiling_flag) == 1);
-  mov(r9, Operand(reinterpret_cast<int32_t>(is_profiling_flag)));
+  mov(r9, Operand(ExternalReference::is_profiling_address(isolate())));
   ldrb(r9, MemOperand(r9, 0));
   cmp(r9, Operand(0));
   b(eq, &profiler_disabled);
diff --git a/src/arm64/code-stubs-arm64.cc b/src/arm64/code-stubs-arm64.cc
index 293362a6..853c2c7 100644
--- a/src/arm64/code-stubs-arm64.cc
+++ b/src/arm64/code-stubs-arm64.cc
@@ -88,11 +88,6 @@
   static Register registers[] = { x3, x2, x1 };
   descriptor->register_param_count_ = sizeof(registers) / sizeof(registers[0]);
   descriptor->register_params_ = registers;
-  static Representation representations[] = {
-    Representation::Tagged(),
-    Representation::Smi(),
-    Representation::Tagged() };
-  descriptor->register_param_representations_ = representations;
   descriptor->deoptimization_handler_ =
       Runtime::FunctionForId(
           Runtime::kHiddenCreateArrayLiteralStubBailout)->entry;
@@ -258,11 +253,6 @@
     descriptor->register_param_count_ =
         sizeof(registers_variable_args) / sizeof(registers_variable_args[0]);
     descriptor->register_params_ = registers_variable_args;
-    static Representation representations[] = {
-        Representation::Tagged(),
-        Representation::Tagged(),
-        Representation::Integer32() };
-    descriptor->register_param_representations_ = representations;
   }
 
   descriptor->hint_stack_parameter_count_ = constant_stack_parameter_count;
@@ -309,10 +299,6 @@
     descriptor->register_param_count_ =
         sizeof(registers_variable_args) / sizeof(registers_variable_args[0]);
     descriptor->register_params_ = registers_variable_args;
-    static Representation representations[] = {
-        Representation::Tagged(),
-        Representation::Integer32() };
-    descriptor->register_param_representations_ = representations;
   }
 
   descriptor->hint_stack_parameter_count_ = constant_stack_parameter_count;
@@ -5326,11 +5312,8 @@
   __ Stp(x10, xzr, MemOperand(x0, 2 * kPointerSize));
 
   const int kStackUnwindSpace = argc + FCA::kArgsLength + 1;
-  Address thunk_address = FUNCTION_ADDR(&InvokeFunctionCallback);
-  ExternalReference::Type thunk_type = ExternalReference::PROFILING_API_CALL;
-  ApiFunction thunk_fun(thunk_address);
-  ExternalReference thunk_ref = ExternalReference(&thunk_fun, thunk_type,
-      isolate());
+  ExternalReference thunk_ref =
+      ExternalReference::invoke_function_callback(isolate());
 
   AllowExternalCallThatCantCauseGC scope(masm);
   MemOperand context_restore_operand(
@@ -5383,12 +5366,8 @@
 
   const int kStackUnwindSpace = PropertyCallbackArguments::kArgsLength + 1;
 
-  Address thunk_address = FUNCTION_ADDR(&InvokeAccessorGetterCallback);
-  ExternalReference::Type thunk_type =
-      ExternalReference::PROFILING_GETTER_CALL;
-  ApiFunction thunk_fun(thunk_address);
-  ExternalReference thunk_ref = ExternalReference(&thunk_fun, thunk_type,
-      isolate());
+  ExternalReference thunk_ref =
+      ExternalReference::invoke_accessor_getter_callback(isolate());
 
   const int spill_offset = 1 + kApiStackSpace;
   __ CallApiFunctionAndReturn(api_function_address,
diff --git a/src/arm64/cpu-arm64.cc b/src/arm64/cpu-arm64.cc
index b8899ad..bdc4a03 100644
--- a/src/arm64/cpu-arm64.cc
+++ b/src/arm64/cpu-arm64.cc
@@ -49,16 +49,6 @@
 unsigned CpuFeatures::icache_line_size_ = 1;
 
 
-void CPU::SetUp() {
-  CpuFeatures::Probe();
-}
-
-
-bool CPU::SupportsCrankshaft() {
-  return true;
-}
-
-
 void CPU::FlushICache(void* address, size_t length) {
   if (length == 0) {
     return;
@@ -139,7 +129,7 @@
 }
 
 
-void CpuFeatures::Probe() {
+void CpuFeatures::Probe(bool serializer_enabled) {
   // Compute I and D cache line size. The cache type register holds
   // information about the caches.
   uint32_t cache_type_register = GetCacheType();
diff --git a/src/arm64/cpu-arm64.h b/src/arm64/cpu-arm64.h
index ddec72d..009cead 100644
--- a/src/arm64/cpu-arm64.h
+++ b/src/arm64/cpu-arm64.h
@@ -42,7 +42,7 @@
  public:
   // Detect features of the target CPU. Set safe defaults if the serializer
   // is enabled (snapshots must be portable).
-  static void Probe();
+  static void Probe(bool serializer_enabled);
 
   // Check whether a feature is supported by the target CPU.
   static bool IsSupported(CpuFeature f) {
@@ -81,6 +81,8 @@
     return true;
   }
 
+  static bool SupportsCrankshaft() { return true; }
+
  private:
   // Return the content of the cache type register.
   static uint32_t GetCacheType();
diff --git a/src/arm64/full-codegen-arm64.cc b/src/arm64/full-codegen-arm64.cc
index e6b57ea..2099fde 100644
--- a/src/arm64/full-codegen-arm64.cc
+++ b/src/arm64/full-codegen-arm64.cc
@@ -1824,12 +1824,35 @@
   __ Ldr(x3, FieldMemOperand(x3, JSFunction::kLiteralsOffset));
   __ Mov(x2, Smi::FromInt(expr->literal_index()));
   __ Mov(x1, Operand(constant_elements));
-  if (expr->depth() > 1) {
+  if (has_fast_elements && constant_elements_values->map() ==
+      isolate()->heap()->fixed_cow_array_map()) {
+    FastCloneShallowArrayStub stub(
+        isolate(),
+        FastCloneShallowArrayStub::COPY_ON_WRITE_ELEMENTS,
+        allocation_site_mode,
+        length);
+    __ CallStub(&stub);
+    __ IncrementCounter(
+        isolate()->counters()->cow_arrays_created_stub(), 1, x10, x11);
+  } else if ((expr->depth() > 1) || Serializer::enabled() ||
+             length > FastCloneShallowArrayStub::kMaximumClonedLength) {
     __ Mov(x0, Smi::FromInt(flags));
     __ Push(x3, x2, x1, x0);
     __ CallRuntime(Runtime::kHiddenCreateArrayLiteral, 4);
   } else {
-    FastCloneShallowArrayStub stub(isolate(), allocation_site_mode);
+    ASSERT(IsFastSmiOrObjectElementsKind(constant_elements_kind) ||
+           FLAG_smi_only_arrays);
+    FastCloneShallowArrayStub::Mode mode =
+        FastCloneShallowArrayStub::CLONE_ANY_ELEMENTS;
+
+    if (has_fast_elements) {
+      mode = FastCloneShallowArrayStub::CLONE_ELEMENTS;
+    }
+
+    FastCloneShallowArrayStub stub(isolate(),
+                                   mode,
+                                   allocation_site_mode,
+                                   length);
     __ CallStub(&stub);
   }
 
diff --git a/src/arm64/macro-assembler-arm64.cc b/src/arm64/macro-assembler-arm64.cc
index 3d67e73..b731a97 100644
--- a/src/arm64/macro-assembler-arm64.cc
+++ b/src/arm64/macro-assembler-arm64.cc
@@ -1683,9 +1683,7 @@
 
   Label profiler_disabled;
   Label end_profiler_check;
-  bool* is_profiling_flag = isolate()->cpu_profiler()->is_profiling_address();
-  STATIC_ASSERT(sizeof(*is_profiling_flag) == 1);
-  Mov(x10, reinterpret_cast<uintptr_t>(is_profiling_flag));
+  Mov(x10, ExternalReference::is_profiling_address(isolate()));
   Ldrb(w10, MemOperand(x10));
   Cbz(w10, &profiler_disabled);
   Mov(x3, thunk_ref);
diff --git a/src/assembler.cc b/src/assembler.cc
index c3aee56..843b9c3 100644
--- a/src/assembler.cc
+++ b/src/assembler.cc
@@ -39,6 +39,7 @@
 #include "builtins.h"
 #include "counters.h"
 #include "cpu.h"
+#include "cpu-profiler.h"
 #include "debug.h"
 #include "deoptimizer.h"
 #include "execution.h"
@@ -1317,6 +1318,30 @@
 }
 
 
+ExternalReference ExternalReference::is_profiling_address(Isolate* isolate) {
+  return ExternalReference(isolate->cpu_profiler()->is_profiling_address());
+}
+
+
+ExternalReference ExternalReference::invoke_function_callback(
+    Isolate* isolate) {
+  Address thunk_address = FUNCTION_ADDR(&InvokeFunctionCallback);
+  ExternalReference::Type thunk_type = ExternalReference::PROFILING_API_CALL;
+  ApiFunction thunk_fun(thunk_address);
+  return ExternalReference(&thunk_fun, thunk_type, isolate);
+}
+
+
+ExternalReference ExternalReference::invoke_accessor_getter_callback(
+    Isolate* isolate) {
+  Address thunk_address = FUNCTION_ADDR(&InvokeAccessorGetterCallback);
+  ExternalReference::Type thunk_type =
+      ExternalReference::PROFILING_GETTER_CALL;
+  ApiFunction thunk_fun(thunk_address);
+  return ExternalReference(&thunk_fun, thunk_type, isolate);
+}
+
+
 #ifndef V8_INTERPRETED_REGEXP
 
 ExternalReference ExternalReference::re_check_stack_guard_state(
diff --git a/src/assembler.h b/src/assembler.h
index 6f0f78f..71e8c1d 100644
--- a/src/assembler.h
+++ b/src/assembler.h
@@ -861,6 +861,10 @@
 
   static ExternalReference cpu_features();
 
+  static ExternalReference is_profiling_address(Isolate* isolate);
+  static ExternalReference invoke_function_callback(Isolate* isolate);
+  static ExternalReference invoke_accessor_getter_callback(Isolate* isolate);
+
   Address address() const { return reinterpret_cast<Address>(address_); }
 
   // Function Debug::Break()
diff --git a/src/assert-scope.h b/src/assert-scope.h
index 428e6d0..2bb4623 100644
--- a/src/assert-scope.h
+++ b/src/assert-scope.h
@@ -50,7 +50,8 @@
 enum PerIsolateAssertType {
   JAVASCRIPT_EXECUTION_ASSERT,
   JAVASCRIPT_EXECUTION_THROWS,
-  ALLOCATION_FAILURE_ASSERT
+  ALLOCATION_FAILURE_ASSERT,
+  DEOPTIMIZATION_ASSERT
 };
 
 
@@ -268,6 +269,14 @@
 typedef PerIsolateAssertScopeDebugOnly<ALLOCATION_FAILURE_ASSERT, true>
     AllowAllocationFailure;
 
+// Scope to document where we do not expect deoptimization.
+typedef PerIsolateAssertScopeDebugOnly<DEOPTIMIZATION_ASSERT, false>
+    DisallowDeoptimization;
+
+// Scope to introduce an exception to DisallowDeoptimization.
+typedef PerIsolateAssertScopeDebugOnly<DEOPTIMIZATION_ASSERT, true>
+    AllowDeoptimization;
+
 } }  // namespace v8::internal
 
 #endif  // V8_ASSERT_SCOPE_H_
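
Usage follows the existing per-isolate scopes; a hedged sketch (V8-internal
types, not runnable standalone):

    // Document a region that must not deoptimize; checked in DEBUG builds.
    void DoSomethingDeoptSensitive(Isolate* isolate) {
      DisallowDeoptimization no_deopt(isolate);
      // ... code that is expected not to trigger deoptimization ...
    }
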
diff --git a/src/ast.h b/src/ast.h
index ed3a8b6e..97b6a60 100644
--- a/src/ast.h
+++ b/src/ast.h
@@ -281,8 +281,7 @@
   int length() const { return list_.length(); }
 
   void AddMapIfMissing(Handle<Map> map, Zone* zone) {
-    map = Map::CurrentMapForDeprecated(map);
-    if (map.is_null()) return;
+    if (!Map::CurrentMapForDeprecated(map).ToHandle(&map)) return;
     for (int i = 0; i < length(); ++i) {
       if (at(i).is_identical_to(map)) return;
     }
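
The call site above swaps a null-handle check for MaybeHandle::ToHandle(); a
standalone sketch of that shape with a hypothetical Maybe type:

    // Minimal stand-in for MaybeHandle: unwrap-or-early-out.
    template <typename T>
    struct Maybe {
      bool has_value;
      T value;
      bool To(T* out) const {  // analogous to MaybeHandle::ToHandle
        if (has_value) *out = value;
        return has_value;
      }
    };

    bool Example(Maybe<int> m) {
      int v;
      if (!m.To(&v)) return false;  // early-out, as in AddMapIfMissing
      return v > 0;
    }
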
diff --git a/src/bootstrapper.cc b/src/bootstrapper.cc
index 67e41fc..d577772 100644
--- a/src/bootstrapper.cc
+++ b/src/bootstrapper.cc
@@ -388,7 +388,6 @@
   int size = (prototypeMode == DONT_ADD_PROTOTYPE) ? 4 : 5;
   Map::EnsureDescriptorSlack(map, size);
 
-  Handle<Foreign> name(factory()->NewForeign(&Accessors::FunctionName));
   Handle<Foreign> args(factory()->NewForeign(&Accessors::FunctionArguments));
   Handle<Foreign> caller(factory()->NewForeign(&Accessors::FunctionCaller));
   PropertyAttributes attribs = static_cast<PropertyAttributes>(
@@ -401,8 +400,11 @@
                           length, attribs);
     map->AppendDescriptor(&d);
   }
+  Handle<AccessorInfo> name =
+      Accessors::FunctionNameInfo(isolate(), attribs);
   {  // Add name.
-    CallbacksDescriptor d(factory()->name_string(), name, attribs);
+    CallbacksDescriptor d(Handle<Name>(Name::cast(name->name())),
+                          name, attribs);
     map->AppendDescriptor(&d);
   }
   {  // Add arguments.
@@ -519,7 +521,6 @@
   int size = (prototypeMode == DONT_ADD_PROTOTYPE) ? 4 : 5;
   Map::EnsureDescriptorSlack(map, size);
 
-  Handle<Foreign> name(factory()->NewForeign(&Accessors::FunctionName));
   Handle<AccessorPair> arguments(factory()->NewAccessorPair());
   Handle<AccessorPair> caller(factory()->NewAccessorPair());
   PropertyAttributes rw_attribs =
@@ -534,8 +535,11 @@
                           length, ro_attribs);
     map->AppendDescriptor(&d);
   }
+  Handle<AccessorInfo> name =
+      Accessors::FunctionNameInfo(isolate(), ro_attribs);
   {  // Add name.
-    CallbacksDescriptor d(factory()->name_string(), name, ro_attribs);
+    CallbacksDescriptor d(Handle<Name>(Name::cast(name->name())),
+                          name, ro_attribs);
     map->AppendDescriptor(&d);
   }
   {  // Add arguments.
diff --git a/src/code-stubs-hydrogen.cc b/src/code-stubs-hydrogen.cc
index 342e317..68c9cd5 100644
--- a/src/code-stubs-hydrogen.cc
+++ b/src/code-stubs-hydrogen.cc
@@ -150,9 +150,9 @@
   bool runtime_stack_params = descriptor_->stack_parameter_count_.is_valid();
   HInstruction* stack_parameter_count = NULL;
   for (int i = 0; i < param_count; ++i) {
-    Representation r = descriptor_->register_param_representations_ == NULL
-        ? Representation::Tagged()
-        : descriptor_->register_param_representations_[i];
+    Representation r = descriptor_->IsParameterCountRegister(i)
+        ? Representation::Integer32()
+        : Representation::Tagged();
     HParameter* param = Add<HParameter>(i, HParameter::REGISTER_PARAMETER, r);
     start_environment->Bind(i, param);
     parameters_[i] = param;
@@ -353,6 +353,8 @@
   Factory* factory = isolate()->factory();
   HValue* undefined = graph()->GetConstantUndefined();
   AllocationSiteMode alloc_site_mode = casted_stub()->allocation_site_mode();
+  FastCloneShallowArrayStub::Mode mode = casted_stub()->mode();
+  int length = casted_stub()->length();
 
   HInstruction* allocation_site = Add<HLoadKeyed>(GetParameter(0),
                                                   GetParameter(1),
@@ -367,40 +369,46 @@
       AllocationSite::kTransitionInfoOffset);
   HInstruction* boilerplate = Add<HLoadNamedField>(
       allocation_site, static_cast<HValue*>(NULL), access);
-  HValue* elements = AddLoadElements(boilerplate);
-  HValue* capacity = AddLoadFixedArrayLength(elements);
-  IfBuilder zero_capacity(this);
-  zero_capacity.If<HCompareNumericAndBranch>(capacity, graph()->GetConstant0(),
-                                           Token::EQ);
-  zero_capacity.Then();
-  Push(BuildCloneShallowArrayEmpty(boilerplate,
-                                   allocation_site,
-                                   alloc_site_mode));
-  zero_capacity.Else();
-  IfBuilder if_fixed_cow(this);
-  if_fixed_cow.If<HCompareMap>(elements, factory->fixed_cow_array_map());
-  if_fixed_cow.Then();
-  Push(BuildCloneShallowArrayCow(boilerplate,
-                                 allocation_site,
-                                 alloc_site_mode,
-                                 FAST_ELEMENTS));
-  if_fixed_cow.Else();
-  IfBuilder if_fixed(this);
-  if_fixed.If<HCompareMap>(elements, factory->fixed_array_map());
-  if_fixed.Then();
-  Push(BuildCloneShallowArrayNonEmpty(boilerplate,
-                                      allocation_site,
-                                      alloc_site_mode,
-                                      FAST_ELEMENTS));
+  HValue* push_value;
+  if (mode == FastCloneShallowArrayStub::CLONE_ANY_ELEMENTS) {
+    HValue* elements = AddLoadElements(boilerplate);
 
-  if_fixed.Else();
-  Push(BuildCloneShallowArrayNonEmpty(boilerplate,
-                                      allocation_site,
-                                      alloc_site_mode,
-                                      FAST_DOUBLE_ELEMENTS));
-  if_fixed.End();
-  if_fixed_cow.End();
-  zero_capacity.End();
+    IfBuilder if_fixed_cow(this);
+    if_fixed_cow.If<HCompareMap>(elements, factory->fixed_cow_array_map());
+    if_fixed_cow.Then();
+    push_value = BuildCloneShallowArray(boilerplate,
+                                        allocation_site,
+                                        alloc_site_mode,
+                                        FAST_ELEMENTS,
+                                        0/*copy-on-write*/);
+    environment()->Push(push_value);
+    if_fixed_cow.Else();
+
+    IfBuilder if_fixed(this);
+    if_fixed.If<HCompareMap>(elements, factory->fixed_array_map());
+    if_fixed.Then();
+    push_value = BuildCloneShallowArray(boilerplate,
+                                        allocation_site,
+                                        alloc_site_mode,
+                                        FAST_ELEMENTS,
+                                        length);
+    environment()->Push(push_value);
+    if_fixed.Else();
+    push_value = BuildCloneShallowArray(boilerplate,
+                                        allocation_site,
+                                        alloc_site_mode,
+                                        FAST_DOUBLE_ELEMENTS,
+                                        length);
+    environment()->Push(push_value);
+  } else {
+    ElementsKind elements_kind = casted_stub()->ComputeElementsKind();
+    push_value = BuildCloneShallowArray(boilerplate,
+                                        allocation_site,
+                                        alloc_site_mode,
+                                        elements_kind,
+                                        length);
+    environment()->Push(push_value);
+  }
 
   checker.ElseDeopt("Uninitialized boilerplate literals");
   checker.End();
diff --git a/src/code-stubs.cc b/src/code-stubs.cc
index 90d36a4..353a228 100644
--- a/src/code-stubs.cc
+++ b/src/code-stubs.cc
@@ -45,7 +45,6 @@
       hint_stack_parameter_count_(-1),
       function_mode_(NOT_JS_FUNCTION_STUB_MODE),
       register_params_(NULL),
-      register_param_representations_(NULL),
       deoptimization_handler_(NULL),
       handler_arguments_mode_(DONT_PASS_ARGUMENTS),
       miss_handler_(),
@@ -753,7 +752,9 @@
 
 // static
 void FastCloneShallowArrayStub::InstallDescriptors(Isolate* isolate) {
-  FastCloneShallowArrayStub stub(isolate, DONT_TRACK_ALLOCATION_SITE);
+  FastCloneShallowArrayStub stub(isolate,
+                                 FastCloneShallowArrayStub::CLONE_ELEMENTS,
+                                 DONT_TRACK_ALLOCATION_SITE, 0);
   InstallDescriptor(isolate, &stub);
 }
 
diff --git a/src/code-stubs.h b/src/code-stubs.h
index 23acaef..f337137 100644
--- a/src/code-stubs.h
+++ b/src/code-stubs.h
@@ -300,7 +300,6 @@
   int hint_stack_parameter_count_;
   StubFunctionMode function_mode_;
   Register* register_params_;
-  Representation* register_param_representations_;
 
   Address deoptimization_handler_;
   HandlerArgumentsMode handler_arguments_mode_;
@@ -605,18 +604,50 @@
 class FastCloneShallowArrayStub : public HydrogenCodeStub {
  public:
   // Maximum length of copied elements array.
-  static const int kMaximumInlinedCloneLength = 8;
+  static const int kMaximumClonedLength = 8;
+  enum Mode {
+    CLONE_ELEMENTS,
+    CLONE_DOUBLE_ELEMENTS,
+    COPY_ON_WRITE_ELEMENTS,
+    CLONE_ANY_ELEMENTS,
+    LAST_CLONE_MODE = CLONE_ANY_ELEMENTS
+  };
+
+  static const int kFastCloneModeCount = LAST_CLONE_MODE + 1;
 
   FastCloneShallowArrayStub(Isolate* isolate,
-                            AllocationSiteMode allocation_site_mode)
+                            Mode mode,
+                            AllocationSiteMode allocation_site_mode,
+                            int length)
       : HydrogenCodeStub(isolate),
-      allocation_site_mode_(allocation_site_mode) {}
+        mode_(mode),
+        allocation_site_mode_(allocation_site_mode),
+        length_((mode == COPY_ON_WRITE_ELEMENTS) ? 0 : length) {
+    ASSERT_GE(length_, 0);
+    ASSERT_LE(length_, kMaximumClonedLength);
+  }
 
+  Mode mode() const { return mode_; }
+  int length() const { return length_; }
   AllocationSiteMode allocation_site_mode() const {
     return allocation_site_mode_;
   }
 
-  virtual Handle<Code> GenerateCode();
+  ElementsKind ComputeElementsKind() const {
+    switch (mode()) {
+      case CLONE_ELEMENTS:
+      case COPY_ON_WRITE_ELEMENTS:
+        return FAST_ELEMENTS;
+      case CLONE_DOUBLE_ELEMENTS:
+        return FAST_DOUBLE_ELEMENTS;
+      case CLONE_ANY_ELEMENTS:
+        /*fall-through*/;
+    }
+    UNREACHABLE();
+    return LAST_ELEMENTS_KIND;
+  }
+
+  virtual Handle<Code> GenerateCode() V8_OVERRIDE;
 
   virtual void InitializeInterfaceDescriptor(
       CodeStubInterfaceDescriptor* descriptor) V8_OVERRIDE;
@@ -624,13 +655,22 @@
   static void InstallDescriptors(Isolate* isolate);
 
  private:
+  Mode mode_;
   AllocationSiteMode allocation_site_mode_;
+  int length_;
 
   class AllocationSiteModeBits: public BitField<AllocationSiteMode, 0, 1> {};
+  class ModeBits: public BitField<Mode, 1, 4> {};
+  class LengthBits: public BitField<int, 5, 4> {};
   // Ensure data fits within available bits.
+  STATIC_ASSERT(LAST_ALLOCATION_SITE_MODE == 1);
+  STATIC_ASSERT(kFastCloneModeCount < 16);
+  STATIC_ASSERT(kMaximumClonedLength < 16);
   Major MajorKey() { return FastCloneShallowArray; }
   int NotMissMinorKey() {
-    return AllocationSiteModeBits::encode(allocation_site_mode_);
+    return AllocationSiteModeBits::encode(allocation_site_mode_)
+        | ModeBits::encode(mode_)
+        | LengthBits::encode(length_);
   }
 };
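
NotMissMinorKey packs three fields into one integer via BitField; a
self-contained equivalent of the layout above:

    #include <cstdint>

    template <typename T, int shift, int size>
    struct BitField {  // simplified stand-in for V8's BitField
      static uint32_t encode(T value) {
        return static_cast<uint32_t>(value) << shift;
      }
      static T decode(uint32_t packed) {
        return static_cast<T>((packed >> shift) & ((1u << size) - 1));
      }
    };

    typedef BitField<int, 0, 1> SiteModeBits;  // 1 bit: 2 site modes
    typedef BitField<int, 1, 4> ModeBits;      // 4 bits: 4 clone modes fit
    typedef BitField<int, 5, 4> LengthBits;    // 4 bits: lengths 0..8 fit

    uint32_t MakeKey(int site_mode, int mode, int length) {
      return SiteModeBits::encode(site_mode) | ModeBits::encode(mode) |
             LengthBits::encode(length);
    }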
 
diff --git a/src/conversions.cc b/src/conversions.cc
index 1d5ed35..0b83a83 100644
--- a/src/conversions.cc
+++ b/src/conversions.cc
@@ -29,8 +29,13 @@
 #include <limits.h>
 #include <cmath>
 
+#include "v8.h"
+
+#include "assert-scope.h"
+#include "conversions.h"
 #include "conversions-inl.h"
 #include "dtoa.h"
+#include "factory.h"
 #include "list-inl.h"
 #include "strtod.h"
 #include "utils.h"
@@ -44,6 +49,47 @@
 namespace internal {
 
 
+namespace {
+
+// C++-style iterator adaptor for StringCharacterStream
+// (unlike C++ iterators, the end marker has a different type).
+class StringCharacterStreamIterator {
+ public:
+  class EndMarker {};
+
+  explicit StringCharacterStreamIterator(StringCharacterStream* stream);
+
+  uint16_t operator*() const;
+  void operator++();
+  bool operator==(EndMarker const&) const { return end_; }
+  bool operator!=(EndMarker const& m) const { return !end_; }
+
+ private:
+  StringCharacterStream* const stream_;
+  uint16_t current_;
+  bool end_;
+};
+
+
+StringCharacterStreamIterator::StringCharacterStreamIterator(
+    StringCharacterStream* stream) : stream_(stream) {
+  ++(*this);
+}
+
+uint16_t StringCharacterStreamIterator::operator*() const {
+  return current_;
+}
+
+
+void StringCharacterStreamIterator::operator++() {
+  end_ = !stream_->HasMore();
+  if (!end_) {
+    current_ = stream_->GetNext();
+  }
+}
+}  // End anonymous namespace.
+
+
 double StringToDouble(UnicodeCache* unicode_cache,
                       const char* str, int flags, double empty_string_val) {
   // We cast to const uint8_t* here to avoid instantiating the
@@ -273,7 +319,6 @@
 }
 
 
-
 char* DoubleToExponentialCString(double value, int f) {
   const int kMaxDigitsAfterPoint = 20;
   // f might be -1 to signal that f was undefined in JavaScript.
@@ -460,4 +505,22 @@
   return builder.Finalize();
 }
 
+
+double StringToDouble(UnicodeCache* unicode_cache,
+                      String* string,
+                      int flags,
+                      double empty_string_val) {
+  DisallowHeapAllocation no_gc;
+  String::FlatContent flat = string->GetFlatContent();
+  // ECMA-262 section 15.1.2.3, empty string is NaN
+  if (flat.IsAscii()) {
+    return StringToDouble(
+        unicode_cache, flat.ToOneByteVector(), flags, empty_string_val);
+  } else {
+    return StringToDouble(
+        unicode_cache, flat.ToUC16Vector(), flags, empty_string_val);
+  }
+}
+
+
 } }  // namespace v8::internal
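
Hedged usage sketch for the adaptor above (it lives in an anonymous namespace,
so this shows shape only; StringCharacterStream is V8-internal):

    // Range-style loop over a character stream using the sentinel type.
    void CountNonSpace(StringCharacterStream* stream, int* count) {
      StringCharacterStreamIterator::EndMarker end;
      for (StringCharacterStreamIterator it(stream); it != end; ++it) {
        if (*it != ' ') ++(*count);
      }
    }
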
diff --git a/src/conversions.h b/src/conversions.h
index c8484e4..12f895d 100644
--- a/src/conversions.h
+++ b/src/conversions.h
@@ -28,6 +28,11 @@
 #ifndef V8_CONVERSIONS_H_
 #define V8_CONVERSIONS_H_
 
+#include <limits>
+
+#include "checks.h"
+#include "handles.h"
+#include "objects.h"
 #include "utils.h"
 
 namespace v8 {
@@ -163,6 +168,77 @@
 char* DoubleToPrecisionCString(double value, int f);
 char* DoubleToRadixCString(double value, int radix);
 
+
+static inline bool IsMinusZero(double value) {
+  static const DoubleRepresentation minus_zero(-0.0);
+  return DoubleRepresentation(value) == minus_zero;
+}
+
+
+// Integer32 is an integer that can be represented as a signed 32-bit
+// integer. It has to be in the range [-2^31, 2^31 - 1].
+// We also have to check for negative 0 as it is not an Integer32.
+static inline bool IsInt32Double(double value) {
+  return !IsMinusZero(value) &&
+         value >= kMinInt &&
+         value <= kMaxInt &&
+         value == FastI2D(FastD2I(value));
+}
+
+
+// Convert from Number object to C integer.
+inline int32_t NumberToInt32(Object* number) {
+  if (number->IsSmi()) return Smi::cast(number)->value();
+  return DoubleToInt32(number->Number());
+}
+
+
+inline uint32_t NumberToUint32(Object* number) {
+  if (number->IsSmi()) return Smi::cast(number)->value();
+  return DoubleToUint32(number->Number());
+}
+
+
+double StringToDouble(UnicodeCache* unicode_cache,
+                      String* string,
+                      int flags,
+                      double empty_string_val = 0.0);
+
+
+inline bool TryNumberToSize(Isolate* isolate,
+                            Object* number, size_t* result) {
+  SealHandleScope shs(isolate);
+  if (number->IsSmi()) {
+    int value = Smi::cast(number)->value();
+    ASSERT(static_cast<unsigned>(Smi::kMaxValue)
+           <= std::numeric_limits<size_t>::max());
+    if (value >= 0) {
+      *result = static_cast<size_t>(value);
+      return true;
+    }
+    return false;
+  } else {
+    ASSERT(number->IsHeapNumber());
+    double value = HeapNumber::cast(number)->value();
+    if (value >= 0 &&
+        value <= std::numeric_limits<size_t>::max()) {
+      *result = static_cast<size_t>(value);
+      return true;
+    } else {
+      return false;
+    }
+  }
+}
+
+// Converts a number into size_t.
+inline size_t NumberToSize(Isolate* isolate,
+                           Object* number) {
+  size_t result = 0;
+  bool is_valid = TryNumberToSize(isolate, number, &result);
+  CHECK(is_valid);
+  return result;
+}
+
 } }  // namespace v8::internal
 
 #endif  // V8_CONVERSIONS_H_
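
IsMinusZero compares bit patterns because -0.0 == 0.0 holds numerically; a
standalone demonstration:

    #include <cstdint>
    #include <cstdio>
    #include <cstring>

    static bool IsMinusZeroBits(double value) {
      uint64_t bits, minus_zero_bits;
      const double minus_zero = -0.0;
      std::memcpy(&bits, &value, sizeof(bits));
      std::memcpy(&minus_zero_bits, &minus_zero, sizeof(minus_zero_bits));
      return bits == minus_zero_bits;  // only -0.0 matches
    }

    int main() {
      std::printf("%d %d\n", -0.0 == 0.0, IsMinusZeroBits(-0.0));  // "1 1"
      return 0;
    }
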
diff --git a/src/cpu.h b/src/cpu.h
index b2e9f7da..b1ca746 100644
--- a/src/cpu.h
+++ b/src/cpu.h
@@ -102,11 +102,6 @@
   // Returns the number of processors online.
   static int NumberOfProcessorsOnline();
 
-  // Initializes the cpu architecture support. Called once at VM startup.
-  static void SetUp();
-
-  static bool SupportsCrankshaft();
-
   // Flush instruction cache.
   static void FlushICache(void* start, size_t size);
 
diff --git a/src/elements.cc b/src/elements.cc
index 79dc722..281d8b0 100644
--- a/src/elements.cc
+++ b/src/elements.cc
@@ -28,10 +28,10 @@
 #include "v8.h"
 
 #include "arguments.h"
-#include "objects.h"
+#include "conversions.h"
 #include "elements.h"
+#include "objects.h"
 #include "utils.h"
-#include "v8conversions.h"
 
 // Each concrete ElementsAccessor can handle exactly one ElementsKind,
 // several abstract ElementsAccessor classes are used to allow sharing
diff --git a/src/factory.cc b/src/factory.cc
index 32ff424..3c3b187 100644
--- a/src/factory.cc
+++ b/src/factory.cc
@@ -4,9 +4,9 @@
 
 #include "factory.h"
 
-#include "macro-assembler.h"
+#include "conversions.h"
 #include "isolate-inl.h"
-#include "v8conversions.h"
+#include "macro-assembler.h"
 
 namespace v8 {
 namespace internal {
@@ -1737,7 +1737,7 @@
   map->set_prototype(object->map()->prototype());
 
   // Allocate the backing storage for the properties.
-  int prop_size = map->unused_property_fields() - map->inobject_properties();
+  int prop_size = map->InitialPropertiesLength();
   Handle<FixedArray> properties = NewFixedArray(prop_size, TENURED);
 
   Heap* heap = isolate()->heap();
@@ -1788,7 +1788,7 @@
   ASSERT(map->instance_type() == object->map()->instance_type());
 
   // Allocate the backing storage for the properties.
-  int prop_size = map->unused_property_fields() - map->inobject_properties();
+  int prop_size = map->InitialPropertiesLength();
   Handle<FixedArray> properties = NewFixedArray(prop_size, TENURED);
 
   // In order to keep heap in consistent state there must be no allocations
diff --git a/src/flags.cc b/src/flags.cc
index 7feb51f..e241f55 100644
--- a/src/flags.cc
+++ b/src/flags.cc
@@ -360,10 +360,15 @@
 }
 
 
+bool FlagList::serializer_enabled_ = false;
+
+
 // static
 int FlagList::SetFlagsFromCommandLine(int* argc,
                                       char** argv,
-                                      bool remove_flags) {
+                                      bool remove_flags,
+                                      bool serializer_enabled) {
+  serializer_enabled_ = serializer_enabled;
   int return_code = 0;
   // parse arguments
   for (int i = 1; i < *argc;) {
@@ -545,7 +550,7 @@
 void FlagList::PrintHelp() {
 #if V8_TARGET_ARCH_ARM
   CpuFeatures::PrintTarget();
-  CpuFeatures::ProbeWithoutIsolate();
+  CpuFeatures::Probe(serializer_enabled_);
   CpuFeatures::PrintFeatures();
 #endif  // V8_TARGET_ARCH_ARM
 
diff --git a/src/flags.h b/src/flags.h
index fe182e5..70376b0 100644
--- a/src/flags.h
+++ b/src/flags.h
@@ -63,7 +63,10 @@
   //   --flag=value  (non-bool flags only, no spaces around '=')
   //   --flag value  (non-bool flags only)
   //   --            (equivalent to --js_arguments, captures all remaining args)
-  static int SetFlagsFromCommandLine(int* argc, char** argv, bool remove_flags);
+  static int SetFlagsFromCommandLine(int* argc,
+                                     char** argv,
+                                     bool remove_flags,
+                                     bool serializer_enabled = false);
 
   // Set the flag values by parsing the string str. Splits string into argc
   // substrings argv[], each of which consisting of non-white-space chars,
@@ -78,6 +81,10 @@
 
   // Set flags as consequence of being implied by another flag.
   static void EnforceFlagImplications();
+
+ private:
+  // TODO(svenpanne) Remove this when Serializer/startup has been refactored.
+  static bool serializer_enabled_;
 };
 
 } }  // namespace v8::internal
diff --git a/src/heap-snapshot-generator.cc b/src/heap-snapshot-generator.cc
index 08c0f49..cb04f62 100644
--- a/src/heap-snapshot-generator.cc
+++ b/src/heap-snapshot-generator.cc
@@ -31,10 +31,10 @@
 
 #include "allocation-tracker.h"
 #include "code-stubs.h"
-#include "heap-profiler.h"
+#include "conversions.h"
 #include "debug.h"
+#include "heap-profiler.h"
 #include "types.h"
-#include "v8conversions.h"
 
 namespace v8 {
 namespace internal {
diff --git a/src/heap.cc b/src/heap.cc
index 0876783..ebf9f8f 100644
--- a/src/heap.cc
+++ b/src/heap.cc
@@ -32,6 +32,7 @@
 #include "bootstrapper.h"
 #include "codegen.h"
 #include "compilation-cache.h"
+#include "conversions.h"
 #include "cpu-profiler.h"
 #include "debug.h"
 #include "deoptimizer.h"
@@ -50,7 +51,6 @@
 #include "store-buffer.h"
 #include "utils/random-number-generator.h"
 #include "utils.h"
-#include "v8conversions.h"
 #include "v8threads.h"
 #include "vm-state-inl.h"
 #if V8_TARGET_ARCH_ARM && !V8_INTERPRETED_REGEXP
diff --git a/src/hydrogen-instructions.h b/src/hydrogen-instructions.h
index d1684d7..55a5735 100644
--- a/src/hydrogen-instructions.h
+++ b/src/hydrogen-instructions.h
@@ -32,13 +32,13 @@
 
 #include "allocation.h"
 #include "code-stubs.h"
+#include "conversions.h"
 #include "data-flow.h"
 #include "deoptimizer.h"
 #include "small-pointer-list.h"
 #include "string-stream.h"
 #include "unique.h"
 #include "utils.h"
-#include "v8conversions.h"
 #include "zone.h"
 
 namespace v8 {
diff --git a/src/hydrogen.cc b/src/hydrogen.cc
index bee033d..76360c5 100644
--- a/src/hydrogen.cc
+++ b/src/hydrogen.cc
@@ -2414,26 +2414,15 @@
 }
 
 
-HLoadNamedField* HGraphBuilder::AddLoadElements(HValue* object,
-                                                HValue* dependency) {
+HLoadNamedField* HGraphBuilder::AddLoadElements(HValue* object) {
   return Add<HLoadNamedField>(
-      object, dependency, HObjectAccess::ForElementsPointer());
+      object, static_cast<HValue*>(NULL), HObjectAccess::ForElementsPointer());
 }
 
 
-HLoadNamedField* HGraphBuilder::AddLoadFixedArrayLength(
-    HValue* array,
-    HValue* dependency) {
+HLoadNamedField* HGraphBuilder::AddLoadFixedArrayLength(HValue* object) {
   return Add<HLoadNamedField>(
-      array, dependency, HObjectAccess::ForFixedArrayLength());
-}
-
-
-HLoadNamedField* HGraphBuilder::AddLoadArrayLength(HValue* array,
-                                                   ElementsKind kind,
-                                                   HValue* dependency) {
-  return Add<HLoadNamedField>(
-      array, dependency, HObjectAccess::ForArrayLength(kind));
+      object, static_cast<HValue*>(NULL), HObjectAccess::ForFixedArrayLength());
 }
 
 
@@ -2474,8 +2463,9 @@
   HValue* new_elements = BuildAllocateElementsAndInitializeElementsHeader(
       new_kind, new_capacity);
 
-  BuildCopyElements(object, elements, kind, new_elements,
-                    new_kind, length, new_capacity);
+  BuildCopyElements(elements, kind,
+                    new_elements, new_kind,
+                    length, new_capacity);
 
   Add<HStoreNamedField>(object, HObjectAccess::ForElementsPointer(),
                         new_elements);
@@ -2488,8 +2478,8 @@
                                               ElementsKind elements_kind,
                                               HValue* from,
                                               HValue* to) {
-  // Fast elements kinds need to be initialized in case statements below cause a
-  // garbage collection.
+  // Fast elements kinds need to be initialized in case statements below cause
+  // a garbage collection.
   Factory* factory = isolate()->factory();
 
   double nan_double = FixedDoubleArray::hole_nan_as_double();
@@ -2533,137 +2523,93 @@
 }
 
 
-void HGraphBuilder::BuildCopyElements(HValue* array,
-                                      HValue* from_elements,
+void HGraphBuilder::BuildCopyElements(HValue* from_elements,
                                       ElementsKind from_elements_kind,
                                       HValue* to_elements,
                                       ElementsKind to_elements_kind,
                                       HValue* length,
                                       HValue* capacity) {
-  int constant_capacity = -1;
-  if (capacity->IsConstant() &&
-      HConstant::cast(capacity)->HasInteger32Value()) {
-    int constant_candidate = HConstant::cast(capacity)->Integer32Value();
-    if (constant_candidate <=
-        FastCloneShallowArrayStub::kMaximumInlinedCloneLength) {
-      constant_capacity = constant_candidate;
-    }
-  }
-
-  if (constant_capacity != -1) {
-    // Unroll the loop for small elements kinds.
-    for (int i = 0; i < constant_capacity; i++) {
-      HValue* key_constant = Add<HConstant>(i);
-      HInstruction* value = Add<HLoadKeyed>(from_elements, key_constant,
-                                            static_cast<HValue*>(NULL),
-                                            from_elements_kind);
-      Add<HStoreKeyed>(to_elements, key_constant, value, to_elements_kind);
-    }
-  } else {
-    bool pre_fill_with_holes =
+  bool pre_fill_with_holes =
       IsFastDoubleElementsKind(from_elements_kind) &&
       IsFastObjectElementsKind(to_elements_kind);
 
-    if (pre_fill_with_holes) {
-      // If the copy might trigger a GC, make sure that the FixedArray is
-      // pre-initialized with holes to make sure that it's always in a
-      // consistent state.
-      BuildFillElementsWithHole(to_elements, to_elements_kind,
-                                graph()->GetConstant0(), capacity);
-    }
+  if (pre_fill_with_holes) {
+    // If the copy might trigger a GC, make sure that the FixedArray is
+    // pre-initialized with holes to make sure that it's always in a consistent
+    // state.
+    BuildFillElementsWithHole(to_elements, to_elements_kind,
+                              graph()->GetConstant0(), capacity);
+  }
 
-    LoopBuilder builder(this, context(), LoopBuilder::kPostDecrement);
+  LoopBuilder builder(this, context(), LoopBuilder::kPostIncrement);
 
-    // Be very careful to copy the elements up to length backwards down to
-    // zero. This eliminates the need to keep length alive through the loop,
-    // since the termination condition compares to a constant. This reduces
-    // register pressure in code stubs that otherwise would spill and create
-    // a stack frame.
-    HValue* decremented_length = AddUncasted<HSub>(length,
-                                                   graph()->GetConstant1());
-    decremented_length->ClearFlag(HValue::kCanOverflow);
-    HValue* key = builder.BeginBody(decremented_length, Add<HConstant>(-1),
-                                    Token::NE);
-    HValue* element = Add<HLoadKeyed>(from_elements, key,
-                                      static_cast<HValue*>(NULL),
-                                      from_elements_kind,
-                                      ALLOW_RETURN_HOLE);
+  HValue* key = builder.BeginBody(graph()->GetConstant0(), length, Token::LT);
 
-    ElementsKind kind = (IsHoleyElementsKind(from_elements_kind) &&
-                         IsFastSmiElementsKind(to_elements_kind))
+  HValue* element = Add<HLoadKeyed>(from_elements, key,
+                                    static_cast<HValue*>(NULL),
+                                    from_elements_kind,
+                                    ALLOW_RETURN_HOLE);
+
+  ElementsKind kind = (IsHoleyElementsKind(from_elements_kind) &&
+                       IsFastSmiElementsKind(to_elements_kind))
       ? FAST_HOLEY_ELEMENTS : to_elements_kind;
 
-    if (IsHoleyElementsKind(from_elements_kind) &&
-        from_elements_kind != to_elements_kind) {
-      IfBuilder if_hole(this);
-      if_hole.If<HCompareHoleAndBranch>(element);
-      if_hole.Then();
-      HConstant* hole_constant = IsFastDoubleElementsKind(to_elements_kind)
+  if (IsHoleyElementsKind(from_elements_kind) &&
+      from_elements_kind != to_elements_kind) {
+    IfBuilder if_hole(this);
+    if_hole.If<HCompareHoleAndBranch>(element);
+    if_hole.Then();
+    HConstant* hole_constant = IsFastDoubleElementsKind(to_elements_kind)
         ? Add<HConstant>(FixedDoubleArray::hole_nan_as_double())
         : graph()->GetConstantHole();
-      Add<HStoreKeyed>(to_elements, key, hole_constant, kind);
-      if_hole.Else();
-      HStoreKeyed* store = Add<HStoreKeyed>(to_elements, key, element, kind);
-      store->SetFlag(HValue::kAllowUndefinedAsNaN);
-      if_hole.End();
-    } else {
-      HStoreKeyed* store = Add<HStoreKeyed>(to_elements, key, element, kind);
-      store->SetFlag(HValue::kAllowUndefinedAsNaN);
-    }
-
-    builder.EndBody();
-
-    if (!pre_fill_with_holes && !length->Equals(capacity)) {
-      // Force an explicit reload of capacity and length from locations where we
-      // know they are available. This caps their live ranges before entering
-      // the the element copy loop above, reducing register pressure enough to
-      // not spill and create stack frames for performance-critical array stubs
-      // on platforms with a small register set.
-      capacity = AddLoadFixedArrayLength(to_elements);
-      capacity->ClearFlag(HValue::kUseGVN);
-      length = AddLoadArrayLength(array, to_elements_kind);
-      length->ClearFlag(HValue::kUseGVN);
-      // Fill unused capacity with the hole.
-      BuildFillElementsWithHole(to_elements, to_elements_kind,
-                                length, capacity);
-    }
+    Add<HStoreKeyed>(to_elements, key, hole_constant, kind);
+    if_hole.Else();
+    HStoreKeyed* store = Add<HStoreKeyed>(to_elements, key, element, kind);
+    store->SetFlag(HValue::kAllowUndefinedAsNaN);
+    if_hole.End();
+  } else {
+    HStoreKeyed* store = Add<HStoreKeyed>(to_elements, key, element, kind);
+    store->SetFlag(HValue::kAllowUndefinedAsNaN);
   }
 
-  Counters* counters = isolate()->counters();
-  AddIncrementCounter(counters->inlined_copyied_elements());
+  builder.EndBody();
+
+  if (!pre_fill_with_holes && length != capacity) {
+    // Fill unused capacity with the hole.
+    BuildFillElementsWithHole(to_elements, to_elements_kind,
+                              key, capacity);
+  }
 }
 
-HValue* HGraphBuilder::BuildCloneShallowArrayCommon(
-    HValue* boilerplate,
-    HValue* allocation_site,
-    HValue* extra_size,
-    HValue** return_elements,
-    AllocationSiteMode mode) {
+
+HValue* HGraphBuilder::BuildCloneShallowArray(HValue* boilerplate,
+                                              HValue* allocation_site,
+                                              AllocationSiteMode mode,
+                                              ElementsKind kind,
+                                              int length) {
+  NoObservableSideEffectsScope no_effects(this);
+
   // All sizes here are multiples of kPointerSize.
-  int array_size = JSArray::kSize;
+  int size = JSArray::kSize;
   if (mode == TRACK_ALLOCATION_SITE) {
-    array_size += AllocationMemento::kSize;
+    size += AllocationMemento::kSize;
   }
 
-  HValue* size_in_bytes = Add<HConstant>(array_size);
-  if (extra_size != NULL) {
-    size_in_bytes = AddUncasted<HAdd>(extra_size, size_in_bytes);
-    size_in_bytes->ClearFlag(HValue::kCanOverflow);
-  }
-
+  HValue* size_in_bytes = Add<HConstant>(size);
   HInstruction* object = Add<HAllocate>(size_in_bytes,
                                         HType::JSObject(),
                                         NOT_TENURED,
                                         JS_OBJECT_TYPE);
 
   // Copy the JS array part.
-  HValue* map = Add<HLoadNamedField>(boilerplate,
-      static_cast<HValue*>(NULL), HObjectAccess::ForMap());
-  Add<HStoreNamedField>(object, HObjectAccess::ForPropertiesPointer(),
-      Add<HConstant>(isolate()->factory()->empty_fixed_array()),
-                     INITIALIZING_STORE);
-  Add<HStoreNamedField>(object, HObjectAccess::ForMap(), map,
-                        INITIALIZING_STORE);
+  for (int i = 0; i < JSArray::kSize; i += kPointerSize) {
+    if ((i != JSArray::kElementsOffset) || (length == 0)) {
+      HObjectAccess access = HObjectAccess::ForJSArrayOffset(i);
+      Add<HStoreNamedField>(
+          object, access, Add<HLoadNamedField>(
+              boilerplate, static_cast<HValue*>(NULL), access));
+    }
+  }
 
   // Create an allocation site info if requested.
   if (mode == TRACK_ALLOCATION_SITE) {
@@ -2671,97 +2617,54 @@
         object, Add<HConstant>(JSArray::kSize), allocation_site);
   }
 
-  if (extra_size != NULL) {
-    HValue* elements = Add<HInnerAllocatedObject>(object,
-        Add<HConstant>(array_size));
-    if (return_elements != NULL) *return_elements = elements;
+  if (length > 0) {
+    // We have to initialize the elements pointer if allocation folding is
+    // turned off.
+    if (!FLAG_use_gvn || !FLAG_use_allocation_folding) {
+      HConstant* empty_fixed_array = Add<HConstant>(
+          isolate()->factory()->empty_fixed_array());
+      Add<HStoreNamedField>(object, HObjectAccess::ForElementsPointer(),
+          empty_fixed_array, INITIALIZING_STORE);
+    }
+
+    HValue* boilerplate_elements = AddLoadElements(boilerplate);
+    HValue* object_elements;
+    if (IsFastDoubleElementsKind(kind)) {
+      HValue* elems_size = Add<HConstant>(FixedDoubleArray::SizeFor(length));
+      object_elements = Add<HAllocate>(elems_size, HType::Tagged(),
+          NOT_TENURED, FIXED_DOUBLE_ARRAY_TYPE);
+    } else {
+      HValue* elems_size = Add<HConstant>(FixedArray::SizeFor(length));
+      object_elements = Add<HAllocate>(elems_size, HType::Tagged(),
+          NOT_TENURED, FIXED_ARRAY_TYPE);
+    }
+    Add<HStoreNamedField>(object, HObjectAccess::ForElementsPointer(),
+                          object_elements);
+
+    // Copy the elements array header.
+    for (int i = 0; i < FixedArrayBase::kHeaderSize; i += kPointerSize) {
+      HObjectAccess access = HObjectAccess::ForFixedArrayHeader(i);
+      Add<HStoreNamedField>(
+          object_elements, access, Add<HLoadNamedField>(
+              boilerplate_elements, static_cast<HValue*>(NULL), access));
+    }
+
+    // Copy the elements array contents.
+    // TODO(mstarzinger): Teach HGraphBuilder::BuildCopyElements to unfold
+    // copying loops with constant length up to a given boundary and use this
+    // helper here instead.
+    for (int i = 0; i < length; i++) {
+      HValue* key_constant = Add<HConstant>(i);
+      HInstruction* value = Add<HLoadKeyed>(boilerplate_elements, key_constant,
+                                            static_cast<HValue*>(NULL), kind);
+      Add<HStoreKeyed>(object_elements, key_constant, value, kind);
+    }
   }
 
   return object;
 }
 
 
-HValue* HGraphBuilder::BuildCloneShallowArrayCow(HValue* boilerplate,
-                                                 HValue* allocation_site,
-                                                 AllocationSiteMode mode,
-                                                 ElementsKind kind) {
-  HValue* result = BuildCloneShallowArrayCommon(boilerplate,
-      allocation_site, NULL, NULL, mode);
-
-  HValue* length = AddLoadArrayLength(boilerplate, kind);
-  HValue* elements = AddLoadElements(boilerplate);
-  HObjectAccess access1 = HObjectAccess::ForArrayLength(kind);
-  HObjectAccess access2 = HObjectAccess::ForElementsPointer();
-  Add<HStoreNamedField>(result, access1, length, INITIALIZING_STORE);
-  Add<HStoreNamedField>(result, access2, elements, INITIALIZING_STORE);
-
-  return result;
-}
-
-
-HValue* HGraphBuilder::BuildCloneShallowArrayEmpty(HValue* boilerplate,
-                                                   HValue* allocation_site,
-                                                   AllocationSiteMode mode) {
-  HValue* result = BuildCloneShallowArrayCommon(boilerplate,
-     allocation_site, NULL, NULL, mode);
-
-  HObjectAccess access = HObjectAccess::ForArrayLength(FAST_ELEMENTS);
-  Add<HStoreNamedField>(result, access, graph()->GetConstant0(),
-                        INITIALIZING_STORE);
-  access = HObjectAccess::ForElementsPointer();
-  Add<HStoreNamedField>(result, access,
-      Add<HConstant>(isolate()->factory()->empty_fixed_array()),
-                     INITIALIZING_STORE);
-
-  return result;
-}
-
-
-HValue* HGraphBuilder::BuildCloneShallowArrayNonEmpty(HValue* boilerplate,
-                                                      HValue* allocation_site,
-                                                      AllocationSiteMode mode,
-                                                      ElementsKind kind) {
-  int elements_kind_size = IsFastDoubleElementsKind(kind)
-    ? kDoubleSize : kPointerSize;
-
-  HValue* boilerplate_elements = AddLoadElements(boilerplate);
-  HValue* capacity = AddLoadFixedArrayLength(boilerplate_elements);
-  HValue* extra = AddUncasted<HMul>(capacity,
-                                    Add<HConstant>(elements_kind_size));
-  extra->ClearFlag(HValue::kCanOverflow);
-  extra = AddUncasted<HAdd>(extra, Add<HConstant>(FixedArray::kHeaderSize));
-  extra->ClearFlag(HValue::kCanOverflow);
-  HValue* elements = NULL;
-  HValue* result = BuildCloneShallowArrayCommon(boilerplate,
-      allocation_site, extra, &elements, mode);
-
-  // Explicitly reload the boilerplate's elements. This frees up a register
-  // during the allocation which otherwise causes spillage in many common code
-  // sequences on platforms with tight register constraints.
-  boilerplate_elements = AddLoadElements(boilerplate);
-  boilerplate_elements->ClearFlag(HValue::kUseGVN);
-  HValue* length = Add<HLoadNamedField>(boilerplate, static_cast<HValue*>(NULL),
-                                        HObjectAccess::ForArrayLength(kind));
-  Add<HStoreNamedField>(result, HObjectAccess::ForElementsPointer(),
-                        elements, INITIALIZING_STORE);
-  Add<HStoreNamedField>(result, HObjectAccess::ForArrayLength(kind),
-      length, INITIALIZING_STORE);
-
-  // Copy the elements array header.
-  for (int i = 0; i < FixedArrayBase::kHeaderSize; i += kPointerSize) {
-    HObjectAccess access = HObjectAccess::ForFixedArrayHeader(i);
-    Add<HStoreNamedField>(elements, access,
-        Add<HLoadNamedField>(boilerplate_elements,
-                             static_cast<HValue*>(NULL), access));
-  }
-
-  BuildCopyElements(result, boilerplate_elements, kind, elements,
-                    kind, length, capacity);
-
-  return result;
-}
-
-
 void HGraphBuilder::BuildCompareNil(
     HValue* value,
     Type* type,
@@ -5114,9 +5017,9 @@
 static bool IsFastLiteral(Handle<JSObject> boilerplate,
                           int max_depth,
                           int* max_properties) {
-  if (boilerplate->map()->is_deprecated()) {
-    Handle<Object> result = JSObject::TryMigrateInstance(boilerplate);
-    if (result.is_null()) return false;
+  if (boilerplate->map()->is_deprecated() &&
+      !JSObject::TryMigrateInstance(boilerplate)) {
+    return false;
   }
 
   ASSERT(max_depth >= 0 && *max_properties >= 0);
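The consolidated BuildCloneShallowArray above specializes the clone stub on the literal's constant length (bounded by kMaximumClonedLength) and emits one load/store pair per element rather than a runtime copy loop. A standalone sketch of the payoff, with names and layout that are illustrative rather than V8's:

    #include <cstdio>

    // When the element count is a compile-time constant, the copy loop can
    // be fully unrolled, which is what the specialized stub does per element.
    template <int kLength>
    void CloneElements(const int (&boilerplate)[kLength], int (&clone)[kLength]) {
      for (int i = 0; i < kLength; ++i) {
        clone[i] = boilerplate[i];
      }
    }

    int main() {
      int boilerplate[3] = {1, 2, 3};
      int clone[3];
      CloneElements(boilerplate, clone);
      std::printf("%d %d %d\n", clone[0], clone[1], clone[2]);
      return 0;
    }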
diff --git a/src/hydrogen.h b/src/hydrogen.h
index 2099a80..c561ab1 100644
--- a/src/hydrogen.h
+++ b/src/hydrogen.h
@@ -1421,8 +1421,7 @@
     store_map->SkipWriteBarrier();
     return store_map;
   }
-  HLoadNamedField* AddLoadElements(HValue* object,
-                                   HValue* dependency = NULL);
+  HLoadNamedField* AddLoadElements(HValue* object);
 
   bool MatchRotateRight(HValue* left,
                         HValue* right,
@@ -1438,12 +1437,7 @@
                                Maybe<int> fixed_right_arg,
                                HAllocationMode allocation_mode);
 
-  HLoadNamedField* AddLoadFixedArrayLength(HValue *object,
-                                           HValue *dependency = NULL);
-
-  HLoadNamedField* AddLoadArrayLength(HValue *object,
-                                      ElementsKind kind,
-                                      HValue *dependency = NULL);
+  HLoadNamedField* AddLoadFixedArrayLength(HValue *object);
 
   HValue* AddLoadJSBuiltin(Builtins::JavaScript builtin);
 
@@ -1786,33 +1780,18 @@
                                  HValue* from,
                                  HValue* to);
 
-  void BuildCopyElements(HValue* array,
-                         HValue* from_elements,
+  void BuildCopyElements(HValue* from_elements,
                          ElementsKind from_elements_kind,
                          HValue* to_elements,
                          ElementsKind to_elements_kind,
                          HValue* length,
                          HValue* capacity);
 
-  HValue* BuildCloneShallowArrayCommon(HValue* boilerplate,
-                                       HValue* allocation_site,
-                                       HValue* extra_size,
-                                       HValue** return_elements,
-                                       AllocationSiteMode mode);
-
-  HValue* BuildCloneShallowArrayCow(HValue* boilerplate,
-                                    HValue* allocation_site,
-                                    AllocationSiteMode mode,
-                                    ElementsKind kind);
-
-  HValue* BuildCloneShallowArrayEmpty(HValue* boilerplate,
-                                      HValue* allocation_site,
-                                      AllocationSiteMode mode);
-
-  HValue* BuildCloneShallowArrayNonEmpty(HValue* boilerplate,
-                                         HValue* allocation_site,
-                                         AllocationSiteMode mode,
-                                         ElementsKind kind);
+  HValue* BuildCloneShallowArray(HValue* boilerplate,
+                                 HValue* allocation_site,
+                                 AllocationSiteMode mode,
+                                 ElementsKind kind,
+                                 int length);
 
   HValue* BuildElementIndexHash(HValue* index);
 
diff --git a/src/ia32/assembler-ia32.cc b/src/ia32/assembler-ia32.cc
index 407f93e..8438e07 100644
--- a/src/ia32/assembler-ia32.cc
+++ b/src/ia32/assembler-ia32.cc
@@ -89,13 +89,13 @@
 }
 
 
-void CpuFeatures::Probe() {
+void CpuFeatures::Probe(bool serializer_enabled) {
   ASSERT(!initialized_);
   ASSERT(supported_ == 0);
 #ifdef DEBUG
   initialized_ = true;
 #endif
-  if (Serializer::enabled()) {
+  if (serializer_enabled) {
     supported_ |= OS::CpuFeaturesImpliedByPlatform();
     return;  // No features if we might serialize.
   }
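Passing serializer_enabled explicitly removes Probe's dependence on the Serializer singleton, which cannot be queried before isolate initialization. A minimal sketch of the shape of this change; the feature helpers are stand-ins, not V8 functions:

    #include <cstdint>
    #include <cstdio>

    static uint64_t supported = 0;

    static uint64_t PlatformImpliedFeatures() { return 0x1; }  // stand-in
    static uint64_t RuntimeProbedFeatures()   { return 0x7; }  // stand-in

    // The caller decides whether a snapshot is being built; Probe no longer
    // consults global serializer state.
    void Probe(bool serializer_enabled) {
      if (serializer_enabled) {
        supported |= PlatformImpliedFeatures();  // snapshots must stay portable
        return;
      }
      supported |= RuntimeProbedFeatures();
    }

    int main() {
      Probe(/*serializer_enabled=*/true);
      std::printf("feature mask: %llx\n",
                  static_cast<unsigned long long>(supported));
      return 0;
    }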
diff --git a/src/ia32/assembler-ia32.h b/src/ia32/assembler-ia32.h
index 2a8e454..30c13a3 100644
--- a/src/ia32/assembler-ia32.h
+++ b/src/ia32/assembler-ia32.h
@@ -530,7 +530,7 @@
  public:
   // Detect features of the target CPU. Set safe defaults if the serializer
   // is enabled (snapshots must be portable).
-  static void Probe();
+  static void Probe(bool serializer_enabled);
 
   // Check whether a feature is supported by the target CPU.
   static bool IsSupported(CpuFeature f) {
@@ -564,6 +564,8 @@
            (cross_compile_ & mask) == mask;
   }
 
+  static bool SupportsCrankshaft() { return IsSupported(SSE2); }
+
  private:
   static bool Check(CpuFeature f, uint64_t set) {
     return (set & flag2set(f)) != 0;
diff --git a/src/ia32/code-stubs-ia32.cc b/src/ia32/code-stubs-ia32.cc
index 8826f51..68a1ad8 100644
--- a/src/ia32/code-stubs-ia32.cc
+++ b/src/ia32/code-stubs-ia32.cc
@@ -86,11 +86,6 @@
   static Register registers[] = { eax, ebx, ecx };
   descriptor->register_param_count_ = 3;
   descriptor->register_params_ = registers;
-  static Representation representations[] = {
-    Representation::Tagged(),
-    Representation::Smi(),
-    Representation::Tagged() };
-  descriptor->register_param_representations_ = representations;
   descriptor->deoptimization_handler_ =
       Runtime::FunctionForId(
           Runtime::kHiddenCreateArrayLiteralStubBailout)->entry;
@@ -222,11 +217,6 @@
     descriptor->stack_parameter_count_ = eax;
     descriptor->register_param_count_ = 3;
     descriptor->register_params_ = registers_variable_args;
-    static Representation representations[] = {
-        Representation::Tagged(),
-        Representation::Tagged(),
-        Representation::Integer32() };
-    descriptor->register_param_representations_ = representations;
   }
 
   descriptor->hint_stack_parameter_count_ = constant_stack_parameter_count;
@@ -254,10 +244,6 @@
     descriptor->stack_parameter_count_ = eax;
     descriptor->register_param_count_ = 2;
     descriptor->register_params_ = registers_variable_args;
-    static Representation representations[] = {
-        Representation::Tagged(),
-        Representation::Integer32() };
-    descriptor->register_param_representations_ = representations;
   }
 
   descriptor->hint_stack_parameter_count_ = constant_stack_parameter_count;
@@ -5001,7 +4987,8 @@
   __ lea(scratch, ApiParameterOperand(2));
   __ mov(ApiParameterOperand(0), scratch);
 
-  Address thunk_address = FUNCTION_ADDR(&InvokeFunctionCallback);
+  ExternalReference thunk_ref =
+      ExternalReference::invoke_function_callback(isolate());
 
   Operand context_restore_operand(ebp,
                                   (2 + FCA::kContextSaveIndex) * kPointerSize);
@@ -5014,7 +5001,7 @@
   }
   Operand return_value_operand(ebp, return_value_offset * kPointerSize);
   __ CallApiFunctionAndReturn(api_function_address,
-                              thunk_address,
+                              thunk_ref,
                               ApiParameterOperand(1),
                               argc + FCA::kArgsLength + 1,
                               return_value_operand,
@@ -5049,10 +5036,11 @@
   __ add(scratch, Immediate(kPointerSize));
   __ mov(ApiParameterOperand(1), scratch);  // arguments pointer.
 
-  Address thunk_address = FUNCTION_ADDR(&InvokeAccessorGetterCallback);
+  ExternalReference thunk_ref =
+      ExternalReference::invoke_accessor_getter_callback(isolate());
 
   __ CallApiFunctionAndReturn(api_function_address,
-                              thunk_address,
+                              thunk_ref,
                               ApiParameterOperand(2),
                               kStackSpace,
                               Operand(ebp, 7 * kPointerSize),
diff --git a/src/ia32/cpu-ia32.cc b/src/ia32/cpu-ia32.cc
index 5fb04fc..3ea70f2 100644
--- a/src/ia32/cpu-ia32.cc
+++ b/src/ia32/cpu-ia32.cc
@@ -41,16 +41,6 @@
 namespace v8 {
 namespace internal {
 
-void CPU::SetUp() {
-  CpuFeatures::Probe();
-}
-
-
-bool CPU::SupportsCrankshaft() {
-  return CpuFeatures::IsSupported(SSE2);
-}
-
-
 void CPU::FlushICache(void* start, size_t size) {
   // No need to flush the instruction cache on Intel. On Intel instruction
   // cache flushing is only necessary when multiple cores running the same
diff --git a/src/ia32/full-codegen-ia32.cc b/src/ia32/full-codegen-ia32.cc
index 9854bb6..bfdc7bb 100644
--- a/src/ia32/full-codegen-ia32.cc
+++ b/src/ia32/full-codegen-ia32.cc
@@ -1758,7 +1758,24 @@
     allocation_site_mode = DONT_TRACK_ALLOCATION_SITE;
   }
 
-  if (expr->depth() > 1) {
+  Heap* heap = isolate()->heap();
+  if (has_constant_fast_elements &&
+      constant_elements_values->map() == heap->fixed_cow_array_map()) {
+    // If the elements are already FAST_*_ELEMENTS, the boilerplate cannot
+    // change, so it's possible to specialize the stub in advance.
+    __ IncrementCounter(isolate()->counters()->cow_arrays_created_stub(), 1);
+    __ mov(ebx, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
+    __ mov(eax, FieldOperand(ebx, JSFunction::kLiteralsOffset));
+    __ mov(ebx, Immediate(Smi::FromInt(expr->literal_index())));
+    __ mov(ecx, Immediate(constant_elements));
+    FastCloneShallowArrayStub stub(
+        isolate(),
+        FastCloneShallowArrayStub::COPY_ON_WRITE_ELEMENTS,
+        allocation_site_mode,
+        length);
+    __ CallStub(&stub);
+  } else if (expr->depth() > 1 || Serializer::enabled() ||
+             length > FastCloneShallowArrayStub::kMaximumClonedLength) {
     __ mov(ebx, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
     __ push(FieldOperand(ebx, JSFunction::kLiteralsOffset));
     __ push(Immediate(Smi::FromInt(expr->literal_index())));
@@ -1766,11 +1783,25 @@
     __ push(Immediate(Smi::FromInt(flags)));
     __ CallRuntime(Runtime::kHiddenCreateArrayLiteral, 4);
   } else {
+    ASSERT(IsFastSmiOrObjectElementsKind(constant_elements_kind) ||
+           FLAG_smi_only_arrays);
+    FastCloneShallowArrayStub::Mode mode =
+        FastCloneShallowArrayStub::CLONE_ANY_ELEMENTS;
+
+    // If the elements are already FAST_*_ELEMENTS, the boilerplate cannot
+    // change, so it's possible to specialize the stub in advance.
+    if (has_constant_fast_elements) {
+      mode = FastCloneShallowArrayStub::CLONE_ELEMENTS;
+    }
+
     __ mov(ebx, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
     __ mov(eax, FieldOperand(ebx, JSFunction::kLiteralsOffset));
     __ mov(ebx, Immediate(Smi::FromInt(expr->literal_index())));
     __ mov(ecx, Immediate(constant_elements));
-    FastCloneShallowArrayStub stub(isolate(), allocation_site_mode);
+    FastCloneShallowArrayStub stub(isolate(),
+                                   mode,
+                                   allocation_site_mode,
+                                   length);
     __ CallStub(&stub);
   }
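The rewritten literal path picks one of three strategies: the COW-specialized stub when the boilerplate's elements use the copy-on-write map, the runtime for nested, serialized, or oversized literals, and the generic clone stub otherwise. A condensed decision sketch; the enum and parameter names are illustrative:

    enum class LiteralStrategy { kCopyOnWriteStub, kRuntimeCall, kCloneStub };

    LiteralStrategy Choose(bool elements_are_cow, int depth, bool serializing,
                           int length, int max_cloned_length) {
      if (elements_are_cow) return LiteralStrategy::kCopyOnWriteStub;
      if (depth > 1 || serializing || length > max_cloned_length) {
        return LiteralStrategy::kRuntimeCall;
      }
      return LiteralStrategy::kCloneStub;
    }

    int main() {
      return Choose(false, 1, false, 4, 8) == LiteralStrategy::kCloneStub ? 0 : 1;
    }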
 
diff --git a/src/ia32/macro-assembler-ia32.cc b/src/ia32/macro-assembler-ia32.cc
index f33d096..12e490c 100644
--- a/src/ia32/macro-assembler-ia32.cc
+++ b/src/ia32/macro-assembler-ia32.cc
@@ -2300,7 +2300,7 @@
 
 void MacroAssembler::CallApiFunctionAndReturn(
     Register function_address,
-    Address thunk_address,
+    ExternalReference thunk_ref,
     Operand thunk_last_arg,
     int stack_space,
     Operand return_value_operand,
@@ -2331,17 +2331,15 @@
 
   Label profiler_disabled;
   Label end_profiler_check;
-  bool* is_profiling_flag =
-      isolate()->cpu_profiler()->is_profiling_address();
-  STATIC_ASSERT(sizeof(*is_profiling_flag) == 1);
-  mov(eax, Immediate(reinterpret_cast<Address>(is_profiling_flag)));
+  mov(eax, Immediate(ExternalReference::is_profiling_address(isolate())));
   cmpb(Operand(eax, 0), 0);
   j(zero, &profiler_disabled);
 
   // Additional parameter is the address of the actual getter function.
   mov(thunk_last_arg, function_address);
   // Call the api function.
-  call(thunk_address, RelocInfo::RUNTIME_ENTRY);
+  mov(eax, Immediate(thunk_ref));
+  call(eax);
   jmp(&end_profiler_check);
 
   bind(&profiler_disabled);
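CallApiFunctionAndReturn now tests the profiler's one-byte is_profiling flag through a serializer-safe ExternalReference and either calls the API function directly or routes it through a recording thunk. In outline, with a trivial stand-in for the real thunk:

    #include <cstdint>

    using ApiFunction = void (*)();

    static void ProfilingThunk(ApiFunction target) {
      // The real thunk logs the callee for the CPU profiler before calling.
      target();
    }

    void CallApiFunction(ApiFunction target, const uint8_t* is_profiling) {
      if (*is_profiling == 0) {
        target();                // profiler disabled: direct call
      } else {
        ProfilingThunk(target);  // profiler enabled: record, then call
      }
    }

    int main() {
      uint8_t off = 0;
      CallApiFunction([] {}, &off);
      return 0;
    }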
diff --git a/src/ia32/macro-assembler-ia32.h b/src/ia32/macro-assembler-ia32.h
index 6e74549..8f03960 100644
--- a/src/ia32/macro-assembler-ia32.h
+++ b/src/ia32/macro-assembler-ia32.h
@@ -807,7 +807,7 @@
   // caller-save registers.  Restores context.  On return removes
   // stack_space * kPointerSize (GCed).
   void CallApiFunctionAndReturn(Register function_address,
-                                Address thunk_address,
+                                ExternalReference thunk_ref,
                                 Operand thunk_last_arg,
                                 int stack_space,
                                 Operand return_value_operand,
diff --git a/src/ic.cc b/src/ic.cc
index 6480da1..09444b3 100644
--- a/src/ic.cc
+++ b/src/ic.cc
@@ -31,11 +31,11 @@
 #include "api.h"
 #include "arguments.h"
 #include "codegen.h"
+#include "conversions.h"
 #include "execution.h"
 #include "ic-inl.h"
 #include "runtime.h"
 #include "stub-cache.h"
-#include "v8conversions.h"
 
 namespace v8 {
 namespace internal {
diff --git a/src/incremental-marking.cc b/src/incremental-marking.cc
index b8dab32..c533ce3 100644
--- a/src/incremental-marking.cc
+++ b/src/incremental-marking.cc
@@ -31,9 +31,9 @@
 
 #include "code-stubs.h"
 #include "compilation-cache.h"
+#include "conversions.h"
 #include "objects-visiting.h"
 #include "objects-visiting-inl.h"
-#include "v8conversions.h"
 
 namespace v8 {
 namespace internal {
diff --git a/src/isolate.cc b/src/isolate.cc
index 8f5b968..9071c02 100644
--- a/src/isolate.cc
+++ b/src/isolate.cc
@@ -1808,7 +1808,7 @@
 
   use_crankshaft_ = FLAG_crankshaft
       && !Serializer::enabled()
-      && CPU::SupportsCrankshaft();
+      && CpuFeatures::SupportsCrankshaft();
 
   if (function_entry_hook() != NULL) {
     // When function entry hooking is in effect, we have to create the code
diff --git a/src/json-parser.h b/src/json-parser.h
index 8c19bc8..732e7ea 100644
--- a/src/json-parser.h
+++ b/src/json-parser.h
@@ -31,7 +31,7 @@
 #include "v8.h"
 
 #include "char-predicates-inl.h"
-#include "v8conversions.h"
+#include "conversions.h"
 #include "messages.h"
 #include "spaces-inl.h"
 #include "token.h"
diff --git a/src/json-stringifier.h b/src/json-stringifier.h
index e6957eb..7c4a9b4 100644
--- a/src/json-stringifier.h
+++ b/src/json-stringifier.h
@@ -29,8 +29,8 @@
 #define V8_JSON_STRINGIFIER_H_
 
 #include "v8.h"
+#include "conversions.h"
 #include "utils.h"
-#include "v8conversions.h"
 
 namespace v8 {
 namespace internal {
diff --git a/src/mips/assembler-mips.cc b/src/mips/assembler-mips.cc
index f3a57f9..ccdc114 100644
--- a/src/mips/assembler-mips.cc
+++ b/src/mips/assembler-mips.cc
@@ -46,7 +46,6 @@
 #ifdef DEBUG
 bool CpuFeatures::initialized_ = false;
 #endif
-bool CpuFeatures::hint_creating_snapshot_ = false;
 unsigned CpuFeatures::supported_ = 0;
 unsigned CpuFeatures::found_by_runtime_probing_only_ = 0;
 unsigned CpuFeatures::cross_compile_ = 0;
@@ -103,22 +102,6 @@
 }
 
 
-void CpuFeatures::SetHintCreatingSnapshot() {
-  hint_creating_snapshot_ = true;
-}
-
-
-void CpuFeatures::ProbeWithoutIsolate() {
-  Probe(hint_creating_snapshot_);
-}
-
-
-void CpuFeatures::Probe() {
-  // The Serializer can only be queried after isolate initialization.
-  Probe(Serializer::enabled());
-}
-
-
 void CpuFeatures::Probe(bool serializer_enabled) {
   unsigned standard_features = (OS::CpuFeaturesImpliedByPlatform() |
                                 CpuFeaturesImpliedByCompiler());
diff --git a/src/mips/assembler-mips.h b/src/mips/assembler-mips.h
index e4729b2..8c186c1 100644
--- a/src/mips/assembler-mips.h
+++ b/src/mips/assembler-mips.h
@@ -425,12 +425,8 @@
  public:
   // Detect features of the target CPU. Set safe defaults if the serializer
   // is enabled (snapshots must be portable).
-  static void Probe();
+  static void Probe(bool serializer_enabled);
 
-  // A special case for printing target and features, which we want to do
-  // before initializing the isolate
-  static void SetHintCreatingSnapshot();
-  static void ProbeWithoutIsolate();
 
   // Check whether a feature is supported by the target CPU.
   static bool IsSupported(CpuFeature f) {
@@ -459,10 +457,9 @@
            (cross_compile_ & mask) == mask;
   }
 
- private:
-  static void Probe(bool serializer_enabled);
-  static bool hint_creating_snapshot_;
+  static bool SupportsCrankshaft() { return IsSupported(FPU); }
 
+ private:
   static bool Check(CpuFeature f, unsigned set) {
     return (set & flag2set(f)) != 0;
   }
diff --git a/src/mips/code-stubs-mips.cc b/src/mips/code-stubs-mips.cc
index f7f15da..b383ea0 100644
--- a/src/mips/code-stubs-mips.cc
+++ b/src/mips/code-stubs-mips.cc
@@ -5305,11 +5305,8 @@
   __ sw(zero_reg, MemOperand(a0, 3 * kPointerSize));
 
   const int kStackUnwindSpace = argc + FCA::kArgsLength + 1;
-  Address thunk_address = FUNCTION_ADDR(&InvokeFunctionCallback);
-  ExternalReference::Type thunk_type = ExternalReference::PROFILING_API_CALL;
-  ApiFunction thunk_fun(thunk_address);
-  ExternalReference thunk_ref = ExternalReference(&thunk_fun, thunk_type,
-      isolate());
+  ExternalReference thunk_ref =
+      ExternalReference::invoke_function_callback(isolate());
 
   AllowExternalCallThatCantCauseGC scope(masm);
   MemOperand context_restore_operand(
@@ -5355,12 +5352,8 @@
 
   const int kStackUnwindSpace = PropertyCallbackArguments::kArgsLength + 1;
 
-  Address thunk_address = FUNCTION_ADDR(&InvokeAccessorGetterCallback);
-  ExternalReference::Type thunk_type =
-      ExternalReference::PROFILING_GETTER_CALL;
-  ApiFunction thunk_fun(thunk_address);
-  ExternalReference thunk_ref = ExternalReference(&thunk_fun, thunk_type,
-      isolate());
+  ExternalReference thunk_ref =
+      ExternalReference::invoke_accessor_getter_callback(isolate());
   __ CallApiFunctionAndReturn(api_function_address,
                               thunk_ref,
                               kStackUnwindSpace,
diff --git a/src/mips/cpu-mips.cc b/src/mips/cpu-mips.cc
index 49d0b37..54026d1 100644
--- a/src/mips/cpu-mips.cc
+++ b/src/mips/cpu-mips.cc
@@ -47,16 +47,6 @@
 namespace internal {
 
 
-void CPU::SetUp() {
-  CpuFeatures::Probe();
-}
-
-
-bool CPU::SupportsCrankshaft() {
-  return CpuFeatures::IsSupported(FPU);
-}
-
-
 void CPU::FlushICache(void* start, size_t size) {
   // Nothing to do, flushing no instructions.
   if (size == 0) {
diff --git a/src/mips/full-codegen-mips.cc b/src/mips/full-codegen-mips.cc
index 022b8d8..9df3da8 100644
--- a/src/mips/full-codegen-mips.cc
+++ b/src/mips/full-codegen-mips.cc
@@ -1834,7 +1834,18 @@
   __ lw(a3, FieldMemOperand(a3, JSFunction::kLiteralsOffset));
   __ li(a2, Operand(Smi::FromInt(expr->literal_index())));
   __ li(a1, Operand(constant_elements));
-  if (expr->depth() > 1) {
+  if (has_fast_elements && constant_elements_values->map() ==
+      isolate()->heap()->fixed_cow_array_map()) {
+    FastCloneShallowArrayStub stub(
+        isolate(),
+        FastCloneShallowArrayStub::COPY_ON_WRITE_ELEMENTS,
+        allocation_site_mode,
+        length);
+    __ CallStub(&stub);
+    __ IncrementCounter(isolate()->counters()->cow_arrays_created_stub(),
+        1, a1, a2);
+  } else if (expr->depth() > 1 || Serializer::enabled() ||
+             length > FastCloneShallowArrayStub::kMaximumClonedLength) {
     __ li(a0, Operand(Smi::FromInt(flags)));
     __ Push(a3, a2, a1, a0);
     __ CallRuntime(Runtime::kHiddenCreateArrayLiteral, 4);
diff --git a/src/mips/macro-assembler-mips.cc b/src/mips/macro-assembler-mips.cc
index 239a476..bd12f1a 100644
--- a/src/mips/macro-assembler-mips.cc
+++ b/src/mips/macro-assembler-mips.cc
@@ -3929,10 +3929,7 @@
 
   Label profiler_disabled;
   Label end_profiler_check;
-  bool* is_profiling_flag =
-      isolate()->cpu_profiler()->is_profiling_address();
-  STATIC_ASSERT(sizeof(*is_profiling_flag) == 1);
-  li(t9, reinterpret_cast<int32_t>(is_profiling_flag));
+  li(t9, Operand(ExternalReference::is_profiling_address(isolate())));
   lb(t9, MemOperand(t9, 0));
   Branch(&profiler_disabled, eq, t9, Operand(zero_reg));
 
diff --git a/src/mirror-debugger.js b/src/mirror-debugger.js
index 9cde02b..26ed909 100644
--- a/src/mirror-debugger.js
+++ b/src/mirror-debugger.js
@@ -1216,6 +1216,11 @@
 };
 
 
+PromiseMirror.prototype.promiseValue = function() {
+  return %GetPromiseValue(this.value_);
+};
+
+
 /**
  * Base mirror object for properties.
  * @param {ObjectMirror} mirror The mirror object having this property
@@ -2533,6 +2538,7 @@
   if (mirror.isPromise()) {
     // Add promise specific properties.
     content.status = mirror.status();
+    content.promiseValue = mirror.promiseValue();
   }
 
   // Add actual properties - named properties followed by indexed properties.
diff --git a/src/mksnapshot.cc b/src/mksnapshot.cc
index 9f91b83..f3a652f 100644
--- a/src/mksnapshot.cc
+++ b/src/mksnapshot.cc
@@ -301,15 +301,9 @@
   // By default, log code create information in the snapshot.
   i::FLAG_log_code = true;
 
-#if V8_TARGET_ARCH_ARM
-  // Printing flags on ARM requires knowing if we intend to enable
-  // the serializer or not.
-  v8::internal::CpuFeatures::SetHintCreatingSnapshot();
-#endif
-
   // Print the usage if an error occurs when parsing the command line
   // flags or if the help flag is set.
-  int result = i::FlagList::SetFlagsFromCommandLine(&argc, argv, true);
+  int result = i::FlagList::SetFlagsFromCommandLine(&argc, argv, true, true);
   if (result > 0 || argc != 2 || i::FLAG_help) {
     ::printf("Usage: %s [flag] ... outfile\n", argv[0]);
     i::FlagList::PrintHelp();
diff --git a/src/objects-inl.h b/src/objects-inl.h
index 6aff6a2..36b6ae7 100644
--- a/src/objects-inl.h
+++ b/src/objects-inl.h
@@ -4679,6 +4679,7 @@
 
 void Code::set_marked_for_deoptimization(bool flag) {
   ASSERT(kind() == OPTIMIZED_FUNCTION);
+  ASSERT(!flag || AllowDeoptimization::IsAllowed(GetIsolate()));
   int previous = READ_UINT32_FIELD(this, kKindSpecificFlags1Offset);
   int updated = MarkedForDeoptimizationField::update(previous, flag);
   WRITE_UINT32_FIELD(this, kKindSpecificFlags1Offset, updated);
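The new ASSERT ties deoptimization marking to an AllowDeoptimization check. A plausible sketch of the guard pattern, assuming a counter-based scope like V8's other Allow/Disallow guards (the real check is per-isolate, not thread-local):

    #include <cassert>

    class DisallowDeoptimization {
     public:
      DisallowDeoptimization()  { ++depth_; }
      ~DisallowDeoptimization() { --depth_; }
      static bool IsAllowed() { return depth_ == 0; }
     private:
      static thread_local int depth_;
    };
    thread_local int DisallowDeoptimization::depth_ = 0;

    void MarkForDeoptimization() {
      // Mirrors the added ASSERT: fail fast inside a no-deopt scope.
      assert(DisallowDeoptimization::IsAllowed());
      // ... set the marked-for-deoptimization bit ...
    }

    int main() {
      MarkForDeoptimization();  // fine: no DisallowDeoptimization scope active
      return 0;
    }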
diff --git a/src/objects.cc b/src/objects.cc
index 08f4a44..21ed836 100644
--- a/src/objects.cc
+++ b/src/objects.cc
@@ -2425,43 +2425,6 @@
 }
 
 
-// Returns NULL if the updated map is incompatible.
-Map* Map::FindUpdatedMap(int verbatim,
-                         int length,
-                         DescriptorArray* descriptors) {
-  DisallowHeapAllocation no_allocation;
-
-  // This can only be called on roots of transition trees.
-  ASSERT(GetBackPointer()->IsUndefined());
-
-  Map* current = this;
-
-  for (int i = verbatim; i < length; i++) {
-    if (!current->HasTransitionArray()) break;
-    Name* name = descriptors->GetKey(i);
-    TransitionArray* transitions = current->transitions();
-    int transition = transitions->Search(name);
-    if (transition == TransitionArray::kNotFound) break;
-    current = transitions->GetTarget(transition);
-    PropertyDetails details = descriptors->GetDetails(i);
-    PropertyDetails target_details =
-        current->instance_descriptors()->GetDetails(i);
-    if (details.attributes() != target_details.attributes()) return NULL;
-    if (details.type() == CALLBACKS) {
-      if (target_details.type() != CALLBACKS) return NULL;
-      if (descriptors->GetValue(i) !=
-              current->instance_descriptors()->GetValue(i)) {
-        return NULL;
-      }
-    } else if (target_details.type() == CALLBACKS) {
-      return NULL;
-    }
-  }
-
-  return current;
-}
-
-
 Map* Map::FindLastMatchMap(int verbatim,
                            int length,
                            DescriptorArray* descriptors) {
@@ -2552,13 +2515,10 @@
                               int modify_index,
                               Handle<HeapType> new_field_type) {
   Isolate* isolate = map->GetIsolate();
-  Handle<Map> field_owner(map->FindFieldOwner(modify_index), isolate);
-  Handle<DescriptorArray> descriptors(
-      field_owner->instance_descriptors(), isolate);
 
   // Check if we actually need to generalize the field type at all.
   Handle<HeapType> old_field_type(
-      descriptors->GetFieldType(modify_index), isolate);
+      map->instance_descriptors()->GetFieldType(modify_index), isolate);
   if (new_field_type->NowIs(old_field_type)) {
     ASSERT(Map::GeneralizeFieldType(old_field_type,
                                     new_field_type,
@@ -2566,6 +2526,12 @@
     return;
   }
 
+  // Determine the field owner.
+  Handle<Map> field_owner(map->FindFieldOwner(modify_index), isolate);
+  Handle<DescriptorArray> descriptors(
+      field_owner->instance_descriptors(), isolate);
+  ASSERT_EQ(*old_field_type, descriptors->GetFieldType(modify_index));
+
   // Determine the generalized new field type.
   new_field_type = Map::GeneralizeFieldType(
       old_field_type, new_field_type, isolate);
@@ -2598,23 +2564,28 @@
 // (partial) version of the type in the transition tree.
 // To do this, on each rewrite:
 // - Search the root of the transition tree using FindRootMap.
-// - Find |updated|, the newest matching version of this map using
-//   FindUpdatedMap. This uses the keys in the own map's descriptor array to
-//   walk the transition tree.
-// - Merge/generalize the descriptor array of the current map and |updated|.
-// - Generalize the |modify_index| descriptor using |new_representation|.
-// - Walk the tree again starting from the root towards |updated|. Stop at
+// - Find |target_map|, the newest matching version of this map using the keys
+//   in the |old_map|'s descriptor array to walk the transition tree.
+// - Merge/generalize the descriptor array of the |old_map| and |target_map|.
+// - Generalize the |modify_index| descriptor using |new_representation| and
+//   |new_field_type|.
+// - Walk the tree again starting from the root towards |target_map|. Stop at
 //   |split_map|, the first map whose descriptor array does not match the merged
 //   descriptor array.
-// - If |updated| == |split_map|, |updated| is in the expected state. Return it.
-// - Otherwise, invalidate the outdated transition target from |updated|, and
+// - If |target_map| == |split_map|, |target_map| is in the expected state.
+//   Return it.
+// - Otherwise, invalidate the outdated transition target from |target_map|, and
 //   replace its transition tree with a new branch for the updated descriptors.
 Handle<Map> Map::GeneralizeRepresentation(Handle<Map> old_map,
                                           int modify_index,
                                           Representation new_representation,
                                           Handle<HeapType> new_field_type,
                                           StoreMode store_mode) {
-  Handle<DescriptorArray> old_descriptors(old_map->instance_descriptors());
+  Isolate* isolate = old_map->GetIsolate();
+
+  Handle<DescriptorArray> old_descriptors(
+      old_map->instance_descriptors(), isolate);
+  int old_nof = old_map->NumberOfOwnDescriptors();
   PropertyDetails old_details = old_descriptors->GetDetails(modify_index);
   Representation old_representation = old_details.representation();
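The rewritten body of GeneralizeRepresentation (next hunk) walks the transition tree inline instead of delegating to FindUpdatedMap and DescriptorArray::Merge. Its core operation is a lattice join: each descriptor's representation is widened to the least upper bound of the old and target representations. A toy join over a three-point lattice; V8's real Representation lattice has more points (Smi, Integer32, Double, HeapObject, ...):

    enum class Rep { kNone, kSmi, kTagged };  // toy lattice: None < Smi < Tagged

    // Least upper bound: equal reps stay put, None is the bottom element,
    // and distinct non-None reps widen to Tagged in this simplified model.
    Rep Generalize(Rep a, Rep b) {
      if (a == b) return a;
      if (a == Rep::kNone) return b;
      if (b == Rep::kNone) return a;
      return Rep::kTagged;
    }

    int main() {
      return Generalize(Rep::kSmi, Rep::kTagged) == Rep::kTagged ? 0 : 1;
    }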
 
@@ -2641,84 +2612,239 @@
     return old_map;
   }
 
-  if (new_representation.Equals(old_representation) &&
-      old_details.type() == FIELD) {
-    Map::GeneralizeFieldType(old_map, modify_index, new_field_type);
-    return old_map;
-  }
-
-  Handle<Map> root_map(old_map->FindRootMap());
-
   // Check the state of the root map.
+  Handle<Map> root_map(old_map->FindRootMap(), isolate);
   if (!old_map->EquivalentToForTransition(*root_map)) {
     return CopyGeneralizeAllRepresentations(old_map, modify_index, store_mode,
         old_details.attributes(), "not equivalent");
   }
-
-  int verbatim = root_map->NumberOfOwnDescriptors();
-
-  if (store_mode != ALLOW_AS_CONSTANT && modify_index < verbatim) {
-    return CopyGeneralizeAllRepresentations(old_map, modify_index, store_mode,
-        old_details.attributes(), "root modification");
+  int root_nof = root_map->NumberOfOwnDescriptors();
+  if (modify_index < root_nof) {
+    PropertyDetails old_details = old_descriptors->GetDetails(modify_index);
+    if ((old_details.type() != FIELD && store_mode == FORCE_FIELD) ||
+        (old_details.type() == FIELD &&
+         (!new_field_type->NowIs(old_descriptors->GetFieldType(modify_index)) ||
+          !new_representation.fits_into(old_details.representation())))) {
+      return CopyGeneralizeAllRepresentations(old_map, modify_index, store_mode,
+          old_details.attributes(), "root modification");
+    }
   }
 
-  int descriptors = old_map->NumberOfOwnDescriptors();
-  Map* raw_updated = root_map->FindUpdatedMap(
-      verbatim, descriptors, *old_descriptors);
-  if (raw_updated == NULL) {
-    return CopyGeneralizeAllRepresentations(old_map, modify_index, store_mode,
-        old_details.attributes(), "incompatible");
+  Handle<Map> target_map = root_map;
+  for (int i = root_nof; i < old_nof; ++i) {
+    int j = target_map->SearchTransition(old_descriptors->GetKey(i));
+    if (j == TransitionArray::kNotFound) break;
+    Handle<Map> tmp_map(target_map->GetTransition(j), isolate);
+    Handle<DescriptorArray> tmp_descriptors = handle(
+        tmp_map->instance_descriptors(), isolate);
+
+    // Check if target map is incompatible.
+    PropertyDetails old_details = old_descriptors->GetDetails(i);
+    PropertyDetails tmp_details = tmp_descriptors->GetDetails(i);
+    PropertyType old_type = old_details.type();
+    PropertyType tmp_type = tmp_details.type();
+    if (tmp_details.attributes() != old_details.attributes() ||
+        ((tmp_type == CALLBACKS || old_type == CALLBACKS) &&
+         (tmp_type != old_type ||
+          tmp_descriptors->GetValue(i) != old_descriptors->GetValue(i)))) {
+      return CopyGeneralizeAllRepresentations(
+          old_map, modify_index, store_mode,
+          old_details.attributes(), "incompatible");
+    }
+    Representation old_representation = old_details.representation();
+    Representation tmp_representation = tmp_details.representation();
+    if (!old_representation.fits_into(tmp_representation) ||
+        (!new_representation.fits_into(tmp_representation) &&
+         modify_index == i)) {
+      break;
+    }
+    if (tmp_type == FIELD) {
+      // Generalize the field type as necessary.
+      Handle<HeapType> old_field_type = (old_type == FIELD)
+          ? handle(old_descriptors->GetFieldType(i), isolate)
+          : old_descriptors->GetValue(i)->OptimalType(
+              isolate, tmp_representation);
+      if (modify_index == i) {
+        old_field_type = GeneralizeFieldType(
+            new_field_type, old_field_type, isolate);
+      }
+      GeneralizeFieldType(tmp_map, i, old_field_type);
+    } else if (tmp_type == CONSTANT) {
+      if (old_type != CONSTANT ||
+          old_descriptors->GetConstant(i) != tmp_descriptors->GetConstant(i)) {
+        break;
+      }
+    } else {
+      ASSERT_EQ(tmp_type, old_type);
+      ASSERT_EQ(tmp_descriptors->GetValue(i), old_descriptors->GetValue(i));
+    }
+    target_map = tmp_map;
   }
 
-  Handle<Map> updated(raw_updated);
-  Handle<DescriptorArray> updated_descriptors(updated->instance_descriptors());
-
-  int valid = updated->NumberOfOwnDescriptors();
-
-  // Directly change the map if the target map is more general. Ensure that the
-  // target type of the modify_index is a FIELD, unless we are migrating.
-  if (updated_descriptors->IsMoreGeneralThan(
-          verbatim, valid, descriptors, *old_descriptors) &&
-      (store_mode == ALLOW_AS_CONSTANT ||
-       updated_descriptors->GetDetails(modify_index).type() == FIELD)) {
-    Representation updated_representation =
-        updated_descriptors->GetDetails(modify_index).representation();
-    if (new_representation.fits_into(updated_representation)) return updated;
+  // Directly change the map if the target map is more general.
+  Handle<DescriptorArray> target_descriptors(
+      target_map->instance_descriptors(), isolate);
+  int target_nof = target_map->NumberOfOwnDescriptors();
+  if (target_nof == old_nof &&
+      (store_mode != FORCE_FIELD ||
+       target_descriptors->GetDetails(modify_index).type() == FIELD)) {
+    ASSERT(modify_index < target_nof);
+    ASSERT(new_representation.fits_into(
+            target_descriptors->GetDetails(modify_index).representation()));
+    ASSERT(target_descriptors->GetDetails(modify_index).type() != FIELD ||
+           new_field_type->NowIs(
+               target_descriptors->GetFieldType(modify_index)));
+    return target_map;
   }
 
-  Handle<DescriptorArray> new_descriptors = DescriptorArray::Merge(
-      updated, verbatim, valid, descriptors, modify_index,
-      store_mode, old_map);
-  ASSERT(store_mode == ALLOW_AS_CONSTANT ||
+  // Find the last compatible target map in the transition tree.
+  for (int i = target_nof; i < old_nof; ++i) {
+    int j = target_map->SearchTransition(old_descriptors->GetKey(i));
+    if (j == TransitionArray::kNotFound) break;
+    Handle<Map> tmp_map(target_map->GetTransition(j), isolate);
+    Handle<DescriptorArray> tmp_descriptors(
+        tmp_map->instance_descriptors(), isolate);
+
+    // Check if target map is incompatible.
+    PropertyDetails old_details = old_descriptors->GetDetails(i);
+    PropertyDetails tmp_details = tmp_descriptors->GetDetails(i);
+    if (tmp_details.attributes() != old_details.attributes() ||
+        ((tmp_details.type() == CALLBACKS || old_details.type() == CALLBACKS) &&
+         (tmp_details.type() != old_details.type() ||
+          tmp_descriptors->GetValue(i) != old_descriptors->GetValue(i)))) {
+      return CopyGeneralizeAllRepresentations(
+          old_map, modify_index, store_mode,
+          old_details.attributes(), "incompatible");
+    }
+    target_map = tmp_map;
+  }
+  target_nof = target_map->NumberOfOwnDescriptors();
+  target_descriptors = handle(target_map->instance_descriptors(), isolate);
+
+  // Allocate a new descriptor array large enough to hold the required
+  // descriptors; it must be at least as large as the old descriptor
+  // array.
+  int new_slack = Max(
+      old_nof, old_descriptors->number_of_descriptors()) - old_nof;
+  Handle<DescriptorArray> new_descriptors = DescriptorArray::Allocate(
+      isolate, old_nof, new_slack);
+  ASSERT(new_descriptors->length() > target_descriptors->length() ||
+         new_descriptors->NumberOfSlackDescriptors() > 0 ||
+         new_descriptors->number_of_descriptors() ==
+         old_descriptors->number_of_descriptors());
+  ASSERT(new_descriptors->number_of_descriptors() == old_nof);
+
+  // 0 -> |root_nof|
+  int current_offset = 0;
+  for (int i = 0; i < root_nof; ++i) {
+    PropertyDetails old_details = old_descriptors->GetDetails(i);
+    if (old_details.type() == FIELD) current_offset++;
+    Descriptor d(handle(old_descriptors->GetKey(i), isolate),
+                 handle(old_descriptors->GetValue(i), isolate),
+                 old_details);
+    new_descriptors->Set(i, &d);
+  }
+
+  // |root_nof| -> |target_nof|
+  for (int i = root_nof; i < target_nof; ++i) {
+    Handle<Name> target_key(target_descriptors->GetKey(i), isolate);
+    PropertyDetails old_details = old_descriptors->GetDetails(i);
+    PropertyDetails target_details = target_descriptors->GetDetails(i);
+    target_details = target_details.CopyWithRepresentation(
+        old_details.representation().generalize(
+            target_details.representation()));
+    if (modify_index == i) {
+      target_details = target_details.CopyWithRepresentation(
+          new_representation.generalize(target_details.representation()));
+    }
+    if (old_details.type() == FIELD ||
+        target_details.type() == FIELD ||
+        (modify_index == i && store_mode == FORCE_FIELD) ||
+        (target_descriptors->GetValue(i) != old_descriptors->GetValue(i))) {
+      Handle<HeapType> old_field_type = (old_details.type() == FIELD)
+          ? handle(old_descriptors->GetFieldType(i), isolate)
+          : old_descriptors->GetValue(i)->OptimalType(
+              isolate, target_details.representation());
+      Handle<HeapType> target_field_type = (target_details.type() == FIELD)
+          ? handle(target_descriptors->GetFieldType(i), isolate)
+          : target_descriptors->GetValue(i)->OptimalType(
+              isolate, target_details.representation());
+      target_field_type = GeneralizeFieldType(
+          target_field_type, old_field_type, isolate);
+      if (modify_index == i) {
+        target_field_type = GeneralizeFieldType(
+            target_field_type, new_field_type, isolate);
+      }
+      FieldDescriptor d(target_key,
+                        current_offset++,
+                        target_field_type,
+                        target_details.attributes(),
+                        target_details.representation());
+      new_descriptors->Set(i, &d);
+    } else {
+      ASSERT_NE(FIELD, target_details.type());
+      Descriptor d(target_key,
+                   handle(target_descriptors->GetValue(i), isolate),
+                   target_details);
+      new_descriptors->Set(i, &d);
+    }
+  }
+
+  // |target_nof| -> |old_nof|
+  for (int i = target_nof; i < old_nof; ++i) {
+    PropertyDetails old_details = old_descriptors->GetDetails(i);
+    Handle<Name> old_key(old_descriptors->GetKey(i), isolate);
+    if (modify_index == i) {
+      old_details = old_details.CopyWithRepresentation(
+          new_representation.generalize(old_details.representation()));
+    }
+    if (old_details.type() == FIELD) {
+      Handle<HeapType> old_field_type(
+          old_descriptors->GetFieldType(i), isolate);
+      if (modify_index == i) {
+        old_field_type = GeneralizeFieldType(
+            old_field_type, new_field_type, isolate);
+      }
+      FieldDescriptor d(old_key,
+                        current_offset++,
+                        old_field_type,
+                        old_details.attributes(),
+                        old_details.representation());
+      new_descriptors->Set(i, &d);
+    } else {
+      ASSERT(old_details.type() == CONSTANT || old_details.type() == CALLBACKS);
+      if (modify_index == i && store_mode == FORCE_FIELD) {
+        FieldDescriptor d(old_key,
+                          current_offset++,
+                          GeneralizeFieldType(
+                              old_descriptors->GetValue(i)->OptimalType(
+                                  isolate, old_details.representation()),
+                              new_field_type, isolate),
+                          old_details.attributes(),
+                          old_details.representation());
+        new_descriptors->Set(i, &d);
+      } else {
+        ASSERT_NE(FIELD, old_details.type());
+        Descriptor d(old_key,
+                     handle(old_descriptors->GetValue(i), isolate),
+                     old_details);
+        new_descriptors->Set(i, &d);
+      }
+    }
+  }
+
+  new_descriptors->Sort();
+
+  ASSERT(store_mode != FORCE_FIELD ||
          new_descriptors->GetDetails(modify_index).type() == FIELD);
 
-  Isolate* isolate = new_descriptors->GetIsolate();
-  old_representation =
-      new_descriptors->GetDetails(modify_index).representation();
-  Representation updated_representation =
-      new_representation.generalize(old_representation);
-  if (!updated_representation.Equals(old_representation)) {
-    new_descriptors->SetRepresentation(modify_index, updated_representation);
-  }
-  if (new_descriptors->GetDetails(modify_index).type() == FIELD) {
-    Handle<HeapType> field_type(
-        new_descriptors->GetFieldType(modify_index), isolate);
-    new_field_type = Map::GeneralizeFieldType(
-        field_type, new_field_type, isolate);
-    new_descriptors->SetValue(modify_index, *new_field_type);
-  }
-
   Handle<Map> split_map(root_map->FindLastMatchMap(
-      verbatim, descriptors, *new_descriptors));
+          root_nof, old_nof, *new_descriptors), isolate);
+  int split_nof = split_map->NumberOfOwnDescriptors();
+  ASSERT_NE(old_nof, split_nof);
 
-  int split_descriptors = split_map->NumberOfOwnDescriptors();
-  // This is shadowed by |updated_descriptors| being more general than
-  // |old_descriptors|.
-  ASSERT(descriptors != split_descriptors);
-
-  int descriptor = split_descriptors;
   split_map->DeprecateTarget(
-      old_descriptors->GetKey(descriptor), *new_descriptors);
+      old_descriptors->GetKey(split_nof), *new_descriptors);
 
   if (FLAG_trace_generalization) {
     PropertyDetails old_details = old_descriptors->GetDetails(modify_index);
@@ -2732,7 +2858,7 @@
         : HeapType::Constant(handle(new_descriptors->GetValue(modify_index),
                                     isolate), isolate);
     old_map->PrintGeneralization(
-        stdout, "", modify_index, descriptor, descriptors,
+        stdout, "", modify_index, split_nof, old_nof,
         old_details.type() == CONSTANT && store_mode == FORCE_FIELD,
         old_details.representation(), new_details.representation(),
         *old_field_type, *new_field_type);
@@ -2740,10 +2866,9 @@
 
   // Add missing transitions.
   Handle<Map> new_map = split_map;
-  for (; descriptor < descriptors; descriptor++) {
-    new_map = CopyInstallDescriptors(new_map, descriptor, new_descriptors);
+  for (int i = split_nof; i < old_nof; ++i) {
+    new_map = CopyInstallDescriptors(new_map, i, new_descriptors);
   }
-
   new_map->set_owns_descriptors(true);
   return new_map;
 }
@@ -2764,44 +2889,80 @@
 }
 
 
-Handle<Map> Map::CurrentMapForDeprecated(Handle<Map> map) {
+// static
+MaybeHandle<Map> Map::CurrentMapForDeprecated(Handle<Map> map) {
   Handle<Map> proto_map(map);
   while (proto_map->prototype()->IsJSObject()) {
     Handle<JSObject> holder(JSObject::cast(proto_map->prototype()));
-    if (holder->map()->is_deprecated()) {
-      JSObject::TryMigrateInstance(holder);
-    }
     proto_map = Handle<Map>(holder->map());
+    if (proto_map->is_deprecated() && JSObject::TryMigrateInstance(holder)) {
+      proto_map = Handle<Map>(holder->map());
+    }
   }
   return CurrentMapForDeprecatedInternal(map);
 }
 
 
-Handle<Map> Map::CurrentMapForDeprecatedInternal(Handle<Map> map) {
-  if (!map->is_deprecated()) return map;
-
+// static
+MaybeHandle<Map> Map::CurrentMapForDeprecatedInternal(Handle<Map> old_map) {
   DisallowHeapAllocation no_allocation;
-  DescriptorArray* old_descriptors = map->instance_descriptors();
+  DisallowDeoptimization no_deoptimization(old_map->GetIsolate());
 
-  int descriptors = map->NumberOfOwnDescriptors();
-  Map* root_map = map->FindRootMap();
+  if (!old_map->is_deprecated()) return old_map;
 
   // Check the state of the root map.
-  if (!map->EquivalentToForTransition(root_map)) return Handle<Map>();
-  int verbatim = root_map->NumberOfOwnDescriptors();
+  Map* root_map = old_map->FindRootMap();
+  if (!old_map->EquivalentToForTransition(root_map)) return MaybeHandle<Map>();
+  int root_nof = root_map->NumberOfOwnDescriptors();
 
-  Map* updated = root_map->FindUpdatedMap(
-      verbatim, descriptors, old_descriptors);
-  if (updated == NULL) return Handle<Map>();
+  int old_nof = old_map->NumberOfOwnDescriptors();
+  DescriptorArray* old_descriptors = old_map->instance_descriptors();
 
-  DescriptorArray* updated_descriptors = updated->instance_descriptors();
-  int valid = updated->NumberOfOwnDescriptors();
-  if (!updated_descriptors->IsMoreGeneralThan(
-          verbatim, valid, descriptors, old_descriptors)) {
-    return Handle<Map>();
+  Map* new_map = root_map;
+  for (int i = root_nof; i < old_nof; ++i) {
+    int j = new_map->SearchTransition(old_descriptors->GetKey(i));
+    if (j == TransitionArray::kNotFound) return MaybeHandle<Map>();
+    new_map = new_map->GetTransition(j);
+    DescriptorArray* new_descriptors = new_map->instance_descriptors();
+
+    PropertyDetails new_details = new_descriptors->GetDetails(i);
+    PropertyDetails old_details = old_descriptors->GetDetails(i);
+    if (old_details.attributes() != new_details.attributes() ||
+        !old_details.representation().fits_into(new_details.representation())) {
+      return MaybeHandle<Map>();
+    }
+    PropertyType new_type = new_details.type();
+    PropertyType old_type = old_details.type();
+    Object* new_value = new_descriptors->GetValue(i);
+    Object* old_value = old_descriptors->GetValue(i);
+    switch (new_type) {
+      case FIELD:
+        if ((old_type == FIELD &&
+             !HeapType::cast(old_value)->NowIs(HeapType::cast(new_value))) ||
+            (old_type == CONSTANT &&
+             !HeapType::cast(new_value)->NowContains(old_value)) ||
+            (old_type == CALLBACKS &&
+             !HeapType::Any()->Is(HeapType::cast(new_value)))) {
+          return MaybeHandle<Map>();
+        }
+        break;
+
+      case CONSTANT:
+      case CALLBACKS:
+        if (old_type != new_type || old_value != new_value) {
+          return MaybeHandle<Map>();
+        }
+        break;
+
+      case NORMAL:
+      case HANDLER:
+      case INTERCEPTOR:
+      case NONEXISTENT:
+        UNREACHABLE();
+    }
   }
-
-  return handle(updated);
+  if (new_map->NumberOfOwnDescriptors() != old_nof) return MaybeHandle<Map>();
+  return handle(new_map);
 }
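With the switch to MaybeHandle<Map>, "no migration target" becomes a state the caller must explicitly unpack (note the V8_WARN_UNUSED_RESULT added in objects.h below) rather than an empty handle it might forget to test. The pattern, sketched with std::optional standing in for MaybeHandle:

    #include <cstdio>
    #include <optional>

    struct Map { int id; };

    // Returns the updated map, or nothing when the transition tree has no
    // compatible target (the MaybeHandle<Map>() case above).
    std::optional<Map> CurrentMapForDeprecated(bool has_target) {
      if (!has_target) return std::nullopt;
      return Map{42};
    }

    int main() {
      if (auto updated = CurrentMapForDeprecated(true)) {
        std::printf("migrate to map %d\n", updated->id);
      }
      return 0;
    }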
 
 
@@ -3903,15 +4064,20 @@
 }
 
 
-Handle<Object> JSObject::TryMigrateInstance(Handle<JSObject> object) {
-  Handle<Map> original_map(object->map());
-  Handle<Map> new_map = Map::CurrentMapForDeprecatedInternal(original_map);
-  if (new_map.is_null()) return Handle<Object>();
+// static
+bool JSObject::TryMigrateInstance(Handle<JSObject> object) {
+  Isolate* isolate = object->GetIsolate();
+  DisallowDeoptimization no_deoptimization(isolate);
+  Handle<Map> original_map(object->map(), isolate);
+  Handle<Map> new_map;
+  if (!Map::CurrentMapForDeprecatedInternal(original_map).ToHandle(&new_map)) {
+    return false;
+  }
   JSObject::MigrateToMap(object, new_map);
   if (FLAG_trace_migration) {
     object->PrintInstanceMigration(stdout, *original_map, object->map());
   }
-  return object;
+  return true;
 }
 
 
@@ -8356,150 +8522,6 @@
 }
 
 
-// Creates a new descriptor array by merging the descriptor array of |right_map|
-// into the (at least partly) updated descriptor array of |left_map|.
-// The method merges two descriptor array in three parts. Both descriptor arrays
-// are identical up to |verbatim|. They also overlap in keys up to |valid|.
-// Between |verbatim| and |valid|, the resulting descriptor type as well as the
-// representation are generalized from both |left_map| and |right_map|. Beyond
-// |valid|, the descriptors are copied verbatim from |right_map| up to
-// |new_size|.
-// In case of incompatible types, the type and representation of |right_map| is
-// used.
-Handle<DescriptorArray> DescriptorArray::Merge(Handle<Map> left_map,
-                                               int verbatim,
-                                               int valid,
-                                               int new_size,
-                                               int modify_index,
-                                               StoreMode store_mode,
-                                               Handle<Map> right_map) {
-  ASSERT(verbatim <= valid);
-  ASSERT(valid <= new_size);
-
-  // Allocate a new descriptor array large enough to hold the required
-  // descriptors, with minimally the exact same size as this descriptor array.
-  Isolate* isolate = left_map->GetIsolate();
-  Handle<DescriptorArray> left(left_map->instance_descriptors());
-  Handle<DescriptorArray> right(right_map->instance_descriptors());
-  Handle<DescriptorArray> result = DescriptorArray::Allocate(
-      isolate,
-      new_size,
-      Max(new_size, right->number_of_descriptors()) - new_size);
-  ASSERT(result->length() > left->length() ||
-         result->NumberOfSlackDescriptors() > 0 ||
-         result->number_of_descriptors() == right->number_of_descriptors());
-  ASSERT(result->number_of_descriptors() == new_size);
-
-  int descriptor;
-
-  // 0 -> |verbatim|
-  int current_offset = 0;
-  for (descriptor = 0; descriptor < verbatim; descriptor++) {
-    if (left->GetDetails(descriptor).type() == FIELD) current_offset++;
-    Descriptor d(handle(right->GetKey(descriptor)),
-                 handle(right->GetValue(descriptor), right->GetIsolate()),
-                 right->GetDetails(descriptor));
-    result->Set(descriptor, &d);
-  }
-
-  // |verbatim| -> |valid|
-  for (; descriptor < valid; descriptor++) {
-    PropertyDetails left_details = left->GetDetails(descriptor);
-    PropertyDetails right_details = right->GetDetails(descriptor);
-    if (left_details.type() == FIELD || right_details.type() == FIELD ||
-        (store_mode == FORCE_FIELD && descriptor == modify_index) ||
-        (left_details.type() == CONSTANT &&
-         right_details.type() == CONSTANT &&
-         left->GetValue(descriptor) != right->GetValue(descriptor))) {
-      ASSERT(left_details.type() == CONSTANT || left_details.type() == FIELD);
-      ASSERT(right_details.type() == CONSTANT || right_details.type() == FIELD);
-      Representation representation = left_details.representation().generalize(
-          right_details.representation());
-      Handle<HeapType> left_type = (left_details.type() == FIELD)
-          ? handle(left->GetFieldType(descriptor), isolate)
-          : left->GetValue(descriptor)->OptimalType(isolate, representation);
-      Handle<HeapType> right_type = (right_details.type() == FIELD)
-          ? handle(right->GetFieldType(descriptor), isolate)
-          : right->GetValue(descriptor)->OptimalType(isolate, representation);
-      Handle<HeapType> field_type = Map::GeneralizeFieldType(
-          left_type, right_type, isolate);
-      FieldDescriptor d(handle(left->GetKey(descriptor), isolate),
-                        current_offset++,
-                        field_type,
-                        right_details.attributes(),
-                        representation);
-      result->Set(descriptor, &d);
-    } else {
-      Descriptor d(handle(right->GetKey(descriptor), isolate),
-                   handle(right->GetValue(descriptor), isolate),
-                   right_details);
-      result->Set(descriptor, &d);
-    }
-  }
-
-  // |valid| -> |new_size|
-  for (; descriptor < new_size; descriptor++) {
-    PropertyDetails right_details = right->GetDetails(descriptor);
-    if (right_details.type() == FIELD) {
-      FieldDescriptor d(handle(right->GetKey(descriptor), isolate),
-                        current_offset++,
-                        handle(right->GetFieldType(descriptor), isolate),
-                        right_details.attributes(),
-                        right_details.representation());
-      result->Set(descriptor, &d);
-    } else if (store_mode == FORCE_FIELD && descriptor == modify_index) {
-      ASSERT_EQ(CONSTANT, right_details.type());
-      Representation field_representation = right_details.representation();
-      Handle<HeapType> field_type = right->GetValue(descriptor)->OptimalType(
-          isolate, field_representation);
-      FieldDescriptor d(handle(right->GetKey(descriptor), isolate),
-                        current_offset++,
-                        field_type,
-                        right_details.attributes(),
-                        field_representation);
-      result->Set(descriptor, &d);
-    } else {
-      Descriptor d(handle(right->GetKey(descriptor), isolate),
-                   handle(right->GetValue(descriptor), isolate),
-                   right_details);
-      result->Set(descriptor, &d);
-    }
-  }
-
-  result->Sort();
-  return result;
-}
-
-
-// Checks whether a merge of |other| into |this| would return a copy of |this|.
-bool DescriptorArray::IsMoreGeneralThan(int verbatim,
-                                        int valid,
-                                        int new_size,
-                                        DescriptorArray* other) {
-  ASSERT(verbatim <= valid);
-  ASSERT(valid <= new_size);
-  if (valid != new_size) return false;
-
-  for (int descriptor = verbatim; descriptor < valid; descriptor++) {
-    PropertyDetails details = GetDetails(descriptor);
-    PropertyDetails other_details = other->GetDetails(descriptor);
-    if (!other_details.representation().fits_into(details.representation())) {
-      return false;
-    }
-    if (details.type() == CONSTANT) {
-      if (other_details.type() != CONSTANT) return false;
-      if (GetValue(descriptor) != other->GetValue(descriptor)) return false;
-    } else if (details.type() == FIELD && other_details.type() == FIELD) {
-      if (!other->GetFieldType(descriptor)->NowIs(GetFieldType(descriptor))) {
-        return false;
-      }
-    }
-  }
-
-  return true;
-}
-
-
 // We need the whiteness witness since sort will reshuffle the entries in the
 // descriptor array. If the descriptor array were to be black, the shuffling
 // would move a slot that was already recorded as pointing into an evacuation
diff --git a/src/objects.h b/src/objects.h
index 8970c9e..5afb690 100644
--- a/src/objects.h
+++ b/src/objects.h
@@ -2294,8 +2294,8 @@
   static void MigrateInstance(Handle<JSObject> instance);
 
   // Migrates the given object only if the target map is already available,
-  // or returns an empty handle if such a map is not yet available.
-  static Handle<Object> TryMigrateInstance(Handle<JSObject> instance);
+  // or returns false if such a map is not yet available.
+  static bool TryMigrateInstance(Handle<JSObject> instance);
 
   // Retrieve a value in a normalized object given a lookup result.
   // Handles the special representation of JS global objects.
@@ -3441,20 +3441,6 @@
   // array.
   inline void Append(Descriptor* desc);
 
-  static Handle<DescriptorArray> Merge(Handle<Map> left_map,
-                                       int verbatim,
-                                       int valid,
-                                       int new_size,
-                                       int modify_index,
-                                       StoreMode store_mode,
-                                       Handle<Map> right_map)
-      V8_WARN_UNUSED_RESULT;
-
-  bool IsMoreGeneralThan(int verbatim,
-                         int valid,
-                         int new_size,
-                         DescriptorArray* other);
-
   static Handle<DescriptorArray> CopyUpTo(Handle<DescriptorArray> desc,
                                           int enumeration_index,
                                           int slack = 0);
@@ -6429,9 +6415,11 @@
   // is found by re-transitioning from the root of the transition tree using the
-  // descriptor array of the map. Returns NULL if no updated map is found.
+  // descriptor array of the map. Returns an empty handle if none is found.
   // This method also applies any pending migrations along the prototype chain.
-  static Handle<Map> CurrentMapForDeprecated(Handle<Map> map);
+  static MaybeHandle<Map> CurrentMapForDeprecated(Handle<Map> map)
+      V8_WARN_UNUSED_RESULT;
   // Same as above, but does not touch the prototype chain.
-  static Handle<Map> CurrentMapForDeprecatedInternal(Handle<Map> map);
+  static MaybeHandle<Map> CurrentMapForDeprecatedInternal(Handle<Map> map)
+      V8_WARN_UNUSED_RESULT;
 
   static Handle<Map> CopyDropDescriptors(Handle<Map> map);
   static Handle<Map> CopyInsertDescriptor(Handle<Map> map,
@@ -6731,7 +6719,6 @@
   void DeprecateTransitionTree();
   void DeprecateTarget(Name* key, DescriptorArray* new_descriptors);
 
-  Map* FindUpdatedMap(int verbatim, int length, DescriptorArray* descriptors);
   Map* FindLastMatchMap(int verbatim, int length, DescriptorArray* descriptors);
 
   void UpdateDescriptor(int descriptor_number, Descriptor* desc);
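
The MaybeHandle<Map> return (plus V8_WARN_UNUSED_RESULT) forces callers to test the empty case before dereferencing. A sketch of the resulting caller pattern, as exercised by the type-info.cc hunk below:

  // Sketch of the new caller shape.
  Handle<Map> updated;
  if (Map::CurrentMapForDeprecated(map).ToHandle(&updated)) {
    // |updated| is safe to use here.
  } else {
    // No updated map is available yet; take the slow path.
  }
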
diff --git a/src/property-details-inl.h b/src/property-details-inl.h
index 98eb1cf..8350452 100644
--- a/src/property-details-inl.h
+++ b/src/property-details-inl.h
@@ -28,9 +28,9 @@
 #ifndef V8_PROPERTY_DETAILS_INL_H_
 #define V8_PROPERTY_DETAILS_INL_H_
 
+#include "conversions.h"
 #include "objects.h"
 #include "property-details.h"
-#include "v8conversions.h"
 
 namespace v8 {
 namespace internal {
diff --git a/src/property.h b/src/property.h
index c82fb82..48435cc 100644
--- a/src/property.h
+++ b/src/property.h
@@ -66,6 +66,7 @@
         details_(attributes, type, representation, field_index) { }
 
   friend class DescriptorArray;
+  friend class Map;
 };
 
 
diff --git a/src/runtime.cc b/src/runtime.cc
index bc88f78..c91c3f9 100644
--- a/src/runtime.cc
+++ b/src/runtime.cc
@@ -38,6 +38,7 @@
 #include "codegen.h"
 #include "compilation-cache.h"
 #include "compiler.h"
+#include "conversions.h"
 #include "cpu.h"
 #include "cpu-profiler.h"
 #include "dateparser-inl.h"
@@ -63,7 +64,6 @@
 #include "string-search.h"
 #include "stub-cache.h"
 #include "uri.h"
-#include "v8conversions.h"
 #include "v8threads.h"
 #include "vm-state-inl.h"
 
@@ -14583,8 +14583,7 @@
   // code where we can't handle lazy deopts for lack of a suitable bailout
   // ID. So we just try migration and signal failure if necessary,
   // which will also trigger a deopt.
-  Handle<Object> result = JSObject::TryMigrateInstance(js_object);
-  if (result.is_null()) return Smi::FromInt(0);
+  if (!JSObject::TryMigrateInstance(js_object)) return Smi::FromInt(0);
   return *object;
 }
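
The bool return of TryMigrateInstance removes a handle allocation and an is_null() probe at each call site; old and new shapes, condensed from this hunk:

  // Before: an empty handle signalled that no target map was available.
  //   Handle<Object> result = JSObject::TryMigrateInstance(js_object);
  //   if (result.is_null()) return Smi::FromInt(0);
  // After: a plain bool carries the same information.
  if (!JSObject::TryMigrateInstance(js_object)) return Smi::FromInt(0);
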
 
diff --git a/src/serialize.cc b/src/serialize.cc
index efeba8a..e5694ca 100644
--- a/src/serialize.cc
+++ b/src/serialize.cc
@@ -554,6 +554,26 @@
       62,
       "Code::MarkCodeAsExecuted");
 
+  Add(ExternalReference::is_profiling_address(isolate).address(),
+      UNCLASSIFIED,
+      63,
+      "CpuProfiler::is_profiling");
+
+  Add(ExternalReference::scheduled_exception_address(isolate).address(),
+      UNCLASSIFIED,
+      64,
+      "Isolate::scheduled_exception");
+
+  Add(ExternalReference::invoke_function_callback(isolate).address(),
+      UNCLASSIFIED,
+      65,
+      "InvokeFunctionCallback");
+
+  Add(ExternalReference::invoke_accessor_getter_callback(isolate).address(),
+      UNCLASSIFIED,
+      66,
+      "InvokeAccessorGetterCallback");
+
   // Add a small set of deopt entry addresses to encoder without generating the
   // deopt table code, which isn't possible at deserialization time.
   HandleScope scope(isolate);
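
These four entries register the profiler flag, the scheduled-exception slot, and the two API-callback thunks with the external-reference encoder, so code that embeds their addresses (see the x64 stub and macro-assembler changes below) stays serializable. The accessors themselves are not shown in this patch; as an assumption, their shape is roughly:

  // Hypothetical sketch only; the constructor and Redirect details are
  // assumed, not taken from this patch.
  ExternalReference ExternalReference::is_profiling_address(Isolate* isolate) {
    return ExternalReference(isolate->cpu_profiler()->is_profiling_address());
  }

  ExternalReference ExternalReference::invoke_function_callback(
      Isolate* isolate) {
    return ExternalReference(
        Redirect(isolate, FUNCTION_ADDR(&InvokeFunctionCallback)));
  }
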
diff --git a/src/type-info.cc b/src/type-info.cc
index f863af0..0ba6dfa 100644
--- a/src/type-info.cc
+++ b/src/type-info.cc
@@ -217,8 +217,8 @@
   Handle<Map> map;
   Map* raw_map = code->FindFirstMap();
   if (raw_map != NULL) {
-    map = Map::CurrentMapForDeprecated(handle(raw_map));
-    if (!map.is_null() && CanRetainOtherContext(*map, *native_context_)) {
+    if (Map::CurrentMapForDeprecated(handle(raw_map)).ToHandle(&map) &&
+        CanRetainOtherContext(*map, *native_context_)) {
       map = Handle<Map>::null();
     }
   }
diff --git a/src/uri.h b/src/uri.h
index 1b52a50..a8791bc 100644
--- a/src/uri.h
+++ b/src/uri.h
@@ -30,9 +30,9 @@
 
 #include "v8.h"
 
+#include "conversions.h"
 #include "string-search.h"
 #include "utils.h"
-#include "v8conversions.h"
 
 namespace v8 {
 namespace internal {
diff --git a/src/v8-counters.h b/src/v8-counters.h
index 620957e..0bd4955 100644
--- a/src/v8-counters.h
+++ b/src/v8-counters.h
@@ -128,7 +128,6 @@
   SC(call_premonomorphic_stubs, V8.CallPreMonomorphicStubs)           \
   SC(call_normal_stubs, V8.CallNormalStubs)                           \
   SC(call_megamorphic_stubs, V8.CallMegamorphicStubs)                 \
-  SC(inlined_copyied_elements, V8.InlinedCopiedElements)              \
   SC(arguments_adaptors, V8.ArgumentsAdaptors)                        \
   SC(compilation_cache_hits, V8.CompilationCacheHits)                 \
   SC(compilation_cache_misses, V8.CompilationCacheMisses)             \
diff --git a/src/v8.cc b/src/v8.cc
index 70a4983..c1eedd4 100644
--- a/src/v8.cc
+++ b/src/v8.cc
@@ -144,7 +144,7 @@
   platform_ = new DefaultPlatform;
 #endif
   Sampler::SetUp();
-  CPU::SetUp();
+  CpuFeatures::Probe(Serializer::enabled());
   OS::PostSetUp();
   ElementsAccessor::InitializeOncePerProcess();
   LOperand::SetUpCaches();
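
CPU::SetUp() was only a wrapper around CpuFeatures::Probe(); the caller now passes the serializer state explicitly, which keeps CpuFeatures free of a direct Serializer dependency. Usage after this change (SSE4_1 is just an example x64 feature):

  // One-shot, process-wide probe; must run before any code generation.
  CpuFeatures::Probe(Serializer::enabled());

  // Later, in the code generators:
  if (CpuFeatures::IsSupported(SSE4_1)) {
    // emit SSE4.1 instructions
  }
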
diff --git a/src/v8conversions.cc b/src/v8conversions.cc
deleted file mode 100644
index b891a3e..0000000
--- a/src/v8conversions.cc
+++ /dev/null
@@ -1,100 +0,0 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-//     * Redistributions of source code must retain the above copyright
-//       notice, this list of conditions and the following disclaimer.
-//     * Redistributions in binary form must reproduce the above
-//       copyright notice, this list of conditions and the following
-//       disclaimer in the documentation and/or other materials provided
-//       with the distribution.
-//     * Neither the name of Google Inc. nor the names of its
-//       contributors may be used to endorse or promote products derived
-//       from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include <stdarg.h>
-#include <limits.h>
-
-#include "v8.h"
-
-#include "assert-scope.h"
-#include "conversions-inl.h"
-#include "v8conversions.h"
-#include "dtoa.h"
-#include "factory.h"
-#include "strtod.h"
-
-namespace v8 {
-namespace internal {
-
-namespace {
-
-// C++-style iterator adaptor for StringCharacterStream
-// (unlike C++ iterators the end-marker has different type).
-class StringCharacterStreamIterator {
- public:
-  class EndMarker {};
-
-  explicit StringCharacterStreamIterator(StringCharacterStream* stream);
-
-  uint16_t operator*() const;
-  void operator++();
-  bool operator==(EndMarker const&) const { return end_; }
-  bool operator!=(EndMarker const& m) const { return !end_; }
-
- private:
-  StringCharacterStream* const stream_;
-  uint16_t current_;
-  bool end_;
-};
-
-
-StringCharacterStreamIterator::StringCharacterStreamIterator(
-    StringCharacterStream* stream) : stream_(stream) {
-  ++(*this);
-}
-
-uint16_t StringCharacterStreamIterator::operator*() const {
-  return current_;
-}
-
-
-void StringCharacterStreamIterator::operator++() {
-  end_ = !stream_->HasMore();
-  if (!end_) {
-    current_ = stream_->GetNext();
-  }
-}
-}  // End anonymous namespace.
-
-
-double StringToDouble(UnicodeCache* unicode_cache,
-                      String* string,
-                      int flags,
-                      double empty_string_val) {
-  DisallowHeapAllocation no_gc;
-  String::FlatContent flat = string->GetFlatContent();
-  // ECMA-262 section 15.1.2.3, empty string is NaN
-  if (flat.IsAscii()) {
-    return StringToDouble(
-        unicode_cache, flat.ToOneByteVector(), flags, empty_string_val);
-  } else {
-    return StringToDouble(
-        unicode_cache, flat.ToUC16Vector(), flags, empty_string_val);
-  }
-}
-
-} }  // namespace v8::internal
diff --git a/src/v8conversions.h b/src/v8conversions.h
deleted file mode 100644
index eb315b1..0000000
--- a/src/v8conversions.h
+++ /dev/null
@@ -1,109 +0,0 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-//     * Redistributions of source code must retain the above copyright
-//       notice, this list of conditions and the following disclaimer.
-//     * Redistributions in binary form must reproduce the above
-//       copyright notice, this list of conditions and the following
-//       disclaimer in the documentation and/or other materials provided
-//       with the distribution.
-//     * Neither the name of Google Inc. nor the names of its
-//       contributors may be used to endorse or promote products derived
-//       from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_V8CONVERSIONS_H_
-#define V8_V8CONVERSIONS_H_
-
-#include "conversions.h"
-
-namespace v8 {
-namespace internal {
-
-
-static inline bool IsMinusZero(double value) {
-  static const DoubleRepresentation minus_zero(-0.0);
-  return DoubleRepresentation(value) == minus_zero;
-}
-
-
-// Integer32 is an integer that can be represented as a signed 32-bit
-// integer. It has to be in the range [-2^31, 2^31 - 1].
-// We also have to check for negative 0 as it is not an Integer32.
-static inline bool IsInt32Double(double value) {
-  return !IsMinusZero(value) &&
-         value >= kMinInt &&
-         value <= kMaxInt &&
-         value == FastI2D(FastD2I(value));
-}
-
-
-// Convert from Number object to C integer.
-inline int32_t NumberToInt32(Object* number) {
-  if (number->IsSmi()) return Smi::cast(number)->value();
-  return DoubleToInt32(number->Number());
-}
-
-
-inline uint32_t NumberToUint32(Object* number) {
-  if (number->IsSmi()) return Smi::cast(number)->value();
-  return DoubleToUint32(number->Number());
-}
-
-
-double StringToDouble(UnicodeCache* unicode_cache,
-                      String* string,
-                      int flags,
-                      double empty_string_val = 0.0);
-
-
-inline bool TryNumberToSize(Isolate* isolate,
-                            Object* number, size_t* result) {
-  SealHandleScope shs(isolate);
-  if (number->IsSmi()) {
-    int value = Smi::cast(number)->value();
-    ASSERT(static_cast<unsigned>(Smi::kMaxValue)
-           <= std::numeric_limits<size_t>::max());
-    if (value >= 0) {
-      *result = static_cast<size_t>(value);
-      return true;
-    }
-    return false;
-  } else {
-    ASSERT(number->IsHeapNumber());
-    double value = HeapNumber::cast(number)->value();
-    if (value >= 0 &&
-        value <= std::numeric_limits<size_t>::max()) {
-      *result = static_cast<size_t>(value);
-      return true;
-    } else {
-      return false;
-    }
-  }
-}
-
-// Converts a number into size_t.
-inline size_t NumberToSize(Isolate* isolate,
-                           Object* number) {
-  size_t result = 0;
-  bool is_valid = TryNumberToSize(isolate, number, &result);
-  CHECK(is_valid);
-  return result;
-}
-
-} }  // namespace v8::internal
-
-#endif  // V8_V8CONVERSIONS_H_
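
Both v8conversions files are folded into the existing conversions.h/conversions.cc, which is why property-details-inl.h, runtime.cc, and uri.h above swap their includes. Callers keep the same helpers under the new header; a sketch using the signatures from the deleted header:

  #include "conversions.h"  // previously "v8conversions.h"

  // Same helpers, unchanged signatures (as declared in the deleted file):
  int32_t i = NumberToInt32(*number);
  size_t byte_length;
  if (!TryNumberToSize(isolate, *number, &byte_length)) {
    // value does not fit in size_t
  }
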
diff --git a/src/version.cc b/src/version.cc
index 087c315..7da193b 100644
--- a/src/version.cc
+++ b/src/version.cc
@@ -34,7 +34,7 @@
 // system so their names cannot be changed without changing the scripts.
 #define MAJOR_VERSION     3
 #define MINOR_VERSION     26
-#define BUILD_NUMBER      25
+#define BUILD_NUMBER      26
 #define PATCH_LEVEL       0
 // Use 1 for candidates and 0 otherwise.
 // (Boolean macro values are not supported by all preprocessors.)
diff --git a/src/x64/assembler-x64.cc b/src/x64/assembler-x64.cc
index da2db62..359408a 100644
--- a/src/x64/assembler-x64.cc
+++ b/src/x64/assembler-x64.cc
@@ -52,13 +52,13 @@
 }
 
 
-void CpuFeatures::Probe() {
+void CpuFeatures::Probe(bool serializer_enabled) {
   ASSERT(supported_ == CpuFeatures::kDefaultCpuFeatures);
 #ifdef DEBUG
   initialized_ = true;
 #endif
   supported_ = kDefaultCpuFeatures;
-  if (Serializer::enabled()) {
+  if (serializer_enabled) {
     supported_ |= OS::CpuFeaturesImpliedByPlatform();
     return;  // No features if we might serialize.
   }
diff --git a/src/x64/assembler-x64.h b/src/x64/assembler-x64.h
index 43475d1..702be58 100644
--- a/src/x64/assembler-x64.h
+++ b/src/x64/assembler-x64.h
@@ -450,7 +450,7 @@
  public:
   // Detect features of the target CPU. Set safe defaults if the serializer
   // is enabled (snapshots must be portable).
-  static void Probe();
+  static void Probe(bool serializer_enabled);
 
   // Check whether a feature is supported by the target CPU.
   static bool IsSupported(CpuFeature f) {
@@ -484,6 +484,8 @@
            (cross_compile_ & mask) == mask;
   }
 
+  static bool SupportsCrankshaft() { return true; }
+
  private:
   static bool Check(CpuFeature f, uint64_t set) {
     return (set & flag2set(f)) != 0;
diff --git a/src/x64/code-stubs-x64.cc b/src/x64/code-stubs-x64.cc
index 26e9322..ecf58c9 100644
--- a/src/x64/code-stubs-x64.cc
+++ b/src/x64/code-stubs-x64.cc
@@ -82,11 +82,6 @@
   static Register registers[] = { rax, rbx, rcx };
   descriptor->register_param_count_ = 3;
   descriptor->register_params_ = registers;
-  static Representation representations[] = {
-    Representation::Tagged(),
-    Representation::Smi(),
-    Representation::Tagged() };
-  descriptor->register_param_representations_ = representations;
   descriptor->deoptimization_handler_ =
       Runtime::FunctionForId(
           Runtime::kHiddenCreateArrayLiteralStubBailout)->entry;
@@ -216,11 +211,6 @@
     descriptor->handler_arguments_mode_ = PASS_ARGUMENTS;
     descriptor->stack_parameter_count_ = rax;
     descriptor->register_param_count_ = 3;
-    static Representation representations[] = {
-        Representation::Tagged(),
-        Representation::Tagged(),
-        Representation::Integer32() };
-    descriptor->register_param_representations_ = representations;
     descriptor->register_params_ = registers_variable_args;
   }
 
@@ -249,10 +239,6 @@
     descriptor->stack_parameter_count_ = rax;
     descriptor->register_param_count_ = 2;
     descriptor->register_params_ = registers_variable_args;
-    static Representation representations[] = {
-        Representation::Tagged(),
-        Representation::Integer32() };
-    descriptor->register_param_representations_ = representations;
   }
 
   descriptor->hint_stack_parameter_count_ = constant_stack_parameter_count;
@@ -4856,7 +4842,8 @@
   // v8::InvocationCallback's argument.
   __ leap(arguments_arg, StackSpaceOperand(0));
 
-  Address thunk_address = FUNCTION_ADDR(&InvokeFunctionCallback);
+  ExternalReference thunk_ref =
+      ExternalReference::invoke_function_callback(isolate());
 
   // Accessor for FunctionCallbackInfo and first js arg.
   StackArgumentsAccessor args_from_rbp(rbp, FCA::kArgsLength + 1,
@@ -4868,7 +4855,7 @@
       is_store ? 0 : FCA::kArgsLength - FCA::kReturnValueOffset);
   __ CallApiFunctionAndReturn(
       api_function_address,
-      thunk_address,
+      thunk_ref,
       callback_arg,
       argc + FCA::kArgsLength + 1,
       return_value_operand,
@@ -4915,7 +4902,8 @@
   // could be used to pass arguments.
   __ leap(accessor_info_arg, StackSpaceOperand(0));
 
-  Address thunk_address = FUNCTION_ADDR(&InvokeAccessorGetterCallback);
+  ExternalReference thunk_ref =
+      ExternalReference::invoke_accessor_getter_callback(isolate());
 
   // It's okay if api_function_address == getter_arg,
   // but not accessor_info_arg or name_arg.
@@ -4928,7 +4916,7 @@
       PropertyCallbackArguments::kArgsLength - 1 -
       PropertyCallbackArguments::kReturnValueOffset);
   __ CallApiFunctionAndReturn(api_function_address,
-                              thunk_address,
+                              thunk_ref,
                               getter_arg,
                               kStackSpace,
                               return_value_operand,
diff --git a/src/x64/cpu-x64.cc b/src/x64/cpu-x64.cc
index 4fa290a..7a4dd0c 100644
--- a/src/x64/cpu-x64.cc
+++ b/src/x64/cpu-x64.cc
@@ -41,16 +41,6 @@
 namespace v8 {
 namespace internal {
 
-void CPU::SetUp() {
-  CpuFeatures::Probe();
-}
-
-
-bool CPU::SupportsCrankshaft() {
-  return true;  // Yay!
-}
-
-
 void CPU::FlushICache(void* start, size_t size) {
   // No need to flush the instruction cache on Intel. On Intel instruction
   // cache flushing is only necessary when multiple cores running the same
diff --git a/src/x64/full-codegen-x64.cc b/src/x64/full-codegen-x64.cc
index d974033..ae72ba7 100644
--- a/src/x64/full-codegen-x64.cc
+++ b/src/x64/full-codegen-x64.cc
@@ -1796,7 +1796,24 @@
     allocation_site_mode = DONT_TRACK_ALLOCATION_SITE;
   }
 
-  if (expr->depth() > 1) {
+  Heap* heap = isolate()->heap();
+  if (has_constant_fast_elements &&
+      constant_elements_values->map() == heap->fixed_cow_array_map()) {
+    // If the elements are already FAST_*_ELEMENTS, the boilerplate cannot
+    // change, so it's possible to specialize the stub in advance.
+    __ IncrementCounter(isolate()->counters()->cow_arrays_created_stub(), 1);
+    __ movp(rbx, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
+    __ movp(rax, FieldOperand(rbx, JSFunction::kLiteralsOffset));
+    __ Move(rbx, Smi::FromInt(expr->literal_index()));
+    __ Move(rcx, constant_elements);
+    FastCloneShallowArrayStub stub(
+        isolate(),
+        FastCloneShallowArrayStub::COPY_ON_WRITE_ELEMENTS,
+        allocation_site_mode,
+        length);
+    __ CallStub(&stub);
+  } else if (expr->depth() > 1 || Serializer::enabled() ||
+             length > FastCloneShallowArrayStub::kMaximumClonedLength) {
     __ movp(rbx, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
     __ Push(FieldOperand(rbx, JSFunction::kLiteralsOffset));
     __ Push(Smi::FromInt(expr->literal_index()));
@@ -1804,11 +1821,24 @@
     __ Push(Smi::FromInt(flags));
     __ CallRuntime(Runtime::kHiddenCreateArrayLiteral, 4);
   } else {
+    ASSERT(IsFastSmiOrObjectElementsKind(constant_elements_kind) ||
+           FLAG_smi_only_arrays);
+    FastCloneShallowArrayStub::Mode mode =
+        FastCloneShallowArrayStub::CLONE_ANY_ELEMENTS;
+
+    // If the elements are already FAST_*_ELEMENTS, the boilerplate cannot
+    // change, so it's possible to specialize the stub in advance.
+    if (has_constant_fast_elements) {
+      mode = FastCloneShallowArrayStub::CLONE_ELEMENTS;
+    }
+
     __ movp(rbx, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
     __ movp(rax, FieldOperand(rbx, JSFunction::kLiteralsOffset));
     __ Move(rbx, Smi::FromInt(expr->literal_index()));
     __ Move(rcx, constant_elements);
-    FastCloneShallowArrayStub stub(isolate(), allocation_site_mode);
+    FastCloneShallowArrayStub stub(isolate(),
+                                   mode,
+                                   allocation_site_mode, length);
     __ CallStub(&stub);
   }
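
The restored fast path picks among three strategies. Condensed, the selection introduced by this hunk is (names from the hunk; control flow flattened for readability):

  FastCloneShallowArrayStub::Mode mode;
  if (has_constant_fast_elements &&
      constant_elements_values->map() == heap->fixed_cow_array_map()) {
    // COW boilerplate cannot change: specialize the stub up front.
    mode = FastCloneShallowArrayStub::COPY_ON_WRITE_ELEMENTS;
  } else if (expr->depth() > 1 || Serializer::enabled() ||
             length > FastCloneShallowArrayStub::kMaximumClonedLength) {
    // Nested, serializing, or too long: no stub, call
    // Runtime::kHiddenCreateArrayLiteral instead.
  } else {
    mode = has_constant_fast_elements
        ? FastCloneShallowArrayStub::CLONE_ELEMENTS       // kind is fixed
        : FastCloneShallowArrayStub::CLONE_ANY_ELEMENTS;  // kind may change
  }
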
 
diff --git a/src/x64/macro-assembler-x64.cc b/src/x64/macro-assembler-x64.cc
index ba16ea8..78f33b4 100644
--- a/src/x64/macro-assembler-x64.cc
+++ b/src/x64/macro-assembler-x64.cc
@@ -667,7 +667,7 @@
 
 void MacroAssembler::CallApiFunctionAndReturn(
     Register function_address,
-    Address thunk_address,
+    ExternalReference thunk_ref,
     Register thunk_last_arg,
     int stack_space,
     Operand return_value_operand,
@@ -714,16 +714,13 @@
 
   Label profiler_disabled;
   Label end_profiler_check;
-  bool* is_profiling_flag =
-      isolate()->cpu_profiler()->is_profiling_address();
-  STATIC_ASSERT(sizeof(*is_profiling_flag) == 1);
-  Move(rax, is_profiling_flag, RelocInfo::EXTERNAL_REFERENCE);
+  Move(rax, ExternalReference::is_profiling_address(isolate()));
   cmpb(Operand(rax, 0), Immediate(0));
   j(zero, &profiler_disabled);
 
   // Third parameter is the address of the actual getter function.
   Move(thunk_last_arg, function_address);
-  Move(rax, thunk_address, RelocInfo::EXTERNAL_REFERENCE);
+  Move(rax, thunk_ref);
   jmp(&end_profiler_check);
 
   bind(&profiler_disabled);
diff --git a/src/x64/macro-assembler-x64.h b/src/x64/macro-assembler-x64.h
index e574667..715ab87 100644
--- a/src/x64/macro-assembler-x64.h
+++ b/src/x64/macro-assembler-x64.h
@@ -1313,7 +1313,7 @@
   // caller-save registers.  Restores context.  On return removes
   // stack_space * kPointerSize (GCed).
   void CallApiFunctionAndReturn(Register function_address,
-                                Address thunk_address,
+                                ExternalReference thunk_ref,
                                 Register thunk_last_arg,
                                 int stack_space,
                                 Operand return_value_operand,
diff --git a/test/mjsunit/es6/mirror-promises.js b/test/mjsunit/es6/mirror-promises.js
index c0eb5bf..5a21a6b 100644
--- a/test/mjsunit/es6/mirror-promises.js
+++ b/test/mjsunit/es6/mirror-promises.js
@@ -17,7 +17,7 @@
   return this.refs_[handle];
 }
 
-function testPromiseMirror(promise, status) {
+function testPromiseMirror(promise, status, value) {
   // Create mirror and JSON representation.
   var mirror = debug.MakeMirror(promise);
   var serializer = debug.MakeMirrorSerializer();
@@ -39,6 +39,7 @@
   assertEquals("Object", mirror.className());
   assertEquals("#<Promise>", mirror.toText());
   assertSame(promise, mirror.value());
+  assertEquals(value, mirror.promiseValue());
 
   // Parse JSON representation and check.
   var fromJSON = eval('(' + json + ')');
@@ -47,7 +48,7 @@
   assertEquals('function', refs.lookup(fromJSON.constructorFunction.ref).type);
   assertEquals('Promise', refs.lookup(fromJSON.constructorFunction.ref).name);
   assertEquals(status, fromJSON.status);
-
+  assertEquals(value, fromJSON.promiseValue);
 }
 
 // Test a number of different promises.
@@ -55,6 +56,14 @@
 var rejected = new Promise(function(resolve, reject) { reject() });
 var pending = new Promise(function(resolve, reject) {});
 
-testPromiseMirror(resolved, "resolved");
-testPromiseMirror(rejected, "rejected");
-testPromiseMirror(pending, "pending");
+testPromiseMirror(resolved, "resolved", undefined);
+testPromiseMirror(rejected, "rejected", undefined);
+testPromiseMirror(pending, "pending", undefined);
+
+var resolvedv = new Promise(function(resolve, reject) { resolve('resolve') });
+var rejectedv = new Promise(function(resolve, reject) { reject('reject') });
+var thrownv = new Promise(function(resolve, reject) { throw 'throw' });
+
+testPromiseMirror(resolvedv, "resolved", 'resolve');
+testPromiseMirror(rejectedv, "rejected", 'reject');
+testPromiseMirror(thrownv, "rejected", 'throw');
diff --git a/tools/gyp/v8.gyp b/tools/gyp/v8.gyp
index bf345cc..9f3003b 100644
--- a/tools/gyp/v8.gyp
+++ b/tools/gyp/v8.gyp
@@ -595,8 +595,6 @@
         '../../src/v8.cc',
         '../../src/v8.h',
         '../../src/v8checks.h',
-        '../../src/v8conversions.cc',
-        '../../src/v8conversions.h',
         '../../src/v8globals.h',
         '../../src/v8memory.h',
         '../../src/v8threads.cc',