Version 3.29.87 (based on bleeding_edge revision r24156)

Preserve message when rethrowing exception (issue 3583).

Fix escaped index JSON parsing (Chromium issue 416449).

Performance and stability improvements on all platforms.

git-svn-id: https://v8.googlecode.com/svn/trunk@24158 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
diff --git a/ChangeLog b/ChangeLog
index 3787796..e2271e3 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -1,3 +1,12 @@
+2014-09-24: Version 3.29.87
+
+        Preserve message when rethrowing exception (issue 3583).
+
+        Fix escaped index JSON parsing (Chromium issue 416449).
+
+        Performance and stability improvements on all platforms.
+
+
 2014-09-23: Version 3.29.84
 
         Performance and stability improvements on all platforms.
diff --git a/Makefile b/Makefile
index 96d7a7a..2fbe1ba 100644
--- a/Makefile
+++ b/Makefile
@@ -230,8 +230,8 @@
 
 # List of files that trigger Makefile regeneration:
 GYPFILES = build/all.gyp build/features.gypi build/standalone.gypi \
-           build/toolchain.gypi samples/samples.gyp src/d8.gyp \
-           test/cctest/cctest.gyp tools/gyp/v8.gyp
+           build/toolchain.gypi samples/samples.gyp src/compiler/compiler.gyp \
+           src/d8.gyp test/cctest/cctest.gyp tools/gyp/v8.gyp
 
 # If vtunejit=on, the v8vtune.gyp will be appended.
 ifeq ($(vtunejit), on)
diff --git a/PRESUBMIT.py b/PRESUBMIT.py
index 43d6b5b..3a9895d 100644
--- a/PRESUBMIT.py
+++ b/PRESUBMIT.py
@@ -34,6 +34,32 @@
 import sys
 
 
+_EXCLUDED_PATHS = (
+    r"^test[\\\/].*",
+    r"^testing[\\\/].*",
+    r"^third_party[\\\/].*",
+    r"^tools[\\\/].*",
+)
+
+
+# Regular expression that matches code only used for test binaries
+# (best effort).
+_TEST_CODE_EXCLUDED_PATHS = (
+    r'.+-unittest\.cc',
+    # Has a method VisitForTest().
+    r'src[\\\/]compiler[\\\/]ast-graph-builder\.cc',
+    # Test extension.
+    r'src[\\\/]extensions[\\\/]gc-extension\.cc',
+)
+
+
+_TEST_ONLY_WARNING = (
+    'You might be calling functions intended only for testing from\n'
+    'production code.  It is OK to ignore this warning if you know what\n'
+    'you are doing, as the heuristics used to detect the situation are\n'
+    'not perfect.  The commit queue will not block on this warning.')
+
+
 def _V8PresubmitChecks(input_api, output_api):
   """Runs the V8 presubmit checks."""
   import sys
@@ -113,6 +139,49 @@
   return results
 
 
+def _CheckNoProductionCodeUsingTestOnlyFunctions(input_api, output_api):
+  """Attempts to prevent use of functions intended only for testing in
+  non-testing code. For now this is just a best-effort implementation
+  that ignores header files and may have some false positives. A
+  better implementation would probably need a proper C++ parser.
+  """
+  # We only scan .cc files, as the declarations of for-testing functions in
+  # header files are hard to distinguish from calls to such functions without a
+  # proper C++ parser.
+  file_inclusion_pattern = r'.+\.cc'
+
+  base_function_pattern = r'[ :]test::[^\s]+|ForTest(ing)?|for_test(ing)?'
+  inclusion_pattern = input_api.re.compile(r'(%s)\s*\(' % base_function_pattern)
+  comment_pattern = input_api.re.compile(r'//.*(%s)' % base_function_pattern)
+  exclusion_pattern = input_api.re.compile(
+    r'::[A-Za-z0-9_]+(%s)|(%s)[^;]+\{' % (
+      base_function_pattern, base_function_pattern))
+
+  def FilterFile(affected_file):
+    black_list = (_EXCLUDED_PATHS +
+                  _TEST_CODE_EXCLUDED_PATHS +
+                  input_api.DEFAULT_BLACK_LIST)
+    return input_api.FilterSourceFile(
+      affected_file,
+      white_list=(file_inclusion_pattern, ),
+      black_list=black_list)
+
+  problems = []
+  for f in input_api.AffectedSourceFiles(FilterFile):
+    local_path = f.LocalPath()
+    for line_number, line in f.ChangedContents():
+      if (inclusion_pattern.search(line) and
+          not comment_pattern.search(line) and
+          not exclusion_pattern.search(line)):
+        problems.append(
+          '%s:%d\n    %s' % (local_path, line_number, line.strip()))
+
+  if problems:
+    return [output_api.PresubmitPromptOrNotify(_TEST_ONLY_WARNING, problems)]
+  else:
+    return []
+
+
 def _CommonChecks(input_api, output_api):
   """Checks common to both upload and commit."""
   results = []
@@ -122,6 +191,8 @@
       input_api, output_api))
   results.extend(_V8PresubmitChecks(input_api, output_api))
   results.extend(_CheckUnwantedDependencies(input_api, output_api))
+  results.extend(
+      _CheckNoProductionCodeUsingTestOnlyFunctions(input_api, output_api))
   return results
 
 
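To make the new heuristic concrete, here is a hypothetical C++ fragment annotated with how the patterns above classify each changed line (every identifier is invented for illustration; none comes from this patch):

    heap()->CollectGarbageForTesting(kFullGC);       // flagged: matches the CamelCase pattern
    object->set_size_for_testing(16);                // flagged: matches the snake_case pattern
    // CollectGarbageForTesting(kFullGC);            // skipped: commented-out calls are ignored
    void Heap::ResetForTesting() { counter_ = 0; }   // skipped: recognized as a definition

Only lines changed in non-excluded .cc files are scanned, and a match produces the prompt above rather than a hard failure.
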
diff --git a/src/api.cc b/src/api.cc
index 2096efd..e11d140 100644
--- a/src/api.cc
+++ b/src/api.cc
@@ -6567,9 +6567,6 @@
 
 
 Isolate* Isolate::New(const Isolate::CreateParams& params) {
-  // TODO(jochen): Remove again soon.
-  V8::Initialize();
-
   i::Isolate* isolate = new i::Isolate();
   Isolate* v8_isolate = reinterpret_cast<Isolate*>(isolate);
   if (params.entry_hook) {
diff --git a/src/arm/code-stubs-arm.cc b/src/arm/code-stubs-arm.cc
index d879f29..25270d1 100644
--- a/src/arm/code-stubs-arm.cc
+++ b/src/arm/code-stubs-arm.cc
@@ -3465,8 +3465,8 @@
   __ ldrb(tmp1, FieldMemOperand(tmp1, Map::kInstanceTypeOffset));
   __ ldrb(tmp2, FieldMemOperand(tmp2, Map::kInstanceTypeOffset));
 
-  __ JumpIfNotUniqueName(tmp1, &miss);
-  __ JumpIfNotUniqueName(tmp2, &miss);
+  __ JumpIfNotUniqueNameInstanceType(tmp1, &miss);
+  __ JumpIfNotUniqueNameInstanceType(tmp2, &miss);
 
   // Unique names are compared by identity.
   __ cmp(left, right);
@@ -3698,7 +3698,7 @@
     __ ldr(entity_name, FieldMemOperand(entity_name, HeapObject::kMapOffset));
     __ ldrb(entity_name,
             FieldMemOperand(entity_name, Map::kInstanceTypeOffset));
-    __ JumpIfNotUniqueName(entity_name, miss);
+    __ JumpIfNotUniqueNameInstanceType(entity_name, miss);
     __ bind(&good);
 
     // Restore the properties.
@@ -3868,7 +3868,7 @@
       __ ldr(entry_key, FieldMemOperand(entry_key, HeapObject::kMapOffset));
       __ ldrb(entry_key,
               FieldMemOperand(entry_key, Map::kInstanceTypeOffset));
-      __ JumpIfNotUniqueName(entry_key, &maybe_in_dictionary);
+      __ JumpIfNotUniqueNameInstanceType(entry_key, &maybe_in_dictionary);
     }
   }
 
diff --git a/src/arm/lithium-codegen-arm.cc b/src/arm/lithium-codegen-arm.cc
index 11b170b..a06ed73 100644
--- a/src/arm/lithium-codegen-arm.cc
+++ b/src/arm/lithium-codegen-arm.cc
@@ -319,30 +319,26 @@
   // Each entry in the jump table generates one instruction and inlines one
   // 32bit data after it.
   if (!is_int24((masm()->pc_offset() / Assembler::kInstrSize) +
-      deopt_jump_table_.length() * 7)) {
+                jump_table_.length() * 7)) {
     Abort(kGeneratedCodeIsTooLarge);
   }
 
-  if (deopt_jump_table_.length() > 0) {
+  if (jump_table_.length() > 0) {
     Label needs_frame, call_deopt_entry;
 
     Comment(";;; -------------------- Jump table --------------------");
-    Address base = deopt_jump_table_[0].address;
+    Address base = jump_table_[0].address;
 
     Register entry_offset = scratch0();
 
-    int length = deopt_jump_table_.length();
+    int length = jump_table_.length();
     for (int i = 0; i < length; i++) {
-      Deoptimizer::JumpTableEntry* table_entry = &deopt_jump_table_[i];
+      Deoptimizer::JumpTableEntry* table_entry = &jump_table_[i];
       __ bind(&table_entry->label);
 
-      Deoptimizer::BailoutType type = table_entry->bailout_type;
-      DCHECK(type == deopt_jump_table_[0].bailout_type);
+      DCHECK_EQ(jump_table_[0].bailout_type, table_entry->bailout_type);
       Address entry = table_entry->address;
-      int id = Deoptimizer::GetDeoptimizationId(isolate(), entry, type);
-      DCHECK_NE(Deoptimizer::kNotDeoptimizationEntry, id);
-      Comment(";;; jump table entry %d: deoptimization bailout %d.", i, id);
-      DeoptComment(table_entry->mnemonic, table_entry->reason);
+      DeoptComment(table_entry->reason);
 
       // Second-level deopt table entries are contiguous and small, so instead
       // of loading the full, absolute address of each one, load an immediate
@@ -846,7 +842,7 @@
 
 
 void LCodeGen::DeoptimizeIf(Condition condition, LInstruction* instr,
-                            const char* reason,
+                            const char* detail,
                             Deoptimizer::BailoutType bailout_type) {
   LEnvironment* environment = instr->environment();
   RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
@@ -899,35 +895,35 @@
     __ stop("trap_on_deopt", condition);
   }
 
+  Deoptimizer::Reason reason(instr->hydrogen_value()->position().raw(),
+                             instr->Mnemonic(), detail);
   DCHECK(info()->IsStub() || frame_is_built_);
   // Go through jump table if we need to handle condition, build frame, or
   // restore caller doubles.
   if (condition == al && frame_is_built_ &&
       !info()->saves_caller_doubles()) {
-    DeoptComment(instr->Mnemonic(), reason);
+    DeoptComment(reason);
     __ Call(entry, RelocInfo::RUNTIME_ENTRY);
   } else {
+    Deoptimizer::JumpTableEntry table_entry(entry, reason, bailout_type,
+                                            !frame_is_built_);
     // We often have several deopts to the same entry, reuse the last
     // jump entry if this is the case.
-    if (deopt_jump_table_.is_empty() ||
-        (deopt_jump_table_.last().address != entry) ||
-        (deopt_jump_table_.last().bailout_type != bailout_type) ||
-        (deopt_jump_table_.last().needs_frame != !frame_is_built_)) {
-      Deoptimizer::JumpTableEntry table_entry(entry, instr->Mnemonic(), reason,
-                                              bailout_type, !frame_is_built_);
-      deopt_jump_table_.Add(table_entry, zone());
+    if (jump_table_.is_empty() ||
+        !table_entry.IsEquivalentTo(jump_table_.last())) {
+      jump_table_.Add(table_entry, zone());
     }
-    __ b(condition, &deopt_jump_table_.last().label);
+    __ b(condition, &jump_table_.last().label);
   }
 }
 
 
 void LCodeGen::DeoptimizeIf(Condition condition, LInstruction* instr,
-                            const char* reason) {
+                            const char* detail) {
   Deoptimizer::BailoutType bailout_type = info()->IsStub()
       ? Deoptimizer::LAZY
       : Deoptimizer::EAGER;
-  DeoptimizeIf(condition, instr, reason, bailout_type);
+  DeoptimizeIf(condition, instr, detail, bailout_type);
 }
 
 
@@ -4976,26 +4972,22 @@
     __ bind(&check_false);
     __ LoadRoot(ip, Heap::kFalseValueRootIndex);
     __ cmp(scratch2, Operand(ip));
-    __ RecordComment("Deferred TaggedToI: cannot truncate");
-    DeoptimizeIf(ne, instr);
+    DeoptimizeIf(ne, instr, "cannot truncate");
     __ mov(input_reg, Operand::Zero());
   } else {
-    __ RecordComment("Deferred TaggedToI: not a heap number");
-    DeoptimizeIf(ne, instr);
+    DeoptimizeIf(ne, instr, "not a heap number");
 
     __ sub(ip, scratch2, Operand(kHeapObjectTag));
     __ vldr(double_scratch2, ip, HeapNumber::kValueOffset);
     __ TryDoubleToInt32Exact(input_reg, double_scratch2, double_scratch);
-    __ RecordComment("Deferred TaggedToI: lost precision or NaN");
-    DeoptimizeIf(ne, instr);
+    DeoptimizeIf(ne, instr, "lost precision or NaN");
 
     if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
       __ cmp(input_reg, Operand::Zero());
       __ b(ne, &done);
       __ VmovHigh(scratch1, double_scratch2);
       __ tst(scratch1, Operand(HeapNumber::kSignMask));
-      __ RecordComment("Deferred TaggedToI: minus zero");
-      DeoptimizeIf(ne, instr);
+      DeoptimizeIf(ne, instr, "minus zero");
     }
   }
   __ bind(&done);
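
The ARM rewrite above folds the old three-field comparison into Deoptimizer::JumpTableEntry::IsEquivalentTo. The real definition lives elsewhere in the tree; a plausible sketch, inferred only from the fields the removed inline check compared, would be:

    bool Deoptimizer::JumpTableEntry::IsEquivalentTo(
        const JumpTableEntry& other) const {
      // Entries that target the same deopt entry, with the same bailout
      // type and frame requirement, can share a single jump-table slot.
      return address == other.address &&
             bailout_type == other.bailout_type &&
             needs_frame == other.needs_frame;
    }

The arm64 hunks below perform the same substitution.
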
diff --git a/src/arm/lithium-codegen-arm.h b/src/arm/lithium-codegen-arm.h
index ca8c563..cb137d1 100644
--- a/src/arm/lithium-codegen-arm.h
+++ b/src/arm/lithium-codegen-arm.h
@@ -26,7 +26,7 @@
   LCodeGen(LChunk* chunk, MacroAssembler* assembler, CompilationInfo* info)
       : LCodeGenBase(chunk, assembler, info),
         deoptimizations_(4, info->zone()),
-        deopt_jump_table_(4, info->zone()),
+        jump_table_(4, info->zone()),
         deoptimization_literals_(8, info->zone()),
         inlined_function_count_(0),
         scope_(info->scope()),
@@ -235,9 +235,9 @@
   void RegisterEnvironmentForDeoptimization(LEnvironment* environment,
                                             Safepoint::DeoptMode mode);
   void DeoptimizeIf(Condition condition, LInstruction* instr,
-                    const char* reason, Deoptimizer::BailoutType bailout_type);
+                    const char* detail, Deoptimizer::BailoutType bailout_type);
   void DeoptimizeIf(Condition condition, LInstruction* instr,
-                    const char* reason = NULL);
+                    const char* detail = NULL);
 
   void AddToTranslation(LEnvironment* environment,
                         Translation* translation,
@@ -332,7 +332,7 @@
   void EmitVectorLoadICRegisters(T* instr);
 
   ZoneList<LEnvironment*> deoptimizations_;
-  ZoneList<Deoptimizer::JumpTableEntry> deopt_jump_table_;
+  ZoneList<Deoptimizer::JumpTableEntry> jump_table_;
   ZoneList<Handle<Object> > deoptimization_literals_;
   int inlined_function_count_;
   Scope* const scope_;
diff --git a/src/arm/macro-assembler-arm.cc b/src/arm/macro-assembler-arm.cc
index 92615e1..c845a3d 100644
--- a/src/arm/macro-assembler-arm.cc
+++ b/src/arm/macro-assembler-arm.cc
@@ -3199,8 +3199,8 @@
 }
 
 
-void MacroAssembler::JumpIfNotUniqueName(Register reg,
-                                         Label* not_unique_name) {
+void MacroAssembler::JumpIfNotUniqueNameInstanceType(Register reg,
+                                                     Label* not_unique_name) {
   STATIC_ASSERT(kInternalizedTag == 0 && kStringTag == 0);
   Label succeed;
   tst(reg, Operand(kIsNotStringMask | kIsNotInternalizedMask));
diff --git a/src/arm/macro-assembler-arm.h b/src/arm/macro-assembler-arm.h
index 057591a..030b87a 100644
--- a/src/arm/macro-assembler-arm.h
+++ b/src/arm/macro-assembler-arm.h
@@ -1340,7 +1340,7 @@
   void JumpIfInstanceTypeIsNotSequentialOneByte(Register type, Register scratch,
                                                 Label* failure);
 
-  void JumpIfNotUniqueName(Register reg, Label* not_unique_name);
+  void JumpIfNotUniqueNameInstanceType(Register reg, Label* not_unique_name);
 
   void EmitSeqStringSetCharCheck(Register string,
                                  Register index,
diff --git a/src/arm64/code-stubs-arm64.cc b/src/arm64/code-stubs-arm64.cc
index 93b0e28..1702e46 100644
--- a/src/arm64/code-stubs-arm64.cc
+++ b/src/arm64/code-stubs-arm64.cc
@@ -3370,8 +3370,8 @@
 
   // To avoid a miss, each instance type should be either SYMBOL_TYPE or it
   // should have kInternalizedTag set.
-  __ JumpIfNotUniqueName(lhs_instance_type, &miss);
-  __ JumpIfNotUniqueName(rhs_instance_type, &miss);
+  __ JumpIfNotUniqueNameInstanceType(lhs_instance_type, &miss);
+  __ JumpIfNotUniqueNameInstanceType(rhs_instance_type, &miss);
 
   // Unique names are compared by identity.
   STATIC_ASSERT(EQUAL == 0);
@@ -4488,7 +4488,7 @@
     __ Ldr(entity_name, FieldMemOperand(entity_name, HeapObject::kMapOffset));
     __ Ldrb(entity_name,
             FieldMemOperand(entity_name, Map::kInstanceTypeOffset));
-    __ JumpIfNotUniqueName(entity_name, miss);
+    __ JumpIfNotUniqueNameInstanceType(entity_name, miss);
     __ Bind(&good);
   }
 
@@ -4575,7 +4575,7 @@
       // Check if the entry name is not a unique name.
       __ Ldr(entry_key, FieldMemOperand(entry_key, HeapObject::kMapOffset));
       __ Ldrb(entry_key, FieldMemOperand(entry_key, Map::kInstanceTypeOffset));
-      __ JumpIfNotUniqueName(entry_key, &maybe_in_dictionary);
+      __ JumpIfNotUniqueNameInstanceType(entry_key, &maybe_in_dictionary);
     }
   }
 
diff --git a/src/arm64/lithium-codegen-arm64.cc b/src/arm64/lithium-codegen-arm64.cc
index a7ea29b..40c5c42 100644
--- a/src/arm64/lithium-codegen-arm64.cc
+++ b/src/arm64/lithium-codegen-arm64.cc
@@ -839,12 +839,8 @@
       Deoptimizer::JumpTableEntry* table_entry = jump_table_[i];
       __ Bind(&table_entry->label);
 
-      Deoptimizer::BailoutType type = table_entry->bailout_type;
       Address entry = table_entry->address;
-      int id = Deoptimizer::GetDeoptimizationId(isolate(), entry, type);
-      DCHECK_NE(Deoptimizer::kNotDeoptimizationEntry, id);
-      Comment(";;; jump table entry %d: deoptimization bailout %d.", i, id);
-      DeoptComment(table_entry->mnemonic, table_entry->reason);
+      DeoptComment(table_entry->reason);
 
       // Second-level deopt table entries are contiguous and small, so instead
       // of loading the full, absolute address of each one, load the base
@@ -993,7 +989,7 @@
 
 
 void LCodeGen::DeoptimizeBranch(
-    LInstruction* instr, const char* reason, BranchType branch_type,
+    LInstruction* instr, const char* detail, BranchType branch_type,
     Register reg, int bit, Deoptimizer::BailoutType* override_bailout_type) {
   LEnvironment* environment = instr->environment();
   RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
@@ -1044,21 +1040,22 @@
     __ Bind(&dont_trap);
   }
 
+  Deoptimizer::Reason reason(instr->hydrogen_value()->position().raw(),
+                             instr->Mnemonic(), detail);
   DCHECK(info()->IsStub() || frame_is_built_);
   // Go through jump table if we need to build frame, or restore caller doubles.
   if (branch_type == always &&
       frame_is_built_ && !info()->saves_caller_doubles()) {
-    DeoptComment(instr->Mnemonic(), reason);
+    DeoptComment(reason);
     __ Call(entry, RelocInfo::RUNTIME_ENTRY);
   } else {
+    Deoptimizer::JumpTableEntry* table_entry =
+        new (zone()) Deoptimizer::JumpTableEntry(entry, reason, bailout_type,
+                                                 !frame_is_built_);
     // We often have several deopts to the same entry, reuse the last
     // jump entry if this is the case.
-    if (jump_table_.is_empty() || (jump_table_.last()->address != entry) ||
-        (jump_table_.last()->bailout_type != bailout_type) ||
-        (jump_table_.last()->needs_frame != !frame_is_built_)) {
-      Deoptimizer::JumpTableEntry* table_entry =
-          new (zone()) Deoptimizer::JumpTableEntry(
-              entry, instr->Mnemonic(), reason, bailout_type, !frame_is_built_);
+    if (jump_table_.is_empty() ||
+        !table_entry->IsEquivalentTo(*jump_table_.last())) {
       jump_table_.Add(table_entry, zone());
     }
     __ B(&jump_table_.last()->label, branch_type, reg, bit);
@@ -1068,78 +1065,78 @@
 
 void LCodeGen::Deoptimize(LInstruction* instr,
                           Deoptimizer::BailoutType* override_bailout_type,
-                          const char* reason) {
-  DeoptimizeBranch(instr, reason, always, NoReg, -1, override_bailout_type);
+                          const char* detail) {
+  DeoptimizeBranch(instr, detail, always, NoReg, -1, override_bailout_type);
 }
 
 
 void LCodeGen::DeoptimizeIf(Condition cond, LInstruction* instr,
-                            const char* reason) {
-  DeoptimizeBranch(instr, reason, static_cast<BranchType>(cond));
+                            const char* detail) {
+  DeoptimizeBranch(instr, detail, static_cast<BranchType>(cond));
 }
 
 
 void LCodeGen::DeoptimizeIfZero(Register rt, LInstruction* instr,
-                                const char* reason) {
-  DeoptimizeBranch(instr, reason, reg_zero, rt);
+                                const char* detail) {
+  DeoptimizeBranch(instr, detail, reg_zero, rt);
 }
 
 
 void LCodeGen::DeoptimizeIfNotZero(Register rt, LInstruction* instr,
-                                   const char* reason) {
-  DeoptimizeBranch(instr, reason, reg_not_zero, rt);
+                                   const char* detail) {
+  DeoptimizeBranch(instr, detail, reg_not_zero, rt);
 }
 
 
 void LCodeGen::DeoptimizeIfNegative(Register rt, LInstruction* instr,
-                                    const char* reason) {
+                                    const char* detail) {
   int sign_bit = rt.Is64Bits() ? kXSignBit : kWSignBit;
-  DeoptimizeIfBitSet(rt, sign_bit, instr, reason);
+  DeoptimizeIfBitSet(rt, sign_bit, instr, detail);
 }
 
 
 void LCodeGen::DeoptimizeIfSmi(Register rt, LInstruction* instr,
-                               const char* reason) {
-  DeoptimizeIfBitClear(rt, MaskToBit(kSmiTagMask), instr, reason);
+                               const char* detail) {
+  DeoptimizeIfBitClear(rt, MaskToBit(kSmiTagMask), instr, detail);
 }
 
 
 void LCodeGen::DeoptimizeIfNotSmi(Register rt, LInstruction* instr,
-                                  const char* reason) {
-  DeoptimizeIfBitSet(rt, MaskToBit(kSmiTagMask), instr, reason);
+                                  const char* detail) {
+  DeoptimizeIfBitSet(rt, MaskToBit(kSmiTagMask), instr, detail);
 }
 
 
 void LCodeGen::DeoptimizeIfRoot(Register rt, Heap::RootListIndex index,
-                                LInstruction* instr, const char* reason) {
+                                LInstruction* instr, const char* detail) {
   __ CompareRoot(rt, index);
-  DeoptimizeIf(eq, instr, reason);
+  DeoptimizeIf(eq, instr, detail);
 }
 
 
 void LCodeGen::DeoptimizeIfNotRoot(Register rt, Heap::RootListIndex index,
-                                   LInstruction* instr, const char* reason) {
+                                   LInstruction* instr, const char* detail) {
   __ CompareRoot(rt, index);
-  DeoptimizeIf(ne, instr, reason);
+  DeoptimizeIf(ne, instr, detail);
 }
 
 
 void LCodeGen::DeoptimizeIfMinusZero(DoubleRegister input, LInstruction* instr,
-                                     const char* reason) {
+                                     const char* detail) {
   __ TestForMinusZero(input);
-  DeoptimizeIf(vs, instr, reason);
+  DeoptimizeIf(vs, instr, detail);
 }
 
 
 void LCodeGen::DeoptimizeIfBitSet(Register rt, int bit, LInstruction* instr,
-                                  const char* reason) {
-  DeoptimizeBranch(instr, reason, reg_bit_set, rt, bit);
+                                  const char* detail) {
+  DeoptimizeBranch(instr, detail, reg_bit_set, rt, bit);
 }
 
 
 void LCodeGen::DeoptimizeIfBitClear(Register rt, int bit, LInstruction* instr,
-                                    const char* reason) {
-  DeoptimizeBranch(instr, reason, reg_bit_clear, rt, bit);
+                                    const char* detail) {
+  DeoptimizeBranch(instr, detail, reg_bit_clear, rt, bit);
 }
 
 
@@ -5629,22 +5626,20 @@
     Register output = ToRegister32(instr->result());
     DoubleRegister dbl_scratch2 = ToDoubleRegister(temp2);
 
-    __ RecordComment("Deferred TaggedToI: not a heap number");
-    DeoptimizeIfNotRoot(scratch1, Heap::kHeapNumberMapRootIndex, instr);
+    DeoptimizeIfNotRoot(scratch1, Heap::kHeapNumberMapRootIndex, instr,
+                        "not a heap number");
 
     // A heap number: load value and convert to int32 using non-truncating
     // function. If the result is out of range, branch to deoptimize.
     __ Ldr(dbl_scratch1, FieldMemOperand(input, HeapNumber::kValueOffset));
     __ TryRepresentDoubleAsInt32(output, dbl_scratch1, dbl_scratch2);
-    __ RecordComment("Deferred TaggedToI: lost precision or NaN");
-    DeoptimizeIf(ne, instr);
+    DeoptimizeIf(ne, instr, "lost precision or NaN");
 
     if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
       __ Cmp(output, 0);
       __ B(ne, &done);
       __ Fmov(scratch1, dbl_scratch1);
-      __ RecordComment("Deferred TaggedToI: minus zero");
-      DeoptimizeIfNegative(scratch1, instr);
+      DeoptimizeIfNegative(scratch1, instr, "minus zero");
     }
   }
   __ Bind(&done);
diff --git a/src/arm64/lithium-codegen-arm64.h b/src/arm64/lithium-codegen-arm64.h
index 40fbc38..e24b031 100644
--- a/src/arm64/lithium-codegen-arm64.h
+++ b/src/arm64/lithium-codegen-arm64.h
@@ -213,35 +213,35 @@
                                    Register temp,
                                    LOperand* index,
                                    String::Encoding encoding);
-  void DeoptimizeBranch(LInstruction* instr, const char* reason,
+  void DeoptimizeBranch(LInstruction* instr, const char* detail,
                         BranchType branch_type, Register reg = NoReg,
                         int bit = -1,
                         Deoptimizer::BailoutType* override_bailout_type = NULL);
   void Deoptimize(LInstruction* instr,
                   Deoptimizer::BailoutType* override_bailout_type = NULL,
-                  const char* reason = NULL);
+                  const char* detail = NULL);
   void DeoptimizeIf(Condition cond, LInstruction* instr,
-                    const char* reason = NULL);
+                    const char* detail = NULL);
   void DeoptimizeIfZero(Register rt, LInstruction* instr,
-                        const char* reason = NULL);
+                        const char* detail = NULL);
   void DeoptimizeIfNotZero(Register rt, LInstruction* instr,
-                           const char* reason = NULL);
+                           const char* detail = NULL);
   void DeoptimizeIfNegative(Register rt, LInstruction* instr,
-                            const char* reason = NULL);
+                            const char* detail = NULL);
   void DeoptimizeIfSmi(Register rt, LInstruction* instr,
-                       const char* reason = NULL);
+                       const char* detail = NULL);
   void DeoptimizeIfNotSmi(Register rt, LInstruction* instr,
-                          const char* reason = NULL);
+                          const char* detail = NULL);
   void DeoptimizeIfRoot(Register rt, Heap::RootListIndex index,
-                        LInstruction* instr, const char* reason = NULL);
+                        LInstruction* instr, const char* detail = NULL);
   void DeoptimizeIfNotRoot(Register rt, Heap::RootListIndex index,
-                           LInstruction* instr, const char* reason = NULL);
+                           LInstruction* instr, const char* detail = NULL);
   void DeoptimizeIfMinusZero(DoubleRegister input, LInstruction* instr,
-                             const char* reason = NULL);
+                             const char* detail = NULL);
   void DeoptimizeIfBitSet(Register rt, int bit, LInstruction* instr,
-                          const char* reason = NULL);
+                          const char* detail = NULL);
   void DeoptimizeIfBitClear(Register rt, int bit, LInstruction* instr,
-                            const char* reason = NULL);
+                            const char* detail = NULL);
 
   MemOperand PrepareKeyedExternalArrayOperand(Register key,
                                               Register base,
diff --git a/src/arm64/macro-assembler-arm64.cc b/src/arm64/macro-assembler-arm64.cc
index f78efd2..394bb36 100644
--- a/src/arm64/macro-assembler-arm64.cc
+++ b/src/arm64/macro-assembler-arm64.cc
@@ -2768,8 +2768,8 @@
 }
 
 
-void MacroAssembler::JumpIfNotUniqueName(Register type,
-                                         Label* not_unique_name) {
+void MacroAssembler::JumpIfNotUniqueNameInstanceType(Register type,
+                                                     Label* not_unique_name) {
   STATIC_ASSERT((kInternalizedTag == 0) && (kStringTag == 0));
   // if ((type is string && type is internalized) || type == SYMBOL_TYPE) {
   //   continue
diff --git a/src/arm64/macro-assembler-arm64.h b/src/arm64/macro-assembler-arm64.h
index e73fc2c..33ef439 100644
--- a/src/arm64/macro-assembler-arm64.h
+++ b/src/arm64/macro-assembler-arm64.h
@@ -1074,7 +1074,7 @@
       Register first_object_instance_type, Register second_object_instance_type,
       Register scratch1, Register scratch2, Label* failure);
 
-  void JumpIfNotUniqueName(Register type, Label* not_unique_name);
+  void JumpIfNotUniqueNameInstanceType(Register type, Label* not_unique_name);
 
   // ---- Calling / Jumping helpers ----
 
diff --git a/src/arm64/simulator-arm64.cc b/src/arm64/simulator-arm64.cc
index 277275c..129252b 100644
--- a/src/arm64/simulator-arm64.cc
+++ b/src/arm64/simulator-arm64.cc
@@ -1855,9 +1855,12 @@
 void Simulator::CheckMemoryAccess(uintptr_t address, uintptr_t stack) {
   if ((address >= stack_limit_) && (address < stack)) {
     fprintf(stream_, "ACCESS BELOW STACK POINTER:\n");
-    fprintf(stream_, "  sp is here:          0x%016" PRIx64 "\n", stack);
-    fprintf(stream_, "  access was here:     0x%016" PRIx64 "\n", address);
-    fprintf(stream_, "  stack limit is here: 0x%016" PRIx64 "\n", stack_limit_);
+    fprintf(stream_, "  sp is here:          0x%016" PRIx64 "\n",
+            static_cast<uint64_t>(stack));
+    fprintf(stream_, "  access was here:     0x%016" PRIx64 "\n",
+            static_cast<uint64_t>(address));
+    fprintf(stream_, "  stack limit is here: 0x%016" PRIx64 "\n",
+            static_cast<uint64_t>(stack_limit_));
     fprintf(stream_, "\n");
     FATAL("ACCESS BELOW STACK POINTER");
   }
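
The casts added above are a portability fix: uintptr_t is not guaranteed to be the same type as uint64_t, and passing a mismatched integer type to a "%" PRIx64 conversion is undefined behavior. A minimal standalone illustration:

    #include <cinttypes>
    #include <cstdio>

    void PrintPointerPortably(uintptr_t p) {
      // PRIx64 expects exactly a uint64_t argument, so widen explicitly.
      std::printf("0x%016" PRIx64 "\n", static_cast<uint64_t>(p));
    }
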
diff --git a/src/base/macros.h b/src/base/macros.h
index 7a35618..cef088c 100644
--- a/src/base/macros.h
+++ b/src/base/macros.h
@@ -230,7 +230,7 @@
 // WARNING: if Dest or Source is a non-POD type, the result of the memcpy
 // is likely to surprise you.
 template <class Dest, class Source>
-inline Dest bit_cast(const Source& source) {
+V8_INLINE Dest bit_cast(Source const& source) {
   COMPILE_ASSERT(sizeof(Dest) == sizeof(Source), VerifySizesAreEqual);
 
   Dest dest;
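
For readers unfamiliar with bit_cast, a minimal usage sketch (not from the patch; it only shows the contract the COMPILE_ASSERT enforces):

    double d = 1.0;
    // Same size on both ends (8 bytes), so the raw bits are copied over:
    uint64_t bits = bit_cast<uint64_t>(d);  // 0x3FF0000000000000
    // bit_cast<uint32_t>(d) would fail the size assertion at compile time.
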
diff --git a/src/builtins.cc b/src/builtins.cc
index 4a393cb..d0c19e5 100644
--- a/src/builtins.cc
+++ b/src/builtins.cc
@@ -1289,11 +1289,6 @@
 }
 
 
-static void Generate_KeyedLoadIC_SloppyArguments(MacroAssembler* masm) {
-  KeyedLoadIC::GenerateSloppyArguments(masm);
-}
-
-
 static void Generate_StoreIC_Miss(MacroAssembler* masm) {
   StoreIC::GenerateMiss(masm);
 }
diff --git a/src/builtins.h b/src/builtins.h
index f9409da..c1ed91d 100644
--- a/src/builtins.h
+++ b/src/builtins.h
@@ -89,7 +89,6 @@
     kNoExtraICState)                                                           \
   V(KeyedLoadIC_Generic, KEYED_LOAD_IC, GENERIC, kNoExtraICState)              \
   V(KeyedLoadIC_String, KEYED_LOAD_IC, MEGAMORPHIC, kNoExtraICState)           \
-  V(KeyedLoadIC_SloppyArguments, KEYED_LOAD_IC, MONOMORPHIC, kNoExtraICState)  \
                                                                                \
   V(StoreIC_Setter_ForDeopt, STORE_IC, MONOMORPHIC, StoreIC::kStrictModeState) \
                                                                                \
diff --git a/src/code-stubs-hydrogen.cc b/src/code-stubs-hydrogen.cc
index 3a9688a..1c43049 100644
--- a/src/code-stubs-hydrogen.cc
+++ b/src/code-stubs-hydrogen.cc
@@ -71,6 +71,8 @@
     MULTIPLE
   };
 
+  HValue* UnmappedCase(HValue* elements, HValue* key);
+
   HValue* BuildArrayConstructor(ElementsKind kind,
                                 AllocationSiteOverrideMode override_mode,
                                 ArgumentClass argument_class);
@@ -600,6 +602,122 @@
 Handle<Code> LoadConstantStub::GenerateCode() { return DoGenerateCode(this); }
 
 
+HValue* CodeStubGraphBuilderBase::UnmappedCase(HValue* elements, HValue* key) {
+  HValue* result;
+  HInstruction* backing_store = Add<HLoadKeyed>(
+      elements, graph()->GetConstant1(), static_cast<HValue*>(NULL),
+      FAST_ELEMENTS, ALLOW_RETURN_HOLE);
+  Add<HCheckMaps>(backing_store, isolate()->factory()->fixed_array_map());
+  HValue* backing_store_length =
+      Add<HLoadNamedField>(backing_store, static_cast<HValue*>(NULL),
+                           HObjectAccess::ForFixedArrayLength());
+  IfBuilder in_unmapped_range(this);
+  in_unmapped_range.If<HCompareNumericAndBranch>(key, backing_store_length,
+                                                 Token::LT);
+  in_unmapped_range.Then();
+  {
+    result = Add<HLoadKeyed>(backing_store, key, static_cast<HValue*>(NULL),
+                             FAST_HOLEY_ELEMENTS, NEVER_RETURN_HOLE);
+  }
+  in_unmapped_range.ElseDeopt("Outside of range");
+  in_unmapped_range.End();
+  return result;
+}
+
+
+template <>
+HValue* CodeStubGraphBuilder<KeyedLoadSloppyArgumentsStub>::BuildCodeStub() {
+  HValue* receiver = GetParameter(LoadDescriptor::kReceiverIndex);
+  HValue* key = GetParameter(LoadDescriptor::kNameIndex);
+
+  // Mapped arguments are actual arguments. Unmapped arguments are values added
+  // to the arguments object after it was created for the call. Mapped arguments
+  // are stored in the context at indexes given by elements[key + 2]. Unmapped
+  // arguments are stored as regular indexed properties in the arguments array,
+  // held at elements[1]. See NewSloppyArguments() in runtime.cc for a detailed
+  // look at argument object construction.
+  //
+  // The sloppy arguments elements array has a special format:
+  //
+  // 0: context
+  // 1: unmapped arguments array
+  // 2: mapped_index0,
+  // 3: mapped_index1,
+  // ...
+  //
+  // length is 2 + min(number_of_actual_arguments, number_of_formal_arguments).
+  // If key + 2 >= elements.length then attempt to look in the unmapped
+  // arguments array (given by elements[1]) and return the value at key, missing
+  // to the runtime if the unmapped arguments array is not a fixed array or if
+  // key >= unmapped_arguments_array.length.
+  //
+  // Otherwise, t = elements[key + 2]. If t is the hole, then look up the value
+  // in the unmapped arguments array, as described above. Otherwise, t is a Smi
+  // index into the context array given at elements[0]. Return the value at
+  // context[t].
+
+  key = AddUncasted<HForceRepresentation>(key, Representation::Smi());
+  IfBuilder positive_smi(this);
+  positive_smi.If<HCompareNumericAndBranch>(key, graph()->GetConstant0(),
+                                            Token::LT);
+  positive_smi.ThenDeopt("key is negative");
+  positive_smi.End();
+
+  HValue* constant_two = Add<HConstant>(2);
+  HValue* elements = AddLoadElements(receiver, static_cast<HValue*>(NULL));
+  HValue* elements_length =
+      Add<HLoadNamedField>(elements, static_cast<HValue*>(NULL),
+                           HObjectAccess::ForFixedArrayLength());
+  HValue* adjusted_length = AddUncasted<HSub>(elements_length, constant_two);
+  IfBuilder in_range(this);
+  in_range.If<HCompareNumericAndBranch>(key, adjusted_length, Token::LT);
+  in_range.Then();
+  {
+    HValue* index = AddUncasted<HAdd>(key, constant_two);
+    HInstruction* mapped_index =
+        Add<HLoadKeyed>(elements, index, static_cast<HValue*>(NULL),
+                        FAST_HOLEY_ELEMENTS, ALLOW_RETURN_HOLE);
+
+    IfBuilder is_valid(this);
+    is_valid.IfNot<HCompareObjectEqAndBranch>(mapped_index,
+                                              graph()->GetConstantHole());
+    is_valid.Then();
+    {
+      // TODO(mvstanton): I'd like to assert from this point that if
+      // mapped_index is not the hole, it is indeed a smi. An unnecessary
+      // smi check is being emitted.
+      HValue* the_context =
+          Add<HLoadKeyed>(elements, graph()->GetConstant0(),
+                          static_cast<HValue*>(NULL), FAST_ELEMENTS);
+      DCHECK(Context::kHeaderSize == FixedArray::kHeaderSize);
+      HValue* result =
+          Add<HLoadKeyed>(the_context, mapped_index, static_cast<HValue*>(NULL),
+                          FAST_ELEMENTS, ALLOW_RETURN_HOLE);
+      environment()->Push(result);
+    }
+    is_valid.Else();
+    {
+      HValue* result = UnmappedCase(elements, key);
+      environment()->Push(result);
+    }
+    is_valid.End();
+  }
+  in_range.Else();
+  {
+    HValue* result = UnmappedCase(elements, key);
+    environment()->Push(result);
+  }
+  in_range.End();
+
+  return environment()->Pop();
+}
+
+
+Handle<Code> KeyedLoadSloppyArgumentsStub::GenerateCode() {
+  return DoGenerateCode(this);
+}
+
+
 void CodeStubGraphBuilderBase::BuildStoreNamedField(
     HValue* object, HValue* value, FieldIndex index,
     Representation representation) {
@@ -1092,7 +1210,6 @@
 template <>
 HValue* CodeStubGraphBuilder<StoreGlobalStub>::BuildCodeInitializedStub() {
   StoreGlobalStub* stub = casted_stub();
-  Handle<Object> hole(isolate()->heap()->the_hole_value(), isolate());
   Handle<Object> placeholder_value(Smi::FromInt(0), isolate());
   Handle<PropertyCell> placeholder_cell =
       isolate()->factory()->NewPropertyCell(placeholder_value);
@@ -1124,7 +1241,7 @@
     // property has been deleted and that the store must be handled by the
     // runtime.
     IfBuilder builder(this);
-    HValue* hole_value = Add<HConstant>(hole);
+    HValue* hole_value = graph()->GetConstantHole();
     builder.If<HCompareObjectEqAndBranch>(cell_contents, hole_value);
     builder.Then();
     builder.Deopt("Unexpected cell contents in global store");
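
To make the sloppy-arguments layout documented in BuildCodeStub above concrete, consider a worked example (the function, call site, and slot numbers are all hypothetical):

    // function f(a, b) called as f(10, 20, 30), where 'a' is captured by a
    // closure (context-allocated) and 'b' is not. Then
    // length == 2 + min(3 actual, 2 formal) == 4, and:
    //
    //   elements[0] = <context of f>
    //   elements[1] = <unmapped FixedArray> [hole, 20, 30]
    //   elements[2] = Smi(context slot of 'a')   // mapped entry for key 0
    //   elements[3] = hole                       // key 1 is unmapped
    //
    // arguments[0]: elements[2] is a Smi, so load context[that slot].
    // arguments[1]: elements[3] is the hole, so read elements[1][1] == 20.
    // arguments[2]: key + 2 == 4 >= length, so go to elements[1][2] == 30.
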
diff --git a/src/code-stubs.cc b/src/code-stubs.cc
index 96460c5..5c9e1a2 100644
--- a/src/code-stubs.cc
+++ b/src/code-stubs.cc
@@ -586,12 +586,14 @@
 void HandlerStub::InitializeDescriptor(CodeStubDescriptor* descriptor) {
   if (kind() == Code::STORE_IC) {
     descriptor->Initialize(FUNCTION_ADDR(StoreIC_MissFromStubFailure));
+  } else if (kind() == Code::KEYED_LOAD_IC) {
+    descriptor->Initialize(FUNCTION_ADDR(KeyedLoadIC_MissFromStubFailure));
   }
 }
 
 
 CallInterfaceDescriptor HandlerStub::GetCallInterfaceDescriptor() {
-  if (kind() == Code::LOAD_IC) {
+  if (kind() == Code::LOAD_IC || kind() == Code::KEYED_LOAD_IC) {
     return LoadDescriptor(isolate());
   } else {
     DCHECK_EQ(Code::STORE_IC, kind());
diff --git a/src/code-stubs.h b/src/code-stubs.h
index f9016f1..3b31399 100644
--- a/src/code-stubs.h
+++ b/src/code-stubs.h
@@ -82,6 +82,7 @@
   /* IC Handler stubs */                    \
   V(LoadConstant)                           \
   V(LoadField)                              \
+  V(KeyedLoadSloppyArguments)               \
   V(StoreField)                             \
   V(StoreGlobal)                            \
   V(StringLength)
@@ -914,6 +915,20 @@
 };
 
 
+class KeyedLoadSloppyArgumentsStub : public HandlerStub {
+ public:
+  explicit KeyedLoadSloppyArgumentsStub(Isolate* isolate)
+      : HandlerStub(isolate) {}
+
+ protected:
+  virtual Code::Kind kind() const { return Code::KEYED_LOAD_IC; }
+  virtual Code::StubType GetStubType() { return Code::FAST; }
+
+ private:
+  DEFINE_HANDLER_CODE_STUB(KeyedLoadSloppyArguments, HandlerStub);
+};
+
+
 class LoadConstantStub : public HandlerStub {
  public:
   LoadConstantStub(Isolate* isolate, int constant_index)
diff --git a/src/compiler.cc b/src/compiler.cc
index 685009e..68918d6 100644
--- a/src/compiler.cc
+++ b/src/compiler.cc
@@ -325,10 +325,6 @@
   DCHECK(info()->IsOptimizing());
   DCHECK(!info()->IsCompilingForDebugging());
 
-  // We should never arrive here if there is no code object on the
-  // shared function object.
-  DCHECK(info()->shared_info()->code()->kind() == Code::FUNCTION);
-
   // We should never arrive here if optimization has been disabled on the
   // shared function info.
   DCHECK(!info()->shared_info()->optimization_disabled());
@@ -396,7 +392,8 @@
   DCHECK(info()->shared_info()->has_deoptimization_support());
 
   // Check the whitelist for TurboFan.
-  if (info()->closure()->PassesFilter(FLAG_turbo_filter)) {
+  if ((FLAG_turbo_asm && info()->shared_info()->asm_function()) ||
+      info()->closure()->PassesFilter(FLAG_turbo_filter)) {
     compiler::Pipeline pipeline(info());
     pipeline.GenerateCode();
     if (!info()->code().is_null()) {
@@ -704,6 +701,117 @@
 }
 
 
+MUST_USE_RESULT static MaybeHandle<Code> GetCodeFromOptimizedCodeMap(
+    Handle<JSFunction> function, BailoutId osr_ast_id) {
+  if (FLAG_cache_optimized_code) {
+    Handle<SharedFunctionInfo> shared(function->shared());
+    // Bound functions are not cached.
+    if (shared->bound()) return MaybeHandle<Code>();
+    DisallowHeapAllocation no_gc;
+    int index = shared->SearchOptimizedCodeMap(
+        function->context()->native_context(), osr_ast_id);
+    if (index > 0) {
+      if (FLAG_trace_opt) {
+        PrintF("[found optimized code for ");
+        function->ShortPrint();
+        if (!osr_ast_id.IsNone()) {
+          PrintF(" at OSR AST id %d", osr_ast_id.ToInt());
+        }
+        PrintF("]\n");
+      }
+      FixedArray* literals = shared->GetLiteralsFromOptimizedCodeMap(index);
+      if (literals != NULL) function->set_literals(literals);
+      return Handle<Code>(shared->GetCodeFromOptimizedCodeMap(index));
+    }
+  }
+  return MaybeHandle<Code>();
+}
+
+
+static void InsertCodeIntoOptimizedCodeMap(CompilationInfo* info) {
+  Handle<Code> code = info->code();
+  if (code->kind() != Code::OPTIMIZED_FUNCTION) return;  // Nothing to do.
+
+  // Context specialization folds-in the context, so no sharing can occur.
+  if (code->is_turbofanned() && info->is_context_specializing()) return;
+
+  // Cache optimized code.
+  if (FLAG_cache_optimized_code) {
+    Handle<JSFunction> function = info->closure();
+    Handle<SharedFunctionInfo> shared(function->shared());
+    // Do not cache bound functions.
+    if (shared->bound()) return;
+    Handle<FixedArray> literals(function->literals());
+    Handle<Context> native_context(function->context()->native_context());
+    SharedFunctionInfo::AddToOptimizedCodeMap(shared, native_context, code,
+                                              literals, info->osr_ast_id());
+  }
+}
+
+
+static bool CompileOptimizedPrologue(CompilationInfo* info) {
+  if (!Parser::Parse(info)) return false;
+  if (!Rewriter::Rewrite(info)) return false;
+  if (!Scope::Analyze(info)) return false;
+  DCHECK(info->scope() != NULL);
+  return true;
+}
+
+
+static bool GetOptimizedCodeNow(CompilationInfo* info) {
+  if (!CompileOptimizedPrologue(info)) return false;
+
+  TimerEventScope<TimerEventRecompileSynchronous> timer(info->isolate());
+
+  OptimizedCompileJob job(info);
+  if (job.CreateGraph() != OptimizedCompileJob::SUCCEEDED) return false;
+  if (job.OptimizeGraph() != OptimizedCompileJob::SUCCEEDED) return false;
+  if (job.GenerateCode() != OptimizedCompileJob::SUCCEEDED) return false;
+
+  // Success!
+  DCHECK(!info->isolate()->has_pending_exception());
+  InsertCodeIntoOptimizedCodeMap(info);
+  RecordFunctionCompilation(Logger::LAZY_COMPILE_TAG, info,
+                            info->shared_info());
+  return true;
+}
+
+
+static bool GetOptimizedCodeLater(CompilationInfo* info) {
+  Isolate* isolate = info->isolate();
+  if (!isolate->optimizing_compiler_thread()->IsQueueAvailable()) {
+    if (FLAG_trace_concurrent_recompilation) {
+      PrintF("  ** Compilation queue full, will retry optimizing ");
+      info->closure()->PrintName();
+      PrintF(" later.\n");
+    }
+    return false;
+  }
+
+  CompilationHandleScope handle_scope(info);
+  if (!CompileOptimizedPrologue(info)) return false;
+  info->SaveHandles();  // Copy handles to the compilation handle scope.
+
+  TimerEventScope<TimerEventRecompileSynchronous> timer(info->isolate());
+
+  OptimizedCompileJob* job = new (info->zone()) OptimizedCompileJob(info);
+  OptimizedCompileJob::Status status = job->CreateGraph();
+  if (status != OptimizedCompileJob::SUCCEEDED) return false;
+  isolate->optimizing_compiler_thread()->QueueForOptimization(job);
+
+  if (FLAG_trace_concurrent_recompilation) {
+    PrintF("  ** Queued ");
+    info->closure()->PrintName();
+    if (info->is_osr()) {
+      PrintF(" for concurrent OSR at %d.\n", info->osr_ast_id().ToInt());
+    } else {
+      PrintF(" for concurrent optimization.\n");
+    }
+  }
+  return true;
+}
+
+
 MaybeHandle<Code> Compiler::GetUnoptimizedCode(Handle<JSFunction> function) {
   DCHECK(!function->GetIsolate()->has_pending_exception());
   DCHECK(!function->is_compiled());
@@ -730,29 +838,14 @@
     VMState<COMPILER> state(info.isolate());
     PostponeInterruptsScope postpone(info.isolate());
 
-    if (FLAG_trace_opt) {
-      // TODO(titzer): record and report full stats here.
-      PrintF("[optimizing asm ");
-      function->ShortPrint();
-      PrintF("]\n");
-    }
-
-    if (!Parser::Parse(&info)) return MaybeHandle<Code>();
-    if (!Rewriter::Rewrite(&info)) return MaybeHandle<Code>();
-    if (!Scope::Analyze(&info)) return MaybeHandle<Code>();
-    if (FLAG_turbo_deoptimization && !EnsureDeoptimizationSupport(&info)) {
-      return MaybeHandle<Code>();
-    }
-
     info.SetOptimizing(BailoutId::None(),
                        Handle<Code>(function->shared()->code()));
 
     info.MarkAsContextSpecializing();
     info.MarkAsTypingEnabled();
     info.MarkAsInliningDisabled();
-    compiler::Pipeline pipeline(&info);
-    pipeline.GenerateCode();
-    if (!info.code().is_null()) return info.code();
+
+    if (GetOptimizedCodeNow(&info)) return info.code();
   }
 
   if (function->shared()->is_compiled()) {
@@ -1077,6 +1170,7 @@
     if (FLAG_serialize_toplevel &&
         compile_options == ScriptCompiler::kConsumeCodeCache &&
         !isolate->debug()->is_loaded()) {
+      HistogramTimerScope timer(isolate->counters()->compile_deserialize());
       return CodeSerializer::Deserialize(isolate, *cached_data, source);
     } else {
       maybe_result = compilation_cache->LookupScript(
@@ -1123,6 +1217,8 @@
       compilation_cache->PutScript(source, context, result);
       if (FLAG_serialize_toplevel &&
           compile_options == ScriptCompiler::kProduceCodeCache) {
+        HistogramTimerScope histogram_timer(
+            isolate->counters()->compile_serialize());
         *cached_data = CodeSerializer::Serialize(isolate, result, source);
         if (FLAG_profile_deserialization) {
           PrintF("[Compiling and serializing %d bytes took %0.3f ms]\n",
@@ -1209,118 +1305,6 @@
 }
 
 
-MUST_USE_RESULT static MaybeHandle<Code> GetCodeFromOptimizedCodeMap(
-    Handle<JSFunction> function,
-    BailoutId osr_ast_id) {
-  if (FLAG_cache_optimized_code) {
-    Handle<SharedFunctionInfo> shared(function->shared());
-    // Bound functions are not cached.
-    if (shared->bound()) return MaybeHandle<Code>();
-    DisallowHeapAllocation no_gc;
-    int index = shared->SearchOptimizedCodeMap(
-        function->context()->native_context(), osr_ast_id);
-    if (index > 0) {
-      if (FLAG_trace_opt) {
-        PrintF("[found optimized code for ");
-        function->ShortPrint();
-        if (!osr_ast_id.IsNone()) {
-          PrintF(" at OSR AST id %d", osr_ast_id.ToInt());
-        }
-        PrintF("]\n");
-      }
-      FixedArray* literals = shared->GetLiteralsFromOptimizedCodeMap(index);
-      if (literals != NULL) function->set_literals(literals);
-      return Handle<Code>(shared->GetCodeFromOptimizedCodeMap(index));
-    }
-  }
-  return MaybeHandle<Code>();
-}
-
-
-static void InsertCodeIntoOptimizedCodeMap(CompilationInfo* info) {
-  Handle<Code> code = info->code();
-  if (code->kind() != Code::OPTIMIZED_FUNCTION) return;  // Nothing to do.
-
-  // Context specialization folds-in the context, so no sharing can occur.
-  if (code->is_turbofanned() && info->is_context_specializing()) return;
-
-  // Cache optimized code.
-  if (FLAG_cache_optimized_code) {
-    Handle<JSFunction> function = info->closure();
-    Handle<SharedFunctionInfo> shared(function->shared());
-    // Do not cache bound functions.
-    if (shared->bound()) return;
-    Handle<FixedArray> literals(function->literals());
-    Handle<Context> native_context(function->context()->native_context());
-    SharedFunctionInfo::AddToOptimizedCodeMap(
-        shared, native_context, code, literals, info->osr_ast_id());
-  }
-}
-
-
-static bool CompileOptimizedPrologue(CompilationInfo* info) {
-  if (!Parser::Parse(info)) return false;
-  if (!Rewriter::Rewrite(info)) return false;
-  if (!Scope::Analyze(info)) return false;
-  DCHECK(info->scope() != NULL);
-  return true;
-}
-
-
-static bool GetOptimizedCodeNow(CompilationInfo* info) {
-  if (!CompileOptimizedPrologue(info)) return false;
-
-  TimerEventScope<TimerEventRecompileSynchronous> timer(info->isolate());
-
-  OptimizedCompileJob job(info);
-  if (job.CreateGraph() != OptimizedCompileJob::SUCCEEDED) return false;
-  if (job.OptimizeGraph() != OptimizedCompileJob::SUCCEEDED) return false;
-  if (job.GenerateCode() != OptimizedCompileJob::SUCCEEDED) return false;
-
-  // Success!
-  DCHECK(!info->isolate()->has_pending_exception());
-  InsertCodeIntoOptimizedCodeMap(info);
-  RecordFunctionCompilation(Logger::LAZY_COMPILE_TAG, info,
-                            info->shared_info());
-  return true;
-}
-
-
-static bool GetOptimizedCodeLater(CompilationInfo* info) {
-  Isolate* isolate = info->isolate();
-  if (!isolate->optimizing_compiler_thread()->IsQueueAvailable()) {
-    if (FLAG_trace_concurrent_recompilation) {
-      PrintF("  ** Compilation queue full, will retry optimizing ");
-      info->closure()->PrintName();
-      PrintF(" later.\n");
-    }
-    return false;
-  }
-
-  CompilationHandleScope handle_scope(info);
-  if (!CompileOptimizedPrologue(info)) return false;
-  info->SaveHandles();  // Copy handles to the compilation handle scope.
-
-  TimerEventScope<TimerEventRecompileSynchronous> timer(info->isolate());
-
-  OptimizedCompileJob* job = new(info->zone()) OptimizedCompileJob(info);
-  OptimizedCompileJob::Status status = job->CreateGraph();
-  if (status != OptimizedCompileJob::SUCCEEDED) return false;
-  isolate->optimizing_compiler_thread()->QueueForOptimization(job);
-
-  if (FLAG_trace_concurrent_recompilation) {
-    PrintF("  ** Queued ");
-     info->closure()->PrintName();
-    if (info->is_osr()) {
-      PrintF(" for concurrent OSR at %d.\n", info->osr_ast_id().ToInt());
-    } else {
-      PrintF(" for concurrent optimization.\n");
-    }
-  }
-  return true;
-}
-
-
 MaybeHandle<Code> Compiler::GetOptimizedCode(Handle<JSFunction> function,
                                              Handle<Code> current_code,
                                              ConcurrencyMode mode,
diff --git a/src/compiler/arm/code-generator-arm.cc b/src/compiler/arm/code-generator-arm.cc
index 4035682..c995d11 100644
--- a/src/compiler/arm/code-generator-arm.cc
+++ b/src/compiler/arm/code-generator-arm.cc
@@ -136,13 +136,8 @@
   ArmOperandConverter i(this, instr);
 
   switch (ArchOpcodeField::decode(instr->opcode())) {
-    case kArchCallAddress: {
-      DirectCEntryStub stub(isolate());
-      stub.GenerateCall(masm(), i.InputRegister(0));
-      DCHECK_EQ(LeaveCC, i.OutputSBit());
-      break;
-    }
     case kArchCallCodeObject: {
+      EnsureSpaceForLazyDeopt();
       if (instr->InputAt(0)->IsImmediate()) {
         __ Call(Handle<Code>::cast(i.InputHeapObject(0)),
                 RelocInfo::CODE_TARGET);
@@ -156,6 +151,7 @@
       break;
     }
     case kArchCallJSFunction: {
+      EnsureSpaceForLazyDeopt();
       Register func = i.InputRegister(0);
       if (FLAG_debug_code) {
         // Check the function's context matches the context argument.
@@ -169,13 +165,6 @@
       DCHECK_EQ(LeaveCC, i.OutputSBit());
       break;
     }
-    case kArchDrop: {
-      int words = MiscField::decode(instr->opcode());
-      __ Drop(words);
-      DCHECK_LT(0, words);
-      DCHECK_EQ(LeaveCC, i.OutputSBit());
-      break;
-    }
     case kArchJmp:
       __ b(code_->GetLabel(i.InputBlock(0)));
       DCHECK_EQ(LeaveCC, i.OutputSBit());
@@ -856,6 +845,27 @@
   // On 32-bit ARM we do not insert nops for inlined Smi code.
 }
 
+
+void CodeGenerator::EnsureSpaceForLazyDeopt() {
+  int space_needed = Deoptimizer::patch_size();
+  if (!linkage()->info()->IsStub()) {
+    // Ensure that we have enough space after the previous lazy-bailout
+    // instruction for patching the code here.
+    int current_pc = masm()->pc_offset();
+    if (current_pc < last_lazy_deopt_pc_ + space_needed) {
+      // Block literal pool emission for duration of padding.
+      v8::internal::Assembler::BlockConstPoolScope block_const_pool(masm());
+      int padding_size = last_lazy_deopt_pc_ + space_needed - current_pc;
+      DCHECK_EQ(0, padding_size % v8::internal::Assembler::kInstrSize);
+      while (padding_size > 0) {
+        __ nop();
+        padding_size -= v8::internal::Assembler::kInstrSize;
+      }
+    }
+  }
+  MarkLazyDeoptSite();
+}
+
 #undef __
 
 }  // namespace compiler
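
A quick arithmetic sketch of EnsureSpaceForLazyDeopt above, using hypothetical numbers (Deoptimizer::patch_size() is target-specific):

    int space_needed = 12;                  // assumed patch_size()
    int last_lazy_deopt_pc = 100;           // end of the previous lazy-deopt site
    int current_pc = 104;
    int padding_size = last_lazy_deopt_pc + space_needed - current_pc;  // == 8
    // 8 bytes / kInstrSize (4 on ARM) == two nops, guaranteeing the deopt
    // patcher never overwrites code belonging to the next call site.

The arm64 version below applies the same logic with kInstructionSize and an InstructionAccurateScope.
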
diff --git a/src/compiler/arm/instruction-selector-arm.cc b/src/compiler/arm/instruction-selector-arm.cc
index c645fb7..a37ebf2 100644
--- a/src/compiler/arm/instruction-selector-arm.cc
+++ b/src/compiler/arm/instruction-selector-arm.cc
@@ -68,10 +68,8 @@
       case kArmStrh:
         return value >= -255 && value <= 255;
 
-      case kArchCallAddress:
       case kArchCallCodeObject:
       case kArchCallJSFunction:
-      case kArchDrop:
       case kArchJmp:
       case kArchNop:
       case kArchRet:
@@ -803,9 +801,6 @@
       opcode = kArchCallCodeObject;
       break;
     }
-    case CallDescriptor::kCallAddress:
-      opcode = kArchCallAddress;
-      break;
     case CallDescriptor::kCallJSFunction:
       opcode = kArchCallJSFunction;
       break;
@@ -825,13 +820,6 @@
     DCHECK(continuation != NULL);
     call_instr->MarkAsControl();
   }
-
-  // Caller clean up of stack for C-style calls.
-  if (descriptor->kind() == CallDescriptor::kCallAddress &&
-      !buffer.pushed_nodes.empty()) {
-    DCHECK(deoptimization == NULL && continuation == NULL);
-    Emit(kArchDrop | MiscField::encode(buffer.pushed_nodes.size()), NULL);
-  }
 }
 
 
diff --git a/src/compiler/arm64/code-generator-arm64.cc b/src/compiler/arm64/code-generator-arm64.cc
index c8d5f26..4a9893f 100644
--- a/src/compiler/arm64/code-generator-arm64.cc
+++ b/src/compiler/arm64/code-generator-arm64.cc
@@ -131,12 +131,8 @@
   Arm64OperandConverter i(this, instr);
   InstructionCode opcode = instr->opcode();
   switch (ArchOpcodeField::decode(opcode)) {
-    case kArchCallAddress: {
-      DirectCEntryStub stub(isolate());
-      stub.GenerateCall(masm(), i.InputRegister(0));
-      break;
-    }
     case kArchCallCodeObject: {
+      EnsureSpaceForLazyDeopt();
       if (instr->InputAt(0)->IsImmediate()) {
         __ Call(Handle<Code>::cast(i.InputHeapObject(0)),
                 RelocInfo::CODE_TARGET);
@@ -149,6 +145,7 @@
       break;
     }
     case kArchCallJSFunction: {
+      EnsureSpaceForLazyDeopt();
       Register func = i.InputRegister(0);
       if (FLAG_debug_code) {
         // Check the function's context matches the context argument.
@@ -163,11 +160,6 @@
       AddSafepointAndDeopt(instr);
       break;
     }
-    case kArchDrop: {
-      int words = MiscField::decode(instr->opcode());
-      __ Drop(words);
-      break;
-    }
     case kArchJmp:
       __ B(code_->GetLabel(i.InputBlock(0)));
       break;
@@ -854,6 +846,29 @@
 
 void CodeGenerator::AddNopForSmiCodeInlining() { __ movz(xzr, 0); }
 
+
+void CodeGenerator::EnsureSpaceForLazyDeopt() {
+  int space_needed = Deoptimizer::patch_size();
+  if (!linkage()->info()->IsStub()) {
+    // Ensure that we have enough space after the previous lazy-bailout
+    // instruction for patching the code here.
+    intptr_t current_pc = masm()->pc_offset();
+
+    if (current_pc < (last_lazy_deopt_pc_ + space_needed)) {
+      intptr_t padding_size = last_lazy_deopt_pc_ + space_needed - current_pc;
+      DCHECK((padding_size % kInstructionSize) == 0);
+      InstructionAccurateScope instruction_accurate(
+          masm(), padding_size / kInstructionSize);
+
+      while (padding_size > 0) {
+        __ nop();
+        padding_size -= kInstructionSize;
+      }
+    }
+  }
+  MarkLazyDeoptSite();
+}
+
 #undef __
 
 }  // namespace compiler
diff --git a/src/compiler/arm64/instruction-selector-arm64-unittest.cc b/src/compiler/arm64/instruction-selector-arm64-unittest.cc
index 9903a52..b5562c2 100644
--- a/src/compiler/arm64/instruction-selector-arm64-unittest.cc
+++ b/src/compiler/arm64/instruction-selector-arm64-unittest.cc
@@ -32,6 +32,26 @@
 }
 
 
+// Helper to build Int32Constant or Int64Constant depending on the given
+// machine type.
+Node* BuildConstant(InstructionSelectorTest::StreamBuilder& m, MachineType type,
+                    int64_t value) {
+  switch (type) {
+    case kMachInt32:
+      return m.Int32Constant(value);
+      break;
+
+    case kMachInt64:
+      return m.Int64Constant(value);
+      break;
+
+    default:
+      UNIMPLEMENTED();
+  }
+  return NULL;
+}
+
+
 // ARM64 logical instructions.
 static const MachInst2 kLogicalInstructions[] = {
     {&RawMachineAssembler::Word32And, "Word32And", kArm64And32, kMachInt32},
@@ -286,13 +306,13 @@
   const MachineType type = dpi.machine_type;
   TRACED_FOREACH(int32_t, imm, kAddSubImmediates) {
     StreamBuilder m(this, type, type);
-    m.Return((m.*dpi.constructor)(m.Parameter(0), m.Int32Constant(imm)));
+    m.Return((m.*dpi.constructor)(m.Parameter(0), BuildConstant(m, type, imm)));
     Stream s = m.Build();
     ASSERT_EQ(1U, s.size());
     EXPECT_EQ(dpi.arch_opcode, s[0]->arch_opcode());
     ASSERT_EQ(2U, s[0]->InputCount());
     EXPECT_TRUE(s[0]->InputAt(1)->IsImmediate());
-    EXPECT_EQ(imm, s.ToInt32(s[0]->InputAt(1)));
+    EXPECT_EQ(imm, s.ToInt64(s[0]->InputAt(1)));
     EXPECT_EQ(1U, s[0]->OutputCount());
   }
 }
@@ -304,7 +324,7 @@
 
   TRACED_FOREACH(int32_t, imm, kAddSubImmediates) {
     StreamBuilder m(this, type, type);
-    m.Return((m.*dpi.constructor)(m.Int32Constant(imm), m.Parameter(0)));
+    m.Return((m.*dpi.constructor)(BuildConstant(m, type, imm), m.Parameter(0)));
     Stream s = m.Build();
 
     // Add can support an immediate on the left by commuting, but Sub can't
@@ -314,7 +334,7 @@
       EXPECT_EQ(dpi.arch_opcode, s[0]->arch_opcode());
       ASSERT_EQ(2U, s[0]->InputCount());
       EXPECT_TRUE(s[0]->InputAt(1)->IsImmediate());
-      EXPECT_EQ(imm, s.ToInt32(s[0]->InputAt(1)));
+      EXPECT_EQ(imm, s.ToInt64(s[0]->InputAt(1)));
       EXPECT_EQ(1U, s[0]->OutputCount());
     }
   }
@@ -1004,38 +1024,35 @@
 TEST_P(InstructionSelectorComparisonTest, WithImmediate) {
   const MachInst2 cmp = GetParam();
   const MachineType type = cmp.machine_type;
-  // TODO(all): Add support for testing 64-bit immediates.
-  if (type == kMachInt32) {
-    TRACED_FOREACH(int32_t, imm, kAddSubImmediates) {
-      // Compare with 0 are turned into tst instruction.
-      if (imm == 0) continue;
-      StreamBuilder m(this, type, type);
-      m.Return((m.*cmp.constructor)(m.Parameter(0), m.Int32Constant(imm)));
-      Stream s = m.Build();
-      ASSERT_EQ(1U, s.size());
-      EXPECT_EQ(cmp.arch_opcode, s[0]->arch_opcode());
-      ASSERT_EQ(2U, s[0]->InputCount());
-      ASSERT_EQ(InstructionOperand::IMMEDIATE, s[0]->InputAt(1)->kind());
-      EXPECT_EQ(imm, s.ToInt32(s[0]->InputAt(1)));
-      EXPECT_EQ(1U, s[0]->OutputCount());
-      EXPECT_EQ(kFlags_set, s[0]->flags_mode());
-      EXPECT_EQ(kEqual, s[0]->flags_condition());
-    }
-    TRACED_FOREACH(int32_t, imm, kAddSubImmediates) {
-      // Compare with 0 are turned into tst instruction.
-      if (imm == 0) continue;
-      StreamBuilder m(this, type, type);
-      m.Return((m.*cmp.constructor)(m.Int32Constant(imm), m.Parameter(0)));
-      Stream s = m.Build();
-      ASSERT_EQ(1U, s.size());
-      EXPECT_EQ(cmp.arch_opcode, s[0]->arch_opcode());
-      ASSERT_EQ(2U, s[0]->InputCount());
-      ASSERT_EQ(InstructionOperand::IMMEDIATE, s[0]->InputAt(1)->kind());
-      EXPECT_EQ(imm, s.ToInt32(s[0]->InputAt(1)));
-      EXPECT_EQ(1U, s[0]->OutputCount());
-      EXPECT_EQ(kFlags_set, s[0]->flags_mode());
-      EXPECT_EQ(kEqual, s[0]->flags_condition());
-    }
+  TRACED_FOREACH(int32_t, imm, kAddSubImmediates) {
+    // Comparisons with 0 are turned into tst instructions.
+    if (imm == 0) continue;
+    StreamBuilder m(this, type, type);
+    m.Return((m.*cmp.constructor)(m.Parameter(0), BuildConstant(m, type, imm)));
+    Stream s = m.Build();
+    ASSERT_EQ(1U, s.size());
+    EXPECT_EQ(cmp.arch_opcode, s[0]->arch_opcode());
+    ASSERT_EQ(2U, s[0]->InputCount());
+    ASSERT_EQ(InstructionOperand::IMMEDIATE, s[0]->InputAt(1)->kind());
+    EXPECT_EQ(imm, s.ToInt64(s[0]->InputAt(1)));
+    EXPECT_EQ(1U, s[0]->OutputCount());
+    EXPECT_EQ(kFlags_set, s[0]->flags_mode());
+    EXPECT_EQ(kEqual, s[0]->flags_condition());
+  }
+  TRACED_FOREACH(int32_t, imm, kAddSubImmediates) {
+    // Comparisons with 0 are turned into tst instructions.
+    if (imm == 0) continue;
+    StreamBuilder m(this, type, type);
+    m.Return((m.*cmp.constructor)(BuildConstant(m, type, imm), m.Parameter(0)));
+    Stream s = m.Build();
+    ASSERT_EQ(1U, s.size());
+    EXPECT_EQ(cmp.arch_opcode, s[0]->arch_opcode());
+    ASSERT_EQ(2U, s[0]->InputCount());
+    ASSERT_EQ(InstructionOperand::IMMEDIATE, s[0]->InputAt(1)->kind());
+    EXPECT_EQ(imm, s.ToInt64(s[0]->InputAt(1)));
+    EXPECT_EQ(1U, s[0]->OutputCount());
+    EXPECT_EQ(kFlags_set, s[0]->flags_mode());
+    EXPECT_EQ(kEqual, s[0]->flags_condition());
   }
 }
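BuildConstant and the switch to s.ToInt64() exist because the immediates under test must round-trip through constants of the operation's own width. The kAddSubImmediates table itself is shaped by the AArch64 encoding: add/sub accepts a 12-bit unsigned immediate, optionally shifted left by 12. A hedged sketch of that rule (the real selector asks the assembler rather than reimplementing this, and the function name below is made up for illustration):

    #include <cstdint>

    // AArch64 add/sub immediate rule: the value fits in 12 bits, or in
    // 12 bits after undoing a left shift by 12 (imm12 or imm12 << 12).
    bool IsAddSubImmediate(int64_t value) {
      if (value < 0) return false;                      // encoded unsigned
      if ((value & ~INT64_C(0xFFF)) == 0) return true;  // plain imm12
      return (value & ~(INT64_C(0xFFF) << 12)) == 0;    // shifted imm12
    }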
 
diff --git a/src/compiler/arm64/instruction-selector-arm64.cc b/src/compiler/arm64/instruction-selector-arm64.cc
index 7c88ee9..eac1ec6 100644
--- a/src/compiler/arm64/instruction-selector-arm64.cc
+++ b/src/compiler/arm64/instruction-selector-arm64.cc
@@ -37,9 +37,13 @@
   }
 
   bool CanBeImmediate(Node* node, ImmediateMode mode) {
-    Int32Matcher m(node);
-    if (!m.HasValue()) return false;
-    int64_t value = m.Value();
+    int64_t value;
+    if (node->opcode() == IrOpcode::kInt32Constant)
+      value = OpParameter<int32_t>(node);
+    else if (node->opcode() == IrOpcode::kInt64Constant)
+      value = OpParameter<int64_t>(node);
+    else
+      return false;
     unsigned ignored;
     switch (mode) {
       case kLogical32Imm:
@@ -107,11 +111,12 @@
 
 
 // Shared routine for multiple binary operations.
+template <typename Matcher>
 static void VisitBinop(InstructionSelector* selector, Node* node,
                        InstructionCode opcode, ImmediateMode operand_mode,
                        FlagsContinuation* cont) {
   Arm64OperandGenerator g(selector);
-  Int32BinopMatcher m(node);
+  Matcher m(node);
   InstructionOperand* inputs[4];
   size_t input_count = 0;
   InstructionOperand* outputs[2];
@@ -142,10 +147,11 @@
 
 
 // Shared routine for multiple binary operations.
+template <typename Matcher>
 static void VisitBinop(InstructionSelector* selector, Node* node,
                        ArchOpcode opcode, ImmediateMode operand_mode) {
   FlagsContinuation cont;
-  VisitBinop(selector, node, opcode, operand_mode, &cont);
+  VisitBinop<Matcher>(selector, node, opcode, operand_mode, &cont);
 }
 
 
@@ -262,22 +268,22 @@
 
 
 void InstructionSelector::VisitWord32And(Node* node) {
-  VisitBinop(this, node, kArm64And32, kLogical32Imm);
+  VisitBinop<Int32BinopMatcher>(this, node, kArm64And32, kLogical32Imm);
 }
 
 
 void InstructionSelector::VisitWord64And(Node* node) {
-  VisitBinop(this, node, kArm64And, kLogical64Imm);
+  VisitBinop<Int64BinopMatcher>(this, node, kArm64And, kLogical64Imm);
 }
 
 
 void InstructionSelector::VisitWord32Or(Node* node) {
-  VisitBinop(this, node, kArm64Or32, kLogical32Imm);
+  VisitBinop<Int32BinopMatcher>(this, node, kArm64Or32, kLogical32Imm);
 }
 
 
 void InstructionSelector::VisitWord64Or(Node* node) {
-  VisitBinop(this, node, kArm64Or, kLogical64Imm);
+  VisitBinop<Int64BinopMatcher>(this, node, kArm64Or, kLogical64Imm);
 }
 
 
@@ -287,7 +293,7 @@
   if (m.right().Is(-1)) {
     Emit(kArm64Not32, g.DefineAsRegister(node), g.UseRegister(m.left().node()));
   } else {
-    VisitBinop(this, node, kArm64Xor32, kLogical32Imm);
+    VisitBinop<Int32BinopMatcher>(this, node, kArm64Xor32, kLogical32Imm);
   }
 }
 
@@ -298,7 +304,7 @@
   if (m.right().Is(-1)) {
     Emit(kArm64Not, g.DefineAsRegister(node), g.UseRegister(m.left().node()));
   } else {
-    VisitBinop(this, node, kArm64Xor, kLogical32Imm);
+    VisitBinop<Int64BinopMatcher>(this, node, kArm64Xor, kLogical32Imm);
   }
 }
 
@@ -344,12 +350,12 @@
 
 
 void InstructionSelector::VisitInt32Add(Node* node) {
-  VisitBinop(this, node, kArm64Add32, kArithmeticImm);
+  VisitBinop<Int32BinopMatcher>(this, node, kArm64Add32, kArithmeticImm);
 }
 
 
 void InstructionSelector::VisitInt64Add(Node* node) {
-  VisitBinop(this, node, kArm64Add, kArithmeticImm);
+  VisitBinop<Int64BinopMatcher>(this, node, kArm64Add, kArithmeticImm);
 }
 
 
@@ -360,7 +366,7 @@
     Emit(kArm64Neg32, g.DefineAsRegister(node),
          g.UseRegister(m.right().node()));
   } else {
-    VisitBinop(this, node, kArm64Sub32, kArithmeticImm);
+    VisitBinop<Int32BinopMatcher>(this, node, kArm64Sub32, kArithmeticImm);
   }
 }
 
@@ -371,7 +377,7 @@
   if (m.left().Is(0)) {
     Emit(kArm64Neg, g.DefineAsRegister(node), g.UseRegister(m.right().node()));
   } else {
-    VisitBinop(this, node, kArm64Sub, kArithmeticImm);
+    VisitBinop<Int64BinopMatcher>(this, node, kArm64Sub, kArithmeticImm);
   }
 }
 
@@ -502,13 +508,13 @@
 
 void InstructionSelector::VisitInt32AddWithOverflow(Node* node,
                                                     FlagsContinuation* cont) {
-  VisitBinop(this, node, kArm64Add32, kArithmeticImm, cont);
+  VisitBinop<Int32BinopMatcher>(this, node, kArm64Add32, kArithmeticImm, cont);
 }
 
 
 void InstructionSelector::VisitInt32SubWithOverflow(Node* node,
                                                     FlagsContinuation* cont) {
-  VisitBinop(this, node, kArm64Sub32, kArithmeticImm, cont);
+  VisitBinop<Int32BinopMatcher>(this, node, kArm64Sub32, kArithmeticImm, cont);
 }
 
 
@@ -624,12 +630,8 @@
   InitializeCallBuffer(call, &buffer, true, false);
 
   // Push the arguments to the stack.
-  bool is_c_frame = descriptor->kind() == CallDescriptor::kCallAddress;
   bool pushed_count_uneven = buffer.pushed_nodes.size() & 1;
   int aligned_push_count = buffer.pushed_nodes.size();
-  if (is_c_frame && pushed_count_uneven) {
-    aligned_push_count++;
-  }
   // TODO(dcarney): claim and poke probably take small immediates,
   //                loop here or whatever.
   // Bump the stack pointer(s).
@@ -644,8 +646,7 @@
     // Emit the uneven pushes.
     if (pushed_count_uneven) {
       Node* input = buffer.pushed_nodes[slot];
-      ArchOpcode opcode = is_c_frame ? kArm64PokePairZero : kArm64Poke;
-      Emit(opcode | MiscField::encode(slot), NULL, g.UseRegister(input));
+      Emit(kArm64Poke | MiscField::encode(slot), NULL, g.UseRegister(input));
       slot--;
     }
     // Now all pushes can be done in pairs.
@@ -663,9 +664,6 @@
       opcode = kArchCallCodeObject;
       break;
     }
-    case CallDescriptor::kCallAddress:
-      opcode = kArchCallAddress;
-      break;
     case CallDescriptor::kCallJSFunction:
       opcode = kArchCallJSFunction;
       break;
@@ -685,12 +683,6 @@
     DCHECK(continuation != NULL);
     call_instr->MarkAsControl();
   }
-
-  // Caller clean up of stack for C-style calls.
-  if (is_c_frame && aligned_push_count > 0) {
-    DCHECK(deoptimization == NULL && continuation == NULL);
-    Emit(kArchDrop | MiscField::encode(aligned_push_count), NULL);
-  }
 }
 
 }  // namespace compiler
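The CanBeImmediate rewrite at the top of this file is the point of the whole template change: the old Int32Matcher only matched Int32Constant nodes, so 64-bit operations never saw their constants as immediate candidates. A self-contained sketch of the widened check (the enum and helper are illustrative stand-ins for the IrOpcode/OpParameter machinery):

    #include <cstdint>

    enum Opcode { kInt32Constant, kInt64Constant, kOther };

    // Extract a constant value from either width; non-constants are never
    // immediates. 32-bit payloads are sign-extended into the common int64_t.
    bool TryGetConstant(Opcode op, int64_t raw_bits, int64_t* out) {
      switch (op) {
        case kInt32Constant:
          *out = static_cast<int32_t>(raw_bits);
          return true;
        case kInt64Constant:
          *out = raw_bits;
          return true;
        default:
          return false;
      }
    }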
diff --git a/src/compiler/code-generator.cc b/src/compiler/code-generator.cc
index ae0e102..f22c479 100644
--- a/src/compiler/code-generator.cc
+++ b/src/compiler/code-generator.cc
@@ -21,7 +21,8 @@
       safepoints_(code->zone()),
       deoptimization_states_(code->zone()),
       deoptimization_literals_(code->zone()),
-      translations_(code->zone()) {}
+      translations_(code->zone()),
+      last_lazy_deopt_pc_(0) {}
 
 
 Handle<Code> CodeGenerator::GenerateCode() {
@@ -242,6 +243,7 @@
   }
 
   if (needs_frame_state) {
+    MarkLazyDeoptSite();
     // If the frame state is present, it starts at argument 1
     // (just after the code address).
     InstructionOperandConverter converter(this, instr);
@@ -387,8 +389,7 @@
             isolate()->factory()->NewNumberFromInt(constant.ToInt32());
         break;
       case Constant::kFloat64:
-        constant_object =
-            isolate()->factory()->NewHeapNumber(constant.ToFloat64());
+        constant_object = isolate()->factory()->NewNumber(constant.ToFloat64());
         break;
       case Constant::kHeapObject:
         constant_object = constant.ToHeapObject();
@@ -403,6 +404,11 @@
   }
 }
 
+
+void CodeGenerator::MarkLazyDeoptSite() {
+  last_lazy_deopt_pc_ = masm()->pc_offset();
+}
+
 #if !V8_TURBOFAN_BACKEND
 
 void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
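The NewHeapNumber to NewNumber switch in AddTranslationForOperand above is a canonicalization fix: NewNumber hands back a Smi when the double is a small integer, instead of always allocating a fresh HeapNumber. A rough model of when a double fits a Smi (the 31-bit range shown is the 32-bit-platform case, and FitsSmi is a hypothetical helper, not the factory's code):

    #include <cmath>
    #include <cstdint>

    // Whether a double can be represented as a 31-bit Smi rather than a
    // boxed HeapNumber.
    bool FitsSmi(double value) {
      if (!(value >= -(1 << 30) && value < (1 << 30))) return false;  // range
      int32_t i = static_cast<int32_t>(value);
      if (static_cast<double>(i) != value) return false;  // not integral
      if (i == 0 && std::signbit(value)) return false;    // -0.0 needs a box
      return true;
    }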
diff --git a/src/compiler/code-generator.h b/src/compiler/code-generator.h
index dfc98cd..ddc2f9a 100644
--- a/src/compiler/code-generator.h
+++ b/src/compiler/code-generator.h
@@ -98,8 +98,10 @@
   void AddTranslationForOperand(Translation* translation, Instruction* instr,
                                 InstructionOperand* op);
   void AddNopForSmiCodeInlining();
-  // ===========================================================================
+  void EnsureSpaceForLazyDeopt();
+  void MarkLazyDeoptSite();
 
+  // ===========================================================================
   struct DeoptimizationState : ZoneObject {
    public:
     BailoutId bailout_id() const { return bailout_id_; }
@@ -126,6 +128,7 @@
   ZoneDeque<DeoptimizationState*> deoptimization_states_;
   ZoneDeque<Handle<Object> > deoptimization_literals_;
   TranslationBuffer translations_;
+  int last_lazy_deopt_pc_;
 };
 
 }  // namespace compiler
diff --git a/src/compiler/common-operator-unittest.cc b/src/compiler/common-operator-unittest.cc
index 128f8dd..5001770 100644
--- a/src/compiler/common-operator-unittest.cc
+++ b/src/compiler/common-operator-unittest.cc
@@ -4,6 +4,8 @@
 
 #include "src/compiler/common-operator.h"
 
+#include <limits>
+
 #include "src/compiler/operator-properties-inl.h"
 #include "src/test/test-utils.h"
 
@@ -132,9 +134,26 @@
 
 const int kArguments[] = {1, 5, 6, 42, 100, 10000, kMaxInt};
 
+const float kFloat32Values[] = {
+    std::numeric_limits<float>::min(), -1.0f, -0.0f, 0.0f, 1.0f,
+    std::numeric_limits<float>::max()};
+
 }  // namespace
 
 
+TEST_F(CommonOperatorTest, Float32Constant) {
+  TRACED_FOREACH(float, value, kFloat32Values) {
+    const Operator* op = common()->Float32Constant(value);
+    EXPECT_FLOAT_EQ(value, OpParameter<float>(op));
+    EXPECT_EQ(0, OperatorProperties::GetValueInputCount(op));
+    EXPECT_EQ(0, OperatorProperties::GetTotalInputCount(op));
+    EXPECT_EQ(0, OperatorProperties::GetControlOutputCount(op));
+    EXPECT_EQ(0, OperatorProperties::GetEffectOutputCount(op));
+    EXPECT_EQ(1, OperatorProperties::GetValueOutputCount(op));
+  }
+}
+
+
 TEST_F(CommonOperatorTest, ValueEffect) {
   TRACED_FOREACH(int, arguments, kArguments) {
     const Operator* op = common()->ValueEffect(arguments);
diff --git a/src/compiler/common-operator.cc b/src/compiler/common-operator.cc
index 9034843..19792bd 100644
--- a/src/compiler/common-operator.cc
+++ b/src/compiler/common-operator.cc
@@ -137,6 +137,13 @@
 }
 
 
+const Operator* CommonOperatorBuilder::Float32Constant(volatile float value) {
+  return new (zone())
+      Operator1<float>(IrOpcode::kFloat32Constant, Operator::kPure, 0, 1,
+                       "Float32Constant", value);
+}
+
+
 const Operator* CommonOperatorBuilder::Float64Constant(volatile double value) {
   return new (zone())
       Operator1<double>(IrOpcode::kFloat64Constant, Operator::kPure, 0, 1,
diff --git a/src/compiler/common-operator.h b/src/compiler/common-operator.h
index 52c0af2..a3659ad 100644
--- a/src/compiler/common-operator.h
+++ b/src/compiler/common-operator.h
@@ -84,6 +84,7 @@
 
   const Operator* Int32Constant(int32_t);
   const Operator* Int64Constant(int64_t);
+  const Operator* Float32Constant(volatile float);
   const Operator* Float64Constant(volatile double);
   const Operator* ExternalConstant(const ExternalReference&);
   const Operator* NumberConstant(volatile double);
diff --git a/src/compiler/compiler.gyp b/src/compiler/compiler.gyp
index fc19168..ec5ec28 100644
--- a/src/compiler/compiler.gyp
+++ b/src/compiler/compiler.gyp
@@ -26,6 +26,7 @@
         'graph-unittest.h',
         'instruction-selector-unittest.cc',
         'instruction-selector-unittest.h',
+        'js-builtin-reducer-unittest.cc',
         'machine-operator-reducer-unittest.cc',
         'machine-operator-unittest.cc',
         'simplified-operator-reducer-unittest.cc',
diff --git a/src/compiler/graph-unittest.cc b/src/compiler/graph-unittest.cc
index f7faa6d..881c2cc 100644
--- a/src/compiler/graph-unittest.cc
+++ b/src/compiler/graph-unittest.cc
@@ -44,7 +44,12 @@
 }
 
 
-Node* GraphTest::Float64Constant(double value) {
+Node* GraphTest::Float32Constant(volatile float value) {
+  return graph()->NewNode(common()->Float32Constant(value));
+}
+
+
+Node* GraphTest::Float64Constant(volatile double value) {
   return graph()->NewNode(common()->Float64Constant(value));
 }
 
@@ -59,7 +64,7 @@
 }
 
 
-Node* GraphTest::NumberConstant(double value) {
+Node* GraphTest::NumberConstant(volatile double value) {
   return graph()->NewNode(common()->NumberConstant(value));
 }
 
@@ -664,6 +669,12 @@
 }
 
 
+Matcher<Node*> IsFloat32Constant(const Matcher<float>& value_matcher) {
+  return MakeMatcher(
+      new IsConstantMatcher<float>(IrOpcode::kFloat32Constant, value_matcher));
+}
+
+
 Matcher<Node*> IsFloat64Constant(const Matcher<double>& value_matcher) {
   return MakeMatcher(
       new IsConstantMatcher<double>(IrOpcode::kFloat64Constant, value_matcher));
@@ -732,6 +743,7 @@
     return MakeMatcher(                                                   \
         new IsBinopMatcher(IrOpcode::k##Name, lhs_matcher, rhs_matcher)); \
   }
+IS_BINOP_MATCHER(NumberLessThan)
 IS_BINOP_MATCHER(Word32And)
 IS_BINOP_MATCHER(Word32Sar)
 IS_BINOP_MATCHER(Word32Shl)
@@ -742,6 +754,7 @@
 IS_BINOP_MATCHER(Word64Shl)
 IS_BINOP_MATCHER(Word64Equal)
 IS_BINOP_MATCHER(Int32AddWithOverflow)
+IS_BINOP_MATCHER(Int32Mul)
 IS_BINOP_MATCHER(Uint32LessThanOrEqual)
 #undef IS_BINOP_MATCHER
 
diff --git a/src/compiler/graph-unittest.h b/src/compiler/graph-unittest.h
index 42e4dd9..39d3e15 100644
--- a/src/compiler/graph-unittest.h
+++ b/src/compiler/graph-unittest.h
@@ -31,10 +31,11 @@
 
  protected:
   Node* Parameter(int32_t index);
-  Node* Float64Constant(double value);
+  Node* Float32Constant(volatile float value);
+  Node* Float64Constant(volatile double value);
   Node* Int32Constant(int32_t value);
   Node* Int64Constant(int64_t value);
-  Node* NumberConstant(double value);
+  Node* NumberConstant(volatile double value);
   Node* HeapConstant(const Unique<HeapObject>& value);
   Node* FalseConstant();
   Node* TrueConstant();
@@ -65,6 +66,7 @@
     const Matcher<ExternalReference>& value_matcher);
 Matcher<Node*> IsHeapConstant(
     const Matcher<Unique<HeapObject> >& value_matcher);
+Matcher<Node*> IsFloat32Constant(const Matcher<float>& value_matcher);
 Matcher<Node*> IsFloat64Constant(const Matcher<double>& value_matcher);
 Matcher<Node*> IsInt32Constant(const Matcher<int32_t>& value_matcher);
 Matcher<Node*> IsInt64Constant(const Matcher<int64_t>& value_matcher);
@@ -83,6 +85,9 @@
                       const Matcher<Node*>& effect_matcher,
                       const Matcher<Node*>& control_matcher);
 
+Matcher<Node*> IsNumberLessThan(const Matcher<Node*>& lhs_matcher,
+                                const Matcher<Node*>& rhs_matcher);
+
 Matcher<Node*> IsLoad(const Matcher<LoadRepresentation>& rep_matcher,
                       const Matcher<Node*>& base_matcher,
                       const Matcher<Node*>& index_matcher,
@@ -114,6 +119,8 @@
                              const Matcher<Node*>& rhs_matcher);
 Matcher<Node*> IsInt32AddWithOverflow(const Matcher<Node*>& lhs_matcher,
                                       const Matcher<Node*>& rhs_matcher);
+Matcher<Node*> IsInt32Mul(const Matcher<Node*>& lhs_matcher,
+                          const Matcher<Node*>& rhs_matcher);
 Matcher<Node*> IsUint32LessThanOrEqual(const Matcher<Node*>& lhs_matcher,
                                        const Matcher<Node*>& rhs_matcher);
 Matcher<Node*> IsChangeFloat64ToInt32(const Matcher<Node*>& input_matcher);
diff --git a/src/compiler/ia32/code-generator-ia32.cc b/src/compiler/ia32/code-generator-ia32.cc
index c476eaa..200dcb6 100644
--- a/src/compiler/ia32/code-generator-ia32.cc
+++ b/src/compiler/ia32/code-generator-ia32.cc
@@ -111,16 +111,8 @@
   IA32OperandConverter i(this, instr);
 
   switch (ArchOpcodeField::decode(instr->opcode())) {
-    case kArchCallAddress:
-      if (HasImmediateInput(instr, 0)) {
-        // TODO(dcarney): wire up EXTERNAL_REFERENCE instead of RUNTIME_ENTRY.
-        __ call(reinterpret_cast<byte*>(i.InputInt32(0)),
-                RelocInfo::RUNTIME_ENTRY);
-      } else {
-        __ call(i.InputRegister(0));
-      }
-      break;
     case kArchCallCodeObject: {
+      EnsureSpaceForLazyDeopt();
       if (HasImmediateInput(instr, 0)) {
         Handle<Code> code = Handle<Code>::cast(i.InputHeapObject(0));
         __ call(code, RelocInfo::CODE_TARGET);
@@ -132,6 +124,7 @@
       break;
     }
     case kArchCallJSFunction: {
+      EnsureSpaceForLazyDeopt();
       Register func = i.InputRegister(0);
       if (FLAG_debug_code) {
         // Check the function's context matches the context argument.
@@ -142,11 +135,6 @@
       AddSafepointAndDeopt(instr);
       break;
     }
-    case kArchDrop: {
-      int words = MiscField::decode(instr->opcode());
-      __ add(esp, Immediate(kPointerSize * words));
-      break;
-    }
     case kArchJmp:
       __ jmp(code()->GetLabel(i.InputBlock(0)));
       break;
@@ -946,6 +934,21 @@
 
 void CodeGenerator::AddNopForSmiCodeInlining() { __ nop(); }
 
+
+void CodeGenerator::EnsureSpaceForLazyDeopt() {
+  int space_needed = Deoptimizer::patch_size();
+  if (!linkage()->info()->IsStub()) {
+    // Ensure that we have enough space after the previous lazy-bailout
+    // instruction for patching the code here.
+    int current_pc = masm()->pc_offset();
+    if (current_pc < last_lazy_deopt_pc_ + space_needed) {
+      int padding_size = last_lazy_deopt_pc_ + space_needed - current_pc;
+      __ Nop(padding_size);
+    }
+  }
+  MarkLazyDeoptSite();
+}
+
 #undef __
 
 }  // namespace compiler
diff --git a/src/compiler/ia32/instruction-selector-ia32.cc b/src/compiler/ia32/instruction-selector-ia32.cc
index 42702c1..ce8cb0f 100644
--- a/src/compiler/ia32/instruction-selector-ia32.cc
+++ b/src/compiler/ia32/instruction-selector-ia32.cc
@@ -531,9 +531,6 @@
       opcode = kArchCallCodeObject;
       break;
     }
-    case CallDescriptor::kCallAddress:
-      opcode = kArchCallAddress;
-      break;
     case CallDescriptor::kCallJSFunction:
       opcode = kArchCallJSFunction;
       break;
@@ -553,13 +550,6 @@
     DCHECK(continuation != NULL);
     call_instr->MarkAsControl();
   }
-
-  // Caller clean up of stack for C-style calls.
-  if (descriptor->kind() == CallDescriptor::kCallAddress &&
-      buffer.pushed_nodes.size() > 0) {
-    DCHECK(deoptimization == NULL && continuation == NULL);
-    Emit(kArchDrop | MiscField::encode(buffer.pushed_nodes.size()), NULL);
-  }
 }
 
 }  // namespace compiler
diff --git a/src/compiler/instruction-codes.h b/src/compiler/instruction-codes.h
index 669316a..2d921bd 100644
--- a/src/compiler/instruction-codes.h
+++ b/src/compiler/instruction-codes.h
@@ -29,10 +29,8 @@
 // Target-specific opcodes that specify which assembly sequence to emit.
 // Most opcodes specify a single instruction.
 #define ARCH_OPCODE_LIST(V) \
-  V(ArchCallAddress)        \
   V(ArchCallCodeObject)     \
   V(ArchCallJSFunction)     \
-  V(ArchDrop)               \
   V(ArchJmp)                \
   V(ArchNop)                \
   V(ArchRet)                \
diff --git a/src/compiler/instruction-selector-unittest.h b/src/compiler/instruction-selector-unittest.h
index c236853..4e12dab 100644
--- a/src/compiler/instruction-selector-unittest.h
+++ b/src/compiler/instruction-selector-unittest.h
@@ -147,6 +147,10 @@
       return ToConstant(operand).ToInt32();
     }
 
+    int64_t ToInt64(const InstructionOperand* operand) const {
+      return ToConstant(operand).ToInt64();
+    }
+
     int ToVreg(const InstructionOperand* operand) const {
       if (operand->IsConstant()) return operand->index();
       EXPECT_EQ(InstructionOperand::UNALLOCATED, operand->kind());
diff --git a/src/compiler/js-builtin-reducer-unittest.cc b/src/compiler/js-builtin-reducer-unittest.cc
new file mode 100644
index 0000000..557ce27
--- /dev/null
+++ b/src/compiler/js-builtin-reducer-unittest.cc
@@ -0,0 +1,157 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/graph-unittest.h"
+#include "src/compiler/js-builtin-reducer.h"
+#include "src/compiler/js-graph.h"
+#include "src/compiler/node-properties-inl.h"
+#include "src/compiler/typer.h"
+#include "testing/gmock-support.h"
+
+using testing::Capture;
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+class JSBuiltinReducerTest : public GraphTest {
+ public:
+  JSBuiltinReducerTest() : javascript_(zone()) {}
+
+ protected:
+  Reduction Reduce(Node* node) {
+    Typer typer(zone());
+    MachineOperatorBuilder machine;
+    JSGraph jsgraph(graph(), common(), javascript(), &typer, &machine);
+    JSBuiltinReducer reducer(&jsgraph);
+    return reducer.Reduce(node);
+  }
+
+  Node* Parameter(Type* t, int32_t index = 0) {
+    Node* n = graph()->NewNode(common()->Parameter(index), graph()->start());
+    NodeProperties::SetBounds(n, Bounds(Type::None(), t));
+    return n;
+  }
+
+  Node* UndefinedConstant() {
+    return HeapConstant(
+        Unique<HeapObject>::CreateImmovable(factory()->undefined_value()));
+  }
+
+  JSOperatorBuilder* javascript() { return &javascript_; }
+
+ private:
+  JSOperatorBuilder javascript_;
+};
+
+
+namespace {
+
+// TODO(mstarzinger): Find a common place and unify with test-js-typed-lowering.
+Type* const kNumberTypes[] = {
+    Type::UnsignedSmall(),   Type::OtherSignedSmall(), Type::OtherUnsigned31(),
+    Type::OtherUnsigned32(), Type::OtherSigned32(),    Type::SignedSmall(),
+    Type::Signed32(),        Type::Unsigned32(),       Type::Integral32(),
+    Type::MinusZero(),       Type::NaN(),              Type::OtherNumber(),
+    Type::OrderedNumber(),   Type::Number()};
+
+}  // namespace
+
+
+// -----------------------------------------------------------------------------
+// Math.max
+
+
+TEST_F(JSBuiltinReducerTest, MathMax0) {
+  Handle<JSFunction> f(isolate()->context()->math_max_fun());
+
+  Node* fun = HeapConstant(Unique<HeapObject>::CreateUninitialized(f));
+  Node* call = graph()->NewNode(javascript()->Call(2, NO_CALL_FUNCTION_FLAGS),
+                                fun, UndefinedConstant());
+  Reduction r = Reduce(call);
+
+  EXPECT_TRUE(r.Changed());
+  EXPECT_THAT(r.replacement(), IsNumberConstant(-V8_INFINITY));
+}
+
+
+TEST_F(JSBuiltinReducerTest, MathMax1) {
+  Handle<JSFunction> f(isolate()->context()->math_max_fun());
+
+  TRACED_FOREACH(Type*, t0, kNumberTypes) {
+    Node* p0 = Parameter(t0, 0);
+    Node* fun = HeapConstant(Unique<HeapObject>::CreateUninitialized(f));
+    Node* call = graph()->NewNode(javascript()->Call(3, NO_CALL_FUNCTION_FLAGS),
+                                  fun, UndefinedConstant(), p0);
+    Reduction r = Reduce(call);
+
+    EXPECT_TRUE(r.Changed());
+    EXPECT_THAT(r.replacement(), p0);
+  }
+}
+
+
+TEST_F(JSBuiltinReducerTest, MathMax2) {
+  Handle<JSFunction> f(isolate()->context()->math_max_fun());
+
+  TRACED_FOREACH(Type*, t0, kNumberTypes) {
+    TRACED_FOREACH(Type*, t1, kNumberTypes) {
+      Node* p0 = Parameter(t0, 0);
+      Node* p1 = Parameter(t1, 1);
+      Node* fun = HeapConstant(Unique<HeapObject>::CreateUninitialized(f));
+      Node* call =
+          graph()->NewNode(javascript()->Call(4, NO_CALL_FUNCTION_FLAGS), fun,
+                           UndefinedConstant(), p0, p1);
+      Reduction r = Reduce(call);
+
+      if (t0->Is(Type::Integral32()) && t1->Is(Type::Integral32())) {
+        Capture<Node*> branch;
+        EXPECT_TRUE(r.Changed());
+        EXPECT_THAT(
+            r.replacement(),
+            IsPhi(kMachNone, p1, p0,
+                  IsMerge(IsIfTrue(CaptureEq(&branch)),
+                          IsIfFalse(AllOf(CaptureEq(&branch),
+                                          IsBranch(IsNumberLessThan(p0, p1),
+                                                   graph()->start()))))));
+      } else {
+        EXPECT_FALSE(r.Changed());
+        EXPECT_EQ(IrOpcode::kJSCallFunction, call->opcode());
+      }
+    }
+  }
+}
+
+
+// -----------------------------------------------------------------------------
+// Math.imul
+
+
+TEST_F(JSBuiltinReducerTest, MathImul) {
+  Handle<JSFunction> f(isolate()->context()->math_imul_fun());
+
+  TRACED_FOREACH(Type*, t0, kNumberTypes) {
+    TRACED_FOREACH(Type*, t1, kNumberTypes) {
+      Node* p0 = Parameter(t0, 0);
+      Node* p1 = Parameter(t1, 1);
+      Node* fun = HeapConstant(Unique<HeapObject>::CreateUninitialized(f));
+      Node* call =
+          graph()->NewNode(javascript()->Call(4, NO_CALL_FUNCTION_FLAGS), fun,
+                           UndefinedConstant(), p0, p1);
+      Reduction r = Reduce(call);
+
+      if (t0->Is(Type::Integral32()) && t1->Is(Type::Integral32())) {
+        EXPECT_TRUE(r.Changed());
+        EXPECT_THAT(r.replacement(), IsInt32Mul(p0, p1));
+      } else {
+        EXPECT_FALSE(r.Changed());
+        EXPECT_EQ(IrOpcode::kJSCallFunction, call->opcode());
+      }
+    }
+  }
+}
+
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
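The matcher tree in MathMax2 is just the reducer's diamond read back: a NumberLessThan feeding a Branch, the two arms merged, and a Phi selecting the larger input. A scalar reading of the expected graph (illustrative, not test code):

    #include <cstdint>

    // Scalar reading of the expected IR:
    //   tag    = NumberLessThan(p0, p1)
    //   branch = Branch(tag, start)
    //   merge  = Merge(IfTrue(branch), IfFalse(branch))
    //   result = Phi(p1, p0, merge)        // p1 on the true arm
    int32_t ExpectedMathMax2(int32_t p0, int32_t p1) {
      return p0 < p1 ? p1 : p0;
    }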
diff --git a/src/compiler/js-builtin-reducer.cc b/src/compiler/js-builtin-reducer.cc
index 42becb3..83161e1 100644
--- a/src/compiler/js-builtin-reducer.cc
+++ b/src/compiler/js-builtin-reducer.cc
@@ -34,34 +34,49 @@
   // constant callee being a well-known builtin with a BuiltinFunctionId.
   bool HasBuiltinFunctionId() {
     if (node_->opcode() != IrOpcode::kJSCallFunction) return false;
-    HeapObjectMatcher<JSFunction> m(NodeProperties::GetValueInput(node_, 0));
-    return m.HasValue() && m.Value().handle()->shared()->HasBuiltinFunctionId();
+    HeapObjectMatcher<Object> m(NodeProperties::GetValueInput(node_, 0));
+    if (!m.HasValue() || !m.Value().handle()->IsJSFunction()) return false;
+    Handle<JSFunction> function = Handle<JSFunction>::cast(m.Value().handle());
+    return function->shared()->HasBuiltinFunctionId();
   }
 
   // Retrieves the BuiltinFunctionId as described above.
   BuiltinFunctionId GetBuiltinFunctionId() {
     DCHECK_EQ(IrOpcode::kJSCallFunction, node_->opcode());
-    HeapObjectMatcher<JSFunction> m(NodeProperties::GetValueInput(node_, 0));
-    return m.Value().handle()->shared()->builtin_function_id();
+    HeapObjectMatcher<Object> m(NodeProperties::GetValueInput(node_, 0));
+    Handle<JSFunction> function = Handle<JSFunction>::cast(m.Value().handle());
+    return function->shared()->builtin_function_id();
   }
 
+  // Determines whether the call takes zero inputs.
+  bool InputsMatchZero() { return GetJSCallArity() == 0; }
+
   // Determines whether the call takes one input of the given type.
-  bool InputsMatch(Type* t1) {
+  bool InputsMatchOne(Type* t1) {
     return GetJSCallArity() == 1 &&
            NodeProperties::GetBounds(GetJSCallInput(0)).upper->Is(t1);
   }
 
   // Determines whether the call takes two inputs of the given types.
-  bool InputsMatch(Type* t1, Type* t2) {
+  bool InputsMatchTwo(Type* t1, Type* t2) {
     return GetJSCallArity() == 2 &&
            NodeProperties::GetBounds(GetJSCallInput(0)).upper->Is(t1) &&
            NodeProperties::GetBounds(GetJSCallInput(1)).upper->Is(t2);
   }
 
+  // Determines whether all of the call's inputs are of the given type.
+  bool InputsMatchAll(Type* t) {
+    for (int i = 0; i < GetJSCallArity(); i++) {
+      if (!NodeProperties::GetBounds(GetJSCallInput(i)).upper->Is(t)) {
+        return false;
+      }
+    }
+    return true;
+  }
+
   Node* left() { return GetJSCallInput(0); }
   Node* right() { return GetJSCallInput(1); }
 
- protected:
   int GetJSCallArity() {
     DCHECK_EQ(IrOpcode::kJSCallFunction, node_->opcode());
     // Skip first (i.e. callee) and second (i.e. receiver) operand.
@@ -80,10 +95,42 @@
 };
 
 
+// ECMA-262, section 15.8.2.11.
+Reduction JSBuiltinReducer::ReduceMathMax(Node* node) {
+  JSCallReduction r(node);
+  if (r.InputsMatchZero()) {
+    // Math.max() -> -Infinity
+    return Replace(jsgraph()->Constant(-V8_INFINITY));
+  }
+  if (r.InputsMatchOne(Type::Number())) {
+    // Math.max(a:number) -> a
+    return Replace(r.left());
+  }
+  if (r.InputsMatchAll(Type::Integral32())) {
+    // Math.max(a:int32, b:int32, ...)
+    Node* value = r.GetJSCallInput(0);
+    for (int i = 1; i < r.GetJSCallArity(); i++) {
+      Node* p = r.GetJSCallInput(i);
+      Node* control = graph()->start();
+      Node* tag = graph()->NewNode(simplified()->NumberLessThan(), value, p);
+
+      Node* branch = graph()->NewNode(common()->Branch(), tag, control);
+      Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
+      Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
+      Node* merge = graph()->NewNode(common()->Merge(2), if_true, if_false);
+
+      value = graph()->NewNode(common()->Phi(kMachNone, 2), p, value, merge);
+    }
+    return Replace(value);
+  }
+  return NoChange();
+}
+
+
 // ES6 draft 08-24-14, section 20.2.2.19.
 Reduction JSBuiltinReducer::ReduceMathImul(Node* node) {
   JSCallReduction r(node);
-  if (r.InputsMatch(Type::Integral32(), Type::Integral32())) {
+  if (r.InputsMatchTwo(Type::Integral32(), Type::Integral32())) {
     // Math.imul(a:int32, b:int32) -> Int32Mul(a, b)
     Node* value = graph()->NewNode(machine()->Int32Mul(), r.left(), r.right());
     return Replace(value);
@@ -98,6 +145,8 @@
   // Dispatch according to the BuiltinFunctionId if present.
   if (!r.HasBuiltinFunctionId()) return NoChange();
   switch (r.GetBuiltinFunctionId()) {
+    case kMathMax:
+      return ReplaceWithPureReduction(node, ReduceMathMax(node));
     case kMathImul:
       return ReplaceWithPureReduction(node, ReduceMathImul(node));
     default:
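For arities above two, the loop in ReduceMathMax chains one diamond per extra argument, i.e. a left fold of the two-input maximum. Its scalar equivalent, for orientation (a sketch; MathMaxN is a made-up name):

    #include <cstdint>

    // Math.max(a0, ..., an-1) as built by the reducer: fold the Branch/Phi
    // diamond over the argument list, one diamond per step.
    int32_t MathMaxN(const int32_t* args, int n) {
      int32_t value = args[0];          // r.GetJSCallInput(0)
      for (int i = 1; i < n; i++) {
        int32_t p = args[i];
        value = value < p ? p : value;  // one NumberLessThan + Phi
      }
      return value;
    }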
diff --git a/src/compiler/js-builtin-reducer.h b/src/compiler/js-builtin-reducer.h
index bbff3ef..92c7b4a 100644
--- a/src/compiler/js-builtin-reducer.h
+++ b/src/compiler/js-builtin-reducer.h
@@ -24,11 +24,13 @@
   virtual Reduction Reduce(Node* node) OVERRIDE;
 
  private:
-  Graph* graph() { return jsgraph_->graph(); }
-  CommonOperatorBuilder* common() { return jsgraph_->common(); }
-  MachineOperatorBuilder* machine() { return jsgraph_->machine(); }
+  JSGraph* jsgraph() const { return jsgraph_; }
+  Graph* graph() const { return jsgraph_->graph(); }
+  CommonOperatorBuilder* common() const { return jsgraph_->common(); }
+  MachineOperatorBuilder* machine() const { return jsgraph_->machine(); }
   SimplifiedOperatorBuilder* simplified() { return &simplified_; }
 
+  Reduction ReduceMathMax(Node* node);
   Reduction ReduceMathImul(Node* node);
 
   JSGraph* jsgraph_;
diff --git a/src/compiler/js-graph.h b/src/compiler/js-graph.h
index a907bc5..2b2dfd1 100644
--- a/src/compiler/js-graph.h
+++ b/src/compiler/js-graph.h
@@ -65,6 +65,9 @@
 
  // Creates an Int32Constant node, usually canonicalized.
   Node* Int32Constant(int32_t value);
+  Node* Uint32Constant(uint32_t value) {
+    return Int32Constant(bit_cast<int32_t>(value));
+  }
 
   // Creates a Float64Constant node, usually canonicalized.
   Node* Float64Constant(double value);
@@ -109,6 +112,7 @@
 
   Factory* factory() { return isolate()->factory(); }
 };
+
 }  // namespace compiler
 }  // namespace internal
 }  // namespace v8
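Uint32Constant reuses the canonicalized Int32Constant cache by reinterpreting bits rather than converting values; bit_cast is the safe spelling of that reinterpretation. A sketch of what it does (the memcpy form is the portable equivalent):

    #include <cstdint>
    #include <cstring>

    // memcpy-based bit_cast: 0x80000000u becomes INT32_MIN, same bit pattern.
    int32_t BitCastUint32ToInt32(uint32_t v) {
      int32_t out;
      std::memcpy(&out, &v, sizeof out);
      return out;
    }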
diff --git a/src/compiler/js-typed-lowering.cc b/src/compiler/js-typed-lowering.cc
index bc3ea82..130c5cb 100644
--- a/src/compiler/js-typed-lowering.cc
+++ b/src/compiler/js-typed-lowering.cc
@@ -571,13 +571,14 @@
   // TODO(mstarzinger): This lowering is not correct if:
   //   a) The typed array turns external (i.e. MaterializeArrayBuffer)
  //   b) The typed array or its buffer is neutered.
-  //   c) The index is out of bounds
   if (key_type->Is(Type::Integral32()) && base_type->IsConstant() &&
       base_type->AsConstant()->Value()->IsJSTypedArray()) {
     // JSStoreProperty(typed-array, int32, value)
     JSTypedArray* array = JSTypedArray::cast(*base_type->AsConstant()->Value());
     ElementsKind elements_kind = array->map()->elements_kind();
     ExternalArrayType type = array->type();
+    uint32_t length;
+    CHECK(array->length()->ToUint32(&length));
     ElementAccess element_access;
     Node* elements = graph()->NewNode(
         simplified()->LoadField(AccessBuilder::ForJSObjectElements()), base,
@@ -591,11 +592,24 @@
       DCHECK(IsFixedTypedArrayElementsKind(elements_kind));
       element_access = AccessBuilder::ForTypedArrayElement(type, false);
     }
-    Node* store =
-        graph()->NewNode(simplified()->StoreElement(element_access), elements,
-                         key, value, NodeProperties::GetEffectInput(node),
-                         NodeProperties::GetControlInput(node));
-    return ReplaceEagerly(node, store);
+
+    Node* check = graph()->NewNode(machine()->Uint32LessThan(), key,
+                                   jsgraph()->Uint32Constant(length));
+    Node* branch = graph()->NewNode(common()->Branch(), check,
+                                    NodeProperties::GetControlInput(node));
+
+    Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
+    Node* store = graph()->NewNode(
+        simplified()->StoreElement(element_access), elements, key, value,
+        NodeProperties::GetEffectInput(node), if_true);
+
+    Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
+
+    Node* merge = graph()->NewNode(common()->Merge(2), if_true, if_false);
+    Node* phi = graph()->NewNode(common()->EffectPhi(2), store,
+                                 NodeProperties::GetEffectInput(node), merge);
+
+    return ReplaceWith(phi);
   }
   return NoChange();
 }
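The new lowering addresses point c) of the TODO above: the store is guarded by Uint32LessThan(key, length), and because the compare is unsigned, a negative int32 key reads as a huge uint32 and fails the same test. Out-of-bounds stores fall through the false arm, and the EffectPhi keeps both arms in the effect chain. A scalar model, assuming a float32-backed array for concreteness (the helper name is illustrative):

    #include <cstdint>

    // Checked typed-array store: one unsigned compare covers both the
    // key < length and key >= 0 conditions.
    void StoreElementChecked(float* elements, uint32_t length, int32_t key,
                             float value) {
      if (static_cast<uint32_t>(key) < length) {       // Uint32LessThan
        elements[static_cast<uint32_t>(key)] = value;  // if_true: StoreElement
      }
      // if_false: no write; the EffectPhi merges the two paths
    }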
diff --git a/src/compiler/machine-operator-reducer-unittest.cc b/src/compiler/machine-operator-reducer-unittest.cc
index 616f5d4..f3073ab 100644
--- a/src/compiler/machine-operator-reducer-unittest.cc
+++ b/src/compiler/machine-operator-reducer-unittest.cc
@@ -46,6 +46,43 @@
 
 namespace {
 
+static const float kFloat32Values[] = {
+    -std::numeric_limits<float>::infinity(), -2.70497e+38f, -1.4698e+37f,
+    -1.22813e+35f,                           -1.20555e+35f, -1.34584e+34f,
+    -1.0079e+32f,                            -6.49364e+26f, -3.06077e+25f,
+    -1.46821e+25f,                           -1.17658e+23f, -1.9617e+22f,
+    -2.7357e+20f,                            -1.48708e+13f, -1.89633e+12f,
+    -4.66622e+11f,                           -2.22581e+11f, -1.45381e+10f,
+    -1.3956e+09f,                            -1.32951e+09f, -1.30721e+09f,
+    -1.19756e+09f,                           -9.26822e+08f, -6.35647e+08f,
+    -4.00037e+08f,                           -1.81227e+08f, -5.09256e+07f,
+    -964300.0f,                              -192446.0f,    -28455.0f,
+    -27194.0f,                               -26401.0f,     -20575.0f,
+    -17069.0f,                               -9167.0f,      -960.178f,
+    -113.0f,                                 -62.0f,        -15.0f,
+    -7.0f,                                   -0.0256635f,   -4.60374e-07f,
+    -3.63759e-10f,                           -4.30175e-14f, -5.27385e-15f,
+    -1.48084e-15f,                           -1.05755e-19f, -3.2995e-21f,
+    -1.67354e-23f,                           -1.11885e-23f, -1.78506e-30f,
+    -5.07594e-31f,                           -3.65799e-31f, -1.43718e-34f,
+    -1.27126e-38f,                           -0.0f,         0.0f,
+    1.17549e-38f,                            1.56657e-37f,  4.08512e-29f,
+    3.31357e-28f,                            6.25073e-22f,  4.1723e-13f,
+    1.44343e-09f,                            5.27004e-08f,  9.48298e-08f,
+    5.57888e-07f,                            4.89988e-05f,  0.244326f,
+    12.4895f,                                19.0f,         47.0f,
+    106.0f,                                  538.324f,      564.536f,
+    819.124f,                                7048.0f,       12611.0f,
+    19878.0f,                                20309.0f,      797056.0f,
+    1.77219e+09f,                            1.51116e+11f,  4.18193e+13f,
+    3.59167e+16f,                            3.38211e+19f,  2.67488e+20f,
+    1.78831e+21f,                            9.20914e+21f,  8.35654e+23f,
+    1.4495e+24f,                             5.94015e+25f,  4.43608e+30f,
+    2.44502e+33f,                            2.61152e+33f,  1.38178e+37f,
+    1.71306e+37f,                            3.31899e+38f,  3.40282e+38f,
+    std::numeric_limits<float>::infinity()};
+
+
 static const double kFloat64Values[] = {
     -V8_INFINITY,  -4.23878e+275, -5.82632e+265, -6.60355e+220, -6.26172e+212,
     -2.56222e+211, -4.82408e+201, -1.84106e+157, -1.63662e+127, -1.55772e+100,
@@ -165,7 +202,7 @@
 namespace {
 
 struct UnaryOperator {
-  const Operator* (MachineOperatorBuilder::*constructor)() const;
+  const Operator* (MachineOperatorBuilder::*constructor)();
   const char* constructor_name;
 };
 
@@ -206,6 +243,20 @@
 
 
 // -----------------------------------------------------------------------------
+// ChangeFloat32ToFloat64
+
+
+TEST_F(MachineOperatorReducerTest, ChangeFloat32ToFloat64WithConstant) {
+  TRACED_FOREACH(float, x, kFloat32Values) {
+    Reduction reduction = Reduce(graph()->NewNode(
+        machine()->ChangeFloat32ToFloat64(), Float32Constant(x)));
+    ASSERT_TRUE(reduction.Changed());
+    EXPECT_THAT(reduction.replacement(), IsFloat64Constant(x));
+  }
+}
+
+
+// -----------------------------------------------------------------------------
 // ChangeFloat64ToInt32
 
 
@@ -315,6 +366,31 @@
 
 
 // -----------------------------------------------------------------------------
+// TruncateFloat64ToFloat32
+
+
+TEST_F(MachineOperatorReducerTest,
+       TruncateFloat64ToFloat32WithChangeFloat32ToFloat64) {
+  Node* value = Parameter(0);
+  Reduction reduction = Reduce(graph()->NewNode(
+      machine()->TruncateFloat64ToFloat32(),
+      graph()->NewNode(machine()->ChangeFloat32ToFloat64(), value)));
+  ASSERT_TRUE(reduction.Changed());
+  EXPECT_EQ(value, reduction.replacement());
+}
+
+
+TEST_F(MachineOperatorReducerTest, TruncateFloat64ToFloat32WithConstant) {
+  TRACED_FOREACH(double, x, kFloat64Values) {
+    Reduction reduction = Reduce(graph()->NewNode(
+        machine()->TruncateFloat64ToFloat32(), Float64Constant(x)));
+    ASSERT_TRUE(reduction.Changed());
+    EXPECT_THAT(reduction.replacement(), IsFloat32Constant(DoubleToFloat32(x)));
+  }
+}
+
+
+// -----------------------------------------------------------------------------
 // TruncateFloat64ToInt32
 
 
diff --git a/src/compiler/machine-operator-reducer.cc b/src/compiler/machine-operator-reducer.cc
index 936deca..53ee810 100644
--- a/src/compiler/machine-operator-reducer.cc
+++ b/src/compiler/machine-operator-reducer.cc
@@ -21,6 +21,11 @@
 MachineOperatorReducer::~MachineOperatorReducer() {}
 
 
+Node* MachineOperatorReducer::Float32Constant(volatile float value) {
+  return graph()->NewNode(common()->Float32Constant(value));
+}
+
+
 Node* MachineOperatorReducer::Float64Constant(volatile double value) {
   return jsgraph()->Float64Constant(value);
 }
@@ -383,6 +388,11 @@
       }
       break;
     }
+    case IrOpcode::kChangeFloat32ToFloat64: {
+      Float32Matcher m(node->InputAt(0));
+      if (m.HasValue()) return ReplaceFloat64(m.Value());
+      break;
+    }
     case IrOpcode::kChangeFloat64ToInt32: {
       Float64Matcher m(node->InputAt(0));
       if (m.HasValue()) return ReplaceInt32(FastD2I(m.Value()));
@@ -427,6 +437,12 @@
       if (m.IsChangeInt32ToInt64()) return Replace(m.node()->InputAt(0));
       break;
     }
+    case IrOpcode::kTruncateFloat64ToFloat32: {
+      Float64Matcher m(node->InputAt(0));
+      if (m.HasValue()) return ReplaceFloat32(DoubleToFloat32(m.Value()));
+      if (m.IsChangeFloat32ToFloat64()) return Replace(m.node()->InputAt(0));
+      break;
+    }
     // TODO(turbofan): strength-reduce and fold floating point operations.
     default:
       break;
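Both new folds are value-preserving: every float32 is exactly representable as a float64, so ChangeFloat32ToFloat64 of a constant folds to the widened Float64Constant, and TruncateFloat64ToFloat32(ChangeFloat32ToFloat64(x)) gives back x bit-for-bit. A quick check of the round-trip:

    #include <cassert>

    int main() {
      float x = 0.1f;                            // inexact in binary, but...
      double widened = static_cast<double>(x);   // ChangeFloat32ToFloat64
      float back = static_cast<float>(widened);  // TruncateFloat64ToFloat32
      assert(back == x);  // exact round-trip, so the reducer may cancel the pair
      return 0;
    }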
diff --git a/src/compiler/machine-operator-reducer.h b/src/compiler/machine-operator-reducer.h
index 57fcdee..c79ceae 100644
--- a/src/compiler/machine-operator-reducer.h
+++ b/src/compiler/machine-operator-reducer.h
@@ -27,11 +27,15 @@
   virtual Reduction Reduce(Node* node) OVERRIDE;
 
  private:
+  Node* Float32Constant(volatile float value);
   Node* Float64Constant(volatile double value);
   Node* Int32Constant(int32_t value);
   Node* Int64Constant(int64_t value);
 
   Reduction ReplaceBool(bool value) { return ReplaceInt32(value ? 1 : 0); }
+  Reduction ReplaceFloat32(volatile float value) {
+    return Replace(Float32Constant(value));
+  }
   Reduction ReplaceFloat64(volatile double value) {
     return Replace(Float64Constant(value));
   }
diff --git a/src/compiler/machine-operator-unittest.cc b/src/compiler/machine-operator-unittest.cc
index 5842872..6aaf06f 100644
--- a/src/compiler/machine-operator-unittest.cc
+++ b/src/compiler/machine-operator-unittest.cc
@@ -169,7 +169,7 @@
 namespace {
 
 struct PureOperator {
-  const Operator* (MachineOperatorBuilder::*constructor)() const;
+  const Operator* (MachineOperatorBuilder::*constructor)();
   IrOpcode::Value opcode;
   int value_input_count;
   int value_output_count;
@@ -187,32 +187,33 @@
     &MachineOperatorBuilder::Name, IrOpcode::k##Name, input_count, \
         output_count                                               \
   }
-    PURE(Word32And, 2, 1),             PURE(Word32Or, 2, 1),
-    PURE(Word32Xor, 2, 1),             PURE(Word32Shl, 2, 1),
-    PURE(Word32Shr, 2, 1),             PURE(Word32Sar, 2, 1),
-    PURE(Word32Ror, 2, 1),             PURE(Word32Equal, 2, 1),
-    PURE(Word64And, 2, 1),             PURE(Word64Or, 2, 1),
-    PURE(Word64Xor, 2, 1),             PURE(Word64Shl, 2, 1),
-    PURE(Word64Shr, 2, 1),             PURE(Word64Sar, 2, 1),
-    PURE(Word64Ror, 2, 1),             PURE(Word64Equal, 2, 1),
-    PURE(Int32Add, 2, 1),              PURE(Int32AddWithOverflow, 2, 2),
-    PURE(Int32Sub, 2, 1),              PURE(Int32SubWithOverflow, 2, 2),
-    PURE(Int32Mul, 2, 1),              PURE(Int32Div, 2, 1),
-    PURE(Int32UDiv, 2, 1),             PURE(Int32Mod, 2, 1),
-    PURE(Int32UMod, 2, 1),             PURE(Int32LessThan, 2, 1),
-    PURE(Int32LessThanOrEqual, 2, 1),  PURE(Uint32LessThan, 2, 1),
-    PURE(Uint32LessThanOrEqual, 2, 1), PURE(Int64Add, 2, 1),
-    PURE(Int64Sub, 2, 1),              PURE(Int64Mul, 2, 1),
-    PURE(Int64Div, 2, 1),              PURE(Int64UDiv, 2, 1),
-    PURE(Int64Mod, 2, 1),              PURE(Int64UMod, 2, 1),
-    PURE(Int64LessThan, 2, 1),         PURE(Int64LessThanOrEqual, 2, 1),
-    PURE(ChangeFloat64ToInt32, 1, 1),  PURE(ChangeFloat64ToUint32, 1, 1),
-    PURE(ChangeInt32ToInt64, 1, 1),    PURE(ChangeUint32ToFloat64, 1, 1),
-    PURE(ChangeUint32ToUint64, 1, 1),  PURE(TruncateFloat64ToInt32, 1, 1),
-    PURE(TruncateInt64ToInt32, 1, 1),  PURE(Float64Add, 2, 1),
-    PURE(Float64Sub, 2, 1),            PURE(Float64Mul, 2, 1),
-    PURE(Float64Div, 2, 1),            PURE(Float64Mod, 2, 1),
-    PURE(Float64Equal, 2, 1),          PURE(Float64LessThan, 2, 1),
+    PURE(Word32And, 2, 1),                PURE(Word32Or, 2, 1),
+    PURE(Word32Xor, 2, 1),                PURE(Word32Shl, 2, 1),
+    PURE(Word32Shr, 2, 1),                PURE(Word32Sar, 2, 1),
+    PURE(Word32Ror, 2, 1),                PURE(Word32Equal, 2, 1),
+    PURE(Word64And, 2, 1),                PURE(Word64Or, 2, 1),
+    PURE(Word64Xor, 2, 1),                PURE(Word64Shl, 2, 1),
+    PURE(Word64Shr, 2, 1),                PURE(Word64Sar, 2, 1),
+    PURE(Word64Ror, 2, 1),                PURE(Word64Equal, 2, 1),
+    PURE(Int32Add, 2, 1),                 PURE(Int32AddWithOverflow, 2, 2),
+    PURE(Int32Sub, 2, 1),                 PURE(Int32SubWithOverflow, 2, 2),
+    PURE(Int32Mul, 2, 1),                 PURE(Int32Div, 2, 1),
+    PURE(Int32UDiv, 2, 1),                PURE(Int32Mod, 2, 1),
+    PURE(Int32UMod, 2, 1),                PURE(Int32LessThan, 2, 1),
+    PURE(Int32LessThanOrEqual, 2, 1),     PURE(Uint32LessThan, 2, 1),
+    PURE(Uint32LessThanOrEqual, 2, 1),    PURE(Int64Add, 2, 1),
+    PURE(Int64Sub, 2, 1),                 PURE(Int64Mul, 2, 1),
+    PURE(Int64Div, 2, 1),                 PURE(Int64UDiv, 2, 1),
+    PURE(Int64Mod, 2, 1),                 PURE(Int64UMod, 2, 1),
+    PURE(Int64LessThan, 2, 1),            PURE(Int64LessThanOrEqual, 2, 1),
+    PURE(ChangeFloat32ToFloat64, 1, 1),   PURE(ChangeFloat64ToInt32, 1, 1),
+    PURE(ChangeFloat64ToUint32, 1, 1),    PURE(ChangeInt32ToInt64, 1, 1),
+    PURE(ChangeUint32ToFloat64, 1, 1),    PURE(ChangeUint32ToUint64, 1, 1),
+    PURE(TruncateFloat64ToFloat32, 1, 1), PURE(TruncateFloat64ToInt32, 1, 1),
+    PURE(TruncateInt64ToInt32, 1, 1),     PURE(Float64Add, 2, 1),
+    PURE(Float64Sub, 2, 1),               PURE(Float64Mul, 2, 1),
+    PURE(Float64Div, 2, 1),               PURE(Float64Mod, 2, 1),
+    PURE(Float64Equal, 2, 1),             PURE(Float64LessThan, 2, 1),
     PURE(Float64LessThanOrEqual, 2, 1)
 #undef PURE
 };
diff --git a/src/compiler/machine-operator.cc b/src/compiler/machine-operator.cc
index 9dea037..eb3e948 100644
--- a/src/compiler/machine-operator.cc
+++ b/src/compiler/machine-operator.cc
@@ -97,12 +97,14 @@
   V(Int64UMod, Operator::kNoProperties, 2, 1)                                 \
   V(Int64LessThan, Operator::kNoProperties, 2, 1)                             \
   V(Int64LessThanOrEqual, Operator::kNoProperties, 2, 1)                      \
+  V(ChangeFloat32ToFloat64, Operator::kNoProperties, 1, 1)                    \
   V(ChangeFloat64ToInt32, Operator::kNoProperties, 1, 1)                      \
   V(ChangeFloat64ToUint32, Operator::kNoProperties, 1, 1)                     \
   V(ChangeInt32ToFloat64, Operator::kNoProperties, 1, 1)                      \
   V(ChangeInt32ToInt64, Operator::kNoProperties, 1, 1)                        \
   V(ChangeUint32ToFloat64, Operator::kNoProperties, 1, 1)                     \
   V(ChangeUint32ToUint64, Operator::kNoProperties, 1, 1)                      \
+  V(TruncateFloat64ToFloat32, Operator::kNoProperties, 1, 1)                  \
   V(TruncateFloat64ToInt32, Operator::kNoProperties, 1, 1)                    \
   V(TruncateInt64ToInt32, Operator::kNoProperties, 1, 1)                      \
   V(Float64Add, Operator::kCommutative, 2, 1)                                 \
@@ -194,14 +196,12 @@
 
 
 #define PURE(Name, properties, input_count, output_count) \
-  const Operator* MachineOperatorBuilder::Name() const {  \
-    return &impl_.k##Name;                                \
-  }
+  const Operator* MachineOperatorBuilder::Name() { return &impl_.k##Name; }
 PURE_OP_LIST(PURE)
 #undef PURE
 
 
-const Operator* MachineOperatorBuilder::Load(LoadRepresentation rep) const {
+const Operator* MachineOperatorBuilder::Load(LoadRepresentation rep) {
   switch (rep) {
 #define LOAD(Type) \
   case k##Type:    \
@@ -217,7 +217,7 @@
 }
 
 
-const Operator* MachineOperatorBuilder::Store(StoreRepresentation rep) const {
+const Operator* MachineOperatorBuilder::Store(StoreRepresentation rep) {
   switch (rep.machine_type()) {
 #define STORE(Type)                                     \
   case k##Type:                                         \
diff --git a/src/compiler/machine-operator.h b/src/compiler/machine-operator.h
index ec911f4..23b7ef6 100644
--- a/src/compiler/machine-operator.h
+++ b/src/compiler/machine-operator.h
@@ -62,84 +62,84 @@
  public:
   explicit MachineOperatorBuilder(MachineType word = kMachPtr);
 
-  const Operator* Word32And() const WARN_UNUSED_RESULT;
-  const Operator* Word32Or() const WARN_UNUSED_RESULT;
-  const Operator* Word32Xor() const WARN_UNUSED_RESULT;
-  const Operator* Word32Shl() const WARN_UNUSED_RESULT;
-  const Operator* Word32Shr() const WARN_UNUSED_RESULT;
-  const Operator* Word32Sar() const WARN_UNUSED_RESULT;
-  const Operator* Word32Ror() const WARN_UNUSED_RESULT;
-  const Operator* Word32Equal() const WARN_UNUSED_RESULT;
+  const Operator* Word32And();
+  const Operator* Word32Or();
+  const Operator* Word32Xor();
+  const Operator* Word32Shl();
+  const Operator* Word32Shr();
+  const Operator* Word32Sar();
+  const Operator* Word32Ror();
+  const Operator* Word32Equal();
 
-  const Operator* Word64And() const WARN_UNUSED_RESULT;
-  const Operator* Word64Or() const WARN_UNUSED_RESULT;
-  const Operator* Word64Xor() const WARN_UNUSED_RESULT;
-  const Operator* Word64Shl() const WARN_UNUSED_RESULT;
-  const Operator* Word64Shr() const WARN_UNUSED_RESULT;
-  const Operator* Word64Sar() const WARN_UNUSED_RESULT;
-  const Operator* Word64Ror() const WARN_UNUSED_RESULT;
-  const Operator* Word64Equal() const WARN_UNUSED_RESULT;
+  const Operator* Word64And();
+  const Operator* Word64Or();
+  const Operator* Word64Xor();
+  const Operator* Word64Shl();
+  const Operator* Word64Shr();
+  const Operator* Word64Sar();
+  const Operator* Word64Ror();
+  const Operator* Word64Equal();
 
-  const Operator* Int32Add() const WARN_UNUSED_RESULT;
-  const Operator* Int32AddWithOverflow() const WARN_UNUSED_RESULT;
-  const Operator* Int32Sub() const WARN_UNUSED_RESULT;
-  const Operator* Int32SubWithOverflow() const WARN_UNUSED_RESULT;
-  const Operator* Int32Mul() const WARN_UNUSED_RESULT;
-  const Operator* Int32Div() const WARN_UNUSED_RESULT;
-  const Operator* Int32UDiv() const WARN_UNUSED_RESULT;
-  const Operator* Int32Mod() const WARN_UNUSED_RESULT;
-  const Operator* Int32UMod() const WARN_UNUSED_RESULT;
-  const Operator* Int32LessThan() const WARN_UNUSED_RESULT;
-  const Operator* Int32LessThanOrEqual() const WARN_UNUSED_RESULT;
-  const Operator* Uint32LessThan() const WARN_UNUSED_RESULT;
-  const Operator* Uint32LessThanOrEqual() const WARN_UNUSED_RESULT;
+  const Operator* Int32Add();
+  const Operator* Int32AddWithOverflow();
+  const Operator* Int32Sub();
+  const Operator* Int32SubWithOverflow();
+  const Operator* Int32Mul();
+  const Operator* Int32Div();
+  const Operator* Int32UDiv();
+  const Operator* Int32Mod();
+  const Operator* Int32UMod();
+  const Operator* Int32LessThan();
+  const Operator* Int32LessThanOrEqual();
+  const Operator* Uint32LessThan();
+  const Operator* Uint32LessThanOrEqual();
 
-  const Operator* Int64Add() const WARN_UNUSED_RESULT;
-  const Operator* Int64Sub() const WARN_UNUSED_RESULT;
-  const Operator* Int64Mul() const WARN_UNUSED_RESULT;
-  const Operator* Int64Div() const WARN_UNUSED_RESULT;
-  const Operator* Int64UDiv() const WARN_UNUSED_RESULT;
-  const Operator* Int64Mod() const WARN_UNUSED_RESULT;
-  const Operator* Int64UMod() const WARN_UNUSED_RESULT;
-  const Operator* Int64LessThan() const WARN_UNUSED_RESULT;
-  const Operator* Int64LessThanOrEqual() const WARN_UNUSED_RESULT;
+  const Operator* Int64Add();
+  const Operator* Int64Sub();
+  const Operator* Int64Mul();
+  const Operator* Int64Div();
+  const Operator* Int64UDiv();
+  const Operator* Int64Mod();
+  const Operator* Int64UMod();
+  const Operator* Int64LessThan();
+  const Operator* Int64LessThanOrEqual();
 
-  // Convert representation of integers between float64 and int32/uint32.
-  // The precise rounding mode and handling of out of range inputs are *not*
-  // defined for these operators, since they are intended only for use with
-  // integers.
-  const Operator* ChangeInt32ToFloat64() const WARN_UNUSED_RESULT;
-  const Operator* ChangeUint32ToFloat64() const WARN_UNUSED_RESULT;
-  const Operator* ChangeFloat64ToInt32() const WARN_UNUSED_RESULT;
-  const Operator* ChangeFloat64ToUint32() const WARN_UNUSED_RESULT;
+  // These operators change the representation of numbers while preserving the
+  // value of the number. Narrowing operators assume the input is representable
+  // in the target type and are *not* defined for other inputs.
+  // Use narrowing change operators only when there is a static guarantee that
+  // the input value is representable in the target type.
+  const Operator* ChangeFloat32ToFloat64();
+  const Operator* ChangeFloat64ToInt32();   // narrowing
+  const Operator* ChangeFloat64ToUint32();  // narrowing
+  const Operator* ChangeInt32ToFloat64();
+  const Operator* ChangeInt32ToInt64();
+  const Operator* ChangeUint32ToFloat64();
+  const Operator* ChangeUint32ToUint64();
 
-  // Sign/zero extend int32/uint32 to int64/uint64.
-  const Operator* ChangeInt32ToInt64() const WARN_UNUSED_RESULT;
-  const Operator* ChangeUint32ToUint64() const WARN_UNUSED_RESULT;
-
-  // Truncate double to int32 using JavaScript semantics.
-  const Operator* TruncateFloat64ToInt32() const WARN_UNUSED_RESULT;
-
-  // Truncate the high order bits and convert the remaining bits to int32.
-  const Operator* TruncateInt64ToInt32() const WARN_UNUSED_RESULT;
+  // These operators truncate numbers, both changing the representation of
+  // the number and mapping multiple input values onto the same output value.
+  const Operator* TruncateFloat64ToFloat32();
+  const Operator* TruncateFloat64ToInt32();  // JavaScript semantics.
+  const Operator* TruncateInt64ToInt32();
 
   // Floating point operators always operate with IEEE 754 round-to-nearest.
-  const Operator* Float64Add() const WARN_UNUSED_RESULT;
-  const Operator* Float64Sub() const WARN_UNUSED_RESULT;
-  const Operator* Float64Mul() const WARN_UNUSED_RESULT;
-  const Operator* Float64Div() const WARN_UNUSED_RESULT;
-  const Operator* Float64Mod() const WARN_UNUSED_RESULT;
+  const Operator* Float64Add();
+  const Operator* Float64Sub();
+  const Operator* Float64Mul();
+  const Operator* Float64Div();
+  const Operator* Float64Mod();
 
   // Floating point comparisons complying to IEEE 754.
-  const Operator* Float64Equal() const WARN_UNUSED_RESULT;
-  const Operator* Float64LessThan() const WARN_UNUSED_RESULT;
-  const Operator* Float64LessThanOrEqual() const WARN_UNUSED_RESULT;
+  const Operator* Float64Equal();
+  const Operator* Float64LessThan();
+  const Operator* Float64LessThanOrEqual();
 
   // load [base + index]
-  const Operator* Load(LoadRepresentation rep) const WARN_UNUSED_RESULT;
+  const Operator* Load(LoadRepresentation rep);
 
   // store [base + index], value
-  const Operator* Store(StoreRepresentation rep) const WARN_UNUSED_RESULT;
+  const Operator* Store(StoreRepresentation rep);
 
   // Target machine word-size assumed by this builder.
   bool Is32() const { return word() == kRepWord32; }
@@ -167,7 +167,7 @@
   V(Int, LessThan)        \
   V(Int, LessThanOrEqual)
 #define PSEUDO_OP(Prefix, Suffix)                                \
-  const Operator* Prefix##Suffix() const {                       \
+  const Operator* Prefix##Suffix() {                             \
     return Is32() ? Prefix##32##Suffix() : Prefix##64##Suffix(); \
   }
   PSEUDO_OP_LIST(PSEUDO_OP)
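
Note on the machine-operator hunk above: the Change* operators alter only the
representation of a value, while the Truncate* operators may also collapse
many inputs onto one output; the PSEUDO_OP helpers simply dispatch to the 32-
or 64-bit variant based on the target word size. A minimal standalone sketch
of the contract difference (hypothetical helpers modelling the semantics, not
the V8 operators themselves; two's-complement wrapping assumed for ToInt32):

    #include <cmath>
    #include <cstdint>

    // Mirrors ChangeFloat64ToInt32: only valid when d is already an
    // exactly representable int32 (the "static guarantee" above).
    int32_t ChangeF64ToI32(double d) {
      return static_cast<int32_t>(d);
    }

    // Mirrors TruncateFloat64ToInt32 (JavaScript ToInt32): defined for
    // every double; truncates toward zero, then wraps modulo 2^32.
    int32_t TruncateF64ToI32(double d) {
      if (!std::isfinite(d)) return 0;  // NaN and +/-Inf map to 0
      double t = std::trunc(d);
      uint32_t u =
          static_cast<uint32_t>(std::fmod(std::fabs(t), 4294967296.0));
      if (t < 0) u = ~u + 1u;  // negate modulo 2^32
      return static_cast<int32_t>(u);
    }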
diff --git a/src/compiler/node-matchers.h b/src/compiler/node-matchers.h
index d73a926..e62eaee 100644
--- a/src/compiler/node-matchers.h
+++ b/src/compiler/node-matchers.h
@@ -90,6 +90,7 @@
   bool IsNaN() const { return this->HasValue() && std::isnan(this->Value()); }
 };
 
+typedef FloatMatcher<float, IrOpcode::kFloat32Constant> Float32Matcher;
 typedef FloatMatcher<double, IrOpcode::kFloat64Constant> Float64Matcher;
 typedef FloatMatcher<double, IrOpcode::kNumberConstant> NumberMatcher;
 
diff --git a/src/compiler/opcodes.h b/src/compiler/opcodes.h
index f295eac..dabf5c5 100644
--- a/src/compiler/opcodes.h
+++ b/src/compiler/opcodes.h
@@ -25,6 +25,7 @@
 #define LEAF_OP_LIST(V) \
   V(Int32Constant)      \
   V(Int64Constant)      \
+  V(Float32Constant)    \
   V(Float64Constant)    \
   V(ExternalConstant)   \
   V(NumberConstant)     \
@@ -161,62 +162,64 @@
   V(StoreElement)
 
 // Opcodes for Machine-level operators.
-#define MACHINE_OP_LIST(V)  \
-  V(Load)                   \
-  V(Store)                  \
-  V(Word32And)              \
-  V(Word32Or)               \
-  V(Word32Xor)              \
-  V(Word32Shl)              \
-  V(Word32Shr)              \
-  V(Word32Sar)              \
-  V(Word32Ror)              \
-  V(Word32Equal)            \
-  V(Word64And)              \
-  V(Word64Or)               \
-  V(Word64Xor)              \
-  V(Word64Shl)              \
-  V(Word64Shr)              \
-  V(Word64Sar)              \
-  V(Word64Ror)              \
-  V(Word64Equal)            \
-  V(Int32Add)               \
-  V(Int32AddWithOverflow)   \
-  V(Int32Sub)               \
-  V(Int32SubWithOverflow)   \
-  V(Int32Mul)               \
-  V(Int32Div)               \
-  V(Int32UDiv)              \
-  V(Int32Mod)               \
-  V(Int32UMod)              \
-  V(Int32LessThan)          \
-  V(Int32LessThanOrEqual)   \
-  V(Uint32LessThan)         \
-  V(Uint32LessThanOrEqual)  \
-  V(Int64Add)               \
-  V(Int64Sub)               \
-  V(Int64Mul)               \
-  V(Int64Div)               \
-  V(Int64UDiv)              \
-  V(Int64Mod)               \
-  V(Int64UMod)              \
-  V(Int64LessThan)          \
-  V(Int64LessThanOrEqual)   \
-  V(ChangeInt32ToFloat64)   \
-  V(ChangeUint32ToFloat64)  \
-  V(ChangeFloat64ToInt32)   \
-  V(ChangeFloat64ToUint32)  \
-  V(ChangeInt32ToInt64)     \
-  V(ChangeUint32ToUint64)   \
-  V(TruncateFloat64ToInt32) \
-  V(TruncateInt64ToInt32)   \
-  V(Float64Add)             \
-  V(Float64Sub)             \
-  V(Float64Mul)             \
-  V(Float64Div)             \
-  V(Float64Mod)             \
-  V(Float64Equal)           \
-  V(Float64LessThan)        \
+#define MACHINE_OP_LIST(V)    \
+  V(Load)                     \
+  V(Store)                    \
+  V(Word32And)                \
+  V(Word32Or)                 \
+  V(Word32Xor)                \
+  V(Word32Shl)                \
+  V(Word32Shr)                \
+  V(Word32Sar)                \
+  V(Word32Ror)                \
+  V(Word32Equal)              \
+  V(Word64And)                \
+  V(Word64Or)                 \
+  V(Word64Xor)                \
+  V(Word64Shl)                \
+  V(Word64Shr)                \
+  V(Word64Sar)                \
+  V(Word64Ror)                \
+  V(Word64Equal)              \
+  V(Int32Add)                 \
+  V(Int32AddWithOverflow)     \
+  V(Int32Sub)                 \
+  V(Int32SubWithOverflow)     \
+  V(Int32Mul)                 \
+  V(Int32Div)                 \
+  V(Int32UDiv)                \
+  V(Int32Mod)                 \
+  V(Int32UMod)                \
+  V(Int32LessThan)            \
+  V(Int32LessThanOrEqual)     \
+  V(Uint32LessThan)           \
+  V(Uint32LessThanOrEqual)    \
+  V(Int64Add)                 \
+  V(Int64Sub)                 \
+  V(Int64Mul)                 \
+  V(Int64Div)                 \
+  V(Int64UDiv)                \
+  V(Int64Mod)                 \
+  V(Int64UMod)                \
+  V(Int64LessThan)            \
+  V(Int64LessThanOrEqual)     \
+  V(ChangeFloat32ToFloat64)   \
+  V(ChangeFloat64ToInt32)     \
+  V(ChangeFloat64ToUint32)    \
+  V(ChangeInt32ToFloat64)     \
+  V(ChangeInt32ToInt64)       \
+  V(ChangeUint32ToFloat64)    \
+  V(ChangeUint32ToUint64)     \
+  V(TruncateFloat64ToFloat32) \
+  V(TruncateFloat64ToInt32)   \
+  V(TruncateInt64ToInt32)     \
+  V(Float64Add)               \
+  V(Float64Sub)               \
+  V(Float64Mul)               \
+  V(Float64Div)               \
+  V(Float64Mod)               \
+  V(Float64Equal)             \
+  V(Float64LessThan)          \
   V(Float64LessThanOrEqual)
 
 #define VALUE_OP_LIST(V) \
diff --git a/src/compiler/pipeline.cc b/src/compiler/pipeline.cc
index 1bd87b3..31d53e4 100644
--- a/src/compiler/pipeline.cc
+++ b/src/compiler/pipeline.cc
@@ -269,13 +269,14 @@
       SourcePositionTable::Scope pos(&source_positions,
                                      SourcePosition::Unknown());
       Linkage linkage(info());
-      ValueNumberingReducer vn_reducer(zone());
+      // TODO(turbofan): Value numbering disabled for now.
+      // ValueNumberingReducer vn_reducer(zone());
       SimplifiedOperatorReducer simple_reducer(&jsgraph);
       ChangeLowering lowering(&jsgraph, &linkage);
       MachineOperatorReducer mach_reducer(&jsgraph);
       GraphReducer graph_reducer(&graph);
       // TODO(titzer): Figure out if we should run all reducers at once here.
-      graph_reducer.AddReducer(&vn_reducer);
+      // graph_reducer.AddReducer(&vn_reducer);
       graph_reducer.AddReducer(&simple_reducer);
       graph_reducer.AddReducer(&lowering);
       graph_reducer.AddReducer(&mach_reducer);
diff --git a/src/compiler/raw-machine-assembler.h b/src/compiler/raw-machine-assembler.h
index 9cb1613..a4af55a 100644
--- a/src/compiler/raw-machine-assembler.h
+++ b/src/compiler/raw-machine-assembler.h
@@ -5,12 +5,6 @@
 #ifndef V8_COMPILER_RAW_MACHINE_ASSEMBLER_H_
 #define V8_COMPILER_RAW_MACHINE_ASSEMBLER_H_
 
-#ifdef USE_SIMULATOR
-#define MACHINE_ASSEMBLER_SUPPORTS_CALL_C 0
-#else
-#define MACHINE_ASSEMBLER_SUPPORTS_CALL_C 1
-#endif
-
 #include "src/v8.h"
 
 #include "src/compiler/common-operator.h"
@@ -375,21 +369,6 @@
     return NewNode(machine()->TruncateInt64ToInt32(), a);
   }
 
-#ifdef MACHINE_ASSEMBLER_SUPPORTS_CALL_C
-  // Call to C.
-  Node* CallC(Node* function_address, MachineType return_type,
-              MachineType* arg_types, Node** args, int n_args) {
-    CallDescriptor* descriptor =
-        Linkage::GetSimplifiedCDescriptor(zone(), machine_sig());
-    Node** passed_args = zone()->NewArray<Node*>(n_args + 1);
-    passed_args[0] = function_address;
-    for (int i = 0; i < n_args; ++i) {
-      passed_args[i + 1] = args[i];
-    }
-    return NewNode(common()->Call(descriptor), n_args + 1, passed_args);
-  }
-#endif
-
   // Parameters.
   Node* Parameter(size_t index);
 
diff --git a/src/compiler/typer.cc b/src/compiler/typer.cc
index c4af35e..bfecdef 100644
--- a/src/compiler/typer.cc
+++ b/src/compiler/typer.cc
@@ -255,6 +255,12 @@
 }
 
 
+Bounds Typer::Visitor::TypeFloat32Constant(Node* node) {
+  // TODO(titzer): only call Type::Of() if the type is not already known.
+  return Bounds(Type::Of(OpParameter<float>(node), zone()));
+}
+
+
 Bounds Typer::Visitor::TypeFloat64Constant(Node* node) {
   // TODO(titzer): only call Type::Of() if the type is not already known.
   return Bounds(Type::Of(OpParameter<double>(node), zone()));
diff --git a/src/compiler/x64/code-generator-x64.cc b/src/compiler/x64/code-generator-x64.cc
index cb10477..4d078b7 100644
--- a/src/compiler/x64/code-generator-x64.cc
+++ b/src/compiler/x64/code-generator-x64.cc
@@ -205,6 +205,7 @@
 
   switch (ArchOpcodeField::decode(instr->opcode())) {
     case kArchCallCodeObject: {
+      EnsureSpaceForLazyDeopt();
       if (HasImmediateInput(instr, 0)) {
         Handle<Code> code = Handle<Code>::cast(i.InputHeapObject(0));
         __ Call(code, RelocInfo::CODE_TARGET);
@@ -216,16 +217,8 @@
       AddSafepointAndDeopt(instr);
       break;
     }
-    case kArchCallAddress:
-      if (HasImmediateInput(instr, 0)) {
-        Immediate64 imm = i.InputImmediate64(0);
-        DCHECK_EQ(kImm64Value, imm.type);
-        __ Call(reinterpret_cast<byte*>(imm.value), RelocInfo::NONE64);
-      } else {
-        __ call(i.InputRegister(0));
-      }
-      break;
     case kArchCallJSFunction: {
+      EnsureSpaceForLazyDeopt();
       Register func = i.InputRegister(0);
       if (FLAG_debug_code) {
         // Check the function's context matches the context argument.
@@ -236,11 +229,6 @@
       AddSafepointAndDeopt(instr);
       break;
     }
-    case kArchDrop: {
-      int words = MiscField::decode(instr->opcode());
-      __ addq(rsp, Immediate(kPointerSize * words));
-      break;
-    }
     case kArchJmp:
       __ jmp(code_->GetLabel(i.InputBlock(0)));
       break;
@@ -1005,6 +993,21 @@
 
 void CodeGenerator::AddNopForSmiCodeInlining() { __ nop(); }
 
+
+void CodeGenerator::EnsureSpaceForLazyDeopt() {
+  int space_needed = Deoptimizer::patch_size();
+  if (!linkage()->info()->IsStub()) {
+    // Ensure that we have enough space after the previous lazy-bailout
+    // instruction for patching the code here.
+    int current_pc = masm()->pc_offset();
+    if (current_pc < last_lazy_deopt_pc_ + space_needed) {
+      int padding_size = last_lazy_deopt_pc_ + space_needed - current_pc;
+      __ Nop(padding_size);
+    }
+  }
+  MarkLazyDeoptSite();
+}
+
 #undef __
 
 }  // namespace internal
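
Note: EnsureSpaceForLazyDeopt pads with nops so that two lazy-deopt call
sites are never closer than Deoptimizer::patch_size() bytes, which would
otherwise let the deopt patcher overwrite the later call. Worked example
(numbers hypothetical): with patch_size() == 13, last_lazy_deopt_pc_ == 100
and pc_offset() == 108, padding_size == 100 + 13 - 108 == 5, so five nop
bytes are emitted before the call. The same arithmetic as a standalone
sketch:

    #include <algorithm>

    // Hypothetical standalone model of the padding computation above.
    int LazyDeoptPadding(int last_lazy_deopt_pc, int current_pc,
                         int patch_size) {
      return std::max(0, last_lazy_deopt_pc + patch_size - current_pc);
    }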
diff --git a/src/compiler/x64/instruction-selector-x64.cc b/src/compiler/x64/instruction-selector-x64.cc
index e041a74..96501e6 100644
--- a/src/compiler/x64/instruction-selector-x64.cc
+++ b/src/compiler/x64/instruction-selector-x64.cc
@@ -691,9 +691,6 @@
       opcode = kArchCallCodeObject;
       break;
     }
-    case CallDescriptor::kCallAddress:
-      opcode = kArchCallAddress;
-      break;
     case CallDescriptor::kCallJSFunction:
       opcode = kArchCallJSFunction;
       break;
@@ -713,15 +710,6 @@
     DCHECK(continuation != NULL);
     call_instr->MarkAsControl();
   }
-
-  // Caller clean up of stack for C-style calls.
-  if (descriptor->kind() == CallDescriptor::kCallAddress &&
-      !buffer.pushed_nodes.empty()) {
-    DCHECK(deoptimization == NULL && continuation == NULL);
-    Emit(kArchDrop |
-             MiscField::encode(static_cast<int>(buffer.pushed_nodes.size())),
-         NULL);
-  }
 }
 
 }  // namespace compiler
diff --git a/src/conversions-inl.h b/src/conversions-inl.h
index bac1576..ae87dc4 100644
--- a/src/conversions-inl.h
+++ b/src/conversions-inl.h
@@ -67,6 +67,14 @@
 }
 
 
+inline float DoubleToFloat32(double x) {
+  // TODO(yanggou): This static_cast is implementation-defined behaviour in C++,
+  // so we may need to do the conversion manually instead to match the spec.
+  volatile float f = static_cast<float>(x);
+  return f;
+}
+
+
 inline double DoubleToInteger(double x) {
   if (std::isnan(x)) return 0;
   if (!std::isfinite(x) || x == 0) return x;
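
Note: DoubleToFloat32 backs the new TruncateFloat64ToFloat32 operator and is
meant to match ECMA-262 ToFloat32 (Math.fround). On typical IEEE 754
hardware the static_cast rounds to the nearest float, ties to even, and the
volatile local keeps the compiler from carrying extra precision in a wider
register. A small standalone illustration (assumes IEEE 754 doubles and
floats):

    #include <cstdio>

    int main() {
      volatile float f = static_cast<float>(0.1);  // nearest float to 0.1
      // 0.1 is inexact in both formats, and the nearest float differs
      // from the nearest double, so this prints 0.
      std::printf("%d\n", static_cast<double>(f) == 0.1 ? 1 : 0);
      return 0;
    }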
diff --git a/src/conversions.h b/src/conversions.h
index 1b76ac5..6a28b5f 100644
--- a/src/conversions.h
+++ b/src/conversions.h
@@ -77,6 +77,10 @@
 }
 
 
+// This function should match the exact semantics of ECMA-262 20.2.2.17.
+inline float DoubleToFloat32(double x);
+
+
 // This function should match the exact semantics of ECMA-262 9.4.
 inline double DoubleToInteger(double x);
 
diff --git a/src/counters.h b/src/counters.h
index f97b9d2..651cf54 100644
--- a/src/counters.h
+++ b/src/counters.h
@@ -311,7 +311,11 @@
   HT(pre_parse, V8.PreParse)                                 \
   /* Total compilation times. */                             \
   HT(compile, V8.Compile)                                    \
-  HT(compile_eval, V8.CompileEval)
+  HT(compile_eval, V8.CompileEval)                           \
+  /* Serialization as part of compilation (code caching) */  \
+  HT(compile_serialize, V8.CompileSerialize)                 \
+  HT(compile_deserialize, V8.CompileDeserialize)
+
 
 #define HISTOGRAM_PERCENTAGE_LIST(HP)                                 \
   /* Heap fragmentation. */                                           \
diff --git a/src/deoptimizer.h b/src/deoptimizer.h
index a0961fc..612d5f6 100644
--- a/src/deoptimizer.h
+++ b/src/deoptimizer.h
@@ -101,20 +101,41 @@
 
   static const int kBailoutTypesWithCodeEntry = SOFT + 1;
 
+  struct Reason {
+    Reason(int r, const char* m, const char* d)
+        : raw_position(r), mnemonic(m), detail(d) {}
+
+    bool operator==(const Reason& other) const {
+      return raw_position == other.raw_position &&
+             CStringEquals(mnemonic, other.mnemonic) &&
+             CStringEquals(detail, other.detail);
+    }
+
+    bool operator!=(const Reason& other) const { return !(*this == other); }
+
+    int raw_position;
+    const char* mnemonic;
+    const char* detail;
+  };
+
   struct JumpTableEntry : public ZoneObject {
-    inline JumpTableEntry(Address entry, const char* the_mnemonic,
-                          const char* the_reason, Deoptimizer::BailoutType type,
-                          bool frame)
+    inline JumpTableEntry(Address entry, const Reason& the_reason,
+                          Deoptimizer::BailoutType type, bool frame)
         : label(),
           address(entry),
-          mnemonic(the_mnemonic),
           reason(the_reason),
           bailout_type(type),
           needs_frame(frame) {}
+
+    bool IsEquivalentTo(const JumpTableEntry& other) const {
+      return address == other.address && bailout_type == other.bailout_type &&
+             needs_frame == other.needs_frame &&
+             (!FLAG_trace_deopt || reason == other.reason);
+    }
+
     Label label;
     Address address;
-    const char* mnemonic;
-    const char* reason;
+    Reason reason;
     Deoptimizer::BailoutType bailout_type;
     bool needs_frame;
   };
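
Note: bundling raw position, mnemonic and detail into Deoptimizer::Reason
lets JumpTableEntry::IsEquivalentTo express the merge rule in one place:
with --trace-deopt off, consecutive deopts to the same address, bailout type
and frame requirement share one jump-table entry regardless of reason; with
tracing on, the reasons must match too, so each traced entry keeps an
accurate comment. The corresponding call-site pattern appears in the
lithium-codegen-ia32.cc hunk later in this patch:

    Deoptimizer::JumpTableEntry table_entry(entry, reason, bailout_type,
                                            !frame_is_built_);
    if (jump_table_.is_empty() ||
        !table_entry.IsEquivalentTo(jump_table_.last())) {
      jump_table_.Add(table_entry, zone());
    }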
diff --git a/src/elements-kind.h b/src/elements-kind.h
index b48a5df..fb97341 100644
--- a/src/elements-kind.h
+++ b/src/elements-kind.h
@@ -87,6 +87,11 @@
 }
 
 
+inline bool IsSloppyArgumentsElements(ElementsKind kind) {
+  return kind == SLOPPY_ARGUMENTS_ELEMENTS;
+}
+
+
 inline bool IsExternalArrayElementsKind(ElementsKind kind) {
   return kind >= FIRST_EXTERNAL_ARRAY_ELEMENTS_KIND &&
       kind <= LAST_EXTERNAL_ARRAY_ELEMENTS_KIND;
diff --git a/src/heap-snapshot-generator-inl.h b/src/heap-snapshot-generator-inl.h
index f7d87aa..3f7e622 100644
--- a/src/heap-snapshot-generator-inl.h
+++ b/src/heap-snapshot-generator-inl.h
@@ -43,25 +43,6 @@
 }
 
 
-SnapshotObjectId HeapObjectsMap::GetNthGcSubrootId(int delta) {
-  return kGcRootsFirstSubrootId + delta * kObjectIdStep;
-}
-
-
-HeapObject* V8HeapExplorer::GetNthGcSubrootObject(int delta) {
-  return reinterpret_cast<HeapObject*>(
-      reinterpret_cast<char*>(kFirstGcSubrootObject) +
-      delta * HeapObjectsMap::kObjectIdStep);
-}
-
-
-int V8HeapExplorer::GetGcSubrootOrder(HeapObject* subroot) {
-  return static_cast<int>(
-      (reinterpret_cast<char*>(subroot) -
-       reinterpret_cast<char*>(kFirstGcSubrootObject)) /
-      HeapObjectsMap::kObjectIdStep);
-}
-
 } }  // namespace v8::internal
 
 #endif  // V8_HEAP_SNAPSHOT_GENERATOR_INL_H_
diff --git a/src/heap-snapshot-generator.cc b/src/heap-snapshot-generator.cc
index cb9edaf..4a4c914 100644
--- a/src/heap-snapshot-generator.cc
+++ b/src/heap-snapshot-generator.cc
@@ -188,7 +188,6 @@
       uid_(uid),
       root_index_(HeapEntry::kNoEntry),
       gc_roots_index_(HeapEntry::kNoEntry),
-      natives_root_index_(HeapEntry::kNoEntry),
       max_snapshot_js_object_id_(0) {
   STATIC_ASSERT(
       sizeof(HeapGraphEdge) ==
@@ -217,6 +216,18 @@
 }
 
 
+void HeapSnapshot::AddSyntheticRootEntries() {
+  AddRootEntry();
+  AddGcRootsEntry();
+  SnapshotObjectId id = HeapObjectsMap::kGcRootsFirstSubrootId;
+  for (int tag = 0; tag < VisitorSynchronization::kNumberOfSyncTags; tag++) {
+    AddGcSubrootEntry(tag, id);
+    id += HeapObjectsMap::kObjectIdStep;
+  }
+  DCHECK(HeapObjectsMap::kFirstAvailableObjectId == id);
+}
+
+
 HeapEntry* HeapSnapshot::AddRootEntry() {
   DCHECK(root_index_ == HeapEntry::kNoEntry);
   DCHECK(entries_.is_empty());  // Root entry must be the first one.
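
Note: AddSyntheticRootEntries replaces the old scheme of faking HeapObject
pointers for the root, GC-roots and GC-subroot entries. Ids are now assigned
arithmetically: with kObjectIdStep == 2, sub-root tag t receives
kGcRootsFirstSubrootId + 2 * t, and the final DCHECK verifies that the
sequence ends exactly at kFirstAvailableObjectId, i.e. that
kFirstAvailableObjectId == kGcRootsFirstSubrootId +
kObjectIdStep * kNumberOfSyncTags.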
@@ -243,15 +254,11 @@
 }
 
 
-HeapEntry* HeapSnapshot::AddGcSubrootEntry(int tag) {
+HeapEntry* HeapSnapshot::AddGcSubrootEntry(int tag, SnapshotObjectId id) {
   DCHECK(gc_subroot_indexes_[tag] == HeapEntry::kNoEntry);
   DCHECK(0 <= tag && tag < VisitorSynchronization::kNumberOfSyncTags);
-  HeapEntry* entry = AddEntry(
-      HeapEntry::kSynthetic,
-      VisitorSynchronization::kTagNames[tag],
-      HeapObjectsMap::GetNthGcSubrootId(tag),
-      0,
-      0);
+  HeapEntry* entry = AddEntry(HeapEntry::kSynthetic,
+                              VisitorSynchronization::kTagNames[tag], id, 0, 0);
   gc_subroot_indexes_[tag] = entry->index();
   return entry;
 }
@@ -771,20 +778,6 @@
 }
 
 
-HeapObject* const V8HeapExplorer::kInternalRootObject =
-    reinterpret_cast<HeapObject*>(
-        static_cast<intptr_t>(HeapObjectsMap::kInternalRootObjectId));
-HeapObject* const V8HeapExplorer::kGcRootsObject =
-    reinterpret_cast<HeapObject*>(
-        static_cast<intptr_t>(HeapObjectsMap::kGcRootsObjectId));
-HeapObject* const V8HeapExplorer::kFirstGcSubrootObject =
-    reinterpret_cast<HeapObject*>(
-        static_cast<intptr_t>(HeapObjectsMap::kGcRootsFirstSubrootId));
-HeapObject* const V8HeapExplorer::kLastGcSubrootObject =
-    reinterpret_cast<HeapObject*>(
-        static_cast<intptr_t>(HeapObjectsMap::kFirstAvailableObjectId));
-
-
 V8HeapExplorer::V8HeapExplorer(
     HeapSnapshot* snapshot,
     SnapshottingProgressReportingInterface* progress,
@@ -809,16 +802,7 @@
 
 
 HeapEntry* V8HeapExplorer::AddEntry(HeapObject* object) {
-  if (object == kInternalRootObject) {
-    snapshot_->AddRootEntry();
-    return snapshot_->root();
-  } else if (object == kGcRootsObject) {
-    HeapEntry* entry = snapshot_->AddGcRootsEntry();
-    return entry;
-  } else if (object >= kFirstGcSubrootObject && object < kLastGcSubrootObject) {
-    HeapEntry* entry = snapshot_->AddGcSubrootEntry(GetGcSubrootOrder(object));
-    return entry;
-  } else if (object->IsJSFunction()) {
+  if (object->IsJSFunction()) {
     JSFunction* func = JSFunction::cast(object);
     SharedFunctionInfo* shared = func->shared();
     const char* name = shared->bound() ? "native_bind" :
@@ -965,41 +949,6 @@
 };
 
 
-class GcSubrootsEnumerator : public ObjectVisitor {
- public:
-  GcSubrootsEnumerator(
-      SnapshotFiller* filler, V8HeapExplorer* explorer)
-      : filler_(filler),
-        explorer_(explorer),
-        previous_object_count_(0),
-        object_count_(0) {
-  }
-  void VisitPointers(Object** start, Object** end) {
-    object_count_ += end - start;
-  }
-  void Synchronize(VisitorSynchronization::SyncTag tag) {
-    // Skip empty subroots.
-    if (previous_object_count_ != object_count_) {
-      previous_object_count_ = object_count_;
-      filler_->AddEntry(V8HeapExplorer::GetNthGcSubrootObject(tag), explorer_);
-    }
-  }
- private:
-  SnapshotFiller* filler_;
-  V8HeapExplorer* explorer_;
-  intptr_t previous_object_count_;
-  intptr_t object_count_;
-};
-
-
-void V8HeapExplorer::AddRootEntries(SnapshotFiller* filler) {
-  filler->AddEntry(kInternalRootObject, this);
-  filler->AddEntry(kGcRootsObject, this);
-  GcSubrootsEnumerator enumerator(filler, this);
-  heap_->IterateRoots(&enumerator, VISIT_ALL);
-}
-
-
 const char* V8HeapExplorer::GetSystemEntryName(HeapObject* object) {
   switch (object->map()->instance_type()) {
     case MAP_TYPE:
@@ -1855,9 +1804,6 @@
   void FillReferences(V8HeapExplorer* explorer) {
     DCHECK(strong_references_.length() <= all_references_.length());
     Builtins* builtins = heap_->isolate()->builtins();
-    for (int i = 0; i < reference_tags_.length(); ++i) {
-      explorer->SetGcRootsReference(reference_tags_[i].tag);
-    }
     int strong_index = 0, all_index = 0, tags_index = 0, builtin_index = 0;
     while (all_index < all_references_.length()) {
       bool is_strong = strong_index < strong_references_.length()
@@ -1900,10 +1846,15 @@
     SnapshotFiller* filler) {
   filler_ = filler;
 
+  // Create references to the synthetic roots.
+  SetRootGcRootsReference();
+  for (int tag = 0; tag < VisitorSynchronization::kNumberOfSyncTags; tag++) {
+    SetGcRootsReference(static_cast<VisitorSynchronization::SyncTag>(tag));
+  }
+
   // Make sure builtin code objects get their builtin tags
   // first. Otherwise a particular JSFunction object could set
   // its custom name to a generic builtin.
-  SetRootGcRootsReference();
   RootsReferencesExtractor extractor(heap_);
   heap_->IterateRoots(&extractor, VISIT_ONLY_STRONG);
   extractor.SetCollectingAllReferences();
@@ -2619,6 +2570,8 @@
   debug_heap->Verify();
 #endif
 
+  snapshot_->AddSyntheticRootEntries();
+
   if (!FillReferences()) return false;
 
   snapshot_->FillChildren();
@@ -2659,7 +2612,6 @@
 
 bool HeapSnapshotGenerator::FillReferences() {
   SnapshotFiller filler(snapshot_, &entries_);
-  v8_heap_explorer_.AddRootEntries(&filler);
   return v8_heap_explorer_.IterateAndExtractReferences(&filler)
       && dom_explorer_.IterateAndExtractReferences(&filler);
 }
diff --git a/src/heap-snapshot-generator.h b/src/heap-snapshot-generator.h
index a0d73bf..3e4ce71 100644
--- a/src/heap-snapshot-generator.h
+++ b/src/heap-snapshot-generator.h
@@ -100,7 +100,7 @@
   Type type() { return static_cast<Type>(type_); }
   const char* name() { return name_; }
   void set_name(const char* name) { name_ = name; }
-  inline SnapshotObjectId id() { return id_; }
+  SnapshotObjectId id() { return id_; }
   size_t self_size() { return self_size_; }
   unsigned trace_node_id() const { return trace_node_id_; }
   INLINE(int index() const);
@@ -154,7 +154,6 @@
   size_t RawSnapshotSize() const;
   HeapEntry* root() { return &entries_[root_index_]; }
   HeapEntry* gc_roots() { return &entries_[gc_roots_index_]; }
-  HeapEntry* natives_root() { return &entries_[natives_root_index_]; }
   HeapEntry* gc_subroot(int index) {
     return &entries_[gc_subroot_indexes_[index]];
   }
@@ -171,10 +170,7 @@
                       SnapshotObjectId id,
                       size_t size,
                       unsigned trace_node_id);
-  HeapEntry* AddRootEntry();
-  HeapEntry* AddGcRootsEntry();
-  HeapEntry* AddGcSubrootEntry(int tag);
-  HeapEntry* AddNativesRootEntry();
+  void AddSyntheticRootEntries();
   HeapEntry* GetEntryById(SnapshotObjectId id);
   List<HeapEntry*>* GetSortedEntriesList();
   void FillChildren();
@@ -183,12 +179,15 @@
   void PrintEntriesSize();
 
  private:
+  HeapEntry* AddRootEntry();
+  HeapEntry* AddGcRootsEntry();
+  HeapEntry* AddGcSubrootEntry(int tag, SnapshotObjectId id);
+
   HeapProfiler* profiler_;
   const char* title_;
   unsigned uid_;
   int root_index_;
   int gc_roots_index_;
-  int natives_root_index_;
   int gc_subroot_indexes_[VisitorSynchronization::kNumberOfSyncTags];
   List<HeapEntry> entries_;
   List<HeapGraphEdge> edges_;
@@ -223,12 +222,10 @@
   size_t GetUsedMemorySize() const;
 
   SnapshotObjectId GenerateId(v8::RetainedObjectInfo* info);
-  static inline SnapshotObjectId GetNthGcSubrootId(int delta);
 
   static const int kObjectIdStep = 2;
   static const SnapshotObjectId kInternalRootObjectId;
   static const SnapshotObjectId kGcRootsObjectId;
-  static const SnapshotObjectId kNativesRootObjectId;
   static const SnapshotObjectId kGcRootsFirstSubrootId;
   static const SnapshotObjectId kFirstAvailableObjectId;
 
@@ -348,8 +345,6 @@
 
   static String* GetConstructorName(JSObject* object);
 
-  static HeapObject* const kInternalRootObject;
-
  private:
   typedef bool (V8HeapExplorer::*ExtractReferencesMethod)(int entry,
                                                           HeapObject* object);
@@ -450,9 +445,6 @@
 
   HeapEntry* GetEntry(Object* obj);
 
-  static inline HeapObject* GetNthGcSubrootObject(int delta);
-  static inline int GetGcSubrootOrder(HeapObject* subroot);
-
   Heap* heap_;
   HeapSnapshot* snapshot_;
   StringsStorage* names_;
@@ -465,12 +457,7 @@
   HeapObjectsSet weak_containers_;
   v8::HeapProfiler::ObjectNameResolver* global_object_name_resolver_;
 
-  static HeapObject* const kGcRootsObject;
-  static HeapObject* const kFirstGcSubrootObject;
-  static HeapObject* const kLastGcSubrootObject;
-
   friend class IndexedReferencesExtractor;
-  friend class GcSubrootsEnumerator;
   friend class RootsReferencesExtractor;
 
   DISALLOW_COPY_AND_ASSIGN(V8HeapExplorer);
diff --git a/src/heap/gc-idle-time-handler.h b/src/heap/gc-idle-time-handler.h
index 473b474..daab616 100644
--- a/src/heap/gc-idle-time-handler.h
+++ b/src/heap/gc-idle-time-handler.h
@@ -108,7 +108,7 @@
 
   // Heap size threshold below which we prefer mark-compact over incremental
   // step.
-  static const size_t kSmallHeapSize = 2 * kPointerSize * MB;
+  static const size_t kSmallHeapSize = 4 * kPointerSize * MB;
 
   // That is the maximum idle time we will have during frame rendering.
   static const size_t kMaxFrameRenderingIdleTime = 16;
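
Note on the threshold change: kSmallHeapSize doubles. With kPointerSize == 4
(ia32) it moves from 2 * 4 MB = 8 MB to 4 * 4 MB = 16 MB; with
kPointerSize == 8 (x64) it moves from 16 MB to 32 MB. Idle-time GC on heaps
below this size now prefers a full mark-compact over an incremental step.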
diff --git a/src/hydrogen-instructions.h b/src/hydrogen-instructions.h
index ab57147..695c629 100644
--- a/src/hydrogen-instructions.h
+++ b/src/hydrogen-instructions.h
@@ -455,10 +455,10 @@
   // Offset from the start of the inlined function.
   typedef BitField<int, 9, 23> PositionField;
 
-  // On HPositionInfo can use this constructor.
   explicit HSourcePosition(int value) : value_(value) { }
 
   friend class HPositionInfo;
+  friend class LCodeGenBase;
 
   // If FLAG_hydrogen_track_positions is set contains bitfields InliningIdField
   // and PositionField.
diff --git a/src/hydrogen.cc b/src/hydrogen.cc
index e5a93a7..dfadbe5 100644
--- a/src/hydrogen.cc
+++ b/src/hydrogen.cc
@@ -4834,14 +4834,9 @@
   DCHECK(current_block() != NULL);
   DCHECK(current_block()->HasPredecessor());
 
-  // We only optimize switch statements with a bounded number of clauses.
-  const int kCaseClauseLimit = 128;
   ZoneList<CaseClause*>* clauses = stmt->cases();
   int clause_count = clauses->length();
   ZoneList<HBasicBlock*> body_blocks(clause_count, zone());
-  if (clause_count > kCaseClauseLimit) {
-    return Bailout(kSwitchStatementTooManyClauses);
-  }
 
   CHECK_ALIVE(VisitForValue(stmt->tag()));
   Add<HSimulate>(stmt->EntryId());
@@ -6436,7 +6431,7 @@
     HValue* key = environment()->ExpressionStackAt(1);
     HValue* object = environment()->ExpressionStackAt(2);
     bool has_side_effects = false;
-    HandleKeyedElementAccess(object, key, value, expr, return_id, STORE,
+    HandleKeyedElementAccess(object, key, value, expr, ast_id, return_id, STORE,
                              &has_side_effects);
     Drop(3);
     Push(value);
@@ -7129,7 +7124,7 @@
 
 
 HValue* HOptimizedGraphBuilder::HandleKeyedElementAccess(
-    HValue* obj, HValue* key, HValue* val, Expression* expr,
+    HValue* obj, HValue* key, HValue* val, Expression* expr, BailoutId ast_id,
     BailoutId return_id, PropertyAccessType access_type,
     bool* has_side_effects) {
   if (key->ActualValue()->IsConstant()) {
@@ -7143,7 +7138,7 @@
             Handle<String>::cast(constant));
       }
       HInstruction* instr =
-          BuildNamedAccess(access_type, expr->id(), return_id, expr, obj,
+          BuildNamedAccess(access_type, ast_id, return_id, expr, obj,
                            Handle<String>::cast(constant), val, false);
       if (instr == NULL || instr->IsLinked()) {
         *has_side_effects = false;
@@ -7365,7 +7360,7 @@
 
     bool has_side_effects = false;
     HValue* load = HandleKeyedElementAccess(
-        obj, key, NULL, expr, expr->LoadId(), LOAD, &has_side_effects);
+        obj, key, NULL, expr, ast_id, expr->LoadId(), LOAD, &has_side_effects);
     if (has_side_effects) {
       if (ast_context()->IsEffect()) {
         Add<HSimulate>(ast_id, REMOVABLE_SIMULATE);
diff --git a/src/hydrogen.h b/src/hydrogen.h
index 6b03170..d5e208f 100644
--- a/src/hydrogen.h
+++ b/src/hydrogen.h
@@ -2627,7 +2627,8 @@
                                          bool* has_side_effects);
 
   HValue* HandleKeyedElementAccess(HValue* obj, HValue* key, HValue* val,
-                                   Expression* expr, BailoutId return_id,
+                                   Expression* expr, BailoutId ast_id,
+                                   BailoutId return_id,
                                    PropertyAccessType access_type,
                                    bool* has_side_effects);
 
diff --git a/src/ia32/code-stubs-ia32.cc b/src/ia32/code-stubs-ia32.cc
index e9633a1..4e14b69 100644
--- a/src/ia32/code-stubs-ia32.cc
+++ b/src/ia32/code-stubs-ia32.cc
@@ -440,7 +440,9 @@
   if (exponent_type() != INTEGER) {
     Label fast_power, try_arithmetic_simplification;
     __ DoubleToI(exponent, double_exponent, double_scratch,
-                 TREAT_MINUS_ZERO_AS_ZERO, &try_arithmetic_simplification);
+                 TREAT_MINUS_ZERO_AS_ZERO, &try_arithmetic_simplification,
+                 &try_arithmetic_simplification,
+                 &try_arithmetic_simplification);
     __ jmp(&int_exponent);
 
     __ bind(&try_arithmetic_simplification);
@@ -3502,8 +3504,8 @@
   __ movzx_b(tmp1, FieldOperand(tmp1, Map::kInstanceTypeOffset));
   __ movzx_b(tmp2, FieldOperand(tmp2, Map::kInstanceTypeOffset));
 
-  __ JumpIfNotUniqueName(tmp1, &miss, Label::kNear);
-  __ JumpIfNotUniqueName(tmp2, &miss, Label::kNear);
+  __ JumpIfNotUniqueNameInstanceType(tmp1, &miss, Label::kNear);
+  __ JumpIfNotUniqueNameInstanceType(tmp2, &miss, Label::kNear);
 
   // Unique names are compared by identity.
   Label done;
@@ -3728,8 +3730,8 @@
 
     // Check if the entry name is not a unique name.
     __ mov(entity_name, FieldOperand(entity_name, HeapObject::kMapOffset));
-    __ JumpIfNotUniqueName(FieldOperand(entity_name, Map::kInstanceTypeOffset),
-                           miss);
+    __ JumpIfNotUniqueNameInstanceType(
+        FieldOperand(entity_name, Map::kInstanceTypeOffset), miss);
     __ bind(&good);
   }
 
@@ -3863,8 +3865,9 @@
 
       // Check if the entry name is not a unique name.
       __ mov(scratch, FieldOperand(scratch, HeapObject::kMapOffset));
-      __ JumpIfNotUniqueName(FieldOperand(scratch, Map::kInstanceTypeOffset),
-                             &maybe_in_dictionary);
+      __ JumpIfNotUniqueNameInstanceType(
+          FieldOperand(scratch, Map::kInstanceTypeOffset),
+          &maybe_in_dictionary);
     }
   }
 
diff --git a/src/ia32/lithium-codegen-ia32.cc b/src/ia32/lithium-codegen-ia32.cc
index 259fbf0..1d7c8c1 100644
--- a/src/ia32/lithium-codegen-ia32.cc
+++ b/src/ia32/lithium-codegen-ia32.cc
@@ -386,11 +386,7 @@
     Deoptimizer::JumpTableEntry* table_entry = &jump_table_[i];
     __ bind(&table_entry->label);
     Address entry = table_entry->address;
-    Deoptimizer::BailoutType type = table_entry->bailout_type;
-    int id = Deoptimizer::GetDeoptimizationId(isolate(), entry, type);
-    DCHECK_NE(Deoptimizer::kNotDeoptimizationEntry, id);
-    Comment(";;; jump table entry %d: deoptimization bailout %d.", i, id);
-    DeoptComment(table_entry->mnemonic, table_entry->reason);
+    DeoptComment(table_entry->reason);
     if (table_entry->needs_frame) {
       DCHECK(!info()->saves_caller_doubles());
       __ push(Immediate(ExternalReference::ForDeoptEntry(entry)));
@@ -825,7 +821,7 @@
 
 
 void LCodeGen::DeoptimizeIf(Condition cc, LInstruction* instr,
-                            const char* reason,
+                            const char* detail,
                             Deoptimizer::BailoutType bailout_type) {
   LEnvironment* environment = instr->environment();
   RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
@@ -867,19 +863,19 @@
     __ bind(&done);
   }
 
+  Deoptimizer::Reason reason(instr->hydrogen_value()->position().raw(),
+                             instr->Mnemonic(), detail);
   DCHECK(info()->IsStub() || frame_is_built_);
   if (cc == no_condition && frame_is_built_) {
-    DeoptComment(instr->Mnemonic(), reason);
+    DeoptComment(reason);
     __ call(entry, RelocInfo::RUNTIME_ENTRY);
   } else {
+    Deoptimizer::JumpTableEntry table_entry(entry, reason, bailout_type,
+                                            !frame_is_built_);
     // We often have several deopts to the same entry, reuse the last
     // jump entry if this is the case.
     if (jump_table_.is_empty() ||
-        jump_table_.last().address != entry ||
-        jump_table_.last().needs_frame != !frame_is_built_ ||
-        jump_table_.last().bailout_type != bailout_type) {
-      Deoptimizer::JumpTableEntry table_entry(entry, instr->Mnemonic(), reason,
-                                              bailout_type, !frame_is_built_);
+        !table_entry.IsEquivalentTo(jump_table_.last())) {
       jump_table_.Add(table_entry, zone());
     }
     if (cc == no_condition) {
@@ -892,11 +888,11 @@
 
 
 void LCodeGen::DeoptimizeIf(Condition cc, LInstruction* instr,
-                            const char* reason) {
+                            const char* detail) {
   Deoptimizer::BailoutType bailout_type = info()->IsStub()
       ? Deoptimizer::LAZY
       : Deoptimizer::EAGER;
-  DeoptimizeIf(cc, instr, reason, bailout_type);
+  DeoptimizeIf(cc, instr, detail, bailout_type);
 }
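
Note: the rename from `reason` to `detail` reflects the new Reason split:
the free-form string is only the detail part, while the mnemonic and raw
source position are filled in from the instruction itself when the
Deoptimizer::Reason is built. Every DeoptimizeIf call site below now passes
such a detail string, replacing the ad-hoc __ RecordComment(...) lines that
previously annotated some deopts. Typical shape after this change (taken
from the hunks that follow):

    if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
      DeoptimizeIf(zero, instr, "minus zero");
    }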
 
 
@@ -1125,7 +1121,7 @@
     __ and_(dividend, mask);
     __ neg(dividend);
     if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
-      DeoptimizeIf(zero, instr);
+      DeoptimizeIf(zero, instr, "minus zero");
     }
     __ jmp(&done, Label::kNear);
   }
@@ -1142,7 +1138,7 @@
   DCHECK(ToRegister(instr->result()).is(eax));
 
   if (divisor == 0) {
-    DeoptimizeIf(no_condition, instr);
+    DeoptimizeIf(no_condition, instr, "division by zero");
     return;
   }
 
@@ -1157,7 +1153,7 @@
     Label remainder_not_zero;
     __ j(not_zero, &remainder_not_zero, Label::kNear);
     __ cmp(dividend, Immediate(0));
-    DeoptimizeIf(less, instr);
+    DeoptimizeIf(less, instr, "minus zero");
     __ bind(&remainder_not_zero);
   }
 }
@@ -1179,7 +1175,7 @@
   // deopt in this case because we can't return a NaN.
   if (hmod->CheckFlag(HValue::kCanBeDivByZero)) {
     __ test(right_reg, Operand(right_reg));
-    DeoptimizeIf(zero, instr);
+    DeoptimizeIf(zero, instr, "division by zero");
   }
 
   // Check for kMinInt % -1, idiv would signal a divide error. We
@@ -1190,7 +1186,7 @@
     __ j(not_equal, &no_overflow_possible, Label::kNear);
     __ cmp(right_reg, -1);
     if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
-      DeoptimizeIf(equal, instr);
+      DeoptimizeIf(equal, instr, "minus zero");
     } else {
       __ j(not_equal, &no_overflow_possible, Label::kNear);
       __ Move(result_reg, Immediate(0));
@@ -1209,7 +1205,7 @@
     __ j(not_sign, &positive_left, Label::kNear);
     __ idiv(right_reg);
     __ test(result_reg, Operand(result_reg));
-    DeoptimizeIf(zero, instr);
+    DeoptimizeIf(zero, instr, "minus zero");
     __ jmp(&done, Label::kNear);
     __ bind(&positive_left);
   }
@@ -1229,19 +1225,19 @@
   HDiv* hdiv = instr->hydrogen();
   if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
     __ test(dividend, dividend);
-    DeoptimizeIf(zero, instr);
+    DeoptimizeIf(zero, instr, "minus zero");
   }
   // Check for (kMinInt / -1).
   if (hdiv->CheckFlag(HValue::kCanOverflow) && divisor == -1) {
     __ cmp(dividend, kMinInt);
-    DeoptimizeIf(zero, instr);
+    DeoptimizeIf(zero, instr, "overflow");
   }
   // Deoptimize if remainder will not be 0.
   if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32) &&
       divisor != 1 && divisor != -1) {
     int32_t mask = divisor < 0 ? -(divisor + 1) : (divisor - 1);
     __ test(dividend, Immediate(mask));
-    DeoptimizeIf(not_zero, instr);
+    DeoptimizeIf(not_zero, instr, "lost precision");
   }
   __ Move(result, dividend);
   int32_t shift = WhichPowerOf2Abs(divisor);
@@ -1262,7 +1258,7 @@
   DCHECK(ToRegister(instr->result()).is(edx));
 
   if (divisor == 0) {
-    DeoptimizeIf(no_condition, instr);
+    DeoptimizeIf(no_condition, instr, "division by zero");
     return;
   }
 
@@ -1270,7 +1266,7 @@
   HDiv* hdiv = instr->hydrogen();
   if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
     __ test(dividend, dividend);
-    DeoptimizeIf(zero, instr);
+    DeoptimizeIf(zero, instr, "minus zero");
   }
 
   __ TruncatingDiv(dividend, Abs(divisor));
@@ -1280,7 +1276,7 @@
     __ mov(eax, edx);
     __ imul(eax, eax, divisor);
     __ sub(eax, dividend);
-    DeoptimizeIf(not_equal, instr);
+    DeoptimizeIf(not_equal, instr, "lost precision");
   }
 }
 
@@ -1300,7 +1296,7 @@
   // Check for x / 0.
   if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) {
     __ test(divisor, divisor);
-    DeoptimizeIf(zero, instr);
+    DeoptimizeIf(zero, instr, "division by zero");
   }
 
   // Check for (0 / -x) that will produce negative zero.
@@ -1309,7 +1305,7 @@
     __ test(dividend, dividend);
     __ j(not_zero, &dividend_not_zero, Label::kNear);
     __ test(divisor, divisor);
-    DeoptimizeIf(sign, instr);
+    DeoptimizeIf(sign, instr, "minus zero");
     __ bind(&dividend_not_zero);
   }
 
@@ -1319,7 +1315,7 @@
     __ cmp(dividend, kMinInt);
     __ j(not_zero, &dividend_not_min_int, Label::kNear);
     __ cmp(divisor, -1);
-    DeoptimizeIf(zero, instr);
+    DeoptimizeIf(zero, instr, "overflow");
     __ bind(&dividend_not_min_int);
   }
 
@@ -1330,7 +1326,7 @@
   if (!hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32)) {
     // Deoptimize if remainder is not 0.
     __ test(remainder, remainder);
-    DeoptimizeIf(not_zero, instr);
+    DeoptimizeIf(not_zero, instr, "lost precision");
   }
 }
 
@@ -1352,13 +1348,13 @@
   // If the divisor is negative, we have to negate and handle edge cases.
   __ neg(dividend);
   if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
-    DeoptimizeIf(zero, instr);
+    DeoptimizeIf(zero, instr, "minus zero");
   }
 
   // Dividing by -1 is basically negation, unless we overflow.
   if (divisor == -1) {
     if (instr->hydrogen()->CheckFlag(HValue::kLeftCanBeMinInt)) {
-      DeoptimizeIf(overflow, instr);
+      DeoptimizeIf(overflow, instr, "overflow");
     }
     return;
   }
@@ -1385,7 +1381,7 @@
   DCHECK(ToRegister(instr->result()).is(edx));
 
   if (divisor == 0) {
-    DeoptimizeIf(no_condition, instr);
+    DeoptimizeIf(no_condition, instr, "division by zero");
     return;
   }
 
@@ -1393,7 +1389,7 @@
   HMathFloorOfDiv* hdiv = instr->hydrogen();
   if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
     __ test(dividend, dividend);
-    DeoptimizeIf(zero, instr);
+    DeoptimizeIf(zero, instr, "minus zero");
   }
 
   // Easy case: We need no dynamic check for the dividend and the flooring
@@ -1440,7 +1436,7 @@
   // Check for x / 0.
   if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) {
     __ test(divisor, divisor);
-    DeoptimizeIf(zero, instr);
+    DeoptimizeIf(zero, instr, "division by zero");
   }
 
   // Check for (0 / -x) that will produce negative zero.
@@ -1449,7 +1445,7 @@
     __ test(dividend, dividend);
     __ j(not_zero, &dividend_not_zero, Label::kNear);
     __ test(divisor, divisor);
-    DeoptimizeIf(sign, instr);
+    DeoptimizeIf(sign, instr, "minus zero");
     __ bind(&dividend_not_zero);
   }
 
@@ -1459,7 +1455,7 @@
     __ cmp(dividend, kMinInt);
     __ j(not_zero, &dividend_not_min_int, Label::kNear);
     __ cmp(divisor, -1);
-    DeoptimizeIf(zero, instr);
+    DeoptimizeIf(zero, instr, "overflow");
     __ bind(&dividend_not_min_int);
   }
 
@@ -1537,7 +1533,7 @@
   }
 
   if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
-    DeoptimizeIf(overflow, instr);
+    DeoptimizeIf(overflow, instr, "overflow");
   }
 
   if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
@@ -1547,15 +1543,15 @@
     __ j(not_zero, &done, Label::kNear);
     if (right->IsConstantOperand()) {
       if (ToInteger32(LConstantOperand::cast(right)) < 0) {
-        DeoptimizeIf(no_condition, instr);
+        DeoptimizeIf(no_condition, instr, "minus zero");
       } else if (ToInteger32(LConstantOperand::cast(right)) == 0) {
         __ cmp(ToRegister(instr->temp()), Immediate(0));
-        DeoptimizeIf(less, instr);
+        DeoptimizeIf(less, instr, "minus zero");
       }
     } else {
       // Test the non-zero operand for negative sign.
       __ or_(ToRegister(instr->temp()), ToOperand(right));
-      DeoptimizeIf(sign, instr);
+      DeoptimizeIf(sign, instr, "minus zero");
     }
     __ bind(&done);
   }
@@ -1620,10 +1616,6 @@
     switch (instr->op()) {
       case Token::ROR:
         __ ror_cl(ToRegister(left));
-        if (instr->can_deopt()) {
-          __ test(ToRegister(left), ToRegister(left));
-          DeoptimizeIf(sign, instr);
-        }
         break;
       case Token::SAR:
         __ sar_cl(ToRegister(left));
@@ -1632,7 +1624,7 @@
         __ shr_cl(ToRegister(left));
         if (instr->can_deopt()) {
           __ test(ToRegister(left), ToRegister(left));
-          DeoptimizeIf(sign, instr);
+          DeoptimizeIf(sign, instr, "negative value");
         }
         break;
       case Token::SHL:
@@ -1649,7 +1641,7 @@
       case Token::ROR:
         if (shift_count == 0 && instr->can_deopt()) {
           __ test(ToRegister(left), ToRegister(left));
-          DeoptimizeIf(sign, instr);
+          DeoptimizeIf(sign, instr, "negative value");
         } else {
           __ ror(ToRegister(left), shift_count);
         }
@@ -1664,7 +1656,7 @@
           __ shr(ToRegister(left), shift_count);
         } else if (instr->can_deopt()) {
           __ test(ToRegister(left), ToRegister(left));
-          DeoptimizeIf(sign, instr);
+          DeoptimizeIf(sign, instr, "negative value");
         }
         break;
       case Token::SHL:
@@ -1675,7 +1667,7 @@
               __ shl(ToRegister(left), shift_count - 1);
             }
             __ SmiTag(ToRegister(left));
-            DeoptimizeIf(overflow, instr);
+            DeoptimizeIf(overflow, instr, "overflow");
           } else {
             __ shl(ToRegister(left), shift_count);
           }
@@ -1701,7 +1693,7 @@
     __ sub(ToRegister(left), ToOperand(right));
   }
   if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
-    DeoptimizeIf(overflow, instr);
+    DeoptimizeIf(overflow, instr, "overflow");
   }
 }
 
@@ -1785,9 +1777,9 @@
   DCHECK(object.is(eax));
 
   __ test(object, Immediate(kSmiTagMask));
-  DeoptimizeIf(zero, instr);
+  DeoptimizeIf(zero, instr, "Smi");
   __ CmpObjectType(object, JS_DATE_TYPE, scratch);
-  DeoptimizeIf(not_equal, instr);
+  DeoptimizeIf(not_equal, instr, "not a date object");
 
   if (index->value() == 0) {
     __ mov(result, FieldOperand(object, JSDate::kValueOffset));
@@ -1917,7 +1909,7 @@
       __ add(ToRegister(left), ToOperand(right));
     }
     if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
-      DeoptimizeIf(overflow, instr);
+      DeoptimizeIf(overflow, instr, "overflow");
     }
   }
 }
@@ -2141,7 +2133,7 @@
       } else if (expected.NeedsMap()) {
         // If we need a map later and have a Smi -> deopt.
         __ test(reg, Immediate(kSmiTagMask));
-        DeoptimizeIf(zero, instr);
+        DeoptimizeIf(zero, instr, "Smi");
       }
 
       Register map = no_reg;  // Keep the compiler happy.
@@ -2198,7 +2190,7 @@
       if (!expected.IsGeneric()) {
         // We've seen something for the first time -> deopt.
         // This can only happen if we are not generic already.
-        DeoptimizeIf(no_condition, instr);
+        DeoptimizeIf(no_condition, instr, "unexpected object");
       }
     }
   }
@@ -2828,7 +2820,7 @@
   __ mov(result, Operand::ForCell(instr->hydrogen()->cell().handle()));
   if (instr->hydrogen()->RequiresHoleCheck()) {
     __ cmp(result, factory()->the_hole_value());
-    DeoptimizeIf(equal, instr);
+    DeoptimizeIf(equal, instr, "hole");
   }
 }
 
@@ -2872,7 +2864,7 @@
   // it as no longer deleted. We deoptimize in that case.
   if (instr->hydrogen()->RequiresHoleCheck()) {
     __ cmp(Operand::ForCell(cell_handle), factory()->the_hole_value());
-    DeoptimizeIf(equal, instr);
+    DeoptimizeIf(equal, instr, "hole");
   }
 
   // Store the value.
@@ -2889,7 +2881,7 @@
   if (instr->hydrogen()->RequiresHoleCheck()) {
     __ cmp(result, factory()->the_hole_value());
     if (instr->hydrogen()->DeoptimizesOnHole()) {
-      DeoptimizeIf(equal, instr);
+      DeoptimizeIf(equal, instr, "hole");
     } else {
       Label is_not_hole;
       __ j(not_equal, &is_not_hole, Label::kNear);
@@ -2910,7 +2902,7 @@
   if (instr->hydrogen()->RequiresHoleCheck()) {
     __ cmp(target, factory()->the_hole_value());
     if (instr->hydrogen()->DeoptimizesOnHole()) {
-      DeoptimizeIf(equal, instr);
+      DeoptimizeIf(equal, instr, "hole");
     } else {
       __ j(not_equal, &skip_assignment, Label::kNear);
     }
@@ -3009,7 +3001,7 @@
 
   // Check that the function has a prototype or an initial map.
   __ cmp(Operand(result), Immediate(factory()->the_hole_value()));
-  DeoptimizeIf(equal, instr);
+  DeoptimizeIf(equal, instr, "hole");
 
   // If the function does not have an initial map, we're done.
   Label done;
@@ -3102,7 +3094,7 @@
         __ mov(result, operand);
         if (!instr->hydrogen()->CheckFlag(HInstruction::kUint32)) {
           __ test(result, Operand(result));
-          DeoptimizeIf(negative, instr);
+          DeoptimizeIf(negative, instr, "negative value");
         }
         break;
       case EXTERNAL_FLOAT32_ELEMENTS:
@@ -3132,7 +3124,7 @@
         FAST_DOUBLE_ELEMENTS,
         instr->base_offset() + sizeof(kHoleNanLower32));
     __ cmp(hole_check_operand, Immediate(kHoleNanUpper32));
-    DeoptimizeIf(equal, instr);
+    DeoptimizeIf(equal, instr, "hole");
   }
 
   Operand double_load_operand = BuildFastArrayOperand(
@@ -3159,10 +3151,10 @@
   if (instr->hydrogen()->RequiresHoleCheck()) {
     if (IsFastSmiElementsKind(instr->hydrogen()->elements_kind())) {
       __ test(result, Immediate(kSmiTagMask));
-      DeoptimizeIf(not_equal, instr);
+      DeoptimizeIf(not_equal, instr, "not a Smi");
     } else {
       __ cmp(result, factory()->the_hole_value());
-      DeoptimizeIf(equal, instr);
+      DeoptimizeIf(equal, instr, "hole");
     }
   }
 }
@@ -3309,9 +3301,9 @@
 
   // The receiver should be a JS object.
   __ test(receiver, Immediate(kSmiTagMask));
-  DeoptimizeIf(equal, instr);
+  DeoptimizeIf(equal, instr, "Smi");
   __ CmpObjectType(receiver, FIRST_SPEC_OBJECT_TYPE, scratch);
-  DeoptimizeIf(below, instr);
+  DeoptimizeIf(below, instr, "not a JavaScript object");
 
   __ jmp(&receiver_ok, Label::kNear);
   __ bind(&global_object);
@@ -3337,7 +3329,7 @@
   // adaptor frame below it.
   const uint32_t kArgumentsLimit = 1 * KB;
   __ cmp(length, kArgumentsLimit);
-  DeoptimizeIf(above, instr);
+  DeoptimizeIf(above, instr, "too many arguments");
 
   __ push(receiver);
   __ mov(receiver, length);
@@ -3530,7 +3522,7 @@
   Register input_reg = ToRegister(instr->value());
   __ cmp(FieldOperand(input_reg, HeapObject::kMapOffset),
          factory()->heap_number_map());
-  DeoptimizeIf(not_equal, instr);
+  DeoptimizeIf(not_equal, instr, "not a heap number");
 
   Label slow, allocated, done;
   Register tmp = input_reg.is(eax) ? ecx : eax;
@@ -3577,7 +3569,7 @@
   Label is_positive;
   __ j(not_sign, &is_positive, Label::kNear);
   __ neg(input_reg);  // Sets flags.
-  DeoptimizeIf(negative, instr);
+  DeoptimizeIf(negative, instr, "overflow");
   __ bind(&is_positive);
 }
 
@@ -3635,20 +3627,20 @@
       __ j(not_equal, &non_zero, Label::kNear);
       __ movmskpd(output_reg, input_reg);
       __ test(output_reg, Immediate(1));
-      DeoptimizeIf(not_zero, instr);
+      DeoptimizeIf(not_zero, instr, "minus zero");
       __ bind(&non_zero);
     }
     __ roundsd(xmm_scratch, input_reg, Assembler::kRoundDown);
     __ cvttsd2si(output_reg, Operand(xmm_scratch));
     // Overflow is signalled with minint.
     __ cmp(output_reg, 0x1);
-    DeoptimizeIf(overflow, instr);
+    DeoptimizeIf(overflow, instr, "overflow");
   } else {
     Label negative_sign, done;
     // Deoptimize on unordered.
     __ xorps(xmm_scratch, xmm_scratch);  // Zero the register.
     __ ucomisd(input_reg, xmm_scratch);
-    DeoptimizeIf(parity_even, instr);
+    DeoptimizeIf(parity_even, instr, "NaN");
     __ j(below, &negative_sign, Label::kNear);
 
     if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
@@ -3657,7 +3649,7 @@
       __ j(above, &positive_sign, Label::kNear);
       __ movmskpd(output_reg, input_reg);
       __ test(output_reg, Immediate(1));
-      DeoptimizeIf(not_zero, instr);
+      DeoptimizeIf(not_zero, instr, "minus zero");
       __ Move(output_reg, Immediate(0));
       __ jmp(&done, Label::kNear);
       __ bind(&positive_sign);
@@ -3667,7 +3659,7 @@
     __ cvttsd2si(output_reg, Operand(input_reg));
     // Overflow is signalled with minint.
     __ cmp(output_reg, 0x1);
-    DeoptimizeIf(overflow, instr);
+    DeoptimizeIf(overflow, instr, "overflow");
     __ jmp(&done, Label::kNear);
 
     // Non-zero negative reaches here.
@@ -3678,7 +3670,7 @@
     __ ucomisd(input_reg, xmm_scratch);
     __ j(equal, &done, Label::kNear);
     __ sub(output_reg, Immediate(1));
-    DeoptimizeIf(overflow, instr);
+    DeoptimizeIf(overflow, instr, "overflow");
 
     __ bind(&done);
   }
@@ -3706,8 +3698,7 @@
   __ cvttsd2si(output_reg, Operand(xmm_scratch));
   // Overflow is signalled with minint.
   __ cmp(output_reg, 0x1);
-  __ RecordComment("D2I conversion overflow");
-  DeoptimizeIf(overflow, instr);
+  DeoptimizeIf(overflow, instr, "overflow");
   __ jmp(&done, dist);
 
   __ bind(&below_one_half);
@@ -3722,8 +3713,7 @@
   __ cvttsd2si(output_reg, Operand(input_temp));
   // Catch minint due to overflow, and to prevent overflow when compensating.
   __ cmp(output_reg, 0x1);
-  __ RecordComment("D2I conversion overflow");
-  DeoptimizeIf(overflow, instr);
+  DeoptimizeIf(overflow, instr, "overflow");
 
   __ Cvtsi2sd(xmm_scratch, output_reg);
   __ ucomisd(xmm_scratch, input_temp);
@@ -3739,8 +3729,7 @@
     // If the sign is positive, we return +0.
     __ movmskpd(output_reg, input_reg);
     __ test(output_reg, Immediate(1));
-    __ RecordComment("Minus zero");
-    DeoptimizeIf(not_zero, instr);
+    DeoptimizeIf(not_zero, instr, "minus zero");
   }
   __ Move(output_reg, Immediate(0));
   __ bind(&done);
@@ -3816,7 +3805,7 @@
     __ JumpIfSmi(tagged_exponent, &no_deopt);
     DCHECK(!ecx.is(tagged_exponent));
     __ CmpObjectType(tagged_exponent, HEAP_NUMBER_TYPE, ecx);
-    DeoptimizeIf(not_equal, instr);
+    DeoptimizeIf(not_equal, instr, "not a heap number");
     __ bind(&no_deopt);
     MathPowStub stub(isolate(), MathPowStub::TAGGED);
     __ CallStub(&stub);
@@ -4128,7 +4117,7 @@
     __ int3();
     __ bind(&done);
   } else {
-    DeoptimizeIf(cc, instr);
+    DeoptimizeIf(cc, instr, "out of bounds");
   }
 }
 
@@ -4296,7 +4285,7 @@
   Register temp = ToRegister(instr->temp());
   Label no_memento_found;
   __ TestJSArrayForAllocationMemento(object, temp, &no_memento_found);
-  DeoptimizeIf(equal, instr);
+  DeoptimizeIf(equal, instr, "memento found");
   __ bind(&no_memento_found);
 }
 
@@ -4641,12 +4630,12 @@
   if (hchange->CheckFlag(HValue::kCanOverflow) &&
       hchange->value()->CheckFlag(HValue::kUint32)) {
     __ test(input, Immediate(0xc0000000));
-    DeoptimizeIf(not_zero, instr);
+    DeoptimizeIf(not_zero, instr, "overflow");
   }
   __ SmiTag(input);
   if (hchange->CheckFlag(HValue::kCanOverflow) &&
       !hchange->value()->CheckFlag(HValue::kUint32)) {
-    DeoptimizeIf(overflow, instr);
+    DeoptimizeIf(overflow, instr, "overflow");
   }
 }
 
@@ -4657,7 +4646,7 @@
   DCHECK(input->IsRegister() && input->Equals(instr->result()));
   if (instr->needs_check()) {
     __ test(result, Immediate(kSmiTagMask));
-    DeoptimizeIf(not_zero, instr);
+    DeoptimizeIf(not_zero, instr, "not a Smi");
   } else {
     __ AssertSmi(result);
   }
@@ -4684,7 +4673,7 @@
     if (can_convert_undefined_to_nan) {
       __ j(not_equal, &convert, Label::kNear);
     } else {
-      DeoptimizeIf(not_equal, instr);
+      DeoptimizeIf(not_equal, instr, "not a heap number");
     }
 
     // Heap number to XMM conversion.
@@ -4697,7 +4686,7 @@
       __ j(not_zero, &done, Label::kNear);
       __ movmskpd(temp_reg, result_reg);
       __ test_b(temp_reg, 1);
-      DeoptimizeIf(not_zero, instr);
+      DeoptimizeIf(not_zero, instr, "minus zero");
     }
     __ jmp(&done, Label::kNear);
 
@@ -4706,7 +4695,7 @@
 
       // Convert undefined (and hole) to NaN.
       __ cmp(input_reg, factory()->undefined_value());
-      DeoptimizeIf(not_equal, instr);
+      DeoptimizeIf(not_equal, instr, "not a heap number/undefined");
 
       ExternalReference nan =
           ExternalReference::address_of_canonical_non_hole_nan();
@@ -4760,31 +4749,26 @@
 
     __ bind(&check_false);
     __ cmp(input_reg, factory()->false_value());
-    __ RecordComment("Deferred TaggedToI: cannot truncate");
-    DeoptimizeIf(not_equal, instr);
+    DeoptimizeIf(not_equal, instr, "not a heap number/undefined/true/false");
     __ Move(input_reg, Immediate(0));
   } else {
     XMMRegister scratch = ToDoubleRegister(instr->temp());
     DCHECK(!scratch.is(xmm0));
     __ cmp(FieldOperand(input_reg, HeapObject::kMapOffset),
            isolate()->factory()->heap_number_map());
-    __ RecordComment("Deferred TaggedToI: not a heap number");
-    DeoptimizeIf(not_equal, instr);
+    DeoptimizeIf(not_equal, instr, "not a heap number");
     __ movsd(xmm0, FieldOperand(input_reg, HeapNumber::kValueOffset));
     __ cvttsd2si(input_reg, Operand(xmm0));
     __ Cvtsi2sd(scratch, Operand(input_reg));
     __ ucomisd(xmm0, scratch);
-    __ RecordComment("Deferred TaggedToI: lost precision");
-    DeoptimizeIf(not_equal, instr);
-    __ RecordComment("Deferred TaggedToI: NaN");
-    DeoptimizeIf(parity_even, instr);
+    DeoptimizeIf(not_equal, instr, "lost precision");
+    DeoptimizeIf(parity_even, instr, "NaN");
     if (instr->hydrogen()->GetMinusZeroMode() == FAIL_ON_MINUS_ZERO) {
       __ test(input_reg, Operand(input_reg));
       __ j(not_zero, done);
       __ movmskpd(input_reg, xmm0);
       __ and_(input_reg, 1);
-      __ RecordComment("Deferred TaggedToI: minus zero");
-      DeoptimizeIf(not_zero, instr);
+      DeoptimizeIf(not_zero, instr, "minus zero");
     }
   }
 }
@@ -4856,14 +4840,20 @@
     XMMRegister input_reg = ToDoubleRegister(input);
     __ TruncateDoubleToI(result_reg, input_reg);
   } else {
-    Label bailout, done;
+    Label lost_precision, is_nan, minus_zero, done;
     XMMRegister input_reg = ToDoubleRegister(input);
     XMMRegister xmm_scratch = double_scratch0();
-     __ DoubleToI(result_reg, input_reg, xmm_scratch,
-         instr->hydrogen()->GetMinusZeroMode(), &bailout, Label::kNear);
-    __ jmp(&done, Label::kNear);
-    __ bind(&bailout);
-    DeoptimizeIf(no_condition, instr);
+    Label::Distance dist = DeoptEveryNTimes() ? Label::kFar : Label::kNear;
+    __ DoubleToI(result_reg, input_reg, xmm_scratch,
+                 instr->hydrogen()->GetMinusZeroMode(), &lost_precision,
+                 &is_nan, &minus_zero, dist);
+    __ jmp(&done, dist);
+    __ bind(&lost_precision);
+    DeoptimizeIf(no_condition, instr, "lost precision");
+    __ bind(&is_nan);
+    DeoptimizeIf(no_condition, instr, "NaN");
+    __ bind(&minus_zero);
+    DeoptimizeIf(no_condition, instr, "minus zero");
     __ bind(&done);
   }
 }
@@ -4876,25 +4866,30 @@
   DCHECK(result->IsRegister());
   Register result_reg = ToRegister(result);
 
-  Label bailout, done;
+  Label lost_precision, is_nan, minus_zero, done;
   XMMRegister input_reg = ToDoubleRegister(input);
   XMMRegister xmm_scratch = double_scratch0();
+  Label::Distance dist = DeoptEveryNTimes() ? Label::kFar : Label::kNear;
   __ DoubleToI(result_reg, input_reg, xmm_scratch,
-      instr->hydrogen()->GetMinusZeroMode(), &bailout, Label::kNear);
-  __ jmp(&done, Label::kNear);
-  __ bind(&bailout);
-  DeoptimizeIf(no_condition, instr);
+               instr->hydrogen()->GetMinusZeroMode(), &lost_precision, &is_nan,
+               &minus_zero, dist);
+  __ jmp(&done, dist);
+  __ bind(&lost_precision);
+  DeoptimizeIf(no_condition, instr, "lost precision");
+  __ bind(&is_nan);
+  DeoptimizeIf(no_condition, instr, "NaN");
+  __ bind(&minus_zero);
+  DeoptimizeIf(no_condition, instr, "minus zero");
   __ bind(&done);
-
   __ SmiTag(result_reg);
-  DeoptimizeIf(overflow, instr);
+  DeoptimizeIf(overflow, instr, "overflow");
 }
 
 
 void LCodeGen::DoCheckSmi(LCheckSmi* instr) {
   LOperand* input = instr->value();
   __ test(ToOperand(input), Immediate(kSmiTagMask));
-  DeoptimizeIf(not_zero, instr);
+  DeoptimizeIf(not_zero, instr, "not a Smi");
 }
 
 
@@ -4902,7 +4897,7 @@
   if (!instr->hydrogen()->value()->type().IsHeapObject()) {
     LOperand* input = instr->value();
     __ test(ToOperand(input), Immediate(kSmiTagMask));
-    DeoptimizeIf(zero, instr);
+    DeoptimizeIf(zero, instr, "Smi");
   }
 }
 
@@ -4923,14 +4918,14 @@
 
     // If there is only one type in the interval check for equality.
     if (first == last) {
-      DeoptimizeIf(not_equal, instr);
+      DeoptimizeIf(not_equal, instr, "wrong instance type");
     } else {
-      DeoptimizeIf(below, instr);
+      DeoptimizeIf(below, instr, "wrong instance type");
       // Omit check for the last type.
       if (last != LAST_TYPE) {
         __ cmpb(FieldOperand(temp, Map::kInstanceTypeOffset),
                 static_cast<int8_t>(last));
-        DeoptimizeIf(above, instr);
+        DeoptimizeIf(above, instr, "wrong instance type");
       }
     }
   } else {
@@ -4941,12 +4936,12 @@
     if (base::bits::IsPowerOfTwo32(mask)) {
       DCHECK(tag == 0 || base::bits::IsPowerOfTwo32(tag));
       __ test_b(FieldOperand(temp, Map::kInstanceTypeOffset), mask);
-      DeoptimizeIf(tag == 0 ? not_zero : zero, instr);
+      DeoptimizeIf(tag == 0 ? not_zero : zero, instr, "wrong instance type");
     } else {
       __ movzx_b(temp, FieldOperand(temp, Map::kInstanceTypeOffset));
       __ and_(temp, mask);
       __ cmp(temp, tag);
-      DeoptimizeIf(not_equal, instr);
+      DeoptimizeIf(not_equal, instr, "wrong instance type");
     }
   }
 }
@@ -4962,7 +4957,7 @@
     Operand operand = ToOperand(instr->value());
     __ cmp(operand, object);
   }
-  DeoptimizeIf(not_equal, instr);
+  DeoptimizeIf(not_equal, instr, "value mismatch");
 }
 
 
@@ -4977,7 +4972,7 @@
 
     __ test(eax, Immediate(kSmiTagMask));
   }
-  DeoptimizeIf(zero, instr);
+  DeoptimizeIf(zero, instr, "instance migration failed");
 }
 
 
@@ -5030,7 +5025,7 @@
   if (instr->hydrogen()->HasMigrationTarget()) {
     __ j(not_equal, deferred->entry());
   } else {
-    DeoptimizeIf(not_equal, instr);
+    DeoptimizeIf(not_equal, instr, "wrong map");
   }
 
   __ bind(&success);
@@ -5069,7 +5064,7 @@
   // Check for undefined. Undefined is converted to zero for clamping
   // conversions.
   __ cmp(input_reg, factory()->undefined_value());
-  DeoptimizeIf(not_equal, instr);
+  DeoptimizeIf(not_equal, instr, "not a heap number/undefined");
   __ mov(input_reg, 0);
   __ jmp(&done, Label::kNear);
 
@@ -5559,17 +5554,17 @@
 void LCodeGen::DoForInPrepareMap(LForInPrepareMap* instr) {
   DCHECK(ToRegister(instr->context()).is(esi));
   __ cmp(eax, isolate()->factory()->undefined_value());
-  DeoptimizeIf(equal, instr);
+  DeoptimizeIf(equal, instr, "undefined");
 
   __ cmp(eax, isolate()->factory()->null_value());
-  DeoptimizeIf(equal, instr);
+  DeoptimizeIf(equal, instr, "null");
 
   __ test(eax, Immediate(kSmiTagMask));
-  DeoptimizeIf(zero, instr);
+  DeoptimizeIf(zero, instr, "Smi");
 
   STATIC_ASSERT(FIRST_JS_PROXY_TYPE == FIRST_SPEC_OBJECT_TYPE);
   __ CmpObjectType(eax, LAST_JS_PROXY_TYPE, ecx);
-  DeoptimizeIf(below_equal, instr);
+  DeoptimizeIf(below_equal, instr, "wrong instance type");
 
   Label use_cache, call_runtime;
   __ CheckEnumCache(&call_runtime);
@@ -5584,7 +5579,7 @@
 
   __ cmp(FieldOperand(eax, HeapObject::kMapOffset),
          isolate()->factory()->meta_map());
-  DeoptimizeIf(not_equal, instr);
+  DeoptimizeIf(not_equal, instr, "wrong map");
   __ bind(&use_cache);
 }
 
@@ -5607,7 +5602,7 @@
          FieldOperand(result, FixedArray::SizeFor(instr->idx())));
   __ bind(&done);
   __ test(result, result);
-  DeoptimizeIf(equal, instr);
+  DeoptimizeIf(equal, instr, "no cache");
 }
 
 
@@ -5615,7 +5610,7 @@
   Register object = ToRegister(instr->value());
   __ cmp(ToRegister(instr->map()),
          FieldOperand(object, HeapObject::kMapOffset));
-  DeoptimizeIf(not_equal, instr);
+  DeoptimizeIf(not_equal, instr, "wrong map");
 }
 
 
diff --git a/src/ia32/lithium-codegen-ia32.h b/src/ia32/lithium-codegen-ia32.h
index 555cf8c..0918252 100644
--- a/src/ia32/lithium-codegen-ia32.h
+++ b/src/ia32/lithium-codegen-ia32.h
@@ -209,10 +209,9 @@
 
   void RegisterEnvironmentForDeoptimization(LEnvironment* environment,
                                             Safepoint::DeoptMode mode);
-  void DeoptimizeIf(Condition cc, LInstruction* instr, const char* reason,
+  void DeoptimizeIf(Condition cc, LInstruction* instr, const char* detail,
                     Deoptimizer::BailoutType bailout_type);
-  void DeoptimizeIf(Condition cc, LInstruction* instr,
-                    const char* reason = NULL);
+  void DeoptimizeIf(Condition cc, LInstruction* instr, const char* detail);
 
   bool DeoptEveryNTimes() {
     return FLAG_deopt_every_n_times != 0 && !info()->IsStub();
diff --git a/src/ia32/macro-assembler-ia32.cc b/src/ia32/macro-assembler-ia32.cc
index f938d50..7480a6f 100644
--- a/src/ia32/macro-assembler-ia32.cc
+++ b/src/ia32/macro-assembler-ia32.cc
@@ -250,18 +250,17 @@
 }
 
 
-void MacroAssembler::DoubleToI(Register result_reg,
-                               XMMRegister input_reg,
+void MacroAssembler::DoubleToI(Register result_reg, XMMRegister input_reg,
                                XMMRegister scratch,
                                MinusZeroMode minus_zero_mode,
-                               Label* conversion_failed,
-                               Label::Distance dst) {
+                               Label* lost_precision, Label* is_nan,
+                               Label* minus_zero, Label::Distance dst) {
   DCHECK(!input_reg.is(scratch));
   cvttsd2si(result_reg, Operand(input_reg));
   Cvtsi2sd(scratch, Operand(result_reg));
   ucomisd(scratch, input_reg);
-  j(not_equal, conversion_failed, dst);
-  j(parity_even, conversion_failed, dst);  // NaN.
+  j(not_equal, lost_precision, dst);
+  j(parity_even, is_nan, dst);
   if (minus_zero_mode == FAIL_ON_MINUS_ZERO) {
     Label done;
     // The integer converted back is equal to the original. We
@@ -271,9 +270,9 @@
     movmskpd(result_reg, input_reg);
     // Bit 0 contains the sign of the double in input_reg.
     // If input was positive, we are ok and return 0, otherwise
-    // jump to conversion_failed.
+    // jump to minus_zero.
     and_(result_reg, 1);
-    j(not_zero, conversion_failed, dst);
+    j(not_zero, minus_zero, dst);
     bind(&done);
   }
 }
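
DoubleToI previously funneled every failure to a single conversion_failed label; splitting it into lost_precision, is_nan, and minus_zero is what lets the lithium callers above attach a distinct deopt reason to each exit. A portable restatement of the three checks (illustrative C++, not V8 code; the SSE sequence detects NaN via the parity flag rather than a self-compare):

    #include <cmath>
    #include <cstdint>

    enum class DoubleToIFailure { kNone, kLostPrecision, kNaN, kMinusZero };

    // Out-of-range inputs are undefined behavior for the portable cast;
    // the hardware cvttsd2si yields kMinInt instead, which then fails the
    // round-trip comparison and is reported as lost precision.
    DoubleToIFailure TryDoubleToI(double input, bool fail_on_minus_zero,
                                  int32_t* out) {
      if (std::isnan(input)) return DoubleToIFailure::kNaN;  // j(parity_even)
      int32_t truncated = static_cast<int32_t>(input);       // cvttsd2si
      if (static_cast<double>(truncated) != input)           // Cvtsi2sd+ucomisd
        return DoubleToIFailure::kLostPrecision;             // j(not_equal)
      if (fail_on_minus_zero && truncated == 0 && std::signbit(input))
        return DoubleToIFailure::kMinusZero;                 // movmskpd, bit 0
      *out = truncated;
      return DoubleToIFailure::kNone;
    }
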
@@ -2917,9 +2916,9 @@
 }
 
 
-void MacroAssembler::JumpIfNotUniqueName(Operand operand,
-                                         Label* not_unique_name,
-                                         Label::Distance distance) {
+void MacroAssembler::JumpIfNotUniqueNameInstanceType(Operand operand,
+                                                     Label* not_unique_name,
+                                                     Label::Distance distance) {
   STATIC_ASSERT(kInternalizedTag == 0 && kStringTag == 0);
   Label succeed;
   test(operand, Immediate(kIsNotStringMask | kIsNotInternalizedMask));
diff --git a/src/ia32/macro-assembler-ia32.h b/src/ia32/macro-assembler-ia32.h
index 405bea8..52f375e 100644
--- a/src/ia32/macro-assembler-ia32.h
+++ b/src/ia32/macro-assembler-ia32.h
@@ -463,8 +463,9 @@
   void TruncateDoubleToI(Register result_reg, XMMRegister input_reg);
 
   void DoubleToI(Register result_reg, XMMRegister input_reg,
-      XMMRegister scratch, MinusZeroMode minus_zero_mode,
-      Label* conversion_failed, Label::Distance dst = Label::kFar);
+                 XMMRegister scratch, MinusZeroMode minus_zero_mode,
+                 Label* lost_precision, Label* is_nan, Label* minus_zero,
+                 Label::Distance dst = Label::kFar);
 
   // Smi tagging support.
   void SmiTag(Register reg) {
@@ -915,13 +916,13 @@
       Label* on_not_flat_one_byte_strings);
 
   // Checks if the given register or operand is a unique name
-  void JumpIfNotUniqueName(Register reg, Label* not_unique_name,
-                           Label::Distance distance = Label::kFar) {
-    JumpIfNotUniqueName(Operand(reg), not_unique_name, distance);
+  void JumpIfNotUniqueNameInstanceType(Register reg, Label* not_unique_name,
+                                       Label::Distance distance = Label::kFar) {
+    JumpIfNotUniqueNameInstanceType(Operand(reg), not_unique_name, distance);
   }
 
-  void JumpIfNotUniqueName(Operand operand, Label* not_unique_name,
-                           Label::Distance distance = Label::kFar);
+  void JumpIfNotUniqueNameInstanceType(Operand operand, Label* not_unique_name,
+                                       Label::Distance distance = Label::kFar);
 
   void EmitSeqStringSetCharCheck(Register string,
                                  Register index,
diff --git a/src/ic/arm/ic-arm.cc b/src/ic/arm/ic-arm.cc
index 1b6cf72..ae13161 100644
--- a/src/ic/arm/ic-arm.cc
+++ b/src/ic/arm/ic-arm.cc
@@ -369,32 +369,6 @@
 }
 
 
-void KeyedLoadIC::GenerateSloppyArguments(MacroAssembler* masm) {
-  // The return address is in lr.
-  Register receiver = LoadDescriptor::ReceiverRegister();
-  Register key = LoadDescriptor::NameRegister();
-  DCHECK(receiver.is(r1));
-  DCHECK(key.is(r2));
-
-  Label slow, notin;
-  MemOperand mapped_location = GenerateMappedArgumentsLookup(
-      masm, receiver, key, r0, r3, r4, &notin, &slow);
-  __ ldr(r0, mapped_location);
-  __ Ret();
-  __ bind(&notin);
-  // The unmapped lookup expects that the parameter map is in r0.
-  MemOperand unmapped_location =
-      GenerateUnmappedArgumentsLookup(masm, key, r0, r3, &slow);
-  __ ldr(r0, unmapped_location);
-  __ LoadRoot(r3, Heap::kTheHoleValueRootIndex);
-  __ cmp(r0, r3);
-  __ b(eq, &slow);
-  __ Ret();
-  __ bind(&slow);
-  GenerateMiss(masm);
-}
-
-
 void KeyedStoreIC::GenerateSloppyArguments(MacroAssembler* masm) {
   Register receiver = StoreDescriptor::ReceiverRegister();
   Register key = StoreDescriptor::NameRegister();
diff --git a/src/ic/arm/ic-compiler-arm.cc b/src/ic/arm/ic-compiler-arm.cc
index e918fdc..7bef56e 100644
--- a/src/ic/arm/ic-compiler-arm.cc
+++ b/src/ic/arm/ic-compiler-arm.cc
@@ -44,7 +44,11 @@
     // In case we are compiling an IC for dictionary loads and stores, just
     // check whether the name is unique.
     if (name.is_identical_to(isolate()->factory()->normal_ic_symbol())) {
-      __ JumpIfNotUniqueName(this->name(), &miss);
+      Register tmp = scratch1();
+      __ JumpIfSmi(this->name(), &miss);
+      __ ldr(tmp, FieldMemOperand(this->name(), HeapObject::kMapOffset));
+      __ ldrb(tmp, FieldMemOperand(tmp, Map::kInstanceTypeOffset));
+      __ JumpIfNotUniqueNameInstanceType(tmp, &miss);
     } else {
       __ cmp(this->name(), Operand(name));
       __ b(ne, &miss);
diff --git a/src/ic/arm64/ic-arm64.cc b/src/ic/arm64/ic-arm64.cc
index 9e5427f..76f9c24 100644
--- a/src/ic/arm64/ic-arm64.cc
+++ b/src/ic/arm64/ic-arm64.cc
@@ -370,35 +370,6 @@
 }
 
 
-void KeyedLoadIC::GenerateSloppyArguments(MacroAssembler* masm) {
-  // The return address is in lr.
-  Register result = x0;
-  Register receiver = LoadDescriptor::ReceiverRegister();
-  Register key = LoadDescriptor::NameRegister();
-  DCHECK(receiver.is(x1));
-  DCHECK(key.is(x2));
-
-  Label miss, unmapped;
-
-  Register map_scratch = x0;
-  MemOperand mapped_location = GenerateMappedArgumentsLookup(
-      masm, receiver, key, map_scratch, x3, x4, &unmapped, &miss);
-  __ Ldr(result, mapped_location);
-  __ Ret();
-
-  __ Bind(&unmapped);
-  // Parameter map is left in map_scratch when a jump on unmapped is done.
-  MemOperand unmapped_location =
-      GenerateUnmappedArgumentsLookup(masm, key, map_scratch, x3, &miss);
-  __ Ldr(result, unmapped_location);
-  __ JumpIfRoot(result, Heap::kTheHoleValueRootIndex, &miss);
-  __ Ret();
-
-  __ Bind(&miss);
-  GenerateMiss(masm);
-}
-
-
 void KeyedStoreIC::GenerateSloppyArguments(MacroAssembler* masm) {
   ASM_LOCATION("KeyedStoreIC::GenerateSloppyArguments");
   Label slow, notin;
diff --git a/src/ic/arm64/ic-compiler-arm64.cc b/src/ic/arm64/ic-compiler-arm64.cc
index fd9a4db..ffc1069 100644
--- a/src/ic/arm64/ic-compiler-arm64.cc
+++ b/src/ic/arm64/ic-compiler-arm64.cc
@@ -45,7 +45,11 @@
     // In case we are compiling an IC for dictionary loads and stores, just
     // check whether the name is unique.
     if (name.is_identical_to(isolate()->factory()->normal_ic_symbol())) {
-      __ JumpIfNotUniqueName(this->name(), &miss);
+      Register tmp = scratch1();
+      __ JumpIfSmi(this->name(), &miss);
+      __ Ldr(tmp, FieldMemOperand(this->name(), HeapObject::kMapOffset));
+      __ Ldrb(tmp, FieldMemOperand(tmp, Map::kInstanceTypeOffset));
+      __ JumpIfNotUniqueNameInstanceType(tmp, &miss);
     } else {
       __ CompareAndBranch(this->name(), Operand(name), ne, &miss);
     }
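
The rename from JumpIfNotUniqueName to JumpIfNotUniqueNameInstanceType runs through every port in this patch: the helper only ever inspected an instance type, so callers that start from an object (the ic-compiler files in this diff) must now do the Smi check and the map/instance-type loads themselves, which the new four-instruction preamble makes explicit. The predicate being tested, restated in plain C++ (a sketch; the assembler version relies on kInternalizedTag == 0 and kStringTag == 0, per the STATIC_ASSERT above):

    // A "unique name" is an internalized string or a Symbol.
    bool IsUniqueNameInstanceType(uint32_t instance_type) {
      if ((instance_type & (kIsNotStringMask | kIsNotInternalizedMask)) == 0)
        return true;                        // internalized string
      return instance_type == SYMBOL_TYPE;  // symbols are unique too
    }
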
diff --git a/src/ic/handler-compiler.cc b/src/ic/handler-compiler.cc
index ecd3e3b..4ed92ec 100644
--- a/src/ic/handler-compiler.cc
+++ b/src/ic/handler-compiler.cc
@@ -390,13 +390,13 @@
       ElementsKind elements_kind = receiver_map->elements_kind();
       if (receiver_map->has_indexed_interceptor()) {
         cached_stub = LoadIndexedInterceptorStub(isolate()).GetCode();
+      } else if (IsSloppyArgumentsElements(elements_kind)) {
+        cached_stub = KeyedLoadSloppyArgumentsStub(isolate()).GetCode();
       } else if (IsFastElementsKind(elements_kind) ||
                  IsExternalArrayElementsKind(elements_kind) ||
                  IsFixedTypedArrayElementsKind(elements_kind)) {
         cached_stub = LoadFastElementStub(isolate(), is_js_array, elements_kind)
                           .GetCode();
-      } else if (elements_kind == SLOPPY_ARGUMENTS_ELEMENTS) {
-        cached_stub = isolate()->builtins()->KeyedLoadIC_SloppyArguments();
       } else {
         DCHECK(elements_kind == DICTIONARY_ELEMENTS);
         cached_stub = LoadDictionaryElementStub(isolate()).GetCode();
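
This is the platform-independent replacement for the hand-written KeyedLoadIC::GenerateSloppyArguments builtins deleted throughout this patch (arm and arm64 above; ia32, mips, mips64, x64, x87 below): keyed loads from mapped-arguments objects now go through KeyedLoadSloppyArgumentsStub, selected by elements kind. What those builtins all open-coded is the two-step lookup over the parameter map; a conceptual model (illustrative only, with the layout implied by the removed GenerateMappedArgumentsLookup callers):

    // parameter_map[0]     : Context
    // parameter_map[1]     : FixedArray backing store (the unmapped args)
    // parameter_map[2 + i] : context slot index for parameter i, or the hole
    Object* LoadSloppyArgument(FixedArray* parameter_map, uint32_t index) {
      if (index + 2 < static_cast<uint32_t>(parameter_map->length())) {
        Object* mapped = parameter_map->get(static_cast<int>(index) + 2);
        if (!mapped->IsTheHole()) {  // aliased formal: read through the context
          Context* context = Context::cast(parameter_map->get(0));
          return context->get(Smi::cast(mapped)->value());
        }
      }
      // Unmapped: fall back to the backing store (bounds check elided);
      // finding the hole there is a miss, exactly where the deleted
      // builtins branched to &slow.
      FixedArray* backing = FixedArray::cast(parameter_map->get(1));
      return backing->get(static_cast<int>(index));
    }
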
diff --git a/src/ic/ia32/ic-compiler-ia32.cc b/src/ic/ia32/ic-compiler-ia32.cc
index 685eac9..ac42f30b 100644
--- a/src/ic/ia32/ic-compiler-ia32.cc
+++ b/src/ic/ia32/ic-compiler-ia32.cc
@@ -48,7 +48,11 @@
     // In case we are compiling an IC for dictionary loads and stores, just
     // check whether the name is unique.
     if (name.is_identical_to(isolate()->factory()->normal_ic_symbol())) {
-      __ JumpIfNotUniqueName(this->name(), &miss);
+      Register tmp = scratch1();
+      __ JumpIfSmi(this->name(), &miss);
+      __ mov(tmp, FieldOperand(this->name(), HeapObject::kMapOffset));
+      __ movzx_b(tmp, FieldOperand(tmp, Map::kInstanceTypeOffset));
+      __ JumpIfNotUniqueNameInstanceType(tmp, &miss);
     } else {
       __ cmp(this->name(), Immediate(name));
       __ j(not_equal, &miss);
diff --git a/src/ic/ia32/ic-ia32.cc b/src/ic/ia32/ic-ia32.cc
index 2b4c954..67247d2 100644
--- a/src/ic/ia32/ic-ia32.cc
+++ b/src/ic/ia32/ic-ia32.cc
@@ -503,32 +503,6 @@
 }
 
 
-void KeyedLoadIC::GenerateSloppyArguments(MacroAssembler* masm) {
-  // The return address is on the stack.
-  Register receiver = LoadDescriptor::ReceiverRegister();
-  Register key = LoadDescriptor::NameRegister();
-  DCHECK(receiver.is(edx));
-  DCHECK(key.is(ecx));
-
-  Label slow, notin;
-  Factory* factory = masm->isolate()->factory();
-  Operand mapped_location = GenerateMappedArgumentsLookup(
-      masm, receiver, key, ebx, eax, &notin, &slow);
-  __ mov(eax, mapped_location);
-  __ Ret();
-  __ bind(&notin);
-  // The unmapped lookup expects that the parameter map is in ebx.
-  Operand unmapped_location =
-      GenerateUnmappedArgumentsLookup(masm, key, ebx, eax, &slow);
-  __ cmp(unmapped_location, factory->the_hole_value());
-  __ j(equal, &slow);
-  __ mov(eax, unmapped_location);
-  __ Ret();
-  __ bind(&slow);
-  GenerateMiss(masm);
-}
-
-
 void KeyedStoreIC::GenerateSloppyArguments(MacroAssembler* masm) {
   // Return address is on the stack.
   Label slow, notin;
diff --git a/src/ic/ic-compiler.cc b/src/ic/ic-compiler.cc
index 3b9f51c..aeae4ba 100644
--- a/src/ic/ic-compiler.cc
+++ b/src/ic/ic-compiler.cc
@@ -96,6 +96,8 @@
   Handle<Code> stub;
   if (receiver_map->has_indexed_interceptor()) {
     stub = LoadIndexedInterceptorStub(isolate).GetCode();
+  } else if (receiver_map->has_sloppy_arguments_elements()) {
+    stub = KeyedLoadSloppyArgumentsStub(isolate).GetCode();
   } else if (receiver_map->has_fast_elements() ||
              receiver_map->has_external_array_elements() ||
              receiver_map->has_fixed_typed_array_elements()) {
diff --git a/src/ic/ic.cc b/src/ic/ic.cc
index fc364e1..7f346a0 100644
--- a/src/ic/ic.cc
+++ b/src/ic/ic.cc
@@ -76,7 +76,13 @@
 
 #else
 
-#define TRACE_GENERIC_IC(isolate, type, reason)
+#define TRACE_GENERIC_IC(isolate, type, reason)      \
+  do {                                               \
+    if (FLAG_trace_ic) {                             \
+      PrintF("[%s patching generic stub in ", type); \
+      PrintF("(see below) (%s)]\n", reason);         \
+    }                                                \
+  } while (false)
 
 #endif  // DEBUG
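
Until now the release build compiled TRACE_GENERIC_IC away entirely; since most of the new call sites this patch adds sit in release-compiled paths, the macro gains a real body gated on --trace-ic. With the flag on, each generic-stub fallback prints a line of the form [KeyedStoreIC patching generic stub in (see below) (slow stub)], the "(see below)" apparently deferring to the TRACE_IC line that typically follows. The do { ... } while (false) wrapper is the usual idiom that makes a multi-statement macro parse as a single statement, so a call site like this stays well-formed:

    if (!use_ic)
      TRACE_GENERIC_IC(isolate(), "StoreIC", "LookupForWrite said 'false'");
    else
      stub = ComputeHandler(lookup, value);

Without the wrapper, the braces-plus-trailing-semicolon expansion would terminate the if early and orphan the else.
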
 
@@ -1140,14 +1146,14 @@
   if (!AddOneReceiverMapIfMissing(&target_receiver_maps, receiver_map)) {
     // If the miss wasn't due to an unseen map, a polymorphic stub
     // won't help, use the generic stub.
-    TRACE_GENERIC_IC(isolate(), "KeyedIC", "same map added twice");
+    TRACE_GENERIC_IC(isolate(), "KeyedLoadIC", "same map added twice");
     return generic_stub();
   }
 
   // If the maximum number of receiver maps has been exceeded, use the generic
   // version of the IC.
   if (target_receiver_maps.length() > kMaxKeyedPolymorphism) {
-    TRACE_GENERIC_IC(isolate(), "KeyedIC", "max polymorph exceeded");
+    TRACE_GENERIC_IC(isolate(), "KeyedLoadIC", "max polymorph exceeded");
     return generic_stub();
   }
 
@@ -1181,11 +1187,7 @@
       if (state() == UNINITIALIZED) stub = string_stub();
     } else if (object->IsJSObject()) {
       Handle<JSObject> receiver = Handle<JSObject>::cast(object);
-      if (receiver->elements()->map() ==
-          isolate()->heap()->sloppy_arguments_elements_map()) {
-        stub = sloppy_arguments_stub();
-      } else if (!Object::ToSmi(isolate(), key).is_null() &&
-                 (!target().is_identical_to(sloppy_arguments_stub()))) {
+      if (!Object::ToSmi(isolate(), key).is_null()) {
         stub = LoadElementStub(receiver);
       }
     }
@@ -1344,13 +1346,42 @@
 
 
 Handle<Code> StoreIC::megamorphic_stub() {
-  return PropertyICCompiler::ComputeStore(isolate(), MEGAMORPHIC,
-                                          extra_ic_state());
+  if (kind() == Code::STORE_IC) {
+    return PropertyICCompiler::ComputeStore(isolate(), MEGAMORPHIC,
+                                            extra_ic_state());
+  } else {
+    DCHECK(kind() == Code::KEYED_STORE_IC);
+    if (strict_mode() == STRICT) {
+      return isolate()->builtins()->KeyedStoreIC_Generic_Strict();
+    } else {
+      return isolate()->builtins()->KeyedStoreIC_Generic();
+    }
+  }
 }
 
 
 Handle<Code> StoreIC::generic_stub() const {
-  return PropertyICCompiler::ComputeStore(isolate(), GENERIC, extra_ic_state());
+  if (kind() == Code::STORE_IC) {
+    return PropertyICCompiler::ComputeStore(isolate(), GENERIC,
+                                            extra_ic_state());
+  } else {
+    DCHECK(kind() == Code::KEYED_STORE_IC);
+    if (strict_mode() == STRICT) {
+      return isolate()->builtins()->KeyedStoreIC_Generic_Strict();
+    } else {
+      return isolate()->builtins()->KeyedStoreIC_Generic();
+    }
+  }
+}
+
+
+Handle<Code> StoreIC::slow_stub() const {
+  if (kind() == Code::STORE_IC) {
+    return isolate()->builtins()->StoreIC_Slow();
+  } else {
+    DCHECK(kind() == Code::KEYED_STORE_IC);
+    return isolate()->builtins()->KeyedStoreIC_Slow();
+  }
 }
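
megamorphic_stub(), generic_stub(), and slow_stub() now dispatch on kind() in the StoreIC base class, which is what lets the corresponding KeyedStoreIC overrides be deleted from src/ic/ic.h below and the accessors drop their virtual qualifiers where dynamic dispatch is no longer needed. The new slow_stub() is equivalent to this compact form (modulo the DCHECK):

    Handle<Code> StoreIC::slow_stub() const {
      return kind() == Code::STORE_IC
                 ? isolate()->builtins()->StoreIC_Slow()
                 : isolate()->builtins()->KeyedStoreIC_Slow();
    }
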
 
 
@@ -1371,9 +1402,11 @@
     return;
   }
 
-  Handle<Code> code = LookupForWrite(lookup, value, store_mode)
-                          ? ComputeHandler(lookup, value)
-                          : slow_stub();
+  bool use_ic = LookupForWrite(lookup, value, store_mode);
+  if (!use_ic) {
+    TRACE_GENERIC_IC(isolate(), "StoreIC", "LookupForWrite said 'false'");
+  }
+  Handle<Code> code = use_ic ? ComputeHandler(lookup, value) : slow_stub();
 
   PatchCache(lookup->name(), code);
   TRACE_IC("StoreIC", lookup->name());
@@ -1394,7 +1427,10 @@
     case LookupIterator::TRANSITION: {
       Handle<Map> transition = lookup->transition_map();
       // Currently not handled by CompileStoreTransition.
-      if (!holder->HasFastProperties()) break;
+      if (!holder->HasFastProperties()) {
+        TRACE_GENERIC_IC(isolate(), "StoreIC", "transition from slow");
+        break;
+      }
 
       DCHECK(lookup->IsCacheableTransition());
       NamedStoreHandlerCompiler compiler(isolate(), receiver_type(), holder);
@@ -1408,14 +1444,21 @@
     }
 
     case LookupIterator::ACCESSOR: {
-      if (!holder->HasFastProperties()) break;
+      if (!holder->HasFastProperties()) {
+        TRACE_GENERIC_IC(isolate(), "StoreIC", "accessor on slow map");
+        break;
+      }
       Handle<Object> accessors = lookup->GetAccessors();
       if (accessors->IsExecutableAccessorInfo()) {
         Handle<ExecutableAccessorInfo> info =
             Handle<ExecutableAccessorInfo>::cast(accessors);
-        if (v8::ToCData<Address>(info->setter()) == 0) break;
+        if (v8::ToCData<Address>(info->setter()) == 0) {
+          TRACE_GENERIC_IC(isolate(), "StoreIC", "setter == 0");
+          break;
+        }
         if (!ExecutableAccessorInfo::IsCompatibleReceiverType(
                 isolate(), info, receiver_type())) {
+          TRACE_GENERIC_IC(isolate(), "StoreIC", "incompatible receiver type");
           break;
         }
         NamedStoreHandlerCompiler compiler(isolate(), receiver_type(), holder);
@@ -1423,7 +1466,10 @@
       } else if (accessors->IsAccessorPair()) {
         Handle<Object> setter(Handle<AccessorPair>::cast(accessors)->setter(),
                               isolate());
-        if (!setter->IsJSFunction()) break;
+        if (!setter->IsJSFunction()) {
+          TRACE_GENERIC_IC(isolate(), "StoreIC", "setter not a function");
+          break;
+        }
         Handle<JSFunction> function = Handle<JSFunction>::cast(setter);
         CallOptimization call_optimization(function);
         NamedStoreHandlerCompiler compiler(isolate(), receiver_type(), holder);
@@ -1437,6 +1483,7 @@
       }
       // TODO(dcarney): Handle correctly.
       DCHECK(accessors->IsDeclaredAccessorInfo());
+      TRACE_GENERIC_IC(isolate(), "StoreIC", "declared accessor info");
       break;
     }
 
@@ -1477,6 +1524,7 @@
 
       // -------------- Constant properties --------------
       DCHECK(lookup->property_details().type() == CONSTANT);
+      TRACE_GENERIC_IC(isolate(), "StoreIC", "constant property");
       break;
     }
 
@@ -1495,7 +1543,7 @@
   // via megamorphic stubs, since they don't have a map in their relocation info
   // and so the stubs can't be harvested for the object needed for a map check.
   if (target()->type() != Code::NORMAL) {
-    TRACE_GENERIC_IC(isolate(), "KeyedIC", "non-NORMAL target type");
+    TRACE_GENERIC_IC(isolate(), "KeyedStoreIC", "non-NORMAL target type");
     return generic_stub();
   }
 
@@ -1561,14 +1609,14 @@
   if (!map_added) {
     // If the miss wasn't due to an unseen map, a polymorphic stub
     // won't help, use the generic stub.
-    TRACE_GENERIC_IC(isolate(), "KeyedIC", "same map added twice");
+    TRACE_GENERIC_IC(isolate(), "KeyedStoreIC", "same map added twice");
     return generic_stub();
   }
 
   // If the maximum number of receiver maps has been exceeded, use the generic
   // version of the IC.
   if (target_receiver_maps.length() > kMaxKeyedPolymorphism) {
-    TRACE_GENERIC_IC(isolate(), "KeyedIC", "max polymorph exceeded");
+    TRACE_GENERIC_IC(isolate(), "KeyedStoreIC", "max polymorph exceeded");
     return generic_stub();
   }
 
@@ -1579,7 +1627,7 @@
     if (store_mode == STANDARD_STORE) {
       store_mode = old_store_mode;
     } else if (store_mode != old_store_mode) {
-      TRACE_GENERIC_IC(isolate(), "KeyedIC", "store mode mismatch");
+      TRACE_GENERIC_IC(isolate(), "KeyedStoreIC", "store mode mismatch");
       return generic_stub();
     }
   }
@@ -1597,7 +1645,7 @@
     }
     if (external_arrays != 0 &&
         external_arrays != target_receiver_maps.length()) {
-      TRACE_GENERIC_IC(isolate(), "KeyedIC",
+      TRACE_GENERIC_IC(isolate(), "KeyedStoreIC",
                        "unsupported combination of external and normal arrays");
       return generic_stub();
     }
@@ -1752,8 +1800,12 @@
         StoreIC::Store(object, Handle<String>::cast(key), value,
                        JSReceiver::MAY_BE_STORE_FROM_KEYED),
         Object);
-    TRACE_GENERIC_IC(isolate(), "KeyedStoreIC", "set generic");
-    set_target(*stub);
+    if (!is_target_set()) {
+      TRACE_GENERIC_IC(isolate(), "KeyedStoreIC",
+                       "unhandled internalized string key");
+      TRACE_IC("StoreIC", key);
+      set_target(*stub);
+    }
     return store_handle;
   }
 
@@ -1766,7 +1818,10 @@
     // expect to be able to trap element sets to objects with those maps in
     // the runtime to enable optimization of element hole access.
     Handle<HeapObject> heap_object = Handle<HeapObject>::cast(object);
-    if (heap_object->map()->IsMapInArrayPrototypeChain()) use_ic = false;
+    if (heap_object->map()->IsMapInArrayPrototypeChain()) {
+      TRACE_GENERIC_IC(isolate(), "KeyedStoreIC", "map in array prototype");
+      use_ic = false;
+    }
   }
 
   if (use_ic) {
@@ -1779,6 +1834,8 @@
           isolate()->heap()->sloppy_arguments_elements_map()) {
         if (strict_mode() == SLOPPY) {
           stub = sloppy_arguments_stub();
+        } else {
+          TRACE_GENERIC_IC(isolate(), "KeyedStoreIC", "arguments receiver");
         }
       } else if (key_is_smi_like &&
                  !(target().is_identical_to(sloppy_arguments_stub()))) {
@@ -1789,8 +1846,14 @@
         if (!(receiver->map()->DictionaryElementsInPrototypeChainOnly())) {
           KeyedAccessStoreMode store_mode = GetStoreMode(receiver, key, value);
           stub = StoreElementStub(receiver, store_mode);
+        } else {
+          TRACE_GENERIC_IC(isolate(), "KeyedStoreIC", "dictionary prototype");
         }
+      } else {
+        TRACE_GENERIC_IC(isolate(), "KeyedStoreIC", "non-smi-like key");
       }
+    } else {
+      TRACE_GENERIC_IC(isolate(), "KeyedStoreIC", "non-JSObject receiver");
     }
   }
 
@@ -1807,6 +1870,9 @@
   if (*stub == generic) {
     TRACE_GENERIC_IC(isolate(), "KeyedStoreIC", "set generic");
   }
+  if (*stub == *slow_stub()) {
+    TRACE_GENERIC_IC(isolate(), "KeyedStoreIC", "slow stub");
+  }
   DCHECK(!stub.is_null());
   set_target(*stub);
   TRACE_IC("StoreIC", key);
diff --git a/src/ic/ic.h b/src/ic/ic.h
index 57d0d12..d86d2b7 100644
--- a/src/ic/ic.h
+++ b/src/ic/ic.h
@@ -371,7 +371,7 @@
     }
   }
 
-  virtual Handle<Code> megamorphic_stub();
+  virtual Handle<Code> megamorphic_stub() OVERRIDE;
 
   // Update the inline cache and the global stub cache based on the
   // lookup result.
@@ -414,7 +414,6 @@
   }
   static void GenerateGeneric(MacroAssembler* masm);
   static void GenerateString(MacroAssembler* masm);
-  static void GenerateSloppyArguments(MacroAssembler* masm);
 
   // Bit mask to be tested against bit field for the cases when
   // generic stub should go into slow case.
@@ -434,9 +433,6 @@
 
  private:
   Handle<Code> generic_stub() const { return generic_stub(isolate()); }
-  Handle<Code> sloppy_arguments_stub() {
-    return isolate()->builtins()->KeyedLoadIC_SloppyArguments();
-  }
   Handle<Code> string_stub() {
     return isolate()->builtins()->KeyedLoadIC_String();
   }
@@ -493,14 +489,12 @@
                       JSReceiver::StoreFromKeyed store_mode);
 
  protected:
-  virtual Handle<Code> megamorphic_stub();
+  virtual Handle<Code> megamorphic_stub() OVERRIDE;
 
   // Stub accessors.
-  virtual Handle<Code> generic_stub() const;
+  Handle<Code> generic_stub() const;
 
-  virtual Handle<Code> slow_stub() const {
-    return isolate()->builtins()->StoreIC_Slow();
-  }
+  Handle<Code> slow_stub() const;
 
   virtual Handle<Code> pre_monomorphic_stub() const {
     return pre_monomorphic_stub(isolate(), strict_mode());
@@ -581,16 +575,6 @@
       return isolate->builtins()->KeyedStoreIC_PreMonomorphic();
     }
   }
-  virtual Handle<Code> slow_stub() const {
-    return isolate()->builtins()->KeyedStoreIC_Slow();
-  }
-  virtual Handle<Code> megamorphic_stub() {
-    if (strict_mode() == STRICT) {
-      return isolate()->builtins()->KeyedStoreIC_Generic_Strict();
-    } else {
-      return isolate()->builtins()->KeyedStoreIC_Generic();
-    }
-  }
 
   Handle<Code> StoreElementStub(Handle<JSObject> receiver,
                                 KeyedAccessStoreMode store_mode);
@@ -599,14 +583,6 @@
   inline void set_target(Code* code);
 
   // Stub accessors.
-  virtual Handle<Code> generic_stub() const {
-    if (strict_mode() == STRICT) {
-      return isolate()->builtins()->KeyedStoreIC_Generic_Strict();
-    } else {
-      return isolate()->builtins()->KeyedStoreIC_Generic();
-    }
-  }
-
   Handle<Code> sloppy_arguments_stub() {
     return isolate()->builtins()->KeyedStoreIC_SloppyArguments();
   }
diff --git a/src/ic/mips/ic-compiler-mips.cc b/src/ic/mips/ic-compiler-mips.cc
index 920b52a..c1e67f9 100644
--- a/src/ic/mips/ic-compiler-mips.cc
+++ b/src/ic/mips/ic-compiler-mips.cc
@@ -27,7 +27,11 @@
     // In case we are compiling an IC for dictionary loads and stores, just
     // check whether the name is unique.
     if (name.is_identical_to(isolate()->factory()->normal_ic_symbol())) {
-      __ JumpIfNotUniqueName(this->name(), &miss);
+      Register tmp = scratch1();
+      __ JumpIfSmi(this->name(), &miss);
+      __ lw(tmp, FieldMemOperand(this->name(), HeapObject::kMapOffset));
+      __ lbu(tmp, FieldMemOperand(tmp, Map::kInstanceTypeOffset));
+      __ JumpIfNotUniqueNameInstanceType(tmp, &miss);
     } else {
       __ Branch(&miss, ne, this->name(), Operand(name));
     }
diff --git a/src/ic/mips/ic-mips.cc b/src/ic/mips/ic-mips.cc
index 72a85b6..d97a6ba 100644
--- a/src/ic/mips/ic-mips.cc
+++ b/src/ic/mips/ic-mips.cc
@@ -374,32 +374,6 @@
 }
 
 
-void KeyedLoadIC::GenerateSloppyArguments(MacroAssembler* masm) {
-  // The return address is in ra.
-  Register receiver = LoadDescriptor::ReceiverRegister();
-  Register key = LoadDescriptor::NameRegister();
-  DCHECK(receiver.is(a1));
-  DCHECK(key.is(a2));
-
-  Label slow, notin;
-  MemOperand mapped_location = GenerateMappedArgumentsLookup(
-      masm, receiver, key, a0, a3, t0, &notin, &slow);
-  __ Ret(USE_DELAY_SLOT);
-  __ lw(v0, mapped_location);
-  __ bind(&notin);
-  // The unmapped lookup expects that the parameter map is in a0.
-  MemOperand unmapped_location =
-      GenerateUnmappedArgumentsLookup(masm, key, a0, a3, &slow);
-  __ lw(a0, unmapped_location);
-  __ LoadRoot(a3, Heap::kTheHoleValueRootIndex);
-  __ Branch(&slow, eq, a0, Operand(a3));
-  __ Ret(USE_DELAY_SLOT);
-  __ mov(v0, a0);
-  __ bind(&slow);
-  GenerateMiss(masm);
-}
-
-
 void KeyedStoreIC::GenerateSloppyArguments(MacroAssembler* masm) {
   Register receiver = StoreDescriptor::ReceiverRegister();
   Register key = StoreDescriptor::NameRegister();
diff --git a/src/ic/mips64/ic-compiler-mips64.cc b/src/ic/mips64/ic-compiler-mips64.cc
index 3c6eecc..796ed87 100644
--- a/src/ic/mips64/ic-compiler-mips64.cc
+++ b/src/ic/mips64/ic-compiler-mips64.cc
@@ -27,7 +27,11 @@
     // In case we are compiling an IC for dictionary loads and stores, just
     // check whether the name is unique.
     if (name.is_identical_to(isolate()->factory()->normal_ic_symbol())) {
-      __ JumpIfNotUniqueName(this->name(), &miss);
+      Register tmp = scratch1();
+      __ JumpIfSmi(this->name(), &miss);
+      __ ld(tmp, FieldMemOperand(this->name(), HeapObject::kMapOffset));
+      __ lbu(tmp, FieldMemOperand(tmp, Map::kInstanceTypeOffset));
+      __ JumpIfNotUniqueNameInstanceType(tmp, &miss);
     } else {
       __ Branch(&miss, ne, this->name(), Operand(name));
     }
diff --git a/src/ic/mips64/ic-mips64.cc b/src/ic/mips64/ic-mips64.cc
index 0ac35ff..a5d9fe7 100644
--- a/src/ic/mips64/ic-mips64.cc
+++ b/src/ic/mips64/ic-mips64.cc
@@ -372,32 +372,6 @@
 }
 
 
-void KeyedLoadIC::GenerateSloppyArguments(MacroAssembler* masm) {
-  // The return address is in ra.
-  Register receiver = LoadDescriptor::ReceiverRegister();
-  Register key = LoadDescriptor::NameRegister();
-  DCHECK(receiver.is(a1));
-  DCHECK(key.is(a2));
-
-  Label slow, notin;
-  MemOperand mapped_location = GenerateMappedArgumentsLookup(
-      masm, receiver, key, a0, a3, a4, &notin, &slow);
-  __ Ret(USE_DELAY_SLOT);
-  __ ld(v0, mapped_location);
-  __ bind(&notin);
-  // The unmapped lookup expects that the parameter map is in a2.
-  MemOperand unmapped_location =
-      GenerateUnmappedArgumentsLookup(masm, key, a0, a3, &slow);
-  __ ld(a0, unmapped_location);
-  __ LoadRoot(a3, Heap::kTheHoleValueRootIndex);
-  __ Branch(&slow, eq, a0, Operand(a3));
-  __ Ret(USE_DELAY_SLOT);
-  __ mov(v0, a0);
-  __ bind(&slow);
-  GenerateMiss(masm);
-}
-
-
 void KeyedStoreIC::GenerateSloppyArguments(MacroAssembler* masm) {
   Register receiver = StoreDescriptor::ReceiverRegister();
   Register key = StoreDescriptor::NameRegister();
diff --git a/src/ic/x64/ic-compiler-x64.cc b/src/ic/x64/ic-compiler-x64.cc
index 3d7cc68..a5848b6 100644
--- a/src/ic/x64/ic-compiler-x64.cc
+++ b/src/ic/x64/ic-compiler-x64.cc
@@ -82,7 +82,11 @@
     // In case we are compiling an IC for dictionary loads and stores, just
     // check whether the name is unique.
     if (name.is_identical_to(isolate()->factory()->normal_ic_symbol())) {
-      __ JumpIfNotUniqueName(this->name(), &miss);
+      Register tmp = scratch1();
+      __ JumpIfSmi(this->name(), &miss);
+      __ movp(tmp, FieldOperand(this->name(), HeapObject::kMapOffset));
+      __ movzxbp(tmp, FieldOperand(tmp, Map::kInstanceTypeOffset));
+      __ JumpIfNotUniqueNameInstanceType(tmp, &miss);
     } else {
       __ Cmp(this->name(), name);
       __ j(not_equal, &miss);
diff --git a/src/ic/x64/ic-x64.cc b/src/ic/x64/ic-x64.cc
index dc1b86b..ad79f30 100644
--- a/src/ic/x64/ic-x64.cc
+++ b/src/ic/x64/ic-x64.cc
@@ -722,31 +722,6 @@
 }
 
 
-void KeyedLoadIC::GenerateSloppyArguments(MacroAssembler* masm) {
-  // The return address is on the stack.
-  Register receiver = LoadDescriptor::ReceiverRegister();
-  Register key = LoadDescriptor::NameRegister();
-  DCHECK(receiver.is(rdx));
-  DCHECK(key.is(rcx));
-
-  Label slow, notin;
-  Operand mapped_location = GenerateMappedArgumentsLookup(
-      masm, receiver, key, rbx, rax, rdi, &notin, &slow);
-  __ movp(rax, mapped_location);
-  __ Ret();
-  __ bind(&notin);
-  // The unmapped lookup expects that the parameter map is in rbx.
-  Operand unmapped_location =
-      GenerateUnmappedArgumentsLookup(masm, key, rbx, rax, &slow);
-  __ CompareRoot(unmapped_location, Heap::kTheHoleValueRootIndex);
-  __ j(equal, &slow);
-  __ movp(rax, unmapped_location);
-  __ Ret();
-  __ bind(&slow);
-  GenerateMiss(masm);
-}
-
-
 void KeyedStoreIC::GenerateSloppyArguments(MacroAssembler* masm) {
   // The return address is on the stack.
   Label slow, notin;
diff --git a/src/ic/x87/handler-compiler-x87.cc b/src/ic/x87/handler-compiler-x87.cc
index 8370f68..e706998 100644
--- a/src/ic/x87/handler-compiler-x87.cc
+++ b/src/ic/x87/handler-compiler-x87.cc
@@ -411,7 +411,7 @@
 
   // Update the write barrier for the map field.
   __ RecordWriteField(receiver_reg, HeapObject::kMapOffset, scratch1, scratch2,
-                      OMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
+                      kDontSaveFPRegs, OMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
 
   if (details.type() == CONSTANT) {
     DCHECK(value_reg.is(eax));
@@ -445,7 +445,7 @@
         __ mov(storage_reg, value_reg);
       }
       __ RecordWriteField(receiver_reg, offset, storage_reg, scratch1,
-                          EMIT_REMEMBERED_SET, smi_check);
+                          kDontSaveFPRegs, EMIT_REMEMBERED_SET, smi_check);
     }
   } else {
     // Write to the properties array.
@@ -464,7 +464,7 @@
         __ mov(storage_reg, value_reg);
       }
       __ RecordWriteField(scratch1, offset, storage_reg, receiver_reg,
-                          EMIT_REMEMBERED_SET, smi_check);
+                          kDontSaveFPRegs, EMIT_REMEMBERED_SET, smi_check);
     }
   }
 
diff --git a/src/ic/x87/ic-compiler-x87.cc b/src/ic/x87/ic-compiler-x87.cc
index 2467a6d..20b47e7 100644
--- a/src/ic/x87/ic-compiler-x87.cc
+++ b/src/ic/x87/ic-compiler-x87.cc
@@ -48,7 +48,11 @@
     // In case we are compiling an IC for dictionary loads and stores, just
     // check whether the name is unique.
     if (name.is_identical_to(isolate()->factory()->normal_ic_symbol())) {
-      __ JumpIfNotUniqueName(this->name(), &miss);
+      Register tmp = scratch1();
+      __ JumpIfSmi(this->name(), &miss);
+      __ mov(tmp, FieldOperand(this->name(), HeapObject::kMapOffset));
+      __ movzx_b(tmp, FieldOperand(tmp, Map::kInstanceTypeOffset));
+      __ JumpIfNotUniqueNameInstanceType(tmp, &miss);
     } else {
       __ cmp(this->name(), Immediate(name));
       __ j(not_equal, &miss);
diff --git a/src/ic/x87/ic-x87.cc b/src/ic/x87/ic-x87.cc
index 2cd6ea1..9c090c5 100644
--- a/src/ic/x87/ic-x87.cc
+++ b/src/ic/x87/ic-x87.cc
@@ -133,7 +133,7 @@
 
   // Update write barrier. Make sure not to clobber the value.
   __ mov(r1, value);
-  __ RecordWrite(elements, r0, r1);
+  __ RecordWrite(elements, r0, r1, kDontSaveFPRegs);
 }
 
 
@@ -505,32 +505,6 @@
 }
 
 
-void KeyedLoadIC::GenerateSloppyArguments(MacroAssembler* masm) {
-  // The return address is on the stack.
-  Register receiver = LoadDescriptor::ReceiverRegister();
-  Register key = LoadDescriptor::NameRegister();
-  DCHECK(receiver.is(edx));
-  DCHECK(key.is(ecx));
-
-  Label slow, notin;
-  Factory* factory = masm->isolate()->factory();
-  Operand mapped_location = GenerateMappedArgumentsLookup(
-      masm, receiver, key, ebx, eax, &notin, &slow);
-  __ mov(eax, mapped_location);
-  __ Ret();
-  __ bind(&notin);
-  // The unmapped lookup expects that the parameter map is in ebx.
-  Operand unmapped_location =
-      GenerateUnmappedArgumentsLookup(masm, key, ebx, eax, &slow);
-  __ cmp(unmapped_location, factory->the_hole_value());
-  __ j(equal, &slow);
-  __ mov(eax, unmapped_location);
-  __ Ret();
-  __ bind(&slow);
-  GenerateMiss(masm);
-}
-
-
 void KeyedStoreIC::GenerateSloppyArguments(MacroAssembler* masm) {
   // Return address is on the stack.
   Label slow, notin;
@@ -546,7 +520,7 @@
   __ mov(mapped_location, value);
   __ lea(ecx, mapped_location);
   __ mov(edx, value);
-  __ RecordWrite(ebx, ecx, edx);
+  __ RecordWrite(ebx, ecx, edx, kDontSaveFPRegs);
   __ Ret();
   __ bind(&notin);
   // The unmapped lookup expects that the parameter map is in ebx.
@@ -555,7 +529,7 @@
   __ mov(unmapped_location, value);
   __ lea(edi, unmapped_location);
   __ mov(edx, value);
-  __ RecordWrite(ebx, edi, edx);
+  __ RecordWrite(ebx, edi, edx, kDontSaveFPRegs);
   __ Ret();
   __ bind(&slow);
   GenerateMiss(masm);
@@ -624,7 +598,8 @@
   __ mov(FixedArrayElementOperand(ebx, key), value);
   // Update write barrier for the elements array address.
   __ mov(edx, value);  // Preserve the value which is returned.
-  __ RecordWriteArray(ebx, edx, key, EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
+  __ RecordWriteArray(ebx, edx, key, kDontSaveFPRegs, EMIT_REMEMBERED_SET,
+                      OMIT_SMI_CHECK);
   __ ret(0);
 
   __ bind(fast_double);
diff --git a/src/isolate.cc b/src/isolate.cc
index e960445..c6a8b81 100644
--- a/src/isolate.cc
+++ b/src/isolate.cc
@@ -1019,9 +1019,9 @@
       ShouldReportException(&can_be_caught_externally, catchable_by_javascript);
   bool report_exception = catchable_by_javascript && should_report_exception;
   bool try_catch_needs_message =
-      can_be_caught_externally && try_catch_handler()->capture_message_ &&
-      !thread_local_top()->rethrowing_message_;
+      can_be_caught_externally && try_catch_handler()->capture_message_;
   bool bootstrapping = bootstrapper()->IsActive();
+  bool rethrowing_message = thread_local_top()->rethrowing_message_;
 
   thread_local_top()->rethrowing_message_ = false;
 
@@ -1031,7 +1031,7 @@
   }
 
   // Generate the message if required.
-  if (report_exception || try_catch_needs_message) {
+  if (!rethrowing_message && (report_exception || try_catch_needs_message)) {
     MessageLocation potential_computed_location;
     if (location == NULL) {
       // If no location was specified we use a computed one instead.
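
This hunk is the substance of "Preserve message when rethrowing exception (issue 3583)". Previously a rethrow only suppressed the try_catch_needs_message term, so the report_exception path could still regenerate the message and clobber the one captured when the exception was originally thrown. Hoisting the flag and gating the whole block reduces the policy to a single expression over the locals above:

    // Generate a message only when this is not a rethrow; a rethrow keeps
    // the message captured at the original throw site.
    bool generate_message =
        !rethrowing_message && (report_exception || try_catch_needs_message);
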
diff --git a/src/json-parser.h b/src/json-parser.h
index caa2e14..d3148c9 100644
--- a/src/json-parser.h
+++ b/src/json-parser.h
@@ -426,8 +426,7 @@
         if (value.is_null()) return ReportUnexpectedCharacter();
       }
 
-      JSObject::SetOwnPropertyIgnoreAttributes(
-          json_object, key, value, NONE).Assert();
+      Runtime::DefineObjectProperty(json_object, key, value, NONE).Check();
     } while (MatchSkipWhiteSpace(','));
     if (c0_ != '}') {
       return ReportUnexpectedCharacter();
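
This one-line change is the "Fix escaped index JSON parsing" entry (Chromium issue 416449). The parser's fast path special-cases keys written as literal digits, but a key such as {"\u0031": 1} only becomes "1" after escape decoding, so it lands here in the general path, where SetOwnPropertyIgnoreAttributes installed it as a property named "1" rather than as element 1. Runtime::DefineObjectProperty re-inspects the key; conceptually (DefineElement and DefineNamedProperty are hypothetical stand-ins for what it does internally):

    uint32_t index;
    if (key->AsArrayIndex(&index)) {
      // "\u0031" decodes to "1" and must define element 1 ...
      DefineElement(json_object, index, value);      // hypothetical helper
    } else {
      // ... anything that is not an array index stays a named property.
      DefineNamedProperty(json_object, key, value);  // hypothetical helper
    }
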
diff --git a/src/lithium-codegen.cc b/src/lithium-codegen.cc
index 8640116..0207188 100644
--- a/src/lithium-codegen.cc
+++ b/src/lithium-codegen.cc
@@ -147,9 +147,12 @@
 }
 
 
-void LCodeGenBase::DeoptComment(const char* mnemonic, const char* reason) {
-  Comment(";;; deoptimize %s: %s", mnemonic,
-          reason == NULL ? "unknown reason" : reason);
+void LCodeGenBase::DeoptComment(const Deoptimizer::Reason& reason) {
+  OStringStream os;
+  os << ";;; deoptimize at " << HSourcePosition(reason.raw_position) << " "
+     << reason.mnemonic;
+  if (reason.detail != NULL) os << ": " << reason.detail;
+  Comment("%s", os.c_str());
 }
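
DeoptComment now takes the whole Deoptimizer::Reason, so every deopt annotation carries the source position and lithium mnemonic alongside the optional detail, producing something like ";;; deoptimize at 41 check-maps: wrong map". A standalone mimic of the formatting (assumption: HSourcePosition's exact rendering lives in the hydrogen headers outside this diff, so a raw int stands in for it here):

    #include <cstddef>
    #include <sstream>
    #include <string>

    std::string DeoptCommentText(int raw_position, const char* mnemonic,
                                 const char* detail) {
      std::ostringstream os;
      os << ";;; deoptimize at " << raw_position << " " << mnemonic;
      if (detail != NULL) os << ": " << detail;
      return os.str();
    }
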
 
 
diff --git a/src/lithium-codegen.h b/src/lithium-codegen.h
index d57e614..301debe 100644
--- a/src/lithium-codegen.h
+++ b/src/lithium-codegen.h
@@ -8,6 +8,7 @@
 #include "src/v8.h"
 
 #include "src/compiler.h"
+#include "src/deoptimizer.h"
 
 namespace v8 {
 namespace internal {
@@ -33,7 +34,7 @@
   HGraph* graph() const;
 
   void FPRINTF_CHECKING Comment(const char* format, ...);
-  void DeoptComment(const char* mnemonic, const char* reason);
+  void DeoptComment(const Deoptimizer::Reason& reason);
 
   bool GenerateBody();
   virtual void GenerateBodyInstructionPre(LInstruction* instr) {}
diff --git a/src/mips/code-stubs-mips.cc b/src/mips/code-stubs-mips.cc
index 0154508..a9c10b8 100644
--- a/src/mips/code-stubs-mips.cc
+++ b/src/mips/code-stubs-mips.cc
@@ -3644,8 +3644,8 @@
   __ lbu(tmp1, FieldMemOperand(tmp1, Map::kInstanceTypeOffset));
   __ lbu(tmp2, FieldMemOperand(tmp2, Map::kInstanceTypeOffset));
 
-  __ JumpIfNotUniqueName(tmp1, &miss);
-  __ JumpIfNotUniqueName(tmp2, &miss);
+  __ JumpIfNotUniqueNameInstanceType(tmp1, &miss);
+  __ JumpIfNotUniqueNameInstanceType(tmp2, &miss);
 
   // Use a0 as result
   __ mov(v0, a0);
@@ -3899,7 +3899,7 @@
     __ lw(entity_name, FieldMemOperand(entity_name, HeapObject::kMapOffset));
     __ lbu(entity_name,
            FieldMemOperand(entity_name, Map::kInstanceTypeOffset));
-    __ JumpIfNotUniqueName(entity_name, miss);
+    __ JumpIfNotUniqueNameInstanceType(entity_name, miss);
     __ bind(&good);
 
     // Restore the properties.
@@ -4076,7 +4076,7 @@
       __ lw(entry_key, FieldMemOperand(entry_key, HeapObject::kMapOffset));
       __ lbu(entry_key,
              FieldMemOperand(entry_key, Map::kInstanceTypeOffset));
-      __ JumpIfNotUniqueName(entry_key, &maybe_in_dictionary);
+      __ JumpIfNotUniqueNameInstanceType(entry_key, &maybe_in_dictionary);
     }
   }
 
diff --git a/src/mips/full-codegen-mips.cc b/src/mips/full-codegen-mips.cc
index 53915c5..568b8bd 100644
--- a/src/mips/full-codegen-mips.cc
+++ b/src/mips/full-codegen-mips.cc
@@ -1342,6 +1342,24 @@
 }
 
 
+void FullCodeGenerator::EmitLoadHomeObject(SuperReference* expr) {
+  Comment cnmt(masm_, "[ SuperReference ");
+
+  __ lw(LoadDescriptor::ReceiverRegister(),
+        MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
+
+  Handle<Symbol> home_object_symbol(isolate()->heap()->home_object_symbol());
+  __ li(LoadDescriptor::NameRegister(), home_object_symbol);
+
+  CallLoadIC(NOT_CONTEXTUAL, expr->HomeObjectFeedbackId());
+
+  Label done;
+  __ Branch(&done, ne, v0, Operand(isolate()->factory()->undefined_value()));
+  __ CallRuntime(Runtime::kThrowNonMethodError, 0);
+  __ bind(&done);
+}
+
+
 void FullCodeGenerator::EmitLoadGlobalCheckExtensions(VariableProxy* proxy,
                                                       TypeofState typeof_state,
                                                       Label* slow) {
@@ -2281,6 +2299,7 @@
 void FullCodeGenerator::EmitNamedPropertyLoad(Property* prop) {
   SetSourcePosition(prop->position());
   Literal* key = prop->key()->AsLiteral();
+
   __ li(LoadDescriptor::NameRegister(), Operand(key->value()));
   if (FLAG_vector_ics) {
     __ li(VectorLoadICDescriptor::SlotRegister(),
@@ -2292,6 +2311,21 @@
 }
 
 
+void FullCodeGenerator::EmitNamedSuperPropertyLoad(Property* prop) {
+  SetSourcePosition(prop->position());
+  Literal* key = prop->key()->AsLiteral();
+  DCHECK(!key->value()->IsSmi());
+  DCHECK(prop->IsSuperAccess());
+
+  SuperReference* super_ref = prop->obj()->AsSuperReference();
+  EmitLoadHomeObject(super_ref);
+  __ Push(v0);
+  VisitForStackValue(super_ref->this_var());
+  __ Push(key->value());
+  __ CallRuntime(Runtime::kLoadFromSuper, 3);
+}
+
+
 void FullCodeGenerator::EmitKeyedPropertyLoad(Property* prop) {
   SetSourcePosition(prop->position());
   Handle<Code> ic = CodeFactory::KeyedLoadIC(isolate()).code();
@@ -2585,9 +2619,13 @@
   Expression* key = expr->key();
 
   if (key->IsPropertyName()) {
-    VisitForAccumulatorValue(expr->obj());
-    __ Move(LoadDescriptor::ReceiverRegister(), v0);
-    EmitNamedPropertyLoad(expr);
+    if (!expr->IsSuperAccess()) {
+      VisitForAccumulatorValue(expr->obj());
+      __ Move(LoadDescriptor::ReceiverRegister(), v0);
+      EmitNamedPropertyLoad(expr);
+    } else {
+      EmitNamedSuperPropertyLoad(expr);
+    }
     PrepareForBailoutForId(expr->LoadId(), TOS_REG);
     context()->Plug(v0);
   } else {
@@ -2627,6 +2665,7 @@
   } else {
     // Load the function from the receiver.
     DCHECK(callee->IsProperty());
+    DCHECK(!callee->AsProperty()->IsSuperAccess());
     __ lw(LoadDescriptor::ReceiverRegister(), MemOperand(sp, 0));
     EmitNamedPropertyLoad(callee->AsProperty());
     PrepareForBailoutForId(callee->AsProperty()->LoadId(), TOS_REG);
@@ -2640,6 +2679,44 @@
 }
 
 
+void FullCodeGenerator::EmitSuperCallWithLoadIC(Call* expr) {
+  Expression* callee = expr->expression();
+  DCHECK(callee->IsProperty());
+  Property* prop = callee->AsProperty();
+  DCHECK(prop->IsSuperAccess());
+
+  SetSourcePosition(prop->position());
+  Literal* key = prop->key()->AsLiteral();
+  DCHECK(!key->value()->IsSmi());
+  // Load the function from the receiver.
+  const Register scratch = a1;
+  SuperReference* super_ref = prop->obj()->AsSuperReference();
+  EmitLoadHomeObject(super_ref);
+  __ Push(v0);
+  VisitForAccumulatorValue(super_ref->this_var());
+  __ Push(v0);
+  __ lw(scratch, MemOperand(sp, kPointerSize));
+  __ Push(scratch, v0);
+  __ Push(key->value());
+
+  // Stack here:
+  //  - home_object
+  //  - this (receiver)
+  //  - home_object <-- LoadFromSuper will pop here and below.
+  //  - this (receiver)
+  //  - key
+  __ CallRuntime(Runtime::kLoadFromSuper, 3);
+
+  // Replace home_object with target function.
+  __ sw(v0, MemOperand(sp, kPointerSize));
+
+  // Stack here:
+  // - target function
+  // - this (receiver)
+  EmitCall(expr, CallICState::METHOD);
+}
+
+
 // Code common for calls using the IC.
 void FullCodeGenerator::EmitKeyedCallWithLoadIC(Call* expr,
                                                 Expression* key) {
@@ -2807,13 +2884,20 @@
     EmitCall(expr);
   } else if (call_type == Call::PROPERTY_CALL) {
     Property* property = callee->AsProperty();
-    { PreservePositionScope scope(masm()->positions_recorder());
-      VisitForStackValue(property->obj());
-    }
-    if (property->key()->IsPropertyName()) {
-      EmitCallWithLoadIC(expr);
+    bool is_named_call = property->key()->IsPropertyName();
+    // super.x() is handled in EmitSuperCallWithLoadIC.
+    if (property->IsSuperAccess() && is_named_call) {
+      EmitSuperCallWithLoadIC(expr);
     } else {
-      EmitKeyedCallWithLoadIC(expr, property->key());
+      {
+        PreservePositionScope scope(masm()->positions_recorder());
+        VisitForStackValue(property->obj());
+      }
+      if (is_named_call) {
+        EmitCallWithLoadIC(expr);
+      } else {
+        EmitKeyedCallWithLoadIC(expr, property->key());
+      }
     }
   } else {
     DCHECK(call_type == Call::OTHER_CALL);
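
For named super calls the usual "evaluate the receiver expression onto the stack" step is skipped: super.foo() has no object expression, the lookup starts at the enclosing method's [[HomeObject]] (loaded by EmitLoadHomeObject above), and the call still receives the original this. A minimal model of the semantics Runtime::kLoadFromSuper implements (plain C++, illustrative only):

    #include <functional>
    #include <map>
    #include <string>

    struct ObjModel {
      std::map<std::string, std::function<void(ObjModel* self)>> methods;
      ObjModel* prototype = NULL;
    };

    // super.key(...): look up starting at the home object's *prototype*,
    // but invoke with the original receiver as 'this'.
    void CallSuper(ObjModel* home_object, ObjModel* receiver,
                   const std::string& key) {
      for (ObjModel* o = home_object->prototype; o != NULL; o = o->prototype) {
        auto it = o->methods.find(key);
        if (it != o->methods.end()) {
          it->second(receiver);
          return;
        }
      }
      // Real V8 throws a TypeError here; the runtime call does the throwing.
    }
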
diff --git a/src/mips/lithium-codegen-mips.cc b/src/mips/lithium-codegen-mips.cc
index 089fc5c..497d10f 100644
--- a/src/mips/lithium-codegen-mips.cc
+++ b/src/mips/lithium-codegen-mips.cc
@@ -324,31 +324,29 @@
 
 
 bool LCodeGen::GenerateJumpTable() {
-  if (deopt_jump_table_.length() > 0) {
+  if (jump_table_.length() > 0) {
     Label needs_frame, call_deopt_entry;
 
     Comment(";;; -------------------- Jump table --------------------");
-    Address base = deopt_jump_table_[0].address;
+    Address base = jump_table_[0].address;
 
     Register entry_offset = t9;
 
-    int length = deopt_jump_table_.length();
+    int length = jump_table_.length();
     for (int i = 0; i < length; i++) {
-      __ bind(&deopt_jump_table_[i].label);
+      Deoptimizer::JumpTableEntry* table_entry = &jump_table_[i];
+      __ bind(&table_entry->label);
 
-      Deoptimizer::BailoutType type = deopt_jump_table_[i].bailout_type;
-      DCHECK(type == deopt_jump_table_[0].bailout_type);
-      Address entry = deopt_jump_table_[i].address;
-      int id = Deoptimizer::GetDeoptimizationId(isolate(), entry, type);
-      DCHECK_NE(Deoptimizer::kNotDeoptimizationEntry, id);
-      Comment(";;; jump table entry %d: deoptimization bailout %d.", i, id);
+      DCHECK(table_entry->bailout_type == jump_table_[0].bailout_type);
+      Address entry = table_entry->address;
+      DeoptComment(table_entry->reason);
 
       // Second-level deopt table entries are contiguous and small, so instead
       // of loading the full, absolute address of each one, load an immediate
       // offset which will be added to the base address later.
       __ li(entry_offset, Operand(entry - base));
 
-      if (deopt_jump_table_[i].needs_frame) {
+      if (table_entry->needs_frame) {
         DCHECK(!info()->saves_caller_doubles());
         if (needs_frame.is_bound()) {
           __ Branch(&needs_frame);
@@ -819,7 +817,7 @@
 void LCodeGen::DeoptimizeIf(Condition condition, LInstruction* instr,
                             Deoptimizer::BailoutType bailout_type,
                             Register src1, const Operand& src2,
-                            const char* reason) {
+                            const char* detail) {
   LEnvironment* environment = instr->environment();
   RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
   DCHECK(environment->HasBeenRegistered());
@@ -860,36 +858,36 @@
     __ bind(&skip);
   }
 
+  Deoptimizer::Reason reason(instr->hydrogen_value()->position().raw(),
+                             instr->Mnemonic(), detail);
   DCHECK(info()->IsStub() || frame_is_built_);
   // Go through jump table if we need to handle condition, build frame, or
   // restore caller doubles.
   if (condition == al && frame_is_built_ &&
       !info()->saves_caller_doubles()) {
-    DeoptComment(instr->Mnemonic(), reason);
+    DeoptComment(reason);
     __ Call(entry, RelocInfo::RUNTIME_ENTRY, condition, src1, src2);
   } else {
+    Deoptimizer::JumpTableEntry table_entry(entry, reason, bailout_type,
+                                            !frame_is_built_);
     // We often have several deopts to the same entry, reuse the last
     // jump entry if this is the case.
-    if (deopt_jump_table_.is_empty() ||
-        (deopt_jump_table_.last().address != entry) ||
-        (deopt_jump_table_.last().bailout_type != bailout_type) ||
-        (deopt_jump_table_.last().needs_frame != !frame_is_built_)) {
-      Deoptimizer::JumpTableEntry table_entry(entry, instr->Mnemonic(), reason,
-                                              bailout_type, !frame_is_built_);
-      deopt_jump_table_.Add(table_entry, zone());
+    if (jump_table_.is_empty() ||
+        !table_entry.IsEquivalentTo(jump_table_.last())) {
+      jump_table_.Add(table_entry, zone());
     }
-    __ Branch(&deopt_jump_table_.last().label, condition, src1, src2);
+    __ Branch(&jump_table_.last().label, condition, src1, src2);
   }
 }
 
 
 void LCodeGen::DeoptimizeIf(Condition condition, LInstruction* instr,
                             Register src1, const Operand& src2,
-                            const char* reason) {
+                            const char* detail) {
   Deoptimizer::BailoutType bailout_type = info()->IsStub()
       ? Deoptimizer::LAZY
       : Deoptimizer::EAGER;
-  DeoptimizeIf(condition, instr, bailout_type, src1, src2, reason);
+  DeoptimizeIf(condition, instr, bailout_type, src1, src2, detail);
 }
 
 
@@ -4924,13 +4922,11 @@
 
     __ bind(&check_false);
     __ LoadRoot(at, Heap::kFalseValueRootIndex);
-    __ RecordComment("Deferred TaggedToI: cannot truncate");
-    DeoptimizeIf(ne, instr, scratch2, Operand(at));
+    DeoptimizeIf(ne, instr, scratch2, Operand(at), "cannot truncate");
     __ Branch(USE_DELAY_SLOT, &done);
     __ mov(input_reg, zero_reg);  // In delay slot.
   } else {
-    __ RecordComment("Deferred TaggedToI: not a heap number");
-    DeoptimizeIf(ne, instr, scratch1, Operand(at));
+    DeoptimizeIf(ne, instr, scratch1, Operand(at), "not a heap number");
 
     // Load the double value.
     __ ldc1(double_scratch,
@@ -4945,16 +4941,15 @@
                        except_flag,
                        kCheckForInexactConversion);
 
-    __ RecordComment("Deferred TaggedToI: lost precision or NaN");
-    DeoptimizeIf(ne, instr, except_flag, Operand(zero_reg));
+    DeoptimizeIf(ne, instr, except_flag, Operand(zero_reg),
+                 "lost precision or NaN");
 
     if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
       __ Branch(&done, ne, input_reg, Operand(zero_reg));
 
       __ Mfhc1(scratch1, double_scratch);
       __ And(scratch1, scratch1, Operand(HeapNumber::kSignMask));
-      __ RecordComment("Deferred TaggedToI: minus zero");
-      DeoptimizeIf(ne, instr, scratch1, Operand(zero_reg));
+      DeoptimizeIf(ne, instr, scratch1, Operand(zero_reg), "minus zero");
     }
   }
   __ bind(&done);
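
The open-coded three-field comparison against the last jump-table entry becomes Deoptimizer::JumpTableEntry::IsEquivalentTo, and each entry now carries the Reason used for the DeoptComment emitted in GenerateJumpTable above. The equivalence check presumably still compares the same three fields (assumed shape; the definition lives in deoptimizer.h, outside this diff):

    bool IsEquivalentTo(const JumpTableEntry& other) const {
      return address == other.address && bailout_type == other.bailout_type &&
             needs_frame == other.needs_frame;
    }
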
diff --git a/src/mips/lithium-codegen-mips.h b/src/mips/lithium-codegen-mips.h
index f5087a9..5402c9a 100644
--- a/src/mips/lithium-codegen-mips.h
+++ b/src/mips/lithium-codegen-mips.h
@@ -25,7 +25,7 @@
   LCodeGen(LChunk* chunk, MacroAssembler* assembler, CompilationInfo* info)
       : LCodeGenBase(chunk, assembler, info),
         deoptimizations_(4, info->zone()),
-        deopt_jump_table_(4, info->zone()),
+        jump_table_(4, info->zone()),
         deoptimization_literals_(8, info->zone()),
         inlined_function_count_(0),
         scope_(info->scope()),
@@ -232,11 +232,11 @@
                     Deoptimizer::BailoutType bailout_type,
                     Register src1 = zero_reg,
                     const Operand& src2 = Operand(zero_reg),
-                    const char* reason = NULL);
+                    const char* detail = NULL);
   void DeoptimizeIf(Condition condition, LInstruction* instr,
                     Register src1 = zero_reg,
                     const Operand& src2 = Operand(zero_reg),
-                    const char* reason = NULL);
+                    const char* detail = NULL);
 
   void AddToTranslation(LEnvironment* environment,
                         Translation* translation,
@@ -364,7 +364,7 @@
   void EmitVectorLoadICRegisters(T* instr);
 
   ZoneList<LEnvironment*> deoptimizations_;
-  ZoneList<Deoptimizer::JumpTableEntry> deopt_jump_table_;
+  ZoneList<Deoptimizer::JumpTableEntry> jump_table_;
   ZoneList<Handle<Object> > deoptimization_literals_;
   int inlined_function_count_;
   Scope* const scope_;
diff --git a/src/mips/macro-assembler-mips.cc b/src/mips/macro-assembler-mips.cc
index e7940f4..604293b 100644
--- a/src/mips/macro-assembler-mips.cc
+++ b/src/mips/macro-assembler-mips.cc
@@ -3582,8 +3582,8 @@
 }
 
 
-void MacroAssembler::JumpIfNotUniqueName(Register reg,
-                                         Label* not_unique_name) {
+void MacroAssembler::JumpIfNotUniqueNameInstanceType(Register reg,
+                                                     Label* not_unique_name) {
   STATIC_ASSERT(kInternalizedTag == 0 && kStringTag == 0);
   Label succeed;
   And(at, reg, Operand(kIsNotStringMask | kIsNotInternalizedMask));
diff --git a/src/mips/macro-assembler-mips.h b/src/mips/macro-assembler-mips.h
index 5a7905c..ce52986 100644
--- a/src/mips/macro-assembler-mips.h
+++ b/src/mips/macro-assembler-mips.h
@@ -1483,7 +1483,7 @@
   void JumpIfInstanceTypeIsNotSequentialOneByte(Register type, Register scratch,
                                                 Label* failure);
 
-  void JumpIfNotUniqueName(Register reg, Label* not_unique_name);
+  void JumpIfNotUniqueNameInstanceType(Register reg, Label* not_unique_name);
 
   void EmitSeqStringSetCharCheck(Register string,
                                  Register index,
diff --git a/src/mips64/code-stubs-mips64.cc b/src/mips64/code-stubs-mips64.cc
index bd6a016..60263b5 100644
--- a/src/mips64/code-stubs-mips64.cc
+++ b/src/mips64/code-stubs-mips64.cc
@@ -3681,8 +3681,8 @@
   __ lbu(tmp1, FieldMemOperand(tmp1, Map::kInstanceTypeOffset));
   __ lbu(tmp2, FieldMemOperand(tmp2, Map::kInstanceTypeOffset));
 
-  __ JumpIfNotUniqueName(tmp1, &miss);
-  __ JumpIfNotUniqueName(tmp2, &miss);
+  __ JumpIfNotUniqueNameInstanceType(tmp1, &miss);
+  __ JumpIfNotUniqueNameInstanceType(tmp2, &miss);
 
   // Use a0 as result
   __ mov(v0, a0);
@@ -3937,7 +3937,7 @@
     __ ld(entity_name, FieldMemOperand(entity_name, HeapObject::kMapOffset));
     __ lbu(entity_name,
            FieldMemOperand(entity_name, Map::kInstanceTypeOffset));
-    __ JumpIfNotUniqueName(entity_name, miss);
+    __ JumpIfNotUniqueNameInstanceType(entity_name, miss);
     __ bind(&good);
 
     // Restore the properties.
@@ -4114,7 +4114,7 @@
       __ ld(entry_key, FieldMemOperand(entry_key, HeapObject::kMapOffset));
       __ lbu(entry_key,
              FieldMemOperand(entry_key, Map::kInstanceTypeOffset));
-      __ JumpIfNotUniqueName(entry_key, &maybe_in_dictionary);
+      __ JumpIfNotUniqueNameInstanceType(entry_key, &maybe_in_dictionary);
     }
   }
 
diff --git a/src/mips64/full-codegen-mips64.cc b/src/mips64/full-codegen-mips64.cc
index 291e4ab..5c26f16 100644
--- a/src/mips64/full-codegen-mips64.cc
+++ b/src/mips64/full-codegen-mips64.cc
@@ -1337,6 +1337,24 @@
 }
 
 
+void FullCodeGenerator::EmitLoadHomeObject(SuperReference* expr) {
+  Comment cmnt(masm_, "[ SuperReference ");
+
+  __ ld(LoadDescriptor::ReceiverRegister(),
+        MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
+
+  Handle<Symbol> home_object_symbol(isolate()->heap()->home_object_symbol());
+  __ li(LoadDescriptor::NameRegister(), home_object_symbol);
+
+  CallLoadIC(NOT_CONTEXTUAL, expr->HomeObjectFeedbackId());
+
+  Label done;
+  __ Branch(&done, ne, v0, Operand(isolate()->factory()->undefined_value()));
+  __ CallRuntime(Runtime::kThrowNonMethodError, 0);
+  __ bind(&done);
+}
+
+
 void FullCodeGenerator::EmitLoadGlobalCheckExtensions(VariableProxy* proxy,
                                                       TypeofState typeof_state,
                                                       Label* slow) {
@@ -2278,6 +2296,8 @@
 void FullCodeGenerator::EmitNamedPropertyLoad(Property* prop) {
   SetSourcePosition(prop->position());
   Literal* key = prop->key()->AsLiteral();
+  DCHECK(!prop->IsSuperAccess());
+
   __ li(LoadDescriptor::NameRegister(), Operand(key->value()));
   if (FLAG_vector_ics) {
     __ li(VectorLoadICDescriptor::SlotRegister(),
@@ -2289,6 +2309,21 @@
 }
 
 
+void FullCodeGenerator::EmitNamedSuperPropertyLoad(Property* prop) {
+  SetSourcePosition(prop->position());
+  Literal* key = prop->key()->AsLiteral();
+  DCHECK(!key->value()->IsSmi());
+  DCHECK(prop->IsSuperAccess());
+
+  SuperReference* super_ref = prop->obj()->AsSuperReference();
+  EmitLoadHomeObject(super_ref);
+  __ Push(v0);
+  VisitForStackValue(super_ref->this_var());
+  __ Push(key->value());
+  __ CallRuntime(Runtime::kLoadFromSuper, 3);
+}
+
+
 void FullCodeGenerator::EmitKeyedPropertyLoad(Property* prop) {
   SetSourcePosition(prop->position());
   // Call keyed load IC. It has register arguments receiver and key.
@@ -2585,9 +2620,13 @@
   Expression* key = expr->key();
 
   if (key->IsPropertyName()) {
-    VisitForAccumulatorValue(expr->obj());
-    __ Move(LoadDescriptor::ReceiverRegister(), v0);
-    EmitNamedPropertyLoad(expr);
+    if (!expr->IsSuperAccess()) {
+      VisitForAccumulatorValue(expr->obj());
+      __ Move(LoadDescriptor::ReceiverRegister(), v0);
+      EmitNamedPropertyLoad(expr);
+    } else {
+      EmitNamedSuperPropertyLoad(expr);
+    }
     PrepareForBailoutForId(expr->LoadId(), TOS_REG);
     context()->Plug(v0);
   } else {
@@ -2627,6 +2666,7 @@
   } else {
     // Load the function from the receiver.
     DCHECK(callee->IsProperty());
+    DCHECK(!callee->AsProperty()->IsSuperAccess());
     __ ld(LoadDescriptor::ReceiverRegister(), MemOperand(sp, 0));
     EmitNamedPropertyLoad(callee->AsProperty());
     PrepareForBailoutForId(callee->AsProperty()->LoadId(), TOS_REG);
@@ -2640,6 +2680,44 @@
 }
 
 
+void FullCodeGenerator::EmitSuperCallWithLoadIC(Call* expr) {
+  Expression* callee = expr->expression();
+  DCHECK(callee->IsProperty());
+  Property* prop = callee->AsProperty();
+  DCHECK(prop->IsSuperAccess());
+
+  SetSourcePosition(prop->position());
+  Literal* key = prop->key()->AsLiteral();
+  DCHECK(!key->value()->IsSmi());
+  // Load the function from the receiver.
+  const Register scratch = a1;
+  SuperReference* super_ref = prop->obj()->AsSuperReference();
+  EmitLoadHomeObject(super_ref);
+  __ Push(v0);
+  VisitForAccumulatorValue(super_ref->this_var());
+  __ Push(v0);
+  __ ld(scratch, MemOperand(sp, kPointerSize));
+  __ Push(scratch, v0);
+  __ Push(key->value());
+
+  // Stack here:
+  //  - home_object
+  //  - this (receiver)
+  //  - home_object <-- LoadFromSuper will pop here and below.
+  //  - this (receiver)
+  //  - key
+  __ CallRuntime(Runtime::kLoadFromSuper, 3);
+
+  // Replace home_object with target function.
+  __ sd(v0, MemOperand(sp, kPointerSize));
+
+  // Stack here:
+  // - target function
+  // - this (receiver)
+  EmitCall(expr, CallICState::METHOD);
+}
+
+
 // Code common for calls using the IC.
 void FullCodeGenerator::EmitKeyedCallWithLoadIC(Call* expr,
                                                 Expression* key) {
@@ -2806,13 +2884,20 @@
     EmitCall(expr);
   } else if (call_type == Call::PROPERTY_CALL) {
     Property* property = callee->AsProperty();
-    { PreservePositionScope scope(masm()->positions_recorder());
-      VisitForStackValue(property->obj());
-    }
-    if (property->key()->IsPropertyName()) {
-      EmitCallWithLoadIC(expr);
+    bool is_named_call = property->key()->IsPropertyName();
+    // super.x() is handled in EmitCallWithLoadIC.
+    if (property->IsSuperAccess() && is_named_call) {
+      EmitSuperCallWithLoadIC(expr);
     } else {
-      EmitKeyedCallWithLoadIC(expr, property->key());
+      {
+        PreservePositionScope scope(masm()->positions_recorder());
+        VisitForStackValue(property->obj());
+      }
+      if (is_named_call) {
+        EmitCallWithLoadIC(expr);
+      } else {
+        EmitKeyedCallWithLoadIC(expr, property->key());
+      }
     }
   } else {
     DCHECK(call_type == Call::OTHER_CALL);
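
The stack comments in EmitSuperCallWithLoadIC are the heart of the protocol: home_object and the receiver are pushed twice because Runtime::kLoadFromSuper consumes one (home_object, this, key) triple, and the surviving home_object slot is then overwritten with the loaded method so that EmitCall sees [target function, receiver]. A standalone model of that bookkeeping (plain C++, not V8 code):

#include <cassert>
#include <string>
#include <vector>

int main() {
  std::vector<std::string> stack;                // bottom -> top
  stack.push_back("home_object");                // Push(v0) after EmitLoadHomeObject
  stack.push_back("this");                       // receiver
  stack.push_back("home_object");                // copy reloaded from sp + kPointerSize
  stack.push_back("this");                       // copy of the receiver
  stack.push_back("key");                        // property name
  for (int i = 0; i < 3; i++) stack.pop_back();  // kLoadFromSuper pops its 3 args
  stack[stack.size() - 2] = "super_method";      // sd(v0, MemOperand(sp, kPointerSize))
  assert(stack.size() == 2);
  assert(stack[0] == "super_method" && stack[1] == "this");  // what EmitCall expects
  return 0;
}
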
diff --git a/src/mips64/lithium-codegen-mips64.cc b/src/mips64/lithium-codegen-mips64.cc
index de619d6..8a0a449 100644
--- a/src/mips64/lithium-codegen-mips64.cc
+++ b/src/mips64/lithium-codegen-mips64.cc
@@ -300,22 +300,20 @@
 
 
 bool LCodeGen::GenerateJumpTable() {
-  if (deopt_jump_table_.length() > 0) {
+  if (jump_table_.length() > 0) {
     Comment(";;; -------------------- Jump table --------------------");
   }
   Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm_);
   Label table_start;
   __ bind(&table_start);
   Label needs_frame;
-  for (int i = 0; i < deopt_jump_table_.length(); i++) {
-    __ bind(&deopt_jump_table_[i].label);
-    Address entry = deopt_jump_table_[i].address;
-    Deoptimizer::BailoutType type = deopt_jump_table_[i].bailout_type;
-    int id = Deoptimizer::GetDeoptimizationId(isolate(), entry, type);
-    DCHECK_NE(Deoptimizer::kNotDeoptimizationEntry, id);
-    Comment(";;; jump table entry %d: deoptimization bailout %d.", i, id);
+  for (int i = 0; i < jump_table_.length(); i++) {
+    Deoptimizer::JumpTableEntry* table_entry = &jump_table_[i];
+    __ bind(&table_entry->label);
+    Address entry = table_entry->address;
+    DeoptComment(table_entry->reason);
     __ li(t9, Operand(ExternalReference::ForDeoptEntry(entry)));
-    if (deopt_jump_table_[i].needs_frame) {
+    if (table_entry->needs_frame) {
       DCHECK(!info()->saves_caller_doubles());
       if (needs_frame.is_bound()) {
         __ Branch(&needs_frame);
@@ -769,7 +767,7 @@
 void LCodeGen::DeoptimizeIf(Condition condition, LInstruction* instr,
                             Deoptimizer::BailoutType bailout_type,
                             Register src1, const Operand& src2,
-                            const char* reason) {
+                            const char* detail) {
   LEnvironment* environment = instr->environment();
   RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
   DCHECK(environment->HasBeenRegistered());
@@ -810,36 +808,36 @@
     __ bind(&skip);
   }
 
+  Deoptimizer::Reason reason(instr->hydrogen_value()->position().raw(),
+                             instr->Mnemonic(), detail);
   DCHECK(info()->IsStub() || frame_is_built_);
   // Go through jump table if we need to handle condition, build frame, or
   // restore caller doubles.
   if (condition == al && frame_is_built_ &&
       !info()->saves_caller_doubles()) {
-    DeoptComment(instr->Mnemonic(), reason);
+    DeoptComment(reason);
     __ Call(entry, RelocInfo::RUNTIME_ENTRY, condition, src1, src2);
   } else {
+    Deoptimizer::JumpTableEntry table_entry(entry, reason, bailout_type,
+                                            !frame_is_built_);
     // We often have several deopts to the same entry; reuse the last
     // jump-table entry if this is the case.
-    if (deopt_jump_table_.is_empty() ||
-        (deopt_jump_table_.last().address != entry) ||
-        (deopt_jump_table_.last().bailout_type != bailout_type) ||
-        (deopt_jump_table_.last().needs_frame != !frame_is_built_)) {
-      Deoptimizer::JumpTableEntry table_entry(entry, instr->Mnemonic(), reason,
-                                              bailout_type, !frame_is_built_);
-      deopt_jump_table_.Add(table_entry, zone());
+    if (jump_table_.is_empty() ||
+        !table_entry.IsEquivalentTo(jump_table_.last())) {
+      jump_table_.Add(table_entry, zone());
     }
-    __ Branch(&deopt_jump_table_.last().label, condition, src1, src2);
+    __ Branch(&jump_table_.last().label, condition, src1, src2);
   }
 }
 
 
 void LCodeGen::DeoptimizeIf(Condition condition, LInstruction* instr,
                             Register src1, const Operand& src2,
-                            const char* reason) {
+                            const char* detail) {
   Deoptimizer::BailoutType bailout_type = info()->IsStub()
       ? Deoptimizer::LAZY
       : Deoptimizer::EAGER;
-  DeoptimizeIf(condition, instr, bailout_type, src1, src2, reason);
+  DeoptimizeIf(condition, instr, bailout_type, src1, src2, detail);
 }
 
 
@@ -4956,13 +4954,11 @@
 
     __ bind(&check_false);
     __ LoadRoot(at, Heap::kFalseValueRootIndex);
-    __ RecordComment("Deferred TaggedToI: cannot truncate");
-    DeoptimizeIf(ne, instr, scratch2, Operand(at));
+    DeoptimizeIf(ne, instr, scratch2, Operand(at), "cannot truncate");
     __ Branch(USE_DELAY_SLOT, &done);
     __ mov(input_reg, zero_reg);  // In delay slot.
   } else {
-    __ RecordComment("Deferred TaggedToI: not a heap number");
-    DeoptimizeIf(ne, instr, scratch1, Operand(at));
+    DeoptimizeIf(ne, instr, scratch1, Operand(at), "not a heap number");
 
     // Load the double value.
     __ ldc1(double_scratch,
@@ -4977,16 +4973,15 @@
                        except_flag,
                        kCheckForInexactConversion);
 
-    __ RecordComment("Deferred TaggedToI: lost precision or NaN");
-    DeoptimizeIf(ne, instr, except_flag, Operand(zero_reg));
+    DeoptimizeIf(ne, instr, except_flag, Operand(zero_reg),
+                 "lost precision or NaN");
 
     if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
       __ Branch(&done, ne, input_reg, Operand(zero_reg));
 
       __ mfhc1(scratch1, double_scratch);  // Get exponent/sign bits.
       __ And(scratch1, scratch1, Operand(HeapNumber::kSignMask));
-      __ RecordComment("Deferred TaggedToI: minus zero");
-      DeoptimizeIf(ne, instr, scratch1, Operand(zero_reg));
+      DeoptimizeIf(ne, instr, scratch1, Operand(zero_reg), "minus zero");
     }
   }
   __ bind(&done);
diff --git a/src/mips64/lithium-codegen-mips64.h b/src/mips64/lithium-codegen-mips64.h
index 3207f19..a4b7adb 100644
--- a/src/mips64/lithium-codegen-mips64.h
+++ b/src/mips64/lithium-codegen-mips64.h
@@ -25,7 +25,7 @@
   LCodeGen(LChunk* chunk, MacroAssembler* assembler, CompilationInfo* info)
       : LCodeGenBase(chunk, assembler, info),
         deoptimizations_(4, info->zone()),
-        deopt_jump_table_(4, info->zone()),
+        jump_table_(4, info->zone()),
         deoptimization_literals_(8, info->zone()),
         inlined_function_count_(0),
         scope_(info->scope()),
@@ -233,11 +233,11 @@
                     Deoptimizer::BailoutType bailout_type,
                     Register src1 = zero_reg,
                     const Operand& src2 = Operand(zero_reg),
-                    const char* reason = NULL);
+                    const char* detail = NULL);
   void DeoptimizeIf(Condition condition, LInstruction* instr,
                     Register src1 = zero_reg,
                     const Operand& src2 = Operand(zero_reg),
-                    const char* reason = NULL);
+                    const char* detail = NULL);
 
   void AddToTranslation(LEnvironment* environment,
                         Translation* translation,
@@ -365,7 +365,7 @@
   void EmitVectorLoadICRegisters(T* instr);
 
   ZoneList<LEnvironment*> deoptimizations_;
-  ZoneList<Deoptimizer::JumpTableEntry> deopt_jump_table_;
+  ZoneList<Deoptimizer::JumpTableEntry> jump_table_;
   ZoneList<Handle<Object> > deoptimization_literals_;
   int inlined_function_count_;
   Scope* const scope_;
diff --git a/src/mips64/macro-assembler-mips64.cc b/src/mips64/macro-assembler-mips64.cc
index 020ffe9..12d81bc 100644
--- a/src/mips64/macro-assembler-mips64.cc
+++ b/src/mips64/macro-assembler-mips64.cc
@@ -3492,8 +3492,8 @@
 }
 
 
-void MacroAssembler::JumpIfNotUniqueName(Register reg,
-                                         Label* not_unique_name) {
+void MacroAssembler::JumpIfNotUniqueNameInstanceType(Register reg,
+                                                     Label* not_unique_name) {
   STATIC_ASSERT(kInternalizedTag == 0 && kStringTag == 0);
   Label succeed;
   And(at, reg, Operand(kIsNotStringMask | kIsNotInternalizedMask));
diff --git a/src/mips64/macro-assembler-mips64.h b/src/mips64/macro-assembler-mips64.h
index 2bf8c33..2da48fb 100644
--- a/src/mips64/macro-assembler-mips64.h
+++ b/src/mips64/macro-assembler-mips64.h
@@ -1554,7 +1554,7 @@
   void JumpIfInstanceTypeIsNotSequentialOneByte(Register type, Register scratch,
                                                 Label* failure);
 
-  void JumpIfNotUniqueName(Register reg, Label* not_unique_name);
+  void JumpIfNotUniqueNameInstanceType(Register reg, Label* not_unique_name);
 
   void EmitSeqStringSetCharCheck(Register string,
                                  Register index,
diff --git a/src/objects.h b/src/objects.h
index d88240d..37227f9 100644
--- a/src/objects.h
+++ b/src/objects.h
@@ -1200,9 +1200,6 @@
   V(kSmiSubtractionOverflow, "Smi subtraction overflow")                       \
   V(kStackAccessBelowStackPointer, "Stack access below stack pointer")         \
   V(kStackFrameTypesMustMatch, "Stack frame types must match")                 \
-  V(kSwitchStatementMixedOrNonLiteralSwitchLabels,                             \
-    "SwitchStatement: mixed or non-literal switch labels")                     \
-  V(kSwitchStatementTooManyClauses, "SwitchStatement: too many clauses")       \
   V(kTheCurrentStackPointerIsBelowCsp,                                         \
     "The current stack pointer is below csp")                                  \
   V(kTheInstructionShouldBeALui, "The instruction should be a lui")            \
diff --git a/src/runtime.cc b/src/runtime.cc
index 3acbb81..3ecf044 100644
--- a/src/runtime.cc
+++ b/src/runtime.cc
@@ -7971,7 +7971,7 @@
   DCHECK(args.length() == 1);
 
   CONVERT_DOUBLE_ARG_CHECKED(x, 0);
-  float xf = static_cast<float>(x);
+  float xf = DoubleToFloat32(x);
   return *isolate->factory()->NewNumber(xf);
 }
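
Swapping static_cast<float> for DoubleToFloat32 presumably hardens the conversion for finite doubles beyond float's range, where the raw cast is undefined behaviour in C++. The real DoubleToFloat32 is declared elsewhere in V8 and may differ; a well-defined stand-in looks like:

#include <cassert>
#include <limits>

// Hypothetical stand-in for V8's DoubleToFloat32 (assumption: its point
// is to keep out-of-range conversions well defined; rounding exactly at
// the float boundary is coarser here than a bit-exact version would be).
inline float DoubleToFloat32Sketch(double x) {
  const double kFloatMax = std::numeric_limits<float>::max();
  if (x > kFloatMax) return std::numeric_limits<float>::infinity();
  if (x < -kFloatMax) return -std::numeric_limits<float>::infinity();
  return static_cast<float>(x);  // in range (or NaN): well defined
}

int main() {
  // A finite double above FLT_MAX converts to infinity, not UB.
  assert(DoubleToFloat32Sketch(1e39) ==
         std::numeric_limits<float>::infinity());
  return 0;
}
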
 
@@ -13007,6 +13007,16 @@
 }
 
 
+static Handle<JSObject> NewJSObjectWithNullProto(Isolate* isolate) {
+  Handle<JSObject> result =
+      isolate->factory()->NewJSObject(isolate->object_function());
+  Handle<Map> new_map = Map::Copy(Handle<Map>(result->map()));
+  new_map->set_prototype(*isolate->factory()->null_value());
+  JSObject::MigrateToMap(result, new_map);
+  return result;
+}
+
+
 // Evaluate a piece of JavaScript in the context of a stack frame for
 // debugging.  Things that need special attention are:
 // - Parameters and stack-allocated locals need to be materialized.  Altered
@@ -13049,8 +13059,7 @@
   DCHECK(!context.is_null());
 
   // Materialize stack locals and the arguments object.
-  Handle<JSObject> materialized =
-      isolate->factory()->NewJSObject(isolate->object_function());
+  Handle<JSObject> materialized = NewJSObjectWithNullProto(isolate);
 
   ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
       isolate, materialized,
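
NewJSObjectWithNullProto above exists, as far as the change shows, so the materialized debug-scope object has no prototype chain: with a plain NewJSObject, names inherited from Object.prototype (e.g. "toString") could masquerade as materialized stack locals during debug-evaluate. A toy prototype-chain model of the difference — this reading of the motivation is an inference, not stated in the patch:

#include <cassert>
#include <cstddef>
#include <map>
#include <string>

// Toy object: Has() consults own properties, then walks the prototype.
struct Obj {
  std::map<std::string, int> own;
  const Obj* proto;
  bool Has(const std::string& name) const {
    return own.count(name) > 0 || (proto != NULL && proto->Has(name));
  }
};

int main() {
  Obj object_prototype = { std::map<std::string, int>(), NULL };
  object_prototype.own["toString"] = 1;
  Obj with_proto = { std::map<std::string, int>(), &object_prototype };
  Obj null_proto = { std::map<std::string, int>(), NULL };
  assert(with_proto.Has("toString"));   // would look like a stack local
  assert(!null_proto.Has("toString"));  // what the null prototype buys
  return 0;
}
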
diff --git a/src/utils.h b/src/utils.h
index c23cf05..2991815 100644
--- a/src/utils.h
+++ b/src/utils.h
@@ -26,6 +26,13 @@
 // ----------------------------------------------------------------------------
 // General helper functions
 
+
+// NULL-safe equality for C strings: like strcmp(s1, s2) == 0, but either
+// argument may be NULL (two NULLs compare equal).
+inline bool CStringEquals(const char* s1, const char* s2) {
+  return (s1 == s2) || (s1 != NULL && s2 != NULL && strcmp(s1, s2) == 0);
+}
+
+
 // X must be a power of 2.  Returns the number of trailing zeros.
 inline int WhichPowerOf2(uint32_t x) {
   DCHECK(base::bits::IsPowerOfTwo32(x));
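
The subtle part of CStringEquals is its NULL contract: two NULLs compare equal, and NULL never equals a real string. A compilable spot-check:

#include <cassert>
#include <cstring>

inline bool CStringEquals(const char* s1, const char* s2) {
  return (s1 == s2) || (s1 != NULL && s2 != NULL && strcmp(s1, s2) == 0);
}

int main() {
  assert(CStringEquals(NULL, NULL));        // two NULLs are equal
  assert(!CStringEquals(NULL, "a"));        // NULL never equals a string
  assert(CStringEquals("deopt", "deopt"));  // ordinary equality
  return 0;
}
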
diff --git a/src/version.cc b/src/version.cc
index b9dd192..ba48147 100644
--- a/src/version.cc
+++ b/src/version.cc
@@ -34,7 +34,7 @@
 // system so their names cannot be changed without changing the scripts.
 #define MAJOR_VERSION     3
 #define MINOR_VERSION     29
-#define BUILD_NUMBER      84
+#define BUILD_NUMBER      87
 #define PATCH_LEVEL       0
 // Use 1 for candidates and 0 otherwise.
 // (Boolean macro values are not supported by all preprocessors.)
diff --git a/src/x64/code-stubs-x64.cc b/src/x64/code-stubs-x64.cc
index a7d38cd..a625269 100644
--- a/src/x64/code-stubs-x64.cc
+++ b/src/x64/code-stubs-x64.cc
@@ -318,7 +318,9 @@
     Label fast_power, try_arithmetic_simplification;
     // Detect integer exponents stored as double.
     __ DoubleToI(exponent, double_exponent, double_scratch,
-                 TREAT_MINUS_ZERO_AS_ZERO, &try_arithmetic_simplification);
+                 TREAT_MINUS_ZERO_AS_ZERO, &try_arithmetic_simplification,
+                 &try_arithmetic_simplification,
+                 &try_arithmetic_simplification);
     __ jmp(&int_exponent);
 
     __ bind(&try_arithmetic_simplification);
@@ -3454,8 +3456,8 @@
   __ movzxbp(tmp1, FieldOperand(tmp1, Map::kInstanceTypeOffset));
   __ movzxbp(tmp2, FieldOperand(tmp2, Map::kInstanceTypeOffset));
 
-  __ JumpIfNotUniqueName(tmp1, &miss, Label::kNear);
-  __ JumpIfNotUniqueName(tmp2, &miss, Label::kNear);
+  __ JumpIfNotUniqueNameInstanceType(tmp1, &miss, Label::kNear);
+  __ JumpIfNotUniqueNameInstanceType(tmp2, &miss, Label::kNear);
 
   // Unique names are compared by identity.
   Label done;
@@ -3674,8 +3676,8 @@
 
     // Check if the entry name is not a unique name.
     __ movp(entity_name, FieldOperand(entity_name, HeapObject::kMapOffset));
-    __ JumpIfNotUniqueName(FieldOperand(entity_name, Map::kInstanceTypeOffset),
-                           miss);
+    __ JumpIfNotUniqueNameInstanceType(
+        FieldOperand(entity_name, Map::kInstanceTypeOffset), miss);
     __ bind(&good);
   }
 
@@ -3804,8 +3806,9 @@
 
       // Check if the entry name is not a unique name.
       __ movp(scratch, FieldOperand(scratch, HeapObject::kMapOffset));
-      __ JumpIfNotUniqueName(FieldOperand(scratch, Map::kInstanceTypeOffset),
-                             &maybe_in_dictionary);
+      __ JumpIfNotUniqueNameInstanceType(
+          FieldOperand(scratch, Map::kInstanceTypeOffset),
+          &maybe_in_dictionary);
     }
   }
 
diff --git a/src/x64/lithium-codegen-x64.cc b/src/x64/lithium-codegen-x64.cc
index 15003a9..1981d55 100644
--- a/src/x64/lithium-codegen-x64.cc
+++ b/src/x64/lithium-codegen-x64.cc
@@ -306,11 +306,7 @@
     Deoptimizer::JumpTableEntry* table_entry = &jump_table_[i];
     __ bind(&table_entry->label);
     Address entry = table_entry->address;
-    Deoptimizer::BailoutType type = table_entry->bailout_type;
-    int id = Deoptimizer::GetDeoptimizationId(isolate(), entry, type);
-    DCHECK_NE(Deoptimizer::kNotDeoptimizationEntry, id);
-    Comment(";;; jump table entry %d: deoptimization bailout %d.", i, id);
-    DeoptComment(table_entry->mnemonic, table_entry->reason);
+    DeoptComment(table_entry->reason);
     if (table_entry->needs_frame) {
       DCHECK(!info()->saves_caller_doubles());
       __ Move(kScratchRegister, ExternalReference::ForDeoptEntry(entry));
@@ -729,7 +725,7 @@
 
 
 void LCodeGen::DeoptimizeIf(Condition cc, LInstruction* instr,
-                            const char* reason,
+                            const char* detail,
                             Deoptimizer::BailoutType bailout_type) {
   LEnvironment* environment = instr->environment();
   RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
@@ -774,22 +770,22 @@
     __ bind(&done);
   }
 
+  Deoptimizer::Reason reason(instr->hydrogen_value()->position().raw(),
+                             instr->Mnemonic(), detail);
   DCHECK(info()->IsStub() || frame_is_built_);
   // Go through jump table if we need to handle condition, build frame, or
   // restore caller doubles.
   if (cc == no_condition && frame_is_built_ &&
       !info()->saves_caller_doubles()) {
-    DeoptComment(instr->Mnemonic(), reason);
+    DeoptComment(reason);
     __ call(entry, RelocInfo::RUNTIME_ENTRY);
   } else {
+    Deoptimizer::JumpTableEntry table_entry(entry, reason, bailout_type,
+                                            !frame_is_built_);
     // We often have several deopts to the same entry; reuse the last
     // jump-table entry if this is the case.
     if (jump_table_.is_empty() ||
-        jump_table_.last().address != entry ||
-        jump_table_.last().needs_frame != !frame_is_built_ ||
-        jump_table_.last().bailout_type != bailout_type) {
-      Deoptimizer::JumpTableEntry table_entry(entry, instr->Mnemonic(), reason,
-                                              bailout_type, !frame_is_built_);
+        !table_entry.IsEquivalentTo(jump_table_.last())) {
       jump_table_.Add(table_entry, zone());
     }
     if (cc == no_condition) {
@@ -802,11 +798,11 @@
 
 
 void LCodeGen::DeoptimizeIf(Condition cc, LInstruction* instr,
-                            const char* reason) {
+                            const char* detail) {
   Deoptimizer::BailoutType bailout_type = info()->IsStub()
       ? Deoptimizer::LAZY
       : Deoptimizer::EAGER;
-  DeoptimizeIf(cc, instr, reason, bailout_type);
+  DeoptimizeIf(cc, instr, detail, bailout_type);
 }
 
 
@@ -1037,7 +1033,7 @@
     __ andl(dividend, Immediate(mask));
     __ negl(dividend);
     if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
-      DeoptimizeIf(zero, instr);
+      DeoptimizeIf(zero, instr, "minus zero");
     }
     __ jmp(&done, Label::kNear);
   }
@@ -1054,7 +1050,7 @@
   DCHECK(ToRegister(instr->result()).is(rax));
 
   if (divisor == 0) {
-    DeoptimizeIf(no_condition, instr);
+    DeoptimizeIf(no_condition, instr, "division by zero");
     return;
   }
 
@@ -1069,7 +1065,7 @@
     Label remainder_not_zero;
     __ j(not_zero, &remainder_not_zero, Label::kNear);
     __ cmpl(dividend, Immediate(0));
-    DeoptimizeIf(less, instr);
+    DeoptimizeIf(less, instr, "minus zero");
     __ bind(&remainder_not_zero);
   }
 }
@@ -1091,7 +1087,7 @@
   // deopt in this case because we can't return a NaN.
   if (hmod->CheckFlag(HValue::kCanBeDivByZero)) {
     __ testl(right_reg, right_reg);
-    DeoptimizeIf(zero, instr);
+    DeoptimizeIf(zero, instr, "division by zero");
   }
 
   // Check for kMinInt % -1; idiv would signal a divide error. We
@@ -1102,7 +1098,7 @@
     __ j(not_zero, &no_overflow_possible, Label::kNear);
     __ cmpl(right_reg, Immediate(-1));
     if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
-      DeoptimizeIf(equal, instr);
+      DeoptimizeIf(equal, instr, "minus zero");
     } else {
       __ j(not_equal, &no_overflow_possible, Label::kNear);
       __ Set(result_reg, 0);
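
This guard exists because kMinInt % -1 is the one remainder x86's idiv cannot compute: the implied quotient INT_MIN / -1 overflows and raises #DE, even though the mathematical remainder is 0. A scalar sketch of the guarded shape the generated code mirrors (the minus-zero deopt additionally distinguishes a 0 result from a negative dividend; the helper name here is illustrative, not V8's):

#include <cassert>
#include <climits>

int SafeMod(int dividend, int divisor) {
  assert(divisor != 0);  // division by zero has its own deopt above
  if (dividend == INT_MIN && divisor == -1) return 0;  // idiv would #DE
  return dividend % divisor;
}

int main() {
  assert(SafeMod(INT_MIN, -1) == 0);
  assert(SafeMod(7, -3) == 1);  // C++ remainder truncates toward zero
  return 0;
}
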
@@ -1122,7 +1118,7 @@
     __ j(not_sign, &positive_left, Label::kNear);
     __ idivl(right_reg);
     __ testl(result_reg, result_reg);
-    DeoptimizeIf(zero, instr);
+    DeoptimizeIf(zero, instr, "minus zero");
     __ jmp(&done, Label::kNear);
     __ bind(&positive_left);
   }
@@ -1148,13 +1144,13 @@
   // If the divisor is negative, we have to negate and handle edge cases.
   __ negl(dividend);
   if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
-    DeoptimizeIf(zero, instr);
+    DeoptimizeIf(zero, instr, "minus zero");
   }
 
   // Dividing by -1 is basically negation, unless we overflow.
   if (divisor == -1) {
     if (instr->hydrogen()->CheckFlag(HValue::kLeftCanBeMinInt)) {
-      DeoptimizeIf(overflow, instr);
+      DeoptimizeIf(overflow, instr, "overflow");
     }
     return;
   }
@@ -1181,7 +1177,7 @@
   DCHECK(ToRegister(instr->result()).is(rdx));
 
   if (divisor == 0) {
-    DeoptimizeIf(no_condition, instr);
+    DeoptimizeIf(no_condition, instr, "division by zero");
     return;
   }
 
@@ -1189,7 +1185,7 @@
   HMathFloorOfDiv* hdiv = instr->hydrogen();
   if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
     __ testl(dividend, dividend);
-    DeoptimizeIf(zero, instr);
+    DeoptimizeIf(zero, instr, "minus zero");
   }
 
   // Easy case: We need no dynamic check for the dividend and the flooring
@@ -1236,7 +1232,7 @@
   // Check for x / 0.
   if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) {
     __ testl(divisor, divisor);
-    DeoptimizeIf(zero, instr);
+    DeoptimizeIf(zero, instr, "division by zero");
   }
 
   // Check for (0 / -x) that will produce negative zero.
@@ -1245,7 +1241,7 @@
     __ testl(dividend, dividend);
     __ j(not_zero, &dividend_not_zero, Label::kNear);
     __ testl(divisor, divisor);
-    DeoptimizeIf(sign, instr);
+    DeoptimizeIf(sign, instr, "minus zero");
     __ bind(&dividend_not_zero);
   }
 
@@ -1255,7 +1251,7 @@
     __ cmpl(dividend, Immediate(kMinInt));
     __ j(not_zero, &dividend_not_min_int, Label::kNear);
     __ cmpl(divisor, Immediate(-1));
-    DeoptimizeIf(zero, instr);
+    DeoptimizeIf(zero, instr, "overflow");
     __ bind(&dividend_not_min_int);
   }
 
@@ -1284,19 +1280,19 @@
   HDiv* hdiv = instr->hydrogen();
   if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
     __ testl(dividend, dividend);
-    DeoptimizeIf(zero, instr);
+    DeoptimizeIf(zero, instr, "minus zero");
   }
   // Check for (kMinInt / -1).
   if (hdiv->CheckFlag(HValue::kCanOverflow) && divisor == -1) {
     __ cmpl(dividend, Immediate(kMinInt));
-    DeoptimizeIf(zero, instr);
+    DeoptimizeIf(zero, instr, "overflow");
   }
   // Deoptimize if remainder will not be 0.
   if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32) &&
       divisor != 1 && divisor != -1) {
     int32_t mask = divisor < 0 ? -(divisor + 1) : (divisor - 1);
     __ testl(dividend, Immediate(mask));
-    DeoptimizeIf(not_zero, instr);
+    DeoptimizeIf(not_zero, instr, "lost precision");
   }
   __ Move(result, dividend);
   int32_t shift = WhichPowerOf2Abs(divisor);
@@ -1317,7 +1313,7 @@
   DCHECK(ToRegister(instr->result()).is(rdx));
 
   if (divisor == 0) {
-    DeoptimizeIf(no_condition, instr);
+    DeoptimizeIf(no_condition, instr, "division by zero");
     return;
   }
 
@@ -1325,7 +1321,7 @@
   HDiv* hdiv = instr->hydrogen();
   if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
     __ testl(dividend, dividend);
-    DeoptimizeIf(zero, instr);
+    DeoptimizeIf(zero, instr, "minus zero");
   }
 
   __ TruncatingDiv(dividend, Abs(divisor));
@@ -1335,7 +1331,7 @@
     __ movl(rax, rdx);
     __ imull(rax, rax, Immediate(divisor));
     __ subl(rax, dividend);
-    DeoptimizeIf(not_equal, instr);
+    DeoptimizeIf(not_equal, instr, "lost precision");
   }
 }
 
@@ -1355,7 +1351,7 @@
   // Check for x / 0.
   if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) {
     __ testl(divisor, divisor);
-    DeoptimizeIf(zero, instr);
+    DeoptimizeIf(zero, instr, "division by zero");
   }
 
   // Check for (0 / -x) that will produce negative zero.
@@ -1364,7 +1360,7 @@
     __ testl(dividend, dividend);
     __ j(not_zero, &dividend_not_zero, Label::kNear);
     __ testl(divisor, divisor);
-    DeoptimizeIf(sign, instr);
+    DeoptimizeIf(sign, instr, "minus zero");
     __ bind(&dividend_not_zero);
   }
 
@@ -1374,7 +1370,7 @@
     __ cmpl(dividend, Immediate(kMinInt));
     __ j(not_zero, &dividend_not_min_int, Label::kNear);
     __ cmpl(divisor, Immediate(-1));
-    DeoptimizeIf(zero, instr);
+    DeoptimizeIf(zero, instr, "overflow");
     __ bind(&dividend_not_min_int);
   }
 
@@ -1385,7 +1381,7 @@
   if (!hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32)) {
     // Deoptimize if remainder is not 0.
     __ testl(remainder, remainder);
-    DeoptimizeIf(not_zero, instr);
+    DeoptimizeIf(not_zero, instr, "lost precision");
   }
 }
 
@@ -1462,7 +1458,7 @@
   }
 
   if (can_overflow) {
-    DeoptimizeIf(overflow, instr);
+    DeoptimizeIf(overflow, instr, "overflow");
   }
 
   if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
@@ -1481,10 +1477,10 @@
           ? !instr->hydrogen_value()->representation().IsSmi()
           : SmiValuesAre31Bits());
       if (ToInteger32(LConstantOperand::cast(right)) < 0) {
-        DeoptimizeIf(no_condition, instr);
+        DeoptimizeIf(no_condition, instr, "minus zero");
       } else if (ToInteger32(LConstantOperand::cast(right)) == 0) {
         __ cmpl(kScratchRegister, Immediate(0));
-        DeoptimizeIf(less, instr);
+        DeoptimizeIf(less, instr, "minus zero");
       }
     } else if (right->IsStackSlot()) {
       if (instr->hydrogen_value()->representation().IsSmi()) {
@@ -1492,7 +1488,7 @@
       } else {
         __ orl(kScratchRegister, ToOperand(right));
       }
-      DeoptimizeIf(sign, instr);
+      DeoptimizeIf(sign, instr, "minus zero");
     } else {
       // Test the non-zero operand for negative sign.
       if (instr->hydrogen_value()->representation().IsSmi()) {
@@ -1500,7 +1496,7 @@
       } else {
         __ orl(kScratchRegister, ToRegister(right));
       }
-      DeoptimizeIf(sign, instr);
+      DeoptimizeIf(sign, instr, "minus zero");
     }
     __ bind(&done);
   }
@@ -1613,7 +1609,7 @@
         __ shrl_cl(ToRegister(left));
         if (instr->can_deopt()) {
           __ testl(ToRegister(left), ToRegister(left));
-          DeoptimizeIf(negative, instr);
+          DeoptimizeIf(negative, instr, "negative value");
         }
         break;
       case Token::SHL:
@@ -1642,7 +1638,7 @@
           __ shrl(ToRegister(left), Immediate(shift_count));
         } else if (instr->can_deopt()) {
           __ testl(ToRegister(left), ToRegister(left));
-          DeoptimizeIf(negative, instr);
+          DeoptimizeIf(negative, instr, "negative value");
         }
         break;
       case Token::SHL:
@@ -1657,7 +1653,7 @@
                   __ shll(ToRegister(left), Immediate(shift_count - 1));
                 }
                 __ Integer32ToSmi(ToRegister(left), ToRegister(left));
-                DeoptimizeIf(overflow, instr);
+                DeoptimizeIf(overflow, instr, "overflow");
               } else {
                 __ shll(ToRegister(left), Immediate(shift_count));
               }
@@ -1700,7 +1696,7 @@
   }
 
   if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
-    DeoptimizeIf(overflow, instr);
+    DeoptimizeIf(overflow, instr, "overflow");
   }
 }
 
@@ -1765,9 +1761,9 @@
   DCHECK(object.is(rax));
 
   Condition cc = masm()->CheckSmi(object);
-  DeoptimizeIf(cc, instr);
+  DeoptimizeIf(cc, instr, "Smi");
   __ CmpObjectType(object, JS_DATE_TYPE, kScratchRegister);
-  DeoptimizeIf(not_equal, instr);
+  DeoptimizeIf(not_equal, instr, "not a date object");
 
   if (index->value() == 0) {
     __ movp(result, FieldOperand(object, JSDate::kValueOffset));
@@ -1931,7 +1927,7 @@
       }
     }
     if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
-      DeoptimizeIf(overflow, instr);
+      DeoptimizeIf(overflow, instr, "overflow");
     }
   }
 }
@@ -2176,7 +2172,7 @@
       } else if (expected.NeedsMap()) {
         // If we need a map later and have a Smi -> deopt.
         __ testb(reg, Immediate(kSmiTagMask));
-        DeoptimizeIf(zero, instr);
+        DeoptimizeIf(zero, instr, "Smi");
       }
 
       const Register map = kScratchRegister;
@@ -2230,7 +2226,7 @@
       if (!expected.IsGeneric()) {
         // We've seen something for the first time -> deopt.
         // This can only happen if we are not generic already.
-        DeoptimizeIf(no_condition, instr);
+        DeoptimizeIf(no_condition, instr, "unexpected object");
       }
     }
   }
@@ -2846,7 +2842,7 @@
   __ LoadGlobalCell(result, instr->hydrogen()->cell().handle());
   if (instr->hydrogen()->RequiresHoleCheck()) {
     __ CompareRoot(result, Heap::kTheHoleValueRootIndex);
-    DeoptimizeIf(equal, instr);
+    DeoptimizeIf(equal, instr, "hole");
   }
 }
 
@@ -2894,7 +2890,7 @@
     DCHECK(!value.is(cell));
     __ Move(cell, cell_handle, RelocInfo::CELL);
     __ CompareRoot(Operand(cell, 0), Heap::kTheHoleValueRootIndex);
-    DeoptimizeIf(equal, instr);
+    DeoptimizeIf(equal, instr, "hole");
     // Store the value.
     __ movp(Operand(cell, 0), value);
   } else {
@@ -2913,7 +2909,7 @@
   if (instr->hydrogen()->RequiresHoleCheck()) {
     __ CompareRoot(result, Heap::kTheHoleValueRootIndex);
     if (instr->hydrogen()->DeoptimizesOnHole()) {
-      DeoptimizeIf(equal, instr);
+      DeoptimizeIf(equal, instr, "hole");
     } else {
       Label is_not_hole;
       __ j(not_equal, &is_not_hole, Label::kNear);
@@ -2934,7 +2930,7 @@
   if (instr->hydrogen()->RequiresHoleCheck()) {
     __ CompareRoot(target, Heap::kTheHoleValueRootIndex);
     if (instr->hydrogen()->DeoptimizesOnHole()) {
-      DeoptimizeIf(equal, instr);
+      DeoptimizeIf(equal, instr, "hole");
     } else {
       __ j(not_equal, &skip_assignment);
     }
@@ -3032,7 +3028,7 @@
 
   // Check that the function has a prototype or an initial map.
   __ CompareRoot(result, Heap::kTheHoleValueRootIndex);
-  DeoptimizeIf(equal, instr);
+  DeoptimizeIf(equal, instr, "hole");
 
   // If the function does not have an initial map, we're done.
   Label done;
@@ -3144,7 +3140,7 @@
         __ movl(result, operand);
         if (!instr->hydrogen()->CheckFlag(HInstruction::kUint32)) {
           __ testl(result, result);
-          DeoptimizeIf(negative, instr);
+          DeoptimizeIf(negative, instr, "negative value");
         }
         break;
       case EXTERNAL_FLOAT32_ELEMENTS:
@@ -3183,7 +3179,7 @@
         FAST_DOUBLE_ELEMENTS,
         instr->base_offset() + sizeof(kHoleNanLower32));
     __ cmpl(hole_check_operand, Immediate(kHoleNanUpper32));
-    DeoptimizeIf(equal, instr);
+    DeoptimizeIf(equal, instr, "hole");
   }
 
   Operand double_load_operand = BuildFastArrayOperand(
@@ -3240,10 +3236,10 @@
   if (requires_hole_check) {
     if (IsFastSmiElementsKind(hinstr->elements_kind())) {
       Condition smi = __ CheckSmi(result);
-      DeoptimizeIf(NegateCondition(smi), instr);
+      DeoptimizeIf(NegateCondition(smi), instr, "not a Smi");
     } else {
       __ CompareRoot(result, Heap::kTheHoleValueRootIndex);
-      DeoptimizeIf(equal, instr);
+      DeoptimizeIf(equal, instr, "hole");
     }
   }
 }
@@ -3392,9 +3388,9 @@
 
   // The receiver should be a JS object.
   Condition is_smi = __ CheckSmi(receiver);
-  DeoptimizeIf(is_smi, instr);
+  DeoptimizeIf(is_smi, instr, "Smi");
   __ CmpObjectType(receiver, FIRST_SPEC_OBJECT_TYPE, kScratchRegister);
-  DeoptimizeIf(below, instr);
+  DeoptimizeIf(below, instr, "not a JavaScript object");
 
   __ jmp(&receiver_ok, Label::kNear);
   __ bind(&global_object);
@@ -3421,7 +3417,7 @@
   // adaptor frame below it.
   const uint32_t kArgumentsLimit = 1 * KB;
   __ cmpp(length, Immediate(kArgumentsLimit));
-  DeoptimizeIf(above, instr);
+  DeoptimizeIf(above, instr, "too many arguments");
 
   __ Push(receiver);
   __ movp(receiver, length);
@@ -3616,7 +3612,7 @@
   Register input_reg = ToRegister(instr->value());
   __ CompareRoot(FieldOperand(input_reg, HeapObject::kMapOffset),
                  Heap::kHeapNumberMapRootIndex);
-  DeoptimizeIf(not_equal, instr);
+  DeoptimizeIf(not_equal, instr, "not a heap number");
 
   Label slow, allocated, done;
   Register tmp = input_reg.is(rax) ? rcx : rax;
@@ -3662,7 +3658,7 @@
   Label is_positive;
   __ j(not_sign, &is_positive, Label::kNear);
   __ negl(input_reg);  // Sets flags.
-  DeoptimizeIf(negative, instr);
+  DeoptimizeIf(negative, instr, "overflow");
   __ bind(&is_positive);
 }
 
@@ -3673,7 +3669,7 @@
   Label is_positive;
   __ j(not_sign, &is_positive, Label::kNear);
   __ negp(input_reg);  // Sets flags.
-  DeoptimizeIf(negative, instr);
+  DeoptimizeIf(negative, instr, "overflow");
   __ bind(&is_positive);
 }
 
@@ -3728,18 +3724,18 @@
       // Deoptimize if minus zero.
       __ movq(output_reg, input_reg);
       __ subq(output_reg, Immediate(1));
-      DeoptimizeIf(overflow, instr);
+      DeoptimizeIf(overflow, instr, "minus zero");
     }
     __ roundsd(xmm_scratch, input_reg, Assembler::kRoundDown);
     __ cvttsd2si(output_reg, xmm_scratch);
     __ cmpl(output_reg, Immediate(0x1));
-    DeoptimizeIf(overflow, instr);
+    DeoptimizeIf(overflow, instr, "overflow");
   } else {
     Label negative_sign, done;
     // Deoptimize on unordered.
     __ xorps(xmm_scratch, xmm_scratch);  // Zero the register.
     __ ucomisd(input_reg, xmm_scratch);
-    DeoptimizeIf(parity_even, instr);
+    DeoptimizeIf(parity_even, instr, "NaN");
     __ j(below, &negative_sign, Label::kNear);
 
     if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
@@ -3748,7 +3744,7 @@
       __ j(above, &positive_sign, Label::kNear);
       __ movmskpd(output_reg, input_reg);
       __ testq(output_reg, Immediate(1));
-      DeoptimizeIf(not_zero, instr);
+      DeoptimizeIf(not_zero, instr, "minus zero");
       __ Set(output_reg, 0);
       __ jmp(&done);
       __ bind(&positive_sign);
@@ -3758,7 +3754,7 @@
     __ cvttsd2si(output_reg, input_reg);
     // Overflow is signalled with minint.
     __ cmpl(output_reg, Immediate(0x1));
-    DeoptimizeIf(overflow, instr);
+    DeoptimizeIf(overflow, instr, "overflow");
     __ jmp(&done, Label::kNear);
 
     // Non-zero negative reaches here.
@@ -3769,7 +3765,7 @@
     __ ucomisd(input_reg, xmm_scratch);
     __ j(equal, &done, Label::kNear);
     __ subl(output_reg, Immediate(1));
-    DeoptimizeIf(overflow, instr);
+    DeoptimizeIf(overflow, instr, "overflow");
 
     __ bind(&done);
   }
@@ -3796,8 +3792,7 @@
   __ cvttsd2si(output_reg, xmm_scratch);
   // Overflow is signalled with minint.
   __ cmpl(output_reg, Immediate(0x1));
-  __ RecordComment("D2I conversion overflow");
-  DeoptimizeIf(overflow, instr);
+  DeoptimizeIf(overflow, instr, "overflow");
   __ jmp(&done, dist);
 
   __ bind(&below_one_half);
@@ -3813,8 +3808,7 @@
   __ cvttsd2si(output_reg, input_temp);
   // Catch minint due to overflow, and to prevent overflow when compensating.
   __ cmpl(output_reg, Immediate(0x1));
-  __ RecordComment("D2I conversion overflow");
-  DeoptimizeIf(overflow, instr);
+  DeoptimizeIf(overflow, instr, "overflow");
 
   __ Cvtlsi2sd(xmm_scratch, output_reg);
   __ ucomisd(xmm_scratch, input_temp);
@@ -3829,8 +3823,7 @@
   if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
     __ movq(output_reg, input_reg);
     __ testq(output_reg, output_reg);
-    __ RecordComment("Minus zero");
-    DeoptimizeIf(negative, instr);
+    DeoptimizeIf(negative, instr, "minus zero");
   }
   __ Set(output_reg, 0);
   __ bind(&done);
@@ -3909,7 +3902,7 @@
     Label no_deopt;
     __ JumpIfSmi(tagged_exponent, &no_deopt, Label::kNear);
     __ CmpObjectType(tagged_exponent, HEAP_NUMBER_TYPE, rcx);
-    DeoptimizeIf(not_equal, instr);
+    DeoptimizeIf(not_equal, instr, "not a heap number");
     __ bind(&no_deopt);
     MathPowStub stub(isolate(), MathPowStub::TAGGED);
     __ CallStub(&stub);
@@ -4281,7 +4274,7 @@
     __ int3();
     __ bind(&done);
   } else {
-    DeoptimizeIf(cc, instr);
+    DeoptimizeIf(cc, instr, "out of bounds");
   }
 }
 
@@ -4529,7 +4522,7 @@
   Register temp = ToRegister(instr->temp());
   Label no_memento_found;
   __ TestJSArrayForAllocationMemento(object, temp, &no_memento_found);
-  DeoptimizeIf(equal, instr);
+  DeoptimizeIf(equal, instr, "memento found");
   __ bind(&no_memento_found);
 }
 
@@ -4848,12 +4841,12 @@
   if (hchange->CheckFlag(HValue::kCanOverflow) &&
       hchange->value()->CheckFlag(HValue::kUint32)) {
     Condition is_smi = __ CheckUInteger32ValidSmiValue(input);
-    DeoptimizeIf(NegateCondition(is_smi), instr);
+    DeoptimizeIf(NegateCondition(is_smi), instr, "overflow");
   }
   __ Integer32ToSmi(output, input);
   if (hchange->CheckFlag(HValue::kCanOverflow) &&
       !hchange->value()->CheckFlag(HValue::kUint32)) {
-    DeoptimizeIf(overflow, instr);
+    DeoptimizeIf(overflow, instr, "overflow");
   }
 }
 
@@ -4863,7 +4856,7 @@
   Register input = ToRegister(instr->value());
   if (instr->needs_check()) {
     Condition is_smi = __ CheckSmi(input);
-    DeoptimizeIf(NegateCondition(is_smi), instr);
+    DeoptimizeIf(NegateCondition(is_smi), instr, "not a Smi");
   } else {
     __ AssertSmi(input);
   }
@@ -4894,7 +4887,7 @@
     if (can_convert_undefined_to_nan) {
       __ j(not_equal, &convert, Label::kNear);
     } else {
-      DeoptimizeIf(not_equal, instr);
+      DeoptimizeIf(not_equal, instr, "not a heap number");
     }
 
     if (deoptimize_on_minus_zero) {
@@ -4904,7 +4897,7 @@
       __ j(not_equal, &done, Label::kNear);
       __ movmskpd(kScratchRegister, result_reg);
       __ testq(kScratchRegister, Immediate(1));
-      DeoptimizeIf(not_zero, instr);
+      DeoptimizeIf(not_zero, instr, "minus zero");
     }
     __ jmp(&done, Label::kNear);
 
@@ -4913,7 +4906,7 @@
 
       // Convert undefined (and hole) to NaN. Compute NaN as 0/0.
       __ CompareRoot(input_reg, Heap::kUndefinedValueRootIndex);
-      DeoptimizeIf(not_equal, instr);
+      DeoptimizeIf(not_equal, instr, "not a heap number/undefined");
 
       __ xorps(result_reg, result_reg);
       __ divsd(result_reg, result_reg);
@@ -4960,31 +4953,26 @@
 
     __ bind(&check_false);
     __ CompareRoot(input_reg, Heap::kFalseValueRootIndex);
-    __ RecordComment("Deferred TaggedToI: cannot truncate");
-    DeoptimizeIf(not_equal, instr);
+    DeoptimizeIf(not_equal, instr, "not a heap number/undefined/true/false");
     __ Set(input_reg, 0);
   } else {
     XMMRegister scratch = ToDoubleRegister(instr->temp());
     DCHECK(!scratch.is(xmm0));
     __ CompareRoot(FieldOperand(input_reg, HeapObject::kMapOffset),
                    Heap::kHeapNumberMapRootIndex);
-    __ RecordComment("Deferred TaggedToI: not a heap number");
-    DeoptimizeIf(not_equal, instr);
+    DeoptimizeIf(not_equal, instr, "not a heap number");
     __ movsd(xmm0, FieldOperand(input_reg, HeapNumber::kValueOffset));
     __ cvttsd2si(input_reg, xmm0);
     __ Cvtlsi2sd(scratch, input_reg);
     __ ucomisd(xmm0, scratch);
-    __ RecordComment("Deferred TaggedToI: lost precision");
-    DeoptimizeIf(not_equal, instr);
-    __ RecordComment("Deferred TaggedToI: NaN");
-    DeoptimizeIf(parity_even, instr);
+    DeoptimizeIf(not_equal, instr, "lost precision");
+    DeoptimizeIf(parity_even, instr, "NaN");
     if (instr->hydrogen()->GetMinusZeroMode() == FAIL_ON_MINUS_ZERO) {
       __ testl(input_reg, input_reg);
       __ j(not_zero, done);
       __ movmskpd(input_reg, xmm0);
       __ andl(input_reg, Immediate(1));
-      __ RecordComment("Deferred TaggedToI: minus zero");
-      DeoptimizeIf(not_zero, instr);
+      DeoptimizeIf(not_zero, instr, "minus zero");
     }
   }
 }
@@ -5048,14 +5036,19 @@
   if (instr->truncating()) {
     __ TruncateDoubleToI(result_reg, input_reg);
   } else {
-    Label bailout, done;
+    Label lost_precision, is_nan, minus_zero, done;
     XMMRegister xmm_scratch = double_scratch0();
+    Label::Distance dist = DeoptEveryNTimes() ? Label::kFar : Label::kNear;
     __ DoubleToI(result_reg, input_reg, xmm_scratch,
-        instr->hydrogen()->GetMinusZeroMode(), &bailout, Label::kNear);
-
-    __ jmp(&done, Label::kNear);
-    __ bind(&bailout);
-    DeoptimizeIf(no_condition, instr);
+                 instr->hydrogen()->GetMinusZeroMode(), &lost_precision,
+                 &is_nan, &minus_zero, dist);
+    __ jmp(&done, dist);
+    __ bind(&lost_precision);
+    DeoptimizeIf(no_condition, instr, "lost precision");
+    __ bind(&is_nan);
+    DeoptimizeIf(no_condition, instr, "NaN");
+    __ bind(&minus_zero);
+    DeoptimizeIf(no_condition, instr, "minus zero");
     __ bind(&done);
   }
 }
@@ -5070,25 +5063,29 @@
   XMMRegister input_reg = ToDoubleRegister(input);
   Register result_reg = ToRegister(result);
 
-  Label bailout, done;
+  Label lost_precision, is_nan, minus_zero, done;
   XMMRegister xmm_scratch = double_scratch0();
+  Label::Distance dist = DeoptEveryNTimes() ? Label::kFar : Label::kNear;
   __ DoubleToI(result_reg, input_reg, xmm_scratch,
-      instr->hydrogen()->GetMinusZeroMode(), &bailout, Label::kNear);
-
-  __ jmp(&done, Label::kNear);
-  __ bind(&bailout);
-  DeoptimizeIf(no_condition, instr);
+               instr->hydrogen()->GetMinusZeroMode(), &lost_precision, &is_nan,
+               &minus_zero, dist);
+  __ jmp(&done, dist);
+  __ bind(&lost_precision);
+  DeoptimizeIf(no_condition, instr, "lost precision");
+  __ bind(&is_nan);
+  DeoptimizeIf(no_condition, instr, "NaN");
+  __ bind(&minus_zero);
+  DeoptimizeIf(no_condition, instr, "minus zero");
   __ bind(&done);
-
   __ Integer32ToSmi(result_reg, result_reg);
-  DeoptimizeIf(overflow, instr);
+  DeoptimizeIf(overflow, instr, "overflow");
 }
 
 
 void LCodeGen::DoCheckSmi(LCheckSmi* instr) {
   LOperand* input = instr->value();
   Condition cc = masm()->CheckSmi(ToRegister(input));
-  DeoptimizeIf(NegateCondition(cc), instr);
+  DeoptimizeIf(NegateCondition(cc), instr, "not a Smi");
 }
 
 
@@ -5096,7 +5093,7 @@
   if (!instr->hydrogen()->value()->type().IsHeapObject()) {
     LOperand* input = instr->value();
     Condition cc = masm()->CheckSmi(ToRegister(input));
-    DeoptimizeIf(cc, instr);
+    DeoptimizeIf(cc, instr, "Smi");
   }
 }
 
@@ -5116,14 +5113,14 @@
 
     // If there is only one type in the interval check for equality.
     if (first == last) {
-      DeoptimizeIf(not_equal, instr);
+      DeoptimizeIf(not_equal, instr, "wrong instance type");
     } else {
-      DeoptimizeIf(below, instr);
+      DeoptimizeIf(below, instr, "wrong instance type");
       // Omit check for the last type.
       if (last != LAST_TYPE) {
         __ cmpb(FieldOperand(kScratchRegister, Map::kInstanceTypeOffset),
                 Immediate(static_cast<int8_t>(last)));
-        DeoptimizeIf(above, instr);
+        DeoptimizeIf(above, instr, "wrong instance type");
       }
     }
   } else {
@@ -5135,13 +5132,13 @@
       DCHECK(tag == 0 || base::bits::IsPowerOfTwo32(tag));
       __ testb(FieldOperand(kScratchRegister, Map::kInstanceTypeOffset),
                Immediate(mask));
-      DeoptimizeIf(tag == 0 ? not_zero : zero, instr);
+      DeoptimizeIf(tag == 0 ? not_zero : zero, instr, "wrong instance type");
     } else {
       __ movzxbl(kScratchRegister,
                  FieldOperand(kScratchRegister, Map::kInstanceTypeOffset));
       __ andb(kScratchRegister, Immediate(mask));
       __ cmpb(kScratchRegister, Immediate(tag));
-      DeoptimizeIf(not_equal, instr);
+      DeoptimizeIf(not_equal, instr, "wrong instance type");
     }
   }
 }
@@ -5150,7 +5147,7 @@
 void LCodeGen::DoCheckValue(LCheckValue* instr) {
   Register reg = ToRegister(instr->value());
   __ Cmp(reg, instr->hydrogen()->object().handle());
-  DeoptimizeIf(not_equal, instr);
+  DeoptimizeIf(not_equal, instr, "value mismatch");
 }
 
 
@@ -5165,7 +5162,7 @@
 
     __ testp(rax, Immediate(kSmiTagMask));
   }
-  DeoptimizeIf(zero, instr);
+  DeoptimizeIf(zero, instr, "instance migration failed");
 }
 
 
@@ -5218,7 +5215,7 @@
   if (instr->hydrogen()->HasMigrationTarget()) {
     __ j(not_equal, deferred->entry());
   } else {
-    DeoptimizeIf(not_equal, instr);
+    DeoptimizeIf(not_equal, instr, "wrong map");
   }
 
   __ bind(&success);
@@ -5257,7 +5254,7 @@
   // Check for undefined. Undefined is converted to zero for clamping
   // conversions.
   __ Cmp(input_reg, factory()->undefined_value());
-  DeoptimizeIf(not_equal, instr);
+  DeoptimizeIf(not_equal, instr, "not a heap number/undefined");
   __ xorl(input_reg, input_reg);
   __ jmp(&done, Label::kNear);
 
@@ -5738,19 +5735,19 @@
 void LCodeGen::DoForInPrepareMap(LForInPrepareMap* instr) {
   DCHECK(ToRegister(instr->context()).is(rsi));
   __ CompareRoot(rax, Heap::kUndefinedValueRootIndex);
-  DeoptimizeIf(equal, instr);
+  DeoptimizeIf(equal, instr, "undefined");
 
   Register null_value = rdi;
   __ LoadRoot(null_value, Heap::kNullValueRootIndex);
   __ cmpp(rax, null_value);
-  DeoptimizeIf(equal, instr);
+  DeoptimizeIf(equal, instr, "null");
 
   Condition cc = masm()->CheckSmi(rax);
-  DeoptimizeIf(cc, instr);
+  DeoptimizeIf(cc, instr, "Smi");
 
   STATIC_ASSERT(FIRST_JS_PROXY_TYPE == FIRST_SPEC_OBJECT_TYPE);
   __ CmpObjectType(rax, LAST_JS_PROXY_TYPE, rcx);
-  DeoptimizeIf(below_equal, instr);
+  DeoptimizeIf(below_equal, instr, "wrong instance type");
 
   Label use_cache, call_runtime;
   __ CheckEnumCache(null_value, &call_runtime);
@@ -5765,7 +5762,7 @@
 
   __ CompareRoot(FieldOperand(rax, HeapObject::kMapOffset),
                  Heap::kMetaMapRootIndex);
-  DeoptimizeIf(not_equal, instr);
+  DeoptimizeIf(not_equal, instr, "wrong map");
   __ bind(&use_cache);
 }
 
@@ -5787,7 +5784,7 @@
           FieldOperand(result, FixedArray::SizeFor(instr->idx())));
   __ bind(&done);
   Condition cc = masm()->CheckSmi(result);
-  DeoptimizeIf(cc, instr);
+  DeoptimizeIf(cc, instr, "no cache");
 }
 
 
@@ -5795,7 +5792,7 @@
   Register object = ToRegister(instr->value());
   __ cmpp(ToRegister(instr->map()),
           FieldOperand(object, HeapObject::kMapOffset));
-  DeoptimizeIf(not_equal, instr);
+  DeoptimizeIf(not_equal, instr, "wrong map");
 }
 
 
diff --git a/src/x64/lithium-codegen-x64.h b/src/x64/lithium-codegen-x64.h
index bf10e34..ccd90b5 100644
--- a/src/x64/lithium-codegen-x64.h
+++ b/src/x64/lithium-codegen-x64.h
@@ -206,10 +206,9 @@
                                     int argc);
   void RegisterEnvironmentForDeoptimization(LEnvironment* environment,
                                             Safepoint::DeoptMode mode);
-  void DeoptimizeIf(Condition cc, LInstruction* instr, const char* reason,
+  void DeoptimizeIf(Condition cc, LInstruction* instr, const char* detail,
                     Deoptimizer::BailoutType bailout_type);
-  void DeoptimizeIf(Condition cc, LInstruction* instr,
-                    const char* reason = NULL);
+  void DeoptimizeIf(Condition cc, LInstruction* instr, const char* detail);
 
   bool DeoptEveryNTimes() {
     return FLAG_deopt_every_n_times != 0 && !info()->IsStub();
diff --git a/src/x64/macro-assembler-x64.cc b/src/x64/macro-assembler-x64.cc
index 50ca8f2..5033303 100644
--- a/src/x64/macro-assembler-x64.cc
+++ b/src/x64/macro-assembler-x64.cc
@@ -2701,16 +2701,16 @@
 }
 
 
-void MacroAssembler::JumpIfNotUniqueName(Operand operand,
-                                         Label* not_unique_name,
-                                         Label::Distance distance) {
+void MacroAssembler::JumpIfNotUniqueNameInstanceType(Operand operand,
+                                                     Label* not_unique_name,
+                                                     Label::Distance distance) {
   JumpIfNotUniqueNameHelper<Operand>(this, operand, not_unique_name, distance);
 }
 
 
-void MacroAssembler::JumpIfNotUniqueName(Register reg,
-                                         Label* not_unique_name,
-                                         Label::Distance distance) {
+void MacroAssembler::JumpIfNotUniqueNameInstanceType(Register reg,
+                                                     Label* not_unique_name,
+                                                     Label::Distance distance) {
   JumpIfNotUniqueNameHelper<Register>(this, reg, not_unique_name, distance);
 }
 
@@ -3517,17 +3517,16 @@
 }
 
 
-void MacroAssembler::DoubleToI(Register result_reg,
-                               XMMRegister input_reg,
+void MacroAssembler::DoubleToI(Register result_reg, XMMRegister input_reg,
                                XMMRegister scratch,
                                MinusZeroMode minus_zero_mode,
-                               Label* conversion_failed,
-                               Label::Distance dst) {
+                               Label* lost_precision, Label* is_nan,
+                               Label* minus_zero, Label::Distance dst) {
   cvttsd2si(result_reg, input_reg);
   Cvtlsi2sd(xmm0, result_reg);
   ucomisd(xmm0, input_reg);
-  j(not_equal, conversion_failed, dst);
-  j(parity_even, conversion_failed, dst);  // NaN.
+  j(not_equal, lost_precision, dst);
+  j(parity_even, is_nan, dst);  // NaN.
   if (minus_zero_mode == FAIL_ON_MINUS_ZERO) {
     Label done;
     // The integer converted back is equal to the original. We
@@ -3537,9 +3536,9 @@
     movmskpd(result_reg, input_reg);
     // Bit 0 contains the sign of the double in input_reg.
     // If input was positive, we are ok and return 0, otherwise
-    // jump to conversion_failed.
+    // jump to minus_zero.
     andl(result_reg, Immediate(1));
-    j(not_zero, conversion_failed, dst);
+    j(not_zero, minus_zero, dst);
     bind(&done);
   }
 }
diff --git a/src/x64/macro-assembler-x64.h b/src/x64/macro-assembler-x64.h
index b4f7dd7..534811e 100644
--- a/src/x64/macro-assembler-x64.h
+++ b/src/x64/macro-assembler-x64.h
@@ -797,10 +797,10 @@
                                  uint32_t encoding_mask);
 
   // Checks if the given register or operand is a unique name
-  void JumpIfNotUniqueName(Register reg, Label* not_unique_name,
-                           Label::Distance distance = Label::kFar);
-  void JumpIfNotUniqueName(Operand operand, Label* not_unique_name,
-                           Label::Distance distance = Label::kFar);
+  void JumpIfNotUniqueNameInstanceType(Register reg, Label* not_unique_name,
+                                       Label::Distance distance = Label::kFar);
+  void JumpIfNotUniqueNameInstanceType(Operand operand, Label* not_unique_name,
+                                       Label::Distance distance = Label::kFar);
 
   // ---------------------------------------------------------------------------
   // Macro instructions.
@@ -1030,8 +1030,9 @@
   void TruncateDoubleToI(Register result_reg, XMMRegister input_reg);
 
   void DoubleToI(Register result_reg, XMMRegister input_reg,
-      XMMRegister scratch, MinusZeroMode minus_zero_mode,
-      Label* conversion_failed, Label::Distance dst = Label::kFar);
+                 XMMRegister scratch, MinusZeroMode minus_zero_mode,
+                 Label* lost_precision, Label* is_nan, Label* minus_zero,
+                 Label::Distance dst = Label::kFar);
 
   void LoadUint32(XMMRegister dst, Register src);
 
diff --git a/src/x87/assembler-x87-inl.h b/src/x87/assembler-x87-inl.h
index 25ecfcf..6555ccd 100644
--- a/src/x87/assembler-x87-inl.h
+++ b/src/x87/assembler-x87-inl.h
@@ -45,7 +45,7 @@
 namespace v8 {
 namespace internal {
 
-bool CpuFeatures::SupportsCrankshaft() { return false; }
+bool CpuFeatures::SupportsCrankshaft() { return true; }
 
 
 static const byte kCallOpcode = 0xE8;
diff --git a/src/x87/assembler-x87.cc b/src/x87/assembler-x87.cc
index 8f92249..9e1c883 100644
--- a/src/x87/assembler-x87.cc
+++ b/src/x87/assembler-x87.cc
@@ -1519,6 +1519,20 @@
 }
 
 
+void Assembler::fldcw(const Operand& adr) {
+  EnsureSpace ensure_space(this);
+  EMIT(0xD9);
+  emit_operand(ebp, adr);
+}
+
+
+void Assembler::fnstcw(const Operand& adr) {
+  EnsureSpace ensure_space(this);
+  EMIT(0xD9);
+  emit_operand(edi, adr);
+}
+
+
 void Assembler::fstp_d(const Operand& adr) {
   EnsureSpace ensure_space(this);
   EMIT(0xDD);
@@ -1598,6 +1612,13 @@
 }
 
 
+void Assembler::fsqrt() {
+  EnsureSpace ensure_space(this);
+  EMIT(0xD9);
+  EMIT(0xFA);
+}
+
+
 void Assembler::fcos() {
   EnsureSpace ensure_space(this);
   EMIT(0xD9);
@@ -1659,6 +1680,13 @@
 }
 
 
+void Assembler::fadd_d(const Operand& adr) {
+  EnsureSpace ensure_space(this);
+  EMIT(0xDC);
+  emit_operand(eax, adr);
+}
+
+
 void Assembler::fsub(int i) {
   EnsureSpace ensure_space(this);
   emit_farith(0xDC, 0xE8, i);
@@ -1772,6 +1800,13 @@
 }
 
 
+void Assembler::fxam() {
+  EnsureSpace ensure_space(this);
+  EMIT(0xD9);
+  EMIT(0xE5);
+}
+
+
 void Assembler::fucomp(int i) {
   EnsureSpace ensure_space(this);
   emit_farith(0xDD, 0xE8, i);
@@ -1833,6 +1868,20 @@
 }
 
 
+void Assembler::fnsave(const Operand& adr) {
+  EnsureSpace ensure_space(this);
+  EMIT(0xDD);
+  emit_operand(esi, adr);
+}
+
+
+void Assembler::frstor(const Operand& adr) {
+  EnsureSpace ensure_space(this);
+  EMIT(0xDD);
+  emit_operand(esp, adr);
+}
+
+
 void Assembler::sahf() {
   EnsureSpace ensure_space(this);
   EMIT(0x9E);
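The new x87 opcodes above use the assembler's usual trick for memory-form FPU instructions: the GP register passed to emit_operand() supplies the /digit opcode extension in the ModRM reg field. A self-contained table of the encodings assumed here (Intel SDM; register codes are ia32's eax=0 ... edi=7):

#include <cstdio>

enum RegCode { kEax = 0, kEcx, kEdx, kEbx, kEsp, kEbp, kEsi, kEdi };

struct X87Op {
  const char* mnemonic;
  unsigned opcode;
  RegCode digit;  // GP register whose code equals the /digit extension
};

int main() {
  const X87Op ops[] = {
      {"fldcw",  0xD9, kEbp},  // D9 /5
      {"fnstcw", 0xD9, kEdi},  // D9 /7
      {"fadd_d", 0xDC, kEax},  // DC /0
      {"fnsave", 0xDD, kEsi},  // DD /6
      {"frstor", 0xDD, kEsp},  // DD /4
  };
  for (const X87Op& op : ops)
    std::printf("%-6s = %02X /%d\n", op.mnemonic, op.opcode,
                static_cast<int>(op.digit));
}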
diff --git a/src/x87/assembler-x87.h b/src/x87/assembler-x87.h
index a2bedcc..d37c9d7 100644
--- a/src/x87/assembler-x87.h
+++ b/src/x87/assembler-x87.h
@@ -142,7 +142,7 @@
 
 
 struct X87Register {
-  static const int kMaxNumAllocatableRegisters = 8;
+  static const int kMaxNumAllocatableRegisters = 6;
   static const int kMaxNumRegisters = 8;
   static int NumAllocatableRegisters() {
     return kMaxNumAllocatableRegisters;
@@ -852,6 +852,7 @@
 
   void fabs();
   void fchs();
+  void fsqrt();
   void fcos();
   void fsin();
   void fptan();
@@ -862,6 +863,7 @@
 
   void fadd(int i);
   void fadd_i(int i);
+  void fadd_d(const Operand& adr);
   void fsub(int i);
   void fsub_i(int i);
   void fmul(int i);
@@ -884,14 +886,19 @@
   void ffree(int i = 0);
 
   void ftst();
+  void fxam();
   void fucomp(int i);
   void fucompp();
   void fucomi(int i);
   void fucomip();
   void fcompp();
   void fnstsw_ax();
+  void fldcw(const Operand& adr);
+  void fnstcw(const Operand& adr);
   void fwait();
   void fnclex();
+  void fnsave(const Operand& adr);
+  void frstor(const Operand& adr);
 
   void frndint();
 
diff --git a/src/x87/builtins-x87.cc b/src/x87/builtins-x87.cc
index 6857cdc..d631175 100644
--- a/src/x87/builtins-x87.cc
+++ b/src/x87/builtins-x87.cc
@@ -660,7 +660,8 @@
 }
 
 
-static void Generate_NotifyStubFailureHelper(MacroAssembler* masm) {
+static void Generate_NotifyStubFailureHelper(MacroAssembler* masm,
+                                             SaveFPRegsMode save_doubles) {
   // Enter an internal frame.
   {
     FrameScope scope(masm, StackFrame::INTERNAL);
@@ -669,7 +670,7 @@
     // stubs that tail call the runtime on deopts passing their parameters in
     // registers.
     __ pushad();
-    __ CallRuntime(Runtime::kNotifyStubFailure, 0);
+    __ CallRuntime(Runtime::kNotifyStubFailure, 0, save_doubles);
     __ popad();
     // Tear down internal frame.
   }
@@ -680,13 +681,12 @@
 
 
 void Builtins::Generate_NotifyStubFailure(MacroAssembler* masm) {
-  Generate_NotifyStubFailureHelper(masm);
+  Generate_NotifyStubFailureHelper(masm, kDontSaveFPRegs);
 }
 
 
 void Builtins::Generate_NotifyStubFailureSaveDoubles(MacroAssembler* masm) {
-  // SaveDoubles is meanless for X87, just used by deoptimizer.cc
-  Generate_NotifyStubFailureHelper(masm);
+  Generate_NotifyStubFailureHelper(masm, kSaveFPRegs);
 }
 
 
diff --git a/src/x87/code-stubs-x87.cc b/src/x87/code-stubs-x87.cc
index 35514c3..d4c383b 100644
--- a/src/x87/code-stubs-x87.cc
+++ b/src/x87/code-stubs-x87.cc
@@ -127,6 +127,11 @@
   // store the registers in any particular way, but we do have to store and
   // restore them.
   __ pushad();
+  if (save_doubles()) {
+    // Save FPU state in m108byte.
+    __ sub(esp, Immediate(108));
+    __ fnsave(Operand(esp, 0));
+  }
   const int argument_count = 1;
 
   AllowExternalCallThatCantCauseGC scope(masm);
@@ -136,6 +141,11 @@
   __ CallCFunction(
       ExternalReference::store_buffer_overflow_function(isolate()),
       argument_count);
+  if (save_doubles()) {
+    // Restore FPU state in m108byte.
+    __ frstor(Operand(esp, 0));
+    __ add(esp, Immediate(108));
+  }
   __ popad();
   __ ret(0);
 }
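The 108 bytes reserved around the C call correspond to the FNSAVE/FRSTOR memory image in 32-bit protected mode. A hypothetical layout model (field names are ours; only the 108-byte total is what the code above relies on):

#include <cstdint>

struct FnsaveImage {
  uint32_t control_word;   // FCW
  uint32_t status_word;    // FSW
  uint32_t tag_word;       // FTW
  uint32_t fpu_ip;         // last instruction pointer
  uint32_t fpu_cs_opcode;  // CS selector and opcode bits
  uint32_t fpu_dp;         // last data operand pointer
  uint32_t fpu_ds;         // DS selector
  uint8_t st[8][10];       // st0..st7, 80 bits each
};
static_assert(sizeof(FnsaveImage) == 108, "matches sub(esp, Immediate(108))");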
@@ -1115,16 +1125,12 @@
   __ mov(eax, Operand(esp, kSubjectOffset));
   __ mov(ecx, eax);
   __ mov(FieldOperand(ebx, RegExpImpl::kLastSubjectOffset), eax);
-  __ RecordWriteField(ebx,
-                      RegExpImpl::kLastSubjectOffset,
-                      eax,
-                      edi);
+  __ RecordWriteField(ebx, RegExpImpl::kLastSubjectOffset, eax, edi,
+                      kDontSaveFPRegs);
   __ mov(eax, ecx);
   __ mov(FieldOperand(ebx, RegExpImpl::kLastInputOffset), eax);
-  __ RecordWriteField(ebx,
-                      RegExpImpl::kLastInputOffset,
-                      eax,
-                      edi);
+  __ RecordWriteField(ebx, RegExpImpl::kLastInputOffset, eax, edi,
+                      kDontSaveFPRegs);
 
   // Get the static offsets vector filled by the native regexp code.
   ExternalReference address_of_static_offsets_vector =
@@ -1618,7 +1624,8 @@
   __ push(edi);
   __ push(ebx);
   __ push(edx);
-  __ RecordWriteArray(ebx, edi, edx, EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
+  __ RecordWriteArray(ebx, edi, edx, kDontSaveFPRegs, EMIT_REMEMBERED_SET,
+                      OMIT_SMI_CHECK);
   __ pop(edx);
   __ pop(ebx);
   __ pop(edi);
@@ -1989,12 +1996,19 @@
 
 
 void CodeStub::GenerateFPStubs(Isolate* isolate) {
-  // Do nothing.
+  CEntryStub save_doubles(isolate, 1, kSaveFPRegs);
+  // Stubs might already be in the snapshot, detect that and don't regenerate,
+  // which would lead to code stub initialization state being messed up.
+  Code* save_doubles_code;
+  if (!save_doubles.FindCodeInCache(&save_doubles_code)) {
+    save_doubles_code = *(save_doubles.GetCode());
+  }
+  isolate->set_fp_stubs_generated(true);
 }
 
 
 void CEntryStub::GenerateAheadOfTime(Isolate* isolate) {
-  CEntryStub stub(isolate, 1);
+  CEntryStub stub(isolate, 1, kDontSaveFPRegs);
   stub.GetCode();
 }
 
@@ -2010,7 +2024,7 @@
   ProfileEntryHookStub::MaybeCallEntryHook(masm);
 
   // Enter the exit frame that transitions from JavaScript to C++.
-  __ EnterExitFrame();
+  __ EnterExitFrame(save_doubles());
 
   // ebx: pointer to C function  (C callee-saved)
   // ebp: frame pointer  (restored after C call)
@@ -2066,7 +2080,7 @@
   }
 
   // Exit the JavaScript to C++ exit frame.
-  __ LeaveExitFrame();
+  __ LeaveExitFrame(save_doubles());
   __ ret(0);
 
   // Handling of exception.
@@ -3153,8 +3167,8 @@
   __ movzx_b(tmp1, FieldOperand(tmp1, Map::kInstanceTypeOffset));
   __ movzx_b(tmp2, FieldOperand(tmp2, Map::kInstanceTypeOffset));
 
-  __ JumpIfNotUniqueName(tmp1, &miss, Label::kNear);
-  __ JumpIfNotUniqueName(tmp2, &miss, Label::kNear);
+  __ JumpIfNotUniqueNameInstanceType(tmp1, &miss, Label::kNear);
+  __ JumpIfNotUniqueNameInstanceType(tmp2, &miss, Label::kNear);
 
   // Unique names are compared by identity.
   Label done;
@@ -3379,8 +3393,8 @@
 
     // Check if the entry name is not a unique name.
     __ mov(entity_name, FieldOperand(entity_name, HeapObject::kMapOffset));
-    __ JumpIfNotUniqueName(FieldOperand(entity_name, Map::kInstanceTypeOffset),
-                           miss);
+    __ JumpIfNotUniqueNameInstanceType(
+        FieldOperand(entity_name, Map::kInstanceTypeOffset), miss);
     __ bind(&good);
   }
 
@@ -3514,8 +3528,9 @@
 
       // Check if the entry name is not a unique name.
       __ mov(scratch, FieldOperand(scratch, HeapObject::kMapOffset));
-      __ JumpIfNotUniqueName(FieldOperand(scratch, Map::kInstanceTypeOffset),
-                             &maybe_in_dictionary);
+      __ JumpIfNotUniqueNameInstanceType(
+          FieldOperand(scratch, Map::kInstanceTypeOffset),
+          &maybe_in_dictionary);
     }
   }
 
@@ -3545,6 +3560,8 @@
     Isolate* isolate) {
   StoreBufferOverflowStub stub(isolate, kDontSaveFPRegs);
   stub.GetCode();
+  StoreBufferOverflowStub stub2(isolate, kSaveFPRegs);
+  stub2.GetCode();
 }
 
 
@@ -3564,7 +3581,7 @@
   __ jmp(&skip_to_incremental_compacting, Label::kFar);
 
   if (remembered_set_action() == EMIT_REMEMBERED_SET) {
-    __ RememberedSetHelper(object(), address(), value(),
+    __ RememberedSetHelper(object(), address(), value(), save_fp_regs_mode(),
                            MacroAssembler::kReturnAtEnd);
   } else {
     __ ret(0);
@@ -3608,7 +3625,7 @@
         mode);
     InformIncrementalMarker(masm);
     regs_.Restore(masm);
-    __ RememberedSetHelper(object(), address(), value(),
+    __ RememberedSetHelper(object(), address(), value(), save_fp_regs_mode(),
                            MacroAssembler::kReturnAtEnd);
 
     __ bind(&dont_need_remembered_set);
@@ -3625,7 +3642,7 @@
 
 
 void RecordWriteStub::InformIncrementalMarker(MacroAssembler* masm) {
-  regs_.SaveCallerSaveRegisters(masm);
+  regs_.SaveCallerSaveRegisters(masm, save_fp_regs_mode());
   int argument_count = 3;
   __ PrepareCallCFunction(argument_count, regs_.scratch0());
   __ mov(Operand(esp, 0 * kPointerSize), regs_.object());
@@ -3638,7 +3655,7 @@
       ExternalReference::incremental_marking_record_write_function(isolate()),
       argument_count);
 
-  regs_.RestoreCallerSaveRegisters(masm);
+  regs_.RestoreCallerSaveRegisters(masm, save_fp_regs_mode());
 }
 
 
@@ -3669,7 +3686,7 @@
 
   regs_.Restore(masm);
   if (on_no_need == kUpdateRememberedSetOnNoNeedToInformIncrementalMarker) {
-    __ RememberedSetHelper(object(), address(), value(),
+    __ RememberedSetHelper(object(), address(), value(), save_fp_regs_mode(),
                            MacroAssembler::kReturnAtEnd);
   } else {
     __ ret(0);
@@ -3714,7 +3731,7 @@
 
   regs_.Restore(masm);
   if (on_no_need == kUpdateRememberedSetOnNoNeedToInformIncrementalMarker) {
-    __ RememberedSetHelper(object(), address(), value(),
+    __ RememberedSetHelper(object(), address(), value(), save_fp_regs_mode(),
                            MacroAssembler::kReturnAtEnd);
   } else {
     __ ret(0);
@@ -3784,8 +3801,7 @@
                            FixedArrayBase::kHeaderSize));
   __ mov(Operand(ecx, 0), eax);
   // Update the write barrier for the array store.
-  __ RecordWrite(ebx, ecx, eax,
-                 EMIT_REMEMBERED_SET,
+  __ RecordWrite(ebx, ecx, eax, kDontSaveFPRegs, EMIT_REMEMBERED_SET,
                  OMIT_SMI_CHECK);
   __ ret(0);
 
@@ -3814,7 +3830,7 @@
 
 
 void StubFailureTrampolineStub::Generate(MacroAssembler* masm) {
-  CEntryStub ces(isolate(), 1);
+  CEntryStub ces(isolate(), 1, kSaveFPRegs);
   __ call(ces.GetCode(), RelocInfo::CODE_TARGET);
   int parameter_count_offset =
       StubFailureTrampolineFrame::kCallerStackParameterCountFrameOffset;
diff --git a/src/x87/code-stubs-x87.h b/src/x87/code-stubs-x87.h
index 49462bc..03ff477 100644
--- a/src/x87/code-stubs-x87.h
+++ b/src/x87/code-stubs-x87.h
@@ -116,11 +116,9 @@
 
 class RecordWriteStub: public PlatformCodeStub {
  public:
-  RecordWriteStub(Isolate* isolate,
-                  Register object,
-                  Register value,
-                  Register address,
-                  RememberedSetAction remembered_set_action)
+  RecordWriteStub(Isolate* isolate, Register object, Register value,
+                  Register address, RememberedSetAction remembered_set_action,
+                  SaveFPRegsMode fp_mode)
       : PlatformCodeStub(isolate),
         regs_(object,   // An input reg.
               address,  // An input reg.
@@ -128,7 +126,8 @@
     minor_key_ = ObjectBits::encode(object.code()) |
                  ValueBits::encode(value.code()) |
                  AddressBits::encode(address.code()) |
-                 RememberedSetActionBits::encode(remembered_set_action);
+                 RememberedSetActionBits::encode(remembered_set_action) |
+                 SaveFPRegsModeBits::encode(fp_mode);
   }
 
   RecordWriteStub(uint32_t key, Isolate* isolate)
@@ -271,12 +270,23 @@
     // saved registers that were not already preserved.  The caller saved
     // registers are eax, ecx and edx.  The three scratch registers (incl. ecx)
     // will be restored by other means so we don't bother pushing them here.
-    void SaveCallerSaveRegisters(MacroAssembler* masm) {
+    void SaveCallerSaveRegisters(MacroAssembler* masm, SaveFPRegsMode mode) {
       if (!scratch0_.is(eax) && !scratch1_.is(eax)) masm->push(eax);
       if (!scratch0_.is(edx) && !scratch1_.is(edx)) masm->push(edx);
+      if (mode == kSaveFPRegs) {
+        // Save FPU state in m108byte.
+        masm->sub(esp, Immediate(108));
+        masm->fnsave(Operand(esp, 0));
+      }
     }
 
-    inline void RestoreCallerSaveRegisters(MacroAssembler*masm) {
+    inline void RestoreCallerSaveRegisters(MacroAssembler* masm,
+                                           SaveFPRegsMode mode) {
+      if (mode == kSaveFPRegs) {
+        // Restore FPU state in m108byte.
+        masm->frstor(Operand(esp, 0));
+        masm->add(esp, Immediate(108));
+      }
       if (!scratch0_.is(edx) && !scratch1_.is(edx)) masm->pop(edx);
       if (!scratch0_.is(eax) && !scratch1_.is(eax)) masm->pop(eax);
     }
@@ -348,10 +358,15 @@
     return RememberedSetActionBits::decode(minor_key_);
   }
 
+  SaveFPRegsMode save_fp_regs_mode() const {
+    return SaveFPRegsModeBits::decode(minor_key_);
+  }
+
   class ObjectBits: public BitField<int, 0, 3> {};
   class ValueBits: public BitField<int, 3, 3> {};
   class AddressBits: public BitField<int, 6, 3> {};
   class RememberedSetActionBits: public BitField<RememberedSetAction, 9, 1> {};
+  class SaveFPRegsModeBits : public BitField<SaveFPRegsMode, 10, 1> {};
 
   RegisterAllocation regs_;
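With SaveFPRegsModeBits added, the stub's minor key now packs five fields into the low 11 bits. A worked stand-in for the BitField declarations above (the enum values 0/1 for kDontSaveFPRegs/kSaveFPRegs are an assumption for illustration):

#include <cstdint>
#include <cstdio>

constexpr uint32_t EncodeMinorKey(uint32_t object, uint32_t value,
                                  uint32_t address, uint32_t rs_action,
                                  uint32_t fp_mode) {
  return (object << 0) |     // ObjectBits: bits 0-2
         (value << 3) |      // ValueBits: bits 3-5
         (address << 6) |    // AddressBits: bits 6-8
         (rs_action << 9) |  // RememberedSetActionBits: bit 9
         (fp_mode << 10);    // SaveFPRegsModeBits: bit 10
}

int main() {
  // object=ebx(3), value=eax(0), address=ecx(1),
  // EMIT_REMEMBERED_SET(0), kSaveFPRegs(1).
  std::printf("minor_key = 0x%x\n", EncodeMinorKey(3, 0, 1, 0, 1));  // 0x443
}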
 
diff --git a/src/x87/codegen-x87.cc b/src/x87/codegen-x87.cc
index 56d273c..e33959e 100644
--- a/src/x87/codegen-x87.cc
+++ b/src/x87/codegen-x87.cc
@@ -217,12 +217,8 @@
 
   // Set transitioned map.
   __ mov(FieldOperand(receiver, HeapObject::kMapOffset), target_map);
-  __ RecordWriteField(receiver,
-                      HeapObject::kMapOffset,
-                      target_map,
-                      scratch,
-                      EMIT_REMEMBERED_SET,
-                      OMIT_SMI_CHECK);
+  __ RecordWriteField(receiver, HeapObject::kMapOffset, target_map, scratch,
+                      kDontSaveFPRegs, EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
 }
 
 
@@ -275,12 +271,8 @@
   // Replace receiver's backing store with newly created FixedDoubleArray.
   __ mov(FieldOperand(edx, JSObject::kElementsOffset), eax);
   __ mov(ebx, eax);
-  __ RecordWriteField(edx,
-                      JSObject::kElementsOffset,
-                      ebx,
-                      edi,
-                      EMIT_REMEMBERED_SET,
-                      OMIT_SMI_CHECK);
+  __ RecordWriteField(edx, JSObject::kElementsOffset, ebx, edi, kDontSaveFPRegs,
+                      EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
 
   __ mov(edi, FieldOperand(esi, FixedArray::kLengthOffset));
 
@@ -339,12 +331,8 @@
   // ebx: target map
   // Set transitioned map.
   __ mov(FieldOperand(edx, HeapObject::kMapOffset), ebx);
-  __ RecordWriteField(edx,
-                      HeapObject::kMapOffset,
-                      ebx,
-                      edi,
-                      OMIT_REMEMBERED_SET,
-                      OMIT_SMI_CHECK);
+  __ RecordWriteField(edx, HeapObject::kMapOffset, ebx, edi, kDontSaveFPRegs,
+                      OMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
 }
 
 
@@ -399,12 +387,8 @@
   // Set transitioned map.
   __ bind(&only_change_map);
   __ mov(FieldOperand(edx, HeapObject::kMapOffset), ebx);
-  __ RecordWriteField(edx,
-                      HeapObject::kMapOffset,
-                      ebx,
-                      edi,
-                      OMIT_REMEMBERED_SET,
-                      OMIT_SMI_CHECK);
+  __ RecordWriteField(edx, HeapObject::kMapOffset, ebx, edi, kDontSaveFPRegs,
+                      OMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
   __ jmp(&success);
 
   // Call into runtime if GC is required.
@@ -433,10 +417,7 @@
   __ mov(FieldOperand(edx, HeapNumber::kValueOffset + kPointerSize), esi);
   __ mov(FieldOperand(eax, ebx, times_2, FixedArray::kHeaderSize), edx);
   __ mov(esi, ebx);
-  __ RecordWriteArray(eax,
-                      edx,
-                      esi,
-                      EMIT_REMEMBERED_SET,
+  __ RecordWriteArray(eax, edx, esi, kDontSaveFPRegs, EMIT_REMEMBERED_SET,
                       OMIT_SMI_CHECK);
   __ jmp(&entry, Label::kNear);
 
@@ -455,20 +436,12 @@
   // edx: receiver
   // Set transitioned map.
   __ mov(FieldOperand(edx, HeapObject::kMapOffset), ebx);
-  __ RecordWriteField(edx,
-                      HeapObject::kMapOffset,
-                      ebx,
-                      edi,
-                      OMIT_REMEMBERED_SET,
-                      OMIT_SMI_CHECK);
+  __ RecordWriteField(edx, HeapObject::kMapOffset, ebx, edi, kDontSaveFPRegs,
+                      OMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
   // Replace receiver's backing store with newly created and filled FixedArray.
   __ mov(FieldOperand(edx, JSObject::kElementsOffset), eax);
-  __ RecordWriteField(edx,
-                      JSObject::kElementsOffset,
-                      eax,
-                      edi,
-                      EMIT_REMEMBERED_SET,
-                      OMIT_SMI_CHECK);
+  __ RecordWriteField(edx, JSObject::kElementsOffset, eax, edi, kDontSaveFPRegs,
+                      EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
 
   // Restore registers.
   __ pop(eax);
diff --git a/src/x87/deoptimizer-x87.cc b/src/x87/deoptimizer-x87.cc
index e873ac5..a76c7a7 100644
--- a/src/x87/deoptimizer-x87.cc
+++ b/src/x87/deoptimizer-x87.cc
@@ -204,8 +204,10 @@
 
 
 void Deoptimizer::CopyDoubleRegisters(FrameDescription* output_frame) {
-  // Do nothing for X87.
-  return;
+  for (int i = 0; i < X87Register::kMaxNumAllocatableRegisters; ++i) {
+    double double_value = input_->GetDoubleRegister(i);
+    output_frame->SetDoubleRegister(i, double_value);
+  }
 }
 
 
@@ -230,9 +232,42 @@
 
   // Save all general purpose registers before messing with them.
   const int kNumberOfRegisters = Register::kNumRegisters;
+
+  const int kDoubleRegsSize =
+      kDoubleSize * X87Register::kMaxNumAllocatableRegisters;
+
+  // Reserve space for x87 fp registers.
+  __ sub(esp, Immediate(kDoubleRegsSize));
+
   __ pushad();
 
-  const int kSavedRegistersAreaSize = kNumberOfRegisters * kPointerSize;
+  // GP registers are safe to use now.
+  // Save the used x87 fp registers at their positions in the space reserved above.
+  Label loop, done;
+  // Get the layout of x87 stack.
+  __ sub(esp, Immediate(kPointerSize));
+  __ fistp_s(MemOperand(esp, 0));
+  __ pop(eax);
+  // Preserve the stack layout word in edi.
+  __ mov(edi, eax);
+  // Get the x87 stack depth from the low 3 bits.
+  __ mov(ecx, eax);
+  __ and_(ecx, 0x7);
+  __ j(zero, &done, Label::kNear);
+
+  __ bind(&loop);
+  __ shr(eax, 0x3);
+  __ mov(ebx, eax);
+  __ and_(ebx, 0x7);  // Extract the st_x index into ebx.
+  // Pop TOS into its slot; the 0x20 displacement skips the 32 bytes of GP
+  // registers saved by pushad, so st_i goes to (esp + ebx * kDoubleSize + 0x20).
+  __ fstp_d(Operand(esp, ebx, times_8, 0x20));
+  __ dec(ecx);  // Decrease stack depth.
+  __ j(not_zero, &loop, Label::kNear);
+  __ bind(&done);
+
+  const int kSavedRegistersAreaSize =
+      kNumberOfRegisters * kPointerSize + kDoubleRegsSize;
 
   // Get the bailout id from the stack.
   __ mov(ebx, Operand(esp, kSavedRegistersAreaSize));
@@ -245,6 +280,7 @@
   __ sub(edx, ebp);
   __ neg(edx);
 
+  __ push(edi);
   // Allocate a new deoptimizer object.
   __ PrepareCallCFunction(6, eax);
   __ mov(eax, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
@@ -260,6 +296,8 @@
     __ CallCFunction(ExternalReference::new_deoptimizer_function(isolate()), 6);
   }
 
+  __ pop(edi);
+
   // Preserve deoptimizer object in register eax and get the input
   // frame descriptor pointer.
   __ mov(ebx, Operand(eax, Deoptimizer::input_offset()));
@@ -270,13 +308,22 @@
     __ pop(Operand(ebx, offset));
   }
 
+  int double_regs_offset = FrameDescription::double_registers_offset();
+  // Fill in the double input registers.
+  for (int i = 0; i < X87Register::kMaxNumAllocatableRegisters; ++i) {
+    int dst_offset = i * kDoubleSize + double_regs_offset;
+    int src_offset = i * kDoubleSize;
+    __ fld_d(Operand(esp, src_offset));
+    __ fstp_d(Operand(ebx, dst_offset));
+  }
+
   // Clear FPU all exceptions.
   // TODO(ulan): Find out why the TOP register is not zero here in some cases,
   // and check that the generated code never deoptimizes with unbalanced stack.
   __ fnclex();
 
   // Remove the bailout id, return address and the double registers.
-  __ add(esp, Immediate(2 * kPointerSize));
+  __ add(esp, Immediate(kDoubleRegsSize + 2 * kPointerSize));
 
   // Compute a pointer to the unwinding limit in register ecx; that is
   // the first stack slot not part of the input frame.
@@ -298,6 +345,7 @@
   __ j(not_equal, &pop_loop);
 
   // Compute the output frame in the deoptimizer.
+  __ push(edi);
   __ push(eax);
   __ PrepareCallCFunction(1, ebx);
   __ mov(Operand(esp, 0 * kPointerSize), eax);
@@ -307,6 +355,7 @@
         ExternalReference::compute_output_frames_function(isolate()), 1);
   }
   __ pop(eax);
+  __ pop(edi);
 
   // If frame was dynamically aligned, pop padding.
   Label no_padding;
@@ -345,6 +394,25 @@
   __ cmp(eax, edx);
   __ j(below, &outer_push_loop);
 
+
+  // In case of a failed STUB, we have to restore the x87 stack.
+  // The x87 stack layout is in edi.
+  Label loop2, done2;
+  // Get the x87 stack depth from the low 3 bits.
+  __ mov(ecx, edi);
+  __ and_(ecx, 0x7);
+  __ j(zero, &done2, Label::kNear);
+
+  __ lea(ecx, Operand(ecx, ecx, times_2, 0));
+  __ bind(&loop2);
+  __ mov(eax, edi);
+  __ shr_cl(eax);
+  __ and_(eax, 0x7);
+  __ fld_d(Operand(ebx, eax, times_8, double_regs_offset));
+  __ sub(ecx, Immediate(0x3));
+  __ j(not_zero, &loop2, Label::kNear);
+  __ bind(&done2);
+
   // Push state, pc, and continuation from the last output frame.
   __ push(Operand(ebx, FrameDescription::state_offset()));
   __ push(Operand(ebx, FrameDescription::pc_offset()));
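The layout word threaded through edi above matches X87Stack::GetLayout() in lithium-codegen-x87.cc: the low 3 bits hold the stack depth, and each following 3-bit group holds one st slot's X87Register code, TOS group first. A host-side model of the encoding and of the shr/and walk used in the save loop (a sketch, not the generated code):

#include <cstdio>

unsigned EncodeLayout(const int* regs, int depth) {  // regs[0] = bottom slot
  unsigned layout = depth;
  for (int i = 0; i < depth; i++)
    layout |= regs[depth - 1 - i] << ((i + 1) * 3);
  return layout;
}

int main() {
  int regs[] = {5, 2};  // two live slots; register code 2 is on top
  unsigned layout = EncodeLayout(regs, 2);
  int depth = layout & 0x7;  // the "low 3 bits" extraction above
  std::printf("layout=0x%x depth=%d\n", layout, depth);
  for (int i = 0; i < depth; i++) {
    layout >>= 3;  // the save loop's shr(eax, 0x3)
    std::printf("st%d holds allocatable reg %d\n", i, layout & 0x7);
  }
}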
diff --git a/src/x87/disasm-x87.cc b/src/x87/disasm-x87.cc
index 53a8c29..908e8b0 100644
--- a/src/x87/disasm-x87.cc
+++ b/src/x87/disasm-x87.cc
@@ -702,7 +702,12 @@
         case 0: mnem = "fld_s"; break;
         case 2: mnem = "fst_s"; break;
         case 3: mnem = "fstp_s"; break;
-        case 7: mnem = "fstcw"; break;
+        case 5: mnem = "fldcw"; break;
+        case 7: mnem = "fnstcw"; break;
         default: UnimplementedInstruction();
       }
       break;
@@ -716,11 +721,27 @@
       }
       break;
 
+    case 0xDC:
+      switch (regop) {
+        case 0: mnem = "fadd_d"; break;
+        default: UnimplementedInstruction();
+      }
+      break;
+
     case 0xDD: switch (regop) {
         case 0: mnem = "fld_d"; break;
         case 1: mnem = "fisttp_d"; break;
         case 2: mnem = "fst_d"; break;
         case 3: mnem = "fstp_d"; break;
+        case 4: mnem = "frstor"; break;
+        case 6: mnem = "fnsave"; break;
         default: UnimplementedInstruction();
       }
       break;
diff --git a/src/x87/full-codegen-x87.cc b/src/x87/full-codegen-x87.cc
index 94ccbcf..58328e0 100644
--- a/src/x87/full-codegen-x87.cc
+++ b/src/x87/full-codegen-x87.cc
@@ -221,10 +221,8 @@
         __ mov(Operand(esi, context_offset), eax);
         // Update the write barrier. This clobbers eax and ebx.
         if (need_write_barrier) {
-          __ RecordWriteContextSlot(esi,
-                                    context_offset,
-                                    eax,
-                                    ebx);
+          __ RecordWriteContextSlot(esi, context_offset, eax, ebx,
+                                    kDontSaveFPRegs);
         } else if (FLAG_debug_code) {
           Label done;
           __ JumpIfInNewSpace(esi, eax, &done, Label::kNear);
@@ -708,7 +706,7 @@
   if (var->IsContextSlot()) {
     int offset = Context::SlotOffset(var->index());
     DCHECK(!scratch0.is(esi) && !src.is(esi) && !scratch1.is(esi));
-    __ RecordWriteContextSlot(scratch0, offset, src, scratch1);
+    __ RecordWriteContextSlot(scratch0, offset, src, scratch1, kDontSaveFPRegs);
   }
 }
 
@@ -838,12 +836,9 @@
       VisitForAccumulatorValue(declaration->fun());
       __ mov(ContextOperand(esi, variable->index()), result_register());
       // We know that we have written a function, which is not a smi.
-      __ RecordWriteContextSlot(esi,
-                                Context::SlotOffset(variable->index()),
-                                result_register(),
-                                ecx,
-                                EMIT_REMEMBERED_SET,
-                                OMIT_SMI_CHECK);
+      __ RecordWriteContextSlot(esi, Context::SlotOffset(variable->index()),
+                                result_register(), ecx, kDontSaveFPRegs,
+                                EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
       PrepareForBailoutForId(proxy->id(), NO_REGISTERS);
       break;
     }
@@ -877,11 +872,8 @@
   // Assign it.
   __ mov(ContextOperand(esi, variable->index()), eax);
   // We know that we have written a module, which is not a smi.
-  __ RecordWriteContextSlot(esi,
-                            Context::SlotOffset(variable->index()),
-                            eax,
-                            ecx,
-                            EMIT_REMEMBERED_SET,
+  __ RecordWriteContextSlot(esi, Context::SlotOffset(variable->index()), eax,
+                            ecx, kDontSaveFPRegs, EMIT_REMEMBERED_SET,
                             OMIT_SMI_CHECK);
   PrepareForBailoutForId(declaration->proxy()->id(), NO_REGISTERS);
 
@@ -1783,9 +1775,8 @@
       // Store the subexpression value in the array's elements.
       __ mov(FieldOperand(ebx, offset), result_register());
       // Update the write barrier for the array store.
-      __ RecordWriteField(ebx, offset, result_register(), ecx,
-                          EMIT_REMEMBERED_SET,
-                          INLINE_SMI_CHECK);
+      __ RecordWriteField(ebx, offset, result_register(), ecx, kDontSaveFPRegs,
+                          EMIT_REMEMBERED_SET, INLINE_SMI_CHECK);
     } else {
       // Store the subexpression value in the array's elements.
       __ mov(ecx, Immediate(Smi::FromInt(i)));
@@ -1942,7 +1933,8 @@
              Immediate(Smi::FromInt(continuation.pos())));
       __ mov(FieldOperand(eax, JSGeneratorObject::kContextOffset), esi);
       __ mov(ecx, esi);
-      __ RecordWriteField(eax, JSGeneratorObject::kContextOffset, ecx, edx);
+      __ RecordWriteField(eax, JSGeneratorObject::kContextOffset, ecx, edx,
+                          kDontSaveFPRegs);
       __ lea(ebx, Operand(ebp, StandardFrameConstants::kExpressionsOffset));
       __ cmp(esp, ebx);
       __ j(equal, &post_runtime);
@@ -2016,7 +2008,8 @@
              Immediate(Smi::FromInt(l_continuation.pos())));
       __ mov(FieldOperand(eax, JSGeneratorObject::kContextOffset), esi);
       __ mov(ecx, esi);
-      __ RecordWriteField(eax, JSGeneratorObject::kContextOffset, ecx, edx);
+      __ RecordWriteField(eax, JSGeneratorObject::kContextOffset, ecx, edx,
+                          kDontSaveFPRegs);
       __ CallRuntime(Runtime::kSuspendJSGeneratorObject, 1);
       __ mov(context_register(),
              Operand(ebp, StandardFrameConstants::kContextOffset));
@@ -2224,8 +2217,8 @@
 
   // Only the value field needs a write barrier, as the other values are in the
   // root set.
-  __ RecordWriteField(eax, JSGeneratorObject::kResultValuePropertyOffset,
-                      ecx, edx);
+  __ RecordWriteField(eax, JSGeneratorObject::kResultValuePropertyOffset, ecx,
+                      edx, kDontSaveFPRegs);
 }
 
 
@@ -2433,7 +2426,7 @@
   if (var->IsContextSlot()) {
     __ mov(edx, eax);
     int offset = Context::SlotOffset(var->index());
-    __ RecordWriteContextSlot(ecx, offset, edx, ebx);
+    __ RecordWriteContextSlot(ecx, offset, edx, ebx, kDontSaveFPRegs);
   }
 }
 
@@ -3532,7 +3525,7 @@
   // Update the write barrier.  Save the value as it will be
   // overwritten by the write barrier code and is needed afterward.
   __ mov(edx, eax);
-  __ RecordWriteField(ebx, JSValue::kValueOffset, edx, ecx);
+  __ RecordWriteField(ebx, JSValue::kValueOffset, edx, ecx, kDontSaveFPRegs);
 
   __ bind(&done);
   context()->Plug(eax);
diff --git a/src/x87/lithium-codegen-x87.cc b/src/x87/lithium-codegen-x87.cc
index ded2cd9..ff68fd0 100644
--- a/src/x87/lithium-codegen-x87.cc
+++ b/src/x87/lithium-codegen-x87.cc
@@ -254,10 +254,8 @@
         __ mov(Operand(esi, context_offset), eax);
         // Update the write barrier. This clobbers eax and ebx.
         if (need_write_barrier) {
-          __ RecordWriteContextSlot(esi,
-                                    context_offset,
-                                    eax,
-                                    ebx);
+          __ RecordWriteContextSlot(esi, context_offset, eax, ebx,
+                                    kDontSaveFPRegs);
         } else if (FLAG_debug_code) {
           Label done;
           __ JumpIfInNewSpace(esi, eax, &done, Label::kNear);
@@ -269,6 +267,8 @@
     Comment(";;; End allocate local context");
   }
 
+  // Initialize FPU state.
+  __ fninit();
   // Trace the call.
   if (FLAG_trace && info()->IsOptimizing()) {
     // We have not executed any compiled code yet, so esi still holds the
@@ -327,6 +327,9 @@
   int slots = GetStackSlotCount() - graph()->osr()->UnoptimizedFrameSlots();
   DCHECK(slots >= 1);
   __ sub(esp, Immediate((slots - 1) * kPointerSize));
+
+  // Initialize FPU state.
+  __ fninit();
 }
 
 
@@ -342,8 +345,21 @@
 
 
 void LCodeGen::GenerateBodyInstructionPost(LInstruction* instr) {
+  // When returning from a function call, the FPU must be initialized again.
+  if (instr->IsCall() && instr->ClobbersDoubleRegisters(isolate())) {
+    bool double_result = instr->HasDoubleRegisterResult();
+    if (double_result) {
+      __ lea(esp, Operand(esp, -kDoubleSize));
+      __ fstp_d(Operand(esp, 0));
+    }
+    __ fninit();
+    if (double_result) {
+      __ fld_d(Operand(esp, 0));
+      __ lea(esp, Operand(esp, kDoubleSize));
+    }
+  }
   if (instr->IsGoto()) {
-    x87_stack_.LeavingBlock(current_block_, LGoto::cast(instr));
+    x87_stack_.LeavingBlock(current_block_, LGoto::cast(instr), this);
   } else if (FLAG_debug_code && FLAG_enable_slow_asserts &&
              !instr->IsGap() && !instr->IsReturn()) {
     if (instr->ClobbersDoubleRegisters(isolate())) {
@@ -367,11 +383,7 @@
     Deoptimizer::JumpTableEntry* table_entry = &jump_table_[i];
     __ bind(&table_entry->label);
     Address entry = table_entry->address;
-    Deoptimizer::BailoutType type = table_entry->bailout_type;
-    int id = Deoptimizer::GetDeoptimizationId(isolate(), entry, type);
-    DCHECK_NE(Deoptimizer::kNotDeoptimizationEntry, id);
-    Comment(";;; jump table entry %d: deoptimization bailout %d.", i, id);
-    DeoptComment(table_entry->mnemonic, table_entry->reason);
+    DeoptComment(table_entry->reason);
     if (table_entry->needs_frame) {
       DCHECK(!info()->saves_caller_doubles());
       __ push(Immediate(ExternalReference::ForDeoptEntry(entry)));
@@ -494,10 +506,27 @@
 void LCodeGen::X87LoadForUsage(X87Register reg1, X87Register reg2) {
   DCHECK(x87_stack_.Contains(reg1));
   DCHECK(x87_stack_.Contains(reg2));
-  x87_stack_.Fxch(reg1, 1);
-  x87_stack_.Fxch(reg2);
-  x87_stack_.pop();
-  x87_stack_.pop();
+  if (reg1.is(reg2) && x87_stack_.depth() == 1) {
+    __ fld(x87_stack_.st(reg1));
+    x87_stack_.push(reg1);
+    x87_stack_.pop();
+    x87_stack_.pop();
+  } else {
+    x87_stack_.Fxch(reg1, 1);
+    x87_stack_.Fxch(reg2);
+    x87_stack_.pop();
+    x87_stack_.pop();
+  }
+}
+
+
+int LCodeGen::X87Stack::GetLayout() {
+  int layout = stack_depth_;
+  for (int i = 0; i < stack_depth_; i++) {
+    layout |= (stack_[stack_depth_ - 1 - i].code() << ((i + 1) * 3));
+  }
+
+  return layout;
 }
 
 
@@ -572,6 +601,22 @@
 }
 
 
+void LCodeGen::X87Mov(X87Register dst, X87Register src, X87OperandType opts) {
+  if (x87_stack_.Contains(dst)) {
+    x87_stack_.Fxch(dst);
+    __ fstp(0);
+    x87_stack_.pop();
+    // Push ST(i) onto the FPU register stack
+    __ fld(x87_stack_.st(src));
+    x87_stack_.push(dst);
+  } else {
+    // Push ST(i) onto the FPU register stack
+    __ fld(x87_stack_.st(src));
+    x87_stack_.push(dst);
+  }
+}
+
+
 void LCodeGen::X87Fld(Operand src, X87OperandType opts) {
   DCHECK(!src.is_reg_only());
   switch (opts) {
@@ -597,6 +642,9 @@
     case kX87DoubleOperand:
       __ fst_d(dst);
       break;
+    case kX87FloatOperand:
+      __ fst_s(dst);
+      break;
     case kX87IntOperand:
       __ fist_s(dst);
       break;
@@ -660,15 +708,39 @@
 }
 
 
-void LCodeGen::X87Stack::LeavingBlock(int current_block_id, LGoto* goto_instr) {
-  DCHECK(stack_depth_ <= 1);
-  // If ever used for new stubs producing two pairs of doubles joined into two
-  // phis this assert hits. That situation is not handled, since the two stacks
-  // might have st0 and st1 swapped.
-  if (current_block_id + 1 != goto_instr->block_id()) {
+void LCodeGen::X87Stack::LeavingBlock(int current_block_id, LGoto* goto_instr,
+                                      LCodeGen* cgen) {
+  // When going to a join block, an explicit LClobberDoubles is inserted
+  // before LGoto, since all used x87 registers are spilled to stack slots.
+  // The ResolvePhis phase of the register allocator guarantees that both
+  // inputs' x87 stacks have the same layout, so don't check stack_depth_ <= 1.
+  int goto_block_id = goto_instr->block_id();
+  if (current_block_id + 1 != goto_block_id) {
     // If we have a value on the x87 stack on leaving a block, it must be a
     // phi input. If the next block we compile is not the join block, we have
     // to discard the stack state.
+    // Before discarding the stack state, save it if the "goto block" has an
+    // unreachable last predecessor and FLAG_unreachable_code_elimination is on.
+    if (FLAG_unreachable_code_elimination) {
+      int length = goto_instr->block()->predecessors()->length();
+      bool has_unreachable_last_predecessor = false;
+      for (int i = 0; i < length; i++) {
+        HBasicBlock* block = goto_instr->block()->predecessors()->at(i);
+        if (block->IsUnreachable() &&
+            (block->block_id() + 1) == goto_block_id) {
+          has_unreachable_last_predecessor = true;
+        }
+      }
+      if (has_unreachable_last_predecessor) {
+        if (cgen->x87_stack_map_.find(goto_block_id) ==
+            cgen->x87_stack_map_.end()) {
+          X87Stack* stack = new (cgen->zone()) X87Stack(*this);
+          cgen->x87_stack_map_.insert(std::make_pair(goto_block_id, stack));
+        }
+      }
+    }
+
+    // Discard the stack state.
     stack_depth_ = 0;
   }
 }
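A condensed model of the save/restore handshake this hunk introduces (names simplified; the real code keys a zone-allocated X87Stack copy by block id): LeavingBlock() stashes the state when the join block's last predecessor is unreachable, and DoLabel() — see the hunk near the end of this file — restores it when that block is emitted.

#include <map>

struct X87StackState { int depth; int regs[8]; };

std::map<int, X87StackState> x87_stack_map;  // goto block id -> saved state

void LeavingBlock(int current_id, int goto_id, X87StackState* stack,
                  bool last_predecessor_unreachable) {
  if (current_id + 1 != goto_id) {
    if (last_predecessor_unreachable && !x87_stack_map.count(goto_id))
      x87_stack_map[goto_id] = *stack;  // save before discarding
    stack->depth = 0;                   // discard the stack state
  }
}

void DoLabel(int block_id, X87StackState* current) {
  std::map<int, X87StackState>::const_iterator it =
      x87_stack_map.find(block_id);
  if (it != x87_stack_map.end()) *current = it->second;  // restore
}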
@@ -678,13 +750,14 @@
   // The deoptimizer does not support X87 Registers. But as long as we
   // deopt from a stub its not a problem, since we will re-materialize the
   // original stub inputs, which can't be double registers.
-  DCHECK(info()->IsStub());
+  // DCHECK(info()->IsStub());
   if (FLAG_debug_code && FLAG_enable_slow_asserts) {
     __ pushfd();
     __ VerifyX87StackDepth(x87_stack_.depth());
     __ popfd();
   }
-  for (int i = 0; i < x87_stack_.depth(); i++) __ fstp(0);
+
+  // The x87 stack is flushed in the deoptimizer entry instead.
 }
 
 
@@ -891,6 +964,9 @@
     } else {
       translation->StoreInt32Register(reg);
     }
+  } else if (op->IsDoubleRegister()) {
+    X87Register reg = ToX87Register(op);
+    translation->StoreDoubleRegister(reg);
   } else if (op->IsConstantOperand()) {
     HConstant* constant = chunk()->LookupConstant(LConstantOperand::cast(op));
     int src_index = DefineDeoptimizationLiteral(constant->handle(isolate()));
@@ -925,13 +1001,12 @@
 }
 
 
-void LCodeGen::CallRuntime(const Runtime::Function* fun,
-                           int argc,
-                           LInstruction* instr) {
+void LCodeGen::CallRuntime(const Runtime::Function* fun, int argc,
+                           LInstruction* instr, SaveFPRegsMode save_doubles) {
   DCHECK(instr != NULL);
   DCHECK(instr->HasPointerMap());
 
-  __ CallRuntime(fun, argc);
+  __ CallRuntime(fun, argc, save_doubles);
 
   RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
 
@@ -961,7 +1036,7 @@
                                        LOperand* context) {
   LoadContextFromDeferred(context);
 
-  __ CallRuntime(id);
+  __ CallRuntimeSaveDoubles(id);
   RecordSafepointWithRegisters(
       instr->pointer_map(), argc, Safepoint::kNoLazyDeopt);
 
@@ -1007,7 +1082,7 @@
 
 
 void LCodeGen::DeoptimizeIf(Condition cc, LInstruction* instr,
-                            const char* reason,
+                            const char* detail,
                             Deoptimizer::BailoutType bailout_type) {
   LEnvironment* environment = instr->environment();
   RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
@@ -1035,6 +1110,12 @@
     __ pop(eax);
     __ popfd();
     DCHECK(frame_is_built_);
+    // Put the x87 stack layout in TOS.
+    if (x87_stack_.depth() > 0) EmitFlushX87ForDeopt();
+    __ push(Immediate(x87_stack_.GetLayout()));
+    __ fild_s(MemOperand(esp, 0));
+    // Don't touch eflags.
+    __ lea(esp, Operand(esp, kPointerSize));
     __ call(entry, RelocInfo::RUNTIME_ENTRY);
     __ bind(&no_deopt);
     __ mov(Operand::StaticVariable(count), eax);
@@ -1042,14 +1123,18 @@
     __ popfd();
   }
 
-  // Before Instructions which can deopt, we normally flush the x87 stack. But
-  // we can have inputs or outputs of the current instruction on the stack,
-  // thus we need to flush them here from the physical stack to leave it in a
-  // consistent state.
-  if (x87_stack_.depth() > 0) {
+  // Put the x87 stack layout in TOS, so that we can save x87 fp registers in
+  // the correct locations.
+  {
     Label done;
     if (cc != no_condition) __ j(NegateCondition(cc), &done, Label::kNear);
-    EmitFlushX87ForDeopt();
+    if (x87_stack_.depth() > 0) EmitFlushX87ForDeopt();
+
+    int x87_stack_layout = x87_stack_.GetLayout();
+    __ push(Immediate(x87_stack_layout));
+    __ fild_s(MemOperand(esp, 0));
+    // Don't touch eflags.
+    __ lea(esp, Operand(esp, kPointerSize));
     __ bind(&done);
   }
 
@@ -1060,19 +1145,19 @@
     __ bind(&done);
   }
 
+  Deoptimizer::Reason reason(instr->hydrogen_value()->position().raw(),
+                             instr->Mnemonic(), detail);
   DCHECK(info()->IsStub() || frame_is_built_);
   if (cc == no_condition && frame_is_built_) {
-    DeoptComment(instr->Mnemonic(), reason);
+    DeoptComment(reason);
     __ call(entry, RelocInfo::RUNTIME_ENTRY);
   } else {
+    Deoptimizer::JumpTableEntry table_entry(entry, reason, bailout_type,
+                                            !frame_is_built_);
     // We often have several deopts to the same entry, reuse the last
     // jump entry if this is the case.
     if (jump_table_.is_empty() ||
-        jump_table_.last().address != entry ||
-        jump_table_.last().needs_frame != !frame_is_built_ ||
-        jump_table_.last().bailout_type != bailout_type) {
-      Deoptimizer::JumpTableEntry table_entry(entry, instr->Mnemonic(), reason,
-                                              bailout_type, !frame_is_built_);
+        !table_entry.IsEquivalentTo(jump_table_.last())) {
       jump_table_.Add(table_entry, zone());
     }
     if (cc == no_condition) {
@@ -1085,11 +1170,11 @@
 
 
 void LCodeGen::DeoptimizeIf(Condition cc, LInstruction* instr,
-                            const char* reason) {
+                            const char* detail) {
   Deoptimizer::BailoutType bailout_type = info()->IsStub()
       ? Deoptimizer::LAZY
       : Deoptimizer::EAGER;
-  DeoptimizeIf(cc, instr, reason, bailout_type);
+  DeoptimizeIf(cc, instr, detail, bailout_type);
 }
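For reference, the shape this hunk switches to (a sketch inferred from the calls in this diff, not the actual header): position, mnemonic and detail are bundled into a Reason, and jump-table reuse collapses to one IsEquivalentTo() check instead of three hand-written field comparisons.

struct Reason {
  int raw_position;
  const char* mnemonic;  // instruction mnemonic (interned, so == suffices)
  const char* detail;    // free-text detail such as "conversion overflow"
  bool operator==(const Reason& other) const {
    return raw_position == other.raw_position &&
           mnemonic == other.mnemonic && detail == other.detail;
  }
};

struct JumpTableEntry {
  void* address;
  Reason reason;
  int bailout_type;
  bool needs_frame;
  bool IsEquivalentTo(const JumpTableEntry& other) const {
    return address == other.address && bailout_type == other.bailout_type &&
           needs_frame == other.needs_frame && reason == other.reason;
  }
};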
 
 
@@ -1236,6 +1321,16 @@
           LabelType(label));
   __ bind(label->label());
   current_block_ = label->block_id();
+  if (label->block()->predecessors()->length() > 1) {
+    // A join block's x87 stack is that of its last visited predecessor.
+    // If the last visited predecessor block is unreachable, the stack state
+    // will be wrong. In that case, use the x87 stack of a reachable predecessor.
+    X87StackMap::const_iterator it = x87_stack_map_.find(current_block_);
+    // Restore x87 stack.
+    if (it != x87_stack_map_.end()) {
+      x87_stack_ = *(it->second);
+    }
+  }
   DoGap(label);
 }
 
@@ -1737,7 +1832,7 @@
     // Bail out if the result is supposed to be negative zero.
     Label done;
     __ test(left, Operand(left));
-    __ j(not_zero, &done, Label::kNear);
+    __ j(not_zero, &done);
     if (right->IsConstantOperand()) {
       if (ToInteger32(LConstantOperand::cast(right)) < 0) {
         DeoptimizeIf(no_condition, instr);
@@ -2118,8 +2213,58 @@
     }
     __ bind(&return_left);
   } else {
-    // TODO(weiliang) use X87 for double representation.
-    UNIMPLEMENTED();
+    DCHECK(instr->hydrogen()->representation().IsDouble());
+    Label check_nan_left, check_zero, return_left, return_right;
+    Condition condition = (operation == HMathMinMax::kMathMin) ? below : above;
+    X87Register left_reg = ToX87Register(left);
+    X87Register right_reg = ToX87Register(right);
+
+    X87PrepareBinaryOp(left_reg, right_reg, ToX87Register(instr->result()));
+    __ fld(1);
+    __ fld(1);
+    __ FCmp();
+    __ j(parity_even, &check_nan_left, Label::kNear);  // At least one NaN.
+    __ j(equal, &check_zero, Label::kNear);            // left == right.
+    __ j(condition, &return_left, Label::kNear);
+    __ jmp(&return_right, Label::kNear);
+
+    __ bind(&check_zero);
+    __ fld(0);
+    __ fldz();
+    __ FCmp();
+    __ j(not_equal, &return_left, Label::kNear);  // left == right != 0.
+    // At this point, both left and right are either 0 or -0.
+    if (operation == HMathMinMax::kMathMin) {
+      // Push st0 and st1 onto the stack, pop one into a temp register, OR the
+      // sign bits together in memory, and load the result into left.
+      Register scratch_reg = ToRegister(instr->temp());
+      __ fld(1);
+      __ fld(1);
+      __ sub(esp, Immediate(2 * kPointerSize));
+      __ fstp_s(MemOperand(esp, 0));
+      __ fstp_s(MemOperand(esp, kPointerSize));
+      __ pop(scratch_reg);
+      __ or_(MemOperand(esp, 0), scratch_reg);
+      X87Mov(left_reg, MemOperand(esp, 0), kX87FloatOperand);
+      __ pop(scratch_reg);  // restore esp
+    } else {
+      // Since we operate on +0 and/or -0, fadd gives the correct result directly.
+      X87Fxch(left_reg);
+      __ fadd(1);
+    }
+    __ jmp(&return_left, Label::kNear);
+
+    __ bind(&check_nan_left);
+    __ fld(0);
+    __ fld(0);
+    __ FCmp();                                      // NaN check.
+    __ j(parity_even, &return_left, Label::kNear);  // left == NaN.
+
+    __ bind(&return_right);
+    X87Fxch(left_reg);
+    X87Mov(left_reg, right_reg);
+
+    __ bind(&return_left);
   }
 }
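A small host-side check of the +-0 handling above (a model of the intent, not the generated code): for min the sign bits are OR-ed so -0 wins, while for max adding the two values already yields the right zero.

#include <cmath>
#include <cstdint>
#include <cstdio>
#include <cstring>

int main() {
  float pz = 0.0f, nz = -0.0f;
  uint32_t a, b;
  std::memcpy(&a, &pz, sizeof a);
  std::memcpy(&b, &nz, sizeof b);
  // min path: OR keeps the sign bit if either input is -0.
  uint32_t bits = a | b;
  float min_result;
  std::memcpy(&min_result, &bits, sizeof min_result);
  std::printf("min(+0,-0) is -0: %d\n", std::signbit(min_result) ? 1 : 0);
  // max path: +0 + -0 == +0, and -0 + -0 == -0, so fadd alone suffices.
  std::printf("max(+0,-0) is +0: %d\n", std::signbit(pz + nz) ? 0 : 1);
}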
 
@@ -2164,6 +2309,13 @@
       UNREACHABLE();
       break;
   }
+
+  // Always store the result to memory and reload it, forcing the 80-bit
+  // intermediate to be rounded down to double precision.
+  __ lea(esp, Operand(esp, -kDoubleSize));
+  __ fstp_d(Operand(esp, 0));
+  __ fld_d(Operand(esp, 0));
+  __ lea(esp, Operand(esp, kDoubleSize));
 }
 
 
@@ -2217,7 +2369,11 @@
     __ test(reg, Operand(reg));
     EmitBranch(instr, not_zero);
   } else if (r.IsDouble()) {
-    UNREACHABLE();
+    X87Register reg = ToX87Register(instr->value());
+    X87LoadForUsage(reg);
+    __ fldz();
+    __ FCmp();
+    EmitBranch(instr, not_zero);
   } else {
     DCHECK(r.IsTagged());
     Register reg = ToRegister(instr->value());
@@ -2473,7 +2629,10 @@
   DCHECK(!rep.IsInteger32());
 
   if (rep.IsDouble()) {
-    UNREACHABLE();
+    X87Register input = ToX87Register(instr->value());
+    X87LoadForUsage(input);
+    __ FXamMinusZero();
+    EmitBranch(instr, equal);
   } else {
     Register value = ToRegister(instr->value());
     Handle<Map> map = masm()->isolate()->factory()->heap_number_map();
@@ -3058,12 +3217,8 @@
             ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
     Register temp = ToRegister(instr->temp());
     int offset = Context::SlotOffset(instr->slot_index());
-    __ RecordWriteContextSlot(context,
-                              offset,
-                              value,
-                              temp,
-                              EMIT_REMEMBERED_SET,
-                              check_needed);
+    __ RecordWriteContextSlot(context, offset, value, temp, kSaveFPRegs,
+                              EMIT_REMEMBERED_SET, check_needed);
   }
 
   __ bind(&skip_assignment);
@@ -3732,7 +3887,9 @@
   Representation r = instr->hydrogen()->value()->representation();
 
   if (r.IsDouble()) {
-    UNIMPLEMENTED();
+    X87Register value = ToX87Register(instr->value());
+    X87Fxch(value);
+    __ fabs();
   } else if (r.IsSmiOrInteger32()) {
     EmitIntegerMathAbs(instr);
   } else {  // Tagged case.
@@ -3748,47 +3905,347 @@
 
 
 void LCodeGen::DoMathFloor(LMathFloor* instr) {
-  UNIMPLEMENTED();
+  Register output_reg = ToRegister(instr->result());
+  X87Register input_reg = ToX87Register(instr->value());
+  X87Fxch(input_reg);
+
+  Label not_minus_zero, done;
+  // Deoptimize on unordered.
+  __ fldz();
+  __ fld(1);
+  __ FCmp();
+  DeoptimizeIf(parity_even, instr);
+  __ j(below, &not_minus_zero, Label::kNear);
+
+  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
+    // Check for negative zero.
+    __ j(not_equal, &not_minus_zero, Label::kNear);
+    // +- 0.0.
+    __ fld(0);
+    __ FXamSign();
+    DeoptimizeIf(not_zero, instr);
+    __ Move(output_reg, Immediate(0));
+    __ jmp(&done, Label::kFar);
+  }
+
+  // Positive input.
+  // rc=01B, round down.
+  __ bind(&not_minus_zero);
+  __ fnclex();
+  __ X87SetRC(0x0400);
+  __ sub(esp, Immediate(kPointerSize));
+  __ fist_s(Operand(esp, 0));
+  __ pop(output_reg);
+  __ X87CheckIA();
+  DeoptimizeIf(equal, instr);
+  __ fnclex();
+  __ X87SetRC(0x0000);
+  __ bind(&done);
 }
 
 
 void LCodeGen::DoMathRound(LMathRound* instr) {
-  UNIMPLEMENTED();
+  X87Register input_reg = ToX87Register(instr->value());
+  Register result = ToRegister(instr->result());
+  X87Fxch(input_reg);
+  Label below_one_half, below_minus_one_half, done;
+
+  ExternalReference one_half = ExternalReference::address_of_one_half();
+  ExternalReference minus_one_half =
+      ExternalReference::address_of_minus_one_half();
+
+  __ fld_d(Operand::StaticVariable(one_half));
+  __ fld(1);
+  __ FCmp();
+  __ j(carry, &below_one_half);
+
+  // Use round-towards-zero: for 0.5 <= x, truncating x + 0.5 equals floor(x + 0.5).
+  __ fld(0);
+  __ fadd_d(Operand::StaticVariable(one_half));
+  // rc=11B, round toward zero.
+  __ X87SetRC(0x0c00);
+  __ sub(esp, Immediate(kPointerSize));
+  // Clear exception bits.
+  __ fnclex();
+  __ fistp_s(MemOperand(esp, 0));
+  // Check overflow.
+  __ X87CheckIA();
+  __ pop(result);
+  DeoptimizeIf(equal, instr, "conversion overflow");
+  __ fnclex();
+  // Restore round mode.
+  __ X87SetRC(0x0000);
+  __ jmp(&done);
+
+  __ bind(&below_one_half);
+  __ fld_d(Operand::StaticVariable(minus_one_half));
+  __ fld(1);
+  __ FCmp();
+  __ j(carry, &below_minus_one_half);
+  // We return 0 for the input range [+0, 0.5[, or [-0.5, 0.5[ if
+  // we can ignore the difference between a result of -0 and +0.
+  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
+    // If the sign is positive, we return +0.
+    __ fld(0);
+    __ FXamSign();
+    DeoptimizeIf(not_zero, instr, "minus zero");
+  }
+  __ Move(result, Immediate(0));
+  __ jmp(&done);
+
+  __ bind(&below_minus_one_half);
+  __ fld(0);
+  __ fadd_d(Operand::StaticVariable(one_half));
+  // rc=01B, round down.
+  __ X87SetRC(0x0400);
+  __ sub(esp, Immediate(kPointerSize));
+  // Clear exception bits.
+  __ fnclex();
+  __ fistp_s(MemOperand(esp, 0));
+  // Check overflow.
+  __ X87CheckIA();
+  __ pop(result);
+  DeoptimizeIf(equal, instr, "conversion overflow");
+  __ fnclex();
+  // Restore round mode.
+  __ X87SetRC(0x0000);
+
+  __ bind(&done);
 }
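DoMathFloor and DoMathRound drive X87SetRC with magic masks; these select bits 10-11 (rounding control) of the x87 control word. The assumed mapping, named for clarity (X87SetRC is V8's macro, the enum is ours):

enum X87RoundingControl {
  kRoundToNearest  = 0x0000,  // rc=00B, the default
  kRoundDown       = 0x0400,  // rc=01B, used by DoMathFloor
  kRoundUp         = 0x0800,  // rc=10B
  kRoundTowardZero = 0x0c00   // rc=11B, used by DoMathRound to truncate x+0.5
};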
 
 
 void LCodeGen::DoMathFround(LMathFround* instr) {
-  UNIMPLEMENTED();
+  X87Register input_reg = ToX87Register(instr->value());
+  X87Fxch(input_reg);
+  __ sub(esp, Immediate(kPointerSize));
+  __ fstp_s(MemOperand(esp, 0));
+  X87Fld(MemOperand(esp, 0), kX87FloatOperand);
+  __ add(esp, Immediate(kPointerSize));
 }
 
 
 void LCodeGen::DoMathSqrt(LMathSqrt* instr) {
-  UNIMPLEMENTED();
+  X87Register input_reg = ToX87Register(instr->value());
+  X87Register output_reg = ToX87Register(instr->result());
+  DCHECK(output_reg.is(input_reg));
+  USE(output_reg);
+  X87Fxch(input_reg);
+  __ fsqrt();
 }
 
 
 void LCodeGen::DoMathPowHalf(LMathPowHalf* instr) {
-  UNIMPLEMENTED();
+  X87Register input_reg = ToX87Register(instr->value());
+  DCHECK(ToX87Register(instr->result()).is(input_reg));
+  X87Fxch(input_reg);
+  // Note that according to ECMA-262 15.8.2.13:
+  // Math.pow(-Infinity, 0.5) == Infinity
+  // Math.sqrt(-Infinity) == NaN
+  Label done, sqrt;
+  // Check base for -Infinity. C3 == 0, C2 == 1, C1 == 1 and C0 == 1
+  __ fxam();
+  __ push(eax);
+  __ fnstsw_ax();
+  __ and_(eax, Immediate(0x4700));
+  __ cmp(eax, Immediate(0x0700));
+  __ j(not_equal, &sqrt, Label::kNear);
+  // If input is -Infinity, return Infinity.
+  __ fchs();
+  __ jmp(&done, Label::kNear);
+
+  // Square root.
+  __ bind(&sqrt);
+  __ fldz();
+  __ faddp();  // Convert -0 to +0.
+  __ fsqrt();
+  __ bind(&done);
+  __ pop(eax);
 }
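The fxam check above reads the condition codes through fnstsw_ax. In the FPU status word, C0, C1, C2 and C3 sit at bits 8, 9, 10 and 14, which is where the 0x4700 mask and the 0x0700 signature for -Infinity (C3=0, C2=1, C1=1, C0=1; C1 is the sign) come from:

#include <cstdint>

constexpr uint16_t kC0 = 1u << 8;
constexpr uint16_t kC1 = 1u << 9;   // sign bit of the examined value
constexpr uint16_t kC2 = 1u << 10;
constexpr uint16_t kC3 = 1u << 14;
static_assert((kC3 | kC2 | kC1 | kC0) == 0x4700, "mask used by and_()");
static_assert((kC2 | kC1 | kC0) == 0x0700, "fxam signature of -Infinity");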
 
 
 void LCodeGen::DoPower(LPower* instr) {
-  UNIMPLEMENTED();
+  Representation exponent_type = instr->hydrogen()->right()->representation();
+  X87Register result = ToX87Register(instr->result());
+  // Having marked this as a call, we can use any registers.
+  X87Register base = ToX87Register(instr->left());
+  ExternalReference one_half = ExternalReference::address_of_one_half();
+
+  if (exponent_type.IsSmi()) {
+    Register exponent = ToRegister(instr->right());
+    X87LoadForUsage(base);
+    __ SmiUntag(exponent);
+    __ push(exponent);
+    __ fild_s(MemOperand(esp, 0));
+    __ pop(exponent);
+  } else if (exponent_type.IsTagged()) {
+    Register exponent = ToRegister(instr->right());
+    Register temp = exponent.is(ecx) ? eax : ecx;
+    Label no_deopt, done;
+    X87LoadForUsage(base);
+    __ JumpIfSmi(exponent, &no_deopt);
+    __ CmpObjectType(exponent, HEAP_NUMBER_TYPE, temp);
+    DeoptimizeIf(not_equal, instr);
+    // Heap number (double).
+    __ fld_d(FieldOperand(exponent, HeapNumber::kValueOffset));
+    __ jmp(&done);
+    // SMI
+    __ bind(&no_deopt);
+    __ SmiUntag(exponent);
+    __ push(exponent);
+    __ fild_s(MemOperand(esp, 0));
+    __ pop(exponent);
+    __ bind(&done);
+  } else if (exponent_type.IsInteger32()) {
+    Register exponent = ToRegister(instr->right());
+    X87LoadForUsage(base);
+    __ push(exponent);
+    __ fild_s(MemOperand(esp, 0));
+    __ pop(exponent);
+  } else {
+    DCHECK(exponent_type.IsDouble());
+    X87Register exponent_double = ToX87Register(instr->right());
+    X87LoadForUsage(base, exponent_double);
+  }
+
+  // FP data stack {base, exponent(TOS)}.
+  // Handle (exponent==+-0.5 && base == -0).
+  Label not_plus_0;
+  __ fld(0);
+  __ fabs();
+  X87Fld(Operand::StaticVariable(one_half), kX87DoubleOperand);
+  __ FCmp();
+  __ j(parity_even, &not_plus_0, Label::kNear);  // NaN.
+  __ j(not_equal, &not_plus_0, Label::kNear);
+  __ fldz();
+  // FP data stack {base, exponent(TOS), zero}.
+  __ faddp(2);
+  __ bind(&not_plus_0);
+
+  {
+    __ PrepareCallCFunction(4, eax);
+    __ fstp_d(MemOperand(esp, kDoubleSize));  // Exponent value.
+    __ fstp_d(MemOperand(esp, 0));            // Base value.
+    X87PrepareToWrite(result);
+    __ CallCFunction(ExternalReference::power_double_double_function(isolate()),
+                     4);
+    // Return value is in st(0) on ia32.
+    X87CommitWrite(result);
+  }
 }
 
 
 void LCodeGen::DoMathLog(LMathLog* instr) {
-  UNIMPLEMENTED();
+  DCHECK(instr->value()->Equals(instr->result()));
+  X87Register input_reg = ToX87Register(instr->value());
+  X87Fxch(input_reg);
+
+  Label positive, done, zero, nan_result;
+  __ fldz();
+  __ fld(1);
+  __ FCmp();
+  __ j(below, &nan_result, Label::kNear);
+  __ j(equal, &zero, Label::kNear);
+  // Positive input.
+  // {input, ln2}.
+  __ fldln2();
+  // {ln2, input}.
+  __ fxch();
+  // {result}.
+  __ fyl2x();
+  __ jmp(&done, Label::kNear);
+
+  __ bind(&nan_result);
+  ExternalReference nan =
+      ExternalReference::address_of_canonical_non_hole_nan();
+  X87PrepareToWrite(input_reg);
+  __ fld_d(Operand::StaticVariable(nan));
+  X87CommitWrite(input_reg);
+  __ jmp(&done, Label::kNear);
+
+  __ bind(&zero);
+  ExternalReference ninf = ExternalReference::address_of_negative_infinity();
+  X87PrepareToWrite(input_reg);
+  __ fld_d(Operand::StaticVariable(ninf));
+  X87CommitWrite(input_reg);
+
+  __ bind(&done);
 }
 
 
 void LCodeGen::DoMathClz32(LMathClz32* instr) {
-  UNIMPLEMENTED();
+  Register input = ToRegister(instr->value());
+  Register result = ToRegister(instr->result());
+  Label not_zero_input;
+  __ bsr(result, input);
+
+  __ j(not_zero, &not_zero_input);
+  __ Move(result, Immediate(63));  // 63^31 == 32
+
+  __ bind(&not_zero_input);
+  __ xor_(result, Immediate(31));  // for x in [0..31], 31^x == 31-x.
 }
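A host-side check of the bsr/xor identity used above: for nonzero x, clz32(x) == 31 - bsr(x) == 31 ^ bsr(x) since bsr(x) is in [0, 31]; bsr leaves its destination undefined for x == 0, so the code forces 63, and 63 ^ 31 == 32.

#include <cassert>

int Clz32(unsigned x) {
  int bsr = -1;
  for (int i = 31; i >= 0; --i) {
    if (x & (1u << i)) { bsr = i; break; }
  }
  int result = (bsr < 0) ? 63 : bsr;  // mirrors Move(result, Immediate(63))
  return result ^ 31;
}

int main() {
  assert(Clz32(0) == 32);
  assert(Clz32(1) == 31);
  assert(Clz32(0x80000000u) == 0);
}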
 
 
 void LCodeGen::DoMathExp(LMathExp* instr) {
-  UNIMPLEMENTED();
+  X87Register input = ToX87Register(instr->value());
+  X87Register result_reg = ToX87Register(instr->result());
+  Register temp_result = ToRegister(instr->temp1());
+  Register temp = ToRegister(instr->temp2());
+  Label slow, done, smi, finish;
+  DCHECK(result_reg.is(input));
+
+  // Store input into Heap number and call runtime function kMathExpRT.
+  if (FLAG_inline_new) {
+    __ AllocateHeapNumber(temp_result, temp, no_reg, &slow);
+    __ jmp(&done, Label::kNear);
+  }
+
+  // Slow case: Call the runtime system to do the number allocation.
+  __ bind(&slow);
+  {
+    // TODO(3095996): Put a valid pointer value in the stack slot where the
+    // result register is stored, as this register is in the pointer map, but
+    // contains an integer value.
+    __ Move(temp_result, Immediate(0));
+
+    // Preserve the value of all registers.
+    PushSafepointRegistersScope scope(this);
+
+    __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
+    __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
+    RecordSafepointWithRegisters(instr->pointer_map(), 0,
+                                 Safepoint::kNoLazyDeopt);
+    __ StoreToSafepointRegisterSlot(temp_result, eax);
+  }
+  __ bind(&done);
+  X87LoadForUsage(input);
+  __ fstp_d(FieldOperand(temp_result, HeapNumber::kValueOffset));
+
+  {
+    // Preserve the value of all registers.
+    PushSafepointRegistersScope scope(this);
+
+    __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
+    __ push(temp_result);
+    __ CallRuntimeSaveDoubles(Runtime::kMathExpRT);
+    RecordSafepointWithRegisters(instr->pointer_map(), 0,
+                                 Safepoint::kNoLazyDeopt);
+    __ StoreToSafepointRegisterSlot(temp_result, eax);
+  }
+  X87PrepareToWrite(result_reg);
+  // The return value of MathExpRT is a Smi or a heap number.
+  __ JumpIfSmi(temp_result, &smi);
+  // Heap number (double).
+  __ fld_d(FieldOperand(temp_result, HeapNumber::kValueOffset));
+  __ jmp(&finish);
+  // SMI
+  __ bind(&smi);
+  __ SmiUntag(temp_result);
+  __ push(temp_result);
+  __ fild_s(MemOperand(esp, 0));
+  __ pop(temp_result);
+  __ bind(&finish);
+  X87CommitWrite(result_reg);
 }
 
 
@@ -3885,7 +4342,7 @@
 
 void LCodeGen::DoCallRuntime(LCallRuntime* instr) {
   DCHECK(ToRegister(instr->context()).is(esi));
-  CallRuntime(instr->function(), instr->arity(), instr);
+  CallRuntime(instr->function(), instr->arity(), instr, instr->save_doubles());
 }
 
 
@@ -3956,7 +4413,7 @@
       __ mov(temp_map, transition);
       __ mov(FieldOperand(object, HeapObject::kMapOffset), temp_map);
       // Update the write barrier for the map field.
-      __ RecordWriteForMap(object, transition, temp_map, temp);
+      __ RecordWriteForMap(object, transition, temp_map, temp, kSaveFPRegs);
     }
   }
 
@@ -3991,10 +4448,7 @@
     Register value = ToRegister(instr->value());
     Register temp = access.IsInobject() ? ToRegister(instr->temp()) : object;
     // Update the write barrier for the object for in-object properties.
-    __ RecordWriteField(write_register,
-                        offset,
-                        value,
-                        temp,
+    __ RecordWriteField(write_register, offset, value, temp, kSaveFPRegs,
                         EMIT_REMEMBERED_SET,
                         instr->hydrogen()->SmiCheckForWriteBarrier(),
                         instr->hydrogen()->PointersToHereCheckForValue());
@@ -4054,8 +4508,7 @@
       instr->base_offset()));
   if (elements_kind == EXTERNAL_FLOAT32_ELEMENTS ||
       elements_kind == FLOAT32_ELEMENTS) {
-    __ fld(0);
-    __ fstp_s(operand);
+    X87Mov(operand, ToX87Register(instr->value()), kX87FloatOperand);
   } else if (elements_kind == EXTERNAL_FLOAT64_ELEMENTS ||
              elements_kind == FLOAT64_ELEMENTS) {
     X87Mov(operand, ToX87Register(instr->value()));
@@ -4191,10 +4644,7 @@
           ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
     // Compute address of modified element and store it into key register.
     __ lea(key, operand);
-    __ RecordWrite(elements,
-                   key,
-                   value,
-                   EMIT_REMEMBERED_SET,
+    __ RecordWrite(elements, key, value, kSaveFPRegs, EMIT_REMEMBERED_SET,
                    check_needed,
                    instr->hydrogen()->PointersToHereCheckForValue());
   }
@@ -4257,7 +4707,7 @@
     // Write barrier.
     DCHECK_NE(instr->temp(), NULL);
     __ RecordWriteForMap(object_reg, to_map, new_map_reg,
-                         ToRegister(instr->temp()));
+                         ToRegister(instr->temp()), kDontSaveFPRegs);
   } else {
     DCHECK(ToRegister(instr->context()).is(esi));
     DCHECK(object_reg.is(eax));
@@ -4527,7 +4977,7 @@
     // The corresponding HChange instructions are added in a phase that does
     // not have easy access to the local context.
     __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
-    __ CallRuntime(Runtime::kAllocateHeapNumber);
+    __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
     RecordSafepointWithRegisters(
         instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
     __ StoreToSafepointRegisterSlot(reg, eax);
@@ -4557,7 +5007,9 @@
 
   // Put the value to the top of stack
   X87Register src = ToX87Register(instr->value());
-  X87LoadForUsage(src);
+  // Don't use X87LoadForUsage here; it is only used by instructions that
+  // clobber fp registers.
+  x87_stack_.Fxch(src);
 
   DeferredNumberTagD* deferred =
       new(zone()) DeferredNumberTagD(this, instr, x87_stack_);
@@ -4568,7 +5020,7 @@
     __ jmp(deferred->entry());
   }
   __ bind(deferred->exit());
-  __ fstp_d(FieldOperand(reg, HeapNumber::kValueOffset));
+  __ fst_d(FieldOperand(reg, HeapNumber::kValueOffset));
 }
 
 
@@ -4586,7 +5038,7 @@
   // The corresponding HChange instructions are added in a phase that does
   // not have easy access to the local context.
   __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
-  __ CallRuntime(Runtime::kAllocateHeapNumber);
+  __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
   RecordSafepointWithRegisters(
       instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
   __ StoreToSafepointRegisterSlot(reg, eax);
@@ -4635,7 +5087,7 @@
   X87PrepareToWrite(res_reg);
   if (mode == NUMBER_CANDIDATE_IS_ANY_TAGGED) {
     // Smi check.
-    __ JumpIfSmi(input_reg, &load_smi, Label::kNear);
+    __ JumpIfSmi(input_reg, &load_smi);
 
     // Heap number map check.
     __ cmp(FieldOperand(input_reg, HeapObject::kMapOffset),
@@ -4644,7 +5096,7 @@
       DeoptimizeIf(not_equal, instr);
     } else {
       Label heap_number, convert;
-      __ j(equal, &heap_number, Label::kNear);
+      __ j(equal, &heap_number);
 
       // Convert undefined (or hole) to NaN.
       __ cmp(input_reg, factory()->undefined_value());
@@ -4726,16 +5178,14 @@
 
     __ bind(&check_false);
     __ cmp(input_reg, factory()->false_value());
-    __ RecordComment("Deferred TaggedToI: cannot truncate");
-    DeoptimizeIf(not_equal, instr);
+    DeoptimizeIf(not_equal, instr, "cannot truncate");
     __ Move(input_reg, Immediate(0));
   } else {
     // TODO(olivf) Converting a number on the fpu is actually quite slow. We
     // should first try a fast conversion and then bailout to this slow case.
     __ cmp(FieldOperand(input_reg, HeapObject::kMapOffset),
            isolate()->factory()->heap_number_map());
-    __ RecordComment("Deferred TaggedToI: not a heap number");
-    DeoptimizeIf(not_equal, instr);
+    DeoptimizeIf(not_equal, instr, "not a heap number");
 
     __ sub(esp, Immediate(kPointerSize));
     __ fld_d(FieldOperand(input_reg, HeapNumber::kValueOffset));
@@ -4751,14 +5201,12 @@
 
       __ j(equal, &no_precision_lost, Label::kNear);
       __ fstp(0);
-      __ RecordComment("Deferred TaggedToI: lost precision");
-      DeoptimizeIf(no_condition, instr);
+      DeoptimizeIf(no_condition, instr, "lost precision");
       __ bind(&no_precision_lost);
 
       __ j(parity_odd, &not_nan);
       __ fstp(0);
-      __ RecordComment("Deferred TaggedToI: NaN");
-      DeoptimizeIf(no_condition, instr);
+      DeoptimizeIf(no_condition, instr, "NaN");
       __ bind(&not_nan);
 
       __ test(input_reg, Operand(input_reg));
@@ -4773,17 +5221,14 @@
       __ fstp_s(Operand(esp, 0));
       __ pop(input_reg);
       __ test(input_reg, Operand(input_reg));
-      __ RecordComment("Deferred TaggedToI: minus zero");
-      DeoptimizeIf(not_zero, instr);
+      DeoptimizeIf(not_zero, instr, "minus zero");
     } else {
       __ fist_s(MemOperand(esp, 0));
       __ fild_s(MemOperand(esp, 0));
       __ FCmp();
       __ pop(input_reg);
-      __ RecordComment("Deferred TaggedToI: lost precision");
-      DeoptimizeIf(not_equal, instr);
-      __ RecordComment("Deferred TaggedToI: NaN");
-      DeoptimizeIf(parity_even, instr);
+      DeoptimizeIf(not_equal, instr, "lost precision");
+      DeoptimizeIf(parity_even, instr, "NaN");
     }
   }
 }
@@ -4973,7 +5418,7 @@
     PushSafepointRegistersScope scope(this);
     __ push(object);
     __ xor_(esi, esi);
-    __ CallRuntime(Runtime::kTryMigrateInstance);
+    __ CallRuntimeSaveDoubles(Runtime::kTryMigrateInstance);
     RecordSafepointWithRegisters(
         instr->pointer_map(), 1, Safepoint::kNoLazyDeopt);
 
@@ -5043,7 +5488,10 @@
 
 
 void LCodeGen::DoClampDToUint8(LClampDToUint8* instr) {
-  UNREACHABLE();
+  X87Register value_reg = ToX87Register(instr->unclamped());
+  Register result_reg = ToRegister(instr->result());
+  X87Fxch(value_reg);
+  __ ClampTOSToUint8(result_reg);
 }
 
 
@@ -5177,12 +5625,32 @@
 
 
 void LCodeGen::DoDoubleBits(LDoubleBits* instr) {
-  UNREACHABLE();
+  X87Register value_reg = ToX87Register(instr->value());
+  Register result_reg = ToRegister(instr->result());
+  X87Fxch(value_reg);
+  __ sub(esp, Immediate(kDoubleSize));
+  __ fst_d(Operand(esp, 0));
+  if (instr->hydrogen()->bits() == HDoubleBits::HIGH) {
+    __ mov(result_reg, Operand(esp, kPointerSize));
+  } else {
+    __ mov(result_reg, Operand(esp, 0));
+  }
+  __ add(esp, Immediate(kDoubleSize));
 }
 
 
 void LCodeGen::DoConstructDouble(LConstructDouble* instr) {
-  UNREACHABLE();
+  Register hi_reg = ToRegister(instr->hi());
+  Register lo_reg = ToRegister(instr->lo());
+  X87Register result_reg = ToX87Register(instr->result());
+  // Follow the pattern below to write an x87 fp register.
+  X87PrepareToWrite(result_reg);
+  __ sub(esp, Immediate(kDoubleSize));
+  __ mov(Operand(esp, 0), lo_reg);
+  __ mov(Operand(esp, kPointerSize), hi_reg);
+  __ fld_d(Operand(esp, 0));
+  __ add(esp, Immediate(kDoubleSize));
+  X87CommitWrite(result_reg);
 }
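
DoDoubleBits and DoConstructDouble above move a double through the stack one 32-bit word at a time; on little-endian ia32 the low word lives at offset 0 and the high word at offset kPointerSize. A standalone sketch of the same bit-level round trip (assuming a little-endian host):

    #include <cassert>
    #include <cstdint>
    #include <cstring>

    int main() {
      // Construct 1.0 from its two 32-bit halves (DoConstructDouble).
      uint32_t words[2] = {0x00000000u, 0x3FF00000u};  // {lo, hi} of 1.0.
      double d;
      std::memcpy(&d, words, sizeof(d));
      assert(d == 1.0);
      // Extract the high half again (DoDoubleBits with HDoubleBits::HIGH).
      uint32_t hi;
      std::memcpy(&hi, reinterpret_cast<const char*>(&d) + 4, sizeof(hi));
      assert(hi == 0x3FF00000u);
      return 0;
    }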
 
 
@@ -5546,7 +6014,7 @@
 void LCodeGen::DoDeferredStackCheck(LStackCheck* instr) {
   PushSafepointRegistersScope scope(this);
   __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
-  __ CallRuntime(Runtime::kStackGuard);
+  __ CallRuntimeSaveDoubles(Runtime::kStackGuard);
   RecordSafepointWithLazyDeopt(
       instr, RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
   DCHECK(instr->HasEnvironment());
@@ -5693,7 +6161,7 @@
   __ push(object);
   __ push(index);
   __ xor_(esi, esi);
-  __ CallRuntime(Runtime::kLoadMutableDouble);
+  __ CallRuntimeSaveDoubles(Runtime::kLoadMutableDouble);
   RecordSafepointWithRegisters(
       instr->pointer_map(), 2, Safepoint::kNoLazyDeopt);
   __ StoreToSafepointRegisterSlot(object, eax);
diff --git a/src/x87/lithium-codegen-x87.h b/src/x87/lithium-codegen-x87.h
index 080a468..1a4ca29 100644
--- a/src/x87/lithium-codegen-x87.h
+++ b/src/x87/lithium-codegen-x87.h
@@ -5,6 +5,7 @@
 #ifndef V8_X87_LITHIUM_CODEGEN_X87_H_
 #define V8_X87_LITHIUM_CODEGEN_X87_H_
 
+#include <map>
 #include "src/x87/lithium-x87.h"
 
 #include "src/base/logging.h"
@@ -84,6 +85,8 @@
       X87OperandType operand = kX87DoubleOperand);
   void X87Mov(Operand src, X87Register reg,
       X87OperandType operand = kX87DoubleOperand);
+  void X87Mov(X87Register reg, X87Register src,
+              X87OperandType operand = kX87DoubleOperand);
 
   void X87PrepareBinaryOp(
       X87Register left, X87Register right, X87Register result);
@@ -198,9 +201,8 @@
                        LInstruction* instr,
                        SafepointMode safepoint_mode);
 
-  void CallRuntime(const Runtime::Function* fun,
-                   int argc,
-                   LInstruction* instr);
+  void CallRuntime(const Runtime::Function* fun, int argc, LInstruction* instr,
+                   SaveFPRegsMode save_doubles = kDontSaveFPRegs);
 
   void CallRuntime(Runtime::FunctionId id,
                    int argc,
@@ -234,10 +236,10 @@
 
   void RegisterEnvironmentForDeoptimization(LEnvironment* environment,
                                             Safepoint::DeoptMode mode);
-  void DeoptimizeIf(Condition cc, LInstruction* instr, const char* reason,
+  void DeoptimizeIf(Condition cc, LInstruction* instr, const char* detail,
                     Deoptimizer::BailoutType bailout_type);
   void DeoptimizeIf(Condition cc, LInstruction* instr,
-                    const char* reason = NULL);
+                    const char* detail = NULL);
 
   bool DeoptEveryNTimes() {
     return FLAG_deopt_every_n_times != 0 && !info()->IsStub();
@@ -376,7 +378,7 @@
   int osr_pc_offset_;
   bool frame_is_built_;
 
-  class X87Stack {
+  class X87Stack : public ZoneObject {
    public:
     explicit X87Stack(MacroAssembler* masm)
         : stack_depth_(0), is_mutable_(true), masm_(masm) { }
@@ -393,14 +395,23 @@
       }
       return true;
     }
+    X87Stack& operator=(const X87Stack& other) {
+      stack_depth_ = other.stack_depth_;
+      for (int i = 0; i < stack_depth_; i++) {
+        stack_[i] = other.stack_[i];
+      }
+      return *this;
+    }
     bool Contains(X87Register reg);
     void Fxch(X87Register reg, int other_slot = 0);
     void Free(X87Register reg);
     void PrepareToWrite(X87Register reg);
     void CommitWrite(X87Register reg);
     void FlushIfNecessary(LInstruction* instr, LCodeGen* cgen);
-    void LeavingBlock(int current_block_id, LGoto* goto_instr);
+    void LeavingBlock(int current_block_id, LGoto* goto_instr, LCodeGen* cgen);
     int depth() const { return stack_depth_; }
+    int GetLayout();
+    int st(X87Register reg) { return st2idx(ArrayIndex(reg)); }
     void pop() {
       DCHECK(is_mutable_);
       stack_depth_--;
@@ -425,6 +436,9 @@
     MacroAssembler* masm_;
   };
   X87Stack x87_stack_;
+  // Maps block_id to X87Stack*.
+  typedef std::map<int, X87Stack*> X87StackMap;
+  X87StackMap x87_stack_map_;
 
   // Builder that keeps track of safepoints in the code. The table
   // itself is emitted at the end of the generated code.
@@ -458,6 +472,7 @@
   friend class LDeferredCode;
   friend class LEnvironment;
   friend class SafepointGenerator;
+  friend class X87Stack;
   DISALLOW_COPY_AND_ASSIGN(LCodeGen);
 };
 
diff --git a/src/x87/lithium-gap-resolver-x87.cc b/src/x87/lithium-gap-resolver-x87.cc
index b94e34f..6a64275 100644
--- a/src/x87/lithium-gap-resolver-x87.cc
+++ b/src/x87/lithium-gap-resolver-x87.cc
@@ -317,10 +317,15 @@
   } else if (source->IsDoubleRegister()) {
     // load from the register onto the stack, store in destination, which must
     // be a double stack slot in the non-SSE2 case.
-    DCHECK(destination->IsDoubleStackSlot());
-    Operand dst = cgen_->ToOperand(destination);
-    X87Register src = cgen_->ToX87Register(source);
-    cgen_->X87Mov(dst, src);
+    if (destination->IsDoubleStackSlot()) {
+      Operand dst = cgen_->ToOperand(destination);
+      X87Register src = cgen_->ToX87Register(source);
+      cgen_->X87Mov(dst, src);
+    } else {
+      X87Register dst = cgen_->ToX87Register(destination);
+      X87Register src = cgen_->ToX87Register(source);
+      cgen_->X87Mov(dst, src);
+    }
   } else if (source->IsDoubleStackSlot()) {
     // load from the stack slot on top of the floating point stack, and then
     // store in destination. If destination is a double register, then it
diff --git a/src/x87/lithium-x87.cc b/src/x87/lithium-x87.cc
index 02037c3..993f5ad 100644
--- a/src/x87/lithium-x87.cc
+++ b/src/x87/lithium-x87.cc
@@ -484,6 +484,12 @@
 }
 
 
+LUnallocated* LChunkBuilder::ToUnallocated(X87Register reg) {
+  return new (zone()) LUnallocated(LUnallocated::FIXED_DOUBLE_REGISTER,
+                                   X87Register::ToAllocationIndex(reg));
+}
+
+
 LOperand* LChunkBuilder::UseFixed(HValue* value, Register fixed_register) {
   return Use(value, ToUnallocated(fixed_register));
 }
@@ -616,6 +622,12 @@
 }
 
 
+LInstruction* LChunkBuilder::DefineFixed(LTemplateResultInstruction<1>* instr,
+                                         X87Register reg) {
+  return Define(instr, ToUnallocated(reg));
+}
+
+
 LInstruction* LChunkBuilder::AssignEnvironment(LInstruction* instr) {
   HEnvironment* hydrogen_env = current_block_->last_environment();
   int argument_index_accumulator = 0;
@@ -872,6 +884,14 @@
     if (current->IsControlInstruction() &&
         HControlInstruction::cast(current)->KnownSuccessorBlock(&successor) &&
         successor != NULL) {
+      // Always insert an fpu register barrier here when the branch is
+      // optimized into a direct goto.
+      // TODO(weiliang): find a better solution.
+      if (!current->IsGoto()) {
+        LClobberDoubles* clobber = new (zone()) LClobberDoubles(isolate());
+        clobber->set_hydrogen_value(current);
+        chunk_->AddInstruction(clobber, current_block_);
+      }
       instr = new(zone()) LGoto(successor);
     } else {
       instr = current->CompileToLithium(this);
@@ -931,7 +951,8 @@
   if (FLAG_stress_environments && !instr->HasEnvironment()) {
     instr = AssignEnvironment(instr);
   }
-  if (instr->IsGoto() && LGoto::cast(instr)->jumps_to_join()) {
+  if (instr->IsGoto() &&
+      (LGoto::cast(instr)->jumps_to_join() || next_block_->is_osr_entry())) {
     // TODO(olivf) Since phis of spilled values are joined as registers
     // (not in the stack slot), we need to allow the goto gaps to keep one
     // x87 register alive. To ensure all other values are still spilled, we
@@ -979,7 +1000,9 @@
   bool easy_case = !r.IsTagged() || type.IsBoolean() || type.IsSmi() ||
       type.IsJSArray() || type.IsHeapNumber() || type.IsString();
   LOperand* temp = !easy_case && expected.NeedsMap() ? TempRegister() : NULL;
-  LInstruction* branch = new(zone()) LBranch(UseRegister(value), temp);
+  LInstruction* branch =
+      temp != NULL ? new (zone()) LBranch(UseRegister(value), temp)
+                   : new (zone()) LBranch(UseRegisterAtStart(value), temp);
   if (!easy_case &&
       ((!expected.Contains(ToBooleanStub::SMI) && expected.NeedsMap()) ||
        !expected.IsGeneric())) {
@@ -1182,16 +1205,16 @@
 
 
 LInstruction* LChunkBuilder::DoMathRound(HUnaryMathOperation* instr) {
-  // Crankshaft is turned off for nosse2.
-  UNREACHABLE();
-  return NULL;
+  LOperand* input = UseRegisterAtStart(instr->value());
+  LInstruction* result = DefineAsRegister(new (zone()) LMathRound(input));
+  return AssignEnvironment(result);
 }
 
 
 LInstruction* LChunkBuilder::DoMathFround(HUnaryMathOperation* instr) {
-  LOperand* input = UseRegisterAtStart(instr->value());
+  LOperand* input = UseRegister(instr->value());
   LMathFround* result = new (zone()) LMathFround(input);
-  return AssignEnvironment(DefineAsRegister(result));
+  return DefineSameAsFirst(result);
 }
 
 
@@ -1225,11 +1248,11 @@
 LInstruction* LChunkBuilder::DoMathExp(HUnaryMathOperation* instr) {
   DCHECK(instr->representation().IsDouble());
   DCHECK(instr->value()->representation().IsDouble());
-  LOperand* value = UseTempRegister(instr->value());
-  LOperand* temp1 = TempRegister();
-  LOperand* temp2 = TempRegister();
+  LOperand* value = UseRegisterAtStart(instr->value());
+  LOperand* temp1 = FixedTemp(ecx);
+  LOperand* temp2 = FixedTemp(edx);
   LMathExp* result = new(zone()) LMathExp(value, temp1, temp2);
-  return DefineAsRegister(result);
+  return MarkAsCall(DefineSameAsFirst(result), instr);
 }
 
 
@@ -1242,8 +1265,7 @@
 
 LInstruction* LChunkBuilder::DoMathPowHalf(HUnaryMathOperation* instr) {
   LOperand* input = UseRegisterAtStart(instr->value());
-  LOperand* temp = TempRegister();
-  LMathPowHalf* result = new(zone()) LMathPowHalf(input, temp);
+  LMathPowHalf* result = new (zone()) LMathPowHalf(input);
   return DefineSameAsFirst(result);
 }
 
@@ -1615,6 +1637,8 @@
 LInstruction* LChunkBuilder::DoMathMinMax(HMathMinMax* instr) {
   LOperand* left = NULL;
   LOperand* right = NULL;
+  LOperand* scratch = TempRegister();
+
   if (instr->representation().IsSmiOrInteger32()) {
     DCHECK(instr->left()->representation().Equals(instr->representation()));
     DCHECK(instr->right()->representation().Equals(instr->representation()));
@@ -1627,15 +1651,19 @@
     left = UseRegisterAtStart(instr->left());
     right = UseRegisterAtStart(instr->right());
   }
-  LMathMinMax* minmax = new(zone()) LMathMinMax(left, right);
+  LMathMinMax* minmax = new (zone()) LMathMinMax(left, right, scratch);
   return DefineSameAsFirst(minmax);
 }
 
 
 LInstruction* LChunkBuilder::DoPower(HPower* instr) {
-  // Crankshaft is turned off for nosse2.
-  UNREACHABLE();
-  return NULL;
+  // Unlike ia32, we don't have a MathPowStub and call the C function directly.
+  DCHECK(instr->representation().IsDouble());
+  DCHECK(instr->left()->representation().IsDouble());
+  LOperand* left = UseRegisterAtStart(instr->left());
+  LOperand* right = UseRegisterAtStart(instr->right());
+  LPower* result = new (zone()) LPower(left, right);
+  return MarkAsCall(DefineSameAsFirst(result), instr);
 }
 
 
@@ -1697,9 +1725,8 @@
 
 LInstruction* LChunkBuilder::DoCompareMinusZeroAndBranch(
     HCompareMinusZeroAndBranch* instr) {
-  LOperand* value = UseRegister(instr->value());
-  LOperand* scratch = TempRegister();
-  return new(zone()) LCompareMinusZeroAndBranch(value, scratch);
+  LOperand* value = UseRegisterAtStart(instr->value());
+  return new (zone()) LCompareMinusZeroAndBranch(value);
 }
 
 
@@ -2022,8 +2049,8 @@
   HValue* value = instr->value();
   Representation input_rep = value->representation();
   if (input_rep.IsDouble()) {
-    UNREACHABLE();
-    return NULL;
+    LOperand* reg = UseRegister(value);
+    return DefineFixed(new (zone()) LClampDToUint8(reg), eax);
   } else if (input_rep.IsInteger32()) {
     LOperand* reg = UseFixed(value, eax);
     return DefineFixed(new(zone()) LClampIToUint8(reg), eax);
diff --git a/src/x87/lithium-x87.h b/src/x87/lithium-x87.h
index 233eaf2..e5cced2 100644
--- a/src/x87/lithium-x87.h
+++ b/src/x87/lithium-x87.h
@@ -413,6 +413,7 @@
   }
 
   bool jumps_to_join() const { return block_->predecessors()->length() > 1; }
+  HBasicBlock* block() const { return block_; }
 
  private:
   HBasicBlock* block_;
@@ -984,15 +985,11 @@
 };
 
 
-class LMathPowHalf FINAL : public LTemplateInstruction<1, 1, 1> {
+class LMathPowHalf FINAL : public LTemplateInstruction<1, 1, 0> {
  public:
-  LMathPowHalf(LOperand* value, LOperand* temp) {
-    inputs_[0] = value;
-    temps_[0] = temp;
-  }
+  explicit LMathPowHalf(LOperand* value) { inputs_[0] = value; }
 
   LOperand* value() { return inputs_[0]; }
-  LOperand* temp() { return temps_[0]; }
 
   DECLARE_CONCRETE_INSTRUCTION(MathPowHalf, "math-pow-half")
 };
@@ -1025,15 +1022,11 @@
 };
 
 
-class LCompareMinusZeroAndBranch FINAL : public LControlInstruction<1, 1> {
+class LCompareMinusZeroAndBranch FINAL : public LControlInstruction<1, 0> {
  public:
-  LCompareMinusZeroAndBranch(LOperand* value, LOperand* temp) {
-    inputs_[0] = value;
-    temps_[0] = temp;
-  }
+  explicit LCompareMinusZeroAndBranch(LOperand* value) { inputs_[0] = value; }
 
   LOperand* value() { return inputs_[0]; }
-  LOperand* temp() { return temps_[0]; }
 
   DECLARE_CONCRETE_INSTRUCTION(CompareMinusZeroAndBranch,
                                "cmp-minus-zero-and-branch")
@@ -1508,15 +1501,17 @@
 };
 
 
-class LMathMinMax FINAL : public LTemplateInstruction<1, 2, 0> {
+class LMathMinMax FINAL : public LTemplateInstruction<1, 2, 1> {
  public:
-  LMathMinMax(LOperand* left, LOperand* right) {
+  LMathMinMax(LOperand* left, LOperand* right, LOperand* temp) {
     inputs_[0] = left;
     inputs_[1] = right;
+    temps_[0] = temp;
   }
 
   LOperand* left() { return inputs_[0]; }
   LOperand* right() { return inputs_[1]; }
+  LOperand* temp() { return temps_[0]; }
 
   DECLARE_CONCRETE_INSTRUCTION(MathMinMax, "math-min-max")
   DECLARE_HYDROGEN_ACCESSOR(MathMinMax)
@@ -2037,11 +2032,12 @@
   DECLARE_HYDROGEN_ACCESSOR(CallRuntime)
 
   virtual bool ClobbersDoubleRegisters(Isolate* isolate) const OVERRIDE {
-    return true;
+    return save_doubles() == kDontSaveFPRegs;
   }
 
   const Runtime::Function* function() const { return hydrogen()->function(); }
   int arity() const { return hydrogen()->argument_count(); }
+  SaveFPRegsMode save_doubles() const { return hydrogen()->save_doubles(); }
 };
 
 
@@ -2881,6 +2877,8 @@
   LInstruction* DefineSameAsFirst(LTemplateResultInstruction<1>* instr);
   LInstruction* DefineFixed(LTemplateResultInstruction<1>* instr,
                             Register reg);
+  LInstruction* DefineFixed(LTemplateResultInstruction<1>* instr,
+                            X87Register reg);
   LInstruction* DefineX87TOS(LTemplateResultInstruction<1>* instr);
   // Assigns an environment to an instruction.  An instruction which can
   // deoptimize must have an environment.
diff --git a/src/x87/macro-assembler-x87.cc b/src/x87/macro-assembler-x87.cc
index 66f5703..80ce32c 100644
--- a/src/x87/macro-assembler-x87.cc
+++ b/src/x87/macro-assembler-x87.cc
@@ -148,8 +148,7 @@
 
 void MacroAssembler::RememberedSetHelper(
     Register object,  // Only used for debug checks.
-    Register addr,
-    Register scratch,
+    Register addr, Register scratch, SaveFPRegsMode save_fp,
     MacroAssembler::RememberedSetFinalAction and_then) {
   Label done;
   if (emit_debug_code()) {
@@ -180,7 +179,7 @@
     DCHECK(and_then == kFallThroughAtEnd);
     j(equal, &done, Label::kNear);
   }
-  StoreBufferOverflowStub store_buffer_overflow(isolate(), kDontSaveFPRegs);
+  StoreBufferOverflowStub store_buffer_overflow(isolate(), save_fp);
   CallStub(&store_buffer_overflow);
   if (and_then == kReturnAtEnd) {
     ret(0);
@@ -191,6 +190,31 @@
 }
 
 
+void MacroAssembler::ClampTOSToUint8(Register result_reg) {
+  Label done, conv_failure;
+  sub(esp, Immediate(kPointerSize));
+  fnclex();
+  fist_s(Operand(esp, 0));
+  pop(result_reg);
+  X87CheckIA();
+  j(equal, &conv_failure, Label::kNear);
+  test(result_reg, Immediate(0xFFFFFF00));
+  j(zero, &done, Label::kNear);
+  setcc(sign, result_reg);
+  sub(result_reg, Immediate(1));
+  and_(result_reg, Immediate(255));
+  jmp(&done, Label::kNear);
+  bind(&conv_failure);
+  fnclex();
+  fldz();
+  fld(1);
+  FCmp();
+  setcc(below, result_reg);  // 1 if negative, 0 if positive.
+  dec_b(result_reg);         // 0 if negative, 255 if positive.
+  bind(&done);
+}
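
The instruction sequence after the in-range test encodes a branch-free clamp: setcc(sign) yields 1 for a negative value, the subtract turns that into 0 or 0xFFFFFFFF, and the mask leaves 0 or 255. A sketch of the same logic in plain C++ (the conversion-failure path for NaN and overflow is omitted):

    #include <cassert>
    #include <cstdint>

    static uint8_t ClampToUint8(int32_t v) {
      if ((v & 0xFFFFFF00) == 0) return static_cast<uint8_t>(v);  // In range.
      uint32_t is_negative = v < 0 ? 1u : 0u;           // setcc(sign, ...)
      return static_cast<uint8_t>((is_negative - 1u) & 255u);  // sub 1; and 255
    }

    int main() {
      assert(ClampToUint8(-5) == 0);
      assert(ClampToUint8(300) == 255);
      assert(ClampToUint8(128) == 128);
      return 0;
    }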
+
+
 void MacroAssembler::ClampUint8(Register reg) {
   Label done;
   test(reg, Immediate(0xFFFFFF00));
@@ -270,11 +294,8 @@
 
 
 void MacroAssembler::RecordWriteArray(
-    Register object,
-    Register value,
-    Register index,
-    RememberedSetAction remembered_set_action,
-    SmiCheck smi_check,
+    Register object, Register value, Register index, SaveFPRegsMode save_fp,
+    RememberedSetAction remembered_set_action, SmiCheck smi_check,
     PointersToHereCheck pointers_to_here_check_for_value) {
   // First, check if a write barrier is even needed. The tests below
   // catch stores of Smis.
@@ -294,8 +315,8 @@
   lea(dst, Operand(object, index, times_half_pointer_size,
                    FixedArray::kHeaderSize - kHeapObjectTag));
 
-  RecordWrite(object, dst, value, remembered_set_action, OMIT_SMI_CHECK,
-              pointers_to_here_check_for_value);
+  RecordWrite(object, dst, value, save_fp, remembered_set_action,
+              OMIT_SMI_CHECK, pointers_to_here_check_for_value);
 
   bind(&done);
 
@@ -309,13 +330,9 @@
 
 
 void MacroAssembler::RecordWriteField(
-    Register object,
-    int offset,
-    Register value,
-    Register dst,
-    RememberedSetAction remembered_set_action,
-    SmiCheck smi_check,
-    PointersToHereCheck pointers_to_here_check_for_value) {
+    Register object, int offset, Register value, Register dst,
+    SaveFPRegsMode save_fp, RememberedSetAction remembered_set_action,
+    SmiCheck smi_check, PointersToHereCheck pointers_to_here_check_for_value) {
   // First, check if a write barrier is even needed. The tests below
   // catch stores of Smis.
   Label done;
@@ -338,8 +355,8 @@
     bind(&ok);
   }
 
-  RecordWrite(object, dst, value, remembered_set_action, OMIT_SMI_CHECK,
-              pointers_to_here_check_for_value);
+  RecordWrite(object, dst, value, save_fp, remembered_set_action,
+              OMIT_SMI_CHECK, pointers_to_here_check_for_value);
 
   bind(&done);
 
@@ -352,11 +369,9 @@
 }
 
 
-void MacroAssembler::RecordWriteForMap(
-    Register object,
-    Handle<Map> map,
-    Register scratch1,
-    Register scratch2) {
+void MacroAssembler::RecordWriteForMap(Register object, Handle<Map> map,
+                                       Register scratch1, Register scratch2,
+                                       SaveFPRegsMode save_fp) {
   Label done;
 
   Register address = scratch1;
@@ -393,7 +408,8 @@
                       &done,
                       Label::kNear);
 
-  RecordWriteStub stub(isolate(), object, value, address, OMIT_REMEMBERED_SET);
+  RecordWriteStub stub(isolate(), object, value, address, OMIT_REMEMBERED_SET,
+                       save_fp);
   CallStub(&stub);
 
   bind(&done);
@@ -413,11 +429,8 @@
 
 
 void MacroAssembler::RecordWrite(
-    Register object,
-    Register address,
-    Register value,
-    RememberedSetAction remembered_set_action,
-    SmiCheck smi_check,
+    Register object, Register address, Register value, SaveFPRegsMode fp_mode,
+    RememberedSetAction remembered_set_action, SmiCheck smi_check,
     PointersToHereCheck pointers_to_here_check_for_value) {
   DCHECK(!object.is(value));
   DCHECK(!object.is(address));
@@ -461,8 +474,8 @@
                 &done,
                 Label::kNear);
 
-  RecordWriteStub stub(isolate(), object, value, address,
-                       remembered_set_action);
+  RecordWriteStub stub(isolate(), object, value, address, remembered_set_action,
+                       fp_mode);
   CallStub(&stub);
 
   bind(&done);
@@ -707,6 +720,53 @@
 }
 
 
+void MacroAssembler::FXamMinusZero() {
+  fxam();
+  push(eax);
+  fnstsw_ax();
+  and_(eax, Immediate(0x4700));
+  // For minus zero, C3 == 1 && C1 == 1.
+  cmp(eax, Immediate(0x4200));
+  pop(eax);
+  fstp(0);
+}
+
+
+void MacroAssembler::FXamSign() {
+  fxam();
+  push(eax);
+  fnstsw_ax();
+  // For negative values (including -0.0), C1 == 1.
+  and_(eax, Immediate(0x0200));
+  pop(eax);
+  fstp(0);
+}
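
FXamMinusZero and FXamSign read the fxam condition codes out of the FPU status word: C0 is bit 8, C1 (the sign) is bit 9, C2 is bit 10, and C3 is bit 14. Hence 0x4700 selects all four codes, and 0x4200 is the pattern C3=1, C1=1, i.e. the "zero" class with the sign bit set. A sketch that just checks the masks:

    #include <cassert>
    #include <cstdint>

    int main() {
      const uint16_t kC0 = 1u << 8, kC1 = 1u << 9;   // C1 is the sign bit.
      const uint16_t kC2 = 1u << 10, kC3 = 1u << 14;
      assert((kC3 | kC2 | kC1 | kC0) == 0x4700);  // Mask in FXamMinusZero.
      assert((kC3 | kC1) == 0x4200);              // "zero" class, negative.
      assert(kC1 == 0x0200);                      // Mask in FXamSign.
      return 0;
    }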
+
+
+void MacroAssembler::X87CheckIA() {
+  push(eax);
+  fnstsw_ax();
+  // For #IA, IE == 1 && SF == 0.
+  and_(eax, Immediate(0x0041));
+  cmp(eax, Immediate(0x0001));
+  pop(eax);
+}
+
+
+// rc=00B, round to nearest.
+// rc=01B, round down.
+// rc=10B, round up.
+// rc=11B, round toward zero.
+void MacroAssembler::X87SetRC(int rc) {
+  sub(esp, Immediate(kPointerSize));
+  fnstcw(MemOperand(esp, 0));
+  and_(MemOperand(esp, 0), Immediate(0xF3FF));
+  or_(MemOperand(esp, 0), Immediate(rc));
+  fldcw(MemOperand(esp, 0));
+  add(esp, Immediate(kPointerSize));
+}
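
X87SetRC rewrites bits 10-11 of the FPU control word, the rounding-control field, which is why the mask is 0xF3FF; callers are therefore expected to pass rc already shifted into place (an assumption based on the mask, e.g. 0x0C00 for round toward zero). A sketch of the bit manipulation:

    #include <cassert>
    #include <cstdint>

    int main() {
      const uint16_t kRCMask = 0x0C00;              // Bits 10-11: RC field.
      const uint16_t kRoundTowardZero = 0x3 << 10;  // rc=11B, pre-shifted.
      uint16_t cw = 0x037F;                         // x87 default control word.
      cw = static_cast<uint16_t>((cw & 0xF3FF) | kRoundTowardZero);
      assert((cw & kRCMask) == 0x0C00);
      return 0;
    }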
+
+
 void MacroAssembler::AssertNumber(Register object) {
   if (emit_debug_code()) {
     Label ok;
@@ -844,8 +904,17 @@
 }
 
 
-void MacroAssembler::EnterExitFrameEpilogue(int argc) {
-  sub(esp, Immediate(argc * kPointerSize));
+void MacroAssembler::EnterExitFrameEpilogue(int argc, bool save_doubles) {
+  // Optionally save FPU state.
+  if (save_doubles) {
+    // Store the FPU state to the 108-byte (m108byte) save area.
+    int space = 108 + argc * kPointerSize;
+    sub(esp, Immediate(space));
+    const int offset = -2 * kPointerSize;  // entry fp + code object.
+    fnsave(MemOperand(ebp, offset - 108));
+  } else {
+    sub(esp, Immediate(argc * kPointerSize));
+  }
 
   // Get the required frame alignment for the OS.
   const int kFrameAlignment = base::OS::ActivationFrameAlignment();
@@ -859,7 +928,7 @@
 }
 
 
-void MacroAssembler::EnterExitFrame() {
+void MacroAssembler::EnterExitFrame(bool save_doubles) {
   EnterExitFramePrologue();
 
   // Set up argc and argv in callee-saved registers.
@@ -868,17 +937,23 @@
   lea(esi, Operand(ebp, eax, times_4, offset));
 
   // Reserve space for argc, argv and isolate.
-  EnterExitFrameEpilogue(3);
+  EnterExitFrameEpilogue(3, save_doubles);
 }
 
 
 void MacroAssembler::EnterApiExitFrame(int argc) {
   EnterExitFramePrologue();
-  EnterExitFrameEpilogue(argc);
+  EnterExitFrameEpilogue(argc, false);
 }
 
 
-void MacroAssembler::LeaveExitFrame() {
+void MacroAssembler::LeaveExitFrame(bool save_doubles) {
+  // Optionally restore FPU state.
+  if (save_doubles) {
+    const int offset = -2 * kPointerSize;
+    frstor(MemOperand(ebp, offset - 108));
+  }
+
   // Get the return address from the stack and restore the frame pointer.
   mov(ecx, Operand(ebp, 1 * kPointerSize));
   mov(ebp, Operand(ebp, 0 * kPointerSize));
@@ -1908,8 +1983,8 @@
 }
 
 
-void MacroAssembler::CallRuntime(const Runtime::Function* f,
-                                 int num_arguments) {
+void MacroAssembler::CallRuntime(const Runtime::Function* f, int num_arguments,
+                                 SaveFPRegsMode save_doubles) {
   // If the expected number of arguments of the runtime function is
   // constant, we check that the actual number of arguments match the
   // expectation.
@@ -1921,7 +1996,7 @@
   // smarter.
   Move(eax, Immediate(num_arguments));
   mov(ebx, Immediate(ExternalReference(f, isolate())));
-  CEntryStub ces(isolate(), 1);
+  CEntryStub ces(isolate(), 1, save_doubles);
   CallStub(&ces);
 }
 
@@ -2794,9 +2869,9 @@
 }
 
 
-void MacroAssembler::JumpIfNotUniqueName(Operand operand,
-                                         Label* not_unique_name,
-                                         Label::Distance distance) {
+void MacroAssembler::JumpIfNotUniqueNameInstanceType(Operand operand,
+                                                     Label* not_unique_name,
+                                                     Label::Distance distance) {
   STATIC_ASSERT(kInternalizedTag == 0 && kStringTag == 0);
   Label succeed;
   test(operand, Immediate(kIsNotStringMask | kIsNotInternalizedMask));
diff --git a/src/x87/macro-assembler-x87.h b/src/x87/macro-assembler-x87.h
index 1fdca3c..322c24a 100644
--- a/src/x87/macro-assembler-x87.h
+++ b/src/x87/macro-assembler-x87.h
@@ -74,8 +74,8 @@
   // at the address pointed to by the addr register.  Only works if addr is not
   // in new space.
   void RememberedSetHelper(Register object,  // Used for debug code.
-                           Register addr,
-                           Register scratch,
+                           Register addr, Register scratch,
+                           SaveFPRegsMode save_fp,
                            RememberedSetFinalAction and_then);
 
   void CheckPageFlag(Register object,
@@ -146,10 +146,8 @@
   // The offset is the offset from the start of the object, not the offset from
   // the tagged HeapObject pointer.  For use with FieldOperand(reg, off).
   void RecordWriteField(
-      Register object,
-      int offset,
-      Register value,
-      Register scratch,
+      Register object, int offset, Register value, Register scratch,
+      SaveFPRegsMode save_fp,
       RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
       SmiCheck smi_check = INLINE_SMI_CHECK,
       PointersToHereCheck pointers_to_here_check_for_value =
@@ -158,20 +156,14 @@
   // As above, but the offset has the tag presubtracted.  For use with
   // Operand(reg, off).
   void RecordWriteContextSlot(
-      Register context,
-      int offset,
-      Register value,
-      Register scratch,
+      Register context, int offset, Register value, Register scratch,
+      SaveFPRegsMode save_fp,
       RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
       SmiCheck smi_check = INLINE_SMI_CHECK,
       PointersToHereCheck pointers_to_here_check_for_value =
           kPointersToHereMaybeInteresting) {
-    RecordWriteField(context,
-                     offset + kHeapObjectTag,
-                     value,
-                     scratch,
-                     remembered_set_action,
-                     smi_check,
+    RecordWriteField(context, offset + kHeapObjectTag, value, scratch, save_fp,
+                     remembered_set_action, smi_check,
                      pointers_to_here_check_for_value);
   }
 
@@ -182,9 +174,7 @@
   // filters out smis so it does not update the write barrier if the
   // value is a smi.
   void RecordWriteArray(
-      Register array,
-      Register value,
-      Register index,
+      Register array, Register value, Register index, SaveFPRegsMode save_fp,
       RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
       SmiCheck smi_check = INLINE_SMI_CHECK,
       PointersToHereCheck pointers_to_here_check_for_value =
@@ -196,9 +186,7 @@
   // operation. RecordWrite filters out smis so it does not update the
   // write barrier if the value is a smi.
   void RecordWrite(
-      Register object,
-      Register address,
-      Register value,
+      Register object, Register address, Register value, SaveFPRegsMode save_fp,
       RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
       SmiCheck smi_check = INLINE_SMI_CHECK,
       PointersToHereCheck pointers_to_here_check_for_value =
@@ -207,11 +195,8 @@
   // For page containing |object| mark the region covering the object's map
   // dirty. |object| is the object being stored into, |map| is the Map object
   // that was stored.
-  void RecordWriteForMap(
-      Register object,
-      Handle<Map> map,
-      Register scratch1,
-      Register scratch2);
+  void RecordWriteForMap(Register object, Handle<Map> map, Register scratch1,
+                         Register scratch2, SaveFPRegsMode save_fp);
 
   // ---------------------------------------------------------------------------
   // Debugger Support
@@ -226,14 +211,14 @@
   // arguments in register eax and sets up the number of arguments in
   // register edi and the pointer to the first argument in register
   // esi.
-  void EnterExitFrame();
+  void EnterExitFrame(bool save_doubles);
 
   void EnterApiExitFrame(int argc);
 
   // Leave the current exit frame. Expects the return value in
   // register eax:edx (untouched) and the pointer to the first
   // argument in register esi.
-  void LeaveExitFrame();
+  void LeaveExitFrame(bool save_doubles);
 
   // Leave the current exit frame. Expects the return value in
   // register eax (untouched).
@@ -435,8 +420,13 @@
   // FCmp is similar to integer cmp, but requires unsigned
   // jcc instructions (je, ja, jae, jb, jbe, je, and jz).
   void FCmp();
+  void FXamMinusZero();
+  void FXamSign();
+  void X87CheckIA();
+  void X87SetRC(int rc);
 
   void ClampUint8(Register reg);
+  void ClampTOSToUint8(Register result_reg);
 
   void SlowTruncateToI(Register result_reg, Register input_reg,
       int offset = HeapNumber::kValueOffset - kHeapObjectTag);
@@ -717,14 +707,17 @@
   void StubReturn(int argc);
 
   // Call a runtime routine.
-  void CallRuntime(const Runtime::Function* f, int num_arguments);
-  // Convenience function: Same as above, but takes the fid instead.
-  void CallRuntime(Runtime::FunctionId id) {
+  void CallRuntime(const Runtime::Function* f, int num_arguments,
+                   SaveFPRegsMode save_doubles = kDontSaveFPRegs);
+  void CallRuntimeSaveDoubles(Runtime::FunctionId id) {
     const Runtime::Function* function = Runtime::FunctionForId(id);
-    CallRuntime(function, function->nargs);
+    CallRuntime(function, function->nargs, kSaveFPRegs);
   }
-  void CallRuntime(Runtime::FunctionId id, int num_arguments) {
-    CallRuntime(Runtime::FunctionForId(id), num_arguments);
+
+  // Convenience function: Same as above, but takes the fid instead.
+  void CallRuntime(Runtime::FunctionId id, int num_arguments,
+                   SaveFPRegsMode save_doubles = kDontSaveFPRegs) {
+    CallRuntime(Runtime::FunctionForId(id), num_arguments, save_doubles);
   }
 
   // Convenience function: call an external reference.
@@ -889,13 +882,13 @@
       Label* on_not_flat_one_byte_strings);
 
   // Checks if the given register or operand is a unique name
-  void JumpIfNotUniqueName(Register reg, Label* not_unique_name,
-                           Label::Distance distance = Label::kFar) {
-    JumpIfNotUniqueName(Operand(reg), not_unique_name, distance);
+  void JumpIfNotUniqueNameInstanceType(Register reg, Label* not_unique_name,
+                                       Label::Distance distance = Label::kFar) {
+    JumpIfNotUniqueNameInstanceType(Operand(reg), not_unique_name, distance);
   }
 
-  void JumpIfNotUniqueName(Operand operand, Label* not_unique_name,
-                           Label::Distance distance = Label::kFar);
+  void JumpIfNotUniqueNameInstanceType(Operand operand, Label* not_unique_name,
+                                       Label::Distance distance = Label::kFar);
 
   void EmitSeqStringSetCharCheck(Register string,
                                  Register index,
@@ -956,7 +949,7 @@
                       const CallWrapper& call_wrapper = NullCallWrapper());
 
   void EnterExitFramePrologue();
-  void EnterExitFrameEpilogue(int argc);
+  void EnterExitFrameEpilogue(int argc, bool save_doubles);
 
   void LeaveExitFrameEpilogue(bool restore_context);
 
diff --git a/test/cctest/cctest.status b/test/cctest/cctest.status
index 44ad3ae..5198af6 100644
--- a/test/cctest/cctest.status
+++ b/test/cctest/cctest.status
@@ -80,11 +80,6 @@
   ##############################################################################
   # TurboFan compiler failures.
 
-  # TODO(dcarney): C calls are broken all over the place.
-  'test-run-machops/RunCall*': [SKIP],
-  'test-run-machops/RunLoadImmIndex': [SKIP],
-  'test-run-machops/RunSpillLotsOfThingsWithCall': [SKIP],
-
   # TODO(sigurds): The schedule is borked with multiple inlinees,
   # and cannot handle free-floating loops yet
   'test-run-inlining/InlineTwiceDependentDiamond': [SKIP],
diff --git a/test/cctest/compiler/test-js-typed-lowering.cc b/test/cctest/compiler/test-js-typed-lowering.cc
index d2c018f..cf126c2 100644
--- a/test/cctest/compiler/test-js-typed-lowering.cc
+++ b/test/cctest/compiler/test-js-typed-lowering.cc
@@ -174,7 +174,7 @@
     Type::OtherUnsigned32(), Type::OtherSigned32(),    Type::SignedSmall(),
     Type::Signed32(),        Type::Unsigned32(),       Type::Integral32(),
     Type::MinusZero(),       Type::NaN(),              Type::OtherNumber(),
-    Type::Number()};
+    Type::OrderedNumber(),   Type::Number()};
 
 
 static Type* kJSTypes[] = {Type::Undefined(), Type::Null(),   Type::Boolean(),
@@ -1383,30 +1383,3 @@
     }
   }
 }
-
-
-TEST(BuiltinMathImul) {
-  JSTypedLoweringTester R;
-
-  for (size_t i = 0; i < arraysize(kNumberTypes); i++) {
-    for (size_t j = 0; j < arraysize(kNumberTypes); j++) {
-      Type* t0 = kNumberTypes[i];
-      Node* p0 = R.Parameter(t0, 0);
-      Type* t1 = kNumberTypes[j];
-      Node* p1 = R.Parameter(t1, 1);
-      Node* fun = R.HeapConstant(handle(R.isolate->context()->math_imul_fun()));
-      Node* call = R.graph.NewNode(R.javascript.Call(4, NO_CALL_FUNCTION_FLAGS),
-                                   fun, R.UndefinedConstant(), p0, p1);
-      Node* r = R.reduce(call);
-
-      if (t0->Is(Type::Integral32()) && t1->Is(Type::Integral32())) {
-        R.CheckPureBinop(R.machine.Int32Mul(), r);
-        CHECK_EQ(p0, r->InputAt(0));
-        CHECK_EQ(p1, r->InputAt(1));
-      } else {
-        CHECK_EQ(IrOpcode::kJSCallFunction, r->opcode());
-        CHECK_EQ(call, r);
-      }
-    }
-  }
-}
diff --git a/test/cctest/compiler/test-run-machops.cc b/test/cctest/compiler/test-run-machops.cc
index 1b5aa61..985e0f8 100644
--- a/test/cctest/compiler/test-run-machops.cc
+++ b/test/cctest/compiler/test-run-machops.cc
@@ -3567,82 +3567,6 @@
 }
 
 
-#if MACHINE_ASSEMBLER_SUPPORTS_CALL_C
-
-static int Seven() { return 7; }
-static int UnaryMinus(int a) { return -a; }
-static int APlusTwoB(int a, int b) { return a + 2 * b; }
-
-
-TEST(RunCallSeven) {
-  for (int i = 0; i < 2; i++) {
-    bool call_direct = i == 0;
-    void* function_address =
-        reinterpret_cast<void*>(reinterpret_cast<intptr_t>(&Seven));
-
-    RawMachineAssemblerTester<int32_t> m;
-    Node** args = NULL;
-    MachineType* arg_types = NULL;
-    Node* function = call_direct
-                         ? m.PointerConstant(function_address)
-                         : m.LoadFromPointer(&function_address, kMachPtr);
-    m.Return(m.CallC(function, kMachInt32, arg_types, args, 0));
-
-    CHECK_EQ(7, m.Call());
-  }
-}
-
-
-TEST(RunCallUnaryMinus) {
-  for (int i = 0; i < 2; i++) {
-    bool call_direct = i == 0;
-    void* function_address =
-        reinterpret_cast<void*>(reinterpret_cast<intptr_t>(&UnaryMinus));
-
-    RawMachineAssemblerTester<int32_t> m(kMachInt32);
-    Node* args[] = {m.Parameter(0)};
-    MachineType arg_types[] = {kMachInt32};
-    Node* function = call_direct
-                         ? m.PointerConstant(function_address)
-                         : m.LoadFromPointer(&function_address, kMachPtr);
-    m.Return(m.CallC(function, kMachInt32, arg_types, args, 1));
-
-    FOR_INT32_INPUTS(i) {
-      int a = *i;
-      CHECK_EQ(-a, m.Call(a));
-    }
-  }
-}
-
-
-TEST(RunCallAPlusTwoB) {
-  for (int i = 0; i < 2; i++) {
-    bool call_direct = i == 0;
-    void* function_address =
-        reinterpret_cast<void*>(reinterpret_cast<intptr_t>(&APlusTwoB));
-
-    RawMachineAssemblerTester<int32_t> m(kMachInt32, kMachInt32);
-    Node* args[] = {m.Parameter(0), m.Parameter(1)};
-    MachineType arg_types[] = {kMachInt32, kMachInt32};
-    Node* function = call_direct
-                         ? m.PointerConstant(function_address)
-                         : m.LoadFromPointer(&function_address, kMachPtr);
-    m.Return(m.CallC(function, kMachInt32, arg_types, args, 2));
-
-    FOR_INT32_INPUTS(i) {
-      FOR_INT32_INPUTS(j) {
-        int a = *i;
-        int b = *j;
-        int result = m.Call(a, b);
-        CHECK_EQ(a + 2 * b, result);
-      }
-    }
-  }
-}
-
-#endif  // MACHINE_ASSEMBLER_SUPPORTS_CALL_C
-
-
 static const int kFloat64CompareHelperTestCases = 15;
 static const int kFloat64CompareHelperNodeType = 4;
 
@@ -4030,39 +3954,6 @@
 }
 
 
-#if MACHINE_ASSEMBLER_SUPPORTS_CALL_C
-
-TEST(RunSpillLotsOfThingsWithCall) {
-  static const int kInputSize = 1000;
-  RawMachineAssemblerTester<void> m;
-  Node* accs[kInputSize];
-  int32_t outputs[kInputSize];
-  Node* one = m.Int32Constant(1);
-  Node* acc = one;
-  for (int i = 0; i < kInputSize; i++) {
-    acc = m.Int32Add(acc, one);
-    accs[i] = acc;
-  }
-  // If the spill slot computation is wrong, it might load from the c frame
-  {
-    void* func = reinterpret_cast<void*>(reinterpret_cast<intptr_t>(&Seven));
-    Node** args = NULL;
-    MachineType* arg_types = NULL;
-    m.CallC(m.PointerConstant(func), kMachInt32, arg_types, args, 0);
-  }
-  for (int i = 0; i < kInputSize; i++) {
-    m.StoreToPointer(&outputs[i], kMachInt32, accs[i]);
-  }
-  m.Return(one);
-  m.Call();
-  for (int i = 0; i < kInputSize; i++) {
-    CHECK_EQ(outputs[i], i + 2);
-  }
-}
-
-#endif  // MACHINE_ASSEMBLER_SUPPORTS_CALL_C
-
-
 TEST(RunInt32AddWithOverflowP) {
   int32_t actual_val = -1;
   RawMachineAssemblerTester<int32_t> m;
diff --git a/test/cctest/test-debug.cc b/test/cctest/test-debug.cc
index 69c10c2..2f0674a 100644
--- a/test/cctest/test-debug.cc
+++ b/test/cctest/test-debug.cc
@@ -765,6 +765,7 @@
   CHECK_NE(debug->break_id(), 0);
 
   if (event == v8::Break) {
+    break_point_hit_count++;
     for (int i = 0; checks[i].expr != NULL; i++) {
       const int argc = 3;
       v8::Handle<v8::Value> argv[argc] = {
@@ -2406,7 +2407,7 @@
   };
 
   // Simple test function. The "y=0" is in the function foo to provide a break
-  // location. For "y=0" the "y" is at position 15 in the barbar function
+  // location. For "y=0" the "y" is at position 15 in the foo function
   // therefore setting breakpoint at position 15 will break at "y=0" and
   // setting it higher will break after.
   v8::Local<v8::Function> foo = CompileFunction(&env,
@@ -2439,6 +2440,34 @@
   checks = checks_hh;
   foo->Call(env->Global(), 1, argv_foo);
 
+  // Test that overriding Object.prototype does not interfere with evaluation
+  // on a call frame.
+  v8::Local<v8::Function> zoo =
+      CompileFunction(&env,
+                      "x = undefined;"
+                      "function zoo(t) {"
+                      "  var a=x;"
+                      "  Object.prototype.x = 42;"
+                      "  x=t;"
+                      "  y=0;"  // To ensure break location.
+                      "  delete Object.prototype.x;"
+                      "  x=a;"
+                      "}",
+                      "zoo");
+  const int zoo_break_position = 50;
+
+  // Arguments with one parameter "Hello, world!"
+  v8::Handle<v8::Value> argv_zoo[1] = {
+      v8::String::NewFromUtf8(env->GetIsolate(), "Hello, world!")};
+
+  // Call zoo with breakpoint set at y=0.
+  DebugEventCounterClear();
+  bp = SetBreakPoint(zoo, zoo_break_position);
+  checks = checks_hu;
+  zoo->Call(env->Global(), 1, argv_zoo);
+  CHECK_EQ(1, break_point_hit_count);
+  ClearBreakPoint(bp);
+
   // Test function with an inner function. The "y=0" is in function barbar
   // to provide a break location. For "y=0" the "y" is at position 8 in the
   // barbar function therefore setting breakpoint at position 8 will break at
diff --git a/test/cctest/test-disasm-x87.cc b/test/cctest/test-disasm-x87.cc
index 17c49af..6cd33e5 100644
--- a/test/cctest/test-disasm-x87.cc
+++ b/test/cctest/test-disasm-x87.cc
@@ -349,6 +349,7 @@
   __ fprem1();
   __ fincstp();
   __ ftst();
+  __ fxam();
   __ fxch(3);
   __ fld_s(Operand(ebx, ecx, times_4, 10000));
   __ fstp_s(Operand(ebx, ecx, times_4, 10000));
@@ -378,6 +379,12 @@
   __ fninit();
   __ nop();
 
+  __ fldcw(Operand(ebx, ecx, times_4, 10000));
+  __ fnstcw(Operand(ebx, ecx, times_4, 10000));
+  __ fadd_d(Operand(ebx, ecx, times_4, 10000));
+  __ fnsave(Operand(ebx, ecx, times_4, 10000));
+  __ frstor(Operand(ebx, ecx, times_4, 10000));
+
   // xchg.
   {
     __ xchg(eax, eax);
diff --git a/test/mjsunit/keyed-named-access.js b/test/mjsunit/keyed-named-access.js
index f9541e8..11f8fb5 100644
--- a/test/mjsunit/keyed-named-access.js
+++ b/test/mjsunit/keyed-named-access.js
@@ -34,3 +34,39 @@
 f(o3);
 %OptimizeFunctionOnNextCall(f);
 assertEquals(1200, f(o3));
+
+(function CountOperationDeoptimizationGetter() {
+  var global = {};
+  global.__defineGetter__("A", function () { return "x"; });
+
+  function h() {
+    return "A";
+  }
+
+  function g(a, b, c) {
+    try {
+      return a + b.toString() + c;
+    } catch (e) { }
+  }
+
+  function test(o) {
+    return g(1, o[h()]--, 10);
+  }
+
+  test(global);
+  test(global);
+  %OptimizeFunctionOnNextCall(test);
+  print(test(global));
+})();
+
+
+(function CountOperationDeoptimizationPoint() {
+  function test() {
+    this[0, ""]--;
+  }
+
+  test();
+  test();
+  %OptimizeFunctionOnNextCall(test);
+  test();
+})();
diff --git a/test/mjsunit/regress/regress-json-parse-index.js b/test/mjsunit/regress/regress-json-parse-index.js
new file mode 100644
index 0000000..d1a785a
--- /dev/null
+++ b/test/mjsunit/regress/regress-json-parse-index.js
@@ -0,0 +1,6 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+var o = JSON.parse('{"\\u0030":100}');
+assertEquals(100, o[0]);
diff --git a/test/mjsunit/regress/string-set-char-deopt.js b/test/mjsunit/regress/string-set-char-deopt.js
index a4b34e8..c8e8538 100644
--- a/test/mjsunit/regress/string-set-char-deopt.js
+++ b/test/mjsunit/regress/string-set-char-deopt.js
@@ -25,7 +25,7 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-// Flags: --allow-natives-syntax
+// Flags: --allow-natives-syntax --turbo-deoptimization
 
 (function OneByteSeqStringSetCharDeoptOsr() {
   function deopt() {
diff --git a/testing/gtest-support.h b/testing/gtest-support.h
index 159858e..66b1094 100644
--- a/testing/gtest-support.h
+++ b/testing/gtest-support.h
@@ -36,7 +36,7 @@
 #define TRACED_FOREACH(_type, _var, _array)                                \
   for (size_t _i = 0; _i < arraysize(_array); ++_i)                        \
     for (bool _done = false; !_done;)                                      \
-      for (const _type _var = _array[_i]; !_done;)                         \
+      for (_type const _var = _array[_i]; !_done;)                         \
         for (SCOPED_TRACE(::testing::Message() << #_var << " = " << _var); \
              !_done; _done = true)
 
@@ -48,7 +48,7 @@
 #define TRACED_FORRANGE(_type, _var, _low, _high)                          \
   for (_type _i = _low; _i <= _high; ++_i)                                 \
     for (bool _done = false; !_done;)                                      \
-      for (const _type _var = _i; !_done;)                                 \
+      for (_type const _var = _i; !_done;)                                 \
         for (SCOPED_TRACE(::testing::Message() << #_var << " = " << _var); \
              !_done; _done = true)
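
The const placement swap in these macros matters because macro substitution is textual: with _type = char*, "const _type _var" expands to "const char* _var" (a pointer to const char), while "_type const _var" expands to "char* const _var" (a const pointer to char), plausibly the intended meaning for pointer-typed loop variables. A minimal illustration outside the macros:

    #include <cstdio>

    int main() {
      char buf[] = "x";
      char* arr[] = {buf};
      const char* a = arr[0];  // Old expansion: the pointee becomes const.
      char* const b = arr[0];  // New expansion: the pointer itself is const.
      *b = 'y';                // Fine: the pointee stays writable.
      // *a = 'y';             // Would not compile: read-only pointee.
      printf("%s %s\n", a, b);
      return 0;
    }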
 
diff --git a/tools/push-to-trunk/auto_push.py b/tools/push-to-trunk/auto_push.py
index 5a061f6..fef3b53 100755
--- a/tools/push-to-trunk/auto_push.py
+++ b/tools/push-to-trunk/auto_push.py
@@ -36,16 +36,8 @@
 from common_includes import *
 import push_to_trunk
 
-SETTINGS_LOCATION = "SETTINGS_LOCATION"
-
-CONFIG = {
-  PERSISTFILE_BASENAME: "/tmp/v8-auto-push-tempfile",
-  SETTINGS_LOCATION: "~/.auto-roll",
-}
-
 PUSH_MESSAGE_RE = re.compile(r".* \(based on bleeding_edge revision r(\d+)\)$")
 
-
 class Preparation(Step):
   MESSAGE = "Preparation."
 
@@ -58,7 +50,7 @@
   MESSAGE = "Checking settings file."
 
   def RunStep(self):
-    settings_file = os.path.realpath(self.Config(SETTINGS_LOCATION))
+    settings_file = os.path.realpath(self.Config("SETTINGS_LOCATION"))
     if os.path.exists(settings_file):
       settings_dict = json.loads(FileToText(settings_file))
       if settings_dict.get("enable_auto_roll") is False:
@@ -118,9 +110,8 @@
 
     # TODO(machenbach): Update the script before calling it.
     if self._options.push:
-      P = push_to_trunk.PushToTrunk
       self._side_effect_handler.Call(
-          P(push_to_trunk.CONFIG, self._side_effect_handler).Run,
+          push_to_trunk.PushToTrunk().Run,
           ["--author", self._options.author,
            "--reviewer", self._options.reviewer,
            "--revision", self["lkgr"],
@@ -140,6 +131,12 @@
     options.requires_editor = False
     return True
 
+  def _Config(self):
+    return {
+      "PERSISTFILE_BASENAME": "/tmp/v8-auto-push-tempfile",
+      "SETTINGS_LOCATION": "~/.auto-roll",
+    }
+
   def _Steps(self):
     return [
       Preparation,
@@ -152,4 +149,4 @@
 
 
 if __name__ == "__main__":  # pragma: no cover
-  sys.exit(AutoPush(CONFIG).Run())
+  sys.exit(AutoPush().Run())
diff --git a/tools/push-to-trunk/auto_roll.py b/tools/push-to-trunk/auto_roll.py
index a40c356..120e633 100755
--- a/tools/push-to-trunk/auto_roll.py
+++ b/tools/push-to-trunk/auto_roll.py
@@ -12,13 +12,6 @@
 from common_includes import *
 import chromium_roll
 
-CLUSTERFUZZ_API_KEY_FILE = "CLUSTERFUZZ_API_KEY_FILE"
-
-CONFIG = {
-  PERSISTFILE_BASENAME: "/tmp/v8-auto-roll-tempfile",
-  CLUSTERFUZZ_API_KEY_FILE: ".cf_api_key",
-}
-
 
 class CheckActiveRoll(Step):
   MESSAGE = "Check active roll."
@@ -76,10 +69,10 @@
   MESSAGE = "Check ClusterFuzz api for new problems."
 
   def RunStep(self):
-    if not os.path.exists(self.Config(CLUSTERFUZZ_API_KEY_FILE)):
+    if not os.path.exists(self.Config("CLUSTERFUZZ_API_KEY_FILE")):
       print "Skipping ClusterFuzz check. No api key file found."
       return False
-    api_key = FileToText(self.Config(CLUSTERFUZZ_API_KEY_FILE))
+    api_key = FileToText(self.Config("CLUSTERFUZZ_API_KEY_FILE"))
     # Check for open, reproducible issues that have no associated bug.
     result = self._side_effect_handler.ReadClusterFuzzAPI(
         api_key, job_type="linux_asan_d8_dbg", reproducible="True",
@@ -106,10 +99,7 @@
             "--sheriff", "--googlers-mapping", self._options.googlers_mapping])
       if self._options.dry_run:
         args.extend(["--dry-run"])
-      R = chromium_roll.ChromiumRoll
-      self._side_effect_handler.Call(
-          R(chromium_roll.CONFIG, self._side_effect_handler).Run,
-          args)
+      self._side_effect_handler.Call(chromium_roll.ChromiumRoll().Run, args)
 
 
 class AutoRoll(ScriptsBase):
@@ -129,6 +119,12 @@
       return False
     return True
 
+  def _Config(self):
+    return {
+      "PERSISTFILE_BASENAME": "/tmp/v8-auto-roll-tempfile",
+      "CLUSTERFUZZ_API_KEY_FILE": ".cf_api_key",
+    }
+
   def _Steps(self):
     return [
       CheckActiveRoll,
@@ -140,4 +136,4 @@
 
 
 if __name__ == "__main__":  # pragma: no cover
-  sys.exit(AutoRoll(CONFIG).Run())
+  sys.exit(AutoRoll().Run())
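
auto_roll.py's hand-off to chromium_roll no longer threads a CONFIG dict through the callee's constructor; ChromiumRoll() now assembles its own defaults. The side-effect handler in between is just an indirection point for testing. A sketch of the assumed interface (the real DEFAULT_SIDE_EFFECT_HANDLER lives in common_includes.py):

    class SideEffectHandler(object):
        # Assumed shape of Call: forward to the callable, so tests can swap
        # in a scripted handler that intercepts commands instead.
        def Call(self, fun, *args, **kwargs):
            return fun(*args, **kwargs)

    # After this patch the roll step reduces to:
    #   self._side_effect_handler.Call(chromium_roll.ChromiumRoll().Run, args)
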
diff --git a/tools/push-to-trunk/auto_tag.py b/tools/push-to-trunk/auto_tag.py
index 007a50d..175e10e 100755
--- a/tools/push-to-trunk/auto_tag.py
+++ b/tools/push-to-trunk/auto_tag.py
@@ -8,12 +8,6 @@
 
 from common_includes import *
 
-CONFIG = {
-  BRANCHNAME: "auto-tag-v8",
-  PERSISTFILE_BASENAME: "/tmp/v8-auto-tag-tempfile",
-  VERSION_FILE: "src/version.cc",
-}
-
 
 class Preparation(Step):
   MESSAGE = "Preparation."
@@ -29,7 +23,7 @@
   MESSAGE = "Get all V8 tags."
 
   def RunStep(self):
-    self.GitCreateBranch(self._config[BRANCHNAME])
+    self.GitCreateBranch(self._config["BRANCHNAME"])
 
     # Get remote tags.
     tags = filter(lambda s: re.match(r"^svn/tags/[\d+\.]+$", s),
@@ -54,7 +48,7 @@
         format="%H", grep="\\[Auto\\-roll\\] Bump up version to").splitlines():
 
       # Get the version.
-      if not self.GitCheckoutFileSafe(self._config[VERSION_FILE], git_hash):
+      if not self.GitCheckoutFileSafe(VERSION_FILE, git_hash):
         continue
 
       self.ReadAndPersistVersion()
@@ -65,7 +59,7 @@
         version = version[:-2]
 
       # Clean up checked-out version file.
-      self.GitCheckoutFileSafe(self._config[VERSION_FILE], "HEAD")
+      self.GitCheckoutFileSafe(VERSION_FILE, "HEAD")
 
       if version in tags:
         if self["candidate"]:
@@ -183,6 +177,12 @@
     options.force_upload = True
     return True
 
+  def _Config(self):
+    return {
+      "BRANCHNAME": "auto-tag-v8",
+      "PERSISTFILE_BASENAME": "/tmp/v8-auto-tag-tempfile",
+    }
+
   def _Steps(self):
     return [
       Preparation,
@@ -196,4 +196,4 @@
 
 
 if __name__ == "__main__":  # pragma: no cover
-  sys.exit(AutoTag(CONFIG).Run())
+  sys.exit(AutoTag().Run())
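
auto_tag.py now names the version file directly instead of going through self._config[VERSION_FILE]. The constant is defined once in common_includes.py (see its hunk below) as a checkout-relative path:

    import os

    # Single definition replacing the per-script "VERSION_FILE" config entries.
    VERSION_FILE = os.path.join("src", "version.cc")

    print(VERSION_FILE)  # src/version.cc on POSIX

Git commands such as GitCheckoutFileSafe(VERSION_FILE, git_hash) take the relative form, since they already run inside the checkout.
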
diff --git a/tools/push-to-trunk/bump_up_version.py b/tools/push-to-trunk/bump_up_version.py
index e4e75a1..c9f052b 100755
--- a/tools/push-to-trunk/bump_up_version.py
+++ b/tools/push-to-trunk/bump_up_version.py
@@ -25,12 +25,6 @@
 
 from common_includes import *
 
-CONFIG = {
-  PERSISTFILE_BASENAME: "/tmp/v8-bump-up-version-tempfile",
-  PATCH_FILE: "/tmp/v8-bump-up-version-tempfile-patch-file",
-  VERSION_FILE: "src/version.cc",
-}
-
 VERSION_BRANCH = "auto-bump-up-version"
 
 
@@ -73,7 +67,7 @@
   MESSAGE = "Stop script if the last change modified the version."
 
   def RunStep(self):
-    if self._config[VERSION_FILE] in self.GitChangedFiles(self["latest"]):
+    if VERSION_FILE in self.GitChangedFiles(self["latest"]):
       print "Stop due to recent version change."
       return True
 
@@ -122,7 +116,7 @@
   def RunStep(self):
     # If a version-change commit becomes the lkgr, don't bump up the version
     # again.
-    if self._config[VERSION_FILE] in self.GitChangedFiles(self["lkgr"]):
+    if VERSION_FILE in self.GitChangedFiles(self["lkgr"]):
       print "Stop because the lkgr is a version change itself."
       return True
 
@@ -194,7 +188,7 @@
   def RunStep(self):
     self.GitCreateBranch(VERSION_BRANCH, "bleeding_edge")
 
-    self.SetVersion(self.Config(VERSION_FILE), "new_")
+    self.SetVersion(os.path.join(self.default_cwd, VERSION_FILE), "new_")
 
     try:
       msg = "[Auto-roll] Bump up version to %s" % self["new_version"]
@@ -228,6 +222,11 @@
     options.force_upload = True
     return True
 
+  def _Config(self):
+    return {
+      "PERSISTFILE_BASENAME": "/tmp/v8-bump-up-version-tempfile",
+    }
+
   def _Steps(self):
     return [
       Preparation,
@@ -243,4 +242,4 @@
     ]
 
 if __name__ == "__main__":  # pragma: no cover
-  sys.exit(BumpUpVersion(CONFIG).Run())
+  sys.exit(BumpUpVersion().Run())
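
Because VERSION_FILE is now relative, bump_up_version.py must anchor it at the working directory for direct file access (the SetVersion call above), while git invocations keep the short form. A hypothetical helper making the split explicit; the scripts inline the join instead:

    import os

    VERSION_FILE = os.path.join("src", "version.cc")

    def VersionFilePath(default_cwd):
        # Filesystem reads and writes need the absolute location; git does not.
        return os.path.join(default_cwd, VERSION_FILE)

    print(VersionFilePath("/path/to/v8"))  # /path/to/v8/src/version.cc
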
diff --git a/tools/push-to-trunk/chromium_roll.py b/tools/push-to-trunk/chromium_roll.py
index 35603a2..dc5e6eb 100755
--- a/tools/push-to-trunk/chromium_roll.py
+++ b/tools/push-to-trunk/chromium_roll.py
@@ -9,12 +9,6 @@
 
 from common_includes import *
 
-CHROMIUM = "CHROMIUM"
-
-CONFIG = {
-  PERSISTFILE_BASENAME: "/tmp/v8-chromium-roll-tempfile",
-}
-
 
 class Preparation(Step):
   MESSAGE = "Preparation."
@@ -116,7 +110,7 @@
           % self["trunk_revision"])
 
     # Clean up all temporary files.
-    Command("rm", "-f %s*" % self._config[PERSISTFILE_BASENAME])
+    Command("rm", "-f %s*" % self._config["PERSISTFILE_BASENAME"])
 
 
 class ChromiumRoll(ScriptsBase):
@@ -140,6 +134,11 @@
     options.manual = False
     return True
 
+  def _Config(self):
+    return {
+      "PERSISTFILE_BASENAME": "/tmp/v8-chromium-roll-tempfile",
+    }
+
   def _Steps(self):
     return [
       Preparation,
@@ -154,4 +153,4 @@
 
 
 if __name__ == "__main__":  # pragma: no cover
-  sys.exit(ChromiumRoll(CONFIG).Run())
+  sys.exit(ChromiumRoll().Run())
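
With plain string keys, chromium_roll.py stops exporting key constants altogether (the unused CHROMIUM constant is deleted outright). Step.Config is assumed to remain a plain dict lookup, so the call sites change only cosmetically:

    class Step(object):
        # Minimal sketch; the real Step in common_includes.py also carries
        # state, options, and the git recipe mixin.
        def __init__(self, config):
            self._config = config

        def Config(self, key):
            return self._config[key]

    step = Step({"PERSISTFILE_BASENAME": "/tmp/v8-chromium-roll-tempfile"})
    print(step.Config("PERSISTFILE_BASENAME"))
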
diff --git a/tools/push-to-trunk/common_includes.py b/tools/push-to-trunk/common_includes.py
index d33c539..00fb097 100644
--- a/tools/push-to-trunk/common_includes.py
+++ b/tools/push-to-trunk/common_includes.py
@@ -45,13 +45,7 @@
 from git_recipes import GitRecipesMixin
 from git_recipes import GitFailedException
 
-PERSISTFILE_BASENAME = "PERSISTFILE_BASENAME"
-BRANCHNAME = "BRANCHNAME"
-VERSION_FILE = "VERSION_FILE"
-CHANGELOG_FILE = "CHANGELOG_FILE"
-CHANGELOG_ENTRY_FILE = "CHANGELOG_ENTRY_FILE"
-COMMITMSG_FILE = "COMMITMSG_FILE"
-PATCH_FILE = "PATCH_FILE"
+VERSION_FILE = os.path.join("src", "version.cc")
 
 # V8 base directory.
 DEFAULT_CWD = os.path.dirname(
@@ -262,9 +256,8 @@
 
 
 class Step(GitRecipesMixin):
-  def __init__(self, text, requires, number, config, state, options, handler):
+  def __init__(self, text, number, config, state, options, handler):
     self._text = text
-    self._requires = requires
     self._number = number
     self._config = config
     self._state = state
@@ -294,14 +287,10 @@
 
   def Run(self):
     # Restore state.
-    state_file = "%s-state.json" % self._config[PERSISTFILE_BASENAME]
+    state_file = "%s-state.json" % self._config["PERSISTFILE_BASENAME"]
     if not self._state and os.path.exists(state_file):
       self._state.update(json.loads(FileToText(state_file)))
 
-    # Skip step if requirement is not met.
-    if self._requires and not self._state.get(self._requires):
-      return
-
     print ">>> Step %d: %s" % (self._number, self._text)
     try:
       return self.RunStep()
@@ -437,15 +426,15 @@
 
   def PrepareBranch(self):
     # Delete the branch that will be created later if it exists already.
-    self.DeleteBranch(self._config[BRANCHNAME])
+    self.DeleteBranch(self._config["BRANCHNAME"])
 
   def CommonCleanup(self):
     self.GitCheckout(self["current_branch"])
-    if self._config[BRANCHNAME] != self["current_branch"]:
-      self.GitDeleteBranch(self._config[BRANCHNAME])
+    if self._config["BRANCHNAME"] != self["current_branch"]:
+      self.GitDeleteBranch(self._config["BRANCHNAME"])
 
     # Clean up all temporary files.
-    for f in glob.iglob("%s*" % self._config[PERSISTFILE_BASENAME]):
+    for f in glob.iglob("%s*" % self._config["PERSISTFILE_BASENAME"]):
       if os.path.isfile(f):
         os.remove(f)
       if os.path.isdir(f):
@@ -457,7 +446,7 @@
       if match:
         value = match.group(1)
         self["%s%s" % (prefix, var_name)] = value
-    for line in LinesInFile(self._config[VERSION_FILE]):
+    for line in LinesInFile(os.path.join(self.default_cwd, VERSION_FILE)):
       for (var_name, def_name) in [("major", "MAJOR_VERSION"),
                                    ("minor", "MINOR_VERSION"),
                                    ("build", "BUILD_NUMBER"),
@@ -530,12 +519,12 @@
 
   def SVNCommit(self, root, commit_message):
     patch = self.GitDiff("HEAD^", "HEAD")
-    TextToFile(patch, self._config[PATCH_FILE])
+    TextToFile(patch, self._config["PATCH_FILE"])
     self.Command("svn", "update", cwd=self._options.svn)
     if self.Command("svn", "status", cwd=self._options.svn) != "":
       self.Die("SVN checkout not clean.")
     if not self.Command("patch", "-d %s -p1 -i %s" %
-                        (root, self._config[PATCH_FILE]),
+                        (root, self._config["PATCH_FILE"]),
                         cwd=self._options.svn):
       self.Die("Could not apply patch.")
     self.Command(
@@ -604,21 +593,19 @@
       message = step_class.MESSAGE
     except AttributeError:
       message = step_class.__name__
-    try:
-      requires = step_class.REQUIRES
-    except AttributeError:
-      requires = None
 
-    return step_class(message, requires, number=number, config=config,
+    return step_class(message, number=number, config=config,
                       state=state, options=options,
                       handler=side_effect_handler)
 
 
 class ScriptsBase(object):
   # TODO(machenbach): Move static config here.
-  def __init__(self, config, side_effect_handler=DEFAULT_SIDE_EFFECT_HANDLER,
+  def __init__(self,
+               config=None,
+               side_effect_handler=DEFAULT_SIDE_EFFECT_HANDLER,
                state=None):
-    self._config = config
+    self._config = config or self._Config()
     self._side_effect_handler = side_effect_handler
     self._state = state if state is not None else {}
 
@@ -634,6 +621,9 @@
   def _Steps(self):  # pragma: no cover
     raise Exception("Not implemented.")
 
+  def _Config(self):
+    return {}
+
   def MakeOptions(self, args=None):
     parser = argparse.ArgumentParser(description=self._Description())
     parser.add_argument("-a", "--author", default="",
@@ -699,7 +689,7 @@
     if not options:
       return 1
 
-    state_file = "%s-state.json" % self._config[PERSISTFILE_BASENAME]
+    state_file = "%s-state.json" % self._config["PERSISTFILE_BASENAME"]
     if options.step == 0 and os.path.exists(state_file):
       os.remove(state_file)
 
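
The common_includes.py hunks carry the core of the refactoring: seven exported key constants collapse into string literals plus one VERSION_FILE path, the REQUIRES/step-skipping mechanism is dropped, and ScriptsBase gains config=None with a per-script _Config() hook. A condensed, runnable sketch of the new wiring (side-effect handler and state plumbing simplified):

    class ScriptsBase(object):
        def __init__(self, config=None, state=None):
            # An explicit config (tests pass TEST_CONFIG) takes precedence;
            # note the truthiness test: an empty dict also falls back to the
            # script's defaults.
            self._config = config or self._Config()
            self._state = state if state is not None else {}

        def _Config(self):
            return {}  # overridden by each script

    class AutoPush(ScriptsBase):
        def _Config(self):
            return {
                "PERSISTFILE_BASENAME": "/tmp/v8-auto-push-tempfile",
                "SETTINGS_LOCATION": "~/.auto-roll",
            }

    assert AutoPush()._config["SETTINGS_LOCATION"] == "~/.auto-roll"
    assert AutoPush({"SETTINGS_LOCATION": None})._config["SETTINGS_LOCATION"] is None
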
diff --git a/tools/push-to-trunk/merge_to_branch.py b/tools/push-to-trunk/merge_to_branch.py
index ec06f9c..3fd3450 100755
--- a/tools/push-to-trunk/merge_to_branch.py
+++ b/tools/push-to-trunk/merge_to_branch.py
@@ -32,32 +32,16 @@
 
 from common_includes import *
 
-ALREADY_MERGING_SENTINEL_FILE = "ALREADY_MERGING_SENTINEL_FILE"
-COMMIT_HASHES_FILE = "COMMIT_HASHES_FILE"
-TEMPORARY_PATCH_FILE = "TEMPORARY_PATCH_FILE"
-
-CONFIG = {
-  BRANCHNAME: "prepare-merge",
-  PERSISTFILE_BASENAME: "/tmp/v8-merge-to-branch-tempfile",
-  ALREADY_MERGING_SENTINEL_FILE:
-      "/tmp/v8-merge-to-branch-tempfile-already-merging",
-  VERSION_FILE: "src/version.cc",
-  TEMPORARY_PATCH_FILE: "/tmp/v8-prepare-merge-tempfile-temporary-patch",
-  COMMITMSG_FILE: "/tmp/v8-prepare-merge-tempfile-commitmsg",
-  COMMIT_HASHES_FILE: "/tmp/v8-merge-to-branch-tempfile-PATCH_COMMIT_HASHES",
-}
-
-
 class Preparation(Step):
   MESSAGE = "Preparation."
 
   def RunStep(self):
-    if os.path.exists(self.Config(ALREADY_MERGING_SENTINEL_FILE)):
+    if os.path.exists(self.Config("ALREADY_MERGING_SENTINEL_FILE")):
       if self._options.force:
-        os.remove(self.Config(ALREADY_MERGING_SENTINEL_FILE))
+        os.remove(self.Config("ALREADY_MERGING_SENTINEL_FILE"))
       elif self._options.step == 0:  # pragma: no cover
         self.Die("A merge is already in progress")
-    open(self.Config(ALREADY_MERGING_SENTINEL_FILE), "a").close()
+    open(self.Config("ALREADY_MERGING_SENTINEL_FILE"), "a").close()
 
     self.InitialEnvironmentChecks(self.default_cwd)
     if self._options.revert_bleeding_edge:
@@ -75,7 +59,7 @@
   MESSAGE = "Create a fresh branch for the patch."
 
   def RunStep(self):
-    self.GitCreateBranch(self.Config(BRANCHNAME),
+    self.GitCreateBranch(self.Config("BRANCHNAME"),
                          "svn/%s" % self["merge_to_branch"])
 
 
@@ -158,8 +142,8 @@
       print("Applying patch for %s to %s..."
             % (commit_hash, self["merge_to_branch"]))
       patch = self.GitGetPatch(commit_hash)
-      TextToFile(patch, self.Config(TEMPORARY_PATCH_FILE))
-      self.ApplyPatch(self.Config(TEMPORARY_PATCH_FILE), self._options.revert)
+      TextToFile(patch, self.Config("TEMPORARY_PATCH_FILE"))
+      self.ApplyPatch(self.Config("TEMPORARY_PATCH_FILE"), self._options.revert)
     if self._options.patch:
       self.ApplyPatch(self._options.patch, self._options.revert)
 
@@ -184,14 +168,14 @@
     if self.Confirm("Automatically increment PATCH_LEVEL? (Saying 'n' will "
                     "fire up your EDITOR on %s so you can make arbitrary "
                     "changes. When you're done, save the file and exit your "
-                    "EDITOR.)" % self.Config(VERSION_FILE)):
-      text = FileToText(self.Config(VERSION_FILE))
+                    "EDITOR.)" % VERSION_FILE):
+      text = FileToText(os.path.join(self.default_cwd, VERSION_FILE))
       text = MSub(r"(?<=#define PATCH_LEVEL)(?P<space>\s+)\d*$",
                   r"\g<space>%s" % new_patch,
                   text)
-      TextToFile(text, self.Config(VERSION_FILE))
+      TextToFile(text, os.path.join(self.default_cwd, VERSION_FILE))
     else:
-      self.Editor(self.Config(VERSION_FILE))
+      self.Editor(os.path.join(self.default_cwd, VERSION_FILE))
     self.ReadAndPersistVersion("new_")
     self["version"] = "%s.%s.%s.%s" % (self["new_major"],
                                        self["new_minor"],
@@ -214,15 +198,15 @@
       title = ("Version %s (merged %s)"
                % (self["version"], self["revision_list"]))
     self["new_commit_msg"] = "%s\n\n%s" % (title, self["new_commit_msg"])
-    TextToFile(self["new_commit_msg"], self.Config(COMMITMSG_FILE))
-    self.GitCommit(file_name=self.Config(COMMITMSG_FILE))
+    TextToFile(self["new_commit_msg"], self.Config("COMMITMSG_FILE"))
+    self.GitCommit(file_name=self.Config("COMMITMSG_FILE"))
 
 
 class CommitRepository(Step):
   MESSAGE = "Commit to the repository."
 
   def RunStep(self):
-    self.GitCheckout(self.Config(BRANCHNAME))
+    self.GitCheckout(self.Config("BRANCHNAME"))
     self.WaitForLGTM()
     self.GitPresubmit()
     self.GitDCommit()
@@ -311,6 +295,16 @@
     options.bypass_upload_hooks = True
     return True
 
+  def _Config(self):
+    return {
+      "BRANCHNAME": "prepare-merge",
+      "PERSISTFILE_BASENAME": "/tmp/v8-merge-to-branch-tempfile",
+      "ALREADY_MERGING_SENTINEL_FILE":
+          "/tmp/v8-merge-to-branch-tempfile-already-merging",
+      "TEMPORARY_PATCH_FILE": "/tmp/v8-prepare-merge-tempfile-temporary-patch",
+      "COMMITMSG_FILE": "/tmp/v8-prepare-merge-tempfile-commitmsg",
+    }
+
   def _Steps(self):
     return [
       Preparation,
@@ -330,4 +324,4 @@
 
 
 if __name__ == "__main__":  # pragma: no cover
-  sys.exit(MergeToBranch(CONFIG).Run())
+  sys.exit(MergeToBranch().Run())
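
merge_to_branch.py's new _Config() drops the COMMIT_HASHES_FILE entry along with its constant; nothing in the patched file appears to read it any more. The sentinel guard at the top of Preparation keeps its behaviour; sketched in isolation:

    import os
    import sys

    sentinel = "/tmp/v8-merge-to-branch-tempfile-already-merging"
    force, step = False, 0  # stand-ins for self._options.force / .step

    if os.path.exists(sentinel):
        if force:
            os.remove(sentinel)
        elif step == 0:
            sys.exit("A merge is already in progress")  # self.Die() in the script
    open(sentinel, "a").close()  # claim the merge slot
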
diff --git a/tools/push-to-trunk/push_to_trunk.py b/tools/push-to-trunk/push_to_trunk.py
index 70aa801..8a9629e 100755
--- a/tools/push-to-trunk/push_to_trunk.py
+++ b/tools/push-to-trunk/push_to_trunk.py
@@ -34,23 +34,9 @@
 
 from common_includes import *
 
-TRUNKBRANCH = "TRUNKBRANCH"
-
-CONFIG = {
-  BRANCHNAME: "prepare-push",
-  TRUNKBRANCH: "trunk-push",
-  PERSISTFILE_BASENAME: "/tmp/v8-push-to-trunk-tempfile",
-  VERSION_FILE: "src/version.cc",
-  CHANGELOG_FILE: "ChangeLog",
-  CHANGELOG_ENTRY_FILE: "/tmp/v8-push-to-trunk-tempfile-changelog-entry",
-  PATCH_FILE: "/tmp/v8-push-to-trunk-tempfile-patch-file",
-  COMMITMSG_FILE: "/tmp/v8-push-to-trunk-tempfile-commitmsg",
-}
-
 PUSH_MESSAGE_SUFFIX = " (based on bleeding_edge revision r%d)"
 PUSH_MESSAGE_RE = re.compile(r".* \(based on bleeding_edge revision r(\d+)\)$")
 
-
 class Preparation(Step):
   MESSAGE = "Preparation."
 
@@ -58,19 +44,19 @@
     self.InitialEnvironmentChecks(self.default_cwd)
     self.CommonPrepare()
 
-    if(self["current_branch"] == self.Config(TRUNKBRANCH)
-       or self["current_branch"] == self.Config(BRANCHNAME)):
+    if(self["current_branch"] == self.Config("TRUNKBRANCH")
+       or self["current_branch"] == self.Config("BRANCHNAME")):
       print "Warning: Script started on branch %s" % self["current_branch"]
 
     self.PrepareBranch()
-    self.DeleteBranch(self.Config(TRUNKBRANCH))
+    self.DeleteBranch(self.Config("TRUNKBRANCH"))
 
 
 class FreshBranch(Step):
   MESSAGE = "Create a fresh branch."
 
   def RunStep(self):
-    self.GitCreateBranch(self.Config(BRANCHNAME), "svn/bleeding_edge")
+    self.GitCreateBranch(self.Config("BRANCHNAME"), "svn/bleeding_edge")
 
 
 class PreparePushRevision(Step):
@@ -130,7 +116,7 @@
   MESSAGE = "Get latest bleeding edge version."
 
   def RunStep(self):
-    self.GitCheckoutFile(self.Config(VERSION_FILE), "svn/bleeding_edge")
+    self.GitCheckoutFile(VERSION_FILE, "svn/bleeding_edge")
 
     # Store latest version.
     self.ReadAndPersistVersion("latest_")
@@ -143,7 +129,7 @@
 
   def RunStep(self):
     # Retrieve current version from last trunk push.
-    self.GitCheckoutFile(self.Config(VERSION_FILE), self["last_push_trunk"])
+    self.GitCheckoutFile(VERSION_FILE, self["last_push_trunk"])
     self.ReadAndPersistVersion()
     self["trunk_version"] = self.ArrayToVersion("")
 
@@ -154,21 +140,21 @@
 
     if SortingKey(self["trunk_version"]) < SortingKey(self["latest_version"]):
       # If the version on bleeding_edge is newer than on trunk, use it.
-      self.GitCheckoutFile(self.Config(VERSION_FILE), "svn/bleeding_edge")
+      self.GitCheckoutFile(VERSION_FILE, "svn/bleeding_edge")
       self.ReadAndPersistVersion()
 
     if self.Confirm(("Automatically increment BUILD_NUMBER? (Saying 'n' will "
                      "fire up your EDITOR on %s so you can make arbitrary "
                      "changes. When you're done, save the file and exit your "
-                     "EDITOR.)" % self.Config(VERSION_FILE))):
+                     "EDITOR.)" % VERSION_FILE)):
 
-      text = FileToText(self.Config(VERSION_FILE))
+      text = FileToText(os.path.join(self.default_cwd, VERSION_FILE))
       text = MSub(r"(?<=#define BUILD_NUMBER)(?P<space>\s+)\d*$",
                   r"\g<space>%s" % str(int(self["build"]) + 1),
                   text)
-      TextToFile(text, self.Config(VERSION_FILE))
+      TextToFile(text, os.path.join(self.default_cwd, VERSION_FILE))
     else:
-      self.Editor(self.Config(VERSION_FILE))
+      self.Editor(os.path.join(self.default_cwd, VERSION_FILE))
 
     # Variables prefixed with 'new_' contain the new version numbers for the
     # ongoing trunk push.
@@ -206,7 +192,7 @@
   def RunStep(self):
     self["date"] = self.GetDate()
     output = "%s: Version %s\n\n" % (self["date"], self["version"])
-    TextToFile(output, self.Config(CHANGELOG_ENTRY_FILE))
+    TextToFile(output, self.Config("CHANGELOG_ENTRY_FILE"))
     commits = self.GitLog(format="%H",
         git_hash="%s..%s" % (self["last_push_bleeding_edge"],
                              self["push_hash"]))
@@ -222,17 +208,17 @@
 
     # Auto-format commit messages.
     body = MakeChangeLogBody(commit_messages, auto_format=True)
-    AppendToFile(body, self.Config(CHANGELOG_ENTRY_FILE))
+    AppendToFile(body, self.Config("CHANGELOG_ENTRY_FILE"))
 
     msg = ("        Performance and stability improvements on all platforms."
            "\n#\n# The change log above is auto-generated. Please review if "
            "all relevant\n# commit messages from the list below are included."
            "\n# All lines starting with # will be stripped.\n#\n")
-    AppendToFile(msg, self.Config(CHANGELOG_ENTRY_FILE))
+    AppendToFile(msg, self.Config("CHANGELOG_ENTRY_FILE"))
 
     # Include unformatted commit messages as a reference in a comment.
     comment_body = MakeComment(MakeChangeLogBody(commit_messages))
-    AppendToFile(comment_body, self.Config(CHANGELOG_ENTRY_FILE))
+    AppendToFile(comment_body, self.Config("CHANGELOG_ENTRY_FILE"))
 
 
 class EditChangeLog(Step):
@@ -243,10 +229,10 @@
            "entry, then edit its contents to your liking. When you're done, "
            "save the file and exit your EDITOR. ")
     self.ReadLine(default="")
-    self.Editor(self.Config(CHANGELOG_ENTRY_FILE))
+    self.Editor(self.Config("CHANGELOG_ENTRY_FILE"))
 
     # Strip comments and reformat with correct indentation.
-    changelog_entry = FileToText(self.Config(CHANGELOG_ENTRY_FILE)).rstrip()
+    changelog_entry = FileToText(self.Config("CHANGELOG_ENTRY_FILE")).rstrip()
     changelog_entry = StripComments(changelog_entry)
     changelog_entry = "\n".join(map(Fill80, changelog_entry.splitlines()))
     changelog_entry = changelog_entry.lstrip()
@@ -255,7 +241,7 @@
       self.Die("Empty ChangeLog entry.")
 
     # Save new change log for adding it later to the trunk patch.
-    TextToFile(changelog_entry, self.Config(CHANGELOG_ENTRY_FILE))
+    TextToFile(changelog_entry, self.Config("CHANGELOG_ENTRY_FILE"))
 
 
 class StragglerCommits(Step):
@@ -274,10 +260,10 @@
     # Instead of relying on "git rebase -i", we'll just create a diff, because
     # that's easier to automate.
     TextToFile(self.GitDiff("svn/trunk", self["push_hash"]),
-               self.Config(PATCH_FILE))
+               self.Config("PATCH_FILE"))
 
     # Convert the ChangeLog entry to commit message format.
-    text = FileToText(self.Config(CHANGELOG_ENTRY_FILE))
+    text = FileToText(self.Config("CHANGELOG_ENTRY_FILE"))
 
     # Remove date and trailing white space.
     text = re.sub(r"^%s: " % self["date"], "", text.rstrip())
@@ -297,22 +283,22 @@
 
     if not text:  # pragma: no cover
       self.Die("Commit message editing failed.")
-    TextToFile(text, self.Config(COMMITMSG_FILE))
+    TextToFile(text, self.Config("COMMITMSG_FILE"))
 
 
 class NewBranch(Step):
   MESSAGE = "Create a new branch from trunk."
 
   def RunStep(self):
-    self.GitCreateBranch(self.Config(TRUNKBRANCH), "svn/trunk")
+    self.GitCreateBranch(self.Config("TRUNKBRANCH"), "svn/trunk")
 
 
 class ApplyChanges(Step):
   MESSAGE = "Apply squashed changes."
 
   def RunStep(self):
-    self.ApplyPatch(self.Config(PATCH_FILE))
-    os.remove(self.Config(PATCH_FILE))
+    self.ApplyPatch(self.Config("PATCH_FILE"))
+    os.remove(self.Config("PATCH_FILE"))
 
 
 class AddChangeLog(Step):
@@ -322,12 +308,12 @@
     # The change log has been modified by the patch. Reset it to the version
     # on trunk and apply the exact changes determined by this PrepareChangeLog
     # step above.
-    self.GitCheckoutFile(self.Config(CHANGELOG_FILE), "svn/trunk")
-    changelog_entry = FileToText(self.Config(CHANGELOG_ENTRY_FILE))
-    old_change_log = FileToText(self.Config(CHANGELOG_FILE))
+    self.GitCheckoutFile(self.Config("CHANGELOG_FILE"), "svn/trunk")
+    changelog_entry = FileToText(self.Config("CHANGELOG_ENTRY_FILE"))
+    old_change_log = FileToText(self.Config("CHANGELOG_FILE"))
     new_change_log = "%s\n\n\n%s" % (changelog_entry, old_change_log)
-    TextToFile(new_change_log, self.Config(CHANGELOG_FILE))
-    os.remove(self.Config(CHANGELOG_ENTRY_FILE))
+    TextToFile(new_change_log, self.Config("CHANGELOG_FILE"))
+    os.remove(self.Config("CHANGELOG_ENTRY_FILE"))
 
 
 class SetVersion(Step):
@@ -336,16 +322,16 @@
   def RunStep(self):
     # The version file has been modified by the patch. Reset it to the version
     # on trunk and apply the correct version.
-    self.GitCheckoutFile(self.Config(VERSION_FILE), "svn/trunk")
-    self.SetVersion(self.Config(VERSION_FILE), "new_")
+    self.GitCheckoutFile(VERSION_FILE, "svn/trunk")
+    self.SetVersion(os.path.join(self.default_cwd, VERSION_FILE), "new_")
 
 
 class CommitTrunk(Step):
   MESSAGE = "Commit to local trunk branch."
 
   def RunStep(self):
-    self.GitCommit(file_name = self.Config(COMMITMSG_FILE))
-    os.remove(self.Config(COMMITMSG_FILE))
+    self.GitCommit(file_name = self.Config("COMMITMSG_FILE"))
+    os.remove(self.Config("COMMITMSG_FILE"))
 
 
 class SanityCheck(Step):
@@ -356,7 +342,7 @@
     # prepare push process.
     if not self.Confirm("Please check if your local checkout is sane: Inspect "
         "%s, compile, run tests. Do you want to commit this new trunk "
-        "revision to the repository?" % self.Config(VERSION_FILE)):
+        "revision to the repository?" % VERSION_FILE):
       self.Die("Execution canceled.")  # pragma: no cover
 
 
@@ -402,8 +388,8 @@
     print "%s\ttrunk\t%s" % (self["version"], self["trunk_revision"])
 
     self.CommonCleanup()
-    if self.Config(TRUNKBRANCH) != self["current_branch"]:
-      self.GitDeleteBranch(self.Config(TRUNKBRANCH))
+    if self.Config("TRUNKBRANCH") != self["current_branch"]:
+      self.GitDeleteBranch(self.Config("TRUNKBRANCH"))
 
 
 class PushToTrunk(ScriptsBase):
@@ -439,6 +425,17 @@
     options.tbr_commit = not options.manual
     return True
 
+  def _Config(self):
+    return {
+      "BRANCHNAME": "prepare-push",
+      "TRUNKBRANCH": "trunk-push",
+      "PERSISTFILE_BASENAME": "/tmp/v8-push-to-trunk-tempfile",
+      "CHANGELOG_FILE": "ChangeLog",
+      "CHANGELOG_ENTRY_FILE": "/tmp/v8-push-to-trunk-tempfile-changelog-entry",
+      "PATCH_FILE": "/tmp/v8-push-to-trunk-tempfile-patch-file",
+      "COMMITMSG_FILE": "/tmp/v8-push-to-trunk-tempfile-commitmsg",
+    }
+
   def _Steps(self):
     return [
       Preparation,
@@ -464,4 +461,4 @@
 
 
 if __name__ == "__main__":  # pragma: no cover
-  sys.exit(PushToTrunk(CONFIG).Run())
+  sys.exit(PushToTrunk().Run())
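
push_to_trunk.py's IncrementVersion step keeps its MSub-based edit of version.cc; only the path handling changes. Assuming MSub wraps re.sub with MULTILINE (its apparent role in common_includes.py), the build bump works like this:

    import re

    def MSub(rexp, replacement, text):
        # Assumed semantics of common_includes.MSub.
        return re.sub(rexp, replacement, text, flags=re.MULTILINE)

    text = "#define BUILD_NUMBER    4\n"
    build = 4
    text = MSub(r"(?<=#define BUILD_NUMBER)(?P<space>\s+)\d*$",
                r"\g<space>%s" % str(build + 1),
                text)
    assert text == "#define BUILD_NUMBER    5\n"
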
diff --git a/tools/push-to-trunk/releases.py b/tools/push-to-trunk/releases.py
index f2d9856..1d26198 100755
--- a/tools/push-to-trunk/releases.py
+++ b/tools/push-to-trunk/releases.py
@@ -20,12 +20,9 @@
 
 from common_includes import *
 
-CHROMIUM = "CHROMIUM"
-
 CONFIG = {
-  BRANCHNAME: "retrieve-v8-releases",
-  PERSISTFILE_BASENAME: "/tmp/v8-releases-tempfile",
-  VERSION_FILE: "src/version.cc",
+  "BRANCHNAME": "retrieve-v8-releases",
+  "PERSISTFILE_BASENAME": "/tmp/v8-releases-tempfile",
 }
 
 # Expression for retrieving the bleeding edge revision from a commit message.
@@ -206,11 +203,11 @@
     releases = []
     try:
       for git_hash in self.GitLog(format="%H").splitlines():
-        if self._config[VERSION_FILE] not in self.GitChangedFiles(git_hash):
+        if VERSION_FILE not in self.GitChangedFiles(git_hash):
           continue
         if self.ExceedsMax(releases):
           break  # pragma: no cover
-        if not self.GitCheckoutFileSafe(self._config[VERSION_FILE], git_hash):
+        if not self.GitCheckoutFileSafe(VERSION_FILE, git_hash):
           break  # pragma: no cover
 
         release, patch_level = self.GetRelease(git_hash, branch)
@@ -228,11 +225,11 @@
       pass
 
     # Clean up checked-out version file.
-    self.GitCheckoutFileSafe(self._config[VERSION_FILE], "HEAD")
+    self.GitCheckoutFileSafe(VERSION_FILE, "HEAD")
     return releases
 
   def RunStep(self):
-    self.GitCreateBranch(self._config[BRANCHNAME])
+    self.GitCreateBranch(self._config["BRANCHNAME"])
     # Get relevant remote branches, e.g. "svn/3.25".
     branches = filter(lambda s: re.match(r"^svn/\d+\.\d+$", s),
                       self.GitRemotes())
@@ -285,7 +282,7 @@
     cwd = self._options.chromium
     self.GitCheckout("master", cwd=cwd)
     self.GitPull(cwd=cwd)
-    self.GitCreateBranch(self.Config(BRANCHNAME), cwd=cwd)
+    self.GitCreateBranch(self.Config("BRANCHNAME"), cwd=cwd)
 
 
 def ConvertToCommitNumber(step, revision):
@@ -409,7 +406,7 @@
 
   def RunStep(self):
     self.GitCheckout("master", cwd=self._options.chromium)
-    self.GitDeleteBranch(self.Config(BRANCHNAME), cwd=self._options.chromium)
+    self.GitDeleteBranch(self.Config("BRANCHNAME"), cwd=self._options.chromium)
     self.CommonCleanup()
 
 
@@ -450,6 +447,12 @@
   def _ProcessOptions(self, options):  # pragma: no cover
     return True
 
+  def _Config(self):
+    return {
+      "BRANCHNAME": "retrieve-v8-releases",
+      "PERSISTFILE_BASENAME": "/tmp/v8-releases-tempfile",
+    }
+
   def _Steps(self):
     return [
       Preparation,
@@ -464,4 +467,4 @@
 
 
 if __name__ == "__main__":  # pragma: no cover
-  sys.exit(Releases(CONFIG).Run())
+  sys.exit(Releases().Run())
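
Note that releases.py ends up with both the now string-keyed module-level CONFIG and an identical _Config() override; with the entry point changed to Releases().Run(), the module-level dict looks vestigial. The harvesting loop only changes how it names the version file; a simplified sketch, with the git plumbing left to the step's real helpers (ExceedsMax and error handling omitted):

    VERSION_FILE = "src/version.cc"

    def HarvestReleases(step, branch):
        releases = []
        for git_hash in step.GitLog(format="%H").splitlines():
            if VERSION_FILE not in step.GitChangedFiles(git_hash):
                continue  # only commits touching version.cc mark releases
            if not step.GitCheckoutFileSafe(VERSION_FILE, git_hash):
                break
            release, _ = step.GetRelease(git_hash, branch)
            releases.append(release)
        step.GitCheckoutFileSafe(VERSION_FILE, "HEAD")  # restore the checkout
        return releases
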
diff --git a/tools/push-to-trunk/test_scripts.py b/tools/push-to-trunk/test_scripts.py
index 61d98a8..b0d1c58 100644
--- a/tools/push-to-trunk/test_scripts.py
+++ b/tools/push-to-trunk/test_scripts.py
@@ -34,9 +34,7 @@
 
 import auto_push
 from auto_push import CheckLastPush
-from auto_push import SETTINGS_LOCATION
 import auto_roll
-from auto_roll import CLUSTERFUZZ_API_KEY_FILE
 import common_includes
 from common_includes import *
 import merge_to_branch
@@ -44,7 +42,6 @@
 import push_to_trunk
 from push_to_trunk import *
 import chromium_roll
-from chromium_roll import CHROMIUM
 from chromium_roll import ChromiumRoll
 import releases
 from releases import Releases
@@ -57,21 +54,19 @@
 
 TEST_CONFIG = {
   "DEFAULT_CWD": None,
-  BRANCHNAME: "test-prepare-push",
-  TRUNKBRANCH: "test-trunk-push",
-  PERSISTFILE_BASENAME: "/tmp/test-v8-push-to-trunk-tempfile",
-  VERSION_FILE: None,
-  CHANGELOG_FILE: None,
-  CHANGELOG_ENTRY_FILE: "/tmp/test-v8-push-to-trunk-tempfile-changelog-entry",
-  PATCH_FILE: "/tmp/test-v8-push-to-trunk-tempfile-patch",
-  COMMITMSG_FILE: "/tmp/test-v8-push-to-trunk-tempfile-commitmsg",
-  CHROMIUM: "/tmp/test-v8-push-to-trunk-tempfile-chromium",
-  SETTINGS_LOCATION: None,
-  ALREADY_MERGING_SENTINEL_FILE:
+  "BRANCHNAME": "test-prepare-push",
+  "TRUNKBRANCH": "test-trunk-push",
+  "PERSISTFILE_BASENAME": "/tmp/test-v8-push-to-trunk-tempfile",
+  "CHANGELOG_FILE": None,
+  "CHANGELOG_ENTRY_FILE": "/tmp/test-v8-push-to-trunk-tempfile-changelog-entry",
+  "PATCH_FILE": "/tmp/test-v8-push-to-trunk-tempfile-patch",
+  "COMMITMSG_FILE": "/tmp/test-v8-push-to-trunk-tempfile-commitmsg",
+  "CHROMIUM": "/tmp/test-v8-push-to-trunk-tempfile-chromium",
+  "SETTINGS_LOCATION": None,
+  "ALREADY_MERGING_SENTINEL_FILE":
       "/tmp/test-merge-to-branch-tempfile-already-merging",
-  COMMIT_HASHES_FILE: "/tmp/test-merge-to-branch-tempfile-PATCH_COMMIT_HASHES",
-  TEMPORARY_PATCH_FILE: "/tmp/test-merge-to-branch-tempfile-temporary-patch",
-  CLUSTERFUZZ_API_KEY_FILE: "/tmp/test-fake-cf-api-key",
+  "TEMPORARY_PATCH_FILE": "/tmp/test-merge-to-branch-tempfile-temporary-patch",
+  "CLUSTERFUZZ_API_KEY_FILE": "/tmp/test-fake-cf-api-key",
 }
 
 
@@ -362,7 +357,10 @@
 
 
   def WriteFakeVersionFile(self, minor=22, build=4, patch=0):
-    with open(TEST_CONFIG[VERSION_FILE], "w") as f:
+    version_file = os.path.join(TEST_CONFIG["DEFAULT_CWD"], VERSION_FILE)
+    if not os.path.exists(os.path.dirname(version_file)):
+      os.makedirs(os.path.dirname(version_file))
+    with open(version_file, "w") as f:
       f.write("  // Some line...\n")
       f.write("\n")
       f.write("#define MAJOR_VERSION    3\n")
@@ -426,8 +424,8 @@
     TEST_CONFIG["DEFAULT_CWD"] = self.MakeEmptyTempDirectory()
 
   def tearDown(self):
-    if os.path.exists(TEST_CONFIG[PERSISTFILE_BASENAME]):
-      shutil.rmtree(TEST_CONFIG[PERSISTFILE_BASENAME])
+    if os.path.exists(TEST_CONFIG["PERSISTFILE_BASENAME"]):
+      shutil.rmtree(TEST_CONFIG["PERSISTFILE_BASENAME"])
 
     # Clean up temps. Doesn't work automatically.
     for name in self._tmp_files:
@@ -449,9 +447,9 @@
       Cmd("git status -s -uno", ""),
       Cmd("git status -s -b -uno", "## some_branch"),
       Cmd("git svn fetch", ""),
-      Cmd("git branch", "  branch1\n* %s" % TEST_CONFIG[BRANCHNAME]),
+      Cmd("git branch", "  branch1\n* %s" % TEST_CONFIG["BRANCHNAME"]),
       RL("Y"),
-      Cmd("git branch -D %s" % TEST_CONFIG[BRANCHNAME], ""),
+      Cmd("git branch -D %s" % TEST_CONFIG["BRANCHNAME"], ""),
     ])
     self.MakeStep().CommonPrepare()
     self.MakeStep().PrepareBranch()
@@ -462,7 +460,7 @@
       Cmd("git status -s -uno", ""),
       Cmd("git status -s -b -uno", "## some_branch"),
       Cmd("git svn fetch", ""),
-      Cmd("git branch", "  branch1\n* %s" % TEST_CONFIG[BRANCHNAME]),
+      Cmd("git branch", "  branch1\n* %s" % TEST_CONFIG["BRANCHNAME"]),
       RL("n"),
     ])
     self.MakeStep().CommonPrepare()
@@ -474,9 +472,9 @@
       Cmd("git status -s -uno", ""),
       Cmd("git status -s -b -uno", "## some_branch"),
       Cmd("git svn fetch", ""),
-      Cmd("git branch", "  branch1\n* %s" % TEST_CONFIG[BRANCHNAME]),
+      Cmd("git branch", "  branch1\n* %s" % TEST_CONFIG["BRANCHNAME"]),
       RL("Y"),
-      Cmd("git branch -D %s" % TEST_CONFIG[BRANCHNAME], None),
+      Cmd("git branch -D %s" % TEST_CONFIG["BRANCHNAME"], None),
     ])
     self.MakeStep().CommonPrepare()
     self.assertRaises(Exception, self.MakeStep().PrepareBranch)
@@ -491,7 +489,6 @@
     self.MakeStep().InitialEnvironmentChecks(TEST_CONFIG["DEFAULT_CWD"])
 
   def testReadAndPersistVersion(self):
-    TEST_CONFIG[VERSION_FILE] = self.MakeEmptyTempFile()
     self.WriteFakeVersionFile(build=5)
     step = self.MakeStep()
     step.ReadAndPersistVersion()
@@ -531,9 +528,8 @@
     self.assertEquals("push_hash", self._state["push_hash"])
 
   def testPrepareChangeLog(self):
-    TEST_CONFIG[VERSION_FILE] = self.MakeEmptyTempFile()
     self.WriteFakeVersionFile()
-    TEST_CONFIG[CHANGELOG_ENTRY_FILE] = self.MakeEmptyTempFile()
+    TEST_CONFIG["CHANGELOG_ENTRY_FILE"] = self.MakeEmptyTempFile()
 
     self.Expect([
       Cmd("git log --format=%H 1234..push_hash", "rev1\nrev2\nrev3\nrev4"),
@@ -560,7 +556,7 @@
     self._state["version"] = "3.22.5"
     self.RunStep(PushToTrunk, PrepareChangeLog)
 
-    actual_cl = FileToText(TEST_CONFIG[CHANGELOG_ENTRY_FILE])
+    actual_cl = FileToText(TEST_CONFIG["CHANGELOG_ENTRY_FILE"])
 
     expected_cl = """1999-07-31: Version 3.22.5
 
@@ -591,32 +587,30 @@
     self.assertEquals(expected_cl, actual_cl)
 
   def testEditChangeLog(self):
-    TEST_CONFIG[CHANGELOG_ENTRY_FILE] = self.MakeEmptyTempFile()
-    TextToFile("  New  \n\tLines  \n", TEST_CONFIG[CHANGELOG_ENTRY_FILE])
+    TEST_CONFIG["CHANGELOG_ENTRY_FILE"] = self.MakeEmptyTempFile()
+    TextToFile("  New  \n\tLines  \n", TEST_CONFIG["CHANGELOG_ENTRY_FILE"])
     os.environ["EDITOR"] = "vi"
     self.Expect([
       RL(""),  # Open editor.
-      Cmd("vi %s" % TEST_CONFIG[CHANGELOG_ENTRY_FILE], ""),
+      Cmd("vi %s" % TEST_CONFIG["CHANGELOG_ENTRY_FILE"], ""),
     ])
 
     self.RunStep(PushToTrunk, EditChangeLog)
 
     self.assertEquals("New\n        Lines",
-                      FileToText(TEST_CONFIG[CHANGELOG_ENTRY_FILE]))
+                      FileToText(TEST_CONFIG["CHANGELOG_ENTRY_FILE"]))
 
   # Version on trunk: 3.22.4.0. Version on master (bleeding_edge): 3.22.6.
   # Make sure that the increment is 3.22.7.0.
   def testIncrementVersion(self):
-    TEST_CONFIG[VERSION_FILE] = self.MakeEmptyTempFile()
     self.WriteFakeVersionFile()
     self._state["last_push_trunk"] = "hash1"
     self._state["latest_build"] = "6"
     self._state["latest_version"] = "3.22.6.0"
 
     self.Expect([
-      Cmd("git checkout -f hash1 -- %s" % TEST_CONFIG[VERSION_FILE], ""),
-      Cmd(("git checkout -f svn/bleeding_edge -- %s" %
-           TEST_CONFIG[VERSION_FILE]),
+      Cmd("git checkout -f hash1 -- src/version.cc", ""),
+      Cmd("git checkout -f svn/bleeding_edge -- src/version.cc",
           "", cb=lambda: self.WriteFakeVersionFile(22, 6)),
       RL("Y"),  # Increment build number.
     ])
@@ -629,8 +623,8 @@
     self.assertEquals("0", self._state["new_patch"])
 
   def _TestSquashCommits(self, change_log, expected_msg):
-    TEST_CONFIG[CHANGELOG_ENTRY_FILE] = self.MakeEmptyTempFile()
-    with open(TEST_CONFIG[CHANGELOG_ENTRY_FILE], "w") as f:
+    TEST_CONFIG["CHANGELOG_ENTRY_FILE"] = self.MakeEmptyTempFile()
+    with open(TEST_CONFIG["CHANGELOG_ENTRY_FILE"], "w") as f:
       f.write(change_log)
 
     self.Expect([
@@ -642,9 +636,9 @@
     self._state["date"] = "1999-11-11"
 
     self.RunStep(PushToTrunk, SquashCommits)
-    self.assertEquals(FileToText(TEST_CONFIG[COMMITMSG_FILE]), expected_msg)
+    self.assertEquals(FileToText(TEST_CONFIG["COMMITMSG_FILE"]), expected_msg)
 
-    patch = FileToText(TEST_CONFIG[ PATCH_FILE])
+    patch = FileToText(TEST_CONFIG["PATCH_FILE"])
     self.assertTrue(re.search(r"patch content", patch))
 
   def testSquashCommitsUnformatted(self):
@@ -685,13 +679,12 @@
 
     # The version file on bleeding edge has build level 5, while the version
     # file from trunk has build level 4.
-    TEST_CONFIG[VERSION_FILE] = self.MakeEmptyTempFile()
     self.WriteFakeVersionFile(build=5)
 
-    TEST_CONFIG[CHANGELOG_ENTRY_FILE] = self.MakeEmptyTempFile()
-    TEST_CONFIG[CHANGELOG_FILE] = self.MakeEmptyTempFile()
+    TEST_CONFIG["CHANGELOG_ENTRY_FILE"] = self.MakeEmptyTempFile()
+    TEST_CONFIG["CHANGELOG_FILE"] = self.MakeEmptyTempFile()
     bleeding_edge_change_log = "2014-03-17: Sentinel\n"
-    TextToFile(bleeding_edge_change_log, TEST_CONFIG[CHANGELOG_FILE])
+    TextToFile(bleeding_edge_change_log, TEST_CONFIG["CHANGELOG_FILE"])
     os.environ["EDITOR"] = "vi"
 
     def ResetChangeLog():
@@ -700,21 +693,22 @@
       trunk_change_log = """1999-04-05: Version 3.22.4
 
         Performance and stability improvements on all platforms.\n"""
-      TextToFile(trunk_change_log, TEST_CONFIG[CHANGELOG_FILE])
+      TextToFile(trunk_change_log, TEST_CONFIG["CHANGELOG_FILE"])
 
     def ResetToTrunk():
       ResetChangeLog()
       self.WriteFakeVersionFile()
 
     def CheckSVNCommit():
-      commit = FileToText(TEST_CONFIG[COMMITMSG_FILE])
+      commit = FileToText(TEST_CONFIG["COMMITMSG_FILE"])
       self.assertEquals(
 """Version 3.22.5 (based on bleeding_edge revision r123455)
 
 Log text 1 (issue 321).
 
 Performance and stability improvements on all platforms.""", commit)
-      version = FileToText(TEST_CONFIG[VERSION_FILE])
+      version = FileToText(
+          os.path.join(TEST_CONFIG["DEFAULT_CWD"], VERSION_FILE))
       self.assertTrue(re.search(r"#define MINOR_VERSION\s+22", version))
       self.assertTrue(re.search(r"#define BUILD_NUMBER\s+5", version))
       self.assertFalse(re.search(r"#define BUILD_NUMBER\s+6", version))
@@ -722,7 +716,7 @@
       self.assertTrue(re.search(r"#define IS_CANDIDATE_VERSION\s+0", version))
 
       # Check that the change log on the trunk branch got correctly modified.
-      change_log = FileToText(TEST_CONFIG[CHANGELOG_FILE])
+      change_log = FileToText(TEST_CONFIG["CHANGELOG_FILE"])
       self.assertEquals(
 """1999-07-31: Version 3.22.5
 
@@ -746,7 +740,7 @@
       Cmd("git svn fetch", ""),
       Cmd("git branch", "  branch1\n* branch2\n"),
       Cmd("git branch", "  branch1\n* branch2\n"),
-      Cmd("git checkout -b %s svn/bleeding_edge" % TEST_CONFIG[BRANCHNAME],
+      Cmd("git checkout -b %s svn/bleeding_edge" % TEST_CONFIG["BRANCHNAME"],
           ""),
       Cmd("git svn find-rev r123455", "push_hash\n"),
       Cmd(("git log -1 --format=%H --grep="
@@ -760,10 +754,9 @@
       Cmd("git log -1 --format=%s hash2",
        "Version 3.4.5 (based on bleeding_edge revision r1234)\n"),
       Cmd("git svn find-rev r1234", "hash3\n"),
-      Cmd(("git checkout -f svn/bleeding_edge -- %s" %
-           TEST_CONFIG[VERSION_FILE]),
+      Cmd("git checkout -f svn/bleeding_edge -- src/version.cc",
           "", cb=self.WriteFakeVersionFile),
-      Cmd("git checkout -f hash2 -- %s" % TEST_CONFIG[VERSION_FILE], "",
+      Cmd("git checkout -f hash2 -- src/version.cc", "",
           cb=self.WriteFakeVersionFile),
     ]
     if manual:
@@ -777,20 +770,21 @@
     if manual:
       expectations.append(RL(""))  # Open editor.
     if not force:
-      expectations.append(Cmd("vi %s" % TEST_CONFIG[CHANGELOG_ENTRY_FILE], ""))
+      expectations.append(
+          Cmd("vi %s" % TEST_CONFIG["CHANGELOG_ENTRY_FILE"], ""))
     expectations += [
       Cmd("git svn fetch", "fetch result\n"),
       Cmd("git checkout -f svn/bleeding_edge", ""),
       Cmd("git diff svn/trunk push_hash", "patch content\n"),
       Cmd("git svn find-rev push_hash", "123455\n"),
-      Cmd("git checkout -b %s svn/trunk" % TEST_CONFIG[TRUNKBRANCH], "",
+      Cmd("git checkout -b %s svn/trunk" % TEST_CONFIG["TRUNKBRANCH"], "",
           cb=ResetToTrunk),
-      Cmd("git apply --index --reject \"%s\"" % TEST_CONFIG[PATCH_FILE], ""),
-      Cmd("git checkout -f svn/trunk -- %s" % TEST_CONFIG[CHANGELOG_FILE], "",
+      Cmd("git apply --index --reject \"%s\"" % TEST_CONFIG["PATCH_FILE"], ""),
+      Cmd("git checkout -f svn/trunk -- %s" % TEST_CONFIG["CHANGELOG_FILE"], "",
           cb=ResetChangeLog),
-      Cmd("git checkout -f svn/trunk -- %s" % TEST_CONFIG[VERSION_FILE], "",
+      Cmd("git checkout -f svn/trunk -- src/version.cc", "",
           cb=self.WriteFakeVersionFile),
-      Cmd("git commit -aF \"%s\"" % TEST_CONFIG[COMMITMSG_FILE], "",
+      Cmd("git commit -aF \"%s\"" % TEST_CONFIG["COMMITMSG_FILE"], "",
           cb=CheckSVNCommit),
     ]
     if manual:
@@ -800,8 +794,8 @@
           "Some output\nCommitted r123456\nSome output\n"),
       Cmd("git svn tag 3.22.5 -m \"Tagging version 3.22.5\"", ""),
       Cmd("git checkout -f some_branch", ""),
-      Cmd("git branch -D %s" % TEST_CONFIG[BRANCHNAME], ""),
-      Cmd("git branch -D %s" % TEST_CONFIG[TRUNKBRANCH], ""),
+      Cmd("git branch -D %s" % TEST_CONFIG["BRANCHNAME"], ""),
+      Cmd("git branch -D %s" % TEST_CONFIG["TRUNKBRANCH"], ""),
     ]
     self.Expect(expectations)
 
@@ -811,7 +805,7 @@
     else: args += ["-r", "reviewer@chromium.org"]
     PushToTrunk(TEST_CONFIG, self).Run(args)
 
-    cl = FileToText(TEST_CONFIG[CHANGELOG_FILE])
+    cl = FileToText(TEST_CONFIG["CHANGELOG_FILE"])
     self.assertTrue(re.search(r"^\d\d\d\d\-\d+\-\d+: Version 3\.22\.5", cl))
     self.assertTrue(re.search(r"        Log text 1 \(issue 321\).", cl))
     self.assertTrue(re.search(r"1999\-04\-05: Version 3\.22\.4", cl))
@@ -848,7 +842,7 @@
 """
 
   def testChromiumRoll(self):
-    googlers_mapping_py = "%s-mapping.py" % TEST_CONFIG[PERSISTFILE_BASENAME]
+    googlers_mapping_py = "%s-mapping.py" % TEST_CONFIG["PERSISTFILE_BASENAME"]
     with open(googlers_mapping_py, "w") as f:
       f.write("""
 def list_to_dict(entries):
@@ -857,9 +851,9 @@
   pass""")
 
     # Setup fake directory structures.
-    TEST_CONFIG[CHROMIUM] = self.MakeEmptyTempDirectory()
-    TextToFile("", os.path.join(TEST_CONFIG[CHROMIUM], ".git"))
-    chrome_dir = TEST_CONFIG[CHROMIUM]
+    TEST_CONFIG["CHROMIUM"] = self.MakeEmptyTempDirectory()
+    TextToFile("", os.path.join(TEST_CONFIG["CHROMIUM"], ".git"))
+    chrome_dir = TEST_CONFIG["CHROMIUM"]
     os.makedirs(os.path.join(chrome_dir, "v8"))
 
     # Write fake deps file.
@@ -922,7 +916,7 @@
 
   def testAutoPush(self):
     TextToFile("", os.path.join(TEST_CONFIG["DEFAULT_CWD"], ".git"))
-    TEST_CONFIG[SETTINGS_LOCATION] = "~/.doesnotexist"
+    TEST_CONFIG["SETTINGS_LOCATION"] = "~/.doesnotexist"
 
     self.Expect([
       Cmd("git status -s -uno", ""),
@@ -942,14 +936,15 @@
     auto_push.AutoPush(TEST_CONFIG, self).Run(AUTO_PUSH_ARGS + ["--push"])
 
     state = json.loads(FileToText("%s-state.json"
-                                  % TEST_CONFIG[PERSISTFILE_BASENAME]))
+                                  % TEST_CONFIG["PERSISTFILE_BASENAME"]))
 
     self.assertEquals("100", state["lkgr"])
 
   def testAutoPushStoppedBySettings(self):
     TextToFile("", os.path.join(TEST_CONFIG["DEFAULT_CWD"], ".git"))
-    TEST_CONFIG[SETTINGS_LOCATION] = self.MakeEmptyTempFile()
-    TextToFile("{\"enable_auto_push\": false}", TEST_CONFIG[SETTINGS_LOCATION])
+    TEST_CONFIG["SETTINGS_LOCATION"] = self.MakeEmptyTempFile()
+    TextToFile("{\"enable_auto_push\": false}",
+               TEST_CONFIG["SETTINGS_LOCATION"])
 
     self.Expect([
       Cmd("git status -s -uno", ""),
@@ -963,7 +958,7 @@
 
   def testAutoPushStoppedByTreeStatus(self):
     TextToFile("", os.path.join(TEST_CONFIG["DEFAULT_CWD"], ".git"))
-    TEST_CONFIG[SETTINGS_LOCATION] = "~/.doesnotexist"
+    TEST_CONFIG["SETTINGS_LOCATION"] = "~/.doesnotexist"
 
     self.Expect([
       Cmd("git status -s -uno", ""),
@@ -986,7 +981,7 @@
     ])
 
     result = auto_roll.AutoRoll(TEST_CONFIG, self).Run(
-        AUTO_PUSH_ARGS + ["-c", TEST_CONFIG[CHROMIUM]])
+        AUTO_PUSH_ARGS + ["-c", TEST_CONFIG["CHROMIUM"]])
     self.assertEquals(0, result)
 
   # Snippet from the original DEPS file.
@@ -1002,8 +997,8 @@
 """
 
   def testAutoRollUpToDate(self):
-    TEST_CONFIG[CHROMIUM] = self.MakeEmptyTempDirectory()
-    TextToFile(self.FAKE_DEPS, os.path.join(TEST_CONFIG[CHROMIUM], "DEPS"))
+    TEST_CONFIG["CHROMIUM"] = self.MakeEmptyTempDirectory()
+    TextToFile(self.FAKE_DEPS, os.path.join(TEST_CONFIG["CHROMIUM"], "DEPS"))
     self.Expect([
       URL("https://codereview.chromium.org/search",
           "owner=author%40chromium.org&limit=30&closed=3&format=json",
@@ -1016,14 +1011,14 @@
     ])
 
     result = auto_roll.AutoRoll(TEST_CONFIG, self).Run(
-        AUTO_PUSH_ARGS + ["-c", TEST_CONFIG[CHROMIUM]])
+        AUTO_PUSH_ARGS + ["-c", TEST_CONFIG["CHROMIUM"]])
     self.assertEquals(0, result)
 
   def testAutoRoll(self):
-    TEST_CONFIG[CHROMIUM] = self.MakeEmptyTempDirectory()
-    TextToFile(self.FAKE_DEPS, os.path.join(TEST_CONFIG[CHROMIUM], "DEPS"))
-    TEST_CONFIG[CLUSTERFUZZ_API_KEY_FILE]  = self.MakeEmptyTempFile()
-    TextToFile("fake key", TEST_CONFIG[CLUSTERFUZZ_API_KEY_FILE])
+    TEST_CONFIG["CHROMIUM"] = self.MakeEmptyTempDirectory()
+    TextToFile(self.FAKE_DEPS, os.path.join(TEST_CONFIG["CHROMIUM"], "DEPS"))
+    TEST_CONFIG["CLUSTERFUZZ_API_KEY_FILE"]  = self.MakeEmptyTempFile()
+    TextToFile("fake key", TEST_CONFIG["CLUSTERFUZZ_API_KEY_FILE"])
 
     self.Expect([
       URL("https://codereview.chromium.org/search",
@@ -1037,20 +1032,19 @@
     ])
 
     result = auto_roll.AutoRoll(TEST_CONFIG, self).Run(
-        AUTO_PUSH_ARGS + ["-c", TEST_CONFIG[CHROMIUM], "--roll"])
+        AUTO_PUSH_ARGS + ["-c", TEST_CONFIG["CHROMIUM"], "--roll"])
     self.assertEquals(0, result)
 
   def testMergeToBranch(self):
-    TEST_CONFIG[ALREADY_MERGING_SENTINEL_FILE] = self.MakeEmptyTempFile()
+    TEST_CONFIG["ALREADY_MERGING_SENTINEL_FILE"] = self.MakeEmptyTempFile()
     TextToFile("", os.path.join(TEST_CONFIG["DEFAULT_CWD"], ".git"))
-    TEST_CONFIG[VERSION_FILE] = self.MakeEmptyTempFile()
     self.WriteFakeVersionFile(build=5)
     os.environ["EDITOR"] = "vi"
     extra_patch = self.MakeEmptyTempFile()
 
     def VerifyPatch(patch):
       return lambda: self.assertEquals(patch,
-          FileToText(TEST_CONFIG[TEMPORARY_PATCH_FILE]))
+          FileToText(TEST_CONFIG["TEMPORARY_PATCH_FILE"]))
 
     msg = """Version 3.22.5.1 (merged r12345, r23456, r34567, r45678, r56789)
 
@@ -1069,9 +1063,10 @@
 """
 
     def VerifySVNCommit():
-      commit = FileToText(TEST_CONFIG[COMMITMSG_FILE])
+      commit = FileToText(TEST_CONFIG["COMMITMSG_FILE"])
       self.assertEquals(msg, commit)
-      version = FileToText(TEST_CONFIG[VERSION_FILE])
+      version = FileToText(
+          os.path.join(TEST_CONFIG["DEFAULT_CWD"], VERSION_FILE))
       self.assertTrue(re.search(r"#define MINOR_VERSION\s+22", version))
       self.assertTrue(re.search(r"#define BUILD_NUMBER\s+5", version))
       self.assertTrue(re.search(r"#define PATCH_LEVEL\s+1", version))
@@ -1082,7 +1077,7 @@
       Cmd("git status -s -b -uno", "## some_branch\n"),
       Cmd("git svn fetch", ""),
       Cmd("git branch", "  branch1\n* branch2\n"),
-      Cmd("git checkout -b %s svn/trunk" % TEST_CONFIG[BRANCHNAME], ""),
+      Cmd("git checkout -b %s svn/trunk" % TEST_CONFIG["BRANCHNAME"], ""),
       Cmd(("git log --format=%H --grep=\"Port r12345\" "
            "--reverse svn/bleeding_edge"),
           "hash1\nhash2"),
@@ -1120,31 +1115,31 @@
       Cmd("git log -1 hash5", "Revert \"Something\"\nBUG=none"),
       Cmd("git log -1 -p hash4", "patch4"),
       Cmd(("git apply --index --reject \"%s\"" %
-           TEST_CONFIG[TEMPORARY_PATCH_FILE]),
+           TEST_CONFIG["TEMPORARY_PATCH_FILE"]),
           "", cb=VerifyPatch("patch4")),
       Cmd("git log -1 -p hash2", "patch2"),
       Cmd(("git apply --index --reject \"%s\"" %
-           TEST_CONFIG[TEMPORARY_PATCH_FILE]),
+           TEST_CONFIG["TEMPORARY_PATCH_FILE"]),
           "", cb=VerifyPatch("patch2")),
       Cmd("git log -1 -p hash3", "patch3"),
       Cmd(("git apply --index --reject \"%s\"" %
-           TEST_CONFIG[TEMPORARY_PATCH_FILE]),
+           TEST_CONFIG["TEMPORARY_PATCH_FILE"]),
           "", cb=VerifyPatch("patch3")),
       Cmd("git log -1 -p hash1", "patch1"),
       Cmd(("git apply --index --reject \"%s\"" %
-           TEST_CONFIG[TEMPORARY_PATCH_FILE]),
+           TEST_CONFIG["TEMPORARY_PATCH_FILE"]),
           "", cb=VerifyPatch("patch1")),
       Cmd("git log -1 -p hash5", "patch5\n"),
       Cmd(("git apply --index --reject \"%s\"" %
-           TEST_CONFIG[TEMPORARY_PATCH_FILE]),
+           TEST_CONFIG["TEMPORARY_PATCH_FILE"]),
           "", cb=VerifyPatch("patch5\n")),
       Cmd("git apply --index --reject \"%s\"" % extra_patch, ""),
       RL("Y"),  # Automatically increment patch level?
-      Cmd("git commit -aF \"%s\"" % TEST_CONFIG[COMMITMSG_FILE], ""),
+      Cmd("git commit -aF \"%s\"" % TEST_CONFIG["COMMITMSG_FILE"], ""),
       RL("reviewer@chromium.org"),  # V8 reviewer.
       Cmd("git cl upload --send-mail -r \"reviewer@chromium.org\" "
           "--bypass-hooks", ""),
-      Cmd("git checkout -f %s" % TEST_CONFIG[BRANCHNAME], ""),
+      Cmd("git checkout -f %s" % TEST_CONFIG["BRANCHNAME"], ""),
       RL("LGTM"),  # Enter LGTM for V8 CL.
       Cmd("git cl presubmit", "Presubmit successfull\n"),
       Cmd("git cl dcommit -f --bypass-hooks", "Closing issue\n",
@@ -1157,7 +1152,7 @@
            "https://v8.googlecode.com/svn/tags/3.22.5.1 -m "
            "\"Tagging version 3.22.5.1\""), ""),
       Cmd("git checkout -f some_branch", ""),
-      Cmd("git branch -D %s" % TEST_CONFIG[BRANCHNAME], ""),
+      Cmd("git branch -D %s" % TEST_CONFIG["BRANCHNAME"], ""),
     ])
 
     # r12345 and r34567 are patches. r23456 (included) and r45678 are the MIPS
@@ -1223,11 +1218,10 @@
 """
     json_output = self.MakeEmptyTempFile()
     csv_output = self.MakeEmptyTempFile()
-    TEST_CONFIG[VERSION_FILE] = self.MakeEmptyTempFile()
     self.WriteFakeVersionFile()
 
-    TEST_CONFIG[CHROMIUM] = self.MakeEmptyTempDirectory()
-    chrome_dir = TEST_CONFIG[CHROMIUM]
+    TEST_CONFIG["CHROMIUM"] = self.MakeEmptyTempDirectory()
+    chrome_dir = TEST_CONFIG["CHROMIUM"]
     chrome_v8_dir = os.path.join(chrome_dir, "v8")
     os.makedirs(chrome_v8_dir)
     def WriteDEPS(revision):
@@ -1248,42 +1242,42 @@
       Cmd("git status -s -b -uno", "## some_branch\n"),
       Cmd("git svn fetch", ""),
       Cmd("git branch", "  branch1\n* branch2\n"),
-      Cmd("git checkout -b %s" % TEST_CONFIG[BRANCHNAME], ""),
+      Cmd("git checkout -b %s" % TEST_CONFIG["BRANCHNAME"], ""),
       Cmd("git branch -r", "  svn/3.21\n  svn/3.3\n"),
       Cmd("git reset --hard svn/3.3", ""),
       Cmd("git log --format=%H", "hash1\nhash2"),
       Cmd("git diff --name-only hash1 hash1^", ""),
-      Cmd("git diff --name-only hash2 hash2^", TEST_CONFIG[VERSION_FILE]),
-      Cmd("git checkout -f hash2 -- %s" % TEST_CONFIG[VERSION_FILE], "",
+      Cmd("git diff --name-only hash2 hash2^", VERSION_FILE),
+      Cmd("git checkout -f hash2 -- %s" % VERSION_FILE, "",
           cb=ResetVersion(3, 1, 1)),
       Cmd("git log -1 --format=%B hash2",
           "Version 3.3.1.1 (merged 12)\n\nReview URL: fake.com\n"),
       Cmd("git log -1 --format=%s hash2", ""),
       Cmd("git svn find-rev hash2", "234"),
       Cmd("git log -1 --format=%ci hash2", "18:15"),
-      Cmd("git checkout -f HEAD -- %s" % TEST_CONFIG[VERSION_FILE], "",
+      Cmd("git checkout -f HEAD -- %s" % VERSION_FILE, "",
           cb=ResetVersion(22, 5)),
       Cmd("git reset --hard svn/3.21", ""),
       Cmd("git log --format=%H", "hash3\nhash4\nhash5\n"),
-      Cmd("git diff --name-only hash3 hash3^", TEST_CONFIG[VERSION_FILE]),
-      Cmd("git checkout -f hash3 -- %s" % TEST_CONFIG[VERSION_FILE], "",
+      Cmd("git diff --name-only hash3 hash3^", VERSION_FILE),
+      Cmd("git checkout -f hash3 -- %s" % VERSION_FILE, "",
           cb=ResetVersion(21, 2)),
       Cmd("git log -1 --format=%B hash3", ""),
       Cmd("git log -1 --format=%s hash3", ""),
       Cmd("git svn find-rev hash3", "123"),
       Cmd("git log -1 --format=%ci hash3", "03:15"),
-      Cmd("git checkout -f HEAD -- %s" % TEST_CONFIG[VERSION_FILE], "",
+      Cmd("git checkout -f HEAD -- %s" % VERSION_FILE, "",
           cb=ResetVersion(22, 5)),
       Cmd("git reset --hard svn/trunk", ""),
       Cmd("git log --format=%H", "hash6\n"),
-      Cmd("git diff --name-only hash6 hash6^", TEST_CONFIG[VERSION_FILE]),
-      Cmd("git checkout -f hash6 -- %s" % TEST_CONFIG[VERSION_FILE], "",
+      Cmd("git diff --name-only hash6 hash6^", VERSION_FILE),
+      Cmd("git checkout -f hash6 -- %s" % VERSION_FILE, "",
           cb=ResetVersion(22, 3)),
       Cmd("git log -1 --format=%B hash6", ""),
       Cmd("git log -1 --format=%s hash6", ""),
       Cmd("git svn find-rev hash6", "345"),
       Cmd("git log -1 --format=%ci hash6", ""),
-      Cmd("git checkout -f HEAD -- %s" % TEST_CONFIG[VERSION_FILE], "",
+      Cmd("git checkout -f HEAD -- %s" % VERSION_FILE, "",
           cb=ResetVersion(22, 5)),
       Cmd("git reset --hard svn/bleeding_edge", ""),
       Cmd("svn log https://v8.googlecode.com/svn/tags -v --limit 20",
@@ -1297,7 +1291,7 @@
       Cmd("git status -s -uno", "", cwd=chrome_dir),
       Cmd("git checkout -f master", "", cwd=chrome_dir),
       Cmd("git pull", "", cwd=chrome_dir),
-      Cmd("git checkout -b %s" % TEST_CONFIG[BRANCHNAME], "", cwd=chrome_dir),
+      Cmd("git checkout -b %s" % TEST_CONFIG["BRANCHNAME"], "", cwd=chrome_dir),
       Cmd("git fetch origin", "", cwd=chrome_v8_dir),
       Cmd("git log --format=%H --grep=\"V8\"", "c_hash1\nc_hash2\nc_hash3\n",
           cwd=chrome_dir),
@@ -1325,12 +1319,12 @@
       Cmd("git checkout -f HEAD -- DEPS", "", cb=ResetDEPS(567),
           cwd=chrome_dir),
       Cmd("git checkout -f master", "", cwd=chrome_dir),
-      Cmd("git branch -D %s" % TEST_CONFIG[BRANCHNAME], "", cwd=chrome_dir),
+      Cmd("git branch -D %s" % TEST_CONFIG["BRANCHNAME"], "", cwd=chrome_dir),
       Cmd("git checkout -f some_branch", ""),
-      Cmd("git branch -D %s" % TEST_CONFIG[BRANCHNAME], ""),
+      Cmd("git branch -D %s" % TEST_CONFIG["BRANCHNAME"], ""),
     ])
 
-    args = ["-c", TEST_CONFIG[CHROMIUM],
+    args = ["-c", TEST_CONFIG["CHROMIUM"],
             "--json", json_output,
             "--csv", csv_output,
             "--max-releases", "1"]
@@ -1371,7 +1365,6 @@
 
 
   def _bumpUpVersion(self):
-    TEST_CONFIG[VERSION_FILE] = self.MakeEmptyTempFile()
     self.WriteFakeVersionFile()
 
     def ResetVersion(minor, build, patch=0):
@@ -1422,17 +1415,18 @@
     BumpUpVersion(TEST_CONFIG, self).Run(["-a", "author@chromium.org"])
 
   def testBumpUpVersionSvn(self):
+    svn_root = self.MakeEmptyTempDirectory()
     expectations = self._bumpUpVersion()
     expectations += [
       Cmd("git diff HEAD^ HEAD", "patch content"),
-      Cmd("svn update", "", cwd="[SVN_ROOT]"),
-      Cmd("svn status", "", cwd="[SVN_ROOT]"),
+      Cmd("svn update", "", cwd=svn_root),
+      Cmd("svn status", "", cwd=svn_root),
       Cmd("patch -d branches/bleeding_edge -p1 -i %s" %
-          TEST_CONFIG[PATCH_FILE], "Applied patch...", cwd="[SVN_ROOT]"),
+          TEST_CONFIG["PATCH_FILE"], "Applied patch...", cwd=svn_root),
       Cmd("svn commit --non-interactive --username=author@chromium.org "
           "--config-dir=[CONFIG_DIR] "
           "-m \"[Auto-roll] Bump up version to 3.11.6.0\"",
-          "", cwd="[SVN_ROOT]"),
+          "", cwd=svn_root),
       Cmd("git checkout -f bleeding_edge", ""),
       Cmd("git branch", "auto-bump-up-version\n* bleeding_edge"),
       Cmd("git branch -D auto-bump-up-version", ""),
@@ -1441,11 +1435,10 @@
 
     BumpUpVersion(TEST_CONFIG, self).Run(
         ["-a", "author@chromium.org",
-         "--svn", "[SVN_ROOT]",
+         "--svn", svn_root,
          "--svn-config", "[CONFIG_DIR]"])
 
   def testAutoTag(self):
-    TEST_CONFIG[VERSION_FILE] = self.MakeEmptyTempFile()
     self.WriteFakeVersionFile()
 
     def ResetVersion(minor, build, patch=0):
@@ -1460,24 +1453,24 @@
       Cmd("git branch", "  branch1\n* branch2\n"),
       Cmd("git checkout -f master", ""),
       Cmd("git svn rebase", ""),
-      Cmd("git checkout -b %s" % TEST_CONFIG[BRANCHNAME], "",
+      Cmd("git checkout -b %s" % TEST_CONFIG["BRANCHNAME"], "",
           cb=ResetVersion(4, 5)),
       Cmd("git branch -r",
           "svn/tags/3.4.2\nsvn/tags/3.2.1.0\nsvn/branches/3.4"),
       Cmd(("git log --format=%H --grep="
            "\"\\[Auto\\-roll\\] Bump up version to\""),
           "hash125\nhash118\nhash111\nhash101"),
-      Cmd("git checkout -f hash125 -- %s" % TEST_CONFIG[VERSION_FILE], "",
+      Cmd("git checkout -f hash125 -- %s" % VERSION_FILE, "",
           cb=ResetVersion(4, 4)),
-      Cmd("git checkout -f HEAD -- %s" % TEST_CONFIG[VERSION_FILE], "",
+      Cmd("git checkout -f HEAD -- %s" % VERSION_FILE, "",
           cb=ResetVersion(4, 5)),
-      Cmd("git checkout -f hash118 -- %s" % TEST_CONFIG[VERSION_FILE], "",
+      Cmd("git checkout -f hash118 -- %s" % VERSION_FILE, "",
           cb=ResetVersion(4, 3)),
-      Cmd("git checkout -f HEAD -- %s" % TEST_CONFIG[VERSION_FILE], "",
+      Cmd("git checkout -f HEAD -- %s" % VERSION_FILE, "",
           cb=ResetVersion(4, 5)),
-      Cmd("git checkout -f hash111 -- %s" % TEST_CONFIG[VERSION_FILE], "",
+      Cmd("git checkout -f hash111 -- %s" % VERSION_FILE, "",
           cb=ResetVersion(4, 2)),
-      Cmd("git checkout -f HEAD -- %s" % TEST_CONFIG[VERSION_FILE], "",
+      Cmd("git checkout -f HEAD -- %s" % VERSION_FILE, "",
           cb=ResetVersion(4, 5)),
       URL("https://v8-status.appspot.com/revisions?format=json",
           "[{\"revision\": \"126\", \"status\": true},"
@@ -1490,19 +1483,17 @@
       Cmd("git reset --hard hash123", ""),
       Cmd("git svn tag 3.4.3 -m \"Tagging version 3.4.3\"", ""),
       Cmd("git checkout -f some_branch", ""),
-      Cmd("git branch -D %s" % TEST_CONFIG[BRANCHNAME], ""),
+      Cmd("git branch -D %s" % TEST_CONFIG["BRANCHNAME"], ""),
     ])
 
     AutoTag(TEST_CONFIG, self).Run(["-a", "author@chromium.org"])
 
   # Test that we bail out if the last change was a version change.
   def testBumpUpVersionBailout1(self):
-    TEST_CONFIG[VERSION_FILE] = self.MakeEmptyTempFile()
     self._state["latest"] = "latest_hash"
 
     self.Expect([
-      Cmd("git diff --name-only latest_hash latest_hash^",
-          TEST_CONFIG[VERSION_FILE]),
+      Cmd("git diff --name-only latest_hash latest_hash^", VERSION_FILE),
     ])
 
     self.assertEquals(0,
@@ -1510,12 +1501,10 @@
 
   # Test that we bail out if the lkgr was a version change.
   def testBumpUpVersionBailout2(self):
-    TEST_CONFIG[VERSION_FILE] = self.MakeEmptyTempFile()
     self._state["lkgr"] = "lkgr_hash"
 
     self.Expect([
-      Cmd("git diff --name-only lkgr_hash lkgr_hash^",
-          TEST_CONFIG[VERSION_FILE]),
+      Cmd("git diff --name-only lkgr_hash lkgr_hash^", VERSION_FILE),
     ])
 
     self.assertEquals(0,
@@ -1524,7 +1513,6 @@
   # Test that we bail out if the last version is already newer than the lkgr's
   # version.
   def testBumpUpVersionBailout3(self):
-    TEST_CONFIG[VERSION_FILE] = self.MakeEmptyTempFile()
     self._state["lkgr"] = "lkgr_hash"
     self._state["lkgr_version"] = "3.22.4.0"
     self._state["latest_version"] = "3.22.5.0"