Version 3.31.0 (based on 50a829b3cfe8ec0b7ccd1b7e154e632c9a73e5f0)

Classes: Partial fix for constructor not calling super (issues 3661, 3672).

Performance and stability improvements on all platforms.

git-svn-id: https://v8.googlecode.com/svn/trunk@25239 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
diff --git a/Makefile b/Makefile
index 3b02f52..d04d9ef 100644
--- a/Makefile
+++ b/Makefile
@@ -64,6 +64,10 @@
 ifeq ($(verifyheap), on)
   GYPFLAGS += -Dv8_enable_verify_heap=1
 endif
+# tracemaps=on
+ifeq ($(tracemaps), on)
+  GYPFLAGS += -Dv8_trace_maps=1
+endif
 # backtrace=off
 ifeq ($(backtrace), off)
   GYPFLAGS += -Dv8_enable_backtrace=0
diff --git a/build/features.gypi b/build/features.gypi
index 7ce66e4..612055a 100644
--- a/build/features.gypi
+++ b/build/features.gypi
@@ -39,6 +39,8 @@
 
     'v8_enable_verify_heap%': 0,
 
+    'v8_trace_maps%': 0,
+
     'v8_use_snapshot%': 'true',
 
     'v8_enable_verify_predictable%': 0,
@@ -77,6 +79,9 @@
       ['v8_enable_verify_heap==1', {
         'defines': ['VERIFY_HEAP',],
       }],
+      ['v8_trace_maps==1', {
+        'defines': ['TRACE_MAPS',],
+      }],
       ['v8_enable_verify_predictable==1', {
         'defines': ['VERIFY_PREDICTABLE',],
       }],
diff --git a/build/toolchain.gypi b/build/toolchain.gypi
index 20c2c94..82a10e8 100644
--- a/build/toolchain.gypi
+++ b/build/toolchain.gypi
@@ -852,7 +852,8 @@
           'V8_ENABLE_CHECKS',
           'OBJECT_PRINT',
           'VERIFY_HEAP',
-          'DEBUG'
+          'DEBUG',
+          'TRACE_MAPS'
         ],
         'conditions': [
           ['OS=="linux" or OS=="freebsd" or OS=="openbsd" or OS=="netbsd" or \
diff --git a/include/v8.h b/include/v8.h
index d5433a6..ddae1ed 100644
--- a/include/v8.h
+++ b/include/v8.h
@@ -6110,7 +6110,7 @@
   static const int kNullValueRootIndex = 7;
   static const int kTrueValueRootIndex = 8;
   static const int kFalseValueRootIndex = 9;
-  static const int kEmptyStringRootIndex = 154;
+  static const int kEmptyStringRootIndex = 155;
 
   // The external allocation limit should be below 256 MB on all architectures
   // to avoid that resource-constrained embedders run low on memory.
diff --git a/src/api.cc b/src/api.cc
index 75fc2d7..7380cc4 100644
--- a/src/api.cc
+++ b/src/api.cc
@@ -3636,7 +3636,9 @@
       i::JSObject::SetAccessor(Utils::OpenHandle(obj), info),
       false);
   if (result->IsUndefined()) return false;
-  if (fast) i::JSObject::MigrateSlowToFast(Utils::OpenHandle(obj), 0);
+  if (fast) {
+    i::JSObject::MigrateSlowToFast(Utils::OpenHandle(obj), 0, "APISetAccessor");
+  }
   return true;
 }
 
@@ -3822,7 +3824,8 @@
   // as optimized code does not always handle access checks.
   i::Deoptimizer::DeoptimizeGlobalObject(*obj);
 
-  i::Handle<i::Map> new_map = i::Map::Copy(i::Handle<i::Map>(obj->map()));
+  i::Handle<i::Map> new_map =
+      i::Map::Copy(i::Handle<i::Map>(obj->map()), "APITurnOnAccessCheck");
   new_map->set_is_access_check_needed(true);
   i::JSObject::MigrateToMap(obj, new_map);
 }
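
Both api.cc hunks are part of a cross-cutting change: Map::Copy and JSObject::MigrateSlowToFast now take a reason string so TRACE_MAPS output can attribute each new map to the call site that created it. The calling convention, sketched with an illustrative label:

    // The string is only consumed by map tracing; callers pick a name
    // that identifies them in the log.
    i::Handle<i::Map> new_map =
        i::Map::Copy(i::Handle<i::Map>(obj->map()), "MyCallSite");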
diff --git a/src/arm/assembler-arm.cc b/src/arm/assembler-arm.cc
index eae38be..a65cf15 100644
--- a/src/arm/assembler-arm.cc
+++ b/src/arm/assembler-arm.cc
@@ -1798,71 +1798,119 @@
 }
 
 
-void Assembler::uxtb(Register dst,
-                     const Operand& src,
-                     Condition cond) {
+void Assembler::sxtb(Register dst, Register src, int rotate, Condition cond) {
+  // Instruction details available in ARM DDI 0406C.b, A8.8.233.
+  // cond(31-28) | 01101010(27-20) | 1111(19-16) |
+  // Rd(15-12) | rotate(11-10) | 00(9-8)| 0111(7-4) | Rm(3-0)
+  DCHECK(!dst.is(pc));
+  DCHECK(!src.is(pc));
+  DCHECK(rotate == 0 || rotate == 8 || rotate == 16 || rotate == 24);
+  emit(cond | 0x6A * B20 | 0xF * B16 | dst.code() * B12 |
+       ((rotate >> 1) & 0xC) * B8 | 7 * B4 | src.code());
+}
+
+
+void Assembler::sxtab(Register dst, Register src1, Register src2, int rotate,
+                      Condition cond) {
+  // Instruction details available in ARM DDI 0406C.b, A8.8.230.
+  // cond(31-28) | 01101010(27-20) | Rn(19-16) |
+  // Rd(15-12) | rotate(11-10) | 00(9-8)| 0111(7-4) | Rm(3-0)
+  DCHECK(!dst.is(pc));
+  DCHECK(!src1.is(pc));
+  DCHECK(!src2.is(pc));
+  DCHECK(rotate == 0 || rotate == 8 || rotate == 16 || rotate == 24);
+  emit(cond | 0x6A * B20 | src1.code() * B16 | dst.code() * B12 |
+       ((rotate >> 1) & 0xC) * B8 | 7 * B4 | src2.code());
+}
+
+
+void Assembler::sxth(Register dst, Register src, int rotate, Condition cond) {
+  // Instruction details available in ARM DDI 0406C.b, A8.8.235.
+  // cond(31-28) | 01101011(27-20) | 1111(19-16) |
+  // Rd(15-12) | rotate(11-10) | 00(9-8)| 0111(7-4) | Rm(3-0)
+  DCHECK(!dst.is(pc));
+  DCHECK(!src.is(pc));
+  DCHECK(rotate == 0 || rotate == 8 || rotate == 16 || rotate == 24);
+  emit(cond | 0x6B * B20 | 0xF * B16 | dst.code() * B12 |
+       ((rotate >> 1) & 0xC) * B8 | 7 * B4 | src.code());
+}
+
+
+void Assembler::sxtah(Register dst, Register src1, Register src2, int rotate,
+                      Condition cond) {
+  // Instruction details available in ARM DDI 0406C.b, A8.8.232.
+  // cond(31-28) | 01101011(27-20) | Rn(19-16) |
+  // Rd(15-12) | rotate(11-10) | 00(9-8)| 0111(7-4) | Rm(3-0)
+  DCHECK(!dst.is(pc));
+  DCHECK(!src1.is(pc));
+  DCHECK(!src2.is(pc));
+  DCHECK(rotate == 0 || rotate == 8 || rotate == 16 || rotate == 24);
+  emit(cond | 0x6B * B20 | src1.code() * B16 | dst.code() * B12 |
+       ((rotate >> 1) & 0xC) * B8 | 7 * B4 | src2.code());
+}
+
+
+void Assembler::uxtb(Register dst, Register src, int rotate, Condition cond) {
   // Instruction details available in ARM DDI 0406C.b, A8.8.274.
   // cond(31-28) | 01101110(27-20) | 1111(19-16) |
   // Rd(15-12) | rotate(11-10) | 00(9-8)| 0111(7-4) | Rm(3-0)
   DCHECK(!dst.is(pc));
-  DCHECK(!src.rm().is(pc));
-  DCHECK(!src.rm().is(no_reg));
-  DCHECK(src.rs().is(no_reg));
-  DCHECK((src.shift_imm_ == 0) ||
-         (src.shift_imm_ == 8) ||
-         (src.shift_imm_ == 16) ||
-         (src.shift_imm_ == 24));
-  // Operand maps ROR #0 to LSL #0.
-  DCHECK((src.shift_op() == ROR) ||
-         ((src.shift_op() == LSL) && (src.shift_imm_ == 0)));
-  emit(cond | 0x6E*B20 | 0xF*B16 | dst.code()*B12 |
-       ((src.shift_imm_ >> 1)&0xC)*B8 | 7*B4 | src.rm().code());
+  DCHECK(!src.is(pc));
+  DCHECK(rotate == 0 || rotate == 8 || rotate == 16 || rotate == 24);
+  emit(cond | 0x6E * B20 | 0xF * B16 | dst.code() * B12 |
+       ((rotate >> 1) & 0xC) * B8 | 7 * B4 | src.code());
 }
 
 
-void Assembler::uxtab(Register dst,
-                      Register src1,
-                      const Operand& src2,
+void Assembler::uxtab(Register dst, Register src1, Register src2, int rotate,
                       Condition cond) {
   // Instruction details available in ARM DDI 0406C.b, A8.8.271.
   // cond(31-28) | 01101110(27-20) | Rn(19-16) |
   // Rd(15-12) | rotate(11-10) | 00(9-8)| 0111(7-4) | Rm(3-0)
   DCHECK(!dst.is(pc));
   DCHECK(!src1.is(pc));
-  DCHECK(!src2.rm().is(pc));
-  DCHECK(!src2.rm().is(no_reg));
-  DCHECK(src2.rs().is(no_reg));
-  DCHECK((src2.shift_imm_ == 0) ||
-         (src2.shift_imm_ == 8) ||
-         (src2.shift_imm_ == 16) ||
-         (src2.shift_imm_ == 24));
-  // Operand maps ROR #0 to LSL #0.
-  DCHECK((src2.shift_op() == ROR) ||
-         ((src2.shift_op() == LSL) && (src2.shift_imm_ == 0)));
-  emit(cond | 0x6E*B20 | src1.code()*B16 | dst.code()*B12 |
-       ((src2.shift_imm_ >> 1) &0xC)*B8 | 7*B4 | src2.rm().code());
+  DCHECK(!src2.is(pc));
+  DCHECK(rotate == 0 || rotate == 8 || rotate == 16 || rotate == 24);
+  emit(cond | 0x6E * B20 | src1.code() * B16 | dst.code() * B12 |
+       ((rotate >> 1) & 0xC) * B8 | 7 * B4 | src2.code());
 }
 
 
-void Assembler::uxtb16(Register dst,
-                       const Operand& src,
-                       Condition cond) {
+void Assembler::uxtb16(Register dst, Register src, int rotate, Condition cond) {
   // Instruction details available in ARM DDI 0406C.b, A8.8.275.
   // cond(31-28) | 01101100(27-20) | 1111(19-16) |
   // Rd(15-12) | rotate(11-10) | 00(9-8)| 0111(7-4) | Rm(3-0)
   DCHECK(!dst.is(pc));
-  DCHECK(!src.rm().is(pc));
-  DCHECK(!src.rm().is(no_reg));
-  DCHECK(src.rs().is(no_reg));
-  DCHECK((src.shift_imm_ == 0) ||
-         (src.shift_imm_ == 8) ||
-         (src.shift_imm_ == 16) ||
-         (src.shift_imm_ == 24));
-  // Operand maps ROR #0 to LSL #0.
-  DCHECK((src.shift_op() == ROR) ||
-         ((src.shift_op() == LSL) && (src.shift_imm_ == 0)));
-  emit(cond | 0x6C*B20 | 0xF*B16 | dst.code()*B12 |
-       ((src.shift_imm_ >> 1)&0xC)*B8 | 7*B4 | src.rm().code());
+  DCHECK(!src.is(pc));
+  DCHECK(rotate == 0 || rotate == 8 || rotate == 16 || rotate == 24);
+  emit(cond | 0x6C * B20 | 0xF * B16 | dst.code() * B12 |
+       ((rotate >> 1) & 0xC) * B8 | 7 * B4 | src.code());
+}
+
+
+void Assembler::uxth(Register dst, Register src, int rotate, Condition cond) {
+  // Instruction details available in ARM DDI 0406C.b, A8.8.276.
+  // cond(31-28) | 01101111(27-20) | 1111(19-16) |
+  // Rd(15-12) | rotate(11-10) | 00(9-8)| 0111(7-4) | Rm(3-0)
+  DCHECK(!dst.is(pc));
+  DCHECK(!src.is(pc));
+  DCHECK(rotate == 0 || rotate == 8 || rotate == 16 || rotate == 24);
+  emit(cond | 0x6F * B20 | 0xF * B16 | dst.code() * B12 |
+       ((rotate >> 1) & 0xC) * B8 | 7 * B4 | src.code());
+}
+
+
+void Assembler::uxtah(Register dst, Register src1, Register src2, int rotate,
+                      Condition cond) {
+  // Instruction details available in ARM DDI 0406C.b, A8.8.273.
+  // cond(31-28) | 01101111(27-20) | Rn(19-16) |
+  // Rd(15-12) | rotate(11-10) | 00(9-8)| 0111(7-4) | Rm(3-0)
+  DCHECK(!dst.is(pc));
+  DCHECK(!src1.is(pc));
+  DCHECK(!src2.is(pc));
+  DCHECK(rotate == 0 || rotate == 8 || rotate == 16 || rotate == 24);
+  emit(cond | 0x6F * B20 | src1.code() * B16 | dst.code() * B12 |
+       ((rotate >> 1) & 0xC) * B8 | 7 * B4 | src2.code());
 }
 
 
diff --git a/src/arm/assembler-arm.h b/src/arm/assembler-arm.h
index 9087fab..54c9278 100644
--- a/src/arm/assembler-arm.h
+++ b/src/arm/assembler-arm.h
@@ -1034,12 +1034,20 @@
   void pkhtb(Register dst, Register src1, const Operand& src2,
              Condition cond = al);
 
-  void uxtb(Register dst, const Operand& src, Condition cond = al);
-
-  void uxtab(Register dst, Register src1, const Operand& src2,
+  void sxtb(Register dst, Register src, int rotate = 0, Condition cond = al);
+  void sxtab(Register dst, Register src1, Register src2, int rotate = 0,
+             Condition cond = al);
+  void sxth(Register dst, Register src, int rotate = 0, Condition cond = al);
+  void sxtah(Register dst, Register src1, Register src2, int rotate = 0,
              Condition cond = al);
 
-  void uxtb16(Register dst, const Operand& src, Condition cond = al);
+  void uxtb(Register dst, Register src, int rotate = 0, Condition cond = al);
+  void uxtab(Register dst, Register src1, Register src2, int rotate = 0,
+             Condition cond = al);
+  void uxtb16(Register dst, Register src, int rotate = 0, Condition cond = al);
+  void uxth(Register dst, Register src, int rotate = 0, Condition cond = al);
+  void uxtah(Register dst, Register src1, Register src2, int rotate = 0,
+             Condition cond = al);
 
   // Status register access instructions
 
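The new signatures take the source as a plain register plus an optional byte rotation (0, 8, 16 or 24) instead of an Operand carrying an explicit ROR, matching the encoding, which stores rotate/8 in bits 11:10. A usage sketch with illustrative registers:

    __ sxtb(r0, r1);           // r0 = sign-extended low byte of r1
    __ sxth(r0, r1);           // r0 = sign-extended low half-word of r1
    __ uxtb(r0, r1, 8);        // r0 = zero-extended bits 15:8 of r1
    __ uxtah(r2, r3, r4, 16);  // r2 = r3 + zero-extended bits 31:16 of r4
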
diff --git a/src/arm/code-stubs-arm.cc b/src/arm/code-stubs-arm.cc
index a0e7e4a..e0750cd 100644
--- a/src/arm/code-stubs-arm.cc
+++ b/src/arm/code-stubs-arm.cc
@@ -2686,6 +2686,10 @@
 void CallICStub::Generate(MacroAssembler* masm) {
   // r1 - function
   // r3 - slot id (Smi)
+  const int with_types_offset =
+      FixedArray::OffsetOfElementAt(TypeFeedbackVector::kWithTypesIndex);
+  const int generic_offset =
+      FixedArray::OffsetOfElementAt(TypeFeedbackVector::kGenericCountIndex);
   Label extra_checks_or_miss, slow_start;
   Label slow, non_function, wrap, cont;
   Label have_js_function;
@@ -2724,37 +2728,70 @@
   }
 
   __ bind(&extra_checks_or_miss);
-  Label miss;
+  Label uninitialized, miss;
 
   __ CompareRoot(r4, Heap::kmegamorphic_symbolRootIndex);
   __ b(eq, &slow_start);
-  __ CompareRoot(r4, Heap::kuninitialized_symbolRootIndex);
-  __ b(eq, &miss);
 
-  if (!FLAG_trace_ic) {
-    // We are going megamorphic. If the feedback is a JSFunction, it is fine
-    // to handle it here. More complex cases are dealt with in the runtime.
-    __ AssertNotSmi(r4);
-    __ CompareObjectType(r4, r5, r5, JS_FUNCTION_TYPE);
-    __ b(ne, &miss);
-    __ add(r4, r2, Operand::PointerOffsetFromSmiKey(r3));
-    __ LoadRoot(ip, Heap::kmegamorphic_symbolRootIndex);
-    __ str(ip, FieldMemOperand(r4, FixedArray::kHeaderSize));
-    // We have to update statistics for runtime profiling.
-    const int with_types_offset =
-        FixedArray::OffsetOfElementAt(TypeFeedbackVector::kWithTypesIndex);
-    __ ldr(r4, FieldMemOperand(r2, with_types_offset));
-    __ sub(r4, r4, Operand(Smi::FromInt(1)));
-    __ str(r4, FieldMemOperand(r2, with_types_offset));
-    const int generic_offset =
-        FixedArray::OffsetOfElementAt(TypeFeedbackVector::kGenericCountIndex);
-    __ ldr(r4, FieldMemOperand(r2, generic_offset));
-    __ add(r4, r4, Operand(Smi::FromInt(1)));
-    __ str(r4, FieldMemOperand(r2, generic_offset));
-    __ jmp(&slow_start);
+  // The following cases attempt to handle MISS cases without going to the
+  // runtime.
+  if (FLAG_trace_ic) {
+    __ jmp(&miss);
   }
 
-  // We are here because tracing is on or we are going monomorphic.
+  __ CompareRoot(r4, Heap::kuninitialized_symbolRootIndex);
+  __ b(eq, &uninitialized);
+
+  // We are going megamorphic. If the feedback is a JSFunction, it is fine
+  // to handle it here. More complex cases are dealt with in the runtime.
+  __ AssertNotSmi(r4);
+  __ CompareObjectType(r4, r5, r5, JS_FUNCTION_TYPE);
+  __ b(ne, &miss);
+  __ add(r4, r2, Operand::PointerOffsetFromSmiKey(r3));
+  __ LoadRoot(ip, Heap::kmegamorphic_symbolRootIndex);
+  __ str(ip, FieldMemOperand(r4, FixedArray::kHeaderSize));
+  // We have to update statistics for runtime profiling.
+  __ ldr(r4, FieldMemOperand(r2, with_types_offset));
+  __ sub(r4, r4, Operand(Smi::FromInt(1)));
+  __ str(r4, FieldMemOperand(r2, with_types_offset));
+  __ ldr(r4, FieldMemOperand(r2, generic_offset));
+  __ add(r4, r4, Operand(Smi::FromInt(1)));
+  __ str(r4, FieldMemOperand(r2, generic_offset));
+  __ jmp(&slow_start);
+
+  __ bind(&uninitialized);
+
+  // We are going monomorphic, provided we actually have a JSFunction.
+  __ JumpIfSmi(r1, &miss);
+
+  // Go to the miss case if we do not have a function.
+  __ CompareObjectType(r1, r4, r4, JS_FUNCTION_TYPE);
+  __ b(ne, &miss);
+
+  // Make sure the function is not the Array() function, which requires special
+  // behavior on MISS.
+  __ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, r4);
+  __ cmp(r1, r4);
+  __ b(eq, &miss);
+
+  // Update stats.
+  __ ldr(r4, FieldMemOperand(r2, with_types_offset));
+  __ add(r4, r4, Operand(Smi::FromInt(1)));
+  __ str(r4, FieldMemOperand(r2, with_types_offset));
+
+  // Store the function.
+  __ add(r4, r2, Operand::PointerOffsetFromSmiKey(r3));
+  __ add(r4, r4, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+  __ str(r1, MemOperand(r4, 0));
+
+  // Update the write barrier.
+  __ mov(r5, r1);
+  __ RecordWrite(r2, r4, r5, kLRHasNotBeenSaved, kDontSaveFPRegs,
+                 EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
+  __ jmp(&have_js_function);
+
+  // We are here because tracing is on or we encountered a MISS case we can't
+  // handle here.
   __ bind(&miss);
   GenerateMiss(masm);
 
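The stub's new behavior, restated as a stand-alone C++ mock (a hedged sketch; none of these types are V8's, and the real code also handles receiver wrapping and the slow path):

    enum class Slot { kUninitialized, kMonomorphic, kMegamorphic };

    struct Feedback {
      Slot state = Slot::kUninitialized;
      const void* cached = nullptr;
      int with_types = 0;  // runtime-profiler counters kept in the vector
      int generic = 0;
    };

    // Called when the cached feedback did not match the callee.
    void OnCallMiss(Feedback& f, const void* callee,
                    bool callee_is_js_function, bool callee_is_array_function,
                    bool cached_is_js_function) {
      if (f.state == Slot::kMonomorphic && cached_is_js_function) {
        // Go megamorphic right here instead of entering the runtime.
        f.state = Slot::kMegamorphic;
        --f.with_types;
        ++f.generic;
      } else if (f.state == Slot::kUninitialized && callee_is_js_function &&
                 !callee_is_array_function) {
        // New in this patch: learn monomorphically in generated code.
        // Array() is excluded because it needs special MISS handling.
        f.state = Slot::kMonomorphic;
        f.cached = callee;  // the real stub also emits a write barrier
        ++f.with_types;
      }
      // Everything else still falls through to the runtime miss handler.
    }
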
diff --git a/src/arm/codegen-arm.cc b/src/arm/codegen-arm.cc
index b577f59..fd1b0ef 100644
--- a/src/arm/codegen-arm.cc
+++ b/src/arm/codegen-arm.cc
@@ -288,8 +288,8 @@
 
     __ bind(&loop);
     __ ldr(temp1, MemOperand(src, 4, PostIndex));
-    __ uxtb16(temp3, Operand(temp1, ROR, 0));
-    __ uxtb16(temp4, Operand(temp1, ROR, 8));
+    __ uxtb16(temp3, temp1);
+    __ uxtb16(temp4, temp1, 8);
     __ pkhbt(temp1, temp3, Operand(temp4, LSL, 16));
     __ str(temp1, MemOperand(dest));
     __ pkhtb(temp1, temp4, Operand(temp3, ASR, 16));
@@ -301,9 +301,9 @@
     __ mov(chars, Operand(chars, LSL, 31), SetCC);  // bit0 => ne, bit1 => cs
     __ b(&not_two, cc);
     __ ldrh(temp1, MemOperand(src, 2, PostIndex));
-    __ uxtb(temp3, Operand(temp1, ROR, 8));
+    __ uxtb(temp3, temp1, 8);
     __ mov(temp3, Operand(temp3, LSL, 16));
-    __ uxtab(temp3, temp3, Operand(temp1, ROR, 0));
+    __ uxtab(temp3, temp3, temp1);
     __ str(temp3, MemOperand(dest, 4, PostIndex));
     __ bind(&not_two);
     __ ldrb(temp1, MemOperand(src), ne);
diff --git a/src/arm/disasm-arm.cc b/src/arm/disasm-arm.cc
index dc26018..4e631b0 100644
--- a/src/arm/disasm-arm.cc
+++ b/src/arm/disasm-arm.cc
@@ -1027,7 +1027,75 @@
               UNREACHABLE();
               break;
             case 1:
-              UNREACHABLE();
+              if (instr->Bits(9, 6) == 1) {
+                if (instr->Bit(20) == 0) {
+                  if (instr->Bits(19, 16) == 0xF) {
+                    switch (instr->Bits(11, 10)) {
+                      case 0:
+                        Format(instr, "sxtb'cond 'rd, 'rm");
+                        break;
+                      case 1:
+                        Format(instr, "sxtb'cond 'rd, 'rm, ror #8");
+                        break;
+                      case 2:
+                        Format(instr, "sxtb'cond 'rd, 'rm, ror #16");
+                        break;
+                      case 3:
+                        Format(instr, "sxtb'cond 'rd, 'rm, ror #24");
+                        break;
+                    }
+                  } else {
+                    switch (instr->Bits(11, 10)) {
+                      case 0:
+                        Format(instr, "sxtab'cond 'rd, 'rn, 'rm");
+                        break;
+                      case 1:
+                        Format(instr, "sxtab'cond 'rd, 'rn, 'rm, ror #8");
+                        break;
+                      case 2:
+                        Format(instr, "sxtab'cond 'rd, 'rn, 'rm, ror #16");
+                        break;
+                      case 3:
+                        Format(instr, "sxtab'cond 'rd, 'rn, 'rm, ror #24");
+                        break;
+                    }
+                  }
+                } else {
+                  if (instr->Bits(19, 16) == 0xF) {
+                    switch (instr->Bits(11, 10)) {
+                      case 0:
+                        Format(instr, "sxth'cond 'rd, 'rm");
+                        break;
+                      case 1:
+                        Format(instr, "sxth'cond 'rd, 'rm, ror #8");
+                        break;
+                      case 2:
+                        Format(instr, "sxth'cond 'rd, 'rm, ror #16");
+                        break;
+                      case 3:
+                        Format(instr, "sxth'cond 'rd, 'rm, ror #24");
+                        break;
+                    }
+                  } else {
+                    switch (instr->Bits(11, 10)) {
+                      case 0:
+                        Format(instr, "sxtah'cond 'rd, 'rn, 'rm");
+                        break;
+                      case 1:
+                        Format(instr, "sxtah'cond 'rd, 'rn, 'rm, ror #8");
+                        break;
+                      case 2:
+                        Format(instr, "sxtah'cond 'rd, 'rn, 'rm, ror #16");
+                        break;
+                      case 3:
+                        Format(instr, "sxtah'cond 'rd, 'rn, 'rm, ror #24");
+                        break;
+                    }
+                  }
+                }
+              } else {
+                UNREACHABLE();
+              }
               break;
             case 2:
               if ((instr->Bit(20) == 0) && (instr->Bits(9, 6) == 1)) {
@@ -1054,36 +1122,70 @@
               }
               break;
             case 3:
-              if ((instr->Bit(20) == 0) && (instr->Bits(9, 6) == 1)) {
-                if (instr->Bits(19, 16) == 0xF) {
-                  switch (instr->Bits(11, 10)) {
-                    case 0:
-                      Format(instr, "uxtb'cond 'rd, 'rm");
-                      break;
-                    case 1:
-                      Format(instr, "uxtb'cond 'rd, 'rm, ror #8");
-                      break;
-                    case 2:
-                      Format(instr, "uxtb'cond 'rd, 'rm, ror #16");
-                      break;
-                    case 3:
-                      Format(instr, "uxtb'cond 'rd, 'rm, ror #24");
-                      break;
+              if (instr->Bits(9, 6) == 1) {
+                if (instr->Bit(20) == 0) {
+                  if (instr->Bits(19, 16) == 0xF) {
+                    switch (instr->Bits(11, 10)) {
+                      case 0:
+                        Format(instr, "uxtb'cond 'rd, 'rm");
+                        break;
+                      case 1:
+                        Format(instr, "uxtb'cond 'rd, 'rm, ror #8");
+                        break;
+                      case 2:
+                        Format(instr, "uxtb'cond 'rd, 'rm, ror #16");
+                        break;
+                      case 3:
+                        Format(instr, "uxtb'cond 'rd, 'rm, ror #24");
+                        break;
+                    }
+                  } else {
+                    switch (instr->Bits(11, 10)) {
+                      case 0:
+                        Format(instr, "uxtab'cond 'rd, 'rn, 'rm");
+                        break;
+                      case 1:
+                        Format(instr, "uxtab'cond 'rd, 'rn, 'rm, ror #8");
+                        break;
+                      case 2:
+                        Format(instr, "uxtab'cond 'rd, 'rn, 'rm, ror #16");
+                        break;
+                      case 3:
+                        Format(instr, "uxtab'cond 'rd, 'rn, 'rm, ror #24");
+                        break;
+                    }
                   }
                 } else {
-                  switch (instr->Bits(11, 10)) {
-                    case 0:
-                      Format(instr, "uxtab'cond 'rd, 'rn, 'rm");
-                      break;
-                    case 1:
-                      Format(instr, "uxtab'cond 'rd, 'rn, 'rm, ror #8");
-                      break;
-                    case 2:
-                      Format(instr, "uxtab'cond 'rd, 'rn, 'rm, ror #16");
-                      break;
-                    case 3:
-                      Format(instr, "uxtab'cond 'rd, 'rn, 'rm, ror #24");
-                      break;
+                  if (instr->Bits(19, 16) == 0xF) {
+                    switch (instr->Bits(11, 10)) {
+                      case 0:
+                        Format(instr, "uxth'cond 'rd, 'rm");
+                        break;
+                      case 1:
+                        Format(instr, "uxth'cond 'rd, 'rm, ror #8");
+                        break;
+                      case 2:
+                        Format(instr, "uxth'cond 'rd, 'rm, ror #16");
+                        break;
+                      case 3:
+                        Format(instr, "uxth'cond 'rd, 'rm, ror #24");
+                        break;
+                    }
+                  } else {
+                    switch (instr->Bits(11, 10)) {
+                      case 0:
+                        Format(instr, "uxtah'cond 'rd, 'rn, 'rm");
+                        break;
+                      case 1:
+                        Format(instr, "uxtah'cond 'rd, 'rn, 'rm, ror #8");
+                        break;
+                      case 2:
+                        Format(instr, "uxtah'cond 'rd, 'rn, 'rm, ror #16");
+                        break;
+                      case 3:
+                        Format(instr, "uxtah'cond 'rd, 'rn, 'rm, ror #24");
+                        break;
+                    }
                   }
                 }
               } else {
diff --git a/src/arm/simulator-arm.cc b/src/arm/simulator-arm.cc
index 972fd07..e34c311 100644
--- a/src/arm/simulator-arm.cc
+++ b/src/arm/simulator-arm.cc
@@ -2629,7 +2629,89 @@
               UNIMPLEMENTED();
               break;
             case 1:
-              UNIMPLEMENTED();
+              if (instr->Bits(9, 6) == 1) {
+                if (instr->Bit(20) == 0) {
+                  if (instr->Bits(19, 16) == 0xF) {
+                    // Sxtb.
+                    int32_t rm_val = get_register(instr->RmValue());
+                    int32_t rotate = instr->Bits(11, 10);
+                    switch (rotate) {
+                      case 0:
+                        break;
+                      case 1:
+                        rm_val = (rm_val >> 8) | (rm_val << 24);
+                        break;
+                      case 2:
+                        rm_val = (rm_val >> 16) | (rm_val << 16);
+                        break;
+                      case 3:
+                        rm_val = (rm_val >> 24) | (rm_val << 8);
+                        break;
+                    }
+                    set_register(rd, static_cast<int8_t>(rm_val));
+                  } else {
+                    // Sxtab.
+                    int32_t rn_val = get_register(rn);
+                    int32_t rm_val = get_register(instr->RmValue());
+                    int32_t rotate = instr->Bits(11, 10);
+                    switch (rotate) {
+                      case 0:
+                        break;
+                      case 1:
+                        rm_val = (rm_val >> 8) | (rm_val << 24);
+                        break;
+                      case 2:
+                        rm_val = (rm_val >> 16) | (rm_val << 16);
+                        break;
+                      case 3:
+                        rm_val = (rm_val >> 24) | (rm_val << 8);
+                        break;
+                    }
+                    set_register(rd, rn_val + static_cast<int8_t>(rm_val));
+                  }
+                } else {
+                  if (instr->Bits(19, 16) == 0xF) {
+                    // Sxth.
+                    int32_t rm_val = get_register(instr->RmValue());
+                    int32_t rotate = instr->Bits(11, 10);
+                    switch (rotate) {
+                      case 0:
+                        break;
+                      case 1:
+                        rm_val = (rm_val >> 8) | (rm_val << 24);
+                        break;
+                      case 2:
+                        rm_val = (rm_val >> 16) | (rm_val << 16);
+                        break;
+                      case 3:
+                        rm_val = (rm_val >> 24) | (rm_val << 8);
+                        break;
+                    }
+                    set_register(rd, static_cast<int16_t>(rm_val));
+                  } else {
+                    // Sxtah.
+                    int32_t rn_val = get_register(rn);
+                    int32_t rm_val = get_register(instr->RmValue());
+                    int32_t rotate = instr->Bits(11, 10);
+                    switch (rotate) {
+                      case 0:
+                        break;
+                      case 1:
+                        rm_val = (rm_val >> 8) | (rm_val << 24);
+                        break;
+                      case 2:
+                        rm_val = (rm_val >> 16) | (rm_val << 16);
+                        break;
+                      case 3:
+                        rm_val = (rm_val >> 24) | (rm_val << 8);
+                        break;
+                    }
+                    set_register(rd, rn_val + static_cast<int16_t>(rm_val));
+                  }
+                }
+              } else {
+                UNREACHABLE();
+              }
               break;
             case 2:
               if ((instr->Bit(20) == 0) && (instr->Bits(9, 6) == 1)) {
@@ -2650,8 +2732,7 @@
                       rm_val = (rm_val >> 24) | (rm_val << 8);
                       break;
                   }
-                  set_register(rd,
-                               (rm_val & 0xFF) | (rm_val & 0xFF0000));
+                  set_register(rd, (rm_val & 0xFF) | (rm_val & 0xFF0000));
                 } else {
                   UNIMPLEMENTED();
                 }
@@ -2660,44 +2741,85 @@
               }
               break;
             case 3:
-              if ((instr->Bit(20) == 0) && (instr->Bits(9, 6) == 1)) {
-                if (instr->Bits(19, 16) == 0xF) {
-                  // Uxtb.
-                  uint32_t rm_val = get_register(instr->RmValue());
-                  int32_t rotate = instr->Bits(11, 10);
-                  switch (rotate) {
-                    case 0:
-                      break;
-                    case 1:
-                      rm_val = (rm_val >> 8) | (rm_val << 24);
-                      break;
-                    case 2:
-                      rm_val = (rm_val >> 16) | (rm_val << 16);
-                      break;
-                    case 3:
-                      rm_val = (rm_val >> 24) | (rm_val << 8);
-                      break;
+              if (instr->Bits(9, 6) == 1) {
+                if (instr->Bit(20) == 0) {
+                  if (instr->Bits(19, 16) == 0xF) {
+                    // Uxtb.
+                    uint32_t rm_val = get_register(instr->RmValue());
+                    int32_t rotate = instr->Bits(11, 10);
+                    switch (rotate) {
+                      case 0:
+                        break;
+                      case 1:
+                        rm_val = (rm_val >> 8) | (rm_val << 24);
+                        break;
+                      case 2:
+                        rm_val = (rm_val >> 16) | (rm_val << 16);
+                        break;
+                      case 3:
+                        rm_val = (rm_val >> 24) | (rm_val << 8);
+                        break;
+                    }
+                    set_register(rd, (rm_val & 0xFF));
+                  } else {
+                    // Uxtab.
+                    uint32_t rn_val = get_register(rn);
+                    uint32_t rm_val = get_register(instr->RmValue());
+                    int32_t rotate = instr->Bits(11, 10);
+                    switch (rotate) {
+                      case 0:
+                        break;
+                      case 1:
+                        rm_val = (rm_val >> 8) | (rm_val << 24);
+                        break;
+                      case 2:
+                        rm_val = (rm_val >> 16) | (rm_val << 16);
+                        break;
+                      case 3:
+                        rm_val = (rm_val >> 24) | (rm_val << 8);
+                        break;
+                    }
+                    set_register(rd, rn_val + (rm_val & 0xFF));
                   }
-                  set_register(rd, (rm_val & 0xFF));
                 } else {
-                  // Uxtab.
-                  uint32_t rn_val = get_register(rn);
-                  uint32_t rm_val = get_register(instr->RmValue());
-                  int32_t rotate = instr->Bits(11, 10);
-                  switch (rotate) {
-                    case 0:
-                      break;
-                    case 1:
-                      rm_val = (rm_val >> 8) | (rm_val << 24);
-                      break;
-                    case 2:
-                      rm_val = (rm_val >> 16) | (rm_val << 16);
-                      break;
-                    case 3:
-                      rm_val = (rm_val >> 24) | (rm_val << 8);
-                      break;
+                  if (instr->Bits(19, 16) == 0xF) {
+                    // Uxth.
+                    uint32_t rm_val = get_register(instr->RmValue());
+                    int32_t rotate = instr->Bits(11, 10);
+                    switch (rotate) {
+                      case 0:
+                        break;
+                      case 1:
+                        rm_val = (rm_val >> 8) | (rm_val << 24);
+                        break;
+                      case 2:
+                        rm_val = (rm_val >> 16) | (rm_val << 16);
+                        break;
+                      case 3:
+                        rm_val = (rm_val >> 24) | (rm_val << 8);
+                        break;
+                    }
+                    set_register(rd, (rm_val & 0xFFFF));
+                  } else {
+                    // Uxtah.
+                    uint32_t rn_val = get_register(rn);
+                    uint32_t rm_val = get_register(instr->RmValue());
+                    int32_t rotate = instr->Bits(11, 10);
+                    switch (rotate) {
+                      case 0:
+                        break;
+                      case 1:
+                        rm_val = (rm_val >> 8) | (rm_val << 24);
+                        break;
+                      case 2:
+                        rm_val = (rm_val >> 16) | (rm_val << 16);
+                        break;
+                      case 3:
+                        rm_val = (rm_val >> 24) | (rm_val << 8);
+                        break;
+                    }
+                    set_register(rd, rn_val + (rm_val & 0xFFFF));
                   }
-                  set_register(rd, rn_val + (rm_val & 0xFF));
                 }
               } else {
                 UNIMPLEMENTED();
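
Every new simulator case is the same rotate-then-extract idiom; summarized once as self-contained C++ (a behavioral restatement, not the simulator's literal code):

    #include <cstdint>

    // Bits 11:10 of the instruction select a rotation of 0, 8, 16 or 24.
    static inline uint32_t Ror(uint32_t v, int rotate) {
      return rotate == 0 ? v : (v >> rotate) | (v << (32 - rotate));
    }

    int32_t Sxtb(uint32_t rm, int rotate) {   // sign-extend low byte
      return static_cast<int8_t>(Ror(rm, rotate));
    }
    uint32_t Uxth(uint32_t rm, int rotate) {  // zero-extend low half-word
      return Ror(rm, rotate) & 0xFFFF;
    }

For example, Sxtb(0x00008012, 8) rotates to 0x12000080 and sign-extends the low byte 0x80 to -128 (0xFFFFFF80).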
diff --git a/src/arm64/assembler-arm64.cc b/src/arm64/assembler-arm64.cc
index 076e143..524be15 100644
--- a/src/arm64/assembler-arm64.cc
+++ b/src/arm64/assembler-arm64.cc
@@ -44,17 +44,8 @@
 // CpuFeatures implementation.
 
 void CpuFeatures::ProbeImpl(bool cross_compile) {
-  if (cross_compile) {
-    // Always align csp in cross compiled code - this is safe and ensures that
-    // csp will always be aligned if it is enabled by probing at runtime.
-    if (FLAG_enable_always_align_csp) supported_ |= 1u << ALWAYS_ALIGN_CSP;
-  } else {
-    base::CPU cpu;
-    if (FLAG_enable_always_align_csp &&
-        (cpu.implementer() == base::CPU::NVIDIA || FLAG_debug_code)) {
-      supported_ |= 1u << ALWAYS_ALIGN_CSP;
-    }
-  }
+  // AArch64 has no configuration options; no further probing is required.
+  supported_ = 0;
 }
 
 
diff --git a/src/arm64/code-stubs-arm64.cc b/src/arm64/code-stubs-arm64.cc
index 6583775..0d6f8ad 100644
--- a/src/arm64/code-stubs-arm64.cc
+++ b/src/arm64/code-stubs-arm64.cc
@@ -3016,6 +3016,10 @@
 
   // x1 - function
   // x3 - slot id (Smi)
+  const int with_types_offset =
+      FixedArray::OffsetOfElementAt(TypeFeedbackVector::kWithTypesIndex);
+  const int generic_offset =
+      FixedArray::OffsetOfElementAt(TypeFeedbackVector::kGenericCountIndex);
   Label extra_checks_or_miss, slow_start;
   Label slow, non_function, wrap, cont;
   Label have_js_function;
@@ -3064,35 +3068,72 @@
   }
 
   __ bind(&extra_checks_or_miss);
-  Label miss;
+  Label uninitialized, miss;
 
   __ JumpIfRoot(x4, Heap::kmegamorphic_symbolRootIndex, &slow_start);
-  __ JumpIfRoot(x4, Heap::kuninitialized_symbolRootIndex, &miss);
 
-  if (!FLAG_trace_ic) {
-    // We are going megamorphic. If the feedback is a JSFunction, it is fine
-    // to handle it here. More complex cases are dealt with in the runtime.
-    __ AssertNotSmi(x4);
-    __ JumpIfNotObjectType(x4, x5, x5, JS_FUNCTION_TYPE, &miss);
-    __ Add(x4, feedback_vector,
-           Operand::UntagSmiAndScale(index, kPointerSizeLog2));
-    __ LoadRoot(x5, Heap::kmegamorphic_symbolRootIndex);
-    __ Str(x5, FieldMemOperand(x4, FixedArray::kHeaderSize));
-    // We have to update statistics for runtime profiling.
-    const int with_types_offset =
-        FixedArray::OffsetOfElementAt(TypeFeedbackVector::kWithTypesIndex);
-    __ Ldr(x4, FieldMemOperand(feedback_vector, with_types_offset));
-    __ Subs(x4, x4, Operand(Smi::FromInt(1)));
-    __ Str(x4, FieldMemOperand(feedback_vector, with_types_offset));
-    const int generic_offset =
-        FixedArray::OffsetOfElementAt(TypeFeedbackVector::kGenericCountIndex);
-    __ Ldr(x4, FieldMemOperand(feedback_vector, generic_offset));
-    __ Adds(x4, x4, Operand(Smi::FromInt(1)));
-    __ Str(x4, FieldMemOperand(feedback_vector, generic_offset));
-    __ B(&slow_start);
+  // The following cases attempt to handle MISS cases without going to the
+  // runtime.
+  if (FLAG_trace_ic) {
+    __ B(&miss);
   }
 
-  // We are here because tracing is on or we are going monomorphic.
+  __ JumpIfRoot(x4, Heap::kuninitialized_symbolRootIndex, &uninitialized);
+
+  // We are going megamorphic. If the feedback is a JSFunction, it is fine
+  // to handle it here. More complex cases are dealt with in the runtime.
+  __ AssertNotSmi(x4);
+  __ JumpIfNotObjectType(x4, x5, x5, JS_FUNCTION_TYPE, &miss);
+  __ Add(x4, feedback_vector,
+         Operand::UntagSmiAndScale(index, kPointerSizeLog2));
+  __ LoadRoot(x5, Heap::kmegamorphic_symbolRootIndex);
+  __ Str(x5, FieldMemOperand(x4, FixedArray::kHeaderSize));
+  // We have to update statistics for runtime profiling.
+  __ Ldr(x4, FieldMemOperand(feedback_vector, with_types_offset));
+  __ Subs(x4, x4, Operand(Smi::FromInt(1)));
+  __ Str(x4, FieldMemOperand(feedback_vector, with_types_offset));
+  __ Ldr(x4, FieldMemOperand(feedback_vector, generic_offset));
+  __ Adds(x4, x4, Operand(Smi::FromInt(1)));
+  __ Str(x4, FieldMemOperand(feedback_vector, generic_offset));
+  __ B(&slow_start);
+
+  __ bind(&uninitialized);
+
+  // We are going monomorphic, provided we actually have a JSFunction.
+  __ JumpIfSmi(function, &miss);
+
+  // Go to the miss case if we do not have a function.
+  __ JumpIfNotObjectType(function, x5, x5, JS_FUNCTION_TYPE, &miss);
+
+  // Make sure the function is not the Array() function, which requires special
+  // behavior on MISS.
+  __ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, x5);
+  __ Cmp(function, x5);
+  __ B(eq, &miss);
+
+  // Update stats.
+  __ Ldr(x4, FieldMemOperand(feedback_vector, with_types_offset));
+  __ Adds(x4, x4, Operand(Smi::FromInt(1)));
+  __ Str(x4, FieldMemOperand(feedback_vector, with_types_offset));
+
+  // Store the function.
+  __ Add(x4, feedback_vector,
+         Operand::UntagSmiAndScale(index, kPointerSizeLog2));
+  __ Add(x4, x4, FixedArray::kHeaderSize - kHeapObjectTag);
+  __ Str(function, MemOperand(x4, 0));
+
+  // Update the write barrier.
+  __ Mov(x5, function);
+  __ RecordWrite(feedback_vector, x4, x5, kLRHasNotBeenSaved, kDontSaveFPRegs,
+                 EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
+  __ B(&have_js_function);
+
+  // We are here because tracing is on or we encountered a MISS case we can't
+  // handle here.
   __ bind(&miss);
   GenerateMiss(masm);
 
@@ -4293,18 +4334,10 @@
 }
 
 
-static unsigned int GetProfileEntryHookCallSize(MacroAssembler* masm) {
-  // The entry hook is a "BumpSystemStackPointer" instruction (sub),
-  // followed by a "Push lr" instruction, followed by a call.
-  unsigned int size =
-      Assembler::kCallSizeWithRelocation + (2 * kInstructionSize);
-  if (CpuFeatures::IsSupported(ALWAYS_ALIGN_CSP)) {
-    // If ALWAYS_ALIGN_CSP then there will be an extra bic instruction in
-    // "BumpSystemStackPointer".
-    size += kInstructionSize;
-  }
-  return size;
-}
+// The entry hook is a "BumpSystemStackPointer" instruction (sub), followed by
+// a "Push lr" instruction, followed by a call.
+static const unsigned int kProfileEntryHookCallSize =
+    Assembler::kCallSizeWithRelocation + (2 * kInstructionSize);
 
 
 void ProfileEntryHookStub::MaybeCallEntryHook(MacroAssembler* masm) {
@@ -4317,7 +4350,7 @@
     __ Push(lr);
     __ CallStub(&stub);
     DCHECK(masm->SizeOfCodeGeneratedSince(&entry_hook_call_start) ==
-           GetProfileEntryHookCallSize(masm));
+           kProfileEntryHookCallSize);
 
     __ Pop(lr);
   }
@@ -4335,7 +4368,7 @@
   const int kNumSavedRegs = kCallerSaved.Count();
 
   // Compute the function's address as the first argument.
-  __ Sub(x0, lr, GetProfileEntryHookCallSize(masm));
+  __ Sub(x0, lr, kProfileEntryHookCallSize);
 
 #if V8_HOST_ARCH_ARM64
   uintptr_t entry_hook =
diff --git a/src/arm64/lithium-codegen-arm64.cc b/src/arm64/lithium-codegen-arm64.cc
index a285e7b..1b8ae1b 100644
--- a/src/arm64/lithium-codegen-arm64.cc
+++ b/src/arm64/lithium-codegen-arm64.cc
@@ -557,11 +557,6 @@
       safepoint.DefinePointerRegister(ToRegister(pointer), zone());
     }
   }
-
-  if (kind & Safepoint::kWithRegisters) {
-    // Register cp always contains a pointer to the context.
-    safepoint.DefinePointerRegister(cp, zone());
-  }
 }
 
 void LCodeGen::RecordSafepoint(LPointerMap* pointers,
diff --git a/src/arm64/macro-assembler-arm64-inl.h b/src/arm64/macro-assembler-arm64-inl.h
index 4a4d644..b691e21 100644
--- a/src/arm64/macro-assembler-arm64-inl.h
+++ b/src/arm64/macro-assembler-arm64-inl.h
@@ -1244,14 +1244,7 @@
 void MacroAssembler::BumpSystemStackPointer(const Operand& space) {
   DCHECK(!csp.Is(sp_));
   if (!TmpList()->IsEmpty()) {
-    if (CpuFeatures::IsSupported(ALWAYS_ALIGN_CSP)) {
-      UseScratchRegisterScope temps(this);
-      Register temp = temps.AcquireX();
-      Sub(temp, StackPointer(), space);
-      Bic(csp, temp, 0xf);
-    } else {
-      Sub(csp, StackPointer(), space);
-    }
+    Sub(csp, StackPointer(), space);
   } else {
     // TODO(jbramley): Several callers rely on this not using scratch
     // registers, so we use the assembler directly here. However, this means
@@ -1288,11 +1281,7 @@
   DCHECK(emit_debug_code());
   DCHECK(!csp.Is(sp_));
   { InstructionAccurateScope scope(this);
-    if (CpuFeatures::IsSupported(ALWAYS_ALIGN_CSP)) {
-      bic(csp, StackPointer(), 0xf);
-    } else {
-      mov(csp, StackPointer());
-    }
+    mov(csp, StackPointer());
   }
   AssertStackConsistency();
 }
diff --git a/src/arm64/macro-assembler-arm64.cc b/src/arm64/macro-assembler-arm64.cc
index e0a2190..f2ca92b 100644
--- a/src/arm64/macro-assembler-arm64.cc
+++ b/src/arm64/macro-assembler-arm64.cc
@@ -1308,7 +1308,7 @@
   // Avoid emitting code when !use_real_abort() since non-real aborts cause too
   // much code to be generated.
   if (emit_debug_code() && use_real_aborts()) {
-    if (csp.Is(StackPointer()) || CpuFeatures::IsSupported(ALWAYS_ALIGN_CSP)) {
+    if (csp.Is(StackPointer())) {
-      // Always check the alignment of csp if ALWAYS_ALIGN_CSP is true.  We
-      // can't check the alignment of csp without using a scratch register (or
-      // clobbering the flags), but the processor (or simulator) will abort if
+      // We can't check the alignment of csp without using a scratch register
+      // (or clobbering the flags), but the processor (or simulator) will abort
+      // if
diff --git a/src/arm64/macro-assembler-arm64.h b/src/arm64/macro-assembler-arm64.h
index cff42d7..db51156 100644
--- a/src/arm64/macro-assembler-arm64.h
+++ b/src/arm64/macro-assembler-arm64.h
@@ -761,9 +761,9 @@
   // it can be evidence of a potential bug because the ABI forbids accesses
   // below csp.
   //
-  // If StackPointer() is the system stack pointer (csp) or ALWAYS_ALIGN_CSP is
-  // enabled, then csp will be dereferenced to  cause the processor
-  // (or simulator) to abort if it is not properly aligned.
+  // If StackPointer() is the system stack pointer (csp), then csp will be
+  // dereferenced to cause the processor (or simulator) to abort if it is not
+  // properly aligned.
   //
   // If emit_debug_code() is false, this emits no code.
   void AssertStackConsistency();
@@ -831,9 +831,7 @@
   inline void BumpSystemStackPointer(const Operand& space);
 
   // Re-synchronizes the system stack pointer (csp) with the current stack
-  // pointer (according to StackPointer()).  This function will ensure the
-  // new value of the system stack pointer is remains aligned to 16 bytes, and
-  // is lower than or equal to the value of the current stack pointer.
+  // pointer (according to StackPointer()).
   //
   // This method asserts that StackPointer() is not csp, since the call does
   // not make sense in that context.
diff --git a/src/ast.h b/src/ast.h
index 749e579..1700916 100644
--- a/src/ast.h
+++ b/src/ast.h
@@ -2576,6 +2576,12 @@
   bool is_concise_method() {
     return IsConciseMethod(FunctionKindBits::decode(bitfield_));
   }
+  bool is_default_constructor() {
+    return IsDefaultConstructor(FunctionKindBits::decode(bitfield_));
+  }
+  bool is_default_constructor_call_super() {
+    return IsDefaultConstructorCallSuper(FunctionKindBits::decode(bitfield_));
+  }
 
   int ast_node_count() { return ast_properties_.node_count(); }
   AstProperties::Flags* flags() { return ast_properties_.flags(); }
@@ -2647,7 +2653,7 @@
   class HasDuplicateParameters : public BitField<ParameterFlag, 3, 1> {};
   class IsFunction : public BitField<IsFunctionFlag, 4, 1> {};
   class IsParenthesized : public BitField<IsParenthesizedFlag, 5, 1> {};
-  class FunctionKindBits : public BitField<FunctionKind, 6, 3> {};
+  class FunctionKindBits : public BitField<FunctionKind, 6, 5> {};
 };
 
 
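FunctionKindBits grows from 3 to 5 bits because FunctionKind is a small flag mask and this release adds two kinds (default constructor, and default constructor calling super); the same widening appears in code-stubs.h below. Assuming the usual BitField layout, the arithmetic is:

    #include <cstdint>

    // BitField<FunctionKind, 6, 5>: the kind mask occupies bits 10:6.
    const int kShift = 6;
    const uint32_t kMask = ((1u << 5) - 1) << kShift;  // 0x7C0
    uint32_t Encode(uint32_t kind) { return kind << kShift; }
    uint32_t Decode(uint32_t bits) { return (bits & kMask) >> kShift; }
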
diff --git a/src/base/platform/platform-win32.cc b/src/base/platform/platform-win32.cc
index 1c46cf6..d68e861 100644
--- a/src/base/platform/platform-win32.cc
+++ b/src/base/platform/platform-win32.cc
@@ -346,26 +346,41 @@
 }
 
 
-int64_t FileTimeToInt64(FILETIME ft) {
-  ULARGE_INTEGER result;
-  result.LowPart = ft.dwLowDateTime;
-  result.HighPart = ft.dwHighDateTime;
-  return static_cast<int64_t>(result.QuadPart);
-}
-
-
 // Return the local timezone offset in milliseconds east of UTC. This
 // takes into account whether daylight saving is in effect at the time.
 // Only times in the 32-bit Unix range may be passed to this function.
 // Also, adding the time-zone offset to the input must not overflow.
 // The function EquivalentTime() in date.js guarantees this.
 int64_t Win32Time::LocalOffset(TimezoneCache* cache) {
-  FILETIME local;
-  SYSTEMTIME system_utc, system_local;
-  FileTimeToSystemTime(&time_.ft_, &system_utc);
-  SystemTimeToTzSpecificLocalTime(NULL, &system_utc, &system_local);
-  SystemTimeToFileTime(&system_local, &local);
-  return (FileTimeToInt64(local) - FileTimeToInt64(time_.ft_)) / kTimeScaler;
+  cache->InitializeIfNeeded();
+
+  Win32Time rounded_to_second(*this);
+  rounded_to_second.t() =
+      rounded_to_second.t() / 1000 / kTimeScaler * 1000 * kTimeScaler;
+  // Convert to local time using POSIX localtime function.
+  // Windows XP Service Pack 3 made SystemTimeToTzSpecificLocalTime()
+  // very slow.  Other browsers use localtime().
+
+  // Convert from JavaScript milliseconds past 1/1/1970 0:00:00 to
+  // POSIX seconds past 1/1/1970 0:00:00.
+  double unchecked_posix_time = rounded_to_second.ToJSTime() / 1000;
+  if (unchecked_posix_time > INT_MAX || unchecked_posix_time < 0) {
+    return 0;
+  }
+  // Because _USE_32BIT_TIME_T is defined, time_t is a 32-bit int.
+  time_t posix_time = static_cast<time_t>(unchecked_posix_time);
+
+  // Convert to local time, as struct with fields for day, hour, year, etc.
+  tm posix_local_time_struct;
+  if (localtime_s(&posix_local_time_struct, &posix_time)) return 0;
+
+  if (posix_local_time_struct.tm_isdst > 0) {
+    return (cache->tzinfo_.Bias + cache->tzinfo_.DaylightBias) * -kMsPerMinute;
+  } else if (posix_local_time_struct.tm_isdst == 0) {
+    return (cache->tzinfo_.Bias + cache->tzinfo_.StandardBias) * -kMsPerMinute;
+  } else {
+    return cache->tzinfo_.Bias * -kMsPerMinute;
+  }
 }
 
 
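The rewritten LocalOffset avoids the slow SystemTimeToTzSpecificLocalTime call by classifying the timestamp with localtime_s and then reading the offset from the cached TIME_ZONE_INFORMATION. The biases are minutes west of UTC, hence the negation and per-minute scaling. A worked example with US Pacific values (Bias = 480, DaylightBias = -60): a DST timestamp yields (480 + -60) * -60000 = -25,200,000 ms, i.e. UTC-7.
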
diff --git a/src/bootstrapper.cc b/src/bootstrapper.cc
index 264d1c6..6f37a94 100644
--- a/src/bootstrapper.cc
+++ b/src/bootstrapper.cc
@@ -361,7 +361,7 @@
 static void SetObjectPrototype(Handle<JSObject> object, Handle<Object> proto) {
   // object.__proto__ = proto;
   Handle<Map> old_map = Handle<Map>(object->map());
-  Handle<Map> new_map = Map::Copy(old_map);
+  Handle<Map> new_map = Map::Copy(old_map, "SetObjectPrototype");
   new_map->set_prototype(*proto);
   JSObject::MigrateToMap(object, new_map);
 }
@@ -510,7 +510,8 @@
     Handle<JSObject> prototype = factory->NewJSObject(
         isolate->object_function(),
         TENURED);
-    Handle<Map> map = Map::Copy(handle(prototype->map()));
+    Handle<Map> map =
+        Map::Copy(handle(prototype->map()), "EmptyObjectPrototype");
     map->set_is_prototype_map(true);
     prototype->set_map(*map);
 
@@ -908,6 +909,10 @@
   Factory* factory = isolate->factory();
   Heap* heap = isolate->heap();
 
+  Handle<GlobalContextTable> global_context_table =
+      factory->NewGlobalContextTable();
+  native_context()->set_global_context_table(*global_context_table);
+
   Handle<String> object_name = factory->Object_string();
   JSObject::AddProperty(
       global_object, object_name, isolate->object_function(), DONT_ENUM);
@@ -1089,7 +1094,7 @@
     initial_map->set_visitor_id(StaticVisitorBase::GetVisitorId(*initial_map));
 
     // RegExp prototype object is itself a RegExp.
-    Handle<Map> proto_map = Map::Copy(initial_map);
+    Handle<Map> proto_map = Map::Copy(initial_map, "RegExpPrototype");
     proto_map->set_prototype(native_context()->initial_object_prototype());
     Handle<JSObject> proto = factory->NewJSObjectFromMap(proto_map);
     proto->InObjectPropertyAtPut(JSRegExp::kSourceFieldIndex,
@@ -1244,7 +1249,8 @@
   }
 
   {  // --- aliased arguments map
-    Handle<Map> map = Map::Copy(isolate->sloppy_arguments_map());
+    Handle<Map> map =
+        Map::Copy(isolate->sloppy_arguments_map(), "AliasedArguments");
     map->set_elements_kind(SLOPPY_ARGUMENTS_ELEMENTS);
     DCHECK_EQ(2, map->pre_allocated_property_fields());
     native_context()->set_aliased_arguments_map(*map);
@@ -1657,7 +1663,7 @@
   array_function->shared()->DontAdaptArguments();
 
   Handle<Map> original_map(array_function->initial_map());
-  Handle<Map> initial_map = Map::Copy(original_map);
+  Handle<Map> initial_map = Map::Copy(original_map, "InternalArray");
   initial_map->set_elements_kind(elements_kind);
   JSFunction::SetInitialMap(array_function, initial_map, prototype);
 
@@ -1935,7 +1941,7 @@
     // Create maps for generator functions and their prototypes.  Store those
     // maps in the native context.
     Handle<Map> generator_function_map =
-        Map::Copy(sloppy_function_map_writable_prototype_);
+        Map::Copy(sloppy_function_map_writable_prototype_, "GeneratorFunction");
     generator_function_map->set_prototype(*generator_function_prototype);
     native_context()->set_sloppy_generator_function_map(
         *generator_function_map);
@@ -1966,7 +1972,8 @@
                      rw_attribs, poison_pair);
 
     Handle<Map> strict_function_map(native_context()->strict_function_map());
-    Handle<Map> strict_generator_function_map = Map::Copy(strict_function_map);
+    Handle<Map> strict_generator_function_map =
+        Map::Copy(strict_function_map, "StrictGeneratorFunction");
     // "arguments" and "caller" already poisoned.
     strict_generator_function_map->set_prototype(*generator_function_prototype);
     native_context()->set_strict_generator_function_map(
@@ -2712,6 +2719,15 @@
     AddToWeakNativeContextList(*native_context());
     isolate->set_context(*native_context());
     isolate->counters()->contexts_created_by_snapshot()->Increment();
+#if TRACE_MAPS
+    if (FLAG_trace_maps) {
+      Handle<JSFunction> object_fun = isolate->object_function();
+      PrintF("[TraceMap: InitialMap map= %p SFI= %d_Object ]\n",
+             reinterpret_cast<void*>(object_fun->initial_map()),
+             object_fun->shared()->unique_id());
+      Map::TraceAllTransitions(object_fun->initial_map());
+    }
+#endif
     Handle<GlobalObject> global_object;
     Handle<JSGlobalProxy> global_proxy = CreateNewGlobals(
         global_proxy_template, maybe_global_proxy, &global_object);
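
This hunk makes snapshot-created contexts participate in map tracing, which would otherwise only cover contexts built from scratch. Given the format string above, the emitted line looks like this (pointer and SFI id illustrative), followed by one line per transition from Map::TraceAllTransitions:

    [TraceMap: InitialMap map= 0x1f6d88e09891 SFI= 7_Object ]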
diff --git a/src/code-stubs.h b/src/code-stubs.h
index 06eff69..13f8e42 100644
--- a/src/code-stubs.h
+++ b/src/code-stubs.h
@@ -577,10 +577,14 @@
   bool is_arrow() const { return IsArrowFunction(kind()); }
   bool is_generator() const { return IsGeneratorFunction(kind()); }
   bool is_concise_method() const { return IsConciseMethod(kind()); }
+  bool is_default_constructor() const { return IsDefaultConstructor(kind()); }
+  bool is_default_constructor_call_super() const {
+    return IsDefaultConstructorCallSuper(kind());
+  }
 
  private:
   class StrictModeBits : public BitField<StrictMode, 0, 1> {};
-  class FunctionKindBits : public BitField<FunctionKind, 1, 3> {};
+  class FunctionKindBits : public BitField<FunctionKind, 1, 5> {};
 
   DEFINE_CALL_INTERFACE_DESCRIPTOR(FastNewClosure);
   DEFINE_HYDROGEN_CODE_STUB(FastNewClosure, HydrogenCodeStub);
diff --git a/src/compiler/arm/code-generator-arm.cc b/src/compiler/arm/code-generator-arm.cc
index 3433765..f4b8716 100644
--- a/src/compiler/arm/code-generator-arm.cc
+++ b/src/compiler/arm/code-generator-arm.cc
@@ -299,6 +299,42 @@
       DCHECK_EQ(LeaveCC, i.OutputSBit());
       break;
     }
+    case kArmSxtb:
+      __ sxtb(i.OutputRegister(), i.InputRegister(0), i.InputInt32(1));
+      DCHECK_EQ(LeaveCC, i.OutputSBit());
+      break;
+    case kArmSxth:
+      __ sxth(i.OutputRegister(), i.InputRegister(0), i.InputInt32(1));
+      DCHECK_EQ(LeaveCC, i.OutputSBit());
+      break;
+    case kArmSxtab:
+      __ sxtab(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1),
+               i.InputInt32(2));
+      DCHECK_EQ(LeaveCC, i.OutputSBit());
+      break;
+    case kArmSxtah:
+      __ sxtah(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1),
+               i.InputInt32(2));
+      DCHECK_EQ(LeaveCC, i.OutputSBit());
+      break;
+    case kArmUxtb:
+      __ uxtb(i.OutputRegister(), i.InputRegister(0), i.InputInt32(1));
+      DCHECK_EQ(LeaveCC, i.OutputSBit());
+      break;
+    case kArmUxth:
+      __ uxth(i.OutputRegister(), i.InputRegister(0), i.InputInt32(1));
+      DCHECK_EQ(LeaveCC, i.OutputSBit());
+      break;
+    case kArmUxtab:
+      __ uxtab(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1),
+               i.InputInt32(2));
+      DCHECK_EQ(LeaveCC, i.OutputSBit());
+      break;
+    case kArmUxtah:
+      __ uxtah(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1),
+               i.InputInt32(2));
+      DCHECK_EQ(LeaveCC, i.OutputSBit());
+      break;
     case kArmCmp:
       __ cmp(i.InputRegister(0), i.InputOperand2(1));
       DCHECK_EQ(SetCC, i.OutputSBit());
diff --git a/src/compiler/arm/instruction-codes-arm.h b/src/compiler/arm/instruction-codes-arm.h
index c48369e..ecd0b2d 100644
--- a/src/compiler/arm/instruction-codes-arm.h
+++ b/src/compiler/arm/instruction-codes-arm.h
@@ -35,6 +35,14 @@
   V(ArmMvn)                        \
   V(ArmBfc)                        \
   V(ArmUbfx)                       \
+  V(ArmSxtb)                       \
+  V(ArmSxth)                       \
+  V(ArmSxtab)                      \
+  V(ArmSxtah)                      \
+  V(ArmUxtb)                       \
+  V(ArmUxth)                       \
+  V(ArmUxtab)                      \
+  V(ArmUxtah)                      \
   V(ArmVcmpF64)                    \
   V(ArmVaddF64)                    \
   V(ArmVsubF64)                    \
diff --git a/src/compiler/arm/instruction-selector-arm.cc b/src/compiler/arm/instruction-selector-arm.cc
index a071bbc..2bae140 100644
--- a/src/compiler/arm/instruction-selector-arm.cc
+++ b/src/compiler/arm/instruction-selector-arm.cc
@@ -91,6 +91,14 @@
       case kArmUdiv:
       case kArmBfc:
       case kArmUbfx:
+      case kArmSxtb:
+      case kArmSxth:
+      case kArmSxtab:
+      case kArmSxtah:
+      case kArmUxtb:
+      case kArmUxth:
+      case kArmUxtab:
+      case kArmUxtah:
       case kArmVcmpF64:
       case kArmVaddF64:
       case kArmVsubF64:
@@ -255,8 +263,20 @@
   InstructionOperand* outputs[2];
   size_t output_count = 0;
 
-  if (TryMatchImmediateOrShift(selector, &opcode, m.right().node(),
-                               &input_count, &inputs[1])) {
+  if (m.left().node() == m.right().node()) {
+    // If both inputs refer to the same operand, enforce allocating a register
+    // for both of them to ensure that we don't end up generating code like
+    // this:
+    //
+    //   mov r0, r1, asr #16
+    //   adds r0, r0, r1, asr #16
+    //   bvs label
+    InstructionOperand* const input = g.UseRegister(m.left().node());
+    opcode |= AddressingModeField::encode(kMode_Operand2_R);
+    inputs[input_count++] = input;
+    inputs[input_count++] = input;
+  } else if (TryMatchImmediateOrShift(selector, &opcode, m.right().node(),
+                                      &input_count, &inputs[1])) {
     inputs[0] = g.UseRegister(m.left().node());
     input_count++;
   } else if (TryMatchImmediateOrShift(selector, &reverse_opcode,
@@ -430,12 +450,12 @@
       return;
     }
   }
-  if (IsSupported(ARMv7) && m.right().HasValue()) {
-    // Try to interpret this AND as UBFX.
+  if (m.right().HasValue()) {
     uint32_t const value = m.right().Value();
     uint32_t width = base::bits::CountPopulation32(value);
     uint32_t msb = base::bits::CountLeadingZeros32(value);
-    if (width != 0 && msb + width == 32) {
+    // Try to interpret this AND as UBFX.
+    if (IsSupported(ARMv7) && width != 0 && msb + width == 32) {
       DCHECK_EQ(0, base::bits::CountTrailingZeros32(value));
       if (m.left().IsWord32Shr()) {
         Int32BinopMatcher mleft(m.left().node());
@@ -450,7 +470,6 @@
            g.TempImmediate(0), g.TempImmediate(width));
       return;
     }
-
     // Try to interpret this AND as BIC.
     if (g.CanBeImmediate(~value)) {
       Emit(kArmBic | AddressingModeField::encode(kMode_Operand2_I),
@@ -458,16 +477,23 @@
            g.TempImmediate(~value));
       return;
     }
-
-    // Try to interpret this AND as BFC.
-    width = 32 - width;
-    msb = base::bits::CountLeadingZeros32(~value);
-    uint32_t lsb = base::bits::CountTrailingZeros32(~value);
-    if (msb + width + lsb == 32) {
-      Emit(kArmBfc, g.DefineSameAsFirst(node), g.UseRegister(m.left().node()),
-           g.TempImmediate(lsb), g.TempImmediate(width));
+    // Try to interpret this AND as UXTH.
+    if (value == 0xffff) {
+      Emit(kArmUxth, g.DefineAsRegister(m.node()),
+           g.UseRegister(m.left().node()), g.TempImmediate(0));
       return;
     }
+    // Try to interpret this AND as BFC.
+    if (IsSupported(ARMv7)) {
+      width = 32 - width;
+      msb = base::bits::CountLeadingZeros32(~value);
+      uint32_t lsb = base::bits::CountTrailingZeros32(~value);
+      if (msb + width + lsb == 32) {
+        Emit(kArmBfc, g.DefineSameAsFirst(node), g.UseRegister(m.left().node()),
+             g.TempImmediate(lsb), g.TempImmediate(width));
+        return;
+      }
+    }
   }
   VisitBinop(this, node, kArmAnd, kArmAnd);
 }
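
The reordering gives an AND mask four chances, tried in sequence: a contiguous
low-bit mask becomes ubfx (ARMv7 only), a mask whose inverse fits an immediate
becomes bic, exactly 0xffff becomes uxth (which, unlike ubfx and bfc, is not
gated on ARMv7), and a mask that clears one contiguous field becomes bfc
(ARMv7 only). A standalone sketch of the two mask shape tests, assuming
GCC/Clang builtins:

    #include <cstdint>

    bool IsLowBitMask(uint32_t value) {        // ubfx candidates, e.g. 0x00ffffff
      if (value == 0) return false;
      uint32_t width = __builtin_popcount(value);  // number of set bits
      uint32_t msb = __builtin_clz(value);         // leading zeros
      return msb + width == 32;                    // ones are contiguous from bit 0
    }

    bool IsClearedFieldMask(uint32_t value) {  // bfc candidates, e.g. 0xffff00ff
      uint32_t inv = ~value;
      if (inv == 0) return false;
      uint32_t width = 32 - __builtin_popcount(value);  // cleared field width
      uint32_t msb = __builtin_clz(inv);
      uint32_t lsb = __builtin_ctz(inv);
      return msb + width + lsb == 32;  // zeros form one contiguous field
    }
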
@@ -571,6 +597,20 @@
 
 
 void InstructionSelector::VisitWord32Sar(Node* node) {
+  ArmOperandGenerator g(this);
+  Int32BinopMatcher m(node);
+  if (CanCover(m.node(), m.left().node()) && m.left().IsWord32Shl()) {
+    Int32BinopMatcher mleft(m.left().node());
+    if (mleft.right().Is(16) && m.right().Is(16)) {
+      Emit(kArmSxth, g.DefineAsRegister(node),
+           g.UseRegister(mleft.left().node()), g.TempImmediate(0));
+      return;
+    } else if (mleft.right().Is(24) && m.right().Is(24)) {
+      Emit(kArmSxtb, g.DefineAsRegister(node),
+           g.UseRegister(mleft.left().node()), g.TempImmediate(0));
+      return;
+    }
+  }
   VisitShift(this, node, TryMatchASR);
 }
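
The new match relies on the standard identity that shifting left and then
arithmetically right by the same amount sign-extends the surviving low bits.
A quick standalone check (two's-complement narrowing assumed):

    #include <cassert>
    #include <cstdint>

    int32_t shl_sar(int32_t x, int s) {
      return static_cast<int32_t>(static_cast<uint32_t>(x) << s) >> s;
    }

    int main() {
      int32_t x = 0x1234ABCD;
      assert(shl_sar(x, 16) == static_cast<int16_t>(x));  // what sxth computes
      assert(shl_sar(x, 24) == static_cast<int8_t>(x));   // what sxtb computes
    }
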
 
@@ -583,31 +623,113 @@
 void InstructionSelector::VisitInt32Add(Node* node) {
   ArmOperandGenerator g(this);
   Int32BinopMatcher m(node);
-  if (m.left().IsInt32Mul() && CanCover(node, m.left().node())) {
-    Int32BinopMatcher mleft(m.left().node());
-    Emit(kArmMla, g.DefineAsRegister(node), g.UseRegister(mleft.left().node()),
-         g.UseRegister(mleft.right().node()), g.UseRegister(m.right().node()));
-    return;
+  if (CanCover(node, m.left().node())) {
+    switch (m.left().opcode()) {
+      case IrOpcode::kInt32Mul: {
+        Int32BinopMatcher mleft(m.left().node());
+        Emit(kArmMla, g.DefineAsRegister(node),
+             g.UseRegister(mleft.left().node()),
+             g.UseRegister(mleft.right().node()),
+             g.UseRegister(m.right().node()));
+        return;
+      }
+      case IrOpcode::kInt32MulHigh: {
+        Int32BinopMatcher mleft(m.left().node());
+        Emit(kArmSmmla, g.DefineAsRegister(node),
+             g.UseRegister(mleft.left().node()),
+             g.UseRegister(mleft.right().node()),
+             g.UseRegister(m.right().node()));
+        return;
+      }
+      case IrOpcode::kWord32And: {
+        Int32BinopMatcher mleft(m.left().node());
+        if (mleft.right().Is(0xff)) {
+          Emit(kArmUxtab, g.DefineAsRegister(node),
+               g.UseRegister(m.right().node()),
+               g.UseRegister(mleft.left().node()), g.TempImmediate(0));
+          return;
+        } else if (mleft.right().Is(0xffff)) {
+          Emit(kArmUxtah, g.DefineAsRegister(node),
+               g.UseRegister(m.right().node()),
+               g.UseRegister(mleft.left().node()), g.TempImmediate(0));
+          return;
+        }
+      }
+      case IrOpcode::kWord32Sar: {
+        Int32BinopMatcher mleft(m.left().node());
+        if (CanCover(mleft.node(), mleft.left().node()) &&
+            mleft.left().IsWord32Shl()) {
+          Int32BinopMatcher mleftleft(mleft.left().node());
+          if (mleft.right().Is(24) && mleftleft.right().Is(24)) {
+            Emit(kArmSxtab, g.DefineAsRegister(node),
+                 g.UseRegister(m.right().node()),
+                 g.UseRegister(mleftleft.left().node()), g.TempImmediate(0));
+            return;
+          } else if (mleft.right().Is(16) && mleftleft.right().Is(16)) {
+            Emit(kArmSxtah, g.DefineAsRegister(node),
+                 g.UseRegister(m.right().node()),
+                 g.UseRegister(mleftleft.left().node()), g.TempImmediate(0));
+            return;
+          }
+        }
+      }
+      default:
+        break;
+    }
   }
-  if (m.right().IsInt32Mul() && CanCover(node, m.right().node())) {
-    Int32BinopMatcher mright(m.right().node());
-    Emit(kArmMla, g.DefineAsRegister(node), g.UseRegister(mright.left().node()),
-         g.UseRegister(mright.right().node()), g.UseRegister(m.left().node()));
-    return;
-  }
-  if (m.left().IsInt32MulHigh() && CanCover(node, m.left().node())) {
-    Int32BinopMatcher mleft(m.left().node());
-    Emit(kArmSmmla, g.DefineAsRegister(node),
-         g.UseRegister(mleft.left().node()),
-         g.UseRegister(mleft.right().node()), g.UseRegister(m.right().node()));
-    return;
-  }
-  if (m.right().IsInt32MulHigh() && CanCover(node, m.right().node())) {
-    Int32BinopMatcher mright(m.right().node());
-    Emit(kArmSmmla, g.DefineAsRegister(node),
-         g.UseRegister(mright.left().node()),
-         g.UseRegister(mright.right().node()), g.UseRegister(m.left().node()));
-    return;
+  if (CanCover(node, m.right().node())) {
+    switch (m.right().opcode()) {
+      case IrOpcode::kInt32Mul: {
+        Int32BinopMatcher mright(m.right().node());
+        Emit(kArmMla, g.DefineAsRegister(node),
+             g.UseRegister(mright.left().node()),
+             g.UseRegister(mright.right().node()),
+             g.UseRegister(m.left().node()));
+        return;
+      }
+      case IrOpcode::kInt32MulHigh: {
+        Int32BinopMatcher mright(m.right().node());
+        Emit(kArmSmmla, g.DefineAsRegister(node),
+             g.UseRegister(mright.left().node()),
+             g.UseRegister(mright.right().node()),
+             g.UseRegister(m.left().node()));
+        return;
+      }
+      case IrOpcode::kWord32And: {
+        Int32BinopMatcher mright(m.right().node());
+        if (mright.right().Is(0xff)) {
+          Emit(kArmUxtab, g.DefineAsRegister(node),
+               g.UseRegister(m.left().node()),
+               g.UseRegister(mright.left().node()), g.TempImmediate(0));
+          return;
+        } else if (mright.right().Is(0xffff)) {
+          Emit(kArmUxtah, g.DefineAsRegister(node),
+               g.UseRegister(m.left().node()),
+               g.UseRegister(mright.left().node()), g.TempImmediate(0));
+          return;
+        }
+      }
+      case IrOpcode::kWord32Sar: {
+        Int32BinopMatcher mright(m.right().node());
+        if (CanCover(mright.node(), mright.left().node()) &&
+            mright.left().IsWord32Shl()) {
+          Int32BinopMatcher mrightleft(mright.left().node());
+          if (mright.right().Is(24) && mrightleft.right().Is(24)) {
+            Emit(kArmSxtab, g.DefineAsRegister(node),
+                 g.UseRegister(m.left().node()),
+                 g.UseRegister(mrightleft.left().node()), g.TempImmediate(0));
+            return;
+          } else if (mright.right().Is(16) && mrightleft.right().Is(16)) {
+            Emit(kArmSxtah, g.DefineAsRegister(node),
+                 g.UseRegister(m.left().node()),
+                 g.UseRegister(mrightleft.left().node()), g.TempImmediate(0));
+            return;
+          }
+        }
+      }
+      default:
+        break;
+    }
   }
   VisitBinop(this, node, kArmAdd, kArmAdd);
 }
diff --git a/src/compiler/arm64/instruction-selector-arm64.cc b/src/compiler/arm64/instruction-selector-arm64.cc
index 1040131..151d1aa 100644
--- a/src/compiler/arm64/instruction-selector-arm64.cc
+++ b/src/compiler/arm64/instruction-selector-arm64.cc
@@ -542,6 +542,17 @@
 
 
 void InstructionSelector::VisitWord64Shl(Node* node) {
+  Arm64OperandGenerator g(this);
+  Int64BinopMatcher m(node);
+  if ((m.left().IsChangeInt32ToInt64() || m.left().IsChangeUint32ToUint64()) &&
+      m.right().IsInRange(32, 63)) {
+    // There's no need to sign/zero-extend to 64-bit if we shift out the upper
+    // 32 bits anyway.
+    Emit(kArm64Lsl, g.DefineAsRegister(node),
+         g.UseRegister(m.left().node()->InputAt(0)),
+         g.UseImmediate(m.right().node()));
+    return;
+  }
   VisitRRO(this, kArm64Lsl, node, kShift64Imm);
 }
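
The justification: sign and zero extension differ only in bits 32..63 of the
64-bit value, and a shift amount in [32, 63] pushes exactly those bits out, so
the untouched 32-bit input can be shifted directly. A standalone check, with
two's complement assumed:

    #include <cassert>
    #include <cstdint>

    int main() {
      uint32_t raw = 0x80000001u;  // negative when read as int32_t
      uint64_t sext =
          static_cast<uint64_t>(static_cast<int64_t>(static_cast<int32_t>(raw)));
      uint64_t zext = static_cast<uint64_t>(raw);
      for (int s = 32; s <= 63; ++s) {
        assert((sext << s) == (zext << s));  // extension bits are discarded
      }
    }
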
 
@@ -884,6 +895,18 @@
 
 void InstructionSelector::VisitTruncateInt64ToInt32(Node* node) {
   Arm64OperandGenerator g(this);
+  Node* value = node->InputAt(0);
+  if (CanCover(node, value)) {
+    Int64BinopMatcher m(value);
+    if ((m.IsWord64Sar() && m.right().HasValue() &&
+         (m.right().Value() == 32)) ||
+        (m.IsWord64Shr() && m.right().IsInRange(32, 63))) {
+      Emit(kArm64Lsr, g.DefineAsRegister(node), g.UseRegister(m.left().node()),
+           g.UseImmediate(m.right().node()));
+      return;
+    }
+  }
+
   Emit(kArm64Mov32, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)));
 }
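
Note the asymmetry in the match: Word64Shr is accepted for any shift amount in
[32, 63] because it is already a logical shift, while Word64Sar is accepted
only at exactly 32. A larger arithmetic shift would copy sign bits into the
low 32 bits, which the emitted lsr would not reproduce; at exactly 32 the
truncated result is bits 32..63 either way (arithmetic >> on signed values
assumed, matching Word64Sar):

    #include <cassert>
    #include <cstdint>

    int main() {
      int64_t x = -0x123456789ALL;
      assert(static_cast<int32_t>(x >> 32) ==
             static_cast<int32_t>(static_cast<uint64_t>(x) >> 32));
    }
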
 
diff --git a/src/compiler/code-generator.cc b/src/compiler/code-generator.cc
index 6b04eef..708b18e 100644
--- a/src/compiler/code-generator.cc
+++ b/src/compiler/code-generator.cc
@@ -422,7 +422,8 @@
     if (type == kMachBool || type == kMachInt32 || type == kMachInt8 ||
         type == kMachInt16) {
       translation->StoreInt32StackSlot(op->index());
-    } else if (type == kMachUint32) {
+    } else if (type == kMachUint32 || type == kMachUint16 ||
+               type == kMachUint8) {
       translation->StoreUint32StackSlot(op->index());
     } else if ((type & kRepMask) == kRepTagged) {
       translation->StoreStackSlot(op->index());
@@ -437,7 +438,8 @@
     if (type == kMachBool || type == kMachInt32 || type == kMachInt8 ||
         type == kMachInt16) {
       translation->StoreInt32Register(converter.ToRegister(op));
-    } else if (type == kMachUint32) {
+    } else if (type == kMachUint32 || type == kMachUint16 ||
+               type == kMachUint8) {
       translation->StoreUint32Register(converter.ToRegister(op));
     } else if ((type & kRepMask) == kRepTagged) {
       translation->StoreRegister(converter.ToRegister(op));
diff --git a/src/compiler/js-generic-lowering.cc b/src/compiler/js-generic-lowering.cc
index fb18ba1..87b2604 100644
--- a/src/compiler/js-generic-lowering.cc
+++ b/src/compiler/js-generic-lowering.cc
@@ -223,7 +223,7 @@
       linkage()->GetRuntimeCallDescriptor(f, nargs, properties);
   Node* ref = ExternalConstant(ExternalReference(f, isolate()));
   Node* arity = Int32Constant(nargs);
-  PatchInsertInput(node, 0, jsgraph()->CEntryStubConstant());
+  PatchInsertInput(node, 0, jsgraph()->CEntryStubConstant(fun->result_size));
   PatchInsertInput(node, nargs + 1, ref);
   PatchInsertInput(node, nargs + 2, arity);
   PatchOperator(node, common()->Call(desc));
diff --git a/src/compiler/js-graph.cc b/src/compiler/js-graph.cc
index da6d66d..b8a7f97 100644
--- a/src/compiler/js-graph.cc
+++ b/src/compiler/js-graph.cc
@@ -17,12 +17,16 @@
 }
 
 
-Node* JSGraph::CEntryStubConstant() {
-  if (!c_entry_stub_constant_.is_set()) {
-    c_entry_stub_constant_.set(
-        ImmovableHeapConstant(CEntryStub(isolate(), 1).GetCode()));
+Node* JSGraph::CEntryStubConstant(int result_size) {
+  if (result_size == 1) {
+    if (!c_entry_stub_constant_.is_set()) {
+      c_entry_stub_constant_.set(
+          ImmovableHeapConstant(CEntryStub(isolate(), 1).GetCode()));
+    }
+    return c_entry_stub_constant_.get();
   }
-  return c_entry_stub_constant_.get();
+
+  return ImmovableHeapConstant(CEntryStub(isolate(), result_size).GetCode());
 }
 
 
diff --git a/src/compiler/js-graph.h b/src/compiler/js-graph.h
index 83e103d..e1a7b69 100644
--- a/src/compiler/js-graph.h
+++ b/src/compiler/js-graph.h
@@ -32,7 +32,7 @@
         cache_(zone()) {}
 
   // Canonicalized global constants.
-  Node* CEntryStubConstant();
+  Node* CEntryStubConstant(int result_size);
   Node* UndefinedConstant();
   Node* TheHoleConstant();
   Node* TrueConstant();
diff --git a/src/compiler/linkage.cc b/src/compiler/linkage.cc
index a97e484..16529b4 100644
--- a/src/compiler/linkage.cc
+++ b/src/compiler/linkage.cc
@@ -186,6 +186,7 @@
     case Runtime::kRegExpCompile:
     case Runtime::kRegExpExecMultiple:
     case Runtime::kResolvePossiblyDirectEval:
+    case Runtime::kRunMicrotasks:
     case Runtime::kSetPrototype:
     case Runtime::kSetScriptBreakPoint:
     case Runtime::kSparseJoinWithSeparator:
diff --git a/src/compiler/node-matchers.h b/src/compiler/node-matchers.h
index a55e7bf..b1147a7 100644
--- a/src/compiler/node-matchers.h
+++ b/src/compiler/node-matchers.h
@@ -5,6 +5,8 @@
 #ifndef V8_COMPILER_NODE_MATCHERS_H_
 #define V8_COMPILER_NODE_MATCHERS_H_
 
+#include "src/compiler/generic-node.h"
+#include "src/compiler/generic-node-inl.h"
 #include "src/compiler/node.h"
 #include "src/compiler/operator.h"
 #include "src/unique.h"
@@ -116,7 +118,7 @@
 // right hand sides of a binary operation and can put constants on the right
 // if they appear on the left hand side of a commutative operation.
 template <typename Left, typename Right>
-struct BinopMatcher FINAL : public NodeMatcher {
+struct BinopMatcher : public NodeMatcher {
   explicit BinopMatcher(Node* node)
       : NodeMatcher(node), left_(InputAt(0)), right_(InputAt(1)) {
     if (HasProperty(Operator::kCommutative)) PutConstantOnRight();
@@ -128,12 +130,17 @@
   bool IsFoldable() const { return left().HasValue() && right().HasValue(); }
   bool LeftEqualsRight() const { return left().node() == right().node(); }
 
+ protected:
+  void SwapInputs() {
+    std::swap(left_, right_);
+    node()->ReplaceInput(0, left().node());
+    node()->ReplaceInput(1, right().node());
+  }
+
  private:
   void PutConstantOnRight() {
     if (left().HasValue() && !right().HasValue()) {
-      std::swap(left_, right_);
-      node()->ReplaceInput(0, left().node());
-      node()->ReplaceInput(1, right().node());
+      SwapInputs();
     }
   }
 
@@ -150,6 +157,189 @@
 typedef BinopMatcher<Float64Matcher, Float64Matcher> Float64BinopMatcher;
 typedef BinopMatcher<NumberMatcher, NumberMatcher> NumberBinopMatcher;
 
+struct Int32AddMatcher : public Int32BinopMatcher {
+  explicit Int32AddMatcher(Node* node)
+      : Int32BinopMatcher(node), scale_exponent_(-1) {
+    PutScaledInputOnLeft();
+  }
+
+  bool HasScaledInput() const { return scale_exponent_ != -1; }
+  Node* ScaledInput() const {
+    DCHECK(HasScaledInput());
+    return left().node()->InputAt(0);
+  }
+  int ScaleExponent() const {
+    DCHECK(HasScaledInput());
+    return scale_exponent_;
+  }
+
+ private:
+  int GetInputScaleExponent(Node* node) const {
+    if (node->opcode() == IrOpcode::kWord32Shl) {
+      Int32BinopMatcher m(node);
+      if (m.right().HasValue()) {
+        int32_t value = m.right().Value();
+        if (value >= 0 && value <= 3) {
+          return value;
+        }
+      }
+    } else if (node->opcode() == IrOpcode::kInt32Mul) {
+      Int32BinopMatcher m(node);
+      if (m.right().HasValue()) {
+        int32_t value = m.right().Value();
+        if (value == 1) {
+          return 0;
+        } else if (value == 2) {
+          return 1;
+        } else if (value == 4) {
+          return 2;
+        } else if (value == 8) {
+          return 3;
+        }
+      }
+    }
+    return -1;
+  }
+
+  void PutScaledInputOnLeft() {
+    scale_exponent_ = GetInputScaleExponent(right().node());
+    if (scale_exponent_ >= 0) {
+      int left_scale_exponent = GetInputScaleExponent(left().node());
+      if (left_scale_exponent == -1) {
+        SwapInputs();
+      } else {
+        scale_exponent_ = left_scale_exponent;
+      }
+    } else {
+      scale_exponent_ = GetInputScaleExponent(left().node());
+      if (scale_exponent_ == -1) {
+        if (right().opcode() == IrOpcode::kInt32Add &&
+            left().opcode() != IrOpcode::kInt32Add) {
+          SwapInputs();
+        }
+      }
+    }
+  }
+
+  int scale_exponent_;
+};
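
GetInputScaleExponent accepts exactly the scales that an x64 addressing mode
can encode, whether the graph expresses them as a shift or as a multiplication;
PutScaledInputOnLeft then canonicalizes operand order (a scaled input moves to
the left, and failing that an Int32Add input does), which is what lets
ScaledWithOffsetMatcher below probe so few shapes. A sketch of the scale
mapping alone:

    #include <cstdint>

    // Sketch: a scale exponent e means the operand contributes (input << e).
    int ShlScaleExponent(int32_t k) {  // for Word32Shl(input, k)
      return (0 <= k && k <= 3) ? k : -1;
    }
    int MulScaleExponent(int32_t k) {  // for Int32Mul(input, k)
      switch (k) {
        case 1: return 0;
        case 2: return 1;
        case 4: return 2;
        case 8: return 3;
        default: return -1;  // not an encodable scale
      }
    }
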
+
+struct ScaledWithOffsetMatcher {
+  explicit ScaledWithOffsetMatcher(Node* node)
+      : matches_(false),
+        scaled_(NULL),
+        scale_exponent_(0),
+        offset_(NULL),
+        constant_(NULL) {
+    if (node->opcode() != IrOpcode::kInt32Add) return;
+
+    // The Int32AddMatcher canonicalizes the order of constants and scale
+    // factors that are used as inputs, so instead of enumerating all possible
+    // patterns by brute force, checking for node clusters using the following
+    // templates in the following order suffices to find all of the interesting
+    // cases (S = scaled input, O = offset input, C = constant input):
+    // (S + (O + C))
+    // (S + (O + O))
+    // (S + C)
+    // (S + O)
+    // ((S + C) + O)
+    // ((S + O) + C)
+    // ((O + C) + O)
+    // ((O + O) + C)
+    // (O + C)
+    // (O + O)
+    Int32AddMatcher base_matcher(node);
+    Node* left = base_matcher.left().node();
+    Node* right = base_matcher.right().node();
+    if (base_matcher.HasScaledInput() && left->OwnedBy(node)) {
+      scaled_ = base_matcher.ScaledInput();
+      scale_exponent_ = base_matcher.ScaleExponent();
+      if (right->opcode() == IrOpcode::kInt32Add && right->OwnedBy(node)) {
+        Int32AddMatcher right_matcher(right);
+        if (right_matcher.right().HasValue()) {
+          // (S + (O + C))
+          offset_ = right_matcher.left().node();
+          constant_ = right_matcher.right().node();
+        } else {
+          // (S + (O + O))
+          offset_ = right;
+        }
+      } else if (base_matcher.right().HasValue()) {
+        // (S + C)
+        constant_ = right;
+      } else {
+        // (S + O)
+        offset_ = right;
+      }
+    } else {
+      if (left->opcode() == IrOpcode::kInt32Add && left->OwnedBy(node)) {
+        Int32AddMatcher left_matcher(left);
+        Node* left_left = left_matcher.left().node();
+        Node* left_right = left_matcher.right().node();
+        if (left_matcher.HasScaledInput() && left_left->OwnedBy(left)) {
+          scaled_ = left_matcher.ScaledInput();
+          scale_exponent_ = left_matcher.ScaleExponent();
+          if (left_matcher.right().HasValue()) {
+            // ((S + C) + O)
+            constant_ = left_right;
+            offset_ = right;
+          } else if (base_matcher.right().HasValue()) {
+            // ((S + O) + C)
+            offset_ = left_right;
+            constant_ = right;
+          } else {
+            // (O + O)
+            scaled_ = left;
+            offset_ = right;
+          }
+        } else {
+          if (left_matcher.right().HasValue()) {
+            // ((O + C) + O)
+            scaled_ = left_left;
+            constant_ = left_right;
+            offset_ = right;
+          } else if (base_matcher.right().HasValue()) {
+            // ((O + O) + C)
+            scaled_ = left_left;
+            offset_ = left_right;
+            constant_ = right;
+          } else {
+            // (O + O)
+            scaled_ = left;
+            offset_ = right;
+          }
+        }
+      } else {
+        if (base_matcher.right().HasValue()) {
+          // (O + C)
+          offset_ = left;
+          constant_ = right;
+        } else {
+          // (O + O)
+          offset_ = left;
+          scaled_ = right;
+        }
+      }
+    }
+    matches_ = true;
+  }
+
+  bool matches() const { return matches_; }
+  Node* scaled() const { return scaled_; }
+  int scale_exponent() const { return scale_exponent_; }
+  Node* offset() const { return offset_; }
+  Node* constant() const { return constant_; }
+
+ private:
+  bool matches_;
+
+ protected:
+  Node* scaled_;
+  int scale_exponent_;
+  Node* offset_;
+  Node* constant_;
+};
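
As a concrete, hypothetical example of the decomposition: an element access
computing i * 4 + base + 8 arrives as ((S + O) + C) and, provided the
intermediate nodes have no other uses, is taken apart like this:

    //   s = Word32Shl(i, 2)     // the scaled input, scale exponent 2
    //   t = Int32Add(s, base)   // (S + O)
    //   n = Int32Add(t, 8)      // ((S + O) + C)
    // ScaledWithOffsetMatcher(n) then reports:
    //   matches() == true, scaled() == i, scale_exponent() == 2,
    //   offset() == base, constant() == the constant-8 node
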
+
 }  // namespace compiler
 }  // namespace internal
 }  // namespace v8
diff --git a/src/compiler/schedule.h b/src/compiler/schedule.h
index b65a507..95b93b3 100644
--- a/src/compiler/schedule.h
+++ b/src/compiler/schedule.h
@@ -265,8 +265,6 @@
 
  private:
   friend class Scheduler;
-  friend class CodeGenerator;
-  friend class ScheduleVisualizer;
   friend class BasicBlockInstrumentor;
 
   void AddSuccessor(BasicBlock* block, BasicBlock* succ);
diff --git a/src/compiler/scheduler.cc b/src/compiler/scheduler.cc
index 36ed088..af8fba0 100644
--- a/src/compiler/scheduler.cc
+++ b/src/compiler/scheduler.cc
@@ -685,7 +685,7 @@
     stack_.resize(schedule_->BasicBlockCount() - previous_block_count_);
     previous_block_count_ = schedule_->BasicBlockCount();
     int stack_depth = Push(stack_, 0, entry, kBlockUnvisited1);
-    int num_loops = 0;
+    int num_loops = static_cast<int>(loops_.size());
 
     while (stack_depth > 0) {
       int current = stack_depth - 1;
@@ -717,7 +717,7 @@
     }
 
     // If no loops were encountered, then the order we computed was correct.
-    if (num_loops != 0) {
+    if (num_loops > static_cast<int>(loops_.size())) {
       // Otherwise, compute the loop information from the backedges in order
       // to perform a traversal that groups loop bodies together.
       ComputeLoopInfo(stack_, num_loops, &backedges_);
@@ -725,7 +725,7 @@
       // Initialize the "loop stack". Note the entry could be a loop header.
       LoopInfo* loop =
           HasLoopNumber(entry) ? &loops_[GetLoopNumber(entry)] : NULL;
-      order = NULL;
+      order = insert_after;
 
       // Perform an iterative post-order traversal, visiting loop bodies before
       // edges that lead out of loops. Visits each block once, but linking loop
@@ -737,7 +737,7 @@
         BasicBlock* block = frame->block;
         BasicBlock* succ = NULL;
 
-        if (frame->index < block->SuccessorCount()) {
+        if (block != end && frame->index < block->SuccessorCount()) {
           // Process the next normal successor.
           succ = block->SuccessorAt(frame->index++);
         } else if (HasLoopNumber(block)) {
diff --git a/src/compiler/x64/instruction-selector-x64.cc b/src/compiler/x64/instruction-selector-x64.cc
index c70944b..e46357a 100644
--- a/src/compiler/x64/instruction-selector-x64.cc
+++ b/src/compiler/x64/instruction-selector-x64.cc
@@ -364,8 +364,79 @@
   VisitWord64Shift(this, node, kX64Ror);
 }
 
+namespace {
+
+AddressingMode GenerateMemoryOperandInputs(X64OperandGenerator* g, Node* scaled,
+                                           int scale_exponent, Node* offset,
+                                           Node* constant,
+                                           InstructionOperand* inputs[],
+                                           size_t* input_count) {
+  AddressingMode mode = kMode_MRI;
+  if (offset != NULL) {
+    inputs[(*input_count)++] = g->UseRegister(offset);
+    if (scaled != NULL) {
+      DCHECK(scale_exponent >= 0 && scale_exponent <= 3);
+      inputs[(*input_count)++] = g->UseRegister(scaled);
+      if (constant != NULL) {
+        inputs[(*input_count)++] = g->UseImmediate(constant);
+        static const AddressingMode kMRnI_modes[] = {kMode_MR1I, kMode_MR2I,
+                                                     kMode_MR4I, kMode_MR8I};
+        mode = kMRnI_modes[scale_exponent];
+      } else {
+        static const AddressingMode kMRn_modes[] = {kMode_MR1, kMode_MR2,
+                                                    kMode_MR4, kMode_MR8};
+        mode = kMRn_modes[scale_exponent];
+      }
+    } else {
+      DCHECK(constant != NULL);
+      inputs[(*input_count)++] = g->UseImmediate(constant);
+      mode = kMode_MRI;
+    }
+  } else {
+    DCHECK(scaled != NULL);
+    DCHECK(scale_exponent >= 0 && scale_exponent <= 3);
+    inputs[(*input_count)++] = g->UseRegister(scaled);
+    if (constant != NULL) {
+      inputs[(*input_count)++] = g->UseImmediate(constant);
+      static const AddressingMode kMnI_modes[] = {kMode_M1I, kMode_M2I,
+                                                  kMode_M4I, kMode_M8I};
+      mode = kMnI_modes[scale_exponent];
+    } else {
+      static const AddressingMode kMn_modes[] = {kMode_M1, kMode_M2, kMode_M4,
+                                                 kMode_M8};
+      mode = kMn_modes[scale_exponent];
+    }
+  }
+  return mode;
+}
+
+}  // namespace
+
 
 void InstructionSelector::VisitInt32Add(Node* node) {
+  // Try to match the Int32Add to a leal pattern.
+  ScaledWithOffsetMatcher m(node);
+  X64OperandGenerator g(this);
+  if (m.matches() && (m.constant() == NULL || g.CanBeImmediate(m.constant()))) {
+    InstructionOperand* inputs[4];
+    size_t input_count = 0;
+
+    AddressingMode mode = GenerateMemoryOperandInputs(
+        &g, m.scaled(), m.scale_exponent(), m.offset(), m.constant(), inputs,
+        &input_count);
+
+    DCHECK_NE(0, static_cast<int>(input_count));
+    DCHECK_GE(arraysize(inputs), input_count);
+
+    InstructionOperand* outputs[1];
+    outputs[0] = g.DefineAsRegister(node);
+
+    InstructionCode opcode = AddressingModeField::encode(mode) | kX64Lea32;
+
+    Emit(opcode, 1, outputs, input_count, inputs);
+    return;
+  }
+
   VisitBinop(this, node, kX64Add32);
 }
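
GenerateMemoryOperandInputs picks the addressing mode from whichever operands
the matcher produced, so the whole cluster folds into a single leal. Roughly,
with illustrative register names:

    // offset + scaled*4 + constant  ->  kMode_MR4I:  leal dst, [roff + rscaled*4 + imm]
    // offset + constant             ->  kMode_MRI:   leal dst, [roff + imm]
    // scaled*8 alone                ->  kMode_M8:    leal dst, [rscaled*8]
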
 
diff --git a/src/contexts.cc b/src/contexts.cc
index 537d92d..37db84d 100644
--- a/src/contexts.cc
+++ b/src/contexts.cc
@@ -11,6 +11,48 @@
 namespace v8 {
 namespace internal {
 
+
+Handle<GlobalContextTable> GlobalContextTable::Extend(
+    Handle<GlobalContextTable> table, Handle<Context> global_context) {
+  Handle<GlobalContextTable> result;
+  int used = table->used();
+  int length = table->length();
+  CHECK(used >= 0 && length > 0 && used < length);
+  if (used + 1 == length) {
+    CHECK(length < Smi::kMaxValue / 2);
+    result = Handle<GlobalContextTable>::cast(
+        FixedArray::CopySize(table, length * 2));
+  } else {
+    result = table;
+  }
+  result->set_used(used + 1);
+
+  DCHECK(global_context->IsGlobalContext());
+  result->set(used + 1, *global_context);
+  return result;
+}
+
+
+bool GlobalContextTable::Lookup(Handle<GlobalContextTable> table,
+                                Handle<String> name, LookupResult* result) {
+  for (int i = 0; i < table->used(); i++) {
+    Handle<Context> context = GetContext(table, i);
+    DCHECK(context->IsGlobalContext());
+    Handle<ScopeInfo> scope_info(ScopeInfo::cast(context->extension()));
+    int slot_index = ScopeInfo::ContextSlotIndex(
+        scope_info, name, &result->mode, &result->init_flag,
+        &result->maybe_assigned_flag);
+
+    if (slot_index >= 0) {
+      result->context_index = i;
+      result->slot_index = slot_index;
+      return true;
+    }
+  }
+  return false;
+}
+
+
 Context* Context::declaration_context() {
   Context* current = this;
   while (!current->IsFunctionContext() && !current->IsNativeContext()) {
@@ -102,6 +144,53 @@
   return attrs;
 }
 
+static void GetAttributesAndBindingFlags(VariableMode mode,
+                                         InitializationFlag init_flag,
+                                         PropertyAttributes* attributes,
+                                         BindingFlags* binding_flags) {
+  switch (mode) {
+    case INTERNAL:  // Fall through.
+    case VAR:
+      *attributes = NONE;
+      *binding_flags = MUTABLE_IS_INITIALIZED;
+      break;
+    case LET:
+      *attributes = NONE;
+      *binding_flags = (init_flag == kNeedsInitialization)
+                           ? MUTABLE_CHECK_INITIALIZED
+                           : MUTABLE_IS_INITIALIZED;
+      break;
+    case CONST_LEGACY:
+      *attributes = READ_ONLY;
+      *binding_flags = (init_flag == kNeedsInitialization)
+                           ? IMMUTABLE_CHECK_INITIALIZED
+                           : IMMUTABLE_IS_INITIALIZED;
+      break;
+    case CONST:
+      *attributes = READ_ONLY;
+      *binding_flags = (init_flag == kNeedsInitialization)
+                           ? IMMUTABLE_CHECK_INITIALIZED_HARMONY
+                           : IMMUTABLE_IS_INITIALIZED_HARMONY;
+      break;
+    case MODULE:
+      *attributes = READ_ONLY;
+      *binding_flags = IMMUTABLE_IS_INITIALIZED_HARMONY;
+      break;
+    case DYNAMIC:
+    case DYNAMIC_GLOBAL:
+    case DYNAMIC_LOCAL:
+    case TEMPORARY:
+      // Note: Fixed context slots are statically allocated by the compiler.
+      // Statically allocated variables always have a statically known mode,
+      // which is the mode with which they were declared when added to the
+      // scope. Thus, the DYNAMIC mode (which corresponds to dynamically
+      // declared variables that were introduced through declaration nodes)
+      // must not appear here.
+      UNREACHABLE();
+      break;
+  }
+}
+
 
 Handle<Object> Context::Lookup(Handle<String> name,
                                ContextLookupFlags flags,
@@ -122,8 +211,6 @@
     PrintF(")\n");
   }
 
-  bool visited_global_context = false;
-
   do {
     if (FLAG_trace_contexts) {
       PrintF(" - looking in context %p", reinterpret_cast<void*>(*context));
@@ -132,19 +219,6 @@
       PrintF("\n");
     }
 
-    if (follow_context_chain && FLAG_harmony_scoping &&
-        !visited_global_context &&
-        (context->IsGlobalContext() || context->IsNativeContext())) {
-      // For lexical scoping, on a top level, we might resolve to the
-      // lexical bindings introduced by later scrips. Therefore we need to
-      // switch to the the last added global context during lookup here.
-      context = Handle<Context>(context->global_object()->global_context());
-      visited_global_context = true;
-      if (FLAG_trace_contexts) {
-        PrintF("   - switching to current global context %p\n",
-               reinterpret_cast<void*>(*context));
-      }
-    }
 
     // 1. Check global objects, subjects of with, and extension objects.
     if (context->IsNativeContext() ||
@@ -152,6 +226,30 @@
         (context->IsFunctionContext() && context->has_extension())) {
       Handle<JSReceiver> object(
           JSReceiver::cast(context->extension()), isolate);
+
+      if (context->IsNativeContext()) {
+        if (FLAG_trace_contexts) {
+          PrintF(" - trying other global contexts\n");
+        }
+        // Try other global contexts.
+        Handle<GlobalContextTable> global_contexts(
+            context->global_object()->native_context()->global_context_table());
+        GlobalContextTable::LookupResult r;
+        if (GlobalContextTable::Lookup(global_contexts, name, &r)) {
+          if (FLAG_trace_contexts) {
+            Handle<Context> c = GlobalContextTable::GetContext(global_contexts,
+                                                               r.context_index);
+            PrintF("=> found property in global context %d: %p\n",
+                   r.context_index, reinterpret_cast<void*>(*c));
+          }
+          *index = r.slot_index;
+          GetAttributesAndBindingFlags(r.mode, r.init_flag, attributes,
+                                       binding_flags);
+          return GlobalContextTable::GetContext(global_contexts,
+                                                r.context_index);
+        }
+      }
+
       // Context extension objects need to behave as if they have no
       // prototype.  So even if we want to follow prototype chains, we need
       // to only do a local lookup for context extension objects.
@@ -206,45 +304,8 @@
                  slot_index, mode);
         }
         *index = slot_index;
-        // Note: Fixed context slots are statically allocated by the compiler.
-        // Statically allocated variables always have a statically known mode,
-        // which is the mode with which they were declared when added to the
-        // scope. Thus, the DYNAMIC mode (which corresponds to dynamically
-        // declared variables that were introduced through declaration nodes)
-        // must not appear here.
-        switch (mode) {
-          case INTERNAL:  // Fall through.
-          case VAR:
-            *attributes = NONE;
-            *binding_flags = MUTABLE_IS_INITIALIZED;
-            break;
-          case LET:
-            *attributes = NONE;
-            *binding_flags = (init_flag == kNeedsInitialization)
-                ? MUTABLE_CHECK_INITIALIZED : MUTABLE_IS_INITIALIZED;
-            break;
-          case CONST_LEGACY:
-            *attributes = READ_ONLY;
-            *binding_flags = (init_flag == kNeedsInitialization)
-                ? IMMUTABLE_CHECK_INITIALIZED : IMMUTABLE_IS_INITIALIZED;
-            break;
-          case CONST:
-            *attributes = READ_ONLY;
-            *binding_flags = (init_flag == kNeedsInitialization)
-                ? IMMUTABLE_CHECK_INITIALIZED_HARMONY :
-                IMMUTABLE_IS_INITIALIZED_HARMONY;
-            break;
-          case MODULE:
-            *attributes = READ_ONLY;
-            *binding_flags = IMMUTABLE_IS_INITIALIZED_HARMONY;
-            break;
-          case DYNAMIC:
-          case DYNAMIC_GLOBAL:
-          case DYNAMIC_LOCAL:
-          case TEMPORARY:
-            UNREACHABLE();
-            break;
-        }
+        GetAttributesAndBindingFlags(mode, init_flag, attributes,
+                                     binding_flags);
         return context;
       }
 
diff --git a/src/contexts.h b/src/contexts.h
index dc77861..716682d 100644
--- a/src/contexts.h
+++ b/src/contexts.h
@@ -183,7 +183,57 @@
   V(SET_ITERATOR_MAP_INDEX, Map, set_iterator_map)                             \
   V(ITERATOR_SYMBOL_INDEX, Symbol, iterator_symbol)                            \
   V(UNSCOPABLES_SYMBOL_INDEX, Symbol, unscopables_symbol)                      \
-  V(ARRAY_VALUES_ITERATOR_INDEX, JSFunction, array_values_iterator)
+  V(ARRAY_VALUES_ITERATOR_INDEX, JSFunction, array_values_iterator)            \
+  V(GLOBAL_CONTEXT_TABLE_INDEX, GlobalContextTable, global_context_table)
+
+
+// A table of all global contexts. Every loaded top-level script with top-level
+// lexical declarations contributes its GlobalContext into this table.
+//
+// The table is a fixed array; its first slot holds the current used count,
+// and the subsequent slots 1..used contain the GlobalContexts.
+class GlobalContextTable : public FixedArray {
+ public:
+  // Conversions.
+  static GlobalContextTable* cast(Object* context) {
+    DCHECK(context->IsGlobalContextTable());
+    return reinterpret_cast<GlobalContextTable*>(context);
+  }
+
+  struct LookupResult {
+    int context_index;
+    int slot_index;
+    VariableMode mode;
+    InitializationFlag init_flag;
+    MaybeAssignedFlag maybe_assigned_flag;
+  };
+
+  int used() const { return Smi::cast(get(kUsedSlot))->value(); }
+
+  void set_used(int used) { set(kUsedSlot, Smi::FromInt(used)); }
+
+  static Handle<Context> GetContext(Handle<GlobalContextTable> table, int i) {
+    DCHECK(i < table->used());
+    return Handle<Context>::cast(FixedArray::get(table, i + 1));
+  }
+
+  // Looks up the variable `name` in a GlobalContextTable.
+  // If it returns true, the variable is found and `result` contains
+  // valid information about its location.
+  // If it returns false, `result` is untouched.
+  MUST_USE_RESULT
+  static bool Lookup(Handle<GlobalContextTable> table, Handle<String> name,
+                     LookupResult* result);
+
+  MUST_USE_RESULT
+  static Handle<GlobalContextTable> Extend(Handle<GlobalContextTable> table,
+                                           Handle<Context> global_context);
+
+ private:
+  static const int kUsedSlot = 0;
+
+  DISALLOW_IMPLICIT_CONSTRUCTORS(GlobalContextTable);
+};
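
For illustration, a freshly allocated table (see Factory::NewGlobalContextTable
below) has length 1 and used() == 0, and Extend doubles the backing store when
it fills up. With two scripts registered the layout is:

    //   slot 0: Smi(2)                       // used count
    //   slot 1: global context of script A   // GetContext(table, 0)
    //   slot 2: global context of script B   // GetContext(table, 1)
    // Extend bumps the count and stores the new context at the slot whose
    // index equals the new count.
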
 
 // JSFunctions are pairs (context, function code), sometimes also called
 // closures. A Context object is used to represent function contexts and
@@ -229,6 +279,8 @@
 //
 // Finally, with Harmony scoping, the JSFunction representing a top level
 // script will have the GlobalContext rather than a FunctionContext.
+// Global contexts from all top-level scripts are gathered in the
+// GlobalContextTable.
 
 class Context: public FixedArray {
  public:
@@ -360,6 +412,7 @@
     ITERATOR_SYMBOL_INDEX,
     UNSCOPABLES_SYMBOL_INDEX,
     ARRAY_VALUES_ITERATOR_INDEX,
+    GLOBAL_CONTEXT_TABLE_INDEX,
 
     // Properties from here are treated as weak references by the full GC.
     // Scavenge treats them as strong references.
diff --git a/src/factory.cc b/src/factory.cc
index 72974a3..796fd13 100644
--- a/src/factory.cc
+++ b/src/factory.cc
@@ -708,6 +708,16 @@
 }
 
 
+Handle<GlobalContextTable> Factory::NewGlobalContextTable() {
+  Handle<FixedArray> array = NewFixedArray(1);
+  array->set_map_no_write_barrier(*global_context_table_map());
+  Handle<GlobalContextTable> context_table =
+      Handle<GlobalContextTable>::cast(array);
+  context_table->set_used(0);
+  return context_table;
+}
+
+
 Handle<Context> Factory::NewModuleContext(Handle<ScopeInfo> scope_info) {
   Handle<FixedArray> array =
       NewFixedArray(scope_info->ContextLength(), TENURED);
@@ -2077,6 +2087,9 @@
   share->set_inferred_name(*empty_string(), SKIP_WRITE_BARRIER);
   Handle<TypeFeedbackVector> feedback_vector = NewTypeFeedbackVector(0, 0);
   share->set_feedback_vector(*feedback_vector, SKIP_WRITE_BARRIER);
+#if TRACE_MAPS
+  share->set_unique_id(isolate()->GetNextUniqueSharedFunctionInfoId());
+#endif
   share->set_profiler_ticks(0);
   share->set_ast_node_count(0);
   share->set_counters(0);
diff --git a/src/factory.h b/src/factory.h
index 9f9813c..6a9ee55 100644
--- a/src/factory.h
+++ b/src/factory.h
@@ -229,6 +229,9 @@
   Handle<Context> NewGlobalContext(Handle<JSFunction> function,
                                    Handle<ScopeInfo> scope_info);
 
+  // Create an empty global context table.
+  Handle<GlobalContextTable> NewGlobalContextTable();
+
   // Create a module context.
   Handle<Context> NewModuleContext(Handle<ScopeInfo> scope_info);
 
diff --git a/src/flag-definitions.h b/src/flag-definitions.h
index ca82ce7..5f2a672 100644
--- a/src/flag-definitions.h
+++ b/src/flag-definitions.h
@@ -438,11 +438,6 @@
 DEFINE_BOOL(force_long_branches, false,
             "force all emitted branches to be in long mode (MIPS only)")
 
-// cpu-arm64.cc
-DEFINE_BOOL(enable_always_align_csp, true,
-            "enable alignment of csp to 16 bytes on platforms which prefer "
-            "the register to always be aligned (ARM64 only)")
-
 // bootstrapper.cc
 DEFINE_STRING(expose_natives_as, NULL, "expose natives in global object")
 DEFINE_STRING(expose_debug_as, NULL, "expose debug in global object")
@@ -620,6 +615,9 @@
 
 // objects.cc
 DEFINE_BOOL(use_verbose_printer, true, "allows verbose printing")
+#if TRACE_MAPS
+DEFINE_BOOL(trace_maps, false, "trace map creation")
+#endif
 
 // parser.cc
 DEFINE_BOOL(allow_natives_syntax, false, "allow natives syntax")
diff --git a/src/full-codegen.cc b/src/full-codegen.cc
index 58e5e97..01f2faf 100644
--- a/src/full-codegen.cc
+++ b/src/full-codegen.cc
@@ -1575,11 +1575,7 @@
     __ Push(isolate()->factory()->the_hole_value());
   }
 
-  if (lit->constructor() != NULL) {
-    VisitForStackValue(lit->constructor());
-  } else {
-    __ Push(isolate()->factory()->undefined_value());
-  }
+  VisitForStackValue(lit->constructor());
 
   __ Push(script());
   __ Push(Smi::FromInt(lit->start_position()));
diff --git a/src/globals.h b/src/globals.h
index c6ba010..7fa4317 100644
--- a/src/globals.h
+++ b/src/globals.h
@@ -776,7 +776,9 @@
   kArrowFunction = 1,
   kGeneratorFunction = 2,
   kConciseMethod = 4,
-  kConciseGeneratorMethod = kGeneratorFunction | kConciseMethod
+  kConciseGeneratorMethod = kGeneratorFunction | kConciseMethod,
+  kDefaultConstructor = 8,
+  kDefaultConstructorCallSuper = 16
 };
 
 
@@ -785,7 +787,9 @@
          kind == FunctionKind::kArrowFunction ||
          kind == FunctionKind::kGeneratorFunction ||
          kind == FunctionKind::kConciseMethod ||
-         kind == FunctionKind::kConciseGeneratorMethod;
+         kind == FunctionKind::kConciseGeneratorMethod ||
+         kind == FunctionKind::kDefaultConstructor ||
+         kind == FunctionKind::kDefaultConstructorCallSuper;
 }
 
 
@@ -805,6 +809,18 @@
   DCHECK(IsValidFunctionKind(kind));
   return kind & FunctionKind::kConciseMethod;
 }
+
+
+inline bool IsDefaultConstructor(FunctionKind kind) {
+  DCHECK(IsValidFunctionKind(kind));
+  return kind & FunctionKind::kDefaultConstructor;
+}
+
+
+inline bool IsDefaultConstructorCallSuper(FunctionKind kind) {
+  DCHECK(IsValidFunctionKind(kind));
+  return kind & FunctionKind::kDefaultConstructorCallSuper;
+}
 } }  // namespace v8::internal
 
 namespace i = v8::internal;
diff --git a/src/heap/gc-idle-time-handler.cc b/src/heap/gc-idle-time-handler.cc
index b352f44..accb87c 100644
--- a/src/heap/gc-idle-time-handler.cc
+++ b/src/heap/gc-idle-time-handler.cc
@@ -14,6 +14,7 @@
 const size_t GCIdleTimeHandler::kMinTimeForFinalizeSweeping = 100;
 const int GCIdleTimeHandler::kMaxMarkCompactsInIdleRound = 7;
 const int GCIdleTimeHandler::kIdleScavengeThreshold = 5;
+const double GCIdleTimeHandler::kHighContextDisposalRate = 100;
 
 
 void GCIdleTimeAction::Print() {
@@ -129,14 +130,12 @@
 // (2) If the new space is almost full and we can afford a Scavenge or if the
 // next Scavenge will very likely take long, then a Scavenge is performed.
 // (3) If there is currently no MarkCompact idle round going on, we start a
-// new idle round if enough garbage was created or we received a context
-// disposal event. Otherwise we do not perform garbage collection to keep
-// system utilization low.
+// new idle round if enough garbage was created. Otherwise we do not perform
+// garbage collection to keep system utilization low.
 // (4) If incremental marking is done, we perform a full garbage collection
-// if context was disposed or if we are allowed to still do full garbage
-// collections during this idle round or if we are not allowed to start
-// incremental marking. Otherwise we do not perform garbage collection to
-// keep system utilization low.
+// if we are still allowed to do full garbage collections during this idle
+// round or if we are not allowed to start incremental marking. Otherwise we
+// do not perform garbage collection to keep system utilization low.
 // (5) If sweeping is in progress and we received a large enough idle time
 // request, we finalize sweeping here.
 // (6) If incremental marking is in progress, we perform a marking step. Note,
@@ -145,8 +144,8 @@
                                             HeapState heap_state) {
   if (idle_time_in_ms == 0) {
     if (heap_state.incremental_marking_stopped) {
-      if (heap_state.size_of_objects < kSmallHeapSize &&
-          heap_state.contexts_disposed > 0) {
+      if (heap_state.contexts_disposed > 0 &&
+          heap_state.contexts_disposal_rate < kHighContextDisposalRate) {
         return GCIdleTimeAction::FullGC();
       }
     }
@@ -162,7 +161,7 @@
   }
 
   if (IsMarkCompactIdleRoundFinished()) {
-    if (EnoughGarbageSinceLastIdleRound() || heap_state.contexts_disposed > 0) {
+    if (EnoughGarbageSinceLastIdleRound()) {
       StartIdleRound();
     } else {
       return GCIdleTimeAction::Done();
@@ -170,11 +169,8 @@
   }
 
   if (heap_state.incremental_marking_stopped) {
-    // TODO(jochen): Remove context disposal dependant logic.
     if (ShouldDoMarkCompact(idle_time_in_ms, heap_state.size_of_objects,
-                            heap_state.mark_compact_speed_in_bytes_per_ms) ||
-        (heap_state.size_of_objects < kSmallHeapSize &&
-         heap_state.contexts_disposed > 0)) {
+                            heap_state.mark_compact_speed_in_bytes_per_ms)) {
       // If there are no more than two GCs left in this idle round and we are
       // allowed to do a full GC, then make those GCs full in order to compact
       // the code space.
@@ -182,10 +178,9 @@
       // can get rid of this special case and always start incremental marking.
       int remaining_mark_sweeps =
           kMaxMarkCompactsInIdleRound - mark_compacts_since_idle_round_started_;
-      if (heap_state.contexts_disposed > 0 ||
-          (idle_time_in_ms > kMaxFrameRenderingIdleTime &&
-           (remaining_mark_sweeps <= 2 ||
-            !heap_state.can_start_incremental_marking))) {
+      if (idle_time_in_ms > kMaxFrameRenderingIdleTime &&
+          (remaining_mark_sweeps <= 2 ||
+           !heap_state.can_start_incremental_marking)) {
         return GCIdleTimeAction::FullGC();
       }
     }
diff --git a/src/heap/gc-idle-time-handler.h b/src/heap/gc-idle-time-handler.h
index edd5e42..cae6936 100644
--- a/src/heap/gc-idle-time-handler.h
+++ b/src/heap/gc-idle-time-handler.h
@@ -106,10 +106,6 @@
   // Number of scavenges that will trigger start of new idle round.
   static const int kIdleScavengeThreshold;
 
-  // Heap size threshold below which we prefer mark-compact over incremental
-  // step.
-  static const size_t kSmallHeapSize = 4 * kPointerSize * MB;
-
   // That is the maximum idle time we will have during frame rendering.
   static const size_t kMaxFrameRenderingIdleTime = 16;
 
@@ -117,8 +113,12 @@
   // lower bound for the scavenger speed.
   static const size_t kInitialConservativeScavengeSpeed = 100 * KB;
 
+  // A full GC is triggered when contexts are disposed at a high rate, i.e.
+  // when the average time between disposals drops below this threshold (ms).
+  static const double kHighContextDisposalRate;
+
   struct HeapState {
     int contexts_disposed;
+    double contexts_disposal_rate;
     size_t size_of_objects;
     bool incremental_marking_stopped;
     bool can_start_incremental_marking;
diff --git a/src/heap/gc-tracer.cc b/src/heap/gc-tracer.cc
index 8a40b53..6d7231e 100644
--- a/src/heap/gc-tracer.cc
+++ b/src/heap/gc-tracer.cc
@@ -26,6 +26,11 @@
 }
 
 
+GCTracer::ContextDisposalEvent::ContextDisposalEvent(double time) {
+  time_ = time;
+}
+
+
 GCTracer::Event::Event(Type type, const char* gc_reason,
                        const char* collector_reason)
     : type(type),
@@ -207,6 +212,11 @@
 }
 
 
+void GCTracer::AddContextDisposalTime(double time) {
+  context_disposal_events_.push_front(ContextDisposalEvent(time));
+}
+
+
 void GCTracer::AddIncrementalMarkingStep(double duration, intptr_t bytes) {
   cumulative_incremental_marking_steps_++;
   cumulative_incremental_marking_bytes_ += bytes;
@@ -319,6 +329,7 @@
   PrintF("semi_space_copy_rate=%.1f%% ", heap_->semi_space_copied_rate_);
   PrintF("new_space_allocation_throughput=%" V8_PTR_PREFIX "d ",
          NewSpaceAllocationThroughputInBytesPerMillisecond());
+  PrintF("context_disposal_rate=%.1f ", ContextDisposalRateInMilliseconds());
 
   if (current_.type == Event::SCAVENGER) {
     PrintF("steps_count=%d ", current_.incremental_marking_steps);
@@ -476,5 +487,21 @@
 
   return static_cast<intptr_t>(bytes / durations);
 }
+
+
+double GCTracer::ContextDisposalRateInMilliseconds() const {
+  if (context_disposal_events_.size() == 0) return 0.0;
+
+  double begin = base::OS::TimeCurrentMillis();
+  double end = 0.0;
+  ContextDisposalEventBuffer::const_iterator iter =
+      context_disposal_events_.begin();
+  while (iter != context_disposal_events_.end()) {
+    end = iter->time_;
+    ++iter;
+  }
+
+  return (begin - end) / context_disposal_events_.size();
+}
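
A worked example with assumed timestamps: the buffer is filled with
push_front, so walking to its end reaches the oldest recorded event.

    // Disposals recorded at t = 1000, 1200, 1400 ms; TimeCurrentMillis() = 1600.
    //   begin = 1600 (now), end = 1000 (oldest recorded disposal)
    //   rate  = (1600 - 1000) / 3 = 200 ms per context disposal
    // GCIdleTimeHandler compares this against kHighContextDisposalRate (100):
    // an average below 100 ms per disposal counts as a high disposal rate and
    // triggers the eager full GC at zero idle time; here, 200 ms does not.
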
 }
 }  // namespace v8::internal
diff --git a/src/heap/gc-tracer.h b/src/heap/gc-tracer.h
index 4e70f07..3e6a8a7 100644
--- a/src/heap/gc-tracer.h
+++ b/src/heap/gc-tracer.h
@@ -145,6 +145,19 @@
     intptr_t allocation_in_bytes_;
   };
 
+
+  class ContextDisposalEvent {
+   public:
+    // Default constructor leaves the event uninitialized.
+    ContextDisposalEvent() {}
+
+    explicit ContextDisposalEvent(double time);
+
+    // Time when the context disposal event happened.
+    double time_;
+  };
+
+
   class Event {
    public:
     enum Type { SCAVENGER = 0, MARK_COMPACTOR = 1, START = 2 };
@@ -241,6 +254,9 @@
 
   typedef RingBuffer<AllocationEvent, kRingBufferMaxSize> AllocationEventBuffer;
 
+  typedef RingBuffer<ContextDisposalEvent, kRingBufferMaxSize>
+      ContextDisposalEventBuffer;
+
   explicit GCTracer(Heap* heap);
 
   // Start collecting data.
@@ -253,6 +269,8 @@
   // Log an allocation throughput event.
   void AddNewSpaceAllocationTime(double duration, intptr_t allocation_in_bytes);
 
+  void AddContextDisposalTime(double time);
+
   // Log an incremental marking step.
   void AddIncrementalMarkingStep(double duration, intptr_t bytes);
 
@@ -322,6 +340,12 @@
   // Returns 0 if no events have been recorded.
   intptr_t NewSpaceAllocationThroughputInBytesPerMillisecond() const;
 
+  // Computes the average time between context disposals in milliseconds. It
+  // measures the span from the oldest recorded context disposal to the
+  // current time and divides it by the number of recorded events.
+  // Returns 0 if no events have been recorded.
+  double ContextDisposalRateInMilliseconds() const;
+
  private:
   // Print one detailed trace line in name=value format.
   // TODO(ernstm): Move to Heap.
@@ -359,6 +383,8 @@
   // RingBuffer for allocation events.
   AllocationEventBuffer allocation_events_;
 
+  ContextDisposalEventBuffer context_disposal_events_;
+
   // Cumulative number of incremental marking steps since creation of tracer.
   int cumulative_incremental_marking_steps_;
 
diff --git a/src/heap/heap.cc b/src/heap/heap.cc
index 166dd3a..9c57ea3 100644
--- a/src/heap/heap.cc
+++ b/src/heap/heap.cc
@@ -865,6 +865,7 @@
   }
   flush_monomorphic_ics_ = true;
   AgeInlineCaches();
+  tracer()->AddContextDisposalTime(base::OS::TimeCurrentMillis());
   return ++contexts_disposed_;
 }
 
@@ -2608,6 +2609,7 @@
     ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, block_context)
     ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, module_context)
     ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, global_context)
+    ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, global_context_table)
 
     ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, native_context)
     native_context_map()->set_dictionary_map(true);
@@ -4363,6 +4365,8 @@
 
   GCIdleTimeHandler::HeapState heap_state;
   heap_state.contexts_disposed = contexts_disposed_;
+  heap_state.contexts_disposal_rate =
+      tracer()->ContextDisposalRateInMilliseconds();
   heap_state.size_of_objects = static_cast<size_t>(SizeOfObjects());
   heap_state.incremental_marking_stopped = incremental_marking()->IsStopped();
   // TODO(ulan): Start incremental marking only for large heaps.
diff --git a/src/heap/heap.h b/src/heap/heap.h
index ee1fca9..184cb42 100644
--- a/src/heap/heap.h
+++ b/src/heap/heap.h
@@ -149,6 +149,7 @@
   V(Map, block_context_map, BlockContextMap)                                   \
   V(Map, module_context_map, ModuleContextMap)                                 \
   V(Map, global_context_map, GlobalContextMap)                                 \
+  V(Map, global_context_table_map, GlobalContextTableMap)                      \
   V(Map, undefined_map, UndefinedMap)                                          \
   V(Map, the_hole_map, TheHoleMap)                                             \
   V(Map, null_map, NullMap)                                                    \
diff --git a/src/hydrogen-instructions.h b/src/hydrogen-instructions.h
index 233ca42..9101576 100644
--- a/src/hydrogen-instructions.h
+++ b/src/hydrogen-instructions.h
@@ -7562,6 +7562,10 @@
   bool is_arrow() const { return IsArrowFunction(kind()); }
   bool is_generator() const { return IsGeneratorFunction(kind()); }
   bool is_concise_method() const { return IsConciseMethod(kind()); }
+  bool is_default_constructor() const { return IsDefaultConstructor(kind()); }
+  bool is_default_constructor_call_super() const {
+    return IsDefaultConstructorCallSuper(kind());
+  }
   FunctionKind kind() const { return FunctionKindField::decode(bit_field_); }
   StrictMode strict_mode() const { return StrictModeField::decode(bit_field_); }
 
@@ -7581,10 +7585,10 @@
 
   virtual bool IsDeletable() const OVERRIDE { return true; }
 
-  class FunctionKindField : public BitField<FunctionKind, 0, 3> {};
-  class PretenureField : public BitField<bool, 3, 1> {};
-  class HasNoLiteralsField : public BitField<bool, 4, 1> {};
-  class StrictModeField : public BitField<StrictMode, 5, 1> {};
+  class FunctionKindField : public BitField<FunctionKind, 0, 5> {};
+  class PretenureField : public BitField<bool, 5, 1> {};
+  class HasNoLiteralsField : public BitField<bool, 6, 1> {};
+  class StrictModeField : public BitField<StrictMode, 7, 1> {};
 
   Handle<SharedFunctionInfo> shared_info_;
   uint32_t bit_field_;
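
The FunctionKind field grows from 3 to 5 bits because the two new kinds occupy
bits 3 and 4 (kDefaultConstructor = 8, kDefaultConstructorCallSuper = 16), and
the neighbouring fields shift up accordingly. A sketch of the constraint:

    #include <cstdint>

    enum FunctionKind : uint32_t {
      kDefaultConstructor = 8,            // needs bit 3
      kDefaultConstructorCallSuper = 16,  // needs bit 4
    };
    static_assert(kDefaultConstructorCallSuper < (1u << 5),
                  "every kind fits in the widened 5-bit field");
    static_assert(kDefaultConstructorCallSuper >= (1u << 3),
                  "the new kinds did not fit in the old 3-bit field");
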
diff --git a/src/ia32/code-stubs-ia32.cc b/src/ia32/code-stubs-ia32.cc
index caef04c..91df47a 100644
--- a/src/ia32/code-stubs-ia32.cc
+++ b/src/ia32/code-stubs-ia32.cc
@@ -2214,6 +2214,10 @@
   // edi - function
   // edx - slot id
   Isolate* isolate = masm->isolate();
+  const int with_types_offset =
+      FixedArray::OffsetOfElementAt(TypeFeedbackVector::kWithTypesIndex);
+  const int generic_offset =
+      FixedArray::OffsetOfElementAt(TypeFeedbackVector::kGenericCountIndex);
   Label extra_checks_or_miss, slow_start;
   Label slow, non_function, wrap, cont;
   Label have_js_function;
@@ -2253,35 +2257,66 @@
   }
 
   __ bind(&extra_checks_or_miss);
-  Label miss;
+  Label uninitialized, miss;
 
   __ mov(ecx, FieldOperand(ebx, edx, times_half_pointer_size,
                            FixedArray::kHeaderSize));
   __ cmp(ecx, Immediate(TypeFeedbackVector::MegamorphicSentinel(isolate)));
   __ j(equal, &slow_start);
-  __ cmp(ecx, Immediate(TypeFeedbackVector::UninitializedSentinel(isolate)));
-  __ j(equal, &miss);
 
-  if (!FLAG_trace_ic) {
-    // We are going megamorphic. If the feedback is a JSFunction, it is fine
-    // to handle it here. More complex cases are dealt with in the runtime.
-    __ AssertNotSmi(ecx);
-    __ CmpObjectType(ecx, JS_FUNCTION_TYPE, ecx);
-    __ j(not_equal, &miss);
-    __ mov(FieldOperand(ebx, edx, times_half_pointer_size,
-                        FixedArray::kHeaderSize),
-           Immediate(TypeFeedbackVector::MegamorphicSentinel(isolate)));
-    // We have to update statistics for runtime profiling.
-    const int with_types_offset =
-        FixedArray::OffsetOfElementAt(TypeFeedbackVector::kWithTypesIndex);
-    __ sub(FieldOperand(ebx, with_types_offset), Immediate(Smi::FromInt(1)));
-    const int generic_offset =
-        FixedArray::OffsetOfElementAt(TypeFeedbackVector::kGenericCountIndex);
-    __ add(FieldOperand(ebx, generic_offset), Immediate(Smi::FromInt(1)));
-    __ jmp(&slow_start);
+  // The following checks attempt to handle MISS cases without going to the
+  // runtime.
+  if (FLAG_trace_ic) {
+    __ jmp(&miss);
   }
 
-  // We are here because tracing is on or we are going monomorphic.
+  __ cmp(ecx, Immediate(TypeFeedbackVector::UninitializedSentinel(isolate)));
+  __ j(equal, &uninitialized);
+
+  // We are going megamorphic. If the feedback is a JSFunction, it is fine
+  // to handle it here. More complex cases are dealt with in the runtime.
+  __ AssertNotSmi(ecx);
+  __ CmpObjectType(ecx, JS_FUNCTION_TYPE, ecx);
+  __ j(not_equal, &miss);
+  __ mov(
+      FieldOperand(ebx, edx, times_half_pointer_size, FixedArray::kHeaderSize),
+      Immediate(TypeFeedbackVector::MegamorphicSentinel(isolate)));
+  // We have to update statistics for runtime profiling.
+  __ sub(FieldOperand(ebx, with_types_offset), Immediate(Smi::FromInt(1)));
+  __ add(FieldOperand(ebx, generic_offset), Immediate(Smi::FromInt(1)));
+  __ jmp(&slow_start);
+
+  __ bind(&uninitialized);
+
+  // We are going monomorphic, provided we actually have a JSFunction.
+  __ JumpIfSmi(edi, &miss);
+
+  // Go to the miss case if we do not have a function.
+  __ CmpObjectType(edi, JS_FUNCTION_TYPE, ecx);
+  __ j(not_equal, &miss);
+
+  // Make sure the function is not the Array() function, which requires special
+  // behavior on MISS.
+  __ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, ecx);
+  __ cmp(edi, ecx);
+  __ j(equal, &miss);
+
+  // Update stats.
+  __ add(FieldOperand(ebx, with_types_offset), Immediate(Smi::FromInt(1)));
+
+  // Store the function.
+  __ mov(
+      FieldOperand(ebx, edx, times_half_pointer_size, FixedArray::kHeaderSize),
+      edi);
+
+  // Update the write barrier.
+  __ mov(eax, edi);
+  __ RecordWriteArray(ebx, eax, edx, kDontSaveFPRegs, EMIT_REMEMBERED_SET,
+                      OMIT_SMI_CHECK);
+  __ jmp(&have_js_function);
+
+  // We are here because tracing is on or we encountered a MISS case we can't
+  // handle here.
   __ bind(&miss);
   GenerateMiss(masm);
 
diff --git a/src/ic/ic.cc b/src/ic/ic.cc
index a9369ed..e83a4b2 100644
--- a/src/ic/ic.cc
+++ b/src/ic/ic.cc
@@ -661,6 +661,21 @@
 
   bool use_ic = MigrateDeprecated(object) ? false : FLAG_use_ic;
 
+  if (FLAG_harmony_scoping && object->IsGlobalObject() && name->IsString()) {
+    // Look up in the global context table: under harmony scoping,
+    // script-level lexical bindings live there rather than as own
+    // properties of the global object.
+    Handle<String> str_name = Handle<String>::cast(name);
+    Handle<GlobalObject> global = Handle<GlobalObject>::cast(object);
+    Handle<GlobalContextTable> global_contexts(
+        global->native_context()->global_context_table());
+
+    GlobalContextTable::LookupResult lookup_result;
+    if (GlobalContextTable::Lookup(global_contexts, str_name, &lookup_result)) {
+      return FixedArray::get(GlobalContextTable::GetContext(
+                                 global_contexts, lookup_result.context_index),
+                             lookup_result.slot_index);
+    }
+  }
+
   // Named lookup in the object.
   LookupIterator it(object, name);
   LookupForRead(&it);
@@ -1363,6 +1378,25 @@
 MaybeHandle<Object> StoreIC::Store(Handle<Object> object, Handle<Name> name,
                                    Handle<Object> value,
                                    JSReceiver::StoreFromKeyed store_mode) {
+  if (FLAG_harmony_scoping && object->IsGlobalObject() && name->IsString()) {
+    // Look up in the global context table; stores to script-scope
+    // bindings land here, and assignments to consts are rejected.
+    Handle<String> str_name = Handle<String>::cast(name);
+    Handle<GlobalObject> global = Handle<GlobalObject>::cast(object);
+    Handle<GlobalContextTable> global_contexts(
+        global->native_context()->global_context_table());
+
+    GlobalContextTable::LookupResult lookup_result;
+    if (GlobalContextTable::Lookup(global_contexts, str_name, &lookup_result)) {
+      Handle<Context> global_context = GlobalContextTable::GetContext(
+          global_contexts, lookup_result.context_index);
+      if (lookup_result.mode == CONST) {
+        return TypeError("harmony_const_assign", object, name);
+      }
+      global_context->set(lookup_result.slot_index, *value);
+      return value;
+    }
+  }
+
   // TODO(verwaest): Let SetProperty do the migration, since storing a property
   // might deprecate the current map again, if value does not fit.
   if (MigrateDeprecated(object) || object->IsJSProxy()) {
diff --git a/src/isolate.cc b/src/isolate.cc
index 2595d2f..2bf2e3b 100644
--- a/src/isolate.cc
+++ b/src/isolate.cc
@@ -1650,6 +1650,9 @@
       optimizing_compiler_thread_(NULL),
       stress_deopt_count_(0),
       next_optimization_id_(0),
+#if TRACE_MAPS
+      next_unique_sfi_id_(0),
+#endif
       use_counter_callback_(NULL),
       basic_block_profiler_(NULL) {
   {
@@ -2356,7 +2359,8 @@
     for (unsigned i = 0; i < arraysize(nested); ++i) {
       Handle<String> name = factory()->InternalizeUtf8String(nested[i]);
       Handle<JSObject> obj = factory()->NewJSObjectFromMap(map);
-      JSObject::NormalizeProperties(obj, KEEP_INOBJECT_PROPERTIES, 8);
+      JSObject::NormalizeProperties(obj, KEEP_INOBJECT_PROPERTIES, 8,
+                                    "SetupSymbolRegistry");
       JSObject::SetProperty(registry, name, obj, STRICT).Assert();
     }
   }
diff --git a/src/isolate.h b/src/isolate.h
index 3551632..5d001f4 100644
--- a/src/isolate.h
+++ b/src/isolate.h
@@ -1109,6 +1109,10 @@
 
   std::string GetTurboCfgFileName();
 
+#if TRACE_MAPS
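+  // Hands out ids for SharedFunctionInfo::unique_id, which stays stable
+  // even when the GC moves the SharedFunctionInfo.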
+  int GetNextUniqueSharedFunctionInfoId() { return next_unique_sfi_id_++; }
+#endif
+
  private:
   explicit Isolate(bool enable_serializer);
 
@@ -1311,6 +1315,10 @@
 
   int next_optimization_id_;
 
+#if TRACE_MAPS
+  int next_unique_sfi_id_;
+#endif
+
   // List of callbacks when a Call completes.
   List<CallCompletedCallback> call_completed_callbacks_;
 
diff --git a/src/mips/code-stubs-mips.cc b/src/mips/code-stubs-mips.cc
index c8db844..5b34cc2 100644
--- a/src/mips/code-stubs-mips.cc
+++ b/src/mips/code-stubs-mips.cc
@@ -2829,8 +2829,12 @@
 
 
 void CallICStub::Generate(MacroAssembler* masm) {
-  // r1 - function
-  // r3 - slot id (Smi)
+  // a1 - function
+  // a3 - slot id (Smi)
+  const int with_types_offset =
+      FixedArray::OffsetOfElementAt(TypeFeedbackVector::kWithTypesIndex);
+  const int generic_offset =
+      FixedArray::OffsetOfElementAt(TypeFeedbackVector::kGenericCountIndex);
   Label extra_checks_or_miss, slow_start;
   Label slow, non_function, wrap, cont;
   Label have_js_function;
@@ -2869,38 +2873,71 @@
   }
 
   __ bind(&extra_checks_or_miss);
-  Label miss;
+  Label uninitialized, miss;
 
   __ LoadRoot(at, Heap::kmegamorphic_symbolRootIndex);
   __ Branch(&slow_start, eq, t0, Operand(at));
-  __ LoadRoot(at, Heap::kuninitialized_symbolRootIndex);
-  __ Branch(&miss, eq, t0, Operand(at));
 
-  if (!FLAG_trace_ic) {
-    // We are going megamorphic. If the feedback is a JSFunction, it is fine
-    // to handle it here. More complex cases are dealt with in the runtime.
-    __ AssertNotSmi(t0);
-    __ GetObjectType(t0, t1, t1);
-    __ Branch(&miss, ne, t1, Operand(JS_FUNCTION_TYPE));
-    __ sll(t0, a3, kPointerSizeLog2 - kSmiTagSize);
-    __ Addu(t0, a2, Operand(t0));
-    __ LoadRoot(at, Heap::kmegamorphic_symbolRootIndex);
-    __ sw(at, FieldMemOperand(t0, FixedArray::kHeaderSize));
-    // We have to update statistics for runtime profiling.
-    const int with_types_offset =
-        FixedArray::OffsetOfElementAt(TypeFeedbackVector::kWithTypesIndex);
-    __ lw(t0, FieldMemOperand(a2, with_types_offset));
-    __ Subu(t0, t0, Operand(Smi::FromInt(1)));
-    __ sw(t0, FieldMemOperand(a2, with_types_offset));
-    const int generic_offset =
-        FixedArray::OffsetOfElementAt(TypeFeedbackVector::kGenericCountIndex);
-    __ lw(t0, FieldMemOperand(a2, generic_offset));
-    __ Addu(t0, t0, Operand(Smi::FromInt(1)));
-    __ sw(t0, FieldMemOperand(a2, generic_offset));
-    __ Branch(&slow_start);
+  // The following cases attempt to handle MISS cases without going to the
+  // runtime.
+  if (FLAG_trace_ic) {
+    __ Branch(&miss);
   }
 
-  // We are here because tracing is on or we are going monomorphic.
+  __ LoadRoot(at, Heap::kuninitialized_symbolRootIndex);
+  __ Branch(&uninitialized, eq, t0, Operand(at));
+
+  // We are going megamorphic. If the feedback is a JSFunction, it is fine
+  // to handle it here. More complex cases are dealt with in the runtime.
+  __ AssertNotSmi(t0);
+  __ GetObjectType(t0, t1, t1);
+  __ Branch(&miss, ne, t1, Operand(JS_FUNCTION_TYPE));
+  __ sll(t0, a3, kPointerSizeLog2 - kSmiTagSize);
+  __ Addu(t0, a2, Operand(t0));
+  __ LoadRoot(at, Heap::kmegamorphic_symbolRootIndex);
+  __ sw(at, FieldMemOperand(t0, FixedArray::kHeaderSize));
+  // We have to update statistics for runtime profiling.
+  __ lw(t0, FieldMemOperand(a2, with_types_offset));
+  __ Subu(t0, t0, Operand(Smi::FromInt(1)));
+  __ sw(t0, FieldMemOperand(a2, with_types_offset));
+  __ lw(t0, FieldMemOperand(a2, generic_offset));
+  __ Addu(t0, t0, Operand(Smi::FromInt(1)));
+  __ Branch(USE_DELAY_SLOT, &slow_start);
+  __ sw(t0, FieldMemOperand(a2, generic_offset));  // In delay slot.
+
+  __ bind(&uninitialized);
+
+  // We are going monomorphic, provided we actually have a JSFunction.
+  __ JumpIfSmi(a1, &miss);
+
+  // Go to the miss case if we do not have a function.
+  __ GetObjectType(a1, t0, t0);
+  __ Branch(&miss, ne, t0, Operand(JS_FUNCTION_TYPE));
+
+  // Make sure the function is not the Array() function, which requires special
+  // behavior on MISS.
+  __ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, t0);
+  __ Branch(&miss, eq, a1, Operand(t0));
+
+  // Update stats.
+  __ lw(t0, FieldMemOperand(a2, with_types_offset));
+  __ Addu(t0, t0, Operand(Smi::FromInt(1)));
+  __ sw(t0, FieldMemOperand(a2, with_types_offset));
+
+  // Store the function.
+  __ sll(t0, a3, kPointerSizeLog2 - kSmiTagSize);
+  __ Addu(t0, a2, Operand(t0));
+  __ Addu(t0, t0, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+  __ sw(a1, MemOperand(t0, 0));
+
+  // Update the write barrier.
+  __ mov(t1, a1);
+  __ RecordWrite(a2, t0, t1, kRAHasNotBeenSaved, kDontSaveFPRegs,
+                 EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
+  __ Branch(&have_js_function);
+
+  // We are here because tracing is on or we encountered a MISS case we can't
+  // handle here.
   __ bind(&miss);
   GenerateMiss(masm);
 
diff --git a/src/mips64/code-stubs-mips64.cc b/src/mips64/code-stubs-mips64.cc
index ef812c7..8f39d02 100644
--- a/src/mips64/code-stubs-mips64.cc
+++ b/src/mips64/code-stubs-mips64.cc
@@ -2892,6 +2892,10 @@
 void CallICStub::Generate(MacroAssembler* masm) {
   // a1 - function
   // a3 - slot id (Smi)
+  const int with_types_offset =
+      FixedArray::OffsetOfElementAt(TypeFeedbackVector::kWithTypesIndex);
+  const int generic_offset =
+      FixedArray::OffsetOfElementAt(TypeFeedbackVector::kGenericCountIndex);
   Label extra_checks_or_miss, slow_start;
   Label slow, non_function, wrap, cont;
   Label have_js_function;
@@ -2930,38 +2934,71 @@
   }
 
   __ bind(&extra_checks_or_miss);
-  Label miss;
+  Label uninitialized, miss;
 
   __ LoadRoot(at, Heap::kmegamorphic_symbolRootIndex);
   __ Branch(&slow_start, eq, a4, Operand(at));
-  __ LoadRoot(at, Heap::kuninitialized_symbolRootIndex);
-  __ Branch(&miss, eq, a4, Operand(at));
 
-  if (!FLAG_trace_ic) {
-    // We are going megamorphic. If the feedback is a JSFunction, it is fine
-    // to handle it here. More complex cases are dealt with in the runtime.
-    __ AssertNotSmi(a4);
-    __ GetObjectType(a4, a5, a5);
-    __ Branch(&miss, ne, a5, Operand(JS_FUNCTION_TYPE));
-    __ dsrl(a4, a3, 32 - kPointerSizeLog2);
-    __ Daddu(a4, a2, Operand(a4));
-    __ LoadRoot(at, Heap::kmegamorphic_symbolRootIndex);
-    __ sd(at, FieldMemOperand(a4, FixedArray::kHeaderSize));
-    // We have to update statistics for runtime profiling.
-    const int with_types_offset =
-    FixedArray::OffsetOfElementAt(TypeFeedbackVector::kWithTypesIndex);
-    __ ld(a4, FieldMemOperand(a2, with_types_offset));
-    __ Dsubu(a4, a4, Operand(Smi::FromInt(1)));
-    __ sd(a4, FieldMemOperand(a2, with_types_offset));
-    const int generic_offset =
-    FixedArray::OffsetOfElementAt(TypeFeedbackVector::kGenericCountIndex);
-    __ ld(a4, FieldMemOperand(a2, generic_offset));
-    __ Daddu(a4, a4, Operand(Smi::FromInt(1)));
-    __ Branch(USE_DELAY_SLOT, &slow_start);
-    __ sd(a4, FieldMemOperand(a2, generic_offset));  // In delay slot.
+  // The following cases attempt to handle MISS cases without going to the
+  // runtime.
+  if (FLAG_trace_ic) {
+    __ Branch(&miss);
   }
 
-  // We are here because tracing is on or we are going monomorphic.
+  __ LoadRoot(at, Heap::kuninitialized_symbolRootIndex);
+  __ Branch(&uninitialized, eq, a4, Operand(at));
+
+  // We are going megamorphic. If the feedback is a JSFunction, it is fine
+  // to handle it here. More complex cases are dealt with in the runtime.
+  __ AssertNotSmi(a4);
+  __ GetObjectType(a4, a5, a5);
+  __ Branch(&miss, ne, a5, Operand(JS_FUNCTION_TYPE));
+  __ dsrl(a4, a3, 32 - kPointerSizeLog2);
+  __ Daddu(a4, a2, Operand(a4));
+  __ LoadRoot(at, Heap::kmegamorphic_symbolRootIndex);
+  __ sd(at, FieldMemOperand(a4, FixedArray::kHeaderSize));
+  // We have to update statistics for runtime profiling.
+  __ ld(a4, FieldMemOperand(a2, with_types_offset));
+  __ Dsubu(a4, a4, Operand(Smi::FromInt(1)));
+  __ sd(a4, FieldMemOperand(a2, with_types_offset));
+  __ ld(a4, FieldMemOperand(a2, generic_offset));
+  __ Daddu(a4, a4, Operand(Smi::FromInt(1)));
+  __ Branch(USE_DELAY_SLOT, &slow_start);
+  __ sd(a4, FieldMemOperand(a2, generic_offset));  // In delay slot.
+
+  __ bind(&uninitialized);
+
+  // We are going monomorphic, provided we actually have a JSFunction.
+  __ JumpIfSmi(a1, &miss);
+
+  // Go to the miss case if we do not have a function.
+  __ GetObjectType(a1, a4, a4);
+  __ Branch(&miss, ne, a4, Operand(JS_FUNCTION_TYPE));
+
+  // Make sure the function is not the Array() function, which requires special
+  // behavior on MISS.
+  __ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, a4);
+  __ Branch(&miss, eq, a1, Operand(a4));
+
+  // Update stats.
+  __ ld(a4, FieldMemOperand(a2, with_types_offset));
+  __ Daddu(a4, a4, Operand(Smi::FromInt(1)));
+  __ sd(a4, FieldMemOperand(a2, with_types_offset));
+
+  // Store the function.
+  __ dsrl(a4, a3, 32 - kPointerSizeLog2);
+  __ Daddu(a4, a2, Operand(a4));
+  __ Daddu(a4, a4, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+  __ sd(a1, MemOperand(a4, 0));
+
+  // Update the write barrier.
+  __ mov(a5, a1);
+  __ RecordWrite(a2, a4, a5, kRAHasNotBeenSaved, kDontSaveFPRegs,
+                 EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
+  __ Branch(&have_js_function);
+
+  // We are here because tracing is on or we encountered a MISS case we can't
+  // handle here.
   __ bind(&miss);
   GenerateMiss(masm);
 
diff --git a/src/objects-inl.h b/src/objects-inl.h
index 0288bfb..40ce81a 100644
--- a/src/objects-inl.h
+++ b/src/objects-inl.h
@@ -767,6 +767,14 @@
 }
 
 
+bool Object::IsGlobalContextTable() const {
+  if (!Object::IsHeapObject()) return false;
+  Map* map = HeapObject::cast(this)->map();
+  Heap* heap = map->GetHeap();
+  return map == heap->global_context_table_map();
+}
+
+
 bool Object::IsScopeInfo() const {
   return Object::IsHeapObject() &&
       HeapObject::cast(this)->map() ==
@@ -5492,6 +5500,9 @@
 ACCESSORS(SharedFunctionInfo, construct_stub, Code, kConstructStubOffset)
 ACCESSORS(SharedFunctionInfo, feedback_vector, TypeFeedbackVector,
           kFeedbackVectorOffset)
+#if TRACE_MAPS
+SMI_ACCESSORS(SharedFunctionInfo, unique_id, kUniqueIdOffset)
+#endif
 ACCESSORS(SharedFunctionInfo, instance_class_name, Object,
           kInstanceClassNameOffset)
 ACCESSORS(SharedFunctionInfo, function_data, Object, kFunctionDataOffset)
@@ -5684,6 +5695,11 @@
 BOOL_ACCESSORS(SharedFunctionInfo, compiler_hints, is_generator, kIsGenerator)
 BOOL_ACCESSORS(SharedFunctionInfo, compiler_hints, is_concise_method,
                kIsConciseMethod)
+BOOL_ACCESSORS(SharedFunctionInfo, compiler_hints, is_default_constructor,
+               kIsDefaultConstructor)
+BOOL_ACCESSORS(SharedFunctionInfo, compiler_hints,
+               is_default_constructor_call_super,
+               kIsDefaultConstructorCallSuper)
 
 ACCESSORS(CodeCache, default_cache, FixedArray, kDefaultCacheOffset)
 ACCESSORS(CodeCache, normal_type_cache, Object, kNormalTypeCacheOffset)
diff --git a/src/objects-printer.cc b/src/objects-printer.cc
index ba05b47..2fb924c 100644
--- a/src/objects-printer.cc
+++ b/src/objects-printer.cc
@@ -1129,4 +1129,38 @@
 #endif  // OBJECT_PRINT
 
 
+#if TRACE_MAPS
+
+
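+// Prints a compact form of the name for tracing: strings verbatim, symbols
+// as <name>, private symbols as #<name>.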
+void Name::NameShortPrint() {
+  if (this->IsString()) {
+    PrintF("%s", String::cast(this)->ToCString().get());
+  } else {
+    DCHECK(this->IsSymbol());
+    Symbol* s = Symbol::cast(this);
+    if (s->name()->IsUndefined()) {
+      PrintF("#<%s>", s->PrivateSymbolToName());
+    } else {
+      PrintF("<%s>", String::cast(s->name())->ToCString().get());
+    }
+  }
+}
+
+
+int Name::NameShortPrint(Vector<char> str) {
+  if (this->IsString()) {
+    return SNPrintF(str, "%s", String::cast(this)->ToCString().get());
+  } else {
+    DCHECK(this->IsSymbol());
+    Symbol* s = Symbol::cast(this);
+    if (s->name()->IsUndefined()) {
+      return SNPrintF(str, "#<%s>", s->PrivateSymbolToName());
+    } else {
+      return SNPrintF(str, "<%s>", String::cast(s->name())->ToCString().get());
+    }
+  }
+}
+
+
+#endif  // TRACE_MAPS
 } }  // namespace v8::internal
diff --git a/src/objects.cc b/src/objects.cc
index 258390c..2b5b567 100644
--- a/src/objects.cc
+++ b/src/objects.cc
@@ -708,6 +708,13 @@
         // the hole value.
         Handle<Map> new_map = Map::CopyDropDescriptors(handle(object->map()));
         DCHECK(new_map->is_dictionary_map());
+#if TRACE_MAPS
+        if (FLAG_trace_maps) {
+          PrintF("[TraceMaps: GlobalDeleteNormalized from= %p to= %p ]\n",
+                 reinterpret_cast<void*>(object->map()),
+                 reinterpret_cast<void*>(*new_map));
+        }
+#endif
         JSObject::MigrateToMap(object, new_map);
       }
       Handle<PropertyCell> cell(PropertyCell::cast(dictionary->ValueAt(entry)));
@@ -2150,7 +2157,7 @@
                                                   PropertyAttributes attributes,
                                                   const char* reason) {
   Isolate* isolate = map->GetIsolate();
-  Handle<Map> new_map = Copy(map);
+  Handle<Map> new_map = Copy(map, reason);
 
   DescriptorArray* descriptors = new_map->instance_descriptors();
   int length = descriptors->number_of_descriptors();
@@ -2459,8 +2466,8 @@
   // Check the state of the root map.
   Handle<Map> root_map(old_map->FindRootMap(), isolate);
   if (!old_map->EquivalentToForTransition(*root_map)) {
-    return CopyGeneralizeAllRepresentations(
-        old_map, modify_index, store_mode, "not equivalent");
+    return CopyGeneralizeAllRepresentations(old_map, modify_index, store_mode,
+                                            "GenAll_NotEquivalent");
   }
   int root_nof = root_map->NumberOfOwnDescriptors();
   if (modify_index < root_nof) {
@@ -2469,8 +2476,8 @@
         (old_details.type() == FIELD &&
          (!new_field_type->NowIs(old_descriptors->GetFieldType(modify_index)) ||
           !new_representation.fits_into(old_details.representation())))) {
-      return CopyGeneralizeAllRepresentations(
-          old_map, modify_index, store_mode, "root modification");
+      return CopyGeneralizeAllRepresentations(old_map, modify_index, store_mode,
+                                              "GenAll_RootModification");
     }
   }
 
@@ -2493,8 +2500,8 @@
     if ((tmp_type == CALLBACKS || old_type == CALLBACKS) &&
         (tmp_type != old_type ||
          tmp_descriptors->GetValue(i) != old_descriptors->GetValue(i))) {
-      return CopyGeneralizeAllRepresentations(
-          old_map, modify_index, store_mode, "incompatible");
+      return CopyGeneralizeAllRepresentations(old_map, modify_index, store_mode,
+                                              "GenAll_Incompatible");
     }
     Representation old_representation = old_details.representation();
     Representation tmp_representation = tmp_details.representation();
@@ -2559,8 +2566,8 @@
     if ((tmp_details.type() == CALLBACKS || old_details.type() == CALLBACKS) &&
         (tmp_details.type() != old_details.type() ||
          tmp_descriptors->GetValue(i) != old_descriptors->GetValue(i))) {
-      return CopyGeneralizeAllRepresentations(
-          old_map, modify_index, store_mode, "incompatible");
+      return CopyGeneralizeAllRepresentations(old_map, modify_index, store_mode,
+                                              "GenAll_Incompatible");
     }
     target_map = tmp_map;
   }
@@ -4260,11 +4267,12 @@
 
 void JSObject::NormalizeProperties(Handle<JSObject> object,
                                    PropertyNormalizationMode mode,
-                                   int expected_additional_properties) {
+                                   int expected_additional_properties,
+                                   const char* reason) {
   if (!object->HasFastProperties()) return;
 
   Handle<Map> map(object->map());
-  Handle<Map> new_map = Map::Normalize(map, mode);
+  Handle<Map> new_map = Map::Normalize(map, mode, reason);
 
   MigrateFastToSlow(object, new_map, expected_additional_properties);
 }
@@ -4372,7 +4380,8 @@
 
 
 void JSObject::MigrateSlowToFast(Handle<JSObject> object,
-                                 int unused_property_fields) {
+                                 int unused_property_fields,
+                                 const char* reason) {
   if (object->HasFastProperties()) return;
   DCHECK(!object->IsGlobalObject());
   Isolate* isolate = object->GetIsolate();
@@ -4414,6 +4423,14 @@
   Handle<Map> new_map = Map::CopyDropDescriptors(handle(object->map()));
   new_map->set_dictionary_map(false);
 
+#if TRACE_MAPS
+  if (FLAG_trace_maps) {
+    PrintF("[TraceMaps: SlowToFast from= %p to= %p reason= %s ]\n",
+           reinterpret_cast<void*>(object->map()),
+           reinterpret_cast<void*>(*new_map), reason);
+  }
+#endif
+
   if (instance_descriptor_length == 0) {
     DisallowHeapAllocation no_gc;
     DCHECK_LE(unused_property_fields, inobject_props);
@@ -5074,7 +5091,7 @@
             !(object->IsJSGlobalProxy() && holder->IsJSGlobalObject())) {
           return it.isolate()->factory()->true_value();
         }
-        NormalizeProperties(holder, mode, 0);
+        NormalizeProperties(holder, mode, 0, "DeletingProperty");
         Handle<Object> result =
             DeleteNormalizedProperty(holder, name, delete_mode);
         ReoptimizeIfPrototype(holder);
@@ -5292,7 +5309,7 @@
   // Do a map transition, other objects with this map may still
   // be extensible.
   // TODO(adamk): Extend the NormalizedMapCache to handle non-extensible maps.
-  Handle<Map> new_map = Map::Copy(handle(object->map()));
+  Handle<Map> new_map = Map::Copy(handle(object->map()), "PreventExtensions");
 
   new_map->set_is_extensible(false);
   JSObject::MigrateToMap(object, new_map);
@@ -5404,11 +5421,11 @@
   } else {
     DCHECK(old_map->is_dictionary_map() || !old_map->is_prototype_map());
     // Slow path: need to normalize properties for safety
-    NormalizeProperties(object, CLEAR_INOBJECT_PROPERTIES, 0);
+    NormalizeProperties(object, CLEAR_INOBJECT_PROPERTIES, 0, "SlowFreeze");
 
     // Create a new map, since other objects with this map may be extensible.
     // TODO(adamk): Extend the NormalizedMapCache to handle non-extensible maps.
-    Handle<Map> new_map = Map::Copy(handle(object->map()));
+    Handle<Map> new_map = Map::Copy(handle(object->map()), "SlowCopyForFreeze");
     new_map->freeze();
     new_map->set_is_extensible(false);
     new_map->set_elements_kind(DICTIONARY_ELEMENTS);
@@ -5450,7 +5467,7 @@
   } else if (object->HasFastProperties() && old_map->CanHaveMoreTransitions()) {
     new_map = Map::CopyForObserved(old_map);
   } else {
-    new_map = Map::Copy(old_map);
+    new_map = Map::Copy(old_map, "SlowObserved");
     new_map->set_is_observed();
   }
   JSObject::MigrateToMap(object, new_map);
@@ -6167,13 +6184,20 @@
                                        ? KEEP_INOBJECT_PROPERTIES
                                        : CLEAR_INOBJECT_PROPERTIES;
   // Normalize object to make this operation simple.
-  NormalizeProperties(object, mode, 0);
+  NormalizeProperties(object, mode, 0, "SetPropertyCallback");
 
   // For the global object allocate a new map to invalidate the global inline
   // caches which have a global property cell reference directly in the code.
   if (object->IsGlobalObject()) {
     Handle<Map> new_map = Map::CopyDropDescriptors(handle(object->map()));
     DCHECK(new_map->is_dictionary_map());
+#if TRACE_MAPS
+    if (FLAG_trace_maps) {
+      PrintF("[TraceMaps: GlobalPropertyCallback from= %p to= %p ]\n",
+             reinterpret_cast<void*>(object->map()),
+             reinterpret_cast<void*>(*new_map));
+    }
+#endif
     JSObject::MigrateToMap(object, new_map);
 
     // When running crankshaft, changing the map is not enough. We
@@ -6494,8 +6518,8 @@
 }
 
 
-Handle<Map> Map::Normalize(Handle<Map> fast_map,
-                           PropertyNormalizationMode mode) {
+Handle<Map> Map::Normalize(Handle<Map> fast_map, PropertyNormalizationMode mode,
+                           const char* reason) {
   DCHECK(!fast_map->is_dictionary_map());
 
   Isolate* isolate = fast_map->GetIsolate();
@@ -6534,6 +6558,13 @@
       cache->Set(fast_map, new_map);
       isolate->counters()->normalized_maps()->Increment();
     }
+#if TRACE_MAPS
+    if (FLAG_trace_maps) {
+      PrintF("[TraceMaps: Normalize from= %p to= %p reason= %s ]\n",
+             reinterpret_cast<void*>(*fast_map),
+             reinterpret_cast<void*>(*new_map), reason);
+    }
+#endif
   }
   fast_map->NotifyLeafMapLayoutChange();
   return new_map;
@@ -6616,11 +6647,41 @@
 }
 
 
+#if TRACE_MAPS
+
+// static
+void Map::TraceTransition(const char* what, Map* from, Map* to, Name* name) {
+  if (FLAG_trace_maps) {
+    PrintF("[TraceMaps: %s from= %p to= %p name= ", what,
+           reinterpret_cast<void*>(from), reinterpret_cast<void*>(to));
+    name->NameShortPrint();
+    PrintF(" ]\n");
+  }
+}
+
+
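+// Recursively logs every transition reachable from |map|.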
+// static
+void Map::TraceAllTransitions(Map* map) {
+  if (!map->HasTransitionArray()) return;
+  TransitionArray* transitions = map->transitions();
+  for (int i = 0; i < transitions->number_of_transitions(); ++i) {
+    Map* target = transitions->GetTarget(i);
+    Map::TraceTransition("Transition", map, target, transitions->GetKey(i));
+    Map::TraceAllTransitions(target);
+  }
+}
+
+#endif  // TRACE_MAPS
+
+
 void Map::ConnectTransition(Handle<Map> parent, Handle<Map> child,
                             Handle<Name> name, SimpleTransitionFlag flag) {
   parent->set_owns_descriptors(false);
   if (parent->is_prototype_map()) {
     DCHECK(child->is_prototype_map());
+#if TRACE_MAPS
+    Map::TraceTransition("NoTransition", *parent, *child, *name);
+#endif
   } else {
     Handle<TransitionArray> transitions =
         TransitionArray::Insert(parent, name, child, flag);
@@ -6629,6 +6690,9 @@
       parent->set_transitions(*transitions);
     }
     child->SetBackPointer(*parent);
+#if TRACE_MAPS
+    Map::TraceTransition("Transition", *parent, *child, *name);
+#endif
   }
 }
 
@@ -6637,6 +6701,7 @@
                                         Handle<DescriptorArray> descriptors,
                                         TransitionFlag flag,
                                         MaybeHandle<Name> maybe_name,
+                                        const char* reason,
                                         SimpleTransitionFlag simple_flag) {
   DCHECK(descriptors->IsSortedNoDuplicates());
 
@@ -6658,6 +6723,16 @@
       }
     }
   }
+#if TRACE_MAPS
+  if (FLAG_trace_maps &&
+      // Mirror conditions above that did not call ConnectTransition().
+      (map->is_prototype_map() ||
+       !(flag == INSERT_TRANSITION && map->CanHaveMoreTransitions()))) {
+    PrintF("[TraceMaps: ReplaceDescriptors from= %p to= %p reason= %s ]\n",
+           reinterpret_cast<void*>(*map), reinterpret_cast<void*>(*result),
+           reason);
+  }
+#endif
 
   return result;
 }
@@ -6726,7 +6801,7 @@
   // In case the map did not own its own descriptors, a split is forced by
   // copying the map; creating a new descriptor array cell.
   // Create a new free-floating map only if we are not allowed to store it.
-  Handle<Map> new_map = Copy(map);
+  Handle<Map> new_map = Copy(map, "CopyAsElementsKind");
 
   new_map->set_elements_kind(kind);
 
@@ -6750,7 +6825,7 @@
     new_map = CopyDropDescriptors(map);
   } else {
     DCHECK(!map->is_prototype_map());
-    new_map = Copy(map);
+    new_map = Copy(map, "CopyForObserved");
   }
 
   new_map->set_is_observed();
@@ -6766,18 +6841,20 @@
 }
 
 
-Handle<Map> Map::Copy(Handle<Map> map) {
+Handle<Map> Map::Copy(Handle<Map> map, const char* reason) {
   Handle<DescriptorArray> descriptors(map->instance_descriptors());
   int number_of_own_descriptors = map->NumberOfOwnDescriptors();
   Handle<DescriptorArray> new_descriptors =
       DescriptorArray::CopyUpTo(descriptors, number_of_own_descriptors);
   return CopyReplaceDescriptors(map, new_descriptors, OMIT_TRANSITION,
-                                MaybeHandle<Name>(), SPECIAL_TRANSITION);
+                                MaybeHandle<Name>(), reason,
+                                SPECIAL_TRANSITION);
 }
 
 
 Handle<Map> Map::Create(Isolate* isolate, int inobject_properties) {
-  Handle<Map> copy = Copy(handle(isolate->object_function()->initial_map()));
+  Handle<Map> copy =
+      Copy(handle(isolate->object_function()->initial_map()), "MapCreate");
 
   // Check that we do not overflow the instance size when adding the extra
   // inobject properties. If the instance size overflows, we allocate as many
@@ -6808,7 +6885,7 @@
       handle(map->instance_descriptors(), isolate), num_descriptors, FROZEN);
   Handle<Map> new_map = CopyReplaceDescriptors(
       map, new_desc, INSERT_TRANSITION, isolate->factory()->frozen_symbol(),
-      SPECIAL_TRANSITION);
+      "CopyForFreeze", SPECIAL_TRANSITION);
   new_map->freeze();
   new_map->set_is_extensible(false);
   new_map->set_elements_kind(DICTIONARY_ELEMENTS);
@@ -6898,7 +6975,17 @@
 
   Handle<Map> result;
   if (!maybe_map.ToHandle(&result)) {
-    return Map::Normalize(map, CLEAR_INOBJECT_PROPERTIES);
+#if TRACE_MAPS
+    if (FLAG_trace_maps) {
+      Vector<char> name_buffer = Vector<char>::New(100);
+      name->NameShortPrint(name_buffer);
+      Vector<char> buffer = Vector<char>::New(128);
+      SNPrintF(buffer, "TooManyFastProperties %s", name_buffer.start());
+      return Map::Normalize(map, CLEAR_INOBJECT_PROPERTIES, buffer.start());
+    }
+#endif
+    return Map::Normalize(map, CLEAR_INOBJECT_PROPERTIES,
+                          "TooManyFastProperties");
   }
 
   return result;
@@ -6912,8 +6999,8 @@
 
   // For now, give up on transitioning and just create a unique map.
   // TODO(verwaest/ishell): Cache transitions with different attributes.
-  return CopyGeneralizeAllRepresentations(map, descriptor, FORCE_FIELD,
-                                          attributes, "attributes mismatch");
+  return CopyGeneralizeAllRepresentations(
+      map, descriptor, FORCE_FIELD, attributes, "GenAll_AttributesMismatch");
 }
 
 
@@ -6928,7 +7015,7 @@
   if (map->is_dictionary_map()) {
     // For global objects, property cells are inlined. We need to change the
     // map.
-    if (map->IsGlobalObjectMap()) return Copy(map);
+    if (map->IsGlobalObjectMap()) return Copy(map, "GlobalAccessor");
     return map;
   }
 
@@ -6951,12 +7038,12 @@
 
     Handle<Object> maybe_pair(descriptors->GetValue(descriptor), isolate);
     if (!maybe_pair->IsAccessorPair()) {
-      return Map::Normalize(map, mode);
+      return Map::Normalize(map, mode, "TransitionToAccessorFromNonPair");
     }
 
     Handle<AccessorPair> pair = Handle<AccessorPair>::cast(maybe_pair);
     if (pair->get(component) != *accessor) {
-      return Map::Normalize(map, mode);
+      return Map::Normalize(map, mode, "TransitionToDifferentAccessor");
     }
 
     return transition;
@@ -6967,33 +7054,33 @@
   int descriptor = old_descriptors->SearchWithCache(*name, *map);
   if (descriptor != DescriptorArray::kNotFound) {
     if (descriptor != map->LastAdded()) {
-      return Map::Normalize(map, mode);
+      return Map::Normalize(map, mode, "AccessorsOverwritingNonLast");
     }
     PropertyDetails old_details = old_descriptors->GetDetails(descriptor);
     if (old_details.type() != CALLBACKS) {
-      return Map::Normalize(map, mode);
+      return Map::Normalize(map, mode, "AccessorsOverwritingNonAccessors");
     }
 
     if (old_details.attributes() != attributes) {
-      return Map::Normalize(map, mode);
+      return Map::Normalize(map, mode, "AccessorsWithAttributes");
     }
 
     Handle<Object> maybe_pair(old_descriptors->GetValue(descriptor), isolate);
     if (!maybe_pair->IsAccessorPair()) {
-      return Map::Normalize(map, mode);
+      return Map::Normalize(map, mode, "AccessorsOverwritingNonPair");
     }
 
     Object* current = Handle<AccessorPair>::cast(maybe_pair)->get(component);
     if (current == *accessor) return map;
 
     if (!current->IsTheHole()) {
-      return Map::Normalize(map, mode);
+      return Map::Normalize(map, mode, "AccessorsOverwritingAccessors");
     }
 
     pair = AccessorPair::Copy(Handle<AccessorPair>::cast(maybe_pair));
   } else if (map->NumberOfOwnDescriptors() >= kMaxNumberOfDescriptors ||
              map->TooManyFastProperties(CERTAINLY_NOT_STORE_FROM_KEYED)) {
-    return Map::Normalize(map, CLEAR_INOBJECT_PROPERTIES);
+    return Map::Normalize(map, CLEAR_INOBJECT_PROPERTIES, "TooManyAccessors");
   } else {
     pair = isolate->factory()->NewAccessorPair();
   }
@@ -7024,7 +7111,7 @@
   new_descriptors->Append(descriptor);
 
   return CopyReplaceDescriptors(map, new_descriptors, flag,
-                                descriptor->GetKey(),
+                                descriptor->GetKey(), "CopyAddDescriptor",
                                 SIMPLE_PROPERTY_TRANSITION);
 }
 
@@ -7121,7 +7208,8 @@
       (insertion_index == descriptors->number_of_descriptors() - 1)
           ? SIMPLE_PROPERTY_TRANSITION
           : PROPERTY_TRANSITION;
-  return CopyReplaceDescriptors(map, new_descriptors, flag, key, simple_flag);
+  return CopyReplaceDescriptors(map, new_descriptors, flag, key,
+                                "CopyReplaceDescriptor", simple_flag);
 }
 
 
@@ -9414,14 +9502,15 @@
   if (object->IsJSGlobalProxy()) return;
   if (mode == FAST_PROTOTYPE && !object->map()->is_prototype_map()) {
     // First normalize to ensure all JSFunctions are CONSTANT.
-    JSObject::NormalizeProperties(object, KEEP_INOBJECT_PROPERTIES, 0);
+    JSObject::NormalizeProperties(object, KEEP_INOBJECT_PROPERTIES, 0,
+                                  "NormalizeAsPrototype");
   }
   if (!object->HasFastProperties()) {
-    JSObject::MigrateSlowToFast(object, 0);
+    JSObject::MigrateSlowToFast(object, 0, "OptimizeAsPrototype");
   }
   if (mode == FAST_PROTOTYPE && object->HasFastProperties() &&
       !object->map()->is_prototype_map()) {
-    Handle<Map> new_map = Map::Copy(handle(object->map()));
+    Handle<Map> new_map = Map::Copy(handle(object->map()), "CopyAsPrototype");
     JSObject::MigrateToMap(object, new_map);
     object->map()->set_is_prototype_map(true);
   }
@@ -9491,7 +9580,7 @@
       // into the initial map where it belongs.
       function->set_prototype_or_initial_map(*value);
     } else {
-      Handle<Map> new_map = Map::Copy(initial_map);
+      Handle<Map> new_map = Map::Copy(initial_map, "SetInstancePrototype");
       JSFunction::SetInitialMap(function, new_map, value);
 
       // If the function is used as the global Array function, cache the
@@ -9531,7 +9620,7 @@
     // Copy the map so this does not affect unrelated functions.
     // Remove map transitions because they point to maps with a
     // different prototype.
-    Handle<Map> new_map = Map::Copy(handle(function->map()));
+    Handle<Map> new_map = Map::Copy(handle(function->map()), "SetPrototype");
 
     JSObject::MigrateToMap(function, new_map);
     new_map->set_constructor(*value);
@@ -9579,6 +9668,13 @@
   map->set_prototype(*prototype);
   function->set_prototype_or_initial_map(*map);
   map->set_constructor(*function);
+#if TRACE_MAPS
+  if (FLAG_trace_maps) {
+    PrintF("[TraceMaps: InitialMap map= %p SFI= %d_%s ]\n",
+           reinterpret_cast<void*>(*map), function->shared()->unique_id(),
+           function->shared()->DebugName()->ToCString().get());
+  }
+#endif
 }
 
 
@@ -11700,7 +11796,7 @@
                                        Handle<Object> prototype) {
   Handle<Map> new_map = GetPrototypeTransition(map, prototype);
   if (new_map.is_null()) {
-    new_map = Copy(map);
+    new_map = Copy(map, "TransitionToPrototype");
     PutPrototypeTransition(map, prototype, new_map);
     new_map->set_prototype(*prototype);
   }
@@ -14677,6 +14773,25 @@
 }
 
 
+void GlobalObject::InvalidatePropertyCell(Handle<GlobalObject> global,
+                                          Handle<Name> name) {
+  DCHECK(!global->HasFastProperties());
+  Isolate* isolate = global->GetIsolate();
+  int entry = global->property_dictionary()->FindEntry(name);
+  if (entry != NameDictionary::kNotFound) {
+    Handle<PropertyCell> cell(
+        PropertyCell::cast(global->property_dictionary()->ValueAt(entry)));
+
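+    // Move the value into a fresh cell and hole out the old one, so code
+    // that embedded the old cell observes the invalidation.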
+    Handle<Object> value(cell->value(), isolate);
+    Handle<PropertyCell> new_cell = isolate->factory()->NewPropertyCell(value);
+    global->property_dictionary()->ValueAtPut(entry, *new_cell);
+
+    Handle<Object> hole = isolate->factory()->the_hole_value();
+    PropertyCell::SetValueInferType(cell, hole);
+  }
+}
+
+
 Handle<PropertyCell> JSGlobalObject::EnsurePropertyCell(
     Handle<JSGlobalObject> global,
     Handle<Name> name) {
@@ -14899,16 +15014,19 @@
   Handle<SharedFunctionInfo> shared(context->closure()->shared());
   StringSharedKey key(src, shared, FLAG_use_strict ? STRICT : SLOPPY,
                       RelocInfo::kNoPosition);
-  int entry = cache->FindEntry(&key);
-  if (entry != kNotFound) {
+  {
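+    // key.AsHandle() may allocate and trigger a GC, so take the handle
+    // first and forbid allocation while the entry index from FindEntry()
+    // is in use.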
     Handle<Object> k = key.AsHandle(isolate);
-    cache->set(EntryToIndex(entry), *k);
-    cache->set(EntryToIndex(entry) + 1, *value);
-    return cache;
+    DisallowHeapAllocation no_allocation_scope;
+    int entry = cache->FindEntry(&key);
+    if (entry != kNotFound) {
+      cache->set(EntryToIndex(entry), *k);
+      cache->set(EntryToIndex(entry) + 1, *value);
+      return cache;
+    }
   }
 
   cache = EnsureCapacity(cache, 1, &key);
-  entry = cache->FindInsertionEntry(key.Hash());
+  int entry = cache->FindInsertionEntry(key.Hash());
   Handle<Object> k =
       isolate->factory()->NewNumber(static_cast<double>(key.Hash()));
   cache->set(EntryToIndex(entry), *k);
@@ -14924,16 +15042,19 @@
     int scope_position) {
   Isolate* isolate = cache->GetIsolate();
   StringSharedKey key(src, outer_info, value->strict_mode(), scope_position);
-  int entry = cache->FindEntry(&key);
-  if (entry != kNotFound) {
+  {
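+    // As above: no allocation between FindEntry() and the writes that use
+    // its entry index.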
     Handle<Object> k = key.AsHandle(isolate);
-    cache->set(EntryToIndex(entry), *k);
-    cache->set(EntryToIndex(entry) + 1, *value);
-    return cache;
+    DisallowHeapAllocation no_allocation_scope;
+    int entry = cache->FindEntry(&key);
+    if (entry != kNotFound) {
+      cache->set(EntryToIndex(entry), *k);
+      cache->set(EntryToIndex(entry) + 1, *value);
+      return cache;
+    }
   }
 
   cache = EnsureCapacity(cache, 1, &key);
-  entry = cache->FindInsertionEntry(key.Hash());
+  int entry = cache->FindInsertionEntry(key.Hash());
   Handle<Object> k =
       isolate->factory()->NewNumber(static_cast<double>(key.Hash()));
   cache->set(EntryToIndex(entry), *k);
diff --git a/src/objects.h b/src/objects.h
index 9333e9e..31d0a16 100644
--- a/src/objects.h
+++ b/src/objects.h
@@ -87,6 +87,7 @@
 //         - JSFunctionResultCache
 //         - ScopeInfo
 //         - TransitionArray
+//         - GlobalContextTable
 //       - FixedDoubleArray
 //       - ExternalArray
 //         - ExternalUint8ClampedArray
@@ -944,6 +945,7 @@
   V(FixedDoubleArray)              \
   V(ConstantPoolArray)             \
   V(Context)                       \
+  V(GlobalContextTable)            \
   V(NativeContext)                 \
   V(ScopeInfo)                     \
   V(JSFunction)                    \
@@ -2049,7 +2051,8 @@
   // an initial capacity for holding these properties.
   static void NormalizeProperties(Handle<JSObject> object,
                                   PropertyNormalizationMode mode,
-                                  int expected_additional_properties);
+                                  int expected_additional_properties,
+                                  const char* reason);
 
   // Convert and update the elements backing store to be a
   // SeededNumberDictionary dictionary.  Returns the backing after conversion.
@@ -2058,7 +2061,7 @@
 
   // Transform slow named properties to fast variants.
   static void MigrateSlowToFast(Handle<JSObject> object,
-                                int unused_property_fields);
+                                int unused_property_fields, const char* reason);
 
   // Access fast-case object properties at index.
   static Handle<Object> FastPropertyAt(Handle<JSObject> object,
@@ -5878,7 +5881,8 @@
                                             int descriptor_number,
                                             Handle<Object> value);
 
-  static Handle<Map> Normalize(Handle<Map> map, PropertyNormalizationMode mode);
+  static Handle<Map> Normalize(Handle<Map> map, PropertyNormalizationMode mode,
+                               const char* reason);
 
   // Returns the constructor name (the name (possibly, inferred name) of the
   // function that was used to instantiate the object).
@@ -6087,7 +6091,7 @@
 
   // Returns a copy of the map, with all transitions dropped from the
   // instance descriptors.
-  static Handle<Map> Copy(Handle<Map> map);
+  static Handle<Map> Copy(Handle<Map> map, const char* reason);
   static Handle<Map> Create(Isolate* isolate, int inobject_properties);
 
   // Returns the next free property index (only valid for FAST MODE).
@@ -6315,6 +6319,11 @@
   // The "shared" flags of both this map and |other| are ignored.
   bool EquivalentToForNormalization(Map* other, PropertyNormalizationMode mode);
 
+#if TRACE_MAPS
+  static void TraceTransition(const char* what, Map* from, Map* to, Name* name);
+  static void TraceAllTransitions(Map* map);
+#endif
+
  private:
   static void ConnectElementsTransition(Handle<Map> parent, Handle<Map> child);
   static void ConnectTransition(Handle<Map> parent, Handle<Map> child,
@@ -6336,6 +6345,7 @@
                                             Handle<DescriptorArray> descriptors,
                                             TransitionFlag flag,
                                             MaybeHandle<Name> maybe_name,
+                                            const char* reason,
                                             SimpleTransitionFlag simple_flag);
   static Handle<Map> CopyReplaceDescriptor(Handle<Map> map,
                                            Handle<DescriptorArray> descriptors,
@@ -6709,6 +6719,13 @@
   // available.
   DECL_ACCESSORS(feedback_vector, TypeFeedbackVector)
 
+#if TRACE_MAPS
+  // [unique_id] - For --trace-maps purposes, an identifier that's persistent
+  // even if the GC moves this SharedFunctionInfo.
+  inline int unique_id() const;
+  inline void set_unique_id(int value);
+#endif
+
   // [instance class name]: class name for instances.
   DECL_ACCESSORS(instance_class_name, Object)
 
@@ -6855,6 +6872,13 @@
   // Indicates that this function is a concise method.
   DECL_BOOLEAN_ACCESSORS(is_concise_method)
 
+  // Indicates that this function is a default constructor.
+  DECL_BOOLEAN_ACCESSORS(is_default_constructor)
+
+  // Indicates that this function is a default constructor that needs to call
+  // super.
+  DECL_BOOLEAN_ACCESSORS(is_default_constructor_call_super)
+
   // Indicates that this function is an asm function.
   DECL_BOOLEAN_ACCESSORS(asm_function)
 
@@ -6955,10 +6979,16 @@
   static const int kInferredNameOffset = kDebugInfoOffset + kPointerSize;
   static const int kFeedbackVectorOffset =
       kInferredNameOffset + kPointerSize;
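+  // kLastPointerFieldOffset marks the last tagged slot, so the raw-field
+  // offsets and the BodyDescriptor below stay correct whether or not the
+  // unique_id slot is compiled in.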
+#if TRACE_MAPS
+  static const int kUniqueIdOffset = kFeedbackVectorOffset + kPointerSize;
+  static const int kLastPointerFieldOffset = kUniqueIdOffset;
+#else
+  static const int kLastPointerFieldOffset = kFeedbackVectorOffset;
+#endif
+
 #if V8_HOST_ARCH_32_BIT
   // Smi fields.
-  static const int kLengthOffset =
-      kFeedbackVectorOffset + kPointerSize;
+  static const int kLengthOffset = kLastPointerFieldOffset + kPointerSize;
   static const int kFormalParameterCountOffset = kLengthOffset + kPointerSize;
   static const int kExpectedNofPropertiesOffset =
       kFormalParameterCountOffset + kPointerSize;
@@ -6994,8 +7024,7 @@
 // word is not set and thus this word cannot be treated as pointer
 // to HeapObject during old space traversal.
 #if V8_TARGET_LITTLE_ENDIAN
-  static const int kLengthOffset =
-      kFeedbackVectorOffset + kPointerSize;
+  static const int kLengthOffset = kLastPointerFieldOffset + kPointerSize;
   static const int kFormalParameterCountOffset =
       kLengthOffset + kIntSize;
 
@@ -7029,7 +7058,7 @@
 
 #elif V8_TARGET_BIG_ENDIAN
   static const int kFormalParameterCountOffset =
-      kFeedbackVectorOffset + kPointerSize;
+      kLastPointerFieldOffset + kPointerSize;
   static const int kLengthOffset = kFormalParameterCountOffset + kIntSize;
 
   static const int kNumLiteralsOffset = kLengthOffset + kIntSize;
@@ -7062,7 +7091,7 @@
   static const int kAlignedSize = POINTER_SIZE_ALIGN(kSize);
 
   typedef FixedBodyDescriptor<kNameOffset,
-                              kFeedbackVectorOffset + kPointerSize,
+                              kLastPointerFieldOffset + kPointerSize,
                               kSize> BodyDescriptor;
 
   // Bit positions in start_position_and_type.
@@ -7092,12 +7121,14 @@
     kIsArrow,
     kIsGenerator,
     kIsConciseMethod,
+    kIsDefaultConstructor,
+    kIsDefaultConstructorCallSuper,
     kIsAsmFunction,
     kDeserialized,
     kCompilerHintsCount  // Pseudo entry
   };
 
-  class FunctionKindBits : public BitField<FunctionKind, kIsArrow, 3> {};
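+  // FunctionKindBits covers kIsArrow through kIsDefaultConstructorCallSuper.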
+  class FunctionKindBits : public BitField<FunctionKind, kIsArrow, 5> {};
 
   class DeoptCountBits : public BitField<int, 0, 4> {};
   class OptReenableTriesBits : public BitField<int, 4, 18> {};
@@ -7538,6 +7569,9 @@
 
   DECLARE_CAST(GlobalObject)
 
+  static void InvalidatePropertyCell(Handle<GlobalObject> object,
+                                     Handle<Name> name);
+
   // Layout description.
   static const int kBuiltinsOffset = JSObject::kHeaderSize;
   static const int kNativeContextOffset = kBuiltinsOffset + kPointerSize;
@@ -8578,6 +8612,10 @@
   DECLARE_CAST(Name)
 
   DECLARE_PRINTER(Name)
+#if TRACE_MAPS
+  void NameShortPrint();
+  int NameShortPrint(Vector<char> str);
+#endif
 
   // Layout description.
   static const int kHashFieldSlot = HeapObject::kHeaderSize;
@@ -8680,6 +8718,10 @@
 
   const char* PrivateSymbolToName() const;
 
+#if TRACE_MAPS
+  friend class Name;  // For PrivateSymbolToName.
+#endif
+
   DISALLOW_IMPLICIT_CONSTRUCTORS(Symbol);
 };
 
diff --git a/src/parser.cc b/src/parser.cc
index c5bf0d9..e34854f 100644
--- a/src/parser.cc
+++ b/src/parser.cc
@@ -275,6 +275,62 @@
 }
 
 
+FunctionLiteral* Parser::DefaultConstructor(bool call_super, Scope* scope,
+                                            int pos, int end_pos) {
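+  // Synthesizes the implicit constructor for a class that does not declare
+  // one: an empty body for base classes or, when call_super is set, a body
+  // whose only statement calls super().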
+  int materialized_literal_count = -1;
+  int expected_property_count = -1;
+  int handler_count = 0;
+  int parameter_count = 0;
+  AstProperties ast_properties;
+  BailoutReason dont_optimize_reason = kNoReason;
+  const AstRawString* name = ast_value_factory()->empty_string();
+  FunctionKind kind = call_super ? FunctionKind::kDefaultConstructorCallSuper
+                                 : FunctionKind::kDefaultConstructor;
+
+  Scope* function_scope = NewScope(scope, FUNCTION_SCOPE);
+  function_scope->SetStrictMode(STRICT);
+  // Set start and end position to the same value (end_pos is unused here).
+  function_scope->set_start_position(pos);
+  function_scope->set_end_position(pos);
+  ZoneList<Statement*>* body = NULL;
+
+  {
+    AstNodeFactory<AstConstructionVisitor> function_factory(
+        ast_value_factory());
+    FunctionState function_state(&function_state_, &scope_, function_scope,
+                                 &function_factory);
+
+    body = new (zone()) ZoneList<Statement*>(1, zone());
+    if (call_super) {
+      Expression* prop = SuperReference(function_scope, factory(), pos);
+      ZoneList<Expression*>* args =
+          new (zone()) ZoneList<Expression*>(0, zone());
+      Call* call = factory()->NewCall(prop, args, pos);
+      body->Add(factory()->NewExpressionStatement(call, pos), zone());
+    }
+
+    materialized_literal_count = function_state.materialized_literal_count();
+    expected_property_count = function_state.expected_property_count();
+    handler_count = function_state.handler_count();
+
+    ast_properties = *factory()->visitor()->ast_properties();
+    dont_optimize_reason = factory()->visitor()->dont_optimize_reason();
+  }
+
+  FunctionLiteral* function_literal = factory()->NewFunctionLiteral(
+      name, ast_value_factory(), function_scope, body,
+      materialized_literal_count, expected_property_count, handler_count,
+      parameter_count, FunctionLiteral::kNoDuplicateParameters,
+      FunctionLiteral::ANONYMOUS_EXPRESSION, FunctionLiteral::kIsFunction,
+      FunctionLiteral::kNotParenthesized, kind, pos);
+
+  function_literal->set_ast_properties(&ast_properties);
+  function_literal->set_dont_optimize_reason(dont_optimize_reason);
+
+  return function_literal;
+}
+
+
 // ----------------------------------------------------------------------------
 // Target is a support class to facilitate manipulation of the
 // Parser's target_stack_ (the stack of potential 'break' and
@@ -648,6 +704,13 @@
                                   start_position, end_position);
 }
 
+
+Expression* ParserTraits::DefaultConstructor(bool call_super, Scope* scope,
+                                             int pos, int end_pos) {
+  return parser_->DefaultConstructor(call_super, scope, pos, end_pos);
+}
+
+
 Literal* ParserTraits::ExpressionFromLiteral(
     Token::Value token, int pos,
     Scanner* scanner,
@@ -1004,6 +1067,11 @@
       Expression* expression = ParseExpression(false, &ok);
       DCHECK(expression->IsFunctionLiteral());
       result = expression->AsFunctionLiteral();
+    } else if (shared_info->is_default_constructor() ||
+               shared_info->is_default_constructor_call_super()) {
+      result = DefaultConstructor(
+          shared_info->is_default_constructor_call_super(), scope,
+          shared_info->start_position(), shared_info->end_position());
     } else {
       result = ParseFunctionLiteral(raw_name, Scanner::Location::invalid(),
                                     false,  // Strict mode name already checked.
diff --git a/src/parser.h b/src/parser.h
index db9071a..c815a45 100644
--- a/src/parser.h
+++ b/src/parser.h
@@ -547,7 +547,8 @@
                               ZoneList<ObjectLiteral::Property*>* properties,
                               int start_position, int end_position,
                               AstNodeFactory<AstConstructionVisitor>* factory);
-
+  Expression* DefaultConstructor(bool call_super, Scope* scope, int pos,
+                                 int end_pos);
   Literal* ExpressionFromLiteral(
       Token::Value token, int pos, Scanner* scanner,
       AstNodeFactory<AstConstructionVisitor>* factory);
@@ -804,6 +805,9 @@
 
   Scope* NewScope(Scope* parent, ScopeType type);
 
+  FunctionLiteral* DefaultConstructor(bool call_super, Scope* scope, int pos,
+                                      int end_pos);
+
   // Skip over a lazy function, either using cached data if we have it, or
   // by parsing the function with PreParser. Consumes the ending }.
   void SkipLazyFunctionBody(const AstRawString* function_name,
diff --git a/src/preparser.h b/src/preparser.h
index ddf9cec..7c9791f 100644
--- a/src/preparser.h
+++ b/src/preparser.h
@@ -1325,6 +1325,12 @@
     return PreParserExpression::Default();
   }
 
+  static PreParserExpression DefaultConstructor(bool call_super,
+                                                PreParserScope* scope, int pos,
+                                                int end_pos) {
+    return PreParserExpression::Default();
+  }
+
   static PreParserExpression ExpressionFromLiteral(
       Token::Value token, int pos, Scanner* scanner,
       PreParserFactory* factory) {
@@ -2747,12 +2753,14 @@
     return this->EmptyExpression();
   }
 
+  bool has_extends = false;
   ExpressionT extends = this->EmptyExpression();
   if (Check(Token::EXTENDS)) {
     typename Traits::Type::ScopePtr scope = this->NewScope(scope_, BLOCK_SCOPE);
     BlockState block_state(&scope_, Traits::Type::ptr_to_scope(scope));
     scope_->SetStrictMode(STRICT);
     extends = this->ParseLeftHandSideExpression(CHECK_OK);
+    has_extends = true;
   }
 
   // TODO(arv): Implement scopes and name binding in class body only.
@@ -2791,6 +2799,11 @@
   int end_pos = peek_position();
   Expect(Token::RBRACE, CHECK_OK);
 
+  if (!has_seen_constructor) {
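+    // No explicit constructor was declared: synthesize the default one;
+    // has_extends selects the variant that calls super().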
+    constructor =
+        this->DefaultConstructor(has_extends, scope_, pos, end_pos + 1);
+  }
+
   return this->ClassExpression(name, extends, constructor, properties, pos,
                                end_pos + 1, factory());
 }
diff --git a/src/runtime/runtime-classes.cc b/src/runtime/runtime-classes.cc
index cc4e09b..30ff918 100644
--- a/src/runtime/runtime-classes.cc
+++ b/src/runtime/runtime-classes.cc
@@ -62,7 +62,7 @@
   DCHECK(args.length() == 6);
   CONVERT_ARG_HANDLE_CHECKED(Object, name, 0);
   CONVERT_ARG_HANDLE_CHECKED(Object, super_class, 1);
-  CONVERT_ARG_HANDLE_CHECKED(Object, constructor, 2);
+  CONVERT_ARG_HANDLE_CHECKED(JSFunction, constructor, 2);
   CONVERT_ARG_HANDLE_CHECKED(Script, script, 3);
   CONVERT_SMI_ARG_CHECKED(start_position, 4);
   CONVERT_SMI_ARG_CHECKED(end_position, 5);
@@ -104,52 +104,45 @@
   Handle<String> name_string = name->IsString()
                                    ? Handle<String>::cast(name)
                                    : isolate->factory()->empty_string();
+  constructor->shared()->set_name(*name_string);
 
-  Handle<JSFunction> ctor;
-  if (constructor->IsSpecFunction()) {
-    ctor = Handle<JSFunction>::cast(constructor);
-    JSFunction::SetPrototype(ctor, prototype);
-    PropertyAttributes attribs =
-        static_cast<PropertyAttributes>(DONT_ENUM | DONT_DELETE | READ_ONLY);
-    RETURN_FAILURE_ON_EXCEPTION(
-        isolate,
-        JSObject::SetOwnPropertyIgnoreAttributes(
-            ctor, isolate->factory()->prototype_string(), prototype, attribs));
-  } else {
-    // TODO(arv): This should not use an empty function but a function that
-    // calls super.
-    Handle<Code> code(isolate->builtins()->builtin(Builtins::kEmptyFunction));
-    ctor = isolate->factory()->NewFunction(name_string, code, prototype, true);
-  }
-
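+  // The parser guarantees |constructor| is a JSFunction, synthesizing a
+  // default constructor when the class body omits one.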
+  JSFunction::SetPrototype(constructor, prototype);
+  PropertyAttributes attribs =
+      static_cast<PropertyAttributes>(DONT_ENUM | DONT_DELETE | READ_ONLY);
+  RETURN_FAILURE_ON_EXCEPTION(
+      isolate, JSObject::SetOwnPropertyIgnoreAttributes(
+                   constructor, isolate->factory()->prototype_string(),
+                   prototype, attribs));
   Handle<Symbol> home_object_symbol(isolate->heap()->home_object_symbol());
   RETURN_FAILURE_ON_EXCEPTION(
       isolate, JSObject::SetOwnPropertyIgnoreAttributes(
-                   ctor, home_object_symbol, prototype, DONT_ENUM));
+                   constructor, home_object_symbol, prototype, DONT_ENUM));
 
   if (!constructor_parent.is_null()) {
     RETURN_FAILURE_ON_EXCEPTION(
-        isolate, JSObject::SetPrototype(ctor, constructor_parent, false));
+        isolate,
+        JSObject::SetPrototype(constructor, constructor_parent, false));
   }
 
   JSObject::AddProperty(prototype, isolate->factory()->constructor_string(),
-                        ctor, DONT_ENUM);
+                        constructor, DONT_ENUM);
 
   // Install private properties that are used to construct the FunctionToString.
   RETURN_FAILURE_ON_EXCEPTION(
+      isolate, Object::SetProperty(constructor,
+                                   isolate->factory()->class_script_symbol(),
+                                   script, STRICT));
+  RETURN_FAILURE_ON_EXCEPTION(
       isolate,
-      Object::SetProperty(ctor, isolate->factory()->class_script_symbol(),
-                          script, STRICT));
+      Object::SetProperty(
+          constructor, isolate->factory()->class_start_position_symbol(),
+          handle(Smi::FromInt(start_position), isolate), STRICT));
   RETURN_FAILURE_ON_EXCEPTION(
       isolate, Object::SetProperty(
-                   ctor, isolate->factory()->class_start_position_symbol(),
-                   handle(Smi::FromInt(start_position), isolate), STRICT));
-  RETURN_FAILURE_ON_EXCEPTION(
-      isolate,
-      Object::SetProperty(ctor, isolate->factory()->class_end_position_symbol(),
-                          handle(Smi::FromInt(end_position), isolate), STRICT));
+                   constructor, isolate->factory()->class_end_position_symbol(),
+                   handle(Smi::FromInt(end_position), isolate), STRICT));
 
-  return *ctor;
+  return *constructor;
 }
 
 
diff --git a/src/runtime/runtime-debug.cc b/src/runtime/runtime-debug.cc
index 95ac77b..00ac921 100644
--- a/src/runtime/runtime-debug.cc
+++ b/src/runtime/runtime-debug.cc
@@ -2083,7 +2083,8 @@
 static Handle<JSObject> NewJSObjectWithNullProto(Isolate* isolate) {
   Handle<JSObject> result =
       isolate->factory()->NewJSObject(isolate->object_function());
-  Handle<Map> new_map = Map::Copy(Handle<Map>(result->map()));
+  Handle<Map> new_map =
+      Map::Copy(Handle<Map>(result->map()), "ObjectWithNullProto");
   new_map->set_prototype(*isolate->factory()->null_value());
   JSObject::MigrateToMap(result, new_map);
   return result;
diff --git a/src/runtime/runtime-literals.cc b/src/runtime/runtime-literals.cc
index 23b5b19..c6efd02 100644
--- a/src/runtime/runtime-literals.cc
+++ b/src/runtime/runtime-literals.cc
@@ -109,7 +109,7 @@
   if (should_normalize) {
     // TODO(verwaest): We might not want to ever normalize here.
     JSObject::NormalizeProperties(boilerplate, KEEP_INOBJECT_PROPERTIES,
-                                  length / 2);
+                                  length / 2, "Boilerplate");
   }
   // TODO(verwaest): Support tracking representations in the boilerplate.
   for (int index = 0; index < length; index += 2) {
@@ -166,7 +166,8 @@
   // constant function properties.
   if (should_transform && !has_function_literal) {
     JSObject::MigrateSlowToFast(boilerplate,
-                                boilerplate->map()->unused_property_fields());
+                                boilerplate->map()->unused_property_fields(),
+                                "FastLiteral");
   }
 
   return boilerplate;
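
The boilerplate is first normalized into dictionary mode, which is cheap to populate, then migrated back to fast properties once filled, unless it contains function literals; the reason strings added here ("Boilerplate", "FastLiteral") tag those transitions for map tracing. The observable end state, sketched as a hypothetical d8 session (requires --allow-natives-syntax; %HasFastProperties is an internal test helper):

// A plain object literal should come out of the slow-to-fast
// migration with fast (in-object) properties again.
var o = {a: 1, b: 2, c: 3};
// %HasFastProperties(o)  -> expected: true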
diff --git a/src/runtime/runtime-object.cc b/src/runtime/runtime-object.cc
index 74cb8cb..b2a736d 100644
--- a/src/runtime/runtime-object.cc
+++ b/src/runtime/runtime-object.cc
@@ -469,7 +469,7 @@
   bool needs_access_checks = old_map->is_access_check_needed();
   if (needs_access_checks) {
     // Copy the map so it won't interfere with the constructor's initial map.
-    Handle<Map> new_map = Map::Copy(old_map);
+    Handle<Map> new_map = Map::Copy(old_map, "DisableAccessChecks");
     new_map->set_is_access_check_needed(false);
     JSObject::MigrateToMap(Handle<JSObject>::cast(object), new_map);
   }
@@ -484,7 +484,7 @@
   Handle<Map> old_map(object->map());
   RUNTIME_ASSERT(!old_map->is_access_check_needed());
   // Copy the map so it won't interfere with the constructor's initial map.
-  Handle<Map> new_map = Map::Copy(old_map);
+  Handle<Map> new_map = Map::Copy(old_map, "EnableAccessChecks");
   new_map->set_is_access_check_needed(true);
   JSObject::MigrateToMap(object, new_map);
   return isolate->heap()->undefined_value();
@@ -499,7 +499,8 @@
   // Conservative upper limit to prevent fuzz tests from going OOM.
   RUNTIME_ASSERT(properties <= 100000);
   if (object->HasFastProperties() && !object->IsJSGlobalProxy()) {
-    JSObject::NormalizeProperties(object, KEEP_INOBJECT_PROPERTIES, properties);
+    JSObject::NormalizeProperties(object, KEEP_INOBJECT_PROPERTIES, properties,
+                                  "OptimizeForAdding");
   }
   return *object;
 }
@@ -1152,7 +1153,8 @@
   DCHECK(args.length() == 1);
   CONVERT_ARG_HANDLE_CHECKED(Object, object, 0);
   if (object->IsJSObject() && !object->IsGlobalObject()) {
-    JSObject::MigrateSlowToFast(Handle<JSObject>::cast(object), 0);
+    JSObject::MigrateSlowToFast(Handle<JSObject>::cast(object), 0,
+                                "RuntimeToFastProperties");
   }
   return *object;
 }
@@ -1456,7 +1458,7 @@
   bool fast = obj->HasFastProperties();
   RETURN_FAILURE_ON_EXCEPTION(
       isolate, JSObject::DefineAccessor(obj, name, getter, setter, attr));
-  if (fast) JSObject::MigrateSlowToFast(obj, 0);
+  if (fast) JSObject::MigrateSlowToFast(obj, 0, "RuntimeDefineAccessor");
   return isolate->heap()->undefined_value();
 }
 
diff --git a/src/runtime/runtime-scopes.cc b/src/runtime/runtime-scopes.cc
index c935cda..5ee9b25 100644
--- a/src/runtime/runtime-scopes.cc
+++ b/src/runtime/runtime-scopes.cc
@@ -27,6 +27,14 @@
                               Handle<String> name, Handle<Object> value,
                               PropertyAttributes attr, bool is_var,
                               bool is_const, bool is_function) {
+  Handle<GlobalContextTable> global_contexts(
+      global->native_context()->global_context_table());
+  GlobalContextTable::LookupResult lookup;
+  if (GlobalContextTable::Lookup(global_contexts, name, &lookup) &&
+      IsLexicalVariableMode(lookup.mode)) {
+    return ThrowRedeclarationError(isolate, name);
+  }
+
   // Look up own properties only; see the ES5 erratum.
   LookupIterator it(global, name, LookupIterator::HIDDEN_SKIP_INTERCEPTOR);
   Maybe<PropertyAttributes> maybe = JSReceiver::GetPropertyAttributes(&it);
@@ -347,7 +355,7 @@
           isolate->factory()->NewFixedArray(mapped_count + 2, NOT_TENURED);
       parameter_map->set_map(isolate->heap()->sloppy_arguments_elements_map());
 
-      Handle<Map> map = Map::Copy(handle(result->map()));
+      Handle<Map> map = Map::Copy(handle(result->map()), "NewSloppyArguments");
       map->set_elements_kind(SLOPPY_ARGUMENTS_ELEMENTS);
 
       result->set_map(*map);
@@ -507,6 +515,35 @@
                                                                 pretenure_flag);
 }
 
+static Object* FindNameClash(Handle<ScopeInfo> scope_info,
+                             Handle<GlobalObject> global_object,
+                             Handle<GlobalContextTable> global_context) {
+  Isolate* isolate = scope_info->GetIsolate();
+  for (int var = 0; var < scope_info->ContextLocalCount(); var++) {
+    Handle<String> name(scope_info->ContextLocalName(var));
+    VariableMode mode = scope_info->ContextLocalMode(var);
+    GlobalContextTable::LookupResult lookup;
+    if (GlobalContextTable::Lookup(global_context, name, &lookup)) {
+      if (IsLexicalVariableMode(mode) || IsLexicalVariableMode(lookup.mode)) {
+        return ThrowRedeclarationError(isolate, name);
+      }
+    }
+
+    if (IsLexicalVariableMode(mode)) {
+      LookupIterator it(global_object, name,
+                        LookupIterator::HIDDEN_SKIP_INTERCEPTOR);
+      Maybe<PropertyAttributes> maybe = JSReceiver::GetPropertyAttributes(&it);
+      if (!maybe.has_value) return isolate->heap()->exception();
+      if ((maybe.value & DONT_DELETE) != 0) {
+        return ThrowRedeclarationError(isolate, name);
+      }
+
+      GlobalObject::InvalidatePropertyCell(global_object, name);
+    }
+  }
+  return isolate->heap()->undefined_value();
+}
+
 
 RUNTIME_FUNCTION(Runtime_NewGlobalContext) {
   HandleScope scope(isolate);
@@ -514,12 +551,25 @@
 
   CONVERT_ARG_HANDLE_CHECKED(JSFunction, function, 0);
   CONVERT_ARG_HANDLE_CHECKED(ScopeInfo, scope_info, 1);
+  Handle<GlobalObject> global_object(function->context()->global_object());
+  Handle<Context> native_context(global_object->native_context());
+  Handle<GlobalContextTable> global_context_table(
+      native_context->global_context_table());
+
+  Object* name_clash_result =
+      FindNameClash(scope_info, global_object, global_context_table);
+  if (isolate->has_pending_exception()) return name_clash_result;
+
   Handle<Context> result =
       isolate->factory()->NewGlobalContext(function, scope_info);
 
   DCHECK(function->context() == isolate->context());
   DCHECK(function->context()->global_object() == result->global_object());
-  result->global_object()->set_global_context(*result);
+
+  Handle<GlobalContextTable> new_global_context_table =
+      GlobalContextTable::Extend(global_context_table, result);
+  native_context->set_global_context_table(*new_global_context_table);
   return *result;
 }
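
These two hunks implement the script-scope checks for lexical declarations: DeclareGlobals now rejects a name that an earlier script bound with let or const, and FindNameClash runs the reverse check (and invalidates the global property cell) before NewGlobalContext registers the new script context in the global context table. The JavaScript-level behavior, sketched across separate scripts (each snippet stands for its own script, mirroring the test-decls cases added below):

// Script 1:
'use strict'; let x = 1;
// Script 2: clashes with the earlier lexical binding and throws a
// redeclaration error before the script body runs.
'use strict'; let x = 5;
// A 'var' or function declaration of the same name is rejected the
// same way, as is a later 'let' of a name that already exists as a
// DONT_DELETE property on the global object.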
 
diff --git a/src/scanner.cc b/src/scanner.cc
index e63239d..ddcd937 100644
--- a/src/scanner.cc
+++ b/src/scanner.cc
@@ -1138,24 +1138,6 @@
 }
 
 
-bool Scanner::ScanLiteralUnicodeEscape() {
-  DCHECK(c0_ == '\\');
-  AddLiteralChar(c0_);
-  Advance();
-  int hex_digits_read = 0;
-  if (c0_ == 'u') {
-    AddLiteralChar(c0_);
-    while (hex_digits_read < 4) {
-      Advance();
-      if (!IsHexDigit(c0_)) break;
-      AddLiteralChar(c0_);
-      ++hex_digits_read;
-    }
-  }
-  return hex_digits_read == 4;
-}
-
-
 bool Scanner::ScanRegExpFlags() {
   // Scan regular expression flags.
   LiteralScope literal(this);
@@ -1163,10 +1145,7 @@
     if (c0_ != '\\') {
       AddLiteralCharAdvance();
     } else {
-      if (!ScanLiteralUnicodeEscape()) {
-        return false;
-      }
-      Advance();
+      return false;
     }
   }
   literal.Complete();
diff --git a/src/scanner.h b/src/scanner.h
index 387d331..e626f20 100644
--- a/src/scanner.h
+++ b/src/scanner.h
@@ -637,10 +637,6 @@
   // Decodes a Unicode escape-sequence which is part of an identifier.
   // If the escape sequence cannot be decoded the result is kBadChar.
   uc32 ScanIdentifierUnicodeEscape();
-  // Scans a Unicode escape-sequence and adds its characters,
-  // uninterpreted, to the current literal. Used for parsing RegExp
-  // flags.
-  bool ScanLiteralUnicodeEscape();
 
   // Return the current source position.
   int source_pos() {
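
With ScanLiteralUnicodeEscape gone, any backslash in the flags of a regexp literal fails the scan outright instead of being decoded and rejected later; ES6 makes unicode escapes in regexp flags an early SyntaxError. The user-visible effect, matching the test-parsing and regress-136048 updates further down (a sketch):

eval("/foo/\\u0069");  // SyntaxError now, even though \u0069 is 'i'
eval("/foo/\\u006g");  // SyntaxError as well (malformed escape)
/foo/ig;               // unescaped literal flags are unaffected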
diff --git a/src/version.cc b/src/version.cc
index 601a599..6136739 100644
--- a/src/version.cc
+++ b/src/version.cc
@@ -33,8 +33,8 @@
 // NOTE these macros are used by some of the tool scripts and the build
 // system so their names cannot be changed without changing the scripts.
 #define MAJOR_VERSION     3
-#define MINOR_VERSION     30
-#define BUILD_NUMBER      37
+#define MINOR_VERSION     31
+#define BUILD_NUMBER      0
 #define PATCH_LEVEL       0
 // Use 1 for candidates and 0 otherwise.
 // (Boolean macro values are not supported by all preprocessors.)
diff --git a/src/x64/code-stubs-x64.cc b/src/x64/code-stubs-x64.cc
index 5ea5f72..f17389c 100644
--- a/src/x64/code-stubs-x64.cc
+++ b/src/x64/code-stubs-x64.cc
@@ -2087,6 +2087,10 @@
   // rdi - function
   // rdx - slot id
   Isolate* isolate = masm->isolate();
+  const int with_types_offset =
+      FixedArray::OffsetOfElementAt(TypeFeedbackVector::kWithTypesIndex);
+  const int generic_offset =
+      FixedArray::OffsetOfElementAt(TypeFeedbackVector::kGenericCountIndex);
   Label extra_checks_or_miss, slow_start;
   Label slow, non_function, wrap, cont;
   Label have_js_function;
@@ -2128,34 +2132,64 @@
   }
 
   __ bind(&extra_checks_or_miss);
-  Label miss;
+  Label uninitialized, miss;
 
   __ movp(rcx, FieldOperand(rbx, rdx, times_pointer_size,
                             FixedArray::kHeaderSize));
   __ Cmp(rcx, TypeFeedbackVector::MegamorphicSentinel(isolate));
   __ j(equal, &slow_start);
-  __ Cmp(rcx, TypeFeedbackVector::UninitializedSentinel(isolate));
-  __ j(equal, &miss);
 
-  if (!FLAG_trace_ic) {
-    // We are going megamorphic. If the feedback is a JSFunction, it is fine
-    // to handle it here. More complex cases are dealt with in the runtime.
-    __ AssertNotSmi(rcx);
-    __ CmpObjectType(rcx, JS_FUNCTION_TYPE, rcx);
-    __ j(not_equal, &miss);
-    __ Move(FieldOperand(rbx, rdx, times_pointer_size, FixedArray::kHeaderSize),
-            TypeFeedbackVector::MegamorphicSentinel(isolate));
-    // We have to update statistics for runtime profiling.
-    const int with_types_offset =
-        FixedArray::OffsetOfElementAt(TypeFeedbackVector::kWithTypesIndex);
-    __ SmiAddConstant(FieldOperand(rbx, with_types_offset), Smi::FromInt(-1));
-    const int generic_offset =
-        FixedArray::OffsetOfElementAt(TypeFeedbackVector::kGenericCountIndex);
-    __ SmiAddConstant(FieldOperand(rbx, generic_offset), Smi::FromInt(1));
-    __ jmp(&slow_start);
+  // The following code tries to handle MISS cases without going to the
+  // runtime.
+  if (FLAG_trace_ic) {
+    __ jmp(&miss);
   }
 
-  // We are here because tracing is on or we are going monomorphic.
+  __ Cmp(rcx, TypeFeedbackVector::UninitializedSentinel(isolate));
+  __ j(equal, &uninitialized);
+
+  // We are going megamorphic. If the feedback is a JSFunction, it is fine
+  // to handle it here. More complex cases are dealt with in the runtime.
+  __ AssertNotSmi(rcx);
+  __ CmpObjectType(rcx, JS_FUNCTION_TYPE, rcx);
+  __ j(not_equal, &miss);
+  __ Move(FieldOperand(rbx, rdx, times_pointer_size, FixedArray::kHeaderSize),
+          TypeFeedbackVector::MegamorphicSentinel(isolate));
+  // We have to update statistics for runtime profiling.
+  __ SmiAddConstant(FieldOperand(rbx, with_types_offset), Smi::FromInt(-1));
+  __ SmiAddConstant(FieldOperand(rbx, generic_offset), Smi::FromInt(1));
+  __ jmp(&slow_start);
+
+  __ bind(&uninitialized);
+
+  // We are going monomorphic, provided we actually have a JSFunction.
+  __ JumpIfSmi(rdi, &miss);
+
+  // Go to the miss case if we do not have a function.
+  __ CmpObjectType(rdi, JS_FUNCTION_TYPE, rcx);
+  __ j(not_equal, &miss);
+
+  // Make sure the function is not the Array() function, which requires special
+  // behavior on MISS.
+  __ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, rcx);
+  __ cmpp(rdi, rcx);
+  __ j(equal, &miss);
+
+  // Update stats.
+  __ SmiAddConstant(FieldOperand(rbx, with_types_offset), Smi::FromInt(1));
+
+  // Store the function.
+  __ movp(FieldOperand(rbx, rdx, times_pointer_size, FixedArray::kHeaderSize),
+          rdi);
+
+  // Update the write barrier.
+  __ movp(rax, rdi);
+  __ RecordWriteArray(rbx, rax, rdx, kDontSaveFPRegs, EMIT_REMEMBERED_SET,
+                      OMIT_SMI_CHECK);
+  __ jmp(&have_js_function);
+
+  // We are here because tracing is on or we encountered a MISS case we can't
+  // handle here.
   __ bind(&miss);
   GenerateMiss(masm);
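
The CallIC stub now performs the uninitialized-to-monomorphic transition itself: for an uninitialized slot being called with a genuine JSFunction (other than the Array function, which the comment above notes requires special MISS behavior), it bumps the with-types counter, stores the function into the feedback vector with a write barrier, and proceeds to the fast call path. Rough JavaScript intuition for the slot states (a sketch; the comments describe feedback, not observable results):

function callsite(f) { return f(); }
function g() { return 1; }
callsite(g);      // slot: uninitialized -> g, handled in the stub now
callsite(g);      // monomorphic hit, straight to have_js_function
callsite(Array);  // special-cased, still goes through the miss path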
 
diff --git a/src/x87/code-stubs-x87.cc b/src/x87/code-stubs-x87.cc
index 202dec6..8925d4f 100644
--- a/src/x87/code-stubs-x87.cc
+++ b/src/x87/code-stubs-x87.cc
@@ -1900,6 +1900,10 @@
   // edi - function
   // edx - slot id
   Isolate* isolate = masm->isolate();
+  const int with_types_offset =
+      FixedArray::OffsetOfElementAt(TypeFeedbackVector::kWithTypesIndex);
+  const int generic_offset =
+      FixedArray::OffsetOfElementAt(TypeFeedbackVector::kGenericCountIndex);
   Label extra_checks_or_miss, slow_start;
   Label slow, non_function, wrap, cont;
   Label have_js_function;
@@ -1939,35 +1943,66 @@
   }
 
   __ bind(&extra_checks_or_miss);
-  Label miss;
+  Label uninitialized, miss;
 
   __ mov(ecx, FieldOperand(ebx, edx, times_half_pointer_size,
                            FixedArray::kHeaderSize));
   __ cmp(ecx, Immediate(TypeFeedbackVector::MegamorphicSentinel(isolate)));
   __ j(equal, &slow_start);
-  __ cmp(ecx, Immediate(TypeFeedbackVector::UninitializedSentinel(isolate)));
-  __ j(equal, &miss);
 
-  if (!FLAG_trace_ic) {
-    // We are going megamorphic. If the feedback is a JSFunction, it is fine
-    // to handle it here. More complex cases are dealt with in the runtime.
-    __ AssertNotSmi(ecx);
-    __ CmpObjectType(ecx, JS_FUNCTION_TYPE, ecx);
-    __ j(not_equal, &miss);
-    __ mov(FieldOperand(ebx, edx, times_half_pointer_size,
-                        FixedArray::kHeaderSize),
-           Immediate(TypeFeedbackVector::MegamorphicSentinel(isolate)));
-    // We have to update statistics for runtime profiling.
-    const int with_types_offset =
-        FixedArray::OffsetOfElementAt(TypeFeedbackVector::kWithTypesIndex);
-    __ sub(FieldOperand(ebx, with_types_offset), Immediate(Smi::FromInt(1)));
-    const int generic_offset =
-        FixedArray::OffsetOfElementAt(TypeFeedbackVector::kGenericCountIndex);
-    __ add(FieldOperand(ebx, generic_offset), Immediate(Smi::FromInt(1)));
-    __ jmp(&slow_start);
+  // The following code tries to handle MISS cases without going to the
+  // runtime.
+  if (FLAG_trace_ic) {
+    __ jmp(&miss);
   }
 
-  // We are here because tracing is on or we are going monomorphic.
+  __ cmp(ecx, Immediate(TypeFeedbackVector::UninitializedSentinel(isolate)));
+  __ j(equal, &uninitialized);
+
+  // We are going megamorphic. If the feedback is a JSFunction, it is fine
+  // to handle it here. More complex cases are dealt with in the runtime.
+  __ AssertNotSmi(ecx);
+  __ CmpObjectType(ecx, JS_FUNCTION_TYPE, ecx);
+  __ j(not_equal, &miss);
+  __ mov(
+      FieldOperand(ebx, edx, times_half_pointer_size, FixedArray::kHeaderSize),
+      Immediate(TypeFeedbackVector::MegamorphicSentinel(isolate)));
+  // We have to update statistics for runtime profiling.
+  __ sub(FieldOperand(ebx, with_types_offset), Immediate(Smi::FromInt(1)));
+  __ add(FieldOperand(ebx, generic_offset), Immediate(Smi::FromInt(1)));
+  __ jmp(&slow_start);
+
+  __ bind(&uninitialized);
+
+  // We are going monomorphic, provided we actually have a JSFunction.
+  __ JumpIfSmi(edi, &miss);
+
+  // Go to the miss case if we do not have a function.
+  __ CmpObjectType(edi, JS_FUNCTION_TYPE, ecx);
+  __ j(not_equal, &miss);
+
+  // Make sure the function is not the Array() function, which requires special
+  // behavior on MISS.
+  __ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, ecx);
+  __ cmp(edi, ecx);
+  __ j(equal, &miss);
+
+  // Update stats.
+  __ add(FieldOperand(ebx, with_types_offset), Immediate(Smi::FromInt(1)));
+
+  // Store the function.
+  __ mov(
+      FieldOperand(ebx, edx, times_half_pointer_size, FixedArray::kHeaderSize),
+      edi);
+
+  // Update the write barrier.
+  __ mov(eax, edi);
+  __ RecordWriteArray(ebx, eax, edx, kDontSaveFPRegs, EMIT_REMEMBERED_SET,
+                      OMIT_SMI_CHECK);
+  __ jmp(&have_js_function);
+
+  // We are here because tracing is on or we encountered a MISS case we can't
+  // handle here.
   __ bind(&miss);
   GenerateMiss(masm);
 
@@ -2862,6 +2897,25 @@
 }
 
 
+void ToNumberStub::Generate(MacroAssembler* masm) {
+  // The ToNumber stub takes one argument in eax.
+  Label check_heap_number, call_builtin;
+  __ JumpIfNotSmi(eax, &check_heap_number, Label::kNear);
+  __ Ret();
+
+  __ bind(&check_heap_number);
+  __ CompareMap(eax, masm->isolate()->factory()->heap_number_map());
+  __ j(not_equal, &call_builtin, Label::kNear);
+  __ Ret();
+
+  __ bind(&call_builtin);
+  __ pop(ecx);  // Pop return address.
+  __ push(eax);
+  __ push(ecx);  // Push return address.
+  __ InvokeBuiltin(Builtins::TO_NUMBER, JUMP_FUNCTION);
+}
+
+
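+
+// [Annotation] ToNumberStub fast-paths the two representations that already
+// are numbers: Smis return immediately, heap numbers return after a map
+// check, and everything else is forwarded to the TO_NUMBER builtin with the
+// argument re-pushed under the return address. Its JavaScript-level
+// contract, in the mjsunit assert style used elsewhere in this patch
+// (a sketch):
+//
+//   assertEquals(1, +1);      // Smi: returned as-is
+//   assertEquals(1.5, +1.5);  // HeapNumber: map check, returned as-is
+//   assertEquals(42, +"42");  // anything else: Builtins::TO_NUMBER
+//   assertEquals(7, +{valueOf: function() { return 7; }});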
 void StringHelper::GenerateFlatOneByteStringEquals(MacroAssembler* masm,
                                                    Register left,
                                                    Register right,
diff --git a/src/x87/lithium-codegen-x87.cc b/src/x87/lithium-codegen-x87.cc
index f8872d7..284a666 100644
--- a/src/x87/lithium-codegen-x87.cc
+++ b/src/x87/lithium-codegen-x87.cc
@@ -4063,8 +4063,8 @@
     __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
     __ push(temp_result);
     __ CallRuntimeSaveDoubles(Runtime::kMathSqrtRT);
-    RecordSafepointWithRegisters(
-        instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
+    RecordSafepointWithRegisters(instr->pointer_map(), 1,
+                                 Safepoint::kNoLazyDeopt);
     __ StoreToSafepointRegisterSlot(temp_result, eax);
   }
   X87PrepareToWrite(result_reg);
@@ -4278,7 +4278,7 @@
     __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
     __ push(temp_result);
     __ CallRuntimeSaveDoubles(Runtime::kMathExpRT);
-    RecordSafepointWithRegisters(instr->pointer_map(), 0,
+    RecordSafepointWithRegisters(instr->pointer_map(), 1,
                                  Safepoint::kNoLazyDeopt);
     __ StoreToSafepointRegisterSlot(temp_result, eax);
   }
diff --git a/test/cctest/cctest.status b/test/cctest/cctest.status
index 0b76995..9e85a0c 100644
--- a/test/cctest/cctest.status
+++ b/test/cctest/cctest.status
@@ -81,10 +81,7 @@
   ##############################################################################
   # TurboFan compiler failures.
 
-  # TODO(sigurds): The schedule is borked with multiple inlinees,
-  # and cannot handle free-floating loops yet
-  'test-run-inlining/InlineTwiceDependentDiamond': [SKIP],
-  'test-run-inlining/InlineTwiceDependentDiamondDifferent': [SKIP],
+  # TODO(mstarzinger): The scheduler cannot handle free-floating loops yet.
   'test-run-inlining/InlineLoop': [SKIP],
 
   # Some tests are just too slow to run for now.
diff --git a/test/cctest/compiler/test-scheduler.cc b/test/cctest/compiler/test-scheduler.cc
index aed8f8b..659aacd 100644
--- a/test/cctest/compiler/test-scheduler.cc
+++ b/test/cctest/compiler/test-scheduler.cc
@@ -1816,6 +1816,48 @@
 }
 
 
+TEST(NestedFloatingDiamondWithLoop) {
+  HandleAndZoneScope scope;
+  Graph graph(scope.main_zone());
+  CommonOperatorBuilder common(scope.main_zone());
+  SimplifiedOperatorBuilder simplified(scope.main_zone());
+  MachineOperatorBuilder machine;
+
+  Node* start = graph.NewNode(common.Start(2));
+  graph.SetStart(start);
+
+  Node* p0 = graph.NewNode(common.Parameter(0), start);
+
+  Node* fv = graph.NewNode(common.Int32Constant(7));
+  Node* br = graph.NewNode(common.Branch(), p0, graph.start());
+  Node* t = graph.NewNode(common.IfTrue(), br);
+  Node* f = graph.NewNode(common.IfFalse(), br);
+
+  Node* loop = graph.NewNode(common.Loop(2), f, start);
+  Node* ind = graph.NewNode(common.Phi(kMachAnyTagged, 2), p0, p0, loop);
+
+  // TODO(mstarzinger): Make scheduler deal with non-empty loops here.
+  // Node* add = graph.NewNode(machine.IntAdd(), ind, fv);
+
+  Node* br1 = graph.NewNode(common.Branch(), ind, loop);
+  Node* t1 = graph.NewNode(common.IfTrue(), br1);
+  Node* f1 = graph.NewNode(common.IfFalse(), br1);
+
+  loop->ReplaceInput(1, t1);  // close loop.
+  ind->ReplaceInput(1, ind);  // close induction variable.
+
+  Node* m = graph.NewNode(common.Merge(2), t, f1);
+  Node* phi = graph.NewNode(common.Phi(kMachAnyTagged, 2), fv, ind, m);
+
+  Node* ret = graph.NewNode(common.Return(), phi, start, start);
+  Node* end = graph.NewNode(common.End(), ret, start);
+
+  graph.SetEnd(end);
+
+  ComputeAndVerifySchedule(19, &graph);
+}
+
+
 TEST(LoopedFloatingDiamond1) {
   HandleAndZoneScope scope;
   Graph graph(scope.main_zone());
diff --git a/test/cctest/test-assembler-arm.cc b/test/cctest/test-assembler-arm.cc
index b6e260e..2bcf022 100644
--- a/test/cctest/test-assembler-arm.cc
+++ b/test/cctest/test-assembler-arm.cc
@@ -1375,14 +1375,14 @@
   __ pkhtb(r2, r0, Operand(r1, ASR, 8));
   __ str(r2, MemOperand(r4, OFFSET_OF(T, dst1)));
 
-  __ uxtb16(r2, Operand(r0, ROR, 8));
+  __ uxtb16(r2, r0, 8);
   __ str(r2, MemOperand(r4, OFFSET_OF(T, dst2)));
 
-  __ uxtb(r2, Operand(r0, ROR, 8));
+  __ uxtb(r2, r0, 8);
   __ str(r2, MemOperand(r4, OFFSET_OF(T, dst3)));
 
   __ ldr(r0, MemOperand(r4, OFFSET_OF(T, src2)));
-  __ uxtab(r2, r0, Operand(r1, ROR, 8));
+  __ uxtab(r2, r0, r1, 8);
   __ str(r2, MemOperand(r4, OFFSET_OF(T, dst4)));
 
   __ ldm(ia_w, sp, r4.bit() | pc.bit());
@@ -1606,6 +1606,214 @@
 }
 
 
+TEST(sxtb) {
+  CcTest::InitializeVM();
+  Isolate* const isolate = CcTest::i_isolate();
+  HandleScope scope(isolate);
+  RandomNumberGenerator* const rng = isolate->random_number_generator();
+  Assembler assm(isolate, nullptr, 0);
+  __ sxtb(r1, r1);
+  __ str(r1, MemOperand(r0));
+  __ bx(lr);
+  CodeDesc desc;
+  assm.GetCode(&desc);
+  Handle<Code> code = isolate->factory()->NewCode(
+      desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+#ifdef OBJECT_PRINT
+  code->Print(std::cout);
+#endif
+  F3 f = FUNCTION_CAST<F3>(code->entry());
+  for (size_t i = 0; i < 128; ++i) {
+    int32_t r, x = rng->NextInt();
+    Object* dummy = CALL_GENERATED_CODE(f, &r, x, 0, 0, 0);
+    CHECK_EQ(static_cast<int32_t>(static_cast<int8_t>(x)), r);
+    USE(dummy);
+  }
+}
+
+
+TEST(sxtab) {
+  CcTest::InitializeVM();
+  Isolate* const isolate = CcTest::i_isolate();
+  HandleScope scope(isolate);
+  RandomNumberGenerator* const rng = isolate->random_number_generator();
+  Assembler assm(isolate, nullptr, 0);
+  __ sxtab(r1, r2, r1);
+  __ str(r1, MemOperand(r0));
+  __ bx(lr);
+  CodeDesc desc;
+  assm.GetCode(&desc);
+  Handle<Code> code = isolate->factory()->NewCode(
+      desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+#ifdef OBJECT_PRINT
+  code->Print(std::cout);
+#endif
+  F3 f = FUNCTION_CAST<F3>(code->entry());
+  for (size_t i = 0; i < 128; ++i) {
+    int32_t r, x = rng->NextInt(), y = rng->NextInt();
+    Object* dummy = CALL_GENERATED_CODE(f, &r, x, y, 0, 0);
+    CHECK_EQ(static_cast<int32_t>(static_cast<int8_t>(x)) + y, r);
+    USE(dummy);
+  }
+}
+
+
+TEST(sxth) {
+  CcTest::InitializeVM();
+  Isolate* const isolate = CcTest::i_isolate();
+  HandleScope scope(isolate);
+  RandomNumberGenerator* const rng = isolate->random_number_generator();
+  Assembler assm(isolate, nullptr, 0);
+  __ sxth(r1, r1);
+  __ str(r1, MemOperand(r0));
+  __ bx(lr);
+  CodeDesc desc;
+  assm.GetCode(&desc);
+  Handle<Code> code = isolate->factory()->NewCode(
+      desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+#ifdef OBJECT_PRINT
+  code->Print(std::cout);
+#endif
+  F3 f = FUNCTION_CAST<F3>(code->entry());
+  for (size_t i = 0; i < 128; ++i) {
+    int32_t r, x = rng->NextInt();
+    Object* dummy = CALL_GENERATED_CODE(f, &r, x, 0, 0, 0);
+    CHECK_EQ(static_cast<int32_t>(static_cast<int16_t>(x)), r);
+    USE(dummy);
+  }
+}
+
+
+TEST(sxtah) {
+  CcTest::InitializeVM();
+  Isolate* const isolate = CcTest::i_isolate();
+  HandleScope scope(isolate);
+  RandomNumberGenerator* const rng = isolate->random_number_generator();
+  Assembler assm(isolate, nullptr, 0);
+  __ sxtah(r1, r2, r1);
+  __ str(r1, MemOperand(r0));
+  __ bx(lr);
+  CodeDesc desc;
+  assm.GetCode(&desc);
+  Handle<Code> code = isolate->factory()->NewCode(
+      desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+#ifdef OBJECT_PRINT
+  code->Print(std::cout);
+#endif
+  F3 f = FUNCTION_CAST<F3>(code->entry());
+  for (size_t i = 0; i < 128; ++i) {
+    int32_t r, x = rng->NextInt(), y = rng->NextInt();
+    Object* dummy = CALL_GENERATED_CODE(f, &r, x, y, 0, 0);
+    CHECK_EQ(static_cast<int32_t>(static_cast<int16_t>(x)) + y, r);
+    USE(dummy);
+  }
+}
+
+
+TEST(uxtb) {
+  CcTest::InitializeVM();
+  Isolate* const isolate = CcTest::i_isolate();
+  HandleScope scope(isolate);
+  RandomNumberGenerator* const rng = isolate->random_number_generator();
+  Assembler assm(isolate, nullptr, 0);
+  __ uxtb(r1, r1);
+  __ str(r1, MemOperand(r0));
+  __ bx(lr);
+  CodeDesc desc;
+  assm.GetCode(&desc);
+  Handle<Code> code = isolate->factory()->NewCode(
+      desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+#ifdef OBJECT_PRINT
+  code->Print(std::cout);
+#endif
+  F3 f = FUNCTION_CAST<F3>(code->entry());
+  for (size_t i = 0; i < 128; ++i) {
+    int32_t r, x = rng->NextInt();
+    Object* dummy = CALL_GENERATED_CODE(f, &r, x, 0, 0, 0);
+    CHECK_EQ(static_cast<int32_t>(static_cast<uint8_t>(x)), r);
+    USE(dummy);
+  }
+}
+
+
+TEST(uxtab) {
+  CcTest::InitializeVM();
+  Isolate* const isolate = CcTest::i_isolate();
+  HandleScope scope(isolate);
+  RandomNumberGenerator* const rng = isolate->random_number_generator();
+  Assembler assm(isolate, nullptr, 0);
+  __ uxtab(r1, r2, r1);
+  __ str(r1, MemOperand(r0));
+  __ bx(lr);
+  CodeDesc desc;
+  assm.GetCode(&desc);
+  Handle<Code> code = isolate->factory()->NewCode(
+      desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+#ifdef OBJECT_PRINT
+  code->Print(std::cout);
+#endif
+  F3 f = FUNCTION_CAST<F3>(code->entry());
+  for (size_t i = 0; i < 128; ++i) {
+    int32_t r, x = rng->NextInt(), y = rng->NextInt();
+    Object* dummy = CALL_GENERATED_CODE(f, &r, x, y, 0, 0);
+    CHECK_EQ(static_cast<int32_t>(static_cast<uint8_t>(x)) + y, r);
+    USE(dummy);
+  }
+}
+
+
+TEST(uxth) {
+  CcTest::InitializeVM();
+  Isolate* const isolate = CcTest::i_isolate();
+  HandleScope scope(isolate);
+  RandomNumberGenerator* const rng = isolate->random_number_generator();
+  Assembler assm(isolate, nullptr, 0);
+  __ uxth(r1, r1);
+  __ str(r1, MemOperand(r0));
+  __ bx(lr);
+  CodeDesc desc;
+  assm.GetCode(&desc);
+  Handle<Code> code = isolate->factory()->NewCode(
+      desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+#ifdef OBJECT_PRINT
+  code->Print(std::cout);
+#endif
+  F3 f = FUNCTION_CAST<F3>(code->entry());
+  for (size_t i = 0; i < 128; ++i) {
+    int32_t r, x = rng->NextInt();
+    Object* dummy = CALL_GENERATED_CODE(f, &r, x, 0, 0, 0);
+    CHECK_EQ(static_cast<int32_t>(static_cast<uint16_t>(x)), r);
+    USE(dummy);
+  }
+}
+
+
+TEST(uxtah) {
+  CcTest::InitializeVM();
+  Isolate* const isolate = CcTest::i_isolate();
+  HandleScope scope(isolate);
+  RandomNumberGenerator* const rng = isolate->random_number_generator();
+  Assembler assm(isolate, nullptr, 0);
+  __ uxtah(r1, r2, r1);
+  __ str(r1, MemOperand(r0));
+  __ bx(lr);
+  CodeDesc desc;
+  assm.GetCode(&desc);
+  Handle<Code> code = isolate->factory()->NewCode(
+      desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+#ifdef OBJECT_PRINT
+  code->Print(std::cout);
+#endif
+  F3 f = FUNCTION_CAST<F3>(code->entry());
+  for (size_t i = 0; i < 128; ++i) {
+    int32_t r, x = rng->NextInt(), y = rng->NextInt();
+    Object* dummy = CALL_GENERATED_CODE(f, &r, x, y, 0, 0);
+    CHECK_EQ(static_cast<int32_t>(static_cast<uint16_t>(x)) + y, r);
+    USE(dummy);
+  }
+}
+
+
 TEST(code_relative_offset) {
   // Test extracting the offset of a label from the beginning of the code
   // in a register.
diff --git a/test/cctest/test-decls.cc b/test/cctest/test-decls.cc
index f2ccdab..0c13a68 100644
--- a/test/cctest/test-decls.cc
+++ b/test/cctest/test-decls.cc
@@ -644,6 +644,44 @@
 }
 
 
+TEST(CrossScriptReferences_Simple) {
+  i::FLAG_harmony_scoping = true;
+  i::FLAG_use_strict = true;
+
+  v8::Isolate* isolate = CcTest::isolate();
+  HandleScope scope(isolate);
+
+  {
+    SimpleContext context;
+    context.Check("let x = 1; x", EXPECT_RESULT, Number::New(isolate, 1));
+    context.Check("let x = 5; x", EXPECT_EXCEPTION);
+  }
+}
+
+
+TEST(CrossScriptReferences_Simple2) {
+  i::FLAG_harmony_scoping = true;
+  i::FLAG_use_strict = true;
+
+  v8::Isolate* isolate = CcTest::isolate();
+  HandleScope scope(isolate);
+
+  for (int k = 0; k < 100; k++) {
+    SimpleContext context;
+    bool cond = (k % 2) == 0;
+    if (cond) {
+      context.Check("let x = 1; x", EXPECT_RESULT, Number::New(isolate, 1));
+      context.Check("let z = 4; z", EXPECT_RESULT, Number::New(isolate, 4));
+    } else {
+      context.Check("let z = 1; z", EXPECT_RESULT, Number::New(isolate, 1));
+      context.Check("let x = 4; x", EXPECT_RESULT, Number::New(isolate, 4));
+    }
+    context.Check("let y = 2; x", EXPECT_RESULT,
+                  Number::New(isolate, cond ? 1 : 4));
+  }
+}
+
+
 TEST(CrossScriptReferencesHarmony) {
   i::FLAG_use_strict = true;
   i::FLAG_harmony_scoping = true;
@@ -704,12 +742,12 @@
       SimpleContext context;
       context.Check(firsts[i], EXPECT_RESULT,
                     Number::New(CcTest::isolate(), 1));
-      // TODO(rossberg): All tests should actually be errors in Harmony,
-      // but we currently do not detect the cases where the first declaration
-      // is not lexical.
-      context.Check(seconds[j],
-                    i < 2 ? EXPECT_RESULT : EXPECT_ERROR,
-                    Number::New(CcTest::isolate(), 2));
+      bool success_case = i < 2 && j < 2;
+      Local<Value> success_result;
+      if (success_case) success_result = Number::New(CcTest::isolate(), 2);
+
+      context.Check(seconds[j], success_case ? EXPECT_RESULT : EXPECT_EXCEPTION,
+                    success_result);
     }
   }
 }
@@ -740,9 +778,86 @@
         EXPECT_RESULT, Number::New(CcTest::isolate(), 1));
     context.Check(
         "'use strict';"
-        "g({});"
+        "g({});0",
+        EXPECT_RESULT, Number::New(CcTest::isolate(), 0));
+    context.Check("f({})", EXPECT_RESULT, Number::New(CcTest::isolate(), 15));
+    context.Check("h({})", EXPECT_RESULT, number_string);
+  }
+}
+
+
+TEST(CrossScriptGlobal) {
+  i::FLAG_harmony_scoping = true;
+
+  HandleScope handle_scope(CcTest::isolate());
+  {
+    SimpleContext context;
+
+    context.Check(
+        "var global = this;"
+        "global.x = 255;"
         "x",
+        EXPECT_RESULT, Number::New(CcTest::isolate(), 255));
+    context.Check(
+        "'use strict';"
+        "let x = 1;"
+        "global.x",
+        EXPECT_RESULT, Number::New(CcTest::isolate(), 255));
+    context.Check("global.x = 15; x", EXPECT_RESULT,
+                  Number::New(CcTest::isolate(), 1));
+    context.Check("x = 221; global.x", EXPECT_RESULT,
+                  Number::New(CcTest::isolate(), 15));
+    context.Check(
+        "z = 15;"
+        "function f() { return z; };"
+        "for (var k = 0; k < 3; k++) { f(); }"
+        "f()",
         EXPECT_RESULT, Number::New(CcTest::isolate(), 15));
+    context.Check(
+        "'use strict';"
+        "let z = 5; f()",
+        EXPECT_RESULT, Number::New(CcTest::isolate(), 5));
+    context.Check(
+        "function f() { konst = 10; return konst; };"
+        "f()",
+        EXPECT_RESULT, Number::New(CcTest::isolate(), 10));
+    context.Check(
+        "'use strict';"
+        "const konst = 255;"
+        "f()",
+        EXPECT_EXCEPTION);
+  }
+}
+
+
+TEST(CrossScriptStaticLookupUndeclared) {
+  i::FLAG_harmony_scoping = true;
+
+  HandleScope handle_scope(CcTest::isolate());
+
+  {
+    SimpleContext context;
+    Local<String> undefined_string = String::NewFromUtf8(
+        CcTest::isolate(), "undefined", String::kInternalizedString);
+    Local<String> number_string = String::NewFromUtf8(
+        CcTest::isolate(), "number", String::kInternalizedString);
+
+    context.Check(
+        "function f(o) { return x; }"
+        "function g(o) { x = 15; }"
+        "function h(o) { return typeof x; }",
+        EXPECT_RESULT, Undefined(CcTest::isolate()));
+    context.Check("h({})", EXPECT_RESULT, undefined_string);
+    context.Check(
+        "'use strict';"
+        "let x = 1;"
+        "f({})",
+        EXPECT_RESULT, Number::New(CcTest::isolate(), 1));
+    context.Check(
+        "'use strict';"
+        "g({});x",
+        EXPECT_RESULT, Number::New(CcTest::isolate(), 15));
+    context.Check("h({})", EXPECT_RESULT, number_string);
     context.Check("f({})", EXPECT_RESULT, Number::New(CcTest::isolate(), 15));
     context.Check("h({})", EXPECT_RESULT, number_string);
   }
diff --git a/test/cctest/test-disasm-arm.cc b/test/cctest/test-disasm-arm.cc
index 39356b1..095c636 100644
--- a/test/cctest/test-disasm-arm.cc
+++ b/test/cctest/test-disasm-arm.cc
@@ -410,14 +410,32 @@
             "e6843895       pkhbt r3, r4, r5, lsl #17");
     COMPARE(pkhtb(r3, r4, Operand(r5, ASR, 17)),
             "e68438d5       pkhtb r3, r4, r5, asr #17");
-    COMPARE(uxtb(r9, Operand(r10, ROR, 0)),
-            "e6ef907a       uxtb r9, r10");
-    COMPARE(uxtb(r3, Operand(r4, ROR, 8)),
-            "e6ef3474       uxtb r3, r4, ror #8");
-    COMPARE(uxtab(r3, r4, Operand(r5, ROR, 8)),
-            "e6e43475       uxtab r3, r4, r5, ror #8");
-    COMPARE(uxtb16(r3, Operand(r4, ROR, 8)),
-            "e6cf3474       uxtb16 r3, r4, ror #8");
+
+    COMPARE(sxtb(r1, r7, 0, eq), "06af1077       sxtbeq r1, r7");
+    COMPARE(sxtb(r0, r0, 8, ne), "16af0470       sxtbne r0, r0, ror #8");
+    COMPARE(sxtb(r9, r10, 16), "e6af987a       sxtb r9, r10, ror #16");
+    COMPARE(sxtb(r4, r3, 24), "e6af4c73       sxtb r4, r3, ror #24");
+
+    COMPARE(sxtab(r3, r4, r5), "e6a43075       sxtab r3, r4, r5");
+
+    COMPARE(sxth(r5, r0), "e6bf5070       sxth r5, r0");
+    COMPARE(sxth(r5, r9, 8), "e6bf5479       sxth r5, r9, ror #8");
+    COMPARE(sxth(r5, r9, 16, hi), "86bf5879       sxthhi r5, r9, ror #16");
+    COMPARE(sxth(r8, r9, 24, cc), "36bf8c79       sxthcc r8, r9, ror #24");
+
+    COMPARE(sxtah(r3, r4, r5, 16), "e6b43875       sxtah r3, r4, r5, ror #16");
+
+    COMPARE(uxtb(r9, r10), "e6ef907a       uxtb r9, r10");
+    COMPARE(uxtb(r3, r4, 8), "e6ef3474       uxtb r3, r4, ror #8");
+
+    COMPARE(uxtab(r3, r4, r5, 8), "e6e43475       uxtab r3, r4, r5, ror #8");
+
+    COMPARE(uxtb16(r3, r4, 8), "e6cf3474       uxtb16 r3, r4, ror #8");
+
+    COMPARE(uxth(r9, r10), "e6ff907a       uxth r9, r10");
+    COMPARE(uxth(r3, r4, 8), "e6ff3474       uxth r3, r4, ror #8");
+
+    COMPARE(uxtah(r3, r4, r5, 24), "e6f43c75       uxtah r3, r4, r5, ror #24");
   }
 
   COMPARE(smmla(r0, r1, r2, r3), "e7503211       smmla r0, r1, r2, r3");
diff --git a/test/cctest/test-parsing.cc b/test/cctest/test-parsing.cc
index 1909b3e..79d7654 100644
--- a/test/cctest/test-parsing.cc
+++ b/test/cctest/test-parsing.cc
@@ -4302,7 +4302,9 @@
     "var foob\\u123r = 0;",
     "var \\u123roo = 0;",
     "\"foob\\u123rr\"",
-    "/regex/g\\u123r",
+    // No escapes are allowed in regexp flags.
+    "/regex/\\u0069g",
+    "/regex/\\u006g",
     NULL};
   RunParserSyncTest(context_data, data, kError);
 }
diff --git a/test/mjsunit/asm/sign-extend.js b/test/mjsunit/asm/sign-extend.js
new file mode 100644
index 0000000..62d8d34
--- /dev/null
+++ b/test/mjsunit/asm/sign-extend.js
@@ -0,0 +1,45 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+var stdlib = this;
+var buffer = new ArrayBuffer(64 * 1024);
+var foreign = {};
+
+
+var sext8 = (function Module(stdlib, foreign, heap) {
+  "use asm";
+  function sext8(i) {
+    i = i|0;
+    i = i << 24 >> 24;
+    return i|0;
+  }
+  return { sext8: sext8 };
+})(stdlib, foreign, buffer).sext8;
+
+assertEquals(-128, sext8(128));
+assertEquals(-1, sext8(-1));
+assertEquals(-1, sext8(255));
+assertEquals(0, sext8(0));
+assertEquals(0, sext8(256));
+assertEquals(42, sext8(42));
+assertEquals(127, sext8(127));
+
+
+var sext16 = (function Module(stdlib, foreign, heap) {
+  "use asm";
+  function sext16(i) {
+    i = i|0;
+    i = i << 16 >> 16;
+    return i|0;
+  }
+  return { sext16: sext16 };
+})(stdlib, foreign, buffer).sext16;
+
+assertEquals(-32768, sext16(32768));
+assertEquals(-1, sext16(-1));
+assertEquals(-1, sext16(65535));
+assertEquals(0, sext16(0));
+assertEquals(0, sext16(65536));
+assertEquals(128, sext16(128));
+assertEquals(32767, sext16(32767));
diff --git a/test/mjsunit/asm/zero-extend.js b/test/mjsunit/asm/zero-extend.js
new file mode 100644
index 0000000..a1f9da6
--- /dev/null
+++ b/test/mjsunit/asm/zero-extend.js
@@ -0,0 +1,37 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+var stdlib = this;
+var buffer = new ArrayBuffer(64 * 1024);
+var foreign = {};
+
+
+var zext8 = (function Module(stdlib, foreign, heap) {
+  "use asm";
+  function zext8(i) {
+    i = i|0;
+    return i & 0xff;
+  }
+  return { zext8: zext8 };
+})(stdlib, foreign, buffer).zext8;
+
+assertEquals(0, zext8(0));
+assertEquals(0, zext8(0x100));
+assertEquals(0xff, zext8(-1));
+assertEquals(0xff, zext8(0xff));
+
+
+var zext16 = (function Module(stdlib, foreign, heap) {
+  "use asm";
+  function zext16(i) {
+    i = i|0;
+    return i & 0xffff;
+  }
+  return { zext16: zext16 };
+})(stdlib, foreign, buffer).zext16;
+
+assertEquals(0, zext16(0));
+assertEquals(0, zext16(0x10000));
+assertEquals(0xffff, zext16(-1));
+assertEquals(0xffff, zext16(0xffff));
diff --git a/test/mjsunit/compiler/division-by-constant.js b/test/mjsunit/compiler/division-by-constant.js
index 0778e95..d3f3ac3 100644
--- a/test/mjsunit/compiler/division-by-constant.js
+++ b/test/mjsunit/compiler/division-by-constant.js
@@ -101,6 +101,7 @@
 
 // -----------------------------------------------------------------------------
 
+
 function TestDivisionLike(ref, construct, values, divisor) {
   // Define the function to test.
   var OptFun = new Function("dividend", construct(divisor));
@@ -111,12 +112,14 @@
   %OptimizeFunctionOnNextCall(OptFun);
   OptFun(13);
 
-  // Check results.
-  values.forEach(function(dividend) {
+  function checkDividend(dividend) {
     // Avoid deopt caused by overflow; we do not want to test this here.
     if (dividend === -2147483648 && divisor === -1) return;
     assertEquals(ref(dividend, divisor), OptFun(dividend));
-  });
+  }
+
+  // Check results.
+  values.forEach(checkDividend);
 }
 
 function Test(ref, construct) {
diff --git a/test/mjsunit/compiler/regress-uint8-deopt.js b/test/mjsunit/compiler/regress-uint8-deopt.js
new file mode 100644
index 0000000..ba2823f
--- /dev/null
+++ b/test/mjsunit/compiler/regress-uint8-deopt.js
@@ -0,0 +1,17 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --turbo-asm --turbo-deoptimization --allow-natives-syntax
+
+function Module(heap) {
+  "use asm";
+  var a = new Uint8Array(heap);
+  function f() {
+    var x = a[0] | 0;
+    %DeoptimizeFunction(f);
+    return x;
+  }
+  return f;
+}
+assertEquals(0, Module(new ArrayBuffer(1))());
diff --git a/test/mjsunit/harmony/classes.js b/test/mjsunit/harmony/classes.js
index 59371e4..8748f62 100644
--- a/test/mjsunit/harmony/classes.js
+++ b/test/mjsunit/harmony/classes.js
@@ -19,8 +19,16 @@
   assertEquals(Function.prototype, Object.getPrototypeOf(D));
   assertEquals('D', D.name);
 
+  class D2 { constructor() {} }
+  assertEquals('D2', D2.name);
+
+  // TODO(arv): The ES6 rules for naming anonymous functions require the
+  // name below to be 'E'.
   var E = class {}
-  assertEquals('', E.name);
+  assertEquals('', E.name);  // Should be 'E'.
+
+  var F = class { constructor() {} };
+  assertEquals('', F.name);  // Should be 'F'.
 })();
 
 
@@ -589,6 +597,33 @@
 })();
 
 
+(function TestDefaultConstructorNoCrash() {
+  // Regression test for https://code.google.com/p/v8/issues/detail?id=3661
+  class C {}
+  assertEquals(undefined, C());
+  assertEquals(undefined, C(1));
+  assertTrue(new C() instanceof C);
+  assertTrue(new C(1) instanceof C);
+})();
+
+
+(function TestDefaultConstructor() {
+  var calls = 0;
+  class Base {
+    constructor() {
+      calls++;
+    }
+  }
+  class Derived extends Base {}
+  var object = new Derived;
+  assertEquals(1, calls);
+
+  calls = 0;
+  Derived();
+  assertEquals(1, calls);
+})();
+
+
 /* TODO(arv): Implement
 (function TestNameBindingInConstructor() {
   class C {
diff --git a/test/mjsunit/regress/regress-136048.js b/test/mjsunit/regress/regress-136048.js
index c9972e9..21ae622 100644
--- a/test/mjsunit/regress/regress-136048.js
+++ b/test/mjsunit/regress/regress-136048.js
@@ -26,9 +26,9 @@
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
 try {
-  /foo/\u0069
+  eval("/foo/\\u0069")
 } catch (e) {
   assertEquals(
-      "SyntaxError: Invalid flags supplied to RegExp constructor '\\u0069'",
+      "SyntaxError: Invalid regular expression flags",
       e.toString());
 }
diff --git a/test/unittests/compiler/arm/instruction-selector-arm-unittest.cc b/test/unittests/compiler/arm/instruction-selector-arm-unittest.cc
index 6e4306de..f922d62 100644
--- a/test/unittests/compiler/arm/instruction-selector-arm-unittest.cc
+++ b/test/unittests/compiler/arm/instruction-selector-arm-unittest.cc
@@ -1558,6 +1558,150 @@
 }
 
 
+TEST_F(InstructionSelectorTest, Int32AddWithWord32And) {
+  {
+    StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+    Node* const p0 = m.Parameter(0);
+    Node* const p1 = m.Parameter(1);
+    Node* const r = m.Int32Add(m.Word32And(p0, m.Int32Constant(0xff)), p1);
+    m.Return(r);
+    Stream s = m.Build();
+    ASSERT_EQ(1U, s.size());
+    EXPECT_EQ(kArmUxtab, s[0]->arch_opcode());
+    ASSERT_EQ(3U, s[0]->InputCount());
+    EXPECT_EQ(s.ToVreg(p1), s.ToVreg(s[0]->InputAt(0)));
+    EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(1)));
+    EXPECT_EQ(0, s.ToInt32(s[0]->InputAt(2)));
+    ASSERT_EQ(1U, s[0]->OutputCount());
+    EXPECT_EQ(s.ToVreg(r), s.ToVreg(s[0]->Output()));
+  }
+  {
+    StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+    Node* const p0 = m.Parameter(0);
+    Node* const p1 = m.Parameter(1);
+    Node* const r = m.Int32Add(p1, m.Word32And(p0, m.Int32Constant(0xff)));
+    m.Return(r);
+    Stream s = m.Build();
+    ASSERT_EQ(1U, s.size());
+    EXPECT_EQ(kArmUxtab, s[0]->arch_opcode());
+    ASSERT_EQ(3U, s[0]->InputCount());
+    EXPECT_EQ(s.ToVreg(p1), s.ToVreg(s[0]->InputAt(0)));
+    EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(1)));
+    EXPECT_EQ(0, s.ToInt32(s[0]->InputAt(2)));
+    ASSERT_EQ(1U, s[0]->OutputCount());
+    EXPECT_EQ(s.ToVreg(r), s.ToVreg(s[0]->Output()));
+  }
+  {
+    StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+    Node* const p0 = m.Parameter(0);
+    Node* const p1 = m.Parameter(1);
+    Node* const r = m.Int32Add(m.Word32And(p0, m.Int32Constant(0xffff)), p1);
+    m.Return(r);
+    Stream s = m.Build();
+    ASSERT_EQ(1U, s.size());
+    EXPECT_EQ(kArmUxtah, s[0]->arch_opcode());
+    ASSERT_EQ(3U, s[0]->InputCount());
+    EXPECT_EQ(s.ToVreg(p1), s.ToVreg(s[0]->InputAt(0)));
+    EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(1)));
+    EXPECT_EQ(0, s.ToInt32(s[0]->InputAt(2)));
+    ASSERT_EQ(1U, s[0]->OutputCount());
+    EXPECT_EQ(s.ToVreg(r), s.ToVreg(s[0]->Output()));
+  }
+  {
+    StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+    Node* const p0 = m.Parameter(0);
+    Node* const p1 = m.Parameter(1);
+    Node* const r = m.Int32Add(p1, m.Word32And(p0, m.Int32Constant(0xffff)));
+    m.Return(r);
+    Stream s = m.Build();
+    ASSERT_EQ(1U, s.size());
+    EXPECT_EQ(kArmUxtah, s[0]->arch_opcode());
+    ASSERT_EQ(3U, s[0]->InputCount());
+    EXPECT_EQ(s.ToVreg(p1), s.ToVreg(s[0]->InputAt(0)));
+    EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(1)));
+    EXPECT_EQ(0, s.ToInt32(s[0]->InputAt(2)));
+    ASSERT_EQ(1U, s[0]->OutputCount());
+    EXPECT_EQ(s.ToVreg(r), s.ToVreg(s[0]->Output()));
+  }
+}
+
+
+TEST_F(InstructionSelectorTest, Int32AddWithWord32SarWithWord32Shl) {
+  {
+    StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+    Node* const p0 = m.Parameter(0);
+    Node* const p1 = m.Parameter(1);
+    Node* const r = m.Int32Add(
+        m.Word32Sar(m.Word32Shl(p0, m.Int32Constant(24)), m.Int32Constant(24)),
+        p1);
+    m.Return(r);
+    Stream s = m.Build();
+    ASSERT_EQ(1U, s.size());
+    EXPECT_EQ(kArmSxtab, s[0]->arch_opcode());
+    ASSERT_EQ(3U, s[0]->InputCount());
+    EXPECT_EQ(s.ToVreg(p1), s.ToVreg(s[0]->InputAt(0)));
+    EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(1)));
+    EXPECT_EQ(0, s.ToInt32(s[0]->InputAt(2)));
+    ASSERT_EQ(1U, s[0]->OutputCount());
+    EXPECT_EQ(s.ToVreg(r), s.ToVreg(s[0]->Output()));
+  }
+  {
+    StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+    Node* const p0 = m.Parameter(0);
+    Node* const p1 = m.Parameter(1);
+    Node* const r = m.Int32Add(
+        p1,
+        m.Word32Sar(m.Word32Shl(p0, m.Int32Constant(24)), m.Int32Constant(24)));
+    m.Return(r);
+    Stream s = m.Build();
+    ASSERT_EQ(1U, s.size());
+    EXPECT_EQ(kArmSxtab, s[0]->arch_opcode());
+    ASSERT_EQ(3U, s[0]->InputCount());
+    EXPECT_EQ(s.ToVreg(p1), s.ToVreg(s[0]->InputAt(0)));
+    EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(1)));
+    EXPECT_EQ(0, s.ToInt32(s[0]->InputAt(2)));
+    ASSERT_EQ(1U, s[0]->OutputCount());
+    EXPECT_EQ(s.ToVreg(r), s.ToVreg(s[0]->Output()));
+  }
+  {
+    StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+    Node* const p0 = m.Parameter(0);
+    Node* const p1 = m.Parameter(1);
+    Node* const r = m.Int32Add(
+        m.Word32Sar(m.Word32Shl(p0, m.Int32Constant(16)), m.Int32Constant(16)),
+        p1);
+    m.Return(r);
+    Stream s = m.Build();
+    ASSERT_EQ(1U, s.size());
+    EXPECT_EQ(kArmSxtah, s[0]->arch_opcode());
+    ASSERT_EQ(3U, s[0]->InputCount());
+    EXPECT_EQ(s.ToVreg(p1), s.ToVreg(s[0]->InputAt(0)));
+    EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(1)));
+    EXPECT_EQ(0, s.ToInt32(s[0]->InputAt(2)));
+    ASSERT_EQ(1U, s[0]->OutputCount());
+    EXPECT_EQ(s.ToVreg(r), s.ToVreg(s[0]->Output()));
+  }
+  {
+    StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+    Node* const p0 = m.Parameter(0);
+    Node* const p1 = m.Parameter(1);
+    Node* const r = m.Int32Add(
+        p1,
+        m.Word32Sar(m.Word32Shl(p0, m.Int32Constant(16)), m.Int32Constant(16)));
+    m.Return(r);
+    Stream s = m.Build();
+    ASSERT_EQ(1U, s.size());
+    EXPECT_EQ(kArmSxtah, s[0]->arch_opcode());
+    ASSERT_EQ(3U, s[0]->InputCount());
+    EXPECT_EQ(s.ToVreg(p1), s.ToVreg(s[0]->InputAt(0)));
+    EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(1)));
+    EXPECT_EQ(0, s.ToInt32(s[0]->InputAt(2)));
+    ASSERT_EQ(1U, s[0]->OutputCount());
+    EXPECT_EQ(s.ToVreg(r), s.ToVreg(s[0]->Output()));
+  }
+}
+
+
 TEST_F(InstructionSelectorTest, Int32SubWithInt32Mul) {
   StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32, kMachInt32);
   m.Return(
@@ -1943,6 +2087,72 @@
 }
 
 
+TEST_F(InstructionSelectorTest, Word32AndWith0xffff) {
+  {
+    StreamBuilder m(this, kMachInt32, kMachInt32);
+    Node* const p0 = m.Parameter(0);
+    Node* const r = m.Word32And(p0, m.Int32Constant(0xffff));
+    m.Return(r);
+    Stream s = m.Build();
+    ASSERT_EQ(1U, s.size());
+    EXPECT_EQ(kArmUxth, s[0]->arch_opcode());
+    ASSERT_EQ(2U, s[0]->InputCount());
+    EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
+    EXPECT_EQ(0, s.ToInt32(s[0]->InputAt(1)));
+    ASSERT_EQ(1U, s[0]->OutputCount());
+    EXPECT_EQ(s.ToVreg(r), s.ToVreg(s[0]->Output()));
+  }
+  {
+    StreamBuilder m(this, kMachInt32, kMachInt32);
+    Node* const p0 = m.Parameter(0);
+    Node* const r = m.Word32And(m.Int32Constant(0xffff), p0);
+    m.Return(r);
+    Stream s = m.Build();
+    ASSERT_EQ(1U, s.size());
+    EXPECT_EQ(kArmUxth, s[0]->arch_opcode());
+    ASSERT_EQ(2U, s[0]->InputCount());
+    EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
+    EXPECT_EQ(0, s.ToInt32(s[0]->InputAt(1)));
+    ASSERT_EQ(1U, s[0]->OutputCount());
+    EXPECT_EQ(s.ToVreg(r), s.ToVreg(s[0]->Output()));
+  }
+}
+
+
+TEST_F(InstructionSelectorTest, Word32SarWithWord32Shl) {
+  {
+    StreamBuilder m(this, kMachInt32, kMachInt32);
+    Node* const p0 = m.Parameter(0);
+    Node* const r =
+        m.Word32Sar(m.Word32Shl(p0, m.Int32Constant(24)), m.Int32Constant(24));
+    m.Return(r);
+    Stream s = m.Build();
+    ASSERT_EQ(1U, s.size());
+    EXPECT_EQ(kArmSxtb, s[0]->arch_opcode());
+    ASSERT_EQ(2U, s[0]->InputCount());
+    EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
+    EXPECT_EQ(0, s.ToInt32(s[0]->InputAt(1)));
+    ASSERT_EQ(1U, s[0]->OutputCount());
+    EXPECT_EQ(s.ToVreg(r), s.ToVreg(s[0]->Output()));
+  }
+  {
+    StreamBuilder m(this, kMachInt32, kMachInt32);
+    Node* const p0 = m.Parameter(0);
+    Node* const r =
+        m.Word32Sar(m.Word32Shl(p0, m.Int32Constant(16)), m.Int32Constant(16));
+    m.Return(r);
+    Stream s = m.Build();
+    ASSERT_EQ(1U, s.size());
+    EXPECT_EQ(kArmSxth, s[0]->arch_opcode());
+    ASSERT_EQ(2U, s[0]->InputCount());
+    EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
+    EXPECT_EQ(0, s.ToInt32(s[0]->InputAt(1)));
+    ASSERT_EQ(1U, s[0]->OutputCount());
+    EXPECT_EQ(s.ToVreg(r), s.ToVreg(s[0]->Output()));
+  }
+}
+
+
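
The new selector tests pin down the patterns that now fold into single ARM extend instructions: AND with 0xff or 0xffff selects uxtb/uxth (uxtab/uxtah when fused into an add), and a shift-left/arithmetic-shift-right pair by 24 or 16 selects sxtb/sxth (sxtab/sxtah when fused). In source terms these correspond to the asm.js idioms below, in the style of the mjsunit/asm tests above (a sketch; the names are illustrative):

function Module() {
  "use asm";
  function madd8(x, y) {
    x = x|0; y = y|0;
    return ((x & 0xff) + y)|0;         // Word32And + Int32Add -> uxtab
  }
  function sadd16(x, y) {
    x = x|0; y = y|0;
    return (((x << 16) >> 16) + y)|0;  // Sar(Shl) + Int32Add -> sxtah
  }
  return { madd8: madd8, sadd16: sadd16 };
}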
 TEST_F(InstructionSelectorTest, Word32ShrWithWord32AndWithImmediateForARMv7) {
   TRACED_FORRANGE(int32_t, lsb, 0, 31) {
     TRACED_FORRANGE(int32_t, width, 1, 32 - lsb) {
diff --git a/test/unittests/compiler/arm64/instruction-selector-arm64-unittest.cc b/test/unittests/compiler/arm64/instruction-selector-arm64-unittest.cc
index 96b8a83..eee9c58 100644
--- a/test/unittests/compiler/arm64/instruction-selector-arm64-unittest.cc
+++ b/test/unittests/compiler/arm64/instruction-selector-arm64-unittest.cc
@@ -1229,6 +1229,76 @@
                         ::testing::ValuesIn(kShiftInstructions));
 
 
+TEST_F(InstructionSelectorTest, Word64ShlWithChangeInt32ToInt64) {
+  TRACED_FORRANGE(int64_t, x, 32, 63) {
+    StreamBuilder m(this, kMachInt64, kMachInt32);
+    Node* const p0 = m.Parameter(0);
+    Node* const n = m.Word64Shl(m.ChangeInt32ToInt64(p0), m.Int64Constant(x));
+    m.Return(n);
+    Stream s = m.Build();
+    ASSERT_EQ(1U, s.size());
+    EXPECT_EQ(kArm64Lsl, s[0]->arch_opcode());
+    ASSERT_EQ(2U, s[0]->InputCount());
+    EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
+    EXPECT_EQ(x, s.ToInt64(s[0]->InputAt(1)));
+    ASSERT_EQ(1U, s[0]->OutputCount());
+    EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[0]->Output()));
+  }
+}
+
+
+TEST_F(InstructionSelectorTest, Word64ShlWithChangeUint32ToUint64) {
+  TRACED_FORRANGE(int64_t, x, 32, 63) {
+    StreamBuilder m(this, kMachInt64, kMachUint32);
+    Node* const p0 = m.Parameter(0);
+    Node* const n = m.Word64Shl(m.ChangeUint32ToUint64(p0), m.Int64Constant(x));
+    m.Return(n);
+    Stream s = m.Build();
+    ASSERT_EQ(1U, s.size());
+    EXPECT_EQ(kArm64Lsl, s[0]->arch_opcode());
+    ASSERT_EQ(2U, s[0]->InputCount());
+    EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
+    EXPECT_EQ(x, s.ToInt64(s[0]->InputAt(1)));
+    ASSERT_EQ(1U, s[0]->OutputCount());
+    EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[0]->Output()));
+  }
+}
+
+
+TEST_F(InstructionSelectorTest, TruncateInt64ToInt32WithWord64Sar) {
+  StreamBuilder m(this, kMachInt32, kMachInt64);
+  Node* const p = m.Parameter(0);
+  Node* const t = m.TruncateInt64ToInt32(m.Word64Sar(p, m.Int64Constant(32)));
+  m.Return(t);
+  Stream s = m.Build();
+  ASSERT_EQ(1U, s.size());
+  EXPECT_EQ(kArm64Lsr, s[0]->arch_opcode());
+  ASSERT_EQ(2U, s[0]->InputCount());
+  EXPECT_EQ(s.ToVreg(p), s.ToVreg(s[0]->InputAt(0)));
+  EXPECT_EQ(32, s.ToInt64(s[0]->InputAt(1)));
+  ASSERT_EQ(1U, s[0]->OutputCount());
+  EXPECT_EQ(s.ToVreg(t), s.ToVreg(s[0]->OutputAt(0)));
+}
+
+
+TEST_F(InstructionSelectorTest, TruncateInt64ToInt32WithWord64Shr) {
+  TRACED_FORRANGE(int64_t, x, 32, 63) {
+    StreamBuilder m(this, kMachInt32, kMachInt64);
+    Node* const p = m.Parameter(0);
+    Node* const t = m.TruncateInt64ToInt32(m.Word64Shr(p, m.Int64Constant(x)));
+    m.Return(t);
+    Stream s = m.Build();
+    ASSERT_EQ(1U, s.size());
+    EXPECT_EQ(kArm64Lsr, s[0]->arch_opcode());
+    ASSERT_EQ(2U, s[0]->InputCount());
+    EXPECT_EQ(s.ToVreg(p), s.ToVreg(s[0]->InputAt(0)));
+    EXPECT_EQ(x, s.ToInt64(s[0]->InputAt(1)));
+    ASSERT_EQ(1U, s[0]->OutputCount());
+    EXPECT_EQ(s.ToVreg(t), s.ToVreg(s[0]->OutputAt(0)));
+  }
+}
+
+
 // -----------------------------------------------------------------------------
 // Mul and Div instructions.
 
diff --git a/test/unittests/compiler/node-matchers-unittest.cc b/test/unittests/compiler/node-matchers-unittest.cc
new file mode 100644
index 0000000..843a44e
--- /dev/null
+++ b/test/unittests/compiler/node-matchers-unittest.cc
@@ -0,0 +1,317 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/common-operator.h"
+#include "src/compiler/graph.h"
+#include "src/compiler/machine-operator.h"
+#include "src/compiler/node.h"
+#include "src/compiler/node-matchers.h"
+#include "src/compiler/opcodes.h"
+
+#include "test/unittests/compiler/graph-unittest.h"
+#include "test/unittests/test-utils.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+class NodeMatcherTest : public GraphTest {
+ public:
+  NodeMatcherTest() {}
+  virtual ~NodeMatcherTest() {}
+
+  MachineOperatorBuilder* machine() { return &machine_; }
+
+ private:
+  MachineOperatorBuilder machine_;
+};
+
+namespace {
+
+void CheckScaledWithOffsetMatch(ScaledWithOffsetMatcher* matcher, Node* scaled,
+                                int scale_exponent, Node* offset,
+                                Node* constant) {
+  EXPECT_TRUE(matcher->matches());
+  EXPECT_EQ(scaled, matcher->scaled());
+  EXPECT_EQ(scale_exponent, matcher->scale_exponent());
+  EXPECT_EQ(offset, matcher->offset());
+  EXPECT_EQ(constant, matcher->constant());
+}
+}  // namespace
+
+
+TEST_F(NodeMatcherTest, ScaledWithOffsetMatcher) {
+  graph()->SetStart(graph()->NewNode(common()->Start(0)));
+
+  const Operator* c0_op = common()->Int32Constant(0);
+  Node* c0 = graph()->NewNode(c0_op);
+  USE(c0);
+  const Operator* c1_op = common()->Int32Constant(1);
+  Node* c1 = graph()->NewNode(c1_op);
+  USE(c1);
+  const Operator* c2_op = common()->Int32Constant(2);
+  Node* c2 = graph()->NewNode(c2_op);
+  USE(c2);
+  const Operator* c3_op = common()->Int32Constant(3);
+  Node* c3 = graph()->NewNode(c3_op);
+  USE(c3);
+  const Operator* c4_op = common()->Int32Constant(4);
+  Node* c4 = graph()->NewNode(c4_op);
+  USE(c4);
+  const Operator* c8_op = common()->Int32Constant(8);
+  Node* c8 = graph()->NewNode(c8_op);
+  USE(c8);
+
+  const Operator* o0_op = common()->Parameter(0);
+  Node* o0 = graph()->NewNode(o0_op, graph()->start());
+  USE(o0);
+  const Operator* o1_op = common()->Parameter(1);
+  Node* o1 = graph()->NewNode(o1_op, graph()->start());
+  USE(o1);
+
+  const Operator* p1_op = common()->Parameter(3);
+  Node* p1 = graph()->NewNode(p1_op, graph()->start());
+  USE(p1);
+
+  const Operator* a_op = machine()->Int32Add();
+  USE(a_op);
+
+  const Operator* m_op = machine()->Int32Mul();
+  Node* m1 = graph()->NewNode(m_op, p1, c1);
+  Node* m2 = graph()->NewNode(m_op, p1, c2);
+  Node* m4 = graph()->NewNode(m_op, p1, c4);
+  Node* m8 = graph()->NewNode(m_op, p1, c8);
+  Node* m3 = graph()->NewNode(m_op, p1, c3);
+
+  const Operator* s_op = machine()->Word32Shl();
+  Node* s0 = graph()->NewNode(s_op, p1, c0);
+  Node* s1 = graph()->NewNode(s_op, p1, c1);
+  Node* s2 = graph()->NewNode(s_op, p1, c2);
+  Node* s3 = graph()->NewNode(s_op, p1, c3);
+  Node* s4 = graph()->NewNode(s_op, p1, c4);
+
+  // 1 INPUT
+
+  // The only relevant test case is checking for a non-match.
+  ScaledWithOffsetMatcher match0(c0);
+  EXPECT_FALSE(match0.matches());
+
+  // 2 INPUT
+
+  // (O0 + O1) -> [O1, 0, O0, NULL]
+  ScaledWithOffsetMatcher match1(graph()->NewNode(a_op, o0, o1));
+  CheckScaledWithOffsetMatch(&match1, o1, 0, o0, NULL);
+
+  // (O0 + C0) -> [NULL, 0, O0, C0]
+  ScaledWithOffsetMatcher match2(graph()->NewNode(a_op, o0, c0));
+  CheckScaledWithOffsetMatch(&match2, NULL, 0, o0, c0);
+
+  // (C0 + O0) -> [NULL, 0, O0, C0]
+  ScaledWithOffsetMatcher match3(graph()->NewNode(a_op, c0, o0));
+  CheckScaledWithOffsetMatch(&match3, NULL, 0, o0, c0);
+
+  // (O0 + M1) -> [p1, 0, O0, NULL]
+  ScaledWithOffsetMatcher match4(graph()->NewNode(a_op, o0, m1));
+  CheckScaledWithOffsetMatch(&match4, p1, 0, o0, NULL);
+
+  // (M1 + O0) -> [p1, 0, O0, NULL]
+  m1 = graph()->NewNode(m_op, p1, c1);
+  ScaledWithOffsetMatcher match5(graph()->NewNode(a_op, m1, o0));
+  CheckScaledWithOffsetMatch(&match5, p1, 0, o0, NULL);
+
+  // (C0 + M1) -> [P1, 0, NULL, C0]
+  m1 = graph()->NewNode(m_op, p1, c1);
+  ScaledWithOffsetMatcher match6(graph()->NewNode(a_op, c0, m1));
+  CheckScaledWithOffsetMatch(&match6, p1, 0, NULL, c0);
+
+  // (M1 + C0) -> [P1, 0, NULL, C0]
+  m1 = graph()->NewNode(m_op, p1, c1);
+  ScaledWithOffsetMatcher match7(graph()->NewNode(a_op, m1, c0));
+  CheckScaledWithOffsetMatch(&match7, p1, 0, NULL, c0);
+
+  // (O0 + S0) -> [p1, 0, O0, NULL]
+  ScaledWithOffsetMatcher match8(graph()->NewNode(a_op, o0, s0));
+  CheckScaledWithOffsetMatch(&match8, p1, 0, o0, NULL);
+
+  // (S0 + O0) -> [p1, 0, O0, NULL]
+  s0 = graph()->NewNode(s_op, p1, c0);
+  ScaledWithOffsetMatcher match9(graph()->NewNode(a_op, s0, o0));
+  CheckScaledWithOffsetMatch(&match9, p1, 0, o0, NULL);
+
+  // (C0 + S0) -> [P1, 0, NULL, C0]
+  s0 = graph()->NewNode(s_op, p1, c0);
+  ScaledWithOffsetMatcher match10(graph()->NewNode(a_op, c0, s0));
+  CheckScaledWithOffsetMatch(&match10, p1, 0, NULL, c0);
+
+  // (S0 + C0) -> [P1, 0, NULL, C0]
+  s0 = graph()->NewNode(s_op, p1, c0);
+  ScaledWithOffsetMatcher match11(graph()->NewNode(a_op, s0, c0));
+  CheckScaledWithOffsetMatch(&match11, p1, 0, NULL, c0);
+
+  // (O0 + M2) -> [p1, 1, O0, NULL]
+  ScaledWithOffsetMatcher match12(graph()->NewNode(a_op, o0, m2));
+  CheckScaledWithOffsetMatch(&match12, p1, 1, o0, NULL);
+
+  // (M2 + O0) -> [p1, 1, O0, NULL]
+  m2 = graph()->NewNode(m_op, p1, c2);
+  ScaledWithOffsetMatcher match13(graph()->NewNode(a_op, m2, o0));
+  CheckScaledWithOffsetMatch(&match13, p1, 1, o0, NULL);
+
+  // (C0 + M2) -> [P1, 1, NULL, C0]
+  m2 = graph()->NewNode(m_op, p1, c2);
+  ScaledWithOffsetMatcher match14(graph()->NewNode(a_op, c0, m2));
+  CheckScaledWithOffsetMatch(&match14, p1, 1, NULL, c0);
+
+  // (M2 + C0) -> [P1, 1, NULL, C0]
+  m2 = graph()->NewNode(m_op, p1, c2);
+  ScaledWithOffsetMatcher match15(graph()->NewNode(a_op, m2, c0));
+  CheckScaledWithOffsetMatch(&match15, p1, 1, NULL, c0);
+
+  // (O0 + S1) -> [p1, 1, O0, NULL]
+  ScaledWithOffsetMatcher match16(graph()->NewNode(a_op, o0, s1));
+  CheckScaledWithOffsetMatch(&match16, p1, 1, o0, NULL);
+
+  // (S1 + O0) -> [p1, 1, O0, NULL]
+  s1 = graph()->NewNode(s_op, p1, c1);
+  ScaledWithOffsetMatcher match17(graph()->NewNode(a_op, s1, o0));
+  CheckScaledWithOffsetMatch(&match17, p1, 1, o0, NULL);
+
+  // (C0 + S1) -> [P1, 1, NULL, C0]
+  s1 = graph()->NewNode(s_op, p1, c1);
+  ScaledWithOffsetMatcher match18(graph()->NewNode(a_op, c0, s1));
+  CheckScaledWithOffsetMatch(&match18, p1, 1, NULL, c0);
+
+  // (S1 + C0) -> [P1, 1, NULL, C0]
+  s1 = graph()->NewNode(s_op, p1, c1);
+  ScaledWithOffsetMatcher match19(graph()->NewNode(a_op, s1, c0));
+  CheckScaledWithOffsetMatch(&match19, p1, 1, NULL, c0);
+
+  // (O0 + M4) -> [p1, 2, O0, NULL]
+  ScaledWithOffsetMatcher match20(graph()->NewNode(a_op, o0, m4));
+  CheckScaledWithOffsetMatch(&match20, p1, 2, o0, NULL);
+
+  // (M4 + O0) -> [p1, 2, O0, NULL]
+  m4 = graph()->NewNode(m_op, p1, c4);
+  ScaledWithOffsetMatcher match21(graph()->NewNode(a_op, m4, o0));
+  CheckScaledWithOffsetMatch(&match21, p1, 2, o0, NULL);
+
+  // (C0 + M4) -> [p1, 2, NULL, C0]
+  m4 = graph()->NewNode(m_op, p1, c4);
+  ScaledWithOffsetMatcher match22(graph()->NewNode(a_op, c0, m4));
+  CheckScaledWithOffsetMatch(&match22, p1, 2, NULL, c0);
+
+  // (M4 + C0) -> [p1, 2, NULL, C0]
+  m4 = graph()->NewNode(m_op, p1, c4);
+  ScaledWithOffsetMatcher match23(graph()->NewNode(a_op, m4, c0));
+  CheckScaledWithOffsetMatch(&match23, p1, 2, NULL, c0);
+
+  // (O0 + S2) -> [p1, 2, O0, NULL]
+  ScaledWithOffsetMatcher match24(graph()->NewNode(a_op, o0, s2));
+  CheckScaledWithOffsetMatch(&match24, p1, 2, o0, NULL);
+
+  // (S2 + O0) -> [p1, 2, O0, NULL]
+  s2 = graph()->NewNode(s_op, p1, c2);
+  ScaledWithOffsetMatcher match25(graph()->NewNode(a_op, s2, o0));
+  CheckScaledWithOffsetMatch(&match25, p1, 2, o0, NULL);
+
+  // (C0 + S2) -> [p1, 2, NULL, C0]
+  s2 = graph()->NewNode(s_op, p1, c2);
+  ScaledWithOffsetMatcher match26(graph()->NewNode(a_op, c0, s2));
+  CheckScaledWithOffsetMatch(&match26, p1, 2, NULL, c0);
+
+  // (S2 + C0) -> [p1, 2, NULL, C0]
+  s2 = graph()->NewNode(s_op, p1, c2);
+  ScaledWithOffsetMatcher match27(graph()->NewNode(a_op, s2, c0));
+  CheckScaledWithOffsetMatch(&match27, p1, 2, NULL, c0);
+
+  // (O0 + M8) -> [p1, 3, O0, NULL]
+  ScaledWithOffsetMatcher match28(graph()->NewNode(a_op, o0, m8));
+  CheckScaledWithOffsetMatch(&match28, p1, 3, o0, NULL);
+
+  // (M8 + O0) -> [p1, 3, O0, NULL]
+  m8 = graph()->NewNode(m_op, p1, c8);
+  ScaledWithOffsetMatcher match29(graph()->NewNode(a_op, m8, o0));
+  CheckScaledWithOffsetMatch(&match29, p1, 3, o0, NULL);
+
+  // (C0 + M8) -> [p1, 3, NULL, C0]
+  m8 = graph()->NewNode(m_op, p1, c8);
+  ScaledWithOffsetMatcher match30(graph()->NewNode(a_op, c0, m8));
+  CheckScaledWithOffsetMatch(&match30, p1, 3, NULL, c0);
+
+  // (M8 + C0) -> [p1, 3, NULL, C0]
+  m8 = graph()->NewNode(m_op, p1, c8);
+  ScaledWithOffsetMatcher match31(graph()->NewNode(a_op, m8, c0));
+  CheckScaledWithOffsetMatch(&match31, p1, 3, NULL, c0);
+
+  // (O0 + S3) -> [p1, 3, O0, NULL]
+  ScaledWithOffsetMatcher match32(graph()->NewNode(a_op, o0, s3));
+  CheckScaledWithOffsetMatch(&match32, p1, 3, o0, NULL);
+
+  // (S3 + O0) -> [p1, 3, O0, NULL]
+  s3 = graph()->NewNode(s_op, p1, c3);
+  ScaledWithOffsetMatcher match33(graph()->NewNode(a_op, s3, o0));
+  CheckScaledWithOffsetMatch(&match33, p1, 3, o0, NULL);
+
+  // (C0 + S3) -> [p1, 3, NULL, C0]
+  s3 = graph()->NewNode(s_op, p1, c3);
+  ScaledWithOffsetMatcher match34(graph()->NewNode(a_op, c0, s3));
+  CheckScaledWithOffsetMatch(&match34, p1, 3, NULL, c0);
+
+  // (S3 + C0) -> [p1, 3, NULL, C0]
+  s3 = graph()->NewNode(s_op, p1, c3);
+  ScaledWithOffsetMatcher match35(graph()->NewNode(a_op, s3, c0));
+  CheckScaledWithOffsetMatch(&match35, p1, 3, NULL, c0);
+
+  // 2 INPUT - NEGATIVE CASES
+
+  // (O1 + M3) -> [M3, 0, O1, NULL]
+  ScaledWithOffsetMatcher match36(graph()->NewNode(a_op, o1, m3));
+  CheckScaledWithOffsetMatch(&match36, m3, 0, o1, NULL);
+
+  // (O1 + S4) -> [S4, 0, O1, NULL]
+  ScaledWithOffsetMatcher match37(graph()->NewNode(a_op, o1, s4));
+  CheckScaledWithOffsetMatch(&match37, s4, 0, o1, NULL);
+
+  // 3 INPUT
+
+  // (C0 + S3) + O0 -> [p1, 3, o0, c0]
+  s3 = graph()->NewNode(s_op, p1, c3);
+  ScaledWithOffsetMatcher match38(
+      graph()->NewNode(a_op, graph()->NewNode(a_op, c0, s3), o0));
+  CheckScaledWithOffsetMatch(&match38, p1, 3, o0, c0);
+
+  // (O0 + C0) + S3 -> [p1, 3, o0, c0]
+  s3 = graph()->NewNode(s_op, p1, c3);
+  ScaledWithOffsetMatcher match39(
+      graph()->NewNode(a_op, graph()->NewNode(a_op, o0, c0), s3));
+  CheckScaledWithOffsetMatch(&match39, p1, 3, o0, c0);
+
+  // (S3 + O0) + C0 -> [p1, 3, o0, c0]
+  s3 = graph()->NewNode(s_op, p1, c3);
+  ScaledWithOffsetMatcher match40(
+      graph()->NewNode(a_op, graph()->NewNode(a_op, s3, o0), c0));
+  CheckScaledWithOffsetMatch(&match40, p1, 3, o0, c0);
+
+  // C0 + (S3 + O0) -> [p1, 3, o0, c0]
+  s3 = graph()->NewNode(s_op, p1, c3);
+  ScaledWithOffsetMatcher match41(
+      graph()->NewNode(a_op, c0, graph()->NewNode(a_op, s3, o0)));
+  CheckScaledWithOffsetMatch(&match41, p1, 3, o0, c0);
+
+  // O0 + (C0 + S3) -> [p1, 3, o0, c0]
+  s3 = graph()->NewNode(s_op, p1, c3);
+  ScaledWithOffsetMatcher match42(
+      graph()->NewNode(a_op, o0, graph()->NewNode(a_op, c0, s3)));
+  CheckScaledWithOffsetMatch(&match42, p1, 3, o0, c0);
+
+  // S3 + (O0 + C0) -> [p1, 3, o0, c0]
+  s3 = graph()->NewNode(s_op, p1, c3);
+  ScaledWithOffsetMatcher match43(
+      graph()->NewNode(a_op, s3, graph()->NewNode(a_op, o0, c0)));
+  CheckScaledWithOffsetMatch(&match43, p1, 3, o0, c0);
+}
+
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
diff --git a/test/unittests/compiler/x64/instruction-selector-x64-unittest.cc b/test/unittests/compiler/x64/instruction-selector-x64-unittest.cc
index 48c074e..f4070ec 100644
--- a/test/unittests/compiler/x64/instruction-selector-x64-unittest.cc
+++ b/test/unittests/compiler/x64/instruction-selector-x64-unittest.cc
@@ -252,10 +252,366 @@
   m.Return(m.Int32Add(a0, p0));
   Stream s = m.Build();
   ASSERT_EQ(2U, s.size());
-  EXPECT_EQ(kX64Add32, s[0]->arch_opcode());
+  EXPECT_EQ(kX64Lea32, s[0]->arch_opcode());
   ASSERT_EQ(2U, s[0]->InputCount());
-  EXPECT_EQ(s.ToVreg(p1), s.ToVreg(s[0]->InputAt(0)));
-  EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(1)));
+  EXPECT_EQ(s.ToVreg(p1), s.ToVreg(s[0]->InputAt(1)));
+  EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
+}
+
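+// The tests below check that Int32Add nodes are selected as kX64Lea32 with
+// the matching addressing mode; as the assertions suggest, the mode names
+// encode the operand shape (MRI = base + immediate, MR2 = base + 2 * index,
+// and a trailing I adds an immediate displacement).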
+
+TEST_F(InstructionSelectorTest, Int32AddConstantAsLea) {
+  StreamBuilder m(this, kMachInt32, kMachInt32);
+  Node* const p0 = m.Parameter(0);
+  Node* const c0 = m.Int32Constant(15);
+  m.Return(m.Int32Add(p0, c0));
+  Stream s = m.Build();
+  ASSERT_EQ(1U, s.size());
+  EXPECT_EQ(kX64Lea32, s[0]->arch_opcode());
+  EXPECT_EQ(kMode_MRI, s[0]->addressing_mode());
+  ASSERT_EQ(2U, s[0]->InputCount());
+  EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
+  EXPECT_TRUE(s[0]->InputAt(1)->IsImmediate());
+}
+
+
+TEST_F(InstructionSelectorTest, Int32AddCommutedConstantAsLea) {
+  StreamBuilder m(this, kMachInt32, kMachInt32);
+  Node* const p0 = m.Parameter(0);
+  Node* const c0 = m.Int32Constant(15);
+  m.Return(m.Int32Add(c0, p0));
+  Stream s = m.Build();
+  ASSERT_EQ(1U, s.size());
+  EXPECT_EQ(kX64Lea32, s[0]->arch_opcode());
+  EXPECT_EQ(kMode_MRI, s[0]->addressing_mode());
+  ASSERT_EQ(2U, s[0]->InputCount());
+  EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
+  EXPECT_TRUE(s[0]->InputAt(1)->IsImmediate());
+}
+
+
+TEST_F(InstructionSelectorTest, Int32AddScaled2Mul) {
+  StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+  Node* const p0 = m.Parameter(0);
+  Node* const p1 = m.Parameter(1);
+  Node* const s0 = m.Int32Mul(p1, m.Int32Constant(2));
+  m.Return(m.Int32Add(p0, s0));
+  Stream s = m.Build();
+  ASSERT_EQ(1U, s.size());
+  EXPECT_EQ(kX64Lea32, s[0]->arch_opcode());
+  EXPECT_EQ(kMode_MR2, s[0]->addressing_mode());
+  ASSERT_EQ(2U, s[0]->InputCount());
+  EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
+  EXPECT_EQ(s.ToVreg(p1), s.ToVreg(s[0]->InputAt(1)));
+}
+
+
+TEST_F(InstructionSelectorTest, Int32AddCommutedScaled2Mul) {
+  StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+  Node* const p0 = m.Parameter(0);
+  Node* const p1 = m.Parameter(1);
+  Node* const s0 = m.Int32Mul(p1, m.Int32Constant(2));
+  m.Return(m.Int32Add(s0, p0));
+  Stream s = m.Build();
+  ASSERT_EQ(1U, s.size());
+  EXPECT_EQ(kX64Lea32, s[0]->arch_opcode());
+  EXPECT_EQ(kMode_MR2, s[0]->addressing_mode());
+  ASSERT_EQ(2U, s[0]->InputCount());
+  EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
+  EXPECT_EQ(s.ToVreg(p1), s.ToVreg(s[0]->InputAt(1)));
+}
+
+
+TEST_F(InstructionSelectorTest, Int32AddScaled2Shl) {
+  StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+  Node* const p0 = m.Parameter(0);
+  Node* const p1 = m.Parameter(1);
+  Node* const s0 = m.Word32Shl(p1, m.Int32Constant(1));
+  m.Return(m.Int32Add(p0, s0));
+  Stream s = m.Build();
+  ASSERT_EQ(1U, s.size());
+  EXPECT_EQ(kX64Lea32, s[0]->arch_opcode());
+  EXPECT_EQ(kMode_MR2, s[0]->addressing_mode());
+  ASSERT_EQ(2U, s[0]->InputCount());
+  EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
+  EXPECT_EQ(s.ToVreg(p1), s.ToVreg(s[0]->InputAt(1)));
+}
+
+
+TEST_F(InstructionSelectorTest, Int32AddCommutedScaled2Shl) {
+  StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+  Node* const p0 = m.Parameter(0);
+  Node* const p1 = m.Parameter(1);
+  Node* const s0 = m.Word32Shl(p1, m.Int32Constant(1));
+  m.Return(m.Int32Add(s0, p0));
+  Stream s = m.Build();
+  ASSERT_EQ(1U, s.size());
+  EXPECT_EQ(kX64Lea32, s[0]->arch_opcode());
+  EXPECT_EQ(kMode_MR2, s[0]->addressing_mode());
+  ASSERT_EQ(2U, s[0]->InputCount());
+  EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
+  EXPECT_EQ(s.ToVreg(p1), s.ToVreg(s[0]->InputAt(1)));
+}
+
+
+TEST_F(InstructionSelectorTest, Int32AddScaled4Mul) {
+  StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+  Node* const p0 = m.Parameter(0);
+  Node* const p1 = m.Parameter(1);
+  Node* const s0 = m.Int32Mul(p1, m.Int32Constant(4));
+  m.Return(m.Int32Add(p0, s0));
+  Stream s = m.Build();
+  ASSERT_EQ(1U, s.size());
+  EXPECT_EQ(kX64Lea32, s[0]->arch_opcode());
+  EXPECT_EQ(kMode_MR4, s[0]->addressing_mode());
+  ASSERT_EQ(2U, s[0]->InputCount());
+  EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
+  EXPECT_EQ(s.ToVreg(p1), s.ToVreg(s[0]->InputAt(1)));
+}
+
+
+TEST_F(InstructionSelectorTest, Int32AddScaled4Shl) {
+  StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+  Node* const p0 = m.Parameter(0);
+  Node* const p1 = m.Parameter(1);
+  Node* const s0 = m.Word32Shl(p1, m.Int32Constant(2));
+  m.Return(m.Int32Add(p0, s0));
+  Stream s = m.Build();
+  ASSERT_EQ(1U, s.size());
+  EXPECT_EQ(kX64Lea32, s[0]->arch_opcode());
+  EXPECT_EQ(kMode_MR4, s[0]->addressing_mode());
+  ASSERT_EQ(2U, s[0]->InputCount());
+  EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
+  EXPECT_EQ(s.ToVreg(p1), s.ToVreg(s[0]->InputAt(1)));
+}
+
+
+TEST_F(InstructionSelectorTest, Int32AddScaled8Mul) {
+  StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+  Node* const p0 = m.Parameter(0);
+  Node* const p1 = m.Parameter(1);
+  Node* const s0 = m.Int32Mul(p1, m.Int32Constant(8));
+  m.Return(m.Int32Add(p0, s0));
+  Stream s = m.Build();
+  ASSERT_EQ(1U, s.size());
+  EXPECT_EQ(kX64Lea32, s[0]->arch_opcode());
+  EXPECT_EQ(kMode_MR8, s[0]->addressing_mode());
+  ASSERT_EQ(2U, s[0]->InputCount());
+  EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
+  EXPECT_EQ(s.ToVreg(p1), s.ToVreg(s[0]->InputAt(1)));
+}
+
+
+TEST_F(InstructionSelectorTest, Int32AddScaled8Shl) {
+  StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+  Node* const p0 = m.Parameter(0);
+  Node* const p1 = m.Parameter(1);
+  Node* const s0 = m.Word32Shl(p1, m.Int32Constant(3));
+  m.Return(m.Int32Add(p0, s0));
+  Stream s = m.Build();
+  ASSERT_EQ(1U, s.size());
+  EXPECT_EQ(kX64Lea32, s[0]->arch_opcode());
+  EXPECT_EQ(kMode_MR8, s[0]->addressing_mode());
+  ASSERT_EQ(2U, s[0]->InputCount());
+  EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
+  EXPECT_EQ(s.ToVreg(p1), s.ToVreg(s[0]->InputAt(1)));
+}
+
+
+TEST_F(InstructionSelectorTest, Int32AddScaled2MulWithConstant) {
+  StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+  Node* const p0 = m.Parameter(0);
+  Node* const p1 = m.Parameter(1);
+  Node* const s0 = m.Int32Mul(p1, m.Int32Constant(2));
+  Node* const c0 = m.Int32Constant(15);
+  m.Return(m.Int32Add(c0, m.Int32Add(p0, s0)));
+  Stream s = m.Build();
+  ASSERT_EQ(1U, s.size());
+  EXPECT_EQ(kX64Lea32, s[0]->arch_opcode());
+  EXPECT_EQ(kMode_MR2I, s[0]->addressing_mode());
+  ASSERT_EQ(3U, s[0]->InputCount());
+  EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
+  EXPECT_EQ(s.ToVreg(p1), s.ToVreg(s[0]->InputAt(1)));
+  EXPECT_TRUE(s[0]->InputAt(2)->IsImmediate());
+}
+
+
+TEST_F(InstructionSelectorTest, Int32AddScaled2MulWithConstantShuffle1) {
+  StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+  Node* const p0 = m.Parameter(0);
+  Node* const p1 = m.Parameter(1);
+  Node* const s0 = m.Int32Mul(p1, m.Int32Constant(2));
+  Node* const c0 = m.Int32Constant(15);
+  m.Return(m.Int32Add(p0, m.Int32Add(s0, c0)));
+  Stream s = m.Build();
+  ASSERT_EQ(1U, s.size());
+  EXPECT_EQ(kX64Lea32, s[0]->arch_opcode());
+  EXPECT_EQ(kMode_MR2I, s[0]->addressing_mode());
+  ASSERT_EQ(3U, s[0]->InputCount());
+  EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
+  EXPECT_EQ(s.ToVreg(p1), s.ToVreg(s[0]->InputAt(1)));
+  EXPECT_TRUE(s[0]->InputAt(2)->IsImmediate());
+}
+
+
+TEST_F(InstructionSelectorTest, Int32AddScaled2MulWithConstantShuffle2) {
+  StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+  Node* const p0 = m.Parameter(0);
+  Node* const p1 = m.Parameter(1);
+  Node* const s0 = m.Int32Mul(p1, m.Int32Constant(2));
+  Node* const c0 = m.Int32Constant(15);
+  m.Return(m.Int32Add(s0, m.Int32Add(c0, p0)));
+  Stream s = m.Build();
+  ASSERT_EQ(1U, s.size());
+  EXPECT_EQ(kX64Lea32, s[0]->arch_opcode());
+  EXPECT_EQ(kMode_MR2I, s[0]->addressing_mode());
+  ASSERT_EQ(3U, s[0]->InputCount());
+  EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
+  EXPECT_EQ(s.ToVreg(p1), s.ToVreg(s[0]->InputAt(1)));
+  EXPECT_TRUE(s[0]->InputAt(2)->IsImmediate());
+}
+
+
+TEST_F(InstructionSelectorTest, Int32AddScaled2MulWithConstantShuffle3) {
+  StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+  Node* const p0 = m.Parameter(0);
+  Node* const p1 = m.Parameter(1);
+  Node* const s0 = m.Int32Mul(p1, m.Int32Constant(2));
+  Node* const c0 = m.Int32Constant(15);
+  m.Return(m.Int32Add(m.Int32Add(s0, c0), p0));
+  Stream s = m.Build();
+  ASSERT_EQ(1U, s.size());
+  EXPECT_EQ(kX64Lea32, s[0]->arch_opcode());
+  EXPECT_EQ(kMode_MR2I, s[0]->addressing_mode());
+  ASSERT_EQ(3U, s[0]->InputCount());
+  EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
+  EXPECT_EQ(s.ToVreg(p1), s.ToVreg(s[0]->InputAt(1)));
+  EXPECT_TRUE(s[0]->InputAt(2)->IsImmediate());
+}
+
+
+TEST_F(InstructionSelectorTest, Int32AddScaled2MulWithConstantShuffle4) {
+  StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+  Node* const p0 = m.Parameter(0);
+  Node* const p1 = m.Parameter(1);
+  Node* const s0 = m.Int32Mul(p1, m.Int32Constant(2));
+  Node* const c0 = m.Int32Constant(15);
+  m.Return(m.Int32Add(m.Int32Add(c0, p0), s0));
+  Stream s = m.Build();
+  ASSERT_EQ(1U, s.size());
+  EXPECT_EQ(kX64Lea32, s[0]->arch_opcode());
+  EXPECT_EQ(kMode_MR2I, s[0]->addressing_mode());
+  ASSERT_EQ(3U, s[0]->InputCount());
+  EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
+  EXPECT_EQ(s.ToVreg(p1), s.ToVreg(s[0]->InputAt(1)));
+  EXPECT_TRUE(s[0]->InputAt(2)->IsImmediate());
+}
+
+
+TEST_F(InstructionSelectorTest, Int32AddScaled2MulWithConstantShuffle5) {
+  StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+  Node* const p0 = m.Parameter(0);
+  Node* const p1 = m.Parameter(1);
+  Node* const s0 = m.Int32Mul(p1, m.Int32Constant(2));
+  Node* const c0 = m.Int32Constant(15);
+  m.Return(m.Int32Add(m.Int32Add(p0, s0), c0));
+  Stream s = m.Build();
+  ASSERT_EQ(1U, s.size());
+  EXPECT_EQ(kX64Lea32, s[0]->arch_opcode());
+  EXPECT_EQ(kMode_MR2I, s[0]->addressing_mode());
+  ASSERT_EQ(3U, s[0]->InputCount());
+  EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
+  EXPECT_EQ(s.ToVreg(p1), s.ToVreg(s[0]->InputAt(1)));
+  EXPECT_TRUE(s[0]->InputAt(2)->IsImmediate());
+}
+
+
+TEST_F(InstructionSelectorTest, Int32AddScaled2ShlWithConstant) {
+  StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+  Node* const p0 = m.Parameter(0);
+  Node* const p1 = m.Parameter(1);
+  Node* const s0 = m.Word32Shl(p1, m.Int32Constant(1));
+  Node* const c0 = m.Int32Constant(15);
+  m.Return(m.Int32Add(c0, m.Int32Add(p0, s0)));
+  Stream s = m.Build();
+  ASSERT_EQ(1U, s.size());
+  EXPECT_EQ(kX64Lea32, s[0]->arch_opcode());
+  EXPECT_EQ(kMode_MR2I, s[0]->addressing_mode());
+  ASSERT_EQ(3U, s[0]->InputCount());
+  EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
+  EXPECT_EQ(s.ToVreg(p1), s.ToVreg(s[0]->InputAt(1)));
+  EXPECT_TRUE(s[0]->InputAt(2)->IsImmediate());
+}
+
+
+TEST_F(InstructionSelectorTest, Int32AddScaled4MulWithConstant) {
+  StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+  Node* const p0 = m.Parameter(0);
+  Node* const p1 = m.Parameter(1);
+  Node* const s0 = m.Int32Mul(p1, m.Int32Constant(4));
+  Node* const c0 = m.Int32Constant(15);
+  m.Return(m.Int32Add(c0, m.Int32Add(p0, s0)));
+  Stream s = m.Build();
+  ASSERT_EQ(1U, s.size());
+  EXPECT_EQ(kX64Lea32, s[0]->arch_opcode());
+  EXPECT_EQ(kMode_MR4I, s[0]->addressing_mode());
+  ASSERT_EQ(3U, s[0]->InputCount());
+  EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
+  EXPECT_EQ(s.ToVreg(p1), s.ToVreg(s[0]->InputAt(1)));
+  EXPECT_TRUE(s[0]->InputAt(2)->IsImmediate());
+}
+
+
+TEST_F(InstructionSelectorTest, Int32AddScaled4ShlWithConstant) {
+  StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+  Node* const p0 = m.Parameter(0);
+  Node* const p1 = m.Parameter(1);
+  Node* const s0 = m.Word32Shl(p1, m.Int32Constant(2));
+  Node* const c0 = m.Int32Constant(15);
+  m.Return(m.Int32Add(c0, m.Int32Add(p0, s0)));
+  Stream s = m.Build();
+  ASSERT_EQ(1U, s.size());
+  EXPECT_EQ(kX64Lea32, s[0]->arch_opcode());
+  EXPECT_EQ(kMode_MR4I, s[0]->addressing_mode());
+  ASSERT_EQ(3U, s[0]->InputCount());
+  EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
+  EXPECT_EQ(s.ToVreg(p1), s.ToVreg(s[0]->InputAt(1)));
+  EXPECT_TRUE(s[0]->InputAt(2)->IsImmediate());
+}
+
+
+TEST_F(InstructionSelectorTest, Int32AddScaled8MulWithConstant) {
+  StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+  Node* const p0 = m.Parameter(0);
+  Node* const p1 = m.Parameter(1);
+  Node* const s0 = m.Int32Mul(p1, m.Int32Constant(8));
+  Node* const c0 = m.Int32Constant(15);
+  m.Return(m.Int32Add(c0, m.Int32Add(p0, s0)));
+  Stream s = m.Build();
+  ASSERT_EQ(1U, s.size());
+  EXPECT_EQ(kX64Lea32, s[0]->arch_opcode());
+  EXPECT_EQ(kMode_MR8I, s[0]->addressing_mode());
+  ASSERT_EQ(3U, s[0]->InputCount());
+  EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
+  EXPECT_EQ(s.ToVreg(p1), s.ToVreg(s[0]->InputAt(1)));
+  EXPECT_TRUE(s[0]->InputAt(2)->IsImmediate());
+}
+
+
+TEST_F(InstructionSelectorTest, Int32AddScaled8ShlWithConstant) {
+  StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+  Node* const p0 = m.Parameter(0);
+  Node* const p1 = m.Parameter(1);
+  Node* const s0 = m.Word32Shl(p1, m.Int32Constant(3));
+  Node* const c0 = m.Int32Constant(15);
+  m.Return(m.Int32Add(c0, m.Int32Add(p0, s0)));
+  Stream s = m.Build();
+  ASSERT_EQ(1U, s.size());
+  EXPECT_EQ(kX64Lea32, s[0]->arch_opcode());
+  EXPECT_EQ(kMode_MR8I, s[0]->addressing_mode());
+  ASSERT_EQ(3U, s[0]->InputCount());
+  EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
+  EXPECT_EQ(s.ToVreg(p1), s.ToVreg(s[0]->InputAt(1)));
+  EXPECT_TRUE(s[0]->InputAt(2)->IsImmediate());
 }
 
 
diff --git a/test/unittests/heap/gc-idle-time-handler-unittest.cc b/test/unittests/heap/gc-idle-time-handler-unittest.cc
index 55dd6c6..977882a 100644
--- a/test/unittests/heap/gc-idle-time-handler-unittest.cc
+++ b/test/unittests/heap/gc-idle-time-handler-unittest.cc
@@ -22,6 +22,7 @@
   GCIdleTimeHandler::HeapState DefaultHeapState() {
     GCIdleTimeHandler::HeapState result;
     result.contexts_disposed = 0;
+    result.contexts_disposal_rate = GCIdleTimeHandler::kHighContextDisposalRate;
     result.size_of_objects = kSizeOfObjects;
     result.incremental_marking_stopped = false;
     result.can_start_incremental_marking = true;
@@ -179,10 +180,34 @@
 }
 
 
-TEST_F(GCIdleTimeHandlerTest, AfterContextDisposeLargeIdleTime) {
+TEST_F(GCIdleTimeHandlerTest, ContextDisposeLowRate) {
   GCIdleTimeHandler::HeapState heap_state = DefaultHeapState();
   heap_state.contexts_disposed = 1;
   heap_state.incremental_marking_stopped = true;
+  int idle_time_ms = 0;
+  GCIdleTimeAction action = handler()->Compute(idle_time_ms, heap_state);
+  EXPECT_EQ(DO_NOTHING, action.type);
+}
+
+
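+// A disposal rate below kHighContextDisposalRate indicates that contexts are
+// being disposed frequently, so a full GC is expected even with no idle time.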
+TEST_F(GCIdleTimeHandlerTest, ContextDisposeHighRate) {
+  GCIdleTimeHandler::HeapState heap_state = DefaultHeapState();
+  heap_state.contexts_disposed = 1;
+  heap_state.contexts_disposal_rate =
+      GCIdleTimeHandler::kHighContextDisposalRate - 1;
+  heap_state.incremental_marking_stopped = true;
+  int idle_time_ms = 0;
+  GCIdleTimeAction action = handler()->Compute(idle_time_ms, heap_state);
+  EXPECT_EQ(DO_FULL_GC, action.type);
+}
+
+
+TEST_F(GCIdleTimeHandlerTest, AfterContextDisposeLargeIdleTime) {
+  GCIdleTimeHandler::HeapState heap_state = DefaultHeapState();
+  heap_state.contexts_disposed = 1;
+  heap_state.contexts_disposal_rate = 1.0;
+  heap_state.incremental_marking_stopped = true;
+  heap_state.can_start_incremental_marking = false;
   size_t speed = heap_state.mark_compact_speed_in_bytes_per_ms;
   int idle_time_ms =
       static_cast<int>((heap_state.size_of_objects + speed - 1) / speed);
@@ -194,8 +219,8 @@
 TEST_F(GCIdleTimeHandlerTest, AfterContextDisposeZeroIdleTime) {
   GCIdleTimeHandler::HeapState heap_state = DefaultHeapState();
   heap_state.contexts_disposed = 1;
+  heap_state.contexts_disposal_rate = 1.0;
   heap_state.incremental_marking_stopped = true;
-  heap_state.size_of_objects = GCIdleTimeHandler::kSmallHeapSize / 2;
   int idle_time_ms = 0;
   GCIdleTimeAction action = handler()->Compute(idle_time_ms, heap_state);
   EXPECT_EQ(DO_FULL_GC, action.type);
@@ -205,6 +230,7 @@
 TEST_F(GCIdleTimeHandlerTest, AfterContextDisposeSmallIdleTime1) {
   GCIdleTimeHandler::HeapState heap_state = DefaultHeapState();
   heap_state.contexts_disposed = 1;
+  heap_state.contexts_disposal_rate = 1.0;
   heap_state.incremental_marking_stopped = true;
   size_t speed = heap_state.mark_compact_speed_in_bytes_per_ms;
   int idle_time_ms = static_cast<int>(heap_state.size_of_objects / speed - 1);
@@ -216,6 +242,7 @@
 TEST_F(GCIdleTimeHandlerTest, AfterContextDisposeSmallIdleTime2) {
   GCIdleTimeHandler::HeapState heap_state = DefaultHeapState();
   heap_state.contexts_disposed = 1;
+  heap_state.contexts_disposal_rate = 1.0;
   size_t speed = heap_state.mark_compact_speed_in_bytes_per_ms;
   int idle_time_ms = static_cast<int>(heap_state.size_of_objects / speed - 1);
   GCIdleTimeAction action = handler()->Compute(idle_time_ms, heap_state);
diff --git a/test/unittests/unittests.gyp b/test/unittests/unittests.gyp
index a881e46..b16d2c5 100644
--- a/test/unittests/unittests.gyp
+++ b/test/unittests/unittests.gyp
@@ -49,7 +49,7 @@
         'compiler/js-operator-unittest.cc',
         'compiler/js-typed-lowering-unittest.cc',
         'compiler/machine-operator-reducer-unittest.cc',
-        'compiler/machine-operator-unittest.cc',
+        'compiler/node-matchers-unittest.cc',
         'compiler/node-test-utils.cc',
         'compiler/node-test-utils.h',
         'compiler/register-allocator-unittest.cc',
diff --git a/tools/trace-maps-processor.py b/tools/trace-maps-processor.py
new file mode 100755
index 0000000..bf8c8a8
--- /dev/null
+++ b/tools/trace-maps-processor.py
@@ -0,0 +1,172 @@
+#!/usr/bin/env python
+# Copyright 2014 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import sys
+
+
+# Validate the argument count before indexing into sys.argv so that running
+# the script with no arguments prints the usage message instead of raising
+# an IndexError.
+if len(sys.argv) != 3 or sys.argv[1] in ["help", "-h", "--help"]:
+  print("Usage: %s <action> <inputfile>, where action can be: \n"
+        "help    Print this message\n"
+        "plain   Print ASCII tree to stdout\n"
+        "dot     Print dot file to stdout\n"
+        "count   Count most frequent transition reasons\n" % sys.argv[0])
+  sys.exit(0)
+
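+# Example invocations (assuming a map-tracing log saved as trace.log):
+#   tools/trace-maps-processor.py count trace.log
+#   tools/trace-maps-processor.py dot trace.log | dot -Tsvg -o maps.svg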
+
+action = sys.argv[1]
+filename = sys.argv[2]
+maps = {}
+root_maps = []
+transitions = {}
+annotations = {}
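+# V8 may reuse a map's address after GC, so "annotations" counts how often
+# each raw pointer has been seen; RegisterNewMap and AnnotateExistingMap
+# suffix that count onto the pointer to keep recycled addresses distinct.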
+
+
+class Map(object):
+
+  def __init__(self, pointer, origin):
+    self.pointer = pointer
+    self.origin = origin
+
+  def __str__(self):
+    return "%s (%s)" % (self.pointer, self.origin)
+
+
+class Transition(object):
+
+  def __init__(self, from_map, to_map, reason):
+    self.from_map = from_map
+    self.to_map = to_map
+    self.reason = reason
+
+
+def RegisterNewMap(raw_map):
+  if raw_map in annotations:
+    annotations[raw_map] += 1
+  else:
+    annotations[raw_map] = 0
+  return AnnotateExistingMap(raw_map)
+
+
+def AnnotateExistingMap(raw_map):
+  return "%s_%d" % (raw_map, annotations[raw_map])
+
+
+def AddMap(pointer, origin):
+  pointer = RegisterNewMap(pointer)
+  maps[pointer] = Map(pointer, origin)
+  return pointer
+
+
+def AddTransition(from_map, to_map, reason):
+  from_map = AnnotateExistingMap(from_map)
+  to_map = AnnotateExistingMap(to_map)
+  if from_map not in transitions:
+    transitions[from_map] = {}
+  targets = transitions[from_map]
+  if to_map in targets:
+    # Some events get printed twice; that's OK. In some cases, ignore the
+    # second output...
+    old_reason = targets[to_map].reason
+    if old_reason.startswith("ReplaceDescriptors"):
+      return
+    # ...and in others use it for additional detail (the empty list below is
+    # a placeholder for reason types that should take precedence).
+    if reason in []:
+      targets[to_map].reason = reason
+      return
+    # Unexpected duplicate events? Warn.
+    print("// warning: already have a transition from %s to %s, reason: %s" %
+            (from_map, to_map, targets[to_map].reason))
+    return
+  targets[to_map] = Transition(from_map, to_map, reason)
+
+
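+# Parse the trace. Relevant lines are space-separated and look like:
+#   [TraceMaps: InitialMap map= <ptr> SFI= <id> ... ]
+#   [TraceMaps: Transition from= <ptr> to= <ptr> name= <name> ... ]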
+with open(filename, "r") as f:
+  last_to_map = ""
+  for line in f:
+    if not line.startswith("[TraceMaps: "): continue
+    words = line.split(" ")
+    event = words[1]
+    if event == "InitialMap":
+      assert words[2] == "map="
+      assert words[4] == "SFI="
+      new_map = AddMap(words[3], "SFI#%s" % words[5])
+      root_maps.append(new_map)
+      continue
+    if words[2] == "from=" and words[4] == "to=":
+      from_map = words[3]
+      to_map = words[5]
+      if from_map not in annotations:
+        print("// warning: unknown from_map %s" % from_map)
+        new_map = AddMap(from_map, "<unknown>")
+        root_maps.append(new_map)
+      if to_map != last_to_map:
+        AddMap(to_map, "<transition> (%s)" % event)
+      last_to_map = to_map
+      if event in ["Transition", "NoTransition"]:
+        assert words[6] == "name=", line
+        reason = "%s: %s" % (event, words[7])
+      elif event in ["Normalize", "ReplaceDescriptors", "SlowToFast"]:
+        assert words[6] == "reason=", line
+        reason = "%s: %s" % (event, words[7])
+        if words[8].strip() != "]":
+          reason = "%s_%s" % (reason, words[8])
+      else:
+        reason = event
+      AddTransition(from_map, to_map, reason)
+      continue
+
+
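+# Print the transition tree rooted at m as an indented ASCII outline.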
+def PlainPrint(m, indent, label):
+  print("%s%s (%s)" % (indent, m, label))
+  if m in transitions:
+    for t in transitions[m]:
+      PlainPrint(t, indent + "  ", transitions[m][t].reason)
+
+
+def CountTransitions(m):
+  if m not in transitions: return 0
+  return len(transitions[m])
+
+
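+# Emit GraphViz node and edge declarations for the subtree rooted at m.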
+def DotPrint(m, label):
+  print("m%s [label=\"%s\"]" % (m[2:], label))
+  if m in transitions:
+    for t in transitions[m]:
+      # GraphViz doesn't like node labels looking like numbers, so use
+      # "m..." instead of "0x...".
+      print("m%s -> m%s" % (m[2:], t[2:]))
+      reason = transitions[m][t].reason
+      reason = reason.replace("\\", "BACKSLASH")
+      reason = reason.replace("\"", "\\\"")
+      DotPrint(t, reason)
+
+
+if action == "plain":
+  root_maps = sorted(root_maps, key=CountTransitions, reverse=True)
+  for m in root_maps:
+    PlainPrint(m, "", maps[m].origin)
+
+elif action == "dot":
+  print("digraph g {")
+  for m in root_maps:
+    DotPrint(m, maps[m].origin)
+  print("}")
+
+elif action == "count":
+  reasons = {}
+  for s in transitions:
+    for t in transitions[s]:
+      reason = transitions[s][t].reason
+      if reason not in reasons:
+        reasons[reason] = 1
+      else:
+        reasons[reason] += 1
+  reasons_list = []
+  for r in reasons:
+    reasons_list.append("%8d %s" % (reasons[r], r))
+  reasons_list.sort(reverse=True)
+  for r in reasons_list[:20]:
+    print(r)