Version 2.3.3

Fixed an error when building the d8 shell in a fresh checkout.

Implemented Function.prototype.bind (ES5 15.3.4.5).
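
A usage sketch, not part of the patch: bind returns a function with a
fixed `this` and optional pre-applied leading arguments. It is runnable
in the d8 shell, whose built-in print is used below; the names are
illustrative.

    function greet(greeting, name) {
      return greeting + ", " + name + this.suffix;
    }
    // Fix `this` and pre-apply the first argument.
    var hello = greet.bind({ suffix: "!" }, "Hello");
    print(hello("world"));  // Hello, world!
    print(hello.length);    // 1 (target length 2 minus one bound argument)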

Fixed an error in inlined stores on ia32.

Fixed an error when setting a breakpoint at the end of a function that does not end with a newline character.

Performance improvements on all platforms.


git-svn-id: http://v8.googlecode.com/svn/trunk@5133 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
diff --git a/ChangeLog b/ChangeLog
index 0e40b00..9a7aa81 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -1,3 +1,17 @@
+2010-07-26: Version 2.3.3
+
+        Fixed an error when building the d8 shell in a fresh checkout.
+
+        Implemented Function.prototype.bind (ES5 15.3.4.5).
+
+        Fixed an error in inlined stores on ia32.
+
+        Fixed an error when setting a breakpoint at the end of a function
+        that does not end with a newline character.
+
+        Performance improvements on all platforms.
+
+
 2010-07-21: Version 2.3.2
 
         Fixed compiler warnings when building with LLVM.
@@ -6,6 +20,7 @@
 
         Performance improvements on all platforms.
 
+
 2010-07-19: Version 2.3.1
 
         Fixed compilation and linking with V8_INTERPRETED_REGEXP flag.
diff --git a/SConstruct b/SConstruct
index 53d845c..c7543d9 100644
--- a/SConstruct
+++ b/SConstruct
@@ -43,7 +43,7 @@
   ANDROID_TOP=""
 
 # ARM_TARGET_LIB is the path to the dynamic library to use on the target
-# machine if cross-compiling to an arm machine. You will also need to set 
+# machine if cross-compiling to an arm machine. You will also need to set
 # the additional cross-compiling environment variables to the cross compiler.
 ARM_TARGET_LIB = os.environ.get('ARM_TARGET_LIB')
 if ARM_TARGET_LIB:
@@ -629,6 +629,9 @@
     'os:win32': {
       'LIBS': ['winmm', 'ws2_32'],
     },
+    'arch:arm': {
+      'LINKFLAGS': ARM_LINK_FLAGS
+    },
   },
   'msvc': {
     'all': {
diff --git a/src/arm/assembler-arm.cc b/src/arm/assembler-arm.cc
index a16f200..b1705df 100644
--- a/src/arm/assembler-arm.cc
+++ b/src/arm/assembler-arm.cc
@@ -445,6 +445,37 @@
 }
 
 
+bool Assembler::IsStrRegisterImmediate(Instr instr) {
+  return (instr & (B27 | B26 | B25 | B22 | B20)) == B26;
+}
+
+
+Instr Assembler::SetStrRegisterImmediateOffset(Instr instr, int offset) {
+  ASSERT(IsStrRegisterImmediate(instr));
+  bool positive = offset >= 0;
+  if (!positive) offset = -offset;
+  ASSERT(is_uint12(offset));
+  // Set bit indicating whether the offset should be added.
+  instr = (instr & ~B23) | (positive ? B23 : 0);
+  // Set the actual offset.
+  return (instr & ~Off12Mask) | offset;
+}
+
+
+bool Assembler::IsAddRegisterImmediate(Instr instr) {
+  return (instr & (B27 | B26 | B25 | B24 | B23 | B22 | B21)) == (B25 | B23);
+}
+
+
+Instr Assembler::SetAddRegisterImmediateOffset(Instr instr, int offset) {
+  ASSERT(IsAddRegisterImmediate(instr));
+  ASSERT(offset >= 0);
+  ASSERT(is_uint12(offset));
+  // Set the offset.
+  return (instr & ~Off12Mask) | offset;
+}
+
+
 Register Assembler::GetRd(Instr instr) {
   Register reg;
   reg.code_ = ((instr & kRdMask) >> kRdShift);
@@ -796,9 +827,10 @@
     instr |= x.rs_.code()*B8 | x.shift_op_ | B4 | x.rm_.code();
   }
   emit(instr | rn.code()*B16 | rd.code()*B12);
-  if (rn.is(pc) || x.rm_.is(pc))
+  if (rn.is(pc) || x.rm_.is(pc)) {
     // Block constant pool emission for one instruction after reading pc.
     BlockConstPoolBefore(pc_offset() + kInstrSize);
+  }
 }
 
 
diff --git a/src/arm/assembler-arm.h b/src/arm/assembler-arm.h
index 39a5b4e..16e69e2 100644
--- a/src/arm/assembler-arm.h
+++ b/src/arm/assembler-arm.h
@@ -1120,6 +1120,10 @@
   static bool IsLdrRegisterImmediate(Instr instr);
   static int GetLdrRegisterImmediateOffset(Instr instr);
   static Instr SetLdrRegisterImmediateOffset(Instr instr, int offset);
+  static bool IsStrRegisterImmediate(Instr instr);
+  static Instr SetStrRegisterImmediateOffset(Instr instr, int offset);
+  static bool IsAddRegisterImmediate(Instr instr);
+  static Instr SetAddRegisterImmediateOffset(Instr instr, int offset);
   static Register GetRd(Instr instr);
   static bool IsPush(Instr instr);
   static bool IsPop(Instr instr);
diff --git a/src/arm/codegen-arm.cc b/src/arm/codegen-arm.cc
index 0b2081b..1271e80 100644
--- a/src/arm/codegen-arm.cc
+++ b/src/arm/codegen-arm.cc
@@ -151,6 +151,8 @@
 // -------------------------------------------------------------------------
 // CodeGenerator implementation
 
+int CodeGenerator::inlined_write_barrier_size_ = -1;
+
 CodeGenerator::CodeGenerator(MacroAssembler* masm)
     : deferred_(8),
       masm_(masm),
@@ -815,7 +817,7 @@
         // Check they are both small and positive.
         __ tst(scratch, Operand(kSmiTagMask | 0xc0000000));
         ASSERT(rhs.is(r0) || lhs.is(r0));  // r0 is free now.
-        ASSERT_EQ(0, kSmiTag);
+        STATIC_ASSERT(kSmiTag == 0);
         if (op == Token::ADD) {
           __ add(r0, lhs, Operand(rhs), LeaveCC, eq);
         } else {
@@ -863,7 +865,7 @@
           __ and_(r0, lhs, Operand(rhs), LeaveCC, cond);
         } else {
           ASSERT(op == Token::BIT_XOR);
-          ASSERT_EQ(0, kSmiTag);
+          STATIC_ASSERT(kSmiTag == 0);
           __ eor(r0, lhs, Operand(rhs), LeaveCC, cond);
         }
         if (cond != al) {
@@ -1520,8 +1522,8 @@
   // JS_FUNCTION_TYPE is the last instance type and it is right
   // after LAST_JS_OBJECT_TYPE, we do not have to check the upper
   // bound.
-  ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
-  ASSERT(JS_FUNCTION_TYPE == LAST_JS_OBJECT_TYPE + 1);
+  STATIC_ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
+  STATIC_ASSERT(JS_FUNCTION_TYPE == LAST_JS_OBJECT_TYPE + 1);
   __ CompareObjectType(receiver_reg, r2, r3, FIRST_JS_OBJECT_TYPE);
   __ b(lt, &build_args);
 
@@ -2610,7 +2612,7 @@
     // The next handler address is on top of the frame.  Unlink from
     // the handler list and drop the rest of this handler from the
     // frame.
-    ASSERT(StackHandlerConstants::kNextOffset == 0);
+    STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
     frame_->EmitPop(r1);
     __ mov(r3, Operand(handler_address));
     __ str(r1, MemOperand(r3));
@@ -2636,7 +2638,7 @@
       __ ldr(sp, MemOperand(r3));
       frame_->Forget(frame_->height() - handler_height);
 
-      ASSERT(StackHandlerConstants::kNextOffset == 0);
+      STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
       frame_->EmitPop(r1);
       __ str(r1, MemOperand(r3));
       frame_->Drop(StackHandlerConstants::kSize / kPointerSize - 1);
@@ -2723,7 +2725,7 @@
   // chain and set the state on the frame to FALLING.
   if (has_valid_frame()) {
     // The next handler address is on top of the frame.
-    ASSERT(StackHandlerConstants::kNextOffset == 0);
+    STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
     frame_->EmitPop(r1);
     __ mov(r3, Operand(handler_address));
     __ str(r1, MemOperand(r3));
@@ -2762,7 +2764,7 @@
 
       // Unlink this handler and drop it from the frame.  The next
       // handler address is currently on top of the frame.
-      ASSERT(StackHandlerConstants::kNextOffset == 0);
+      STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
       frame_->EmitPop(r1);
       __ str(r1, MemOperand(r3));
       frame_->Drop(StackHandlerConstants::kSize / kPointerSize - 1);
@@ -4181,8 +4183,8 @@
   // As long as JS_FUNCTION_TYPE is the last instance type and it is
   // right after LAST_JS_OBJECT_TYPE, we can avoid checking for
   // LAST_JS_OBJECT_TYPE.
-  ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
-  ASSERT(JS_FUNCTION_TYPE == LAST_JS_OBJECT_TYPE + 1);
+  STATIC_ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
+  STATIC_ASSERT(JS_FUNCTION_TYPE == LAST_JS_OBJECT_TYPE + 1);
   __ cmp(r1, Operand(JS_FUNCTION_TYPE));
   function.Branch(eq);
 
@@ -5128,7 +5130,7 @@
 
   const int kFingerOffset =
       FixedArray::OffsetOfElementAt(JSFunctionResultCache::kFingerIndex);
-  ASSERT(kSmiTag == 0 && kSmiTagSize == 1);
+  STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize == 1);
   __ ldr(r0, FieldMemOperand(r1, kFingerOffset));
   // r0 now holds finger offset as a smi.
   __ add(r3, r1, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
@@ -6207,6 +6209,60 @@
 }
 
 
+class DeferredReferenceSetNamedValue: public DeferredCode {
+ public:
+  DeferredReferenceSetNamedValue(Register value,
+                                 Register receiver,
+                                 Handle<String> name)
+      : value_(value), receiver_(receiver), name_(name) {
+    set_comment("[ DeferredReferenceSetNamedValue");
+  }
+
+  virtual void Generate();
+
+ private:
+  Register value_;
+  Register receiver_;
+  Handle<String> name_;
+};
+
+
+// Takes value in r0, receiver in r1 and returns the result (the
+// value) in r0.
+void DeferredReferenceSetNamedValue::Generate() {
+  // Record the entry frame and spill.
+  VirtualFrame copied_frame(*frame_state()->frame());
+  copied_frame.SpillAll();
+
+  // Ensure value in r0, receiver in r1 to match store ic calling
+  // convention.
+  ASSERT(value_.is(r0) && receiver_.is(r1));
+  __ mov(r2, Operand(name_));
+
+  // The rest of the instructions in the deferred code must be together.
+  { Assembler::BlockConstPoolScope block_const_pool(masm_);
+    // Call the store IC. It takes the arguments value, receiver and name in
+    // r0, r1 and r2.
+    Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Initialize));
+    __ Call(ic, RelocInfo::CODE_TARGET);
+    // The call must be followed by a nop instruction to indicate that the
+    // named store has been inlined.
+    __ nop(PROPERTY_ACCESS_INLINED);
+
+    // Go back to the frame we entered with. The instructions
+    // generated by this merge are skipped over by the inline store
+    // patching mechanism when looking for the branch instruction that
+    // tells it where the code to patch is.
+    copied_frame.MergeTo(frame_state()->frame());
+
+    // Block the constant pool for one more instruction after leaving this
+    // constant pool block scope to include the branch instruction ending the
+    // deferred code.
+    __ BlockConstPoolFor(1);
+  }
+}
+
+
 // Consumes the top of stack (the receiver) and pushes the result instead.
 void CodeGenerator::EmitNamedLoad(Handle<String> name, bool is_contextual) {
   if (is_contextual || scope()->is_global_scope() || loop_nesting() == 0) {
@@ -6277,11 +6333,88 @@
 
 void CodeGenerator::EmitNamedStore(Handle<String> name, bool is_contextual) {
 #ifdef DEBUG
-  int expected_height = frame_->height() - (is_contextual ? 1 : 2);
+  int expected_height = frame()->height() - (is_contextual ? 1 : 2);
 #endif
-  frame_->CallStoreIC(name, is_contextual);
 
-  ASSERT_EQ(expected_height, frame_->height());
+  Result result;
+  if (is_contextual || scope()->is_global_scope() || loop_nesting() == 0) {
+    frame()->CallStoreIC(name, is_contextual);
+  } else {
+    // Inline the in-object property case.
+    JumpTarget slow, done;
+
+    // Get the value and receiver from the stack.
+    frame()->PopToR0();
+    Register value = r0;
+    frame()->PopToR1();
+    Register receiver = r1;
+
+    DeferredReferenceSetNamedValue* deferred =
+        new DeferredReferenceSetNamedValue(value, receiver, name);
+
+    // Check that the receiver is a heap object.
+    __ tst(receiver, Operand(kSmiTagMask));
+    deferred->Branch(eq);
+
+    // The following instructions are part of the inlined
+    // in-object property store code which can be patched. Therefore
+    // the exact number of instructions generated must be fixed, so
+    // the constant pool is blocked while generating this code.
+    { Assembler::BlockConstPoolScope block_const_pool(masm_);
+      Register scratch0 = VirtualFrame::scratch0();
+      Register scratch1 = VirtualFrame::scratch1();
+
+      // Check the map. Initially use an invalid map to force a
+      // failure. The map check will be patched in the runtime system.
+      __ ldr(scratch1, FieldMemOperand(receiver, HeapObject::kMapOffset));
+
+#ifdef DEBUG
+      Label check_inlined_codesize;
+      masm_->bind(&check_inlined_codesize);
+#endif
+      __ mov(scratch0, Operand(Factory::null_value()));
+      __ cmp(scratch0, scratch1);
+      deferred->Branch(ne);
+
+      int offset = 0;
+      __ str(value, MemOperand(receiver, offset));
+
+      // Update the write barrier and record its size. We do not use
+      // the RecordWrite macro here because we want the offset
+      // addition instruction first to make it easy to patch.
+      Label record_write_start, record_write_done;
+      __ bind(&record_write_start);
+      // Add offset into the object.
+      __ add(scratch0, receiver, Operand(offset));
+      // Test that the object is not in the new space.  We cannot set
+      // region marks for new space pages.
+      __ InNewSpace(receiver, scratch1, eq, &record_write_done);
+      // Record the actual write.
+      __ RecordWriteHelper(receiver, scratch0, scratch1);
+      __ bind(&record_write_done);
+      // Clobber all input registers when running with the debug-code flag
+      // turned on to provoke errors.
+      if (FLAG_debug_code) {
+        __ mov(receiver, Operand(BitCast<int32_t>(kZapValue)));
+        __ mov(scratch0, Operand(BitCast<int32_t>(kZapValue)));
+        __ mov(scratch1, Operand(BitCast<int32_t>(kZapValue)));
+      }
+      // Check that this is the first inlined write barrier or that
+      // this inlined write barrier has the same size as all the other
+      // inlined write barriers.
+      ASSERT((inlined_write_barrier_size_ == -1) ||
+             (inlined_write_barrier_size_ ==
+              masm()->InstructionsGeneratedSince(&record_write_start)));
+      inlined_write_barrier_size_ =
+          masm()->InstructionsGeneratedSince(&record_write_start);
+
+      // Make sure that the expected number of instructions are generated.
+      ASSERT_EQ(GetInlinedNamedStoreInstructionsAfterPatch(),
+                masm()->InstructionsGeneratedSince(&check_inlined_codesize));
+    }
+    deferred->BindExit();
+  }
+  ASSERT_EQ(expected_height, frame()->height());
 }
 
 
@@ -6848,7 +6981,7 @@
   // Move sign bit from source to destination.  This works because the sign bit
   // in the exponent word of the double has the same position and polarity as
   // the 2's complement sign bit in a Smi.
-  ASSERT(HeapNumber::kSignMask == 0x80000000u);
+  STATIC_ASSERT(HeapNumber::kSignMask == 0x80000000u);
   __ and_(exponent, source_, Operand(HeapNumber::kSignMask), SetCC);
   // Subtract from 0 if source was negative.
   __ rsb(source_, source_, Operand(0), LeaveCC, ne);
@@ -6901,7 +7034,7 @@
   // the_int_ has the answer which is a signed int32 but not a Smi.
   // We test for the special value that has a different exponent.  This test
   // has the neat side effect of setting the flags according to the sign.
-  ASSERT(HeapNumber::kSignMask == 0x80000000u);
+  STATIC_ASSERT(HeapNumber::kSignMask == 0x80000000u);
   __ cmp(the_int_, Operand(0x80000000u));
   __ b(eq, &max_negative_int);
   // Set up the correct exponent in scratch_.  All non-Smi int32s have the same.
@@ -7246,7 +7379,7 @@
     // If either operand is a JSObject or an oddball value, then they are
     // not equal since their pointers are different.
     // There is no test for undetectability in strict equality.
-    ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
+    STATIC_ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
     Label first_non_object;
     // Get the type of the first operand into r2 and compare it with
     // FIRST_JS_OBJECT_TYPE.
@@ -7272,8 +7405,8 @@
 
     // Now that we have the types we might as well check for symbol-symbol.
     // Ensure that no non-strings have the symbol bit set.
-    ASSERT(kNotStringTag + kIsSymbolMask > LAST_TYPE);
-    ASSERT(kSymbolTag != 0);
+    STATIC_ASSERT(LAST_TYPE < kNotStringTag + kIsSymbolMask);
+    STATIC_ASSERT(kSymbolTag != 0);
     __ and_(r2, r2, Operand(r3));
     __ tst(r2, Operand(kIsSymbolMask));
     __ b(ne, &return_not_equal);
@@ -7324,7 +7457,7 @@
   // r2 is object type of rhs.
   // Ensure that no non-strings have the symbol bit set.
   Label object_test;
-  ASSERT(kSymbolTag != 0);
+  STATIC_ASSERT(kSymbolTag != 0);
   __ tst(r2, Operand(kIsNotStringMask));
   __ b(ne, &object_test);
   __ tst(r2, Operand(kIsSymbolMask));
@@ -7395,7 +7528,7 @@
                   not_found,
                   true);
 
-      ASSERT_EQ(8, kDoubleSize);
+      STATIC_ASSERT(8 == kDoubleSize);
       __ add(scratch1,
              object,
              Operand(HeapNumber::kValueOffset - kHeapObjectTag));
@@ -7494,7 +7627,7 @@
 
   // If either is a Smi (we know that not both are), then they can only
   // be strictly equal if the other is a HeapNumber.
-  ASSERT_EQ(0, kSmiTag);
+  STATIC_ASSERT(kSmiTag == 0);
   ASSERT_EQ(0, Smi::FromInt(0));
   __ and_(r2, lhs_, Operand(rhs_));
   __ tst(r2, Operand(kSmiTagMask));
@@ -8497,7 +8630,7 @@
       Label not_smi;
       // Fast path.
       if (ShouldGenerateSmiCode()) {
-        ASSERT(kSmiTag == 0);  // Adjust code below.
+        STATIC_ASSERT(kSmiTag == 0);  // Adjust code below.
         __ tst(smi_test_reg, Operand(kSmiTagMask));
         __ b(ne, &not_smi);
         __ add(r0, r1, Operand(r0), SetCC);  // Add y optimistically.
@@ -8513,7 +8646,7 @@
       Label not_smi;
       // Fast path.
       if (ShouldGenerateSmiCode()) {
-        ASSERT(kSmiTag == 0);  // Adjust code below.
+        STATIC_ASSERT(kSmiTag == 0);  // Adjust code below.
         __ tst(smi_test_reg, Operand(kSmiTagMask));
         __ b(ne, &not_smi);
         if (lhs.is(r1)) {
@@ -8535,7 +8668,7 @@
     case Token::MUL: {
       Label not_smi, slow;
       if (ShouldGenerateSmiCode()) {
-        ASSERT(kSmiTag == 0);  // adjust code below
+        STATIC_ASSERT(kSmiTag == 0);  // adjust code below
         __ tst(smi_test_reg, Operand(kSmiTagMask));
         Register scratch2 = smi_test_reg;
         smi_test_reg = no_reg;
@@ -8671,7 +8804,7 @@
         Label slow;
         Label not_power_of_2;
         ASSERT(!ShouldGenerateSmiCode());
-        ASSERT(kSmiTag == 0);  // Adjust code below.
+        STATIC_ASSERT(kSmiTag == 0);  // Adjust code below.
         // Check for two positive smis.
         __ orr(smi_test_reg, lhs, Operand(rhs));
         __ tst(smi_test_reg, Operand(0x80000000u | kSmiTagMask));
@@ -8731,7 +8864,7 @@
     case Token::SHR:
     case Token::SHL: {
       Label slow;
-      ASSERT(kSmiTag == 0);  // adjust code below
+      STATIC_ASSERT(kSmiTag == 0);  // adjust code below
       __ tst(smi_test_reg, Operand(kSmiTagMask));
       __ b(ne, &slow);
       Register scratch2 = smi_test_reg;
@@ -9045,17 +9178,17 @@
   // r0 holds the exception.
 
   // Adjust this code if not the case.
-  ASSERT(StackHandlerConstants::kSize == 4 * kPointerSize);
+  STATIC_ASSERT(StackHandlerConstants::kSize == 4 * kPointerSize);
 
   // Drop the sp to the top of the handler.
   __ mov(r3, Operand(ExternalReference(Top::k_handler_address)));
   __ ldr(sp, MemOperand(r3));
 
   // Restore the next handler and frame pointer, discard handler state.
-  ASSERT(StackHandlerConstants::kNextOffset == 0);
+  STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
   __ pop(r2);
   __ str(r2, MemOperand(r3));
-  ASSERT(StackHandlerConstants::kFPOffset == 2 * kPointerSize);
+  STATIC_ASSERT(StackHandlerConstants::kFPOffset == 2 * kPointerSize);
   __ ldm(ia_w, sp, r3.bit() | fp.bit());  // r3: discarded state.
 
   // Before returning we restore the context from the frame pointer if
@@ -9071,7 +9204,7 @@
     __ mov(lr, Operand(pc));
   }
 #endif
-  ASSERT(StackHandlerConstants::kPCOffset == 3 * kPointerSize);
+  STATIC_ASSERT(StackHandlerConstants::kPCOffset == 3 * kPointerSize);
   __ pop(pc);
 }
 
@@ -9079,7 +9212,7 @@
 void CEntryStub::GenerateThrowUncatchable(MacroAssembler* masm,
                                           UncatchableExceptionType type) {
   // Adjust this code if not the case.
-  ASSERT(StackHandlerConstants::kSize == 4 * kPointerSize);
+  STATIC_ASSERT(StackHandlerConstants::kSize == 4 * kPointerSize);
 
   // Drop sp to the top stack handler.
   __ mov(r3, Operand(ExternalReference(Top::k_handler_address)));
@@ -9100,7 +9233,7 @@
   __ bind(&done);
 
   // Set the top handler address to next handler past the current ENTRY handler.
-  ASSERT(StackHandlerConstants::kNextOffset == 0);
+  STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
   __ pop(r2);
   __ str(r2, MemOperand(r3));
 
@@ -9124,7 +9257,7 @@
   //         lr
 
   // Discard handler state (r2 is not used) and restore frame pointer.
-  ASSERT(StackHandlerConstants::kFPOffset == 2 * kPointerSize);
+  STATIC_ASSERT(StackHandlerConstants::kFPOffset == 2 * kPointerSize);
   __ ldm(ia_w, sp, r2.bit() | fp.bit());  // r2: discarded state.
   // Before returning we restore the context from the frame pointer if
   // not NULL.  The frame pointer is NULL in the exception handler of a
@@ -9139,7 +9272,7 @@
     __ mov(lr, Operand(pc));
   }
 #endif
-  ASSERT(StackHandlerConstants::kPCOffset == 3 * kPointerSize);
+  STATIC_ASSERT(StackHandlerConstants::kPCOffset == 3 * kPointerSize);
   __ pop(pc);
 }
 
@@ -9234,7 +9367,7 @@
 
   // check for failure result
   Label failure_returned;
-  ASSERT(((kFailureTag + 1) & kFailureTagMask) == 0);
+  STATIC_ASSERT(((kFailureTag + 1) & kFailureTagMask) == 0);
   // Lower 2 bits of r2 are 0 iff r0 has failure tag.
   __ add(r2, r0, Operand(1));
   __ tst(r2, Operand(kFailureTagMask));
@@ -9249,7 +9382,7 @@
   // check if we should retry or throw exception
   Label retry;
   __ bind(&failure_returned);
-  ASSERT(Failure::RETRY_AFTER_GC == 0);
+  STATIC_ASSERT(Failure::RETRY_AFTER_GC == 0);
   __ tst(r0, Operand(((1 << kFailureTypeTagSize) - 1) << kFailureTagSize));
   __ b(eq, &retry);
 
@@ -9652,12 +9785,12 @@
   }
 
   // Setup the callee in-object property.
-  ASSERT(Heap::arguments_callee_index == 0);
+  STATIC_ASSERT(Heap::arguments_callee_index == 0);
   __ ldr(r3, MemOperand(sp, 2 * kPointerSize));
   __ str(r3, FieldMemOperand(r0, JSObject::kHeaderSize));
 
   // Get the length (smi tagged) and set that as an in-object property too.
-  ASSERT(Heap::arguments_length_index == 1);
+  STATIC_ASSERT(Heap::arguments_length_index == 1);
   __ ldr(r1, MemOperand(sp, 0 * kPointerSize));
   __ str(r1, FieldMemOperand(r0, JSObject::kHeaderSize + kPointerSize));
 
@@ -9749,7 +9882,7 @@
 
   // Check that the first argument is a JSRegExp object.
   __ ldr(r0, MemOperand(sp, kJSRegExpOffset));
-  ASSERT_EQ(0, kSmiTag);
+  STATIC_ASSERT(kSmiTag == 0);
   __ tst(r0, Operand(kSmiTagMask));
   __ b(eq, &runtime);
   __ CompareObjectType(r0, r1, r1, JS_REGEXP_TYPE);
@@ -9776,8 +9909,8 @@
          FieldMemOperand(regexp_data, JSRegExp::kIrregexpCaptureCountOffset));
   // Calculate number of capture registers (number_of_captures + 1) * 2. This
   // uses the assumption that smis are 2 * their untagged value.
-  ASSERT_EQ(0, kSmiTag);
-  ASSERT_EQ(1, kSmiTagSize + kSmiShiftSize);
+  STATIC_ASSERT(kSmiTag == 0);
+  STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1);
   __ add(r2, r2, Operand(2));  // r2 was a smi.
   // Check that the static offsets vector buffer is large enough.
   __ cmp(r2, Operand(OffsetsVector::kStaticOffsetsVectorSize));
@@ -9838,7 +9971,7 @@
   __ ldrb(r0, FieldMemOperand(r0, Map::kInstanceTypeOffset));
   // First check for flat string.
   __ tst(r0, Operand(kIsNotStringMask | kStringRepresentationMask));
-  ASSERT_EQ(0, kStringTag | kSeqStringTag);
+  STATIC_ASSERT((kStringTag | kSeqStringTag) == 0);
   __ b(eq, &seq_string);
 
   // subject: Subject string
@@ -9848,8 +9981,8 @@
   // string. In that case the subject string is just the first part of the cons
   // string. Also in this case the first part of the cons string is known to be
   // a sequential string or an external string.
-  ASSERT(kExternalStringTag !=0);
-  ASSERT_EQ(0, kConsStringTag & kExternalStringTag);
+  STATIC_ASSERT(kExternalStringTag != 0);
+  STATIC_ASSERT((kConsStringTag & kExternalStringTag) == 0);
   __ tst(r0, Operand(kIsNotStringMask | kExternalStringTag));
   __ b(ne, &runtime);
   __ ldr(r0, FieldMemOperand(subject, ConsString::kSecondOffset));
@@ -9860,7 +9993,7 @@
   __ ldr(r0, FieldMemOperand(subject, HeapObject::kMapOffset));
   __ ldrb(r0, FieldMemOperand(r0, Map::kInstanceTypeOffset));
   // Is first part a flat string?
-  ASSERT_EQ(0, kSeqStringTag);
+  STATIC_ASSERT(kSeqStringTag == 0);
   __ tst(r0, Operand(kStringRepresentationMask));
   __ b(nz, &runtime);
 
@@ -9868,8 +10001,8 @@
   // subject: Subject string
   // regexp_data: RegExp data (FixedArray)
   // r0: Instance type of subject string
-  ASSERT_EQ(4, kAsciiStringTag);
-  ASSERT_EQ(0, kTwoByteStringTag);
+  STATIC_ASSERT(4 == kAsciiStringTag);
+  STATIC_ASSERT(kTwoByteStringTag == 0);
   // Find the code object based on the assumptions above.
   __ and_(r0, r0, Operand(kStringEncodingMask));
   __ mov(r3, Operand(r0, ASR, 2), SetCC);
@@ -9923,7 +10056,7 @@
   // calculate the shift of the index (0 for ASCII and 1 for two byte).
   __ ldr(r0, FieldMemOperand(subject, String::kLengthOffset));
   __ mov(r0, Operand(r0, ASR, kSmiTagSize));
-  ASSERT_EQ(SeqAsciiString::kHeaderSize, SeqTwoByteString::kHeaderSize);
+  STATIC_ASSERT(SeqAsciiString::kHeaderSize == SeqTwoByteString::kHeaderSize);
   __ add(r9, subject, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
   __ eor(r3, r3, Operand(1));
   // Argument 4 (r3): End of string data
@@ -9978,8 +10111,8 @@
   __ ldr(r1,
          FieldMemOperand(regexp_data, JSRegExp::kIrregexpCaptureCountOffset));
   // Calculate number of capture registers (number_of_captures + 1) * 2.
-  ASSERT_EQ(0, kSmiTag);
-  ASSERT_EQ(1, kSmiTagSize + kSmiShiftSize);
+  STATIC_ASSERT(kSmiTag == 0);
+  STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1);
   __ add(r1, r1, Operand(2));  // r1 was a smi.
 
   // r1: number of capture registers
@@ -10191,7 +10324,7 @@
   __ b(ls, index_out_of_range_);
 
   // We need special handling for non-flat strings.
-  ASSERT(kSeqStringTag == 0);
+  STATIC_ASSERT(kSeqStringTag == 0);
   __ tst(result_, Operand(kStringRepresentationMask));
   __ b(eq, &flat_string);
 
@@ -10213,13 +10346,13 @@
   __ ldr(result_, FieldMemOperand(object_, HeapObject::kMapOffset));
   __ ldrb(result_, FieldMemOperand(result_, Map::kInstanceTypeOffset));
   // If the first cons component is also non-flat, then go to runtime.
-  ASSERT(kSeqStringTag == 0);
+  STATIC_ASSERT(kSeqStringTag == 0);
   __ tst(result_, Operand(kStringRepresentationMask));
   __ b(nz, &call_runtime_);
 
   // Check for 1-byte or 2-byte string.
   __ bind(&flat_string);
-  ASSERT(kAsciiStringTag != 0);
+  STATIC_ASSERT(kAsciiStringTag != 0);
   __ tst(result_, Operand(kStringEncodingMask));
   __ b(nz, &ascii_string);
 
@@ -10227,7 +10360,7 @@
   // Load the 2-byte character code into the result register. We can
   // add without shifting since the smi tag size is the log2 of the
   // number of bytes in a two-byte character.
-  ASSERT(kSmiTag == 0 && kSmiTagSize == 1 && kSmiShiftSize == 0);
+  STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize == 1 && kSmiShiftSize == 0);
   __ add(scratch_, object_, Operand(scratch_));
   __ ldrh(result_, FieldMemOperand(scratch_, SeqTwoByteString::kHeaderSize));
   __ jmp(&got_char_code);
@@ -10304,8 +10437,8 @@
 
 void StringCharFromCodeGenerator::GenerateFast(MacroAssembler* masm) {
   // Fast case of Heap::LookupSingleCharacterStringFromCode.
-  ASSERT(kSmiTag == 0);
-  ASSERT(kSmiShiftSize == 0);
+  STATIC_ASSERT(kSmiTag == 0);
+  STATIC_ASSERT(kSmiShiftSize == 0);
   ASSERT(IsPowerOf2(String::kMaxAsciiCharCode + 1));
   __ tst(code_,
          Operand(kSmiTagMask |
@@ -10314,7 +10447,7 @@
 
   __ LoadRoot(result_, Heap::kSingleCharacterStringCacheRootIndex);
   // At this point code register contains smi tagged ascii char code.
-  ASSERT(kSmiTag == 0);
+  STATIC_ASSERT(kSmiTag == 0);
   __ add(result_, result_, Operand(code_, LSL, kPointerSizeLog2 - kSmiTagSize));
   __ ldr(result_, FieldMemOperand(result_, FixedArray::kHeaderSize));
   __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
@@ -10419,7 +10552,7 @@
   // Ensure that reading an entire aligned word containing the last character
   // of a string will not read outside the allocated area (because we pad up
   // to kObjectAlignment).
-  ASSERT(kObjectAlignment >= kReadAlignment);
+  STATIC_ASSERT(kObjectAlignment >= kReadAlignment);
   // Assumes word reads and writes are little endian.
   // Nothing to do for zero characters.
   Label done;
@@ -10623,7 +10756,7 @@
     __ and_(candidate, candidate, Operand(mask));
 
     // Load the entry from the symbol table.
-    ASSERT_EQ(1, SymbolTable::kEntrySize);
+    STATIC_ASSERT(SymbolTable::kEntrySize == 1);
     __ ldr(candidate,
            MemOperand(first_symbol_table_element,
                       candidate,
@@ -10723,8 +10856,8 @@
   // Check bounds and smi-ness.
   __ ldr(r7, MemOperand(sp, kToOffset));
   __ ldr(r6, MemOperand(sp, kFromOffset));
-  ASSERT_EQ(0, kSmiTag);
-  ASSERT_EQ(1, kSmiTagSize + kSmiShiftSize);
+  STATIC_ASSERT(kSmiTag == 0);
+  STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1);
   // I.e., arithmetic shift right by one un-smi-tags.
   __ mov(r2, Operand(r7, ASR, 1), SetCC);
   __ mov(r3, Operand(r6, ASR, 1), SetCC, cc);
@@ -10747,7 +10880,7 @@
 
   // Make sure first argument is a sequential (or flat) string.
   __ ldr(r5, MemOperand(sp, kStringOffset));
-  ASSERT_EQ(0, kSmiTag);
+  STATIC_ASSERT(kSmiTag == 0);
   __ tst(r5, Operand(kSmiTagMask));
   __ b(eq, &runtime);
   Condition is_string = masm->IsObjectStringType(r5, r1);
@@ -10761,8 +10894,8 @@
   // r7: to (smi)
   Label seq_string;
   __ and_(r4, r1, Operand(kStringRepresentationMask));
-  ASSERT(kSeqStringTag < kConsStringTag);
-  ASSERT(kExternalStringTag > kConsStringTag);
+  STATIC_ASSERT(kSeqStringTag < kConsStringTag);
+  STATIC_ASSERT(kConsStringTag < kExternalStringTag);
   __ cmp(r4, Operand(kConsStringTag));
   __ b(gt, &runtime);  // External strings go to runtime.
   __ b(lt, &seq_string);  // Sequential strings are handled directly.
@@ -10774,7 +10907,7 @@
   __ ldr(r4, FieldMemOperand(r5, HeapObject::kMapOffset));
   __ ldrb(r1, FieldMemOperand(r4, Map::kInstanceTypeOffset));
   __ tst(r1, Operand(kStringRepresentationMask));
-  ASSERT_EQ(0, kSeqStringTag);
+  STATIC_ASSERT(kSeqStringTag == 0);
   __ b(ne, &runtime);  // Cons and External strings go to runtime.
 
   // Definitely a sequential string.
@@ -10798,7 +10931,7 @@
   // Check for flat ascii string.
   Label non_ascii_flat;
   __ tst(r1, Operand(kStringEncodingMask));
-  ASSERT_EQ(0, kTwoByteStringTag);
+  STATIC_ASSERT(kTwoByteStringTag == 0);
   __ b(eq, &non_ascii_flat);
 
   Label result_longer_than_two;
@@ -10847,7 +10980,7 @@
   // r1: first character of result string.
   // r2: result string length.
   // r5: first character of sub string to copy.
-  ASSERT_EQ(0, SeqAsciiString::kHeaderSize & kObjectAlignmentMask);
+  STATIC_ASSERT((SeqAsciiString::kHeaderSize & kObjectAlignmentMask) == 0);
   StringHelper::GenerateCopyCharactersLong(masm, r1, r5, r2, r3, r4, r6, r7, r9,
                                            COPY_ASCII | DEST_ALWAYS_ALIGNED);
   __ IncrementCounter(&Counters::sub_string_native, 1, r3, r4);
@@ -10878,7 +11011,7 @@
   // r1: first character of result.
   // r2: result length.
   // r5: first character of string to copy.
-  ASSERT_EQ(0, SeqTwoByteString::kHeaderSize & kObjectAlignmentMask);
+  STATIC_ASSERT((SeqTwoByteString::kHeaderSize & kObjectAlignmentMask) == 0);
   StringHelper::GenerateCopyCharactersLong(masm, r1, r5, r2, r3, r4, r6, r7, r9,
                                            DEST_ALWAYS_ALIGNED);
   __ IncrementCounter(&Counters::sub_string_native, 1, r3, r4);
@@ -10906,7 +11039,7 @@
   Register length_delta = scratch3;
   __ mov(scratch1, scratch2, LeaveCC, gt);
   Register min_length = scratch1;
-  ASSERT(kSmiTag == 0);
+  STATIC_ASSERT(kSmiTag == 0);
   __ tst(min_length, Operand(min_length));
   __ b(eq, &compare_lengths);
 
@@ -10962,8 +11095,8 @@
   Label not_same;
   __ cmp(r0, r1);
   __ b(ne, &not_same);
-  ASSERT_EQ(0, EQUAL);
-  ASSERT_EQ(0, kSmiTag);
+  STATIC_ASSERT(EQUAL == 0);
+  STATIC_ASSERT(kSmiTag == 0);
   __ mov(r0, Operand(Smi::FromInt(EQUAL)));
   __ IncrementCounter(&Counters::string_compare_native, 1, r1, r2);
   __ add(sp, sp, Operand(2 * kPointerSize));
@@ -10998,14 +11131,14 @@
 
   // Make sure that both arguments are strings if not known in advance.
   if (string_check_) {
-    ASSERT_EQ(0, kSmiTag);
+    STATIC_ASSERT(kSmiTag == 0);
     __ JumpIfEitherSmi(r0, r1, &string_add_runtime);
     // Load instance types.
     __ ldr(r4, FieldMemOperand(r0, HeapObject::kMapOffset));
     __ ldr(r5, FieldMemOperand(r1, HeapObject::kMapOffset));
     __ ldrb(r4, FieldMemOperand(r4, Map::kInstanceTypeOffset));
     __ ldrb(r5, FieldMemOperand(r5, Map::kInstanceTypeOffset));
-    ASSERT_EQ(0, kStringTag);
+    STATIC_ASSERT(kStringTag == 0);
     // If either is not a string, go to runtime.
     __ tst(r4, Operand(kIsNotStringMask));
     __ tst(r5, Operand(kIsNotStringMask), eq);
@@ -11022,10 +11155,10 @@
     // Check if either of the strings are empty. In that case return the other.
     __ ldr(r2, FieldMemOperand(r0, String::kLengthOffset));
     __ ldr(r3, FieldMemOperand(r1, String::kLengthOffset));
-    ASSERT(kSmiTag == 0);
+    STATIC_ASSERT(kSmiTag == 0);
     __ cmp(r2, Operand(Smi::FromInt(0)));  // Test if first string is empty.
     __ mov(r0, Operand(r1), LeaveCC, eq);  // If first is empty, return second.
-    ASSERT(kSmiTag == 0);
+    STATIC_ASSERT(kSmiTag == 0);
      // Else test if second string is empty.
     __ cmp(r3, Operand(Smi::FromInt(0)), ne);
     __ b(ne, &strings_not_empty);  // If either string was empty, return r0.
@@ -11049,7 +11182,7 @@
   // Look at the length of the result of adding the two strings.
   Label string_add_flat_result, longer_than_two;
   // Adding two lengths can't overflow.
-  ASSERT(String::kMaxLength * 2 > String::kMaxLength);
+  STATIC_ASSERT(String::kMaxLength < String::kMaxLength * 2);
   __ add(r6, r2, Operand(r3));
   // Use the runtime system when adding two one character strings, as it
   // contains optimizations for this specific case using the symbol table.
@@ -11097,7 +11230,7 @@
   __ cmp(r6, Operand(String::kMinNonFlatLength));
   __ b(lt, &string_add_flat_result);
   // Handle exceptionally long strings in the runtime system.
-  ASSERT((String::kMaxLength & 0x80000000) == 0);
+  STATIC_ASSERT((String::kMaxLength & 0x80000000) == 0);
   ASSERT(IsPowerOf2(String::kMaxLength + 1));
   // kMaxLength + 1 is representable as shifted literal, kMaxLength is not.
   __ cmp(r6, Operand(String::kMaxLength + 1));
@@ -11112,7 +11245,7 @@
     __ ldrb(r5, FieldMemOperand(r5, Map::kInstanceTypeOffset));
   }
   Label non_ascii, allocated, ascii_data;
-  ASSERT_EQ(0, kTwoByteStringTag);
+  STATIC_ASSERT(kTwoByteStringTag == 0);
   __ tst(r4, Operand(kStringEncodingMask));
   __ tst(r5, Operand(kStringEncodingMask), ne);
   __ b(eq, &non_ascii);
@@ -11138,7 +11271,7 @@
   __ tst(r5, Operand(kAsciiDataHintMask), ne);
   __ b(ne, &ascii_data);
   __ eor(r4, r4, Operand(r5));
-  ASSERT(kAsciiStringTag != 0 && kAsciiDataHintTag != 0);
+  STATIC_ASSERT(kAsciiStringTag != 0 && kAsciiDataHintTag != 0);
   __ and_(r4, r4, Operand(kAsciiStringTag | kAsciiDataHintTag));
   __ cmp(r4, Operand(kAsciiStringTag | kAsciiDataHintTag));
   __ b(eq, &ascii_data);
@@ -11164,7 +11297,7 @@
     __ ldrb(r5, FieldMemOperand(r5, Map::kInstanceTypeOffset));
   }
   // Check that both strings are sequential.
-  ASSERT_EQ(0, kSeqStringTag);
+  STATIC_ASSERT(kSeqStringTag == 0);
   __ tst(r4, Operand(kStringRepresentationMask));
   __ tst(r5, Operand(kStringRepresentationMask), eq);
   __ b(ne, &string_add_runtime);
diff --git a/src/arm/codegen-arm.h b/src/arm/codegen-arm.h
index 83685d8..492e000 100644
--- a/src/arm/codegen-arm.h
+++ b/src/arm/codegen-arm.h
@@ -281,6 +281,10 @@
     return FLAG_debug_code ? 27 : 13;
   }
   static const int kInlinedKeyedStoreInstructionsAfterPatch = 5;
+  static int GetInlinedNamedStoreInstructionsAfterPatch() {
+    ASSERT(inlined_write_barrier_size_ != -1);
+    return inlined_write_barrier_size_ + 4;
+  }
 
  private:
   // Construction/Destruction
@@ -586,6 +590,9 @@
   // to some unlinking code).
   bool function_return_is_shadowed_;
 
+  // Size of inlined write barriers generated by EmitNamedStore.
+  static int inlined_write_barrier_size_;
+
   static InlineRuntimeLUT kInlineRuntimeLUT[];
 
   friend class VirtualFrame;
diff --git a/src/arm/ic-arm.cc b/src/arm/ic-arm.cc
index 144f8e3..e7e3de3 100644
--- a/src/arm/ic-arm.cc
+++ b/src/arm/ic-arm.cc
@@ -989,8 +989,49 @@
 
 
 bool StoreIC::PatchInlinedStore(Address address, Object* map, int offset) {
-  // TODO(787): Implement inline stores on arm.
-  return false;
+  // Find the end of the inlined code for the store if there is an
+  // inlined version of the store.
+  Address inline_end_address;
+  if (!IsInlinedICSite(address, &inline_end_address)) return false;
+
+  // Compute the address of the map load instruction.
+  Address ldr_map_instr_address =
+      inline_end_address -
+      (CodeGenerator::GetInlinedNamedStoreInstructionsAfterPatch() *
+       Assembler::kInstrSize);
+
+  // Update the offsets if initializing the inlined store. No reason
+  // to update the offsets when clearing the inlined version because
+  // it will bail out in the map check.
+  if (map != Heap::null_value()) {
+    // Patch the offset in the actual store instruction.
+    Address str_property_instr_address =
+        ldr_map_instr_address + 3 * Assembler::kInstrSize;
+    Instr str_property_instr = Assembler::instr_at(str_property_instr_address);
+    ASSERT(Assembler::IsStrRegisterImmediate(str_property_instr));
+    str_property_instr = Assembler::SetStrRegisterImmediateOffset(
+        str_property_instr, offset - kHeapObjectTag);
+    Assembler::instr_at_put(str_property_instr_address, str_property_instr);
+
+    // Patch the offset in the add instruction that is part of the
+    // write barrier.
+    Address add_offset_instr_address =
+        str_property_instr_address + Assembler::kInstrSize;
+    Instr add_offset_instr = Assembler::instr_at(add_offset_instr_address);
+    ASSERT(Assembler::IsAddRegisterImmediate(add_offset_instr));
+    add_offset_instr = Assembler::SetAddRegisterImmediateOffset(
+        add_offset_instr, offset - kHeapObjectTag);
+    Assembler::instr_at_put(add_offset_instr_address, add_offset_instr);
+
+    // Indicate that code has changed.
+    CPU::FlushICache(str_property_instr_address, 2 * Assembler::kInstrSize);
+  }
+
+  // Patch the map check.
+  Assembler::set_target_address_at(ldr_map_instr_address,
+                                   reinterpret_cast<Address>(map));
+
+  return true;
 }
 
 
diff --git a/src/ia32/codegen-ia32.cc b/src/ia32/codegen-ia32.cc
index 1286aa4..9a11075 100644
--- a/src/ia32/codegen-ia32.cc
+++ b/src/ia32/codegen-ia32.cc
@@ -905,7 +905,7 @@
       __ AbortIfNotNumber(value.reg());
     }
     // Smi => false iff zero.
-    ASSERT(kSmiTag == 0);
+    STATIC_ASSERT(kSmiTag == 0);
     __ test(value.reg(), Operand(value.reg()));
     dest->false_target()->Branch(zero);
     __ test(value.reg(), Immediate(kSmiTagMask));
@@ -930,7 +930,7 @@
     dest->false_target()->Branch(equal);
 
     // Smi => false iff zero.
-    ASSERT(kSmiTag == 0);
+    STATIC_ASSERT(kSmiTag == 0);
     __ test(value.reg(), Operand(value.reg()));
     dest->false_target()->Branch(zero);
     __ test(value.reg(), Immediate(kSmiTagMask));
@@ -1169,7 +1169,7 @@
                                   const Result& left) {
   // Set TypeInfo of result according to the operation performed.
   // Rely on the fact that smis have a 31 bit payload on ia32.
-  ASSERT(kSmiValueSize == 31);
+  STATIC_ASSERT(kSmiValueSize == 31);
   switch (op) {
     case Token::COMMA:
       return right.type_info();
@@ -1445,6 +1445,55 @@
 }
 
 
+void CodeGenerator::JumpIfBothSmiUsingTypeInfo(Result* left,
+                                               Result* right,
+                                               JumpTarget* both_smi) {
+  TypeInfo left_info = left->type_info();
+  TypeInfo right_info = right->type_info();
+  if (left_info.IsDouble() || left_info.IsString() ||
+      right_info.IsDouble() || right_info.IsString()) {
+    // We know that left and right are not both smi.  Don't do any tests.
+    return;
+  }
+
+  if (left->reg().is(right->reg())) {
+    if (!left_info.IsSmi()) {
+      __ test(left->reg(), Immediate(kSmiTagMask));
+      both_smi->Branch(zero);
+    } else {
+      if (FLAG_debug_code) __ AbortIfNotSmi(left->reg());
+      left->Unuse();
+      right->Unuse();
+      both_smi->Jump();
+    }
+  } else if (!left_info.IsSmi()) {
+    if (!right_info.IsSmi()) {
+      Result temp = allocator_->Allocate();
+      ASSERT(temp.is_valid());
+      __ mov(temp.reg(), left->reg());
+      __ or_(temp.reg(), Operand(right->reg()));
+      __ test(temp.reg(), Immediate(kSmiTagMask));
+      temp.Unuse();
+      both_smi->Branch(zero);
+    } else {
+      __ test(left->reg(), Immediate(kSmiTagMask));
+      both_smi->Branch(zero);
+    }
+  } else {
+    if (FLAG_debug_code) __ AbortIfNotSmi(left->reg());
+    if (!right_info.IsSmi()) {
+      __ test(right->reg(), Immediate(kSmiTagMask));
+      both_smi->Branch(zero);
+    } else {
+      if (FLAG_debug_code) __ AbortIfNotSmi(right->reg());
+      left->Unuse();
+      right->Unuse();
+      both_smi->Jump();
+    }
+  }
+}
+
+
 void CodeGenerator::JumpIfNotBothSmiUsingTypeInfo(Register left,
                                                   Register right,
                                                   Register scratch,
@@ -1599,7 +1648,7 @@
       // Check for the corner case of dividing the most negative smi by
       // -1. We cannot use the overflow flag, since it is not set by
       // idiv instruction.
-      ASSERT(kSmiTag == 0 && kSmiTagSize == 1);
+      STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize == 1);
       __ cmp(eax, 0x40000000);
       deferred->Branch(equal);
       // Check that the remainder is zero.
@@ -1789,7 +1838,7 @@
 
     case Token::MUL: {
       // If the smi tag is 0 we can just leave the tag on one operand.
-      ASSERT(kSmiTag == 0);  // Adjust code below if not the case.
+      STATIC_ASSERT(kSmiTag == 0);  // Adjust code below if not the case.
       // Remove smi tag from the left operand (but keep sign).
       // Left-hand operand has been copied into answer.
       __ SmiUntag(answer.reg());
@@ -2296,13 +2345,13 @@
             __ AbortIfNotSmi(operand->reg());
           }
           __ mov(answer.reg(), operand->reg());
-          ASSERT(kSmiTag == 0);  // adjust code if not the case
+          STATIC_ASSERT(kSmiTag == 0);  // adjust code if not the case
           // We do no shifts, only the Smi conversion, if shift_value is 1.
           if (shift_value > 1) {
             __ shl(answer.reg(), shift_value - 1);
           }
           // Convert int result to Smi, checking that it is in int range.
-          ASSERT(kSmiTagSize == 1);  // adjust code if not the case
+          STATIC_ASSERT(kSmiTagSize == 1);  // adjust code if not the case
           __ add(answer.reg(), Operand(answer.reg()));
           deferred->Branch(overflow);
           deferred->BindExit();
@@ -2370,8 +2419,8 @@
                                            overwrite_mode);
         // Check that lowest log2(value) bits of operand are zero, and test
         // smi tag at the same time.
-        ASSERT_EQ(0, kSmiTag);
-        ASSERT_EQ(1, kSmiTagSize);
+        STATIC_ASSERT(kSmiTag == 0);
+        STATIC_ASSERT(kSmiTagSize == 1);
         __ test(operand->reg(), Immediate(3));
         deferred->Branch(not_zero);  // Branch if non-smi or odd smi.
         __ sar(operand->reg(), 1);
@@ -2605,9 +2654,9 @@
       // side (which is always a symbol).
       if (cc == equal) {
         Label not_a_symbol;
-        ASSERT(kSymbolTag != 0);
+        STATIC_ASSERT(kSymbolTag != 0);
         // Ensure that no non-strings have the symbol bit set.
-        ASSERT(kNotStringTag + kIsSymbolMask > LAST_TYPE);
+        STATIC_ASSERT(LAST_TYPE < kNotStringTag + kIsSymbolMask);
         __ test(temp.reg(), Immediate(kIsSymbolMask));  // Test the symbol bit.
         __ j(zero, &not_a_symbol);
         // They are symbols, so do identity compare.
@@ -2735,42 +2784,44 @@
       Register right_reg = right_side.reg();
 
       // In-line check for comparing two smis.
-      Result temp = allocator_->Allocate();
-      ASSERT(temp.is_valid());
-      __ mov(temp.reg(), left_side.reg());
-      __ or_(temp.reg(), Operand(right_side.reg()));
-      __ test(temp.reg(), Immediate(kSmiTagMask));
-      temp.Unuse();
-      is_smi.Branch(zero, taken);
+      JumpIfBothSmiUsingTypeInfo(&left_side, &right_side, &is_smi);
 
-      // Inline the equality check if both operands can't be a NaN. If both
-      // objects are the same they are equal.
-      if (nan_info == kCantBothBeNaN && cc == equal) {
+      if (has_valid_frame()) {
+        // Inline the equality check if both operands can't be a NaN. If both
+        // objects are the same they are equal.
+        if (nan_info == kCantBothBeNaN && cc == equal) {
+          __ cmp(left_side.reg(), Operand(right_side.reg()));
+          dest->true_target()->Branch(equal);
+        }
+
+        // Inlined number comparison:
+        if (inline_number_compare) {
+          GenerateInlineNumberComparison(&left_side, &right_side, cc, dest);
+        }
+
+        // End of in-line compare, call out to the compare stub. Don't include
+        // number comparison in the stub if it was inlined.
+        CompareStub stub(cc, strict, nan_info, !inline_number_compare);
+        Result answer = frame_->CallStub(&stub, &left_side, &right_side);
+        __ test(answer.reg(), Operand(answer.reg()));
+        answer.Unuse();
+        if (is_smi.is_linked()) {
+          dest->true_target()->Branch(cc);
+          dest->false_target()->Jump();
+        } else {
+          dest->Split(cc);
+        }
+      }
+
+      if (is_smi.is_linked()) {
+        is_smi.Bind();
+        left_side = Result(left_reg);
+        right_side = Result(right_reg);
         __ cmp(left_side.reg(), Operand(right_side.reg()));
-        dest->true_target()->Branch(equal);
+        right_side.Unuse();
+        left_side.Unuse();
+        dest->Split(cc);
       }
-
-      // Inlined number comparison:
-      if (inline_number_compare) {
-        GenerateInlineNumberComparison(&left_side, &right_side, cc, dest);
-      }
-
-      // End of in-line compare, call out to the compare stub. Don't include
-      // number comparison in the stub if it was inlined.
-      CompareStub stub(cc, strict, nan_info, !inline_number_compare);
-      Result answer = frame_->CallStub(&stub, &left_side, &right_side);
-      __ test(answer.reg(), Operand(answer.reg()));
-      answer.Unuse();
-      dest->true_target()->Branch(cc);
-      dest->false_target()->Jump();
-
-      is_smi.Bind();
-      left_side = Result(left_reg);
-      right_side = Result(right_reg);
-      __ cmp(left_side.reg(), Operand(right_side.reg()));
-      right_side.Unuse();
-      left_side.Unuse();
-      dest->Split(cc);
     }
   }
 }
@@ -3151,8 +3202,8 @@
       // JS_FUNCTION_TYPE is the last instance type and it is right
       // after LAST_JS_OBJECT_TYPE, we do not have to check the upper
       // bound.
-      ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
-      ASSERT(JS_FUNCTION_TYPE == LAST_JS_OBJECT_TYPE + 1);
+      STATIC_ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
+      STATIC_ASSERT(JS_FUNCTION_TYPE == LAST_JS_OBJECT_TYPE + 1);
       __ CmpObjectType(eax, FIRST_JS_OBJECT_TYPE, ecx);
       __ j(below, &build_args);
 
@@ -4476,7 +4527,7 @@
     // The next handler address is on top of the frame.  Unlink from
     // the handler list and drop the rest of this handler from the
     // frame.
-    ASSERT(StackHandlerConstants::kNextOffset == 0);
+    STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
     frame_->EmitPop(Operand::StaticVariable(handler_address));
     frame_->Drop(StackHandlerConstants::kSize / kPointerSize - 1);
     if (has_unlinks) {
@@ -4507,7 +4558,7 @@
       __ mov(esp, Operand::StaticVariable(handler_address));
       frame_->Forget(frame_->height() - handler_height);
 
-      ASSERT(StackHandlerConstants::kNextOffset == 0);
+      STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
       frame_->EmitPop(Operand::StaticVariable(handler_address));
       frame_->Drop(StackHandlerConstants::kSize / kPointerSize - 1);
 
@@ -4593,7 +4644,7 @@
   // chain and set the state on the frame to FALLING.
   if (has_valid_frame()) {
     // The next handler address is on top of the frame.
-    ASSERT(StackHandlerConstants::kNextOffset == 0);
+    STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
     frame_->EmitPop(Operand::StaticVariable(handler_address));
     frame_->Drop(StackHandlerConstants::kSize / kPointerSize - 1);
 
@@ -4632,7 +4683,7 @@
       frame_->Forget(frame_->height() - handler_height);
 
       // Unlink this handler and drop it from the frame.
-      ASSERT(StackHandlerConstants::kNextOffset == 0);
+      STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
       frame_->EmitPop(Operand::StaticVariable(handler_address));
       frame_->Drop(StackHandlerConstants::kSize / kPointerSize - 1);
 
@@ -5339,13 +5390,13 @@
           // Duplicate the object as the IC receiver.
           frame_->Dup();
           Load(property->value());
-          Result dummy = frame_->CallStoreIC(Handle<String>::cast(key), false);
+          Result ignored =
+              frame_->CallStoreIC(Handle<String>::cast(key), false);
           // A test eax instruction following the store IC call would
           // indicate the presence of an inlined version of the
           // store. Add a nop to indicate that there is no such
           // inlined version.
           __ nop();
-          dummy.Unuse();
           break;
         }
         // Fall through
@@ -6573,8 +6624,8 @@
   // As long as JS_FUNCTION_TYPE is the last instance type and it is
   // right after LAST_JS_OBJECT_TYPE, we can avoid checking for
   // LAST_JS_OBJECT_TYPE.
-  ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
-  ASSERT(JS_FUNCTION_TYPE == LAST_JS_OBJECT_TYPE + 1);
+  STATIC_ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
+  STATIC_ASSERT(JS_FUNCTION_TYPE == LAST_JS_OBJECT_TYPE + 1);
   __ CmpInstanceType(obj.reg(), JS_FUNCTION_TYPE);
   function.Branch(equal);
 
@@ -6715,7 +6766,7 @@
 
 void CodeGenerator::GenerateGetFramePointer(ZoneList<Expression*>* args) {
   ASSERT(args->length() == 0);
-  ASSERT(kSmiTag == 0);  // EBP value is aligned, so it should look like Smi.
+  STATIC_ASSERT(kSmiTag == 0);  // EBP value is aligned, so it looks like a Smi.
   Result ebp_as_smi = allocator_->Allocate();
   ASSERT(ebp_as_smi.is_valid());
   __ mov(ebp_as_smi.reg(), Operand(ebp));
@@ -7069,7 +7120,7 @@
                                                           key.reg());
 
   // tmp.reg() now holds finger offset as a smi.
-  ASSERT(kSmiTag == 0 && kSmiTagSize == 1);
+  STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize == 1);
   __ mov(tmp.reg(), FieldOperand(cache.reg(),
                                  JSFunctionResultCache::kFingerOffset));
   __ cmp(key.reg(), FixedArrayElementOperand(cache.reg(), tmp.reg()));
@@ -9036,7 +9087,7 @@
 
     // Load and check that the result is not the hole.
     // Key holds a smi.
-    ASSERT((kSmiTag == 0) && (kSmiTagSize == 1));
+    STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize == 1);
     __ mov(elements.reg(),
            FieldOperand(elements.reg(),
                         key.reg(),
@@ -9412,7 +9463,9 @@
   Label slow_case;
   __ mov(ecx, Operand(esp, 3 * kPointerSize));
   __ mov(eax, Operand(esp, 2 * kPointerSize));
-  ASSERT((kPointerSize == 4) && (kSmiTagSize == 1) && (kSmiTag == 0));
+  STATIC_ASSERT(kPointerSize == 4);
+  STATIC_ASSERT(kSmiTagSize == 1);
+  STATIC_ASSERT(kSmiTag == 0);
   __ mov(ecx, CodeGenerator::FixedArrayElementOperand(ecx, eax));
   __ cmp(ecx, Factory::undefined_value());
   __ j(equal, &slow_case);
@@ -9476,7 +9529,7 @@
   // String value => false iff empty.
   __ CmpInstanceType(edx, FIRST_NONSTRING_TYPE);
   __ j(above_equal, &not_string);
-  ASSERT(kSmiTag == 0);
+  STATIC_ASSERT(kSmiTag == 0);
   __ cmp(FieldOperand(eax, String::kLengthOffset), Immediate(0));
   __ j(zero, &false_result);
   __ jmp(&true_result);
@@ -9726,7 +9779,7 @@
   }
 
   // 3. Perform the smi check of the operands.
-  ASSERT(kSmiTag == 0);  // Adjust zero check if not the case.
+  STATIC_ASSERT(kSmiTag == 0);  // Adjust zero check if not the case.
   __ test(combined, Immediate(kSmiTagMask));
   __ j(not_zero, &not_smis, not_taken);
 
@@ -9807,7 +9860,7 @@
 
     case Token::MUL:
       // If the smi tag is 0 we can just leave the tag on one operand.
-      ASSERT(kSmiTag == 0);  // Adjust code below if not the case.
+      STATIC_ASSERT(kSmiTag == 0);  // Adjust code below if not the case.
       // We can't revert the multiplication if the result is not a smi
       // so save the right operand.
       __ mov(ebx, right);
@@ -9835,7 +9888,7 @@
       // Check for the corner case of dividing the most negative smi by
       // -1. We cannot use the overflow flag, since it is not set by idiv
       // instruction.
-      ASSERT(kSmiTag == 0 && kSmiTagSize == 1);
+      STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize == 1);
       __ cmp(eax, 0x40000000);
       __ j(equal, &use_fp_on_smis);
       // Check for negative zero result.  Use combined = left | right.
@@ -10408,7 +10461,7 @@
   __ j(not_zero, &input_not_smi);
   // Input is a smi. Untag and load it onto the FPU stack.
   // Then load the low and high words of the double into ebx, edx.
-  ASSERT_EQ(1, kSmiTagSize);
+  STATIC_ASSERT(kSmiTagSize == 1);
   __ sar(eax, 1);
   __ sub(Operand(esp), Immediate(2 * kPointerSize));
   __ mov(Operand(esp, 0), eax);
@@ -11127,7 +11180,7 @@
     __ j(sign, &try_float, not_taken);
 
     // Tag the result as a smi and we're done.
-    ASSERT(kSmiTagSize == 1);
+    STATIC_ASSERT(kSmiTagSize == 1);
     __ lea(eax, Operand(ecx, times_2, kSmiTag));
     __ jmp(&done);
 
@@ -11203,7 +11256,8 @@
   __ j(above_equal, &slow, not_taken);
 
   // Read the argument from the stack and return it.
-  ASSERT(kSmiTagSize == 1 && kSmiTag == 0);  // shifting code depends on this
+  STATIC_ASSERT(kSmiTagSize == 1);
+  STATIC_ASSERT(kSmiTag == 0);  // Shifting code depends on these.
   __ lea(ebx, Operand(ebp, eax, times_2, 0));
   __ neg(edx);
   __ mov(eax, Operand(ebx, edx, times_2, kDisplacement));
@@ -11218,7 +11272,8 @@
   __ j(above_equal, &slow, not_taken);
 
   // Read the argument from the stack and return it.
-  ASSERT(kSmiTagSize == 1 && kSmiTag == 0);  // shifting code depends on this
+  STATIC_ASSERT(kSmiTagSize == 1);
+  STATIC_ASSERT(kSmiTag == 0);  // Shifting code depends on these.
   __ lea(ebx, Operand(ebx, ecx, times_2, 0));
   __ neg(edx);
   __ mov(eax, Operand(ebx, edx, times_2, kDisplacement));
@@ -11289,12 +11344,12 @@
   }
 
   // Setup the callee in-object property.
-  ASSERT(Heap::arguments_callee_index == 0);
+  STATIC_ASSERT(Heap::arguments_callee_index == 0);
   __ mov(ebx, Operand(esp, 3 * kPointerSize));
   __ mov(FieldOperand(eax, JSObject::kHeaderSize), ebx);
 
   // Get the length (smi tagged) and set that as an in-object property too.
-  ASSERT(Heap::arguments_length_index == 1);
+  STATIC_ASSERT(Heap::arguments_length_index == 1);
   __ mov(ecx, Operand(esp, 1 * kPointerSize));
   __ mov(FieldOperand(eax, JSObject::kHeaderSize + kPointerSize), ecx);
 
@@ -11373,7 +11428,7 @@
 
   // Check that the first argument is a JSRegExp object.
   __ mov(eax, Operand(esp, kJSRegExpOffset));
-  ASSERT_EQ(0, kSmiTag);
+  STATIC_ASSERT(kSmiTag == 0);
   __ test(eax, Immediate(kSmiTagMask));
   __ j(zero, &runtime);
   __ CmpObjectType(eax, JS_REGEXP_TYPE, ecx);
@@ -11398,8 +11453,8 @@
   __ mov(edx, FieldOperand(ecx, JSRegExp::kIrregexpCaptureCountOffset));
   // Calculate number of capture registers (number_of_captures + 1) * 2. This
   // uses the assumption that smis are 2 * their untagged value.
-  ASSERT_EQ(0, kSmiTag);
-  ASSERT_EQ(1, kSmiTagSize + kSmiShiftSize);
+  STATIC_ASSERT(kSmiTag == 0);
+  STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1);
   __ add(Operand(edx), Immediate(2));  // edx was a smi.
   // Check that the static offsets vector buffer is large enough.
   __ cmp(edx, OffsetsVector::kStaticOffsetsVectorSize);
@@ -11457,7 +11512,7 @@
   // First check for flat two byte string.
   __ and_(ebx,
           kIsNotStringMask | kStringRepresentationMask | kStringEncodingMask);
-  ASSERT_EQ(0, kStringTag | kSeqStringTag | kTwoByteStringTag);
+  STATIC_ASSERT((kStringTag | kSeqStringTag | kTwoByteStringTag) == 0);
   __ j(zero, &seq_two_byte_string);
   // Any other flat string must be a flat ascii string.
   __ test(Operand(ebx),
@@ -11469,8 +11524,8 @@
   // string. In that case the subject string is just the first part of the cons
   // string. Also in this case the first part of the cons string is known to be
   // a sequential string or an external string.
-  ASSERT(kExternalStringTag !=0);
-  ASSERT_EQ(0, kConsStringTag & kExternalStringTag);
+  STATIC_ASSERT(kExternalStringTag != 0);
+  STATIC_ASSERT((kConsStringTag & kExternalStringTag) == 0);
   __ test(Operand(ebx),
           Immediate(kIsNotStringMask | kExternalStringTag));
   __ j(not_zero, &runtime);
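
All of the representation and encoding checks in this stub reduce to masking the instance-type byte and comparing against tag combinations that the STATIC_ASSERTs pin down. A sketch with illustrative bit assignments (assumptions; the real constants live in objects.h):

    #include <cstdint>

    const uint32_t kIsNotStringMaskSketch = 0x80;
    const uint32_t kStringRepresentationMaskSketch = 0x03;  // seq/cons/external
    const uint32_t kStringEncodingMaskSketch = 0x04;        // two-byte vs ascii
    const uint32_t kSeqStringTagSketch = 0x00;
    const uint32_t kTwoByteStringTagSketch = 0x00;

    // "Flat two byte" is the case where every masked bit is zero, which is
    // exactly what the assert on (kStringTag | kSeqStringTag |
    // kTwoByteStringTag) == 0 guarantees for the generated code.
    bool IsFlatTwoByteString(uint32_t instance_type) {
      uint32_t masked = instance_type & (kIsNotStringMaskSketch |
                                         kStringRepresentationMaskSketch |
                                         kStringEncodingMaskSketch);
      return masked == (kSeqStringTagSketch | kTwoByteStringTagSketch);
    }
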
@@ -11486,7 +11541,7 @@
   // Is first part a flat two byte string?
   __ test_b(FieldOperand(ebx, Map::kInstanceTypeOffset),
             kStringRepresentationMask | kStringEncodingMask);
-  ASSERT_EQ(0, kSeqStringTag | kTwoByteStringTag);
+  STATIC_ASSERT((kSeqStringTag | kTwoByteStringTag) == 0);
   __ j(zero, &seq_two_byte_string);
   // Any other flat string must be ascii.
   __ test_b(FieldOperand(ebx, Map::kInstanceTypeOffset),
@@ -11557,7 +11612,8 @@
   __ jmp(&setup_rest);
 
   __ bind(&setup_two_byte);
-  ASSERT(kSmiTag == 0 && kSmiTagSize == 1);  // edi is smi (powered by 2).
+  STATIC_ASSERT(kSmiTag == 0);
+  STATIC_ASSERT(kSmiTagSize == 1);  // edi is a smi, i.e. 2 * the value.
   __ lea(ecx, FieldOperand(eax, edi, times_1, SeqTwoByteString::kHeaderSize));
   __ mov(Operand(esp, 3 * kPointerSize), ecx);  // Argument 4.
   __ lea(ecx, FieldOperand(eax, ebx, times_2, SeqTwoByteString::kHeaderSize));
@@ -11605,8 +11661,8 @@
   __ mov(ecx, FieldOperand(eax, JSRegExp::kDataOffset));
   __ mov(edx, FieldOperand(ecx, JSRegExp::kIrregexpCaptureCountOffset));
   // Calculate number of capture registers (number_of_captures + 1) * 2.
-  ASSERT_EQ(0, kSmiTag);
-  ASSERT_EQ(1, kSmiTagSize + kSmiShiftSize);
+  STATIC_ASSERT(kSmiTag == 0);
+  STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1);
   __ add(Operand(edx), Immediate(2));  // edx was a smi.
 
   // edx: Number of capture registers
@@ -11701,7 +11757,7 @@
     __ SmiUntag(scratch);
   } else {
     Label not_smi, hash_calculated;
-    ASSERT(kSmiTag == 0);
+    STATIC_ASSERT(kSmiTag == 0);
     __ test(object, Immediate(kSmiTagMask));
     __ j(not_zero, &not_smi);
     __ mov(scratch, object);
@@ -11711,7 +11767,7 @@
     __ cmp(FieldOperand(object, HeapObject::kMapOffset),
            Factory::heap_number_map());
     __ j(not_equal, not_found);
-    ASSERT_EQ(8, kDoubleSize);
+    STATIC_ASSERT(8 == kDoubleSize);
     __ mov(scratch, FieldOperand(object, HeapNumber::kValueOffset));
     __ xor_(scratch, FieldOperand(object, HeapNumber::kValueOffset + 4));
     // Object is heap number and hash is now in scratch. Calculate cache index.
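
With kDoubleSize pinned to 8, the number cache hashes a heap number by xor-ing the two 32-bit halves of its IEEE-754 payload, exactly the mov/xor pair above. A host-side sketch (the cache-size parameter is an assumption):

    #include <cstdint>
    #include <cstring>

    // Xor the low and high words of the 8-byte double, then mask down to
    // a power-of-two cache size to get the cache index.
    uint32_t NumberCacheIndex(double value, uint32_t cache_size_pow2) {
      uint32_t words[2];
      std::memcpy(words, &value, sizeof(words));  // relies on kDoubleSize == 8
      uint32_t hash = words[0] ^ words[1];
      return hash & (cache_size_pow2 - 1);
    }
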
@@ -11842,7 +11898,7 @@
       // Value is a QNaN if value & kQuietNaNMask == kQuietNaNMask, i.e.,
       // all bits in the mask are set. We only need to check the word
       // that contains the exponent and high bit of the mantissa.
-      ASSERT_NE(0, (kQuietNaNHighBitsMask << 1) & 0x80000000u);
+      STATIC_ASSERT(((kQuietNaNHighBitsMask << 1) & 0x80000000u) != 0);
       __ mov(edx, FieldOperand(edx, HeapNumber::kExponentOffset));
       __ xor_(eax, Operand(eax));
       // Shift value and mask so kQuietNaNHighBitsMask applies to topmost
@@ -11850,7 +11906,7 @@
       __ add(edx, Operand(edx));
       __ cmp(edx, kQuietNaNHighBitsMask << 1);
       if (cc_ == equal) {
-        ASSERT_NE(1, EQUAL);
+        STATIC_ASSERT(EQUAL != 1);
         __ setcc(above_equal, eax);
         __ ret(0);
       } else {
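
The NaN test reads only the word holding the sign, exponent, and topmost mantissa bits: a double is a quiet NaN when every bit of kQuietNaNHighBitsMask is set there. A sketch assuming the conventional IEEE-754 layout (the mask value here is an assumption in the spirit of the assert above):

    #include <cstdint>
    #include <cstring>

    // High word layout: sign(1) | exponent(11) | top mantissa bits(20).
    // Exponent all ones plus the quiet bit marks a quiet NaN.
    const uint32_t kQuietNaNHighBitsMaskSketch = 0xfffu << 19;

    bool IsQuietNaN(double value) {
      uint64_t bits;
      std::memcpy(&bits, &value, sizeof(bits));
      uint32_t high_word = static_cast<uint32_t>(bits >> 32);
      return (high_word & kQuietNaNHighBitsMaskSketch) ==
             kQuietNaNHighBitsMaskSketch;
    }
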
@@ -11878,7 +11934,7 @@
     // slow-case code.
     // If either is a Smi (we know that not both are), then they can only
     // be equal if the other is a HeapNumber. If so, use the slow case.
-    ASSERT_EQ(0, kSmiTag);
+    STATIC_ASSERT(kSmiTag == 0);
     ASSERT_EQ(0, Smi::FromInt(0));
     __ mov(ecx, Immediate(kSmiTagMask));
     __ and_(ecx, Operand(eax));
@@ -11887,7 +11943,7 @@
     // One operand is a smi.
 
     // Check whether the non-smi is a heap number.
-    ASSERT_EQ(1, kSmiTagMask);
+    STATIC_ASSERT(kSmiTagMask == 1);
     // ecx still holds eax & kSmiTagMask, which is either zero or one.
     __ sub(Operand(ecx), Immediate(0x01));
     __ mov(ebx, edx);
@@ -11913,13 +11969,13 @@
     // Get the type of the first operand.
     // If the first object is a JS object, we have done pointer comparison.
     Label first_non_object;
-    ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
+    STATIC_ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
     __ CmpObjectType(eax, FIRST_JS_OBJECT_TYPE, ecx);
     __ j(below, &first_non_object);
 
     // Return non-zero (eax is not zero)
     Label return_not_equal;
-    ASSERT(kHeapObjectTag != 0);
+    STATIC_ASSERT(kHeapObjectTag != 0);
     __ bind(&return_not_equal);
     __ ret(0);
 
@@ -12039,8 +12095,8 @@
     // At most one is a smi, so we can test for smi by adding the two.
     // A smi plus a heap object has the low bit set, a heap object plus
     // a heap object has the low bit clear.
-    ASSERT_EQ(0, kSmiTag);
-    ASSERT_EQ(1, kSmiTagMask);
+    STATIC_ASSERT(kSmiTag == 0);
+    STATIC_ASSERT(kSmiTagMask == 1);
     __ lea(ecx, Operand(eax, edx, times_1, 0));
     __ test(ecx, Immediate(kSmiTagMask));
     __ j(not_zero, &not_both_objects);
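
The lea/test pair exploits the tag values the asserts fix: a smi has a clear low bit and a heap object a set one, so the sum of two tagged words has its low bit set exactly when one operand is a smi. A sketch:

    #include <cstdint>

    const intptr_t kSmiTagMaskSketch = 1;  // low tag bit, per the asserts

    // smi (0) + heap object (1) leaves the low bit set; two heap objects
    // (1 + 1) carry out and clear it, as do two smis (0 + 0).
    bool ExactlyOneOperandIsSmi(intptr_t a, intptr_t b) {
      return ((a + b) & kSmiTagMaskSketch) != 0;
    }
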
@@ -12180,16 +12236,16 @@
   // eax holds the exception.
 
   // Adjust this code if not the case.
-  ASSERT(StackHandlerConstants::kSize == 4 * kPointerSize);
+  STATIC_ASSERT(StackHandlerConstants::kSize == 4 * kPointerSize);
 
   // Drop the sp to the top of the handler.
   ExternalReference handler_address(Top::k_handler_address);
   __ mov(esp, Operand::StaticVariable(handler_address));
 
   // Restore next handler and frame pointer, discard handler state.
-  ASSERT(StackHandlerConstants::kNextOffset == 0);
+  STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
   __ pop(Operand::StaticVariable(handler_address));
-  ASSERT(StackHandlerConstants::kFPOffset == 1 * kPointerSize);
+  STATIC_ASSERT(StackHandlerConstants::kFPOffset == 1 * kPointerSize);
   __ pop(ebp);
   __ pop(edx);  // Remove state.
 
@@ -12203,7 +12259,7 @@
   __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
   __ bind(&skip);
 
-  ASSERT(StackHandlerConstants::kPCOffset == 3 * kPointerSize);
+  STATIC_ASSERT(StackHandlerConstants::kPCOffset == 3 * kPointerSize);
   __ ret(0);
 }
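
The StackHandlerConstants offsets asserted above describe a four-slot handler frame that the throw code pops in order. A struct mirroring that layout (the state field name is inferred from the pop into edx):

    // kNextOffset == 0, kFPOffset == 1 * kPointerSize,
    // kPCOffset == 3 * kPointerSize, kSize == 4 * kPointerSize.
    struct StackHandlerSketch {
      void* next;   // link to the previous handler, popped first
      void* fp;     // saved frame pointer
      void* state;  // handler state, discarded into edx above
      void* pc;     // return address consumed by the final ret
    };
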
 
@@ -12223,7 +12279,7 @@
   Label prologue;
   Label promote_scheduled_exception;
   __ EnterApiExitFrame(ExitFrame::MODE_NORMAL, kStackSpace, kArgc);
-  ASSERT_EQ(kArgc, 4);
+  STATIC_ASSERT(kArgc == 4);
   if (kPassHandlesDirectly) {
     // When handles are passed directly we don't have to allocate extra
     // space for an out parameter and pass it.
@@ -12338,7 +12394,7 @@
 
   // Check for failure result.
   Label failure_returned;
-  ASSERT(((kFailureTag + 1) & kFailureTagMask) == 0);
+  STATIC_ASSERT(((kFailureTag + 1) & kFailureTagMask) == 0);
   __ lea(ecx, Operand(eax, 1));
   // Lower 2 bits of ecx are 0 iff eax has failure tag.
   __ test(ecx, Immediate(kFailureTagMask));
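
The lea/test sequence relies on the failure tag filling the low bits with all ones, so that adding one clears those bits exactly for failure-tagged words. A sketch with an assumed two-bit tag:

    #include <cstdint>

    const intptr_t kFailureTagSketch = 3;      // assumed: both low bits set
    const intptr_t kFailureTagMaskSketch = 3;  // two-bit tag field

    // (value + 1) clears the low two bits iff both were set, which is what
    // the assert ((kFailureTag + 1) & kFailureTagMask) == 0 encodes.
    bool HasFailureTag(intptr_t value) {
      return ((value + 1) & kFailureTagMaskSketch) == 0;
    }
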
@@ -12353,7 +12409,7 @@
 
   Label retry;
   // If the returned exception is RETRY_AFTER_GC, continue at the retry label.
-  ASSERT(Failure::RETRY_AFTER_GC == 0);
+  STATIC_ASSERT(Failure::RETRY_AFTER_GC == 0);
   __ test(eax, Immediate(((1 << kFailureTypeTagSize) - 1) << kFailureTagSize));
   __ j(zero, &retry, taken);
 
@@ -12384,7 +12440,7 @@
 void CEntryStub::GenerateThrowUncatchable(MacroAssembler* masm,
                                           UncatchableExceptionType type) {
   // Adjust this code if not the case.
-  ASSERT(StackHandlerConstants::kSize == 4 * kPointerSize);
+  STATIC_ASSERT(StackHandlerConstants::kSize == 4 * kPointerSize);
 
   // Drop sp to the top stack handler.
   ExternalReference handler_address(Top::k_handler_address);
@@ -12404,7 +12460,7 @@
   __ bind(&done);
 
   // Set the top handler address to next handler past the current ENTRY handler.
-  ASSERT(StackHandlerConstants::kNextOffset == 0);
+  STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
   __ pop(Operand::StaticVariable(handler_address));
 
   if (type == OUT_OF_MEMORY) {
@@ -12423,11 +12479,11 @@
   __ xor_(esi, Operand(esi));
 
   // Restore fp from handler and discard handler state.
-  ASSERT(StackHandlerConstants::kFPOffset == 1 * kPointerSize);
+  STATIC_ASSERT(StackHandlerConstants::kFPOffset == 1 * kPointerSize);
   __ pop(ebp);
   __ pop(edx);  // State.
 
-  ASSERT(StackHandlerConstants::kPCOffset == 3 * kPointerSize);
+  STATIC_ASSERT(StackHandlerConstants::kPCOffset == 3 * kPointerSize);
   __ ret(0);
 }
 
@@ -12738,7 +12794,7 @@
   Label got_char_code;
 
   // If the receiver is a smi trigger the non-string case.
-  ASSERT(kSmiTag == 0);
+  STATIC_ASSERT(kSmiTag == 0);
   __ test(object_, Immediate(kSmiTagMask));
   __ j(zero, receiver_not_string_);
 
@@ -12750,7 +12806,7 @@
   __ j(not_zero, receiver_not_string_);
 
   // If the index is non-smi trigger the non-smi case.
-  ASSERT(kSmiTag == 0);
+  STATIC_ASSERT(kSmiTag == 0);
   __ test(index_, Immediate(kSmiTagMask));
   __ j(not_zero, &index_not_smi_);
 
@@ -12763,7 +12819,7 @@
   __ j(above_equal, index_out_of_range_);
 
   // We need special handling for non-flat strings.
-  ASSERT(kSeqStringTag == 0);
+  STATIC_ASSERT(kSeqStringTag == 0);
   __ test(result_, Immediate(kStringRepresentationMask));
   __ j(zero, &flat_string);
 
@@ -12784,19 +12840,19 @@
   __ mov(result_, FieldOperand(object_, HeapObject::kMapOffset));
   __ movzx_b(result_, FieldOperand(result_, Map::kInstanceTypeOffset));
   // If the first cons component is also non-flat, then go to runtime.
-  ASSERT(kSeqStringTag == 0);
+  STATIC_ASSERT(kSeqStringTag == 0);
   __ test(result_, Immediate(kStringRepresentationMask));
   __ j(not_zero, &call_runtime_);
 
   // Check for 1-byte or 2-byte string.
   __ bind(&flat_string);
-  ASSERT(kAsciiStringTag != 0);
+  STATIC_ASSERT(kAsciiStringTag != 0);
   __ test(result_, Immediate(kStringEncodingMask));
   __ j(not_zero, &ascii_string);
 
   // 2-byte string.
   // Load the 2-byte character code into the result register.
-  ASSERT(kSmiTag == 0 && kSmiTagSize == 1);
+  STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize == 1);
   __ movzx_w(result_, FieldOperand(object_,
                                    scratch_, times_1,  // Scratch is smi-tagged.
                                    SeqTwoByteString::kHeaderSize));
@@ -12846,7 +12902,7 @@
   __ movzx_b(result_, FieldOperand(result_, Map::kInstanceTypeOffset));
   call_helper.AfterCall(masm);
   // If index is still not a smi, it must be out of range.
-  ASSERT(kSmiTag == 0);
+  STATIC_ASSERT(kSmiTag == 0);
   __ test(scratch_, Immediate(kSmiTagMask));
   __ j(not_zero, index_out_of_range_);
   // Otherwise, return to the fast path.
@@ -12875,8 +12931,8 @@
 
 void StringCharFromCodeGenerator::GenerateFast(MacroAssembler* masm) {
   // Fast case of Heap::LookupSingleCharacterStringFromCode.
-  ASSERT(kSmiTag == 0);
-  ASSERT(kSmiShiftSize == 0);
+  STATIC_ASSERT(kSmiTag == 0);
+  STATIC_ASSERT(kSmiShiftSize == 0);
   ASSERT(IsPowerOf2(String::kMaxAsciiCharCode + 1));
   __ test(code_,
           Immediate(kSmiTagMask |
@@ -12884,9 +12940,9 @@
   __ j(not_zero, &slow_case_, not_taken);
 
   __ Set(result_, Immediate(Factory::single_character_string_cache()));
-  ASSERT(kSmiTag == 0);
-  ASSERT(kSmiTagSize == 1);
-  ASSERT(kSmiShiftSize == 0);
+  STATIC_ASSERT(kSmiTag == 0);
+  STATIC_ASSERT(kSmiTagSize == 1);
+  STATIC_ASSERT(kSmiShiftSize == 0);
   // At this point code register contains smi tagged ascii char code.
   __ mov(result_, FieldOperand(result_,
                                code_, times_half_pointer_size,
@@ -12958,7 +13014,7 @@
   // Check if either of the strings are empty. In that case return the other.
   Label second_not_zero_length, both_not_zero_length;
   __ mov(ecx, FieldOperand(edx, String::kLengthOffset));
-  ASSERT(kSmiTag == 0);
+  STATIC_ASSERT(kSmiTag == 0);
   __ test(ecx, Operand(ecx));
   __ j(not_zero, &second_not_zero_length);
   // Second string is empty, result is first string which is already in eax.
@@ -12966,7 +13022,7 @@
   __ ret(2 * kPointerSize);
   __ bind(&second_not_zero_length);
   __ mov(ebx, FieldOperand(eax, String::kLengthOffset));
-  ASSERT(kSmiTag == 0);
+  STATIC_ASSERT(kSmiTag == 0);
   __ test(ebx, Operand(ebx));
   __ j(not_zero, &both_not_zero_length);
   // First string is empty, result is second string which is in edx.
@@ -12983,7 +13039,7 @@
   Label string_add_flat_result, longer_than_two;
   __ bind(&both_not_zero_length);
   __ add(ebx, Operand(ecx));
-  ASSERT(Smi::kMaxValue == String::kMaxLength);
+  STATIC_ASSERT(Smi::kMaxValue == String::kMaxLength);
   // Handle exceptionally long strings in the runtime system.
   __ j(overflow, &string_add_runtime);
   // Use the runtime system when adding two one character strings, as it
@@ -13024,7 +13080,7 @@
   __ mov(edi, FieldOperand(edx, HeapObject::kMapOffset));
   __ movzx_b(edi, FieldOperand(edi, Map::kInstanceTypeOffset));
   __ and_(ecx, Operand(edi));
-  ASSERT(kStringEncodingMask == kAsciiStringTag);
+  STATIC_ASSERT(kStringEncodingMask == kAsciiStringTag);
   __ test(ecx, Immediate(kAsciiStringTag));
   __ j(zero, &non_ascii);
   __ bind(&ascii_data);
@@ -13051,7 +13107,7 @@
   __ mov(ecx, FieldOperand(eax, HeapObject::kMapOffset));
   __ movzx_b(ecx, FieldOperand(ecx, Map::kInstanceTypeOffset));
   __ xor_(edi, Operand(ecx));
-  ASSERT(kAsciiStringTag != 0 && kAsciiDataHintTag != 0);
+  STATIC_ASSERT(kAsciiStringTag != 0 && kAsciiDataHintTag != 0);
   __ and_(edi, kAsciiStringTag | kAsciiDataHintTag);
   __ cmp(edi, kAsciiStringTag | kAsciiDataHintTag);
   __ j(equal, &ascii_data);
@@ -13080,7 +13136,7 @@
   // ebx: length of resulting flat string as a smi
   // edx: second string
   Label non_ascii_string_add_flat_result;
-  ASSERT(kStringEncodingMask == kAsciiStringTag);
+  STATIC_ASSERT(kStringEncodingMask == kAsciiStringTag);
   __ mov(ecx, FieldOperand(eax, HeapObject::kMapOffset));
   __ test_b(FieldOperand(ecx, Map::kInstanceTypeOffset), kAsciiStringTag);
   __ j(zero, &non_ascii_string_add_flat_result);
@@ -13199,9 +13255,9 @@
                                              Register count,
                                              Register scratch,
                                              bool ascii) {
-  // Copy characters using rep movs of doublewords. Align destination on 4 byte
-  // boundary before starting rep movs. Copy remaining characters after running
-  // rep movs.
+  // Copy characters using rep movs of doublewords.
+  // The destination is aligned on a 4 byte boundary because we are
+  // copying to the beginning of a newly allocated string.
   ASSERT(dest.is(edi));  // rep movs destination
   ASSERT(src.is(esi));  // rep movs source
   ASSERT(count.is(ecx));  // rep movs count
@@ -13322,9 +13378,9 @@
     }
     __ and_(scratch, Operand(mask));
 
-    // Load the entry from the symble table.
+    // Load the entry from the symbol table.
     Register candidate = scratch;  // Scratch register contains candidate.
-    ASSERT_EQ(1, SymbolTable::kEntrySize);
+    STATIC_ASSERT(SymbolTable::kEntrySize == 1);
     __ mov(candidate,
            FieldOperand(symbol_table,
                         scratch,
@@ -13367,7 +13423,7 @@
   // Scratch register contains result when we fall through to here.
   Register result = scratch;
   __ bind(&found_in_symbol_table);
-  __ pop(mask);  // Pop temporally saved mask from the stack.
+  __ pop(mask);  // Pop saved mask from the stack.
   if (!result.is(eax)) {
     __ mov(eax, result);
   }
@@ -13442,7 +13498,7 @@
 
   // Make sure first argument is a string.
   __ mov(eax, Operand(esp, 3 * kPointerSize));
-  ASSERT_EQ(0, kSmiTag);
+  STATIC_ASSERT(kSmiTag == 0);
   __ test(eax, Immediate(kSmiTagMask));
   __ j(zero, &runtime);
   Condition is_string = masm->IsObjectStringType(eax, ebx, ebx);
@@ -13450,6 +13506,7 @@
 
   // eax: string
   // ebx: instance type
+
   // Calculate length of sub string using the smi values.
   Label result_longer_than_two;
   __ mov(ecx, Operand(esp, 1 * kPointerSize));  // To index.
@@ -13555,8 +13612,8 @@
   __ mov(ebx, Operand(esp, 2 * kPointerSize));  // from
   // As from is a smi it is 2 times the value, which matches the size of a
   // two-byte character.
-  ASSERT_EQ(0, kSmiTag);
-  ASSERT_EQ(1, kSmiTagSize + kSmiShiftSize);
+  STATIC_ASSERT(kSmiTag == 0);
+  STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1);
   __ add(esi, Operand(ebx));
 
   // eax: result string
@@ -13642,8 +13699,8 @@
   __ j(not_zero, &result_not_equal);
 
   // Result is EQUAL.
-  ASSERT_EQ(0, EQUAL);
-  ASSERT_EQ(0, kSmiTag);
+  STATIC_ASSERT(EQUAL == 0);
+  STATIC_ASSERT(kSmiTag == 0);
   __ Set(eax, Immediate(Smi::FromInt(EQUAL)));
   __ ret(0);
 
@@ -13675,8 +13732,8 @@
   Label not_same;
   __ cmp(edx, Operand(eax));
   __ j(not_equal, &not_same);
-  ASSERT_EQ(0, EQUAL);
-  ASSERT_EQ(0, kSmiTag);
+  STATIC_ASSERT(EQUAL == 0);
+  STATIC_ASSERT(kSmiTag == 0);
   __ Set(eax, Immediate(Smi::FromInt(EQUAL)));
   __ IncrementCounter(&Counters::string_compare_native, 1);
   __ ret(2 * kPointerSize);
diff --git a/src/ia32/codegen-ia32.h b/src/ia32/codegen-ia32.h
index 24f9957..66014a0 100644
--- a/src/ia32/codegen-ia32.h
+++ b/src/ia32/codegen-ia32.h
@@ -519,6 +519,15 @@
   void GenericBinaryOperation(BinaryOperation* expr,
                               OverwriteMode overwrite_mode);
 
+  // Emits code sequence that jumps to a JumpTarget if the inputs
+  // are both smis.  Cannot be in MacroAssembler because it takes
+  // advantage of TypeInfo to skip unneeded checks.
+  // Allocates a temporary register, possibly spilling from the frame,
+  // if it needs to check both left and right.
+  void JumpIfBothSmiUsingTypeInfo(Result* left,
+                                  Result* right,
+                                  JumpTarget* both_smi);
+
   // Emits code sequence that jumps to deferred code if the inputs
   // are not both smis.  Cannot be in MacroAssembler because it takes
   // advantage of TypeInfo to skip unneeded checks.
diff --git a/src/jump-target-light.h b/src/jump-target-light.h
index 91b7266..5ca4d60 100644
--- a/src/jump-target-light.h
+++ b/src/jump-target-light.h
@@ -101,8 +101,7 @@
 
   // Emit a conditional branch to the target.  There must be a current
   // frame at the branch.  The current frame will fall through to the
-  // code after the branch.  The arg is a result that is live both at
-  // the target and the fall-through.
+  // code after the branch.
   virtual void Branch(Condition cc, Hint hint = no_hint);
 
   // Bind a jump target.  If there is no current frame at the binding
diff --git a/src/objects.cc b/src/objects.cc
index 8f668fb..7a08eec 100644
--- a/src/objects.cc
+++ b/src/objects.cc
@@ -2966,7 +2966,8 @@
         break;
     }
 
-    SetElementCallback(index, info, info->property_attributes());
+    Object* ok = SetElementCallback(index, info, info->property_attributes());
+    if (ok->IsFailure()) return ok;
   } else {
     // Lookup the name.
     LookupResult result;
@@ -2976,7 +2977,8 @@
     if (result.IsProperty() && (result.IsReadOnly() || result.IsDontDelete())) {
       return Heap::undefined_value();
     }
-    SetPropertyCallback(name, info, info->property_attributes());
+    Object* ok = SetPropertyCallback(name, info, info->property_attributes());
+    if (ok->IsFailure()) return ok;
   }
 
   return this;
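
The two changed call sites above adopt the check-and-propagate idiom used for allocating calls throughout the pre-exception runtime code: an allocation failure comes back as a Failure object rather than a C++ exception, and must be returned up the stack so the caller can retry after a GC. A minimal sketch of the pattern with illustrative stand-in types:

    // Stand-in for V8's Object/Failure distinction.
    struct ObjectSketch {
      bool failed;
      bool IsFailure() const { return failed; }
    };

    // Caller-side idiom: check every allocating call and propagate the
    // failure instead of dropping it on the floor.
    ObjectSketch* InstallCallback(ObjectSketch* (*set_callback)(),
                                  ObjectSketch* receiver) {
      ObjectSketch* ok = set_callback();
      if (ok->IsFailure()) return ok;  // e.g. retry-after-GC
      return receiver;                 // success: return the receiver
    }
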
diff --git a/src/runtime.cc b/src/runtime.cc
index fa881eb..a6924a0 100644
--- a/src/runtime.cc
+++ b/src/runtime.cc
@@ -212,23 +212,42 @@
     Handle<Context> context,
     Handle<FixedArray> constant_properties,
     bool* is_result_from_cache) {
-  int number_of_properties = constant_properties->length() / 2;
+  int properties_length = constant_properties->length();
+  int number_of_properties = properties_length / 2;
   if (FLAG_canonicalize_object_literal_maps) {
-    // First find prefix of consecutive symbol keys.
+    // Check that there are only symbols and array indices among keys.
     int number_of_symbol_keys = 0;
-    while ((number_of_symbol_keys < number_of_properties) &&
-           (constant_properties->get(number_of_symbol_keys*2)->IsSymbol())) {
-      number_of_symbol_keys++;
+    for (int p = 0; p != properties_length; p += 2) {
+      Object* key = constant_properties->get(p);
+      uint32_t element_index = 0;
+      if (key->IsSymbol()) {
+        number_of_symbol_keys++;
+      } else if (key->ToArrayIndex(&element_index)) {
+        // An index key does not require space in the property backing store.
+        number_of_properties--;
+      } else {
+        // Bail out as a non-symbol non-index key makes caching impossible.
+        // The ASSERT makes sure that the if condition after the loop is false.
+        ASSERT(number_of_symbol_keys != number_of_properties);
+        break;
+      }
     }
-    // Based on the number of prefix symbols key we decide whether
-    // to use the map cache in the global context.
+    // If we only have symbols and array indices among keys then we can
+    // use the map cache in the global context.
     const int kMaxKeys = 10;
     if ((number_of_symbol_keys == number_of_properties) &&
         (number_of_symbol_keys < kMaxKeys)) {
       // Create the fixed array with the key.
       Handle<FixedArray> keys = Factory::NewFixedArray(number_of_symbol_keys);
-      for (int i = 0; i < number_of_symbol_keys; i++) {
-        keys->set(i, constant_properties->get(i*2));
+      if (number_of_symbol_keys > 0) {
+        int index = 0;
+        for (int p = 0; p < properties_length; p += 2) {
+          Object* key = constant_properties->get(p);
+          if (key->IsSymbol()) {
+            keys->set(index++, key);
+          }
+        }
+        ASSERT(index == number_of_symbol_keys);
       }
       *is_result_from_cache = true;
       return Factory::ObjectLiteralMapFromCache(context, keys);
@@ -6732,6 +6751,26 @@
   return *result;
 }
 
+static Object* Runtime_NewObjectFromBound(Arguments args) {
+  HandleScope scope;
+  ASSERT(args.length() == 2);
+  CONVERT_ARG_CHECKED(JSFunction, function, 0);
+  CONVERT_ARG_CHECKED(JSArray, params, 1);
+
+  FixedArray* fixed = FixedArray::cast(params->elements());
+
+  bool exception = false;
+  Object*** param_data = NewArray<Object**>(fixed->length());
+  for (int i = 0; i < fixed->length(); i++) {
+    Handle<Object> val = Handle<Object>(fixed->get(i));
+    param_data[i] = val.location();
+  }
+
+  Handle<Object> result = Execution::New(
+      function, fixed->length(), param_data, &exception);
+  DeleteArray(param_data);
+  if (exception) {
+    return Failure::Exception();
+  }
+  return *result;
+}
+
 
 static Code* ComputeConstructStub(Handle<JSFunction> function) {
   Handle<Object> prototype = Factory::null_value();
@@ -9342,6 +9381,13 @@
     }
     Debug::SetBreakPoint(shared, break_point_object_arg, &position);
     position += shared->start_position();
+
+    // The result position may lie beyond the end of the script source.
+    // This is expected when the function is toplevel. It may become a
+    // problem later when the position is converted into line/column.
+    if (shared->is_toplevel() && position == shared->end_position()) {
+      position = shared->end_position() - 1;
+    }
     return Smi::FromInt(position);
   }
   return  Heap::undefined_value();
diff --git a/src/runtime.h b/src/runtime.h
index 1c9bb08..dca3c7b 100644
--- a/src/runtime.h
+++ b/src/runtime.h
@@ -258,6 +258,7 @@
   /* Statements */ \
   F(NewClosure, 2, 1) \
   F(NewObject, 1, 1) \
+  F(NewObjectFromBound, 2, 1) \
   F(Throw, 1, 1) \
   F(ReThrow, 1, 1) \
   F(ThrowReferenceError, 1, 1) \
diff --git a/src/v8-counters.h b/src/v8-counters.h
index 509de3d..64ea9fc 100644
--- a/src/v8-counters.h
+++ b/src/v8-counters.h
@@ -169,7 +169,7 @@
   SC(constructed_objects_runtime, V8.ConstructedObjectsRuntime)       \
   SC(constructed_objects_stub, V8.ConstructedObjectsStub)             \
   SC(negative_lookups, V8.NegativeLookups)                            \
-  SC(negative_lookups_miss, V8.NegativeLookupsMiss)                  \
+  SC(negative_lookups_miss, V8.NegativeLookupsMiss)                   \
   SC(array_function_runtime, V8.ArrayFunctionRuntime)                 \
   SC(array_function_native, V8.ArrayFunctionNative)                   \
   SC(for_in, V8.ForIn)                                                \
diff --git a/src/v8natives.js b/src/v8natives.js
index 198cecc..2b7a9bb 100644
--- a/src/v8natives.js
+++ b/src/v8natives.js
@@ -539,21 +539,21 @@
     throw MakeTypeError("define_disallowed", ["defineProperty"]);
 
   if (!IS_UNDEFINED(current) && !current.isConfigurable()) {
-      // Step 5 and 6
-     if ((!desc.hasEnumerable() || 
-          SameValue(desc.isEnumerable() && current.isEnumerable())) &&
-         (!desc.hasConfigurable() || 
-          SameValue(desc.isConfigurable(), current.isConfigurable())) &&
-         (!desc.hasWritable() || 
-          SameValue(desc.isWritable(), current.isWritable())) &&
-         (!desc.hasValue() ||
-          SameValue(desc.getValue(), current.getValue())) &&
-         (!desc.hasGetter() ||
-          SameValue(desc.getGet(), current.getGet())) &&
-         (!desc.hasSetter() ||
-          SameValue(desc.getSet(), current.getSet()))) {
-       return true;
-     }
+    // Step 5 and 6
+    if ((!desc.hasEnumerable() || 
+         SameValue(desc.isEnumerable(), current.isEnumerable())) &&
+        (!desc.hasConfigurable() || 
+         SameValue(desc.isConfigurable(), current.isConfigurable())) &&
+        (!desc.hasWritable() || 
+         SameValue(desc.isWritable(), current.isWritable())) &&
+        (!desc.hasValue() ||
+         SameValue(desc.getValue(), current.getValue())) &&
+        (!desc.hasGetter() ||
+         SameValue(desc.getGet(), current.getGet())) &&
+        (!desc.hasSetter() ||
+         SameValue(desc.getSet(), current.getSet()))) {
+      return true;
+    }
 
     // Step 7
     if (desc.isConfigurable() || desc.isEnumerable() != current.isEnumerable())
@@ -1099,6 +1099,57 @@
 }
 
 
+// ES5 15.3.4.5
+function FunctionBind(this_arg) { // Length is 1.
+  if (!IS_FUNCTION(this)) {
+    throw new $TypeError('Bind must be called on a function');
+  }
+  // this_arg is not an argument that should be bound.
+  var argc_bound = %_ArgumentsLength() - 1;
+  if (argc_bound > 0) {
+    var bound_args = new $Array(argc_bound);
+    for (var i = 0; i < argc_bound; i++) {
+      bound_args[i] = %_Arguments(i + 1);
+    }
+  }
+  var fn = this;
+  var result = function() {
+    // Combine the args we got from the bind call with the args
+    // given as arguments to the invocation.
+    var argc = %_ArgumentsLength();
+    var args = new $Array(argc + argc_bound);
+    // Add bound arguments.
+    for (var i = 0; i < argc_bound; i++) {
+      args[i] = bound_args[i];
+    }
+    // Add arguments from call.
+    for (var i = 0; i < argc; i++) {
+      args[argc_bound + i] = %_Arguments(i); 
+    }
+    // If this is a construct call we use a special runtime method
+    // to generate the actual object using the bound function.
+    if (%_IsConstructCall()) {
+      return %NewObjectFromBound(fn, args);
+    }
+    return fn.apply(this_arg, args);
+  };
+
+  // We already have caller and arguments properties on functions,
+  // which are non-configurable. It therefore makes no sense to
+  // try to redefine these as defined by the spec. The spec says
+  // that bind should make these throw a TypeError if get or set
+  // is called and make them non-enumerable and non-configurable.
+  // To be consistent with our normal functions we leave this as it is. 
+
+  // Set the correct length.
+  var length = (this.length - argc_bound) > 0 ? this.length - argc_bound : 0;
+  %FunctionSetLength(result, length);
+
+  return result;
+}
+
+
 function NewFunction(arg1) {  // length == 1
   var n = %_ArgumentsLength();
   var p = '';
@@ -1130,6 +1181,7 @@
 
 function SetupFunction() {
   InstallFunctions($Function.prototype, DONT_ENUM, $Array(
+    "bind", FunctionBind,
     "toString", FunctionToString
   ));
 }
diff --git a/src/version.cc b/src/version.cc
index 6fcb23e..34d6ec9 100644
--- a/src/version.cc
+++ b/src/version.cc
@@ -34,8 +34,8 @@
 // cannot be changed without changing the SCons build script.
 #define MAJOR_VERSION     2
 #define MINOR_VERSION     3
-#define BUILD_NUMBER      2
-#define PATCH_LEVEL       1
+#define BUILD_NUMBER      3
+#define PATCH_LEVEL       0
 #define CANDIDATE_VERSION false
 
 // Define SONAME to have the SCons build the put a specific SONAME into the
diff --git a/src/x64/builtins-x64.cc b/src/x64/builtins-x64.cc
index a38ebaf..959b4b0 100644
--- a/src/x64/builtins-x64.cc
+++ b/src/x64/builtins-x64.cc
@@ -895,8 +895,8 @@
   __ lea(rbx, FieldOperand(rbx, Code::kHeaderSize));
   __ jmp(rbx);
 
-  // edi: called object
-  // eax: number of arguments
+  // rdi: called object
+  // rax: number of arguments
   __ bind(&non_function_call);
   // CALL_NON_FUNCTION expects the non-function constructor as receiver
   // (instead of the original receiver from the call site).  The receiver is
diff --git a/src/x64/codegen-x64.cc b/src/x64/codegen-x64.cc
index 723631a..35c1a3d 100644
--- a/src/x64/codegen-x64.cc
+++ b/src/x64/codegen-x64.cc
@@ -1201,6 +1201,50 @@
 }
 
 
+void CodeGenerator::JumpIfBothSmiUsingTypeInfo(Result* left,
+                                               Result* right,
+                                               JumpTarget* both_smi) {
+  TypeInfo left_info = left->type_info();
+  TypeInfo right_info = right->type_info();
+  if (left_info.IsDouble() || left_info.IsString() ||
+      right_info.IsDouble() || right_info.IsString()) {
+    // We know that left and right are not both smi.  Don't do any tests.
+    return;
+  }
+
+  if (left->reg().is(right->reg())) {
+    if (!left_info.IsSmi()) {
+      Condition is_smi = masm()->CheckSmi(left->reg());
+      both_smi->Branch(is_smi);
+    } else {
+      if (FLAG_debug_code) __ AbortIfNotSmi(left->reg());
+      left->Unuse();
+      right->Unuse();
+      both_smi->Jump();
+    }
+  } else if (!left_info.IsSmi()) {
+    if (!right_info.IsSmi()) {
+      Condition is_smi = masm()->CheckBothSmi(left->reg(), right->reg());
+      both_smi->Branch(is_smi);
+    } else {
+      Condition is_smi = masm()->CheckSmi(left->reg());
+      both_smi->Branch(is_smi);
+    }
+  } else {
+    if (FLAG_debug_code) __ AbortIfNotSmi(left->reg());
+    if (!right_info.IsSmi()) {
+      Condition is_smi = masm()->CheckSmi(right->reg());
+      both_smi->Branch(is_smi);
+    } else {
+      if (FLAG_debug_code) __ AbortIfNotSmi(right->reg());
+      left->Unuse();
+      right->Unuse();
+      both_smi->Jump();
+    }
+  }
+}
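
When both registers genuinely need checking, CheckBothSmi folds the two tag tests into one by or-ing the words together: a zero smi tag survives the or only if both inputs carry it. JumpIfBothSmiUsingTypeInfo then uses TypeInfo to drop whichever half is statically known. A sketch of the combined test:

    #include <cstdint>

    const intptr_t kSmiTagMaskSketch = 1;  // assumed low tag bit

    // The or preserves a clear tag bit only when both inputs have one,
    // so a single test covers both registers.
    bool BothSmi(intptr_t a, intptr_t b) {
      return ((a | b) & kSmiTagMaskSketch) == 0;
    }
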
+
+
 void CodeGenerator::JumpIfNotSmiUsingTypeInfo(Register reg,
                                               TypeInfo type,
                                               DeferredCode* deferred) {
@@ -2100,9 +2144,9 @@
       // side (which is always a symbol).
       if (cc == equal) {
         Label not_a_symbol;
-        ASSERT(kSymbolTag != 0);
+        STATIC_ASSERT(kSymbolTag != 0);
         // Ensure that no non-strings have the symbol bit set.
-        ASSERT(kNotStringTag + kIsSymbolMask > LAST_TYPE);
+        STATIC_ASSERT(LAST_TYPE < kNotStringTag + kIsSymbolMask);
         __ testb(temp.reg(), Immediate(kIsSymbolMask));  // Test the symbol bit.
         __ j(zero, &not_a_symbol);
         // They are symbols, so do identity compare.
@@ -2242,37 +2286,45 @@
       Register left_reg = left_side.reg();
       Register right_reg = right_side.reg();
 
-      Condition both_smi = masm_->CheckBothSmi(left_reg, right_reg);
-      is_smi.Branch(both_smi);
+      // In-line check for comparing two smis.
+      JumpIfBothSmiUsingTypeInfo(&left_side, &right_side, &is_smi);
 
-      // Inline the equality check if both operands can't be a NaN. If both
-      // objects are the same they are equal.
-      if (nan_info == kCantBothBeNaN && cc == equal) {
-        __ cmpq(left_side.reg(), right_side.reg());
-        dest->true_target()->Branch(equal);
+      if (has_valid_frame()) {
+        // Inline the equality check if both operands can't be a NaN. If both
+        // objects are the same they are equal.
+        if (nan_info == kCantBothBeNaN && cc == equal) {
+          __ cmpq(left_side.reg(), right_side.reg());
+          dest->true_target()->Branch(equal);
+        }
+
+        // Inlined number comparison:
+        if (inline_number_compare) {
+          GenerateInlineNumberComparison(&left_side, &right_side, cc, dest);
+        }
+
+        // End of in-line compare; call out to the compare stub. Don't include
+        // number comparison in the stub if it was inlined.
+        CompareStub stub(cc, strict, nan_info, !inline_number_compare);
+        Result answer = frame_->CallStub(&stub, &left_side, &right_side);
+        __ testq(answer.reg(), answer.reg());  // Sets both zero and sign flags.
+        answer.Unuse();
+        if (is_smi.is_linked()) {
+          dest->true_target()->Branch(cc);
+          dest->false_target()->Jump();
+        } else {
+          dest->Split(cc);
+        }
       }
 
-      // Inlined number comparison:
-      if (inline_number_compare) {
-        GenerateInlineNumberComparison(&left_side, &right_side, cc, dest);
+      if (is_smi.is_linked()) {
+        is_smi.Bind();
+        left_side = Result(left_reg);
+        right_side = Result(right_reg);
+        __ SmiCompare(left_side.reg(), right_side.reg());
+        right_side.Unuse();
+        left_side.Unuse();
+        dest->Split(cc);
       }
-
-      // End of in-line compare, call out to the compare stub. Don't include
-      // number comparison in the stub if it was inlined.
-      CompareStub stub(cc, strict, nan_info, !inline_number_compare);
-      Result answer = frame_->CallStub(&stub, &left_side, &right_side);
-      __ testq(answer.reg(), answer.reg());  // Sets both zero and sign flags.
-      answer.Unuse();
-      dest->true_target()->Branch(cc);
-      dest->false_target()->Jump();
-
-      is_smi.Bind();
-      left_side = Result(left_reg);
-      right_side = Result(right_reg);
-      __ SmiCompare(left_side.reg(), right_side.reg());
-      right_side.Unuse();
-      left_side.Unuse();
-      dest->Split(cc);
     }
   }
 }
@@ -2567,8 +2619,8 @@
       // JS_FUNCTION_TYPE is the last instance type and it is right
       // after LAST_JS_OBJECT_TYPE, we do not have to check the upper
       // bound.
-      ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
-      ASSERT(JS_FUNCTION_TYPE == LAST_JS_OBJECT_TYPE + 1);
+      STATIC_ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
+      STATIC_ASSERT(JS_FUNCTION_TYPE == LAST_JS_OBJECT_TYPE + 1);
       __ CmpObjectType(rax, FIRST_JS_OBJECT_TYPE, rcx);
       __ j(below, &build_args);
 
@@ -4011,7 +4063,7 @@
     // The next handler address is on top of the frame.  Unlink from
     // the handler list and drop the rest of this handler from the
     // frame.
-    ASSERT(StackHandlerConstants::kNextOffset == 0);
+    STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
     __ movq(kScratchRegister, handler_address);
     frame_->EmitPop(Operand(kScratchRegister, 0));
     frame_->Drop(StackHandlerConstants::kSize / kPointerSize - 1);
@@ -4044,7 +4096,7 @@
       __ movq(rsp, Operand(kScratchRegister, 0));
       frame_->Forget(frame_->height() - handler_height);
 
-      ASSERT(StackHandlerConstants::kNextOffset == 0);
+      STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
       __ movq(kScratchRegister, handler_address);
       frame_->EmitPop(Operand(kScratchRegister, 0));
       frame_->Drop(StackHandlerConstants::kSize / kPointerSize - 1);
@@ -4131,7 +4183,7 @@
   // chain and set the state on the frame to FALLING.
   if (has_valid_frame()) {
     // The next handler address is on top of the frame.
-    ASSERT(StackHandlerConstants::kNextOffset == 0);
+    STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
     __ movq(kScratchRegister, handler_address);
     frame_->EmitPop(Operand(kScratchRegister, 0));
     frame_->Drop(StackHandlerConstants::kSize / kPointerSize - 1);
@@ -4172,7 +4224,7 @@
       frame_->Forget(frame_->height() - handler_height);
 
       // Unlink this handler and drop it from the frame.
-      ASSERT(StackHandlerConstants::kNextOffset == 0);
+      STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
       __ movq(kScratchRegister, handler_address);
       frame_->EmitPop(Operand(kScratchRegister, 0));
       frame_->Drop(StackHandlerConstants::kSize / kPointerSize - 1);
@@ -4840,8 +4892,13 @@
           // Duplicate the object as the IC receiver.
           frame_->Dup();
           Load(property->value());
-          frame_->Push(key);
-          Result ignored = frame_->CallStoreIC();
+          Result ignored =
+              frame_->CallStoreIC(Handle<String>::cast(key), false);
+          // A test rax instruction following the store IC call would
+          // indicate the presence of an inlined version of the
+          // store. Add a nop to indicate that there is no such
+          // inlined version.
+          __ nop();
           break;
         }
         // Fall through
@@ -5133,6 +5190,98 @@
 }
 
 
+void CodeGenerator::EmitKeyedPropertyAssignment(Assignment* node) {
+#ifdef DEBUG
+  int original_height = frame()->height();
+#endif
+  Comment cmnt(masm_, "[ Keyed Property Assignment");
+  Property* prop = node->target()->AsProperty();
+  ASSERT_NOT_NULL(prop);
+
+  // Evaluate the receiver subexpression.
+  Load(prop->obj());
+
+  // Change to slow case in the beginning of an initialization block to
+  // avoid the quadratic behavior of repeatedly adding fast properties.
+  if (node->starts_initialization_block()) {
+    frame_->Dup();
+    Result ignored = frame_->CallRuntime(Runtime::kToSlowProperties, 1);
+  }
+
+  // Change to fast case at the end of an initialization block. To prepare for
+  // that add an extra copy of the receiver to the frame, so that it can be
+  // converted back to fast case after the assignment.
+  if (node->ends_initialization_block()) {
+    frame_->Dup();
+  }
+
+  // Evaluate the key subexpression.
+  Load(prop->key());
+
+  // Stack layout:
+  // [tos]   : key
+  // [tos+1] : receiver
+  // [tos+2] : receiver if at the end of an initialization block
+
+  // Evaluate the right-hand side.
+  if (node->is_compound()) {
+    // For a compound assignment the right-hand side is a binary operation
+    // between the current property value and the actual right-hand side.
+    // Duplicate receiver and key for loading the current property value.
+    frame()->PushElementAt(1);
+    frame()->PushElementAt(1);
+    Result value = EmitKeyedLoad();
+    frame()->Push(&value);
+    Load(node->value());
+
+    // Perform the binary operation.
+    bool overwrite_value =
+        (node->value()->AsBinaryOperation() != NULL &&
+         node->value()->AsBinaryOperation()->ResultOverwriteAllowed());
+    BinaryOperation expr(node, node->binary_op(), node->target(),
+                         node->value());
+    GenericBinaryOperation(&expr,
+                           overwrite_value ? OVERWRITE_RIGHT : NO_OVERWRITE);
+  } else {
+    // For non-compound assignment just load the right-hand side.
+    Load(node->value());
+  }
+
+  // Stack layout:
+  // [tos]   : value
+  // [tos+1] : key
+  // [tos+2] : receiver
+  // [tos+3] : receiver if at the end of an initialization block
+
+  // Perform the assignment.  It is safe to ignore constants here.
+  ASSERT(node->op() != Token::INIT_CONST);
+  CodeForSourcePosition(node->position());
+  Result answer = EmitKeyedStore(prop->key()->type());
+  frame()->Push(&answer);
+
+  // Stack layout:
+  // [tos]   : result
+  // [tos+1] : receiver if at the end of an initialization block
+
+  // Change to fast case at the end of an initialization block.
+  if (node->ends_initialization_block()) {
+    // The argument to the runtime call is the extra copy of the receiver,
+    // which is below the value of the assignment.  Swap the receiver and
+    // the value of the assignment expression.
+    Result result = frame()->Pop();
+    Result receiver = frame()->Pop();
+    frame()->Push(&result);
+    frame()->Push(&receiver);
+    Result ignored = frame_->CallRuntime(Runtime::kToFastProperties, 1);
+  }
+
+  // Stack layout:
+  // [tos]   : result
+
+  ASSERT(frame()->height() == original_height + 1);
+}
+
+
 void CodeGenerator::VisitAssignment(Assignment* node) {
 #ifdef DEBUG
   int original_height = frame()->height();
@@ -5150,105 +5299,19 @@
     // global 'this' because it is not a valid left-hand side.
     EmitNamedPropertyAssignment(node);
 
+  } else if (prop != NULL) {
+    // Other properties (including rewritten parameters for a function that
+    // uses arguments) are keyed property assignments.
+    EmitKeyedPropertyAssignment(node);
+
   } else {
-    Comment cmnt(masm_, "[ Assignment");
-
-    { Reference target(this, node->target(), node->is_compound());
-      if (target.is_illegal()) {
-        // Fool the virtual frame into thinking that we left the assignment's
-        // value on the frame.
-        frame_->Push(Smi::FromInt(0));
-        return;
-      }
-
-      if (node->starts_initialization_block()) {
-        ASSERT(target.type() == Reference::NAMED ||
-               target.type() == Reference::KEYED);
-        // Change to slow case in the beginning of an initialization
-        // block to avoid the quadratic behavior of repeatedly adding
-        // fast properties.
-
-        // The receiver is the argument to the runtime call.  It is the
-        // first value pushed when the reference was loaded to the
-        // frame.
-        frame_->PushElementAt(target.size() - 1);
-        Result ignored = frame_->CallRuntime(Runtime::kToSlowProperties, 1);
-      }
-      if (node->ends_initialization_block()) {
-        // Add an extra copy of the receiver to the frame, so that it can be
-        // converted back to fast case after the assignment.
-        ASSERT(target.type() == Reference::NAMED ||
-               target.type() == Reference::KEYED);
-        if (target.type() == Reference::NAMED) {
-          frame_->Dup();
-          // Dup target receiver on stack.
-        } else {
-          ASSERT(target.type() == Reference::KEYED);
-          Result temp = frame_->Pop();
-          frame_->Dup();
-          frame_->Push(&temp);
-        }
-      }
-      if (node->op() == Token::ASSIGN ||
-          node->op() == Token::INIT_VAR ||
-          node->op() == Token::INIT_CONST) {
-        Load(node->value());
-
-      } else {  // Assignment is a compound assignment.
-        Literal* literal = node->value()->AsLiteral();
-        bool overwrite_value =
-            (node->value()->AsBinaryOperation() != NULL &&
-             node->value()->AsBinaryOperation()->ResultOverwriteAllowed());
-        Variable* right_var = node->value()->AsVariableProxy()->AsVariable();
-        // There are two cases where the target is not read in the right hand
-        // side, that are easy to test for: the right hand side is a literal,
-        // or the right hand side is a different variable.  TakeValue
-        // invalidates the target, with an implicit promise that it will be
-        // written to again
-        // before it is read.
-        if (literal != NULL || (right_var != NULL && right_var != var)) {
-          target.TakeValue();
-        } else {
-          target.GetValue();
-        }
-        Load(node->value());
-        BinaryOperation expr(node, node->binary_op(), node->target(),
-                             node->value());
-        GenericBinaryOperation(
-            &expr, overwrite_value ? OVERWRITE_RIGHT : NO_OVERWRITE);
-      }
-      if (var != NULL &&
-          var->mode() == Variable::CONST &&
-          node->op() != Token::INIT_VAR && node->op() != Token::INIT_CONST) {
-        // Assignment ignored - leave the value on the stack.
-        UnloadReference(&target);
-      } else {
-        CodeForSourcePosition(node->position());
-        if (node->op() == Token::INIT_CONST) {
-          // Dynamic constant initializations must use the function context
-          // and initialize the actual constant declared. Dynamic variable
-          // initializations are simply assignments and use SetValue.
-          target.SetValue(CONST_INIT);
-        } else {
-          target.SetValue(NOT_CONST_INIT);
-        }
-        if (node->ends_initialization_block()) {
-          ASSERT(target.type() == Reference::UNLOADED);
-          // End of initialization block. Revert to fast case.  The
-          // argument to the runtime call is the extra copy of the receiver,
-          // which is below the value of the assignment.
-          // Swap the receiver and the value of the assignment expression.
-          Result lhs = frame_->Pop();
-          Result receiver = frame_->Pop();
-          frame_->Push(&lhs);
-          frame_->Push(&receiver);
-          Result ignored = frame_->CallRuntime(Runtime::kToFastProperties, 1);
-        }
-      }
-    }
+    // Invalid left-hand side.
+    Load(node->target());
+    Result result = frame()->CallRuntime(Runtime::kThrowReferenceError, 1);
+    // The runtime call doesn't actually return but the code generator will
+    // still generate code and expects a certain frame height.
+    frame()->Push(&result);
   }
-  // Stack layout:
-  // [tos]   : result
 
   ASSERT(frame()->height() == original_height + 1);
 }
@@ -6181,7 +6244,7 @@
   ASSERT(args->length() == 0);
   // RBP value is aligned, so it looks tagged like a smi (without necessarily
   // being padded like a smi), but it should not be treated as one.
-  ASSERT(kSmiTag == 0 && kSmiTagSize == 1);
+  STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize == 1);
   Result rbp_as_smi = allocator_->Allocate();
   ASSERT(rbp_as_smi.is_valid());
   __ movq(rbp_as_smi.reg(), rbp);
@@ -7986,11 +8049,105 @@
   int expected_height = frame()->height() - (is_contextual ? 1 : 2);
 #endif
 
-  Result result = frame()->CallStoreIC(name, is_contextual);
-  // A test rax instruction following the call signals that the inobject
-  // property case was inlined.  Ensure that there is not a test rax
-  // instruction here.
-  __ nop();
+  Result result;
+  if (is_contextual || scope()->is_global_scope() || loop_nesting() == 0) {
+    result = frame()->CallStoreIC(name, is_contextual);
+    // A test rax instruction following the call signals that the inobject
+    // property case was inlined.  Ensure that there is not a test rax
+    // instruction here.
+    __ nop();
+  } else {
+    // Inline the in-object property case.
+    JumpTarget slow, done;
+    Label patch_site;
+
+    // Get the value and receiver from the stack.
+    Result value = frame()->Pop();
+    value.ToRegister();
+    Result receiver = frame()->Pop();
+    receiver.ToRegister();
+
+    // Allocate result register.
+    result = allocator()->Allocate();
+    ASSERT(result.is_valid() && receiver.is_valid() && value.is_valid());
+
+    // Check that the receiver is a heap object.
+    Condition is_smi = __ CheckSmi(receiver.reg());
+    slow.Branch(is_smi, &value, &receiver);
+
+    // This is the map check instruction that will be patched.
+    // Initially use an invalid map to force a failure. The exact
+    // instruction sequence is important because we use the
+    // kOffsetToStoreInstruction constant for patching. We avoid using
+    // the __ macro for the following two instructions because it
+    // might introduce extra instructions.
+    __ bind(&patch_site);
+    masm()->Move(kScratchRegister, Factory::null_value());
+    masm()->cmpq(FieldOperand(receiver.reg(), HeapObject::kMapOffset),
+                 kScratchRegister);
+    // This branch is always a forward branch, so it has a fixed size, which
+    // allows the assert below to succeed and patching to work.
+    slow.Branch(not_equal, &value, &receiver);
+
+    // The delta from the patch label to the store offset must be
+    // statically known.
+    ASSERT(masm()->SizeOfCodeGeneratedSince(&patch_site) ==
+           StoreIC::kOffsetToStoreInstruction);
+
+    // The initial (invalid) offset has to be large enough to force a 32-bit
+    // instruction encoding to allow patching with an arbitrary offset.  Use
+    // kMaxInt (minus kHeapObjectTag).
+    int offset = kMaxInt;
+    __ movq(FieldOperand(receiver.reg(), offset), value.reg());
+    __ movq(result.reg(), value.reg());
+
+    // Allocate scratch register for write barrier.
+    Result scratch = allocator()->Allocate();
+    ASSERT(scratch.is_valid());
+
+    // The write barrier clobbers all input registers, so spill the
+    // receiver and the value.
+    frame_->Spill(receiver.reg());
+    frame_->Spill(value.reg());
+
+    // If the receiver and the value share a register allocate a new
+    // register for the receiver.
+    if (receiver.reg().is(value.reg())) {
+      receiver = allocator()->Allocate();
+      ASSERT(receiver.is_valid());
+      __ movq(receiver.reg(), value.reg());
+    }
+
+    // Update the write barrier. To save instructions in the inlined
+    // version we do not filter smis.
+    Label skip_write_barrier;
+    __ InNewSpace(receiver.reg(), value.reg(), equal, &skip_write_barrier);
+    int delta_to_record_write = masm_->SizeOfCodeGeneratedSince(&patch_site);
+    __ lea(scratch.reg(), Operand(receiver.reg(), offset));
+    __ RecordWriteHelper(receiver.reg(), scratch.reg(), value.reg());
+    if (FLAG_debug_code) {
+      __ movq(receiver.reg(), BitCast<int64_t>(kZapValue), RelocInfo::NONE);
+      __ movq(value.reg(), BitCast<int64_t>(kZapValue), RelocInfo::NONE);
+      __ movq(scratch.reg(), BitCast<int64_t>(kZapValue), RelocInfo::NONE);
+    }
+    __ bind(&skip_write_barrier);
+    value.Unuse();
+    scratch.Unuse();
+    receiver.Unuse();
+    done.Jump(&result);
+
+    slow.Bind(&value, &receiver);
+    frame()->Push(&receiver);
+    frame()->Push(&value);
+    result = frame()->CallStoreIC(name, is_contextual);
+    // Encode the offset to the map check instruction and the offset
+    // to the write barrier store address computation in a test rax
+    // instruction.
+    int delta_to_patch_site = masm_->SizeOfCodeGeneratedSince(&patch_site);
+    __ testl(rax,
+             Immediate((delta_to_record_write << 16) | delta_to_patch_site));
+    done.Bind(&result);
+  }
 
   ASSERT_EQ(expected_height, frame()->height());
   return result;
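
The marker test rax instruction packs two code offsets into its 32-bit immediate so the IC patching machinery can later locate both the map-check site and the write-barrier address computation. A sketch of the encoding (helper names are illustrative):

    #include <cstdint>

    // Mirrors the (delta_to_record_write << 16) | delta_to_patch_site
    // immediate built above.
    uint32_t EncodeStoreDeltas(int delta_to_record_write,
                               int delta_to_patch_site) {
      return (static_cast<uint32_t>(delta_to_record_write) << 16) |
             static_cast<uint32_t>(delta_to_patch_site);
    }

    int PatchSiteDelta(uint32_t immediate) { return immediate & 0xffff; }
    int RecordWriteDelta(uint32_t immediate) { return immediate >> 16; }
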
@@ -8097,6 +8254,112 @@
 }
 
 
+Result CodeGenerator::EmitKeyedStore(StaticType* key_type) {
+#ifdef DEBUG
+  int original_height = frame()->height();
+#endif
+  Result result;
+  // Generate inlined version of the keyed store if the code is in a loop
+  // and the key is likely to be a smi.
+  if (loop_nesting() > 0 && key_type->IsLikelySmi()) {
+    Comment cmnt(masm(), "[ Inlined store to keyed Property");
+
+    // Get the receiver, key and value into registers.
+    result = frame()->Pop();
+    Result key = frame()->Pop();
+    Result receiver = frame()->Pop();
+
+    Result tmp = allocator_->Allocate();
+    ASSERT(tmp.is_valid());
+    Result tmp2 = allocator_->Allocate();
+    ASSERT(tmp2.is_valid());
+
+    // Determine whether the value is a constant before putting it in a
+    // register.
+    bool value_is_constant = result.is_constant();
+
+    // Make sure that value, key and receiver are in registers.
+    result.ToRegister();
+    key.ToRegister();
+    receiver.ToRegister();
+
+    DeferredReferenceSetKeyedValue* deferred =
+        new DeferredReferenceSetKeyedValue(result.reg(),
+                                           key.reg(),
+                                           receiver.reg());
+
+    // Check that the receiver is not a smi.
+    __ JumpIfSmi(receiver.reg(), deferred->entry_label());
+
+    // Check that the key is a smi.
+    if (!key.is_smi()) {
+      __ JumpIfNotSmi(key.reg(), deferred->entry_label());
+    } else if (FLAG_debug_code) {
+      __ AbortIfNotSmi(key.reg());
+    }
+
+    // Check that the receiver is a JSArray.
+    __ CmpObjectType(receiver.reg(), JS_ARRAY_TYPE, kScratchRegister);
+    deferred->Branch(not_equal);
+
+    // Check that the key is within bounds.  Both the key and the length of
+    // the JSArray are smis. Use unsigned comparison to handle negative keys.
+    __ SmiCompare(FieldOperand(receiver.reg(), JSArray::kLengthOffset),
+                  key.reg());
+    deferred->Branch(below_equal);
+
+    // Get the elements array from the receiver and check that it is not a
+    // dictionary.
+    __ movq(tmp.reg(),
+            FieldOperand(receiver.reg(), JSArray::kElementsOffset));
+
+    // Check whether it is possible to omit the write barrier. If the elements
+    // array is in new space or the value written is a smi we can safely update
+    // the elements array without write barrier.
+    Label in_new_space;
+    __ InNewSpace(tmp.reg(), tmp2.reg(), equal, &in_new_space);
+    if (!value_is_constant) {
+      __ JumpIfNotSmi(result.reg(), deferred->entry_label());
+    }
+
+    __ bind(&in_new_space);
+    // Bind the deferred code patch site to be able to locate the fixed
+    // array map comparison.  When debugging, we patch this comparison to
+    // always fail so that we will hit the IC call in the deferred code
+    // which will allow the debugger to break for fast case stores.
+    __ bind(deferred->patch_site());
+    // Avoid using __ to ensure the distance from patch_site
+    // to the map address is always the same.
+    masm()->movq(kScratchRegister, Factory::fixed_array_map(),
+                 RelocInfo::EMBEDDED_OBJECT);
+    __ cmpq(FieldOperand(tmp.reg(), HeapObject::kMapOffset),
+            kScratchRegister);
+    deferred->Branch(not_equal);
+
+    // Store the value.
+    SmiIndex index =
+        masm()->SmiToIndex(kScratchRegister, key.reg(), kPointerSizeLog2);
+    __ movq(FieldOperand(tmp.reg(),
+                         index.reg,
+                         index.scale,
+                         FixedArray::kHeaderSize),
+            result.reg());
+    __ IncrementCounter(&Counters::keyed_store_inline, 1);
+
+    deferred->BindExit();
+  } else {
+    result = frame()->CallKeyedStoreIC();
+    // Make sure that we do not have a test instruction after the
+    // call.  A test instruction after the call is used to
+    // indicate that we have generated an inline version of the
+    // keyed store.
+    __ nop();
+  }
+  ASSERT(frame()->height() == original_height - 3);
+  return result;
+}
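
The inlined keyed store does its bounds check with a single unsigned comparison of two smis: a negative key reinterprets as a huge unsigned value, so the key < 0 and key >= length tests collapse into the one below_equal branch above. A sketch on untagged indices:

    #include <cstdint>

    // One unsigned compare covers both "key < 0" and "key >= length".
    bool KeyInBounds(int32_t key, int32_t length) {
      return static_cast<uint32_t>(key) < static_cast<uint32_t>(length);
    }
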
+
+
 #undef __
 #define __ ACCESS_MASM(masm)
 
@@ -8222,14 +8485,13 @@
       Slot* slot = expression_->AsVariableProxy()->AsVariable()->slot();
       ASSERT(slot != NULL);
       cgen_->StoreToSlot(slot, init_state);
-      cgen_->UnloadReference(this);
+      set_unloaded();
       break;
     }
 
     case NAMED: {
       Comment cmnt(masm, "[ Store to named Property");
-      cgen_->frame()->Push(GetName());
-      Result answer = cgen_->frame()->CallStoreIC();
+      Result answer = cgen_->EmitNamedStore(GetName(), false);
       cgen_->frame()->Push(&answer);
       set_unloaded();
       break;
@@ -8237,117 +8499,17 @@
 
     case KEYED: {
       Comment cmnt(masm, "[ Store to keyed Property");
-
-      // Generate inlined version of the keyed store if the code is in
-      // a loop and the key is likely to be a smi.
       Property* property = expression()->AsProperty();
       ASSERT(property != NULL);
-      StaticType* key_smi_analysis = property->key()->type();
 
-      if (cgen_->loop_nesting() > 0 && key_smi_analysis->IsLikelySmi()) {
-        Comment cmnt(masm, "[ Inlined store to keyed Property");
-
-        // Get the receiver, key and value into registers.
-        Result value = cgen_->frame()->Pop();
-        Result key = cgen_->frame()->Pop();
-        Result receiver = cgen_->frame()->Pop();
-
-        Result tmp = cgen_->allocator_->Allocate();
-        ASSERT(tmp.is_valid());
-        Result tmp2 = cgen_->allocator_->Allocate();
-        ASSERT(tmp2.is_valid());
-
-        // Determine whether the value is a constant before putting it
-        // in a register.
-        bool value_is_constant = value.is_constant();
-
-        // Make sure that value, key and receiver are in registers.
-        value.ToRegister();
-        key.ToRegister();
-        receiver.ToRegister();
-
-        DeferredReferenceSetKeyedValue* deferred =
-            new DeferredReferenceSetKeyedValue(value.reg(),
-                                               key.reg(),
-                                               receiver.reg());
-
-        // Check that the receiver is not a smi.
-        __ JumpIfSmi(receiver.reg(), deferred->entry_label());
-
-        // Check that the key is a smi.
-        if (!key.is_smi()) {
-          __ JumpIfNotSmi(key.reg(), deferred->entry_label());
-        } else if (FLAG_debug_code) {
-          __ AbortIfNotSmi(key.reg());
-        }
-
-        // Check that the receiver is a JSArray.
-        __ CmpObjectType(receiver.reg(), JS_ARRAY_TYPE, kScratchRegister);
-        deferred->Branch(not_equal);
-
-        // Check that the key is within bounds.  Both the key and the
-        // length of the JSArray are smis. Use unsigned comparison to handle
-        // negative keys.
-        __ SmiCompare(FieldOperand(receiver.reg(), JSArray::kLengthOffset),
-                      key.reg());
-        deferred->Branch(below_equal);
-
-        // Get the elements array from the receiver and check that it
-        // is a flat array (not a dictionary).
-        __ movq(tmp.reg(),
-                FieldOperand(receiver.reg(), JSObject::kElementsOffset));
-
-        // Check whether it is possible to omit the write barrier. If the
-        // elements array is in new space or the value written is a smi we can
-        // safely update the elements array without write barrier.
-        Label in_new_space;
-        __ InNewSpace(tmp.reg(), tmp2.reg(), equal, &in_new_space);
-        if (!value_is_constant) {
-          __ JumpIfNotSmi(value.reg(), deferred->entry_label());
-        }
-
-        __ bind(&in_new_space);
-        // Bind the deferred code patch site to be able to locate the
-        // fixed array map comparison.  When debugging, we patch this
-        // comparison to always fail so that we will hit the IC call
-        // in the deferred code which will allow the debugger to
-        // break for fast case stores.
-        __ bind(deferred->patch_site());
-        // Avoid using __ to ensure the distance from patch_site
-        // to the map address is always the same.
-        masm->movq(kScratchRegister, Factory::fixed_array_map(),
-                   RelocInfo::EMBEDDED_OBJECT);
-        __ cmpq(FieldOperand(tmp.reg(), HeapObject::kMapOffset),
-                kScratchRegister);
-        deferred->Branch(not_equal);
-
-        // Store the value.
-        SmiIndex index =
-            masm->SmiToIndex(kScratchRegister, key.reg(), kPointerSizeLog2);
-        __ movq(FieldOperand(tmp.reg(),
-                             index.reg,
-                             index.scale,
-                             FixedArray::kHeaderSize),
-                value.reg());
-        __ IncrementCounter(&Counters::keyed_store_inline, 1);
-
-        deferred->BindExit();
-
-        cgen_->frame()->Push(&value);
-      } else {
-        Result answer = cgen_->frame()->CallKeyedStoreIC();
-        // Make sure that we do not have a test instruction after the
-        // call.  A test instruction after the call is used to
-        // indicate that we have generated an inline version of the
-        // keyed store.
-        masm->nop();
-        cgen_->frame()->Push(&answer);
-      }
+      Result answer = cgen_->EmitKeyedStore(property->key()->type());
+      cgen_->frame()->Push(&answer);
       set_unloaded();
       break;
     }
 
-    default:
+    case UNLOADED:
+    case ILLEGAL:
       UNREACHABLE();
   }
 }
@@ -9587,7 +9749,7 @@
   __ bind(&arg2_is_object);
   __ cmpq(FieldOperand(rax, HeapObject::kMapOffset), heap_number_map);
   __ j(not_equal, &check_undefined_arg2);
-  // Get the untagged integer version of the eax heap number in ecx.
+  // Get the untagged integer version of the rax heap number in rcx.
   IntegerConvert(masm, rcx, rax);
   __ bind(&done);
   __ movl(rax, rdx);
@@ -10006,7 +10168,7 @@
   __ j(not_equal, &runtime);
   // Check that the last match info has space for the capture registers and the
   // additional information. Ensure no overflow in add.
-  ASSERT(FixedArray::kMaxLength < kMaxInt - FixedArray::kLengthOffset);
+  STATIC_ASSERT(FixedArray::kMaxLength < kMaxInt - FixedArray::kLengthOffset);
   __ SmiToInteger32(rax, FieldOperand(rbx, FixedArray::kLengthOffset));
   __ addl(rdx, Immediate(RegExpImpl::kLastMatchOverhead));
   __ cmpl(rdx, rax);
@@ -10021,7 +10183,7 @@
   // First check for flat two byte string.
   __ andb(rbx, Immediate(
       kIsNotStringMask | kStringRepresentationMask | kStringEncodingMask));
-  ASSERT_EQ(0, kStringTag | kSeqStringTag | kTwoByteStringTag);
+  STATIC_ASSERT((kStringTag | kSeqStringTag | kTwoByteStringTag) == 0);
   __ j(zero, &seq_two_byte_string);
   // Any other flat string must be a flat ascii string.
   __ testb(rbx, Immediate(kIsNotStringMask | kStringRepresentationMask));
@@ -10032,8 +10194,8 @@
   // string. In that case the subject string is just the first part of the cons
   // string. Also in this case the first part of the cons string is known to be
   // a sequential string or an external string.
-  ASSERT(kExternalStringTag !=0);
-  ASSERT_EQ(0, kConsStringTag & kExternalStringTag);
+  STATIC_ASSERT(kExternalStringTag != 0);
+  STATIC_ASSERT((kConsStringTag & kExternalStringTag) == 0);
   __ testb(rbx, Immediate(kIsNotStringMask | kExternalStringTag));
   __ j(not_zero, &runtime);
   // String is a cons string.
@@ -10043,12 +10205,12 @@
   __ movq(rax, FieldOperand(rax, ConsString::kFirstOffset));
   __ movq(rbx, FieldOperand(rax, HeapObject::kMapOffset));
   // String is a cons string with empty second part.
-  // eax: first part of cons string.
-  // ebx: map of first part of cons string.
+  // rax: first part of cons string.
+  // rbx: map of first part of cons string.
   // Is first part a flat two byte string?
   __ testb(FieldOperand(rbx, Map::kInstanceTypeOffset),
            Immediate(kStringRepresentationMask | kStringEncodingMask));
-  ASSERT_EQ(0, kSeqStringTag | kTwoByteStringTag);
+  STATIC_ASSERT((kSeqStringTag | kTwoByteStringTag) == 0);
   __ j(zero, &seq_two_byte_string);
   // Any other flat string must be ascii.
   __ testb(FieldOperand(rbx, Map::kInstanceTypeOffset),
@@ -10285,7 +10447,7 @@
     __ JumpIfSmi(object, &is_smi);
     __ CheckMap(object, Factory::heap_number_map(), not_found, true);
 
-    ASSERT_EQ(8, kDoubleSize);
+    STATIC_ASSERT(8 == kDoubleSize);
     __ movl(scratch, FieldOperand(object, HeapNumber::kValueOffset + 4));
     __ xor_(scratch, FieldOperand(object, HeapNumber::kValueOffset));
     GenerateConvertHashCodeToIndex(masm, scratch, mask);
@@ -10466,13 +10628,13 @@
       // There is no test for undetectability in strict equality.
 
       // If the first object is a JS object, we have done pointer comparison.
-      ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
+      STATIC_ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
       Label first_non_object;
       __ CmpObjectType(rax, FIRST_JS_OBJECT_TYPE, rcx);
       __ j(below, &first_non_object);
       // Return non-zero (eax (not rax) is not zero)
       Label return_not_equal;
-      ASSERT(kHeapObjectTag != 0);
+      STATIC_ASSERT(kHeapObjectTag != 0);
       __ bind(&return_not_equal);
       __ ret(0);
 
@@ -10564,8 +10726,8 @@
     // At most one is a smi, so we can test for smi by adding the two.
     // A smi plus a heap object has the low bit set, a heap object plus
     // a heap object has the low bit clear.
-    ASSERT_EQ(0, kSmiTag);
-    ASSERT_EQ(static_cast<int64_t>(1), kSmiTagMask);
+    STATIC_ASSERT(kSmiTag == 0);
+    STATIC_ASSERT(kSmiTagMask == 1);
     __ lea(rcx, Operand(rax, rdx, times_1, 0));
     __ testb(rcx, Immediate(kSmiTagMask));
     __ j(not_zero, &not_both_objects);
@@ -10621,8 +10783,8 @@
   __ movzxbq(scratch,
              FieldOperand(scratch, Map::kInstanceTypeOffset));
   // Ensure that no non-strings have the symbol bit set.
-  ASSERT(kNotStringTag + kIsSymbolMask > LAST_TYPE);
-  ASSERT(kSymbolTag != 0);
+  STATIC_ASSERT(LAST_TYPE < kNotStringTag + kIsSymbolMask);
+  STATIC_ASSERT(kSymbolTag != 0);
   __ testb(scratch, Immediate(kIsSymbolMask));
   __ j(zero, label);
 }
@@ -10701,9 +10863,9 @@
 void CEntryStub::GenerateThrowTOS(MacroAssembler* masm) {
  // The stack should contain the next handler, frame pointer, state, and
  // return address, in that order.
-  ASSERT_EQ(StackHandlerConstants::kFPOffset + kPointerSize,
+  STATIC_ASSERT(StackHandlerConstants::kFPOffset + kPointerSize ==
             StackHandlerConstants::kStateOffset);
-  ASSERT_EQ(StackHandlerConstants::kStateOffset + kPointerSize,
+  STATIC_ASSERT(StackHandlerConstants::kStateOffset + kPointerSize ==
             StackHandlerConstants::kPCOffset);
 
   ExternalReference handler_address(Top::k_handler_address);
@@ -10813,7 +10975,7 @@
 
   // Check for failure result.
   Label failure_returned;
-  ASSERT(((kFailureTag + 1) & kFailureTagMask) == 0);
+  STATIC_ASSERT(((kFailureTag + 1) & kFailureTagMask) == 0);
 #ifdef _WIN64
   // If return value is on the stack, pop it to registers.
   if (result_size_ > 1) {
@@ -10839,7 +11001,7 @@
 
   Label retry;
   // If the returned exception is RETRY_AFTER_GC continue at retry label
-  ASSERT(Failure::RETRY_AFTER_GC == 0);
+  STATIC_ASSERT(Failure::RETRY_AFTER_GC == 0);
   __ testl(rax, Immediate(((1 << kFailureTypeTagSize) - 1) << kFailureTagSize));
   __ j(zero, &retry);
 
@@ -10909,14 +11071,14 @@
   __ xor_(rsi, rsi);
 
   // Restore registers from handler.
-  ASSERT_EQ(StackHandlerConstants::kNextOffset + kPointerSize,
+  STATIC_ASSERT(StackHandlerConstants::kNextOffset + kPointerSize ==
             StackHandlerConstants::kFPOffset);
   __ pop(rbp);  // FP
-  ASSERT_EQ(StackHandlerConstants::kFPOffset + kPointerSize,
+  STATIC_ASSERT(StackHandlerConstants::kFPOffset + kPointerSize ==
             StackHandlerConstants::kStateOffset);
   __ pop(rdx);  // State
 
-  ASSERT_EQ(StackHandlerConstants::kStateOffset + kPointerSize,
+  STATIC_ASSERT(StackHandlerConstants::kStateOffset + kPointerSize ==
             StackHandlerConstants::kPCOffset);
   __ ret(0);
 }
@@ -11191,7 +11353,7 @@
   __ bind(&is_instance);
   __ xorl(rax, rax);
   // Store bitwise zero in the cache.  This is a Smi in GC terms.
-  ASSERT_EQ(0, kSmiTag);
+  STATIC_ASSERT(kSmiTag == 0);
   __ StoreRoot(rax, Heap::kInstanceofCacheAnswerRootIndex);
   __ ret(2 * kPointerSize);
 
@@ -11296,7 +11458,7 @@
   __ j(above_equal, index_out_of_range_);
 
   // We need special handling for non-flat strings.
-  ASSERT(kSeqStringTag == 0);
+  STATIC_ASSERT(kSeqStringTag == 0);
   __ testb(result_, Immediate(kStringRepresentationMask));
   __ j(zero, &flat_string);
 
@@ -11317,13 +11479,13 @@
   __ movq(result_, FieldOperand(object_, HeapObject::kMapOffset));
   __ movzxbl(result_, FieldOperand(result_, Map::kInstanceTypeOffset));
   // If the first cons component is also non-flat, then go to runtime.
-  ASSERT(kSeqStringTag == 0);
+  STATIC_ASSERT(kSeqStringTag == 0);
   __ testb(result_, Immediate(kStringRepresentationMask));
   __ j(not_zero, &call_runtime_);
 
   // Check for 1-byte or 2-byte string.
   __ bind(&flat_string);
-  ASSERT(kAsciiStringTag != 0);
+  STATIC_ASSERT(kAsciiStringTag != 0);
   __ testb(result_, Immediate(kStringEncodingMask));
   __ j(not_zero, &ascii_string);
 
@@ -11517,7 +11679,7 @@
   __ movzxbl(r9, FieldOperand(r9, Map::kInstanceTypeOffset));
 
   // Look at the length of the result of adding the two strings.
-  ASSERT(String::kMaxLength <= Smi::kMaxValue / 2);
+  STATIC_ASSERT(String::kMaxLength <= Smi::kMaxValue / 2);
   __ SmiAdd(rbx, rbx, rcx, NULL);
   // Use the runtime system when adding two one character strings, as it
   // contains optimizations for this specific case using the symbol table.
@@ -11549,7 +11711,7 @@
   __ SmiCompare(rbx, Smi::FromInt(String::kMinNonFlatLength));
   __ j(below, &string_add_flat_result);
   // Handle exceptionally long strings in the runtime system.
-  ASSERT((String::kMaxLength & 0x80000000) == 0);
+  STATIC_ASSERT((String::kMaxLength & 0x80000000) == 0);
   __ SmiCompare(rbx, Smi::FromInt(String::kMaxLength));
   __ j(above, &string_add_runtime);
 
@@ -11563,7 +11725,7 @@
   Label non_ascii, allocated, ascii_data;
   __ movl(rcx, r8);
   __ and_(rcx, r9);
-  ASSERT(kStringEncodingMask == kAsciiStringTag);
+  STATIC_ASSERT(kStringEncodingMask == kAsciiStringTag);
   __ testl(rcx, Immediate(kAsciiStringTag));
   __ j(zero, &non_ascii);
   __ bind(&ascii_data);
@@ -11588,7 +11750,7 @@
   __ testb(rcx, Immediate(kAsciiDataHintMask));
   __ j(not_zero, &ascii_data);
   __ xor_(r8, r9);
-  ASSERT(kAsciiStringTag != 0 && kAsciiDataHintTag != 0);
+  STATIC_ASSERT(kAsciiStringTag != 0 && kAsciiDataHintTag != 0);
   __ andb(r8, Immediate(kAsciiStringTag | kAsciiDataHintTag));
   __ cmpb(r8, Immediate(kAsciiStringTag | kAsciiDataHintTag));
   __ j(equal, &ascii_data);
@@ -11620,7 +11782,7 @@
   // r8: instance type of first string
   // r9: instance type of second string
   Label non_ascii_string_add_flat_result;
-  ASSERT(kStringEncodingMask == kAsciiStringTag);
+  STATIC_ASSERT(kStringEncodingMask == kAsciiStringTag);
   __ testl(r8, Immediate(kAsciiStringTag));
   __ j(zero, &non_ascii_string_add_flat_result);
   __ testl(r9, Immediate(kAsciiStringTag));
@@ -11742,7 +11904,7 @@
 
   // Make count the number of bytes to copy.
   if (!ascii) {
-    ASSERT_EQ(2, static_cast<int>(sizeof(uc16)));  // NOLINT
+    STATIC_ASSERT(2 == sizeof(uc16));
     __ addl(count, count);
   }
 
@@ -11849,7 +12011,7 @@
 
     // Load the entry from the symbol table.
     Register candidate = scratch;  // Scratch register contains candidate.
-    ASSERT_EQ(1, SymbolTable::kEntrySize);
+    STATIC_ASSERT(SymbolTable::kEntrySize == 1);
     __ movq(candidate,
             FieldOperand(symbol_table,
                          scratch,
@@ -11964,7 +12126,7 @@
 
   // Make sure first argument is a string.
   __ movq(rax, Operand(rsp, kStringOffset));
-  ASSERT_EQ(0, kSmiTag);
+  STATIC_ASSERT(kSmiTag == 0);
   __ testl(rax, Immediate(kSmiTagMask));
   __ j(zero, &runtime);
   Condition is_string = masm->IsObjectStringType(rax, rbx, rbx);
@@ -12104,7 +12266,7 @@
                                                         Register scratch4) {
   // Ensure that you can always subtract a string length from a non-negative
   // number (e.g. another length).
-  ASSERT(String::kMaxLength < 0x7fffffff);
+  STATIC_ASSERT(String::kMaxLength < 0x7fffffff);
 
   // Find minimum length and length difference.
   __ movq(scratch1, FieldOperand(left, String::kLengthOffset));
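
Note on the smi-tag trick in the strict-equality code above ("a smi plus a
heap object has the low bit set, a heap object plus a heap object has the low
bit clear"): with kSmiTag == 0, kSmiTagMask == 1, and kHeapObjectTag != 0, as
the STATIC_ASSERTs state, this is plain parity arithmetic. A standalone check
under those assumptions (the x64 smi is modeled as a value shifted into the
upper word, so its low tag bit is clear):

    #include <cassert>
    #include <cstdint>

    int main() {
      // A smi is an even word (low tag bit clear); a heap object pointer is
      // an odd word (low tag bit set).
      uint64_t smi = uint64_t{42} << 32;
      uint64_t heap_a = 0x100001;  // odd: tagged heap object pointer
      uint64_t heap_b = 0x200005;

      // Precondition of the emitted code: at most one operand is a smi (the
      // two-smi case was handled earlier), so the sum's low bit decides.
      assert(((smi + heap_a) & 1) == 1);     // smi + heap object: bit set
      assert(((heap_a + heap_b) & 1) == 0);  // heap + heap: bit clear
      return 0;
    }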
diff --git a/src/x64/codegen-x64.h b/src/x64/codegen-x64.h
index 5863317..f694dde 100644
--- a/src/x64/codegen-x64.h
+++ b/src/x64/codegen-x64.h
@@ -457,6 +457,7 @@
   // Support for compiling assignment expressions.
   void EmitSlotAssignment(Assignment* node);
   void EmitNamedPropertyAssignment(Assignment* node);
+  void EmitKeyedPropertyAssignment(Assignment* node);
 
   // Receiver is passed on the frame and not consumed.
   Result EmitNamedLoad(Handle<String> name, bool is_contextual);
@@ -470,6 +471,9 @@
   // not changed.
   Result EmitKeyedLoad();
 
+  // Receiver, key, and value are passed on the frame and consumed.
+  Result EmitKeyedStore(StaticType* key_type);
+
   // Special code for typeof expressions: Unfortunately, we must
   // be careful when loading the expression in 'typeof'
   // expressions. We are not allowed to throw reference errors for
@@ -488,6 +492,13 @@
   void GenericBinaryOperation(BinaryOperation* expr,
                               OverwriteMode overwrite_mode);
 
+  // Emits code sequence that jumps to a JumpTarget if the inputs
+  // are both smis.  Cannot be in MacroAssembler because it takes
+  // advantage of TypeInfo to skip unneeded checks.
+  void JumpIfBothSmiUsingTypeInfo(Result* left,
+                                  Result* right,
+                                  JumpTarget* both_smi);
+
   // Emits code sequence that jumps to deferred code if the input
   // is not a smi.  Cannot be in MacroAssembler because it takes
   // advantage of TypeInfo to skip unneeded checks.
diff --git a/src/x64/ic-x64.cc b/src/x64/ic-x64.cc
index 2af887c..b6957b2 100644
--- a/src/x64/ic-x64.cc
+++ b/src/x64/ic-x64.cc
@@ -379,7 +379,7 @@
 }
 
 
-// One byte opcode for test eax,0xXXXXXXXX.
+// One-byte opcode for test rax,0xXXXXXXXX.
 static const byte kTestEaxByte = 0xA9;
 
 
@@ -1520,8 +1520,8 @@
   GenerateFunctionTailCall(masm, argc, &slow_call);
 
   __ bind(&check_number_dictionary);
-  // eax: elements
-  // ecx: smi key
+  // rax: elements
+  // rcx: smi key
   // Check whether the elements is a number dictionary.
   __ CompareRoot(FieldOperand(rax, HeapObject::kMapOffset),
                  Heap::kHashTableMapRootIndex);
@@ -1603,8 +1603,8 @@
 }
 
 
-// The offset from the inlined patch site to the start of the
-// inlined load instruction.
+// The offset from the inlined patch site to the start of the inlined
+// load instruction.
 const int LoadIC::kOffsetToLoadInstruction = 20;
 
 
@@ -1713,7 +1713,7 @@
   // The address of the instruction following the call.
   Address test_instruction_address =
       address + Assembler::kCallTargetAddressOffset;
-  // If the instruction following the call is not a test eax, nothing
+  // If the instruction following the call is not a test rax, nothing
   // was inlined.
   if (*test_instruction_address != kTestEaxByte) return false;
 
@@ -1737,9 +1737,54 @@
 }
 
 
+// The offset from the inlined patch site to the start of the inlined
+// store instruction.
+const int StoreIC::kOffsetToStoreInstruction = 20;
+
+
 bool StoreIC::PatchInlinedStore(Address address, Object* map, int offset) {
-  // TODO(787): Implement inline stores on x64.
-  return false;
+  // The address of the instruction following the call.
+  Address test_instruction_address =
+      address + Assembler::kCallTargetAddressOffset;
+
+  // If the instruction following the call is not a test rax, nothing
+  // was inlined.
+  if (*test_instruction_address != kTestEaxByte) return false;
+
+  // Extract the encoded deltas from the test rax instruction.
+  Address encoded_offsets_address = test_instruction_address + 1;
+  int encoded_offsets = *reinterpret_cast<int*>(encoded_offsets_address);
+  int delta_to_map_check = -(encoded_offsets & 0xFFFF);
+  int delta_to_record_write = encoded_offsets >> 16;
+
+  // Patch the map to check. The map address is the last 8 bytes of
+  // the 10-byte immediate move instruction.
+  Address map_check_address = test_instruction_address + delta_to_map_check;
+  Address map_address = map_check_address + 2;
+  *(reinterpret_cast<Object**>(map_address)) = map;
+
+  // Patch the offset in the store instruction. The offset is in the
+  // last 4 bytes of a 7-byte register-to-memory move instruction.
+  Address offset_address =
+      map_check_address + StoreIC::kOffsetToStoreInstruction + 3;
+  // The offset should have initial value (kMaxInt - 1), cleared value
+  // (-1) or we should be clearing the inlined version.
+  ASSERT(*reinterpret_cast<int*>(offset_address) == kMaxInt - 1 ||
+         *reinterpret_cast<int*>(offset_address) == -1 ||
+         (offset == 0 && map == Heap::null_value()));
+  *reinterpret_cast<int*>(offset_address) = offset - kHeapObjectTag;
+
+  // Patch the offset in the write-barrier code. The offset is the
+  // last 4 bytes of a 7-byte lea instruction.
+  offset_address = map_check_address + delta_to_record_write + 3;
+  // The offset should have initial value (kMaxInt), cleared value
+  // (-1) or we should be clearing the inlined version.
+  ASSERT(*reinterpret_cast<int*>(offset_address) == kMaxInt ||
+         *reinterpret_cast<int*>(offset_address) == -1 ||
+         (offset == 0 && map == Heap::null_value()));
+  *reinterpret_cast<int*>(offset_address) = offset - kHeapObjectTag;
+
+  return true;
 }
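
Note: PatchInlinedStore above recovers two code offsets from the 32-bit
immediate of the marker "test rax" instruction: the low 16 bits hold the
negated delta to the map check, the high 16 bits the delta to the
write-barrier code. The map pointer itself is the last 8 bytes of the 10-byte
movq-immediate instruction (the +2 skips the REX prefix and the opcode byte).
A sketch of the packing this decoding implies, with hypothetical delta values;
both deltas are assumed to fit in 16 bits:

    #include <cassert>
    #include <cstdint>

    int main() {
      int delta_to_map_check = -24;    // map check precedes the marker
      int delta_to_record_write = 58;  // write-barrier code follows it

      // Encoding: the inverse of the extraction in PatchInlinedStore.
      int32_t encoded =
          (delta_to_record_write << 16) | (-delta_to_map_check & 0xFFFF);

      // Decoding, exactly as the patcher does it.
      assert(-(encoded & 0xFFFF) == delta_to_map_check);
      assert((encoded >> 16) == delta_to_record_write);
      return 0;
    }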
 
 
diff --git a/src/x64/virtual-frame-x64.cc b/src/x64/virtual-frame-x64.cc
index bdff5a9..b8b008c 100644
--- a/src/x64/virtual-frame-x64.cc
+++ b/src/x64/virtual-frame-x64.cc
@@ -997,31 +997,9 @@
 }
 
 
-Result VirtualFrame::InvokeBuiltin(Builtins::JavaScript id,
-                                   InvokeFlag flag,
-                                   int arg_count) {
-  PrepareForCall(arg_count, arg_count);
-  ASSERT(cgen()->HasValidEntryRegisters());
-  __ InvokeBuiltin(id, flag);
-  Result result = cgen()->allocator()->Allocate(rax);
-  ASSERT(result.is_valid());
-  return result;
-}
-
-
 //------------------------------------------------------------------------------
 // Virtual frame stub and IC calling functions.
 
-Result VirtualFrame::RawCallCodeObject(Handle<Code> code,
-                                       RelocInfo::Mode rmode) {
-  ASSERT(cgen()->HasValidEntryRegisters());
-  __ Call(code, rmode);
-  Result result = cgen()->allocator()->Allocate(rax);
-  ASSERT(result.is_valid());
-  return result;
-}
-
-
 Result VirtualFrame::CallRuntime(Runtime::Function* f, int arg_count) {
   PrepareForCall(arg_count, arg_count);
   ASSERT(cgen()->HasValidEntryRegisters());
@@ -1053,6 +1031,28 @@
 #endif
 
 
+Result VirtualFrame::InvokeBuiltin(Builtins::JavaScript id,
+                                   InvokeFlag flag,
+                                   int arg_count) {
+  PrepareForCall(arg_count, arg_count);
+  ASSERT(cgen()->HasValidEntryRegisters());
+  __ InvokeBuiltin(id, flag);
+  Result result = cgen()->allocator()->Allocate(rax);
+  ASSERT(result.is_valid());
+  return result;
+}
+
+
+Result VirtualFrame::RawCallCodeObject(Handle<Code> code,
+                                       RelocInfo::Mode rmode) {
+  ASSERT(cgen()->HasValidEntryRegisters());
+  __ Call(code, rmode);
+  Result result = cgen()->allocator()->Allocate(rax);
+  ASSERT(result.is_valid());
+  return result;
+}
+
+
 // This function assumes that the only results that could be in a_reg or b_reg
 // are a and b.  Other results can be live, but must not be in a_reg or b_reg.
 void VirtualFrame::MoveResultsToRegisters(Result* a,
@@ -1107,67 +1107,17 @@
 
 
 Result VirtualFrame::CallKeyedLoadIC(RelocInfo::Mode mode) {
-  // Key and receiver are on top of the frame.  The IC expects them on
-  // the stack.  It does not drop them.
-  Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize));
-  Result name = Pop();
+  // Key and receiver are on top of the frame.  Put them in rax and rdx.
+  Result key = Pop();
   Result receiver = Pop();
   PrepareForCall(0, 0);
-  MoveResultsToRegisters(&name, &receiver, rax, rdx);
+  MoveResultsToRegisters(&key, &receiver, rax, rdx);
+
+  Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize));
   return RawCallCodeObject(ic, mode);
 }
 
 
-Result VirtualFrame::CallCommonStoreIC(Handle<Code> ic,
-                                       Result* value,
-                                       Result* key,
-                                       Result* receiver) {
-  // The IC expects value in rax, key in rcx, and receiver in rdx.
-  PrepareForCall(0, 0);
-  // If one of the three registers is free, or a value is already
-  // in the correct register, move the remaining two values using
-  // MoveResultsToRegisters().
-  if (!cgen()->allocator()->is_used(rax) ||
-      (value->is_register() && value->reg().is(rax))) {
-    if (!cgen()->allocator()->is_used(rax)) {
-      value->ToRegister(rax);
-    }
-    MoveResultsToRegisters(key, receiver, rcx, rdx);
-    value->Unuse();
-  } else if (!cgen()->allocator()->is_used(rcx) ||
-             (key->is_register() && key->reg().is(rcx))) {
-    if (!cgen()->allocator()->is_used(rcx)) {
-      key->ToRegister(rcx);
-    }
-    MoveResultsToRegisters(value, receiver, rax, rdx);
-    key->Unuse();
-  } else if (!cgen()->allocator()->is_used(rdx) ||
-             (receiver->is_register() && receiver->reg().is(rdx))) {
-    if (!cgen()->allocator()->is_used(rdx)) {
-      receiver->ToRegister(rdx);
-    }
-    MoveResultsToRegisters(key, value, rcx, rax);
-    receiver->Unuse();
-  } else {
-    // Otherwise, no register is free, and no value is in the correct place.
-    // We have one of the two circular permutations of eax, ecx, edx.
-    ASSERT(value->is_register());
-    if (value->reg().is(rcx)) {
-      __ xchg(rax, rdx);
-      __ xchg(rax, rcx);
-    } else {
-      __ xchg(rax, rcx);
-      __ xchg(rax, rdx);
-    }
-    value->Unuse();
-    key->Unuse();
-    receiver->Unuse();
-  }
-
-  return RawCallCodeObject(ic, RelocInfo::CODE_TARGET);
-}
-
-
 Result VirtualFrame::CallStoreIC(Handle<String> name, bool is_contextual) {
   // Value and (if not contextual) receiver are on top of the frame.
   // The IC expects name in rcx, value in rax, and receiver in rdx.
@@ -1188,6 +1138,55 @@
 }
 
 
+Result VirtualFrame::CallKeyedStoreIC() {
+  // Value, key, and receiver are on the top of the frame.  The IC
+  // expects value in rax, key in rcx, and receiver in rdx.
+  Result value = Pop();
+  Result key = Pop();
+  Result receiver = Pop();
+  PrepareForCall(0, 0);
+  if (!cgen()->allocator()->is_used(rax) ||
+      (value.is_register() && value.reg().is(rax))) {
+    if (!cgen()->allocator()->is_used(rax)) {
+      value.ToRegister(rax);
+    }
+    MoveResultsToRegisters(&key, &receiver, rcx, rdx);
+    value.Unuse();
+  } else if (!cgen()->allocator()->is_used(rcx) ||
+             (key.is_register() && key.reg().is(rcx))) {
+    if (!cgen()->allocator()->is_used(rcx)) {
+      key.ToRegister(rcx);
+    }
+    MoveResultsToRegisters(&value, &receiver, rax, rdx);
+    key.Unuse();
+  } else if (!cgen()->allocator()->is_used(rdx) ||
+             (receiver.is_register() && receiver.reg().is(rdx))) {
+    if (!cgen()->allocator()->is_used(rdx)) {
+      receiver.ToRegister(rdx);
+    }
+    MoveResultsToRegisters(&key, &value, rcx, rax);
+    receiver.Unuse();
+  } else {
+    // All three registers are used, and no value is in the correct place.
+    // We have one of the two circular permutations of rax, rcx, rdx.
+    ASSERT(value.is_register());
+    if (value.reg().is(rcx)) {
+      __ xchg(rax, rdx);
+      __ xchg(rax, rcx);
+    } else {
+      __ xchg(rax, rcx);
+      __ xchg(rax, rdx);
+    }
+    value.Unuse();
+    key.Unuse();
+    receiver.Unuse();
+  }
+
+  Handle<Code> ic(Builtins::builtin(Builtins::KeyedStoreIC_Initialize));
+  return RawCallCodeObject(ic, RelocInfo::CODE_TARGET);
+}
+
+
 Result VirtualFrame::CallCallIC(RelocInfo::Mode mode,
                                 int arg_count,
                                 int loop_nesting) {
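
Note: the two xchg sequences in CallKeyedStoreIC above each realize a
three-cycle, which suffices because the only cases reaching that branch are
the two circular permutations of (rax, rcx, rdx). A quick check with the
registers modeled as variables:

    #include <cassert>
    #include <string>
    #include <utility>

    int main() {
      std::string rax, rcx, rdx;

      // Case 1: value is in rcx, i.e. (rax, rcx, rdx) holds
      // (receiver, value, key). Target: (value, key, receiver).
      rax = "receiver"; rcx = "value"; rdx = "key";
      std::swap(rax, rdx);  // __ xchg(rax, rdx)
      std::swap(rax, rcx);  // __ xchg(rax, rcx)
      assert(rax == "value" && rcx == "key" && rdx == "receiver");

      // Case 2: the other three-cycle, (rax, rcx, rdx) holds
      // (key, receiver, value).
      rax = "key"; rcx = "receiver"; rdx = "value";
      std::swap(rax, rcx);  // __ xchg(rax, rcx)
      std::swap(rax, rdx);  // __ xchg(rax, rdx)
      assert(rax == "value" && rcx == "key" && rdx == "receiver");
      return 0;
    }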
diff --git a/src/x64/virtual-frame-x64.h b/src/x64/virtual-frame-x64.h
index 05af957..0479ff0 100644
--- a/src/x64/virtual-frame-x64.h
+++ b/src/x64/virtual-frame-x64.h
@@ -329,50 +329,27 @@
                        int arg_count);
 
   // Call load IC.  Name and receiver are found on top of the frame.
-  // Receiver is not dropped.
+  // Both are dropped.
   Result CallLoadIC(RelocInfo::Mode mode);
 
   // Call keyed load IC.  Key and receiver are found on top of the
-  // frame.  They are not dropped.
+  // frame.  Both are dropped.
   Result CallKeyedLoadIC(RelocInfo::Mode mode);
 
-
-  // Calling a store IC and a keyed store IC differ only by which ic is called
-  // and by the order of the three arguments on the frame.
-  Result CallCommonStoreIC(Handle<Code> ic,
-                           Result* value,
-                           Result* key,
-                           Result* receiver);
-
-  // Call store IC.  Name, value, and receiver are found on top
-  // of the frame.  All are dropped.
-  Result CallStoreIC() {
-    Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Initialize));
-    Result name = Pop();
-    Result value = Pop();
-    Result receiver = Pop();
-    return CallCommonStoreIC(ic, &value, &name, &receiver);
-  }
-
   // Call store IC.  If the load is contextual, value is found on top of the
   // frame.  If not, value and receiver are on the frame.  Both are dropped.
   Result CallStoreIC(Handle<String> name, bool is_contextual);
 
   // Call keyed store IC.  Value, key, and receiver are found on top
-  // of the frame.  All are dropped.
-  Result CallKeyedStoreIC() {
-    Handle<Code> ic(Builtins::builtin(Builtins::KeyedStoreIC_Initialize));
-    Result value = Pop();
-    Result key = Pop();
-    Result receiver = Pop();
-    return CallCommonStoreIC(ic, &value, &key, &receiver);
-  }
+  // of the frame.  All three are dropped.
+  Result CallKeyedStoreIC();
 
   // Call call IC.  Function name, arguments, and receiver are found on top
   // of the frame and dropped by the call.
   // The argument count does not include the receiver.
   Result CallCallIC(RelocInfo::Mode mode, int arg_count, int loop_nesting);
 
+  // Call keyed call IC.  Same calling convention as CallCallIC.
   Result CallKeyedCallIC(RelocInfo::Mode mode, int arg_count, int loop_nesting);
 
   // Allocate and call JS function as constructor.  Arguments,
diff --git a/test/mjsunit/debug-setbreakpoint.js b/test/mjsunit/debug-setbreakpoint.js
index 8201d63..90dfcd1 100644
--- a/test/mjsunit/debug-setbreakpoint.js
+++ b/test/mjsunit/debug-setbreakpoint.js
@@ -192,3 +192,26 @@
 sourceUrlFunc();
 
 assertTrue(breakListenerCalled, "Break listener not called on breakpoint set by sourceURL");
+
+
+// Test case for a breakpoint in a script with no statements. If a breakpoint
+// is set in the script body, its actual position is taken from the nearest
+// statement below it or, as in this case, is reset to the very end of the
+// script. Unless precautions are taken, this position becomes out of range
+// and we get an exception.
+
+// Gets the script of the 'i1' function and sets a breakpoint at line #4,
+// which should be empty.
+function SetBreakpointInI1Script() {
+  var i_script = Debug.findScript(i1);
+  assertTrue(!!i_script, "invalid script for i1");
+  Debug.setScriptBreakPoint(Debug.ScriptBreakPointType.ScriptId,
+                            i_script.id, 4);
+}
+
+// Creates the eval script and tries to set the breakpoint. The tricky part
+// is that the script function must be strongly reachable at that moment.
+// Since there is no way of simply getting a pointer to the function, we run
+// this code while the script function is still active on the stack.
+eval('SetBreakpointInI1Script()\nfunction i1(){}\n\n\n\nfunction i2(){}\n');
+
diff --git a/test/mjsunit/function-bind.js b/test/mjsunit/function-bind.js
new file mode 100644
index 0000000..7a72cd5
--- /dev/null
+++ b/test/mjsunit/function-bind.js
@@ -0,0 +1,184 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Tests the Function.prototype.bind method (ES5 15.3.4.5).
+
+// Simple tests.
+function foo(x, y, z) {
+  return x + y + z;
+}
+
+var f = foo.bind(foo);
+assertEquals(3, f(1, 1, 1));
+assertEquals(3, f.length);
+
+f = foo.bind(foo, 2);
+assertEquals(4, f(1, 1));
+assertEquals(2, f.length);
+
+f = foo.bind(foo, 2, 2);
+assertEquals(5, f(1));
+assertEquals(1, f.length);
+
+f = foo.bind(foo, 2, 2, 2);
+assertEquals(6, f());
+assertEquals(0, f.length);
+
+// Test that length works correctly even if more than the actual number
+// of arguments are given when binding.
+f = foo.bind(foo, 1, 2, 3, 4, 5, 6, 7, 8, 9);
+assertEquals(6, f());
+assertEquals(0, f.length);
+
+// Use a different bound object.
+var obj = {x: 42, y: 43};
+// Values that would normally be in "this" when calling f_bound_this.
+var x = 42;
+var y = 44;
+
+function f_bound_this(z) {
+  return z + this.y - this.x;
+}
+
+assertEquals(3, f_bound_this(1));
+f = f_bound_this.bind(obj);
+assertEquals(2, f(1));
+assertEquals(1, f.length);
+
+f = f_bound_this.bind(obj, 2);
+assertEquals(3, f());
+assertEquals(0, f.length);
+
+// Test chained binds.
+
+// When only giving the thisArg, any number of binds should have
+// the same effect.
+f = foo.bind(foo);
+assertEquals(3, f(1, 1, 1));
+f = foo.bind(foo).bind(foo).bind(foo).bind(foo);
+assertEquals(3, f(1, 1, 1));
+assertEquals(3, f.length);
+
+// Giving bound parameters should work at any place in the chain.
+f = foo.bind(foo, 1).bind(foo).bind(foo).bind(foo);
+assertEquals(3, f(1, 1));
+assertEquals(2, f.length);
+
+f = foo.bind(foo).bind(foo, 1).bind(foo).bind(foo);
+assertEquals(3, f(1, 1));
+assertEquals(2, f.length);
+
+f = foo.bind(foo).bind(foo).bind(foo, 1).bind(foo);
+assertEquals(3, f(1, 1));
+assertEquals(2, f.length);
+
+f = foo.bind(foo).bind(foo).bind(foo).bind(foo, 1);
+assertEquals(3, f(1, 1));
+assertEquals(2, f.length);
+
+// Several parameters can be given, and given in different bind invocations.
+f = foo.bind(foo, 1, 1).bind(foo).bind(foo).bind(foo);
+assertEquals(3, f(1));
+assertEquals(1, f.length);
+
+f = foo.bind(foo).bind(foo, 1, 1).bind(foo).bind(foo);
+assertEquals(3, f(1));
+assertEquals(1, f.length);
+
+f = foo.bind(foo).bind(foo).bind(foo, 1, 1).bind(foo);
+assertEquals(3, f(1));
+assertEquals(1, f.length);
+
+f = foo.bind(foo).bind(foo).bind(foo).bind(foo, 1, 1);
+assertEquals(3, f(1));
+assertEquals(1, f.length);
+
+f = foo.bind(foo, 1).bind(foo, 1).bind(foo).bind(foo);
+assertEquals(3, f(1));
+assertEquals(1, f.length);
+
+f = foo.bind(foo, 1).bind(foo).bind(foo, 1).bind(foo);
+assertEquals(3, f(1));
+assertEquals(1, f.length);
+
+f = foo.bind(foo, 1).bind(foo).bind(foo).bind(foo, 1);
+assertEquals(3, f(1));
+assertEquals(1, f.length);
+
+f = foo.bind(foo).bind(foo, 1).bind(foo).bind(foo, 1);
+assertEquals(3, f(1));
+assertEquals(1, f.length);
+
+// Test constructor calls.
+
+function bar(x, y, z) {
+  this.x = x;
+  this.y = y;
+  this.z = z;
+}
+
+f = bar.bind(bar);
+var obj2 = new f(1, 2, 3);
+assertEquals(1, obj2.x);
+assertEquals(2, obj2.y);
+assertEquals(3, obj2.z);
+
+f = bar.bind(bar, 1);
+obj2 = new f(2, 3);
+assertEquals(1, obj2.x);
+assertEquals(2, obj2.y);
+assertEquals(3, obj2.z);
+
+f = bar.bind(bar, 1, 2);
+obj2 = new f(3);
+assertEquals(1, obj2.x);
+assertEquals(2, obj2.y);
+assertEquals(3, obj2.z);
+
+f = bar.bind(bar, 1, 2, 3);
+obj2 = new f();
+assertEquals(1, obj2.x);
+assertEquals(2, obj2.y);
+assertEquals(3, obj2.z);
+
+
+// Test bind chains when used as a constructor.
+
+f = bar.bind(bar, 1).bind(bar, 2).bind(bar, 3);
+obj2 = new f();
+assertEquals(1, obj2.x);
+assertEquals(2, obj2.y);
+assertEquals(3, obj2.z);
+
+// Test that obj2 is an instance of bar, not of f.
+assertTrue(obj2 instanceof bar);
+assertFalse(obj2 instanceof f);
+
diff --git a/tools/js2c.py b/tools/js2c.py
index 35bf43b..2da132f 100755
--- a/tools/js2c.py
+++ b/tools/js2c.py
@@ -275,8 +275,8 @@
   debugger_ids = []
   modules = []
   # Locate the macros file name.
-  consts = {}
-  macros = {}
+  consts = []
+  macros = []
   for s in source:
     if 'macros.py' == (os.path.split(str(s))[1]):
       (consts, macros) = ReadMacros(ReadLines(str(s)))