Version 3.31.1 (based on 18cf6c9ac96a62ec320fc10d738d26fba784e2f4)
Fix has_constant_parameter_count() confusion in LReturn (Chromium issue 431602).
Performance and stability improvements on all platforms.
git-svn-id: https://v8.googlecode.com/svn/trunk@25275 ce2b1a6d-e550-0410-aec6-3dcde31c8c00
diff --git a/BUILD.gn b/BUILD.gn
index 1758ee9..d77931c 100644
--- a/BUILD.gn
+++ b/BUILD.gn
@@ -572,6 +572,8 @@
"src/compiler/raw-machine-assembler.h",
"src/compiler/register-allocator.cc",
"src/compiler/register-allocator.h",
+ "src/compiler/register-allocator-verifier.cc",
+ "src/compiler/register-allocator-verifier.h",
"src/compiler/register-configuration.cc",
"src/compiler/register-configuration.h",
"src/compiler/representation-change.h",
diff --git a/ChangeLog b/ChangeLog
index 1e29853..c75f146 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -1,3 +1,11 @@
+2014-11-12: Version 3.31.1
+
+ Fix has_constant_parameter_count() confusion in LReturn (Chromium issue
+ 431602).
+
+ Performance and stability improvements on all platforms.
+
+
2014-11-05: Version 3.30.33
`1..isPrototypeOf.call(null)` should return false, not throw TypeError
diff --git a/build/standalone.gypi b/build/standalone.gypi
index 47b2763..6cc0599 100644
--- a/build/standalone.gypi
+++ b/build/standalone.gypi
@@ -127,6 +127,16 @@
'arm_fpu%': 'vfpv3',
'arm_float_abi%': 'default',
'arm_thumb': 'default',
+
+ # Default MIPS variable settings.
+ 'mips_arch_variant%': 'r2',
+ # Possible values fp32, fp64, fpxx.
+ # fp32 - 32 32-bit FPU registers are available, doubles are placed in
+ # register pairs.
+ # fp64 - 32 64-bit FPU registers are available.
+ # fpxx - compatibility mode, it chooses fp32 or fp64 depending on runtime
+ # detection
+ 'mips_fpu_mode%': 'fp32',
},
'target_defaults': {
'variables': {
diff --git a/build/toolchain.gypi b/build/toolchain.gypi
index 82a10e8..df2f988 100644
--- a/build/toolchain.gypi
+++ b/build/toolchain.gypi
@@ -55,17 +55,6 @@
# Similar to the ARM hard float ABI but on MIPS.
'v8_use_mips_abi_hardfloat%': 'true',
- # Default arch variant for MIPS.
- 'mips_arch_variant%': 'r2',
-
- # Possible values fp32, fp64, fpxx.
- # fp32 - 32 32-bit FPU registers are available, doubles are placed in
- # register pairs.
- # fp64 - 32 64-bit FPU registers are available.
- # fpxx - compatibility mode, it chooses fp32 or fp64 depending on runtime
- # detection
- 'mips_fpu_mode%': 'fp32',
-
'v8_enable_backtrace%': 0,
# Enable profiling support. Only required on Windows.
@@ -278,10 +267,27 @@
'V8_TARGET_ARCH_MIPS',
],
'conditions': [
- ['v8_target_arch==target_arch and android_webview_build==0', {
- # Target built with a Mips CXX compiler.
- 'target_conditions': [
- ['_toolset=="target"', {
+ [ 'v8_can_use_fpu_instructions=="true"', {
+ 'defines': [
+ 'CAN_USE_FPU_INSTRUCTIONS',
+ ],
+ }],
+ [ 'v8_use_mips_abi_hardfloat=="true"', {
+ 'defines': [
+ '__mips_hard_float=1',
+ 'CAN_USE_FPU_INSTRUCTIONS',
+ ],
+ }, {
+ 'defines': [
+ '__mips_soft_float=1'
+ ]
+ }],
+ ],
+ 'target_conditions': [
+ ['_toolset=="target"', {
+ 'conditions': [
+ ['v8_target_arch==target_arch and android_webview_build==0', {
+ # Target built with a Mips CXX compiler.
'cflags': ['-EB'],
'ldflags': ['-EB'],
'conditions': [
@@ -292,16 +298,11 @@
'cflags': ['-msoft-float'],
'ldflags': ['-msoft-float'],
}],
- ['mips_fpu_mode=="fp64"', {
- 'cflags': ['-mfp64'],
- }],
- ['mips_fpu_mode=="fpxx"', {
- 'cflags': ['-mfpxx'],
- }],
- ['mips_fpu_mode=="fp32"', {
- 'cflags': ['-mfp32'],
- }],
['mips_arch_variant=="r6"', {
+ 'defines': [
+ '_MIPS_ARCH_MIPS32R6',
+ 'FPU_MODE_FP64',
+ ],
'cflags!': ['-mfp32', '-mfpxx'],
'cflags': ['-mips32r6', '-Wa,-mips32r6'],
'ldflags': [
@@ -311,23 +312,145 @@
],
}],
['mips_arch_variant=="r2"', {
+ 'conditions': [
+ [ 'mips_fpu_mode=="fp64"', {
+ 'defines': [
+ '_MIPS_ARCH_MIPS32R2',
+ 'FPU_MODE_FP64',
+ ],
+ 'cflags': ['-mfp64'],
+ }],
+ ['mips_fpu_mode=="fpxx"', {
+ 'defines': [
+ '_MIPS_ARCH_MIPS32R2',
+ 'FPU_MODE_FPXX',
+ ],
+ 'cflags': ['-mfpxx'],
+ }],
+ ['mips_fpu_mode=="fp32"', {
+ 'defines': [
+ '_MIPS_ARCH_MIPS32R2',
+ 'FPU_MODE_FP32',
+ ],
+ 'cflags': ['-mfp32'],
+ }],
+ ],
'cflags': ['-mips32r2', '-Wa,-mips32r2'],
'ldflags': ['-mips32r2'],
}],
['mips_arch_variant=="r1"', {
+ 'defines': [
+ 'FPU_MODE_FP32',
+ ],
'cflags!': ['-mfp64', '-mfpxx'],
'cflags': ['-mips32', '-Wa,-mips32'],
'ldflags': ['-mips32'],
}],
['mips_arch_variant=="rx"', {
+ 'defines': [
+ '_MIPS_ARCH_MIPS32RX',
+ 'FPU_MODE_FPXX',
+ ],
'cflags!': ['-mfp64', '-mfp32'],
'cflags': ['-mips32', '-Wa,-mips32', '-mfpxx'],
'ldflags': ['-mips32'],
}],
],
+ }, {
+ # 'v8_target_arch!=target_arch'
+                  # Target not built with a MIPS CXX compiler (simulator build).
+ 'conditions': [
+ ['mips_arch_variant=="r6"', {
+ 'defines': [
+ '_MIPS_ARCH_MIPS32R6',
+ 'FPU_MODE_FP64',
+ ],
+ }],
+ ['mips_arch_variant=="r2"', {
+ 'conditions': [
+ [ 'mips_fpu_mode=="fp64"', {
+ 'defines': [
+ '_MIPS_ARCH_MIPS32R2',
+ 'FPU_MODE_FP64',
+ ],
+ }],
+ ['mips_fpu_mode=="fpxx"', {
+ 'defines': [
+ '_MIPS_ARCH_MIPS32R2',
+ 'FPU_MODE_FPXX',
+ ],
+ }],
+ ['mips_fpu_mode=="fp32"', {
+ 'defines': [
+ '_MIPS_ARCH_MIPS32R2',
+ 'FPU_MODE_FP32',
+ ],
+ }],
+ ],
+ }],
+ ['mips_arch_variant=="r1"', {
+ 'defines': [
+ 'FPU_MODE_FP32',
+ ],
+ }],
+ ['mips_arch_variant=="rx"', {
+ 'defines': [
+ '_MIPS_ARCH_MIPS32RX',
+ 'FPU_MODE_FPXX',
+ ],
+ }],
+ ],
}],
],
- }],
+ }], #_toolset=="target"
+ ['_toolset=="host"', {
+ 'conditions': [
+ ['mips_arch_variant=="rx"', {
+ 'defines': [
+ '_MIPS_ARCH_MIPS32RX',
+ 'FPU_MODE_FPXX',
+ ],
+ }],
+ ['mips_arch_variant=="r6"', {
+ 'defines': [
+ '_MIPS_ARCH_MIPS32R6',
+ 'FPU_MODE_FP64',
+ ],
+ }],
+ ['mips_arch_variant=="r2"', {
+ 'conditions': [
+ ['mips_fpu_mode=="fp64"', {
+ 'defines': [
+ '_MIPS_ARCH_MIPS32R2',
+ 'FPU_MODE_FP64',
+ ],
+ }],
+ ['mips_fpu_mode=="fpxx"', {
+ 'defines': [
+ '_MIPS_ARCH_MIPS32R2',
+ 'FPU_MODE_FPXX',
+ ],
+ }],
+ ['mips_fpu_mode=="fp32"', {
+ 'defines': [
+ '_MIPS_ARCH_MIPS32R2',
+ 'FPU_MODE_FP32'
+ ],
+ }],
+ ],
+ }],
+ ['mips_arch_variant=="r1"', {
+ 'defines': ['FPU_MODE_FP32',],
+ }],
+ ]
+ }], #_toolset=="host"
+ ],
+ }], # v8_target_arch=="mips"
+ ['v8_target_arch=="mipsel"', {
+ 'defines': [
+ 'V8_TARGET_ARCH_MIPS',
+ ],
+ 'conditions': [
[ 'v8_can_use_fpu_instructions=="true"', {
'defines': [
'CAN_USE_FPU_INSTRUCTIONS',
@@ -343,46 +466,12 @@
'__mips_soft_float=1'
],
}],
- ['mips_arch_variant=="rx"', {
- 'defines': [
- '_MIPS_ARCH_MIPS32RX',
- 'FPU_MODE_FPXX',
- ],
- }],
- ['mips_arch_variant=="r6"', {
- 'defines': [
- '_MIPS_ARCH_MIPS32R6',
- 'FPU_MODE_FP64',
- ],
- }],
- ['mips_arch_variant=="r2"', {
- 'defines': ['_MIPS_ARCH_MIPS32R2',],
+ ],
+ 'target_conditions': [
+ ['_toolset=="target"', {
'conditions': [
- ['mips_fpu_mode=="fp64"', {
- 'defines': ['FPU_MODE_FP64',],
- }],
- ['mips_fpu_mode=="fpxx"', {
- 'defines': ['FPU_MODE_FPXX',],
- }],
- ['mips_fpu_mode=="fp32"', {
- 'defines': ['FPU_MODE_FP32',],
- }],
- ],
- }],
- ['mips_arch_variant=="r1"', {
- 'defines': ['FPU_MODE_FP32',],
- }],
- ],
- }], # v8_target_arch=="mips"
- ['v8_target_arch=="mipsel"', {
- 'defines': [
- 'V8_TARGET_ARCH_MIPS',
- ],
- 'conditions': [
- ['v8_target_arch==target_arch and android_webview_build==0', {
- # Target built with a Mips CXX compiler.
- 'target_conditions': [
- ['_toolset=="target"', {
+ ['v8_target_arch==target_arch and android_webview_build==0', {
+ # Target built with a Mips CXX compiler.
'cflags': ['-EL'],
'ldflags': ['-EL'],
'conditions': [
@@ -393,16 +482,11 @@
'cflags': ['-msoft-float'],
'ldflags': ['-msoft-float'],
}],
- ['mips_fpu_mode=="fp64"', {
- 'cflags': ['-mfp64'],
- }],
- ['mips_fpu_mode=="fpxx"', {
- 'cflags': ['-mfpxx'],
- }],
- ['mips_fpu_mode=="fp32"', {
- 'cflags': ['-mfp32'],
- }],
['mips_arch_variant=="r6"', {
+ 'defines': [
+ '_MIPS_ARCH_MIPS32R6',
+ 'FPU_MODE_FP64',
+ ],
'cflags!': ['-mfp32', '-mfpxx'],
'cflags': ['-mips32r6', '-Wa,-mips32r6'],
'ldflags': [
@@ -412,6 +496,29 @@
],
}],
['mips_arch_variant=="r2"', {
+ 'conditions': [
+ [ 'mips_fpu_mode=="fp64"', {
+ 'defines': [
+ '_MIPS_ARCH_MIPS32R2',
+ 'FPU_MODE_FP64',
+ ],
+ 'cflags': ['-mfp64'],
+ }],
+ ['mips_fpu_mode=="fpxx"', {
+ 'defines': [
+ '_MIPS_ARCH_MIPS32R2',
+ 'FPU_MODE_FPXX',
+ ],
+ 'cflags': ['-mfpxx'],
+ }],
+ ['mips_fpu_mode=="fp32"', {
+ 'defines': [
+ '_MIPS_ARCH_MIPS32R2',
+ 'FPU_MODE_FP32',
+ ],
+ 'cflags': ['-mfp32'],
+ }],
+ ],
'cflags': ['-mips32r2', '-Wa,-mips32r2'],
'ldflags': ['-mips32r2'],
}],
@@ -421,67 +528,122 @@
'ldflags': ['-mips32'],
}],
['mips_arch_variant=="rx"', {
+ 'defines': [
+ '_MIPS_ARCH_MIPS32RX',
+ 'FPU_MODE_FPXX',
+ ],
'cflags!': ['-mfp64', '-mfp32'],
'cflags': ['-mips32', '-Wa,-mips32', '-mfpxx'],
'ldflags': ['-mips32'],
}],
['mips_arch_variant=="loongson"', {
+ 'defines': [
+ '_MIPS_ARCH_LOONGSON',
+ 'FPU_MODE_FP32',
+ ],
'cflags!': ['-mfp64', '-mfp32', '-mfpxx'],
'cflags': ['-mips3', '-Wa,-mips3'],
}],
],
+ }, {
+ # 'v8_target_arch!=target_arch'
+                  # Target not built with a MIPS CXX compiler (simulator build).
+ 'conditions': [
+ ['mips_arch_variant=="r6"', {
+ 'defines': [
+ '_MIPS_ARCH_MIPS32R6',
+ 'FPU_MODE_FP64',
+ ],
+ }],
+ ['mips_arch_variant=="r2"', {
+ 'conditions': [
+ [ 'mips_fpu_mode=="fp64"', {
+ 'defines': [
+ '_MIPS_ARCH_MIPS32R2',
+ 'FPU_MODE_FP64',
+ ],
+ }],
+ ['mips_fpu_mode=="fpxx"', {
+ 'defines': [
+ '_MIPS_ARCH_MIPS32R2',
+ 'FPU_MODE_FPXX',
+ ],
+ }],
+ ['mips_fpu_mode=="fp32"', {
+ 'defines': [
+ '_MIPS_ARCH_MIPS32R2',
+ 'FPU_MODE_FP32',
+ ],
+ }],
+ ],
+ }],
+ ['mips_arch_variant=="r1"', {
+ 'defines': [
+ 'FPU_MODE_FP32',
+ ],
+ }],
+ ['mips_arch_variant=="rx"', {
+ 'defines': [
+ '_MIPS_ARCH_MIPS32RX',
+ 'FPU_MODE_FPXX',
+ ],
+ }],
+ ['mips_arch_variant=="loongson"', {
+ 'defines': [
+ '_MIPS_ARCH_LOONGSON',
+ 'FPU_MODE_FP32',
+ ],
+ }],
+ ],
}],
],
- }],
- [ 'v8_can_use_fpu_instructions=="true"', {
- 'defines': [
- 'CAN_USE_FPU_INSTRUCTIONS',
- ],
- }],
- [ 'v8_use_mips_abi_hardfloat=="true"', {
- 'defines': [
- '__mips_hard_float=1',
- 'CAN_USE_FPU_INSTRUCTIONS',
- ],
- }, {
- 'defines': [
- '__mips_soft_float=1'
- ],
- }],
- ['mips_arch_variant=="rx"', {
- 'defines': [
- '_MIPS_ARCH_MIPS32RX',
- 'FPU_MODE_FPXX',
- ],
- }],
- ['mips_arch_variant=="r6"', {
- 'defines': [
- '_MIPS_ARCH_MIPS32R6',
- 'FPU_MODE_FP64',
- ],
- }],
- ['mips_arch_variant=="r2"', {
- 'defines': ['_MIPS_ARCH_MIPS32R2',],
+        }], #_toolset=="target"
+ ['_toolset=="host"', {
'conditions': [
- ['mips_fpu_mode=="fp64"', {
- 'defines': ['FPU_MODE_FP64',],
+ ['mips_arch_variant=="rx"', {
+ 'defines': [
+ '_MIPS_ARCH_MIPS32RX',
+ 'FPU_MODE_FPXX',
+ ],
}],
- ['mips_fpu_mode=="fpxx"', {
- 'defines': ['FPU_MODE_FPXX',],
+ ['mips_arch_variant=="r6"', {
+ 'defines': [
+ '_MIPS_ARCH_MIPS32R6',
+ 'FPU_MODE_FP64',
+ ],
}],
- ['mips_fpu_mode=="fp32"', {
+ ['mips_arch_variant=="r2"', {
+ 'conditions': [
+ ['mips_fpu_mode=="fp64"', {
+ 'defines': [
+ '_MIPS_ARCH_MIPS32R2',
+ 'FPU_MODE_FP64',
+ ],
+ }],
+ ['mips_fpu_mode=="fpxx"', {
+ 'defines': [
+ '_MIPS_ARCH_MIPS32R2',
+ 'FPU_MODE_FPXX',
+ ],
+ }],
+ ['mips_fpu_mode=="fp32"', {
+ 'defines': [
+ '_MIPS_ARCH_MIPS32R2',
+ 'FPU_MODE_FP32'
+ ],
+ }],
+ ],
+ }],
+ ['mips_arch_variant=="r1"', {
'defines': ['FPU_MODE_FP32',],
}],
- ],
- }],
- ['mips_arch_variant=="r1"', {
- 'defines': ['FPU_MODE_FP32',],
- }],
- ['mips_arch_variant=="loongson"', {
- 'defines': [
- '_MIPS_ARCH_LOONGSON',
- 'FPU_MODE_FP32',
- ],
+ ['mips_arch_variant=="loongson"', {
+ 'defines': [
+ '_MIPS_ARCH_LOONGSON',
+ 'FPU_MODE_FP32',
+ ],
+ }],
+ ]
}],
],
}], # v8_target_arch=="mipsel"
@@ -490,40 +652,6 @@
'V8_TARGET_ARCH_MIPS64',
],
'conditions': [
- ['v8_target_arch==target_arch and android_webview_build==0', {
- # Target built with a Mips CXX compiler.
- 'target_conditions': [
- ['_toolset=="target"', {
- 'cflags': ['-EL'],
- 'ldflags': ['-EL'],
- 'conditions': [
- [ 'v8_use_mips_abi_hardfloat=="true"', {
- 'cflags': ['-mhard-float'],
- 'ldflags': ['-mhard-float'],
- }, {
- 'cflags': ['-msoft-float'],
- 'ldflags': ['-msoft-float'],
- }],
- ['mips_arch_variant=="r6"', {
- 'cflags': ['-mips64r6', '-mabi=64', '-Wa,-mips64r6'],
- 'ldflags': [
- '-mips64r6', '-mabi=64',
- '-Wl,--dynamic-linker=$(LDSO_PATH)',
- '-Wl,--rpath=$(LD_R_PATH)',
- ],
- }],
- ['mips_arch_variant=="r2"', {
- 'cflags': ['-mips64r2', '-mabi=64', '-Wa,-mips64r2'],
- 'ldflags': [
- '-mips64r2', '-mabi=64',
- '-Wl,--dynamic-linker=$(LDSO_PATH)',
- '-Wl,--rpath=$(LD_R_PATH)',
- ],
- }],
- ],
- }],
- ],
- }],
[ 'v8_can_use_fpu_instructions=="true"', {
'defines': [
'CAN_USE_FPU_INSTRUCTIONS',
@@ -539,12 +667,64 @@
'__mips_soft_float=1'
],
}],
- ['mips_arch_variant=="r6"', {
- 'defines': ['_MIPS_ARCH_MIPS64R6',],
- }],
- ['mips_arch_variant=="r2"', {
- 'defines': ['_MIPS_ARCH_MIPS64R2',],
- }],
+ ],
+ 'target_conditions': [
+ ['_toolset=="target"', {
+ 'conditions': [
+ ['v8_target_arch==target_arch and android_webview_build==0', {
+ 'cflags': ['-EL'],
+ 'ldflags': ['-EL'],
+ 'conditions': [
+ [ 'v8_use_mips_abi_hardfloat=="true"', {
+ 'cflags': ['-mhard-float'],
+ 'ldflags': ['-mhard-float'],
+ }, {
+ 'cflags': ['-msoft-float'],
+ 'ldflags': ['-msoft-float'],
+ }],
+ ['mips_arch_variant=="r6"', {
+ 'defines': ['_MIPS_ARCH_MIPS64R6',],
+ 'cflags': ['-mips64r6', '-mabi=64', '-Wa,-mips64r6'],
+ 'ldflags': [
+ '-mips64r6', '-mabi=64',
+ '-Wl,--dynamic-linker=$(LDSO_PATH)',
+ '-Wl,--rpath=$(LD_R_PATH)',
+ ],
+ }],
+ ['mips_arch_variant=="r2"', {
+ 'defines': ['_MIPS_ARCH_MIPS64R2',],
+ 'cflags': ['-mips64r2', '-mabi=64', '-Wa,-mips64r2'],
+ 'ldflags': [
+ '-mips64r2', '-mabi=64',
+ '-Wl,--dynamic-linker=$(LDSO_PATH)',
+ '-Wl,--rpath=$(LD_R_PATH)',
+ ],
+ }],
+ ],
+ }, {
+ # 'v8_target_arch!=target_arch'
+                  # Target not built with a MIPS CXX compiler (simulator build).
+ 'conditions': [
+ ['mips_arch_variant=="r6"', {
+ 'defines': ['_MIPS_ARCH_MIPS64R6',],
+ }],
+ ['mips_arch_variant=="r2"', {
+ 'defines': ['_MIPS_ARCH_MIPS64R2',],
+ }],
+ ],
+ }],
+ ],
+        }], #_toolset=="target"
+ ['_toolset=="host"', {
+ 'conditions': [
+ ['mips_arch_variant=="r6"', {
+ 'defines': ['_MIPS_ARCH_MIPS64R6',],
+ }],
+ ['mips_arch_variant=="r2"', {
+ 'defines': ['_MIPS_ARCH_MIPS64R2',],
+ }],
+ ],
+        }], #_toolset=="host"
],
}], # v8_target_arch=="mips64el"
['v8_target_arch=="x64"', {
diff --git a/src/arm/code-stubs-arm.cc b/src/arm/code-stubs-arm.cc
index e0750cd..d294785 100644
--- a/src/arm/code-stubs-arm.cc
+++ b/src/arm/code-stubs-arm.cc
@@ -234,61 +234,6 @@
}
-void WriteInt32ToHeapNumberStub::GenerateFixedRegStubsAheadOfTime(
- Isolate* isolate) {
- WriteInt32ToHeapNumberStub stub1(isolate, r1, r0, r2);
- WriteInt32ToHeapNumberStub stub2(isolate, r2, r0, r3);
- stub1.GetCode();
- stub2.GetCode();
-}
-
-
-// See comment for class.
-void WriteInt32ToHeapNumberStub::Generate(MacroAssembler* masm) {
- Label max_negative_int;
- // the_int_ has the answer which is a signed int32 but not a Smi.
- // We test for the special value that has a different exponent. This test
- // has the neat side effect of setting the flags according to the sign.
- STATIC_ASSERT(HeapNumber::kSignMask == 0x80000000u);
- __ cmp(the_int(), Operand(0x80000000u));
- __ b(eq, &max_negative_int);
- // Set up the correct exponent in scratch_. All non-Smi int32s have the same.
- // A non-Smi integer is 1.xxx * 2^30 so the exponent is 30 (biased).
- uint32_t non_smi_exponent =
- (HeapNumber::kExponentBias + 30) << HeapNumber::kExponentShift;
- __ mov(scratch(), Operand(non_smi_exponent));
- // Set the sign bit in scratch_ if the value was negative.
- __ orr(scratch(), scratch(), Operand(HeapNumber::kSignMask), LeaveCC, cs);
- // Subtract from 0 if the value was negative.
- __ rsb(the_int(), the_int(), Operand::Zero(), LeaveCC, cs);
- // We should be masking the implict first digit of the mantissa away here,
- // but it just ends up combining harmlessly with the last digit of the
- // exponent that happens to be 1. The sign bit is 0 so we shift 10 to get
- // the most significant 1 to hit the last bit of the 12 bit sign and exponent.
- DCHECK(((1 << HeapNumber::kExponentShift) & non_smi_exponent) != 0);
- const int shift_distance = HeapNumber::kNonMantissaBitsInTopWord - 2;
- __ orr(scratch(), scratch(), Operand(the_int(), LSR, shift_distance));
- __ str(scratch(),
- FieldMemOperand(the_heap_number(), HeapNumber::kExponentOffset));
- __ mov(scratch(), Operand(the_int(), LSL, 32 - shift_distance));
- __ str(scratch(),
- FieldMemOperand(the_heap_number(), HeapNumber::kMantissaOffset));
- __ Ret();
-
- __ bind(&max_negative_int);
- // The max negative int32 is stored as a positive number in the mantissa of
- // a double because it uses a sign bit instead of using two's complement.
- // The actual mantissa bits stored are all 0 because the implicit most
- // significant 1 bit is not stored.
- non_smi_exponent += 1 << HeapNumber::kExponentShift;
- __ mov(ip, Operand(HeapNumber::kSignMask | non_smi_exponent));
- __ str(ip, FieldMemOperand(the_heap_number(), HeapNumber::kExponentOffset));
- __ mov(ip, Operand::Zero());
- __ str(ip, FieldMemOperand(the_heap_number(), HeapNumber::kMantissaOffset));
- __ Ret();
-}
-
-
// Handle the case where the lhs and rhs are the same object.
// Equality is almost reflexive (everything but NaN), so this is a test
// for "identity and not NaN".
@@ -967,7 +912,6 @@
void CodeStub::GenerateStubsAheadOfTime(Isolate* isolate) {
CEntryStub::GenerateAheadOfTime(isolate);
- WriteInt32ToHeapNumberStub::GenerateFixedRegStubsAheadOfTime(isolate);
StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime(isolate);
StubFailureTrampolineStub::GenerateAheadOfTime(isolate);
ArrayConstructorStubBase::GenerateStubsAheadOfTime(isolate);
diff --git a/src/arm/code-stubs-arm.h b/src/arm/code-stubs-arm.h
index 727bb1b..7f00cc0 100644
--- a/src/arm/code-stubs-arm.h
+++ b/src/arm/code-stubs-arm.h
@@ -46,44 +46,6 @@
};
-// This stub can convert a signed int32 to a heap number (double). It does
-// not work for int32s that are in Smi range! No GC occurs during this stub
-// so you don't have to set up the frame.
-class WriteInt32ToHeapNumberStub : public PlatformCodeStub {
- public:
- WriteInt32ToHeapNumberStub(Isolate* isolate, Register the_int,
- Register the_heap_number, Register scratch)
- : PlatformCodeStub(isolate) {
- minor_key_ = IntRegisterBits::encode(the_int.code()) |
- HeapNumberRegisterBits::encode(the_heap_number.code()) |
- ScratchRegisterBits::encode(scratch.code());
- }
-
- static void GenerateFixedRegStubsAheadOfTime(Isolate* isolate);
-
- private:
- Register the_int() const {
- return Register::from_code(IntRegisterBits::decode(minor_key_));
- }
-
- Register the_heap_number() const {
- return Register::from_code(HeapNumberRegisterBits::decode(minor_key_));
- }
-
- Register scratch() const {
- return Register::from_code(ScratchRegisterBits::decode(minor_key_));
- }
-
- // Minor key encoding in 16 bits.
- class IntRegisterBits: public BitField<int, 0, 4> {};
- class HeapNumberRegisterBits: public BitField<int, 4, 4> {};
- class ScratchRegisterBits: public BitField<int, 8, 4> {};
-
- DEFINE_NULL_CALL_INTERFACE_DESCRIPTOR();
- DEFINE_PLATFORM_CODE_STUB(WriteInt32ToHeapNumber, PlatformCodeStub);
-};
-
-
class RecordWriteStub: public PlatformCodeStub {
public:
RecordWriteStub(Isolate* isolate,
diff --git a/src/arm/lithium-codegen-arm.cc b/src/arm/lithium-codegen-arm.cc
index e945a13..e323e0d 100644
--- a/src/arm/lithium-codegen-arm.cc
+++ b/src/arm/lithium-codegen-arm.cc
@@ -2964,6 +2964,7 @@
__ add(sp, sp, Operand(sp_delta));
}
} else {
+ DCHECK(info()->IsStub()); // Functions would need to drop one more value.
Register reg = ToRegister(instr->parameter_count());
// The argument count parameter is a smi
__ SmiUntag(reg);
diff --git a/src/arm64/lithium-codegen-arm64.cc b/src/arm64/lithium-codegen-arm64.cc
index 1b8ae1b..9fe311c 100644
--- a/src/arm64/lithium-codegen-arm64.cc
+++ b/src/arm64/lithium-codegen-arm64.cc
@@ -4766,6 +4766,7 @@
int parameter_count = ToInteger32(instr->constant_parameter_count());
__ Drop(parameter_count + 1);
} else {
+ DCHECK(info()->IsStub()); // Functions would need to drop one more value.
Register parameter_count = ToRegister(instr->parameter_count());
__ DropBySMI(parameter_count);
}
diff --git a/src/code-stubs.h b/src/code-stubs.h
index 13f8e42..af5ae8d 100644
--- a/src/code-stubs.h
+++ b/src/code-stubs.h
@@ -92,9 +92,7 @@
// List of code stubs only used on ARM 32 bits platforms.
#if V8_TARGET_ARCH_ARM
-#define CODE_STUB_LIST_ARM(V) \
- V(DirectCEntry) \
- V(WriteInt32ToHeapNumber)
+#define CODE_STUB_LIST_ARM(V) V(DirectCEntry)
#else
#define CODE_STUB_LIST_ARM(V)
@@ -113,17 +111,15 @@
// List of code stubs only used on MIPS platforms.
#if V8_TARGET_ARCH_MIPS
-#define CODE_STUB_LIST_MIPS(V) \
- V(DirectCEntry) \
- V(RestoreRegistersState) \
- V(StoreRegistersState) \
- V(WriteInt32ToHeapNumber)
+#define CODE_STUB_LIST_MIPS(V) \
+ V(DirectCEntry) \
+ V(RestoreRegistersState) \
+ V(StoreRegistersState)
#elif V8_TARGET_ARCH_MIPS64
-#define CODE_STUB_LIST_MIPS(V) \
- V(DirectCEntry) \
- V(RestoreRegistersState) \
- V(StoreRegistersState) \
- V(WriteInt32ToHeapNumber)
+#define CODE_STUB_LIST_MIPS(V) \
+ V(DirectCEntry) \
+ V(RestoreRegistersState) \
+ V(StoreRegistersState)
#else
#define CODE_STUB_LIST_MIPS(V)
#endif
diff --git a/src/compiler/basic-block-instrumentor.cc b/src/compiler/basic-block-instrumentor.cc
index 708ec49..59bb7f4 100644
--- a/src/compiler/basic-block-instrumentor.cc
+++ b/src/compiler/basic-block-instrumentor.cc
@@ -69,7 +69,7 @@
CommonOperatorBuilder common(graph->zone());
Node* zero = graph->NewNode(common.Int32Constant(0));
Node* one = graph->NewNode(common.Int32Constant(1));
- MachineOperatorBuilder machine;
+ MachineOperatorBuilder machine(graph->zone());
BasicBlockVector* blocks = schedule->rpo_order();
size_t block_number = 0;
for (BasicBlockVector::iterator it = blocks->begin(); block_number < n_blocks;
diff --git a/src/compiler/instruction.h b/src/compiler/instruction.h
index b6fcb3c..8a6006a 100644
--- a/src/compiler/instruction.h
+++ b/src/compiler/instruction.h
@@ -332,6 +332,11 @@
return reinterpret_cast<SubKindOperand*>(op);
}
+ static const SubKindOperand* cast(const InstructionOperand* op) {
+ DCHECK(op->kind() == kOperandKind);
+ return reinterpret_cast<const SubKindOperand*>(op);
+ }
+
static void SetUpCache();
static void TearDownCache();
@@ -581,7 +586,7 @@
return parallel_moves_[pos];
}
- ParallelMove* GetParallelMove(InnerPosition pos) {
+ ParallelMove* GetParallelMove(InnerPosition pos) const {
return parallel_moves_[pos];
}
@@ -917,6 +922,7 @@
typedef InstructionDeque::const_iterator const_iterator;
const_iterator begin() const { return instructions_.begin(); }
const_iterator end() const { return instructions_.end(); }
+ const InstructionDeque& instructions() const { return instructions_; }
GapInstruction* GapAt(int index) const {
return GapInstruction::cast(InstructionAt(index));
diff --git a/src/compiler/machine-operator.cc b/src/compiler/machine-operator.cc
index 2ea1bf3..83459f7 100644
--- a/src/compiler/machine-operator.cc
+++ b/src/compiler/machine-operator.cc
@@ -7,6 +7,8 @@
#include "src/base/lazy-instance.h"
#include "src/compiler/opcodes.h"
#include "src/compiler/operator.h"
+#include "src/v8.h"
+#include "src/zone-inl.h"
namespace v8 {
namespace internal {
@@ -196,8 +198,9 @@
LAZY_INSTANCE_INITIALIZER;
-MachineOperatorBuilder::MachineOperatorBuilder(MachineType word, Flags flags)
- : cache_(kCache.Get()), word_(word), flags_(flags) {
+MachineOperatorBuilder::MachineOperatorBuilder(Zone* zone, MachineType word,
+ Flags flags)
+ : zone_(zone), cache_(kCache.Get()), word_(word), flags_(flags) {
DCHECK(word == kRepWord32 || word == kRepWord64);
}
@@ -220,8 +223,10 @@
default:
break;
}
- UNREACHABLE();
- return NULL;
+ // Uncached.
+ return new (zone_) Operator1<LoadRepresentation>( // --
+ IrOpcode::kLoad, Operator::kNoThrow | Operator::kNoWrite, "Load", 2, 1, 1,
+ 1, 1, 0, rep);
}
@@ -242,8 +247,10 @@
default:
break;
}
- UNREACHABLE();
- return NULL;
+ // Uncached.
+ return new (zone_) Operator1<StoreRepresentation>( // --
+ IrOpcode::kStore, Operator::kNoRead | Operator::kNoThrow, "Store", 3, 1,
+ 1, 0, 1, 0, rep);
}
} // namespace compiler
} // namespace internal
diff --git a/src/compiler/machine-operator.h b/src/compiler/machine-operator.h
index 979a887..8c0d11a 100644
--- a/src/compiler/machine-operator.h
+++ b/src/compiler/machine-operator.h
@@ -73,7 +73,7 @@
};
typedef base::Flags<Flag, unsigned> Flags;
- explicit MachineOperatorBuilder(MachineType word = kMachPtr,
+ explicit MachineOperatorBuilder(Zone* zone, MachineType word = kMachPtr,
Flags supportedOperators = kNoFlags);
const Operator* Word32And();
@@ -211,6 +211,7 @@
#undef PSEUDO_OP_LIST
private:
+ Zone* zone_;
const MachineOperatorGlobalCache& cache_;
const MachineType word_;
const Flags flags_;
diff --git a/src/compiler/node.h b/src/compiler/node.h
index 3a5afd2..8a442ce 100644
--- a/src/compiler/node.h
+++ b/src/compiler/node.h
@@ -68,6 +68,8 @@
typedef NodeSet::iterator NodeSetIter;
typedef NodeSet::reverse_iterator NodeSetRIter;
+typedef ZoneDeque<Node*> NodeDeque;
+
typedef ZoneVector<Node*> NodeVector;
typedef NodeVector::iterator NodeVectorIter;
typedef NodeVector::const_iterator NodeVectorConstIter;
diff --git a/src/compiler/pipeline.cc b/src/compiler/pipeline.cc
index f2571e3..5832905 100644
--- a/src/compiler/pipeline.cc
+++ b/src/compiler/pipeline.cc
@@ -53,7 +53,8 @@
graph_(new (graph_zone()) Graph(graph_zone())),
source_positions_(new SourcePositionTable(graph())),
machine_(new (graph_zone()) MachineOperatorBuilder(
- kMachPtr, InstructionSelector::SupportedMachineOperatorFlags())),
+ graph_zone(), kMachPtr,
+ InstructionSelector::SupportedMachineOperatorFlags())),
common_(new (graph_zone()) CommonOperatorBuilder(graph_zone())),
javascript_(new (graph_zone()) JSOperatorBuilder(graph_zone())),
jsgraph_(new (graph_zone())
@@ -584,15 +585,17 @@
ZonePool::Scope zone_scope(data->zone_pool());
SmartArrayPointer<char> debug_name;
+ RegisterAllocator::VerificationType verification_type =
+ RegisterAllocator::kNoVerify;
#ifdef DEBUG
debug_name = GetDebugName(info());
+ verification_type = RegisterAllocator::kVerifyAssignment;
#endif
-
RegisterAllocator allocator(RegisterConfiguration::ArchDefault(),
zone_scope.zone(), &frame, &sequence,
debug_name.get());
- if (!allocator.Allocate(data->pipeline_statistics())) {
+ if (!allocator.Allocate(data->pipeline_statistics(), verification_type)) {
info()->AbortOptimization(kNotEnoughVirtualRegistersRegalloc);
return Handle<Code>::null();
}
diff --git a/src/compiler/raw-machine-assembler.cc b/src/compiler/raw-machine-assembler.cc
index a8b658f..0327d1c 100644
--- a/src/compiler/raw-machine-assembler.cc
+++ b/src/compiler/raw-machine-assembler.cc
@@ -17,7 +17,7 @@
MachineOperatorBuilder::Flags flags)
: GraphBuilder(graph),
schedule_(new (zone()) Schedule(zone())),
- machine_(word, flags),
+ machine_(zone(), word, flags),
common_(zone()),
machine_sig_(machine_sig),
call_descriptor_(
diff --git a/src/compiler/register-allocator-verifier.cc b/src/compiler/register-allocator-verifier.cc
new file mode 100644
index 0000000..b8aefe1
--- /dev/null
+++ b/src/compiler/register-allocator-verifier.cc
@@ -0,0 +1,179 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/instruction.h"
+#include "src/compiler/register-allocator-verifier.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+static size_t OperandCount(const Instruction* instr) {
+ return instr->InputCount() + instr->OutputCount() + instr->TempCount();
+}
+
+
+RegisterAllocatorVerifier::RegisterAllocatorVerifier(
+ Zone* zone, const InstructionSequence* sequence)
+ : sequence_(sequence), constraints_(zone) {
+ constraints_.reserve(sequence->instructions().size());
+ for (const auto* instr : sequence->instructions()) {
+ const size_t operand_count = OperandCount(instr);
+ auto* op_constraints =
+ zone->NewArray<OperandConstraint>(static_cast<int>(operand_count));
+ // Construct OperandConstraints for all InstructionOperands, eliminating
+ // kSameAsFirst along the way.
+ size_t count = 0;
+ for (size_t i = 0; i < instr->InputCount(); ++i, ++count) {
+ BuildConstraint(instr->InputAt(i), &op_constraints[count]);
+ CHECK_NE(kSameAsFirst, op_constraints[count].type_);
+ }
+ for (size_t i = 0; i < instr->OutputCount(); ++i, ++count) {
+ BuildConstraint(instr->OutputAt(i), &op_constraints[count]);
+ if (op_constraints[count].type_ == kSameAsFirst) {
+ CHECK(instr->InputCount() > 0);
+ op_constraints[count] = op_constraints[0];
+ }
+ }
+ for (size_t i = 0; i < instr->TempCount(); ++i, ++count) {
+ BuildConstraint(instr->TempAt(i), &op_constraints[count]);
+ CHECK_NE(kSameAsFirst, op_constraints[count].type_);
+ }
+ // All gaps should be totally unallocated at this point.
+ if (instr->IsGapMoves()) {
+ const auto* gap = GapInstruction::cast(instr);
+ for (int i = GapInstruction::FIRST_INNER_POSITION;
+ i <= GapInstruction::LAST_INNER_POSITION; i++) {
+ GapInstruction::InnerPosition inner_pos =
+ static_cast<GapInstruction::InnerPosition>(i);
+ CHECK_EQ(NULL, gap->GetParallelMove(inner_pos));
+ }
+ }
+ InstructionConstraint instr_constraint = {instr, operand_count,
+ op_constraints};
+ constraints()->push_back(instr_constraint);
+ }
+}
+
+
+void RegisterAllocatorVerifier::VerifyAssignment() {
+ CHECK(sequence()->instructions().size() == constraints()->size());
+ auto instr_it = sequence()->begin();
+ for (const auto& instr_constraint : *constraints()) {
+ const auto* instr = instr_constraint.instruction_;
+ const size_t operand_count = instr_constraint.operand_constaints_size_;
+ const auto* op_constraints = instr_constraint.operand_constraints_;
+ CHECK_EQ(instr, *instr_it);
+ CHECK(operand_count == OperandCount(instr));
+ size_t count = 0;
+ for (size_t i = 0; i < instr->InputCount(); ++i, ++count) {
+ CheckConstraint(instr->InputAt(i), &op_constraints[count]);
+ }
+ for (size_t i = 0; i < instr->OutputCount(); ++i, ++count) {
+ CheckConstraint(instr->OutputAt(i), &op_constraints[count]);
+ }
+ for (size_t i = 0; i < instr->TempCount(); ++i, ++count) {
+ CheckConstraint(instr->TempAt(i), &op_constraints[count]);
+ }
+ ++instr_it;
+ }
+}
+
+
+void RegisterAllocatorVerifier::BuildConstraint(const InstructionOperand* op,
+ OperandConstraint* constraint) {
+ constraint->value_ = kMinInt;
+ if (op->IsConstant()) {
+ constraint->type_ = kConstant;
+ constraint->value_ = ConstantOperand::cast(op)->index();
+ } else if (op->IsImmediate()) {
+ constraint->type_ = kImmediate;
+ constraint->value_ = ImmediateOperand::cast(op)->index();
+ } else {
+ CHECK(op->IsUnallocated());
+ const auto* unallocated = UnallocatedOperand::cast(op);
+ int vreg = unallocated->virtual_register();
+ if (unallocated->basic_policy() == UnallocatedOperand::FIXED_SLOT) {
+ constraint->type_ = kFixedSlot;
+ constraint->value_ = unallocated->fixed_slot_index();
+ } else {
+ switch (unallocated->extended_policy()) {
+ case UnallocatedOperand::ANY:
+ CHECK(false);
+ break;
+ case UnallocatedOperand::NONE:
+ if (sequence()->IsDouble(vreg)) {
+ constraint->type_ = kNoneDouble;
+ } else {
+ constraint->type_ = kNone;
+ }
+ break;
+ case UnallocatedOperand::FIXED_REGISTER:
+ constraint->type_ = kFixedRegister;
+ constraint->value_ = unallocated->fixed_register_index();
+ break;
+ case UnallocatedOperand::FIXED_DOUBLE_REGISTER:
+ constraint->type_ = kFixedDoubleRegister;
+ constraint->value_ = unallocated->fixed_register_index();
+ break;
+ case UnallocatedOperand::MUST_HAVE_REGISTER:
+ if (sequence()->IsDouble(vreg)) {
+ constraint->type_ = kDoubleRegister;
+ } else {
+ constraint->type_ = kRegister;
+ }
+ break;
+ case UnallocatedOperand::SAME_AS_FIRST_INPUT:
+ constraint->type_ = kSameAsFirst;
+ break;
+ }
+ }
+ }
+}
+
+
+void RegisterAllocatorVerifier::CheckConstraint(
+ const InstructionOperand* op, const OperandConstraint* constraint) {
+ switch (constraint->type_) {
+ case kConstant:
+ CHECK(op->IsConstant());
+ CHECK_EQ(op->index(), constraint->value_);
+ return;
+ case kImmediate:
+ CHECK(op->IsImmediate());
+ CHECK_EQ(op->index(), constraint->value_);
+ return;
+ case kRegister:
+ CHECK(op->IsRegister());
+ return;
+ case kFixedRegister:
+ CHECK(op->IsRegister());
+ CHECK_EQ(op->index(), constraint->value_);
+ return;
+ case kDoubleRegister:
+ CHECK(op->IsDoubleRegister());
+ return;
+ case kFixedDoubleRegister:
+ CHECK(op->IsDoubleRegister());
+ CHECK_EQ(op->index(), constraint->value_);
+ return;
+ case kFixedSlot:
+ CHECK(op->IsStackSlot());
+ CHECK_EQ(op->index(), constraint->value_);
+ return;
+ case kNone:
+ CHECK(op->IsRegister() || op->IsStackSlot());
+ return;
+ case kNoneDouble:
+ CHECK(op->IsDoubleRegister() || op->IsDoubleStackSlot());
+ return;
+ case kSameAsFirst:
+ CHECK(false);
+ return;
+ }
+}
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
diff --git a/src/compiler/register-allocator-verifier.h b/src/compiler/register-allocator-verifier.h
new file mode 100644
index 0000000..10592e1
--- /dev/null
+++ b/src/compiler/register-allocator-verifier.h
@@ -0,0 +1,68 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_REGISTER_ALLOCATOR_VERIFIER_H_
+#define V8_REGISTER_ALLOCATOR_VERIFIER_H_
+
+#include "src/v8.h"
+#include "src/zone-containers.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+class InstructionOperand;
+class InstructionSequence;
+
+class RegisterAllocatorVerifier FINAL : public ZoneObject {
+ public:
+ RegisterAllocatorVerifier(Zone* zone, const InstructionSequence* sequence);
+
+ void VerifyAssignment();
+
+ private:
+ enum ConstraintType {
+ kConstant,
+ kImmediate,
+ kRegister,
+ kFixedRegister,
+ kDoubleRegister,
+ kFixedDoubleRegister,
+ kFixedSlot,
+ kNone,
+ kNoneDouble,
+ kSameAsFirst
+ };
+
+ struct OperandConstraint {
+ ConstraintType type_;
+ int value_; // subkind index when relevant
+ };
+
+ struct InstructionConstraint {
+ const Instruction* instruction_;
+ size_t operand_constaints_size_;
+ OperandConstraint* operand_constraints_;
+ };
+
+ typedef ZoneVector<InstructionConstraint> Constraints;
+
+ const InstructionSequence* sequence() const { return sequence_; }
+ Constraints* constraints() { return &constraints_; }
+ void BuildConstraint(const InstructionOperand* op,
+ OperandConstraint* constraint);
+ void CheckConstraint(const InstructionOperand* op,
+ const OperandConstraint* constraint);
+
+ const InstructionSequence* const sequence_;
+ Constraints constraints_;
+
+ DISALLOW_COPY_AND_ASSIGN(RegisterAllocatorVerifier);
+};
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
+
+#endif
diff --git a/src/compiler/register-allocator.cc b/src/compiler/register-allocator.cc
index 23a7df6..0069df8 100644
--- a/src/compiler/register-allocator.cc
+++ b/src/compiler/register-allocator.cc
@@ -5,6 +5,7 @@
#include "src/compiler/linkage.h"
#include "src/compiler/pipeline-statistics.h"
#include "src/compiler/register-allocator.h"
+#include "src/compiler/register-allocator-verifier.h"
#include "src/string-stream.h"
namespace v8 {
@@ -1116,7 +1117,16 @@
}
-bool RegisterAllocator::Allocate(PipelineStatistics* stats) {
+bool RegisterAllocator::Allocate(PipelineStatistics* stats,
+ VerificationType verification_type) {
+ SmartPointer<Zone> verifier_zone;
+ RegisterAllocatorVerifier* verifier = NULL;
+ if (verification_type == kVerifyAssignment) {
+ // Don't track usage for this zone in compiler stats.
+ verifier_zone.Reset(new Zone(local_zone()->isolate()));
+ verifier = new (verifier_zone.get())
+ RegisterAllocatorVerifier(verifier_zone.get(), code());
+ }
assigned_registers_ = new (code_zone())
BitVector(config()->num_general_registers(), code_zone());
assigned_double_registers_ = new (code_zone())
@@ -1158,6 +1168,9 @@
}
frame()->SetAllocatedRegisters(assigned_registers_);
frame()->SetAllocatedDoubleRegisters(assigned_double_registers_);
+ if (verifier != NULL) {
+ verifier->VerifyAssignment();
+ }
return true;
}
diff --git a/src/compiler/register-allocator.h b/src/compiler/register-allocator.h
index a6578af..690957d 100644
--- a/src/compiler/register-allocator.h
+++ b/src/compiler/register-allocator.h
@@ -322,12 +322,15 @@
class RegisterAllocator FINAL {
public:
+ enum VerificationType { kNoVerify, kVerifyAssignment };
+
explicit RegisterAllocator(const RegisterConfiguration* config,
Zone* local_zone, Frame* frame,
InstructionSequence* code,
const char* debug_name = nullptr);
- bool Allocate(PipelineStatistics* stats = NULL);
+ bool Allocate(PipelineStatistics* stats = NULL,
+ VerificationType verification_type = kNoVerify);
bool AllocationOk() { return allocation_ok_; }
BitVector* assigned_registers() { return assigned_registers_; }
BitVector* assigned_double_registers() { return assigned_double_registers_; }
diff --git a/src/compiler/select-lowering.cc b/src/compiler/select-lowering.cc
index 2e51d72..cb2b042 100644
--- a/src/compiler/select-lowering.cc
+++ b/src/compiler/select-lowering.cc
@@ -26,26 +26,61 @@
if (node->opcode() != IrOpcode::kSelect) return NoChange();
SelectParameters const p = SelectParametersOf(node->op());
- Node* const cond = node->InputAt(0);
+ Node* cond = node->InputAt(0);
+ Node* vthen = node->InputAt(1);
+ Node* velse = node->InputAt(2);
+ Node* merge = nullptr;
// Check if we already have a diamond for this condition.
- auto i = merges_.find(cond);
- if (i == merges_.end()) {
- // Create a new diamond for this condition and remember its merge node.
- Diamond d(graph(), common(), cond, p.hint());
- i = merges_.insert(std::make_pair(cond, d.merge)).first;
- }
+ auto range = merges_.equal_range(cond);
+ for (auto i = range.first;; ++i) {
+ if (i == range.second) {
+ // Create a new diamond for this condition and remember its merge node.
+ Diamond d(graph(), common(), cond, p.hint());
+ merges_.insert(std::make_pair(cond, d.merge));
+ merge = d.merge;
+ break;
+ }
- DCHECK_EQ(cond, i->first);
+ // If the diamond is reachable from the Select, merging them would result in
+ // an unschedulable graph, so we cannot reuse the diamond in that case.
+ merge = i->second;
+ if (!ReachableFrom(merge, node)) {
+ break;
+ }
+ }
// Create a Phi hanging off the previously determined merge.
node->set_op(common()->Phi(p.type(), 2));
- node->ReplaceInput(0, node->InputAt(1));
- node->ReplaceInput(1, node->InputAt(2));
- node->ReplaceInput(2, i->second);
+ node->ReplaceInput(0, vthen);
+ node->ReplaceInput(1, velse);
+ node->ReplaceInput(2, merge);
return Changed(node);
}
+
+bool SelectLowering::ReachableFrom(Node* const sink, Node* const source) {
+ // TODO(turbofan): This is probably horribly expensive, and it should be moved
+ // into node.h or somewhere else?!
+ Zone zone(graph()->zone()->isolate());
+ std::queue<Node*, NodeDeque> queue((NodeDeque(&zone)));
+ BoolVector visited(graph()->NodeCount(), false, &zone);
+ queue.push(source);
+ visited[source->id()] = true;
+ while (!queue.empty()) {
+ Node* current = queue.front();
+ if (current == sink) return true;
+ queue.pop();
+ for (auto input : current->inputs()) {
+ if (!visited[input->id()]) {
+ queue.push(input);
+ visited[input->id()] = true;
+ }
+ }
+ }
+ return false;
+}
+
} // namespace compiler
} // namespace internal
} // namespace v8
diff --git a/src/compiler/select-lowering.h b/src/compiler/select-lowering.h
index ae22cad..05ea0e0 100644
--- a/src/compiler/select-lowering.h
+++ b/src/compiler/select-lowering.h
@@ -28,8 +28,10 @@
Reduction Reduce(Node* node) OVERRIDE;
private:
- typedef std::map<Node*, Node*, std::less<Node*>,
- zone_allocator<std::pair<Node* const, Node*>>> Merges;
+ typedef std::multimap<Node*, Node*, std::less<Node*>,
+ zone_allocator<std::pair<Node* const, Node*>>> Merges;
+
+ bool ReachableFrom(Node* const sink, Node* const source);
CommonOperatorBuilder* common() const { return common_; }
Graph* graph() const { return graph_; }
diff --git a/src/compiler/simplified-lowering.cc b/src/compiler/simplified-lowering.cc
index c50b338..33a3077 100644
--- a/src/compiler/simplified-lowering.cc
+++ b/src/compiler/simplified-lowering.cc
@@ -672,8 +672,11 @@
VisitUnop(node, kTypeInt32 | use_rep, kTypeInt32 | use_rep);
if (lower()) DeferReplacement(node, node->InputAt(0));
} else if ((in & kTypeMask) == kTypeUint32 ||
- (in & kTypeMask) == kTypeInt32 ||
- in_upper->Is(Type::Unsigned32()) ||
+ in_upper->Is(Type::Unsigned32())) {
+ // Just change representation if necessary.
+ VisitUnop(node, kTypeUint32 | kRepWord32, kTypeInt32 | kRepWord32);
+ if (lower()) DeferReplacement(node, node->InputAt(0));
+ } else if ((in & kTypeMask) == kTypeInt32 ||
(in & kRepMask) == kRepWord32) {
// Just change representation if necessary.
VisitUnop(node, kTypeInt32 | kRepWord32, kTypeInt32 | kRepWord32);
@@ -697,12 +700,15 @@
VisitUnop(node, kTypeUint32 | use_rep, kTypeUint32 | use_rep);
if (lower()) DeferReplacement(node, node->InputAt(0));
} else if ((in & kTypeMask) == kTypeUint32 ||
- (in & kTypeMask) == kTypeInt32 ||
- in_upper->Is(Type::Signed32()) ||
- (in & kRepMask) == kRepWord32) {
+ in_upper->Is(Type::Unsigned32())) {
// Just change representation if necessary.
VisitUnop(node, kTypeUint32 | kRepWord32, kTypeUint32 | kRepWord32);
if (lower()) DeferReplacement(node, node->InputAt(0));
+ } else if ((in & kTypeMask) == kTypeInt32 ||
+ (in & kRepMask) == kRepWord32) {
+ // Just change representation if necessary.
+ VisitUnop(node, kTypeInt32 | kRepWord32, kTypeUint32 | kRepWord32);
+ if (lower()) DeferReplacement(node, node->InputAt(0));
} else {
// Require the input in float64 format and perform truncation.
// TODO(turbofan): avoid a truncation with a smi check.
diff --git a/src/compiler/typer.cc b/src/compiler/typer.cc
index 0fa2b95..99f4f89 100644
--- a/src/compiler/typer.cc
+++ b/src/compiler/typer.cc
@@ -1033,8 +1033,7 @@
lhs = Rangify(lhs, t);
rhs = Rangify(rhs, t);
if (lhs->IsRange() && rhs->IsRange()) {
- // TODO(titzer): fix me.
- // return JSModulusRanger(lhs->AsRange(), rhs->AsRange(), t);
+ return JSModulusRanger(lhs->AsRange(), rhs->AsRange(), t);
}
return Type::OrderedNumber();
}
diff --git a/src/contexts.h b/src/contexts.h
index 716682d..8182532 100644
--- a/src/contexts.h
+++ b/src/contexts.h
@@ -413,13 +413,13 @@
UNSCOPABLES_SYMBOL_INDEX,
ARRAY_VALUES_ITERATOR_INDEX,
GLOBAL_CONTEXT_TABLE_INDEX,
+ MAP_CACHE_INDEX,
// Properties from here are treated as weak references by the full GC.
// Scavenge treats them as strong references.
OPTIMIZED_FUNCTIONS_LIST, // Weak.
OPTIMIZED_CODE_LIST, // Weak.
DEOPTIMIZED_CODE_LIST, // Weak.
- MAP_CACHE_INDEX, // Weak.
NEXT_CONTEXT_LINK, // Weak.
// Total number of slots.
diff --git a/src/factory.cc b/src/factory.cc
index 796fd13..e68ac9b 100644
--- a/src/factory.cc
+++ b/src/factory.cc
@@ -2420,35 +2420,42 @@
}
-Handle<MapCache> Factory::AddToMapCache(Handle<Context> context,
- Handle<FixedArray> keys,
- Handle<Map> map) {
- Handle<MapCache> map_cache = handle(MapCache::cast(context->map_cache()));
- Handle<MapCache> result = MapCache::Put(map_cache, keys, map);
- context->set_map_cache(*result);
- return result;
-}
-
-
Handle<Map> Factory::ObjectLiteralMapFromCache(Handle<Context> context,
- Handle<FixedArray> keys) {
+ int number_of_properties,
+ bool* is_result_from_cache) {
+ const int kMapCacheSize = 128;
+
+ if (number_of_properties > kMapCacheSize) {
+ *is_result_from_cache = false;
+ return Map::Create(isolate(), number_of_properties);
+ }
+ *is_result_from_cache = true;
+ if (number_of_properties == 0) {
+ // Reuse the initial map of the Object function if the literal has no
+ // predeclared properties.
+ return handle(context->object_function()->initial_map(), isolate());
+ }
+ int cache_index = number_of_properties - 1;
if (context->map_cache()->IsUndefined()) {
// Allocate the new map cache for the native context.
- Handle<MapCache> new_cache = MapCache::New(isolate(), 24);
+ Handle<FixedArray> new_cache = NewFixedArray(kMapCacheSize, TENURED);
context->set_map_cache(*new_cache);
}
// Check to see whether there is a matching element in the cache.
- Handle<MapCache> cache =
- Handle<MapCache>(MapCache::cast(context->map_cache()));
- Handle<Object> result = Handle<Object>(cache->Lookup(*keys), isolate());
- if (result->IsMap()) return Handle<Map>::cast(result);
- int length = keys->length();
- // Create a new map and add it to the cache. Reuse the initial map of the
- // Object function if the literal has no predeclared properties.
- Handle<Map> map = length == 0
- ? handle(context->object_function()->initial_map())
- : Map::Create(isolate(), length);
- AddToMapCache(context, keys, map);
+ Handle<FixedArray> cache(FixedArray::cast(context->map_cache()));
+ {
+ Object* result = cache->get(cache_index);
+ if (result->IsWeakCell()) {
+ WeakCell* cell = WeakCell::cast(result);
+ if (!cell->cleared()) {
+ return handle(Map::cast(cell->value()), isolate());
+ }
+ }
+ }
+ // Create a new map and add it to the cache.
+ Handle<Map> map = Map::Create(isolate(), number_of_properties);
+ Handle<WeakCell> cell = NewWeakCell(map);
+ cache->set(cache_index, *cell);
return map;
}
@@ -2467,6 +2474,7 @@
regexp->set_data(*store);
}
+
void Factory::SetRegExpIrregexpData(Handle<JSRegExp> regexp,
JSRegExp::Type type,
Handle<String> source,
@@ -2488,7 +2496,6 @@
}
-
MaybeHandle<FunctionTemplateInfo> Factory::ConfigureInstance(
Handle<FunctionTemplateInfo> desc, Handle<JSObject> instance) {
// Configure the instance by adding the properties specified by the
diff --git a/src/factory.h b/src/factory.h
index 6a9ee55..44bb1be 100644
--- a/src/factory.h
+++ b/src/factory.h
@@ -643,10 +643,11 @@
Handle<DebugInfo> NewDebugInfo(Handle<SharedFunctionInfo> shared);
- // Return a map using the map cache in the native context.
- // The key the an ordered set of property names.
+ // Return a map for a given number of properties using the map cache in the
+ // native context.
Handle<Map> ObjectLiteralMapFromCache(Handle<Context> context,
- Handle<FixedArray> keys);
+ int number_of_properties,
+ bool* is_result_from_cache);
// Creates a new FixedArray that holds the data associated with the
// atom regexp and stores it in the regexp.
@@ -689,14 +690,6 @@
// Creates a code object that is not yet fully initialized yet.
inline Handle<Code> NewCodeRaw(int object_size, bool immovable);
- // Create a new map cache.
- Handle<MapCache> NewMapCache(int at_least_space_for);
-
- // Update the map cache in the native context with (keys, map)
- Handle<MapCache> AddToMapCache(Handle<Context> context,
- Handle<FixedArray> keys,
- Handle<Map> map);
-
// Attempt to find the number in a small cache. If we finds it, return
// the string representation of the number. Otherwise return undefined.
Handle<Object> GetNumberStringCache(Handle<Object> number);
diff --git a/src/heap-snapshot-generator.cc b/src/heap-snapshot-generator.cc
index 7f217bb..df99de4 100644
--- a/src/heap-snapshot-generator.cc
+++ b/src/heap-snapshot-generator.cc
@@ -1282,7 +1282,7 @@
Context::FIRST_WEAK_SLOT);
STATIC_ASSERT(Context::NEXT_CONTEXT_LINK + 1 ==
Context::NATIVE_CONTEXT_SLOTS);
- STATIC_ASSERT(Context::FIRST_WEAK_SLOT + 5 ==
+ STATIC_ASSERT(Context::FIRST_WEAK_SLOT + 4 ==
Context::NATIVE_CONTEXT_SLOTS);
}
}
diff --git a/src/heap/mark-compact.cc b/src/heap/mark-compact.cc
index 2cefebf..b929d85 100644
--- a/src/heap/mark-compact.cc
+++ b/src/heap/mark-compact.cc
@@ -2232,12 +2232,6 @@
void MarkCompactCollector::AfterMarking() {
- // Object literal map caches reference strings (cache keys) and maps
- // (cache values). At this point still useful maps have already been
- // marked. Mark the keys for the alive values before we process the
- // string table.
- ProcessMapCaches();
-
// Prune the string table removing all strings only pointed to by the
// string table. Cannot use string_table() here because the string
// table is marked.
@@ -2274,57 +2268,6 @@
}
-void MarkCompactCollector::ProcessMapCaches() {
- Object* raw_context = heap()->native_contexts_list();
- while (raw_context != heap()->undefined_value()) {
- Context* context = reinterpret_cast<Context*>(raw_context);
- if (IsMarked(context)) {
- HeapObject* raw_map_cache =
- HeapObject::cast(context->get(Context::MAP_CACHE_INDEX));
- // A map cache may be reachable from the stack. In this case
- // it's already transitively marked and it's too late to clean
- // up its parts.
- if (!IsMarked(raw_map_cache) &&
- raw_map_cache != heap()->undefined_value()) {
- MapCache* map_cache = reinterpret_cast<MapCache*>(raw_map_cache);
- int existing_elements = map_cache->NumberOfElements();
- int used_elements = 0;
- for (int i = MapCache::kElementsStartIndex; i < map_cache->length();
- i += MapCache::kEntrySize) {
- Object* raw_key = map_cache->get(i);
- if (raw_key == heap()->undefined_value() ||
- raw_key == heap()->the_hole_value())
- continue;
- STATIC_ASSERT(MapCache::kEntrySize == 2);
- Object* raw_map = map_cache->get(i + 1);
- if (raw_map->IsHeapObject() && IsMarked(raw_map)) {
- ++used_elements;
- } else {
- // Delete useless entries with unmarked maps.
- DCHECK(raw_map->IsMap());
- map_cache->set_the_hole(i);
- map_cache->set_the_hole(i + 1);
- }
- }
- if (used_elements == 0) {
- context->set(Context::MAP_CACHE_INDEX, heap()->undefined_value());
- } else {
- // Note: we don't actually shrink the cache here to avoid
- // extra complexity during GC. We rely on subsequent cache
- // usages (EnsureCapacity) to do this.
- map_cache->ElementsRemoved(existing_elements - used_elements);
- MarkBit map_cache_markbit = Marking::MarkBitFrom(map_cache);
- MarkObject(map_cache, map_cache_markbit);
- }
- }
- }
- // Move to next element in the list.
- raw_context = context->get(Context::NEXT_CONTEXT_LINK);
- }
- ProcessMarkingDeque();
-}
-
-
void MarkCompactCollector::ClearNonLiveReferences() {
// Iterate over the map space, setting map transitions that go from
// a marked map to an unmarked map to null transitions. This action
diff --git a/src/heap/mark-compact.h b/src/heap/mark-compact.h
index e48d5a3..cd2c9b6 100644
--- a/src/heap/mark-compact.h
+++ b/src/heap/mark-compact.h
@@ -786,10 +786,6 @@
// flag on the marking stack.
void RefillMarkingDeque();
- // After reachable maps have been marked process per context object
- // literal map caches removing unmarked entries.
- void ProcessMapCaches();
-
// Callback function for telling whether the object *p is an unmarked
// heap object.
static bool IsUnmarkedHeapObject(Object** p);
diff --git a/src/hydrogen-instructions.cc b/src/hydrogen-instructions.cc
index ce76fbe..1589066 100644
--- a/src/hydrogen-instructions.cc
+++ b/src/hydrogen-instructions.cc
@@ -2834,6 +2834,10 @@
// could cause heap object checks not to get emitted.
object_ = Unique<Object>(Handle<Object>::null());
}
+ if (r.IsSmiOrInteger32()) {
+ // If it's not a heap object, it can't be in new space.
+ bit_field_ = IsNotInNewSpaceField::update(bit_field_, true);
+ }
set_representation(r);
SetFlag(kUseGVN);
}
diff --git a/src/ia32/lithium-codegen-ia32.cc b/src/ia32/lithium-codegen-ia32.cc
index c64a4b0..d6ffbee 100644
--- a/src/ia32/lithium-codegen-ia32.cc
+++ b/src/ia32/lithium-codegen-ia32.cc
@@ -2753,6 +2753,7 @@
}
__ Ret((parameter_count + extra_value_count) * kPointerSize, ecx);
} else {
+ DCHECK(info()->IsStub()); // Functions would need to drop one more value.
Register reg = ToRegister(instr->parameter_count());
// The argument count parameter is a smi
__ SmiUntag(reg);
@@ -2770,6 +2771,7 @@
if (dynamic_frame_alignment) {
__ inc(reg); // 1 more for alignment
}
+
__ shl(reg, kPointerSizeLog2);
__ add(esp, reg);
__ jmp(return_addr_reg);
diff --git a/src/mips/code-stubs-mips.cc b/src/mips/code-stubs-mips.cc
index 5b34cc2..cfc89d1 100644
--- a/src/mips/code-stubs-mips.cc
+++ b/src/mips/code-stubs-mips.cc
@@ -272,66 +272,6 @@
}
-void WriteInt32ToHeapNumberStub::GenerateFixedRegStubsAheadOfTime(
- Isolate* isolate) {
- WriteInt32ToHeapNumberStub stub1(isolate, a1, v0, a2, a3);
- WriteInt32ToHeapNumberStub stub2(isolate, a2, v0, a3, a0);
- stub1.GetCode();
- stub2.GetCode();
-}
-
-
-// See comment for class, this does NOT work for int32's that are in Smi range.
-void WriteInt32ToHeapNumberStub::Generate(MacroAssembler* masm) {
- Label max_negative_int;
- // the_int_ has the answer which is a signed int32 but not a Smi.
- // We test for the special value that has a different exponent.
- STATIC_ASSERT(HeapNumber::kSignMask == 0x80000000u);
- // Test sign, and save for later conditionals.
- __ And(sign(), the_int(), Operand(0x80000000u));
- __ Branch(&max_negative_int, eq, the_int(), Operand(0x80000000u));
-
- // Set up the correct exponent in scratch_. All non-Smi int32s have the same.
- // A non-Smi integer is 1.xxx * 2^30 so the exponent is 30 (biased).
- uint32_t non_smi_exponent =
- (HeapNumber::kExponentBias + 30) << HeapNumber::kExponentShift;
- __ li(scratch(), Operand(non_smi_exponent));
- // Set the sign bit in scratch_ if the value was negative.
- __ or_(scratch(), scratch(), sign());
- // Subtract from 0 if the value was negative.
- __ subu(at, zero_reg, the_int());
- __ Movn(the_int(), at, sign());
- // We should be masking the implict first digit of the mantissa away here,
- // but it just ends up combining harmlessly with the last digit of the
- // exponent that happens to be 1. The sign bit is 0 so we shift 10 to get
- // the most significant 1 to hit the last bit of the 12 bit sign and exponent.
- DCHECK(((1 << HeapNumber::kExponentShift) & non_smi_exponent) != 0);
- const int shift_distance = HeapNumber::kNonMantissaBitsInTopWord - 2;
- __ srl(at, the_int(), shift_distance);
- __ or_(scratch(), scratch(), at);
- __ sw(scratch(), FieldMemOperand(the_heap_number(),
- HeapNumber::kExponentOffset));
- __ sll(scratch(), the_int(), 32 - shift_distance);
- __ Ret(USE_DELAY_SLOT);
- __ sw(scratch(), FieldMemOperand(the_heap_number(),
- HeapNumber::kMantissaOffset));
-
- __ bind(&max_negative_int);
- // The max negative int32 is stored as a positive number in the mantissa of
- // a double because it uses a sign bit instead of using two's complement.
- // The actual mantissa bits stored are all 0 because the implicit most
- // significant 1 bit is not stored.
- non_smi_exponent += 1 << HeapNumber::kExponentShift;
- __ li(scratch(), Operand(HeapNumber::kSignMask | non_smi_exponent));
- __ sw(scratch(),
- FieldMemOperand(the_heap_number(), HeapNumber::kExponentOffset));
- __ mov(scratch(), zero_reg);
- __ Ret(USE_DELAY_SLOT);
- __ sw(scratch(),
- FieldMemOperand(the_heap_number(), HeapNumber::kMantissaOffset));
-}
-
-
// Handle the case where the lhs and rhs are the same object.
// Equality is almost reflexive (everything but NaN), so this is a test
// for "identity and not NaN".
@@ -1058,7 +998,6 @@
void CodeStub::GenerateStubsAheadOfTime(Isolate* isolate) {
CEntryStub::GenerateAheadOfTime(isolate);
- WriteInt32ToHeapNumberStub::GenerateFixedRegStubsAheadOfTime(isolate);
StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime(isolate);
StubFailureTrampolineStub::GenerateAheadOfTime(isolate);
ArrayConstructorStubBase::GenerateStubsAheadOfTime(isolate);
diff --git a/src/mips/code-stubs-mips.h b/src/mips/code-stubs-mips.h
index afad32b..b3d2c45 100644
--- a/src/mips/code-stubs-mips.h
+++ b/src/mips/code-stubs-mips.h
@@ -73,55 +73,6 @@
};
-// This stub can convert a signed int32 to a heap number (double). It does
-// not work for int32s that are in Smi range! No GC occurs during this stub
-// so you don't have to set up the frame.
-class WriteInt32ToHeapNumberStub : public PlatformCodeStub {
- public:
- WriteInt32ToHeapNumberStub(Isolate* isolate, Register the_int,
- Register the_heap_number, Register scratch,
- Register scratch2)
- : PlatformCodeStub(isolate) {
- minor_key_ = IntRegisterBits::encode(the_int.code()) |
- HeapNumberRegisterBits::encode(the_heap_number.code()) |
- ScratchRegisterBits::encode(scratch.code()) |
- SignRegisterBits::encode(scratch2.code());
- DCHECK(IntRegisterBits::is_valid(the_int.code()));
- DCHECK(HeapNumberRegisterBits::is_valid(the_heap_number.code()));
- DCHECK(ScratchRegisterBits::is_valid(scratch.code()));
- DCHECK(SignRegisterBits::is_valid(scratch2.code()));
- }
-
- static void GenerateFixedRegStubsAheadOfTime(Isolate* isolate);
-
- private:
- Register the_int() const {
- return Register::from_code(IntRegisterBits::decode(minor_key_));
- }
-
- Register the_heap_number() const {
- return Register::from_code(HeapNumberRegisterBits::decode(minor_key_));
- }
-
- Register scratch() const {
- return Register::from_code(ScratchRegisterBits::decode(minor_key_));
- }
-
- Register sign() const {
- return Register::from_code(SignRegisterBits::decode(minor_key_));
- }
-
- // Minor key encoding in 16 bits.
- class IntRegisterBits: public BitField<int, 0, 4> {};
- class HeapNumberRegisterBits: public BitField<int, 4, 4> {};
- class ScratchRegisterBits: public BitField<int, 8, 4> {};
- class SignRegisterBits: public BitField<int, 12, 4> {};
-
- DEFINE_NULL_CALL_INTERFACE_DESCRIPTOR();
- DEFINE_PLATFORM_CODE_STUB(WriteInt32ToHeapNumber, PlatformCodeStub);
-};
-
-
class RecordWriteStub: public PlatformCodeStub {
public:
RecordWriteStub(Isolate* isolate,
diff --git a/src/mips/lithium-codegen-mips.cc b/src/mips/lithium-codegen-mips.cc
index c9e3686..07f32de 100644
--- a/src/mips/lithium-codegen-mips.cc
+++ b/src/mips/lithium-codegen-mips.cc
@@ -2858,6 +2858,7 @@
__ Addu(sp, sp, Operand(sp_delta));
}
} else {
+ DCHECK(info()->IsStub()); // Functions would need to drop one more value.
Register reg = ToRegister(instr->parameter_count());
// The argument count parameter is a smi
__ SmiUntag(reg);
diff --git a/src/mips64/code-stubs-mips64.cc b/src/mips64/code-stubs-mips64.cc
index 8f39d02..481fb8c 100644
--- a/src/mips64/code-stubs-mips64.cc
+++ b/src/mips64/code-stubs-mips64.cc
@@ -268,66 +268,6 @@
}
-void WriteInt32ToHeapNumberStub::GenerateFixedRegStubsAheadOfTime(
- Isolate* isolate) {
- WriteInt32ToHeapNumberStub stub1(isolate, a1, v0, a2, a3);
- WriteInt32ToHeapNumberStub stub2(isolate, a2, v0, a3, a0);
- stub1.GetCode();
- stub2.GetCode();
-}
-
-
-// See comment for class, this does NOT work for int32's that are in Smi range.
-void WriteInt32ToHeapNumberStub::Generate(MacroAssembler* masm) {
- Label max_negative_int;
- // the_int_ has the answer which is a signed int32 but not a Smi.
- // We test for the special value that has a different exponent.
- STATIC_ASSERT(HeapNumber::kSignMask == 0x80000000u);
- // Test sign, and save for later conditionals.
- __ And(sign(), the_int(), Operand(0x80000000u));
- __ Branch(&max_negative_int, eq, the_int(), Operand(0x80000000u));
-
- // Set up the correct exponent in scratch_. All non-Smi int32s have the same.
- // A non-Smi integer is 1.xxx * 2^30 so the exponent is 30 (biased).
- uint32_t non_smi_exponent =
- (HeapNumber::kExponentBias + 30) << HeapNumber::kExponentShift;
- __ li(scratch(), Operand(non_smi_exponent));
- // Set the sign bit in scratch_ if the value was negative.
- __ or_(scratch(), scratch(), sign());
- // Subtract from 0 if the value was negative.
- __ subu(at, zero_reg, the_int());
- __ Movn(the_int(), at, sign());
- // We should be masking the implict first digit of the mantissa away here,
- // but it just ends up combining harmlessly with the last digit of the
- // exponent that happens to be 1. The sign bit is 0 so we shift 10 to get
- // the most significant 1 to hit the last bit of the 12 bit sign and exponent.
- DCHECK(((1 << HeapNumber::kExponentShift) & non_smi_exponent) != 0);
- const int shift_distance = HeapNumber::kNonMantissaBitsInTopWord - 2;
- __ srl(at, the_int(), shift_distance);
- __ or_(scratch(), scratch(), at);
- __ sw(scratch(), FieldMemOperand(the_heap_number(),
- HeapNumber::kExponentOffset));
- __ sll(scratch(), the_int(), 32 - shift_distance);
- __ Ret(USE_DELAY_SLOT);
- __ sw(scratch(), FieldMemOperand(the_heap_number(),
- HeapNumber::kMantissaOffset));
-
- __ bind(&max_negative_int);
- // The max negative int32 is stored as a positive number in the mantissa of
- // a double because it uses a sign bit instead of using two's complement.
- // The actual mantissa bits stored are all 0 because the implicit most
- // significant 1 bit is not stored.
- non_smi_exponent += 1 << HeapNumber::kExponentShift;
- __ li(scratch(), Operand(HeapNumber::kSignMask | non_smi_exponent));
- __ sw(scratch(),
- FieldMemOperand(the_heap_number(), HeapNumber::kExponentOffset));
- __ mov(scratch(), zero_reg);
- __ Ret(USE_DELAY_SLOT);
- __ sw(scratch(),
- FieldMemOperand(the_heap_number(), HeapNumber::kMantissaOffset));
-}
-
-
// Handle the case where the lhs and rhs are the same object.
// Equality is almost reflexive (everything but NaN), so this is a test
// for "identity and not NaN".
@@ -1053,7 +993,6 @@
void CodeStub::GenerateStubsAheadOfTime(Isolate* isolate) {
CEntryStub::GenerateAheadOfTime(isolate);
- WriteInt32ToHeapNumberStub::GenerateFixedRegStubsAheadOfTime(isolate);
StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime(isolate);
StubFailureTrampolineStub::GenerateAheadOfTime(isolate);
ArrayConstructorStubBase::GenerateStubsAheadOfTime(isolate);
diff --git a/src/mips64/code-stubs-mips64.h b/src/mips64/code-stubs-mips64.h
index 6c324bb..3cd6ed6 100644
--- a/src/mips64/code-stubs-mips64.h
+++ b/src/mips64/code-stubs-mips64.h
@@ -73,56 +73,6 @@
DEFINE_PLATFORM_CODE_STUB(RestoreRegistersState, PlatformCodeStub);
};
-// This stub can convert a signed int32 to a heap number (double). It does
-// not work for int32s that are in Smi range! No GC occurs during this stub
-// so you don't have to set up the frame.
-class WriteInt32ToHeapNumberStub : public PlatformCodeStub {
- public:
- WriteInt32ToHeapNumberStub(Isolate* isolate, Register the_int,
- Register the_heap_number, Register scratch,
- Register scratch2)
- : PlatformCodeStub(isolate) {
- minor_key_ = IntRegisterBits::encode(the_int.code()) |
- HeapNumberRegisterBits::encode(the_heap_number.code()) |
- ScratchRegisterBits::encode(scratch.code()) |
- SignRegisterBits::encode(scratch2.code());
- DCHECK(IntRegisterBits::is_valid(the_int.code()));
- DCHECK(HeapNumberRegisterBits::is_valid(the_heap_number.code()));
- DCHECK(ScratchRegisterBits::is_valid(scratch.code()));
- DCHECK(SignRegisterBits::is_valid(scratch2.code()));
- }
-
- static void GenerateFixedRegStubsAheadOfTime(Isolate* isolate);
-
- private:
- void Generate(MacroAssembler* masm);
-
- Register the_int() const {
- return Register::from_code(IntRegisterBits::decode(minor_key_));
- }
-
- Register the_heap_number() const {
- return Register::from_code(HeapNumberRegisterBits::decode(minor_key_));
- }
-
- Register scratch() const {
- return Register::from_code(ScratchRegisterBits::decode(minor_key_));
- }
-
- Register sign() const {
- return Register::from_code(SignRegisterBits::decode(minor_key_));
- }
-
- // Minor key encoding in 16 bits.
- class IntRegisterBits: public BitField<int, 0, 4> {};
- class HeapNumberRegisterBits: public BitField<int, 4, 4> {};
- class ScratchRegisterBits: public BitField<int, 8, 4> {};
- class SignRegisterBits: public BitField<int, 12, 4> {};
-
- DEFINE_NULL_CALL_INTERFACE_DESCRIPTOR();
- DEFINE_CODE_STUB(WriteInt32ToHeapNumber, PlatformCodeStub);
-};
-
class RecordWriteStub: public PlatformCodeStub {
public:
diff --git a/src/mips64/lithium-codegen-mips64.cc b/src/mips64/lithium-codegen-mips64.cc
index 88f6b18..35839e6 100644
--- a/src/mips64/lithium-codegen-mips64.cc
+++ b/src/mips64/lithium-codegen-mips64.cc
@@ -2828,6 +2828,7 @@
__ Daddu(sp, sp, Operand(sp_delta));
}
} else {
+ DCHECK(info()->IsStub()); // Functions would need to drop one more value.
Register reg = ToRegister(instr->parameter_count());
// The argument count parameter is a smi
__ SmiUntag(reg);
diff --git a/src/objects-inl.h b/src/objects-inl.h
index 40ce81a..9564ff0 100644
--- a/src/objects-inl.h
+++ b/src/objects-inl.h
@@ -479,11 +479,6 @@
}
-Handle<Object> MapCacheShape::AsHandle(Isolate* isolate, HashTableKey* key) {
- return key->AsHandle(isolate);
-}
-
-
Handle<Object> CompilationCacheShape::AsHandle(Isolate* isolate,
HashTableKey* key) {
return key->AsHandle(isolate);
@@ -3288,7 +3283,6 @@
CAST_ACCESSOR(JSWeakMap)
CAST_ACCESSOR(JSWeakSet)
CAST_ACCESSOR(Map)
-CAST_ACCESSOR(MapCache)
CAST_ACCESSOR(Name)
CAST_ACCESSOR(NameDictionary)
CAST_ACCESSOR(NormalizedMapCache)
diff --git a/src/objects.cc b/src/objects.cc
index 2b5b567..ab13412 100644
--- a/src/objects.cc
+++ b/src/objects.cc
@@ -14207,8 +14207,6 @@
CompilationCacheShape,
HashTableKey*>;
-template class HashTable<MapCache, MapCacheShape, HashTableKey*>;
-
template class HashTable<ObjectHashTable,
ObjectHashTableShape,
Handle<Object> >;
@@ -15158,28 +15156,6 @@
};
-Object* MapCache::Lookup(FixedArray* array) {
- DisallowHeapAllocation no_alloc;
- StringsKey key(handle(array));
- int entry = FindEntry(&key);
- if (entry == kNotFound) return GetHeap()->undefined_value();
- return get(EntryToIndex(entry) + 1);
-}
-
-
-Handle<MapCache> MapCache::Put(
- Handle<MapCache> map_cache, Handle<FixedArray> array, Handle<Map> value) {
- StringsKey key(array);
-
- Handle<MapCache> new_cache = EnsureCapacity(map_cache, 1, &key);
- int entry = new_cache->FindInsertionEntry(key.Hash());
- new_cache->set(EntryToIndex(entry), *array);
- new_cache->set(EntryToIndex(entry) + 1, *value);
- new_cache->ElementAdded();
- return new_cache;
-}
-
-
template<typename Derived, typename Shape, typename Key>
Handle<Derived> Dictionary<Derived, Shape, Key>::New(
Isolate* isolate,
diff --git a/src/objects.h b/src/objects.h
index 31d0a16..d025b24 100644
--- a/src/objects.h
+++ b/src/objects.h
@@ -3462,44 +3462,6 @@
};
-class MapCacheShape : public BaseShape<HashTableKey*> {
- public:
- static inline bool IsMatch(HashTableKey* key, Object* value) {
- return key->IsMatch(value);
- }
-
- static inline uint32_t Hash(HashTableKey* key) {
- return key->Hash();
- }
-
- static inline uint32_t HashForObject(HashTableKey* key, Object* object) {
- return key->HashForObject(object);
- }
-
- static inline Handle<Object> AsHandle(Isolate* isolate, HashTableKey* key);
-
- static const int kPrefixSize = 0;
- static const int kEntrySize = 2;
-};
-
-
-// MapCache.
-//
-// Maps keys that are a fixed array of unique names to a map.
-// Used for canonicalize maps for object literals.
-class MapCache: public HashTable<MapCache, MapCacheShape, HashTableKey*> {
- public:
- // Find cached value for a name key, otherwise return null.
- Object* Lookup(FixedArray* key);
- static Handle<MapCache> Put(
- Handle<MapCache> map_cache, Handle<FixedArray> key, Handle<Map> value);
- DECLARE_CAST(MapCache)
-
- private:
- DISALLOW_IMPLICIT_CONSTRUCTORS(MapCache);
-};
-
-
template <typename Derived, typename Shape, typename Key>
class Dictionary: public HashTable<Derived, Shape, Key> {
protected:
diff --git a/src/runtime/runtime-literals.cc b/src/runtime/runtime-literals.cc
index c6efd02..8bbe0ee 100644
--- a/src/runtime/runtime-literals.cc
+++ b/src/runtime/runtime-literals.cc
@@ -17,53 +17,22 @@
static Handle<Map> ComputeObjectLiteralMap(
Handle<Context> context, Handle<FixedArray> constant_properties,
bool* is_result_from_cache) {
- Isolate* isolate = context->GetIsolate();
int properties_length = constant_properties->length();
int number_of_properties = properties_length / 2;
- // Check that there are only internal strings and array indices among keys.
- int number_of_string_keys = 0;
+
for (int p = 0; p != properties_length; p += 2) {
Object* key = constant_properties->get(p);
uint32_t element_index = 0;
- if (key->IsInternalizedString()) {
- number_of_string_keys++;
- } else if (key->ToArrayIndex(&element_index)) {
+ if (key->ToArrayIndex(&element_index)) {
// An index key does not require space in the property backing store.
number_of_properties--;
- } else {
- // Bail out as a non-internalized-string non-index key makes caching
- // impossible.
- // DCHECK to make sure that the if condition after the loop is false.
- DCHECK(number_of_string_keys != number_of_properties);
- break;
}
}
- // If we only have internalized strings and array indices among keys then we
- // can use the map cache in the native context.
- const int kMaxKeys = 10;
- if ((number_of_string_keys == number_of_properties) &&
- (number_of_string_keys < kMaxKeys)) {
- // Create the fixed array with the key.
- Handle<FixedArray> keys =
- isolate->factory()->NewFixedArray(number_of_string_keys);
- if (number_of_string_keys > 0) {
- int index = 0;
- for (int p = 0; p < properties_length; p += 2) {
- Object* key = constant_properties->get(p);
- if (key->IsInternalizedString()) {
- keys->set(index++, key);
- }
- }
- DCHECK(index == number_of_string_keys);
- }
- *is_result_from_cache = true;
- return isolate->factory()->ObjectLiteralMapFromCache(context, keys);
- }
- *is_result_from_cache = false;
- return Map::Create(isolate, number_of_properties);
+ Isolate* isolate = context->GetIsolate();
+ return isolate->factory()->ObjectLiteralMapFromCache(
+ context, number_of_properties, is_result_from_cache);
}
-
MUST_USE_RESULT static MaybeHandle<Object> CreateLiteralBoilerplate(
Isolate* isolate, Handle<FixedArray> literals,
Handle<FixedArray> constant_properties);
@@ -169,7 +138,6 @@
boilerplate->map()->unused_property_fields(),
"FastLiteral");
}
-
return boilerplate;
}
diff --git a/src/version.cc b/src/version.cc
index 6136739..fdcb4fd 100644
--- a/src/version.cc
+++ b/src/version.cc
@@ -34,7 +34,7 @@
// system so their names cannot be changed without changing the scripts.
#define MAJOR_VERSION 3
#define MINOR_VERSION 31
-#define BUILD_NUMBER 0
+#define BUILD_NUMBER 1
#define PATCH_LEVEL 0
// Use 1 for candidates and 0 otherwise.
// (Boolean macro values are not supported by all preprocessors.)
diff --git a/src/x64/lithium-codegen-x64.cc b/src/x64/lithium-codegen-x64.cc
index 7e482ee..8ec1af5 100644
--- a/src/x64/lithium-codegen-x64.cc
+++ b/src/x64/lithium-codegen-x64.cc
@@ -2822,6 +2822,7 @@
__ Ret((ToInteger32(instr->constant_parameter_count()) + 1) * kPointerSize,
rcx);
} else {
+ DCHECK(info()->IsStub()); // Functions would need to drop one more value.
Register reg = ToRegister(instr->parameter_count());
// The argument count parameter is a smi
__ SmiToInteger32(reg, reg);
diff --git a/src/x87/lithium-codegen-x87.cc b/src/x87/lithium-codegen-x87.cc
index 284a666..2b6c265 100644
--- a/src/x87/lithium-codegen-x87.cc
+++ b/src/x87/lithium-codegen-x87.cc
@@ -3050,6 +3050,7 @@
}
__ Ret((parameter_count + extra_value_count) * kPointerSize, ecx);
} else {
+ DCHECK(info()->IsStub()); // Functions would need to drop one more value.
Register reg = ToRegister(instr->parameter_count());
// The argument count parameter is a smi
__ SmiUntag(reg);
diff --git a/test/cctest/compiler/graph-builder-tester.h b/test/cctest/compiler/graph-builder-tester.h
index 1bc5be7..772de4d 100644
--- a/test/cctest/compiler/graph-builder-tester.h
+++ b/test/cctest/compiler/graph-builder-tester.h
@@ -61,6 +61,7 @@
explicit GraphAndBuilders(Zone* zone)
: main_graph_(new (zone) Graph(zone)),
main_common_(zone),
+ main_machine_(zone),
main_simplified_(zone) {}
protected:
diff --git a/test/cctest/compiler/test-instruction.cc b/test/cctest/compiler/test-instruction.cc
index 425a46c..3e2b857 100644
--- a/test/cctest/compiler/test-instruction.cc
+++ b/test/cctest/compiler/test-instruction.cc
@@ -33,6 +33,7 @@
info(static_cast<HydrogenCodeStub*>(NULL), main_isolate()),
linkage(zone(), &info),
common(zone()),
+ machine(zone()),
code(NULL) {}
~InstructionTester() { delete code; }
diff --git a/test/cctest/compiler/test-js-constant-cache.cc b/test/cctest/compiler/test-js-constant-cache.cc
index 2a591c1..8588f66 100644
--- a/test/cctest/compiler/test-js-constant-cache.cc
+++ b/test/cctest/compiler/test-js-constant-cache.cc
@@ -22,7 +22,7 @@
main_common_(zone),
main_javascript_(zone),
main_typer_(&main_graph_, MaybeHandle<Context>()),
- main_machine_() {}
+ main_machine_(zone) {}
Graph main_graph_;
CommonOperatorBuilder main_common_;
JSOperatorBuilder main_javascript_;
diff --git a/test/cctest/compiler/test-js-context-specialization.cc b/test/cctest/compiler/test-js-context-specialization.cc
index 264fee0..f04e327 100644
--- a/test/cctest/compiler/test-js-context-specialization.cc
+++ b/test/cctest/compiler/test-js-context-specialization.cc
@@ -21,7 +21,7 @@
: DirectGraphBuilder(new (main_zone()) Graph(main_zone())),
common_(main_zone()),
javascript_(main_zone()),
- machine_(),
+ machine_(main_zone()),
simplified_(main_zone()),
jsgraph_(graph(), common(), &javascript_, &machine_),
info_(main_isolate(), main_zone()) {}
diff --git a/test/cctest/compiler/test-js-typed-lowering.cc b/test/cctest/compiler/test-js-typed-lowering.cc
index 28cc90a..7c04cf3 100644
--- a/test/cctest/compiler/test-js-typed-lowering.cc
+++ b/test/cctest/compiler/test-js-typed-lowering.cc
@@ -21,6 +21,7 @@
binop(NULL),
unop(NULL),
javascript(main_zone()),
+ machine(main_zone()),
simplified(main_zone()),
common(main_zone()),
graph(main_zone()),
diff --git a/test/cctest/compiler/test-machine-operator-reducer.cc b/test/cctest/compiler/test-machine-operator-reducer.cc
index 115967a..ec14cd2 100644
--- a/test/cctest/compiler/test-machine-operator-reducer.cc
+++ b/test/cctest/compiler/test-machine-operator-reducer.cc
@@ -56,6 +56,7 @@
: isolate(main_isolate()),
binop(NULL),
unop(NULL),
+ machine(main_zone()),
common(main_zone()),
graph(main_zone()),
javascript(main_zone()),
diff --git a/test/cctest/compiler/test-schedule.cc b/test/cctest/compiler/test-schedule.cc
index 7b4f975..d2a52c7 100644
--- a/test/cctest/compiler/test-schedule.cc
+++ b/test/cctest/compiler/test-schedule.cc
@@ -163,7 +163,8 @@
Schedule schedule(scope.main_zone());
Graph graph(scope.main_zone());
CommonOperatorBuilder common(scope.main_zone());
- MachineOperatorBuilder machine;
+ // TODO(titzer): use test operators.
+ MachineOperatorBuilder machine(scope.main_zone());
Node* start = graph.NewNode(common.Start(0));
graph.SetStart(start);
diff --git a/test/cctest/compiler/test-scheduler.cc b/test/cctest/compiler/test-scheduler.cc
index 659aacd..f835265 100644
--- a/test/cctest/compiler/test-scheduler.cc
+++ b/test/cctest/compiler/test-scheduler.cc
@@ -3,7 +3,6 @@
// found in the LICENSE file.
#include "src/v8.h"
-#include "test/cctest/cctest.h"
#include "src/compiler/access-builder.h"
#include "src/compiler/common-operator.h"
@@ -12,17 +11,21 @@
#include "src/compiler/graph.h"
#include "src/compiler/graph-visualizer.h"
#include "src/compiler/js-operator.h"
-#include "src/compiler/machine-operator.h"
#include "src/compiler/node.h"
+#include "src/compiler/opcodes.h"
#include "src/compiler/operator.h"
#include "src/compiler/schedule.h"
#include "src/compiler/scheduler.h"
#include "src/compiler/simplified-operator.h"
#include "src/compiler/verifier.h"
+#include "test/cctest/cctest.h"
using namespace v8::internal;
using namespace v8::internal::compiler;
+Operator kIntAdd(IrOpcode::kInt32Add, Operator::kPure, "Int32Add", 2, 0, 0, 1,
+ 0, 0);
+
// TODO(titzer): pull RPO tests out to their own file.
static void CheckRPONumbers(BasicBlockVector* order, size_t expected,
bool loops_allowed) {
@@ -1571,7 +1574,6 @@
Graph graph(scope.main_zone());
CommonOperatorBuilder common_builder(scope.main_zone());
JSOperatorBuilder js_builder(scope.main_zone());
- MachineOperatorBuilder machine_builder;
const Operator* op;
Handle<HeapObject> object =
@@ -1607,7 +1609,7 @@
Node* n20 = graph.NewNode(op, nil, nil, nil, nil, nil);
USE(n20);
n20->ReplaceInput(0, n9);
- op = machine_builder.Int32Add();
+ op = &kIntAdd;
Node* n19 = graph.NewNode(op, nil, nil);
USE(n19);
op = common_builder.Phi(kMachAnyTagged, 2);
@@ -1731,7 +1733,6 @@
HandleAndZoneScope scope;
Graph graph(scope.main_zone());
CommonOperatorBuilder common(scope.main_zone());
- MachineOperatorBuilder machine;
Node* start = graph.NewNode(common.Start(2));
graph.SetStart(start);
@@ -1740,7 +1741,7 @@
Node* p1 = graph.NewNode(common.Parameter(1), start);
Node* d1 = CreateDiamond(&graph, &common, p0);
Node* d2 = CreateDiamond(&graph, &common, p1);
- Node* add = graph.NewNode(machine.Int32Add(), d1, d2);
+ Node* add = graph.NewNode(&kIntAdd, d1, d2);
Node* ret = graph.NewNode(common.Return(), add, start, start);
Node* end = graph.NewNode(common.End(), ret, start);
@@ -1754,7 +1755,6 @@
HandleAndZoneScope scope;
Graph graph(scope.main_zone());
CommonOperatorBuilder common(scope.main_zone());
- MachineOperatorBuilder machine;
Node* start = graph.NewNode(common.Start(2));
graph.SetStart(start);
@@ -1763,7 +1763,7 @@
Node* p1 = graph.NewNode(common.Parameter(1), start);
Node* d1 = CreateDiamond(&graph, &common, p0);
Node* d2 = CreateDiamond(&graph, &common, p1);
- Node* add = graph.NewNode(machine.Int32Add(), d1, d2);
+ Node* add = graph.NewNode(&kIntAdd, d1, d2);
Node* d3 = CreateDiamond(&graph, &common, add);
Node* ret = graph.NewNode(common.Return(), d3, start, start);
Node* end = graph.NewNode(common.End(), ret, start);
@@ -1779,7 +1779,6 @@
Graph graph(scope.main_zone());
CommonOperatorBuilder common(scope.main_zone());
SimplifiedOperatorBuilder simplified(scope.main_zone());
- MachineOperatorBuilder machine;
Node* start = graph.NewNode(common.Start(2));
graph.SetStart(start);
@@ -1821,7 +1820,6 @@
Graph graph(scope.main_zone());
CommonOperatorBuilder common(scope.main_zone());
SimplifiedOperatorBuilder simplified(scope.main_zone());
- MachineOperatorBuilder machine;
Node* start = graph.NewNode(common.Start(2));
graph.SetStart(start);
@@ -1837,7 +1835,7 @@
Node* ind = graph.NewNode(common.Phi(kMachAnyTagged, 2), p0, p0, loop);
// TODO(mstarzinger): Make scheduler deal with non-empty loops here.
- // Node* add = graph.NewNode(machine.IntAdd(), ind, fv);
+ // Node* add = graph.NewNode(&kIntAdd, ind, fv);
Node* br1 = graph.NewNode(common.Branch(), ind, loop);
Node* t1 = graph.NewNode(common.IfTrue(), br1);
@@ -1863,7 +1861,6 @@
Graph graph(scope.main_zone());
CommonOperatorBuilder common(scope.main_zone());
SimplifiedOperatorBuilder simplified(scope.main_zone());
- MachineOperatorBuilder machine;
Node* start = graph.NewNode(common.Start(2));
graph.SetStart(start);
@@ -1873,7 +1870,7 @@
Node* c = graph.NewNode(common.Int32Constant(7));
Node* loop = graph.NewNode(common.Loop(2), start, start);
Node* ind = graph.NewNode(common.Phi(kMachAnyTagged, 2), p0, p0, loop);
- Node* add = graph.NewNode(machine.IntAdd(), ind, c);
+ Node* add = graph.NewNode(&kIntAdd, ind, c);
Node* br = graph.NewNode(common.Branch(), add, loop);
Node* t = graph.NewNode(common.IfTrue(), br);
@@ -1902,7 +1899,6 @@
Graph graph(scope.main_zone());
CommonOperatorBuilder common(scope.main_zone());
SimplifiedOperatorBuilder simplified(scope.main_zone());
- MachineOperatorBuilder machine;
Node* start = graph.NewNode(common.Start(2));
graph.SetStart(start);
@@ -1919,7 +1915,7 @@
Node* m1 = graph.NewNode(common.Merge(2), t1, f1);
Node* phi1 = graph.NewNode(common.Phi(kMachAnyTagged, 2), c, ind, m1);
- Node* add = graph.NewNode(machine.IntAdd(), ind, phi1);
+ Node* add = graph.NewNode(&kIntAdd, ind, phi1);
Node* br = graph.NewNode(common.Branch(), add, loop);
Node* t = graph.NewNode(common.IfTrue(), br);
@@ -1942,7 +1938,6 @@
Graph graph(scope.main_zone());
CommonOperatorBuilder common(scope.main_zone());
SimplifiedOperatorBuilder simplified(scope.main_zone());
- MachineOperatorBuilder machine;
Node* start = graph.NewNode(common.Start(2));
graph.SetStart(start);
diff --git a/test/cctest/compiler/test-simplified-lowering.cc b/test/cctest/compiler/test-simplified-lowering.cc
index 5ddc10d..7fe4933 100644
--- a/test/cctest/compiler/test-simplified-lowering.cc
+++ b/test/cctest/compiler/test-simplified-lowering.cc
@@ -744,6 +744,17 @@
}
}
+ Node* ExampleWithTypeAndRep(Type* type, MachineType mach_type) {
+ FieldAccess access = {kUntaggedBase, 0, Handle<Name>::null(), type,
+ mach_type};
+ // TODO(titzer): using loads here just to force the representation is ugly.
+ Node* node = graph()->NewNode(simplified()->LoadField(access),
+ jsgraph.IntPtrConstant(0), graph()->start(),
+ graph()->start());
+ NodeProperties::SetBounds(node, Bounds(type));
+ return node;
+ }
+
Node* Use(Node* node, MachineType type) {
if (type & kTypeInt32) {
return graph()->NewNode(machine()->Int32LessThan(), node,
@@ -757,6 +768,9 @@
} else if (type & kRepWord64) {
return graph()->NewNode(machine()->Int64LessThan(), node,
Int64Constant(1));
+ } else if (type & kRepWord32) {
+ return graph()->NewNode(machine()->Word32Equal(), node,
+ jsgraph.Int32Constant(1));
} else {
return graph()->NewNode(simplified()->ReferenceEqual(Type::Any()), node,
jsgraph.TrueConstant());
@@ -1060,9 +1074,7 @@
TEST(LowerNumberToInt32_to_TruncateFloat64ToInt32) {
// NumberToInt32(x: kRepFloat64) used as kMachInt32
TestingGraph t(Type::Number());
- Node* p0 = t.ExampleWithOutput(kMachFloat64);
- // TODO(titzer): run the typer here, or attach machine type to param.
- NodeProperties::SetBounds(p0, Bounds(Type::Number()));
+ Node* p0 = t.ExampleWithTypeAndRep(Type::Number(), kMachFloat64);
Node* trunc = t.graph()->NewNode(t.simplified()->NumberToInt32(), p0);
Node* use = t.Use(trunc, kMachInt32);
t.Return(use);
@@ -1086,17 +1098,6 @@
}
-TEST(LowerNumberToInt32_to_ChangeFloat64ToTagged) {
- // TODO(titzer): NumberToInt32(x: kRepFloat64 | kTypeInt32) used as kRepTagged
-}
-
-
-TEST(LowerNumberToInt32_to_ChangeFloat64ToInt32) {
- // TODO(titzer): NumberToInt32(x: kRepFloat64 | kTypeInt32) used as kRepWord32
- // | kTypeInt32
-}
-
-
TEST(LowerNumberToUint32_to_nop) {
// NumberToUint32(x: kRepTagged | kTypeUint32) used as kRepTagged
TestingGraph t(Type::Unsigned32());
@@ -1159,20 +1160,67 @@
}
-TEST(LowerNumberToUint32_to_ChangeFloat64ToTagged) {
- // TODO(titzer): NumberToUint32(x: kRepFloat64 | kTypeUint32) used as
- // kRepTagged
+TEST(LowerNumberToUint32_to_TruncateFloat64ToInt32_uint32) {
+ // NumberToUint32(x: kRepFloat64) used as kRepWord32
+ TestingGraph t(Type::Unsigned32());
+ Node* input = t.ExampleWithTypeAndRep(Type::Number(), kMachFloat64);
+ Node* trunc = t.graph()->NewNode(t.simplified()->NumberToUint32(), input);
+ Node* use = t.Use(trunc, kRepWord32);
+ t.Return(use);
+ t.Lower();
+ CheckChangeOf(IrOpcode::kTruncateFloat64ToInt32, input, use->InputAt(0));
}
-TEST(LowerNumberToUint32_to_ChangeFloat64ToUint32) {
- // TODO(titzer): NumberToUint32(x: kRepFloat64 | kTypeUint32) used as
- // kRepWord32
+TEST(LowerNumberToUI32_of_Float64_used_as_word32) {
+ // NumberTo(Int,Uint)32(x: kRepFloat64 | kType(Int,Uint)32) used as
+ // kType(Int,Uint)32 | kRepWord32
+ Type* types[] = {Type::Signed32(), Type::Unsigned32()};
+ MachineType mach[] = {kTypeInt32, kTypeUint32, kMachNone};
+
+ for (int i = 0; i < 2; i++) {
+ for (int u = 0; u < 3; u++) {
+ TestingGraph t(types[i]);
+ Node* input = t.ExampleWithTypeAndRep(
+ types[i], static_cast<MachineType>(kRepFloat64 | mach[i]));
+ const Operator* op = i == 0 ? t.simplified()->NumberToInt32()
+ : t.simplified()->NumberToUint32();
+ Node* trunc = t.graph()->NewNode(op, input);
+ Node* use = t.Use(trunc, static_cast<MachineType>(kRepWord32 | mach[u]));
+ t.Return(use);
+ t.Lower();
+ IrOpcode::Value opcode = i == 0 ? IrOpcode::kChangeFloat64ToInt32
+ : IrOpcode::kChangeFloat64ToUint32;
+ CheckChangeOf(opcode, input, use->InputAt(0));
+ }
+ }
}
-TEST(LowerNumberToUint32_to_TruncateFloat64ToUint32) {
- // TODO(titzer): NumberToUint32(x: kRepFloat64) used as kRepWord32
+TEST(LowerNumberToUI32_of_Float64_used_as_tagged) {
+ // NumberTo(Int,Uint)32(x: kRepFloat64 | kType(Int,Uint)32) used as
+ // kType(Int,Uint)32 | kRepTagged
+ Type* types[] = {Type::Signed32(), Type::Unsigned32(), Type::Any()};
+ MachineType mach[] = {kTypeInt32, kTypeUint32, kMachNone};
+
+ for (int i = 0; i < 2; i++) {
+ for (int u = 0; u < 3; u++) {
+ TestingGraph t(types[i]);
+ Node* input = t.ExampleWithTypeAndRep(
+ types[i], static_cast<MachineType>(kRepFloat64 | mach[i]));
+ const Operator* op = i == 0 ? t.simplified()->NumberToInt32()
+ : t.simplified()->NumberToUint32();
+ Node* trunc = t.graph()->NewNode(op, input);
+ // TODO(titzer): we use the store here to force the representation.
+ FieldAccess access = {kTaggedBase, 0, Handle<Name>(), types[u],
+ static_cast<MachineType>(mach[u] | kRepTagged)};
+ Node* store = t.graph()->NewNode(t.simplified()->StoreField(access), t.p0,
+ trunc, t.start, t.start);
+ t.Effect(store);
+ t.Lower();
+ CheckChangeOf(IrOpcode::kChangeFloat64ToTagged, input, store->InputAt(2));
+ }
+ }
}
diff --git a/test/cctest/test-api.cc b/test/cctest/test-api.cc
index 5fa2a2f..d4fbb04 100644
--- a/test/cctest/test-api.cc
+++ b/test/cctest/test-api.cc
@@ -21211,29 +21211,40 @@
}
+static int CountLiveMapsInMapCache(i::Context* context) {
+ i::FixedArray* map_cache = i::FixedArray::cast(context->map_cache());
+ int length = map_cache->length();
+ int count = 0;
+ for (int i = 0; i < length; i++) {
+ i::Object* value = map_cache->get(i);
+ if (value->IsWeakCell() && !i::WeakCell::cast(value)->cleared()) count++;
+ }
+ return count;
+}
+
+
THREADED_TEST(Regress1516) {
LocalContext context;
v8::HandleScope scope(context->GetIsolate());
+ // Object with 20 properties is not a common case, so it should be removed
+ // from the cache after GC.
{ v8::HandleScope temp_scope(context->GetIsolate());
- CompileRun("({'a': 0})");
+ CompileRun(
+ "({"
+ "'a00': 0, 'a01': 0, 'a02': 0, 'a03': 0, 'a04': 0, "
+ "'a05': 0, 'a06': 0, 'a07': 0, 'a08': 0, 'a09': 0, "
+ "'a10': 0, 'a11': 0, 'a12': 0, 'a13': 0, 'a14': 0, "
+ "'a15': 0, 'a16': 0, 'a17': 0, 'a18': 0, 'a19': 0, "
+ "})");
}
- int elements;
- { i::MapCache* map_cache =
- i::MapCache::cast(CcTest::i_isolate()->context()->map_cache());
- elements = map_cache->NumberOfElements();
- CHECK_LE(1, elements);
- }
+ int elements = CountLiveMapsInMapCache(CcTest::i_isolate()->context());
+ CHECK_LE(1, elements);
- CcTest::heap()->CollectAllGarbage(
- i::Heap::kAbortIncrementalMarkingMask);
- { i::Object* raw_map_cache = CcTest::i_isolate()->context()->map_cache();
- if (raw_map_cache != CcTest::heap()->undefined_value()) {
- i::MapCache* map_cache = i::MapCache::cast(raw_map_cache);
- CHECK_GT(elements, map_cache->NumberOfElements());
- }
- }
+ CcTest::heap()->CollectAllGarbage(i::Heap::kAbortIncrementalMarkingMask);
+
+ CHECK_GT(elements, CountLiveMapsInMapCache(CcTest::i_isolate()->context()));
}
diff --git a/test/cctest/test-decls.cc b/test/cctest/test-decls.cc
index 0c13a68..9ebaeec 100644
--- a/test/cctest/test-decls.cc
+++ b/test/cctest/test-decls.cc
@@ -683,33 +683,177 @@
TEST(CrossScriptReferencesHarmony) {
- i::FLAG_use_strict = true;
i::FLAG_harmony_scoping = true;
i::FLAG_harmony_modules = true;
v8::Isolate* isolate = CcTest::isolate();
HandleScope scope(isolate);
+ // Check that simple cross-script global scope access works.
const char* decs[] = {
- "var x = 1; x", "x", "this.x",
- "function x() { return 1 }; x()", "x()", "this.x()",
- "let x = 1; x", "x", "this.x",
- "const x = 1; x", "x", "this.x",
- "module x { export let a = 1 }; x.a", "x.a", "this.x.a",
+ "'use strict'; var x = 1; x", "x",
+ "'use strict'; function x() { return 1 }; x()", "x()",
+ "'use strict'; let x = 1; x", "x",
+ "'use strict'; const x = 1; x", "x",
+ "'use strict'; module x { export let a = 1 }; x.a", "x.a",
NULL
};
- for (int i = 0; decs[i] != NULL; i += 3) {
+ for (int i = 0; decs[i] != NULL; i += 2) {
SimpleContext context;
context.Check(decs[i], EXPECT_RESULT, Number::New(isolate, 1));
context.Check(decs[i+1], EXPECT_RESULT, Number::New(isolate, 1));
- // TODO(rossberg): The current ES6 draft spec does not reflect lexical
- // bindings on the global object. However, this will probably change, in
- // which case we reactivate the following test.
- if (i/3 < 2) {
- context.Check(decs[i+2], EXPECT_RESULT, Number::New(isolate, 1));
- }
}
+
+ // Check that cross-script global scope access works with late declarations.
+ {
+ SimpleContext context;
+ context.Check("function d0() { return x0 }", // dynamic lookup
+ EXPECT_RESULT, Undefined(isolate));
+ context.Check("this.x0 = -1;"
+ "d0()",
+ EXPECT_RESULT, Number::New(isolate, -1));
+ context.Check("'use strict';"
+ "function f0() { let y = 10; return x0 + y }"
+ "function g0() { let y = 10; return eval('x0 + y') }"
+ "function h0() { let y = 10; return (1,eval)('x0') + y }"
+ "x0 + f0() + g0() + h0()",
+ EXPECT_RESULT, Number::New(isolate, 26));
+
+ context.Check("'use strict';"
+ "let x1 = 1;"
+ "function f1() { let y = 10; return x1 + y }"
+ "function g1() { let y = 10; return eval('x1 + y') }"
+ "function h1() { let y = 10; return (1,eval)('x1') + y }"
+ "function i1() { "
+ " let y = 10; return (typeof x2 === 'undefined' ? 0 : 2) + y"
+ "}"
+ "function j1() { let y = 10; return eval('x2 + y') }"
+ "function k1() { let y = 10; return (1,eval)('x2') + y }"
+ "function cl() { "
+ " let y = 10; "
+ " return { "
+ " f: function(){ return x1 + y },"
+ " g: function(){ return eval('x1 + y') },"
+ " h: function(){ return (1,eval)('x1') + y },"
+ " i: function(){"
+ " return (typeof x2 == 'undefined' ? 0 : 2) + y"
+ " },"
+ " j: function(){ return eval('x2 + y') },"
+ " k: function(){ return (1,eval)('x2') + y },"
+ " }"
+ "}"
+ "let o = cl();"
+ "x1 + eval('x1') + (1,eval)('x1') + f1() + g1() + h1();",
+ EXPECT_RESULT, Number::New(isolate, 36));
+ context.Check("x1 + eval('x1') + (1,eval)('x1') + f1() + g1() + h1();",
+ EXPECT_RESULT, Number::New(isolate, 36));
+ context.Check("o.f() + o.g() + o.h();",
+ EXPECT_RESULT, Number::New(isolate, 33));
+ context.Check("i1() + o.i();",
+ EXPECT_RESULT, Number::New(isolate, 20));
+
+ context.Check("'use strict';"
+ "let x2 = 2;"
+ "function f2() { let y = 20; return x2 + y }"
+ "function g2() { let y = 20; return eval('x2 + y') }"
+ "function h2() { let y = 20; return (1,eval)('x2') + y }"
+ "function i2() { let y = 20; return x1 + y }"
+ "function j2() { let y = 20; return eval('x1 + y') }"
+ "function k2() { let y = 20; return (1,eval)('x1') + y }"
+ "x2 + eval('x2') + (1,eval)('x2') + f2() + g2() + h2();",
+ EXPECT_RESULT, Number::New(isolate, 72));
+ context.Check("x1 + eval('x1') + (1,eval)('x1') + f1() + g1() + h1();",
+ EXPECT_RESULT, Number::New(isolate, 36));
+ context.Check("i1() + j1() + k1();",
+ EXPECT_RESULT, Number::New(isolate, 36));
+ context.Check("i2() + j2() + k2();",
+ EXPECT_RESULT, Number::New(isolate, 63));
+ context.Check("o.f() + o.g() + o.h();",
+ EXPECT_RESULT, Number::New(isolate, 33));
+ context.Check("o.i() + o.j() + o.k();",
+ EXPECT_RESULT, Number::New(isolate, 36));
+ context.Check("i1() + o.i();",
+ EXPECT_RESULT, Number::New(isolate, 24));
+
+ context.Check("'use strict';"
+ "let x0 = 100;"
+ "x0 + eval('x0') + (1,eval)('x0') + "
+ " d0() + f0() + g0() + h0();",
+ EXPECT_RESULT, Number::New(isolate, 730));
+ context.Check("x0 + eval('x0') + (1,eval)('x0') + "
+ " d0() + f0() + g0() + h0();",
+ EXPECT_RESULT, Number::New(isolate, 730));
+ context.Check("delete this.x0;"
+ "x0 + eval('x0') + (1,eval)('x0') + "
+ " d0() + f0() + g0() + h0();",
+ EXPECT_RESULT, Number::New(isolate, 730));
+ context.Check("this.x1 = 666;"
+ "x1 + eval('x1') + (1,eval)('x1') + f1() + g1() + h1();",
+ EXPECT_RESULT, Number::New(isolate, 36));
+ context.Check("delete this.x1;"
+ "x1 + eval('x1') + (1,eval)('x1') + f1() + g1() + h1();",
+ EXPECT_RESULT, Number::New(isolate, 36));
+ }
+
+ // Check that caching does respect scopes.
+ {
+ SimpleContext context;
+ const char* script1 = "(function(){ return y1 })()";
+ const char* script2 = "(function(){ return y2 })()";
+
+ context.Check(script1, EXPECT_EXCEPTION);
+ context.Check("this.y1 = 1; this.y2 = 2; 0;",
+ EXPECT_RESULT, Number::New(isolate, 0));
+ context.Check(script1,
+ EXPECT_RESULT, Number::New(isolate, 1));
+ context.Check("'use strict'; let y1 = 3; 0;",
+ EXPECT_RESULT, Number::New(isolate, 0));
+ // TODO(dslomov): still returns 1 not 3
+ // context.Check(script1,
+ // EXPECT_RESULT, Number::New(isolate, 3));
+ context.Check("y1 = 4;",
+ EXPECT_RESULT, Number::New(isolate, 4));
+ // TODO(dslomov): still returns 1 not 4
+ // context.Check(script1,
+ // EXPECT_RESULT, Number::New(isolate, 4));
+
+ context.Check(script2,
+ EXPECT_RESULT, Number::New(isolate, 2));
+ context.Check("'use strict'; let y2 = 5; 0;",
+ EXPECT_RESULT, Number::New(isolate, 0));
+ // TODO(dslomov): still returns 1 not 4
+ // context.Check(script1,
+ // EXPECT_RESULT, Number::New(isolate, 4));
+ // TODO(dslomov): still returns 2 not 5
+ // context.Check(script2,
+ // EXPECT_RESULT, Number::New(isolate, 5));
+ }
+}
+
+
+TEST(GlobalLexicalOSR) {
+ i::FLAG_use_strict = true;
+ i::FLAG_harmony_scoping = true;
+ i::FLAG_harmony_modules = true;
+
+ v8::Isolate* isolate = CcTest::isolate();
+ HandleScope scope(isolate);
+ SimpleContext context;
+
+ context.Check("'use strict';"
+ "let x = 1; x;",
+ EXPECT_RESULT, Number::New(isolate, 1));
+ context.Check("'use strict';"
+ "let y = 2*x;"
+ "++x;"
+ "let z = 0;"
+ "const limit = 100000;"
+ "for (var i = 0; i < limit; ++i) {"
+ " z += x + y;"
+ "}"
+ "z;",
+ EXPECT_RESULT, Number::New(isolate, 400000));
}
diff --git a/test/mjsunit/mod-range.js b/test/mjsunit/mod-range.js
new file mode 100644
index 0000000..0cded89
--- /dev/null
+++ b/test/mjsunit/mod-range.js
@@ -0,0 +1,79 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+function g1(i) {
+ var x = i * 1;
+ return (x >>> 0) % 1000000000000;
+}
+
+function g2(i) {
+ var x = i * 1;
+ return ((x >>> 0) % 1000000000000) | 0;
+}
+
+function test1() {
+ assertEquals(2294967296, g1(-2000000000));
+ assertEquals(2294967295, g1(-2000000001));
+ assertEquals(2294967290, g1(-2000000006));
+
+ assertEquals(2147483651, g1(-2147483645));
+ assertEquals(2147483650, g1(-2147483646));
+ assertEquals(2147483649, g1(-2147483647));
+ assertEquals(2147483648, g1(-2147483648));
+ assertEquals(2147483647, g1(-2147483649));
+
+ assertEquals(3000000000, g1(3000000000));
+ assertEquals(3000000001, g1(3000000001));
+ assertEquals(3000000002, g1(3000000002));
+
+ assertEquals(4000000000, g1(4000000000));
+ assertEquals(4000400001, g1(4000400001));
+ assertEquals(4000400002, g1(4000400002));
+
+ assertEquals(3, g1(4294967299));
+ assertEquals(2, g1(4294967298));
+ assertEquals(1, g1(4294967297));
+ assertEquals(0, g1(4294967296));
+ assertEquals(4294967295, g1(4294967295));
+ assertEquals(4294967294, g1(4294967294));
+ assertEquals(4294967293, g1(4294967293));
+ assertEquals(4294967292, g1(4294967292));
+}
+
+%NeverOptimizeFunction(test1);
+test1();
+
+function test2() {
+ assertEquals(-2000000000, g2(-2000000000));
+ assertEquals(-2000000001, g2(-2000000001));
+ assertEquals(-2000000006, g2(-2000000006));
+
+ assertEquals(-2147483645, g2(-2147483645));
+ assertEquals(-2147483646, g2(-2147483646));
+ assertEquals(-2147483647, g2(-2147483647));
+ assertEquals(-2147483648, g2(-2147483648));
+ assertEquals(2147483647, g2(-2147483649));
+
+ assertEquals(-1294967296, g2(3000000000));
+ assertEquals(-1294967295, g2(3000000001));
+ assertEquals(-1294967294, g2(3000000002));
+
+ assertEquals(-294967296, g2(4000000000));
+ assertEquals(-294567295, g2(4000400001));
+ assertEquals(-294567294, g2(4000400002));
+
+ assertEquals(3, g2(4294967299));
+ assertEquals(2, g2(4294967298));
+ assertEquals(1, g2(4294967297));
+ assertEquals(0, g2(4294967296));
+ assertEquals(-1, g2(4294967295));
+ assertEquals(-2, g2(4294967294));
+ assertEquals(-3, g2(4294967293));
+ assertEquals(-4, g2(4294967292));
+}
+
+%NeverOptimizeFunction(test2);
+test2();
diff --git a/test/mjsunit/regress/regress-crbug-431602.js b/test/mjsunit/regress/regress-crbug-431602.js
new file mode 100644
index 0000000..2467aaf
--- /dev/null
+++ b/test/mjsunit/regress/regress-crbug-431602.js
@@ -0,0 +1,23 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --always-opt
+
+var heap_number_producer = {y:1.5};
+heap_number_producer.y = 0;
+var heap_number_zero = heap_number_producer.y;
+var non_constant_eight = {};
+non_constant_eight = 8;
+
+function BreakIt() {
+ return heap_number_zero | (1 | non_constant_eight);
+}
+
+function expose(a, b, c) {
+ return b;
+}
+
+assertEquals(9, expose(8, 9, 10));
+assertEquals(9, expose(8, BreakIt(), 10));
+assertEquals(9, BreakIt());
diff --git a/test/unittests/compiler/change-lowering-unittest.cc b/test/unittests/compiler/change-lowering-unittest.cc
index 17d6513..7f5ec8f 100644
--- a/test/unittests/compiler/change-lowering-unittest.cc
+++ b/test/unittests/compiler/change-lowering-unittest.cc
@@ -66,7 +66,7 @@
}
Reduction Reduce(Node* node) {
- MachineOperatorBuilder machine(WordRepresentation());
+ MachineOperatorBuilder machine(zone(), WordRepresentation());
JSOperatorBuilder javascript(zone());
JSGraph jsgraph(graph(), common(), &javascript, &machine);
CompilationInfo info(isolate(), zone());
diff --git a/test/unittests/compiler/js-builtin-reducer-unittest.cc b/test/unittests/compiler/js-builtin-reducer-unittest.cc
index 0a0d8d6..48ad1b7 100644
--- a/test/unittests/compiler/js-builtin-reducer-unittest.cc
+++ b/test/unittests/compiler/js-builtin-reducer-unittest.cc
@@ -23,7 +23,7 @@
protected:
Reduction Reduce(Node* node, MachineOperatorBuilder::Flags flags =
MachineOperatorBuilder::Flag::kNoFlags) {
- MachineOperatorBuilder machine(kMachPtr, flags);
+ MachineOperatorBuilder machine(zone(), kMachPtr, flags);
JSGraph jsgraph(graph(), common(), javascript(), &machine);
JSBuiltinReducer reducer(&jsgraph);
return reducer.Reduce(node);
diff --git a/test/unittests/compiler/js-typed-lowering-unittest.cc b/test/unittests/compiler/js-typed-lowering-unittest.cc
index 539785d..97ffd83 100644
--- a/test/unittests/compiler/js-typed-lowering-unittest.cc
+++ b/test/unittests/compiler/js-typed-lowering-unittest.cc
@@ -38,7 +38,7 @@
protected:
Reduction Reduce(Node* node) {
- MachineOperatorBuilder machine;
+ MachineOperatorBuilder machine(zone());
JSGraph jsgraph(graph(), common(), javascript(), &machine);
JSTypedLowering reducer(&jsgraph);
return reducer.Reduce(node);
diff --git a/test/unittests/compiler/machine-operator-reducer-unittest.cc b/test/unittests/compiler/machine-operator-reducer-unittest.cc
index a62216d..065a734 100644
--- a/test/unittests/compiler/machine-operator-reducer-unittest.cc
+++ b/test/unittests/compiler/machine-operator-reducer-unittest.cc
@@ -22,7 +22,7 @@
class MachineOperatorReducerTest : public TypedGraphTest {
public:
explicit MachineOperatorReducerTest(int num_parameters = 2)
- : TypedGraphTest(num_parameters) {}
+ : TypedGraphTest(num_parameters), machine_(zone()) {}
protected:
Reduction Reduce(Node* node) {
diff --git a/test/unittests/compiler/node-matchers-unittest.cc b/test/unittests/compiler/node-matchers-unittest.cc
index 843a44e..f087c66 100644
--- a/test/unittests/compiler/node-matchers-unittest.cc
+++ b/test/unittests/compiler/node-matchers-unittest.cc
@@ -18,7 +18,7 @@
class NodeMatcherTest : public GraphTest {
public:
- NodeMatcherTest() {}
+ NodeMatcherTest() : machine_(zone()) {}
virtual ~NodeMatcherTest() {}
MachineOperatorBuilder* machine() { return &machine_; }
diff --git a/test/unittests/compiler/select-lowering-unittest.cc b/test/unittests/compiler/select-lowering-unittest.cc
index 6dbd7ad..51efc83 100644
--- a/test/unittests/compiler/select-lowering-unittest.cc
+++ b/test/unittests/compiler/select-lowering-unittest.cc
@@ -10,6 +10,7 @@
using testing::AllOf;
using testing::Capture;
using testing::CaptureEq;
+using testing::Not;
namespace v8 {
namespace internal {
@@ -33,12 +34,12 @@
Node* const p2 = Parameter(2);
Node* const p3 = Parameter(3);
Node* const p4 = Parameter(4);
+ Node* const s0 = graph()->NewNode(common()->Select(kMachInt32), p0, p1, p2);
Capture<Node*> branch;
Capture<Node*> merge;
{
- Reduction const r =
- Reduce(graph()->NewNode(common()->Select(kMachInt32), p0, p1, p2));
+ Reduction const r = Reduce(s0);
ASSERT_TRUE(r.Changed());
EXPECT_THAT(
r.replacement(),
@@ -55,6 +56,15 @@
ASSERT_TRUE(r.Changed());
EXPECT_THAT(r.replacement(), IsPhi(kMachInt32, p3, p4, CaptureEq(&merge)));
}
+ {
+ // We must not reuse the diamond if it is reachable from either else/then
+ // values of the Select, because the resulting graph can not be scheduled.
+ Reduction const r =
+ Reduce(graph()->NewNode(common()->Select(kMachInt32), p0, s0, p0));
+ ASSERT_TRUE(r.Changed());
+ EXPECT_THAT(r.replacement(),
+ IsPhi(kMachInt32, s0, p0, Not(CaptureEq(&merge))));
+ }
}
} // namespace compiler
diff --git a/test/unittests/compiler/simplified-operator-reducer-unittest.cc b/test/unittests/compiler/simplified-operator-reducer-unittest.cc
index 465ee84..7f8e43d 100644
--- a/test/unittests/compiler/simplified-operator-reducer-unittest.cc
+++ b/test/unittests/compiler/simplified-operator-reducer-unittest.cc
@@ -22,7 +22,7 @@
protected:
Reduction Reduce(Node* node) {
- MachineOperatorBuilder machine;
+ MachineOperatorBuilder machine(zone());
JSOperatorBuilder javascript(zone());
JSGraph jsgraph(graph(), common(), &javascript, &machine);
SimplifiedOperatorReducer reducer(&jsgraph);
diff --git a/tools/gyp/v8.gyp b/tools/gyp/v8.gyp
index b675e18..0ba4e5a 100644
--- a/tools/gyp/v8.gyp
+++ b/tools/gyp/v8.gyp
@@ -485,6 +485,8 @@
'../../src/compiler/raw-machine-assembler.h',
'../../src/compiler/register-allocator.cc',
'../../src/compiler/register-allocator.h',
+ '../../src/compiler/register-allocator-verifier.cc',
+ '../../src/compiler/register-allocator-verifier.h',
'../../src/compiler/register-configuration.cc',
'../../src/compiler/register-configuration.h',
'../../src/compiler/representation-change.h',
diff --git a/tools/push-to-trunk/common_includes.py b/tools/push-to-trunk/common_includes.py
index bb040f5..a7f8047 100644
--- a/tools/push-to-trunk/common_includes.py
+++ b/tools/push-to-trunk/common_includes.py
@@ -45,6 +45,7 @@
from git_recipes import GitRecipesMixin
from git_recipes import GitFailedException
+CHANGELOG_FILE = "ChangeLog"
VERSION_FILE = os.path.join("src", "version.cc")
# V8 base directory.
diff --git a/tools/push-to-trunk/push_to_trunk.py b/tools/push-to-trunk/push_to_trunk.py
index 0df548b..941d041 100755
--- a/tools/push-to-trunk/push_to_trunk.py
+++ b/tools/push-to-trunk/push_to_trunk.py
@@ -319,12 +319,11 @@
# The change log has been modified by the patch. Reset it to the version
# on trunk and apply the exact changes determined by this PrepareChangeLog
# step above.
- self.GitCheckoutFile(self.Config("CHANGELOG_FILE"),
- self.vc.RemoteCandidateBranch())
+ self.GitCheckoutFile(CHANGELOG_FILE, self.vc.RemoteCandidateBranch())
changelog_entry = FileToText(self.Config("CHANGELOG_ENTRY_FILE"))
- old_change_log = FileToText(self.Config("CHANGELOG_FILE"))
+ old_change_log = FileToText(os.path.join(self.default_cwd, CHANGELOG_FILE))
new_change_log = "%s\n\n\n%s" % (changelog_entry, old_change_log)
- TextToFile(new_change_log, self.Config("CHANGELOG_FILE"))
+ TextToFile(new_change_log, os.path.join(self.default_cwd, CHANGELOG_FILE))
os.remove(self.Config("CHANGELOG_ENTRY_FILE"))
@@ -423,7 +422,6 @@
"BRANCHNAME": "prepare-push",
"TRUNKBRANCH": "trunk-push",
"PERSISTFILE_BASENAME": "/tmp/v8-push-to-trunk-tempfile",
- "CHANGELOG_FILE": "ChangeLog",
"CHANGELOG_ENTRY_FILE": "/tmp/v8-push-to-trunk-tempfile-changelog-entry",
"PATCH_FILE": "/tmp/v8-push-to-trunk-tempfile-patch-file",
"COMMITMSG_FILE": "/tmp/v8-push-to-trunk-tempfile-commitmsg",
diff --git a/tools/push-to-trunk/test_scripts.py b/tools/push-to-trunk/test_scripts.py
index 41cc97f..14eb50e 100644
--- a/tools/push-to-trunk/test_scripts.py
+++ b/tools/push-to-trunk/test_scripts.py
@@ -57,7 +57,6 @@
"BRANCHNAME": "test-prepare-push",
"TRUNKBRANCH": "test-trunk-push",
"PERSISTFILE_BASENAME": "/tmp/test-v8-push-to-trunk-tempfile",
- "CHANGELOG_FILE": None,
"CHANGELOG_ENTRY_FILE": "/tmp/test-v8-push-to-trunk-tempfile-changelog-entry",
"PATCH_FILE": "/tmp/test-v8-push-to-trunk-tempfile-patch",
"COMMITMSG_FILE": "/tmp/test-v8-push-to-trunk-tempfile-commitmsg",
@@ -717,9 +716,9 @@
self.WriteFakeVersionFile(build=5)
TEST_CONFIG["CHANGELOG_ENTRY_FILE"] = self.MakeEmptyTempFile()
- TEST_CONFIG["CHANGELOG_FILE"] = self.MakeEmptyTempFile()
bleeding_edge_change_log = "2014-03-17: Sentinel\n"
- TextToFile(bleeding_edge_change_log, TEST_CONFIG["CHANGELOG_FILE"])
+ TextToFile(bleeding_edge_change_log,
+ os.path.join(TEST_CONFIG["DEFAULT_CWD"], CHANGELOG_FILE))
os.environ["EDITOR"] = "vi"
def ResetChangeLog():
@@ -728,7 +727,8 @@
trunk_change_log = """1999-04-05: Version 3.22.4
Performance and stability improvements on all platforms.\n"""
- TextToFile(trunk_change_log, TEST_CONFIG["CHANGELOG_FILE"])
+ TextToFile(trunk_change_log,
+ os.path.join(TEST_CONFIG["DEFAULT_CWD"], CHANGELOG_FILE))
def ResetToTrunk():
ResetChangeLog()
@@ -751,7 +751,8 @@
self.assertTrue(re.search(r"#define IS_CANDIDATE_VERSION\s+0", version))
# Check that the change log on the trunk branch got correctly modified.
- change_log = FileToText(TEST_CONFIG["CHANGELOG_FILE"])
+ change_log = FileToText(
+ os.path.join(TEST_CONFIG["DEFAULT_CWD"], CHANGELOG_FILE))
self.assertEquals(
"""1999-07-31: Version 3.22.5
@@ -815,8 +816,7 @@
Cmd(("git new-branch %s --upstream origin/candidates" %
TEST_CONFIG["TRUNKBRANCH"]), "", cb=ResetToTrunk),
Cmd("git apply --index --reject \"%s\"" % TEST_CONFIG["PATCH_FILE"], ""),
- Cmd(("git checkout -f origin/candidates -- %s" %
- TEST_CONFIG["CHANGELOG_FILE"]), "",
+ Cmd("git checkout -f origin/candidates -- ChangeLog", "",
cb=ResetChangeLog),
Cmd("git checkout -f origin/candidates -- src/version.cc", "",
cb=self.WriteFakeVersionFile),
@@ -846,7 +846,7 @@
else: args += ["-r", "reviewer@chromium.org"]
PushToTrunk(TEST_CONFIG, self).Run(args)
- cl = FileToText(TEST_CONFIG["CHANGELOG_FILE"])
+ cl = FileToText(os.path.join(TEST_CONFIG["DEFAULT_CWD"], CHANGELOG_FILE))
self.assertTrue(re.search(r"^\d\d\d\d\-\d+\-\d+: Version 3\.22\.5", cl))
self.assertTrue(re.search(r" Log text 1 \(issue 321\).", cl))
self.assertTrue(re.search(r"1999\-04\-05: Version 3\.22\.4", cl))
@@ -873,9 +873,9 @@
self.WriteFakeVersionFile(build=5)
TEST_CONFIG["CHANGELOG_ENTRY_FILE"] = self.MakeEmptyTempFile()
- TEST_CONFIG["CHANGELOG_FILE"] = self.MakeEmptyTempFile()
bleeding_edge_change_log = "2014-03-17: Sentinel\n"
- TextToFile(bleeding_edge_change_log, TEST_CONFIG["CHANGELOG_FILE"])
+ TextToFile(bleeding_edge_change_log,
+ os.path.join(TEST_CONFIG["DEFAULT_CWD"], CHANGELOG_FILE))
def ResetChangeLog():
"""On 'git co -b new_branch svn/trunk', and 'git checkout -- ChangeLog',
@@ -883,7 +883,8 @@
trunk_change_log = """1999-04-05: Version 3.22.4
Performance and stability improvements on all platforms.\n"""
- TextToFile(trunk_change_log, TEST_CONFIG["CHANGELOG_FILE"])
+ TextToFile(trunk_change_log,
+ os.path.join(TEST_CONFIG["DEFAULT_CWD"], CHANGELOG_FILE))
def ResetToTrunk():
ResetChangeLog()
@@ -906,7 +907,8 @@
self.assertTrue(re.search(r"#define IS_CANDIDATE_VERSION\s+0", version))
# Check that the change log on the trunk branch got correctly modified.
- change_log = FileToText(TEST_CONFIG["CHANGELOG_FILE"])
+ change_log = FileToText(
+ os.path.join(TEST_CONFIG["DEFAULT_CWD"], CHANGELOG_FILE))
self.assertEquals(
"""1999-07-31: Version 3.22.5
@@ -949,8 +951,7 @@
Cmd(("git new-branch %s --upstream origin/candidates" %
TEST_CONFIG["TRUNKBRANCH"]), "", cb=ResetToTrunk),
Cmd("git apply --index --reject \"%s\"" % TEST_CONFIG["PATCH_FILE"], ""),
- Cmd(("git checkout -f origin/candidates -- %s" %
- TEST_CONFIG["CHANGELOG_FILE"]), "",
+ Cmd("git checkout -f origin/candidates -- ChangeLog", "",
cb=ResetChangeLog),
Cmd("git checkout -f origin/candidates -- src/version.cc", "",
cb=self.WriteFakeVersionFile),
@@ -989,7 +990,7 @@
"--work-dir", TEST_CONFIG["DEFAULT_CWD"]]
PushToTrunk(TEST_CONFIG, self).Run(args)
- cl = FileToText(TEST_CONFIG["CHANGELOG_FILE"])
+ cl = FileToText(os.path.join(TEST_CONFIG["DEFAULT_CWD"], CHANGELOG_FILE))
self.assertTrue(re.search(r"^\d\d\d\d\-\d+\-\d+: Version 3\.22\.5", cl))
self.assertTrue(re.search(r" Log text 1 \(issue 321\).", cl))
self.assertTrue(re.search(r"1999\-04\-05: Version 3\.22\.4", cl))
diff --git a/tools/testrunner/local/progress.py b/tools/testrunner/local/progress.py
index 8caa58c..84aceba 100644
--- a/tools/testrunner/local/progress.py
+++ b/tools/testrunner/local/progress.py
@@ -333,6 +333,7 @@
"stderr": test.output.stderr,
"exit_code": test.output.exit_code,
"result": test.suite.GetOutcome(test),
+ "expected": list(test.outcomes) or ["PASS"],
})