// Copyright 2012 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
//
// Review notes:
//
// - The use of macros in these inline functions may seem superfluous
// but it is absolutely needed to make sure gcc generates optimal
// code. gcc is not happy when attempting to inline too deeply.
//
#ifndef V8_OBJECTS_INL_H_
#define V8_OBJECTS_INL_H_
#include "src/base/atomicops.h"
#include "src/base/bits.h"
#include "src/builtins/builtins.h"
#include "src/contexts-inl.h"
#include "src/conversions-inl.h"
#include "src/factory.h"
#include "src/field-index-inl.h"
#include "src/field-type.h"
#include "src/handles-inl.h"
#include "src/heap/heap-inl.h"
#include "src/heap/heap.h"
#include "src/isolate-inl.h"
#include "src/isolate.h"
#include "src/keys.h"
#include "src/layout-descriptor-inl.h"
#include "src/lookup-cache-inl.h"
#include "src/lookup.h"
#include "src/objects.h"
#include "src/objects/scope-info.h"
#include "src/property.h"
#include "src/prototype.h"
#include "src/transitions-inl.h"
#include "src/type-feedback-vector-inl.h"
#include "src/v8memory.h"
namespace v8 {
namespace internal {
PropertyDetails::PropertyDetails(Smi* smi) {
value_ = smi->value();
}
Smi* PropertyDetails::AsSmi() const {
// Ensure the upper 2 bits have the same value by sign-extending the stored
// value. This is necessary to be able to use the 31st bit of the property
// details.
int value = value_ << 1;
return Smi::FromInt(value >> 1);
}
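// A minimal worked example of the trick above, assuming 32-bit ints with
// arithmetic right shifts (as V8 does): if value_ is 0x40000000 (bit 30 set),
// then value_ << 1 yields 0x80000000 and the arithmetic >> 1 yields
// 0xC0000000, so bits 30 and 31 agree and the result fits the Smi encoding.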
int PropertyDetails::field_width_in_words() const {
DCHECK(location() == kField);
if (!FLAG_unbox_double_fields) return 1;
if (kDoubleSize == kPointerSize) return 1;
return representation().IsDouble() ? kDoubleSize / kPointerSize : 1;
}
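// For example, with unboxed double fields on a 32-bit target
// (kDoubleSize == 8, kPointerSize == 4), a double field spans
// kDoubleSize / kPointerSize == 2 words; in every other configuration the
// answer is 1.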
#define INT_ACCESSORS(holder, name, offset) \
int holder::name() const { return READ_INT_FIELD(this, offset); } \
void holder::set_##name(int value) { WRITE_INT_FIELD(this, offset, value); }
#define ACCESSORS_CHECKED(holder, name, type, offset, condition) \
type* holder::name() const { \
DCHECK(condition); \
return type::cast(READ_FIELD(this, offset)); \
} \
void holder::set_##name(type* value, WriteBarrierMode mode) { \
DCHECK(condition); \
WRITE_FIELD(this, offset, value); \
CONDITIONAL_WRITE_BARRIER(GetHeap(), this, offset, value, mode); \
}
#define ACCESSORS(holder, name, type, offset) \
ACCESSORS_CHECKED(holder, name, type, offset, true)
// Accessors that read a Smi field as an int and write an int as a Smi.
#define SMI_ACCESSORS_CHECKED(holder, name, offset, condition) \
int holder::name() const { \
DCHECK(condition); \
Object* value = READ_FIELD(this, offset); \
return Smi::cast(value)->value(); \
} \
void holder::set_##name(int value) { \
DCHECK(condition); \
WRITE_FIELD(this, offset, Smi::FromInt(value)); \
}
#define SMI_ACCESSORS(holder, name, offset) \
SMI_ACCESSORS_CHECKED(holder, name, offset, true)
#define SYNCHRONIZED_SMI_ACCESSORS(holder, name, offset) \
int holder::synchronized_##name() const { \
Object* value = ACQUIRE_READ_FIELD(this, offset); \
return Smi::cast(value)->value(); \
} \
void holder::synchronized_set_##name(int value) { \
RELEASE_WRITE_FIELD(this, offset, Smi::FromInt(value)); \
}
#define NOBARRIER_SMI_ACCESSORS(holder, name, offset) \
int holder::nobarrier_##name() const { \
Object* value = NOBARRIER_READ_FIELD(this, offset); \
return Smi::cast(value)->value(); \
} \
void holder::nobarrier_set_##name(int value) { \
NOBARRIER_WRITE_FIELD(this, offset, Smi::FromInt(value)); \
}
#define BOOL_GETTER(holder, field, name, offset) \
bool holder::name() const { \
return BooleanBit::get(field(), offset); \
}
#define BOOL_ACCESSORS(holder, field, name, offset) \
bool holder::name() const { \
return BooleanBit::get(field(), offset); \
} \
void holder::set_##name(bool value) { \
set_##field(BooleanBit::set(field(), offset, value)); \
}
#define TYPE_CHECKER(type, instancetype) \
bool HeapObject::Is##type() const { \
return map()->instance_type() == instancetype; \
}
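// As an illustration, TYPE_CHECKER(Map, MAP_TYPE) below expands to:
//   bool HeapObject::IsMap() const {
//     return map()->instance_type() == MAP_TYPE;
//   }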
TYPE_CHECKER(ByteArray, BYTE_ARRAY_TYPE)
TYPE_CHECKER(BytecodeArray, BYTECODE_ARRAY_TYPE)
TYPE_CHECKER(Cell, CELL_TYPE)
TYPE_CHECKER(Code, CODE_TYPE)
TYPE_CHECKER(FixedDoubleArray, FIXED_DOUBLE_ARRAY_TYPE)
TYPE_CHECKER(Foreign, FOREIGN_TYPE)
TYPE_CHECKER(FreeSpace, FREE_SPACE_TYPE)
TYPE_CHECKER(HeapNumber, HEAP_NUMBER_TYPE)
TYPE_CHECKER(JSArgumentsObject, JS_ARGUMENTS_TYPE)
TYPE_CHECKER(JSArray, JS_ARRAY_TYPE)
TYPE_CHECKER(JSArrayBuffer, JS_ARRAY_BUFFER_TYPE)
TYPE_CHECKER(JSBoundFunction, JS_BOUND_FUNCTION_TYPE)
TYPE_CHECKER(JSContextExtensionObject, JS_CONTEXT_EXTENSION_OBJECT_TYPE)
TYPE_CHECKER(JSDataView, JS_DATA_VIEW_TYPE)
TYPE_CHECKER(JSDate, JS_DATE_TYPE)
TYPE_CHECKER(JSError, JS_ERROR_TYPE)
TYPE_CHECKER(JSFunction, JS_FUNCTION_TYPE)
TYPE_CHECKER(JSGeneratorObject, JS_GENERATOR_OBJECT_TYPE)
TYPE_CHECKER(JSGlobalObject, JS_GLOBAL_OBJECT_TYPE)
TYPE_CHECKER(JSMap, JS_MAP_TYPE)
TYPE_CHECKER(JSMapIterator, JS_MAP_ITERATOR_TYPE)
TYPE_CHECKER(JSMessageObject, JS_MESSAGE_OBJECT_TYPE)
TYPE_CHECKER(JSModuleNamespace, JS_MODULE_NAMESPACE_TYPE)
TYPE_CHECKER(JSPromise, JS_PROMISE_TYPE)
TYPE_CHECKER(JSRegExp, JS_REGEXP_TYPE)
TYPE_CHECKER(JSSet, JS_SET_TYPE)
TYPE_CHECKER(JSSetIterator, JS_SET_ITERATOR_TYPE)
TYPE_CHECKER(JSStringIterator, JS_STRING_ITERATOR_TYPE)
TYPE_CHECKER(JSTypedArray, JS_TYPED_ARRAY_TYPE)
TYPE_CHECKER(JSValue, JS_VALUE_TYPE)
TYPE_CHECKER(JSWeakMap, JS_WEAK_MAP_TYPE)
TYPE_CHECKER(JSWeakSet, JS_WEAK_SET_TYPE)
TYPE_CHECKER(Map, MAP_TYPE)
TYPE_CHECKER(MutableHeapNumber, MUTABLE_HEAP_NUMBER_TYPE)
TYPE_CHECKER(Oddball, ODDBALL_TYPE)
TYPE_CHECKER(PropertyCell, PROPERTY_CELL_TYPE)
TYPE_CHECKER(SharedFunctionInfo, SHARED_FUNCTION_INFO_TYPE)
TYPE_CHECKER(Simd128Value, SIMD128_VALUE_TYPE)
TYPE_CHECKER(Symbol, SYMBOL_TYPE)
TYPE_CHECKER(TransitionArray, TRANSITION_ARRAY_TYPE)
TYPE_CHECKER(WeakCell, WEAK_CELL_TYPE)
TYPE_CHECKER(WeakFixedArray, FIXED_ARRAY_TYPE)
#define TYPED_ARRAY_TYPE_CHECKER(Type, type, TYPE, ctype, size) \
TYPE_CHECKER(Fixed##Type##Array, FIXED_##TYPE##_ARRAY_TYPE)
TYPED_ARRAYS(TYPED_ARRAY_TYPE_CHECKER)
#undef TYPED_ARRAY_TYPE_CHECKER
#undef TYPE_CHECKER
bool HeapObject::IsFixedArrayBase() const {
return IsFixedArray() || IsFixedDoubleArray() || IsFixedTypedArrayBase();
}
bool HeapObject::IsFixedArray() const {
InstanceType instance_type = map()->instance_type();
return instance_type == FIXED_ARRAY_TYPE ||
instance_type == TRANSITION_ARRAY_TYPE;
}
// External objects are not extensible, so the map check is enough.
bool HeapObject::IsExternal() const {
return map() == GetHeap()->external_map();
}
#define SIMD128_TYPE_CHECKER(TYPE, Type, type, lane_count, lane_type) \
bool HeapObject::Is##Type() const { return map() == GetHeap()->type##_map(); }
SIMD128_TYPES(SIMD128_TYPE_CHECKER)
#undef SIMD128_TYPE_CHECKER
#define IS_TYPE_FUNCTION_DEF(type_) \
bool Object::Is##type_() const { \
return IsHeapObject() && HeapObject::cast(this)->Is##type_(); \
}
HEAP_OBJECT_TYPE_LIST(IS_TYPE_FUNCTION_DEF)
#undef IS_TYPE_FUNCTION_DEF
#define IS_TYPE_FUNCTION_DEF(Type, Value) \
bool Object::Is##Type(Isolate* isolate) const { \
return this == isolate->heap()->Value(); \
} \
bool HeapObject::Is##Type(Isolate* isolate) const { \
return this == isolate->heap()->Value(); \
}
ODDBALL_LIST(IS_TYPE_FUNCTION_DEF)
#undef IS_TYPE_FUNCTION_DEF
bool HeapObject::IsString() const {
return map()->instance_type() < FIRST_NONSTRING_TYPE;
}
bool HeapObject::IsName() const {
return map()->instance_type() <= LAST_NAME_TYPE;
}
bool HeapObject::IsUniqueName() const {
return IsInternalizedString() || IsSymbol();
}
bool Name::IsUniqueName() const {
uint32_t type = map()->instance_type();
return (type & (kIsNotStringMask | kIsNotInternalizedMask)) !=
(kStringTag | kNotInternalizedTag);
}
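// The single mask-and-compare above rejects exactly the combination
// (kStringTag | kNotInternalizedTag), i.e. a regular, non-internalized
// string; both symbols and internalized strings therefore pass with one
// branch.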
bool HeapObject::IsFunction() const {
STATIC_ASSERT(LAST_FUNCTION_TYPE == LAST_TYPE);
return map()->instance_type() >= FIRST_FUNCTION_TYPE;
}
bool HeapObject::IsCallable() const { return map()->is_callable(); }
bool HeapObject::IsConstructor() const { return map()->is_constructor(); }
bool HeapObject::IsTemplateInfo() const {
return IsObjectTemplateInfo() || IsFunctionTemplateInfo();
}
bool HeapObject::IsInternalizedString() const {
uint32_t type = map()->instance_type();
STATIC_ASSERT(kNotInternalizedTag != 0);
return (type & (kIsNotStringMask | kIsNotInternalizedMask)) ==
(kStringTag | kInternalizedTag);
}
bool HeapObject::IsConsString() const {
if (!IsString()) return false;
return StringShape(String::cast(this)).IsCons();
}
bool HeapObject::IsSlicedString() const {
if (!IsString()) return false;
return StringShape(String::cast(this)).IsSliced();
}
bool HeapObject::IsSeqString() const {
if (!IsString()) return false;
return StringShape(String::cast(this)).IsSequential();
}
bool HeapObject::IsSeqOneByteString() const {
if (!IsString()) return false;
return StringShape(String::cast(this)).IsSequential() &&
String::cast(this)->IsOneByteRepresentation();
}
bool HeapObject::IsSeqTwoByteString() const {
if (!IsString()) return false;
return StringShape(String::cast(this)).IsSequential() &&
String::cast(this)->IsTwoByteRepresentation();
}
bool HeapObject::IsExternalString() const {
if (!IsString()) return false;
return StringShape(String::cast(this)).IsExternal();
}
bool HeapObject::IsExternalOneByteString() const {
if (!IsString()) return false;
return StringShape(String::cast(this)).IsExternal() &&
String::cast(this)->IsOneByteRepresentation();
}
bool HeapObject::IsExternalTwoByteString() const {
if (!IsString()) return false;
return StringShape(String::cast(this)).IsExternal() &&
String::cast(this)->IsTwoByteRepresentation();
}
bool Object::IsNumber() const { return IsSmi() || IsHeapNumber(); }
bool HeapObject::IsFiller() const {
InstanceType instance_type = map()->instance_type();
return instance_type == FREE_SPACE_TYPE || instance_type == FILLER_TYPE;
}
bool HeapObject::IsFixedTypedArrayBase() const {
InstanceType instance_type = map()->instance_type();
return (instance_type >= FIRST_FIXED_TYPED_ARRAY_TYPE &&
instance_type <= LAST_FIXED_TYPED_ARRAY_TYPE);
}
bool HeapObject::IsJSReceiver() const {
STATIC_ASSERT(LAST_JS_RECEIVER_TYPE == LAST_TYPE);
return map()->instance_type() >= FIRST_JS_RECEIVER_TYPE;
}
bool HeapObject::IsJSObject() const {
STATIC_ASSERT(LAST_JS_OBJECT_TYPE == LAST_TYPE);
return map()->IsJSObjectMap();
}
bool HeapObject::IsJSProxy() const { return map()->IsJSProxyMap(); }
bool HeapObject::IsJSArrayIterator() const {
InstanceType instance_type = map()->instance_type();
return (instance_type >= FIRST_ARRAY_ITERATOR_TYPE &&
instance_type <= LAST_ARRAY_ITERATOR_TYPE);
}
bool HeapObject::IsJSWeakCollection() const {
return IsJSWeakMap() || IsJSWeakSet();
}
bool HeapObject::IsJSCollection() const { return IsJSMap() || IsJSSet(); }
bool HeapObject::IsDescriptorArray() const { return IsFixedArray(); }
bool HeapObject::IsFrameArray() const { return IsFixedArray(); }
bool HeapObject::IsArrayList() const { return IsFixedArray(); }
bool HeapObject::IsRegExpMatchInfo() const { return IsFixedArray(); }
bool Object::IsLayoutDescriptor() const {
return IsSmi() || IsFixedTypedArrayBase();
}
bool HeapObject::IsTypeFeedbackVector() const { return IsFixedArray(); }
bool HeapObject::IsTypeFeedbackMetadata() const { return IsFixedArray(); }
bool HeapObject::IsLiteralsArray() const { return IsFixedArray(); }
bool HeapObject::IsDeoptimizationInputData() const {
// Must be a fixed array.
if (!IsFixedArray()) return false;
// There's no sure way to distinguish a fixed array from a deoptimization
// data array. Since this is used only for asserts, we check that the length
// is either zero or the fixed header size plus a multiple of the entry
// size.
int length = FixedArray::cast(this)->length();
if (length == 0) return true;
length -= DeoptimizationInputData::kFirstDeoptEntryIndex;
return length >= 0 && length % DeoptimizationInputData::kDeoptEntrySize == 0;
}
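// Worked example of the length check above (constants are illustrative, not
// the real values): if kFirstDeoptEntryIndex were 5 and kDeoptEntrySize
// were 4, lengths 5, 9, 13, ... would pass, while 7 would fail because
// (7 - 5) % 4 != 0.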
bool HeapObject::IsDeoptimizationOutputData() const {
if (!IsFixedArray()) return false;
// There's actually no way to see the difference between a fixed array and
// a deoptimization data array. Since this is used for asserts, we can at
// least check that the length is plausible.
if (FixedArray::cast(this)->length() % 2 != 0) return false;
return true;
}
bool HeapObject::IsHandlerTable() const {
if (!IsFixedArray()) return false;
// There's actually no way to see the difference between a fixed array and
// a handler table array.
return true;
}
bool HeapObject::IsTemplateList() const {
if (!IsFixedArray()) return false;
// There's actually no way to see the difference between a fixed array and
// a template list.
if (FixedArray::cast(this)->length() < 1) return false;
return true;
}
bool HeapObject::IsDependentCode() const {
if (!IsFixedArray()) return false;
// There's actually no way to see the difference between a fixed array and
// a dependent codes array.
return true;
}
bool HeapObject::IsContext() const {
Map* map = this->map();
Heap* heap = GetHeap();
return (
map == heap->function_context_map() || map == heap->catch_context_map() ||
map == heap->with_context_map() || map == heap->native_context_map() ||
map == heap->block_context_map() || map == heap->module_context_map() ||
map == heap->eval_context_map() || map == heap->script_context_map() ||
map == heap->debug_evaluate_context_map());
}
bool HeapObject::IsNativeContext() const {
return map() == GetHeap()->native_context_map();
}
bool HeapObject::IsScriptContextTable() const {
return map() == GetHeap()->script_context_table_map();
}
bool HeapObject::IsScopeInfo() const {
return map() == GetHeap()->scope_info_map();
}
bool HeapObject::IsModuleInfo() const {
return map() == GetHeap()->module_info_map();
}
template <>
inline bool Is<JSFunction>(Object* obj) {
return obj->IsJSFunction();
}
bool HeapObject::IsAbstractCode() const {
return IsBytecodeArray() || IsCode();
}
bool HeapObject::IsStringWrapper() const {
return IsJSValue() && JSValue::cast(this)->value()->IsString();
}
bool HeapObject::IsBoolean() const {
return IsOddball() &&
((Oddball::cast(this)->kind() & Oddball::kNotBooleanMask) == 0);
}
bool HeapObject::IsJSArrayBufferView() const {
return IsJSDataView() || IsJSTypedArray();
}
template <>
inline bool Is<JSArray>(Object* obj) {
return obj->IsJSArray();
}
bool HeapObject::IsHashTable() const {
return map() == GetHeap()->hash_table_map();
}
bool HeapObject::IsWeakHashTable() const { return IsHashTable(); }
bool HeapObject::IsDictionary() const {
return IsHashTable() && this != GetHeap()->string_table();
}
bool Object::IsNameDictionary() const { return IsDictionary(); }
bool Object::IsGlobalDictionary() const { return IsDictionary(); }
bool Object::IsSeededNumberDictionary() const { return IsDictionary(); }
bool HeapObject::IsUnseededNumberDictionary() const {
return map() == GetHeap()->unseeded_number_dictionary_map();
}
bool HeapObject::IsStringTable() const { return IsHashTable(); }
bool HeapObject::IsStringSet() const { return IsHashTable(); }
bool HeapObject::IsObjectHashSet() const { return IsHashTable(); }
bool HeapObject::IsNormalizedMapCache() const {
return NormalizedMapCache::IsNormalizedMapCache(this);
}
int NormalizedMapCache::GetIndex(Handle<Map> map) {
return map->Hash() % NormalizedMapCache::kEntries;
}
bool NormalizedMapCache::IsNormalizedMapCache(const HeapObject* obj) {
if (!obj->IsFixedArray()) return false;
if (FixedArray::cast(obj)->length() != NormalizedMapCache::kEntries) {
return false;
}
#ifdef VERIFY_HEAP
if (FLAG_verify_heap) {
reinterpret_cast<NormalizedMapCache*>(const_cast<HeapObject*>(obj))
->NormalizedMapCacheVerify();
}
#endif
return true;
}
bool HeapObject::IsCompilationCacheTable() const { return IsHashTable(); }
bool HeapObject::IsCodeCacheHashTable() const { return IsHashTable(); }
bool HeapObject::IsMapCache() const { return IsHashTable(); }
bool HeapObject::IsObjectHashTable() const { return IsHashTable(); }
bool HeapObject::IsOrderedHashTable() const {
return map() == GetHeap()->ordered_hash_table_map();
}
bool Object::IsOrderedHashSet() const { return IsOrderedHashTable(); }
bool Object::IsOrderedHashMap() const { return IsOrderedHashTable(); }
bool Object::IsPrimitive() const {
return IsSmi() || HeapObject::cast(this)->map()->IsPrimitiveMap();
}
bool HeapObject::IsJSGlobalProxy() const {
bool result = map()->instance_type() == JS_GLOBAL_PROXY_TYPE;
DCHECK(!result || map()->is_access_check_needed());
return result;
}
bool HeapObject::IsUndetectable() const { return map()->is_undetectable(); }
bool HeapObject::IsAccessCheckNeeded() const {
if (IsJSGlobalProxy()) {
const JSGlobalProxy* proxy = JSGlobalProxy::cast(this);
JSGlobalObject* global = proxy->GetIsolate()->context()->global_object();
return proxy->IsDetachedFrom(global);
}
return map()->is_access_check_needed();
}
bool HeapObject::IsStruct() const {
switch (map()->instance_type()) {
#define MAKE_STRUCT_CASE(NAME, Name, name) \
case NAME##_TYPE: \
return true;
STRUCT_LIST(MAKE_STRUCT_CASE)
#undef MAKE_STRUCT_CASE
default:
return false;
}
}
#define MAKE_STRUCT_PREDICATE(NAME, Name, name) \
bool Object::Is##Name() const { \
return IsHeapObject() && HeapObject::cast(this)->Is##Name(); \
} \
bool HeapObject::Is##Name() const { \
return map()->instance_type() == NAME##_TYPE; \
}
STRUCT_LIST(MAKE_STRUCT_PREDICATE)
#undef MAKE_STRUCT_PREDICATE
double Object::Number() const {
DCHECK(IsNumber());
return IsSmi()
? static_cast<double>(reinterpret_cast<const Smi*>(this)->value())
: reinterpret_cast<const HeapNumber*>(this)->value();
}
bool Object::IsNaN() const {
return this->IsHeapNumber() && std::isnan(HeapNumber::cast(this)->value());
}
bool Object::IsMinusZero() const {
return this->IsHeapNumber() &&
i::IsMinusZero(HeapNumber::cast(this)->value());
}
// ------------------------------------
// Cast operations
#define CAST_ACCESSOR(type) \
type* type::cast(Object* object) { \
SLOW_DCHECK(object->Is##type()); \
return reinterpret_cast<type*>(object); \
} \
const type* type::cast(const Object* object) { \
SLOW_DCHECK(object->Is##type()); \
return reinterpret_cast<const type*>(object); \
}
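// For instance, CAST_ACCESSOR(Map) below defines Map* Map::cast(Object*)
// plus a const overload; both are plain reinterpret_casts guarded only by a
// SLOW_DCHECK, so callers must already know the object's type.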
CAST_ACCESSOR(AbstractCode)
CAST_ACCESSOR(ArrayList)
CAST_ACCESSOR(Bool16x8)
CAST_ACCESSOR(Bool32x4)
CAST_ACCESSOR(Bool8x16)
CAST_ACCESSOR(ByteArray)
CAST_ACCESSOR(BytecodeArray)
CAST_ACCESSOR(Cell)
CAST_ACCESSOR(Code)
CAST_ACCESSOR(CodeCacheHashTable)
CAST_ACCESSOR(CompilationCacheTable)
CAST_ACCESSOR(ConsString)
CAST_ACCESSOR(DeoptimizationInputData)
CAST_ACCESSOR(DeoptimizationOutputData)
CAST_ACCESSOR(DependentCode)
CAST_ACCESSOR(DescriptorArray)
CAST_ACCESSOR(ExternalOneByteString)
CAST_ACCESSOR(ExternalString)
CAST_ACCESSOR(ExternalTwoByteString)
CAST_ACCESSOR(FixedArray)
CAST_ACCESSOR(FixedArrayBase)
CAST_ACCESSOR(FixedDoubleArray)
CAST_ACCESSOR(FixedTypedArrayBase)
CAST_ACCESSOR(Float32x4)
CAST_ACCESSOR(Foreign)
CAST_ACCESSOR(FrameArray)
CAST_ACCESSOR(GlobalDictionary)
CAST_ACCESSOR(HandlerTable)
CAST_ACCESSOR(HeapObject)
CAST_ACCESSOR(Int16x8)
CAST_ACCESSOR(Int32x4)
CAST_ACCESSOR(Int8x16)
CAST_ACCESSOR(JSArray)
CAST_ACCESSOR(JSArrayBuffer)
CAST_ACCESSOR(JSArrayBufferView)
CAST_ACCESSOR(JSBoundFunction)
CAST_ACCESSOR(JSDataView)
CAST_ACCESSOR(JSDate)
CAST_ACCESSOR(JSFunction)
CAST_ACCESSOR(JSGeneratorObject)
CAST_ACCESSOR(JSGlobalObject)
CAST_ACCESSOR(JSGlobalProxy)
CAST_ACCESSOR(JSMap)
CAST_ACCESSOR(JSMapIterator)
CAST_ACCESSOR(JSMessageObject)
CAST_ACCESSOR(JSModuleNamespace)
CAST_ACCESSOR(JSObject)
CAST_ACCESSOR(JSProxy)
CAST_ACCESSOR(JSReceiver)
CAST_ACCESSOR(JSRegExp)
CAST_ACCESSOR(JSPromise)
CAST_ACCESSOR(JSSet)
CAST_ACCESSOR(JSSetIterator)
CAST_ACCESSOR(JSStringIterator)
CAST_ACCESSOR(JSArrayIterator)
CAST_ACCESSOR(JSTypedArray)
CAST_ACCESSOR(JSValue)
CAST_ACCESSOR(JSWeakCollection)
CAST_ACCESSOR(JSWeakMap)
CAST_ACCESSOR(JSWeakSet)
CAST_ACCESSOR(LayoutDescriptor)
CAST_ACCESSOR(Map)
CAST_ACCESSOR(ModuleInfo)
CAST_ACCESSOR(Name)
CAST_ACCESSOR(NameDictionary)
CAST_ACCESSOR(NormalizedMapCache)
CAST_ACCESSOR(Object)
CAST_ACCESSOR(ObjectHashTable)
CAST_ACCESSOR(ObjectHashSet)
CAST_ACCESSOR(Oddball)
CAST_ACCESSOR(OrderedHashMap)
CAST_ACCESSOR(OrderedHashSet)
CAST_ACCESSOR(PropertyCell)
CAST_ACCESSOR(TemplateList)
CAST_ACCESSOR(RegExpMatchInfo)
CAST_ACCESSOR(ScopeInfo)
CAST_ACCESSOR(SeededNumberDictionary)
CAST_ACCESSOR(SeqOneByteString)
CAST_ACCESSOR(SeqString)
CAST_ACCESSOR(SeqTwoByteString)
CAST_ACCESSOR(SharedFunctionInfo)
CAST_ACCESSOR(Simd128Value)
CAST_ACCESSOR(SlicedString)
CAST_ACCESSOR(Smi)
CAST_ACCESSOR(String)
CAST_ACCESSOR(StringSet)
CAST_ACCESSOR(StringTable)
CAST_ACCESSOR(Struct)
CAST_ACCESSOR(Symbol)
CAST_ACCESSOR(TemplateInfo)
CAST_ACCESSOR(Uint16x8)
CAST_ACCESSOR(Uint32x4)
CAST_ACCESSOR(Uint8x16)
CAST_ACCESSOR(UnseededNumberDictionary)
CAST_ACCESSOR(WeakCell)
CAST_ACCESSOR(WeakFixedArray)
CAST_ACCESSOR(WeakHashTable)
#define MAKE_STRUCT_CAST(NAME, Name, name) CAST_ACCESSOR(Name)
STRUCT_LIST(MAKE_STRUCT_CAST)
#undef MAKE_STRUCT_CAST
#undef CAST_ACCESSOR
bool Object::HasValidElements() {
// Dictionary is covered under FixedArray.
return IsFixedArray() || IsFixedDoubleArray() || IsFixedTypedArrayBase();
}
bool Object::KeyEquals(Object* second) {
Object* first = this;
if (second->IsNumber()) {
if (first->IsNumber()) return first->Number() == second->Number();
Object* temp = first;
first = second;
second = temp;
}
if (first->IsNumber()) {
DCHECK_LE(0, first->Number());
uint32_t expected = static_cast<uint32_t>(first->Number());
uint32_t index;
return Name::cast(second)->AsArrayIndex(&index) && index == expected;
}
return Name::cast(first)->Equals(Name::cast(second));
}
bool Object::FilterKey(PropertyFilter filter) {
if (IsSymbol()) {
if (filter & SKIP_SYMBOLS) return true;
if (Symbol::cast(this)->is_private()) return true;
} else {
if (filter & SKIP_STRINGS) return true;
}
return false;
}
Handle<Object> Object::NewStorageFor(Isolate* isolate, Handle<Object> object,
Representation representation) {
if (representation.IsSmi() && object->IsUninitialized(isolate)) {
return handle(Smi::kZero, isolate);
}
if (!representation.IsDouble()) return object;
double value;
if (object->IsUninitialized(isolate)) {
value = 0;
} else if (object->IsMutableHeapNumber()) {
value = HeapNumber::cast(*object)->value();
} else {
value = object->Number();
}
return isolate->factory()->NewHeapNumber(value, MUTABLE);
}
Handle<Object> Object::WrapForRead(Isolate* isolate, Handle<Object> object,
Representation representation) {
DCHECK(!object->IsUninitialized(isolate));
if (!representation.IsDouble()) {
DCHECK(object->FitsRepresentation(representation));
return object;
}
return isolate->factory()->NewHeapNumber(HeapNumber::cast(*object)->value());
}
StringShape::StringShape(const String* str)
: type_(str->map()->instance_type()) {
set_valid();
DCHECK((type_ & kIsNotStringMask) == kStringTag);
}
StringShape::StringShape(Map* map) : type_(map->instance_type()) {
set_valid();
DCHECK((type_ & kIsNotStringMask) == kStringTag);
}
StringShape::StringShape(InstanceType t) : type_(static_cast<uint32_t>(t)) {
set_valid();
DCHECK((type_ & kIsNotStringMask) == kStringTag);
}
bool StringShape::IsInternalized() {
DCHECK(valid());
STATIC_ASSERT(kNotInternalizedTag != 0);
return (type_ & (kIsNotStringMask | kIsNotInternalizedMask)) ==
(kStringTag | kInternalizedTag);
}
bool String::IsOneByteRepresentation() const {
uint32_t type = map()->instance_type();
return (type & kStringEncodingMask) == kOneByteStringTag;
}
bool String::IsTwoByteRepresentation() const {
uint32_t type = map()->instance_type();
return (type & kStringEncodingMask) == kTwoByteStringTag;
}
bool String::IsOneByteRepresentationUnderneath() {
uint32_t type = map()->instance_type();
STATIC_ASSERT(kIsIndirectStringTag != 0);
STATIC_ASSERT((kIsIndirectStringMask & kStringEncodingMask) == 0);
DCHECK(IsFlat());
switch (type & (kIsIndirectStringMask | kStringEncodingMask)) {
case kOneByteStringTag:
return true;
case kTwoByteStringTag:
return false;
default: // Cons or sliced string. Need to go deeper.
return GetUnderlying()->IsOneByteRepresentation();
}
}
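// Example of the dispatch above: a flat cons string carries an indirect
// representation tag, so the switch falls through to the default case and
// asks the underlying string for its encoding instead.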
bool String::IsTwoByteRepresentationUnderneath() {
uint32_t type = map()->instance_type();
STATIC_ASSERT(kIsIndirectStringTag != 0);
STATIC_ASSERT((kIsIndirectStringMask & kStringEncodingMask) == 0);
DCHECK(IsFlat());
switch (type & (kIsIndirectStringMask | kStringEncodingMask)) {
case kOneByteStringTag:
return false;
case kTwoByteStringTag:
return true;
default: // Cons or sliced string. Need to go deeper.
return GetUnderlying()->IsTwoByteRepresentation();
}
}
bool String::HasOnlyOneByteChars() {
uint32_t type = map()->instance_type();
return (type & kOneByteDataHintMask) == kOneByteDataHintTag ||
IsOneByteRepresentation();
}
bool StringShape::IsCons() {
return (type_ & kStringRepresentationMask) == kConsStringTag;
}
bool StringShape::IsSliced() {
return (type_ & kStringRepresentationMask) == kSlicedStringTag;
}
bool StringShape::IsIndirect() {
return (type_ & kIsIndirectStringMask) == kIsIndirectStringTag;
}
bool StringShape::IsExternal() {
return (type_ & kStringRepresentationMask) == kExternalStringTag;
}
bool StringShape::IsSequential() {
return (type_ & kStringRepresentationMask) == kSeqStringTag;
}
StringRepresentationTag StringShape::representation_tag() {
uint32_t tag = (type_ & kStringRepresentationMask);
return static_cast<StringRepresentationTag>(tag);
}
uint32_t StringShape::encoding_tag() { return type_ & kStringEncodingMask; }
uint32_t StringShape::full_representation_tag() {
return (type_ & (kStringRepresentationMask | kStringEncodingMask));
}
STATIC_ASSERT((kStringRepresentationMask | kStringEncodingMask) ==
Internals::kFullStringRepresentationMask);
STATIC_ASSERT(static_cast<uint32_t>(kStringEncodingMask) ==
Internals::kStringEncodingMask);
bool StringShape::IsSequentialOneByte() {
return full_representation_tag() == (kSeqStringTag | kOneByteStringTag);
}
bool StringShape::IsSequentialTwoByte() {
return full_representation_tag() == (kSeqStringTag | kTwoByteStringTag);
}
bool StringShape::IsExternalOneByte() {
return full_representation_tag() == (kExternalStringTag | kOneByteStringTag);
}
STATIC_ASSERT((kExternalStringTag | kOneByteStringTag) ==
Internals::kExternalOneByteRepresentationTag);
STATIC_ASSERT(v8::String::ONE_BYTE_ENCODING == kOneByteStringTag);
bool StringShape::IsExternalTwoByte() {
return full_representation_tag() == (kExternalStringTag | kTwoByteStringTag);
}
STATIC_ASSERT((kExternalStringTag | kTwoByteStringTag) ==
Internals::kExternalTwoByteRepresentationTag);
STATIC_ASSERT(v8::String::TWO_BYTE_ENCODING == kTwoByteStringTag);
uc32 FlatStringReader::Get(int index) {
if (is_one_byte_) {
return Get<uint8_t>(index);
} else {
return Get<uc16>(index);
}
}
template <typename Char>
Char FlatStringReader::Get(int index) {
DCHECK_EQ(is_one_byte_, sizeof(Char) == 1);
DCHECK(0 <= index && index <= length_);
if (sizeof(Char) == 1) {
return static_cast<Char>(static_cast<const uint8_t*>(start_)[index]);
} else {
return static_cast<Char>(static_cast<const uc16*>(start_)[index]);
}
}
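// Usage sketch (hypothetical call site): given a FlatStringReader `reader`
// over a flattened string, reader.Get(0) dispatches on is_one_byte_ and
// reads either a uint8_t or a uc16 from the raw character data cached in
// start_.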
Handle<Object> StringTableShape::AsHandle(Isolate* isolate, HashTableKey* key) {
return key->AsHandle(isolate);
}
Handle<Object> CompilationCacheShape::AsHandle(Isolate* isolate,
HashTableKey* key) {
return key->AsHandle(isolate);
}
Handle<Object> CodeCacheHashTableShape::AsHandle(Isolate* isolate,
HashTableKey* key) {
return key->AsHandle(isolate);
}
template <typename Char>
class SequentialStringKey : public HashTableKey {
public:
explicit SequentialStringKey(Vector<const Char> string, uint32_t seed)
: string_(string), hash_field_(0), seed_(seed) {}
uint32_t Hash() override {
hash_field_ = StringHasher::HashSequentialString<Char>(
string_.start(), string_.length(), seed_);
uint32_t result = hash_field_ >> String::kHashShift;
DCHECK(result != 0); // Ensure that the hash value of 0 is never computed.
return result;
}
uint32_t HashForObject(Object* other) override {
return String::cast(other)->Hash();
}
Vector<const Char> string_;
uint32_t hash_field_;
uint32_t seed_;
};
class OneByteStringKey : public SequentialStringKey<uint8_t> {
public:
OneByteStringKey(Vector<const uint8_t> str, uint32_t seed)
: SequentialStringKey<uint8_t>(str, seed) {}
bool IsMatch(Object* string) override {
return String::cast(string)->IsOneByteEqualTo(string_);
}
Handle<Object> AsHandle(Isolate* isolate) override;
};
class SeqOneByteSubStringKey : public HashTableKey {
public:
SeqOneByteSubStringKey(Handle<SeqOneByteString> string, int from, int length)
: string_(string), from_(from), length_(length) {
DCHECK(string_->IsSeqOneByteString());
}
uint32_t Hash() override {
DCHECK(length_ >= 0);
DCHECK(from_ + length_ <= string_->length());
const uint8_t* chars = string_->GetChars() + from_;
hash_field_ = StringHasher::HashSequentialString(
chars, length_, string_->GetHeap()->HashSeed());
uint32_t result = hash_field_ >> String::kHashShift;
DCHECK(result != 0); // Ensure that the hash value of 0 is never computed.
return result;
}
uint32_t HashForObject(Object* other) override {
return String::cast(other)->Hash();
}
bool IsMatch(Object* string) override;
Handle<Object> AsHandle(Isolate* isolate) override;
private:
Handle<SeqOneByteString> string_;
int from_;
int length_;
uint32_t hash_field_;
};
class TwoByteStringKey : public SequentialStringKey<uc16> {
public:
explicit TwoByteStringKey(Vector<const uc16> str, uint32_t seed)
: SequentialStringKey<uc16>(str, seed) {}
bool IsMatch(Object* string) override {
return String::cast(string)->IsTwoByteEqualTo(string_);
}
Handle<Object> AsHandle(Isolate* isolate) override;
};
// Utf8StringKey carries a vector of chars as its key.
class Utf8StringKey : public HashTableKey {
public:
explicit Utf8StringKey(Vector<const char> string, uint32_t seed)
: string_(string), hash_field_(0), seed_(seed) {}
bool IsMatch(Object* string) override {
return String::cast(string)->IsUtf8EqualTo(string_);
}
uint32_t Hash() override {
if (hash_field_ != 0) return hash_field_ >> String::kHashShift;
hash_field_ = StringHasher::ComputeUtf8Hash(string_, seed_, &chars_);
uint32_t result = hash_field_ >> String::kHashShift;
DCHECK(result != 0); // Ensure that the hash value of 0 is never computed.
return result;
}
uint32_t HashForObject(Object* other) override {
return String::cast(other)->Hash();
}
Handle<Object> AsHandle(Isolate* isolate) override {
if (hash_field_ == 0) Hash();
return isolate->factory()->NewInternalizedStringFromUtf8(string_, chars_,
hash_field_);
}
Vector<const char> string_;
uint32_t hash_field_;
int chars_; // Caches the number of characters when computing the hash code.
uint32_t seed_;
};
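// All of the string keys above follow the same HashTableKey protocol: during
// lookup the table calls Hash() and IsMatch(), and AsHandle() is invoked only
// when the key must actually be materialized as an internalized string.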
Representation Object::OptimalRepresentation() {
if (!FLAG_track_fields) return Representation::Tagged();
if (IsSmi()) {
return Representation::Smi();
} else if (FLAG_track_double_fields && IsHeapNumber()) {
return Representation::Double();
} else if (FLAG_track_computed_fields &&
IsUninitialized(HeapObject::cast(this)->GetIsolate())) {
return Representation::None();
} else if (FLAG_track_heap_object_fields) {
DCHECK(IsHeapObject());
return Representation::HeapObject();
} else {
return Representation::Tagged();
}
}
ElementsKind Object::OptimalElementsKind() {
if (IsSmi()) return FAST_SMI_ELEMENTS;
if (IsNumber()) return FAST_DOUBLE_ELEMENTS;
return FAST_ELEMENTS;
}
bool Object::FitsRepresentation(Representation representation) {
if (FLAG_track_fields && representation.IsSmi()) {
return IsSmi();
} else if (FLAG_track_double_fields && representation.IsDouble()) {
return IsMutableHeapNumber() || IsNumber();
} else if (FLAG_track_heap_object_fields && representation.IsHeapObject()) {
return IsHeapObject();
} else if (FLAG_track_fields && representation.IsNone()) {
return false;
}
return true;
}
bool Object::ToUint32(uint32_t* value) {
if (IsSmi()) {
int num = Smi::cast(this)->value();
if (num < 0) return false;
*value = static_cast<uint32_t>(num);
return true;
}
if (IsHeapNumber()) {
double num = HeapNumber::cast(this)->value();
if (num < 0) return false;
uint32_t uint_value = FastD2UI(num);
if (FastUI2D(uint_value) == num) {
*value = uint_value;
return true;
}
}
return false;
}
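// Examples of the conversion above: Smi 42 succeeds with *value == 42; a
// HeapNumber holding 4294967295.0 succeeds because the FastD2UI/FastUI2D
// round-trip is exact; negative values fail the sign check, and non-integral
// values such as 1.5 fail because the round-trip cannot reproduce them.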
// static
MaybeHandle<JSReceiver> Object::ToObject(Isolate* isolate,
Handle<Object> object) {
if (object->IsJSReceiver()) return Handle<JSReceiver>::cast(object);
return ToObject(isolate, object, isolate->native_context());
}
// static
MaybeHandle<Name> Object::ToName(Isolate* isolate, Handle<Object> input) {
if (input->IsName()) return Handle<Name>::cast(input);
return ConvertToName(isolate, input);
}
// static
MaybeHandle<Object> Object::ToPropertyKey(Isolate* isolate,
Handle<Object> value) {
if (value->IsSmi() || HeapObject::cast(*value)->IsName()) return value;
return ConvertToPropertyKey(isolate, value);
}
// static
MaybeHandle<Object> Object::ToPrimitive(Handle<Object> input,
ToPrimitiveHint hint) {
if (input->IsPrimitive()) return input;
return JSReceiver::ToPrimitive(Handle<JSReceiver>::cast(input), hint);
}
// static
MaybeHandle<Object> Object::ToNumber(Handle<Object> input) {
if (input->IsNumber()) return input;
return ConvertToNumber(HeapObject::cast(*input)->GetIsolate(), input);
}
// static
MaybeHandle<Object> Object::ToInteger(Isolate* isolate, Handle<Object> input) {
if (input->IsSmi()) return input;
return ConvertToInteger(isolate, input);
}
// static
MaybeHandle<Object> Object::ToInt32(Isolate* isolate, Handle<Object> input) {
if (input->IsSmi()) return input;
return ConvertToInt32(isolate, input);
}
// static
MaybeHandle<Object> Object::ToUint32(Isolate* isolate, Handle<Object> input) {
if (input->IsSmi()) return handle(Smi::cast(*input)->ToUint32Smi(), isolate);
return ConvertToUint32(isolate, input);
}
// static
MaybeHandle<String> Object::ToString(Isolate* isolate, Handle<Object> input) {
if (input->IsString()) return Handle<String>::cast(input);
return ConvertToString(isolate, input);
}
// static
MaybeHandle<Object> Object::ToLength(Isolate* isolate, Handle<Object> input) {
if (input->IsSmi()) {
int value = std::max(Smi::cast(*input)->value(), 0);
return handle(Smi::FromInt(value), isolate);
}
return ConvertToLength(isolate, input);
}
// static
MaybeHandle<Object> Object::ToIndex(Isolate* isolate, Handle<Object> input,
MessageTemplate::Template error_index) {
if (input->IsSmi() && Smi::cast(*input)->value() >= 0) return input;
return ConvertToIndex(isolate, input, error_index);
}
bool Object::HasSpecificClassOf(String* name) {
return this->IsJSObject() && (JSObject::cast(this)->class_name() == name);
}
MaybeHandle<Object> Object::GetProperty(Handle<Object> object,
Handle<Name> name) {
LookupIterator it(object, name);
if (!it.IsFound()) return it.factory()->undefined_value();
return GetProperty(&it);
}
MaybeHandle<Object> JSReceiver::GetProperty(Handle<JSReceiver> receiver,
Handle<Name> name) {
LookupIterator it(receiver, name, receiver);
if (!it.IsFound()) return it.factory()->undefined_value();
return Object::GetProperty(&it);
}
MaybeHandle<Object> Object::GetElement(Isolate* isolate, Handle<Object> object,
uint32_t index) {
LookupIterator it(isolate, object, index);
if (!it.IsFound()) return it.factory()->undefined_value();
return GetProperty(&it);
}
MaybeHandle<Object> JSReceiver::GetElement(Isolate* isolate,
Handle<JSReceiver> receiver,
uint32_t index) {
LookupIterator it(isolate, receiver, index, receiver);
if (!it.IsFound()) return it.factory()->undefined_value();
return Object::GetProperty(&it);
}
Handle<Object> JSReceiver::GetDataProperty(Handle<JSReceiver> object,
Handle<Name> name) {
LookupIterator it(object, name, object,
LookupIterator::PROTOTYPE_CHAIN_SKIP_INTERCEPTOR);
if (!it.IsFound()) return it.factory()->undefined_value();
return GetDataProperty(&it);
}
MaybeHandle<Object> Object::SetElement(Isolate* isolate, Handle<Object> object,
uint32_t index, Handle<Object> value,
LanguageMode language_mode) {
LookupIterator it(isolate, object, index);
MAYBE_RETURN_NULL(
SetProperty(&it, value, language_mode, MAY_BE_STORE_FROM_KEYED));
return value;
}
MaybeHandle<Object> JSReceiver::GetPrototype(Isolate* isolate,
Handle<JSReceiver> receiver) {
// We don't expect access checks to be needed on JSProxy objects.
DCHECK(!receiver->IsAccessCheckNeeded() || receiver->IsJSObject());
PrototypeIterator iter(isolate, receiver, kStartAtReceiver,
PrototypeIterator::END_AT_NON_HIDDEN);
do {
if (!iter.AdvanceFollowingProxies()) return MaybeHandle<Object>();
} while (!iter.IsAtEnd());
return PrototypeIterator::GetCurrent(iter);
}
MaybeHandle<Object> JSReceiver::GetProperty(Isolate* isolate,
Handle<JSReceiver> receiver,
const char* name) {
Handle<String> str = isolate->factory()->InternalizeUtf8String(name);
return GetProperty(receiver, str);
}
// static
MUST_USE_RESULT MaybeHandle<FixedArray> JSReceiver::OwnPropertyKeys(
Handle<JSReceiver> object) {
return KeyAccumulator::GetKeys(object, KeyCollectionMode::kOwnOnly,
ALL_PROPERTIES,
GetKeysConversion::kConvertToString);
}
bool JSObject::PrototypeHasNoElements(Isolate* isolate, JSObject* object) {
DisallowHeapAllocation no_gc;
HeapObject* prototype = HeapObject::cast(object->map()->prototype());
HeapObject* null = isolate->heap()->null_value();
HeapObject* empty = isolate->heap()->empty_fixed_array();
while (prototype != null) {
Map* map = prototype->map();
if (map->instance_type() <= LAST_CUSTOM_ELEMENTS_RECEIVER) return false;
if (JSObject::cast(prototype)->elements() != empty) return false;
prototype = HeapObject::cast(map->prototype());
}
return true;
}
#define FIELD_ADDR(p, offset) \
(reinterpret_cast<byte*>(p) + offset - kHeapObjectTag)
#define FIELD_ADDR_CONST(p, offset) \
(reinterpret_cast<const byte*>(p) + offset - kHeapObjectTag)
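// These helpers encode V8's pointer tagging: a HeapObject* is the object's
// real address plus kHeapObjectTag (1), so the tag must be subtracted before
// the field offset is applied. E.g. for an object whose payload starts at
// 0x1000, `p` is 0x1001 and FIELD_ADDR(p, 8) is 0x1008.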
#define READ_FIELD(p, offset) \
(*reinterpret_cast<Object* const*>(FIELD_ADDR_CONST(p, offset)))
#define ACQUIRE_READ_FIELD(p, offset) \
reinterpret_cast<Object*>(base::Acquire_Load( \
reinterpret_cast<const base::AtomicWord*>(FIELD_ADDR_CONST(p, offset))))
#define NOBARRIER_READ_FIELD(p, offset) \
reinterpret_cast<Object*>(base::NoBarrier_Load( \
reinterpret_cast<const base::AtomicWord*>(FIELD_ADDR_CONST(p, offset))))
#define WRITE_FIELD(p, offset, value) \
(*reinterpret_cast<Object**>(FIELD_ADDR(p, offset)) = value)
#define RELEASE_WRITE_FIELD(p, offset, value) \
base::Release_Store( \
reinterpret_cast<base::AtomicWord*>(FIELD_ADDR(p, offset)), \
reinterpret_cast<base::AtomicWord>(value));
#define NOBARRIER_WRITE_FIELD(p, offset, value) \
base::NoBarrier_Store( \
reinterpret_cast<base::AtomicWord*>(FIELD_ADDR(p, offset)), \
reinterpret_cast<base::AtomicWord>(value));
#define WRITE_BARRIER(heap, object, offset, value) \
heap->incremental_marking()->RecordWrite( \
object, HeapObject::RawField(object, offset), value); \
heap->RecordWrite(object, offset, value);
#define FIXED_ARRAY_ELEMENTS_WRITE_BARRIER(heap, array, start, length) \
do { \
heap->RecordFixedArrayElements(array, start, length); \
heap->incremental_marking()->IterateBlackObject(array); \
} while (false)
#define CONDITIONAL_WRITE_BARRIER(heap, object, offset, value, mode) \
if (mode != SKIP_WRITE_BARRIER) { \
if (mode == UPDATE_WRITE_BARRIER) { \
heap->incremental_marking()->RecordWrite( \
object, HeapObject::RawField(object, offset), value); \
} \
heap->RecordWrite(object, offset, value); \
}
#define READ_DOUBLE_FIELD(p, offset) \
ReadDoubleValue(FIELD_ADDR_CONST(p, offset))
#define WRITE_DOUBLE_FIELD(p, offset, value) \
WriteDoubleValue(FIELD_ADDR(p, offset), value)
#define READ_INT_FIELD(p, offset) \
(*reinterpret_cast<const int*>(FIELD_ADDR_CONST(p, offset)))
#define WRITE_INT_FIELD(p, offset, value) \
(*reinterpret_cast<int*>(FIELD_ADDR(p, offset)) = value)
#define READ_INTPTR_FIELD(p, offset) \
(*reinterpret_cast<const intptr_t*>(FIELD_ADDR_CONST(p, offset)))
#define WRITE_INTPTR_FIELD(p, offset, value) \
(*reinterpret_cast<intptr_t*>(FIELD_ADDR(p, offset)) = value)
#define READ_UINT8_FIELD(p, offset) \
(*reinterpret_cast<const uint8_t*>(FIELD_ADDR_CONST(p, offset)))
#define WRITE_UINT8_FIELD(p, offset, value) \
(*reinterpret_cast<uint8_t*>(FIELD_ADDR(p, offset)) = value)
#define READ_INT8_FIELD(p, offset) \
(*reinterpret_cast<const int8_t*>(FIELD_ADDR_CONST(p, offset)))
#define WRITE_INT8_FIELD(p, offset, value) \
(*reinterpret_cast<int8_t*>(FIELD_ADDR(p, offset)) = value)
#define READ_UINT16_FIELD(p, offset) \
(*reinterpret_cast<const uint16_t*>(FIELD_ADDR_CONST(p, offset)))
#define WRITE_UINT16_FIELD(p, offset, value) \
(*reinterpret_cast<uint16_t*>(FIELD_ADDR(p, offset)) = value)
#define READ_INT16_FIELD(p, offset) \
(*reinterpret_cast<const int16_t*>(FIELD_ADDR_CONST(p, offset)))
#define WRITE_INT16_FIELD(p, offset, value) \
(*reinterpret_cast<int16_t*>(FIELD_ADDR(p, offset)) = value)
#define READ_UINT32_FIELD(p, offset) \
(*reinterpret_cast<const uint32_t*>(FIELD_ADDR_CONST(p, offset)))
#define WRITE_UINT32_FIELD(p, offset, value) \
(*reinterpret_cast<uint32_t*>(FIELD_ADDR(p, offset)) = value)
#define READ_INT32_FIELD(p, offset) \
(*reinterpret_cast<const int32_t*>(FIELD_ADDR_CONST(p, offset)))
#define WRITE_INT32_FIELD(p, offset, value) \
(*reinterpret_cast<int32_t*>(FIELD_ADDR(p, offset)) = value)
#define READ_FLOAT_FIELD(p, offset) \
(*reinterpret_cast<const float*>(FIELD_ADDR_CONST(p, offset)))
#define WRITE_FLOAT_FIELD(p, offset, value) \
(*reinterpret_cast<float*>(FIELD_ADDR(p, offset)) = value)
#define READ_UINT64_FIELD(p, offset) \
(*reinterpret_cast<const uint64_t*>(FIELD_ADDR_CONST(p, offset)))
#define WRITE_UINT64_FIELD(p, offset, value) \
(*reinterpret_cast<uint64_t*>(FIELD_ADDR(p, offset)) = value)
#define READ_INT64_FIELD(p, offset) \
(*reinterpret_cast<const int64_t*>(FIELD_ADDR_CONST(p, offset)))
#define WRITE_INT64_FIELD(p, offset, value) \
(*reinterpret_cast<int64_t*>(FIELD_ADDR(p, offset)) = value)
#define READ_BYTE_FIELD(p, offset) \
(*reinterpret_cast<const byte*>(FIELD_ADDR_CONST(p, offset)))
#define NOBARRIER_READ_BYTE_FIELD(p, offset) \
static_cast<byte>(base::NoBarrier_Load( \
reinterpret_cast<base::Atomic8*>(FIELD_ADDR(p, offset))))
#define WRITE_BYTE_FIELD(p, offset, value) \
(*reinterpret_cast<byte*>(FIELD_ADDR(p, offset)) = value)
#define NOBARRIER_WRITE_BYTE_FIELD(p, offset, value) \
base::NoBarrier_Store( \
reinterpret_cast<base::Atomic8*>(FIELD_ADDR(p, offset)), \
static_cast<base::Atomic8>(value));
Object** HeapObject::RawField(HeapObject* obj, int byte_offset) {
return reinterpret_cast<Object**>(FIELD_ADDR(obj, byte_offset));
}
MapWord MapWord::FromMap(const Map* map) {
return MapWord(reinterpret_cast<uintptr_t>(map));
}
Map* MapWord::ToMap() {
return reinterpret_cast<Map*>(value_);
}
bool MapWord::IsForwardingAddress() const {
return HAS_SMI_TAG(reinterpret_cast<Object*>(value_));
}
MapWord MapWord::FromForwardingAddress(HeapObject* object) {
Address raw = reinterpret_cast<Address>(object) - kHeapObjectTag;
return MapWord(reinterpret_cast<uintptr_t>(raw));
}
HeapObject* MapWord::ToForwardingAddress() {
DCHECK(IsForwardingAddress());
return HeapObject::FromAddress(reinterpret_cast<Address>(value_));
}
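// The forwarding-address encoding above reuses pointer tagging: storing the
// target address with kHeapObjectTag stripped leaves the low bit clear, so
// the map word looks like a Smi, which is exactly what IsForwardingAddress()
// tests via HAS_SMI_TAG. A genuine map pointer keeps its tag bit and never
// matches.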
#ifdef VERIFY_HEAP
void HeapObject::VerifyObjectField(int offset) {
VerifyPointer(READ_FIELD(this, offset));
}
void HeapObject::VerifySmiField(int offset) {
CHECK(READ_FIELD(this, offset)->IsSmi());
}
#endif
Heap* HeapObject::GetHeap() const {
Heap* heap = MemoryChunk::FromAddress(
reinterpret_cast<Address>(const_cast<HeapObject*>(this)))
->heap();
SLOW_DCHECK(heap != NULL);
return heap;
}
Isolate* HeapObject::GetIsolate() const {
return GetHeap()->isolate();
}
Map* HeapObject::map() const {
#ifdef DEBUG
// Clear mark potentially added by PathTracer.
uintptr_t raw_value =
map_word().ToRawValue() & ~static_cast<uintptr_t>(PathTracer::kMarkTag);
return MapWord::FromRawValue(raw_value).ToMap();
#else
return map_word().ToMap();
#endif
}
void HeapObject::set_map(Map* value) {
set_map_word(MapWord::FromMap(value));
if (value != NULL) {
// TODO(1600) We are passing NULL as a slot because maps can never be on an
// evacuation candidate.
value->GetHeap()->incremental_marking()->RecordWrite(this, NULL, value);
}
}
Map* HeapObject::synchronized_map() {
return synchronized_map_word().ToMap();
}
void HeapObject::synchronized_set_map(Map* value) {
synchronized_set_map_word(MapWord::FromMap(value));
if (value != NULL) {
// TODO(1600) We are passing NULL as a slot because maps can never be on an
// evacuation candidate.
value->GetHeap()->incremental_marking()->RecordWrite(this, NULL, value);
}
}
void HeapObject::synchronized_set_map_no_write_barrier(Map* value) {
synchronized_set_map_word(MapWord::FromMap(value));
}
// Unsafe accessor omitting write barrier.
void HeapObject::set_map_no_write_barrier(Map* value) {
set_map_word(MapWord::FromMap(value));
}
MapWord HeapObject::map_word() const {
return MapWord(
reinterpret_cast<uintptr_t>(NOBARRIER_READ_FIELD(this, kMapOffset)));
}
void HeapObject::set_map_word(MapWord map_word) {
NOBARRIER_WRITE_FIELD(
this, kMapOffset, reinterpret_cast<Object*>(map_word.value_));
}
MapWord HeapObject::synchronized_map_word() const {
return MapWord(
reinterpret_cast<uintptr_t>(ACQUIRE_READ_FIELD(this, kMapOffset)));
}
void HeapObject::synchronized_set_map_word(MapWord map_word) {
RELEASE_WRITE_FIELD(
this, kMapOffset, reinterpret_cast<Object*>(map_word.value_));
}
int HeapObject::Size() {
return SizeFromMap(map());
}
double HeapNumber::value() const {
return READ_DOUBLE_FIELD(this, kValueOffset);
}
void HeapNumber::set_value(double value) {
WRITE_DOUBLE_FIELD(this, kValueOffset, value);
}
int HeapNumber::get_exponent() {
return ((READ_INT_FIELD(this, kExponentOffset) & kExponentMask) >>
kExponentShift) - kExponentBias;
}
int HeapNumber::get_sign() {
return READ_INT_FIELD(this, kExponentOffset) & kSignMask;
}
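// Worked example, assuming IEEE 754 doubles: for 1.0 the biased exponent
// bits are 0x3FF, so get_exponent() returns 0x3FF - kExponentBias == 0; for
// -2.0 the exponent field is 0x400 (get_exponent() == 1) and the sign bit is
// set, so get_sign() is nonzero.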
bool Simd128Value::Equals(Simd128Value* that) {
// TODO(bmeurer): This doesn't match the SIMD.js specification, but it seems
// to be consistent with what the CompareICStub does, and what is tested in
// the current SIMD.js testsuite.
if (this == that) return true;
#define SIMD128_VALUE(TYPE, Type, type, lane_count, lane_type) \
if (this->Is##Type()) { \
if (!that->Is##Type()) return false; \
return Type::cast(this)->Equals(Type::cast(that)); \
}
SIMD128_TYPES(SIMD128_VALUE)
#undef SIMD128_VALUE
return false;
}
// static
bool Simd128Value::Equals(Handle<Simd128Value> one, Handle<Simd128Value> two) {
return one->Equals(*two);
}
#define SIMD128_VALUE_EQUALS(TYPE, Type, type, lane_count, lane_type) \
bool Type::Equals(Type* that) { \
for (int lane = 0; lane < lane_count; ++lane) { \
if (this->get_lane(lane) != that->get_lane(lane)) return false; \
} \
return true; \
}
SIMD128_TYPES(SIMD128_VALUE_EQUALS)
#undef SIMD128_VALUE_EQUALS
#if defined(V8_TARGET_LITTLE_ENDIAN)
#define SIMD128_READ_LANE(lane_type, lane_count, field_type, field_size) \
lane_type value = \
READ_##field_type##_FIELD(this, kValueOffset + lane * field_size);
#elif defined(V8_TARGET_BIG_ENDIAN)
#define SIMD128_READ_LANE(lane_type, lane_count, field_type, field_size) \
lane_type value = READ_##field_type##_FIELD( \
this, kValueOffset + (lane_count - lane - 1) * field_size);
#else
#error Unknown byte ordering
#endif
#if defined(V8_TARGET_LITTLE_ENDIAN)
#define SIMD128_WRITE_LANE(lane_count, field_type, field_size, value) \
WRITE_##field_type##_FIELD(this, kValueOffset + lane * field_size, value);
#elif defined(V8_TARGET_BIG_ENDIAN)
#define SIMD128_WRITE_LANE(lane_count, field_type, field_size, value) \
WRITE_##field_type##_FIELD( \
this, kValueOffset + (lane_count - lane - 1) * field_size, value);
#else
#error Unknown byte ordering
#endif
#define SIMD128_NUMERIC_LANE_FNS(type, lane_type, lane_count, field_type, \
field_size) \
lane_type type::get_lane(int lane) const { \
DCHECK(lane < lane_count && lane >= 0); \
SIMD128_READ_LANE(lane_type, lane_count, field_type, field_size) \
return value; \
} \
\
void type::set_lane(int lane, lane_type value) { \
DCHECK(lane < lane_count && lane >= 0); \
SIMD128_WRITE_LANE(lane_count, field_type, field_size, value) \
}
SIMD128_NUMERIC_LANE_FNS(Float32x4, float, 4, FLOAT, kFloatSize)
SIMD128_NUMERIC_LANE_FNS(Int32x4, int32_t, 4, INT32, kInt32Size)
SIMD128_NUMERIC_LANE_FNS(Uint32x4, uint32_t, 4, UINT32, kInt32Size)
SIMD128_NUMERIC_LANE_FNS(Int16x8, int16_t, 8, INT16, kShortSize)
SIMD128_NUMERIC_LANE_FNS(Uint16x8, uint16_t, 8, UINT16, kShortSize)
SIMD128_NUMERIC_LANE_FNS(Int8x16, int8_t, 16, INT8, kCharSize)
SIMD128_NUMERIC_LANE_FNS(Uint8x16, uint8_t, 16, UINT8, kCharSize)
#undef SIMD128_NUMERIC_LANE_FNS
#define SIMD128_BOOLEAN_LANE_FNS(type, lane_type, lane_count, field_type, \
field_size) \
bool type::get_lane(int lane) const { \
DCHECK(lane < lane_count && lane >= 0); \
SIMD128_READ_LANE(lane_type, lane_count, field_type, field_size) \
DCHECK(value == 0 || value == -1); \
return value != 0; \
} \
\
void type::set_lane(int lane, bool value) { \
DCHECK(lane < lane_count && lane >= 0); \
int32_t int_val = value ? -1 : 0; \
SIMD128_WRITE_LANE(lane_count, field_type, field_size, int_val) \
}
SIMD128_BOOLEAN_LANE_FNS(Bool32x4, int32_t, 4, INT32, kInt32Size)
SIMD128_BOOLEAN_LANE_FNS(Bool16x8, int16_t, 8, INT16, kShortSize)
SIMD128_BOOLEAN_LANE_FNS(Bool8x16, int8_t, 16, INT8, kCharSize)
#undef SIMD128_BOOLEAN_LANE_FNS
#undef SIMD128_READ_LANE
#undef SIMD128_WRITE_LANE
ACCESSORS(JSReceiver, properties, FixedArray, kPropertiesOffset)
Object** FixedArray::GetFirstElementAddress() {
return reinterpret_cast<Object**>(FIELD_ADDR(this, OffsetOfElementAt(0)));
}
bool FixedArray::ContainsOnlySmisOrHoles() {
Object* the_hole = GetHeap()->the_hole_value();
Object** current = GetFirstElementAddress();
for (int i = 0; i < length(); ++i) {
Object* candidate = *current++;
if (!candidate->IsSmi() && candidate != the_hole) return false;
}
return true;
}
FixedArrayBase* JSObject::elements() const {
Object* array = READ_FIELD(this, kElementsOffset);
return static_cast<FixedArrayBase*>(array);
}
void AllocationSite::Initialize() {
set_transition_info(Smi::kZero);
SetElementsKind(GetInitialFastElementsKind());
set_nested_site(Smi::kZero);
set_pretenure_data(0);
set_pretenure_create_count(0);
set_dependent_code(DependentCode::cast(GetHeap()->empty_fixed_array()),
SKIP_WRITE_BARRIER);
}
bool AllocationSite::IsZombie() { return pretenure_decision() == kZombie; }
bool AllocationSite::IsMaybeTenure() {
return pretenure_decision() == kMaybeTenure;
}
bool AllocationSite::PretenuringDecisionMade() {
return pretenure_decision() != kUndecided;
}
void AllocationSite::MarkZombie() {
DCHECK(!IsZombie());
Initialize();
set_pretenure_decision(kZombie);
}
ElementsKind AllocationSite::GetElementsKind() {
DCHECK(!SitePointsToLiteral());
int value = Smi::cast(transition_info())->value();
return ElementsKindBits::decode(value);
}
void AllocationSite::SetElementsKind(ElementsKind kind) {
int value = Smi::cast(transition_info())->value();
set_transition_info(Smi::FromInt(ElementsKindBits::update(value, kind)),
SKIP_WRITE_BARRIER);
}
bool AllocationSite::CanInlineCall() {
int value = Smi::cast(transition_info())->value();
return DoNotInlineBit::decode(value) == 0;
}
void AllocationSite::SetDoNotInlineCall() {
int value = Smi::cast(transition_info())->value();
set_transition_info(Smi::FromInt(DoNotInlineBit::update(value, true)),
SKIP_WRITE_BARRIER);
}
bool AllocationSite::SitePointsToLiteral() {
// If transition_info is a smi, then it represents an ElementsKind
// for a constructed array. Otherwise, it must be a boilerplate
// for an object or array literal.
return transition_info()->IsJSArray() || transition_info()->IsJSObject();
}
// Heuristic: We only need to create allocation site info if the boilerplate
// elements kind is the initial elements kind.
AllocationSiteMode AllocationSite::GetMode(
ElementsKind boilerplate_elements_kind) {
if (IsFastSmiElementsKind(boilerplate_elements_kind)) {
return TRACK_ALLOCATION_SITE;
}
return DONT_TRACK_ALLOCATION_SITE;
}
inline bool AllocationSite::CanTrack(InstanceType type) {
if (FLAG_allocation_site_pretenuring) {
return type == JS_ARRAY_TYPE ||
type == JS_OBJECT_TYPE ||
type < FIRST_NONSTRING_TYPE;
}
return type == JS_ARRAY_TYPE;
}
AllocationSite::PretenureDecision AllocationSite::pretenure_decision() {
int value = pretenure_data();
return PretenureDecisionBits::decode(value);
}
void AllocationSite::set_pretenure_decision(PretenureDecision decision) {
int value = pretenure_data();
set_pretenure_data(PretenureDecisionBits::update(value, decision));
}
bool AllocationSite::deopt_dependent_code() {
int value = pretenure_data();
return DeoptDependentCodeBit::decode(value);
}
void AllocationSite::set_deopt_dependent_code(bool deopt) {
int value = pretenure_data();
set_pretenure_data(DeoptDependentCodeBit::update(value, deopt));
}
int AllocationSite::memento_found_count() {
int value = pretenure_data();
return MementoFoundCountBits::decode(value);
}
inline void AllocationSite::set_memento_found_count(int count) {
int value = pretenure_data();
// Verify that we can count more mementos than we can possibly find in one
// new space collection.
DCHECK((GetHeap()->MaxSemiSpaceSize() /
(Heap::kMinObjectSizeInWords * kPointerSize +
AllocationMemento::kSize)) < MementoFoundCountBits::kMax);
DCHECK(count < MementoFoundCountBits::kMax);
set_pretenure_data(MementoFoundCountBits::update(value, count));
}
int AllocationSite::memento_create_count() { return pretenure_create_count(); }
void AllocationSite::set_memento_create_count(int count) {
set_pretenure_create_count(count);
}
bool AllocationSite::IncrementMementoFoundCount(int increment) {
if (IsZombie()) return false;
int value = memento_found_count();
set_memento_found_count(value + increment);
return memento_found_count() >= kPretenureMinimumCreated;
}
inline void AllocationSite::IncrementMementoCreateCount() {
DCHECK(FLAG_allocation_site_pretenuring);
int value = memento_create_count();
set_memento_create_count(value + 1);
}
inline bool AllocationSite::MakePretenureDecision(
PretenureDecision current_decision,
double ratio,
bool maximum_size_scavenge) {
// Here we just allow state transitions from undecided or maybe tenure
// to don't tenure, maybe tenure, or tenure.
if ((current_decision == kUndecided || current_decision == kMaybeTenure)) {
if (ratio >= kPretenureRatio) {
// We just transition into tenure state when the semi-space was at
// maximum capacity.
if (maximum_size_scavenge) {
set_deopt_dependent_code(true);
set_pretenure_decision(kTenure);
// Currently we just need to deopt when we make a state transition to
// tenure.
return true;
}
set_pretenure_decision(kMaybeTenure);
} else {
set_pretenure_decision(kDontTenure);
}
}
return false;
}
inline bool AllocationSite::DigestPretenuringFeedback(
bool maximum_size_scavenge) {
bool deopt = false;
int create_count = memento_create_count();
int found_count = memento_found_count();
bool minimum_mementos_created = create_count >= kPretenureMinimumCreated;
double ratio =
minimum_mementos_created || FLAG_trace_pretenuring_statistics ?
static_cast<double>(found_count) / create_count : 0.0;
PretenureDecision current_decision = pretenure_decision();
if (minimum_mementos_created) {
deopt = MakePretenureDecision(
current_decision, ratio, maximum_size_scavenge);
}
if (FLAG_trace_pretenuring_statistics) {
PrintIsolate(GetIsolate(),
"pretenuring: AllocationSite(%p): (created, found, ratio) "
"(%d, %d, %f) %s => %s\n",
static_cast<void*>(this), create_count, found_count, ratio,
PretenureDecisionName(current_decision),
PretenureDecisionName(pretenure_decision()));
}
// Clear feedback calculation fields until the next gc.
set_memento_found_count(0);
set_memento_create_count(0);
return deopt;
}
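// Worked example of the feedback digestion above (numbers are illustrative):
// with create_count == 100 and found_count == 90, ratio == 0.9. If that
// meets kPretenureRatio and the scavenge ran at maximum semi-space capacity,
// MakePretenureDecision() switches the site to kTenure and returns true,
// which the caller uses to trigger deoptimization.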
bool AllocationMemento::IsValid() {
return allocation_site()->IsAllocationSite() &&
!AllocationSite::cast(allocation_site())->IsZombie();
}
AllocationSite* AllocationMemento::GetAllocationSite() {
DCHECK(IsValid());
return AllocationSite::cast(allocation_site());
}
Address AllocationMemento::GetAllocationSiteUnchecked() {
return reinterpret_cast<Address>(allocation_site());
}
void JSObject::EnsureCanContainHeapObjectElements(Handle<JSObject> object) {
JSObject::ValidateElements(object);
ElementsKind elements_kind = object->map()->elements_kind();
if (!IsFastObjectElementsKind(elements_kind)) {
if (IsFastHoleyElementsKind(elements_kind)) {
TransitionElementsKind(object, FAST_HOLEY_ELEMENTS);
} else {
TransitionElementsKind(object, FAST_ELEMENTS);
}
}
}
void JSObject::EnsureCanContainElements(Handle<JSObject> object,
Object** objects,
uint32_t count,
EnsureElementsMode mode) {
ElementsKind current_kind = object->GetElementsKind();
ElementsKind target_kind = current_kind;
{
DisallowHeapAllocation no_allocation;
DCHECK(mode != ALLOW_COPIED_DOUBLE_ELEMENTS);
bool is_holey = IsFastHoleyElementsKind(current_kind);
if (current_kind == FAST_HOLEY_ELEMENTS) return;
Object* the_hole = object->GetHeap()->the_hole_value();
for (uint32_t i = 0; i < count; ++i) {
Object* current = *objects++;
if (current == the_hole) {
is_holey = true;
target_kind = GetHoleyElementsKind(target_kind);
} else if (!current->IsSmi()) {
if (mode == ALLOW_CONVERTED_DOUBLE_ELEMENTS && current->IsNumber()) {
if (IsFastSmiElementsKind(target_kind)) {
if (is_holey) {
target_kind = FAST_HOLEY_DOUBLE_ELEMENTS;
} else {
target_kind = FAST_DOUBLE_ELEMENTS;
}
}
} else if (is_holey) {
target_kind = FAST_HOLEY_ELEMENTS;
break;
} else {
target_kind = FAST_ELEMENTS;
}
}
}
}
if (target_kind != current_kind) {
TransitionElementsKind(object, target_kind);
}
}
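// Worked example for the loop above (assumed input): starting from
// FAST_SMI_ELEMENTS with mode == ALLOW_CONVERTED_DOUBLE_ELEMENTS, scanning
// {Smi(1), the_hole, HeapNumber(2.5)} first sets is_holey and a holey target
// kind at the hole, then upgrades the target to FAST_HOLEY_DOUBLE_ELEMENTS
// at the number, so a single transition is performed after the scan.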
void JSObject::EnsureCanContainElements(Handle<JSObject> object,
Handle<FixedArrayBase> elements,
uint32_t length,
EnsureElementsMode mode) {
Heap* heap = object->GetHeap();
if (elements->map() != heap->fixed_double_array_map()) {
DCHECK(elements->map() == heap->fixed_array_map() ||
elements->map() == heap->fixed_cow_array_map());
if (mode == ALLOW_COPIED_DOUBLE_ELEMENTS) {
mode = DONT_ALLOW_DOUBLE_ELEMENTS;
}
Object** objects =
Handle<FixedArray>::cast(elements)->GetFirstElementAddress();
EnsureCanContainElements(object, objects, length, mode);
return;
}
DCHECK(mode == ALLOW_COPIED_DOUBLE_ELEMENTS);
if (object->GetElementsKind() == FAST_HOLEY_SMI_ELEMENTS) {
TransitionElementsKind(object, FAST_HOLEY_DOUBLE_ELEMENTS);
} else if (object->GetElementsKind() == FAST_SMI_ELEMENTS) {
Handle<FixedDoubleArray> double_array =
Handle<FixedDoubleArray>::cast(elements);
for (uint32_t i = 0; i < length; ++i) {
if (double_array->is_the_hole(i)) {
TransitionElementsKind(object, FAST_HOLEY_DOUBLE_ELEMENTS);
return;
}
}
TransitionElementsKind(object, FAST_DOUBLE_ELEMENTS);
}
}
void JSObject::SetMapAndElements(Handle<JSObject> object,
Handle<Map> new_map,
Handle<FixedArrayBase> value) {
JSObject::MigrateToMap(object, new_map);
DCHECK((object->map()->has_fast_smi_or_object_elements() ||
(*value == object->GetHeap()->empty_fixed_array()) ||
object->map()->has_fast_string_wrapper_elements()) ==
(value->map() == object->GetHeap()->fixed_array_map() ||
value->map() == object->GetHeap()->fixed_cow_array_map()));
DCHECK((*value == object->GetHeap()->empty_fixed_array()) ||
(object->map()->has_fast_double_elements() ==
value->IsFixedDoubleArray()));
object->set_elements(*value);
}
void JSObject::set_elements(FixedArrayBase* value, WriteBarrierMode mode) {
WRITE_FIELD(this, kElementsOffset, value);
CONDITIONAL_WRITE_BARRIER(GetHeap(), this, kElementsOffset, value, mode);
}
void JSObject::initialize_elements() {
FixedArrayBase* elements = map()->GetInitialElements();
WRITE_FIELD(this, kElementsOffset, elements);
}
InterceptorInfo* JSObject::GetIndexedInterceptor() {
return map()->GetIndexedInterceptor();
}
InterceptorInfo* JSObject::GetNamedInterceptor() {
return map()->GetNamedInterceptor();
}
InterceptorInfo* Map::GetNamedInterceptor() {
DCHECK(has_named_interceptor());
JSFunction* constructor = JSFunction::cast(GetConstructor());
DCHECK(constructor->shared()->IsApiFunction());
return InterceptorInfo::cast(
constructor->shared()->get_api_func_data()->named_property_handler());
}
InterceptorInfo* Map::GetIndexedInterceptor() {
DCHECK(has_indexed_interceptor());
JSFunction* constructor = JSFunction::cast(GetConstructor());
DCHECK(constructor->shared()->IsApiFunction());
return InterceptorInfo::cast(
constructor->shared()->get_api_func_data()->indexed_property_handler());
}
double Oddball::to_number_raw() const {
return READ_DOUBLE_FIELD(this, kToNumberRawOffset);
}
void Oddball::set_to_number_raw(double value) {
WRITE_DOUBLE_FIELD(this, kToNumberRawOffset, value);
}
ACCESSORS(Oddball, to_string, String, kToStringOffset)
ACCESSORS(Oddball, to_number, Object, kToNumberOffset)
ACCESSORS(Oddball, type_of, String, kTypeOfOffset)
byte Oddball::kind() const {
return Smi::cast(READ_FIELD(this, kKindOffset))->value();
}
void Oddball::set_kind(byte value) {
WRITE_FIELD(this, kKindOffset, Smi::FromInt(value));
}
// static
Handle<Object> Oddball::ToNumber(Handle<Oddball> input) {
return handle(input->to_number(), input->GetIsolate());
}
ACCESSORS(Cell, value, Object, kValueOffset)
ACCESSORS(PropertyCell, dependent_code, DependentCode, kDependentCodeOffset)
ACCESSORS(PropertyCell, property_details_raw, Object, kDetailsOffset)
ACCESSORS(PropertyCell, value, Object, kValueOffset)
PropertyDetails PropertyCell::property_details() {
return PropertyDetails(Smi::cast(property_details_raw()));
}
void PropertyCell::set_property_details(PropertyDetails details) {
set_property_details_raw(details.AsSmi());
}
Object* WeakCell::value() const { return READ_FIELD(this, kValueOffset); }
void WeakCell::clear() {
// Either the garbage collector is clearing the cell or we are simply
// initializing the root empty weak cell.
DCHECK(GetHeap()->gc_state() == Heap::MARK_COMPACT ||
this == GetHeap()->empty_weak_cell());
WRITE_FIELD(this, kValueOffset, Smi::kZero);
}
void WeakCell::initialize(HeapObject* val) {
WRITE_FIELD(this, kValueOffset, val);
  // Only the generational write barrier is needed here: we never mark
  // through a weak cell, and evacuation candidates are recorded when all
  // weak cells are processed.
WriteBarrierMode mode = Marking::IsBlack(ObjectMarking::MarkBitFrom(this))
? UPDATE_WRITE_BARRIER
: UPDATE_WEAK_WRITE_BARRIER;
CONDITIONAL_WRITE_BARRIER(GetHeap(), this, kValueOffset, val, mode);
}
bool WeakCell::cleared() const { return value() == Smi::kZero; }
Object* WeakCell::next() const { return READ_FIELD(this, kNextOffset); }
void WeakCell::set_next(Object* val, WriteBarrierMode mode) {
WRITE_FIELD(this, kNextOffset, val);
if (mode == UPDATE_WRITE_BARRIER) {
WRITE_BARRIER(GetHeap(), this, kNextOffset, val);
}
}
void WeakCell::clear_next(Object* the_hole_value) {
DCHECK_EQ(GetHeap()->the_hole_value(), the_hole_value);
set_next(the_hole_value, SKIP_WRITE_BARRIER);
}
bool WeakCell::next_cleared() { return next()->IsTheHole(GetIsolate()); }
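// WeakCell lifecycle sketch: initialize(obj) stores the weak reference; if
// |obj| dies, the mark-compact collector calls clear(), after which
// cleared() is true and value() is Smi::kZero.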
int JSObject::GetHeaderSize() { return GetHeaderSize(map()->instance_type()); }
int JSObject::GetHeaderSize(InstanceType type) {
// Check for the most common kind of JavaScript object before
// falling into the generic switch. This speeds up the internal
// field operations considerably on average.
if (type == JS_OBJECT_TYPE) return JSObject::kHeaderSize;
switch (type) {
case JS_API_OBJECT_TYPE:
case JS_SPECIAL_API_OBJECT_TYPE:
return JSObject::kHeaderSize;
case JS_GENERATOR_OBJECT_TYPE:
return JSGeneratorObject::kSize;
case JS_GLOBAL_PROXY_TYPE:
return JSGlobalProxy::kSize;
case JS_GLOBAL_OBJECT_TYPE:
return JSGlobalObject::kSize;
case JS_BOUND_FUNCTION_TYPE:
return JSBoundFunction::kSize;
case JS_FUNCTION_TYPE:
return JSFunction::kSize;
case JS_VALUE_TYPE:
return JSValue::kSize;
case JS_DATE_TYPE:
return JSDate::kSize;
case JS_ARRAY_TYPE:
return JSArray::kSize;
case JS_ARRAY_BUFFER_TYPE:
return JSArrayBuffer::kSize;
case JS_TYPED_ARRAY_TYPE:
return JSTypedArray::kSize;
case JS_DATA_VIEW_TYPE:
return JSDataView::kSize;
case JS_SET_TYPE:
return JSSet::kSize;
case JS_MAP_TYPE:
return JSMap::kSize;
case JS_SET_ITERATOR_TYPE:
return JSSetIterator::kSize;
case JS_MAP_ITERATOR_TYPE:
return JSMapIterator::kSize;
case JS_WEAK_MAP_TYPE:
return JSWeakMap::kSize;
case JS_WEAK_SET_TYPE:
return JSWeakSet::kSize;
case JS_PROMISE_TYPE:
return JSPromise::kSize;
case JS_REGEXP_TYPE:
return JSRegExp::kSize;
case JS_CONTEXT_EXTENSION_OBJECT_TYPE:
return JSObject::kHeaderSize;
case JS_MESSAGE_OBJECT_TYPE:
return JSMessageObject::kSize;
case JS_ARGUMENTS_TYPE:
return JSArgumentsObject::kHeaderSize;
case JS_ERROR_TYPE:
return JSObject::kHeaderSize;
case JS_STRING_ITERATOR_TYPE:
return JSStringIterator::kSize;
case JS_MODULE_NAMESPACE_TYPE:
return JSModuleNamespace::kSize;
default:
if (type >= FIRST_ARRAY_ITERATOR_TYPE &&
type <= LAST_ARRAY_ITERATOR_TYPE) {
return JSArrayIterator::kSize;
}
UNREACHABLE();
return 0;
}
}
int JSObject::GetInternalFieldCount(Map* map) {
int instance_size = map->instance_size();
if (instance_size == kVariableSizeSentinel) return 0;
InstanceType instance_type = map->instance_type();
return ((instance_size - GetHeaderSize(instance_type)) >> kPointerSizeLog2) -
map->GetInObjectProperties();
}
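// Layout arithmetic with assumed example numbers: for a 64-byte instance
// with an 8-byte pointer size, a 24-byte header and 2 in-object properties,
// the count is (64 - 24) / 8 - 2 = 3 internal fields.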
int JSObject::GetInternalFieldCount() { return GetInternalFieldCount(map()); }
int JSObject::GetInternalFieldOffset(int index) {
DCHECK(index < GetInternalFieldCount() && index >= 0);
return GetHeaderSize() + (kPointerSize * index);
}
Object* JSObject::GetInternalField(int index) {
DCHECK(index < GetInternalFieldCount() && index >= 0);
  // Internal fields are stored immediately after the header, whereas
  // in-object properties are stored at the end of the object. Therefore
  // there is no need to adjust the index here.
return READ_FIELD(this, GetHeaderSize() + (kPointerSize * index));
}
void JSObject::SetInternalField(int index, Object* value) {
DCHECK(index < GetInternalFieldCount() && index >= 0);
  // Internal fields are stored immediately after the header, whereas
  // in-object properties are stored at the end of the object. Therefore
  // there is no need to adjust the index here.
int offset = GetHeaderSize() + (kPointerSize * index);
WRITE_FIELD(this, offset, value);
WRITE_BARRIER(GetHeap(), this, offset, value);
}
void JSObject::SetInternalField(int index, Smi* value) {
DCHECK(index < GetInternalFieldCount() && index >= 0);
  // Internal fields are stored immediately after the header, whereas
  // in-object properties are stored at the end of the object. Therefore
  // there is no need to adjust the index here.
int offset = GetHeaderSize() + (kPointerSize * index);
WRITE_FIELD(this, offset, value);
}
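// The three accessors above rely on this layout (pointer-sized slots):
//
//   [ header | internal field 0 .. N-1 | in-object properties ]
//
// so GetHeaderSize() + kPointerSize * index addresses internal field |index|
// directly. The Smi overload may skip the write barrier because Smis are
// immediates, never heap-allocated.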
bool JSObject::IsUnboxedDoubleField(FieldIndex index) {
if (!FLAG_unbox_double_fields) return false;
return map()->IsUnboxedDoubleField(index);
}
bool Map::IsUnboxedDoubleField(FieldIndex index) {
if (!FLAG_unbox_double_fields) return false;
if (index.is_hidden_field() || !index.is_inobject()) return false;
return !layout_descriptor()->IsTagged(index.property_index());
}
// Access fast-case object properties at index. These routines are needed to
// correctly distinguish between properties stored in-object and properties
// stored in the properties array.
Object* JSObject::RawFastPropertyAt(FieldIndex index) {
DCHECK(!IsUnboxedDoubleField(index));
if (index.is_inobject()) {
return READ_FIELD(this, index.offset());
} else {
return properties()->get(index.outobject_array_index());
}
}
double JSObject::RawFastDoublePropertyAt(FieldIndex index) {
DCHECK(IsUnboxedDoubleField(index));
return READ_DOUBLE_FIELD(this, index.offset());
}
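// Read-side sketch for a double-representation field (|object| and |index|
// are hypothetical; unboxing is gated on FLAG_unbox_double_fields):
//
//   double d = object->IsUnboxedDoubleField(index)
//                  ? object->RawFastDoublePropertyAt(index)
//                  : HeapNumber::cast(object->RawFastPropertyAt(index))
//                        ->value();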
void JSObject::RawFastPropertyAtPut(FieldIndex index, Object* value) {
if (index.is_inobject()) {
int offset = index.offset();
WRITE_FIELD(this, offset, value);
WRITE_BARRIER(GetHeap(), this, offset, value);
} else {
properties()->set(index.outobject_array_index(), value);
}
}
void JSObject::RawFastDoublePropertyAtPut(FieldIndex index, double value) {
WRITE_DOUBLE_FIELD(this, index.offset(), value);
}
void JSObject::FastPropertyAtPut(FieldIndex index, Object* value) {
if (IsUnboxedDoubleField(index)) {
DCHECK(value->IsMutableHeapNumber());
RawFastDoublePropertyAtPut(index, HeapNumber::cast(value)->value());
} else {
RawFastPropertyAtPut(index, value);
}
}
void JSObject::WriteToField(int descriptor, PropertyDetails details,
Object* value) {
DCHECK(details.type() == DATA);
DisallowHeapAllocation no_gc;
FieldIndex index = FieldIndex::ForDescriptor(map(), descriptor);
if (details.representation().IsDouble()) {
    // An uninitialized value marks a field that has not been set yet; in
    // that case there is nothing more to be done.
if (value->IsUninitialized(this->GetIsolate())) {
return;
}
if (IsUnboxedDoubleField(index)) {
RawFastDoublePropertyAtPut(index, value->Number());
} else {
HeapNumber* box = HeapNumber::cast(RawFastPropertyAt(index));
DCHECK(box->IsMutableHeapNumber());
box->set_value(value->Number());
}
} else {
RawFastPropertyAtPut(index, value);
}
}
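// Note on the double path above: a boxed double is a mutable HeapNumber that
// is updated in place, so no allocation occurs and the function can run
// under DisallowHeapAllocation.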
void JSObject::WriteToField(int descriptor, Object* value) {
DescriptorArray* desc = map()->instance_descriptors();
PropertyDetails details = desc->GetDetails(descriptor);
WriteToField(descriptor, details, value);
}
int JSObject::GetInObjectPropertyOffset(int index) {
return map()->GetInObjectPropertyOffset(index);
}
Object* JSObject::InObjectPropertyAt(int index) {
int offset = GetInObjectPropertyOffset(index);
return READ_FIELD(this, offset);
}
Object* JSObject::InObjectPropertyAtPut(int index,
Object* value,
WriteBarrierMode mode) {
  // GetInObjectPropertyOffset adjusts for the number of properties stored in
  // the object.
int offset = GetInObjectPropertyOffset(index);
WRITE_FIELD(this, offset, value);
CONDITIONAL_WRITE_BARRIER(GetHeap(), this, offset, value, mode);
return value;
}
void JSObject::InitializeBody(Map* map, int start_offset,
Object* pre_allocated_value,
Object* filler_value) {
DCHECK(!filler_value->IsHeapObject() ||
!GetHeap()->InNewSpace(filler_value));
DCHECK(!pre_allocated_value->IsHeapObject() ||
!GetHeap()->InNewSpace(pre_allocated_value));
int size = map->instance_size();
int offset = start_offset;
if (filler_value != pre_allocated_value) {
int end_of_pre_allocated_offset =
size - (map->unused_property_fields() * kPointerSize);
DCHECK_LE(kHeaderSize, end_of_pre_allocated_offset);
while (offset < end_of_pre_allocated_offset) {
WRITE_FIELD(this, offset, pre_allocated_value);
offset += kPointerSize;
}
}
while (offset < size) {
WRITE_FIELD(this, offset, filler_value);
offset += kPointerSize;
}
}
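// Initialization sketch with assumed numbers: for a 64-byte instance with 2
// unused 8-byte property fields, pre-allocated fields are filled with
// |pre_allocated_value| up to offset 64 - 2 * 8 = 48 and the remainder with
// |filler_value|; when both values are equal, a single pass fills
// everything.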
bool Map::TooManyFastProperties(StoreFromKeyed store_mode) {
if (unused_property_fields() != 0) return false;
if (is_prototype_map()) return false;
int minimum = store_mode == CERTAINLY_NOT_STORE_FROM_KEYED ? 128 : 12;
int limit = Max(minimum, GetInObjectProperties());
int external = NumberOfFields() - GetInObjectProperties();
return external > limit;
}
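// Example with assumed numbers: for a map with 4 in-object properties, a
// store that is certainly not from a keyed access tolerates
// Max(128, 4) = 128 out-of-object fields, while any other store tolerates
// only Max(12, 4) = 12 before this predicate returns true.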
void Struct::InitializeBody(int object_size) {
Object* value = GetHeap()->undefined_value();
for (int offset = kHeaderSize; offset < object_size; offset += kPointerSize) {
WRITE_FIELD(this, offset, value);
}
}
bool Object::ToArrayLength(uint32_t* index) { return Object::ToUint32(index); }
bool Object::ToArrayIndex(uint32_t* index) {
return Object::ToUint32(index) && *index != kMaxUInt32;
}
void Object::VerifyApiCallResultType() {
#if DEBUG
if (IsSmi()) return;
DCHECK(IsHeapObject());
Isolate* isolate = HeapObject::cast(this)->GetIsolate();
if (!(IsString() || IsSymbol() || IsJSReceiver() || IsHeapNumber() ||
IsSimd128Value() || IsUndefined(isolate) || IsTrue(isolate) ||
IsFalse(isolate) || IsNull(isolate))) {
FATAL("API call returned invalid object");
}
#endif // DEBUG
}
Object* FixedArray::get(int index) const {
SLOW_DCHECK(index >= 0 && index < this->length());
return NOBARRIER_READ_FIELD(this, kHeaderSize + index * kPointerSize);
}
Handle<Object> FixedArray::get(FixedArray* array, int index, Isolate* isolate) {
return handle(array->get(index), isolate);
}
template <class T>
MaybeHandle<T> FixedArray::GetValue(Isolate* isolate, int index) const {
Object* obj = get(index);
if (obj->IsUndefined(isolate)) return MaybeHandle<T>();
return Handle<T>(T::cast(obj), isolate);
}
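// Usage sketch (the element type and index are assumed for illustration):
//
//   Handle<FixedArray> array = ...;
//   MaybeHandle<Code> maybe = array->GetValue<Code>(isolate, 0);
//   Handle<Code> code;
//   if (maybe.ToHandle(&code)) { /* slot 0 was not undefined */ }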
template <class T>
Handle<T> FixedArray::GetValueChecked(Isolate* isolate, int index) const {
Object* obj = get(index);
CHECK(!obj->IsUndefined(isolate));
return Handle<T>(T::cast(obj), isolate);
}
bool FixedArray::is_the_hole(Isolate* isolate, int index) {
return get(index)->IsTheHole(isolate);
}
void FixedArray::set(int index, Smi* value) {
  DCHECK_NE(GetHeap()->fixed_cow_array_map(), map());
  DCHECK_GE(index, 0);
  DCHECK_LT(index, this->length());
DCHECK(reinterpret_cast<Object*>(value)->IsSmi());
int offset = kHeaderSize + index * kPointerSize;
NOBARRIER_WRITE_FIELD(this, offset, value);
}
void FixedArray::set(int index, Object* value) {
DCHECK_NE(GetHeap()->fixed_cow_array_map(), map());
DCHECK(IsFixedArray());
DCHECK_GE(index, 0);
DCHECK_LT(index, this->length());
int offset = kHeaderSize + index * kPointerSize;
NOBARRIER_WRITE_FIELD(this, offset, value);
WRITE_BARRIER(GetHeap(), this, offset, value);
}
double FixedDoubleArray::get_scalar(int index) {
DCHECK(map() != GetHeap()->fixed_cow_array_map() &&
map() != GetHeap()->fixed_array_map());
DCHECK(index >= 0 && index < this->length());
DCHECK(!is_the_hole(index));
return READ_DOUBLE_FIELD(this, kHeaderSize + index * kDoubleSize);
}
uint64_t FixedDoubleArray::get_representation(int index) {
DCHECK(map() != GetHeap()->fixed_cow_array_map() &&
map() != GetHeap()->fixed_array_map());
DCHECK(index >= 0 && index < this->length());
int offset = kHeaderSize + index * kDoubleSize;
return READ_UINT64_FIELD(this, offset);
}
Handle<Object> FixedDoubleArray::get(FixedDoubleArray* array, int index,
Isolate* isolate) {
if (array->is_the_hole(index)) {
return isolate->factory()->the_hole_value();
} else {
return isolate->factory()->NewNumber(array->get_scalar(index));
}
}
void FixedDoubleArray::set(int index, double value) {
DCHECK(map() != GetHeap()->fixed_cow_array_map() &&
map() != GetHeap()->fixed_array_map());
int offset = kHeaderSize + index * kDoubleSize;
if (std::isnan(value)) {
WRITE_DOUBLE_FIELD(this, offset, std::numeric_limits<double>::quiet_NaN());
} else {
WRITE_DOUBLE_FIELD(this, offset, value);
}
DCHECK(!is_the_hole(index));
}
void FixedDoubleArray::set_the_hole(Isolate* isolate, int index) {
set_the_hole(index);
}
void FixedDoubleArray::set_the_hole(int index) {
DCHECK(map() != GetHeap()->fixed_cow_array_map() &&
map() != GetHeap()->fixed_array_map());
int offset = kHeaderSize + index * kDoubleSize;
WRITE_UINT64_FIELD(this, offset, kHoleNanInt64);
}
bool FixedDoubleArray::is_the_hole(Isolate* isolate, int index) {
return is_the_hole(index);
}
bool FixedDoubleArray::is_the_hole(int index) {
return get_representation(index) == kHoleNanInt64;
}
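// The hole is the distinguished NaN bit pattern kHoleNanInt64, which is why
// set() above canonicalizes every incoming NaN to a quiet NaN: a
// user-provided NaN must never alias the hole. Sketch:
//
//   array->set(0, std::numeric_limits<double>::signaling_NaN());
//   DCHECK(!array->is_the_hole(0));  // canonicalized, not the hole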