// Copyright 2013 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "src/deoptimizer.h"
#include <memory>
#include "src/accessors.h"
#include "src/assembler-inl.h"
#include "src/ast/prettyprinter.h"
#include "src/codegen.h"
#include "src/disasm.h"
#include "src/frames-inl.h"
#include "src/full-codegen/full-codegen.h"
#include "src/global-handles.h"
#include "src/interpreter/interpreter.h"
#include "src/macro-assembler.h"
#include "src/objects/debug-objects-inl.h"
#include "src/tracing/trace-event.h"
#include "src/v8.h"
namespace v8 {
namespace internal {
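// Allocates a single executable memory chunk large enough to hold the
// deoptimization entry table for one bailout type.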
static MemoryChunk* AllocateCodeChunk(MemoryAllocator* allocator) {
return allocator->AllocateChunk(Deoptimizer::GetMaxDeoptTableSize(),
MemoryAllocator::GetCommitPageSize(),
EXECUTABLE, NULL);
}
DeoptimizerData::DeoptimizerData(MemoryAllocator* allocator)
: allocator_(allocator),
current_(NULL) {
for (int i = 0; i <= Deoptimizer::kLastBailoutType; ++i) {
deopt_entry_code_entries_[i] = -1;
deopt_entry_code_[i] = AllocateCodeChunk(allocator);
}
}
DeoptimizerData::~DeoptimizerData() {
for (int i = 0; i <= Deoptimizer::kLastBailoutType; ++i) {
allocator_->Free<MemoryAllocator::kFull>(deopt_entry_code_[i]);
deopt_entry_code_[i] = NULL;
}
}
Code* Deoptimizer::FindDeoptimizingCode(Address addr) {
if (function_->IsHeapObject()) {
// Search all deoptimizing code in the native context of the function.
Isolate* isolate = function_->GetIsolate();
Context* native_context = function_->context()->native_context();
Object* element = native_context->DeoptimizedCodeListHead();
while (!element->IsUndefined(isolate)) {
Code* code = Code::cast(element);
CHECK(code->kind() == Code::OPTIMIZED_FUNCTION);
if (code->contains(addr)) return code;
element = code->next_code_link();
}
}
return NULL;
}
// We rely on this function not causing a GC. It is called from generated code
// without having a real stack frame in place.
Deoptimizer* Deoptimizer::New(JSFunction* function,
BailoutType type,
unsigned bailout_id,
Address from,
int fp_to_sp_delta,
Isolate* isolate) {
Deoptimizer* deoptimizer = new Deoptimizer(isolate, function, type,
bailout_id, from, fp_to_sp_delta);
CHECK(isolate->deoptimizer_data()->current_ == NULL);
isolate->deoptimizer_data()->current_ = deoptimizer;
return deoptimizer;
}
// No larger than 2K on all platforms
static const int kDeoptTableMaxEpilogueCodeSize = 2 * KB;
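// Returns the size of one deoptimization entry table: the space for the
// maximum number of entries plus the epilogue, rounded up to a whole number
// of commit pages.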
size_t Deoptimizer::GetMaxDeoptTableSize() {
int entries_size =
Deoptimizer::kMaxNumberOfEntries * Deoptimizer::table_entry_size_;
int commit_page_size = static_cast<int>(MemoryAllocator::GetCommitPageSize());
int page_count = ((kDeoptTableMaxEpilogueCodeSize + entries_size - 1) /
commit_page_size) + 1;
return static_cast<size_t>(commit_page_size * page_count);
}
Deoptimizer* Deoptimizer::Grab(Isolate* isolate) {
Deoptimizer* result = isolate->deoptimizer_data()->current_;
CHECK_NOT_NULL(result);
result->DeleteFrameDescriptions();
isolate->deoptimizer_data()->current_ = NULL;
return result;
}
DeoptimizedFrameInfo* Deoptimizer::DebuggerInspectableFrame(
JavaScriptFrame* frame,
int jsframe_index,
Isolate* isolate) {
CHECK(frame->is_optimized());
TranslatedState translated_values(frame);
translated_values.Prepare(false, frame->fp());
TranslatedState::iterator frame_it = translated_values.end();
int counter = jsframe_index;
for (auto it = translated_values.begin(); it != translated_values.end();
it++) {
if (it->kind() == TranslatedFrame::kFunction ||
it->kind() == TranslatedFrame::kInterpretedFunction) {
if (counter == 0) {
frame_it = it;
break;
}
counter--;
}
}
CHECK(frame_it != translated_values.end());
DeoptimizedFrameInfo* info =
new DeoptimizedFrameInfo(&translated_values, frame_it, isolate);
return info;
}
void Deoptimizer::GenerateDeoptimizationEntries(MacroAssembler* masm,
int count,
BailoutType type) {
TableEntryGenerator generator(masm, type, count);
generator.Generate();
}
void Deoptimizer::VisitAllOptimizedFunctionsForContext(
Context* context, OptimizedFunctionVisitor* visitor) {
DisallowHeapAllocation no_allocation;
CHECK(context->IsNativeContext());
// Visit the list of optimized functions, removing elements that
// no longer refer to optimized code.
JSFunction* prev = NULL;
Object* element = context->OptimizedFunctionsListHead();
Isolate* isolate = context->GetIsolate();
while (!element->IsUndefined(isolate)) {
JSFunction* function = JSFunction::cast(element);
Object* next = function->next_function_link();
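// The comma operator below first invokes the visitor and then re-checks the
// code kind, so a visitor that installs non-optimized code on the function
// also causes the function to be unlinked from this list.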
if (function->code()->kind() != Code::OPTIMIZED_FUNCTION ||
(visitor->VisitFunction(function),
function->code()->kind() != Code::OPTIMIZED_FUNCTION)) {
// The function no longer refers to optimized code, or the visitor
// changed the code it refers to so that it is no longer optimized code.
// Remove the function from this list.
if (prev != NULL) {
prev->set_next_function_link(next, UPDATE_WEAK_WRITE_BARRIER);
} else {
context->SetOptimizedFunctionsListHead(next);
}
// The visitor should not alter the link directly.
CHECK_EQ(function->next_function_link(), next);
// Set the next function link to undefined to indicate it is no longer
// in the optimized functions list.
function->set_next_function_link(context->GetHeap()->undefined_value(),
SKIP_WRITE_BARRIER);
} else {
// The visitor should not alter the link directly.
CHECK_EQ(function->next_function_link(), next);
// Preserve this element.
prev = function;
}
element = next;
}
}
void Deoptimizer::UnlinkOptimizedCode(Code* code, Context* native_context) {
class CodeUnlinker : public OptimizedFunctionVisitor {
public:
explicit CodeUnlinker(Code* code) : code_(code) {}
virtual void VisitFunction(JSFunction* function) {
if (function->code() == code_) {
if (FLAG_trace_deopt) {
PrintF("[removing optimized code for: ");
function->ShortPrint();
PrintF("]\n");
}
function->set_code(function->shared()->code());
}
}
private:
Code* code_;
};
CodeUnlinker unlinker(code);
VisitAllOptimizedFunctionsForContext(native_context, &unlinker);
}
void Deoptimizer::VisitAllOptimizedFunctions(
Isolate* isolate,
OptimizedFunctionVisitor* visitor) {
DisallowHeapAllocation no_allocation;
// Run through the list of all native contexts.
Object* context = isolate->heap()->native_contexts_list();
while (!context->IsUndefined(isolate)) {
VisitAllOptimizedFunctionsForContext(Context::cast(context), visitor);
context = Context::cast(context)->next_context_link();
}
}
// Unlink functions referring to code marked for deoptimization, then move
// marked code from the optimized code list to the deoptimized code list,
// and patch code for lazy deopt.
void Deoptimizer::DeoptimizeMarkedCodeForContext(Context* context) {
DisallowHeapAllocation no_allocation;
// A "closure" that unlinks optimized code that is going to be
// deoptimized from the functions that refer to it.
class SelectedCodeUnlinker: public OptimizedFunctionVisitor {
public:
virtual void VisitFunction(JSFunction* function) {
// The code in the function's optimized code feedback vector slot might
// be different from the code on the function - evict it if necessary.
function->feedback_vector()->EvictOptimizedCodeMarkedForDeoptimization(
function->shared(), "unlinking code marked for deopt");
Code* code = function->code();
if (!code->marked_for_deoptimization()) return;
// Unlink this function.
SharedFunctionInfo* shared = function->shared();
if (!code->deopt_already_counted()) {
shared->increment_deopt_count();
code->set_deopt_already_counted(true);
}
function->set_code(shared->code());
if (FLAG_trace_deopt) {
CodeTracer::Scope scope(code->GetHeap()->isolate()->GetCodeTracer());
PrintF(scope.file(), "[deoptimizer unlinked: ");
function->PrintName(scope.file());
PrintF(scope.file(),
" / %" V8PRIxPTR "]\n", reinterpret_cast<intptr_t>(function));
}
}
};
// Unlink all functions that refer to marked code.
SelectedCodeUnlinker unlinker;
VisitAllOptimizedFunctionsForContext(context, &unlinker);
Isolate* isolate = context->GetHeap()->isolate();
#ifdef DEBUG
Code* topmost_optimized_code = NULL;
bool safe_to_deopt_topmost_optimized_code = false;
// Make sure all activations of optimized code can deopt at their current PC.
// The topmost optimized code has special handling because it cannot be
// deoptimized due to weak object dependency.
for (StackFrameIterator it(isolate, isolate->thread_local_top());
!it.done(); it.Advance()) {
StackFrame::Type type = it.frame()->type();
if (type == StackFrame::OPTIMIZED) {
Code* code = it.frame()->LookupCode();
JSFunction* function =
static_cast<OptimizedFrame*>(it.frame())->function();
if (FLAG_trace_deopt) {
CodeTracer::Scope scope(isolate->GetCodeTracer());
PrintF(scope.file(), "[deoptimizer found activation of function: ");
function->PrintName(scope.file());
PrintF(scope.file(),
" / %" V8PRIxPTR "]\n", reinterpret_cast<intptr_t>(function));
}
SafepointEntry safepoint = code->GetSafepointEntry(it.frame()->pc());
int deopt_index = safepoint.deoptimization_index();
// Turbofan deopt is checked when we are patching addresses on stack.
bool turbofanned =
code->is_turbofanned() && function->shared()->asm_function();
bool safe_to_deopt =
deopt_index != Safepoint::kNoDeoptimizationIndex || turbofanned;
bool builtin = code->kind() == Code::BUILTIN;
CHECK(topmost_optimized_code == NULL || safe_to_deopt || turbofanned ||
builtin);
if (topmost_optimized_code == NULL) {
topmost_optimized_code = code;
safe_to_deopt_topmost_optimized_code = safe_to_deopt;
}
}
}
#endif
// Move marked code from the optimized code list to the deoptimized
// code list, collecting them into a ZoneList.
Zone zone(isolate->allocator(), ZONE_NAME);
ZoneList<Code*> codes(10, &zone);
// Walk over all optimized code objects in this native context.
Code* prev = NULL;
Object* element = context->OptimizedCodeListHead();
while (!element->IsUndefined(isolate)) {
Code* code = Code::cast(element);
CHECK_EQ(code->kind(), Code::OPTIMIZED_FUNCTION);
Object* next = code->next_code_link();
if (code->marked_for_deoptimization()) {
// Put the code into the list for later patching.
codes.Add(code, &zone);
if (prev != NULL) {
// Skip this code in the optimized code list.
prev->set_next_code_link(next);
} else {
// There was no previous node, the next node is the new head.
context->SetOptimizedCodeListHead(next);
}
// Move the code to the _deoptimized_ code list.
code->set_next_code_link(context->DeoptimizedCodeListHead());
context->SetDeoptimizedCodeListHead(code);
} else {
// Not marked; preserve this element.
prev = code;
}
element = next;
}
// We need a handle scope only because of the macro assembler,
// which is used in code patching in EnsureCodeForDeoptimizationEntry.
HandleScope scope(isolate);
// Now patch all the codes for deoptimization.
for (int i = 0; i < codes.length(); i++) {
#ifdef DEBUG
if (codes[i] == topmost_optimized_code) {
DCHECK(safe_to_deopt_topmost_optimized_code);
}
#endif
// It is finally time to die, code object.
// Remove the code from the osr optimized code cache.
DeoptimizationInputData* deopt_data =
DeoptimizationInputData::cast(codes[i]->deoptimization_data());
if (deopt_data->OsrAstId()->value() != BailoutId::None().ToInt()) {
isolate->EvictOSROptimizedCode(codes[i], "deoptimized code");
}
// Do platform-specific patching to force any activations to lazy deopt.
PatchCodeForDeoptimization(isolate, codes[i]);
// We might be in the middle of incremental marking with compaction.
// Tell collector to treat this code object in a special way and
// ignore all slots that might have been recorded on it.
isolate->heap()->mark_compact_collector()->InvalidateCode(codes[i]);
}
}
void Deoptimizer::DeoptimizeAll(Isolate* isolate) {
RuntimeCallTimerScope runtimeTimer(isolate,
&RuntimeCallStats::DeoptimizeCode);
TimerEventScope<TimerEventDeoptimizeCode> timer(isolate);
TRACE_EVENT0("v8", "V8.DeoptimizeCode");
if (FLAG_trace_deopt) {
CodeTracer::Scope scope(isolate->GetCodeTracer());
PrintF(scope.file(), "[deoptimize all code in all contexts]\n");
}
DisallowHeapAllocation no_allocation;
// For all contexts, mark all code, then deoptimize.
Object* context = isolate->heap()->native_contexts_list();
while (!context->IsUndefined(isolate)) {
Context* native_context = Context::cast(context);
MarkAllCodeForContext(native_context);
DeoptimizeMarkedCodeForContext(native_context);
context = native_context->next_context_link();
}
}
void Deoptimizer::DeoptimizeMarkedCode(Isolate* isolate) {
RuntimeCallTimerScope runtimeTimer(isolate,
&RuntimeCallStats::DeoptimizeCode);
TimerEventScope<TimerEventDeoptimizeCode> timer(isolate);
TRACE_EVENT0("v8", "V8.DeoptimizeCode");
if (FLAG_trace_deopt) {
CodeTracer::Scope scope(isolate->GetCodeTracer());
PrintF(scope.file(), "[deoptimize marked code in all contexts]\n");
}
DisallowHeapAllocation no_allocation;
// For all contexts, deoptimize code already marked.
Object* context = isolate->heap()->native_contexts_list();
while (!context->IsUndefined(isolate)) {
Context* native_context = Context::cast(context);
DeoptimizeMarkedCodeForContext(native_context);
context = native_context->next_context_link();
}
}
void Deoptimizer::MarkAllCodeForContext(Context* context) {
Object* element = context->OptimizedCodeListHead();
Isolate* isolate = context->GetIsolate();
while (!element->IsUndefined(isolate)) {
Code* code = Code::cast(element);
CHECK_EQ(code->kind(), Code::OPTIMIZED_FUNCTION);
code->set_marked_for_deoptimization(true);
element = code->next_code_link();
}
}
void Deoptimizer::DeoptimizeFunction(JSFunction* function, Code* code) {
Isolate* isolate = function->GetIsolate();
RuntimeCallTimerScope runtimeTimer(isolate,
&RuntimeCallStats::DeoptimizeCode);
TimerEventScope<TimerEventDeoptimizeCode> timer(isolate);
TRACE_EVENT0("v8", "V8.DeoptimizeCode");
if (code == nullptr) code = function->code();
if (code->kind() == Code::OPTIMIZED_FUNCTION) {
// Mark the code for deoptimization and unlink any functions that also
// refer to that code. The code cannot be shared across native contexts,
// so we only need to search one.
code->set_marked_for_deoptimization(true);
DeoptimizeMarkedCodeForContext(function->context()->native_context());
}
}
void Deoptimizer::ComputeOutputFrames(Deoptimizer* deoptimizer) {
deoptimizer->DoComputeOutputFrames();
}
bool Deoptimizer::TraceEnabledFor(StackFrame::Type frame_type) {
return (frame_type == StackFrame::STUB) ? FLAG_trace_stub_failures
: FLAG_trace_deopt;
}
const char* Deoptimizer::MessageFor(BailoutType type) {
switch (type) {
case EAGER: return "eager";
case SOFT: return "soft";
case LAZY: return "lazy";
}
FATAL("Unsupported deopt type");
return NULL;
}
namespace {
CodeEventListener::DeoptKind DeoptKindOfBailoutType(
Deoptimizer::BailoutType bailout_type) {
switch (bailout_type) {
case Deoptimizer::EAGER:
return CodeEventListener::kEager;
case Deoptimizer::SOFT:
return CodeEventListener::kSoft;
case Deoptimizer::LAZY:
return CodeEventListener::kLazy;
}
UNREACHABLE();
}
} // namespace
Deoptimizer::Deoptimizer(Isolate* isolate, JSFunction* function,
BailoutType type, unsigned bailout_id, Address from,
int fp_to_sp_delta)
: isolate_(isolate),
function_(function),
bailout_id_(bailout_id),
bailout_type_(type),
from_(from),
fp_to_sp_delta_(fp_to_sp_delta),
deoptimizing_throw_(false),
catch_handler_data_(-1),
catch_handler_pc_offset_(-1),
input_(nullptr),
output_count_(0),
jsframe_count_(0),
output_(nullptr),
caller_frame_top_(0),
caller_fp_(0),
caller_pc_(0),
caller_constant_pool_(0),
input_frame_context_(0),
stack_fp_(0),
trace_scope_(nullptr) {
if (isolate->deoptimizer_lazy_throw()) {
isolate->set_deoptimizer_lazy_throw(false);
deoptimizing_throw_ = true;
}
// For COMPILED_STUBs called from builtins, the function pointer is a SMI
// indicating an internal frame.
if (function->IsSmi()) {
function = nullptr;
}
DCHECK(from != nullptr);
compiled_code_ = FindOptimizedCode(function);
#if DEBUG
DCHECK(compiled_code_ != NULL);
if (type == EAGER || type == SOFT || type == LAZY) {
DCHECK(compiled_code_->kind() != Code::FUNCTION);
}
#endif
StackFrame::Type frame_type = function == NULL
? StackFrame::STUB
: StackFrame::JAVA_SCRIPT;
trace_scope_ = TraceEnabledFor(frame_type)
? new CodeTracer::Scope(isolate->GetCodeTracer())
: NULL;
#ifdef DEBUG
CHECK(AllowHeapAllocation::IsAllowed());
disallow_heap_allocation_ = new DisallowHeapAllocation();
#endif // DEBUG
if (function != nullptr && function->IsOptimized() &&
(compiled_code_->kind() != Code::OPTIMIZED_FUNCTION ||
!compiled_code_->deopt_already_counted())) {
// If the function is optimized, and we haven't counted that deopt yet, then
// increment the function's deopt count so that we can avoid optimizing
// functions that deopt too often.
if (bailout_type_ == Deoptimizer::SOFT) {
// Soft deopts shouldn't count against the overall deoptimization count
// that can eventually lead to disabling optimization for a function.
isolate->counters()->soft_deopts_executed()->Increment();
} else {
function->shared()->increment_deopt_count();
}
}
if (compiled_code_->kind() == Code::OPTIMIZED_FUNCTION) {
compiled_code_->set_deopt_already_counted(true);
PROFILE(isolate_,
CodeDeoptEvent(compiled_code_, DeoptKindOfBailoutType(type), from_,
fp_to_sp_delta_));
}
unsigned size = ComputeInputFrameSize();
int parameter_count =
function == nullptr
? 0
: (function->shared()->internal_formal_parameter_count() + 1);
input_ = new (size) FrameDescription(size, parameter_count);
input_->SetFrameType(frame_type);
}
Code* Deoptimizer::FindOptimizedCode(JSFunction* function) {
Code* compiled_code = FindDeoptimizingCode(from_);
return (compiled_code == NULL)
? static_cast<Code*>(isolate_->FindCodeObject(from_))
: compiled_code;
}
void Deoptimizer::PrintFunctionName() {
if (function_->IsHeapObject() && function_->IsJSFunction()) {
function_->ShortPrint(trace_scope_->file());
} else {
PrintF(trace_scope_->file(),
"%s", Code::Kind2String(compiled_code_->kind()));
}
}
Deoptimizer::~Deoptimizer() {
DCHECK(input_ == NULL && output_ == NULL);
DCHECK(disallow_heap_allocation_ == NULL);
delete trace_scope_;
}
void Deoptimizer::DeleteFrameDescriptions() {
delete input_;
for (int i = 0; i < output_count_; ++i) {
if (output_[i] != input_) delete output_[i];
}
delete[] output_;
input_ = NULL;
output_ = NULL;
#ifdef DEBUG
CHECK(!AllowHeapAllocation::IsAllowed());
CHECK(disallow_heap_allocation_ != NULL);
delete disallow_heap_allocation_;
disallow_heap_allocation_ = NULL;
#endif // DEBUG
}
Address Deoptimizer::GetDeoptimizationEntry(Isolate* isolate,
int id,
BailoutType type,
GetEntryMode mode) {
CHECK_GE(id, 0);
if (id >= kMaxNumberOfEntries) return NULL;
if (mode == ENSURE_ENTRY_CODE) {
EnsureCodeForDeoptimizationEntry(isolate, type, id);
} else {
CHECK_EQ(mode, CALCULATE_ENTRY_ADDRESS);
}
DeoptimizerData* data = isolate->deoptimizer_data();
CHECK_LE(type, kLastBailoutType);
MemoryChunk* base = data->deopt_entry_code_[type];
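// Entries are laid out contiguously in the chunk, so the address of entry
// |id| is the chunk's area start plus id * table_entry_size_.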
return base->area_start() + (id * table_entry_size_);
}
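// Maps a deopt entry address back to its index in the entry table for the
// given bailout type, or returns kNotDeoptimizationEntry if the address does
// not lie within that table.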
int Deoptimizer::GetDeoptimizationId(Isolate* isolate,
Address addr,
BailoutType type) {
DeoptimizerData* data = isolate->deoptimizer_data();
MemoryChunk* base = data->deopt_entry_code_[type];
Address start = base->area_start();
if (addr < start ||
addr >= start + (kMaxNumberOfEntries * table_entry_size_)) {
return kNotDeoptimizationEntry;
}
DCHECK_EQ(0,
static_cast<int>(addr - start) % table_entry_size_);
return static_cast<int>(addr - start) / table_entry_size_;
}
int Deoptimizer::GetOutputInfo(DeoptimizationOutputData* data,
BailoutId id,
SharedFunctionInfo* shared) {
// TODO(kasperl): For now, we do a simple linear search for the PC
// offset associated with the given node id. This should probably be
// changed to a binary search.
int length = data->DeoptPoints();
for (int i = 0; i < length; i++) {
if (data->AstId(i) == id) {
return data->PcAndState(i)->value();
}
}
OFStream os(stderr);
os << "[couldn't find pc offset for node=" << id.ToInt() << "]\n"
<< "[method: " << shared->DebugName()->ToCString().get() << "]\n"
<< "[source:\n" << SourceCodeOf(shared) << "\n]" << std::endl;
shared->GetHeap()->isolate()->PushStackTraceAndDie(0xfefefefe, data, shared,
0xfefefeff);
FATAL("unable to find pc offset during deoptimization");
return -1;
}
int Deoptimizer::GetDeoptimizedCodeCount(Isolate* isolate) {
int length = 0;
// Count all entries in the deoptimizing code list of every context.
Object* context = isolate->heap()->native_contexts_list();
while (!context->IsUndefined(isolate)) {
Context* native_context = Context::cast(context);
Object* element = native_context->DeoptimizedCodeListHead();
while (!element->IsUndefined(isolate)) {
Code* code = Code::cast(element);
DCHECK(code->kind() == Code::OPTIMIZED_FUNCTION);
length++;
element = code->next_code_link();
}
context = Context::cast(context)->next_context_link();
}
return length;
}
namespace {
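// For interpreted frames, looks up the catch handler covering the frame's
// bytecode offset: returns the handler's pc offset and stores the handler's
// auxiliary data in *data_out. Returns -1 if there is no such handler (full
// codegen frames are checked to have no range-based handlers).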
int LookupCatchHandler(TranslatedFrame* translated_frame, int* data_out) {
switch (translated_frame->kind()) {
case TranslatedFrame::kFunction: {
#ifdef DEBUG
JSFunction* function =
JSFunction::cast(translated_frame->begin()->GetRawValue());
Code* non_optimized_code = function->shared()->code();
HandlerTable* table =
HandlerTable::cast(non_optimized_code->handler_table());
DCHECK_EQ(0, table->NumberOfRangeEntries());
#endif
break;
}
case TranslatedFrame::kInterpretedFunction: {
int bytecode_offset = translated_frame->node_id().ToInt();
JSFunction* function =
JSFunction::cast(translated_frame->begin()->GetRawValue());
BytecodeArray* bytecode = function->shared()->bytecode_array();
HandlerTable* table = HandlerTable::cast(bytecode->handler_table());
return table->LookupRange(bytecode_offset, data_out, nullptr);
}
default:
break;
}
return -1;
}
} // namespace
// We rely on this function not causing a GC. It is called from generated code
// without having a real stack frame in place.
void Deoptimizer::DoComputeOutputFrames() {
base::ElapsedTimer timer;
// Determine basic deoptimization information. The optimized frame is
// described by the input data.
DeoptimizationInputData* input_data =
DeoptimizationInputData::cast(compiled_code_->deoptimization_data());
{
// Read caller's PC, caller's FP and caller's constant pool values
// from input frame. Compute caller's frame top address.
Register fp_reg = JavaScriptFrame::fp_register();
stack_fp_ = input_->GetRegister(fp_reg.code());
caller_frame_top_ = stack_fp_ + ComputeInputFrameAboveFpFixedSize();
Address fp_address = input_->GetFramePointerAddress();
caller_fp_ = Memory::intptr_at(fp_address);
caller_pc_ =
Memory::intptr_at(fp_address + CommonFrameConstants::kCallerPCOffset);
input_frame_context_ = Memory::intptr_at(
fp_address + CommonFrameConstants::kContextOrFrameTypeOffset);
if (FLAG_enable_embedded_constant_pool) {
caller_constant_pool_ = Memory::intptr_at(
fp_address + CommonFrameConstants::kConstantPoolOffset);
}
}
if (trace_scope_ != NULL) {
timer.Start();
PrintF(trace_scope_->file(), "[deoptimizing (DEOPT %s): begin ",
MessageFor(bailout_type_));
PrintFunctionName();
PrintF(trace_scope_->file(),
" (opt #%d) @%d, FP to SP delta: %d, caller sp: 0x%08" V8PRIxPTR
"]\n",
input_data->OptimizationId()->value(), bailout_id_, fp_to_sp_delta_,
caller_frame_top_);
if (bailout_type_ == EAGER || bailout_type_ == SOFT ||
(compiled_code_->is_hydrogen_stub())) {
compiled_code_->PrintDeoptLocation(trace_scope_->file(), from_);
}
}
BailoutId node_id = input_data->AstId(bailout_id_);
ByteArray* translations = input_data->TranslationByteArray();
unsigned translation_index =
input_data->TranslationIndex(bailout_id_)->value();
TranslationIterator state_iterator(translations, translation_index);
translated_state_.Init(
input_->GetFramePointerAddress(), &state_iterator,
input_data->LiteralArray(), input_->GetRegisterValues(),
trace_scope_ == nullptr ? nullptr : trace_scope_->file(),
function_->IsHeapObject()
? function_->shared()->internal_formal_parameter_count()
: 0);
// Do the input frame to output frame(s) translation.
size_t count = translated_state_.frames().size();
// If we are supposed to go to the catch handler, find the catching frame
// for the catch and make sure we only deoptimize upto that frame.
if (deoptimizing_throw_) {
size_t catch_handler_frame_index = count;
for (size_t i = count; i-- > 0;) {
catch_handler_pc_offset_ = LookupCatchHandler(
&(translated_state_.frames()[i]), &catch_handler_data_);
if (catch_handler_pc_offset_ >= 0) {
catch_handler_frame_index = i;
break;
}
}
CHECK_LT(catch_handler_frame_index, count);
count = catch_handler_frame_index + 1;
}
DCHECK(output_ == NULL);
output_ = new FrameDescription*[count];
for (size_t i = 0; i < count; ++i) {
output_[i] = NULL;
}
output_count_ = static_cast<int>(count);
// Translate each output frame.
int frame_index = 0; // output_frame_index
for (size_t i = 0; i < count; ++i, ++frame_index) {
// Read the ast node id, function, and frame height for this output frame.
TranslatedFrame* translated_frame = &(translated_state_.frames()[i]);
switch (translated_frame->kind()) {
case TranslatedFrame::kFunction:
DoComputeJSFrame(translated_frame, frame_index,
deoptimizing_throw_ && i == count - 1);
jsframe_count_++;
break;
case TranslatedFrame::kInterpretedFunction:
DoComputeInterpretedFrame(translated_frame, frame_index,
deoptimizing_throw_ && i == count - 1);
jsframe_count_++;
break;
case TranslatedFrame::kArgumentsAdaptor:
DoComputeArgumentsAdaptorFrame(translated_frame, frame_index);
break;
case TranslatedFrame::kTailCallerFunction:
DoComputeTailCallerFrame(translated_frame, frame_index);
// Tail caller frame translations do not produce output frames.
frame_index--;
output_count_--;
break;
case TranslatedFrame::kConstructStub:
DoComputeConstructStubFrame(translated_frame, frame_index);
break;
case TranslatedFrame::kGetter:
DoComputeAccessorStubFrame(translated_frame, frame_index, false);
break;
case TranslatedFrame::kSetter:
DoComputeAccessorStubFrame(translated_frame, frame_index, true);
break;
case TranslatedFrame::kCompiledStub:
DoComputeCompiledStubFrame(translated_frame, frame_index);
break;
case TranslatedFrame::kInvalid:
FATAL("invalid frame");
break;
}
}
// Print some helpful diagnostic information.
if (trace_scope_ != NULL) {
double ms = timer.Elapsed().InMillisecondsF();
int index = output_count_ - 1; // Index of the topmost frame.
PrintF(trace_scope_->file(), "[deoptimizing (%s): end ",
MessageFor(bailout_type_));
PrintFunctionName();
PrintF(trace_scope_->file(),
" @%d => node=%d, pc=0x%08" V8PRIxPTR ", caller sp=0x%08" V8PRIxPTR
", state=%s, took %0.3f ms]\n",
bailout_id_, node_id.ToInt(), output_[index]->GetPc(),
caller_frame_top_, BailoutStateToString(static_cast<BailoutState>(
output_[index]->GetState()->value())),
ms);
}
}
void Deoptimizer::DoComputeJSFrame(TranslatedFrame* translated_frame,
int frame_index, bool goto_catch_handler) {
SharedFunctionInfo* shared = translated_frame->raw_shared_info();
TranslatedFrame::iterator value_iterator = translated_frame->begin();
bool is_bottommost = (0 == frame_index);
bool is_topmost = (output_count_ - 1 == frame_index);
int input_index = 0;
BailoutId node_id = translated_frame->node_id();
unsigned height =
translated_frame->height() - 1; // Do not count the context.
unsigned height_in_bytes = height * kPointerSize;
if (goto_catch_handler) {
// Take the stack height from the handler table.
height = catch_handler_data_;
// We also make space for the exception itself.
height_in_bytes = (height + 1) * kPointerSize;
CHECK(is_topmost);
}
JSFunction* function = JSFunction::cast(value_iterator->GetRawValue());
value_iterator++;
input_index++;
if (trace_scope_ != NULL) {
PrintF(trace_scope_->file(), " translating frame ");
std::unique_ptr<char[]> name = shared->DebugName()->ToCString();
PrintF(trace_scope_->file(), "%s", name.get());
PrintF(trace_scope_->file(), " => node=%d, height=%d%s\n", node_id.ToInt(),
height_in_bytes, goto_catch_handler ? " (throw)" : "");
}
// The 'fixed' part of the frame consists of the incoming parameters and
// the part described by JavaScriptFrameConstants.
unsigned fixed_frame_size = ComputeJavascriptFixedSize(shared);
unsigned output_frame_size = height_in_bytes + fixed_frame_size;
// Allocate and store the output frame description.
int parameter_count = shared->internal_formal_parameter_count() + 1;
FrameDescription* output_frame = new (output_frame_size)
FrameDescription(output_frame_size, parameter_count);
output_frame->SetFrameType(StackFrame::JAVA_SCRIPT);
CHECK(frame_index >= 0 && frame_index < output_count_);
CHECK_NULL(output_[frame_index]);
output_[frame_index] = output_frame;
// The top address of the frame is computed from the previous frame's top and
// this frame's size.
intptr_t top_address;
if (is_bottommost) {
top_address = caller_frame_top_ - output_frame_size;
} else {
top_address = output_[frame_index - 1]->GetTop() - output_frame_size;
}
output_frame->SetTop(top_address);
// Compute the incoming parameter translation.
unsigned output_offset = output_frame_size;
for (int i = 0; i < parameter_count; ++i) {
output_offset -= kPointerSize;
WriteTranslatedValueToOutput(&value_iterator, &input_index, frame_index,
output_offset);
}
if (trace_scope_ != nullptr) {
PrintF(trace_scope_->file(), " -------------------------\n");
}
// There are no translation commands for the caller's pc and fp, the
// context, and the function. Synthesize their values and set them up
// explicitly.
//
// The caller's pc for the bottommost output frame is the same as in the
// input frame. For all subsequent output frames, it can be read from the
// previous one. This frame's pc can be computed from the non-optimized
// function code and AST id of the bailout.
output_offset -= kPCOnStackSize;
intptr_t value;
if (is_bottommost) {
value = caller_pc_;
} else {
value = output_[frame_index - 1]->GetPc();
}
output_frame->SetCallerPc(output_offset, value);
DebugPrintOutputSlot(value, frame_index, output_offset, "caller's pc\n");
// The caller's frame pointer for the bottommost output frame is the same
// as in the input frame. For all subsequent output frames, it can be
// read from the previous one. Also compute and set this frame's frame
// pointer.
output_offset -= kFPOnStackSize;
if (is_bottommost) {
value = caller_fp_;
} else {
value = output_[frame_index - 1]->GetFp();
}
output_frame->SetCallerFp(output_offset, value);
intptr_t fp_value = top_address + output_offset;
output_frame->SetFp(fp_value);
if (is_topmost) {
Register fp_reg = JavaScriptFrame::fp_register();
output_frame->SetRegister(fp_reg.code(), fp_value);
}
DebugPrintOutputSlot(value, frame_index, output_offset, "caller's fp\n");
if (FLAG_enable_embedded_constant_pool) {
// For the bottommost output frame the constant pool pointer can be gotten
// from the input frame. For subsequent output frames, it can be read from
// the previous frame.
output_offset -= kPointerSize;
if (is_bottommost) {
value = caller_constant_pool_;
} else {
value = output_[frame_index - 1]->GetConstantPool();
}
output_frame->SetCallerConstantPool(output_offset, value);
DebugPrintOutputSlot(value, frame_index, output_offset,
"caller's constant_pool\n");
}
// For the bottommost output frame the context can be gotten from the input
// frame. For all subsequent output frames it can be gotten from the function
// so long as we don't inline functions that need local contexts.
output_offset -= kPointerSize;
// When deoptimizing into a catch block, we need to take the context
// from just above the top of the operand stack (we push the context
// at the entry of the try block).
TranslatedFrame::iterator context_pos = value_iterator;
int context_input_index = input_index;
if (goto_catch_handler) {
for (unsigned i = 0; i < height + 1; ++i) {
context_pos++;
context_input_index++;
}
}
// Read the context from the translations.
Object* context = context_pos->GetRawValue();
if (context->IsUndefined(isolate_)) {
// If the context was optimized away, just use the context from
// the activation. This should only apply to Crankshaft code.
CHECK(!compiled_code_->is_turbofanned());
context = is_bottommost ? reinterpret_cast<Object*>(input_frame_context_)
: function->context();
}
value = reinterpret_cast<intptr_t>(context);
output_frame->SetContext(value);
WriteValueToOutput(context, context_input_index, frame_index, output_offset,
"context ");
if (context == isolate_->heap()->arguments_marker()) {
Address output_address =
reinterpret_cast<Address>(output_[frame_index]->GetTop()) +
output_offset;
values_to_materialize_.push_back({output_address, context_pos});
}
value_iterator++;
input_index++;
// The function was mentioned explicitly in the BEGIN_FRAME.
output_offset -= kPointerSize;
value = reinterpret_cast<intptr_t>(function);
WriteValueToOutput(function, 0, frame_index, output_offset, "function ");
if (trace_scope_ != nullptr) {
PrintF(trace_scope_->file(), " -------------------------\n");
}
// Translate the rest of the frame.
for (unsigned i = 0; i < height; ++i) {
output_offset -= kPointerSize;
WriteTranslatedValueToOutput(&value_iterator, &input_index, frame_index,
output_offset);
}
if (goto_catch_handler) {
// Write out the exception for the catch handler.
output_offset -= kPointerSize;
Object* exception_obj = reinterpret_cast<Object*>(
input_->GetRegister(FullCodeGenerator::result_register().code()));
WriteValueToOutput(exception_obj, input_index, frame_index, output_offset,
"exception ");
input_index++;
}
CHECK_EQ(0u, output_offset);
// Update constant pool.
Code* non_optimized_code = shared->code();
if (FLAG_enable_embedded_constant_pool) {
intptr_t constant_pool_value =
reinterpret_cast<intptr_t>(non_optimized_code->constant_pool());
output_frame->SetConstantPool(constant_pool_value);
if (is_topmost) {
Register constant_pool_reg =
JavaScriptFrame::constant_pool_pointer_register();
output_frame->SetRegister(constant_pool_reg.code(), constant_pool_value);
}
}
// Compute this frame's PC and state.
FixedArray* raw_data = non_optimized_code->deoptimization_data();
DeoptimizationOutputData* data = DeoptimizationOutputData::cast(raw_data);
Address start = non_optimized_code->instruction_start();
unsigned pc_and_state = GetOutputInfo(data, node_id, function->shared());
unsigned pc_offset = goto_catch_handler
? catch_handler_pc_offset_
: FullCodeGenerator::PcField::decode(pc_and_state);
intptr_t pc_value = reinterpret_cast<intptr_t>(start + pc_offset);
output_frame->SetPc(pc_value);
// If we are going to the catch handler, then the exception lives in
// the accumulator.
BailoutState state =
goto_catch_handler
? BailoutState::TOS_REGISTER
: FullCodeGenerator::BailoutStateField::decode(pc_and_state);
output_frame->SetState(Smi::FromInt(static_cast<int>(state)));
// Clear the context register. The context might be a de-materialized object
// and will be materialized by {Runtime_NotifyDeoptimized}. For additional
// safety we use Smi(0) instead of the potential {arguments_marker} here.
if (is_topmost) {
intptr_t context_value = reinterpret_cast<intptr_t>(Smi::kZero);
Register context_reg = JavaScriptFrame::context_register();
output_frame->SetRegister(context_reg.code(), context_value);
}
// Set the continuation for the topmost frame.
if (is_topmost) {
Builtins* builtins = isolate_->builtins();
Code* continuation = builtins->builtin(Builtins::kNotifyDeoptimized);
if (bailout_type_ == LAZY) {
continuation = builtins->builtin(Builtins::kNotifyLazyDeoptimized);
} else if (bailout_type_ == SOFT) {
continuation = builtins->builtin(Builtins::kNotifySoftDeoptimized);
} else {
CHECK_EQ(bailout_type_, EAGER);
}
output_frame->SetContinuation(
reinterpret_cast<intptr_t>(continuation->entry()));
}
}
void Deoptimizer::DoComputeInterpretedFrame(TranslatedFrame* translated_frame,
int frame_index,
bool goto_catch_handler) {
SharedFunctionInfo* shared = translated_frame->raw_shared_info();
TranslatedFrame::iterator value_iterator = translated_frame->begin();
bool is_bottommost = (0 == frame_index);
bool is_topmost = (output_count_ - 1 == frame_index);
int input_index = 0;
int bytecode_offset = translated_frame->node_id().ToInt();
unsigned height = translated_frame->height();
unsigned height_in_bytes = height * kPointerSize;
// All translations for interpreted frames contain the accumulator and hence
// are assumed to be in bailout state {BailoutState::TOS_REGISTER}. However,
// such a state is only supported for the topmost frame. We need to skip
// pushing the accumulator for any non-topmost frame.
if (!is_topmost) height_in_bytes -= kPointerSize;
JSFunction* function = JSFunction::cast(value_iterator->GetRawValue());
value_iterator++;
input_index++;
if (trace_scope_ != NULL) {
PrintF(trace_scope_->file(), " translating interpreted frame ");
std::unique_ptr<char[]> name = shared->DebugName()->ToCString();
PrintF(trace_scope_->file(), "%s", name.get());
PrintF(trace_scope_->file(), " => bytecode_offset=%d, height=%d%s\n",
bytecode_offset, height_in_bytes,
goto_catch_handler ? " (throw)" : "");
}
if (goto_catch_handler) {
bytecode_offset = catch_handler_pc_offset_;
}
// The 'fixed' part of the frame consists of the incoming parameters and
// the part described by InterpreterFrameConstants.
unsigned fixed_frame_size = ComputeInterpretedFixedSize(shared);
unsigned output_frame_size = height_in_bytes + fixed_frame_size;
// Allocate and store the output frame description.
int parameter_count = shared->internal_formal_parameter_count() + 1;
FrameDescription* output_frame = new (output_frame_size)
FrameDescription(output_frame_size, parameter_count);
output_frame->SetFrameType(StackFrame::INTERPRETED);
CHECK(frame_index >= 0 && frame_index < output_count_);
CHECK_NULL(output_[frame_index]);
output_[frame_index] = output_frame;
// The top address of the frame is computed from the previous frame's top and
// this frame's size.
intptr_t top_address;
if (is_bottommost) {
top_address = caller_frame_top_ - output_frame_size;
} else {
top_address = output_[frame_index - 1]->GetTop() - output_frame_size;
}
output_frame->SetTop(top_address);
// Compute the incoming parameter translation.
unsigned output_offset = output_frame_size;
for (int i = 0; i < parameter_count; ++i) {
output_offset -= kPointerSize;
WriteTranslatedValueToOutput(&value_iterator, &input_index, frame_index,
output_offset);
}
if (trace_scope_ != nullptr) {
PrintF(trace_scope_->file(), " -------------------------\n");
}
// There are no translation commands for the caller's pc and fp, the
// context, the function, new.target, and the bytecode offset. Synthesize
// their values and set them up explicitly.
//
// The caller's pc for the bottommost output frame is the same as in the
// input frame. For all subsequent output frames, it can be read from the
// previous one. This frame's pc can be computed from the non-optimized
// function code and AST id of the bailout.
output_offset -= kPCOnStackSize;
intptr_t value;
if (is_bottommost) {
value = caller_pc_;
} else {
value = output_[frame_index - 1]->GetPc();
}
output_frame->SetCallerPc(output_offset, value);
DebugPrintOutputSlot(value, frame_index, output_offset, "caller's pc\n");
// The caller's frame pointer for the bottommost output frame is the same
// as in the input frame. For all subsequent output frames, it can be
// read from the previous one. Also compute and set this frame's frame
// pointer.
output_offset -= kFPOnStackSize;
if (is_bottommost) {
value = caller_fp_;
} else {
value = output_[frame_index - 1]->GetFp();
}
output_frame->SetCallerFp(output_offset, value);
intptr_t fp_value = top_address + output_offset;
output_frame->SetFp(fp_value);
if (is_topmost) {
Register fp_reg = InterpretedFrame::fp_register();
output_frame->SetRegister(fp_reg.code(), fp_value);
}
DebugPrintOutputSlot(value, frame_index, output_offset, "caller's fp\n");
if (FLAG_enable_embedded_constant_pool) {
// For the bottommost output frame the constant pool pointer can be gotten
// from the input frame. For subsequent output frames, it can be read from
// the previous frame.
output_offset -= kPointerSize;
if (is_bottommost) {
value = caller_constant_pool_;
} else {
value = output_[frame_index - 1]->GetConstantPool();
}
output_frame->SetCallerConstantPool(output_offset, value);
DebugPrintOutputSlot(value, frame_index, output_offset,
"caller's constant_pool\n");
}
// For the bottommost output frame the context can be gotten from the input
// frame. For all subsequent output frames it can be gotten from the function
// so long as we don't inline functions that need local contexts.
output_offset -= kPointerSize;
// When deoptimizing into a catch block, we need to take the context
// from a register that was specified in the handler table.
TranslatedFrame::iterator context_pos = value_iterator;
int context_input_index = input_index;
if (goto_catch_handler) {
// Skip to the translated value of the register specified
// in the handler table.
for (int i = 0; i < catch_handler_data_ + 1; ++i) {
context_pos++;
context_input_index++;
}
}
// Read the context from the translations.
Object* context = context_pos->GetRawValue();
value = reinterpret_cast<intptr_t>(context);
output_frame->SetContext(value);
WriteValueToOutput(context, context_input_index, frame_index, output_offset,
"context ");
if (context == isolate_->heap()->arguments_marker()) {
Address output_address =
reinterpret_cast<Address>(output_[frame_index]->GetTop()) +
output_offset;
values_to_materialize_.push_back({output_address, context_pos});
}
value_iterator++;
input_index++;
// The function was mentioned explicitly in the BEGIN_FRAME.
output_offset -= kPointerSize;
value = reinterpret_cast<intptr_t>(function);
WriteValueToOutput(function, 0, frame_index, output_offset, "function ");
// The new.target slot is only used during function activation, which is
// before the first deopt point, so it should never be needed. Just set it to
// undefined.
output_offset -= kPointerSize;
Object* new_target = isolate_->heap()->undefined_value();
WriteValueToOutput(new_target, 0, frame_index, output_offset, "new_target ");
// Set the bytecode array pointer.
output_offset -= kPointerSize;
Object* bytecode_array = shared->HasDebugInfo()
? shared->GetDebugInfo()->DebugBytecodeArray()
: shared->bytecode_array();
WriteValueToOutput(bytecode_array, 0, frame_index, output_offset,
"bytecode array ");
// The bytecode offset was mentioned explicitly in the BEGIN_FRAME.
output_offset -= kPointerSize;
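// The bytecode offset is stored as a Smi whose value is biased by the
// BytecodeArray header size (minus the heap-object tag), so that, once
// untagged, it can be added to the tagged BytecodeArray pointer to obtain
// the address of the current bytecode.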
int raw_bytecode_offset =
BytecodeArray::kHeaderSize - kHeapObjectTag + bytecode_offset;
Smi* smi_bytecode_offset = Smi::FromInt(raw_bytecode_offset);
WriteValueToOutput(smi_bytecode_offset, 0, frame_index, output_offset,
"bytecode offset ");
if (trace_scope_ != nullptr) {
PrintF(trace_scope_->file(), " -------------------------\n");
}
// Translate the rest of the interpreter registers in the frame.
for (unsigned i = 0; i < height - 1; ++i) {
output_offset -= kPointerSize;
WriteTranslatedValueToOutput(&value_iterator, &input_index, frame_index,
output_offset);
}
// Translate the accumulator register (depending on frame position).
if (is_topmost) {
// For topmost frame, put the accumulator on the stack. The bailout state
// for interpreted frames is always set to {BailoutState::TOS_REGISTER} and
// the {NotifyDeoptimized} builtin pops it off the topmost frame (possibly
// after materialization).
output_offset -= kPointerSize;
if (goto_catch_handler) {
// If we are lazy deopting to a catch handler, we set the accumulator to
// the exception (which lives in the result register).
intptr_t accumulator_value =
input_->GetRegister(FullCodeGenerator::result_register().code());
WriteValueToOutput(reinterpret_cast<Object*>(accumulator_value), 0,
frame_index, output_offset, "accumulator ");
value_iterator++;
} else {
WriteTranslatedValueToOutput(&value_iterator, &input_index, frame_index,
output_offset, "accumulator ");
}
} else {
// For non-topmost frames, skip the accumulator translation. For those
// frames, the return value from the callee will become the accumulator.
value_iterator++;
input_index++;
}
CHECK_EQ(0u, output_offset);
// Compute this frame's PC and state. The PC will be a special builtin that
// continues the bytecode dispatch. Note that non-topmost and lazy-style
// bailout handlers also advance the bytecode offset before dispatch, hence
// simulating what normal handlers do upon completion of the operation.
Builtins* builtins = isolate_->builtins();
Code* dispatch_builtin =
(!is_topmost || (bailout_type_ == LAZY)) && !goto_catch_handler
? builtins->builtin(Builtins::kInterpreterEnterBytecodeAdvance)
: builtins->builtin(Builtins::kInterpreterEnterBytecodeDispatch);
output_frame->SetPc(reinterpret_cast<intptr_t>(dispatch_builtin->entry()));
// Restore accumulator (TOS) register.
output_frame->SetState(
Smi::FromInt(static_cast<int>(BailoutState::TOS_REGISTER)));
// Update constant pool.
if (FLAG_enable_embedded_constant_pool) {
intptr_t constant_pool_value =
reinterpret_cast<intptr_t>(dispatch_builtin->constant_pool());
output_frame->SetConstantPool(constant_pool_value);
if (is_topmost) {
Register constant_pool_reg =
InterpretedFrame::constant_pool_pointer_register();
output_frame->SetRegister(constant_pool_reg.code(), constant_pool_value);
}
}
// Clear the context register. The context might be a de-materialized object
// and will be materialized by {Runtime_NotifyDeoptimized}. For additional
// safety we use Smi(0) instead of the potential {arguments_marker} here.
if (is_topmost) {
intptr_t context_value = reinterpret_cast<intptr_t>(Smi::kZero);
Register context_reg = JavaScriptFrame::context_register();
output_frame->SetRegister(context_reg.code(), context_value);
}
// Set the continuation for the topmost frame.
if (is_topmost) {
Code* continuation = builtins->builtin(Builtins::kNotifyDeoptimized);
if (bailout_type_ == LAZY) {
continuation = builtins->builtin(Builtins::kNotifyLazyDeoptimized);
} else if (bailout_type_ == SOFT) {
continuation = builtins->builtin(Builtins::kNotifySoftDeoptimized);
} else {
CHECK_EQ(bailout_type_, EAGER);
}
output_frame->SetContinuation(
reinterpret_cast<intptr_t>(continuation->entry()));
}
}
void Deoptimizer::DoComputeArgumentsAdaptorFrame(
TranslatedFrame* translated_frame, int frame_index) {
TranslatedFrame::iterator value_iterator = translated_frame->begin();
bool is_bottommost = (0 == frame_index);
int input_index = 0;
unsigned height = translated_frame->height();
unsigned height_in_bytes = height * kPointerSize;
JSFunction* function = JSFunction::cast(value_iterator->GetRawValue());
value_iterator++;
input_index++;
if (trace_scope_ != NULL) {
PrintF(trace_scope_->file(),
" translating arguments adaptor => height=%d\n", height_in_bytes);
}
unsigned fixed_frame_size = ArgumentsAdaptorFrameConstants::kFixedFrameSize;
unsigned output_frame_size = height_in_bytes + fixed_frame_size;
// Allocate and store the output frame description.
int parameter_count = height;
FrameDescription* output_frame = new (output_frame_size)
FrameDescription(output_frame_size, parameter_count);
output_frame->SetFrameType(StackFrame::ARGUMENTS_ADAPTOR);
// An arguments adaptor frame cannot be the topmost frame.
CHECK(frame_index < output_count_ - 1);
CHECK(output_[frame_index] == NULL);
output_[frame_index] = output_frame;
// The top address of the frame is computed from the previous frame's top and
// this frame's size.
intptr_t top_address;
if (is_bottommost) {
top_address = caller_frame_top_ - output_frame_size;
} else {
top_address = output_[frame_index - 1]->GetTop() - output_frame_size;
}
output_frame->SetTop(top_address);
// Compute the incoming parameter translation.
unsigned output_offset = output_frame_size;
for (int i = 0; i < parameter_count; ++i) {
output_offset -= kPointerSize;
WriteTranslatedValueToOutput(&value_iterator, &input_index, frame_index,
output_offset);
}
// Read caller's PC from the previous frame.
output_offset -= kPCOnStackSize;
intptr_t value;
if (is_bottommost) {
value = caller_pc_;
} else {
value = output_[frame_index - 1]->GetPc();
}
output_frame->SetCallerPc(output_offset, value);
DebugPrintOutputSlot(value, frame_index, output_offset, "caller's pc\n");
// Read caller's FP from the previous frame, and set this frame's FP.
output_offset -= kFPOnStackSize;
if (is_bottommost) {
value = caller_fp_;
} else {
value = output_[frame_index - 1]->GetFp();
}
output_frame->SetCallerFp(output_offset, value);
intptr_t fp_value = top_address + output_offset;
output_frame->SetFp(fp_value);
DebugPrintOutputSlot(value, frame_index, output_offset, "caller's fp\n");
if (FLAG_enable_embedded_constant_pool) {
// Read the caller's constant pool from the previous frame.
output_offset -= kPointerSize;
if (is_bottommost) {
value = caller_constant_pool_;
} else {
value = output_[frame_index - 1]->GetConstantPool();
}
output_frame->SetCallerConstantPool(output_offset, value);
DebugPrintOutputSlot(value, frame_index, output_offset,
"caller's constant_pool\n");
}
// A marker value is used in place of the context.
output_offset -= kPointerSize;
intptr_t context = StackFrame::TypeToMarker(StackFrame::ARGUMENTS_ADAPTOR);
output_frame->SetFrameSlot(output_offset, context);
DebugPrintOutputSlot(context, frame_index, output_offset,
"context (adaptor sentinel)\n");
// The function was mentioned explicitly in the ARGUMENTS_ADAPTOR_FRAME.
output_offset -= kPointerSize;
value = reinterpret_cast<intptr_t>(function);
WriteValueToOutput(function, 0, frame_index, output_offset, "function ");
// Number of incoming arguments.
output_offset -= kPointerSize;
value = reinterpret_cast<intptr_t>(Smi::FromInt(height - 1));
output_frame->SetFrameSlot(output_offset, value);
DebugPrintOutputSlot(value, frame_index, output_offset, "argc ");
if (trace_scope_ != nullptr) {
PrintF(trace_scope_->file(), "(%d)\n", height - 1);
}
DCHECK(0 == output_offset);
Builtins* builtins = isolate_->builtins();
Code* adaptor_trampoline =
builtins->builtin(Builtins::kArgumentsAdaptorTrampoline);
intptr_t pc_value = reinterpret_cast<intptr_t>(
adaptor_trampoline->instruction_start() +
isolate_->heap()->arguments_adaptor_deopt_pc_offset()->value());
output_frame->SetPc(pc_value);
if (FLAG_enable_embedded_constant_pool) {
intptr_t constant_pool_value =
reinterpret_cast<intptr_t>(adaptor_trampoline->constant_pool());
output_frame->SetConstantPool(constant_pool_value);
}
}
void Deoptimizer::DoComputeTailCallerFrame(TranslatedFrame* translated_frame,
int frame_index) {
SharedFunctionInfo* shared = translated_frame->raw_shared_info();
bool is_bottommost = (0 == frame_index);
// Tail caller frame can't be topmost.
CHECK_NE(output_count_ - 1, frame_index);
if (trace_scope_ != NULL) {
PrintF(trace_scope_->file(), " translating tail caller frame ");
std::unique_ptr<char[]> name = shared->DebugName()->ToCString();
PrintF(trace_scope_->file(), "%s\n", name.get());
}
if (!is_bottommost) return;
// Drop the arguments adaptor frame below the current frame if it exists.
Address fp_address = input_->GetFramePointerAddress();
Address adaptor_fp_address =
Memory::Address_at(fp_address + CommonFrameConstants::kCallerFPOffset);
if (StackFrame::TypeToMarker(StackFrame::ARGUMENTS_ADAPTOR) !=
Memory::intptr_at(adaptor_fp_address +
CommonFrameConstants::kContextOrFrameTypeOffset)) {
return;
}
int caller_params_count =
Smi::cast(
Memory::Object_at(adaptor_fp_address +
ArgumentsAdaptorFrameConstants::kLengthOffset))
->value();
int callee_params_count =
function_->shared()->internal_formal_parameter_count();
// Neither the caller's nor the callee's parameter count includes the receiver.
int offset = (caller_params_count - callee_params_count) * kPointerSize;
intptr_t new_stack_fp =
reinterpret_cast<intptr_t>(adaptor_fp_address) + offset;
intptr_t new_caller_frame_top = new_stack_fp +
(callee_params_count + 1) * kPointerSize +
CommonFrameConstants::kFixedFrameSizeAboveFp;
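// The adaptor frame itself is dropped: its caller's pc and fp become the
// caller values for the output frame, and the caller frame top is recomputed
// for the callee's own formal parameter count.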
intptr_t adaptor_caller_pc = Memory::intptr_at(
adaptor_fp_address + CommonFrameConstants::kCallerPCOffset);
intptr_t adaptor_caller_fp = Memory::intptr_at(
adaptor_fp_address + CommonFrameConstants::kCallerFPOffset);
if (trace_scope_ != NULL) {
PrintF(trace_scope_->file(),
" dropping caller arguments adaptor frame: offset=%d, "
"fp: 0x%08" V8PRIxPTR " -> 0x%08" V8PRIxPTR
", "
"caller sp: 0x%08" V8PRIxPTR " -> 0x%08" V8PRIxPTR "\n",
offset, stack_fp_, new_stack_fp, caller_frame_top_,
new_caller_frame_top);
}
caller_frame_top_ = new_caller_frame_top;
caller_fp_ = adaptor_caller_fp;
caller_pc_ = adaptor_caller_pc;
}
void Deoptimizer::DoComputeConstructStubFrame(TranslatedFrame* translated_frame,
int frame_index) {
TranslatedFrame::iterator value_iterator = translated_frame->begin();
bool is_topmost = (output_count_ - 1 == frame_index);
// The construct frame could become topmost only if we inlined a constructor
// call which does a tail call (otherwise the tail callee's frame would be
// the topmost one). So it could only be the LAZY case.
CHECK(!is_topmost || bailout_type_ == LAZY);
int input_index = 0;
Builtins* builtins = isolate_->builtins();
Code* construct_stub = builtins->builtin(
FLAG_harmony_restrict_constructor_return
? Builtins::kJSConstructStubGenericRestrictedReturn
: Builtins::kJSConstructStubGenericUnrestrictedReturn);
BailoutId bailout_id = translated_frame->node_id();
unsigned height = translated_frame->height();
unsigned height_in_bytes = height * kPointerSize;
// If the construct frame appears to be topmost we should ensure that the
// value of result register is preserved during continuation execution.
// We do this here by "pushing" the result of the constructor function to the
// top of the reconstructed stack and then using the
// BailoutState::TOS_REGISTER machinery.
if (is_topmost) {
height_in_bytes += kPointerSize;
}
JSFunction* function = JSFunction::cast(value_iterator->GetRawValue());
value_iterator++;
input_index++;
if (trace_scope_ != NULL) {
PrintF(trace_scope_->file(),
" translating construct stub => bailout_id=%d (%s), height=%d\n",
bailout_id.ToInt(),
bailout_id == BailoutId::ConstructStubCreate() ? "create" : "invoke",
height_in_bytes);
}
unsigned fixed_frame_size = ConstructFrameConstants::kFixedFrameSize;
unsigned output_frame_size = height_in_bytes + fixed_frame_size;
// Allocate and store the output frame description.
FrameDescription* output_frame =
new (output_frame_size) FrameDescription(output_frame_size);
output_frame->SetFrameType(StackFrame::CONSTRUCT);
// A construct stub frame can never be the bottommost frame.
DCHECK(frame_index > 0 && frame_index < output_count_);
DCHECK(output_[frame_index] == NULL);
output_[frame_index] = output_frame;
// The top address of the frame is computed from the previous frame's top and
// this frame's size.
intptr_t top_address;
top_address = output_[frame_index - 1]->GetTop() - output_frame_size;
output_frame->SetTop(top_address);
// Compute the incoming parameter translation.
int parameter_count = height;
unsigned output_offset = output_frame_size;
for (int i = 0; i < parameter_count; ++i) {
output_offset -= kPointerSize;
// The allocated receiver of a construct stub frame is passed as the
// receiver parameter through the translation. It might encode a captured
// object; in that case, override the slot address for the captured object.
WriteTranslatedValueToOutput(
&value_iterator, &input_index, frame_index, output_offset, nullptr,
(i == 0) ? reinterpret_cast<Address>(top_address) : nullptr);
}
// Read caller's PC from the previous frame.
output_offset -= kPCOnStackSize;
intptr_t callers_pc = output_[frame_index - 1]->GetPc();
output_frame->SetCallerPc(output_offset, callers_pc);
DebugPrintOutputSlot(callers_pc, frame_index, output_offset, "caller's pc\n");
// Read caller's FP from the previous frame, and set this frame's FP.
output_offset -= kFPOnStackSize;
intptr_t value = output_[frame_index - 1]->GetFp();
output_frame->SetCallerFp(output_offset, value);
intptr_t fp_value = top_address + output_offset;
output_frame->SetFp(fp_value);
if (is_topmost) {
Register fp_reg = JavaScriptFrame::fp_register();
output_frame->SetRegister(fp_reg.code(), fp_value);
}
DebugPrintOutputSlot(value, frame_index, output_offset, "caller's fp\n");
if (FLAG_enable_embedded_constant_pool) {
// Read the caller's constant pool from the previous frame.
output_offset -= kPointerSize;
value = output_[frame_index - 1]->GetConstantPool();
output_frame->SetCallerConstantPool(output_offset, value);
DebugPrintOutputSlot(value, frame_index, output_offset,
"caller's constant_pool\n");
}
// A marker value is used to mark the frame.
output_offset -= kPointerSize;
value = StackFrame::TypeToMarker(StackFrame::CONSTRUCT);
output_frame->SetFrameSlot(output_offset, value);
DebugPrintOutputSlot(value, frame_index, output_offset,
"typed frame marker\n");
// The context can be gotten from the previous frame.
output_offset -= kPointerSize;
value = output_[frame_index - 1]->GetContext();
output_frame->SetFrameSlot(output_offset, value);
DebugPrintOutputSlot(value, frame_index, output_offset, "context\n");
// Number of incoming arguments.
output_offset -= kPointerSize;
value = reinterpret_cast<intptr_t>(Smi::FromInt(height - 1));
output_frame->SetFrameSlot(output_offset, value);
DebugPrintOutputSlot(value, frame_index, output_offset, "argc ");
if (trace_scope_ != nullptr) {
PrintF(trace_scope_->file(), "(%d)\n", height - 1);
}
// The constructor function was mentioned explicitly in the
// CONSTRUCT_STUB_FRAME.
output_offset -= kPointerSize;
value = reinterpret_cast<intptr_t>(function);
WriteValueToOutput(function, 0, frame_index, output_offset,
"constructor function ");
// The deopt info contains the implicit receiver or the new target at the
// position of the receiver. Copy it to the top of stack.
output_offset -= kPointerSize;
value = output_frame->GetFrameSlot(output_frame_size - kPointerSize);
output_frame->SetFrameSlot(output_offset, value);
if (bailout_id == BailoutId::ConstructStubCreate()) {
DebugPrintOutputSlot(value, frame_index, output_offset, "new target\n");
} else {
CHECK(bailout_id == BailoutId::ConstructStubInvoke());
DebugPrintOutputSlot(value, frame_index, output_offset,
"allocated receiver\n");
}
if (is_topmost) {
    // Ensure the result is restored when we return to the stub.
output_offset -= kPointerSize;
Register result_reg = FullCodeGenerator::result_register();
value = input_->GetRegister(result_reg.code());
output_frame->SetFrameSlot(output_offset, value);
DebugPrintOutputSlot(value, frame_index, output_offset, "subcall result\n");
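    // The TOS_REGISTER state makes the continuation reload the result
    // register from this top-of-stack slot, as described above.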
output_frame->SetState(
Smi::FromInt(static_cast<int>(BailoutState::TOS_REGISTER)));
}
CHECK_EQ(0u, output_offset);
// Compute this frame's PC.
DCHECK(bailout_id.IsValidForConstructStub());
Address start = construct_stub->instruction_start();
int pc_offset =
bailout_id == BailoutId::ConstructStubCreate()
? isolate_->heap()->construct_stub_create_deopt_pc_offset()->value()
: isolate_->heap()->construct_stub_invoke_deopt_pc_offset()->value();
intptr_t pc_value = reinterpret_cast<intptr_t>(start + pc_offset);
output_frame->SetPc(pc_value);
// Update constant pool.
if (FLAG_enable_embedded_constant_pool) {
intptr_t constant_pool_value =
reinterpret_cast<intptr_t>(construct_stub->constant_pool());
output_frame->SetConstantPool(constant_pool_value);
if (is_topmost) {
Register constant_pool_reg =
JavaScriptFrame::constant_pool_pointer_register();
      output_frame->SetRegister(constant_pool_reg.code(), constant_pool_value);
}
}
// Clear the context register. The context might be a de-materialized object
// and will be materialized by {Runtime_NotifyDeoptimized}. For additional
// safety we use Smi(0) instead of the potential {arguments_marker} here.
if (is_topmost) {
intptr_t context_value = reinterpret_cast<intptr_t>(Smi::kZero);
Register context_reg = JavaScriptFrame::context_register();
output_frame->SetRegister(context_reg.code(), context_value);
}
// Set the continuation for the topmost frame.
if (is_topmost) {
Builtins* builtins = isolate_->builtins();
DCHECK_EQ(LAZY, bailout_type_);
Code* continuation = builtins->builtin(Builtins::kNotifyLazyDeoptimized);
output_frame->SetContinuation(
reinterpret_cast<intptr_t>(continuation->entry()));
}
}
void Deoptimizer::DoComputeAccessorStubFrame(TranslatedFrame* translated_frame,
int frame_index,
bool is_setter_stub_frame) {
TranslatedFrame::iterator value_iterator = translated_frame->begin();
bool is_topmost = (output_count_ - 1 == frame_index);
// The accessor frame could become topmost only if we inlined an accessor
// call which does a tail call (otherwise the tail callee's frame would be
// the topmost one). So it could only be the LAZY case.
CHECK(!is_topmost || bailout_type_ == LAZY);
int input_index = 0;
// Skip accessor.
value_iterator++;
input_index++;
// The receiver (and the implicit return value, if any) are expected in
// registers by the LoadIC/StoreIC, so they don't belong to the output stack
// frame. This means that we have to use a height of 0.
unsigned height = 0;
unsigned height_in_bytes = height * kPointerSize;
// If the accessor frame appears to be topmost we should ensure that the
// value of result register is preserved during continuation execution.
// We do this here by "pushing" the result of the accessor function to the
// top of the reconstructed stack and then using the
// BailoutState::TOS_REGISTER machinery.
// We don't need to restore the result in case of a setter call because we
// have to return the stored value but not the result of the setter function.
bool should_preserve_result = is_topmost && !is_setter_stub_frame;
if (should_preserve_result) {
height_in_bytes += kPointerSize;
}
const char* kind = is_setter_stub_frame ? "setter" : "getter";
  if (trace_scope_ != nullptr) {
PrintF(trace_scope_->file(),
" translating %s stub => height=%u\n", kind, height_in_bytes);
}
// We need 1 stack entry for the return address and enough entries for the
// StackFrame::INTERNAL (FP, frame type, context, code object and constant
  // pool (if enabled); see MacroAssembler::EnterFrame).
// For a setter stub frame we need one additional entry for the implicit
// return value, see StoreStubCompiler::CompileStoreViaSetter.
unsigned fixed_frame_entries =
(StandardFrameConstants::kFixedFrameSize / kPointerSize) + 1 +
(is_setter_stub_frame ? 1 : 0);
unsigned fixed_frame_size = fixed_frame_entries * kPointerSize;
unsigned output_frame_size = height_in_bytes + fixed_frame_size;
// Allocate and store the output frame description.
FrameDescription* output_frame =
new (output_frame_size) FrameDescription(output_frame_size);
output_frame->SetFrameType(StackFrame::INTERNAL);
  // A frame for an accessor stub cannot be bottommost.
CHECK(frame_index > 0 && frame_index < output_count_);
CHECK_NULL(output_[frame_index]);
output_[frame_index] = output_frame;
// The top address of the frame is computed from the previous frame's top and
// this frame's size.
intptr_t top_address = output_[frame_index - 1]->GetTop() - output_frame_size;
output_frame->SetTop(top_address);
unsigned output_offset = output_frame_size;
// Read caller's PC from the previous frame.
output_offset -= kPCOnStackSize;
intptr_t callers_pc = output_[frame_index - 1]->GetPc();
output_frame->SetCallerPc(output_offset, callers_pc);
DebugPrintOutputSlot(callers_pc, frame_index, output_offset, "caller's pc\n");
// Read caller's FP from the previous frame, and set this frame's FP.
output_offset -= kFPOnStackSize;
intptr_t value = output_[frame_index - 1]->GetFp();
output_frame->SetCallerFp(output_offset, value);
intptr_t fp_value = top_address + output_offset;
output_frame->SetFp(fp_value);
if (is_topmost) {
Register fp_reg = JavaScriptFrame::fp_register();
output_frame->SetRegister(fp_reg.code(), fp_value);
}
DebugPrintOutputSlot(value, frame_index, output_offset, "caller's fp\n");
if (FLAG_enable_embedded_constant_pool) {
// Read the caller's constant pool from the previous frame.
output_offset -= kPointerSize;
value = output_[frame_index - 1]->GetConstantPool();
output_frame->SetCallerConstantPool(output_offset, value);
DebugPrintOutputSlot(value, frame_index, output_offset,
"caller's constant_pool\n");
}
// Set the frame type.
output_offset -= kPointerSize;
value = StackFrame::TypeToMarker(StackFrame::INTERNAL);
output_frame->SetFrameSlot(output_offset, value);
DebugPrintOutputSlot(value, frame_index, output_offset, "frame type ");
if (trace_scope_ != nullptr) {
PrintF(trace_scope_->file(), "(%s sentinel)\n", kind);
}
// Get Code object from accessor stub.
output_offset -= kPointerSize;
Builtins::Name name = is_setter_stub_frame ?
Builtins::kStoreIC_Setter_ForDeopt :
Builtins::kLoadIC_Getter_ForDeopt;
Code* accessor_stub = isolate_->builtins()->builtin(name);
value = reinterpret_cast<intptr_t>(accessor_stub);
output_frame->SetFrameSlot(output_offset, value);
DebugPrintOutputSlot(value, frame_index, output_offset, "code object\n");
  // The context is read from the previous frame.
output_offset -= kPointerSize;
value = output_[frame_index - 1]->GetContext();
output_frame->SetFrameSlot(output_offset, value);
DebugPrintOutputSlot(value, frame_index, output_offset, "context\n");
// Skip receiver.
value_iterator++;
input_index++;
if (is_setter_stub_frame) {
// The implicit return value was part of the artificial setter stub
// environment.
output_offset -= kPointerSize;
WriteTranslatedValueToOutput(&value_iterator, &input_index, frame_index,
output_offset);
}
if (should_preserve_result) {
    // Ensure the result is restored when we return to the stub.
output_offset -= kPointerSize;
Register result_reg = FullCodeGenerator::result_register();
value = input_->GetRegister(result_reg.code());
output_frame->SetFrameSlot(output_offset, value);
DebugPrintOutputSlot(value, frame_index, output_offset,
"accessor result\n");
output_frame->SetState(
Smi::FromInt(static_cast<int>(BailoutState::TOS_REGISTER)));
} else {
output_frame->SetState(
Smi::FromInt(static_cast<int>(BailoutState::NO_REGISTERS)));
}
CHECK_EQ(0u, output_offset);
Smi* offset = is_setter_stub_frame ?
isolate_->heap()->setter_stub_deopt_pc_offset() :
isolate_->heap()->getter_stub_deopt_pc_offset();
intptr_t pc = reinterpret_cast<intptr_t>(
accessor_stub->instruction_start() + offset->value());
output_frame->SetPc(pc);
// Update constant pool.
if (FLAG_enable_embedded_constant_pool) {
intptr_t constant_pool_value =
reinterpret_cast<intptr_t>(accessor_stub->constant_pool());
output_frame->SetConstantPool(constant_pool_value);
if (is_topmost) {
Register constant_pool_reg =
JavaScriptFrame::constant_pool_pointer_register();
      output_frame->SetRegister(constant_pool_reg.code(), constant_pool_value);
}
}
// Clear the context register. The context might be a de-materialized object
// and will be materialized by {Runtime_NotifyDeoptimized}. For additional
// safety we use Smi(0) instead of the potential {arguments_marker} here.
if (is_topmost) {
intptr_t context_value = reinterpret_cast<intptr_t>(Smi::kZero);
Register context_reg = JavaScriptFrame::context_register();
output_frame->SetRegister(context_reg.code(), context_value);
}
// Set the continuation for the topmost frame.
if (is_topmost) {
Builtins* builtins = isolate_->builtins();
DCHECK_EQ(LAZY, bailout_type_);
Code* continuation = builtins->builtin(Builtins::kNotifyLazyDeoptimized);
output_frame->SetContinuation(
reinterpret_cast<intptr_t>(continuation->entry()));
}
}
void Deoptimizer::DoComputeCompiledStubFrame(TranslatedFrame* translated_frame,
int frame_index) {
  //
  //               FROM                                  TO
  //    |          ....           |          |          ....           |
  //    +-------------------------+          +-------------------------+
  //    | JSFunction continuation |          | JSFunction continuation |
  //    +-------------------------+          +-------------------------+
  // |  |    saved frame (FP)     |          |    saved frame (FP)     |
  // |  +=========================+<-fpreg   +=========================+<-fpreg
  // |  |constant pool (if ool_cp)|          |constant pool (if ool_cp)|
  // |  +-------------------------+          +-------------------------+
  // |  |   JSFunction context    |          |   JSFunction context    |
  // v  +-------------------------+          +-------------------------+
  //    |   COMPILED_STUB marker  |          |   STUB_FAILURE marker   |
  //    +-------------------------+          +-------------------------+
  //    |                         |          | caller args.arguments_  |
  //    |          ...            |          +-------------------------+
  //    |                         |          | caller args.length_     |
  //    +-------------------------+<-spreg   +-------------------------+
  //                                         |   caller args pointer   |
  //                                         +-------------------------+
  //                                         |  caller stack param 1   |
  //      parameters in registers            +-------------------------+
  //        and spilled to stack             |          ....           |
  //                                         +-------------------------+
  //                                         |  caller stack param n   |
  //                                         +-------------------------+<-spreg
  //                                         reg = number of parameters
  //                                         reg = failure handler address
  //                                         reg = saved frame
  //                                         reg = JSFunction context
  //
// Caller stack params contain the register parameters to the stub first,
// and then, if the descriptor specifies a constant number of stack
// parameters, the stack parameters as well.
TranslatedFrame::iterator value_iterator = translated_frame->begin();
int input_index = 0;
CHECK(compiled_code_->is_hydrogen_stub());
int major_key = CodeStub::GetMajorKey(compiled_code_);
CodeStubDescriptor descriptor(isolate_, compiled_code_->stub_key());
// The output frame must have room for all pushed register parameters
// and the standard stack frame slots. Include space for an argument
// object to the callee and optionally the space to pass the argument
// object to the stub failure handler.
int param_count = descriptor.GetRegisterParameterCount();
int stack_param_count = descriptor.GetStackParameterCount();
// The translated frame contains all of the register parameters
// plus the context.
CHECK_EQ(translated_frame->height(), param_count + 1);
CHECK_GE(param_count, 0);
int height_in_bytes = kPointerSize * (param_count + stack_param_count);
int fixed_frame_size = StubFailureTrampolineFrameConstants::kFixedFrameSize;
int output_frame_size = height_in_bytes + fixed_frame_size;
  if (trace_scope_ != nullptr) {
PrintF(trace_scope_->file(),
" translating %s => StubFailureTrampolineStub, height=%d\n",
CodeStub::MajorName(static_cast<CodeStub::Major>(major_key)),
height_in_bytes);
}
// The stub failure trampoline is a single frame.
FrameDescription* output_frame =
new (output_frame_size) FrameDescription(output_frame_size);
output_frame->SetFrameType(StackFrame::STUB_FAILURE_TRAMPOLINE);
CHECK_EQ(frame_index, 0);
output_[frame_index] = output_frame;
// The top address of the frame is computed from the previous frame's top and
// this frame's size.
intptr_t top_address = caller_frame_top_ - output_frame_size;
output_frame->SetTop(top_address);
// Set caller's PC (JSFunction continuation).
unsigned output_frame_offset = output_frame_size - kFPOnStackSize;
intptr_t value = caller_pc_;
output_frame->SetCallerPc(output_frame_offset, value);
DebugPrintOutputSlot(value, frame_index, output_frame_offset,
"caller's pc\n");
// Read caller's FP from the input frame, and set this frame's FP.
value = caller_fp_;
output_frame_offset -= kFPOnStackSize;
output_frame->SetCallerFp(output_frame_offset, value);
intptr_t frame_ptr = top_address + output_frame_offset;
Register fp_reg = StubFailureTrampolineFrame::fp_register();
output_frame->SetRegister(fp_reg.code(), frame_ptr);
output_frame->SetFp(frame_ptr);
DebugPrintOutputSlot(value, frame_index, output_frame_offset,
"caller's fp\n");
if (FLAG_enable_embedded_constant_pool) {
// Read the caller's constant pool from the input frame.
value = caller_constant_pool_;
output_frame_offset -= kPointerSize;
output_frame->SetCallerConstantPool(output_frame_offset, value);
DebugPrintOutputSlot(value, frame_index, output_frame_offset,
"caller's constant_pool\n");
}
  // The marker for the typed stack frame.
output_frame_offset -= kPointerSize;
value = StackFrame::TypeToMarker(StackFrame::STUB_FAILURE_TRAMPOLINE);
output_frame->SetFrameSlot(output_frame_offset, value);
DebugPrintOutputSlot(value, frame_index, output_frame_offset,
"function (stub failure sentinel)\n");
intptr_t caller_arg_count = stack_param_count;
bool arg_count_known = !descriptor.stack_parameter_count().is_valid();
// Build the Arguments object for the caller's parameters and a pointer to it.
output_frame_offset -= kPointerSize;
int args_arguments_offset = output_frame_offset;
intptr_t the_hole = reinterpret_cast<intptr_t>(
isolate_->heap()->the_hole_value());
if (arg_count_known) {
value = frame_ptr + StandardFrameConstants::kCallerSPOffset +
(caller_arg_count - 1) * kPointerSize;
} else {
value = the_hole;
}
output_frame->SetFrameSlot(args_arguments_offset, value);
DebugPrintOutputSlot(
value, frame_index, args_arguments_offset,
arg_count_known ? "args.arguments\n" : "args.arguments (the hole)\n");
output_frame_offset -= kPointerSize;
int length_frame_offset = output_frame_offset;
value = arg_count_known ? caller_arg_count : the_hole;
output_frame->SetFrameSlot(length_frame_offset, value);
DebugPrintOutputSlot(
value, frame_index, length_frame_offset,
arg_count_known ? "args.length\n" : "args.length (the hole)\n");
output_frame_offset -= kPointerSize;
value = frame_ptr + StandardFrameConstants::kCallerSPOffset -
(output_frame_size - output_frame_offset) + kPointerSize;
output_frame->SetFrameSlot(output_frame_offset, value);
DebugPrintOutputSlot(value, frame_index, output_frame_offset, "args*\n");
// Copy the register parameters to the failure frame.
int arguments_length_offset = -1;
for (int i = 0; i < param_count; ++i) {
output_frame_offset -= kPointerSize;
WriteTranslatedValueToOutput(&value_iterator, &input_index, 0,
output_frame_offset);
if (!arg_count_known &&
descriptor.GetRegisterParameter(i)
.is(descriptor.stack_parameter_count())) {
arguments_length_offset = output_frame_offset;
}
}
Object* maybe_context = value_iterator->GetRawValue();
CHECK(maybe_context->IsContext());
Register context_reg = StubFailureTrampolineFrame::context_register();
value = reinterpret_cast<intptr_t>(maybe_context);
output_frame->SetRegister(context_reg.code(), value);
++value_iterator;
// Copy constant stack parameters to the failure frame. If the number of stack
// parameters is not known in the descriptor, the arguments object is the way
// to access them.
for (int i = 0; i < stack_param_count; i++) {
output_frame_offset -= kPointerSize;
Object** stack_parameter = reinterpret_cast<Object**>(
frame_ptr + StandardFrameConstants::kCallerSPOffset +
(stack_param_count - i - 1) * kPointerSize);
value = reinterpret_cast<intptr_t>(*stack_parameter);
output_frame->SetFrameSlot(output_frame_offset, value);
DebugPrintOutputSlot(value, frame_index, output_frame_offset,
"stack parameter\n");
}
CHECK_EQ(0u, output_frame_offset);
if (!arg_count_known) {
CHECK_GE(arguments_length_offset, 0);
    // We know it's a smi because 1) the code stub guarantees the stack
    // parameter count is in smi range, and 2) the parameter loop above wrote
    // it to the frame as a tagged value.
Smi* smi_caller_arg_count = reinterpret_cast<Smi*>(
output_frame->GetFrameSlot(arguments_length_offset));
caller_arg_count = smi_caller_arg_count->value();
output_frame->SetFrameSlot(length_frame_offset, caller_arg_count);
DebugPrintOutputSlot(caller_arg_count, frame_index, length_frame_offset,
"args.length\n");
value = frame_ptr + StandardFrameConstants::kCallerSPOffset +
(caller_arg_count - 1) * kPointerSize;
output_frame->SetFrameSlot(args_arguments_offset, value);
DebugPrintOutputSlot(value, frame_index, args_arguments_offset,
"args.arguments");
}
// Copy the double registers from the input into the output frame.
CopyDoubleRegisters(output_frame);
// Fill registers containing handler and number of parameters.
SetPlatformCompiledStubRegisters(output_frame, &descriptor);
// Compute this frame's PC, state, and continuation.
  Code* trampoline = nullptr;
StubFunctionMode function_mode = descriptor.function_mode();
StubFailureTrampolineStub(isolate_, function_mode)
.FindCodeInCache(&trampoline);
  DCHECK_NOT_NULL(trampoline);
output_frame->SetPc(reinterpret_cast<intptr_t>(
trampoline->instruction_start()));
if (FLAG_enable_embedded_constant_pool) {
Register constant_pool_reg =
StubFailureTrampolineFrame::constant_pool_pointer_register();
intptr_t constant_pool_value =
reinterpret_cast<intptr_t>(trampoline->constant_pool());
output_frame->SetConstantPool(constant_pool_value);
output_frame->SetRegister(constant_pool_reg.code(), constant_pool_value);
}
output_frame->SetState(
Smi::FromInt(static_cast<int>(BailoutState::NO_REGISTERS)));
Code* notify_failure =
isolate_->builtins()->builtin(Builtins::kNotifyStubFailureSaveDoubles);
output_frame->SetContinuation(
reinterpret_cast<intptr_t>(notify_failure->entry()));
}
void Deoptimizer::MaterializeHeapObjects(JavaScriptFrameIterator* it) {
// Walk to the last JavaScript output frame to find out if it has
// adapted arguments.
for (int frame_index = 0; frame_index < jsframe_count(); ++frame_index) {
if (frame_index != 0) it->Advance();
}
translated_state_.Prepare(it->frame()->has_adapted_arguments(),
reinterpret_cast<Address>(stack_fp_));
for (auto& materialization : values_to_materialize_) {
Handle<Object> value = materialization.value_->GetValue();
if (trace_scope_ != nullptr) {
PrintF("Materialization [0x%08" V8PRIxPTR "] <- 0x%08" V8PRIxPTR " ; ",
reinterpret_cast<intptr_t>(materialization.output_slot_address_),
reinterpret_cast<intptr_t>(*value));
value->ShortPrint(trace_scope_->file());
PrintF(trace_scope_->file(), "\n");
}
*(reinterpret_cast<intptr_t*>(materialization.output_slot_address_)) =
reinterpret_cast<intptr_t>(*value);
}
isolate_->materialized_object_store()->Remove(
reinterpret_cast<Address>(stack_fp_));
}
void Deoptimizer::WriteTranslatedValueToOutput(
TranslatedFrame::iterator* iterator, int* input_index, int frame_index,
unsigned output_offset, const char* debug_hint_string,
Address output_address_for_materialization) {
Object* value = (*iterator)->GetRawValue();
WriteValueToOutput(value, *input_index, frame_index, output_offset,
debug_hint_string);
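  // Values that could not be reconstructed eagerly are written as the
  // arguments marker; remember the slot so that MaterializeHeapObjects can
  // patch in the real object later.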
if (value == isolate_->heap()->arguments_marker()) {
Address output_address =
reinterpret_cast<Address>(output_[frame_index]->GetTop()) +
output_offset;
if (output_address_for_materialization == nullptr) {
output_address_for_materialization = output_address;
}
values_to_materialize_.push_back(
{output_address_for_materialization, *iterator});
}
(*iterator)++;
(*input_index)++;
}
void Deoptimizer::WriteValueToOutput(Object* value, int input_index,
int frame_index, unsigned output_offset,
const char* debug_hint_string) {
output_[frame_index]->SetFrameSlot(output_offset,
reinterpret_cast<intptr_t>(value));
if (trace_scope_ != nullptr) {
DebugPrintOutputSlot(reinterpret_cast<intptr_t>(value), frame_index,
output_offset, debug_hint_string);
value->ShortPrint(trace_scope_->file());
PrintF(trace_scope_->file(), " (input #%d)\n", input_index);
}
}
void Deoptimizer::DebugPrintOutputSlot(intptr_t value, int frame_index,
unsigned output_offset,
const char* debug_hint_string) {
if (trace_scope_ != nullptr) {
Address output_address =
reinterpret_cast<Address>(output_[frame_index]->GetTop()) +
output_offset;
PrintF(trace_scope_->file(),
" 0x%08" V8PRIxPTR ": [top + %d] <- 0x%08" V8PRIxPTR " ; %s",
reinterpret_cast<intptr_t>(output_address), output_offset, value,
debug_hint_string == nullptr ? "" : debug_hint_string);
}
}
unsigned Deoptimizer::ComputeInputFrameAboveFpFixedSize() const {
unsigned fixed_size = CommonFrameConstants::kFixedFrameSizeAboveFp;
if (!function_->IsSmi()) {
fixed_size += ComputeIncomingArgumentSize(function_->shared());
}
return fixed_size;
}
unsigned Deoptimizer::ComputeInputFrameSize() const {
// The fp-to-sp delta already takes the context, constant pool pointer and the
// function into account so we have to avoid double counting them.
unsigned fixed_size_above_fp = ComputeInputFrameAboveFpFixedSize();
unsigned result = fixed_size_above_fp + fp_to_sp_delta_;
if (compiled_code_->kind() == Code::OPTIMIZED_FUNCTION) {
unsigned stack_slots = compiled_code_->stack_slots();
unsigned outgoing_size =
ComputeOutgoingArgumentSize(compiled_code_, bailout_id_);
CHECK_EQ(fixed_size_above_fp + (stack_slots * kPointerSize) -
CommonFrameConstants::kFixedFrameSizeAboveFp + outgoing_size,
result);
}
return result;
}
// static
unsigned Deoptimizer::ComputeJavascriptFixedSize(SharedFunctionInfo* shared) {
// The fixed part of the frame consists of the return address, frame
// pointer, function, context, and all the incoming arguments.
return ComputeIncomingArgumentSize(shared) +
StandardFrameConstants::kFixedFrameSize;
}
// static
unsigned Deoptimizer::ComputeInterpretedFixedSize(SharedFunctionInfo* shared) {
// The fixed part of the frame consists of the return address, frame
// pointer, function, context, new.target, bytecode offset and all the
// incoming arguments.
return ComputeIncomingArgumentSize(shared) +
InterpreterFrameConstants::kFixedFrameSize;
}
// static
unsigned Deoptimizer::ComputeIncomingArgumentSize(SharedFunctionInfo* shared) {
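  // The "+ 1" accounts for the implicitly passed receiver: a function with
  // two formal parameters occupies (2 + 1) * kPointerSize bytes of incoming
  // argument space.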
return (shared->internal_formal_parameter_count() + 1) * kPointerSize;
}
// static
unsigned Deoptimizer::ComputeOutgoingArgumentSize(Code* code,
unsigned bailout_id) {
DeoptimizationInputData* data =
DeoptimizationInputData::cast(code->deoptimization_data());
unsigned height = data->ArgumentsStackHeight(bailout_id)->value();
return height * kPointerSize;
}
void Deoptimizer::EnsureCodeForDeoptimizationEntry(Isolate* isolate,
BailoutType type,
int max_entry_id) {
  // We cannot run this while the serializer is enabled, because it would
  // cause us to emit relocation information for the external references.
  // This restriction is fine, since the deoptimizer's code section isn't
  // meant to be serialized at all.
CHECK(type == EAGER || type == SOFT || type == LAZY);
DeoptimizerData* data = isolate->deoptimizer_data();
int entry_count = data->deopt_entry_code_entries_[type];
if (max_entry_id < entry_count) return;
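  // Grow the table geometrically, e.g. with entry_count == 64 and
  // max_entry_id == 200 the table is regenerated with 256 entries.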
entry_count = Max(entry_count, Deoptimizer::kMinNumberOfEntries);
while (max_entry_id >= entry_count) entry_count *= 2;
CHECK(entry_count <= Deoptimizer::kMaxNumberOfEntries);
  MacroAssembler masm(isolate, nullptr, 16 * KB, CodeObjectRequired::kYes);
masm.set_emit_debug_code(false);
GenerateDeoptimizationEntries(&masm, entry_count, type);
CodeDesc desc;
masm.GetCode(&desc);
DCHECK(!RelocInfo::RequiresRelocation(isolate, desc));
MemoryChunk* chunk = data->deopt_entry_code_[type];
CHECK(static_cast<int>(Deoptimizer::GetMaxDeoptTableSize()) >=
desc.instr_size);
if (!chunk->CommitArea(desc.instr_size)) {
V8::FatalProcessOutOfMemory(
"Deoptimizer::EnsureCodeForDeoptimizationEntry");
}
CopyBytes(chunk->area_start(), desc.buffer,
static_cast<size_t>(desc.instr_size));
Assembler::FlushICache(isolate, chunk->area_start(), desc.instr_size);
data->deopt_entry_code_entries_[type] = entry_count;
}
void Deoptimizer::EnsureCodeForMaxDeoptimizationEntries(Isolate* isolate) {
EnsureCodeForDeoptimizationEntry(isolate, EAGER, kMaxNumberOfEntries - 1);
EnsureCodeForDeoptimizationEntry(isolate, LAZY, kMaxNumberOfEntries - 1);
EnsureCodeForDeoptimizationEntry(isolate, SOFT, kMaxNumberOfEntries - 1);
}
FrameDescription::FrameDescription(uint32_t frame_size, int parameter_count)
: frame_size_(frame_size),
parameter_count_(parameter_count),
top_(kZapUint32),
pc_(kZapUint32),
fp_(kZapUint32),
context_(kZapUint32),
constant_pool_(kZapUint32) {
// Zap all the registers.
for (int r = 0; r < Register::kNumRegisters; r++) {
// TODO(jbramley): It isn't safe to use kZapUint32 here. If the register
// isn't used before the next safepoint, the GC will try to scan it as a
// tagged value. kZapUint32 looks like a valid tagged pointer, but it isn't.
SetRegister(r, kZapUint32);
}
// Zap all the slots.
for (unsigned o = 0; o < frame_size; o += kPointerSize) {
SetFrameSlot(o, kZapUint32);
}
}
void TranslationBuffer::Add(int32_t value) {
// This wouldn't handle kMinInt correctly if it ever encountered it.
DCHECK(value != kMinInt);
// Encode the sign bit in the least significant bit.
bool is_negative = (value < 0);
uint32_t bits = ((is_negative ? -value : value) << 1) |
static_cast<int32_t>(is_negative);
// Encode the individual bytes using the least significant bit of
// each byte to indicate whether or not more bytes follow.
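  // Worked example (illustrative): Add(-3) gives bits == (3 << 1) | 1 == 7,
  // which fits in seven bits, so the single byte (7 << 1) | 0 == 0x0E is
  // emitted; Add(300) gives bits == 600 and emits 0xB1 (low seven bits,
  // continuation bit set) followed by 0x08 (remaining bits, end marker).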
do {
uint32_t next = bits >> 7;
contents_.push_back(((bits << 1) & 0xFF) | (next != 0));
bits = next;
} while (bits != 0);
}
int32_t TranslationIterator::Next() {
// Run through the bytes until we reach one with a least significant
// bit of zero (marks the end).
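  // Continuing the worked example in TranslationBuffer::Add: reading 0xB1
  // accumulates 0xB1 >> 1 == 88 and continues (LSB set); reading 0x08 adds
  // (0x08 >> 1) << 7 == 512 and stops (LSB clear), so bits == 600 and the
  // decoded value is +(600 >> 1) == 300.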
uint32_t bits = 0;
for (int i = 0; true; i += 7) {
DCHECK(HasNext());
uint8_t next = buffer_->get(index_++);
bits |= (next >> 1) << i;
if ((next & 1) == 0) break;
}
// The bits encode the sign in the least significant bit.
bool is_negative = (bits & 1) == 1;
int32_t result = bits >> 1;
return is_negative ? -result : result;
}
Handle<ByteArray> TranslationBuffer::CreateByteArray(Factory* factory) {
Handle<ByteArray> result = factory->NewByteArray(CurrentIndex(), TENURED);
contents_.CopyTo(result->GetDataStartAddress());
return result;
}
void Translation::BeginConstructStubFrame(BailoutId bailout_id, int literal_id,
unsigned height) {
buffer_->Add(CONSTRUCT_STUB_FRAME);
buffer_->Add(bailout_id.ToInt());
buffer_->Add(literal_id);
buffer_->Add(height);
}
void Translation::BeginGetterStubFrame(int literal_id) {
buffer_->Add(GETTER_STUB_FRAME);
buffer_->Add(literal_id);
}
void Translation::BeginSetterStubFrame(int literal_id) {
buffer_->Add(SETTER_STUB_FRAME);
buffer_->Add(literal_id);
}
void Translation::BeginArgumentsAdaptorFrame(int literal_id, unsigned height) {
buffer_->Add(ARGUMENTS_ADAPTOR_FRAME);
buffer_->Add(literal_id);
buffer_->Add(height);
}
void Translation::BeginTailCallerFrame(int literal_id) {
buffer_->Add(TAIL_CALLER_FRAME);
buffer_->Add(literal_id);
}
void Translation::BeginJSFrame(BailoutId node_id, int literal_id,
unsigned height) {
buffer_->Add(JS_FRAME);
buffer_->Add(node_id.ToInt());
buffer_->Add(literal_id);
buffer_->Add(height);
}
void Translation::BeginInterpretedFrame(BailoutId bytecode_offset,
int literal_id, unsigned height) {
buffer_->Add(INTERPRETED_FRAME);
buffer_->Add(bytecode_offset.ToInt());
buffer_->Add(literal_id);
buffer_->Add(height);
}
void Translation::BeginCompiledStubFrame(int height) {
buffer_->Add(COMPILED_STUB_FRAME);
buffer_->Add(height);
}
void Translation::BeginArgumentsObject(int args_length) {
buffer_->Add(ARGUMENTS_OBJECT);
buffer_->Add(args_length);
}
void Translation::ArgumentsElements(bool is_rest) {
buffer_->Add(ARGUMENTS_ELEMENTS);
buffer_->Add(is_rest);
}
void Translation::ArgumentsLength(bool is_rest) {
buffer_->Add(ARGUMENTS_LENGTH);
buffer_->Add(is_rest);
}
void Translation::BeginCapturedObject(int length) {
buffer_->Add(CAPTURED_OBJECT);
buffer_->Add(length);
}
void Translation::DuplicateObject(int object_index) {
buffer_->Add(DUPLICATED_OBJECT);
buffer_->Add(object_index);
}
void Translation::StoreRegister(Register reg) {
buffer_->Add(REGISTER);
buffer_->Add(reg.code());
}
void Translation::StoreInt32Register(Register reg) {
buffer_->Add(INT32_REGISTER);
buffer_->Add(reg.code());
}
void Translation::StoreUint32Register(Register reg) {
buffer_->Add(UINT32_REGISTER);
buffer_->Add(reg.code());
}
void Translation::StoreBoolRegister(Register reg) {
buffer_->Add(BOOL_REGISTER);
buffer_->Add(reg.code());
}
void Translation::StoreFloatRegister(FloatRegister reg) {
buffer_->Add(FLOAT_REGISTER);
buffer_->Add(reg.code());
}
void Translation::StoreDoubleRegister(DoubleRegister reg) {
buffer_->Add(DOUBLE_REGISTER);
buffer_->Add(reg.code());
}
void Translation::StoreStackSlot(int index) {
buffer_->Add(STACK_SLOT);
buffer_->Add(index);
}
void Translation::StoreInt32StackSlot(int index) {
buffer_->Add(INT32_STACK_SLOT);
buffer_->Add(index);
}
void Translation::StoreUint32StackSlot(int index) {
buffer_->Add(UINT32_STACK_SLOT);
buffer_->Add(index);
}
void Translation::StoreBoolStackSlot(int index) {
buffer_->Add(BOOL_STACK_SLOT);
buffer_->Add(index);
}
void Translation::StoreFloatStackSlot(int index) {
buffer_->Add(FLOAT_STACK_SLOT);
buffer_->Add(index);
}
void Translation::StoreDoubleStackSlot(int index) {
buffer_->Add(DOUBLE_STACK_SLOT);
buffer_->Add(index);
}
void Translation::StoreLiteral(int literal_id) {
buffer_->Add(LITERAL);
buffer_->Add(literal_id);
}
void Translation::StoreArgumentsObject(bool args_known,
int args_index,
int args_length) {
buffer_->Add(ARGUMENTS_OBJECT);
buffer_->Add(args_known);
buffer_->Add(args_index);
buffer_->Add(args_length);
}
void Translation::StoreJSFrameFunction() {
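  // The function lives at a fixed offset below the frame pointer; dividing
  // the byte distance from the caller-PC slot by kPointerSize converts it
  // into the stack-slot index that STACK_SLOT operands use.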
StoreStackSlot((StandardFrameConstants::kCallerPCOffset -
StandardFrameConstants::kFunctionOffset) /
kPointerSize);
}
int Translation::NumberOfOperandsFor(Opcode opcode) {
switch (opcode) {
case GETTER_STUB_FRAME:
case SETTER_STUB_FRAME:
case DUPLICATED_OBJECT:
case ARGUMENTS_OBJECT:
case CAPTURED_OBJECT:
case REGISTER:
case INT32_REGISTER:
case UINT32_REGISTER:
case BOOL_REGISTER:
case FLOAT_REGISTER: