[test] Fix flaky wasm test and add stable regression test

Remove a DCHECK that got triggered in the rare case where GC kicks in
during CompilationDependencies::Commit and changes the pretenuring
decision, thus leading to deoptimization. To make sure this rare case
is handled properly, add a new FLAG_stress_gc_during_compilation and a
cctest that simulates it deterministically.
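
For reference, the new tail of CompilationDependencies::Commit looks
roughly like this (condensed from the diff below; the flag name and the
GC call are verbatim from the change, surrounding code is elided and the
comments are added here as explanation):

  // Under --stress-gc-during-compilation, force a full GC right after
  // the dependencies have been installed. The GC may change an
  // allocation site's pretenuring decision and thereby invalidate a
  // dependency, so Commit reports failure instead of hitting the
  // removed SLOW_DCHECK.
  if (FLAG_stress_gc_during_compilation) {
    isolate_->heap()->PreciseCollectAllGarbage(
        Heap::kNoGCFlags, GarbageCollectionReason::kTesting,
        kGCCallbackFlagForced);
    if (!AreValid()) {
      dependencies_.clear();
      return false;
    }
  }

The regression test can then be exercised directly, e.g. via the cctest
runner as test-compiler/DecideToPretenureDuringCompilation.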

R=jarin@chromium.org,mvstanton@chromium.org

Bug: v8:8520
Change-Id: If83f8a3d4659a694357b3869c931c7d7c164fd1a
Reviewed-on: https://chromium-review.googlesource.com/c/1363143
Commit-Queue: Maya Lekova <mslekova@chromium.org>
Reviewed-by: Jaroslav Sevcik <jarin@chromium.org>
Reviewed-by: Michael Stanton <mvstanton@chromium.org>
Cr-Commit-Position: refs/heads/master@{#58236}
diff --git a/src/compiler/compilation-dependencies.cc b/src/compiler/compilation-dependencies.cc
index 958e0b3..053e6f7 100644
--- a/src/compiler/compilation-dependencies.cc
+++ b/src/compiler/compilation-dependencies.cc
@@ -12,7 +12,7 @@
 namespace compiler {
 
 CompilationDependencies::CompilationDependencies(Isolate* isolate, Zone* zone)
-    : zone_(zone), dependencies_(zone) {}
+    : zone_(zone), dependencies_(zone), isolate_(isolate) {}
 
 class CompilationDependencies::Dependency : public ZoneObject {
  public:
@@ -416,7 +416,16 @@
     }
     dep->Install(MaybeObjectHandle::Weak(code));
   }
-  SLOW_DCHECK(AreValid());
+
+  if (FLAG_stress_gc_during_compilation) {
+    isolate_->heap()->PreciseCollectAllGarbage(
+        Heap::kNoGCFlags, GarbageCollectionReason::kTesting,
+        kGCCallbackFlagForced);
+    if (!AreValid()) {
+      dependencies_.clear();
+      return false;
+    }
+  }
 
   dependencies_.clear();
   return true;
diff --git a/src/compiler/compilation-dependencies.h b/src/compiler/compilation-dependencies.h
index ec47f75..1a6760f 100644
--- a/src/compiler/compilation-dependencies.h
+++ b/src/compiler/compilation-dependencies.h
@@ -91,6 +91,7 @@
  private:
   Zone* zone_;
   ZoneForwardList<Dependency*> dependencies_;
+  Isolate* isolate_;
 };
 
 }  // namespace compiler
diff --git a/src/flag-definitions.h b/src/flag-definitions.h
index 33d6ebe..c302fe6 100644
--- a/src/flag-definitions.h
+++ b/src/flag-definitions.h
@@ -517,6 +517,9 @@
             "rewrite far to near jumps (ia32,x64)")
 DEFINE_BOOL(experimental_inline_promise_constructor, true,
             "inline the Promise constructor in TurboFan")
+DEFINE_BOOL(
+    stress_gc_during_compilation, false,
+    "simulate GC/compiler thread race related to https://crbug.com/v8/8520")
 
 #ifdef DISABLE_UNTRUSTED_CODE_MITIGATIONS
 #define V8_DEFAULT_UNTRUSTED_CODE_MITIGATIONS false
diff --git a/test/cctest/cctest.status b/test/cctest/cctest.status
index 9ccb709..0ba2839 100644
--- a/test/cctest/cctest.status
+++ b/test/cctest/cctest.status
@@ -428,6 +428,12 @@
 }],  # variant == stress_incremental_marking
 
 ##############################################################################
+# The test relies on deterministic compilation.
+['variant == stress_background_compile', {
+  'test-compiler/DecideToPretenureDuringCompilation': [SKIP],
+}],  # variant == stress_background_compile
+
+##############################################################################
 ['variant == no_wasm_traps', {
   'test-accessors/*': [SKIP],
   'test-api-interceptors/*': [SKIP],
diff --git a/test/cctest/test-compiler.cc b/test/cctest/test-compiler.cc
index 495f402..f60fdf4 100644
--- a/test/cctest/test-compiler.cc
+++ b/test/cctest/test-compiler.cc
@@ -911,5 +911,114 @@
   CHECK_LE(peak_mem_4 - peak_mem_3, peak_mem_3);
 }
 
+// TODO(mslekova): Remove the duplication with test-heap.cc
+static int AllocationSitesCount(Heap* heap) {
+  int count = 0;
+  for (Object* site = heap->allocation_sites_list();
+       site->IsAllocationSite();) {
+    AllocationSite* cur = AllocationSite::cast(site);
+    CHECK(cur->HasWeakNext());
+    site = cur->weak_next();
+    count++;
+  }
+  return count;
+}
+
+// This test simulates a specific race condition in which GC is triggered
+// just before CompilationDependencies::Commit finishes, changing the
+// pretenuring decision and thus causing a deoptimization.
+TEST(DecideToPretenureDuringCompilation) {
+  // The test makes use of optimization and relies on deterministic
+  // compilation.
+  if (!i::FLAG_opt || i::FLAG_always_opt ||
+      i::FLAG_stress_incremental_marking || i::FLAG_optimize_for_size
+#ifdef ENABLE_MINOR_MC
+      || i::FLAG_minor_mc
+#endif
+  )
+    return;
+
+  FLAG_stress_gc_during_compilation = true;
+  FLAG_allow_natives_syntax = true;
+  FLAG_allocation_site_pretenuring = true;
+
+  // We want to trigger exactly 1 optimization.
+  FLAG_use_osr = false;
+
+  // We'll do manual initialization.
+  ManualGCScope manual_gc_scope;
+  v8::Isolate::CreateParams create_params;
+
+  // This setting ensures Heap::MaximumSizeScavenge will return `true`.
+  // We need to initialize the heap with at least 1 page, while keeping the
+  // limit low, to ensure the new space fills even on 32-bit architectures.
+  create_params.constraints.set_max_semi_space_size_in_kb(Page::kPageSize /
+                                                          1024);
+  create_params.array_buffer_allocator = CcTest::array_buffer_allocator();
+  v8::Isolate* isolate = v8::Isolate::New(create_params);
+
+  isolate->Enter();
+  {
+    i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
+    Heap* heap = i_isolate->heap();
+    GlobalHandles* global_handles = i_isolate->global_handles();
+    HandleScope handle_scope(i_isolate);
+
+    // The allocation site at the head of the list is ours.
+    Handle<AllocationSite> site;
+    {
+      LocalContext context(isolate);
+      v8::HandleScope scope(context->GetIsolate());
+
+      int count = AllocationSitesCount(heap);
+      CompileRun(
+          "let arr = [];"
+          "function foo(shouldKeep) {"
+          "  let local_array = new Array();"
+          "  if (shouldKeep) arr.push(local_array);"
+          "}"
+          "function bar(shouldKeep) {"
+          "  for (let i = 0; i < 10000; i++) {"
+          "    foo(shouldKeep);"
+          "  }"
+          "}"
+          "bar();");
+
+      // This number should be >= kPretenureRatio * 10000,
+      // where 10000 is the number of iterations in `bar`,
+      // in order to make the ratio in DigestPretenuringFeedback close to 1.
+      const int memento_found_bump = 8500;
+
+      // One allocation site should have been created.
+      int new_count = AllocationSitesCount(heap);
+      CHECK_EQ(new_count, (count + 1));
+      site = Handle<AllocationSite>::cast(global_handles->Create(
+          AllocationSite::cast(heap->allocation_sites_list())));
+      site->set_memento_found_count(memento_found_bump);
+
+      CompileRun("%OptimizeFunctionOnNextCall(bar);");
+      CompileRun("bar(true);");
+
+      // The last call should have caused the optimizing compilation of `bar`
+      // to bail out due to a dependency change (the pretenuring decision in
+      // this case). This will cause recompilation.
+
+      // Check `bar` can get optimized again, meaning the compiler state is
+      // recoverable from this point.
+      CompileRun("%OptimizeFunctionOnNextCall(bar);");
+      CompileRun("bar();");
+
+      Handle<Object> bar_obj =
+          JSReceiver::GetProperty(i_isolate, i_isolate->global_object(), "bar")
+              .ToHandleChecked();
+      Handle<JSFunction> bar = Handle<JSFunction>::cast(bar_obj);
+
+      CHECK(bar->IsOptimized());
+    }
+  }
+  isolate->Exit();
+  isolate->Dispose();
+}
+
 }  // namespace internal
 }  // namespace v8