Torquefy a few types

Move the field layout definitions of AllocationMemento, CoverageInfo,
DebugInfo, DescriptorArray, FeedbackCell and FeedbackVector to Torque
(src/builtins/base.tq), replacing the hand-written *_FIELDS offset macros
with the TORQUE_GENERATED_*_FIELDS macros. To match the generated
constants, FeedbackVector::kOptimizedCodeOffset becomes
kOptimizedCodeWeakOrSmiOffset, DescriptorArray::kPointersStartOffset
becomes kStartOfPointerFieldsOffset, and FeedbackCell::kSize becomes
FeedbackCell::kAlignedSize. The AllocationSite and DebugInfo verifiers
are tightened to check the concrete types of their fields.
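
For illustration, here is the shape of the change for FeedbackCell; the
other types follow the same pattern (the snippets below are lifted from
the hunks in this CL, nothing here is new code):

    // Before: hand-written layout macro in src/objects/feedback-cell.h.
    #define FEEDBACK_CELL_FIELDS(V)         \
      V(kValueOffset, kTaggedSize)          \
      /* Non-pointer fields */              \
      V(kInterruptBudgetOffset, kInt32Size) \
      /* Total size. */                     \
      V(kUnalignedSize, 0)

    // After: the layout is declared once in src/builtins/base.tq ...
    extern class FeedbackCell extends Struct {
      value: Undefined | FeedbackVector | FixedArray;
      interrupt_budget: int32;
    }

    // ... and the C++ header consumes the Torque-generated field list.
    DEFINE_FIELD_OFFSET_CONSTANTS(HeapObject::kHeaderSize,
                                  TORQUE_GENERATED_FEEDBACK_CELL_FIELDS)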

Bug: v8:8952
Change-Id: I17297706a8d9bd4a0ee01b0b133ca613dbc31cf9
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/1521910
Commit-Queue: Irina Yatsenko <irinayat@microsoft.com>
Reviewed-by: Michael Starzinger <mstarzinger@chromium.org>
Reviewed-by: Jakob Gruber <jgruber@chromium.org>
Cr-Commit-Position: refs/heads/master@{#61026}
diff --git a/src/builtins/arm/builtins-arm.cc b/src/builtins/arm/builtins-arm.cc
index 37be51e..d45b1f1 100644
--- a/src/builtins/arm/builtins-arm.cc
+++ b/src/builtins/arm/builtins-arm.cc
@@ -887,7 +887,8 @@
 
   __ ldr(
       optimized_code_entry,
-      FieldMemOperand(feedback_vector, FeedbackVector::kOptimizedCodeOffset));
+      FieldMemOperand(feedback_vector,
+                      FeedbackVector::kOptimizedCodeWeakOrSmiOffset));
 
   // Check if the code entry is a Smi. If yes, we interpret it as an
   // optimisation marker. Otherwise, interpret it as a weak reference to a code
diff --git a/src/builtins/arm64/builtins-arm64.cc b/src/builtins/arm64/builtins-arm64.cc
index f814420..050db74 100644
--- a/src/builtins/arm64/builtins-arm64.cc
+++ b/src/builtins/arm64/builtins-arm64.cc
@@ -1009,7 +1009,8 @@
 
   __ LoadAnyTaggedField(
       optimized_code_entry,
-      FieldMemOperand(feedback_vector, FeedbackVector::kOptimizedCodeOffset));
+      FieldMemOperand(feedback_vector,
+                      FeedbackVector::kOptimizedCodeWeakOrSmiOffset));
 
   // Check if the code entry is a Smi. If yes, we interpret it as an
 // optimisation marker. Otherwise, interpret it as a weak reference to a code
diff --git a/src/builtins/base.tq b/src/builtins/base.tq
index 67e1df7..565c308 100644
--- a/src/builtins/base.tq
+++ b/src/builtins/base.tq
@@ -165,6 +165,14 @@
   stack_frame_cache: Object;
 }
 
+extern class DescriptorArray extends HeapObject {
+  number_of_all_descriptors: uint16;
+  number_of_descriptors: uint16;
+  raw_number_of_marked_descriptors: uint16;
+  filler16_bits: uint16;
+  enum_cache: EnumCache;
+}
+
 // These intrinsics should never be called from Torque code. They're used
 // internally by the 'new' operator and only declared here because it's simpler
 // than building the definition from C++.
@@ -1051,6 +1059,27 @@
   coverage_info: CoverageInfo | Undefined;
 }
 
+extern class FeedbackVector extends HeapObject {
+  shared_function_info: SharedFunctionInfo;
+  // TODO(v8:9108): currently no support for MaybeObject in Torque
+  optimized_code_weak_or_smi: Object;
+  closure_feedback_cell_array: FixedArray;
+  length: int32;
+  invocation_count: int32;
+  profiler_ticks: int32;
+  deopt_count: int32;
+}
+
+extern class FeedbackCell extends Struct {
+  value: Undefined | FeedbackVector | FixedArray;
+  interrupt_budget: int32;
+}
+
+type AllocationSite extends Struct;
+extern class AllocationMemento extends Struct {
+  allocation_site: AllocationSite;
+}
+
 extern class WasmModuleObject extends JSObject {
   native_module: Foreign;
   export_wrappers: FixedArray;
diff --git a/src/builtins/builtins-lazy-gen.cc b/src/builtins/builtins-lazy-gen.cc
index c171554..a9d8582 100644
--- a/src/builtins/builtins-lazy-gen.cc
+++ b/src/builtins/builtins-lazy-gen.cc
@@ -44,7 +44,7 @@
   Label fallthrough(this);
 
   TNode<MaybeObject> maybe_optimized_code_entry = LoadMaybeWeakObjectField(
-      feedback_vector, FeedbackVector::kOptimizedCodeOffset);
+      feedback_vector, FeedbackVector::kOptimizedCodeWeakOrSmiOffset);
 
   // Check if the code entry is a Smi. If yes, we interpret it as an
   // optimisation marker. Otherwise, interpret it as a weak reference to a code
diff --git a/src/builtins/ia32/builtins-ia32.cc b/src/builtins/ia32/builtins-ia32.cc
index da7f083..247c59f 100644
--- a/src/builtins/ia32/builtins-ia32.cc
+++ b/src/builtins/ia32/builtins-ia32.cc
@@ -799,7 +799,8 @@
   // Load the optimized code from the feedback vector and re-use the register.
   Register optimized_code_entry = scratch;
   __ mov(optimized_code_entry,
-         FieldOperand(feedback_vector, FeedbackVector::kOptimizedCodeOffset));
+         FieldOperand(feedback_vector,
+                      FeedbackVector::kOptimizedCodeWeakOrSmiOffset));
 
   // Check if the code entry is a Smi. If yes, we interpret it as an
   // optimisation marker. Otherwise, interpret it as a weak reference to a code
diff --git a/src/builtins/mips/builtins-mips.cc b/src/builtins/mips/builtins-mips.cc
index 00ae715..a676a74 100644
--- a/src/builtins/mips/builtins-mips.cc
+++ b/src/builtins/mips/builtins-mips.cc
@@ -874,7 +874,8 @@
   Register optimized_code_entry = scratch1;
 
   __ lw(optimized_code_entry,
-        FieldMemOperand(feedback_vector, FeedbackVector::kOptimizedCodeOffset));
+        FieldMemOperand(feedback_vector,
+                        FeedbackVector::kOptimizedCodeWeakOrSmiOffset));
 
   // Check if the code entry is a Smi. If yes, we interpret it as an
   // optimisation marker. Otherwise, interpret it as a weak cell to a code
diff --git a/src/builtins/mips64/builtins-mips64.cc b/src/builtins/mips64/builtins-mips64.cc
index bba01d5..5d343be 100644
--- a/src/builtins/mips64/builtins-mips64.cc
+++ b/src/builtins/mips64/builtins-mips64.cc
@@ -891,7 +891,8 @@
   Register optimized_code_entry = scratch1;
 
   __ Ld(optimized_code_entry,
-        FieldMemOperand(feedback_vector, FeedbackVector::kOptimizedCodeOffset));
+        FieldMemOperand(feedback_vector,
+                        FeedbackVector::kOptimizedCodeWeakOrSmiOffset));
 
   // Check if the code entry is a Smi. If yes, we interpret it as an
   // optimisation marker. Otherwise, interpret it as a weak reference to a code
diff --git a/src/builtins/ppc/builtins-ppc.cc b/src/builtins/ppc/builtins-ppc.cc
index f089f08..7f54273 100644
--- a/src/builtins/ppc/builtins-ppc.cc
+++ b/src/builtins/ppc/builtins-ppc.cc
@@ -906,7 +906,8 @@
 
   __ LoadP(
       optimized_code_entry,
-      FieldMemOperand(feedback_vector, FeedbackVector::kOptimizedCodeOffset));
+      FieldMemOperand(feedback_vector,
+                      FeedbackVector::kOptimizedCodeWeakOrSmiOffset));
 
   // Check if the code entry is a Smi. If yes, we interpret it as an
   // optimisation marker. Otherwise, interpret it as a weak reference to a code
diff --git a/src/builtins/s390/builtins-s390.cc b/src/builtins/s390/builtins-s390.cc
index b242e89..cfdafa1 100644
--- a/src/builtins/s390/builtins-s390.cc
+++ b/src/builtins/s390/builtins-s390.cc
@@ -960,7 +960,8 @@
 
   __ LoadP(
       optimized_code_entry,
-      FieldMemOperand(feedback_vector, FeedbackVector::kOptimizedCodeOffset));
+      FieldMemOperand(feedback_vector,
+                      FeedbackVector::kOptimizedCodeWeakOrSmiOffset));
 
   // Check if the code entry is a Smi. If yes, we interpret it as an
   // optimisation marker. Otherwise, interpret it as a weak reference to a code
diff --git a/src/builtins/x64/builtins-x64.cc b/src/builtins/x64/builtins-x64.cc
index b5ca486..f1584b0 100644
--- a/src/builtins/x64/builtins-x64.cc
+++ b/src/builtins/x64/builtins-x64.cc
@@ -899,7 +899,8 @@
 
   __ LoadAnyTaggedField(
       optimized_code_entry,
-      FieldOperand(feedback_vector, FeedbackVector::kOptimizedCodeOffset),
+      FieldOperand(feedback_vector,
+                   FeedbackVector::kOptimizedCodeWeakOrSmiOffset),
       decompr_scratch);
 
   // Check if the code entry is a Smi. If yes, we interpret it as an
diff --git a/src/feedback-vector-inl.h b/src/feedback-vector-inl.h
index 6573cea..0487f94 100644
--- a/src/feedback-vector-inl.h
+++ b/src/feedback-vector-inl.h
@@ -102,7 +102,8 @@
 
 ACCESSORS(FeedbackVector, shared_function_info, SharedFunctionInfo,
           kSharedFunctionInfoOffset)
-WEAK_ACCESSORS(FeedbackVector, optimized_code_weak_or_smi, kOptimizedCodeOffset)
+WEAK_ACCESSORS(FeedbackVector, optimized_code_weak_or_smi,
+               kOptimizedCodeWeakOrSmiOffset)
 ACCESSORS(FeedbackVector, closure_feedback_cell_array, ClosureFeedbackCellArray,
           kClosureFeedbackCellArrayOffset)
 INT32_ACCESSORS(FeedbackVector, length, kLengthOffset)
diff --git a/src/feedback-vector.h b/src/feedback-vector.h
index 0073335..21bd401 100644
--- a/src/feedback-vector.h
+++ b/src/feedback-vector.h
@@ -311,21 +311,11 @@
   // garbage collection (e.g., for patching the cache).
   static inline Symbol RawUninitializedSentinel(Isolate* isolate);
 
-// Layout description.
-#define FEEDBACK_VECTOR_FIELDS(V)                 \
-  /* Header fields. */                            \
-  V(kSharedFunctionInfoOffset, kTaggedSize)       \
-  V(kOptimizedCodeOffset, kTaggedSize)            \
-  V(kClosureFeedbackCellArrayOffset, kTaggedSize) \
-  V(kLengthOffset, kInt32Size)                    \
-  V(kInvocationCountOffset, kInt32Size)           \
-  V(kProfilerTicksOffset, kInt32Size)             \
-  V(kDeoptCountOffset, kInt32Size)                \
-  V(kUnalignedHeaderSize, 0)
+  // Layout description.
+  DEFINE_FIELD_OFFSET_CONSTANTS(HeapObject::kHeaderSize,
+                                TORQUE_GENERATED_FEEDBACK_VECTOR_FIELDS)
 
-  DEFINE_FIELD_OFFSET_CONSTANTS(HeapObject::kHeaderSize, FEEDBACK_VECTOR_FIELDS)
-#undef FEEDBACK_VECTOR_FIELDS
-
+  static constexpr int kUnalignedHeaderSize = kSize;
   static const int kHeaderSize =
       RoundUp<kObjectAlignment>(int{kUnalignedHeaderSize});
   static const int kFeedbackSlotsOffset = kHeaderSize;
diff --git a/src/heap/factory.cc b/src/heap/factory.cc
index 637262c..2956da6 100644
--- a/src/heap/factory.cc
+++ b/src/heap/factory.cc
@@ -1882,8 +1882,8 @@
 
 Handle<FeedbackCell> Factory::NewNoClosuresCell(Handle<HeapObject> value) {
   AllowDeferredHandleDereference convert_to_cell;
-  HeapObject result = AllocateRawWithImmortalMap(
-      FeedbackCell::kSize, AllocationType::kOld, *no_closures_cell_map());
+  HeapObject result = AllocateRawWithImmortalMap(FeedbackCell::kAlignedSize,
+      AllocationType::kOld, *no_closures_cell_map());
   Handle<FeedbackCell> cell(FeedbackCell::cast(result), isolate());
   cell->set_value(*value);
   cell->set_interrupt_budget(FeedbackCell::GetInitialInterruptBudget());
@@ -1893,8 +1893,8 @@
 
 Handle<FeedbackCell> Factory::NewOneClosureCell(Handle<HeapObject> value) {
   AllowDeferredHandleDereference convert_to_cell;
-  HeapObject result = AllocateRawWithImmortalMap(
-      FeedbackCell::kSize, AllocationType::kOld, *one_closure_cell_map());
+  HeapObject result = AllocateRawWithImmortalMap(FeedbackCell::kAlignedSize,
+      AllocationType::kOld, *one_closure_cell_map());
   Handle<FeedbackCell> cell(FeedbackCell::cast(result), isolate());
   cell->set_value(*value);
   cell->set_interrupt_budget(FeedbackCell::GetInitialInterruptBudget());
@@ -1904,8 +1904,8 @@
 
 Handle<FeedbackCell> Factory::NewManyClosuresCell(Handle<HeapObject> value) {
   AllowDeferredHandleDereference convert_to_cell;
-  HeapObject result = AllocateRawWithImmortalMap(
-      FeedbackCell::kSize, AllocationType::kOld, *many_closures_cell_map());
+  HeapObject result = AllocateRawWithImmortalMap(FeedbackCell::kAlignedSize,
+      AllocationType::kOld, *many_closures_cell_map());
   Handle<FeedbackCell> cell(FeedbackCell::cast(result), isolate());
   cell->set_value(*value);
   cell->set_interrupt_budget(FeedbackCell::GetInitialInterruptBudget());
diff --git a/src/heap/setup-heap-internal.cc b/src/heap/setup-heap-internal.cc
index ac00b77..bc21ff8 100644
--- a/src/heap/setup-heap-internal.cc
+++ b/src/heap/setup-heap-internal.cc
@@ -452,11 +452,14 @@
 
     // The "no closures" and "one closure" FeedbackCell maps need
     // to be marked unstable because their objects can change maps.
-    ALLOCATE_MAP(FEEDBACK_CELL_TYPE, FeedbackCell::kSize, no_closures_cell)
+    ALLOCATE_MAP(FEEDBACK_CELL_TYPE, FeedbackCell::kAlignedSize,
+                 no_closures_cell)
     roots.no_closures_cell_map()->mark_unstable();
-    ALLOCATE_MAP(FEEDBACK_CELL_TYPE, FeedbackCell::kSize, one_closure_cell)
+    ALLOCATE_MAP(FEEDBACK_CELL_TYPE, FeedbackCell::kAlignedSize,
+                 one_closure_cell)
     roots.one_closure_cell_map()->mark_unstable();
-    ALLOCATE_MAP(FEEDBACK_CELL_TYPE, FeedbackCell::kSize, many_closures_cell)
+    ALLOCATE_MAP(FEEDBACK_CELL_TYPE, FeedbackCell::kAlignedSize,
+                 many_closures_cell)
 
     ALLOCATE_VARSIZE_MAP(TRANSITION_ARRAY_TYPE, transition_array)
 
diff --git a/src/objects-body-descriptors-inl.h b/src/objects-body-descriptors-inl.h
index 7c82849..c0e1823 100644
--- a/src/objects-body-descriptors-inl.h
+++ b/src/objects-body-descriptors-inl.h
@@ -471,7 +471,7 @@
  public:
   static bool IsValidSlot(Map map, HeapObject obj, int offset) {
     return offset == kSharedFunctionInfoOffset ||
-           offset == kOptimizedCodeOffset ||
+           offset == kOptimizedCodeWeakOrSmiOffset ||
            offset == kClosureFeedbackCellArrayOffset ||
            offset >= kFeedbackSlotsOffset;
   }
@@ -480,7 +480,7 @@
   static inline void IterateBody(Map map, HeapObject obj, int object_size,
                                  ObjectVisitor* v) {
     IteratePointer(obj, kSharedFunctionInfoOffset, v);
-    IterateMaybeWeakPointer(obj, kOptimizedCodeOffset, v);
+    IterateMaybeWeakPointer(obj, kOptimizedCodeWeakOrSmiOffset, v);
     IteratePointer(obj, kClosureFeedbackCellArrayOffset, v);
     IterateMaybeWeakPointers(obj, kFeedbackSlotsOffset, object_size, v);
   }
diff --git a/src/objects-debug.cc b/src/objects-debug.cc
index 3d8257b..92aa069 100644
--- a/src/objects-debug.cc
+++ b/src/objects-debug.cc
@@ -2107,6 +2107,10 @@
 
 void AllocationSite::AllocationSiteVerify(Isolate* isolate) {
   CHECK(IsAllocationSite());
+  CHECK(dependent_code()->IsDependentCode());
+  CHECK(transition_info_or_boilerplate()->IsSmi() ||
+        transition_info_or_boilerplate()->IsJSObject());
+  CHECK(nested_site()->IsAllocationSite() || nested_site() == Smi::kZero);
 }
 
 void AllocationMemento::AllocationMementoVerify(Isolate* isolate) {
@@ -2153,10 +2157,15 @@
 
 void DebugInfo::DebugInfoVerify(Isolate* isolate) {
   CHECK(IsDebugInfo());
-  VerifyPointer(isolate, shared());
-  VerifyPointer(isolate, script());
-  VerifyPointer(isolate, original_bytecode_array());
-  VerifyPointer(isolate, break_points());
+  VerifySmiField(kFlagsOffset);
+  VerifySmiField(kDebuggerHintsOffset);
+  CHECK(shared()->IsSharedFunctionInfo());
+  CHECK(script()->IsUndefined(isolate) || script()->IsScript());
+  CHECK(original_bytecode_array()->IsUndefined(isolate) ||
+        original_bytecode_array()->IsBytecodeArray());
+  CHECK(debug_bytecode_array()->IsUndefined(isolate) ||
+        debug_bytecode_array()->IsBytecodeArray());
+  CHECK(break_points()->IsFixedArray());
 }
 
 void StackTraceFrame::StackTraceFrameVerify(Isolate* isolate) {
diff --git a/src/objects/allocation-site.h b/src/objects/allocation-site.h
index b221bd0..e8e5f9b 100644
--- a/src/objects/allocation-site.h
+++ b/src/objects/allocation-site.h
@@ -134,25 +134,25 @@
   static bool ShouldTrack(ElementsKind from, ElementsKind to);
   static inline bool CanTrack(InstanceType type);
 
-// Layout description.
-// AllocationSite has to start with TransitionInfoOrboilerPlateOffset
-// and end with WeakNext field.
-#define ALLOCATION_SITE_FIELDS(V)                     \
-  V(kStartOffset, 0)                                  \
-  V(kTransitionInfoOrBoilerplateOffset, kTaggedSize)  \
-  V(kNestedSiteOffset, kTaggedSize)                   \
-  V(kDependentCodeOffset, kTaggedSize)                \
-  V(kCommonPointerFieldEndOffset, 0)                  \
-  V(kPretenureDataOffset, kInt32Size)                 \
-  V(kPretenureCreateCountOffset, kInt32Size)          \
-  /* Size of AllocationSite without WeakNext field */ \
-  V(kSizeWithoutWeakNext, 0)                          \
-  V(kWeakNextOffset, kTaggedSize)                     \
-  /* Size of AllocationSite with WeakNext field */    \
-  V(kSizeWithWeakNext, 0)
+  // Layout description.
+  // AllocationSite has to start with TransitionInfoOrBoilerplateOffset
+  // and end with WeakNext field.
+  #define ALLOCATION_SITE_FIELDS(V)                     \
+    V(kStartOffset, 0)                                  \
+    V(kTransitionInfoOrBoilerplateOffset, kTaggedSize)  \
+    V(kNestedSiteOffset, kTaggedSize)                   \
+    V(kDependentCodeOffset, kTaggedSize)                \
+    V(kCommonPointerFieldEndOffset, 0)                  \
+    V(kPretenureDataOffset, kInt32Size)                 \
+    V(kPretenureCreateCountOffset, kInt32Size)          \
+    /* Size of AllocationSite without WeakNext field */ \
+    V(kSizeWithoutWeakNext, 0)                          \
+    V(kWeakNextOffset, kTaggedSize)                     \
+    /* Size of AllocationSite with WeakNext field */    \
+    V(kSizeWithWeakNext, 0)
 
   DEFINE_FIELD_OFFSET_CONSTANTS(HeapObject::kHeaderSize, ALLOCATION_SITE_FIELDS)
-#undef ALLOCATION_SITE_FIELDS
+  #undef ALLOCATION_SITE_FIELDS
 
   class BodyDescriptor;
 
@@ -164,14 +164,9 @@
 
 class AllocationMemento : public Struct {
  public:
-// Layout description.
-#define ALLOCATION_MEMENTO_FIELDS(V)    \
-  V(kAllocationSiteOffset, kTaggedSize) \
-  V(kSize, 0)
-
+  // Layout description.
   DEFINE_FIELD_OFFSET_CONSTANTS(HeapObject::kHeaderSize,
-                                ALLOCATION_MEMENTO_FIELDS)
-#undef ALLOCATION_MEMENTO_FIELDS
+                                TORQUE_GENERATED_ALLOCATION_MEMENTO_FIELDS)
 
   DECL_ACCESSORS(allocation_site, Object)
 
diff --git a/src/objects/descriptor-array-inl.h b/src/objects/descriptor-array-inl.h
index a59d4e5..7b91ffd 100644
--- a/src/objects/descriptor-array-inl.h
+++ b/src/objects/descriptor-array-inl.h
@@ -92,7 +92,7 @@
 }
 
 ObjectSlot DescriptorArray::GetFirstPointerSlot() {
-  return RawField(DescriptorArray::kPointersStartOffset);
+  return RawField(DescriptorArray::kStartOfPointerFieldsOffset);
 }
 
 ObjectSlot DescriptorArray::GetDescriptorSlot(int descriptor) {
diff --git a/src/objects/descriptor-array.h b/src/objects/descriptor-array.h
index 8935051..1f597af 100644
--- a/src/objects/descriptor-array.h
+++ b/src/objects/descriptor-array.h
@@ -139,20 +139,11 @@
   static const int kNotFound = -1;
 
   // Layout description.
-#define DESCRIPTOR_ARRAY_FIELDS(V)                    \
-  V(kNumberOfAllDescriptorsOffset, kUInt16Size)       \
-  V(kNumberOfDescriptorsOffset, kUInt16Size)          \
-  V(kRawNumberOfMarkedDescriptorsOffset, kUInt16Size) \
-  V(kFiller16BitsOffset, kUInt16Size)                 \
-  V(kPointersStartOffset, 0)                          \
-  V(kEnumCacheOffset, kTaggedSize)                    \
-  V(kHeaderSize, 0)
-
   DEFINE_FIELD_OFFSET_CONSTANTS(HeapObject::kHeaderSize,
-                                DESCRIPTOR_ARRAY_FIELDS)
-#undef DESCRIPTOR_ARRAY_FIELDS
+                                TORQUE_GENERATED_DESCRIPTOR_ARRAY_FIELDS)
+  static constexpr int kHeaderSize = kSize;
 
-  STATIC_ASSERT(IsAligned(kPointersStartOffset, kTaggedSize));
+  STATIC_ASSERT(IsAligned(kStartOfPointerFieldsOffset, kTaggedSize));
   STATIC_ASSERT(IsAligned(kHeaderSize, kTaggedSize));
 
   // Garbage collection support.
@@ -174,7 +165,8 @@
   inline ObjectSlot GetKeySlot(int descriptor);
   inline MaybeObjectSlot GetValueSlot(int descriptor);
 
-  using BodyDescriptor = FlexibleWeakBodyDescriptor<kPointersStartOffset>;
+  using BodyDescriptor =
+      FlexibleWeakBodyDescriptor<kStartOfPointerFieldsOffset>;
 
   // Layout of descriptor.
   // Naming is consistent with Dictionary classes for easy templating.
diff --git a/src/objects/feedback-cell-inl.h b/src/objects/feedback-cell-inl.h
index c3902ca..7392a07 100644
--- a/src/objects/feedback-cell-inl.h
+++ b/src/objects/feedback-cell-inl.h
@@ -25,10 +25,10 @@
 INT32_ACCESSORS(FeedbackCell, interrupt_budget, kInterruptBudgetOffset)
 
 void FeedbackCell::clear_padding() {
-  if (FeedbackCell::kSize == FeedbackCell::kUnalignedSize) return;
-  DCHECK_GE(FeedbackCell::kSize, FeedbackCell::kUnalignedSize);
+  if (FeedbackCell::kAlignedSize == FeedbackCell::kUnalignedSize) return;
+  DCHECK_GE(FeedbackCell::kAlignedSize, FeedbackCell::kUnalignedSize);
   memset(reinterpret_cast<byte*>(address() + FeedbackCell::kUnalignedSize), 0,
-         FeedbackCell::kSize - FeedbackCell::kUnalignedSize);
+         FeedbackCell::kAlignedSize - FeedbackCell::kUnalignedSize);
 }
 
 }  // namespace internal
diff --git a/src/objects/feedback-cell.h b/src/objects/feedback-cell.h
index a708f4c..b125ddf 100644
--- a/src/objects/feedback-cell.h
+++ b/src/objects/feedback-cell.h
@@ -37,23 +37,17 @@
   DECL_PRINTER(FeedbackCell)
   DECL_VERIFIER(FeedbackCell)
 
-// Layout description.
-#define FEEDBACK_CELL_FIELDS(V)         \
-  V(kValueOffset, kTaggedSize)          \
-  /* Non-pointer fields */              \
-  V(kInterruptBudgetOffset, kInt32Size) \
-  /* Total size. */                     \
-  V(kUnalignedSize, 0)
+  // Layout description.
+  DEFINE_FIELD_OFFSET_CONSTANTS(HeapObject::kHeaderSize,
+                                TORQUE_GENERATED_FEEDBACK_CELL_FIELDS)
 
-  DEFINE_FIELD_OFFSET_CONSTANTS(HeapObject::kHeaderSize, FEEDBACK_CELL_FIELDS)
-#undef FEEDBACK_CELL_FIELDS
-
-  static const int kSize = RoundUp<kObjectAlignment>(int{kUnalignedSize});
+  static const int kUnalignedSize = kSize;
+  static const int kAlignedSize = RoundUp<kObjectAlignment>(int{kSize});
 
   inline void clear_padding();
 
   using BodyDescriptor =
-      FixedBodyDescriptor<kValueOffset, kInterruptBudgetOffset, kSize>;
+      FixedBodyDescriptor<kValueOffset, kInterruptBudgetOffset, kAlignedSize>;
 
   OBJECT_CONSTRUCTORS(FeedbackCell, Struct);
 };
diff --git a/src/profiler/heap-snapshot-generator.cc b/src/profiler/heap-snapshot-generator.cc
index bf5eeaf..9e121c3 100644
--- a/src/profiler/heap-snapshot-generator.cc
+++ b/src/profiler/heap-snapshot-generator.cc
@@ -1232,7 +1232,7 @@
   HeapObject code_heap_object;
   if (code->GetHeapObjectIfWeak(&code_heap_object)) {
     SetWeakReference(entry, "optimized code", code_heap_object,
-                     FeedbackVector::kOptimizedCodeOffset);
+                     FeedbackVector::kOptimizedCodeWeakOrSmiOffset);
   }
 }