media: Retain reference to the wrapped frame in WrapVideoFrame

When we use VideoFrame::WrapVideoFrame to soft-apply a different
format, visible rectangle, and natural size to a source VideoFrame,
we must make sure the source VideoFrame outlives the wrapping frame
in most, if not all, scenarios.

This CL makes WrapVideoFrame explicitly retain a reference to the
wrapped frame inside the wrapping frame.  With that change in place,
we can remove several empty destruction callbacks whose only purpose
was to keep the wrapped frame alive.
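
For example, a call site that previously wrapped a frame and kept the
source alive through an empty destruction callback (an illustrative
sketch mirroring the call sites updated below, not an exact excerpt):

  auto wrapping_frame = media::VideoFrame::WrapVideoFrame(
      *frame, frame->format(), frame->visible_rect(),
      frame->natural_size());
  wrapping_frame->AddDestructionObserver(base::BindOnce(
      [](scoped_refptr<media::VideoFrame>) {}, frame));

can now pass the scoped_refptr directly and drop the callback, since
the wrapping frame holds the reference itself:

  auto wrapping_frame = media::VideoFrame::WrapVideoFrame(
      frame, frame->format(), frame->visible_rect(),
      frame->natural_size());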

Bug: 982201
Change-Id: Id81ad65c500fe4f0b47a60d98819408522314129
Reviewed-on: https://chromium-review.googlesource.com/c/chromium/src/+/1830241
Commit-Queue: Ricky Liang <jcliang@chromium.org>
Reviewed-by: Dan Sanders <sandersd@chromium.org>
Reviewed-by: Guido Urdaneta <guidou@chromium.org>
Cr-Commit-Position: refs/heads/master@{#701466}
diff --git a/media/base/mac/video_frame_mac_unittests.cc b/media/base/mac/video_frame_mac_unittests.cc
index abe9e5142..86ecdf5 100644
--- a/media/base/mac/video_frame_mac_unittests.cc
+++ b/media/base/mac/video_frame_mac_unittests.cc
@@ -94,7 +94,7 @@
 
   int instances_destroyed = 0;
   auto wrapper_frame = VideoFrame::WrapVideoFrame(
-      *frame, frame->format(), frame->visible_rect(), frame->natural_size());
+      frame, frame->format(), frame->visible_rect(), frame->natural_size());
   wrapper_frame->AddDestructionObserver(
       base::Bind(&Increment, &instances_destroyed));
   ASSERT_TRUE(wrapper_frame.get());
diff --git a/media/base/video_frame.cc b/media/base/video_frame.cc
index e6957b81..6d1f93e 100644
--- a/media/base/video_frame.cc
+++ b/media/base/video_frame.cc
@@ -617,60 +617,64 @@
 
 // static
 scoped_refptr<VideoFrame> VideoFrame::WrapVideoFrame(
-    const VideoFrame& frame,
+    scoped_refptr<VideoFrame> frame,
     VideoPixelFormat format,
     const gfx::Rect& visible_rect,
     const gfx::Size& natural_size) {
   // Frames with textures need mailbox info propagated, and there's no support
   // for that here yet, see http://crbug/362521.
-  CHECK(!frame.HasTextures());
-  DCHECK(frame.visible_rect().Contains(visible_rect));
+  CHECK(!frame->HasTextures());
+  DCHECK(frame->visible_rect().Contains(visible_rect));
 
   // The following storage type should not be wrapped as the shared region
   // cannot be owned by both the wrapped frame and the wrapping frame.
-  DCHECK(frame.storage_type() != STORAGE_MOJO_SHARED_BUFFER);
+  //
+  // TODO: We can support this now since we have a reference to the wrapped
+  // frame through |wrapped_frame_|.
+  DCHECK(frame->storage_type() != STORAGE_MOJO_SHARED_BUFFER);
 
-  if (!AreValidPixelFormatsForWrap(frame.format(), format)) {
+  if (!AreValidPixelFormatsForWrap(frame->format(), format)) {
     DLOG(ERROR) << __func__ << " Invalid format conversion."
-                << VideoPixelFormatToString(frame.format()) << " to "
+                << VideoPixelFormatToString(frame->format()) << " to "
                 << VideoPixelFormatToString(format);
     return nullptr;
   }
 
-  if (!IsValidConfig(format, frame.storage_type(), frame.coded_size(),
+  if (!IsValidConfig(format, frame->storage_type(), frame->coded_size(),
                      visible_rect, natural_size)) {
     DLOG(ERROR) << __func__ << " Invalid config."
-                << ConfigToString(format, frame.storage_type(),
-                                  frame.coded_size(), visible_rect,
+                << ConfigToString(format, frame->storage_type(),
+                                  frame->coded_size(), visible_rect,
                                   natural_size);
     return nullptr;
   }
 
   scoped_refptr<VideoFrame> wrapping_frame(
-      new VideoFrame(frame.layout(), frame.storage_type(), visible_rect,
-                     natural_size, frame.timestamp()));
+      new VideoFrame(frame->layout(), frame->storage_type(), visible_rect,
+                     natural_size, frame->timestamp()));
 
-  // Copy all metadata to the wrapped frame.
-  wrapping_frame->metadata()->MergeMetadataFrom(frame.metadata());
+  // Copy all metadata to the wrapping frame.
+  wrapping_frame->metadata()->MergeMetadataFrom(frame->metadata());
 
-  if (frame.IsMappable()) {
+  if (frame->IsMappable()) {
     for (size_t i = 0; i < NumPlanes(format); ++i) {
-      wrapping_frame->data_[i] = frame.data_[i];
+      wrapping_frame->data_[i] = frame->data_[i];
     }
   }
 
 #if defined(OS_LINUX)
-  DCHECK(frame.dmabuf_fds_);
+  DCHECK(frame->dmabuf_fds_);
   // If there are any |dmabuf_fds_| plugged in, we should refer them too.
-  wrapping_frame->dmabuf_fds_ = frame.dmabuf_fds_;
+  wrapping_frame->dmabuf_fds_ = frame->dmabuf_fds_;
 #endif
 
-  if (frame.storage_type() == STORAGE_SHMEM) {
-    DCHECK(frame.shm_region_ && frame.shm_region_->IsValid());
-    wrapping_frame->BackWithSharedMemory(frame.shm_region_,
-                                         frame.shared_memory_offset());
+  if (frame->storage_type() == STORAGE_SHMEM) {
+    DCHECK(frame->shm_region_ && frame->shm_region_->IsValid());
+    wrapping_frame->BackWithSharedMemory(frame->shm_region_,
+                                         frame->shared_memory_offset());
   }
 
+  wrapping_frame->wrapped_frame_ = std::move(frame);
   return wrapping_frame;
 }
 
diff --git a/media/base/video_frame.h b/media/base/video_frame.h
index d955bd7..641caade 100644
--- a/media/base/video_frame.h
+++ b/media/base/video_frame.h
@@ -281,7 +281,7 @@
   // Wraps |frame|. |visible_rect| must be a sub rect within
   // frame->visible_rect().
   static scoped_refptr<VideoFrame> WrapVideoFrame(
-      const VideoFrame& frame,
+      scoped_refptr<VideoFrame> frame,
       VideoPixelFormat format,
       const gfx::Rect& visible_rect,
       const gfx::Size& natural_size);
@@ -617,6 +617,10 @@
   // VideFrameLayout (includes format, coded_size, and strides).
   const VideoFrameLayout layout_;
 
+  // Set by WrapVideoFrame to soft-apply a new set of format, visible rectangle,
+  // and natural size on |wrapped_frame_|.
+  scoped_refptr<VideoFrame> wrapped_frame_;
+
   // Storage type for the different planes.
   StorageType storage_type_;  // TODO(mcasas): make const
 
diff --git a/media/base/video_frame_pool.cc b/media/base/video_frame_pool.cc
index 16156e9..b70a1f3 100644
--- a/media/base/video_frame_pool.cc
+++ b/media/base/video_frame_pool.cc
@@ -111,7 +111,7 @@
   }
 
   scoped_refptr<VideoFrame> wrapped_frame = VideoFrame::WrapVideoFrame(
-      *frame, frame->format(), frame->visible_rect(), frame->natural_size());
+      frame, frame->format(), frame->visible_rect(), frame->natural_size());
   wrapped_frame->AddDestructionObserver(base::Bind(
       &VideoFramePool::PoolImpl::FrameReleased, this, std::move(frame)));
   return wrapped_frame;
diff --git a/media/base/video_frame_unittest.cc b/media/base/video_frame_unittest.cc
index a7f3008..b17866d 100644
--- a/media/base/video_frame_unittest.cc
+++ b/media/base/video_frame_unittest.cc
@@ -260,8 +260,7 @@
   }
 }
 
-static void FrameNoLongerNeededCallback(scoped_refptr<media::VideoFrame> frame,
-                                        bool* triggered) {
+static void FrameNoLongerNeededCallback(bool* triggered) {
   *triggered = true;
 }
 
@@ -282,9 +281,9 @@
     wrapped_frame->metadata()->SetTimeDelta(
         media::VideoFrameMetadata::FRAME_DURATION, kFrameDuration);
     frame = media::VideoFrame::WrapVideoFrame(
-        *wrapped_frame, wrapped_frame->format(), visible_rect, natural_size);
-    frame->AddDestructionObserver(base::Bind(
-        &FrameNoLongerNeededCallback, wrapped_frame, &done_callback_was_run));
+        wrapped_frame, wrapped_frame->format(), visible_rect, natural_size);
+    wrapped_frame->AddDestructionObserver(
+        base::Bind(&FrameNoLongerNeededCallback, &done_callback_was_run));
     EXPECT_EQ(wrapped_frame->coded_size(), frame->coded_size());
     EXPECT_EQ(wrapped_frame->data(media::VideoFrame::kYPlane),
               frame->data(media::VideoFrame::kYPlane));
@@ -308,6 +307,7 @@
         frame->metadata()->HasKey(media::VideoFrameMetadata::FRAME_DURATION));
   }
 
+  // Verify that |wrapped_frame| outlives |frame|.
   EXPECT_FALSE(done_callback_was_run);
   frame = NULL;
   EXPECT_TRUE(done_callback_was_run);
@@ -454,13 +454,13 @@
 
   // Wrapped DMABUF frames must share the same memory as their wrappee.
   auto wrapped_frame = VideoFrame::WrapVideoFrame(
-      *frame, frame->format(), visible_rect, visible_rect.size());
+      frame, frame->format(), visible_rect, visible_rect.size());
   ASSERT_NE(wrapped_frame, nullptr);
   ASSERT_EQ(wrapped_frame->IsSameDmaBufsAs(*frame), true);
 
   // Multi-level wrapping should share same memory as well.
   auto wrapped_frame2 = VideoFrame::WrapVideoFrame(
-      *wrapped_frame, frame->format(), visible_rect, visible_rect.size());
+      wrapped_frame, frame->format(), visible_rect, visible_rect.size());
   ASSERT_NE(wrapped_frame2, nullptr);
   ASSERT_EQ(wrapped_frame2->IsSameDmaBufsAs(*wrapped_frame), true);
   ASSERT_EQ(wrapped_frame2->IsSameDmaBufsAs(*frame), true);
diff --git a/media/base/video_util.cc b/media/base/video_util.cc
index 8f4e0c22..e97350a 100644
--- a/media/base/video_util.cc
+++ b/media/base/video_util.cc
@@ -17,9 +17,6 @@
 
 namespace {
 
-// Empty method used for keeping a reference to the original media::VideoFrame.
-void ReleaseOriginalFrame(scoped_refptr<media::VideoFrame> frame) {}
-
 // Helper to apply padding to the region outside visible rect up to the coded
 // size with the repeated last column / row of the visible rect.
 void FillRegionOutsideVisibleRect(uint8_t* data,
@@ -428,13 +425,11 @@
   DCHECK_EQ(PIXEL_FORMAT_I420A, frame->format());
 
   scoped_refptr<media::VideoFrame> wrapped_frame =
-      media::VideoFrame::WrapVideoFrame(*frame, PIXEL_FORMAT_I420,
+      media::VideoFrame::WrapVideoFrame(frame, PIXEL_FORMAT_I420,
                                         frame->visible_rect(),
                                         frame->natural_size());
   if (!wrapped_frame)
     return nullptr;
-  wrapped_frame->AddDestructionObserver(
-      base::BindOnce(&ReleaseOriginalFrame, std::move(frame)));
   return wrapped_frame;
 }
 
diff --git a/media/gpu/linux/platform_video_frame_pool.cc b/media/gpu/linux/platform_video_frame_pool.cc
index 9dde20e..91c750d9 100644
--- a/media/gpu/linux/platform_video_frame_pool.cc
+++ b/media/gpu/linux/platform_video_frame_pool.cc
@@ -84,7 +84,7 @@
   DCHECK_EQ(origin_frame->coded_size(), coded_size);
 
   scoped_refptr<VideoFrame> wrapped_frame = VideoFrame::WrapVideoFrame(
-      *origin_frame, format, visible_rect_, natural_size_);
+      origin_frame, format, visible_rect_, natural_size_);
   DCHECK(wrapped_frame);
   frames_in_use_.emplace(GetDmabufId(*wrapped_frame), origin_frame.get());
   wrapped_frame->AddDestructionObserver(
diff --git a/media/gpu/linux/platform_video_frame_pool_unittest.cc b/media/gpu/linux/platform_video_frame_pool_unittest.cc
index 00d90fc..9b76824 100644
--- a/media/gpu/linux/platform_video_frame_pool_unittest.cc
+++ b/media/gpu/linux/platform_video_frame_pool_unittest.cc
@@ -164,7 +164,7 @@
   SetFrameFormat(PIXEL_FORMAT_I420);
   scoped_refptr<VideoFrame> frame_1 = GetFrame(10);
   scoped_refptr<VideoFrame> frame_2 = VideoFrame::WrapVideoFrame(
-      *frame_1, frame_1->format(), frame_1->visible_rect(),
+      frame_1, frame_1->format(), frame_1->visible_rect(),
       frame_1->natural_size());
   EXPECT_EQ(pool_->UnwrapFrame(*frame_1), pool_->UnwrapFrame(*frame_2));
   EXPECT_TRUE(frame_1->IsSameDmaBufsAs(*frame_2));
diff --git a/media/gpu/test/texture_ref.cc b/media/gpu/test/texture_ref.cc
index 6cac302..63b17b0 100644
--- a/media/gpu/test/texture_ref.cc
+++ b/media/gpu/test/texture_ref.cc
@@ -75,7 +75,7 @@
 scoped_refptr<VideoFrame> TextureRef::ExportVideoFrame(
     gfx::Rect visible_rect) const {
 #if BUILDFLAG(USE_CHROMEOS_MEDIA_ACCELERATION)
-  return VideoFrame::WrapVideoFrame(*frame_, frame_->format(), visible_rect,
+  return VideoFrame::WrapVideoFrame(frame_, frame_->format(), visible_rect,
                                     visible_rect.size());
 #else
   return nullptr;
diff --git a/media/gpu/test/video_player/test_vda_video_decoder.cc b/media/gpu/test/video_player/test_vda_video_decoder.cc
index 5343d5a..e8bf41ba 100644
--- a/media/gpu/test/video_player/test_vda_video_decoder.cc
+++ b/media/gpu/test/video_player/test_vda_video_decoder.cc
@@ -294,7 +294,7 @@
   // new video frame using the same mailbox.
   if (!video_frame->HasTextures()) {
     wrapped_video_frame = VideoFrame::WrapVideoFrame(
-        *video_frame, video_frame->format(), picture.visible_rect(),
+        video_frame, video_frame->format(), picture.visible_rect(),
         picture.visible_rect().size());
   } else {
     gpu::MailboxHolder mailbox_holders[media::VideoFrame::kMaxPlanes];
@@ -319,15 +319,13 @@
   // (e.g. on a resolution change).
   base::OnceClosure reuse_cb = BindToCurrentLoop(
       base::BindOnce(&TestVDAVideoDecoder::ReusePictureBufferTask, weak_this_,
-                     picture.picture_buffer_id(), video_frame));
+                     picture.picture_buffer_id()));
   wrapped_video_frame->AddDestructionObserver(std::move(reuse_cb));
   output_cb_.Run(std::move(wrapped_video_frame));
 }
 
 // Called when a picture buffer is ready to be re-used.
-void TestVDAVideoDecoder::ReusePictureBufferTask(
-    int32_t picture_buffer_id,
-    scoped_refptr<VideoFrame> /*video_frame*/) {
+void TestVDAVideoDecoder::ReusePictureBufferTask(int32_t picture_buffer_id) {
   DCHECK_CALLED_ON_VALID_SEQUENCE(vda_wrapper_sequence_checker_);
   DCHECK(decoder_);
   DVLOGF(4) << "Picture buffer ID: " << picture_buffer_id;
diff --git a/media/gpu/test/video_player/test_vda_video_decoder.h b/media/gpu/test/video_player/test_vda_video_decoder.h
index f439d3c..bda6726c 100644
--- a/media/gpu/test/video_player/test_vda_video_decoder.h
+++ b/media/gpu/test/video_player/test_vda_video_decoder.h
@@ -71,8 +71,7 @@
                                             uint32_t texture_target) override;
   void DismissPictureBuffer(int32_t picture_buffer_id) override;
   void PictureReady(const Picture& picture) override;
-  void ReusePictureBufferTask(int32_t picture_buffer_id,
-                              scoped_refptr<VideoFrame> video_frame);
+  void ReusePictureBufferTask(int32_t picture_buffer_id);
   void NotifyEndOfBitstreamBuffer(int32_t bitstream_buffer_id) override;
   void NotifyFlushDone() override;
   void NotifyResetDone() override;
diff --git a/media/gpu/v4l2/v4l2_image_processor.cc b/media/gpu/v4l2/v4l2_image_processor.cc
index 739bf920..04e5cdb 100644
--- a/media/gpu/v4l2/v4l2_image_processor.cc
+++ b/media/gpu/v4l2/v4l2_image_processor.cc
@@ -758,7 +758,7 @@
         {
           const auto& orig_frame = buffer->GetVideoFrame();
           output_frame = VideoFrame::WrapVideoFrame(
-              *orig_frame, orig_frame->format(), orig_frame->visible_rect(),
+              orig_frame, orig_frame->format(), orig_frame->visible_rect(),
               orig_frame->natural_size());
           output_frame->AddDestructionObserver(BindToCurrentLoop(
               base::BindOnce(&V4L2ImageProcessor::V4L2VFDestructionObserver,
diff --git a/media/gpu/v4l2/v4l2_slice_video_decode_accelerator.cc b/media/gpu/v4l2/v4l2_slice_video_decode_accelerator.cc
index a3313c4c..4afafc5b 100644
--- a/media/gpu/v4l2/v4l2_slice_video_decode_accelerator.cc
+++ b/media/gpu/v4l2/v4l2_slice_video_decode_accelerator.cc
@@ -2187,8 +2187,8 @@
     // We will set a destruction observer to the output frame, so wrap the
     // imported frame into another one that we can destruct.
     scoped_refptr<VideoFrame> wrapped_frame = VideoFrame::WrapVideoFrame(
-        *output_frame.get(), output_frame->format(),
-        output_frame->visible_rect(), output_frame->coded_size());
+        output_frame, output_frame->format(), output_frame->visible_rect(),
+        output_frame->coded_size());
     DCHECK(output_frame != nullptr);
 
     image_processor_->Process(
diff --git a/media/gpu/v4l2/v4l2_slice_video_decoder.cc b/media/gpu/v4l2/v4l2_slice_video_decoder.cc
index 63b0c41..da4ba41b 100644
--- a/media/gpu/v4l2/v4l2_slice_video_decoder.cc
+++ b/media/gpu/v4l2/v4l2_slice_video_decoder.cc
@@ -1061,10 +1061,8 @@
       frame->timestamp() != timestamp) {
     gfx::Size natural_size = GetNaturalSize(visible_rect, pixel_aspect_ratio_);
     scoped_refptr<VideoFrame> wrapped_frame = VideoFrame::WrapVideoFrame(
-        *frame, frame->format(), visible_rect, natural_size);
+        frame, frame->format(), visible_rect, natural_size);
     wrapped_frame->set_timestamp(timestamp);
-    wrapped_frame->AddDestructionObserver(base::BindOnce(
-        base::DoNothing::Once<scoped_refptr<VideoFrame>>(), std::move(frame)));
 
     frame = std::move(wrapped_frame);
   }
diff --git a/media/gpu/v4l2/v4l2_video_encode_accelerator.cc b/media/gpu/v4l2/v4l2_video_encode_accelerator.cc
index ee49c63..43f8c985 100644
--- a/media/gpu/v4l2/v4l2_video_encode_accelerator.cc
+++ b/media/gpu/v4l2/v4l2_video_encode_accelerator.cc
@@ -728,7 +728,7 @@
   if (image_processor_->output_mode() == ImageProcessor::OutputMode::IMPORT) {
     const auto& buf = image_processor_output_buffers_[output_buffer_index];
     auto output_frame = VideoFrame::WrapVideoFrame(
-        *buf, buf->format(), buf->visible_rect(), buf->natural_size());
+        buf, buf->format(), buf->visible_rect(), buf->natural_size());
 
     // Unretained(this) is safe here, because image_processor is destroyed
     // before video_encoder_thread stops.
diff --git a/media/gpu/vaapi/vaapi_video_decoder.cc b/media/gpu/vaapi/vaapi_video_decoder.cc
index cf4f0ac..37b7b40b 100644
--- a/media/gpu/vaapi/vaapi_video_decoder.cc
+++ b/media/gpu/vaapi/vaapi_video_decoder.cc
@@ -481,11 +481,8 @@
       video_frame->timestamp() != timestamp) {
     gfx::Size natural_size = GetNaturalSize(visible_rect, pixel_aspect_ratio_);
     scoped_refptr<VideoFrame> wrapped_frame = VideoFrame::WrapVideoFrame(
-        *video_frame, video_frame->format(), visible_rect, natural_size);
+        video_frame, video_frame->format(), visible_rect, natural_size);
     wrapped_frame->set_timestamp(timestamp);
-    wrapped_frame->AddDestructionObserver(
-        base::BindOnce(base::DoNothing::Once<scoped_refptr<VideoFrame>>(),
-                       std::move(video_frame)));
 
     video_frame = std::move(wrapped_frame);
   }
diff --git a/third_party/blink/renderer/modules/mediarecorder/video_track_recorder.cc b/third_party/blink/renderer/modules/mediarecorder/video_track_recorder.cc
index 69e9535..bb6aa0a 100644
--- a/third_party/blink/renderer/modules/mediarecorder/video_track_recorder.cc
+++ b/third_party/blink/renderer/modules/mediarecorder/video_track_recorder.cc
@@ -263,11 +263,9 @@
       frame = media::WrapAsI420VideoFrame(video_frame);
     } else {
       frame = media::VideoFrame::WrapVideoFrame(
-          *video_frame, video_frame->format(), video_frame->visible_rect(),
+          video_frame, video_frame->format(), video_frame->visible_rect(),
           video_frame->natural_size());
     }
-    frame->AddDestructionObserver(ConvertToBaseOnceCallback(CrossThreadBindOnce(
-        [](scoped_refptr<VideoFrame> video_frame) {}, std::move(video_frame))));
   }
   frame->AddDestructionObserver(media::BindToCurrentLoop(
       WTF::Bind(&VideoTrackRecorder::Counter::DecreaseCount,
diff --git a/third_party/blink/renderer/modules/mediastream/media_stream_video_track.cc b/third_party/blink/renderer/modules/mediastream/media_stream_video_track.cc
index 8508564ed..676dacd7 100644
--- a/third_party/blink/renderer/modules/mediastream/media_stream_video_track.cc
+++ b/third_party/blink/renderer/modules/mediastream/media_stream_video_track.cc
@@ -35,10 +35,6 @@
   // |callback| will be deleted when this exits.
 }
 
-// Empty method used for keeping a reference to the original media::VideoFrame.
-// The reference to |frame| is kept in the closure that calls this method.
-void ReleaseOriginalFrame(scoped_refptr<media::VideoFrame> frame) {}
-
 }  // namespace
 
 // MediaStreamVideoTrack::FrameDeliverer is a helper class used for registering
@@ -230,13 +226,11 @@
   // Wrap |black_frame_| so we get a fresh timestamp we can modify. Frames
   // returned from this function may still be in use.
   scoped_refptr<media::VideoFrame> wrapped_black_frame =
-      media::VideoFrame::WrapVideoFrame(*black_frame_, black_frame_->format(),
+      media::VideoFrame::WrapVideoFrame(black_frame_, black_frame_->format(),
                                         black_frame_->visible_rect(),
                                         black_frame_->natural_size());
   if (!wrapped_black_frame)
     return nullptr;
-  wrapped_black_frame->AddDestructionObserver(ConvertToBaseOnceCallback(
-      CrossThreadBindOnce(&ReleaseOriginalFrame, black_frame_)));
 
   wrapped_black_frame->set_timestamp(reference_frame.timestamp());
   base::TimeTicks reference_time;
diff --git a/third_party/blink/renderer/modules/mediastream/video_track_adapter.cc b/third_party/blink/renderer/modules/mediastream/video_track_adapter.cc
index 5b993e5..f0cc162 100644
--- a/third_party/blink/renderer/modules/mediastream/video_track_adapter.cc
+++ b/third_party/blink/renderer/modules/mediastream/video_track_adapter.cc
@@ -71,11 +71,6 @@
   base::TimeTicks last_update_timestamp;
 };
 
-// Empty method used for keeping a reference to the original media::VideoFrame
-// in VideoFrameResolutionAdapter::DeliverFrame if cropping is needed.
-// The reference to |frame| is kept in the closure that calls this method.
-void TrackReleaseOriginalFrame(scoped_refptr<media::VideoFrame> frame) {}
-
 int ClampToValidDimension(int dimension) {
   return std::min(static_cast<int>(media::limits::kMaxDimension),
                   std::max(0, dimension));
@@ -337,15 +332,13 @@
         media::ComputeLetterboxRegion(frame->visible_rect(), desired_size);
 
     video_frame = media::VideoFrame::WrapVideoFrame(
-        *frame, frame->format(), region_in_frame, desired_size);
+        frame, frame->format(), region_in_frame, desired_size);
     if (!video_frame) {
       PostFrameDroppedToMainTaskRunner(
           media::VideoCaptureFrameDropReason::
               kResolutionAdapterWrappingFrameForCroppingFailed);
       return;
     }
-    video_frame->AddDestructionObserver(ConvertToBaseOnceCallback(
-        CrossThreadBindOnce(&TrackReleaseOriginalFrame, frame)));
 
     DVLOG(3) << "desired size  " << desired_size.ToString()
              << " output natural size "
diff --git a/third_party/blink/renderer/platform/peerconnection/webrtc_video_track_source.cc b/third_party/blink/renderer/platform/peerconnection/webrtc_video_track_source.cc
index 95788af..8fa9bf3 100644
--- a/third_party/blink/renderer/platform/peerconnection/webrtc_video_track_source.cc
+++ b/third_party/blink/renderer/platform/peerconnection/webrtc_video_track_source.cc
@@ -159,17 +159,11 @@
                                frame_adaptation_params.scale_to_height);
   // Soft-apply the new (combined) cropping and scaling.
   scoped_refptr<media::VideoFrame> video_frame =
-      media::VideoFrame::WrapVideoFrame(*frame, frame->format(),
+      media::VideoFrame::WrapVideoFrame(frame, frame->format(),
                                         cropped_visible_rect, adapted_size);
   if (!video_frame)
     return;
 
-  // Attach shared ownership of the wrapped |frame| to the wrapping
-  // |video_frame|.
-  video_frame->AddDestructionObserver(
-      base::BindOnce(base::DoNothing::Once<scoped_refptr<media::VideoFrame>>(),
-                     std::move(frame)));
-
   // If no scaling is needed, return a wrapped version of |frame| directly.
   // The soft-applied cropping will be taken into account by the remainder
   // of the pipeline.