// Copyright 2014 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "content/renderer/media/webrtc/webrtc_video_capturer_adapter.h"

#include "base/bind.h"
#include "base/memory/ref_counted.h"
#include "base/synchronization/waitable_event.h"
#include "base/threading/thread_task_runner_handle.h"
#include "base/trace_event/trace_event.h"
#include "cc/paint/skia_paint_canvas.h"
#include "content/renderer/media/webrtc/webrtc_video_frame_adapter.h"
#include "content/renderer/render_thread_impl.h"
#include "media/base/timestamp_constants.h"
#include "media/base/video_util.h"
#include "media/renderers/paint_canvas_video_renderer.h"
#include "services/ui/public/cpp/gpu/context_provider_command_buffer.h"
#include "skia/ext/platform_canvas.h"
#include "third_party/libyuv/include/libyuv/convert.h"
#include "third_party/libyuv/include/libyuv/convert_from.h"
#include "third_party/libyuv/include/libyuv/scale.h"
#include "third_party/webrtc/api/video/video_rotation.h"
#include "third_party/webrtc/rtc_base/refcountedobject.h"

namespace content {

namespace {

// Empty method used for keeping a reference to the original media::VideoFrame.
// The reference to |frame| is kept in the closure that calls this method.
void CapturerReleaseOriginalFrame(
    const scoped_refptr<media::VideoFrame>& frame) {}

// Helper class that signals a WaitableEvent when it goes out of scope.
class ScopedWaitableEvent {
 public:
  explicit ScopedWaitableEvent(base::WaitableEvent* event) : event_(event) {}
  ~ScopedWaitableEvent() {
    if (event_)
      event_->Signal();
  }

 private:
  base::WaitableEvent* const event_;
};

}  // anonymous namespace

// Initializes the GL context environment and provides a method for copying
// texture-backed frames into CPU-mappable memory.
// The class is created and destroyed on the main render thread.
class WebRtcVideoCapturerAdapter::TextureFrameCopier
    : public base::RefCounted<WebRtcVideoCapturerAdapter::TextureFrameCopier> {
 public:
  TextureFrameCopier()
      : main_thread_task_runner_(base::ThreadTaskRunnerHandle::Get()),
        canvas_video_renderer_(new media::PaintCanvasVideoRenderer) {
    RenderThreadImpl* const main_thread = RenderThreadImpl::current();
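    // RenderThreadImpl::current() may return null (e.g. in unit tests). In
    // that case |provider_| stays null and CopyTextureFrameOnMainThread()
    // falls back to producing black frames.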
    if (main_thread)
      provider_ = main_thread->SharedMainThreadContextProvider();
  }

  // Synchronous call to copy a texture-backed |frame| into a CPU-mappable
  // |new_frame|. If it is not called on the main render thread, this call
  // posts a task to the main thread that runs CopyTextureFrameOnMainThread()
  // and blocks until it completes.
  void CopyTextureFrame(const scoped_refptr<media::VideoFrame>& frame,
                        scoped_refptr<media::VideoFrame>* new_frame) {
    if (main_thread_task_runner_->BelongsToCurrentThread()) {
      CopyTextureFrameOnMainThread(frame, new_frame, nullptr);
      return;
    }

    base::WaitableEvent waiter(base::WaitableEvent::ResetPolicy::MANUAL,
                               base::WaitableEvent::InitialState::NOT_SIGNALED);
    main_thread_task_runner_->PostTask(
        FROM_HERE,
        base::BindOnce(&TextureFrameCopier::CopyTextureFrameOnMainThread, this,
                       frame, new_frame, &waiter));
    waiter.Wait();
  }

 private:
  friend class base::RefCounted<TextureFrameCopier>;
  ~TextureFrameCopier() {
    // |canvas_video_renderer_| should be deleted on the thread it was
    // created on.
    if (!main_thread_task_runner_->BelongsToCurrentThread()) {
      main_thread_task_runner_->DeleteSoon(FROM_HERE,
                                           canvas_video_renderer_.release());
    }
  }

  void CopyTextureFrameOnMainThread(
      const scoped_refptr<media::VideoFrame>& frame,
      scoped_refptr<media::VideoFrame>* new_frame,
      base::WaitableEvent* waiter) {
    DCHECK(main_thread_task_runner_->BelongsToCurrentThread());
    DCHECK(frame->format() == media::PIXEL_FORMAT_ARGB ||
           frame->format() == media::PIXEL_FORMAT_XRGB ||
           frame->format() == media::PIXEL_FORMAT_I420 ||
           frame->format() == media::PIXEL_FORMAT_UYVY ||
           frame->format() == media::PIXEL_FORMAT_NV12);
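    // Signals |waiter| (when non-null) on every return path out of this
    // method, so the calling thread is never left blocked.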
    ScopedWaitableEvent event(waiter);
    if (!provider_) {
      // Return a black frame (yuv = {0, 0x80, 0x80}).
      *new_frame = media::VideoFrame::CreateColorFrame(
          frame->visible_rect().size(), 0u, 0x80, 0x80, frame->timestamp());
      return;
    }
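
    // Read the texture back into CPU memory by painting the frame into a
    // bitmap-backed canvas through the shared GPU context.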
    SkBitmap bitmap;
    bitmap.allocPixels(SkImageInfo::MakeN32Premul(
        frame->visible_rect().width(), frame->visible_rect().height()));
    cc::SkiaPaintCanvas paint_canvas(bitmap);

    *new_frame = media::VideoFrame::CreateFrame(
        media::PIXEL_FORMAT_I420, frame->coded_size(), frame->visible_rect(),
        frame->natural_size(), frame->timestamp());
    DCHECK(provider_->ContextGL());
    canvas_video_renderer_->Copy(
        frame.get(), &paint_canvas,
        media::Context3D(provider_->ContextGL(), provider_->GrContext()));

    SkPixmap pixmap;
    const bool result = bitmap.peekPixels(&pixmap);
    DCHECK(result) << "Error trying to access SkBitmap's pixels";
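    // Skia's N32 native format is RGBA on some platforms and BGRA on others;
    // pick the matching libyuv source FOURCC before converting the readback
    // bitmap into the I420 planes of |new_frame|.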
    const uint32_t source_pixel_format =
        (kN32_SkColorType == kRGBA_8888_SkColorType) ? cricket::FOURCC_ABGR
                                                     : cricket::FOURCC_ARGB;
    libyuv::ConvertToI420(
        static_cast<const uint8_t*>(pixmap.addr(0, 0)),
        pixmap.computeByteSize(),
        (*new_frame)->visible_data(media::VideoFrame::kYPlane),
        (*new_frame)->stride(media::VideoFrame::kYPlane),
        (*new_frame)->visible_data(media::VideoFrame::kUPlane),
        (*new_frame)->stride(media::VideoFrame::kUPlane),
        (*new_frame)->visible_data(media::VideoFrame::kVPlane),
        (*new_frame)->stride(media::VideoFrame::kVPlane), 0 /* crop_x */,
        0 /* crop_y */, pixmap.width(), pixmap.height(),
        (*new_frame)->visible_rect().width(),
        (*new_frame)->visible_rect().height(), libyuv::kRotate0,
        source_pixel_format);
  }

  const scoped_refptr<base::SingleThreadTaskRunner> main_thread_task_runner_;
  scoped_refptr<ui::ContextProviderCommandBuffer> provider_;
  std::unique_ptr<media::PaintCanvasVideoRenderer> canvas_video_renderer_;
};

WebRtcVideoCapturerAdapter::WebRtcVideoCapturerAdapter(
    bool is_screencast,
    blink::WebMediaStreamTrack::ContentHintType content_hint)
    : texture_copier_(new WebRtcVideoCapturerAdapter::TextureFrameCopier()),
      is_screencast_(is_screencast),
      content_hint_(content_hint),
      running_(false) {
  thread_checker_.DetachFromThread();
}

WebRtcVideoCapturerAdapter::~WebRtcVideoCapturerAdapter() {
  DVLOG(3) << __func__;
}

void WebRtcVideoCapturerAdapter::OnFrameCaptured(
    const scoped_refptr<media::VideoFrame>& input_frame) {
  DCHECK(thread_checker_.CalledOnValidThread());
  TRACE_EVENT0("video", "WebRtcVideoCapturerAdapter::OnFrameCaptured");
  if (!(input_frame->IsMappable() &&
        (input_frame->format() == media::PIXEL_FORMAT_I420 ||
         input_frame->format() == media::PIXEL_FORMAT_I420A)) &&
      !input_frame->HasTextures()) {
    // Connecting sources and sinks does not check the format, so we must
    // simply ignore formats that we cannot handle.
    LOG(ERROR) << "We cannot send frame with storage type: "
               << input_frame->AsHumanReadableString();
    NOTREACHED();
    return;
  }

  scoped_refptr<media::VideoFrame> frame = input_frame;
  const int orig_width = frame->natural_size().width();
  const int orig_height = frame->natural_size().height();
  int adapted_width;
  int adapted_height;
  // The VideoAdapter is only used for CPU-adaptation downscaling, not for
  // aspect changes, so we ignore these crop-related outputs.
  int crop_width;
  int crop_height;
  int crop_x;
  int crop_y;
  int64_t translated_camera_time_us;
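  // AdaptFrame() comes from cricket::VideoCapturer and applies the
  // VideoAdapter's current resolution and frame-rate decisions; it returns
  // false when the frame should be dropped.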
  if (!AdaptFrame(orig_width, orig_height, frame->timestamp().InMicroseconds(),
                  rtc::TimeMicros(), &adapted_width, &adapted_height,
                  &crop_width, &crop_height, &crop_x, &crop_y,
                  &translated_camera_time_us)) {
    return;
  }

  // Return |frame| directly if it is texture-backed, because there is no
  // cropping support for textures yet. See https://crbug.com/503653.
  if (frame->HasTextures()) {
    OnFrame(webrtc::VideoFrame(
                new rtc::RefCountedObject<WebRtcVideoFrameAdapter>(
                    frame, base::Bind(&TextureFrameCopier::CopyTextureFrame,
                                      texture_copier_)),
                webrtc::kVideoRotation_0, translated_camera_time_us),
            orig_width, orig_height);
    return;
  }

  // Translate the crop rectangle from natural-size coordinates (as returned
  // by AdaptFrame()) into visible-size coordinates by scaling each value by
  // the visible/natural ratio.
  gfx::Rect cropped_visible_rect(
      frame->visible_rect().x() +
          crop_x * frame->visible_rect().width() / orig_width,
      frame->visible_rect().y() +
          crop_y * frame->visible_rect().height() / orig_height,
      crop_width * frame->visible_rect().width() / orig_width,
      crop_height * frame->visible_rect().height() / orig_height);

  const gfx::Size adapted_size(adapted_width, adapted_height);
  scoped_refptr<media::VideoFrame> video_frame =
      media::VideoFrame::WrapVideoFrame(frame, frame->format(),
                                        cropped_visible_rect, adapted_size);
  if (!video_frame)
    return;
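
  // Keep the original |frame| alive until the wrapped frame is destroyed.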
  video_frame->AddDestructionObserver(
      base::BindOnce(&CapturerReleaseOriginalFrame, frame));

  // If no scaling is needed, return a wrapped version of |frame| directly.
  if (video_frame->natural_size() == video_frame->visible_rect().size()) {
    OnFrame(webrtc::VideoFrame(
                new rtc::RefCountedObject<WebRtcVideoFrameAdapter>(
                    video_frame,
                    WebRtcVideoFrameAdapter::CopyTextureFrameCallback()),
                webrtc::kVideoRotation_0, translated_camera_time_us),
            orig_width, orig_height);
    return;
  }

  // We need to scale the frame before we hand it over to webrtc.
  const bool has_alpha = video_frame->format() == media::PIXEL_FORMAT_I420A;
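  // |scaled_frame_pool_| is a media::VideoFramePool, so destination buffers
  // are recycled across frames instead of being reallocated per frame.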
  scoped_refptr<media::VideoFrame> scaled_frame =
      scaled_frame_pool_.CreateFrame(
          has_alpha ? media::PIXEL_FORMAT_I420A : media::PIXEL_FORMAT_I420,
          adapted_size, gfx::Rect(adapted_size), adapted_size,
          frame->timestamp());
  libyuv::I420Scale(video_frame->visible_data(media::VideoFrame::kYPlane),
                    video_frame->stride(media::VideoFrame::kYPlane),
                    video_frame->visible_data(media::VideoFrame::kUPlane),
                    video_frame->stride(media::VideoFrame::kUPlane),
                    video_frame->visible_data(media::VideoFrame::kVPlane),
                    video_frame->stride(media::VideoFrame::kVPlane),
                    video_frame->visible_rect().width(),
                    video_frame->visible_rect().height(),
                    scaled_frame->data(media::VideoFrame::kYPlane),
                    scaled_frame->stride(media::VideoFrame::kYPlane),
                    scaled_frame->data(media::VideoFrame::kUPlane),
                    scaled_frame->stride(media::VideoFrame::kUPlane),
                    scaled_frame->data(media::VideoFrame::kVPlane),
                    scaled_frame->stride(media::VideoFrame::kVPlane),
                    adapted_width, adapted_height, libyuv::kFilterBilinear);
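  // libyuv::I420Scale() only handles the Y, U and V planes; for I420A frames
  // the alpha plane has to be scaled separately.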
  if (has_alpha) {
    libyuv::ScalePlane(video_frame->visible_data(media::VideoFrame::kAPlane),
                       video_frame->stride(media::VideoFrame::kAPlane),
                       video_frame->visible_rect().width(),
                       video_frame->visible_rect().height(),
                       scaled_frame->data(media::VideoFrame::kAPlane),
                       scaled_frame->stride(media::VideoFrame::kAPlane),
                       adapted_width, adapted_height, libyuv::kFilterBilinear);
  }

  OnFrame(webrtc::VideoFrame(
              new rtc::RefCountedObject<WebRtcVideoFrameAdapter>(
                  scaled_frame,
                  WebRtcVideoFrameAdapter::CopyTextureFrameCallback()),
              webrtc::kVideoRotation_0, translated_camera_time_us),
          orig_width, orig_height);
}
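
// Start() and Stop() only track the adapter's running state; frames are
// delivered externally through OnFrameCaptured().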
cricket::CaptureState WebRtcVideoCapturerAdapter::Start(
    const cricket::VideoFormat& capture_format) {
  DCHECK(thread_checker_.CalledOnValidThread());
  DCHECK(!running_);
  DVLOG(3) << __func__ << " capture format: " << capture_format.ToString();
  running_ = true;
  return cricket::CS_RUNNING;
}

void WebRtcVideoCapturerAdapter::Stop() {
  DCHECK(thread_checker_.CalledOnValidThread());
  DVLOG(3) << __func__;
  DCHECK(running_);
  running_ = false;
  SetCaptureFormat(nullptr);
  SignalStateChange(this, cricket::CS_STOPPED);
}

bool WebRtcVideoCapturerAdapter::IsRunning() {
  DCHECK(thread_checker_.CalledOnValidThread());
  return running_;
}
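
// I420 is the only FOURCC advertised: mappable frames are delivered as I420
// (or I420A), and texture-backed frames are converted to I420 on demand by
// TextureFrameCopier.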
bool WebRtcVideoCapturerAdapter::GetPreferredFourccs(
    std::vector<uint32_t>* fourccs) {
  DCHECK(thread_checker_.CalledOnValidThread());
  if (!fourccs)
    return false;
  DCHECK(fourccs->empty());
  fourccs->push_back(cricket::FOURCC_I420);
  return true;
}

void WebRtcVideoCapturerAdapter::SetContentHint(
    blink::WebMediaStreamTrack::ContentHintType content_hint) {
  DCHECK(thread_checker_.CalledOnValidThread());
  content_hint_ = content_hint;
}

bool WebRtcVideoCapturerAdapter::IsScreencast() const {
  // IsScreencast() is misleading since content hints were added to
  // MediaStreamTracks. What IsScreencast() really signals is whether or not
  // video frames should ever be scaled before being handed over to WebRTC.
  // TODO(pbos): Remove the need for IsScreencast() -> ShouldAdaptResolution()
  // by inlining VideoCapturer::AdaptFrame() and removing it from
  // VideoCapturer.
  return !ShouldAdaptResolution();
}

bool WebRtcVideoCapturerAdapter::ShouldAdaptResolution() const {
  DCHECK(thread_checker_.CalledOnValidThread());
  if (content_hint_ ==
      blink::WebMediaStreamTrack::ContentHintType::kVideoMotion) {
    return true;
  }
  if (content_hint_ ==
      blink::WebMediaStreamTrack::ContentHintType::kVideoDetail) {
    return false;
  }
  // Screencast does not adapt by default.
  return !is_screencast_;
}

bool WebRtcVideoCapturerAdapter::GetBestCaptureFormat(
    const cricket::VideoFormat& desired,
    cricket::VideoFormat* best_format) {
  DCHECK(thread_checker_.CalledOnValidThread());
  DVLOG(3) << __func__ << " desired: " << desired.ToString();
  // Capability enumeration is done in MediaStreamVideoSource, so the adapter
  // simply adopts the desired format as the best format.
  best_format->width = desired.width;
  best_format->height = desired.height;
  best_format->fourcc = cricket::FOURCC_I420;
  best_format->interval = desired.interval;
  return true;
}

}  // namespace content