blob: 06d59301904ff889c053d267dd56edef7fa6ed70 [file] [log] [blame]
// Copyright 2011 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "cc/trees/layer_tree_host_impl.h"
#include <stddef.h>
#include <stdint.h>
#include <algorithm>
#include <limits>
#include <map>
#include <set>
#include <unordered_map>
#include <utility>
#include "base/auto_reset.h"
#include "base/bind.h"
#include "base/containers/flat_map.h"
#include "base/json/json_writer.h"
#include "base/memory/ptr_util.h"
#include "base/metrics/histogram.h"
#include "base/numerics/safe_conversions.h"
#include "base/stl_util.h"
#include "base/strings/stringprintf.h"
#include "base/trace_event/trace_event_argument.h"
#include "cc/base/devtools_instrumentation.h"
#include "cc/base/histograms.h"
#include "cc/base/math_util.h"
#include "cc/benchmarks/benchmark_instrumentation.h"
#include "cc/debug/rendering_stats_instrumentation.h"
#include "cc/input/browser_controls_offset_manager.h"
#include "cc/input/main_thread_scrolling_reason.h"
#include "cc/input/page_scale_animation.h"
#include "cc/input/scroll_elasticity_helper.h"
#include "cc/input/scroll_state.h"
#include "cc/input/scrollbar_animation_controller.h"
#include "cc/input/scroller_size_metrics.h"
#include "cc/layers/append_quads_data.h"
#include "cc/layers/effect_tree_layer_list_iterator.h"
#include "cc/layers/heads_up_display_layer_impl.h"
#include "cc/layers/layer_impl.h"
#include "cc/layers/painted_scrollbar_layer_impl.h"
#include "cc/layers/render_surface_impl.h"
#include "cc/layers/scrollbar_layer_impl_base.h"
#include "cc/layers/surface_layer_impl.h"
#include "cc/layers/viewport.h"
#include "cc/raster/bitmap_raster_buffer_provider.h"
#include "cc/raster/gpu_raster_buffer_provider.h"
#include "cc/raster/one_copy_raster_buffer_provider.h"
#include "cc/raster/raster_buffer_provider.h"
#include "cc/raster/synchronous_task_graph_runner.h"
#include "cc/raster/zero_copy_raster_buffer_provider.h"
#include "cc/resources/memory_history.h"
#include "cc/resources/resource_pool.h"
#include "cc/resources/ui_resource_bitmap.h"
#include "cc/tiles/eviction_tile_priority_queue.h"
#include "cc/tiles/frame_viewer_instrumentation.h"
#include "cc/tiles/gpu_image_decode_cache.h"
#include "cc/tiles/picture_layer_tiling.h"
#include "cc/tiles/raster_tile_priority_queue.h"
#include "cc/tiles/software_image_decode_cache.h"
#include "cc/trees/damage_tracker.h"
#include "cc/trees/debug_rect_history.h"
#include "cc/trees/draw_property_utils.h"
#include "cc/trees/effect_node.h"
#include "cc/trees/frame_rate_counter.h"
#include "cc/trees/image_animation_controller.h"
#include "cc/trees/latency_info_swap_promise_monitor.h"
#include "cc/trees/layer_tree_frame_sink.h"
#include "cc/trees/layer_tree_host_common.h"
#include "cc/trees/layer_tree_impl.h"
#include "cc/trees/mutator_host.h"
#include "cc/trees/render_frame_metadata.h"
#include "cc/trees/scroll_node.h"
#include "cc/trees/single_thread_proxy.h"
#include "cc/trees/transform_node.h"
#include "cc/trees/tree_synchronizer.h"
#include "components/viz/common/frame_sinks/copy_output_request.h"
#include "components/viz/common/frame_sinks/delay_based_time_source.h"
#include "components/viz/common/quads/compositor_frame.h"
#include "components/viz/common/quads/compositor_frame_metadata.h"
#include "components/viz/common/quads/render_pass_draw_quad.h"
#include "components/viz/common/quads/shared_quad_state.h"
#include "components/viz/common/quads/solid_color_draw_quad.h"
#include "components/viz/common/quads/texture_draw_quad.h"
#include "components/viz/common/traced_value.h"
#include "gpu/GLES2/gl2extchromium.h"
#include "gpu/command_buffer/client/context_support.h"
#include "gpu/command_buffer/client/gles2_interface.h"
#include "gpu/command_buffer/client/raster_interface.h"
#include "services/metrics/public/cpp/ukm_recorder.h"
#include "third_party/skia/include/gpu/GrContext.h"
#include "ui/gfx/geometry/point_conversions.h"
#include "ui/gfx/geometry/rect_conversions.h"
#include "ui/gfx/geometry/scroll_offset.h"
#include "ui/gfx/geometry/size_conversions.h"
#include "ui/gfx/geometry/vector2d_conversions.h"
namespace cc {
namespace {
// Small helper class that saves the current viewport location as the user sees
// it and resets to the same location.
class ViewportAnchor {
public:
ViewportAnchor(LayerImpl* inner_scroll, LayerImpl* outer_scroll)
: inner_(inner_scroll), outer_(outer_scroll) {
viewport_in_content_coordinates_ = inner_->CurrentScrollOffset();
if (outer_)
viewport_in_content_coordinates_ += outer_->CurrentScrollOffset();
}
void ResetViewportToAnchoredPosition() {
DCHECK(outer_);
inner_->ClampScrollToMaxScrollOffset();
outer_->ClampScrollToMaxScrollOffset();
gfx::ScrollOffset viewport_location =
inner_->CurrentScrollOffset() + outer_->CurrentScrollOffset();
gfx::Vector2dF delta =
viewport_in_content_coordinates_.DeltaFrom(viewport_location);
delta = inner_->ScrollBy(delta);
outer_->ScrollBy(delta);
}
private:
LayerImpl* inner_;
LayerImpl* outer_;
gfx::ScrollOffset viewport_in_content_coordinates_;
};
// Emits a paired async trace event that brackets the time |id| is visible.
void DidVisibilityChange(LayerTreeHostImpl* id, bool visible) {
  if (!visible) {
    TRACE_EVENT_ASYNC_END0("cc", "LayerTreeHostImpl::SetVisible", id);
    return;
  }
  TRACE_EVENT_ASYNC_BEGIN1("cc", "LayerTreeHostImpl::SetVisible", id,
                           "LayerTreeHostImpl", id);
}
// True when the scroll gesture originated from a mouse wheel.
bool IsWheelBasedScroll(InputHandler::ScrollInputType type) {
  return InputHandler::WHEEL == type;
}
// Which thread ended up servicing a scroll update.
enum ScrollThread { MAIN_THREAD, CC_THREAD };

// Records to UMA whether a wheel- or touch-driven scroll update was handled
// on the main thread (the slow path) or on the compositor thread. The two
// UMA_HISTOGRAM_BOOLEAN call sites must stay separate: each macro caches a
// single histogram per call site.
void RecordCompositorSlowScrollMetric(InputHandler::ScrollInputType type,
                                      ScrollThread scroll_thread) {
  bool scroll_on_main_thread = (scroll_thread == MAIN_THREAD);
  if (IsWheelBasedScroll(type)) {
    UMA_HISTOGRAM_BOOLEAN("Renderer4.CompositorWheelScrollUpdateThread",
                          scroll_on_main_thread);
  } else {
    UMA_HISTOGRAM_BOOLEAN("Renderer4.CompositorTouchScrollUpdateThread",
                          scroll_on_main_thread);
  }
}
} // namespace
DEFINE_SCOPED_UMA_HISTOGRAM_TIMER(PendingTreeDurationHistogramTimer,
"Scheduling.%s.PendingTreeDuration");
DEFINE_SCOPED_UMA_HISTOGRAM_TIMER(PendingTreeRasterDurationHistogramTimer,
"Scheduling.%s.PendingTreeRasterDuration");
DEFINE_SCOPED_UMA_HISTOGRAM_TIMER(
ImageInvalidationUpdateDurationHistogramTimer,
"Scheduling.%s.ImageInvalidationUpdateDuration");
// A FrameData starts out with no render surface list, assumed damage, and no
// video content; fields are filled in during frame preparation.
LayerTreeHostImpl::FrameData::FrameData()
    : render_surface_list(nullptr),
      has_no_damage(false),
      may_contain_video(false) {}

LayerTreeHostImpl::FrameData::~FrameData() = default;
// Factory for LayerTreeHostImpl instances.
// NOTE(review): uses base::WrapUnique(new ...) rather than std::make_unique,
// presumably because the constructor is not publicly accessible — confirm in
// the header before changing.
std::unique_ptr<LayerTreeHostImpl> LayerTreeHostImpl::Create(
    const LayerTreeSettings& settings,
    LayerTreeHostImplClient* client,
    TaskRunnerProvider* task_runner_provider,
    RenderingStatsInstrumentation* rendering_stats_instrumentation,
    TaskGraphRunner* task_graph_runner,
    std::unique_ptr<MutatorHost> mutator_host,
    int id,
    scoped_refptr<base::SequencedTaskRunner> image_worker_task_runner) {
  return base::WrapUnique(new LayerTreeHostImpl(
      settings, client, task_runner_provider, rendering_stats_instrumentation,
      task_graph_runner, std::move(mutator_host), id,
      std::move(image_worker_task_runner)));
}
// Constructs the impl-side host. Member-initializer order matters here:
// |tile_manager_| depends on |is_synchronous_single_threaded_| and
// |task_runner_provider_| being initialized first (see inline comment).
LayerTreeHostImpl::LayerTreeHostImpl(
    const LayerTreeSettings& settings,
    LayerTreeHostImplClient* client,
    TaskRunnerProvider* task_runner_provider,
    RenderingStatsInstrumentation* rendering_stats_instrumentation,
    TaskGraphRunner* task_graph_runner,
    std::unique_ptr<MutatorHost> mutator_host,
    int id,
    scoped_refptr<base::SequencedTaskRunner> image_worker_task_runner)
    : client_(client),
      task_runner_provider_(task_runner_provider),
      current_begin_frame_tracker_(BEGINFRAMETRACKER_FROM_HERE),
      layer_tree_frame_sink_(nullptr),
      need_update_gpu_rasterization_status_(false),
      content_has_slow_paths_(false),
      content_has_non_aa_paint_(false),
      has_gpu_rasterization_trigger_(false),
      use_gpu_rasterization_(false),
      use_msaa_(false),
      gpu_rasterization_status_(GpuRasterizationStatus::OFF_DEVICE),
      input_handler_client_(nullptr),
      did_lock_scrolling_layer_(false),
      wheel_scrolling_(false),
      scroll_affects_scroll_handler_(false),
      tile_priorities_dirty_(false),
      settings_(settings),
      visible_(false),
      cached_managed_memory_policy_(settings.gpu_memory_policy),
      is_synchronous_single_threaded_(!task_runner_provider->HasImplThread() &&
                                      !settings.single_thread_proxy_scheduler),
      // Must be initialized after is_synchronous_single_threaded_ and
      // task_runner_provider_.
      tile_manager_(this,
                    GetTaskRunner(),
                    std::move(image_worker_task_runner),
                    is_synchronous_single_threaded_
                        ? std::numeric_limits<size_t>::max()
                        : settings.scheduled_raster_task_limit,
                    settings.ToTileManagerSettings()),
      pinch_gesture_active_(false),
      pinch_gesture_end_should_clear_scrolling_node_(false),
      fps_counter_(
          FrameRateCounter::Create(task_runner_provider_->HasImplThread())),
      memory_history_(MemoryHistory::Create()),
      debug_rect_history_(DebugRectHistory::Create()),
      max_memory_needed_bytes_(0),
      resourceless_software_draw_(false),
      mutator_host_(std::move(mutator_host)),
      rendering_stats_instrumentation_(rendering_stats_instrumentation),
      micro_benchmark_controller_(this),
      task_graph_runner_(task_graph_runner),
      id_(id),
      requires_high_res_to_draw_(false),
      is_likely_to_require_a_draw_(false),
      has_valid_layer_tree_frame_sink_(false),
      scroll_animating_latched_element_id_(kInvalidElementId),
      has_scrolled_by_wheel_(false),
      has_scrolled_by_touch_(false),
      touchpad_and_wheel_scroll_latching_enabled_(false),
      impl_thread_phase_(ImplThreadPhase::IDLE) {
  DCHECK(mutator_host_);
  mutator_host_->SetMutatorHostClient(this);
  DCHECK(task_runner_provider_->IsImplThread());
  DidVisibilityChange(this, visible_);
  SetDebugState(settings.initial_debug_state);

  // LTHI always has an active tree.
  active_tree_ = std::make_unique<LayerTreeImpl>(
      this, new SyncedProperty<ScaleGroup>, new SyncedBrowserControls,
      new SyncedElasticOverscroll);
  active_tree_->property_trees()->is_active = true;

  viewport_ = Viewport::Create(this);

  TRACE_EVENT_OBJECT_CREATED_WITH_ID(TRACE_DISABLED_BY_DEFAULT("cc.debug"),
                                     "cc::LayerTreeHostImpl", id_);

  browser_controls_offset_manager_ = BrowserControlsOffsetManager::Create(
      this, settings.top_controls_show_threshold,
      settings.top_controls_hide_threshold);

  tile_manager_.SetDecodedImageTracker(&decoded_image_tracker_);

  if (settings_.enable_image_animations) {
    // It is safe to use base::Unretained here since we will outlive the
    // ImageAnimationController.
    base::Closure invalidation_callback =
        base::Bind(&LayerTreeHostImpl::RequestInvalidationForAnimatedImages,
                   base::Unretained(this));
    image_animation_controller_.emplace(GetTaskRunner(),
                                        std::move(invalidation_callback));
  }
}
// Destructor. The LayerTreeFrameSink and raster resources must already have
// been released (see the DCHECKs); teardown order below looks deliberate —
// input handler first, then the trees, then the mutator host.
LayerTreeHostImpl::~LayerTreeHostImpl() {
  DCHECK(task_runner_provider_->IsImplThread());
  TRACE_EVENT0("cc", "LayerTreeHostImpl::~LayerTreeHostImpl()");
  TRACE_EVENT_OBJECT_DELETED_WITH_ID(TRACE_DISABLED_BY_DEFAULT("cc.debug"),
                                     "cc::LayerTreeHostImpl", id_);

  // It is released before shutdown.
  DCHECK(!layer_tree_frame_sink_);
  DCHECK(!resource_provider_);
  DCHECK(!resource_pool_);
  DCHECK(!single_thread_synchronous_task_graph_runner_);
  DCHECK(!image_decode_cache_);

  // Notify the input handler client so it stops referencing us.
  if (input_handler_client_) {
    input_handler_client_->WillShutdown();
    input_handler_client_ = nullptr;
  }
  if (scroll_elasticity_helper_)
    scroll_elasticity_helper_.reset();

  // The layer trees must be destroyed before the layer tree host.
  if (recycle_tree_)
    recycle_tree_->Shutdown();
  if (pending_tree_)
    pending_tree_->Shutdown();
  active_tree_->Shutdown();
  recycle_tree_ = nullptr;
  pending_tree_ = nullptr;
  active_tree_ = nullptr;

  // Clear mutators after the trees are gone.
  // NOTE(review): ordering relative to tree teardown appears intentional —
  // confirm before reordering.
  mutator_host_->ClearMutators();
  mutator_host_->SetMutatorHostClient(nullptr);
}
void LayerTreeHostImpl::BeginMainFrameAborted(
    CommitEarlyOutReason reason,
    std::vector<std::unique_ptr<SwapPromise>> swap_promises) {
  // If the begin frame data was handled, then scroll and scale set was applied
  // by the main thread, so the active tree needs to be updated as if these
  // sent values were applied and committed.
  if (!CommitEarlyOutHandledCommit(reason))
    return;

  active_tree_->ApplySentScrollAndScaleDeltasFromAbortedCommit();
  if (!pending_tree_) {
    // There is no pending tree to carry the promises, so they cannot swap.
    for (const auto& swap_promise : swap_promises)
      swap_promise->DidNotSwap(SwapPromise::COMMIT_NO_UPDATE);
    return;
  }
  pending_tree_->AppendSwapPromises(std::move(swap_promises));
}
void LayerTreeHostImpl::BeginCommit() {
  TRACE_EVENT0("cc", "LayerTreeHostImpl::BeginCommit");
  // When committing directly to the active tree no pending tree is needed.
  if (CommitToActiveTree())
    return;
  CreatePendingTree();
}
// Called once the main thread finishes a commit: flushes queued input,
// updates the sync tree, and informs the micro-benchmark controller.
void LayerTreeHostImpl::CommitComplete() {
  TRACE_EVENT0("cc", "LayerTreeHostImpl::CommitComplete");

  // In high latency mode commit cannot finish within the same frame. We need
  // to flush input here to make sure they got picked up by |PrepareTiles()|.
  if (input_handler_client_ && impl_thread_phase_ == ImplThreadPhase::IDLE)
    input_handler_client_->DeliverInputForBeginFrame();

  UpdateSyncTreeAfterCommitOrImplSideInvalidation();
  micro_benchmark_controller_.DidCompleteCommit();
}
// Brings the sync tree up to date after a commit or an impl-side
// invalidation: activates/ticks animations, refreshes GPU rasterization
// state, updates draw properties, invalidates animated and checkerboarded
// images, and kicks off tile preparation. The ordering of these steps is
// load-bearing — see the inline comments.
void LayerTreeHostImpl::UpdateSyncTreeAfterCommitOrImplSideInvalidation() {
  if (CommitToActiveTree()) {
    active_tree_->HandleScrollbarShowRequestsFromMain();

    // We have to activate animations here or "IsActive()" is true on the
    // layers but the animations aren't activated yet so they get ignored by
    // UpdateDrawProperties.
    ActivateAnimations();
  }

  // Start animations before UpdateDrawProperties and PrepareTiles, as they can
  // change the results. When doing commit to the active tree, this must happen
  // after ActivateAnimations() in order for this ticking to be propogated to
  // layers on the active tree.
  if (CommitToActiveTree())
    Animate();
  else
    AnimatePendingTreeAfterCommit();

  // LayerTreeHost may have changed the GPU rasterization flags state, which
  // may require an update of the tree resources.
  UpdateTreeResourcesForGpuRasterizationIfNeeded();
  sync_tree()->set_needs_update_draw_properties();

  // We need an update immediately post-commit to have the opportunity to
  // create tilings.
  // We can avoid updating the ImageAnimationController during this
  // DrawProperties update since it will be done when we animate the controller
  // below.
  bool update_image_animation_controller = false;
  sync_tree()->UpdateDrawProperties(update_image_animation_controller);

  // Because invalidations may be coming from the main thread, it's
  // safe to do an update for lcd text at this point and see if lcd text needs
  // to be disabled on any layers.
  // It'd be ideal if this could be done earlier, but when the raster source
  // is updated from the main thread during push properties, update draw
  // properties has not occurred yet and so it's not clear whether or not the
  // layer can or cannot use lcd text. So, this is the cleanup pass to
  // determine if lcd state needs to switch due to draw properties.
  sync_tree()->UpdateCanUseLCDText();

  // Defer invalidating images until UpdateDrawProperties is performed since
  // that updates whether an image should be animated based on its visibility
  // and the updated data for the image from the main frame.
  {
    ImageInvalidationUpdateDurationHistogramTimer image_invalidation_timer;
    PaintImageIdFlatSet images_to_invalidate =
        tile_manager_.TakeImagesToInvalidateOnSyncTree();
    if (ukm_manager_)
      ukm_manager_->AddCheckerboardedImages(images_to_invalidate.size());

    if (image_animation_controller_.has_value()) {
      const auto& animated_images =
          image_animation_controller_.value().AnimateForSyncTree(
              CurrentBeginFrameArgs().frame_time);
      images_to_invalidate.insert(animated_images.begin(),
                                  animated_images.end());
    }
    sync_tree()->InvalidateRegionForImages(images_to_invalidate);
  }

  // Start working on newly created tiles immediately if needed.
  // TODO(vmpstr): Investigate always having PrepareTiles issue
  // NotifyReadyToActivate, instead of handling it here.
  bool did_prepare_tiles = PrepareTiles();
  if (!did_prepare_tiles) {
    NotifyReadyToActivate();

    // Ensure we get ReadyToDraw signal even when PrepareTiles not run. This
    // is important for SingleThreadProxy and impl-side painting case. For
    // STP, we commit to active tree and RequiresHighResToDraw, and set
    // Scheduler to wait for ReadyToDraw signal to avoid Checkerboard.
    if (CommitToActiveTree())
      NotifyReadyToDraw();
  } else if (!CommitToActiveTree()) {
    DCHECK(!pending_tree_raster_duration_timer_);
    pending_tree_raster_duration_timer_ =
        std::make_unique<PendingTreeRasterDurationHistogramTimer>();
  }
}
// Returns whether a frame can be produced right now. Each early-out emits an
// instant trace event naming the reason drawing is blocked. The order of the
// checks is significant: resourceless software draws skip the viewport and
// UI-resource checks below them.
bool LayerTreeHostImpl::CanDraw() const {
  // Note: If you are changing this function or any other function that might
  // affect the result of CanDraw, make sure to call
  // client_->OnCanDrawStateChanged in the proper places and update the
  // NotifyIfCanDrawChanged test.

  if (!layer_tree_frame_sink_) {
    TRACE_EVENT_INSTANT0("cc",
                         "LayerTreeHostImpl::CanDraw no LayerTreeFrameSink",
                         TRACE_EVENT_SCOPE_THREAD);
    return false;
  }

  // TODO(boliu): Make draws without layers work and move this below
  // |resourceless_software_draw_| check. Tracked in crbug.com/264967.
  if (active_tree_->LayerListIsEmpty()) {
    TRACE_EVENT_INSTANT0("cc", "LayerTreeHostImpl::CanDraw no root layer",
                         TRACE_EVENT_SCOPE_THREAD);
    return false;
  }

  if (resourceless_software_draw_)
    return true;

  if (DeviceViewport().IsEmpty()) {
    TRACE_EVENT_INSTANT0("cc", "LayerTreeHostImpl::CanDraw empty viewport",
                         TRACE_EVENT_SCOPE_THREAD);
    return false;
  }
  if (active_tree_->ViewportSizeInvalid()) {
    TRACE_EVENT_INSTANT0(
        "cc", "LayerTreeHostImpl::CanDraw viewport size recently changed",
        TRACE_EVENT_SCOPE_THREAD);
    return false;
  }
  if (EvictedUIResourcesExist()) {
    TRACE_EVENT_INSTANT0(
        "cc", "LayerTreeHostImpl::CanDraw UI resources evicted not recreated",
        TRACE_EVENT_SCOPE_THREAD);
    return false;
  }
  return true;
}
void LayerTreeHostImpl::AnimatePendingTreeAfterCommit() {
  // Tick animations against the non-active (pending) tree.
  const bool animate_active_tree = false;
  AnimateInternal(animate_active_tree);
}
void LayerTreeHostImpl::Animate() {
  // Tick animations against the active tree.
  const bool animate_active_tree = true;
  AnimateInternal(animate_active_tree);
}
// Shared implementation behind Animate() and AnimatePendingTreeAfterCommit().
// |active_tree| selects which tree's layer animations are ticked; when the
// active tree is animated we also sync root scroll state and may schedule a
// redraw.
void LayerTreeHostImpl::AnimateInternal(bool active_tree) {
  DCHECK(task_runner_provider_->IsImplThread());
  base::TimeTicks monotonic_time = CurrentBeginFrameArgs().frame_time;

  // mithro(TODO): Enable these checks.
  // DCHECK(!current_begin_frame_tracker_.HasFinished());
  // DCHECK(monotonic_time == current_begin_frame_tracker_.Current().frame_time)
  // << "Called animate with unknown frame time!?";

  bool did_animate = false;
  if (input_handler_client_) {
    // This animates fling scrolls. But on Android WebView root flings are
    // controlled by the application, so the compositor does not animate them.
    bool ignore_fling =
        settings_.ignore_root_layer_flings && IsCurrentlyScrollingViewport();
    if (!ignore_fling) {
      // This does not set did_animate, because if the InputHandlerClient
      // changes anything it will be through the InputHandler interface which
      // does SetNeedsRedraw.
      input_handler_client_->Animate(monotonic_time);
    }
  }

  did_animate |= AnimatePageScale(monotonic_time);
  did_animate |= AnimateLayers(monotonic_time, active_tree);
  did_animate |= AnimateScrollbars(monotonic_time);
  did_animate |= AnimateBrowserControls(monotonic_time);

  if (active_tree) {
    // Animating stuff can change the root scroll offset, so inform the
    // synchronous input handler.
    UpdateRootLayerStateForSynchronousInputHandler();
    if (did_animate) {
      // If the tree changed, then we want to draw at the end of the current
      // frame.
      SetNeedsRedraw();
    }
  }
}
bool LayerTreeHostImpl::PrepareTiles() {
  // Nothing to do unless tile priorities have been invalidated.
  if (!tile_priorities_dirty_)
    return false;

  client_->WillPrepareTiles();
  const bool prepared = tile_manager_.PrepareTiles(global_tile_state_);
  if (prepared)
    tile_priorities_dirty_ = false;
  client_->DidPrepareTiles();
  return prepared;
}
// Starts an animated zoom of the page to |page_scale| over |duration|.
// |target_offset| is interpreted as the zoom anchor point when |anchor_point|
// is true, otherwise as the destination scroll offset. No-op when there is no
// inner viewport scroll layer.
void LayerTreeHostImpl::StartPageScaleAnimation(
    const gfx::Vector2d& target_offset,
    bool anchor_point,
    float page_scale,
    base::TimeDelta duration) {
  if (!InnerViewportScrollLayer())
    return;

  gfx::ScrollOffset scroll_total = active_tree_->TotalScrollOffset();
  gfx::SizeF scrollable_size = active_tree_->ScrollableSize();
  gfx::SizeF viewport_size =
      gfx::SizeF(active_tree_->InnerViewportContainerLayer()->bounds());

  // TODO(miletus) : Pass in ScrollOffset.
  page_scale_animation_ =
      PageScaleAnimation::Create(ScrollOffsetToVector2dF(scroll_total),
                                 active_tree_->current_page_scale_factor(),
                                 viewport_size, scrollable_size);

  if (anchor_point) {
    gfx::Vector2dF anchor(target_offset);
    page_scale_animation_->ZoomWithAnchor(anchor, page_scale,
                                          duration.InSecondsF());
  } else {
    gfx::Vector2dF scaled_target_offset = target_offset;
    page_scale_animation_->ZoomTo(scaled_target_offset, page_scale,
                                  duration.InSecondsF());
  }

  // The animation is ticked on impl frames; the main thread also needs a
  // commit to pick up the resulting scale/scroll changes.
  SetNeedsOneBeginImplFrame();
  client_->SetNeedsCommitOnImplThread();
  client_->RenewTreePriority();
}
void LayerTreeHostImpl::SetNeedsAnimateInput() {
  // When the embedder owns root-layer flings (ignore_root_layer_flings), the
  // compositor must not be asked to animate a viewport scroll.
  DCHECK(!(settings_.ignore_root_layer_flings &&
           IsCurrentlyScrollingViewport()));
  SetNeedsOneBeginImplFrame();
}
bool LayerTreeHostImpl::IsCurrentlyScrollingViewport() const {
  const auto* scrolling_node = CurrentlyScrollingNode();
  if (!scrolling_node)
    return false;
  auto* main_scroll_layer = viewport()->MainScrollLayer();
  if (!main_scroll_layer)
    return false;
  return scrolling_node->id == main_scroll_layer->scroll_tree_index();
}
// Returns true if the node currently being scrolled is the same node a scroll
// starting at |viewport_point| would target. The inner and outer viewport
// scroll nodes are treated as interchangeable.
bool LayerTreeHostImpl::IsCurrentlyScrollingLayerAt(
    const gfx::Point& viewport_point,
    InputHandler::ScrollInputType type) const {
  auto* scrolling_node = CurrentlyScrollingNode();
  if (!scrolling_node)
    return false;

  // Hit-test in device (physical-pixel) coordinates.
  gfx::PointF device_viewport_point = gfx::ScalePoint(
      gfx::PointF(viewport_point), active_tree_->device_scale_factor());
  LayerImpl* layer_impl =
      active_tree_->FindLayerThatIsHitByPoint(device_viewport_point);

  bool scroll_on_main_thread = false;
  uint32_t main_thread_scrolling_reasons;
  auto* test_scroll_node = FindScrollNodeForDeviceViewportPoint(
      device_viewport_point, type, layer_impl, &scroll_on_main_thread,
      &main_thread_scrolling_reasons);

  // A point that must be scrolled on the main thread never matches.
  if (scroll_on_main_thread)
    return false;

  if (scrolling_node == test_scroll_node)
    return true;

  // For active scrolling state treat the inner/outer viewports
  // interchangeably.
  if (scrolling_node->scrolls_inner_viewport ||
      scrolling_node->scrolls_outer_viewport) {
    return test_scroll_node == OuterViewportScrollNode();
  }

  return false;
}
EventListenerProperties LayerTreeHostImpl::GetEventListenerProperties(
    EventListenerClass event_class) const {
  // Event listener state is tracked on the active tree.
  auto* tree = active_tree_.get();
  return tree->event_listener_properties(event_class);
}
// Returns true if |ancestor|'s scroll node is |child|'s own scroll node or
// any node above it on the scroll tree walk to the root.
bool LayerTreeHostImpl::IsScrolledBy(LayerImpl* child, ScrollNode* ancestor) {
  DCHECK(ancestor && ancestor->scrollable);
  if (!child)
    return false;
  DCHECK_EQ(child->layer_tree_impl(), active_tree_.get());

  // Walk from |child|'s scroll node up toward the root looking for |ancestor|.
  ScrollTree& scroll_tree = active_tree_->property_trees()->scroll_tree;
  ScrollNode* node = scroll_tree.Node(child->scroll_tree_index());
  while (node) {
    if (node->id == ancestor->id)
      return true;
    node = scroll_tree.parent(node);
  }
  return false;
}
// Hit-tests |viewport_point| against touch handler regions. Returns whether a
// touch handler was hit and — via |out_touch_action|, which may be null — the
// white-listed touch action at that point. A handler on the currently
// scrolling layer (or one of its scroll ancestors) is reported distinctly.
InputHandler::TouchStartOrMoveEventListenerType
LayerTreeHostImpl::EventListenerTypeForTouchStartOrMoveAt(
    const gfx::Point& viewport_point,
    TouchAction* out_touch_action) {
  gfx::PointF device_viewport_point = gfx::ScalePoint(
      gfx::PointF(viewport_point), active_tree_->device_scale_factor());

  LayerImpl* layer_impl_with_touch_handler =
      active_tree_->FindLayerThatIsHitByPointInTouchHandlerRegion(
          device_viewport_point);

  if (layer_impl_with_touch_handler == nullptr) {
    if (out_touch_action)
      *out_touch_action = kTouchActionAuto;
    return InputHandler::TouchStartOrMoveEventListenerType::NO_HANDLER;
  }

  if (out_touch_action) {
    // Project the device-space hit point back into the layer's local space to
    // query its touch action region.
    gfx::Transform layer_screen_space_transform =
        layer_impl_with_touch_handler->ScreenSpaceTransform();
    gfx::Transform inverse_layer_screen_space(
        gfx::Transform::kSkipInitialization);
    bool can_be_inversed =
        layer_screen_space_transform.GetInverse(&inverse_layer_screen_space);
    // Getting here indicates that |layer_impl_with_touch_handler| is non-null,
    // which means that the |hit| in FindClosestMatchingLayer() is true, which
    // indicates that the inverse is available.
    DCHECK(can_be_inversed);
    bool clipped = false;
    gfx::Point3F planar_point = MathUtil::ProjectPoint3D(
        inverse_layer_screen_space, device_viewport_point, &clipped);
    gfx::PointF hit_test_point_in_layer_space =
        gfx::PointF(planar_point.x(), planar_point.y());
    const auto& region = layer_impl_with_touch_handler->touch_action_region();
    gfx::Point point = gfx::ToRoundedPoint(hit_test_point_in_layer_space);
    *out_touch_action = region.GetWhiteListedTouchAction(point);
  }

  if (!CurrentlyScrollingNode())
    return InputHandler::TouchStartOrMoveEventListenerType::HANDLER;

  // Check if the touch start (or move) hits on the current scrolling layer or
  // its descendant. layer_impl_with_touch_handler is the layer hit by the
  // pointer and has an event handler, otherwise it is null. We want to compare
  // the most inner layer we are hitting on which may not have an event listener
  // with the actual scrolling layer.
  LayerImpl* layer_impl =
      active_tree_->FindLayerThatIsHitByPoint(device_viewport_point);
  bool is_ancestor = IsScrolledBy(layer_impl, CurrentlyScrollingNode());
  return is_ancestor ? InputHandler::TouchStartOrMoveEventListenerType::
                           HANDLER_ON_SCROLLING_LAYER
                     : InputHandler::TouchStartOrMoveEventListenerType::HANDLER;
}
std::unique_ptr<SwapPromiseMonitor>
LayerTreeHostImpl::CreateLatencyInfoSwapPromiseMonitor(
    ui::LatencyInfo* latency) {
  // The monitor is bound to this impl instance; the middle argument (the
  // main-thread-side host) is intentionally null here.
  return std::make_unique<LatencyInfoSwapPromiseMonitor>(latency, nullptr,
                                                         this);
}
ScrollElasticityHelper* LayerTreeHostImpl::CreateScrollElasticityHelper() {
  DCHECK(!scroll_elasticity_helper_);
  // Without elastic overscroll there is no helper; callers receive nullptr.
  if (!settings_.enable_elastic_overscroll)
    return nullptr;
  scroll_elasticity_helper_.reset(
      ScrollElasticityHelper::CreateForLayerTreeHostImpl(this));
  return scroll_elasticity_helper_.get();
}
bool LayerTreeHostImpl::GetScrollOffsetForLayer(int layer_id,
                                                gfx::ScrollOffset* offset) {
  // Look the layer up on the active tree; fail if it no longer exists there.
  if (LayerImpl* layer = active_tree()->FindActiveTreeLayerById(layer_id)) {
    *offset = layer->CurrentScrollOffset();
    return true;
  }
  return false;
}
bool LayerTreeHostImpl::ScrollLayerTo(int layer_id,
                                      const gfx::ScrollOffset& offset) {
  LayerImpl* layer = active_tree()->FindActiveTreeLayerById(layer_id);
  if (!layer)
    return false;
  // Convert the absolute target into a delta relative to the current offset.
  const auto delta = offset - layer->CurrentScrollOffset();
  layer->ScrollBy(ScrollOffsetToVector2dF(delta));
  return true;
}
bool LayerTreeHostImpl::ScrollingShouldSwitchtoMainThread() {
ScrollTree& scroll_tree = active_tree_->property_trees()->scroll_tree;
ScrollNode* scroll_node = scroll_tree.CurrentlyScrollingNode();
if (!scroll_node)
return true;
for (; scroll_tree.parent(scroll_node);
scroll_node = scroll_tree.parent(scroll_node)) {
if (!!scroll_node->main_thread_scrolling_reasons)
return true;
}
return false;
}
void LayerTreeHostImpl::QueueSwapPromiseForMainThreadScrollUpdate(
    std::unique_ptr<SwapPromise> swap_promise) {
  // Append to the queue of promises awaiting a main-thread scroll update.
  swap_promises_for_main_thread_scroll_update_.emplace_back(
      std::move(swap_promise));
}
// Serializes this FrameData into |value| for tracing. Render passes are only
// dumped when the (disabled-by-default) cc.debug.quads category is enabled,
// since quad data can be very large.
void LayerTreeHostImpl::FrameData::AsValueInto(
    base::trace_event::TracedValue* value) const {
  value->SetBoolean("has_no_damage", has_no_damage);

  // Quad data can be quite large, so only dump render passes if we select
  // cc.debug.quads.
  // Initialized to false defensively: the macro below writes through the
  // pointer, but reading an uninitialized bool would be UB if it ever
  // compiled to a no-op.
  bool quads_enabled = false;
  TRACE_EVENT_CATEGORY_GROUP_ENABLED(
      TRACE_DISABLED_BY_DEFAULT("cc.debug.quads"), &quads_enabled);
  if (!quads_enabled)
    return;

  value->BeginArray("render_passes");
  for (const auto& render_pass : render_passes) {
    value->BeginDictionary();
    render_pass->AsValueInto(value);
    value->EndDictionary();
  }
  value->EndArray();
}
DrawMode LayerTreeHostImpl::GetDrawMode() const {
  // Resourceless software draw takes precedence; otherwise the presence of a
  // context provider selects hardware vs software drawing.
  if (resourceless_software_draw_)
    return DRAW_MODE_RESOURCELESS_SOFTWARE;
  if (layer_tree_frame_sink_->context_provider())
    return DRAW_MODE_HARDWARE;
  return DRAW_MODE_SOFTWARE;
}
// Appends solid-color "gutter" quads covering |fill_region| (screen area not
// covered by layer content) in |screen_background_color| to
// |target_render_pass|.
// NOTE(review): |root_scroll_layer_rect| is not referenced in this body —
// confirm whether the parameter can be dropped at the call site.
static void AppendQuadsToFillScreen(
    const gfx::Rect& root_scroll_layer_rect,
    viz::RenderPass* target_render_pass,
    const RenderSurfaceImpl* root_render_surface,
    SkColor screen_background_color,
    const Region& fill_region) {
  // Nothing to do for a missing root surface, a fully transparent background,
  // or an empty fill region.
  if (!root_render_surface || !SkColorGetA(screen_background_color))
    return;
  if (fill_region.IsEmpty())
    return;

  // Manually create the quad state for the gutter quads, as the root layer
  // doesn't have any bounds and so can't generate this itself.
  // TODO(danakj): Make the gutter quads generated by the solid color layer
  // (make it smarter about generating quads to fill unoccluded areas).
  gfx::Rect root_target_rect = root_render_surface->content_rect();
  float opacity = 1.f;
  int sorting_context_id = 0;
  bool are_contents_opaque = SkColorGetA(screen_background_color) == 0xFF;
  viz::SharedQuadState* shared_quad_state =
      target_render_pass->CreateAndAppendSharedQuadState();
  shared_quad_state->SetAll(gfx::Transform(), root_target_rect,
                            root_target_rect, root_target_rect, false,
                            are_contents_opaque, opacity, SkBlendMode::kSrcOver,
                            sorting_context_id);

  for (gfx::Rect screen_space_rect : fill_region) {
    gfx::Rect visible_screen_space_rect = screen_space_rect;
    // Skip the quad culler and just append the quads directly to avoid
    // occlusion checks.
    auto* quad =
        target_render_pass->CreateAndAppendDrawQuad<viz::SolidColorDrawQuad>();
    quad->SetNew(shared_quad_state, screen_space_rect,
                 visible_screen_space_rect, screen_background_color, false);
  }
}
// Returns the pass in |list| whose id matches |id|, or nullptr if absent.
static viz::RenderPass* FindRenderPassById(const viz::RenderPassList& list,
                                           viz::RenderPassId id) {
  // Linear scan; render pass lists are short.
  for (const auto& pass : list) {
    if (pass->id == id)
      return pass.get();
  }
  return nullptr;
}
// Returns whether a CompositorFrame should be produced for this draw. True
// when there is visible damage, copy requests, an animating HUD, a new
// LocalSurfaceId, or the frame sink requires a swap every frame.
bool LayerTreeHostImpl::HasDamage(bool handle_visibility_changed) const {
  DCHECK(!active_tree()->needs_update_draw_properties());
  DCHECK(CanDraw());

  // Touch-handle visibility changes and explicit viewport damage always force
  // a frame.
  if (handle_visibility_changed || !viewport_damage_rect_.IsEmpty())
    return true;

  const LayerTreeImpl* active_tree = active_tree_.get();

  // If the root render surface has no visible damage, then don't generate a
  // frame at all.
  const RenderSurfaceImpl* root_surface = active_tree->RootRenderSurface();
  bool root_surface_has_no_visible_damage =
      !root_surface->GetDamageRect().Intersects(root_surface->content_rect());
  bool root_surface_has_contributing_layers =
      !!root_surface->num_contributors();
  // Renamed from |hud_wants_to_draw_|: locals must not carry the trailing
  // underscore reserved for members.
  bool hud_wants_to_draw = active_tree->hud_layer() &&
                           active_tree->hud_layer()->IsAnimatingHUDContents();
  bool must_always_swap =
      layer_tree_frame_sink_->capabilities().must_always_swap;

  // If we have a new LocalSurfaceId, we must always submit a CompositorFrame
  // because the parent is blocking on us.
  bool local_surface_id_changed =
      settings_.enable_surface_synchronization &&
      (last_draw_local_surface_id_ != active_tree->local_surface_id());

  return !root_surface_has_contributing_layers ||
         !root_surface_has_no_visible_damage ||
         active_tree_->property_trees()->effect_tree.HasCopyRequests() ||
         must_always_swap || hud_wants_to_draw || local_surface_id_changed;
}
// Builds |frame|'s render passes and their quad lists from the active tree.
// Returns DRAW_SUCCESS unless animating checkerboarded content or missing
// high-res tiles force the draw to be aborted. On success the passes are in
// dependency order with the root pass last.
DrawResult LayerTreeHostImpl::CalculateRenderPasses(FrameData* frame) {
  DCHECK(frame->render_passes.empty());
  DCHECK(CanDraw());
  DCHECK(!active_tree_->LayerListIsEmpty());
  // For now, we use damage tracking to compute a global scissor. To do this, we
  // must compute all damage tracking before drawing anything, so that we know
  // the root damage rect. The root damage rect is then used to scissor each
  // surface.
  DamageTracker::UpdateDamageTracking(active_tree_.get(),
                                      active_tree_->GetRenderSurfaceList());
  // When touch handle visibility changes there is no visible damage
  // because touch handles are composited in the browser. However we
  // still want the browser to be notified that the handles changed
  // through the |ViewHostMsg_SwapCompositorFrame| IPC so we keep
  // track of handle visibility changes through |handle_visibility_changed|.
  bool handle_visibility_changed =
      active_tree_->GetAndResetHandleVisibilityChanged();
  if (!HasDamage(handle_visibility_changed)) {
    // Nothing changed on screen: report success with an empty frame rather
    // than building passes for no damage.
    TRACE_EVENT0("cc",
                 "LayerTreeHostImpl::CalculateRenderPasses::EmptyDamageRect");
    frame->has_no_damage = true;
    DCHECK(!resourceless_software_draw_);
    return DRAW_SUCCESS;
  }
  TRACE_EVENT_BEGIN2("cc", "LayerTreeHostImpl::CalculateRenderPasses",
                     "render_surface_list.size()",
                     static_cast<uint64_t>(frame->render_surface_list->size()),
                     "RequiresHighResToDraw", RequiresHighResToDraw());
  // Create the render passes in dependency order. The surface list is walked
  // in reverse so each pass exists before any pass that embeds it.
  size_t render_surface_list_size = frame->render_surface_list->size();
  for (size_t i = 0; i < render_surface_list_size; ++i) {
    size_t surface_index = render_surface_list_size - 1 - i;
    RenderSurfaceImpl* render_surface =
        (*frame->render_surface_list)[surface_index];
    bool is_root_surface =
        render_surface->EffectTreeIndex() == EffectTree::kContentsRootNodeId;
    bool should_draw_into_render_pass =
        is_root_surface || render_surface->contributes_to_drawn_surface() ||
        render_surface->HasCopyRequest() ||
        render_surface->ShouldCacheRenderSurface();
    if (should_draw_into_render_pass)
      frame->render_passes.push_back(render_surface->CreateRenderPass());
  }
  // Damage rects for non-root passes aren't meaningful, so set them to be
  // equal to the output rect.
  for (size_t i = 0; i + 1 < frame->render_passes.size(); ++i) {
    viz::RenderPass* pass = frame->render_passes[i].get();
    pass->damage_rect = pass->output_rect;
  }
  // When we are displaying the HUD, change the root damage rect to cover the
  // entire root surface. This will disable partial-swap/scissor optimizations
  // that would prevent the HUD from updating, since the HUD does not cause
  // damage itself, to prevent it from messing with damage visualizations. Since
  // damage visualizations are done off the LayerImpls and RenderSurfaceImpls,
  // changing the RenderPass does not affect them.
  if (active_tree_->hud_layer()) {
    viz::RenderPass* root_pass = frame->render_passes.back().get();
    root_pass->damage_rect = root_pass->output_rect;
  }
  // Grab this region here before iterating layers. Taking copy requests from
  // the layers while constructing the render passes will dirty the render
  // surface layer list and this unoccluded region, flipping the dirty bit to
  // true, and making us able to query for it without doing
  // UpdateDrawProperties again. The value inside the Region is not actually
  // changed until UpdateDrawProperties happens, so a reference to it is safe.
  const Region& unoccluded_screen_space_region =
      active_tree_->UnoccludedScreenSpaceRegion();
  // Typically when we are missing a texture and use a checkerboard quad, we
  // still draw the frame. However when the layer being checkerboarded is moving
  // due to an impl-animation, we drop the frame to avoid flashing due to the
  // texture suddenly appearing in the future.
  DrawResult draw_result = DRAW_SUCCESS;
  int layers_drawn = 0;
  const DrawMode draw_mode = GetDrawMode();
  int num_missing_tiles = 0;
  int num_incomplete_tiles = 0;
  int64_t checkerboarded_no_recording_content_area = 0;
  int64_t checkerboarded_needs_raster_content_area = 0;
  int64_t total_visible_area = 0;
  bool have_copy_request =
      active_tree()->property_trees()->effect_tree.HasCopyRequests();
  bool have_missing_animated_tiles = false;
  // Walk the effect tree / layer list, appending quads for target surfaces,
  // contributing surfaces, and visible layers into their target passes.
  for (EffectTreeLayerListIterator it(active_tree());
       it.state() != EffectTreeLayerListIterator::State::END; ++it) {
    auto target_render_pass_id = it.target_render_surface()->id();
    viz::RenderPass* target_render_pass =
        FindRenderPassById(frame->render_passes, target_render_pass_id);
    AppendQuadsData append_quads_data;
    if (it.state() == EffectTreeLayerListIterator::State::TARGET_SURFACE) {
      RenderSurfaceImpl* render_surface = it.target_render_surface();
      if (render_surface->HasCopyRequest()) {
        active_tree()
            ->property_trees()
            ->effect_tree.TakeCopyRequestsAndTransformToSurface(
                render_surface->EffectTreeIndex(),
                &target_render_pass->copy_requests);
      }
    } else if (it.state() ==
               EffectTreeLayerListIterator::State::CONTRIBUTING_SURFACE) {
      RenderSurfaceImpl* render_surface = it.current_render_surface();
      if (render_surface->contributes_to_drawn_surface()) {
        render_surface->AppendQuads(draw_mode, target_render_pass,
                                    &append_quads_data);
      }
    } else if (it.state() == EffectTreeLayerListIterator::State::LAYER &&
               !it.current_layer()->visible_layer_rect().IsEmpty()) {
      LayerImpl* layer = it.current_layer();
      bool occluded =
          layer->draw_properties().occlusion_in_content_space.IsOccluded(
              layer->visible_layer_rect());
      if (!occluded && layer->WillDraw(draw_mode, resource_provider_.get())) {
        DCHECK_EQ(active_tree_.get(), layer->layer_tree_impl());
        frame->will_draw_layers.push_back(layer);
        if (layer->may_contain_video())
          frame->may_contain_video = true;
        layer->AppendQuads(target_render_pass, &append_quads_data);
      }
      ++layers_drawn;
      // Feed per-layer quad statistics into the rendering stats and into the
      // local totals used below for abort decisions and UKM reporting.
      rendering_stats_instrumentation_->AddVisibleContentArea(
          append_quads_data.visible_layer_area);
      rendering_stats_instrumentation_->AddApproximatedVisibleContentArea(
          append_quads_data.approximated_visible_content_area);
      rendering_stats_instrumentation_->AddCheckerboardedVisibleContentArea(
          append_quads_data.checkerboarded_visible_content_area);
      rendering_stats_instrumentation_->AddCheckerboardedNoRecordingContentArea(
          append_quads_data.checkerboarded_no_recording_content_area);
      rendering_stats_instrumentation_->AddCheckerboardedNeedsRasterContentArea(
          append_quads_data.checkerboarded_needs_raster_content_area);
      num_missing_tiles += append_quads_data.num_missing_tiles;
      num_incomplete_tiles += append_quads_data.num_incomplete_tiles;
      checkerboarded_no_recording_content_area +=
          append_quads_data.checkerboarded_no_recording_content_area;
      checkerboarded_needs_raster_content_area +=
          append_quads_data.checkerboarded_needs_raster_content_area;
      total_visible_area += append_quads_data.visible_layer_area;
      if (append_quads_data.num_missing_tiles > 0) {
        have_missing_animated_tiles |=
            layer->screen_space_transform_is_animating();
      }
    }
    frame->activation_dependencies.insert(
        frame->activation_dependencies.end(),
        append_quads_data.activation_dependencies.begin(),
        append_quads_data.activation_dependencies.end());
    if (append_quads_data.deadline_in_frames) {
      // Keep the largest deadline requested by any surface in the frame.
      if (!frame->deadline_in_frames)
        frame->deadline_in_frames = 0u;
      frame->deadline_in_frames = std::max(
          *frame->deadline_in_frames, *append_quads_data.deadline_in_frames);
    }
  }
  // If CommitToActiveTree() is true, then we wait to draw until
  // NotifyReadyToDraw. That means we're in as good shape as is possible now,
  // so there's no reason to stop the draw now (and this is not supported by
  // SingleThreadProxy).
  if (have_missing_animated_tiles && !CommitToActiveTree())
    draw_result = DRAW_ABORTED_CHECKERBOARD_ANIMATIONS;
  // When we require high res to draw, abort the draw (almost) always. This does
  // not cause the scheduler to do a main frame, instead it will continue to try
  // drawing until we finally complete, so the copy request will not be lost.
  // TODO(weiliangc): Remove RequiresHighResToDraw. crbug.com/469175
  if (num_incomplete_tiles || num_missing_tiles) {
    if (RequiresHighResToDraw())
      draw_result = DRAW_ABORTED_MISSING_HIGH_RES_CONTENT;
  }
  // When doing a resourceless software draw, we don't have control over the
  // surface the compositor draws to, so even though the frame may not be
  // complete, the previous frame has already been potentially lost, so an
  // incomplete frame is better than nothing, so this takes highest precedence.
  if (resourceless_software_draw_)
    draw_result = DRAW_SUCCESS;
#if DCHECK_IS_ON()
  for (const auto& render_pass : frame->render_passes) {
    for (auto* quad : render_pass->quad_list)
      DCHECK(quad->shared_quad_state);
  }
  DCHECK(frame->render_passes.back()->output_rect.origin().IsOrigin());
#endif
  bool has_transparent_background =
      SkColorGetA(active_tree_->background_color()) != SK_AlphaOPAQUE;
  if (!has_transparent_background) {
    // Fill any unoccluded, undrawn screen area with the background color so
    // the root pass is fully covered.
    frame->render_passes.back()->has_transparent_background = false;
    AppendQuadsToFillScreen(
        active_tree_->RootScrollLayerDeviceViewportBounds(),
        frame->render_passes.back().get(), active_tree_->RootRenderSurface(),
        active_tree_->background_color(), unoccluded_screen_space_region);
  }
  RemoveRenderPasses(frame);
  // If we're making a frame to draw, it better have at least one render pass.
  DCHECK(!frame->render_passes.empty());
  if (have_copy_request) {
    // Any copy requests left in the tree are not going to get serviced, and
    // should be aborted.
    active_tree()->property_trees()->effect_tree.ClearCopyRequests();
    // Draw properties depend on copy requests.
    active_tree()->set_needs_update_draw_properties();
  }
  if (ukm_manager_) {
    ukm_manager_->AddCheckerboardStatsForFrame(
        checkerboarded_no_recording_content_area +
            checkerboarded_needs_raster_content_area,
        num_missing_tiles, total_visible_area);
  }
  if (active_tree_->has_ever_been_drawn()) {
    UMA_HISTOGRAM_COUNTS_100(
        "Compositing.RenderPass.AppendQuadData.NumMissingTiles",
        num_missing_tiles);
    UMA_HISTOGRAM_COUNTS_100(
        "Compositing.RenderPass.AppendQuadData.NumIncompleteTiles",
        num_incomplete_tiles);
    UMA_HISTOGRAM_COUNTS(
        "Compositing.RenderPass.AppendQuadData."
        "CheckerboardedNoRecordingContentArea",
        checkerboarded_no_recording_content_area);
    UMA_HISTOGRAM_COUNTS(
        "Compositing.RenderPass.AppendQuadData."
        "CheckerboardedNeedRasterContentArea",
        checkerboarded_needs_raster_content_area);
  }
  TRACE_EVENT_END2("cc", "LayerTreeHostImpl::CalculateRenderPasses",
                   "draw_result", draw_result, "missing tiles",
                   num_missing_tiles);
  // Draw has to be successful to not drop the copy request layer.
  // When we have a copy request for a layer, we need to draw even if there
  // would be animating checkerboards, because failing under those conditions
  // triggers a new main frame, which may cause the copy request layer to be
  // destroyed.
  // TODO(weiliangc): Test copy request w/ LayerTreeFrameSink recreation. Would
  // trigger this DCHECK.
  DCHECK(!have_copy_request || draw_result == DRAW_SUCCESS);
  // TODO(crbug.com/564832): This workaround to prevent creating unnecessarily
  // persistent render passes. When a copy request is made, it may force a
  // separate render pass for the layer, which will persist until a new commit
  // removes it. Force a commit after copy requests, to remove extra render
  // passes.
  if (have_copy_request)
    client_->SetNeedsCommitOnImplThread();
  return draw_result;
}
// Forwards the main thread's "fling stopped" notification to the browser
// controls offset manager and, when present, the input handler client.
void LayerTreeHostImpl::MainThreadHasStoppedFlinging() {
  browser_controls_offset_manager_->MainThreadHasStoppedFlinging();
  if (input_handler_client_)
    input_handler_client_->MainThreadHasStoppedFlinging();
}
// Called when an impl-side scroll offset animation ticked: schedule a commit
// so the main thread observes the new offset, and renew the tree priority.
void LayerTreeHostImpl::DidAnimateScrollOffset() {
  client_->SetNeedsCommitOnImplThread();
  client_->RenewTreePriority();
}
// Accumulates externally supplied viewport damage; the union is consumed (and
// reset) in PrepareToDraw().
void LayerTreeHostImpl::SetViewportDamage(const gfx::Rect& damage_rect) {
  viewport_damage_rect_.Union(damage_rect);
}
// Performs an impl-side invalidation by creating a pending tree (unless we
// commit directly to the active tree) and running the post-commit sync-tree
// update on it.
void LayerTreeHostImpl::InvalidateContentOnImplSide() {
  DCHECK(!pending_tree_);
  if (!CommitToActiveTree())
    CreatePendingTree();
  UpdateSyncTreeAfterCommitOrImplSideInvalidation();
}
// Prepares |frame| for drawing: reports memory/layer-count metrics, updates
// draw properties, collects completed tile work, folds accumulated viewport
// damage into the root surface's damage tracker, and delegates render pass
// construction to CalculateRenderPasses(), whose DrawResult is returned.
DrawResult LayerTreeHostImpl::PrepareToDraw(FrameData* frame) {
  TRACE_EVENT1("cc", "LayerTreeHostImpl::PrepareToDraw", "SourceFrameNumber",
               active_tree_->source_frame_number());
  if (input_handler_client_)
    input_handler_client_->ReconcileElasticOverscrollAndRootScroll();
  if (const char* client_name = GetClientNameForMetrics()) {
    // Sum raster-source and GPU tiling memory across all picture layers for
    // the histograms below.
    size_t total_memory_in_bytes = 0;
    size_t total_gpu_memory_for_tilings_in_bytes = 0;
    for (const PictureLayerImpl* layer : active_tree()->picture_layers()) {
      total_memory_in_bytes += layer->GetRasterSource()->GetMemoryUsage();
      total_gpu_memory_for_tilings_in_bytes += layer->GPUMemoryUsageInBytes();
    }
    if (total_memory_in_bytes != 0) {
      // GetClientNameForMetrics only returns one non-null value over the
      // lifetime of the process, so this histogram name is runtime constant.
      UMA_HISTOGRAM_COUNTS(
          base::StringPrintf("Compositing.%s.PictureMemoryUsageKb",
                             client_name),
          base::saturated_cast<int>(total_memory_in_bytes / 1024));
    }
    // GetClientNameForMetrics only returns one non-null value over the lifetime
    // of the process, so these histogram names are runtime constant.
    UMA_HISTOGRAM_CUSTOM_COUNTS(
        base::StringPrintf("Compositing.%s.NumActiveLayers", client_name),
        base::saturated_cast<int>(active_tree_->NumLayers()), 1, 400, 20);
    UMA_HISTOGRAM_CUSTOM_COUNTS(
        base::StringPrintf("Compositing.%s.NumActivePictureLayers",
                           client_name),
        base::saturated_cast<int>(active_tree_->picture_layers().size()), 1,
        400, 20);
    // TODO(yigu): Maybe we should use the same check above. Need to figure out
    // why exactly we skip 0.
    if (!active_tree()->picture_layers().empty()) {
      UMA_HISTOGRAM_CUSTOM_COUNTS(
          base::StringPrintf("Compositing.%s.GPUMemoryForTilingsInKb",
                             client_name),
          base::saturated_cast<int>(total_gpu_memory_for_tilings_in_bytes /
                                    1024),
          1, kGPUMemoryForTilingsLargestBucketKb,
          kGPUMemoryForTilingsBucketCount);
    }
  }
  bool ok = active_tree_->UpdateDrawProperties();
  DCHECK(ok) << "UpdateDrawProperties failed during draw";
  // This will cause NotifyTileStateChanged() to be called for any tiles that
  // completed, which will add damage for visible tiles to the frame for them so
  // they appear as part of the current frame being drawn.
  tile_manager_.CheckForCompletedTasks();
  frame->render_surface_list = &active_tree_->GetRenderSurfaceList();
  frame->render_passes.clear();
  frame->will_draw_layers.clear();
  frame->has_no_damage = false;
  frame->may_contain_video = false;
  if (active_tree_->RootRenderSurface()) {
    // Hand the accumulated external viewport damage to the root surface's
    // damage tracker and reset the accumulator for the next frame.
    gfx::Rect device_viewport_damage_rect = viewport_damage_rect_;
    viewport_damage_rect_ = gfx::Rect();
    active_tree_->RootRenderSurface()->damage_tracker()->AddDamageNextUpdate(
        device_viewport_damage_rect);
  }
  DrawResult draw_result = CalculateRenderPasses(frame);
  if (draw_result != DRAW_SUCCESS) {
    DCHECK(!resourceless_software_draw_);
    return draw_result;
  }
  // If we return DRAW_SUCCESS, then we expect DrawLayers() to be called before
  // this function is called again.
  return draw_result;
}
// Prunes |frame|'s pass list: drops RenderPassDrawQuads that reference a pass
// that no longer exists, removes empty non-root passes, and finally removes
// non-root passes that nothing references (no quads, no copy requests),
// cascading the reference drops front-to-back.
void LayerTreeHostImpl::RemoveRenderPasses(FrameData* frame) {
  // There is always at least a root RenderPass.
  DCHECK_GE(frame->render_passes.size(), 1u);
  // A set of RenderPasses that we have seen.
  base::flat_set<viz::RenderPassId> pass_exists;
  // A set of viz::RenderPassDrawQuads that we have seen (stored by the
  // RenderPasses they refer to).
  base::flat_map<viz::RenderPassId, int> pass_references;
  // Iterate RenderPasses in draw order, removing empty render passes (except
  // the root RenderPass).
  for (size_t i = 0; i < frame->render_passes.size(); ++i) {
    viz::RenderPass* pass = frame->render_passes[i].get();
    // Remove orphan viz::RenderPassDrawQuads.
    for (auto it = pass->quad_list.begin(); it != pass->quad_list.end();) {
      if (it->material != viz::DrawQuad::RENDER_PASS) {
        ++it;
        continue;
      }
      const viz::RenderPassDrawQuad* quad =
          viz::RenderPassDrawQuad::MaterialCast(*it);
      // Keep the quad only if the pass it references was seen earlier in draw
      // order (so it still exists); otherwise the quad is an orphan.
      if (pass_exists.count(quad->render_pass_id)) {
        // The referenced RenderPass exists: record a reference so we know
        // there's a quad using it.
        pass_references[quad->render_pass_id]++;
        ++it;
      } else {
        it = pass->quad_list.EraseAndInvalidateAllPointers(it);
      }
    }
    if (i == frame->render_passes.size() - 1) {
      // Don't remove the root RenderPass.
      break;
    }
    if (pass->quad_list.empty() && pass->copy_requests.empty() &&
        pass->filters.IsEmpty() && pass->background_filters.IsEmpty()) {
      // Remove the pass and decrement |i| to counter the for loop's increment,
      // so we don't skip the next pass in the loop.
      frame->render_passes.erase(frame->render_passes.begin() + i);
      --i;
      continue;
    }
    pass_exists.insert(pass->id);
  }
  // Remove RenderPasses that are not referenced by any draw quads or copy
  // requests (except the root RenderPass).
  for (size_t i = 0; i < frame->render_passes.size() - 1; ++i) {
    // Iterating from the back of the list to the front, skipping over the
    // back-most (root) pass, in order to remove each qualified RenderPass, and
    // drop references to earlier RenderPasses allowing them to be removed too.
    viz::RenderPass* pass =
        frame->render_passes[frame->render_passes.size() - 2 - i].get();
    if (!pass->copy_requests.empty())
      continue;
    if (pass_references[pass->id])
      continue;
    // This pass is unreferenced: release its references to other passes so
    // they may become removable on later iterations, then erase it.
    for (auto it = pass->quad_list.begin(); it != pass->quad_list.end(); ++it) {
      if (it->material != viz::DrawQuad::RENDER_PASS)
        continue;
      const viz::RenderPassDrawQuad* quad =
          viz::RenderPassDrawQuad::MaterialCast(*it);
      pass_references[quad->render_pass_id]--;
    }
    frame->render_passes.erase(frame->render_passes.end() - 2 - i);
    // Keep |i| pointing at the same distance-from-back after the erase.
    --i;
  }
}
// Test hook: applies a zero-byte memory policy, which evicts tile textures.
void LayerTreeHostImpl::EvictTexturesForTesting() {
  UpdateTileManagerMemoryPolicy(ManagedMemoryPolicy(0));
}
// Test hook that is not supported on this class; reaching it is a bug in the
// calling test (NOTREACHED fires).
void LayerTreeHostImpl::BlockNotifyReadyToActivateForTesting(bool block) {
  NOTREACHED();
}
// Test hook that is not supported on this class; reaching it is a bug in the
// calling test (NOTREACHED fires).
void LayerTreeHostImpl::BlockImplSideInvalidationRequestsForTesting(
    bool block) {
  NOTREACHED();
}
// Test hook: detaches layers from all three trees and replaces the active
// tree with a fresh one that preserves the old tree's page scale, browser
// controls ratio and elastic overscroll handles.
void LayerTreeHostImpl::ResetTreesForTesting() {
  if (active_tree_)
    active_tree_->DetachLayers();
  active_tree_ =
      std::make_unique<LayerTreeImpl>(this, active_tree()->page_scale_factor(),
                                      active_tree()->top_controls_shown_ratio(),
                                      active_tree()->elastic_overscroll());
  active_tree_->property_trees()->is_active = true;
  if (pending_tree_)
    pending_tree_->DetachLayers();
  pending_tree_ = nullptr;
  pending_tree_duration_timer_ = nullptr;
  if (recycle_tree_)
    recycle_tree_->DetachLayers();
  recycle_tree_ = nullptr;
}
// Test hook: exposes the FPS counter's current frame number.
size_t LayerTreeHostImpl::SourceAnimationFrameNumberForTesting() const {
  return fps_counter_->current_frame_number();
}
// Applies |policy| to the global tile state: derives hard/soft byte limits
// (zero when invisible), updates the memory-limit policy and resource count
// limit, toggles context visibility / image-decode retention based on the
// hard limit, and pushes the soft limit into the resource pool.
void LayerTreeHostImpl::UpdateTileManagerMemoryPolicy(
    const ManagedMemoryPolicy& policy) {
  if (!resource_pool_)
    return;
  global_tile_state_.hard_memory_limit_in_bytes = 0;
  global_tile_state_.soft_memory_limit_in_bytes = 0;
  if (visible_ && policy.bytes_limit_when_visible > 0) {
    global_tile_state_.hard_memory_limit_in_bytes =
        policy.bytes_limit_when_visible;
    // The soft limit is a configured percentage of the hard limit, used to
    // bound prepaint memory.
    global_tile_state_.soft_memory_limit_in_bytes =
        (static_cast<int64_t>(global_tile_state_.hard_memory_limit_in_bytes) *
         settings_.max_memory_for_prepaint_percentage) /
        100;
  }
  global_tile_state_.memory_limit_policy =
      ManagedMemoryPolicy::PriorityCutoffToTileMemoryLimitPolicy(
          visible_ ? policy.priority_cutoff_when_visible
                   : gpu::MemoryAllocation::CUTOFF_ALLOW_NOTHING);
  global_tile_state_.num_resources_limit = policy.num_resources_limit;
  if (global_tile_state_.hard_memory_limit_in_bytes > 0) {
    // If |global_tile_state_.hard_memory_limit_in_bytes| is greater than 0, we
    // consider our contexts visible. Notify the contexts here. We handle
    // becoming invisible in NotifyAllTileTasksComplete to avoid interrupting
    // running work.
    SetContextVisibility(true);
    // If |global_tile_state_.hard_memory_limit_in_bytes| is greater than 0, we
    // allow the image decode controller to retain resources. We handle the
    // equal to 0 case in NotifyAllTileTasksComplete to avoid interrupting
    // running work.
    if (image_decode_cache_)
      image_decode_cache_->SetShouldAggressivelyFreeResources(false);
  } else {
    // When the memory policy is set to zero, it's important to release any
    // decoded images cached by the tracker. But we can not re-checker any
    // images that have been displayed since the resources, if held by the
    // browser, may be re-used, which is why it's important to maintain the
    // decode policy tracking.
    bool can_clear_decode_policy_tracking = false;
    tile_manager_.ClearCheckerImageTracking(can_clear_decode_policy_tracking);
  }
  DCHECK(resource_pool_);
  resource_pool_->CheckBusyResources();
  // Soft limit is used for resource pool such that memory returns to soft
  // limit after going over.
  resource_pool_->SetResourceUsageLimits(
      global_tile_state_.soft_memory_limit_in_bytes,
      global_tile_state_.num_resources_limit);
  DidModifyTilePriorities();
}
// Marks tile priorities dirty and asks the client to schedule PrepareTiles().
void LayerTreeHostImpl::DidModifyTilePriorities() {
  // Mark priorities as dirty and schedule a PrepareTiles().
  tile_priorities_dirty_ = true;
  client_->SetNeedsPrepareTilesOnImplThread();
}
// Builds a raster tile priority queue over the picture layers of the active
// tree and, when one exists, the pending tree, ordered by |tree_priority| and
// filtered by |type|.
std::unique_ptr<RasterTilePriorityQueue> LayerTreeHostImpl::BuildRasterQueue(
    TreePriority tree_priority,
    RasterTilePriorityQueue::Type type) {
  TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("cc.debug"),
               "LayerTreeHostImpl::BuildRasterQueue");
  // Use an empty layer list when there is no pending tree.
  std::vector<PictureLayerImpl*> pending_layers;
  if (pending_tree_)
    pending_layers = pending_tree_->picture_layers();
  return RasterTilePriorityQueue::Create(active_tree_->picture_layers(),
                                         pending_layers, tree_priority, type);
}
// Builds an eviction tile priority queue over the picture layers of the
// active tree and, when one exists, the pending tree, ordered by
// |tree_priority|.
std::unique_ptr<EvictionTilePriorityQueue>
LayerTreeHostImpl::BuildEvictionQueue(TreePriority tree_priority) {
  TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("cc.debug"),
               "LayerTreeHostImpl::BuildEvictionQueue");
  // Use std::make_unique rather than raw new, consistent with the rest of
  // this file (see ResetTreesForTesting).
  auto queue = std::make_unique<EvictionTilePriorityQueue>();
  queue->Build(active_tree_->picture_layers(),
               pending_tree_ ? pending_tree_->picture_layers()
                             : std::vector<PictureLayerImpl*>(),
               tree_priority);
  return queue;
}
// Records whether a draw is expected soon so the scheduler can plan for it.
void LayerTreeHostImpl::SetIsLikelyToRequireADraw(
    bool is_likely_to_require_a_draw) {
  // Proactively tell the scheduler that we expect to draw within each vsync
  // until we get all the tiles ready to draw. If we happen to miss a required
  // for draw tile here, then we will miss telling the scheduler each frame that
  // we intend to draw so it may make worse scheduling decisions.
  is_likely_to_require_a_draw_ = is_likely_to_require_a_draw;
}
// Returns the color space rasterization should target: the pending tree's
// (most recently updated) when present, else the active tree's, falling back
// to sRGB when software compositing is likely or no valid space is available.
gfx::ColorSpace LayerTreeHostImpl::GetRasterColorSpace() const {
  // If we are likely to software composite the resource, use sRGB because
  // software compositing is unable to perform color conversion.
  if (!layer_tree_frame_sink_ || !layer_tree_frame_sink_->context_provider())
    return gfx::ColorSpace::CreateSRGB();
  // The pending tree will have the most recently updated color space, so
  // prefer that.
  gfx::ColorSpace space;
  if (pending_tree_)
    space = pending_tree_->raster_color_space();
  else if (active_tree_)
    space = active_tree_->raster_color_space();
  // Always specify a color space if color correct rasterization is requested
  // (not specifying a color space indicates that no color conversion is
  // required).
  return space.IsValid() ? space : gfx::ColorSpace::CreateSRGB();
}
// Requests an impl-side invalidation for checker-imaged tiles; the resulting
// sync tree does not need a first-draw-on-activation.
void LayerTreeHostImpl::RequestImplSideInvalidationForCheckerImagedTiles() {
  // When using impl-side invalidation for checker-imaging, a pending tree does
  // not need to be flushed as an independent update through the pipeline.
  bool needs_first_draw_on_activation = false;
  client_->NeedsImplSideInvalidation(needs_first_draw_on_activation);
}
// Returns which frame of |paint_image| should be drawn on |tree|: the image's
// own frame index for non-animating images, otherwise the index chosen by the
// image animation controller.
size_t LayerTreeHostImpl::GetFrameIndexForImage(const PaintImage& paint_image,
                                                WhichTree tree) const {
  DCHECK(image_animation_controller_.has_value());
  if (!paint_image.ShouldAnimate())
    return paint_image.frame_index();
  return image_animation_controller_->GetFrameIndexForImage(
      paint_image.stable_id(), tree);
}
// Stops the pending-tree raster timer and forwards readiness to the client.
void LayerTreeHostImpl::NotifyReadyToActivate() {
  pending_tree_raster_duration_timer_.reset();
  client_->NotifyReadyToActivate();
}
// Forwards draw readiness to the client and clears the optimistic draw flag.
void LayerTreeHostImpl::NotifyReadyToDraw() {
  // Tiles that are ready will cause NotifyTileStateChanged() to be called so we
  // don't need to schedule a draw here. Just stop WillBeginImplFrame() from
  // causing optimistic requests to draw a frame.
  is_likely_to_require_a_draw_ = false;
  client_->NotifyReadyToDraw();
}
// Called when all tile tasks from the last PrepareTiles have finished; frees
// decode-cache resources and hides the contexts when the memory limit is 0
// (the visible/non-zero case is handled in UpdateTileManagerMemoryPolicy).
void LayerTreeHostImpl::NotifyAllTileTasksCompleted() {
  // The tile tasks started by the most recent call to PrepareTiles have
  // completed. Now is a good time to free resources if necessary.
  if (global_tile_state_.hard_memory_limit_in_bytes == 0) {
    // Free image decode controller resources before notifying the
    // contexts of visibility change. This ensures that the imaged decode
    // controller has released all Skia refs at the time Skia's cleanup
    // executes (within worker context's cleanup).
    if (image_decode_cache_)
      image_decode_cache_->SetShouldAggressivelyFreeResources(true);
    SetContextVisibility(false);
  }
}
void LayerTreeHostImpl::NotifyTileStateChanged(const Tile* tile) {
TRACE_EVENT0("cc", "LayerTreeHostImpl::NotifyTileStateChanged");
if (active_tree_) {
LayerImpl* layer_impl =
active_tree_->FindActiveTreeLayerById(tile->layer_id());
if (layer_impl)
layer_impl->NotifyTileStateChanged(tile);
}
if (pending_tree_) {
LayerImpl* layer_impl =
pending_tree_->FindPendingTreeLayerById(tile->layer_id());
if (layer_impl)
layer_impl->NotifyTileStateChanged(tile);
}
// Check for a non-null active tree to avoid doing this during shutdown.
if (active_tree_ && !client_->IsInsideDraw() && tile->required_for_draw()) {
// The LayerImpl::NotifyTileStateChanged() should damage the layer, so this
// redraw will make those tiles be displayed.
SetNeedsRedraw();
}
}
// Applies |policy| and, for synchronous compositing with a zero byte limit,
// synchronously drops and recreates tile manager resources.
void LayerTreeHostImpl::SetMemoryPolicy(const ManagedMemoryPolicy& policy) {
  DCHECK(task_runner_provider_->IsImplThread());
  SetManagedMemoryPolicy(policy);
  // This is short term solution to synchronously drop tile resources when
  // using synchronous compositing to avoid memory usage regression.
  // TODO(boliu): crbug.com/499004 to track removing this.
  if (!policy.bytes_limit_when_visible && resource_pool_ &&
      settings_.using_synchronous_renderer_compositor) {
    ReleaseTileResources();
    CleanUpTileManagerResources();
    // Force a call to NotifyAllTileTasks completed - otherwise this logic may
    // be skipped if no work was enqueued at the time the tile manager was
    // destroyed.
    NotifyAllTileTasksCompleted();
    CreateTileManagerResources();
    RecreateTileResources();
  }
}
// Stores the callback to run on tree activation. Impl thread only.
void LayerTreeHostImpl::SetTreeActivationCallback(
    const base::Closure& callback) {
  DCHECK(task_runner_provider_->IsImplThread());
  tree_activation_callback_ = callback;
}
// Caches |policy|, pushes the effective (actual) policy to the tile manager
// when it changed, and schedules a commit unless the old and new limits both
// already exceed everything we could need.
void LayerTreeHostImpl::SetManagedMemoryPolicy(
    const ManagedMemoryPolicy& policy) {
  if (cached_managed_memory_policy_ == policy)
    return;
  ManagedMemoryPolicy old_policy = ActualManagedMemoryPolicy();
  cached_managed_memory_policy_ = policy;
  ManagedMemoryPolicy actual_policy = ActualManagedMemoryPolicy();
  if (old_policy == actual_policy)
    return;
  UpdateTileManagerMemoryPolicy(actual_policy);
  // If there is already enough memory to draw everything imaginable and the
  // new memory limit does not change this, then do not re-commit. Don't bother
  // skipping commits if this is not visible (commits don't happen when not
  // visible, there will almost always be a commit when this becomes visible).
  bool needs_commit = true;
  if (visible() &&
      actual_policy.bytes_limit_when_visible >= max_memory_needed_bytes_ &&
      old_policy.bytes_limit_when_visible >= max_memory_needed_bytes_ &&
      actual_policy.priority_cutoff_when_visible ==
          old_policy.priority_cutoff_when_visible) {
    needs_commit = false;
  }
  if (needs_commit)
    client_->SetNeedsCommitOnImplThread();
}
// Updates the tile-priority viewport from an externally supplied screen-space
// rect and transform; on change, dirties draw properties and requests a
// full-damage redraw.
void LayerTreeHostImpl::SetExternalTilePriorityConstraints(
    const gfx::Rect& viewport_rect,
    const gfx::Transform& transform) {
  gfx::Rect viewport_rect_for_tile_priority_in_view_space;
  gfx::Transform screen_to_view(gfx::Transform::kSkipInitialization);
  if (transform.GetInverse(&screen_to_view)) {
    // Convert from screen space to view space.
    // (If the transform is singular the rect stays empty.)
    viewport_rect_for_tile_priority_in_view_space =
        MathUtil::ProjectEnclosingClippedRect(screen_to_view, viewport_rect);
  }
  const bool tile_priority_params_changed =
      viewport_rect_for_tile_priority_ !=
      viewport_rect_for_tile_priority_in_view_space;
  viewport_rect_for_tile_priority_ =
      viewport_rect_for_tile_priority_in_view_space;
  if (tile_priority_params_changed) {
    active_tree_->set_needs_update_draw_properties();
    if (pending_tree_)
      pending_tree_->set_needs_update_draw_properties();
    // Compositor, not LayerTreeFrameSink, is responsible for setting damage
    // and triggering redraw for constraint changes.
    SetFullViewportDamage();
    SetNeedsRedraw();
  }
}
// Forwards the display compositor's frame ack to the client.
void LayerTreeHostImpl::DidReceiveCompositorFrameAck() {
  client_->DidReceiveCompositorFrameAckOnImplThread();
}
// Resolves every presentation token up to and including |presentation_token|:
// collects their source frame numbers, erases them from the pending map, and
// notifies the client with the presentation timing.
void LayerTreeHostImpl::DidPresentCompositorFrame(uint32_t presentation_token,
                                                  base::TimeTicks time,
                                                  base::TimeDelta refresh,
                                                  uint32_t flags) {
  std::vector<int> source_frames;
  // The map is keyed by token in increasing order, so every entry with a key
  // <= |presentation_token| has now been presented.
  auto iter = presentation_token_to_frame_.begin();
  for (; iter != presentation_token_to_frame_.end() &&
         iter->first <= presentation_token;
       ++iter) {
    source_frames.push_back(iter->second);
  }
  presentation_token_to_frame_.erase(presentation_token_to_frame_.begin(),
                                     iter);
  if (presentation_token_to_frame_.empty()) {
    // All outstanding tokens resolved; restart token numbering.
    DCHECK_EQ(last_presentation_token_, presentation_token);
    last_presentation_token_ = 0u;
  }
  client_->DidPresentCompositorFrameOnImplThread(source_frames, time, refresh,
                                                 flags);
}
// Currently a no-op: discarded frames are not tracked here.
void LayerTreeHostImpl::DidDiscardCompositorFrame(uint32_t presentation_token) {
}
// Receives resources returned from the parent compositor, logs pool memory
// usage, trims the resource pool, and flushes pending GPU deletions when not
// visible.
void LayerTreeHostImpl::ReclaimResources(
    const std::vector<viz::ReturnedResource>& resources) {
  // TODO(piman): We may need to do some validation on this ack before
  // processing it.
  if (!resource_provider_)
    return;
  resource_provider_->ReceiveReturnsFromParent(resources);
  // In OOM, we now might be able to release more resources that were held
  // because they were exported.
  if (resource_pool_) {
    if (resource_pool_->memory_usage_bytes()) {
      const size_t kMegabyte = 1024 * 1024;
      // This is a good time to log memory usage. A chunk of work has just
      // completed but none of the memory used for that work has likely been
      // freed.
      UMA_HISTOGRAM_MEMORY_MB(
          "Renderer4.ResourcePoolMemoryUsage",
          static_cast<int>(resource_pool_->memory_usage_bytes() / kMegabyte));
    }
    resource_pool_->CheckBusyResources();
    resource_pool_->ReduceResourceUsage();
  }
  // If we're not visible, we likely released resources, so we want to
  // aggressively flush here to make sure those DeleteTextures make it to the
  // GPU process to free up the memory.
  if (!visible_)
    resource_provider_->FlushPendingDeletions();
}
// Handles an externally triggered draw: records the external transform and
// viewport, requests redraw/damage as needed, and invokes the client's draw
// with |resourceless_software_draw_| temporarily set for the scope.
void LayerTreeHostImpl::OnDraw(const gfx::Transform& transform,
                               const gfx::Rect& viewport,
                               bool resourceless_software_draw) {
  DCHECK(!resourceless_software_draw_);
  const bool transform_changed = external_transform_ != transform;
  const bool viewport_changed = external_viewport_ != viewport;
  external_transform_ = transform;
  external_viewport_ = viewport;
  {
    // |resourceless_software_draw_| is only set for the duration of the
    // client draw call below; AutoReset restores it on scope exit.
    base::AutoReset<bool> resourceless_software_draw_reset(
        &resourceless_software_draw_, resourceless_software_draw);
    // For resourceless software draw, always set full damage to ensure they
    // always swap. Otherwise, need to set redraw for any changes to draw
    // parameters.
    if (transform_changed || viewport_changed || resourceless_software_draw_) {
      SetFullViewportDamage();
      SetNeedsRedraw();
      active_tree_->set_needs_update_draw_properties();
    }
    if (resourceless_software_draw) {
      client_->OnCanDrawStateChanged(CanDraw());
    }
    client_->OnDrawForLayerTreeFrameSink(resourceless_software_draw_);
  }
  if (resourceless_software_draw) {
    active_tree_->set_needs_update_draw_properties();
    client_->OnCanDrawStateChanged(CanDraw());
    // This draw may have reset all damage, which would lead to subsequent
    // incorrect hardware draw, so explicitly set damage for next hardware
    // draw as well.
    SetFullViewportDamage();
  }
}
// Re-evaluates CanDraw() and reports the result to the client.
void LayerTreeHostImpl::OnCanDrawStateChangedForTree() {
  client_->OnCanDrawStateChanged(CanDraw());
}
// Assembles the CompositorFrameMetadata for the current active tree: scale
// factors, viewport/scroll geometry, browser controls state, selection,
// referenced surfaces, and an optional presentation token.
viz::CompositorFrameMetadata LayerTreeHostImpl::MakeCompositorFrameMetadata() {
  viz::CompositorFrameMetadata metadata;
  if (active_tree_->request_presentation_time()) {
    // Hand out a fresh token and remember which source frame it maps to so
    // DidPresentCompositorFrame() can report it back.
    metadata.presentation_token = ++last_presentation_token_;
    // Assume there is never a constant stream of requests that triggers
    // overflow.
    CHECK_NE(0u, last_presentation_token_);
    presentation_token_to_frame_[last_presentation_token_] =
        active_tree_->source_frame_number();
  }
  metadata.device_scale_factor = active_tree_->painted_device_scale_factor() *
                                 active_tree_->device_scale_factor();
  metadata.page_scale_factor = active_tree_->current_page_scale_factor();
  metadata.scrollable_viewport_size = active_tree_->ScrollableViewportSize();
  metadata.root_layer_size = active_tree_->ScrollableSize();
  metadata.min_page_scale_factor = active_tree_->min_page_scale_factor();
  metadata.max_page_scale_factor = active_tree_->max_page_scale_factor();
  metadata.top_controls_height =
      browser_controls_offset_manager_->TopControlsHeight();
  metadata.top_controls_shown_ratio =
      browser_controls_offset_manager_->TopControlsShownRatio();
  metadata.bottom_controls_height =
      browser_controls_offset_manager_->BottomControlsHeight();
  metadata.bottom_controls_shown_ratio =
      browser_controls_offset_manager_->BottomControlsShownRatio();
  metadata.root_background_color = active_tree_->background_color();
  metadata.content_source_id = active_tree_->content_source_id();
  active_tree_->GetViewportSelection(&metadata.selection);
  if (const auto* outer_viewport_scroll_node = OuterViewportScrollNode()) {
    metadata.root_overflow_x_hidden =
        !outer_viewport_scroll_node->user_scrollable_horizontal;
    metadata.root_overflow_y_hidden =
        !outer_viewport_scroll_node->user_scrollable_vertical;
  }
  if (GetDrawMode() == DRAW_MODE_RESOURCELESS_SOFTWARE) {
    metadata.is_resourceless_software_draw_with_scroll_or_animation =
        IsActivelyScrolling() || mutator_host_->NeedsTickAnimations();
  }
  for (auto& surface_id : active_tree_->SurfaceLayerIds()) {
    metadata.referenced_surfaces.push_back(surface_id);
  }
  const auto* inner_viewport_scroll_node = InnerViewportScrollNode();
  if (!inner_viewport_scroll_node)
    return metadata;
  // Inner viewport scrollability further restricts overflow visibility.
  metadata.root_overflow_x_hidden |=
      !inner_viewport_scroll_node->user_scrollable_horizontal;
  metadata.root_overflow_y_hidden |=
      !inner_viewport_scroll_node->user_scrollable_vertical;
  // TODO(miletus) : Change the metadata to hold ScrollOffset.
  metadata.root_scroll_offset =
      gfx::ScrollOffsetToVector2dF(active_tree_->TotalScrollOffset());
  return metadata;
}
// Builds the lightweight per-frame metadata that is delivered to the browser
// process alongside the CompositorFrame. Currently this carries only the
// total root scroll offset of the active tree.
RenderFrameMetadata LayerTreeHostImpl::MakeRenderFrameMetadata() {
  RenderFrameMetadata metadata;
  const gfx::ScrollOffset total_offset = active_tree_->TotalScrollOffset();
  // TODO(miletus) : Change the metadata to hold ScrollOffset.
  metadata.root_scroll_offset = gfx::ScrollOffsetToVector2dF(total_offset);
  return metadata;
}
// Submits the prepared frame's render passes to the LayerTreeFrameSink as a
// viz::CompositorFrame. Returns false (after breaking swap promises) when the
// frame has no damage and nothing is submitted; returns true once a frame has
// been submitted.
bool LayerTreeHostImpl::DrawLayers(FrameData* frame) {
  DCHECK(CanDraw());
  DCHECK_EQ(frame->has_no_damage, frame->render_passes.empty());
  TRACE_EVENT0("cc,benchmark", "LayerTreeHostImpl::DrawLayers");
  ResetRequiresHighResToDraw();
  if (frame->has_no_damage) {
    DCHECK(!resourceless_software_draw_);
    TRACE_EVENT_INSTANT0("cc", "EarlyOut_NoDamage", TRACE_EVENT_SCOPE_THREAD);
    // No swap will happen, so resolve pending swap promises as failed.
    active_tree()->BreakSwapPromises(SwapPromise::SWAP_FAILS);
    return false;
  }
  base::TimeTicks frame_time = CurrentBeginFrameArgs().frame_time;
  fps_counter_->SaveTimeStamp(frame_time,
                              !layer_tree_frame_sink_->context_provider());
  rendering_stats_instrumentation_->IncrementFrameCount(1);
  memory_history_->SaveEntry(tile_manager_.memory_stats_from_last_assign());
  if (debug_state_.ShowHudRects()) {
    debug_rect_history_->SaveDebugRectsForCurrentFrame(
        active_tree(), active_tree_->hud_layer(), *frame->render_surface_list,
        debug_state_);
  }
  bool is_new_trace;
  TRACE_EVENT_IS_NEW_TRACE(&is_new_trace);
  if (is_new_trace) {
    // A tracing session just started; have every layer (on both trees) emit
    // its initial tracing state.
    if (pending_tree_) {
      LayerTreeHostCommon::CallFunctionForEveryLayer(
          pending_tree(), [](LayerImpl* layer) { layer->DidBeginTracing(); });
    }
    LayerTreeHostCommon::CallFunctionForEveryLayer(
        active_tree(), [](LayerImpl* layer) { layer->DidBeginTracing(); });
  }
  {
    TRACE_EVENT0("cc", "DrawLayers.FrameViewerTracing");
    TRACE_EVENT_OBJECT_SNAPSHOT_WITH_ID(
        frame_viewer_instrumentation::kCategoryLayerTree,
        "cc::LayerTreeHostImpl", id_, AsValueWithFrame(frame));
  }
  const DrawMode draw_mode = GetDrawMode();
  // Because the contents of the HUD depend on everything else in the frame, the
  // contents of its texture are updated as the last thing before the frame is
  // drawn.
  if (active_tree_->hud_layer()) {
    TRACE_EVENT0("cc", "DrawLayers.UpdateHudTexture");
    active_tree_->hud_layer()->UpdateHudTexture(
        draw_mode, resource_provider_.get(),
        use_gpu_rasterization_ ? layer_tree_frame_sink_->context_provider()
                               : nullptr,
        frame->render_passes);
  }
  viz::CompositorFrameMetadata metadata = MakeCompositorFrameMetadata();
  metadata.may_contain_video = frame->may_contain_video;
  metadata.deadline_in_frames = frame->deadline_in_frames;
  metadata.activation_dependencies = std::move(frame->activation_dependencies);
  RenderFrameMetadata render_frame_metadata = MakeRenderFrameMetadata();
  active_tree()->FinishSwapPromises(&metadata, &render_frame_metadata);
  // Attach a fresh LatencyInfo for this frame, then stamp it with the
  // appropriate begin-frame component depending on which compositor we are.
  metadata.latency_info.emplace_back(ui::SourceEventType::FRAME);
  ui::LatencyInfo& new_latency_info = metadata.latency_info.back();
  if (CommitToActiveTree()) {
    new_latency_info.AddLatencyNumberWithTimestamp(
        ui::LATENCY_BEGIN_FRAME_UI_COMPOSITOR_COMPONENT, 0, 0, frame_time, 1);
  } else {
    new_latency_info.AddLatencyNumberWithTimestamp(
        ui::LATENCY_BEGIN_FRAME_RENDERER_COMPOSITOR_COMPONENT, 0, 0, frame_time,
        1);
    base::TimeTicks draw_time = base::TimeTicks::Now();
    // Stamp every in-flight latency record with the renderer swap time.
    for (auto& latency : metadata.latency_info) {
      latency.AddLatencyNumberWithTimestamp(
          ui::INPUT_EVENT_LATENCY_RENDERER_SWAP_COMPONENT, 0, 0, draw_time, 1);
    }
  }
  ui::LatencyInfo::TraceIntermediateFlowEvents(metadata.latency_info,
                                               "SwapBuffers");
  // Collect all resource ids in the render passes into a single array.
  ResourceProvider::ResourceIdArray resources;
  for (const auto& render_pass : frame->render_passes) {
    for (auto* quad : render_pass->quad_list) {
      for (viz::ResourceId resource_id : quad->resources)
        resources.push_back(resource_id);
    }
  }
  DCHECK_LE(viz::BeginFrameArgs::kStartingFrameNumber,
            frame->begin_frame_ack.sequence_number);
  metadata.begin_frame_ack = frame->begin_frame_ack;
  viz::CompositorFrame compositor_frame;
  compositor_frame.metadata = std::move(metadata);
  resource_provider_->PrepareSendToParent(resources,
                                          &compositor_frame.resource_list);
  compositor_frame.render_pass_list = std::move(frame->render_passes);
  // TODO(fsamuel): Once all clients get their viz::LocalSurfaceId from their
  // parent, the viz::LocalSurfaceId should hang off CompositorFrameMetadata.
  if (settings_.enable_surface_synchronization &&
      active_tree()->local_surface_id().is_valid()) {
    layer_tree_frame_sink_->SetLocalSurfaceId(
        active_tree()->local_surface_id());
    last_draw_local_surface_id_ = active_tree()->local_surface_id();
  }
  // Per-client UMA: total quad count submitted in this frame.
  if (const char* client_name = GetClientNameForMetrics()) {
    size_t total_quad_count = 0;
    for (const auto& pass : compositor_frame.render_pass_list)
      total_quad_count += pass->quad_list.size();
    UMA_HISTOGRAM_COUNTS_1000(
        base::StringPrintf("Compositing.%s.CompositorFrame.Quads", client_name),
        total_quad_count);
  }
  layer_tree_frame_sink_->SubmitCompositorFrame(std::move(compositor_frame));
  // Clears the list of swap promises after calling DidSwap on each of them to
  // signal that the swap is over.
  active_tree()->ClearSwapPromises();
  // The next frame should start by assuming nothing has changed, and changes
  // are noted as they occur.
  // TODO(boliu): If we did a temporary software renderer frame, propagate the
  // damage forward to the next frame.
  for (size_t i = 0; i < frame->render_surface_list->size(); i++) {
    auto* surface = (*frame->render_surface_list)[i];
    surface->damage_tracker()->DidDrawDamagedArea();
  }
  active_tree_->ResetAllChangeTracking();
  active_tree_->set_has_ever_been_drawn(true);
  devtools_instrumentation::DidDrawFrame(id_);
  benchmark_instrumentation::IssueImplThreadRenderingStatsEvent(
      rendering_stats_instrumentation_->TakeImplThreadRenderingStats());
  return true;
}
// Notifies every layer that drew this frame, then every video frame
// controller, that the draw has completed.
void LayerTreeHostImpl::DidDrawAllLayers(const FrameData& frame) {
  for (auto* layer : frame.will_draw_layers)
    layer->DidDraw(resource_provider_.get());
  for (auto* controller : video_frame_controllers_)
    controller->DidDrawFrame();
}
// Returns the MSAA sample count to request for gpu rasterization. A setting
// of -1 means "choose automatically" based on the device scale factor.
int LayerTreeHostImpl::RequestedMSAASampleCount() const {
  if (settings_.gpu_rasterization_msaa_sample_count != -1)
    return settings_.gpu_rasterization_msaa_sample_count;
  // Use the most up-to-date version of device_scale_factor that we have.
  const float dsf = pending_tree_ ? pending_tree_->device_scale_factor()
                                  : active_tree_->device_scale_factor();
  // High-dpi screens get fewer samples; standard-dpi screens get more.
  return (dsf >= 2.0f) ? 4 : 8;
}
// Records whether content has triggered the gpu-raster heuristic; a change
// marks the rasterization status for recomputation.
void LayerTreeHostImpl::SetHasGpuRasterizationTrigger(bool flag) {
  if (has_gpu_rasterization_trigger_ == flag)
    return;
  has_gpu_rasterization_trigger_ = flag;
  need_update_gpu_rasterization_status_ = true;
}
// Records whether content contains paths that are slow to rasterize on the
// GPU; a change marks the rasterization status for recomputation.
void LayerTreeHostImpl::SetContentHasSlowPaths(bool flag) {
  if (content_has_slow_paths_ == flag)
    return;
  content_has_slow_paths_ = flag;
  need_update_gpu_rasterization_status_ = true;
}
// Records whether content contains non-anti-aliased paint; a change marks the
// rasterization status for recomputation.
void LayerTreeHostImpl::SetContentHasNonAAPaint(bool flag) {
  if (content_has_non_aa_paint_ == flag)
    return;
  content_has_non_aa_paint_ = flag;
  need_update_gpu_rasterization_status_ = true;
}
// Queries the frame sink's contexts for gpu-raster capabilities. All four
// out-params are reset to off/0 up front, so every early return leaves them
// in a consistent "unsupported" state.
void LayerTreeHostImpl::GetGpuRasterizationCapabilities(
    bool* gpu_rasterization_enabled,
    bool* gpu_rasterization_supported,
    int* max_msaa_samples,
    bool* supports_disable_msaa) {
  *gpu_rasterization_enabled = false;
  *gpu_rasterization_supported = false;
  *max_msaa_samples = 0;
  *supports_disable_msaa = false;
  // Both a compositor context and a worker context are required.
  if (!(layer_tree_frame_sink_ && layer_tree_frame_sink_->context_provider() &&
        layer_tree_frame_sink_->worker_context_provider()))
    return;
  viz::RasterContextProvider* context_provider =
      layer_tree_frame_sink_->worker_context_provider();
  // The worker context may be used from multiple threads; hold its lock while
  // querying capabilities and the GrContext.
  viz::RasterContextProvider::ScopedRasterContextLock scoped_context(
      context_provider);
  const auto& caps = context_provider->ContextCapabilities();
  *gpu_rasterization_enabled = caps.gpu_rasterization;
  if (!*gpu_rasterization_enabled && !settings_.gpu_rasterization_forced)
    return;
  // Do not check GrContext above. It is lazy-created, and we only want to
  // create it if it might be used.
  GrContext* gr_context = context_provider->GrContext();
  *gpu_rasterization_supported = !!gr_context;
  if (!*gpu_rasterization_supported)
    return;
  *supports_disable_msaa = caps.multisample_compatibility;
  if (!caps.msaa_is_slow && !caps.avoid_stencil_buffers) {
    // Skia may blacklist MSAA independently of Chrome. Query skia for the
    // requested sample count. This will return 0 if MSAA is unsupported.
    *max_msaa_samples = gr_context->caps()->getSampleCount(
        caps.max_samples, ToGrPixelConfig(settings_.preferred_tile_format));
  }
}
// Recomputes |use_gpu_rasterization_| and |use_msaa_| from the current
// settings, content flags and device capabilities. Returns true iff either
// value changed; callers then rebuild raster resources.
bool LayerTreeHostImpl::UpdateGpuRasterizationStatus() {
  if (!need_update_gpu_rasterization_status_)
    return false;
  need_update_gpu_rasterization_status_ = false;
  // TODO(danakj): Can we avoid having this run when there's no
  // LayerTreeFrameSink?
  // For now just early out and leave things unchanged, we'll come back here
  // when we get a LayerTreeFrameSink.
  if (!layer_tree_frame_sink_)
    return false;
  int requested_msaa_samples = RequestedMSAASampleCount();
  int max_msaa_samples = 0;
  bool gpu_rasterization_enabled = false;
  bool gpu_rasterization_supported = false;
  bool supports_disable_msaa = false;
  GetGpuRasterizationCapabilities(&gpu_rasterization_enabled,
                                  &gpu_rasterization_supported,
                                  &max_msaa_samples, &supports_disable_msaa);
  bool use_gpu = false;
  bool use_msaa = false;
  // MSAA is viable only if the device honors the requested sample count and
  // either the content has no non-AA paint or per-texture MSAA disable is
  // supported.
  bool using_msaa_for_slow_paths =
      requested_msaa_samples > 0 &&
      max_msaa_samples >= requested_msaa_samples &&
      (!content_has_non_aa_paint_ || supports_disable_msaa);
  if (settings_.gpu_rasterization_forced) {
    use_gpu = true;
    gpu_rasterization_status_ = GpuRasterizationStatus::ON_FORCED;
    use_msaa = content_has_slow_paths_ && using_msaa_for_slow_paths;
    if (use_msaa) {
      gpu_rasterization_status_ = GpuRasterizationStatus::MSAA_CONTENT;
    }
  } else if (!gpu_rasterization_enabled) {
    gpu_rasterization_status_ = GpuRasterizationStatus::OFF_DEVICE;
  } else if (!has_gpu_rasterization_trigger_) {
    gpu_rasterization_status_ = GpuRasterizationStatus::OFF_VIEWPORT;
  } else if (content_has_slow_paths_ && using_msaa_for_slow_paths) {
    use_gpu = use_msaa = true;
    gpu_rasterization_status_ = GpuRasterizationStatus::MSAA_CONTENT;
  } else {
    use_gpu = true;
    gpu_rasterization_status_ = GpuRasterizationStatus::ON;
  }
  if (use_gpu && !use_gpu_rasterization_) {
    if (!gpu_rasterization_supported) {
      // If GPU rasterization is unusable, e.g. if GlContext could not
      // be created due to losing the GL context, force use of software
      // raster.
      use_gpu = false;
      use_msaa = false;
      gpu_rasterization_status_ = GpuRasterizationStatus::OFF_DEVICE;
    }
  }
  if (use_gpu == use_gpu_rasterization_ && use_msaa == use_msaa_)
    return false;
  // Note that this must happen first, in case the rest of the calls want to
  // query the new state of |use_gpu_rasterization_|.
  use_gpu_rasterization_ = use_gpu;
  use_msaa_ = use_msaa;
  return true;
}
// If the gpu-rasterization mode changed, drops tile resources and rebuilds
// the tile manager so raster happens with the newly selected rasterizer.
void LayerTreeHostImpl::UpdateTreeResourcesForGpuRasterizationIfNeeded() {
  if (!UpdateGpuRasterizationStatus())
    return;
  // Clean up and replace existing tile manager with another one that uses
  // appropriate rasterizer. Only do this however if we already have a
  // resource pool, since otherwise we might not be able to create a new
  // one.
  ReleaseTileResources();
  if (resource_pool_) {
    CleanUpTileManagerResources();
    CreateTileManagerResources();
  }
  RecreateTileResources();
  // We have released tilings for both active and pending tree.
  // We would not have any content to draw until the pending tree is activated.
  // Prevent the active tree from drawing until activation.
  // TODO(crbug.com/469175): Replace with RequiresHighResToDraw.
  SetRequiresHighResToDraw();
}
// Marks the start of an impl frame: records the BeginFrameArgs, delivers
// queued input, ticks animations and video frame controllers.
void LayerTreeHostImpl::WillBeginImplFrame(const viz::BeginFrameArgs& args) {
  current_begin_frame_tracker_.Start(args);
  if (is_likely_to_require_a_draw_) {
    // Optimistically schedule a draw. This will let us expect the tile manager
    // to complete its work so that we can draw new tiles within the impl frame
    // we are beginning now.
    SetNeedsRedraw();
  }
  if (input_handler_client_)
    input_handler_client_->DeliverInputForBeginFrame();
  Animate();
  for (auto* it : video_frame_controllers_)
    it->OnBeginFrame(args);
  impl_thread_phase_ = ImplThreadPhase::INSIDE_IMPL_FRAME;
}
// Marks the end of the current impl frame and lets the decoded-image tracker
// age out completed work.
void LayerTreeHostImpl::DidFinishImplFrame() {
  impl_thread_phase_ = ImplThreadPhase::IDLE;
  current_begin_frame_tracker_.Finish();
  decoded_image_tracker_.NotifyFrameFinished();
}
// Forwards a "no frame produced" ack to the frame sink, if one is bound.
void LayerTreeHostImpl::DidNotProduceFrame(const viz::BeginFrameAck& ack) {
  if (!layer_tree_frame_sink_)
    return;
  layer_tree_frame_sink_->DidNotProduceFrame(ack);
}
// Resizes the viewport container layers to account for the portion of the
// browser controls currently showing, so the viewport matches what Blink laid
// out minus/plus the controls' on-screen height.
void LayerTreeHostImpl::UpdateViewportContainerSizes() {
  LayerImpl* inner_container = active_tree_->InnerViewportContainerLayer();
  LayerImpl* outer_container = active_tree_->OuterViewportContainerLayer();
  if (!inner_container)
    return;
  ViewportAnchor anchor(InnerViewportScrollLayer(), OuterViewportScrollLayer());
  // Layout height contribution of the top controls: only counted when Blink
  // shrinks its layout size for them.
  float top_controls_layout_height =
      active_tree_->browser_controls_shrink_blink_size()
          ? active_tree_->top_controls_height()
          : 0.f;
  float delta_from_top_controls =
      top_controls_layout_height -
      browser_controls_offset_manager_->ContentTopOffset();
  float bottom_controls_layout_height =
      active_tree_->browser_controls_shrink_blink_size()
          ? active_tree_->bottom_controls_height()
          : 0.f;
  delta_from_top_controls +=
      bottom_controls_layout_height -
      browser_controls_offset_manager_->ContentBottomOffset();
  // Adjust the viewport layers by shrinking/expanding the container to account
  // for changes in the size (e.g. browser controls) since the last resize from
  // Blink.
  gfx::Vector2dF amount_to_expand(0.f, delta_from_top_controls);
  inner_container->SetViewportBoundsDelta(amount_to_expand);
  if (outer_container && !outer_container->BoundsForScrolling().IsEmpty()) {
    // Adjust the outer viewport container as well, since adjusting only the
    // inner may cause its bounds to exceed those of the outer, causing scroll
    // clamping.
    gfx::Vector2dF amount_to_expand_scaled = gfx::ScaleVector2d(
        amount_to_expand, 1.f / active_tree_->min_page_scale_factor());
    outer_container->SetViewportBoundsDelta(amount_to_expand_scaled);
    active_tree_->InnerViewportScrollLayer()->SetViewportBoundsDelta(
        amount_to_expand_scaled);
    anchor.ResetViewportToAnchoredPosition();
  }
}
// Runs all queued raster tasks to completion on the current thread.
void LayerTreeHostImpl::SynchronouslyInitializeAllTiles() {
  // Only valid for the single-threaded non-scheduled/synchronous case
  // using the zero copy raster worker pool.
  single_thread_synchronous_task_graph_runner_->RunUntilIdle();
}
// Handles loss of the frame sink's context; idempotent so that the two loss
// notification paths do not double-report.
void LayerTreeHostImpl::DidLoseLayerTreeFrameSink() {
  // Check that we haven't already detected context loss because we get it via
  // two paths: compositor context loss on the compositor thread and worker
  // context loss posted from main thread to compositor thread. We do not want
  // to reset the context recovery state in the scheduler.
  if (!has_valid_layer_tree_frame_sink_)
    return;
  has_valid_layer_tree_frame_sink_ = false;
  if (resource_provider_)
    resource_provider_->DidLoseContextProvider();
  client_->DidLoseLayerTreeFrameSinkOnImplThread();
}
// True when the active tree has an inner viewport scroll layer.
bool LayerTreeHostImpl::HaveRootScrollLayer() const {
  return InnerViewportScrollLayer() != nullptr;
}
// Convenience forwarder to the active tree's inner viewport container layer.
LayerImpl* LayerTreeHostImpl::InnerViewportContainerLayer() const {
  return active_tree_->InnerViewportContainerLayer();
}
// Convenience forwarder to the active tree's inner viewport scroll layer.
LayerImpl* LayerTreeHostImpl::InnerViewportScrollLayer() const {
  return active_tree_->InnerViewportScrollLayer();
}
// Maps the inner viewport scroll layer (if any) to its node in the active
// tree's scroll tree; returns nullptr when there is no inner viewport.
ScrollNode* LayerTreeHostImpl::InnerViewportScrollNode() const {
  const LayerImpl* scroll_layer = InnerViewportScrollLayer();
  if (!scroll_layer)
    return nullptr;
  return active_tree_->property_trees()->scroll_tree.Node(
      scroll_layer->scroll_tree_index());
}
// Convenience forwarder to the active tree's outer viewport container layer.
LayerImpl* LayerTreeHostImpl::OuterViewportContainerLayer() const {
  return active_tree_->OuterViewportContainerLayer();
}
// Convenience forwarder to the active tree's outer viewport scroll layer.
LayerImpl* LayerTreeHostImpl::OuterViewportScrollLayer() const {
  return active_tree_->OuterViewportScrollLayer();
}
// Maps the viewport's main scroll layer (if any) to its node in the active
// tree's scroll tree; returns nullptr when there is none.
// TODO(pdr): Refactor this to work like InnerViewportScrollNode and access
// OuterViewportScrollLayer instead of MainScrollLayer.
ScrollNode* LayerTreeHostImpl::OuterViewportScrollNode() const {
  auto* main_scroll_layer = viewport()->MainScrollLayer();
  if (!main_scroll_layer)
    return nullptr;
  return active_tree_->property_trees()->scroll_tree.Node(
      main_scroll_layer->scroll_tree_index());
}
// Mutable accessor for the active tree's currently scrolling node.
ScrollNode* LayerTreeHostImpl::CurrentlyScrollingNode() {
  return active_tree()->CurrentlyScrollingNode();
}
// Const accessor for the active tree's currently scrolling node.
const ScrollNode* LayerTreeHostImpl::CurrentlyScrollingNode() const {
  return active_tree()->CurrentlyScrollingNode();
}
// Returns whether a compositor-driven scroll is in progress.
bool LayerTreeHostImpl::IsActivelyScrolling() const {
  if (!CurrentlyScrollingNode())
    return false;
  // On Android WebView root flings are controlled by the application,
  // so the compositor does not animate them and can't tell if they
  // are actually animating. So assume there are none.
  const bool ignored_root_scroll =
      settings_.ignore_root_layer_flings && IsCurrentlyScrollingViewport();
  return !ignored_root_scroll && did_lock_scrolling_layer_;
}
// Creates the pending tree for the next commit, reusing the recycle tree when
// one is available, and starts the pending-tree duration histogram timer.
void LayerTreeHostImpl::CreatePendingTree() {
  CHECK(!pending_tree_);
  if (recycle_tree_) {
    // Reuse the previously recycled tree rather than allocating a new one.
    recycle_tree_.swap(pending_tree_);
  } else {
    pending_tree_ = std::make_unique<LayerTreeImpl>(
        this, active_tree()->page_scale_factor(),
        active_tree()->top_controls_shown_ratio(),
        active_tree()->elastic_overscroll());
  }
  client_->OnCanDrawStateChanged(CanDraw());
  TRACE_EVENT_ASYNC_BEGIN0("cc", "PendingTree:waiting", pending_tree_.get());
  DCHECK(!pending_tree_duration_timer_);
  pending_tree_duration_timer_.reset(new PendingTreeDurationHistogramTimer());
}
void LayerTreeHostImpl::PushScrollbarOpacitiesFromActiveToPending() {
if (!active_tree())
return;
for (auto& pair : scrollbar_animation_controllers_) {
for (auto* scrollbar : pair.second->Scrollbars()) {
if (const EffectNode* source_effect_node =
active_tree()
->property_trees()
->effect_tree.FindNodeFromElementId(
scrollbar->element_id())) {
if (EffectNode* target_effect_node =
pending_tree()
->property_trees()
->effect_tree.FindNodeFromElementId(
scrollbar->element_id())) {
DCHECK(target_effect_node);
float source_opacity = source_effect_node->opacity;
float target_opacity = target_effect_node->opacity;
if (source_opacity == target_opacity)
continue;
target_effect_node->opacity = source_opacity;
pending_tree()->property_trees()->effect_tree.set_needs_update(true);
}
}
}
}
}
// Activates the sync (pending) tree: synchronizes trees, property trees and
// layer properties into the active tree, recycles the pending tree, and then
// notifies dependent components. When committing directly to the active tree
// there is no pending tree and only the UI resource queue is processed.
//
// Fix vs. the previous version: tile_manager_.DidActivateSyncTree() was
// called twice (once before active_tree_->DidBecomeActive() and again after
// DidModifyTilePriorities()); the redundant second call is removed.
void LayerTreeHostImpl::ActivateSyncTree() {
  if (pending_tree_) {
    TRACE_EVENT_ASYNC_END0("cc", "PendingTree:waiting", pending_tree_.get());
    active_tree_->lifecycle().AdvanceTo(LayerTreeLifecycle::kBeginningSync);
    DCHECK(pending_tree_duration_timer_);
    // Reset will call the destructor and log the timer histogram.
    pending_tree_duration_timer_.reset();
    // In most cases, this will be reset in NotifyReadyToActivate, since we
    // activate the pending tree only when its ready. But an activation may be
    // forced, in the case of a context loss for instance, so reset it here as
    // well.
    pending_tree_raster_duration_timer_.reset();
    // Process any requests in the UI resource queue. The request queue is
    // given in LayerTreeHost::FinishCommitOnImplThread. This must take place
    // before the swap.
    pending_tree_->ProcessUIResourceRequestQueue();
    if (pending_tree_->needs_full_tree_sync()) {
      TreeSynchronizer::SynchronizeTrees(pending_tree_.get(),
                                         active_tree_.get());
    }
    PushScrollbarOpacitiesFromActiveToPending();
    pending_tree_->PushPropertyTreesTo(active_tree_.get());
    active_tree_->lifecycle().AdvanceTo(
        LayerTreeLifecycle::kSyncedPropertyTrees);
    TreeSynchronizer::PushLayerProperties(pending_tree(), active_tree());
    active_tree_->lifecycle().AdvanceTo(
        LayerTreeLifecycle::kSyncedLayerProperties);
    pending_tree_->PushPropertiesTo(active_tree_.get());
    if (!pending_tree_->LayerListIsEmpty())
      pending_tree_->property_trees()->ResetAllChangeTracking();
    active_tree_->lifecycle().AdvanceTo(LayerTreeLifecycle::kNotSyncing);
    // Now that we've synced everything from the pending tree to the active
    // tree, rename the pending tree the recycle tree so we can reuse it on the
    // next sync.
    DCHECK(!recycle_tree_);
    pending_tree_.swap(recycle_tree_);
    // If we commit to the active tree directly, this is already done during
    // commit.
    ActivateAnimations();
  } else {
    active_tree_->ProcessUIResourceRequestQueue();
  }
  UpdateViewportContainerSizes();
  // Inform the ImageAnimationController and TileManager before dirtying tile
  // priorities. Since these components cache tree specific state, these should
  // be updated before DidModifyTilePriorities which can synchronously issue a
  // PrepareTiles.
  if (image_animation_controller_)
    image_animation_controller_->DidActivate();
  tile_manager_.DidActivateSyncTree();
  active_tree_->DidBecomeActive();
  client_->RenewTreePriority();
  // If we have any picture layers, then by activating we also modified tile
  // priorities.
  if (!active_tree_->picture_layers().empty())
    DidModifyTilePriorities();
  client_->OnCanDrawStateChanged(CanDraw());
  client_->DidActivateSyncTree();
  if (!tree_activation_callback_.is_null())
    tree_activation_callback_.Run();
  std::unique_ptr<PendingPageScaleAnimation> pending_page_scale_animation =
      active_tree_->TakePendingPageScaleAnimation();
  if (pending_page_scale_animation) {
    StartPageScaleAnimation(pending_page_scale_animation->target_offset,
                            pending_page_scale_animation->use_anchor,
                            pending_page_scale_animation->scale,
                            pending_page_scale_animation->duration);
  }
  // Activation can change the root scroll offset, so inform the synchronous
  // input handler.
  UpdateRootLayerStateForSynchronousInputHandler();
}
// Toggles visibility on the impl thread: on show, forces high-res tiles and
// (if there was a prior frame) a full redraw; on hide, evicts UI resources
// and lets the tile manager release tiles.
void LayerTreeHostImpl::SetVisible(bool visible) {
  DCHECK(task_runner_provider_->IsImplThread());
  if (visible_ == visible)
    return;
  visible_ = visible;
  DidVisibilityChange(this, visible_);
  UpdateTileManagerMemoryPolicy(ActualManagedMemoryPolicy());
  // If we just became visible, we have to ensure that we draw high res tiles,
  // to prevent checkerboard/low res flashes.
  if (visible_) {
    // TODO(crbug.com/469175): Replace with RequiresHighResToDraw.
    SetRequiresHighResToDraw();
    // Prior CompositorFrame may have been discarded and thus we need to ensure
    // that we submit a new one, even if there are no tiles. Therefore, force a
    // full viewport redraw. However, this is unnecessary when we become visible
    // for the first time (before the first commit) as there is no prior
    // CompositorFrame to replace. We can safely use |!active_tree_->
    // LayerListIsEmpty()| as a proxy for this, because we wouldn't be able to
    // draw anything even if this is not the first time we become visible.
    if (!active_tree_->LayerListIsEmpty()) {
      SetFullViewportDamage();
      SetNeedsRedraw();
    }
  } else {
    EvictAllUIResources();
    // Call PrepareTiles to evict tiles when we become invisible.
    PrepareTiles();
  }
}
// Requests a single impl frame from the scheduler, notifying swap promise
// monitors first.
void LayerTreeHostImpl::SetNeedsOneBeginImplFrame() {
  // TODO(miletus): This is just the compositor-thread-side call to the
  // SwapPromiseMonitor to say something happened that may cause a swap in the
  // future. The name should not refer to SetNeedsRedraw but it does for now.
  NotifySwapPromiseMonitorsOfSetNeedsRedraw();
  client_->SetNeedsOneBeginImplFrameOnImplThread();
}
// Requests a redraw from the scheduler, notifying swap promise monitors
// first.
void LayerTreeHostImpl::SetNeedsRedraw() {
  NotifySwapPromiseMonitorsOfSetNeedsRedraw();
  client_->SetNeedsRedrawOnImplThread();
}
// Returns the cached memory policy, with the visible-priority cutoff adjusted
// for the rasterize-only-visible debug mode or for gpu rasterization.
ManagedMemoryPolicy LayerTreeHostImpl::ActualManagedMemoryPolicy() const {
  ManagedMemoryPolicy policy = cached_managed_memory_policy_;
  if (debug_state_.rasterize_only_visible_content) {
    policy.priority_cutoff_when_visible =
        gpu::MemoryAllocation::CUTOFF_ALLOW_REQUIRED_ONLY;
    return policy;
  }
  if (use_gpu_rasterization()) {
    policy.priority_cutoff_when_visible =
        gpu::MemoryAllocation::CUTOFF_ALLOW_NICE_TO_HAVE;
  }
  return policy;
}
// Releases all resources held by the active, pending and recycle trees, plus
// all UI resources.
void LayerTreeHostImpl::ReleaseTreeResources() {
  active_tree_->ReleaseResources();
  if (pending_tree_)
    pending_tree_->ReleaseResources();
  if (recycle_tree_)
    recycle_tree_->ReleaseResources();
  EvictAllUIResources();
}
// Releases tile resources on all three trees.
void LayerTreeHostImpl::ReleaseTileResources() {
  active_tree_->ReleaseTileResources();
  if (pending_tree_)
    pending_tree_->ReleaseTileResources();
  if (recycle_tree_)
    recycle_tree_->ReleaseTileResources();
  // Need to update tiles again in order to kick off raster work for all the
  // tiles that are dropped here.
  active_tree_->set_needs_update_draw_properties();
}
// Recreates tile resources on all three trees (counterpart of
// ReleaseTileResources).
void LayerTreeHostImpl::RecreateTileResources() {
  active_tree_->RecreateTileResources();
  if (pending_tree_)
    pending_tree_->RecreateTileResources();
  if (recycle_tree_)
    recycle_tree_->RecreateTileResources();
}
// (Re)creates the raster buffer provider, resource pool and image decode
// cache appropriate for the current raster mode, then wires them into the
// tile manager.
void LayerTreeHostImpl::CreateTileManagerResources() {
  CreateResourceAndRasterBufferProvider(&raster_buffer_provider_,
                                        &resource_pool_);
  // Pick the decode cache matching the raster path: gpu caches decode on the
  // worker context, software decodes into CPU memory.
  if (use_gpu_rasterization_) {
    image_decode_cache_ = std::make_unique<GpuImageDecodeCache>(
        layer_tree_frame_sink_->worker_context_provider(),
        settings_.enable_oop_rasterization,
        viz::ResourceFormatToClosestSkColorType(
            settings_.preferred_tile_format),
        settings_.decoded_image_working_set_budget_bytes);
  } else {
    image_decode_cache_ = std::make_unique<SoftwareImageDecodeCache>(
        viz::ResourceFormatToClosestSkColorType(
            settings_.preferred_tile_format),
        settings_.decoded_image_working_set_budget_bytes);
  }
  // Pass the single-threaded synchronous task graph runner to the worker pool
  // if we're in synchronous single-threaded mode.
  TaskGraphRunner* task_graph_runner = task_graph_runner_;
  if (is_synchronous_single_threaded_) {
    DCHECK(!single_thread_synchronous_task_graph_runner_);
    single_thread_synchronous_task_graph_runner_.reset(
        new SynchronousTaskGraphRunner);
    task_graph_runner = single_thread_synchronous_task_graph_runner_.get();
  }
  // TODO(vmpstr): Initialize tile task limit at ctor time.
  tile_manager_.SetResources(resource_pool_.get(), image_decode_cache_.get(),
                             task_graph_runner, raster_buffer_provider_.get(),
                             is_synchronous_single_threaded_
                                 ? std::numeric_limits<size_t>::max()
                                 : settings_.scheduled_raster_task_limit,
                             use_gpu_rasterization_);
  tile_manager_.SetCheckerImagingForceDisabled(
      settings_.only_checker_images_with_gpu_raster && !use_gpu_rasterization_);
  UpdateTileManagerMemoryPolicy(ActualManagedMemoryPolicy());
}
// Selects and constructs the ResourcePool / RasterBufferProvider pair for the
// current configuration, in priority order: software (no compositor context),
// gpu raster, zero-copy, then one-copy.
void LayerTreeHostImpl::CreateResourceAndRasterBufferProvider(
    std::unique_ptr<RasterBufferProvider>* raster_buffer_provider,
    std::unique_ptr<ResourcePool>* resource_pool) {
  DCHECK(GetTaskRunner());
  // TODO(vmpstr): Make this a DCHECK (or remove) when crbug.com/419086 is
  // resolved.
  CHECK(resource_provider_);
  viz::ContextProvider* compositor_context_provider =
      layer_tree_frame_sink_->context_provider();
  if (!compositor_context_provider) {
    // This ResourcePool will vend software resources.
    *resource_pool = std::make_unique<ResourcePool>(
        resource_provider_.get(), GetTaskRunner(),
        ResourcePool::kDefaultExpirationDelay,
        settings_.disallow_non_exact_resource_reuse);
    *raster_buffer_provider =
        BitmapRasterBufferProvider::Create(resource_provider_.get());
    return;
  }
  viz::RasterContextProvider* worker_context_provider =
      layer_tree_frame_sink_->worker_context_provider();
  if (use_gpu_rasterization_) {
    DCHECK(worker_context_provider);
    // This ResourcePool will vend gpu resources optimized for binding as a
    // framebuffer for gpu raster.
    *resource_pool = std::make_unique<ResourcePool>(
        resource_provider_.get(), GetTaskRunner(),
        viz::ResourceTextureHint::kFramebuffer,
        ResourcePool::kDefaultExpirationDelay,
        settings_.disallow_non_exact_resource_reuse);
    int msaa_sample_count = use_msaa_ ? RequestedMSAASampleCount() : 0;
    // The worker context must support oop raster to enable oop rasterization.
    bool oop_raster_enabled = settings_.enable_oop_rasterization;
    if (oop_raster_enabled) {
      viz::RasterContextProvider::ScopedRasterContextLock hold(
          worker_context_provider);
      oop_raster_enabled &=
          worker_context_provider->ContextCapabilities().supports_oop_raster;
    }
    *raster_buffer_provider = std::make_unique<GpuRasterBufferProvider>(
        compositor_context_provider, worker_context_provider,
        resource_provider_.get(), settings_.use_distance_field_text,
        msaa_sample_count, settings_.preferred_tile_format, oop_raster_enabled);
    return;
  }
  bool use_zero_copy = settings_.use_zero_copy;
  // TODO(reveman): Remove this when mojo supports worker contexts.
  // crbug.com/522440
  if (!use_zero_copy && !worker_context_provider) {
    LOG(ERROR)
        << "Forcing zero-copy tile initialization as worker context is missing";
    use_zero_copy = true;
  }
  if (use_zero_copy) {
    // This ResourcePool will vend gpu resources backed by gpu memory buffers.
    *resource_pool = std::make_unique<ResourcePool>(
        resource_provider_.get(), GetTaskRunner(),
        gfx::BufferUsage::GPU_READ_CPU_READ_WRITE,
        ResourcePool::kDefaultExpirationDelay,
        settings_.disallow_non_exact_resource_reuse);
    *raster_buffer_provider = ZeroCopyRasterBufferProvider::Create(
        resource_provider_.get(), settings_.preferred_tile_format);
    return;
  }
  // This ResourcePool will vend gpu texture resources.
  *resource_pool = std::make_unique<ResourcePool>(
      resource_provider_.get(), GetTaskRunner(),
      viz::ResourceTextureHint::kDefault, ResourcePool::kDefaultExpirationDelay,
      settings_.disallow_non_exact_resource_reuse);
  const int max_copy_texture_chromium_size =
      compositor_context_provider->ContextCapabilities()
          .max_copy_texture_chromium_size;
  *raster_buffer_provider = std::make_unique<OneCopyRasterBufferProvider>(
      GetTaskRunner(), compositor_context_provider, worker_context_provider,
      resource_provider_.get(), max_copy_texture_chromium_size,
      settings_.use_partial_raster, settings_.max_staging_buffer_usage_in_bytes,
      settings_.preferred_tile_format);
}
// Hands ownership of the compositor-worker mutator to the mutator host.
void LayerTreeHostImpl::SetLayerTreeMutator(
    std::unique_ptr<LayerTreeMutator> mutator) {
  mutator_host_->SetLayerTreeMutator(std::move(mutator));
}
// Convenience forwarder to the viewport's main scroll layer.
LayerImpl* LayerTreeHostImpl::ViewportMainScrollLayer() {
  return viewport()->MainScrollLayer();
}
// Queues an out-of-band image decode requested by the client; the result is
// reported back asynchronously through ImageDecodeFinished().
void LayerTreeHostImpl::QueueImageDecode(int request_id,
                                         const PaintImage& image) {
  TRACE_EVENT1(TRACE_DISABLED_BY_DEFAULT("cc.debug"),
               "LayerTreeHostImpl::QueueImageDecode", "frame_key",
               image.GetKeyForFrame(image.frame_index()).ToString());
  // Optimistically specify the current raster color space, since we assume that
  // it won't change.
  decoded_image_tracker_.QueueImageDecode(
      image, GetRasterColorSpace(),
      base::Bind(&LayerTreeHostImpl::ImageDecodeFinished,
                 base::Unretained(this), request_id));
  // An explicitly-decoded image must not be checker-imaged.
  tile_manager_.checker_image_tracker().DisallowCheckeringForImage(image);
}
void LayerTreeHostImpl::ImageDecodeFinished(int request_id,
bool decode_succeeded) {
TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("cc.debug"),
"LayerTreeHostImpl::ImageDecodeFinished");
completed_image_decode_requests_.emplace_back(request_id, decode_succeeded);
client_->NotifyImageDecodeRequestFinished();
}
// Hands the accumulated (request_id, decode_succeeded) results to the caller
// and leaves the member list empty for the next batch.
std::vector<std::pair<int, bool>>
LayerTreeHostImpl::TakeCompletedImageDecodeRequests() {
  return std::exchange(completed_image_decode_requests_, {});
}
// Drops image decode state on navigation: checker-image tracking (including
// decode policy) and the decode cache contents.
void LayerTreeHostImpl::ClearImageCacheOnNavigation() {
  // It is safe to clear the decode policy tracking on navigations since it
  // comes with an invalidation and the image ids are never re-used.
  tile_manager_.ClearCheckerImageTracking(
      /*can_clear_decode_policy_tracking=*/true);
  if (image_decode_cache_)
    image_decode_cache_->ClearCache();
}
// Requests a commit when compositor-side scrollbar visibility changes.
void LayerTreeHostImpl::DidChangeScrollbarVisibility() {
  // Need a commit since input handling for scrollbars is handled in Blink so
  // we need to communicate to Blink when the compositor shows/hides the
  // scrollbars.
  client_->SetNeedsCommitOnImplThread();
}
// Tears down the tile manager's resources (pool, task graph runner, decode
// cache), then flushes both contexts so the freed GPU memory is reclaimed
// promptly.
void LayerTreeHostImpl::CleanUpTileManagerResources() {
  tile_manager_.FinishTasksAndCleanUp();
  resource_pool_ = nullptr;
  single_thread_synchronous_task_graph_runner_ = nullptr;
  image_decode_cache_ = nullptr;
  // We've potentially just freed a large number of resources on our various
  // contexts. Flushing now helps ensure these are cleaned up quickly
  // preventing driver cache growth. See crbug.com/643251
  if (layer_tree_frame_sink_) {
    if (auto* compositor_context = layer_tree_frame_sink_->context_provider())
      compositor_context->ContextGL()->ShallowFlushCHROMIUM();
    if (auto* worker_context =
            layer_tree_frame_sink_->worker_context_provider()) {
      viz::RasterContextProvider::ScopedRasterContextLock hold(worker_context);
      hold.RasterInterface()->ShallowFlushCHROMIUM();
    }
  }
}
// Detaches from the current LayerTreeFrameSink, releasing every resource that
// depends on its contexts first. Teardown order matters: tree resources, then
// tile manager, then UI resources, then the resource provider itself.
void LayerTreeHostImpl::ReleaseLayerTreeFrameSink() {
  TRACE_EVENT0("cc", "LayerTreeHostImpl::ReleaseLayerTreeFrameSink");
  if (!layer_tree_frame_sink_) {
    DCHECK(!has_valid_layer_tree_frame_sink_);
    return;
  }
  has_valid_layer_tree_frame_sink_ = false;
  // Since we will create a new resource provider, we cannot continue to use
  // the old resources (i.e. render_surfaces and texture IDs). Clear them
  // before we destroy the old resource provider.
  ReleaseTreeResources();
  // Note: ui resource cleanup uses the |resource_provider_|.
  CleanUpTileManagerResources();
  ClearUIResources();
  resource_provider_ = nullptr;
  // Release any context visibility before we destroy the LayerTreeFrameSink.
  SetContextVisibility(false);
  // Detach from the old LayerTreeFrameSink and reset |layer_tree_frame_sink_|
  // pointer as this surface is going to be destroyed independent of if binding
  // the new LayerTreeFrameSink succeeds or not.
  layer_tree_frame_sink_->DetachFromClient();
  layer_tree_frame_sink_ = nullptr;
  // We don't know if the next LayerTreeFrameSink will support GPU
  // rasterization. Make sure to clear the flag so that we force a
  // re-computation.
  use_gpu_rasterization_ = false;
}
bool LayerTreeHostImpl::InitializeRenderer(
LayerTreeFrameSink* layer_tree_frame_sink) {
TRACE_EVENT0("cc", "LayerTreeHostImpl::InitializeRenderer");
ReleaseLayerTreeFrameSink();
if (!layer_tree_frame_sink->BindToClient(this)) {
// Avoid recreating tree resources because we might not have enough
// information to do this yet (eg. we don't have a TileManager at this