| // Copyright 2011 The Chromium Authors. All rights reserved. |
| // Use of this source code is governed by a BSD-style license that can be |
| // found in the LICENSE file. |
| |
| #include "cc/trees/layer_tree_host_impl.h" |
| |
#include <stddef.h>
#include <stdint.h>

#include <algorithm>
#include <limits>
#include <map>
#include <memory>
#include <set>
#include <unordered_map>
#include <utility>
| |
| #include "base/auto_reset.h" |
| #include "base/bind.h" |
| #include "base/compiler_specific.h" |
| #include "base/containers/adapters.h" |
| #include "base/containers/flat_map.h" |
| #include "base/debug/crash_logging.h" |
| #include "base/debug/dump_without_crashing.h" |
| #include "base/json/json_writer.h" |
| #include "base/memory/ptr_util.h" |
| #include "base/memory/read_only_shared_memory_region.h" |
| #include "base/metrics/histogram.h" |
| #include "base/numerics/safe_conversions.h" |
| #include "base/stl_util.h" |
| #include "base/strings/stringprintf.h" |
| #include "base/system/sys_info.h" |
| #include "base/trace_event/traced_value.h" |
| #include "build/build_config.h" |
| #include "cc/base/devtools_instrumentation.h" |
| #include "cc/base/histograms.h" |
| #include "cc/base/math_util.h" |
| #include "cc/benchmarks/benchmark_instrumentation.h" |
| #include "cc/debug/rendering_stats_instrumentation.h" |
| #include "cc/input/browser_controls_offset_manager.h" |
| #include "cc/input/main_thread_scrolling_reason.h" |
| #include "cc/input/page_scale_animation.h" |
| #include "cc/input/scroll_elasticity_helper.h" |
| #include "cc/input/scroll_state.h" |
| #include "cc/input/scrollbar_animation_controller.h" |
| #include "cc/input/scroller_size_metrics.h" |
| #include "cc/input/snap_selection_strategy.h" |
| #include "cc/layers/append_quads_data.h" |
| #include "cc/layers/effect_tree_layer_list_iterator.h" |
| #include "cc/layers/heads_up_display_layer_impl.h" |
| #include "cc/layers/layer_impl.h" |
| #include "cc/layers/painted_scrollbar_layer_impl.h" |
| #include "cc/layers/render_surface_impl.h" |
| #include "cc/layers/scrollbar_layer_impl_base.h" |
| #include "cc/layers/surface_layer_impl.h" |
| #include "cc/layers/viewport.h" |
| #include "cc/paint/paint_worklet_layer_painter.h" |
| #include "cc/raster/bitmap_raster_buffer_provider.h" |
| #include "cc/raster/gpu_raster_buffer_provider.h" |
| #include "cc/raster/one_copy_raster_buffer_provider.h" |
| #include "cc/raster/raster_buffer_provider.h" |
| #include "cc/raster/synchronous_task_graph_runner.h" |
| #include "cc/raster/zero_copy_raster_buffer_provider.h" |
| #include "cc/resources/memory_history.h" |
| #include "cc/resources/resource_pool.h" |
| #include "cc/resources/ui_resource_bitmap.h" |
| #include "cc/scheduler/compositor_frame_reporting_controller.h" |
| #include "cc/tiles/eviction_tile_priority_queue.h" |
| #include "cc/tiles/frame_viewer_instrumentation.h" |
| #include "cc/tiles/gpu_image_decode_cache.h" |
| #include "cc/tiles/picture_layer_tiling.h" |
| #include "cc/tiles/raster_tile_priority_queue.h" |
| #include "cc/tiles/software_image_decode_cache.h" |
| #include "cc/trees/clip_node.h" |
| #include "cc/trees/damage_tracker.h" |
| #include "cc/trees/debug_rect_history.h" |
| #include "cc/trees/draw_property_utils.h" |
| #include "cc/trees/effect_node.h" |
| #include "cc/trees/frame_rate_counter.h" |
| #include "cc/trees/image_animation_controller.h" |
| #include "cc/trees/latency_info_swap_promise_monitor.h" |
| #include "cc/trees/layer_tree_frame_sink.h" |
| #include "cc/trees/layer_tree_host_common.h" |
| #include "cc/trees/layer_tree_impl.h" |
| #include "cc/trees/mutator_host.h" |
| #include "cc/trees/render_frame_metadata.h" |
| #include "cc/trees/render_frame_metadata_observer.h" |
| #include "cc/trees/scroll_node.h" |
| #include "cc/trees/single_thread_proxy.h" |
| #include "cc/trees/transform_node.h" |
| #include "cc/trees/tree_synchronizer.h" |
| #include "components/viz/common/features.h" |
| #include "components/viz/common/frame_sinks/copy_output_request.h" |
| #include "components/viz/common/frame_sinks/delay_based_time_source.h" |
| #include "components/viz/common/hit_test/hit_test_region_list.h" |
| #include "components/viz/common/quads/compositor_frame.h" |
| #include "components/viz/common/quads/compositor_frame_metadata.h" |
| #include "components/viz/common/quads/frame_deadline.h" |
| #include "components/viz/common/quads/render_pass_draw_quad.h" |
| #include "components/viz/common/quads/shared_quad_state.h" |
| #include "components/viz/common/quads/solid_color_draw_quad.h" |
| #include "components/viz/common/quads/texture_draw_quad.h" |
| #include "components/viz/common/resources/bitmap_allocation.h" |
| #include "components/viz/common/resources/platform_color.h" |
| #include "components/viz/common/resources/resource_sizes.h" |
| #include "components/viz/common/traced_value.h" |
| #include "gpu/GLES2/gl2extchromium.h" |
| #include "gpu/command_buffer/client/context_support.h" |
| #include "gpu/command_buffer/client/gles2_interface.h" |
| #include "gpu/command_buffer/client/raster_interface.h" |
| #include "gpu/command_buffer/client/shared_image_interface.h" |
| #include "gpu/command_buffer/common/shared_image_usage.h" |
| #include "services/metrics/public/cpp/ukm_recorder.h" |
| #include "third_party/skia/include/gpu/GrContext.h" |
| #include "ui/gfx/geometry/point_conversions.h" |
| #include "ui/gfx/geometry/rect_conversions.h" |
| #include "ui/gfx/geometry/scroll_offset.h" |
| #include "ui/gfx/geometry/size_conversions.h" |
| #include "ui/gfx/geometry/vector2d_conversions.h" |
| #include "ui/gfx/presentation_feedback.h" |
| #include "ui/gfx/skia_util.h" |
| |
| namespace cc { |
| namespace { |
| |
// Used to accommodate finite precision when comparing scaled viewport and
// content widths. While this value may seem large, width=device-width on an N7
// V1 saw errors of ~0.065 between computed window and content widths.
// (Units: CSS pixels / DIPs, matching the widths compared in
// HasMobileViewport() below.)
const float kMobileViewportWidthEpsilon = 0.15f;

// In BuildHitTestData we iterate all layers to find all layers that overlap
// OOPIFs, but when the number of layers is greater than
// |kAssumeOverlapThreshold|, it can be inefficient to accumulate layer bounds
// for overlap checking. As a result, we are conservative and make OOPIFs
// kHitTestAsk after the threshold is reached.
const size_t kAssumeOverlapThreshold = 100;
| |
| bool HasFixedPageScale(LayerTreeImpl* active_tree) { |
| return active_tree->min_page_scale_factor() == |
| active_tree->max_page_scale_factor(); |
| } |
| |
| bool HasMobileViewport(LayerTreeImpl* active_tree) { |
| float window_width_dip = active_tree->current_page_scale_factor() * |
| active_tree->ScrollableViewportSize().width(); |
| float content_width_css = active_tree->ScrollableSize().width(); |
| return content_width_css <= window_width_dip + kMobileViewportWidthEpsilon; |
| } |
| |
| bool IsMobileOptimized(LayerTreeImpl* active_tree) { |
| bool has_mobile_viewport = HasMobileViewport(active_tree); |
| bool has_fixed_page_scale = HasFixedPageScale(active_tree); |
| return has_fixed_page_scale || has_mobile_viewport; |
| } |
| |
| viz::ResourceFormat TileRasterBufferFormat( |
| const LayerTreeSettings& settings, |
| viz::ContextProvider* context_provider, |
| bool use_gpu_rasterization) { |
| // Software compositing always uses the native skia RGBA N32 format, but we |
| // just call it RGBA_8888 everywhere even though it can be BGRA ordering, |
| // because we don't need to communicate the actual ordering as the code all |
| // assumes the native skia format. |
| if (!context_provider) |
| return viz::RGBA_8888; |
| |
| // RGBA4444 overrides the defaults if specified, but only for gpu compositing. |
| // It is always supported on platforms where it is specified. |
| if (settings.use_rgba_4444) |
| return viz::RGBA_4444; |
| // Otherwise we use BGRA textures if we can but it depends on the context |
| // capabilities, and we have different preferences when rastering to textures |
| // vs uploading textures. |
| const gpu::Capabilities& caps = context_provider->ContextCapabilities(); |
| if (use_gpu_rasterization) |
| return viz::PlatformColor::BestSupportedRenderBufferFormat(caps); |
| return viz::PlatformColor::BestSupportedTextureFormat(caps); |
| } |
| |
| // Small helper class that saves the current viewport location as the user sees |
| // it and resets to the same location. |
| class ViewportAnchor { |
| public: |
| ViewportAnchor(ScrollNode* inner_scroll, |
| LayerImpl* outer_scroll, |
| LayerTreeImpl* tree_impl) |
| : inner_(inner_scroll), outer_(outer_scroll), tree_impl_(tree_impl) { |
| viewport_in_content_coordinates_ = |
| scroll_tree().current_scroll_offset(inner_->element_id); |
| |
| if (outer_) |
| viewport_in_content_coordinates_ += outer_->CurrentScrollOffset(); |
| } |
| |
| void ResetViewportToAnchoredPosition() { |
| DCHECK(outer_); |
| |
| scroll_tree().ClampScrollToMaxScrollOffset(inner_, tree_impl_); |
| outer_->ClampScrollToMaxScrollOffset(); |
| |
| gfx::ScrollOffset viewport_location = |
| scroll_tree().current_scroll_offset(inner_->element_id) + |
| outer_->CurrentScrollOffset(); |
| |
| gfx::Vector2dF delta = |
| viewport_in_content_coordinates_.DeltaFrom(viewport_location); |
| |
| delta = scroll_tree().ScrollBy(inner_, delta, tree_impl_); |
| outer_->ScrollBy(delta); |
| } |
| |
| private: |
| ScrollTree& scroll_tree() { |
| return tree_impl_->property_trees()->scroll_tree; |
| } |
| |
| ScrollNode* inner_; |
| LayerImpl* outer_; |
| LayerTreeImpl* tree_impl_; |
| gfx::ScrollOffset viewport_in_content_coordinates_; |
| }; |
| |
| void DidVisibilityChange(LayerTreeHostImpl* id, bool visible) { |
| if (visible) { |
| TRACE_EVENT_ASYNC_BEGIN1("cc", "LayerTreeHostImpl::SetVisible", id, |
| "LayerTreeHostImpl", id); |
| return; |
| } |
| |
| TRACE_EVENT_ASYNC_END0("cc", "LayerTreeHostImpl::SetVisible", id); |
| } |
| |
| enum ScrollThread { MAIN_THREAD, CC_THREAD }; |
| |
| void RecordCompositorSlowScrollMetric(InputHandler::ScrollInputType type, |
| ScrollThread scroll_thread) { |
| DCHECK_NE(type, InputHandler::SCROLL_INPUT_UNKNOWN); |
| bool scroll_on_main_thread = (scroll_thread == MAIN_THREAD); |
| if (type == InputHandler::WHEEL) { |
| UMA_HISTOGRAM_BOOLEAN("Renderer4.CompositorWheelScrollUpdateThread", |
| scroll_on_main_thread); |
| } else if (type == InputHandler::TOUCHSCREEN) { |
| UMA_HISTOGRAM_BOOLEAN("Renderer4.CompositorTouchScrollUpdateThread", |
| scroll_on_main_thread); |
| } |
| } |
| |
| ui::FrameMetricsSettings LTHI_FrameMetricsSettings( |
| const LayerTreeSettings& settings) { |
| ui::FrameMetricsSource source = |
| settings.commit_to_active_tree |
| ? ui::FrameMetricsSource::UiCompositor |
| : ui::FrameMetricsSource::RendererCompositor; |
| ui::FrameMetricsSourceThread source_thread = |
| settings.commit_to_active_tree |
| ? ui::FrameMetricsSourceThread::UiCompositor |
| : ui::FrameMetricsSourceThread::RendererCompositor; |
| ui::FrameMetricsCompileTarget compile_target = |
| settings.using_synchronous_renderer_compositor |
| ? ui::FrameMetricsCompileTarget::SynchronousCompositor |
| : settings.wait_for_all_pipeline_stages_before_draw |
| ? ui::FrameMetricsCompileTarget::Headless |
| : ui::FrameMetricsCompileTarget::Chromium; |
| return ui::FrameMetricsSettings(source, source_thread, compile_target); |
| } |
| |
| } // namespace |
| |
// Scoped timers that report, via the "Scheduling.%s.*" UMA histograms, how
// long a pending tree exists and how long its rastering takes.
DEFINE_SCOPED_UMA_HISTOGRAM_TIMER(PendingTreeDurationHistogramTimer,
                                  "Scheduling.%s.PendingTreeDuration")
DEFINE_SCOPED_UMA_HISTOGRAM_TIMER(PendingTreeRasterDurationHistogramTimer,
                                  "Scheduling.%s.PendingTreeRasterDuration")

// Out-of-line defaulted special members for the nested FrameData and
// UIResourceData types (UIResourceData is move-only: movable, not copyable).
LayerTreeHostImpl::FrameData::FrameData() = default;
LayerTreeHostImpl::FrameData::~FrameData() = default;
LayerTreeHostImpl::UIResourceData::UIResourceData() = default;
LayerTreeHostImpl::UIResourceData::~UIResourceData() = default;
LayerTreeHostImpl::UIResourceData::UIResourceData(UIResourceData&&) noexcept =
    default;
LayerTreeHostImpl::UIResourceData& LayerTreeHostImpl::UIResourceData::operator=(
    UIResourceData&&) = default;
| |
| std::unique_ptr<LayerTreeHostImpl> LayerTreeHostImpl::Create( |
| const LayerTreeSettings& settings, |
| LayerTreeHostImplClient* client, |
| TaskRunnerProvider* task_runner_provider, |
| RenderingStatsInstrumentation* rendering_stats_instrumentation, |
| TaskGraphRunner* task_graph_runner, |
| std::unique_ptr<MutatorHost> mutator_host, |
| int id, |
| scoped_refptr<base::SequencedTaskRunner> image_worker_task_runner) { |
| return base::WrapUnique(new LayerTreeHostImpl( |
| settings, client, task_runner_provider, rendering_stats_instrumentation, |
| task_graph_runner, std::move(mutator_host), id, |
| std::move(image_worker_task_runner))); |
| } |
| |
// Constructor. Must run on the impl thread. Member initialization order below
// is significant (see the inline note about |tile_manager_|); do not reorder
// without checking the declaration order in the header.
LayerTreeHostImpl::LayerTreeHostImpl(
    const LayerTreeSettings& settings,
    LayerTreeHostImplClient* client,
    TaskRunnerProvider* task_runner_provider,
    RenderingStatsInstrumentation* rendering_stats_instrumentation,
    TaskGraphRunner* task_graph_runner,
    std::unique_ptr<MutatorHost> mutator_host,
    int id,
    scoped_refptr<base::SequencedTaskRunner> image_worker_task_runner)
    : client_(client),
      task_runner_provider_(task_runner_provider),
      current_begin_frame_tracker_(BEGINFRAMETRACKER_FROM_HERE),
      compositor_frame_reporting_controller_(
          std::make_unique<CompositorFrameReportingController>()),
      settings_(settings),
      is_synchronous_single_threaded_(!task_runner_provider->HasImplThread() &&
                                      !settings_.single_thread_proxy_scheduler),
      resource_provider_(settings_.delegated_sync_points_required),
      cached_managed_memory_policy_(settings.memory_policy),
      // Must be initialized after is_synchronous_single_threaded_ and
      // task_runner_provider_.
      tile_manager_(this,
                    GetTaskRunner(),
                    std::move(image_worker_task_runner),
                    // Synchronous single-threaded mode has no scheduler to
                    // throttle raster tasks, so the limit is unbounded.
                    is_synchronous_single_threaded_
                        ? std::numeric_limits<size_t>::max()
                        : settings.scheduled_raster_task_limit,
                    settings.ToTileManagerSettings()),
      fps_counter_(
          FrameRateCounter::Create(task_runner_provider_->HasImplThread())),
      memory_history_(MemoryHistory::Create()),
      debug_rect_history_(DebugRectHistory::Create()),
      mutator_host_(std::move(mutator_host)),
      rendering_stats_instrumentation_(rendering_stats_instrumentation),
      micro_benchmark_controller_(this),
      task_graph_runner_(task_graph_runner),
      id_(id),
      consecutive_frame_with_damage_count_(settings.damaged_frame_limit),
      // It is safe to use base::Unretained here since we will outlive the
      // ImageAnimationController.
      image_animation_controller_(GetTaskRunner(),
                                  this,
                                  settings_.enable_image_animation_resync),
      frame_metrics_(LTHI_FrameMetricsSettings(settings_)),
      skipped_frame_tracker_(&frame_metrics_),
      is_animating_for_snap_(false),
      paint_image_generator_client_id_(PaintImage::GetNextGeneratorClientId()),
      scrollbar_controller_(std::make_unique<ScrollbarController>(this)),
      scroll_gesture_did_end_(false) {
  DCHECK(mutator_host_);
  mutator_host_->SetMutatorHostClient(this);

  DCHECK(task_runner_provider_->IsImplThread());
  // Emits the initial visibility trace event for this host impl.
  DidVisibilityChange(this, visible_);

  // LTHI always has an active tree.
  active_tree_ = std::make_unique<LayerTreeImpl>(
      this, new SyncedProperty<ScaleGroup>, new SyncedBrowserControls,
      new SyncedElasticOverscroll);
  active_tree_->property_trees()->is_active = true;

  viewport_ = Viewport::Create(this);

  TRACE_EVENT_OBJECT_CREATED_WITH_ID(TRACE_DISABLED_BY_DEFAULT("cc.debug"),
                                     "cc::LayerTreeHostImpl", id_);

  browser_controls_offset_manager_ = BrowserControlsOffsetManager::Create(
      this, settings.top_controls_show_threshold,
      settings.top_controls_hide_threshold);

  // base::Unretained is safe: |memory_pressure_listener_| is owned by |this|
  // and destroyed before it.
  memory_pressure_listener_.reset(
      new base::MemoryPressureListener(base::BindRepeating(
          &LayerTreeHostImpl::OnMemoryPressure, base::Unretained(this))));

  SetDebugState(settings.initial_debug_state);
}
| |
| LayerTreeHostImpl::~LayerTreeHostImpl() { |
| DCHECK(task_runner_provider_->IsImplThread()); |
| TRACE_EVENT0("cc", "LayerTreeHostImpl::~LayerTreeHostImpl()"); |
| TRACE_EVENT_OBJECT_DELETED_WITH_ID(TRACE_DISABLED_BY_DEFAULT("cc.debug"), |
| "cc::LayerTreeHostImpl", id_); |
| |
| // The frame sink is released before shutdown, which takes down |
| // all the resource and raster structures. |
| DCHECK(!layer_tree_frame_sink_); |
| DCHECK(!resource_pool_); |
| DCHECK(!image_decode_cache_); |
| DCHECK(!single_thread_synchronous_task_graph_runner_); |
| |
| if (input_handler_client_) { |
| input_handler_client_->WillShutdown(); |
| input_handler_client_ = nullptr; |
| } |
| if (scroll_elasticity_helper_) |
| scroll_elasticity_helper_.reset(); |
| |
| // The layer trees must be destroyed before the LayerTreeHost. Also, if they |
| // are holding onto any resources, destroying them will release them, before |
| // we mark any leftover resources as lost. |
| if (recycle_tree_) |
| recycle_tree_->Shutdown(); |
| if (pending_tree_) |
| pending_tree_->Shutdown(); |
| active_tree_->Shutdown(); |
| recycle_tree_ = nullptr; |
| pending_tree_ = nullptr; |
| active_tree_ = nullptr; |
| |
| // All resources should already be removed, so lose anything still exported. |
| resource_provider_.ShutdownAndReleaseAllResources(); |
| |
| mutator_host_->ClearMutators(); |
| mutator_host_->SetMutatorHostClient(nullptr); |
| } |
| |
| void LayerTreeHostImpl::BeginMainFrameAborted( |
| CommitEarlyOutReason reason, |
| std::vector<std::unique_ptr<SwapPromise>> swap_promises) { |
| // If the begin frame data was handled, then scroll and scale set was applied |
| // by the main thread, so the active tree needs to be updated as if these sent |
| // values were applied and committed. |
| if (CommitEarlyOutHandledCommit(reason)) { |
| active_tree_->ApplySentScrollAndScaleDeltasFromAbortedCommit(); |
| if (pending_tree_) { |
| pending_tree_->AppendSwapPromises(std::move(swap_promises)); |
| } else { |
| for (const auto& swap_promise : swap_promises) |
| swap_promise->DidNotSwap(SwapPromise::COMMIT_NO_UPDATE); |
| } |
| } |
| } |
| |
// Prepares for an incoming commit. When not committing directly to the active
// tree, the commit targets a freshly created pending tree.
void LayerTreeHostImpl::BeginCommit() {
  TRACE_EVENT0("cc", "LayerTreeHostImpl::BeginCommit");

  if (!CommitToActiveTree())
    CreatePendingTree();
}
| |
| void LayerTreeHostImpl::CommitComplete() { |
| TRACE_EVENT0("cc", "LayerTreeHostImpl::CommitComplete"); |
| |
| // In high latency mode commit cannot finish within the same frame. We need to |
| // flush input here to make sure they got picked up by |PrepareTiles()|. |
| if (input_handler_client_ && impl_thread_phase_ == ImplThreadPhase::IDLE) |
| input_handler_client_->DeliverInputForBeginFrame(); |
| |
| if (CommitToActiveTree()) { |
| active_tree_->HandleScrollbarShowRequestsFromMain(); |
| |
| // We have to activate animations here or "IsActive()" is true on the layers |
| // but the animations aren't activated yet so they get ignored by |
| // UpdateDrawProperties. |
| ActivateAnimations(); |
| } |
| |
| // Start animations before UpdateDrawProperties and PrepareTiles, as they can |
| // change the results. When doing commit to the active tree, this must happen |
| // after ActivateAnimations() in order for this ticking to be propogated |
| // to layers on the active tree. |
| if (CommitToActiveTree()) |
| Animate(); |
| else |
| AnimatePendingTreeAfterCommit(); |
| |
| UpdateSyncTreeAfterCommitOrImplSideInvalidation(); |
| micro_benchmark_controller_.DidCompleteCommit(); |
| } |
| |
// Post-commit / post-invalidation pipeline for the sync tree: refresh tree
// resources, update draw properties and LCD-text state, invalidate
// checkerboarded/animated images, and kick off tile preparation. The ordering
// of these steps is load-bearing; see the inline comments before reordering.
void LayerTreeHostImpl::UpdateSyncTreeAfterCommitOrImplSideInvalidation() {
  // LayerTreeHost may have changed the GPU rasterization flags state, which
  // may require an update of the tree resources.
  UpdateTreeResourcesIfNeeded();
  sync_tree()->set_needs_update_draw_properties();

  // We need an update immediately post-commit to have the opportunity to create
  // tilings.
  // We can avoid updating the ImageAnimationController during this
  // DrawProperties update since it will be done when we animate the controller
  // below.
  bool update_image_animation_controller = false;
  sync_tree()->UpdateDrawProperties(update_image_animation_controller);
  // Because invalidations may be coming from the main thread, it's
  // safe to do an update for lcd text at this point and see if lcd text needs
  // to be disabled on any layers.
  // It'd be ideal if this could be done earlier, but when the raster source
  // is updated from the main thread during push properties, update draw
  // properties has not occurred yet and so it's not clear whether or not the
  // layer can or cannot use lcd text. So, this is the cleanup pass to
  // determine if lcd state needs to switch due to draw properties.
  sync_tree()->UpdateCanUseLCDText();

  // Defer invalidating images until UpdateDrawProperties is performed since
  // that updates whether an image should be animated based on its visibility
  // and the updated data for the image from the main frame.
  PaintImageIdFlatSet images_to_invalidate =
      tile_manager_.TakeImagesToInvalidateOnSyncTree();
  if (ukm_manager_)
    ukm_manager_->AddCheckerboardedImages(images_to_invalidate.size());

  const auto& animated_images =
      image_animation_controller_.AnimateForSyncTree(CurrentBeginFrameArgs());
  images_to_invalidate.insert(animated_images.begin(), animated_images.end());
  sync_tree()->InvalidateRegionForImages(images_to_invalidate);

  // Note that it is important to push the state for checkerboarded and animated
  // images prior to PrepareTiles here when committing to the active tree. This
  // is because new tiles on the active tree depend on tree specific state
  // cached in these components, which must be pushed to active before preparing
  // tiles for the updated active tree.
  if (CommitToActiveTree())
    ActivateStateForImages();

  // Start working on newly created tiles immediately if needed.
  // TODO(vmpstr): Investigate always having PrepareTiles issue
  // NotifyReadyToActivate, instead of handling it here.
  bool did_prepare_tiles = PrepareTiles();
  if (!did_prepare_tiles) {
    NotifyReadyToActivate();

    // Ensure we get ReadyToDraw signal even when PrepareTiles not run. This
    // is important for SingleThreadProxy and impl-side painting case. For
    // STP, we commit to active tree and RequiresHighResToDraw, and set
    // Scheduler to wait for ReadyToDraw signal to avoid Checkerboard.
    if (CommitToActiveTree())
      NotifyReadyToDraw();
  } else if (!CommitToActiveTree()) {
    // Tiles are rastering for a pending tree; time how long that takes.
    DCHECK(!pending_tree_raster_duration_timer_);
    pending_tree_raster_duration_timer_ =
        std::make_unique<PendingTreeRasterDurationHistogramTimer>();
  }
}
| |
// Returns whether the active tree is currently drawable: requires a frame
// sink, a non-empty layer list, no pending local surface id, and (unless
// doing a resourceless software draw) a non-empty viewport and no evicted UI
// resources.
bool LayerTreeHostImpl::CanDraw() const {
  // Note: If you are changing this function or any other function that might
  // affect the result of CanDraw, make sure to call
  // client_->OnCanDrawStateChanged in the proper places and update the
  // NotifyIfCanDrawChanged test.

  if (!layer_tree_frame_sink_) {
    TRACE_EVENT_INSTANT0("cc",
                         "LayerTreeHostImpl::CanDraw no LayerTreeFrameSink",
                         TRACE_EVENT_SCOPE_THREAD);
    return false;
  }

  // TODO(boliu): Make draws without layers work and move this below
  // |resourceless_software_draw_| check. Tracked in crbug.com/264967.
  if (active_tree_->LayerListIsEmpty()) {
    TRACE_EVENT_INSTANT0("cc", "LayerTreeHostImpl::CanDraw no root layer",
                         TRACE_EVENT_SCOPE_THREAD);
    return false;
  }

  // No drawing while a local surface id is still awaited.
  if (waiting_for_local_surface_id_)
    return false;

  // Resourceless software draws bypass the remaining viewport/resource
  // checks below.
  if (resourceless_software_draw_)
    return true;

  if (active_tree_->GetDeviceViewport().IsEmpty()) {
    TRACE_EVENT_INSTANT0("cc", "LayerTreeHostImpl::CanDraw empty viewport",
                         TRACE_EVENT_SCOPE_THREAD);
    return false;
  }
  if (EvictedUIResourcesExist()) {
    TRACE_EVENT_INSTANT0(
        "cc", "LayerTreeHostImpl::CanDraw UI resources evicted not recreated",
        TRACE_EVENT_SCOPE_THREAD);
    return false;
  }
  return true;
}
| |
| void LayerTreeHostImpl::AnimatePendingTreeAfterCommit() { |
| // Animate the pending tree layer animations to put them at initial positions |
| // and starting state. There is no need to run other animations on pending |
| // tree because they depend on user inputs so the state is identical to what |
| // the active tree has. |
| AnimateLayers(CurrentBeginFrameArgs().frame_time, /* is_active_tree */ false); |
| } |
| |
// Public entry point for per-frame animation; all work is in
// AnimateInternal().
void LayerTreeHostImpl::Animate() {
  AnimateInternal();
}
| |
// Ticks every animation source for the current begin-frame time (input/fling,
// page scale, layers, scrollbars, browser controls) and requests a redraw if
// any of them changed state.
void LayerTreeHostImpl::AnimateInternal() {
  DCHECK(task_runner_provider_->IsImplThread());
  base::TimeTicks monotonic_time = CurrentBeginFrameArgs().frame_time;

  // mithro(TODO): Enable these checks.
  // DCHECK(!current_begin_frame_tracker_.HasFinished());
  // DCHECK(monotonic_time == current_begin_frame_tracker_.Current().frame_time)
  //  << "Called animate with unknown frame time!?";

  bool did_animate = false;

  if (input_handler_client_) {
    // This animates fling scrolls. But on Android WebView root flings are
    // controlled by the application, so the compositor does not animate them.
    bool ignore_fling =
        settings_.ignore_root_layer_flings && IsCurrentlyScrollingViewport();
    if (!ignore_fling) {
      // This does not set did_animate, because if the InputHandlerClient
      // changes anything it will be through the InputHandler interface which
      // does SetNeedsRedraw.
      input_handler_client_->Animate(monotonic_time);
    }
  }

  // Accumulate with |= so every animator ticks even after one reports
  // animation (no short-circuiting).
  did_animate |= AnimatePageScale(monotonic_time);
  did_animate |= AnimateLayers(monotonic_time, /* is_active_tree */ true);
  did_animate |= AnimateScrollbars(monotonic_time);
  did_animate |= AnimateBrowserControls(monotonic_time);

  // Animating stuff can change the root scroll offset, so inform the
  // synchronous input handler.
  UpdateRootLayerStateForSynchronousInputHandler();
  if (did_animate) {
    // If the tree changed, then we want to draw at the end of the current
    // frame.
    SetNeedsRedraw();
  }
}
| |
| |
| bool LayerTreeHostImpl::PrepareTiles() { |
| if (!tile_priorities_dirty_) |
| return false; |
| |
| client_->WillPrepareTiles(); |
| bool did_prepare_tiles = tile_manager_.PrepareTiles(global_tile_state_); |
| if (did_prepare_tiles) |
| tile_priorities_dirty_ = false; |
| client_->DidPrepareTiles(); |
| return did_prepare_tiles; |
| } |
| |
// Starts a page scale (pinch-zoom) animation toward |page_scale| over
// |duration|. When |anchor_point| is true, |target_offset| is treated as a
// zoom anchor; otherwise it is the target scroll offset. No-op if there is no
// inner viewport scroll node.
void LayerTreeHostImpl::StartPageScaleAnimation(
    const gfx::Vector2d& target_offset,
    bool anchor_point,
    float page_scale,
    base::TimeDelta duration) {
  // Temporary crash logging for https://crbug.com/845097.
  static bool has_dumped_without_crashing = false;
  if (settings().is_layer_tree_for_subframe && !has_dumped_without_crashing) {
    has_dumped_without_crashing = true;
    static auto* psf_oopif_animation_error =
        base::debug::AllocateCrashKeyString("psf_oopif_animation_error",
                                            base::debug::CrashKeySize::Size32);
    base::debug::SetCrashKeyString(
        psf_oopif_animation_error,
        base::StringPrintf("%p", InnerViewportScrollNode()));
    base::debug::DumpWithoutCrashing();
  }

  if (!InnerViewportScrollNode())
    return;

  gfx::ScrollOffset scroll_total = active_tree_->TotalScrollOffset();
  gfx::SizeF scrollable_size = active_tree_->ScrollableSize();
  gfx::SizeF viewport_size =
      gfx::SizeF(active_tree_->InnerViewportContainerLayer()->bounds());

  // TODO(miletus) : Pass in ScrollOffset.
  page_scale_animation_ =
      PageScaleAnimation::Create(ScrollOffsetToVector2dF(scroll_total),
                                 active_tree_->current_page_scale_factor(),
                                 viewport_size, scrollable_size);

  if (anchor_point) {
    gfx::Vector2dF anchor(target_offset);
    page_scale_animation_->ZoomWithAnchor(anchor, page_scale,
                                          duration.InSecondsF());
  } else {
    gfx::Vector2dF scaled_target_offset = target_offset;
    page_scale_animation_->ZoomTo(scaled_target_offset, page_scale,
                                  duration.InSecondsF());
  }

  // Tick the new animation next frame, and let the main thread know so it can
  // commit the resulting scale/scroll changes.
  SetNeedsOneBeginImplFrame();
  client_->SetNeedsCommitOnImplThread();
  client_->RenewTreePriority();
}
| |
| void LayerTreeHostImpl::SetNeedsAnimateInput() { |
| DCHECK(!IsCurrentlyScrollingViewport() || |
| !settings_.ignore_root_layer_flings); |
| SetNeedsOneBeginImplFrame(); |
| } |
| |
| bool LayerTreeHostImpl::IsCurrentlyScrollingViewport() const { |
| auto* node = CurrentlyScrollingNode(); |
| if (!node) |
| return false; |
| if (!viewport()->MainScrollLayer()) |
| return false; |
| return node->id == viewport()->MainScrollLayer()->scroll_tree_index(); |
| } |
| |
// Returns whether a scroll at |viewport_point| (for input type |type|) would
// target the node that is already being scrolled. Inner and outer viewport
// nodes are treated interchangeably. Returns false if the hit scroll node
// must scroll on the main thread.
bool LayerTreeHostImpl::IsCurrentlyScrollingLayerAt(
    const gfx::Point& viewport_point,
    InputHandler::ScrollInputType type) const {
  auto* scrolling_node = CurrentlyScrollingNode();
  if (!scrolling_node)
    return false;

  // Hit testing is performed in device (physical pixel) space.
  gfx::PointF device_viewport_point = gfx::ScalePoint(
      gfx::PointF(viewport_point), active_tree_->device_scale_factor());

  LayerImpl* layer_impl =
      active_tree_->FindLayerThatIsHitByPoint(device_viewport_point);

  bool scroll_on_main_thread = false;
  uint32_t main_thread_scrolling_reasons;
  auto* test_scroll_node = FindScrollNodeForDeviceViewportPoint(
      device_viewport_point, type, layer_impl, &scroll_on_main_thread,
      &main_thread_scrolling_reasons);

  if (scroll_on_main_thread)
    return false;

  if (scrolling_node == test_scroll_node)
    return true;

  // For active scrolling state treat the inner/outer viewports interchangeably.
  if (scrolling_node->scrolls_inner_viewport ||
      scrolling_node->scrolls_outer_viewport) {
    return test_scroll_node == OuterViewportScrollNode();
  }

  return false;
}
| |
// Returns the active tree's registered listener properties for |event_class|.
EventListenerProperties LayerTreeHostImpl::GetEventListenerProperties(
    EventListenerClass event_class) const {
  return active_tree_->event_listener_properties(event_class);
}
| |
| // Return true if scrollable node for 'ancestor' is the same as 'child' or an |
| // ancestor along the scroll tree. |
| bool LayerTreeHostImpl::IsScrolledBy(LayerImpl* child, ScrollNode* ancestor) { |
| DCHECK(ancestor && ancestor->scrollable); |
| if (!child) |
| return false; |
| DCHECK_EQ(child->layer_tree_impl(), active_tree_.get()); |
| ScrollTree& scroll_tree = active_tree_->property_trees()->scroll_tree; |
| for (ScrollNode* scroll_node = scroll_tree.Node(child->scroll_tree_index()); |
| scroll_node; scroll_node = scroll_tree.parent(scroll_node)) { |
| if (scroll_node->id == ancestor->id) |
| return true; |
| } |
| return false; |
| } |
| |
// Hit-tests |viewport_point| against touch handler regions. Returns whether a
// handler was hit and, if so, whether it lies on (an ancestor of) the
// currently scrolling layer. When |out_touch_action| is non-null, it receives
// the allowed touch action at the hit point (kTouchActionAuto if no handler).
InputHandler::TouchStartOrMoveEventListenerType
LayerTreeHostImpl::EventListenerTypeForTouchStartOrMoveAt(
    const gfx::Point& viewport_point,
    TouchAction* out_touch_action) {
  // Hit testing is performed in device (physical pixel) space.
  gfx::PointF device_viewport_point = gfx::ScalePoint(
      gfx::PointF(viewport_point), active_tree_->device_scale_factor());

  LayerImpl* layer_impl_with_touch_handler =
      active_tree_->FindLayerThatIsHitByPointInTouchHandlerRegion(
          device_viewport_point);

  if (layer_impl_with_touch_handler == nullptr) {
    if (out_touch_action)
      *out_touch_action = kTouchActionAuto;
    return InputHandler::TouchStartOrMoveEventListenerType::NO_HANDLER;
  }

  if (out_touch_action) {
    // Map the hit point back into the layer's own space to query its
    // touch-action region.
    gfx::Transform layer_screen_space_transform =
        layer_impl_with_touch_handler->ScreenSpaceTransform();
    gfx::Transform inverse_layer_screen_space(
        gfx::Transform::kSkipInitialization);
    bool can_be_inversed =
        layer_screen_space_transform.GetInverse(&inverse_layer_screen_space);
    // Getting here indicates that |layer_impl_with_touch_handler| is non-null,
    // which means that the |hit| in FindClosestMatchingLayer() is true, which
    // indicates that the inverse is available.
    DCHECK(can_be_inversed);
    bool clipped = false;
    gfx::Point3F planar_point = MathUtil::ProjectPoint3D(
        inverse_layer_screen_space, device_viewport_point, &clipped);
    gfx::PointF hit_test_point_in_layer_space =
        gfx::PointF(planar_point.x(), planar_point.y());
    const auto& region = layer_impl_with_touch_handler->touch_action_region();
    gfx::Point point = gfx::ToRoundedPoint(hit_test_point_in_layer_space);
    *out_touch_action = region.GetAllowedTouchAction(point);
  }

  if (!CurrentlyScrollingNode())
    return InputHandler::TouchStartOrMoveEventListenerType::HANDLER;

  // Check if the touch start (or move) hits on the current scrolling layer or
  // its descendant. layer_impl_with_touch_handler is the layer hit by the
  // pointer and has an event handler, otherwise it is null. We want to compare
  // the most inner layer we are hitting on which may not have an event listener
  // with the actual scrolling layer.
  LayerImpl* layer_impl =
      active_tree_->FindLayerThatIsHitByPoint(device_viewport_point);
  bool is_ancestor = IsScrolledBy(layer_impl, CurrentlyScrollingNode());
  return is_ancestor ? InputHandler::TouchStartOrMoveEventListenerType::
                           HANDLER_ON_SCROLLING_LAYER
                     : InputHandler::TouchStartOrMoveEventListenerType::HANDLER;
}
| |
| bool LayerTreeHostImpl::HasBlockingWheelEventHandlerAt( |
| const gfx::Point& viewport_point) const { |
| gfx::PointF device_viewport_point = gfx::ScalePoint( |
| gfx::PointF(viewport_point), active_tree_->device_scale_factor()); |
| |
| LayerImpl* layer_impl_with_wheel_event_handler = |
| active_tree_->FindLayerThatIsHitByPointInWheelEventHandlerRegion( |
| device_viewport_point); |
| |
| return layer_impl_with_wheel_event_handler; |
| } |
| |
| std::unique_ptr<SwapPromiseMonitor> |
| LayerTreeHostImpl::CreateLatencyInfoSwapPromiseMonitor( |
| ui::LatencyInfo* latency) { |
| return base::WrapUnique( |
| new LatencyInfoSwapPromiseMonitor(latency, nullptr, this)); |
| } |
| |
| ScrollElasticityHelper* LayerTreeHostImpl::CreateScrollElasticityHelper() { |
| DCHECK(!scroll_elasticity_helper_); |
| if (settings_.enable_elastic_overscroll) { |
| scroll_elasticity_helper_.reset( |
| ScrollElasticityHelper::CreateForLayerTreeHostImpl(this)); |
| } |
| return scroll_elasticity_helper_.get(); |
| } |
| |
| bool LayerTreeHostImpl::GetScrollOffsetForLayer(ElementId element_id, |
| gfx::ScrollOffset* offset) { |
| ScrollTree& scroll_tree = active_tree()->property_trees()->scroll_tree; |
| ScrollNode* scroll_node = scroll_tree.FindNodeFromElementId(element_id); |
| if (!scroll_node) |
| return false; |
| *offset = scroll_tree.current_scroll_offset(element_id); |
| return true; |
| } |
| |
| bool LayerTreeHostImpl::ScrollLayerTo(ElementId element_id, |
| const gfx::ScrollOffset& offset) { |
| ScrollTree& scroll_tree = active_tree()->property_trees()->scroll_tree; |
| ScrollNode* scroll_node = scroll_tree.FindNodeFromElementId(element_id); |
| if (!scroll_node) |
| return false; |
| |
| scroll_tree.ScrollBy( |
| scroll_node, |
| ScrollOffsetToVector2dF(offset - |
| scroll_tree.current_scroll_offset(element_id)), |
| active_tree()); |
| return true; |
| } |
| |
| bool LayerTreeHostImpl::ScrollingShouldSwitchtoMainThread() { |
| ScrollTree& scroll_tree = active_tree_->property_trees()->scroll_tree; |
| ScrollNode* scroll_node = scroll_tree.CurrentlyScrollingNode(); |
| |
| if (!scroll_node) |
| return true; |
| |
| for (; scroll_tree.parent(scroll_node); |
| scroll_node = scroll_tree.parent(scroll_node)) { |
| if (!!scroll_node->main_thread_scrolling_reasons) |
| return true; |
| } |
| |
| return false; |
| } |
| |
| void LayerTreeHostImpl::QueueSwapPromiseForMainThreadScrollUpdate( |
| std::unique_ptr<SwapPromise> swap_promise) { |
| swap_promises_for_main_thread_scroll_update_.push_back( |
| std::move(swap_promise)); |
| } |
| |
| void LayerTreeHostImpl::FrameData::AsValueInto( |
| base::trace_event::TracedValue* value) const { |
| value->SetBoolean("has_no_damage", has_no_damage); |
| |
| // Quad data can be quite large, so only dump render passes if we select |
| // viz.quads. |
| bool quads_enabled; |
| TRACE_EVENT_CATEGORY_GROUP_ENABLED(TRACE_DISABLED_BY_DEFAULT("viz.quads"), |
| &quads_enabled); |
| if (quads_enabled) { |
| value->BeginArray("render_passes"); |
| for (size_t i = 0; i < render_passes.size(); ++i) { |
| value->BeginDictionary(); |
| render_passes[i]->AsValueInto(value); |
| value->EndDictionary(); |
| } |
| value->EndArray(); |
| } |
| } |
| |
| DrawMode LayerTreeHostImpl::GetDrawMode() const { |
| if (resourceless_software_draw_) { |
| return DRAW_MODE_RESOURCELESS_SOFTWARE; |
| } else if (layer_tree_frame_sink_->context_provider()) { |
| return DRAW_MODE_HARDWARE; |
| } else { |
| return DRAW_MODE_SOFTWARE; |
| } |
| } |
| |
| static void AppendQuadsToFillScreen( |
| const gfx::Rect& root_scroll_layer_rect, |
| viz::RenderPass* target_render_pass, |
| const RenderSurfaceImpl* root_render_surface, |
| SkColor screen_background_color, |
| const Region& fill_region) { |
| if (!root_render_surface || !SkColorGetA(screen_background_color)) |
| return; |
| if (fill_region.IsEmpty()) |
| return; |
| |
| // Manually create the quad state for the gutter quads, as the root layer |
| // doesn't have any bounds and so can't generate this itself. |
| // TODO(danakj): Make the gutter quads generated by the solid color layer |
| // (make it smarter about generating quads to fill unoccluded areas). |
| |
| gfx::Rect root_target_rect = root_render_surface->content_rect(); |
| float opacity = 1.f; |
| int sorting_context_id = 0; |
| bool are_contents_opaque = SkColorGetA(screen_background_color) == 0xFF; |
| viz::SharedQuadState* shared_quad_state = |
| target_render_pass->CreateAndAppendSharedQuadState(); |
| shared_quad_state->SetAll(gfx::Transform(), root_target_rect, |
| root_target_rect, gfx::RRectF(), root_target_rect, |
| false, are_contents_opaque, opacity, |
| SkBlendMode::kSrcOver, sorting_context_id); |
| |
| for (gfx::Rect screen_space_rect : fill_region) { |
| gfx::Rect visible_screen_space_rect = screen_space_rect; |
| // Skip the quad culler and just append the quads directly to avoid |
| // occlusion checks. |
| auto* quad = |
| target_render_pass->CreateAndAppendDrawQuad<viz::SolidColorDrawQuad>(); |
| quad->SetNew(shared_quad_state, screen_space_rect, |
| visible_screen_space_rect, screen_background_color, false); |
| } |
| } |
| |
| static viz::RenderPass* FindRenderPassById(const viz::RenderPassList& list, |
| viz::RenderPassId id) { |
| auto it = std::find_if( |
| list.begin(), list.end(), |
| [id](const std::unique_ptr<viz::RenderPass>& p) { return p->id == id; }); |
| return it == list.end() ? nullptr : it->get(); |
| } |
| |
| bool LayerTreeHostImpl::HasDamage() const { |
| DCHECK(!active_tree()->needs_update_draw_properties()); |
| DCHECK(CanDraw()); |
| |
| // When touch handle visibility changes there is no visible damage |
| // because touch handles are composited in the browser. However we |
| // still want the browser to be notified that the handles changed |
| // through the |ViewHostMsg_SwapCompositorFrame| IPC so we keep |
| // track of handle visibility changes here. |
| if (active_tree()->HandleVisibilityChanged()) |
| return true; |
| |
| if (!viewport_damage_rect_.IsEmpty()) |
| return true; |
| |
| // If the set of referenced surfaces has changed then we must submit a new |
| // CompositorFrame to update surface references. |
| if (last_draw_referenced_surfaces_ != active_tree()->SurfaceRanges()) |
| return true; |
| |
| // If we have a new LocalSurfaceId, we must always submit a CompositorFrame |
| // because the parent is blocking on us. |
| if (last_draw_local_surface_id_allocation_ != |
| child_local_surface_id_allocator_.GetCurrentLocalSurfaceIdAllocation()) { |
| return true; |
| } |
| |
| const LayerTreeImpl* active_tree = active_tree_.get(); |
| |
| // If the root render surface has no visible damage, then don't generate a |
| // frame at all. |
| const RenderSurfaceImpl* root_surface = active_tree->RootRenderSurface(); |
| bool root_surface_has_visible_damage = |
| root_surface->GetDamageRect().Intersects(root_surface->content_rect()); |
| bool hud_wants_to_draw_ = active_tree->hud_layer() && |
| active_tree->hud_layer()->IsAnimatingHUDContents(); |
| |
| return root_surface_has_visible_damage || |
| active_tree_->property_trees()->effect_tree.HasCopyRequests() || |
| hud_wants_to_draw_; |
| } |
| |
// Builds |frame|'s render pass list: updates damage tracking, early-outs
// when there is no damage, creates one render pass per contributing render
// surface, appends quads for every surface and layer while accumulating
// checkerboard/tile statistics, fills the background gutter, and prunes
// unused passes. Returns DRAW_SUCCESS or an aborted DrawResult when
// checkerboarding/missing-content rules say this frame should not be drawn.
DrawResult LayerTreeHostImpl::CalculateRenderPasses(FrameData* frame) {
  DCHECK(frame->render_passes.empty());
  DCHECK(CanDraw());
  DCHECK(!active_tree_->LayerListIsEmpty());

  // For now, we use damage tracking to compute a global scissor. To do this, we
  // must compute all damage tracking before drawing anything, so that we know
  // the root damage rect. The root damage rect is then used to scissor each
  // surface.
  DamageTracker::UpdateDamageTracking(active_tree_.get(),
                                      active_tree_->GetRenderSurfaceList());

  // No damage at all: record an empty frame and succeed without building any
  // render passes.
  if (HasDamage()) {
    consecutive_frame_with_damage_count_++;
  } else {
    TRACE_EVENT0("cc",
                 "LayerTreeHostImpl::CalculateRenderPasses::EmptyDamageRect");
    frame->has_no_damage = true;
    DCHECK(!resourceless_software_draw_);
    consecutive_frame_with_damage_count_ = 0;
    return DRAW_SUCCESS;
  }

  TRACE_EVENT_BEGIN2("cc,benchmark", "LayerTreeHostImpl::CalculateRenderPasses",
                     "render_surface_list.size()",
                     static_cast<uint64_t>(frame->render_surface_list->size()),
                     "RequiresHighResToDraw", RequiresHighResToDraw());

  // HandleVisibilityChanged contributed to the above damage check, so reset it
  // now that we're going to draw.
  // TODO(jamwalla): only call this if we are sure the frame draws. Tracked in
  // crbug.com/805673.
  active_tree_->ResetHandleVisibilityChanged();

  // Create the render passes in dependency order.
  size_t render_surface_list_size = frame->render_surface_list->size();
  for (size_t i = 0; i < render_surface_list_size; ++i) {
    // Iterate the surface list back-to-front so each pass is created before
    // any pass that draws it.
    size_t surface_index = render_surface_list_size - 1 - i;
    RenderSurfaceImpl* render_surface =
        (*frame->render_surface_list)[surface_index];

    bool is_root_surface =
        render_surface->EffectTreeIndex() == EffectTree::kContentsRootNodeId;
    bool should_draw_into_render_pass =
        is_root_surface || render_surface->contributes_to_drawn_surface() ||
        render_surface->HasCopyRequest() ||
        render_surface->ShouldCacheRenderSurface();
    if (should_draw_into_render_pass)
      frame->render_passes.push_back(render_surface->CreateRenderPass());
  }

  // Damage rects for non-root passes aren't meaningful, so set them to be
  // equal to the output rect.
  for (size_t i = 0; i + 1 < frame->render_passes.size(); ++i) {
    viz::RenderPass* pass = frame->render_passes[i].get();
    pass->damage_rect = pass->output_rect;
  }

  // When we are displaying the HUD, change the root damage rect to cover the
  // entire root surface. This will disable partial-swap/scissor optimizations
  // that would prevent the HUD from updating, since the HUD does not cause
  // damage itself, to prevent it from messing with damage visualizations. Since
  // damage visualizations are done off the LayerImpls and RenderSurfaceImpls,
  // changing the RenderPass does not affect them.
  if (active_tree_->hud_layer()) {
    viz::RenderPass* root_pass = frame->render_passes.back().get();
    root_pass->damage_rect = root_pass->output_rect;
  }

  // Grab this region here before iterating layers. Taking copy requests from
  // the layers while constructing the render passes will dirty the render
  // surface layer list and this unoccluded region, flipping the dirty bit to
  // true, and making us able to query for it without doing
  // UpdateDrawProperties again. The value inside the Region is not actually
  // changed until UpdateDrawProperties happens, so a reference to it is safe.
  const Region& unoccluded_screen_space_region =
      active_tree_->UnoccludedScreenSpaceRegion();

  // Typically when we are missing a texture and use a checkerboard quad, we
  // still draw the frame. However when the layer being checkerboarded is moving
  // due to an impl-animation, we drop the frame to avoid flashing due to the
  // texture suddenly appearing in the future.
  DrawResult draw_result = DRAW_SUCCESS;

  const DrawMode draw_mode = GetDrawMode();

  int num_missing_tiles = 0;
  int num_incomplete_tiles = 0;
  int64_t checkerboarded_no_recording_content_area = 0;
  int64_t checkerboarded_needs_raster_content_area = 0;
  int64_t total_visible_area = 0;
  bool have_copy_request =
      active_tree()->property_trees()->effect_tree.HasCopyRequests();
  bool have_missing_animated_tiles = false;

  // Walk the effect tree in draw order, appending quads for each target
  // surface, contributing surface, and layer, and accumulating per-layer
  // stats into the counters above.
  for (EffectTreeLayerListIterator it(active_tree());
       it.state() != EffectTreeLayerListIterator::State::END; ++it) {
    auto target_render_pass_id = it.target_render_surface()->id();
    viz::RenderPass* target_render_pass =
        FindRenderPassById(frame->render_passes, target_render_pass_id);

    AppendQuadsData append_quads_data;

    if (it.state() == EffectTreeLayerListIterator::State::TARGET_SURFACE) {
      RenderSurfaceImpl* render_surface = it.target_render_surface();
      if (render_surface->HasCopyRequest()) {
        active_tree()
            ->property_trees()
            ->effect_tree.TakeCopyRequestsAndTransformToSurface(
                render_surface->EffectTreeIndex(),
                &target_render_pass->copy_requests);
      }
    } else if (it.state() ==
               EffectTreeLayerListIterator::State::CONTRIBUTING_SURFACE) {
      RenderSurfaceImpl* render_surface = it.current_render_surface();
      if (render_surface->contributes_to_drawn_surface()) {
        render_surface->AppendQuads(draw_mode, target_render_pass,
                                    &append_quads_data);
      }
    } else if (it.state() == EffectTreeLayerListIterator::State::LAYER) {
      LayerImpl* layer = it.current_layer();
      if (layer->WillDraw(draw_mode, &resource_provider_)) {
        DCHECK_EQ(active_tree_.get(), layer->layer_tree_impl());

        frame->will_draw_layers.push_back(layer);
        if (layer->may_contain_video())
          frame->may_contain_video = true;

        layer->AppendQuads(target_render_pass, &append_quads_data);
      }

      rendering_stats_instrumentation_->AddVisibleContentArea(
          append_quads_data.visible_layer_area);
      rendering_stats_instrumentation_->AddApproximatedVisibleContentArea(
          append_quads_data.approximated_visible_content_area);
      rendering_stats_instrumentation_->AddCheckerboardedVisibleContentArea(
          append_quads_data.checkerboarded_visible_content_area);
      rendering_stats_instrumentation_->AddCheckerboardedNoRecordingContentArea(
          append_quads_data.checkerboarded_no_recording_content_area);
      rendering_stats_instrumentation_->AddCheckerboardedNeedsRasterContentArea(
          append_quads_data.checkerboarded_needs_raster_content_area);

      num_missing_tiles += append_quads_data.num_missing_tiles;
      num_incomplete_tiles += append_quads_data.num_incomplete_tiles;
      checkerboarded_no_recording_content_area +=
          append_quads_data.checkerboarded_no_recording_content_area;
      checkerboarded_needs_raster_content_area +=
          append_quads_data.checkerboarded_needs_raster_content_area;
      total_visible_area += append_quads_data.visible_layer_area;
      if (append_quads_data.num_missing_tiles > 0) {
        have_missing_animated_tiles |=
            layer->screen_space_transform_is_animating();
      }
    }
    frame->activation_dependencies.insert(
        frame->activation_dependencies.end(),
        append_quads_data.activation_dependencies.begin(),
        append_quads_data.activation_dependencies.end());
    // Keep the maximum requested deadline across all appended quads.
    if (append_quads_data.deadline_in_frames) {
      if (!frame->deadline_in_frames) {
        frame->deadline_in_frames = append_quads_data.deadline_in_frames;
      } else {
        frame->deadline_in_frames = std::max(
            *frame->deadline_in_frames, *append_quads_data.deadline_in_frames);
      }
    }
    frame->use_default_lower_bound_deadline |=
        append_quads_data.use_default_lower_bound_deadline;
  }

  // If CommitToActiveTree() is true, then we wait to draw until
  // NotifyReadyToDraw. That means we're in as good shape as is possible now,
  // so there's no reason to stop the draw now (and this is not supported by
  // SingleThreadProxy).
  if (have_missing_animated_tiles && !CommitToActiveTree())
    draw_result = DRAW_ABORTED_CHECKERBOARD_ANIMATIONS;

  // When we require high res to draw, abort the draw (almost) always. This does
  // not cause the scheduler to do a main frame, instead it will continue to try
  // drawing until we finally complete, so the copy request will not be lost.
  // TODO(weiliangc): Remove RequiresHighResToDraw. crbug.com/469175
  if (num_incomplete_tiles || num_missing_tiles) {
    if (RequiresHighResToDraw())
      draw_result = DRAW_ABORTED_MISSING_HIGH_RES_CONTENT;
  }

  // When doing a resourceless software draw, we don't have control over the
  // surface the compositor draws to, so even though the frame may not be
  // complete, the previous frame has already been potentially lost, so an
  // incomplete frame is better than nothing, so this takes highest precidence.
  if (resourceless_software_draw_)
    draw_result = DRAW_SUCCESS;

#if DCHECK_IS_ON()
  // Sanity-check the generated passes: every quad has a shared quad state and
  // the root pass is anchored at the origin.
  for (const auto& render_pass : frame->render_passes) {
    for (auto* quad : render_pass->quad_list)
      DCHECK(quad->shared_quad_state);
  }
  DCHECK(frame->render_passes.back()->output_rect.origin().IsOrigin());
#endif
  bool has_transparent_background =
      SkColorGetA(active_tree_->background_color()) != SK_AlphaOPAQUE;
  if (!has_transparent_background) {
    frame->render_passes.back()->has_transparent_background = false;
    AppendQuadsToFillScreen(
        active_tree_->RootScrollLayerDeviceViewportBounds(),
        frame->render_passes.back().get(), active_tree_->RootRenderSurface(),
        active_tree_->background_color(), unoccluded_screen_space_region);
  }

  RemoveRenderPasses(frame);
  // If we're making a frame to draw, it better have at least one render pass.
  DCHECK(!frame->render_passes.empty());

  if (have_copy_request) {
    // Any copy requests left in the tree are not going to get serviced, and
    // should be aborted.
    active_tree()->property_trees()->effect_tree.ClearCopyRequests();

    // Draw properties depend on copy requests.
    active_tree()->set_needs_update_draw_properties();
  }

  // Report checkerboard stats to UKM when a recorder is attached.
  if (ukm_manager_) {
    ukm_manager_->AddCheckerboardStatsForFrame(
        checkerboarded_no_recording_content_area +
            checkerboarded_needs_raster_content_area,
        num_missing_tiles, total_visible_area);
  }

  if (active_tree_->has_ever_been_drawn()) {
    UMA_HISTOGRAM_COUNTS_100(
        "Compositing.RenderPass.AppendQuadData.NumMissingTiles",
        num_missing_tiles);
    UMA_HISTOGRAM_COUNTS_100(
        "Compositing.RenderPass.AppendQuadData.NumIncompleteTiles",
        num_incomplete_tiles);
    UMA_HISTOGRAM_COUNTS_1M(
        "Compositing.RenderPass.AppendQuadData."
        "CheckerboardedNoRecordingContentArea",
        checkerboarded_no_recording_content_area);
    UMA_HISTOGRAM_COUNTS_1M(
        "Compositing.RenderPass.AppendQuadData."
        "CheckerboardedNeedRasterContentArea",
        checkerboarded_needs_raster_content_area);
  }

  TRACE_EVENT_END2("cc,benchmark", "LayerTreeHostImpl::CalculateRenderPasses",
                   "draw_result", draw_result, "missing tiles",
                   num_missing_tiles);

  // Draw has to be successful to not drop the copy request layer.
  // When we have a copy request for a layer, we need to draw even if there
  // would be animating checkerboards, because failing under those conditions
  // triggers a new main frame, which may cause the copy request layer to be
  // destroyed.
  // TODO(weiliangc): Test copy request w/ LayerTreeFrameSink recreation. Would
  // trigger this DCHECK.
  DCHECK(!have_copy_request || draw_result == DRAW_SUCCESS);

  // TODO(crbug.com/564832): This workaround to prevent creating unnecessarily
  // persistent render passes. When a copy request is made, it may force a
  // separate render pass for the layer, which will persist until a new commit
  // removes it. Force a commit after copy requests, to remove extra render
  // passes.
  if (have_copy_request)
    client_->SetNeedsCommitOnImplThread();

  return draw_result;
}
| |
// Called when an impl-side scroll offset animation has ticked: requests a
// commit and asks the client to re-evaluate tree priority.
void LayerTreeHostImpl::DidAnimateScrollOffset() {
  client_->SetNeedsCommitOnImplThread();
  client_->RenewTreePriority();
}
| |
// Accumulates (unions) external viewport damage; the accumulated rect is
// consumed and reset in PrepareToDraw().
void LayerTreeHostImpl::SetViewportDamage(const gfx::Rect& damage_rect) {
  viewport_damage_rect_.Union(damage_rect);
}
| |
// Performs an impl-side invalidation: creates a pending tree (unless we
// commit directly to the active tree) and updates the sync tree as if a
// commit had happened.
void LayerTreeHostImpl::InvalidateContentOnImplSide() {
  DCHECK(!pending_tree_);
  // Invalidation should never be ran outside the impl frame for non
  // synchronous compositor mode. For devices that use synchronous compositor,
  // e.g. Android Webview, the assertion is not guaranteed because it may ask
  // for a frame at any time.
  DCHECK(impl_thread_phase_ == ImplThreadPhase::INSIDE_IMPL_FRAME ||
         settings_.using_synchronous_renderer_compositor);

  if (!CommitToActiveTree())
    CreatePendingTree();

  UpdateSyncTreeAfterCommitOrImplSideInvalidation();
}
| |
// Forwards an invalidation to the frame sink and tells the skipped-frame
// tracker that a frame was produced.
void LayerTreeHostImpl::InvalidateLayerTreeFrameSink(bool needs_redraw) {
  DCHECK(layer_tree_frame_sink());

  layer_tree_frame_sink()->Invalidate(needs_redraw);
  skipped_frame_tracker_.DidProduceFrame();
}
| |
// Prepares |frame| for drawing: reports memory/layer-count metrics, ticks
// worklet animations, updates draw properties, folds accumulated viewport
// damage into the root surface's damage tracker, and builds the render pass
// list via CalculateRenderPasses(). Returns that call's DrawResult.
DrawResult LayerTreeHostImpl::PrepareToDraw(FrameData* frame) {
  TRACE_EVENT1("cc", "LayerTreeHostImpl::PrepareToDraw", "SourceFrameNumber",
               active_tree_->source_frame_number());
  TRACE_EVENT_WITH_FLOW1("viz,benchmark", "Graphics.Pipeline",
                         TRACE_ID_GLOBAL(CurrentBeginFrameArgs().trace_id),
                         TRACE_EVENT_FLAG_FLOW_IN | TRACE_EVENT_FLAG_FLOW_OUT,
                         "step", "GenerateRenderPass");
  if (input_handler_client_)
    input_handler_client_->ReconcileElasticOverscrollAndRootScroll();

  // |client_name| is used for various UMA histograms below.
  // GetClientNameForMetrics only returns one non-null value over the lifetime
  // of the process, so the histogram names are runtime constant.
  const char* client_name = GetClientNameForMetrics();
  if (client_name) {
    // Sum raster-source and GPU tiling memory across all picture layers.
    size_t total_memory_in_bytes = 0;
    size_t total_gpu_memory_for_tilings_in_bytes = 0;
    for (const PictureLayerImpl* layer : active_tree()->picture_layers()) {
      total_memory_in_bytes += layer->GetRasterSource()->GetMemoryUsage();
      total_gpu_memory_for_tilings_in_bytes += layer->GPUMemoryUsageInBytes();
    }
    if (total_memory_in_bytes != 0) {
      UMA_HISTOGRAM_COUNTS_1M(
          base::StringPrintf("Compositing.%s.PictureMemoryUsageKb",
                             client_name),
          base::saturated_cast<int>(total_memory_in_bytes / 1024));
    }

    UMA_HISTOGRAM_CUSTOM_COUNTS(
        base::StringPrintf("Compositing.%s.NumActiveLayers", client_name),
        base::saturated_cast<int>(active_tree_->NumLayers()), 1, 400, 20);

    UMA_HISTOGRAM_CUSTOM_COUNTS(
        base::StringPrintf("Compositing.%s.NumActivePictureLayers",
                           client_name),
        base::saturated_cast<int>(active_tree_->picture_layers().size()), 1,
        400, 20);

    // TODO(yigu): Maybe we should use the same check above. Need to figure out
    // why exactly we skip 0.
    if (!active_tree()->picture_layers().empty()) {
      UMA_HISTOGRAM_CUSTOM_COUNTS(
          base::StringPrintf("Compositing.%s.GPUMemoryForTilingsInKb",
                             client_name),
          base::saturated_cast<int>(total_gpu_memory_for_tilings_in_bytes /
                                    1024),
          1, kGPUMemoryForTilingsLargestBucketKb,
          kGPUMemoryForTilingsBucketCount);
    }
  }

  // Tick worklet animations here, just before draw, to give animation worklets
  // as much time as possible to produce their output for this frame. Note that
  // an animation worklet is asked to produce its output at the beginning of the
  // frame along side other animations but its output arrives asynchronously so
  // we tick worklet animations and apply that output here instead.
  mutator_host_->TickWorkletAnimations();

  bool ok = active_tree_->UpdateDrawProperties();
  DCHECK(ok) << "UpdateDrawProperties failed during draw";

  // This will cause NotifyTileStateChanged() to be called for any tiles that
  // completed, which will add damage for visible tiles to the frame for them so
  // they appear as part of the current frame being drawn.
  tile_manager_.CheckForCompletedTasks();

  // Reset the per-frame state before building the new pass list.
  frame->render_surface_list = &active_tree_->GetRenderSurfaceList();
  frame->render_passes.clear();
  frame->will_draw_layers.clear();
  frame->has_no_damage = false;
  frame->may_contain_video = false;

  // Transfer accumulated viewport damage (see SetViewportDamage()) into the
  // root surface's damage tracker and clear the accumulator.
  if (active_tree_->RootRenderSurface()) {
    active_tree_->RootRenderSurface()->damage_tracker()->AddDamageNextUpdate(
        viewport_damage_rect_);
    viewport_damage_rect_ = gfx::Rect();
  }

  DrawResult draw_result = CalculateRenderPasses(frame);
  if (draw_result != DRAW_SUCCESS) {
    DCHECK(!resourceless_software_draw_);
    return draw_result;
  }

  // If we return DRAW_SUCCESS, then we expect DrawLayers() to be called before
  // this function is called again.
  return DRAW_SUCCESS;
}
| |
// Prunes |frame|'s pass list: drops RenderPassDrawQuads that reference a
// pass not present in the list, removes empty passes, and finally removes
// passes that nothing references (the root pass is always kept).
void LayerTreeHostImpl::RemoveRenderPasses(FrameData* frame) {
  // There is always at least a root RenderPass.
  DCHECK_GE(frame->render_passes.size(), 1u);

  // A set of RenderPasses that we have seen.
  base::flat_set<viz::RenderPassId> pass_exists;
  // A set of viz::RenderPassDrawQuads that we have seen (stored by the
  // RenderPasses they refer to).
  base::flat_map<viz::RenderPassId, int> pass_references;

  // Iterate RenderPasses in draw order, removing empty render passes (except
  // the root RenderPass).
  for (size_t i = 0; i < frame->render_passes.size(); ++i) {
    viz::RenderPass* pass = frame->render_passes[i].get();

    // Remove orphan viz::RenderPassDrawQuads.
    for (auto it = pass->quad_list.begin(); it != pass->quad_list.end();) {
      if (it->material != viz::DrawQuad::Material::kRenderPass) {
        ++it;
        continue;
      }
      const viz::RenderPassDrawQuad* quad =
          viz::RenderPassDrawQuad::MaterialCast(*it);
      // If the referenced RenderPass still exists, save a reference so we
      // know there's a quad using it; otherwise the quad is an orphan and
      // can be removed.
      if (pass_exists.count(quad->render_pass_id)) {
        pass_references[quad->render_pass_id]++;
        ++it;
      } else {
        it = pass->quad_list.EraseAndInvalidateAllPointers(it);
      }
    }

    if (i == frame->render_passes.size() - 1) {
      // Don't remove the root RenderPass.
      break;
    }

    // A pass with no quads, no copy requests, and no filters draws nothing
    // and can be dropped.
    if (pass->quad_list.empty() && pass->copy_requests.empty() &&
        pass->filters.IsEmpty() && pass->backdrop_filters.IsEmpty()) {
      // Remove the pass and decrement |i| to counter the for loop's increment,
      // so we don't skip the next pass in the loop.
      frame->render_passes.erase(frame->render_passes.begin() + i);
      --i;
      continue;
    }

    pass_exists.insert(pass->id);
  }

  // Remove RenderPasses that are not referenced by any draw quads or copy
  // requests (except the root RenderPass).
  for (size_t i = 0; i < frame->render_passes.size() - 1; ++i) {
    // Iterating from the back of the list to the front, skipping over the
    // back-most (root) pass, in order to remove each qualified RenderPass, and
    // drop references to earlier RenderPasses allowing them to be removed too.
    viz::RenderPass* pass =
        frame->render_passes[frame->render_passes.size() - 2 - i].get();
    if (!pass->copy_requests.empty())
      continue;
    if (pass_references[pass->id])
      continue;

    // Removing this pass releases its references to the passes its quads
    // pointed at, which may make them removable in a later iteration.
    for (auto it = pass->quad_list.begin(); it != pass->quad_list.end(); ++it) {
      if (it->material != viz::DrawQuad::Material::kRenderPass)
        continue;
      const viz::RenderPassDrawQuad* quad =
          viz::RenderPassDrawQuad::MaterialCast(*it);
      pass_references[quad->render_pass_id]--;
    }

    frame->render_passes.erase(frame->render_passes.end() - 2 - i);
    --i;
  }
}
| |
// Test-only: applies a zero-byte memory policy, driving the tile manager
// through its zero-limit path (see UpdateTileManagerMemoryPolicy).
void LayerTreeHostImpl::EvictTexturesForTesting() {
  UpdateTileManagerMemoryPolicy(ManagedMemoryPolicy(0));
}
| |
// Test-only hook; not supported by this implementation, so reaching it is a
// bug (presumably overridden where it is meaningful — TODO confirm).
void LayerTreeHostImpl::BlockNotifyReadyToActivateForTesting(bool block) {
  NOTREACHED();
}
| |
// Test-only hook; not supported by this implementation, so reaching it is a
// bug (presumably overridden where it is meaningful — TODO confirm).
void LayerTreeHostImpl::BlockImplSideInvalidationRequestsForTesting(
    bool block) {
  NOTREACHED();
}
| |
| void LayerTreeHostImpl::ResetTreesForTesting() { |
| if (active_tree_) |
| active_tree_->DetachLayers(); |
| active_tree_ = |
| std::make_unique<LayerTreeImpl>(this, active_tree()->page_scale_factor(), |
| active_tree()->top_controls_shown_ratio(), |
| active_tree()->elastic_overscroll()); |
| active_tree_->property_trees()->is_active = true; |
| if (pending_tree_) |
| pending_tree_->DetachLayers(); |
| pending_tree_ = nullptr; |
| pending_tree_duration_timer_ = nullptr; |
| if (recycle_tree_) |
| recycle_tree_->DetachLayers(); |
| recycle_tree_ = nullptr; |
| } |
| |
// Test-only: reports the FPS counter's current frame number.
size_t LayerTreeHostImpl::SourceAnimationFrameNumberForTesting() const {
  return fps_counter_->current_frame_number();
}
| |
// Applies |policy| to the global tile state: computes hard/soft memory
// limits (zero when invisible), updates the memory-limit policy and resource
// count limit, toggles context visibility and image-decode retention based
// on the hard limit, and pushes the soft limit to the resource pool.
void LayerTreeHostImpl::UpdateTileManagerMemoryPolicy(
    const ManagedMemoryPolicy& policy) {
  if (!resource_pool_)
    return;

  // Limits default to zero; only a visible host with a positive budget gets
  // non-zero limits. The soft limit is a configured percentage of the hard
  // limit, used for prepaint.
  global_tile_state_.hard_memory_limit_in_bytes = 0;
  global_tile_state_.soft_memory_limit_in_bytes = 0;
  if (visible_ && policy.bytes_limit_when_visible > 0) {
    global_tile_state_.hard_memory_limit_in_bytes =
        policy.bytes_limit_when_visible;
    global_tile_state_.soft_memory_limit_in_bytes =
        (static_cast<int64_t>(global_tile_state_.hard_memory_limit_in_bytes) *
         settings_.max_memory_for_prepaint_percentage) /
        100;
  }
  // When invisible, allow nothing regardless of the policy's cutoff.
  global_tile_state_.memory_limit_policy =
      ManagedMemoryPolicy::PriorityCutoffToTileMemoryLimitPolicy(
          visible_ ? policy.priority_cutoff_when_visible
                   : gpu::MemoryAllocation::CUTOFF_ALLOW_NOTHING);
  global_tile_state_.num_resources_limit = policy.num_resources_limit;

  if (global_tile_state_.hard_memory_limit_in_bytes > 0) {
    // If |global_tile_state_.hard_memory_limit_in_bytes| is greater than 0, we
    // consider our contexts visible. Notify the contexts here. We handle
    // becoming invisible in NotifyAllTileTasksComplete to avoid interrupting
    // running work.
    SetContextVisibility(true);

    // If |global_tile_state_.hard_memory_limit_in_bytes| is greater than 0, we
    // allow the image decode controller to retain resources. We handle the
    // equal to 0 case in NotifyAllTileTasksComplete to avoid interrupting
    // running work.
    if (image_decode_cache_)
      image_decode_cache_->SetShouldAggressivelyFreeResources(false);
  } else {
    // When the memory policy is set to zero, its important to release any
    // decoded images cached by the tracker. But we can not re-checker any
    // images that have been displayed since the resources, if held by the
    // browser, may be re-used. Which is why its important to maintain the
    // decode policy tracking.
    bool can_clear_decode_policy_tracking = false;
    tile_manager_.ClearCheckerImageTracking(can_clear_decode_policy_tracking);
  }

  DCHECK(resource_pool_);
  // Soft limit is used for resource pool such that memory returns to soft
  // limit after going over.
  resource_pool_->SetResourceUsageLimits(
      global_tile_state_.soft_memory_limit_in_bytes,
      global_tile_state_.num_resources_limit);

  DidModifyTilePriorities();
}
| |
// Marks tile priorities as dirty (here and in the tile manager) and asks the
// client to schedule a PrepareTiles().
void LayerTreeHostImpl::DidModifyTilePriorities() {
  // Mark priorities as dirty and schedule a PrepareTiles().
  tile_priorities_dirty_ = true;
  tile_manager_.DidModifyTilePriorities();
  client_->SetNeedsPrepareTilesOnImplThread();
}
| |
| std::unique_ptr<RasterTilePriorityQueue> LayerTreeHostImpl::BuildRasterQueue( |
| TreePriority tree_priority, |
| RasterTilePriorityQueue::Type type) { |
| TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("cc.debug"), |
| "LayerTreeHostImpl::BuildRasterQueue"); |
| |
| return RasterTilePriorityQueue::Create(active_tree_->picture_layers(), |
| pending_tree_ |
| ? pending_tree_->picture_layers() |
| : std::vector<PictureLayerImpl*>(), |
| tree_priority, type); |
| } |
| |
| std::unique_ptr<EvictionTilePriorityQueue> |
| LayerTreeHostImpl::BuildEvictionQueue(TreePriority tree_priority) { |
| TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("cc.debug"), |
| "LayerTreeHostImpl::BuildEvictionQueue"); |
| |
| std::unique_ptr<EvictionTilePriorityQueue> queue( |
| new EvictionTilePriorityQueue); |
| queue->Build(active_tree_->picture_layers(), |
| pending_tree_ ? pending_tree_->picture_layers() |
| : std::vector<PictureLayerImpl*>(), |
| tree_priority); |
| return queue; |
| } |
| |
void LayerTreeHostImpl::SetIsLikelyToRequireADraw(
    bool is_likely_to_require_a_draw) {
  // Proactively tell the scheduler that we expect to draw within each vsync
  // until we get all the tiles ready to draw. If we happen to miss a required
  // for draw tile here, then we will miss telling the scheduler each frame that
  // we intend to draw so it may make worse scheduling decisions.
  // The flag is consumed elsewhere (e.g. cleared in NotifyReadyToDraw()).
  is_likely_to_require_a_draw_ = is_likely_to_require_a_draw;
}
| |
| const gfx::ColorSpace& LayerTreeHostImpl::GetRasterColorSpace() const { |
| int dummy; |
| return GetRasterColorSpaceAndId(&dummy); |
| } |
| |
| const gfx::ColorSpace& LayerTreeHostImpl::GetRasterColorSpaceAndId( |
| int* id) const { |
| const gfx::ColorSpace* result = nullptr; |
| // The pending tree will have the most recently updated color space, so |
| // prefer that. |
| if (pending_tree_) { |
| result = &pending_tree_->raster_color_space(); |
| *id = pending_tree_->raster_color_space_id(); |
| } else if (active_tree_) { |
| result = &active_tree_->raster_color_space(); |
| *id = active_tree_->raster_color_space_id(); |
| } |
| |
| // If we are likely to software composite the resource, we use sRGB because |
| // software compositing is unable to perform color conversion. Also always |
| // specify a color space if color correct rasterization is requested |
| // (not specifying a color space indicates that no color conversion is |
| // required). |
| if (!layer_tree_frame_sink_ || !layer_tree_frame_sink_->context_provider() || |
| !result || !result->IsValid()) { |
| result = &default_color_space_; |
| *id = default_color_space_id_; |
| } |
| return *result; |
| } |
| |
| void LayerTreeHostImpl::RequestImplSideInvalidationForCheckerImagedTiles() { |
| // When using impl-side invalidation for checker-imaging, a pending tree does |
| // not need to be flushed as an independent update through the pipeline. |
| bool needs_first_draw_on_activation = false; |
| client_->NeedsImplSideInvalidation(needs_first_draw_on_activation); |
| } |
| |
| size_t LayerTreeHostImpl::GetFrameIndexForImage(const PaintImage& paint_image, |
| WhichTree tree) const { |
| if (!paint_image.ShouldAnimate()) |
| return PaintImage::kDefaultFrameIndex; |
| |
| return image_animation_controller_.GetFrameIndexForImage( |
| paint_image.stable_id(), tree); |
| } |
| |
void LayerTreeHostImpl::NotifyReadyToActivate() {
  // Raster work for the pending tree is done: stop the raster-duration timer
  // and let the client schedule activation.
  pending_tree_raster_duration_timer_.reset();
  client_->NotifyReadyToActivate();
}
| |
void LayerTreeHostImpl::NotifyReadyToDraw() {
  // All tiles required for drawing are now ready.
  // Tiles that are ready will cause NotifyTileStateChanged() to be called so we
  // don't need to schedule a draw here. Just stop WillBeginImplFrame() from
  // causing optimistic requests to draw a frame.
  is_likely_to_require_a_draw_ = false;

  client_->NotifyReadyToDraw();
}
| |
| void LayerTreeHostImpl::NotifyAllTileTasksCompleted() { |
| // The tile tasks started by the most recent call to PrepareTiles have |
| // completed. Now is a good time to free resources if necessary. |
| if (global_tile_state_.hard_memory_limit_in_bytes == 0) { |
| // Free image decode controller resources before notifying the |
| // contexts of visibility change. This ensures that the imaged decode |
| // controller has released all Skia refs at the time Skia's cleanup |
| // executes (within worker context's cleanup). |
| if (image_decode_cache_) |
| image_decode_cache_->SetShouldAggressivelyFreeResources(true); |
| SetContextVisibility(false); |
| } |
| } |
| |
| void LayerTreeHostImpl::NotifyTileStateChanged(const Tile* tile) { |
| TRACE_EVENT0("cc", "LayerTreeHostImpl::NotifyTileStateChanged"); |
| |
| LayerImpl* layer_impl = nullptr; |
| |
| // We must have a pending or active tree layer here, since the layer is |
| // guaranteed to outlive its tiles. |
| if (tile->tiling()->tree() == WhichTree::PENDING_TREE) |
| layer_impl = pending_tree_->FindPendingTreeLayerById(tile->layer_id()); |
| else |
| layer_impl = active_tree_->FindActiveTreeLayerById(tile->layer_id()); |
| |
| layer_impl->NotifyTileStateChanged(tile); |
| |
| if (!client_->IsInsideDraw() && tile->required_for_draw()) { |
| // The LayerImpl::NotifyTileStateChanged() should damage the layer, so this |
| // redraw will make those tiles be displayed. |
| SetNeedsRedraw(); |
| } |
| } |
| |
| void LayerTreeHostImpl::SetMemoryPolicy(const ManagedMemoryPolicy& policy) { |
| DCHECK(task_runner_provider_->IsImplThread()); |
| |
| SetManagedMemoryPolicy(policy); |
| |
| // This is short term solution to synchronously drop tile resources when |
| // using synchronous compositing to avoid memory usage regression. |
| // TODO(boliu): crbug.com/499004 to track removing this. |
| if (!policy.bytes_limit_when_visible && resource_pool_ && |
| settings_.using_synchronous_renderer_compositor) { |
| ReleaseTileResources(); |
| CleanUpTileManagerResources(); |
| |
| // Force a call to NotifyAllTileTasks completed - otherwise this logic may |
| // be skipped if no work was enqueued at the time the tile manager was |
| // destroyed. |
| NotifyAllTileTasksCompleted(); |
| |
| CreateTileManagerResources(); |
| RecreateTileResources(); |
| } |
| } |
| |
void LayerTreeHostImpl::SetTreeActivationCallback(
    base::RepeatingClosure callback) {
  DCHECK(task_runner_provider_->IsImplThread());
  // Replaces any previously-set callback wholesale; a null closure clears it.
  tree_activation_callback_ = std::move(callback);
}
| |
| void LayerTreeHostImpl::SetManagedMemoryPolicy( |
| const ManagedMemoryPolicy& policy) { |
| if (cached_managed_memory_policy_ == policy) |
| return; |
| |
| ManagedMemoryPolicy old_policy = ActualManagedMemoryPolicy(); |
| cached_managed_memory_policy_ = policy; |
| ManagedMemoryPolicy actual_policy = ActualManagedMemoryPolicy(); |
| |
| if (old_policy == actual_policy) |
| return; |
| |
| UpdateTileManagerMemoryPolicy(actual_policy); |
| |
| // If there is already enough memory to draw everything imaginable and the |
| // new memory limit does not change this, then do not re-commit. Don't bother |
| // skipping commits if this is not visible (commits don't happen when not |
| // visible, there will almost always be a commit when this becomes visible). |
| bool needs_commit = true; |
| if (visible() && |
| actual_policy.bytes_limit_when_visible >= max_memory_needed_bytes_ && |
| old_policy.bytes_limit_when_visible >= max_memory_needed_bytes_ && |
| actual_policy.priority_cutoff_when_visible == |
| old_policy.priority_cutoff_when_visible) { |
| needs_commit = false; |
| } |
| |
| if (needs_commit) |
| client_->SetNeedsCommitOnImplThread(); |
| } |
| |
| void LayerTreeHostImpl::SetExternalTilePriorityConstraints( |
| const gfx::Rect& viewport_rect, |
| const gfx::Transform& transform) { |
| const bool tile_priority_params_changed = |
| viewport_rect_for_tile_priority_ != viewport_rect; |
| viewport_rect_for_tile_priority_ = viewport_rect; |
| |
| if (tile_priority_params_changed) { |
| active_tree_->set_needs_update_draw_properties(); |
| if (pending_tree_) |
| pending_tree_->set_needs_update_draw_properties(); |
| |
| // Compositor, not LayerTreeFrameSink, is responsible for setting damage |
| // and triggering redraw for constraint changes. |
| SetFullViewportDamage(); |
| SetNeedsRedraw(); |
| } |
| } |
| |
void LayerTreeHostImpl::DidReceiveCompositorFrameAck() {
  // Forward the frame ack to the client on the impl thread.
  client_->DidReceiveCompositorFrameAckOnImplThread();
}
| |
// FrameTokenInfo bundles a submitted frame's token with the BeginFrame time
// it was generated at and the presentation-time callbacks to run once that
// frame has been presented (consumed in DidPresentCompositorFrame()).
LayerTreeHostImpl::FrameTokenInfo::FrameTokenInfo(
    uint32_t token,
    base::TimeTicks cc_frame_time,
    std::vector<LayerTreeHost::PresentationTimeCallback> callbacks)
    : token(token),
      cc_frame_time(cc_frame_time),
      callbacks(std::move(callbacks)) {}

// Move-only value type; callbacks are not copyable.
LayerTreeHostImpl::FrameTokenInfo::FrameTokenInfo(FrameTokenInfo&&) = default;
LayerTreeHostImpl::FrameTokenInfo::~FrameTokenInfo() = default;
| |
| void LayerTreeHostImpl::DidPresentCompositorFrame( |
| uint32_t frame_token, |
| const gfx::PresentationFeedback& feedback) { |
| std::vector<LayerTreeHost::PresentationTimeCallback> all_callbacks; |
| while (!frame_token_infos_.empty()) { |
| auto info = frame_token_infos_.begin(); |
| if (viz::FrameTokenGT(info->token, frame_token)) |
| break; |
| |
| // Update compositor frame latency and smoothness stats only for frames |
| // that caused on-screen damage. |
| if (info->token == frame_token) |
| frame_metrics_.AddFrameDisplayed(info->cc_frame_time, feedback.timestamp); |
| |
| std::copy(std::make_move_iterator(info->callbacks.begin()), |
| std::make_move_iterator(info->callbacks.end()), |
| std::back_inserter(all_callbacks)); |
| frame_token_infos_.erase(info); |
| } |
| client_->DidPresentCompositorFrameOnImplThread( |
| frame_token, std::move(all_callbacks), feedback); |
| } |
| |
void LayerTreeHostImpl::DidNotNeedBeginFrame() {
  // Inform the skipped-frame tracker that no frame will be produced for this
  // BeginFrame interval.
  skipped_frame_tracker_.WillNotProduceFrame();
}
| |
| void LayerTreeHostImpl::ReclaimResources( |
| const std::vector<viz::ReturnedResource>& resources) { |
| resource_provider_.ReceiveReturnsFromParent(resources); |
| |
| // In OOM, we now might be able to release more resources that were held |
| // because they were exported. |
| if (resource_pool_) { |
| if (resource_pool_->memory_usage_bytes()) { |
| const size_t kMegabyte = 1024 * 1024; |
| |
| // This is a good time to log memory usage. A chunk of work has just |
| // completed but none of the memory used for that work has likely been |
| // freed. |
| UMA_HISTOGRAM_MEMORY_MB( |
| "Renderer4.ResourcePoolMemoryUsage", |
| static_cast<int>(resource_pool_->memory_usage_bytes() / kMegabyte)); |
| } |
| |
| resource_pool_->ReduceResourceUsage(); |
| } |
| |
| // If we're not visible, we likely released resources, so we want to |
| // aggressively flush here to make sure those DeleteTextures make it to the |
| // GPU process to free up the memory. |
| if (!visible_ && layer_tree_frame_sink_->context_provider()) { |
| auto* gl = layer_tree_frame_sink_->context_provider()->ContextGL(); |
| gl->ShallowFlushCHROMIUM(); |
| } |
| } |
| |
void LayerTreeHostImpl::OnDraw(const gfx::Transform& transform,
                               const gfx::Rect& viewport,
                               bool resourceless_software_draw,
                               bool skip_draw) {
  // Re-entrancy guard: OnDraw must not be called while a resourceless
  // software draw is already in progress.
  DCHECK(!resourceless_software_draw_);

  if (skip_draw) {
    client_->OnDrawForLayerTreeFrameSink(resourceless_software_draw_, true);
    return;
  }

  const bool transform_changed = external_transform_ != transform;
  const bool viewport_changed = external_viewport_ != viewport;

  external_transform_ = transform;
  external_viewport_ = viewport;

  {
    // |resourceless_software_draw_| is only set for the duration of the
    // client draw below; AutoReset restores it on scope exit.
    base::AutoReset<bool> resourceless_software_draw_reset(
        &resourceless_software_draw_, resourceless_software_draw);

    // For resourceless software draw, always set full damage to ensure they
    // always swap. Otherwise, need to set redraw for any changes to draw
    // parameters.
    if (transform_changed || viewport_changed || resourceless_software_draw_) {
      SetFullViewportDamage();
      SetNeedsRedraw();
      active_tree_->set_needs_update_draw_properties();
    }

    if (resourceless_software_draw)
      client_->OnCanDrawStateChanged(CanDraw());

    client_->OnDrawForLayerTreeFrameSink(resourceless_software_draw_,
                                         skip_draw);
  }

  if (resourceless_software_draw) {
    // The CanDraw() state may differ again now that the resourceless draw is
    // over; re-notify the client.
    active_tree_->set_needs_update_draw_properties();
    client_->OnCanDrawStateChanged(CanDraw());
    // This draw may have reset all damage, which would lead to subsequent
    // incorrect hardware draw, so explicitly set damage for next hardware
    // draw as well.
    SetFullViewportDamage();
  }
}
| |
void LayerTreeHostImpl::OnCanDrawStateChangedForTree() {
  // A tree change may have affected whether drawing is possible; recompute
  // CanDraw() and notify the client.
  client_->OnCanDrawStateChanged(CanDraw());
}
| |
// Builds the viz metadata for the frame about to be submitted from the active
// tree's current state (scale factors, scroll state, browser controls,
// referenced surfaces). Also allocates this frame's token.
viz::CompositorFrameMetadata LayerTreeHostImpl::MakeCompositorFrameMetadata() {
  viz::CompositorFrameMetadata metadata;
  metadata.frame_token = ++next_frame_token_;
  metadata.device_scale_factor = active_tree_->painted_device_scale_factor() *
                                 active_tree_->device_scale_factor();

  metadata.page_scale_factor = active_tree_->current_page_scale_factor();
  metadata.scrollable_viewport_size = active_tree_->ScrollableViewportSize();
  metadata.root_background_color = active_tree_->background_color();
  metadata.content_source_id = active_tree_->content_source_id();

  if (active_tree_->has_presentation_callbacks()) {
    // Queue the callbacks under this frame's token; they are consumed in
    // DidPresentCompositorFrame() once the frame is presented.
    frame_token_infos_.emplace_back(metadata.frame_token,
                                    CurrentBeginFrameArgs().frame_time,
                                    active_tree_->TakePresentationCallbacks());

    // Sanity bound: presentation acks should drain the queue well before it
    // grows this large.
    DCHECK_LE(frame_token_infos_.size(), 25u);
  }

  if (GetDrawMode() == DRAW_MODE_RESOURCELESS_SOFTWARE) {
    metadata.is_resourceless_software_draw_with_scroll_or_animation =
        IsActivelyScrolling() || mutator_host_->NeedsTickAnimations();
  }

  const base::flat_set<viz::SurfaceRange>& referenced_surfaces =
      active_tree_->SurfaceRanges();
  for (auto& surface_range : referenced_surfaces)
    metadata.referenced_surfaces.push_back(surface_range);

  // Remember the referenced surfaces of the last drawn frame.
  if (last_draw_referenced_surfaces_ != referenced_surfaces)
    last_draw_referenced_surfaces_ = referenced_surfaces;

  metadata.min_page_scale_factor = active_tree_->min_page_scale_factor();

  metadata.top_controls_height =
      browser_controls_offset_manager_->TopControlsHeight();
  metadata.top_controls_shown_ratio =
      browser_controls_offset_manager_->TopControlsShownRatio();

  metadata.local_surface_id_allocation_time =
      child_local_surface_id_allocator_.GetCurrentLocalSurfaceIdAllocation()
          .allocation_time();

#if defined(OS_ANDROID)
  // Android additionally reports bottom-controls, selection and root layer
  // geometry in the metadata.
  metadata.max_page_scale_factor = active_tree_->max_page_scale_factor();
  metadata.root_layer_size = active_tree_->ScrollableSize();

  if (const auto* outer_viewport_scroll_node = OuterViewportScrollNode()) {
    metadata.root_overflow_y_hidden =
        !outer_viewport_scroll_node->user_scrollable_vertical;
  }

  metadata.bottom_controls_height =
      browser_controls_offset_manager_->BottomControlsHeight();
  metadata.bottom_controls_shown_ratio =
      browser_controls_offset_manager_->BottomControlsShownRatio();

  active_tree_->GetViewportSelection(&metadata.selection);
#endif

  // Without an inner viewport scroll node there is no root scroll offset to
  // report.
  const auto* inner_viewport_scroll_node = InnerViewportScrollNode();
  if (!inner_viewport_scroll_node)
    return metadata;

#if defined(OS_ANDROID)
  metadata.root_overflow_y_hidden |=
      !inner_viewport_scroll_node->user_scrollable_vertical;
#endif

  // TODO(miletus) : Change the metadata to hold ScrollOffset.
  metadata.root_scroll_offset =
      gfx::ScrollOffsetToVector2dF(active_tree_->TotalScrollOffset());

  return metadata;
}
| |
// Builds the RenderFrameMetadata that is reported to observers alongside a
// submitted frame. Mirrors much of MakeCompositorFrameMetadata(), and decides
// whether a new LocalSurfaceId must be allocated for this frame.
RenderFrameMetadata LayerTreeHostImpl::MakeRenderFrameMetadata(
    FrameData* frame) {
  RenderFrameMetadata metadata;
  metadata.root_scroll_offset =
      gfx::ScrollOffsetToVector2dF(active_tree_->TotalScrollOffset());

  metadata.root_background_color = active_tree_->background_color();
  metadata.is_scroll_offset_at_top = active_tree_->TotalScrollOffset().y() == 0;
  metadata.device_scale_factor = active_tree_->painted_device_scale_factor() *
                                 active_tree_->device_scale_factor();
  active_tree_->GetViewportSelection(&metadata.selection);
  metadata.is_mobile_optimized = IsMobileOptimized(active_tree_.get());
  metadata.viewport_size_in_pixels = active_tree_->GetDeviceViewport().size();

  metadata.page_scale_factor = active_tree_->current_page_scale_factor();
  metadata.external_page_scale_factor =
      active_tree_->external_page_scale_factor();

  metadata.top_controls_height =
      browser_controls_offset_manager_->TopControlsHeight();
  metadata.top_controls_shown_ratio =
      browser_controls_offset_manager_->TopControlsShownRatio();
#if defined(OS_ANDROID)
  // Android additionally reports bottom-controls, page-scale limits and root
  // layer geometry.
  metadata.bottom_controls_height =
      browser_controls_offset_manager_->BottomControlsHeight();
  metadata.bottom_controls_shown_ratio =
      browser_controls_offset_manager_->BottomControlsShownRatio();
  metadata.scrollable_viewport_size = active_tree_->ScrollableViewportSize();
  metadata.min_page_scale_factor = active_tree_->min_page_scale_factor();
  metadata.max_page_scale_factor = active_tree_->max_page_scale_factor();
  metadata.root_layer_size = active_tree_->ScrollableSize();
  if (const auto* outer_viewport_scroll_node = OuterViewportScrollNode()) {
    metadata.root_overflow_y_hidden =
        !outer_viewport_scroll_node->user_scrollable_vertical;
  }
  const auto* inner_viewport_scroll_node = InnerViewportScrollNode();
  if (inner_viewport_scroll_node) {
    metadata.root_overflow_y_hidden |=
        !inner_viewport_scroll_node->user_scrollable_vertical;
  }
  metadata.has_transparent_background =
      frame->render_passes.back()->has_transparent_background;
#endif

  // A new LocalSurfaceId is needed whenever browser-controls state (and on
  // Android, selection or background transparency) differs from the metadata
  // of the last drawn frame.
  bool allocate_new_local_surface_id =
#if !defined(OS_ANDROID)
      last_draw_render_frame_metadata_ &&
      (last_draw_render_frame_metadata_->top_controls_height !=
           metadata.top_controls_height ||
       last_draw_render_frame_metadata_->top_controls_shown_ratio !=
           metadata.top_controls_shown_ratio);
#else
      last_draw_render_frame_metadata_ &&
      (last_draw_render_frame_metadata_->top_controls_height !=
           metadata.top_controls_height ||
       last_draw_render_frame_metadata_->top_controls_shown_ratio !=
           metadata.top_controls_shown_ratio ||
       last_draw_render_frame_metadata_->bottom_controls_height !=
           metadata.bottom_controls_height ||
       last_draw_render_frame_metadata_->bottom_controls_shown_ratio !=
           metadata.bottom_controls_shown_ratio ||
       last_draw_render_frame_metadata_->selection != metadata.selection ||
       last_draw_render_frame_metadata_->has_transparent_background !=
           metadata.has_transparent_background);
#endif

  if (child_local_surface_id_allocator_.GetCurrentLocalSurfaceIdAllocation()
          .IsValid()) {
    if (allocate_new_local_surface_id)
      AllocateLocalSurfaceId();
    metadata.local_surface_id_allocation =
        child_local_surface_id_allocator_.GetCurrentLocalSurfaceIdAllocation();
  }

  return metadata;
}
| |
| bool LayerTreeHostImpl::DrawLayers(FrameData* frame) { |
| DCHECK(CanDraw()); |
| DCHECK_EQ(frame->has_no_damage, frame->render_passes.empty()); |
| ResetRequiresHighResToDraw(); |
| skipped_frame_tracker_.DidProduceFrame(); |
| |
| if (frame->has_no_damage) { |
| DCHECK(!resourceless_software_draw_); |
| |
| TRACE_EVENT_INSTANT0("cc", "EarlyOut_NoDamage", TRACE_EVENT_SCOPE_THREAD); |
| active_tree()->BreakSwapPromises(SwapPromise::SWAP_FAILS); |
| return false; |
| } |
| |
| layer_tree_frame_sink_->set_source_frame_number( |
| active_tree_->source_frame_number()); |
| |
| auto compositor_frame = GenerateCompositorFrame(frame); |
| layer_tree_frame_sink_->SubmitCompositorFrame( |
| std::move(compositor_frame), |
| /*hit_test_data_changed=*/false, debug_state_.show_hit_test_borders); |
| |
| // Clears the list of swap promises after calling DidSwap on each of them to |
| // signal that the swap is over. |
| active_tree()->ClearSwapPromises(); |
| |
| // The next frame should start by assuming nothing has changed, and changes |
| // are noted as they occur. |
| // TODO(boliu): If we did a temporary software renderer frame, propogate the |
| // damage forward to the next frame. |
| for (size_t i = 0; i < frame->render_surface_list->size(); i++) { |
| auto* surface = (*frame->render_surface_list)[i]; |
| surface->damage_tracker()->DidDrawDamagedArea(); |
| } |
| active_tree_->ResetAllChangeTracking(); |
| |
| active_tree_->set_has_ever_been_drawn(true); |
| devtools_instrumentation::DidDrawFrame(id_); |
| benchmark_instrumentation::IssueImplThreadRenderingStatsEvent( |
| rendering_stats_instrumentation_->TakeImplThreadRenderingStats()); |
| return true; |
| } |
| |
// Assembles the viz::CompositorFrame for |frame|: records stats/tracing,
// updates the HUD, builds the metadata (including latency info and the
// resource list), and moves the render passes into the outgoing frame.
viz::CompositorFrame LayerTreeHostImpl::GenerateCompositorFrame(
    FrameData* frame) {
  TRACE_EVENT0("cc,benchmark", "LayerTreeHostImpl::GenerateCompositorFrame");
  TRACE_EVENT_WITH_FLOW1("viz,benchmark", "Graphics.Pipeline",
                         TRACE_ID_GLOBAL(CurrentBeginFrameArgs().trace_id),
                         TRACE_EVENT_FLAG_FLOW_IN | TRACE_EVENT_FLAG_FLOW_OUT,
                         "step", "GenerateCompositorFrame");
  base::TimeTicks frame_time = CurrentBeginFrameArgs().frame_time;
  fps_counter_->SaveTimeStamp(frame_time,
                              !layer_tree_frame_sink_->context_provider());
  rendering_stats_instrumentation_->IncrementFrameCount(1);

  memory_history_->SaveEntry(tile_manager_.memory_stats_from_last_assign());

  if (debug_state_.ShowHudRects()) {
    debug_rect_history_->SaveDebugRectsForCurrentFrame(
        active_tree(), active_tree_->hud_layer(), *frame->render_surface_list,
        debug_state_);
  }

  TRACE_EVENT_INSTANT2("cc", "Scroll Delta This Frame",
                       TRACE_EVENT_SCOPE_THREAD, "x",
                       scroll_accumulated_this_frame_.x(), "y",
                       scroll_accumulated_this_frame_.y());
  scroll_accumulated_this_frame_ = gfx::ScrollOffset();

  // When tracing has just been enabled, notify every layer on both trees so
  // layer state can be captured from the start of the trace.
  bool is_new_trace;
  TRACE_EVENT_IS_NEW_TRACE(&is_new_trace);
  if (is_new_trace) {
    if (pending_tree_) {
      LayerTreeHostCommon::CallFunctionForEveryLayer(
          pending_tree(), [](LayerImpl* layer) { layer->DidBeginTracing(); });
    }
    LayerTreeHostCommon::CallFunctionForEveryLayer(
        active_tree(), [](LayerImpl* layer) { layer->DidBeginTracing(); });
  }

  {
    TRACE_EVENT0("cc", "DrawLayers.FrameViewerTracing");
    TRACE_EVENT_OBJECT_SNAPSHOT_WITH_ID(
        frame_viewer_instrumentation::CategoryLayerTree(),
        "cc::LayerTreeHostImpl", id_, AsValueWithFrame(frame));
  }

  const DrawMode draw_mode = GetDrawMode();

  // Because the contents of the HUD depend on everything else in the frame, the
  // contents of its texture are updated as the last thing before the frame is
  // drawn.
  if (active_tree_->hud_layer()) {
    TRACE_EVENT0("cc", "DrawLayers.UpdateHudTexture");
    active_tree_->hud_layer()->UpdateHudTexture(
        draw_mode, layer_tree_frame_sink_, &resource_provider_,
        // The hud uses Gpu rasterization if the device is capable, not related
        // to the content of the web page.
        gpu_rasterization_status_ != GpuRasterizationStatus::OFF_DEVICE,
        frame->render_passes);
  }

  viz::CompositorFrameMetadata metadata = MakeCompositorFrameMetadata();
  metadata.may_contain_video = frame->may_contain_video;
  metadata.deadline = viz::FrameDeadline(
      CurrentBeginFrameArgs().frame_time,
      frame->deadline_in_frames.value_or(0u), CurrentBeginFrameArgs().interval,
      frame->use_default_lower_bound_deadline);

  metadata.activation_dependencies = std::move(frame->activation_dependencies);
  active_tree()->FinishSwapPromises(&metadata);
  // The swap-promises should not change the frame-token.
  DCHECK_EQ(metadata.frame_token, *next_frame_token_);

  // Give the render-frame-metadata observer its copy before the compositor
  // metadata is moved into the frame below.
  if (render_frame_metadata_observer_) {
    last_draw_render_frame_metadata_ = MakeRenderFrameMetadata(frame);
    render_frame_metadata_observer_->OnRenderFrameSubmission(
        *last_draw_render_frame_metadata_, &metadata,
        active_tree()->TakeForceSendMetadataRequest());
  }

  metadata.latency_info.emplace_back(ui::SourceEventType::FRAME);
  ui::LatencyInfo& new_latency_info = metadata.latency_info.back();
  if (CommitToActiveTree()) {
    new_latency_info.AddLatencyNumberWithTimestamp(
        ui::LATENCY_BEGIN_FRAME_UI_COMPOSITOR_COMPONENT, frame_time, 1);
  } else {
    new_latency_info.AddLatencyNumberWithTimestamp(
        ui::LATENCY_BEGIN_FRAME_RENDERER_COMPOSITOR_COMPONENT, frame_time, 1);

    base::TimeTicks draw_time = base::TimeTicks::Now();
    for (auto& latency : metadata.latency_info) {
      latency.AddLatencyNumberWithTimestamp(
          ui::INPUT_EVENT_LATENCY_RENDERER_SWAP_COMPONENT, draw_time, 1);
    }
  }
  ui::LatencyInfo::TraceIntermediateFlowEvents(metadata.latency_info,
                                               "SwapBuffers");

  // Collect all resource ids in the render passes into a single array.
  std::vector<viz::ResourceId> resources;
  for (const auto& render_pass : frame->render_passes) {
    for (auto* quad : render_pass->quad_list) {
      for (viz::ResourceId resource_id : quad->resources)
        resources.push_back(resource_id);
    }
  }

  DCHECK_LE(viz::BeginFrameArgs::kStartingFrameNumber,
            frame->begin_frame_ack.sequence_number);
  metadata.begin_frame_ack = frame->begin_frame_ack;

  viz::CompositorFrame compositor_frame;
  compositor_frame.metadata = std::move(metadata);
  resource_provider_.PrepareSendToParent(
      resources, &compositor_frame.resource_list,
      layer_tree_frame_sink_->context_provider());
  compositor_frame.render_pass_list = std::move(frame->render_passes);
  // TODO(fsamuel): Once all clients get their viz::LocalSurfaceId from their
  // parent, the viz::LocalSurfaceId should hang off CompositorFrameMetadata.
  if (settings_.enable_surface_synchronization) {
    // If surface synchronization is on, we should always have a valid
    // LocalSurfaceId in LayerTreeImpl unless we don't have a scheduler because
    // without a scheduler commits are not deferred and LayerTrees without valid
    // LocalSurfaceId might slip through, but single-thread-without-scheduler
    // mode is only used in tests so it doesn't matter.
    CHECK(!settings_.single_thread_proxy_scheduler ||
          active_tree()->local_surface_id_allocation_from_parent().IsValid());
    layer_tree_frame_sink_->SetLocalSurfaceId(
        child_local_surface_id_allocator_.GetCurrentLocalSurfaceIdAllocation()
            .local_surface_id());
  }
  last_draw_local_surface_id_allocation_ =
      child_local_surface_id_allocator_.GetCurrentLocalSurfaceIdAllocation();
  // Record the total quad count per frame for metrics-enabled clients.
  if (const char* client_name = GetClientNameForMetrics()) {
    size_t total_quad_count = 0;
    for (const auto& pass : compositor_frame.render_pass_list)
      total_quad_count += pass->quad_list.size();
    UMA_HISTOGRAM_COUNTS_1000(
        base::StringPrintf("Compositing.%s.CompositorFrame.Quads", client_name),
        total_quad_count);
  }

  return compositor_frame;
}
| |
| void LayerTreeHostImpl::DidDrawAllLayers(const FrameData& frame) { |
| // TODO(lethalantidote): LayerImpl::DidDraw can be removed when |
| // VideoLayerImpl is removed. |
| for (size_t i = 0; i < frame.will_draw_layers.size(); ++i) |
| frame.will_draw_layers[i]->DidDraw(&resource_provider_); |
| |
| for (auto* it : video_frame_controllers_) |
| it->DidDrawFrame(); |
| } |
| |
| int LayerTreeHostImpl::RequestedMSAASampleCount() const { |
| if (settings_.gpu_rasterization_msaa_sample_count == -1) { |
| // Use the most up-to-date version of device_scale_factor that we have. |
| float device_scale_factor = pending_tree_ |
| ? pending_tree_->device_scale_factor() |
| : active_tree_->device_scale_factor(); |
| return device_scale_factor >= 2.0f ? 4 : 8; |
| } |
| |
| return settings_.gpu_rasterization_msaa_sample_count; |
| } |
| |
| void LayerTreeHostImpl::SetHasGpuRasterizationTrigger(bool flag) { |
| if (has_gpu_rasterization_trigger_ != flag) { |
| has_gpu_rasterization_trigger_ = flag; |
| need_update_gpu_rasterization_status_ = true; |
| } |
| } |
| |
| void LayerTreeHostImpl::SetContentHasSlowPaths(bool flag) { |
| if (content_has_slow_paths_ != flag) { |
| content_has_slow_paths_ = flag; |
| need_update_gpu_rasterization_status_ = true; |
| } |
| } |
| |
| void LayerTreeHostImpl::SetContentHasNonAAPaint(bool flag) { |
| if (content_has_non_aa_paint_ != flag) { |
| content_has_non_aa_paint_ = flag; |
| need_update_gpu_rasterization_status_ = true; |
| } |
| } |
| |
// Queries the worker context for GPU rasterization capabilities. All four
// out-params are always written; they default to "unsupported" when no
// context is available or capabilities cannot be determined.
void LayerTreeHostImpl::GetGpuRasterizationCapabilities(
    bool* gpu_rasterization_enabled,
    bool* gpu_rasterization_supported,
    int* max_msaa_samples,
    bool* supports_disable_msaa) {
  *gpu_rasterization_enabled = false;
  *gpu_rasterization_supported = false;
  *max_msaa_samples = 0;
  *supports_disable_msaa = false;

  // Without both a compositor and a worker context, GPU raster is off.
  if (!(layer_tree_frame_sink_ && layer_tree_frame_sink_->context_provider() &&
        layer_tree_frame_sink_->worker_context_provider()))
    return;

  viz::RasterContextProvider* context_provider =
      layer_tree_frame_sink_->worker_context_provider();
  viz::RasterContextProvider::ScopedRasterContextLock scoped_context(
      context_provider);

  const auto& caps = context_provider->ContextCapabilities();
  *gpu_rasterization_enabled = caps.gpu_rasterization;
  // When not enabled, only continue if the settings force GPU raster anyway.
  if (!*gpu_rasterization_enabled && !settings_.gpu_rasterization_forced)
    return;

  if (use_oop_rasterization_) {
    *gpu_rasterization_supported = true;
    *supports_disable_msaa = caps.multisample_compatibility;
    // For OOP raster, the gpu service side will disable msaa if the
    // requested samples are not enough. GPU raster does this same
    // logic below client side.
    *max_msaa_samples = RequestedMSAASampleCount();
    return;
  }

  if (!context_provider->ContextSupport()->HasGrContextSupport())
    return;

  // Do not check GrContext above. It is lazy-created, and we only want to
  // create it if it might be used.
  GrContext* gr_context = context_provider->GrContext();
  *gpu_rasterization_supported = !!gr_context;
  if (!*gpu_rasterization_supported)
    return;

  *supports_disable_msaa = caps.multisample_compatibility;
  if (!caps.msaa_is_slow && !caps.avoid_stencil_buffers) {
    // Skia may blacklist MSAA independently of Chrome. Query Skia for its max
    // supported sample count. Assume gpu compositing + gpu raster for this, as
    // that is what we are hoping to use.
    viz::ResourceFormat tile_format = TileRasterBufferFormat(
        settings_, layer_tree_frame_sink_->context_provider(),
        /*use_gpu_rasterization=*/true);
    SkColorType color_type = ResourceFormatToClosestSkColorType(
        /*gpu_compositing=*/true, tile_format);
    *max_msaa_samples =
        gr_context->maxSurfaceSampleCountForColorType(color_type);
  }
}
| |
// Recomputes |gpu_rasterization_status_|, |use_gpu_rasterization_| and
// |use_msaa_| from settings, content flags and context capabilities. Returns
// true when the use-gpu/use-msaa decision actually changed.
bool LayerTreeHostImpl::UpdateGpuRasterizationStatus() {
  if (!need_update_gpu_rasterization_status_)
    return false;
  need_update_gpu_rasterization_status_ = false;

  // TODO(danakj): Can we avoid having this run when there's no
  // LayerTreeFrameSink?
  // For now just early out and leave things unchanged, we'll come back here
  // when we get a LayerTreeFrameSink.
  if (!layer_tree_frame_sink_)
    return false;

  int requested_msaa_samples = RequestedMSAASampleCount();
  int max_msaa_samples = 0;
  bool gpu_rasterization_enabled = false;
  bool gpu_rasterization_supported = false;
  bool supports_disable_msaa = false;
  GetGpuRasterizationCapabilities(&gpu_rasterization_enabled,
                                  &gpu_rasterization_supported,
                                  &max_msaa_samples, &supports_disable_msaa);

  // MSAA is only usable for slow paths if the device offers enough samples
  // and, for content with non-AA paint, MSAA can be selectively disabled.
  bool use_gpu = false;
  bool use_msaa = false;
  bool using_msaa_for_slow_paths =
      requested_msaa_samples > 0 &&
      max_msaa_samples >= requested_msaa_samples &&
      (!content_has_non_aa_paint_ || supports_disable_msaa);
  if (settings_.gpu_rasterization_forced) {
    use_gpu = true;
    gpu_rasterization_status_ = GpuRasterizationStatus::ON_FORCED;
    use_msaa = content_has_slow_paths_ && using_msaa_for_slow_paths;
    if (use_msaa) {
      gpu_rasterization_status_ = GpuRasterizationStatus::MSAA_CONTENT;
    }
  } else if (!gpu_rasterization_enabled) {
    gpu_rasterization_status_ = GpuRasterizationStatus::OFF_DEVICE;
  } else if (!has_gpu_rasterization_trigger_) {
    gpu_rasterization_status_ = GpuRasterizationStatus::OFF_VIEWPORT;
  } else if (content_has_slow_paths_ && using_msaa_for_slow_paths) {
    use_gpu = use_msaa = true;
    gpu_rasterization_status_ = GpuRasterizationStatus::MSAA_CONTENT;
  } else {
    use_gpu = true;
    gpu_rasterization_status_ = GpuRasterizationStatus::ON;
  }

  if (use_gpu && !use_gpu_rasterization_) {
    if (!gpu_rasterization_supported) {
      // If GPU rasterization is unusable, e.g. if GlContext could not
      // be created due to losing the GL context, force use of software
      // raster.
      use_gpu = false;
      use_msaa = false;
      gpu_rasterization_status_ = GpuRasterizationStatus::OFF_DEVICE;
    }
  }

  if (use_gpu == use_gpu_rasterization_ && use_msaa == use_msaa_)
    return false;

  // Note that this must happen first, in case the rest of the calls want to
  // query the new state of |use_gpu_rasterization_|.
  use_gpu_rasterization_ = use_gpu;
  use_msaa_ = use_msaa;
  return true;
}
| |
| void LayerTreeHostImpl::UpdateTreeResourcesIfNeeded() { |
| // For simplicity, clobber all resources when the color space changes. |
| // This is mostly to clear the image decode caches, which don't handle |
| // multiple color space at once. |
| int color_space_id = -1; |
| GetRasterColorSpaceAndId(&color_space_id); |
| bool color_space_changed = last_color_space_id_ != color_space_id; |
| last_color_space_id_ = color_space_id; |
| |
| if (!UpdateGpuRasterizationStatus() && !color_space_changed) |
| return; |
| |
| // Clean up and replace existing tile manager with another one that uses |
| // appropriate rasterizer. Only do this however if we already have a |
| // resource pool, since otherwise we might not be able to create a new |
| // one. |
| ReleaseTileResources(); |
| if (resource_pool_) { |
| CleanUpTileManagerResources(); |
| CreateTileManagerResources(); |
| } |
| RecreateTileResources(); |
| |
| // We have released tilings for both active and pending tree. |
| // We would not have any content to draw until the pending tree is activated. |
| // Prevent the active tree from drawing until activation. |
| // TODO(crbug.com/469175): Replace with RequiresHighResToDraw. |
| SetRequiresHighResToDraw(); |
| } |
| |
| bool LayerTreeHostImpl::WillBeginImplFrame(const viz::BeginFrameArgs& args) { |
| impl_thread_phase_ = ImplThreadPhase::INSIDE_IMPL_FRAME; |
| current_begin_frame_tracker_.Start(args); |
| |
| if (is_likely_to_require_a_draw_) { |
| // Optimistically schedule a draw. This will let us expect the tile manager |
| // to complete its work so that we can draw new tiles within the impl frame |
| // we are beginning now. |
| SetNeedsRedraw(); |
| } |
| |
| if (input_handler_client_) |
| input_handler_client_->DeliverInputForBeginFrame(); |
| |
| Animate(); |
| |
| image_animation_controller_.WillBeginImplFrame(args); |
| |
| for (auto* it : video_frame_controllers_) |
| it->OnBeginFrame(args); |
| |
| skipped_frame_tracker_.BeginFrame(args.frame_time, args.interval); |
| |
| bool recent_frame_had_no_damage = |
| consecutive_frame_with_damage_count_ < settings_.damaged_frame_limit; |
| // Check damage early if the setting is enabled and a recent frame had no |
| // damage. HasDamage() expects CanDraw to be true. If we can't check damage, |
| // return true to indicate that there might be damage in this frame. |
| if (settings_.enable_early_damage_check && recent_frame_had_no_damage && |
| CanDraw()) { |
| bool ok = active_tree()->UpdateDrawProperties(); |
| DCHECK(ok); |
| DamageTracker::UpdateDamageTracking(active_tree_.get(), |
| active_tree_->GetRenderSurfaceList()); |
| bool has_damage = HasDamage(); |
| // Animations are updated after we attempt to draw. If the frame is aborted, |
| // update animations now. |
| if (!has_damage) |
| UpdateAnimationState(true); |
| return has_damage; |
| } |
| // Assume there is damage if we cannot check for damage. |
| return true; |
| } |
| |
void LayerTreeHostImpl::DidFinishImplFrame() {
  // Closes out the impl frame opened in WillBeginImplFrame(): notify the
  // skipped-frame tracker, return the thread to the idle phase, and finish
  // the BeginFrameArgs tracking started there.
  skipped_frame_tracker_.FinishFrame();
  impl_thread_phase_ = ImplThreadPhase::IDLE;
  current_begin_frame_tracker_.Finish();
}
| |
void LayerTreeHostImpl::DidNotProduceFrame(const viz::BeginFrameAck& ack) {
  // Forwards the ack to the frame sink (if one exists) so the display
  // compositor learns no CompositorFrame will be produced for this BeginFrame.
  if (layer_tree_frame_sink_)
    layer_tree_frame_sink_->DidNotProduceFrame(ack);
}
| |
| void LayerTreeHostImpl::UpdateViewportContainerSizes() { |
| if (!InnerViewportScrollNode()) |
| return; |
| |
| ViewportAnchor anchor(InnerViewportScrollNode(), OuterViewportScrollLayer(), |
| active_tree_.get()); |
| |
| float top_controls_layout_height = |
| active_tree_->browser_controls_shrink_blink_size() |
| ? active_tree_->top_controls_height() |
| : 0.f; |
| float delta_from_top_controls = |
| top_controls_layout_height - |
| browser_controls_offset_manager_->ContentTopOffset(); |
| float bottom_controls_layout_height = |
| active_tree_->browser_controls_shrink_blink_size() |
| ? active_tree_->bottom_controls_height() |
| : 0.f; |
| delta_from_top_controls += |
| bottom_controls_layout_height - |
| browser_controls_offset_manager_->ContentBottomOffset(); |
| |
| // Adjust the viewport layers by shrinking/expanding the container to account |
| // for changes in the size (e.g. browser controls) since the last resize from |
| // Blink. |
| auto* property_trees = active_tree_->property_trees(); |
| gfx::Vector2dF bounds_delta(0.f, delta_from_top_controls); |
| if (property_trees->inner_viewport_container_bounds_delta() == bounds_delta) |
| return; |
| |
| property_trees->SetInnerViewportContainerBoundsDelta(bounds_delta); |
| |
| ClipNode* inner_clip_node = property_trees->clip_tree.Node( |
| InnerViewportScrollLayer()->clip_tree_index()); |
| inner_clip_node->clip.set_height( |
| InnerViewportScrollNode()->container_bounds.height() + bounds_delta.y()); |
| |
| // Adjust the outer viewport container as well, since adjusting only the |
| // inner may cause its bounds to exceed those of the outer, causing scroll |
| // clamping. |
| if (OuterViewportScrollNode()) { |
| gfx::Vector2dF scaled_bounds_delta = gfx::ScaleVector2d( |
| bounds_delta, 1.f / active_tree_->min_page_scale_factor()); |
| |
| property_trees->SetOuterViewportContainerBoundsDelta(scaled_bounds_delta); |
| property_trees->SetInnerViewportScrollBoundsDelta(scaled_bounds_delta); |
| |
| ClipNode* outer_clip_node = property_trees->clip_tree.Node( |
| OuterViewportScrollLayer()->clip_tree_index()); |
| |
| float container_height = |
| OuterViewportScrollNode()->container_bounds.height(); |
| |
| // TODO(bokan): The container bounds for the outer viewport are incorrectly |
| // computed pre-Blink-Gen-Property-Trees so we must apply the minimum page |
| // scale factor. https://crbug.com/901083 |
| if (!settings().use_layer_lists) |
| container_height *= active_tree_->min_page_scale_factor(); |
| |
| outer_clip_node->clip.set_height(container_height + bounds_delta.y()); |
| |
| // Expand all clips between the outer viewport and the inner viewport. |
| auto* outer_ancestor = property_trees->clip_tree.parent(outer_clip_node); |
| while (outer_ancestor && outer_ancestor != inner_clip_node) { |
| outer_ancestor->clip.Union(outer_clip_node->clip); |
| outer_ancestor = property_trees->clip_tree.parent(outer_ancestor); |
| } |
| |
| anchor.ResetViewportToAnchoredPosition(); |
| } |
| |
| property_trees->clip_tree.set_needs_update(true); |
| property_trees->full_tree_damaged = true; |
| active_tree_->set_needs_update_draw_properties(); |
| |
| // Viewport scrollbar positions are determined using the viewport bounds |
| // delta. |
| active_tree_->SetScrollbarGeometriesNeedUpdate(); |
| active_tree_->set_needs_update_draw_properties(); |
| |
| // For pre-BlinkGenPropertyTrees mode, we need to ensure the layers are |
| // appropriately updated. |
| if (!settings().use_layer_lists) { |
| if (OuterViewportContainerLayer()) |
| OuterViewportContainerLayer()->NoteLayerPropertyChanged(); |
| if (InnerViewportScrollLayer()) |
| InnerViewportScrollLayer()->NoteLayerPropertyChanged(); |
| if (OuterViewportScrollLayer()) |
| OuterViewportScrollLayer()->NoteLayerPropertyChanged(); |
| } |
| } |
| |
void LayerTreeHostImpl::SynchronouslyInitializeAllTiles() {
  // Only valid for the single-threaded non-scheduled/synchronous case
  // using the zero copy raster worker pool.
  // Drains the synchronous task graph runner, blocking until it is idle so
  // that pending tile work has completed before returning.
  single_thread_synchronous_task_graph_runner_->RunUntilIdle();
}
| |
| static uint32_t GetFlagsForSurfaceLayer(const SurfaceLayerImpl* layer) { |
| uint32_t flags = viz::HitTestRegionFlags::kHitTestMouse | |
| viz::HitTestRegionFlags::kHitTestTouch; |
| if (layer->range().IsValid()) { |
| flags |= viz::HitTestRegionFlags::kHitTestChildSurface; |
| } else { |
| flags |= viz::HitTestRegionFlags::kHitTestMine; |
| } |
| return flags; |
| } |
| |
| static void PopulateHitTestRegion(viz::HitTestRegion* hit_test_region, |
| const LayerImpl* layer, |
| uint32_t flags, |
| uint32_t async_hit_test_reasons, |
| const gfx::Rect& rect, |
| const viz::SurfaceId& surface_id, |
| float device_scale_factor) { |
| hit_test_region->frame_sink_id = surface_id.frame_sink_id(); |
| hit_test_region->flags = flags; |
| hit_test_region->async_hit_test_reasons = async_hit_test_reasons; |
| DCHECK_EQ(!!async_hit_test_reasons, |
| !!(flags & viz::HitTestRegionFlags::kHitTestAsk)); |
| |
| hit_test_region->rect = rect; |
| // The transform of hit test region maps a point from parent hit test region |
| // to the local space. This is the inverse of screen space transform. Because |
| // hit test query wants the point in target to be in Pixel space, we |
| // counterscale the transform here. Note that the rect is scaled by dsf, so |
| // the point and the rect are still in the same space. |
| gfx::Transform surface_to_root_transform = layer->ScreenSpaceTransform(); |
| surface_to_root_transform.Scale(SK_MScalar1 / device_scale_factor, |
| SK_MScalar1 / device_scale_factor); |
| surface_to_root_transform.FlattenTo2d(); |
| // TODO(sunxd): Avoid losing precision by not using inverse if possible. |
| bool ok = surface_to_root_transform.GetInverse(&hit_test_region->transform); |
| // Note: If |ok| is false, the |transform| is set to the identity before |
| // returning, which is what we want. |
| ALLOW_UNUSED_LOCAL(ok); |
| } |
| |
| base::Optional<viz::HitTestRegionList> LayerTreeHostImpl::BuildHitTestData() { |
| TRACE_EVENT0("cc", "LayerTreeHostImpl::BuildHitTestData"); |
| |
| if (!settings_.build_hit_test_data) |
| return {}; |
| |
| base::Optional<viz::HitTestRegionList> hit_test_region_list(base::in_place); |
| hit_test_region_list->flags = viz::HitTestRegionFlags::kHitTestMine | |
| viz::HitTestRegionFlags::kHitTestMouse | |
| viz::HitTestRegionFlags::kHitTestTouch; |
| hit_test_region_list->bounds = active_tree_->GetDeviceViewport(); |
| hit_test_region_list->transform = DrawTransform(); |
| |
| float device_scale_factor = active_tree()->device_scale_factor(); |
| |
| Region overlapping_region; |
| size_t num_iterated_layers = 0; |
| // If the layer tree contains more than 100 layers, we stop accumulating |
| // layers in |overlapping_region| to save compositor frame submitting time, as |
| // a result we do async hit test on any surface layers that |
| bool assume_overlap = false; |
| for (const auto* layer : base::Reversed(*active_tree())) { |
| // Viz hit test needs to collect information for pointer-events: none OOPIFs |
| // as well. Now Layer::HitTestable ignores pointer-events property, but this |
| // early out will not work correctly if we integrate has_pointer_events_none |
| // into Layer::HitTestable, so we make sure we don't skip surface layers |
| // that draws content but has pointer-events: none property. |
| if (!(layer->HitTestable() || |
| (layer->is_surface_layer() && layer->DrawsContent()))) |
| continue; |
| |
| if (layer->is_surface_layer()) { |
| const auto* surface_layer = static_cast<const SurfaceLayerImpl*>(layer); |
| // If a surface layer is created not by child frame compositor or the |
| // frame owner has pointer-events: none property, the surface layer |
| // becomes not hit testable. We should not generate data for it. |
| if (!surface_layer->surface_hit_testable() || |
| !surface_layer->range().IsValid()) { |
| // We collect any overlapped regions that does not have pointer-events: |
| // none. |
| if (!surface_layer->has_pointer_events_none() && !assume_overlap) { |
| overlapping_region.Union(MathUtil::MapEnclosingClippedRect( |
| layer->ScreenSpaceTransform(), |
| gfx::Rect(surface_layer->bounds()))); |
| } |
| continue; |
| } |
| |
| gfx::Rect content_rect( |
| gfx::ScaleToEnclosingRect(gfx::Rect(surface_layer->bounds()), |
| device_scale_factor, device_scale_factor)); |
| |
| gfx::Rect layer_screen_space_rect = MathUtil::MapEnclosingClippedRect( |
| surface_layer->ScreenSpaceTransform(), |
| gfx::Rect(surface_layer->bounds())); |
| auto flag = GetFlagsForSurfaceLayer(surface_layer); |
| uint32_t async_hit_test_reasons = |
| viz::AsyncHitTestReasons::kNotAsyncHitTest; |
| if (surface_layer->has_pointer_events_none()) |
| flag |= viz::HitTestRegionFlags::kHitTestIgnore; |
| if (assume_overlap || |
| overlapping_region.Intersects(layer_screen_space_rect)) { |
| flag |= viz::HitTestRegionFlags::kHitTestAsk; |
| async_hit_test_reasons |= viz::AsyncHitTestReasons::kOverlappedRegion; |
| } |
| bool layer_hit_test_region_is_masked = |
| active_tree() |
| ->property_trees() |
| ->effect_tree.HitTestMayBeAffectedByMask( |
| surface_layer->effect_tree_index()); |
| if (surface_layer->is_clipped() || layer_hit_test_region_is_masked) { |
| bool layer_hit_test_region_is_rectangle = |
| !layer_hit_test_region_is_masked && |
| surface_layer->ScreenSpaceTransform().Preserves2dAxisAlignment() && |
| active_tree() |
| ->property_trees() |
| ->effect_tree.ClippedHitTestRegionIsRectangle( |
| surface_layer->effect_tree_index()); |
| content_rect = |
| gfx::ScaleToEnclosingRect(surface_layer->visible_layer_rect(), |
| device_scale_factor, device_scale_factor); |
| if (!layer_hit_test_region_is_rectangle) { |
| flag |= viz::HitTestRegionFlags::kHitTestAsk; |
| async_hit_test_reasons |= viz::AsyncHitTestReasons::kIrregularClip; |
| } |
| } |
| const auto& surface_id = surface_layer->range().end(); |
| hit_test_region_list->regions.emplace_back(); |
| PopulateHitTestRegion(&hit_test_region_list->regions.back(), layer, flag, |
| async_hit_test_reasons, content_rect, surface_id, |
| device_scale_factor); |
| continue; |
| } |
| // TODO(sunxd): Submit all overlapping layer bounds as hit test regions. |
| // Also investigate if we can use visible layer rect as overlapping regions. |
| num_iterated_layers++; |
| if (num_iterated_layers > kAssumeOverlapThreshold) |
| assume_o
|