diff --git a/DEPS b/DEPS index 434a99a..8c11000 100644 --- a/DEPS +++ b/DEPS
@@ -44,7 +44,7 @@ # Three lines of non-changing comments so that # the commit queue can handle CLs rolling V8 # and whatever else without interference from each other. - 'v8_revision': 'db76e7af00fca3256a97937ba8fbdecf6dd76cd4', + 'v8_revision': 'a5140c0032e2bb6e5174bbf235f505070692cef3', # Three lines of non-changing comments so that # the commit queue can handle CLs rolling swarming_client # and whatever else without interference from each other. @@ -64,7 +64,7 @@ # Three lines of non-changing comments so that # the commit queue can handle CLs rolling PDFium # and whatever else without interference from each other. - 'pdfium_revision': 'c589fdc5e4e996dd6d2502f7267414c471e5fd6d', + 'pdfium_revision': '5e9066cbfa252b84d49f8b4adba445ba7761e81f', # Three lines of non-changing comments so that # the commit queue can handle CLs rolling openmax_dl # and whatever else without interference from each other. @@ -96,7 +96,7 @@ # Three lines of non-changing comments so that # the commit queue can handle CLs rolling catapult # and whatever else without interference from each other. - 'catapult_revision': '01464d2b3b932f91d29e1f16b6853c2f89abd5da', + 'catapult_revision': 'd7650682cad0991a394ae8cf241474621ccdf68d', # Three lines of non-changing comments so that # the commit queue can handle CLs rolling libFuzzer # and whatever else without interference from each other.
diff --git a/ash/BUILD.gn b/ash/BUILD.gn index c8e77ca..06f3ffc 100644 --- a/ash/BUILD.gn +++ b/ash/BUILD.gn
@@ -44,8 +44,6 @@ "aura/wm_lookup_aura.h", "aura/wm_root_window_controller_aura.cc", "aura/wm_root_window_controller_aura.h", - "aura/wm_shelf_aura.cc", - "aura/wm_shelf_aura.h", "aura/wm_shell_aura.cc", "aura/wm_shell_aura.h", "aura/wm_window_aura.cc",
diff --git a/ash/aura/wm_root_window_controller_aura.cc b/ash/aura/wm_root_window_controller_aura.cc index 6fdb472..d8bab920 100644 --- a/ash/aura/wm_root_window_controller_aura.cc +++ b/ash/aura/wm_root_window_controller_aura.cc
@@ -4,10 +4,10 @@ #include "ash/aura/wm_root_window_controller_aura.h" -#include "ash/aura/wm_shelf_aura.h" #include "ash/aura/wm_shell_aura.h" #include "ash/aura/wm_window_aura.h" #include "ash/common/shelf/shelf_widget.h" +#include "ash/common/shelf/wm_shelf.h" #include "ash/display/window_tree_host_manager.h" #include "ash/root_window_controller.h" #include "ash/shell.h" @@ -62,7 +62,7 @@ } bool WmRootWindowControllerAura::HasShelf() { - return root_window_controller_->wm_shelf_aura()->shelf_widget() != nullptr; + return root_window_controller_->wm_shelf()->shelf_widget() != nullptr; } WmShell* WmRootWindowControllerAura::GetShell() { @@ -70,7 +70,7 @@ } WmShelf* WmRootWindowControllerAura::GetShelf() { - return root_window_controller_->wm_shelf_aura(); + return root_window_controller_->wm_shelf(); } WmWindow* WmRootWindowControllerAura::GetWindow() {
diff --git a/ash/aura/wm_shelf_aura.cc b/ash/aura/wm_shelf_aura.cc deleted file mode 100644 index 7feec5a..0000000 --- a/ash/aura/wm_shelf_aura.cc +++ /dev/null
@@ -1,71 +0,0 @@ -// Copyright 2016 The Chromium Authors. All rights reserved. -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -#include "ash/aura/wm_shelf_aura.h" - -#include "ash/aura/wm_window_aura.h" -#include "ash/common/shelf/shelf_layout_manager.h" -#include "ash/shelf/shelf_bezel_event_handler.h" -#include "ash/shell.h" - -namespace ash { - -// WmShelfAura::AutoHideEventHandler ------------------------------------------- - -// Forwards mouse and gesture events to ShelfLayoutManager for auto-hide. -// TODO(mash): Add similar event handling support for mash. -class WmShelfAura::AutoHideEventHandler : public ui::EventHandler { - public: - explicit AutoHideEventHandler(ShelfLayoutManager* shelf_layout_manager) - : shelf_layout_manager_(shelf_layout_manager) { - Shell::GetInstance()->AddPreTargetHandler(this); - } - ~AutoHideEventHandler() override { - Shell::GetInstance()->RemovePreTargetHandler(this); - } - - // Overridden from ui::EventHandler: - void OnMouseEvent(ui::MouseEvent* event) override { - shelf_layout_manager_->UpdateAutoHideForMouseEvent( - event, WmWindowAura::Get(static_cast<aura::Window*>(event->target()))); - } - void OnGestureEvent(ui::GestureEvent* event) override { - shelf_layout_manager_->UpdateAutoHideForGestureEvent( - event, WmWindowAura::Get(static_cast<aura::Window*>(event->target()))); - } - - private: - ShelfLayoutManager* shelf_layout_manager_; - DISALLOW_COPY_AND_ASSIGN(AutoHideEventHandler); -}; - -// WmShelfAura ----------------------------------------------------------------- - -WmShelfAura::WmShelfAura() {} - -WmShelfAura::~WmShelfAura() {} - -void WmShelfAura::CreateShelfWidget(WmWindow* root) { - WmShelf::CreateShelfWidget(root); - bezel_event_handler_.reset(new ShelfBezelEventHandler(this)); -} - -void WmShelfAura::WillDeleteShelfLayoutManager() { - // Clear event handlers that might forward events to the destroyed instance. 
- auto_hide_event_handler_.reset(); - bezel_event_handler_.reset(); - WmShelf::WillDeleteShelfLayoutManager(); -} - -void WmShelfAura::WillChangeVisibilityState(ShelfVisibilityState new_state) { - WmShelf::WillChangeVisibilityState(new_state); - if (new_state != SHELF_AUTO_HIDE) { - auto_hide_event_handler_.reset(); - } else if (!auto_hide_event_handler_) { - auto_hide_event_handler_.reset( - new AutoHideEventHandler(shelf_layout_manager())); - } -} - -} // namespace ash
diff --git a/ash/aura/wm_shelf_aura.h b/ash/aura/wm_shelf_aura.h deleted file mode 100644 index 48c6467..0000000 --- a/ash/aura/wm_shelf_aura.h +++ /dev/null
@@ -1,43 +0,0 @@ -// Copyright 2016 The Chromium Authors. All rights reserved. -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -#ifndef ASH_AURA_WM_SHELF_AURA_H_ -#define ASH_AURA_WM_SHELF_AURA_H_ - -#include "ash/ash_export.h" -#include "ash/common/shelf/wm_shelf.h" -#include "base/macros.h" - -namespace ash { - -class ShelfBezelEventHandler; - -// Aura implementation of WmShelf. -class ASH_EXPORT WmShelfAura : public WmShelf { - public: - WmShelfAura(); - ~WmShelfAura() override; - - // WmShelf: - void CreateShelfWidget(WmWindow* root) override; - void WillDeleteShelfLayoutManager() override; - void WillChangeVisibilityState(ShelfVisibilityState new_state) override; - - private: - class AutoHideEventHandler; - - // Forwards mouse and gesture events to ShelfLayoutManager for auto-hide. - // TODO(mash): Facilitate simliar functionality in mash: crbug.com/631216 - std::unique_ptr<AutoHideEventHandler> auto_hide_event_handler_; - - // Forwards touch gestures on a bezel sensor to the shelf. - // TODO(mash): Facilitate simliar functionality in mash: crbug.com/636647 - std::unique_ptr<ShelfBezelEventHandler> bezel_event_handler_; - - DISALLOW_COPY_AND_ASSIGN(WmShelfAura); -}; - -} // namespace ash - -#endif // ASH_AURA_WM_SHELF_AURA_H_
diff --git a/ash/common/accelerators/accelerator_controller.cc b/ash/common/accelerators/accelerator_controller.cc index 38bae00..45540c3 100644 --- a/ash/common/accelerators/accelerator_controller.cc +++ b/ash/common/accelerators/accelerator_controller.cc
@@ -169,12 +169,15 @@ } bool CanHandleCycleMru(const ui::Accelerator& accelerator) { - // Don't do anything when Alt+Tab comes from a virtual keyboard. Touchscreen - // users have better window switching options. See http://crbug.com/638269 + // Don't do anything when Alt+Tab is hit while a virtual keyboard is showing. + // Touchscreen users have better window switching options. It would be + // preferable if we could tell whether this event actually came from a virtual + // keyboard, but there's no easy way to do so, thus we block Alt+Tab when the + // virtual keyboard is showing, even if it came from a real keyboard. See + // http://crbug.com/638269 keyboard::KeyboardController* keyboard_controller = keyboard::KeyboardController::GetInstance(); - return !(keyboard_controller && keyboard_controller->keyboard_visible() && - (accelerator.modifiers() & ui::EF_IS_SYNTHESIZED)); + return !(keyboard_controller && keyboard_controller->keyboard_visible()); } // We must avoid showing the Deprecated NEXT_IME notification erronously.
diff --git a/ash/common/shelf/wm_shelf.cc b/ash/common/shelf/wm_shelf.cc index a86a991..191d11e 100644 --- a/ash/common/shelf/wm_shelf.cc +++ b/ash/common/shelf/wm_shelf.cc
@@ -4,6 +4,7 @@ #include "ash/common/shelf/wm_shelf.h" +#include "ash/aura/wm_window_aura.h" #include "ash/common/shelf/shelf_controller.h" #include "ash/common/shelf/shelf_delegate.h" #include "ash/common/shelf/shelf_item_delegate.h" @@ -18,11 +19,50 @@ #include "ash/common/wm_shell.h" #include "ash/common/wm_window.h" #include "ash/public/cpp/shell_window_ids.h" +#include "ash/shelf/shelf_bezel_event_handler.h" +#include "ash/shell.h" #include "base/logging.h" +#include "base/memory/ptr_util.h" +#include "ui/aura/env.h" #include "ui/gfx/geometry/rect.h" namespace ash { +// WmShelf::AutoHideEventHandler ----------------------------------------------- + +// Forwards mouse and gesture events to ShelfLayoutManager for auto-hide. +// TODO(mash): Add similar event handling support for mash. +class WmShelf::AutoHideEventHandler : public ui::EventHandler { + public: + explicit AutoHideEventHandler(ShelfLayoutManager* shelf_layout_manager) + : shelf_layout_manager_(shelf_layout_manager) { + Shell::GetInstance()->AddPreTargetHandler(this); + } + ~AutoHideEventHandler() override { + Shell::GetInstance()->RemovePreTargetHandler(this); + } + + // Overridden from ui::EventHandler: + void OnMouseEvent(ui::MouseEvent* event) override { + shelf_layout_manager_->UpdateAutoHideForMouseEvent( + event, WmWindowAura::Get(static_cast<aura::Window*>(event->target()))); + } + void OnGestureEvent(ui::GestureEvent* event) override { + shelf_layout_manager_->UpdateAutoHideForGestureEvent( + event, WmWindowAura::Get(static_cast<aura::Window*>(event->target()))); + } + + private: + ShelfLayoutManager* shelf_layout_manager_; + DISALLOW_COPY_AND_ASSIGN(AutoHideEventHandler); +}; + +// WmShelf --------------------------------------------------------------------- + +WmShelf::WmShelf() : time_last_auto_hide_change_(base::TimeTicks::Now()) {} + +WmShelf::~WmShelf() {} + // static WmShelf* WmShelf::ForWindow(WmWindow* window) { return window->GetRootWindowController()->GetShelf(); @@ -72,6 +112,11 
@@ WmWindow* status_container = root->GetChildByShellWindowId(kShellWindowId_StatusContainer); shelf_widget_->CreateStatusAreaWidget(status_container); + + // TODO: ShelfBezelEventHandler needs to work with mus too. + // http://crbug.com/636647 + if (aura::Env::GetInstance()->mode() == aura::Env::Mode::LOCAL) + bezel_event_handler_ = base::MakeUnique<ShelfBezelEventHandler>(this); } void WmShelf::ShutdownShelfWidget() { @@ -309,11 +354,16 @@ return shelf_view_; } -WmShelf::WmShelf() : time_last_auto_hide_change_(base::TimeTicks::Now()) {} - -WmShelf::~WmShelf() {} - void WmShelf::WillDeleteShelfLayoutManager() { + if (aura::Env::GetInstance()->mode() == aura::Env::Mode::MUS) { + // TODO(sky): this should be removed once Shell is used everywhere. + ShutdownShelfWidget(); + } + + // Clear event handlers that might forward events to the destroyed instance. + auto_hide_event_handler_.reset(); + bezel_event_handler_.reset(); + DCHECK(shelf_layout_manager_); shelf_layout_manager_->RemoveObserver(this); shelf_layout_manager_ = nullptr; @@ -322,6 +372,13 @@ void WmShelf::WillChangeVisibilityState(ShelfVisibilityState new_state) { for (auto& observer : observers_) observer.WillChangeVisibilityState(new_state); + if (new_state != SHELF_AUTO_HIDE) { + auto_hide_event_handler_.reset(); + } else if (!auto_hide_event_handler_ && + aura::Env::GetInstance()->mode() == aura::Env::Mode::LOCAL) { + auto_hide_event_handler_ = + base::MakeUnique<AutoHideEventHandler>(shelf_layout_manager()); + } } void WmShelf::OnAutoHideStateChanged(ShelfAutoHideState new_state) {
diff --git a/ash/common/shelf/wm_shelf.h b/ash/common/shelf/wm_shelf.h index 0e97159..819be307 100644 --- a/ash/common/shelf/wm_shelf.h +++ b/ash/common/shelf/wm_shelf.h
@@ -23,6 +23,7 @@ namespace ash { +class ShelfBezelEventHandler; class ShelfLayoutManager; class ShelfLayoutManagerTest; class ShelfLockingManager; @@ -36,6 +37,9 @@ // controller. Note that the shelf widget may not be created until after login. class ASH_EXPORT WmShelf : public ShelfLayoutManagerObserver { public: + WmShelf(); + ~WmShelf() override; + // Returns the shelf for the display that |window| is on. Note that the shelf // widget may not exist, or the shelf may not be visible. static WmShelf* ForWindow(WmWindow* window); @@ -44,7 +48,7 @@ // adjust the alignment (eg. not allowed in guest and supervised user modes). static bool CanChangeShelfAlignment(); - virtual void CreateShelfWidget(WmWindow* root); + void CreateShelfWidget(WmWindow* root); void ShutdownShelfWidget(); void DestroyShelfWidget(); @@ -136,9 +140,6 @@ ShelfView* GetShelfViewForTesting(); protected: - WmShelf(); - ~WmShelf() override; - // ShelfLayoutManagerObserver: void WillDeleteShelfLayoutManager() override; void WillChangeVisibilityState(ShelfVisibilityState new_state) override; @@ -147,6 +148,7 @@ BackgroundAnimatorChangeType change_type) override; private: + class AutoHideEventHandler; friend class ShelfLayoutManagerTest; // Layout manager for the shelf container window. Instances are constructed by @@ -172,6 +174,14 @@ base::TimeTicks time_last_auto_hide_change_; int count_auto_hide_changes_ = 0; + // Forwards mouse and gesture events to ShelfLayoutManager for auto-hide. + // TODO(mash): Facilitate similar functionality in mash: crbug.com/631216 + std::unique_ptr<AutoHideEventHandler> auto_hide_event_handler_; + + // Forwards touch gestures on a bezel sensor to the shelf. + // TODO(mash): Facilitate similar functionality in mash: crbug.com/636647 + std::unique_ptr<ShelfBezelEventHandler> bezel_event_handler_; + DISALLOW_COPY_AND_ASSIGN(WmShelf); };
diff --git a/ash/mus/BUILD.gn b/ash/mus/BUILD.gn index b921bc58..f40d004c 100644 --- a/ash/mus/BUILD.gn +++ b/ash/mus/BUILD.gn
@@ -26,8 +26,6 @@ "bridge/wm_lookup_mus.h", "bridge/wm_root_window_controller_mus.cc", "bridge/wm_root_window_controller_mus.h", - "bridge/wm_shelf_mus.cc", - "bridge/wm_shelf_mus.h", "bridge/wm_shell_mus.cc", "bridge/wm_shell_mus.h", "bridge/wm_window_mus.cc",
diff --git a/ash/mus/bridge/wm_root_window_controller_mus.cc b/ash/mus/bridge/wm_root_window_controller_mus.cc index f539364..fab8782 100644 --- a/ash/mus/bridge/wm_root_window_controller_mus.cc +++ b/ash/mus/bridge/wm_root_window_controller_mus.cc
@@ -4,7 +4,7 @@ #include "ash/mus/bridge/wm_root_window_controller_mus.h" -#include "ash/mus/bridge/wm_shelf_mus.h" +#include "ash/common/shelf/wm_shelf.h" #include "ash/mus/bridge/wm_shell_mus.h" #include "ash/mus/bridge/wm_window_mus.h" #include "ash/mus/root_window_controller.h"
diff --git a/ash/mus/bridge/wm_shelf_mus.cc b/ash/mus/bridge/wm_shelf_mus.cc deleted file mode 100644 index e368f80..0000000 --- a/ash/mus/bridge/wm_shelf_mus.cc +++ /dev/null
@@ -1,25 +0,0 @@ -// Copyright 2016 The Chromium Authors. All rights reserved. -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -#include "ash/mus/bridge/wm_shelf_mus.h" - -#include "ash/common/shelf/shelf_widget.h" -#include "ash/common/wm_root_window_controller.h" -#include "ash/common/wm_shell.h" -#include "ash/common/wm_window.h" - -namespace ash { -namespace mus { - -WmShelfMus::WmShelfMus() {} - -WmShelfMus::~WmShelfMus() {} - -void WmShelfMus::WillDeleteShelfLayoutManager() { - ShutdownShelfWidget(); - WmShelf::WillDeleteShelfLayoutManager(); -} - -} // namespace mus -} // namespace ash
diff --git a/ash/mus/bridge/wm_shelf_mus.h b/ash/mus/bridge/wm_shelf_mus.h deleted file mode 100644 index 9a2726f..0000000 --- a/ash/mus/bridge/wm_shelf_mus.h +++ /dev/null
@@ -1,30 +0,0 @@ -// Copyright 2016 The Chromium Authors. All rights reserved. -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -#ifndef ASH_MUS_BRIDGE_WM_SHELF_MUS_H_ -#define ASH_MUS_BRIDGE_WM_SHELF_MUS_H_ - -#include "ash/common/shelf/wm_shelf.h" -#include "base/macros.h" - -namespace ash { -namespace mus { - -// WmShelf implementation for mus. -class WmShelfMus : public WmShelf { - public: - WmShelfMus(); - ~WmShelfMus() override; - - // WmShelf: - void WillDeleteShelfLayoutManager() override; - - private: - DISALLOW_COPY_AND_ASSIGN(WmShelfMus); -}; - -} // namespace mus -} // namespace ash - -#endif // ASH_MUS_BRIDGE_WM_SHELF_MUS_H_
diff --git a/ash/mus/root_window_controller.cc b/ash/mus/root_window_controller.cc index 61ae21e..cbc62d6 100644 --- a/ash/mus/root_window_controller.cc +++ b/ash/mus/root_window_controller.cc
@@ -14,12 +14,12 @@ #include <vector> #include "ash/common/shelf/shelf_layout_manager.h" +#include "ash/common/shelf/wm_shelf.h" #include "ash/common/wm/container_finder.h" #include "ash/common/wm/dock/docked_window_layout_manager.h" #include "ash/common/wm/panels/panel_layout_manager.h" #include "ash/common/wm/root_window_layout_manager.h" #include "ash/mus/bridge/wm_root_window_controller_mus.h" -#include "ash/mus/bridge/wm_shelf_mus.h" #include "ash/mus/bridge/wm_shell_mus.h" #include "ash/mus/bridge/wm_window_mus.h" #include "ash/mus/non_client_frame_controller.h" @@ -68,7 +68,7 @@ window_tree_host_(std::move(window_tree_host)), window_count_(0), display_(display), - wm_shelf_(base::MakeUnique<WmShelfMus>()) { + wm_shelf_(base::MakeUnique<WmShelf>()) { wm_root_window_controller_ = base::MakeUnique<WmRootWindowControllerMus>( window_manager_->shell(), this); wm_root_window_controller_->CreateContainers();
diff --git a/ash/mus/root_window_controller.h b/ash/mus/root_window_controller.h index e125f25..d4c5532 100644 --- a/ash/mus/root_window_controller.h +++ b/ash/mus/root_window_controller.h
@@ -28,11 +28,12 @@ namespace ash { +class WmShelf; + namespace mus { class WindowManager; class WmRootWindowControllerMus; -class WmShelfMus; class WmTestBase; class WmTestHelper; class WmWindowMus; @@ -75,7 +76,7 @@ const display::Display& display() const { return display_; } - WmShelfMus* wm_shelf() { return wm_shelf_.get(); } + WmShelf* wm_shelf() { return wm_shelf_.get(); } private: friend class WmTestBase; @@ -93,7 +94,7 @@ display::Display display_; std::unique_ptr<WmRootWindowControllerMus> wm_root_window_controller_; - std::unique_ptr<WmShelfMus> wm_shelf_; + std::unique_ptr<WmShelf> wm_shelf_; std::unique_ptr<aura::client::WindowParentingClient> parenting_client_;
diff --git a/ash/root_window_controller.cc b/ash/root_window_controller.cc index 791e4dc..d31636cd 100644 --- a/ash/root_window_controller.cc +++ b/ash/root_window_controller.cc
@@ -10,7 +10,6 @@ #include "ash/ash_touch_exploration_manager_chromeos.h" #include "ash/aura/aura_layout_manager_adapter.h" #include "ash/aura/wm_root_window_controller_aura.h" -#include "ash/aura/wm_shelf_aura.h" #include "ash/aura/wm_window_aura.h" #include "ash/common/ash_constants.h" #include "ash/common/ash_switches.h" @@ -20,6 +19,7 @@ #include "ash/common/shelf/shelf_delegate.h" #include "ash/common/shelf/shelf_layout_manager.h" #include "ash/common/shelf/shelf_widget.h" +#include "ash/common/shelf/wm_shelf.h" #include "ash/common/shell_delegate.h" #include "ash/common/system/status_area_layout_manager.h" #include "ash/common/system/status_area_widget.h" @@ -282,10 +282,9 @@ void RootWindowController::CloseChildWindows() { // Remove observer as deactivating keyboard causes // docked_window_layout_manager() to fire notifications. - if (docked_window_layout_manager() && - wm_shelf_aura_->shelf_layout_manager()) { + if (docked_window_layout_manager() && wm_shelf_->shelf_layout_manager()) { docked_window_layout_manager()->RemoveObserver( - wm_shelf_aura_->shelf_layout_manager()); + wm_shelf_->shelf_layout_manager()); } // Deactivate keyboard container before closing child windows and shutting @@ -303,23 +302,23 @@ } ShelfLayoutManager* RootWindowController::GetShelfLayoutManager() { - return wm_shelf_aura_->shelf_layout_manager(); + return wm_shelf_->shelf_layout_manager(); } StatusAreaWidget* RootWindowController::GetStatusAreaWidget() { - ShelfWidget* shelf_widget = wm_shelf_aura_->shelf_widget(); + ShelfWidget* shelf_widget = wm_shelf_->shelf_widget(); return shelf_widget ? shelf_widget->status_area_widget() : nullptr; } SystemTray* RootWindowController::GetSystemTray() { // We assume in throughout the code that this will not return NULL. If code // triggers this for valid reasons, it should test status_area_widget first. 
- CHECK(wm_shelf_aura_->shelf_widget()->status_area_widget()); - return wm_shelf_aura_->shelf_widget()->status_area_widget()->system_tray(); + CHECK(wm_shelf_->shelf_widget()->status_area_widget()); + return wm_shelf_->shelf_widget()->status_area_widget()->system_tray(); } void RootWindowController::UpdateShelfVisibility() { - wm_shelf_aura_->UpdateVisibilityState(); + wm_shelf_->UpdateVisibilityState(); } aura::Window* RootWindowController::GetWindowForFullscreenMode() { @@ -334,7 +333,7 @@ return; } DCHECK(keyboard_controller); - keyboard_controller->AddObserver(wm_shelf_aura_->shelf_layout_manager()); + keyboard_controller->AddObserver(wm_shelf_->shelf_layout_manager()); keyboard_controller->AddObserver(panel_layout_manager()); keyboard_controller->AddObserver(docked_window_layout_manager()); keyboard_controller->AddObserver(workspace_controller()->layout_manager()); @@ -364,7 +363,7 @@ // Virtual keyboard may be deactivated while still showing, notify all // observers that keyboard bounds changed to 0 before remove them. keyboard_controller->NotifyKeyboardBoundsChanging(gfx::Rect()); - keyboard_controller->RemoveObserver(wm_shelf_aura_->shelf_layout_manager()); + keyboard_controller->RemoveObserver(wm_shelf_->shelf_layout_manager()); keyboard_controller->RemoveObserver(panel_layout_manager()); keyboard_controller->RemoveObserver(docked_window_layout_manager()); keyboard_controller->RemoveObserver( @@ -392,7 +391,7 @@ RootWindowController::RootWindowController(AshWindowTreeHost* ash_host) : ash_host_(ash_host), - wm_shelf_aura_(new WmShelfAura), + wm_shelf_(base::MakeUnique<WmShelf>()), touch_hud_debug_(NULL), touch_hud_projection_(NULL) { aura::Window* root_window = GetRootWindow(); @@ -451,7 +450,7 @@ void RootWindowController::InitLayoutManagers() { // Create the shelf and status area widgets. 
- DCHECK(!wm_shelf_aura_->shelf_widget()); + DCHECK(!wm_shelf_->shelf_widget()); aura::Window* shelf_container = GetContainer(kShellWindowId_ShelfContainer); aura::Window* status_container = GetContainer(kShellWindowId_StatusContainer); WmWindow* wm_shelf_container = WmWindowAura::Get(shelf_container); @@ -462,9 +461,9 @@ // Make it easier to resize windows that partially overlap the shelf. Must // occur after the ShelfLayoutManager is constructed by ShelfWidget. shelf_container->SetEventTargeter(base::MakeUnique<ShelfWindowTargeter>( - wm_shelf_container, wm_shelf_aura_.get())); + wm_shelf_container, wm_shelf_.get())); status_container->SetEventTargeter(base::MakeUnique<ShelfWindowTargeter>( - wm_status_container, wm_shelf_aura_.get())); + wm_status_container, wm_shelf_.get())); panel_container_handler_ = base::MakeUnique<PanelWindowEventHandler>(); GetContainer(kShellWindowId_PanelContainer) @@ -538,7 +537,7 @@ } void RootWindowController::OnLoginStateChanged(LoginStatus status) { - wm_shelf_aura_->UpdateVisibilityState(); + wm_shelf_->UpdateVisibilityState(); } void RootWindowController::OnTouchHudProjectionToggled(bool enabled) {
diff --git a/ash/root_window_controller.h b/ash/root_window_controller.h index 3588097..1fe197d 100644 --- a/ash/root_window_controller.h +++ b/ash/root_window_controller.h
@@ -54,7 +54,7 @@ class TouchHudDebug; class TouchHudProjection; class WmRootWindowControllerAura; -class WmShelfAura; +class WmShelf; class WorkspaceController; // This class maintains the per root window state for ash. This class @@ -94,7 +94,7 @@ WorkspaceController* workspace_controller(); - WmShelfAura* wm_shelf_aura() const { return wm_shelf_aura_.get(); } + WmShelf* wm_shelf() const { return wm_shelf_.get(); } WmRootWindowControllerAura* wm_root_window_controller() { return wm_root_window_controller_; @@ -216,7 +216,7 @@ // The shelf controller for this root window. Exists for the entire lifetime // of the RootWindowController so that it is safe for observers to be added // to it during construction of the shelf widget and status tray. - std::unique_ptr<WmShelfAura> wm_shelf_aura_; + std::unique_ptr<WmShelf> wm_shelf_; std::unique_ptr<SystemWallpaperController> system_wallpaper_;
diff --git a/ash/screen_util.cc b/ash/screen_util.cc index e6d48059..29baadf4 100644 --- a/ash/screen_util.cc +++ b/ash/screen_util.cc
@@ -4,7 +4,7 @@ #include "ash/screen_util.h" -#include "ash/aura/wm_shelf_aura.h" +#include "ash/common/shelf/wm_shelf.h" #include "ash/root_window_controller.h" #include "ash/shell.h" #include "base/logging.h" @@ -20,7 +20,7 @@ // static gfx::Rect ScreenUtil::GetMaximizedWindowBoundsInParent(aura::Window* window) { aura::Window* root_window = window->GetRootWindow(); - if (GetRootWindowController(root_window)->wm_shelf_aura()->shelf_widget()) + if (GetRootWindowController(root_window)->wm_shelf()->shelf_widget()) return GetDisplayWorkAreaBoundsInParent(window); else return GetDisplayBoundsInParent(window);
diff --git a/ash/shelf/shelf_layout_manager_unittest.cc b/ash/shelf/shelf_layout_manager_unittest.cc index 22a2865..2219b33 100644 --- a/ash/shelf/shelf_layout_manager_unittest.cc +++ b/ash/shelf/shelf_layout_manager_unittest.cc
@@ -4,7 +4,6 @@ #include "ash/common/shelf/shelf_layout_manager.h" -#include "ash/aura/wm_shelf_aura.h" #include "ash/aura/wm_window_aura.h" #include "ash/common/accelerators/accelerator_controller.h" #include "ash/common/accelerators/accelerator_table.h" @@ -1010,8 +1009,8 @@ EXPECT_EQ(root_windows.size(), 2U); // Get the shelves in both displays and set them to be 'AutoHide'. - WmShelf* shelf_1 = GetRootWindowController(root_windows[0])->wm_shelf_aura(); - WmShelf* shelf_2 = GetRootWindowController(root_windows[1])->wm_shelf_aura(); + WmShelf* shelf_1 = GetRootWindowController(root_windows[0])->wm_shelf(); + WmShelf* shelf_2 = GetRootWindowController(root_windows[1])->wm_shelf(); EXPECT_NE(shelf_1, shelf_2); EXPECT_NE(shelf_1->GetWindow()->GetRootWindow(), shelf_2->GetWindow()->GetRootWindow()); @@ -1131,8 +1130,8 @@ EXPECT_EQ(2U, root_windows.size()); // Get the shelves in both displays and set them to be 'AutoHide'. - WmShelf* shelf_1 = GetRootWindowController(root_windows[0])->wm_shelf_aura(); - WmShelf* shelf_2 = GetRootWindowController(root_windows[1])->wm_shelf_aura(); + WmShelf* shelf_1 = GetRootWindowController(root_windows[0])->wm_shelf(); + WmShelf* shelf_2 = GetRootWindowController(root_windows[1])->wm_shelf(); EXPECT_NE(shelf_1, shelf_2); EXPECT_NE(shelf_1->GetWindow()->GetRootWindow(), shelf_2->GetWindow()->GetRootWindow());
diff --git a/base/allocator/partition_allocator/address_space_randomization.cc b/base/allocator/partition_allocator/address_space_randomization.cc index fd66b1b0..1be5baf 100644 --- a/base/allocator/partition_allocator/address_space_randomization.cc +++ b/base/allocator/partition_allocator/address_space_randomization.cc
@@ -80,7 +80,7 @@ // Calculates a random preferred mapping address. In calculating an address, we // balance good ASLR against not fragmenting the address space too badly. -void* getRandomPageBase() { +void* GetRandomPageBase() { uintptr_t random; random = static_cast<uintptr_t>(ranval(&s_ranctx)); #if defined(ARCH_CPU_X86_64)
diff --git a/base/allocator/partition_allocator/address_space_randomization.h b/base/allocator/partition_allocator/address_space_randomization.h index 4455cc4..19069b4 100644 --- a/base/allocator/partition_allocator/address_space_randomization.h +++ b/base/allocator/partition_allocator/address_space_randomization.h
@@ -9,7 +9,7 @@ // Calculates a random preferred mapping address. In calculating an address, we // balance good ASLR against not fragmenting the address space too badly. -void* getRandomPageBase(); +void* GetRandomPageBase(); } // namespace base
diff --git a/base/allocator/partition_allocator/page_allocator.cc b/base/allocator/partition_allocator/page_allocator.cc index 5b1ef8ba..1884c469 100644 --- a/base/allocator/partition_allocator/page_allocator.cc +++ b/base/allocator/partition_allocator/page_allocator.cc
@@ -45,25 +45,25 @@ // This internal function wraps the OS-specific page allocation call: // |VirtualAlloc| on Windows, and |mmap| on POSIX. -static void* systemAllocPages( +static void* SystemAllocPages( void* hint, - size_t len, - PageAccessibilityConfiguration pageAccessibility) { - DCHECK(!(len & kPageAllocationGranularityOffsetMask)); + size_t length, + PageAccessibilityConfiguration page_accessibility) { + DCHECK(!(length & kPageAllocationGranularityOffsetMask)); DCHECK(!(reinterpret_cast<uintptr_t>(hint) & kPageAllocationGranularityOffsetMask)); void* ret; #if defined(OS_WIN) - DWORD accessFlag = - pageAccessibility == PageAccessible ? PAGE_READWRITE : PAGE_NOACCESS; - ret = VirtualAlloc(hint, len, MEM_RESERVE | MEM_COMMIT, accessFlag); + DWORD access_flag = + page_accessibility == PageAccessible ? PAGE_READWRITE : PAGE_NOACCESS; + ret = VirtualAlloc(hint, length, MEM_RESERVE | MEM_COMMIT, access_flag); if (!ret) base::subtle::Release_Store(&s_allocPageErrorCode, GetLastError()); #else - int accessFlag = pageAccessibility == PageAccessible - ? (PROT_READ | PROT_WRITE) - : PROT_NONE; - ret = mmap(hint, len, accessFlag, MAP_ANONYMOUS | MAP_PRIVATE, -1, 0); + int access_flag = page_accessibility == PageAccessible + ? (PROT_READ | PROT_WRITE) + : PROT_NONE; + ret = mmap(hint, length, access_flag, MAP_ANONYMOUS | MAP_PRIVATE, -1, 0); if (ret == MAP_FAILED) { base::subtle::Release_Store(&s_allocPageErrorCode, errno); ret = 0; @@ -74,204 +74,204 @@ // Trims base to given length and alignment. Windows returns null on failure and // frees base. 
-static void* trimMapping(void* base, - size_t baseLen, - size_t trimLen, +static void* TrimMapping(void* base, + size_t base_length, + size_t trim_length, uintptr_t align, - PageAccessibilityConfiguration pageAccessibility) { - size_t preSlack = reinterpret_cast<uintptr_t>(base) & (align - 1); - if (preSlack) - preSlack = align - preSlack; - size_t postSlack = baseLen - preSlack - trimLen; - DCHECK(baseLen >= trimLen || preSlack || postSlack); - DCHECK(preSlack < baseLen); - DCHECK(postSlack < baseLen); + PageAccessibilityConfiguration page_accessibility) { + size_t pre_slack = reinterpret_cast<uintptr_t>(base) & (align - 1); + if (pre_slack) + pre_slack = align - pre_slack; + size_t post_slack = base_length - pre_slack - trim_length; + DCHECK(base_length >= trim_length || pre_slack || post_slack); + DCHECK(pre_slack < base_length); + DCHECK(post_slack < base_length); void* ret = base; #if defined(OS_POSIX) // On POSIX we can resize the allocation run. - (void)pageAccessibility; - if (preSlack) { - int res = munmap(base, preSlack); + (void)page_accessibility; + if (pre_slack) { + int res = munmap(base, pre_slack); CHECK(!res); - ret = reinterpret_cast<char*>(base) + preSlack; + ret = reinterpret_cast<char*>(base) + pre_slack; } - if (postSlack) { - int res = munmap(reinterpret_cast<char*>(ret) + trimLen, postSlack); + if (post_slack) { + int res = munmap(reinterpret_cast<char*>(ret) + trim_length, post_slack); CHECK(!res); } #else // On Windows we can't resize the allocation run. 
- if (preSlack || postSlack) { - ret = reinterpret_cast<char*>(base) + preSlack; - freePages(base, baseLen); - ret = systemAllocPages(ret, trimLen, pageAccessibility); + if (pre_slack || post_slack) { + ret = reinterpret_cast<char*>(base) + pre_slack; + FreePages(base, base_length); + ret = SystemAllocPages(ret, trim_length, page_accessibility); } #endif return ret; } -void* allocPages(void* addr, - size_t len, +void* AllocPages(void* address, + size_t length, size_t align, - PageAccessibilityConfiguration pageAccessibility) { - DCHECK(len >= kPageAllocationGranularity); - DCHECK(!(len & kPageAllocationGranularityOffsetMask)); + PageAccessibilityConfiguration page_accessibility) { + DCHECK(length >= kPageAllocationGranularity); + DCHECK(!(length & kPageAllocationGranularityOffsetMask)); DCHECK(align >= kPageAllocationGranularity); DCHECK(!(align & kPageAllocationGranularityOffsetMask)); - DCHECK(!(reinterpret_cast<uintptr_t>(addr) & + DCHECK(!(reinterpret_cast<uintptr_t>(address) & kPageAllocationGranularityOffsetMask)); - uintptr_t alignOffsetMask = align - 1; - uintptr_t alignBaseMask = ~alignOffsetMask; - DCHECK(!(reinterpret_cast<uintptr_t>(addr) & alignOffsetMask)); + uintptr_t align_offset_mask = align - 1; + uintptr_t align_base_mask = ~align_offset_mask; + DCHECK(!(reinterpret_cast<uintptr_t>(address) & align_offset_mask)); // If the client passed null as the address, choose a good one. - if (!addr) { - addr = getRandomPageBase(); - addr = reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(addr) & - alignBaseMask); + if (!address) { + address = GetRandomPageBase(); + address = reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(address) & + align_base_mask); } // First try to force an exact-size, aligned allocation from our random base. 
for (int count = 0; count < 3; ++count) { - void* ret = systemAllocPages(addr, len, pageAccessibility); + void* ret = SystemAllocPages(address, length, page_accessibility); if (kHintIsAdvisory || ret) { // If the alignment is to our liking, we're done. - if (!(reinterpret_cast<uintptr_t>(ret) & alignOffsetMask)) + if (!(reinterpret_cast<uintptr_t>(ret) & align_offset_mask)) return ret; - freePages(ret, len); + FreePages(ret, length); #if defined(ARCH_CPU_32_BITS) - addr = reinterpret_cast<void*>( - (reinterpret_cast<uintptr_t>(ret) + align) & alignBaseMask); + address = reinterpret_cast<void*>( + (reinterpret_cast<uintptr_t>(ret) + align) & align_base_mask); #endif - } else if (!addr) { // We know we're OOM when an unhinted allocation fails. + } else if (!address) { // We know we're OOM when an unhinted allocation + // fails. return nullptr; - } else { #if defined(ARCH_CPU_32_BITS) - addr = reinterpret_cast<char*>(addr) + align; + address = reinterpret_cast<char*>(address) + align; #endif } #if !defined(ARCH_CPU_32_BITS) // Keep trying random addresses on systems that have a large address space. - addr = getRandomPageBase(); - addr = reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(addr) & - alignBaseMask); + address = GetRandomPageBase(); + address = reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(address) & + align_base_mask); #endif } // Map a larger allocation so we can force alignment, but continue randomizing // only on 64-bit POSIX. - size_t tryLen = len + (align - kPageAllocationGranularity); - CHECK(tryLen >= len); + size_t try_length = length + (align - kPageAllocationGranularity); + CHECK(try_length >= length); void* ret; do { // Don't continue to burn cycles on mandatory hints (Windows). - addr = kHintIsAdvisory ? getRandomPageBase() : nullptr; - ret = systemAllocPages(addr, tryLen, pageAccessibility); + address = kHintIsAdvisory ? 
GetRandomPageBase() : nullptr; + ret = SystemAllocPages(address, try_length, page_accessibility); // The retries are for Windows, where a race can steal our mapping on // resize. } while (ret && - (ret = trimMapping(ret, tryLen, len, align, pageAccessibility)) == - nullptr); + (ret = TrimMapping(ret, try_length, length, align, + page_accessibility)) == nullptr); return ret; } -void freePages(void* addr, size_t len) { - DCHECK(!(reinterpret_cast<uintptr_t>(addr) & +void FreePages(void* address, size_t length) { + DCHECK(!(reinterpret_cast<uintptr_t>(address) & kPageAllocationGranularityOffsetMask)); - DCHECK(!(len & kPageAllocationGranularityOffsetMask)); + DCHECK(!(length & kPageAllocationGranularityOffsetMask)); #if defined(OS_POSIX) - int ret = munmap(addr, len); + int ret = munmap(address, length); CHECK(!ret); #else - BOOL ret = VirtualFree(addr, 0, MEM_RELEASE); + BOOL ret = VirtualFree(address, 0, MEM_RELEASE); CHECK(ret); #endif } -void setSystemPagesInaccessible(void* addr, size_t len) { - DCHECK(!(len & kSystemPageOffsetMask)); +void SetSystemPagesInaccessible(void* address, size_t length) { + DCHECK(!(length & kSystemPageOffsetMask)); #if defined(OS_POSIX) - int ret = mprotect(addr, len, PROT_NONE); + int ret = mprotect(address, length, PROT_NONE); CHECK(!ret); #else - BOOL ret = VirtualFree(addr, len, MEM_DECOMMIT); + BOOL ret = VirtualFree(address, length, MEM_DECOMMIT); CHECK(ret); #endif } -bool setSystemPagesAccessible(void* addr, size_t len) { - DCHECK(!(len & kSystemPageOffsetMask)); +bool SetSystemPagesAccessible(void* address, size_t length) { + DCHECK(!(length & kSystemPageOffsetMask)); #if defined(OS_POSIX) - return !mprotect(addr, len, PROT_READ | PROT_WRITE); + return !mprotect(address, length, PROT_READ | PROT_WRITE); #else - return !!VirtualAlloc(addr, len, MEM_COMMIT, PAGE_READWRITE); + return !!VirtualAlloc(address, length, MEM_COMMIT, PAGE_READWRITE); #endif } -void decommitSystemPages(void* addr, size_t len) { - DCHECK(!(len & 
kSystemPageOffsetMask)); +void DecommitSystemPages(void* address, size_t length) { + DCHECK(!(length & kSystemPageOffsetMask)); #if defined(OS_POSIX) - int ret = madvise(addr, len, MADV_FREE); + int ret = madvise(address, length, MADV_FREE); if (ret != 0 && errno == EINVAL) { // MADV_FREE only works on Linux 4.5+ . If request failed, // retry with older MADV_DONTNEED . Note that MADV_FREE // being defined at compile time doesn't imply runtime support. - ret = madvise(addr, len, MADV_DONTNEED); + ret = madvise(address, length, MADV_DONTNEED); } CHECK(!ret); #else - setSystemPagesInaccessible(addr, len); + SetSystemPagesInaccessible(address, length); #endif } -void recommitSystemPages(void* addr, size_t len) { - DCHECK(!(len & kSystemPageOffsetMask)); +void RecommitSystemPages(void* address, size_t length) { + DCHECK(!(length & kSystemPageOffsetMask)); #if defined(OS_POSIX) - (void)addr; + (void)address; #else - CHECK(setSystemPagesAccessible(addr, len)); + CHECK(SetSystemPagesAccessible(address, length)); #endif } -void discardSystemPages(void* addr, size_t len) { - DCHECK(!(len & kSystemPageOffsetMask)); +void DiscardSystemPages(void* address, size_t length) { + DCHECK(!(length & kSystemPageOffsetMask)); #if defined(OS_POSIX) // On POSIX, the implementation detail is that discard and decommit are the // same, and lead to pages that are returned to the system immediately and // get replaced with zeroed pages when touched. So we just call - // decommitSystemPages() here to avoid code duplication. - decommitSystemPages(addr, len); + // DecommitSystemPages() here to avoid code duplication. + DecommitSystemPages(address, length); #else // On Windows discarded pages are not returned to the system immediately and // not guaranteed to be zeroed when returned to the application. 
using DiscardVirtualMemoryFunction = DWORD(WINAPI*)(PVOID virtualAddress, SIZE_T size); - static DiscardVirtualMemoryFunction discardVirtualMemory = + static DiscardVirtualMemoryFunction discard_virtual_memory = reinterpret_cast<DiscardVirtualMemoryFunction>(-1); - if (discardVirtualMemory == + if (discard_virtual_memory == reinterpret_cast<DiscardVirtualMemoryFunction>(-1)) - discardVirtualMemory = + discard_virtual_memory = reinterpret_cast<DiscardVirtualMemoryFunction>(GetProcAddress( GetModuleHandle(L"Kernel32.dll"), "DiscardVirtualMemory")); // Use DiscardVirtualMemory when available because it releases faster than // MEM_RESET. DWORD ret = 1; - if (discardVirtualMemory) - ret = discardVirtualMemory(addr, len); + if (discard_virtual_memory) + ret = discard_virtual_memory(address, length); // DiscardVirtualMemory is buggy in Win10 SP0, so fall back to MEM_RESET on // failure. if (ret) { - void* ret = VirtualAlloc(addr, len, MEM_RESET, PAGE_READWRITE); + void* ret = VirtualAlloc(address, length, MEM_RESET, PAGE_READWRITE); CHECK(ret); } #endif } -uint32_t getAllocPageErrorCode() { +uint32_t GetAllocPageErrorCode() { return base::subtle::Acquire_Load(&s_allocPageErrorCode); }
diff --git a/base/allocator/partition_allocator/page_allocator.h b/base/allocator/partition_allocator/page_allocator.h index d520505..f57beb7 100644 --- a/base/allocator/partition_allocator/page_allocator.h +++ b/base/allocator/partition_allocator/page_allocator.h
@@ -49,20 +49,20 @@ // PageAccessibilityConfiguration controls the permission of the // allocated pages. // This call will return null if the allocation cannot be satisfied. -BASE_EXPORT void* allocPages(void* addr, +BASE_EXPORT void* AllocPages(void* address, size_t len, size_t align, PageAccessibilityConfiguration); // Free one or more pages. // addr and len must match a previous call to allocPages(). -BASE_EXPORT void freePages(void* addr, size_t len); +BASE_EXPORT void FreePages(void* address, size_t length); // Mark one or more system pages as being inaccessible. // Subsequently accessing any address in the range will fault, and the // addresses will not be re-used by future allocations. // len must be a multiple of kSystemPageSize bytes. -BASE_EXPORT void setSystemPagesInaccessible(void* addr, size_t len); +BASE_EXPORT void SetSystemPagesInaccessible(void* address, size_t length); // Mark one or more system pages as being accessible. // The pages will be readable and writeable. @@ -70,8 +70,8 @@ // The result bool value indicates whether the permission // change succeeded or not. You must check the result // (in most cases you need to CHECK that it is true). -BASE_EXPORT WARN_UNUSED_RESULT bool setSystemPagesAccessible(void* addr, - size_t len); +BASE_EXPORT WARN_UNUSED_RESULT bool SetSystemPagesAccessible(void* address, + size_t length); // Decommit one or more system pages. Decommitted means that the physical memory // is released to the system, but the virtual address space remains reserved. @@ -83,13 +83,13 @@ // after recommitting and writing to it. In particlar note that system pages are // not guaranteed to be zero-filled upon re-commit. len must be a multiple of // kSystemPageSize bytes. -BASE_EXPORT void decommitSystemPages(void* addr, size_t len); +BASE_EXPORT void DecommitSystemPages(void* address, size_t length); // Recommit one or more system pages. Decommitted system pages must be // recommitted before they are read are written again. 
// Note that this operation may be a no-op on some platforms. // len must be a multiple of kSystemPageSize bytes. -BASE_EXPORT void recommitSystemPages(void* addr, size_t len); +BASE_EXPORT void RecommitSystemPages(void* address, size_t length); // Discard one or more system pages. Discarding is a hint to the system that // the page is no longer required. The hint may: @@ -106,18 +106,18 @@ // guaranteed stable once more. After being written to, the page content may be // based on the original page content, or a page of zeroes. // len must be a multiple of kSystemPageSize bytes. -BASE_EXPORT void discardSystemPages(void* addr, size_t len); +BASE_EXPORT void DiscardSystemPages(void* address, size_t length); -ALWAYS_INLINE uintptr_t roundUpToSystemPage(uintptr_t address) { +ALWAYS_INLINE uintptr_t RoundUpToSystemPage(uintptr_t address) { return (address + kSystemPageOffsetMask) & kSystemPageBaseMask; } -ALWAYS_INLINE uintptr_t roundDownToSystemPage(uintptr_t address) { +ALWAYS_INLINE uintptr_t RoundDownToSystemPage(uintptr_t address) { return address & kSystemPageBaseMask; } // Returns errno (or GetLastError code) when mmap (or VirtualAlloc) fails. -BASE_EXPORT uint32_t getAllocPageErrorCode(); +BASE_EXPORT uint32_t GetAllocPageErrorCode(); } // namespace base
diff --git a/base/allocator/partition_allocator/partition_alloc.cc b/base/allocator/partition_allocator/partition_alloc.cc index d5c4f6a..c2dbb578 100644 --- a/base/allocator/partition_allocator/partition_alloc.cc +++ b/base/allocator/partition_allocator/partition_alloc.cc
@@ -45,11 +45,11 @@ PartitionPage PartitionRootBase::gSeedPage; PartitionBucket PartitionRootBase::gPagedBucket; void (*PartitionRootBase::gOomHandlingFunction)() = nullptr; -PartitionAllocHooks::AllocationHook* PartitionAllocHooks::m_allocationHook = +PartitionAllocHooks::AllocationHook* PartitionAllocHooks::allocation_hook_ = nullptr; -PartitionAllocHooks::FreeHook* PartitionAllocHooks::m_freeHook = nullptr; +PartitionAllocHooks::FreeHook* PartitionAllocHooks::free_hook_ = nullptr; -static uint8_t partitionBucketNumSystemPages(size_t size) { +static uint8_t PartitionBucketNumSystemPages(size_t size) { // This works out reasonably for the current bucket sizes of the generic // allocator, and the current values of partition page size and constants. // Specifically, we have enough room to always pack the slots perfectly into @@ -60,40 +60,40 @@ // so small that the waste would be tiny with just 4, or 1, system pages. // Later, we can investigate whether there are anti-fragmentation benefits // to using fewer system pages. 
- double bestWasteRatio = 1.0f; - uint16_t bestPages = 0; + double best_waste_ratio = 1.0f; + uint16_t best_pages = 0; if (size > kMaxSystemPagesPerSlotSpan * kSystemPageSize) { DCHECK(!(size % kSystemPageSize)); - bestPages = static_cast<uint16_t>(size / kSystemPageSize); - CHECK(bestPages < (1 << 8)); - return static_cast<uint8_t>(bestPages); + best_pages = static_cast<uint16_t>(size / kSystemPageSize); + CHECK(best_pages < (1 << 8)); + return static_cast<uint8_t>(best_pages); } DCHECK(size <= kMaxSystemPagesPerSlotSpan * kSystemPageSize); for (uint16_t i = kNumSystemPagesPerPartitionPage - 1; i <= kMaxSystemPagesPerSlotSpan; ++i) { - size_t pageSize = kSystemPageSize * i; - size_t numSlots = pageSize / size; - size_t waste = pageSize - (numSlots * size); + size_t page_size = kSystemPageSize * i; + size_t num_slots = page_size / size; + size_t waste = page_size - (num_slots * size); // Leaving a page unfaulted is not free; the page will occupy an empty page // table entry. Make a simple attempt to account for that. - size_t numRemainderPages = i & (kNumSystemPagesPerPartitionPage - 1); - size_t numUnfaultedPages = - numRemainderPages - ? (kNumSystemPagesPerPartitionPage - numRemainderPages) + size_t num_remainder_pages = i & (kNumSystemPagesPerPartitionPage - 1); + size_t num_unfaulted_pages = + num_remainder_pages + ? 
(kNumSystemPagesPerPartitionPage - num_remainder_pages) : 0; - waste += sizeof(void*) * numUnfaultedPages; - double wasteRatio = (double)waste / (double)pageSize; - if (wasteRatio < bestWasteRatio) { - bestWasteRatio = wasteRatio; - bestPages = i; + waste += sizeof(void*) * num_unfaulted_pages; + double waste_ratio = (double)waste / (double)page_size; + if (waste_ratio < best_waste_ratio) { + best_waste_ratio = waste_ratio; + best_pages = i; } } - DCHECK(bestPages > 0); - CHECK(bestPages <= kMaxSystemPagesPerSlotSpan); - return static_cast<uint8_t>(bestPages); + DCHECK(best_pages > 0); + CHECK(best_pages <= kMaxSystemPagesPerSlotSpan); + return static_cast<uint8_t>(best_pages); } -static void partitionAllocBaseInit(PartitionRootBase* root) { +static void PartitionAllocBaseInit(PartitionRootBase* root) { DCHECK(!root->initialized); { subtle::SpinLock::Guard guard(PartitionRootBase::gInitializedLock); @@ -101,91 +101,93 @@ PartitionRootBase::gInitialized = true; // We mark the seed page as free to make sure it is skipped by our // logic to find a new active page. 
- PartitionRootBase::gPagedBucket.activePagesHead = + PartitionRootBase::gPagedBucket.active_pages_head = &PartitionRootGeneric::gSeedPage; } } root->initialized = true; - root->totalSizeOfCommittedPages = 0; - root->totalSizeOfSuperPages = 0; - root->totalSizeOfDirectMappedPages = 0; - root->nextSuperPage = 0; - root->nextPartitionPage = 0; - root->nextPartitionPageEnd = 0; - root->firstExtent = 0; - root->currentExtent = 0; - root->directMapList = 0; + root->total_size_of_committed_pages = 0; + root->total_size_of_super_pages = 0; + root->total_size_of_direct_mapped_pages = 0; + root->next_super_page = 0; + root->next_partition_page = 0; + root->next_partition_page_end = 0; + root->first_extent = 0; + root->current_extent = 0; + root->direct_map_list = 0; - memset(&root->globalEmptyPageRing, '\0', sizeof(root->globalEmptyPageRing)); - root->globalEmptyPageRingIndex = 0; + memset(&root->global_empty_page_ring, '\0', + sizeof(root->global_empty_page_ring)); + root->global_empty_page_ring_index = 0; // This is a "magic" value so we can test if a root pointer is valid. 
- root->invertedSelf = ~reinterpret_cast<uintptr_t>(root); + root->inverted_self = ~reinterpret_cast<uintptr_t>(root); } -static void partitionBucketInitBase(PartitionBucket* bucket, +static void PartitionBucketInitBase(PartitionBucket* bucket, PartitionRootBase* root) { - bucket->activePagesHead = &PartitionRootGeneric::gSeedPage; - bucket->emptyPagesHead = 0; - bucket->decommittedPagesHead = 0; - bucket->numFullPages = 0; - bucket->numSystemPagesPerSlotSpan = - partitionBucketNumSystemPages(bucket->slotSize); + bucket->active_pages_head = &PartitionRootGeneric::gSeedPage; + bucket->empty_pages_head = 0; + bucket->decommitted_pages_head = 0; + bucket->num_full_pages = 0; + bucket->num_system_pages_per_slot_span = + PartitionBucketNumSystemPages(bucket->slot_size); } -void partitionAllocGlobalInit(void (*oomHandlingFunction)()) { - DCHECK(oomHandlingFunction); - PartitionRootBase::gOomHandlingFunction = oomHandlingFunction; +void PartitionAllocGlobalInit(void (*oom_handling_function)()) { + DCHECK(oom_handling_function); + PartitionRootBase::gOomHandlingFunction = oom_handling_function; } -void partitionAllocInit(PartitionRoot* root, - size_t numBuckets, - size_t maxAllocation) { - partitionAllocBaseInit(root); +void PartitionAllocInit(PartitionRoot* root, + size_t num_buckets, + size_t max_allocation) { + PartitionAllocBaseInit(root); - root->numBuckets = numBuckets; - root->maxAllocation = maxAllocation; + root->num_buckets = num_buckets; + root->max_allocation = max_allocation; size_t i; - for (i = 0; i < root->numBuckets; ++i) { + for (i = 0; i < root->num_buckets; ++i) { PartitionBucket* bucket = &root->buckets()[i]; if (!i) - bucket->slotSize = kAllocationGranularity; + bucket->slot_size = kAllocationGranularity; else - bucket->slotSize = i << kBucketShift; - partitionBucketInitBase(bucket, root); + bucket->slot_size = i << kBucketShift; + PartitionBucketInitBase(bucket, root); } } -void partitionAllocGenericInit(PartitionRootGeneric* root) { +void 
PartitionAllocGenericInit(PartitionRootGeneric* root) { subtle::SpinLock::Guard guard(root->lock); - partitionAllocBaseInit(root); + PartitionAllocBaseInit(root); // Precalculate some shift and mask constants used in the hot path. // Example: malloc(41) == 101001 binary. - // Order is 6 (1 << 6-1)==32 is highest bit set. - // orderIndex is the next three MSB == 010 == 2. - // subOrderIndexMask is a mask for the remaining bits == 11 (masking to 01 for - // the subOrderIndex). + // Order is 6 (1 << 6-1) == 32 is highest bit set. + // order_index is the next three MSB == 010 == 2. + // sub_order_index_mask is a mask for the remaining bits == 11 (masking to 01 + // for + // the sub_order_index). size_t order; - for (order = 0; order <= kBitsPerSizet; ++order) { - size_t orderIndexShift; + for (order = 0; order <= kBitsPerSizeT; ++order) { + size_t order_index_shift; if (order < kGenericNumBucketsPerOrderBits + 1) - orderIndexShift = 0; + order_index_shift = 0; else - orderIndexShift = order - (kGenericNumBucketsPerOrderBits + 1); - root->orderIndexShifts[order] = orderIndexShift; - size_t subOrderIndexMask; - if (order == kBitsPerSizet) { + order_index_shift = order - (kGenericNumBucketsPerOrderBits + 1); + root->order_index_shifts[order] = order_index_shift; + size_t sub_order_index_mask; + if (order == kBitsPerSizeT) { // This avoids invoking undefined behavior for an excessive shift. - subOrderIndexMask = + sub_order_index_mask = static_cast<size_t>(-1) >> (kGenericNumBucketsPerOrderBits + 1); } else { - subOrderIndexMask = ((static_cast<size_t>(1) << order) - 1) >> - (kGenericNumBucketsPerOrderBits + 1); + sub_order_index_mask = ((static_cast<size_t>(1) << order) - 1) >> + (kGenericNumBucketsPerOrderBits + 1); } - root->orderSubIndexMasks[order] = subOrderIndexMask; + root->order_sub_index_masks[order] = sub_order_index_mask; } // Set up the actual usable buckets first. 
@@ -195,29 +197,29 @@ // We avoid them in the bucket lookup map, but we tolerate them to keep the // code simpler and the structures more generic. size_t i, j; - size_t currentSize = kGenericSmallestBucket; + size_t current_size = kGenericSmallestBucket; size_t currentIncrement = kGenericSmallestBucket >> kGenericNumBucketsPerOrderBits; PartitionBucket* bucket = &root->buckets[0]; for (i = 0; i < kGenericNumBucketedOrders; ++i) { for (j = 0; j < kGenericNumBucketsPerOrder; ++j) { - bucket->slotSize = currentSize; - partitionBucketInitBase(bucket, root); + bucket->slot_size = current_size; + PartitionBucketInitBase(bucket, root); // Disable psuedo buckets so that touching them faults. - if (currentSize % kGenericSmallestBucket) - bucket->activePagesHead = 0; - currentSize += currentIncrement; + if (current_size % kGenericSmallestBucket) + bucket->active_pages_head = 0; + current_size += currentIncrement; ++bucket; } currentIncrement <<= 1; } - DCHECK(currentSize == 1 << kGenericMaxBucketedOrder); + DCHECK(current_size == 1 << kGenericMaxBucketedOrder); DCHECK(bucket == &root->buckets[0] + kGenericNumBuckets); // Then set up the fast size -> bucket lookup table. bucket = &root->buckets[0]; - PartitionBucket** bucketPtr = &root->bucketLookups[0]; - for (order = 0; order <= kBitsPerSizet; ++order) { + PartitionBucket** bucketPtr = &root->bucket_lookups[0]; + for (order = 0; order <= kBitsPerSizeT; ++order) { for (j = 0; j < kGenericNumBucketsPerOrder; ++j) { if (order < kGenericMinBucketedOrder) { // Use the bucket of the finest granularity for malloc(0) etc. @@ -227,7 +229,7 @@ } else { PartitionBucket* validBucket = bucket; // Skip over invalid buckets. 
- while (validBucket->slotSize % kGenericSmallestBucket) + while (validBucket->slot_size % kGenericSmallestBucket) validBucket++; *bucketPtr++ = validBucket; bucket++; @@ -236,23 +238,23 @@ } DCHECK(bucket == &root->buckets[0] + kGenericNumBuckets); DCHECK(bucketPtr == - &root->bucketLookups[0] + - ((kBitsPerSizet + 1) * kGenericNumBucketsPerOrder)); + &root->bucket_lookups[0] + + ((kBitsPerSizeT + 1) * kGenericNumBucketsPerOrder)); // And there's one last bucket lookup that will be hit for e.g. malloc(-1), // which tries to overflow to a non-existant order. *bucketPtr = &PartitionRootGeneric::gPagedBucket; } -static bool partitionAllocShutdownBucket(PartitionBucket* bucket) { +static bool PartitionAllocShutdownBucket(PartitionBucket* bucket) { // Failure here indicates a memory leak. - bool foundLeak = bucket->numFullPages != 0; - for (PartitionPage* page = bucket->activePagesHead; page; - page = page->nextPage) - foundLeak |= (page->numAllocatedSlots > 0); + bool foundLeak = bucket->num_full_pages != 0; + for (PartitionPage* page = bucket->active_pages_head; page; + page = page->next_page) + foundLeak |= (page->num_allocated_slots > 0); return foundLeak; } -static bool partitionAllocBaseShutdown(PartitionRootBase* root) { +static bool PartitionAllocBaseShutdown(PartitionRootBase* root) { DCHECK(root->initialized); root->initialized = false; @@ -260,40 +262,40 @@ // to free all our super pages. Since the super page extent entries are // stored in the super pages, we need to be careful not to access them // after we've released the corresponding super page. 
- PartitionSuperPageExtentEntry* entry = root->firstExtent; + PartitionSuperPageExtentEntry* entry = root->first_extent; while (entry) { PartitionSuperPageExtentEntry* nextEntry = entry->next; - char* superPage = entry->superPageBase; - char* superPagesEnd = entry->superPagesEnd; - while (superPage < superPagesEnd) { - freePages(superPage, kSuperPageSize); - superPage += kSuperPageSize; + char* super_page = entry->super_page_base; + char* super_pages_end = entry->super_pages_end; + while (super_page < super_pages_end) { + FreePages(super_page, kSuperPageSize); + super_page += kSuperPageSize; } entry = nextEntry; } - return root->directMapList != nullptr; + return root->direct_map_list != nullptr; } -bool partitionAllocShutdown(PartitionRoot* root) { +bool PartitionAllocShutdown(PartitionRoot* root) { bool foundLeak = false; size_t i; - for (i = 0; i < root->numBuckets; ++i) { + for (i = 0; i < root->num_buckets; ++i) { PartitionBucket* bucket = &root->buckets()[i]; - foundLeak |= partitionAllocShutdownBucket(bucket); + foundLeak |= PartitionAllocShutdownBucket(bucket); } - foundLeak |= partitionAllocBaseShutdown(root); + foundLeak |= PartitionAllocBaseShutdown(root); return !foundLeak; } -bool partitionAllocGenericShutdown(PartitionRootGeneric* root) { +bool PartitionAllocGenericShutdown(PartitionRootGeneric* root) { subtle::SpinLock::Guard guard(root->lock); bool foundLeak = false; size_t i; for (i = 0; i < kGenericNumBuckets; ++i) { PartitionBucket* bucket = &root->buckets[i]; - foundLeak |= partitionAllocShutdownBucket(bucket); + foundLeak |= PartitionAllocShutdownBucket(bucket); } - foundLeak |= partitionAllocBaseShutdown(root); + foundLeak |= PartitionAllocBaseShutdown(root); return !foundLeak; } @@ -307,8 +309,9 @@ #if !defined(ARCH_CPU_64_BITS) // Check whether this OOM is due to a lot of super pages that are allocated // but not committed, probably due to http://crbug.com/421387. 
- if (root->totalSizeOfSuperPages + root->totalSizeOfDirectMappedPages - - root->totalSizeOfCommittedPages > + if (root->total_size_of_super_pages + + root->total_size_of_direct_mapped_pages - + root->total_size_of_committed_pages > kReasonableSizeOfUnusedPages) { partitionOutOfMemoryWithLotsOfUncommitedPages(); } @@ -331,89 +334,91 @@ // the page lists. Specifically, you can't call these functions on full pages // that were detached from the active list. static bool ALWAYS_INLINE -partitionPageStateIsActive(const PartitionPage* page) { +PartitionPageStateIsActive(const PartitionPage* page) { DCHECK(page != &PartitionRootGeneric::gSeedPage); - DCHECK(!page->pageOffset); - return (page->numAllocatedSlots > 0 && - (page->freelistHead || page->numUnprovisionedSlots)); + DCHECK(!page->page_offset); + return (page->num_allocated_slots > 0 && + (page->freelist_head || page->num_unprovisioned_slots)); } -static bool ALWAYS_INLINE partitionPageStateIsFull(const PartitionPage* page) { +static bool ALWAYS_INLINE PartitionPageStateIsFull(const PartitionPage* page) { DCHECK(page != &PartitionRootGeneric::gSeedPage); - DCHECK(!page->pageOffset); - bool ret = (page->numAllocatedSlots == partitionBucketSlots(page->bucket)); + DCHECK(!page->page_offset); + bool ret = (page->num_allocated_slots == PartitionBucketSlots(page->bucket)); if (ret) { - DCHECK(!page->freelistHead); - DCHECK(!page->numUnprovisionedSlots); + DCHECK(!page->freelist_head); + DCHECK(!page->num_unprovisioned_slots); } return ret; } -static bool ALWAYS_INLINE partitionPageStateIsEmpty(const PartitionPage* page) { +static bool ALWAYS_INLINE PartitionPageStateIsEmpty(const PartitionPage* page) { DCHECK(page != &PartitionRootGeneric::gSeedPage); - DCHECK(!page->pageOffset); - return (!page->numAllocatedSlots && page->freelistHead); + DCHECK(!page->page_offset); + return (!page->num_allocated_slots && page->freelist_head); } static bool ALWAYS_INLINE -partitionPageStateIsDecommitted(const PartitionPage* page) { 
+PartitionPageStateIsDecommitted(const PartitionPage* page) { DCHECK(page != &PartitionRootGeneric::gSeedPage); - DCHECK(!page->pageOffset); - bool ret = (!page->numAllocatedSlots && !page->freelistHead); + DCHECK(!page->page_offset); + bool ret = (!page->num_allocated_slots && !page->freelist_head); if (ret) { - DCHECK(!page->numUnprovisionedSlots); - DCHECK(page->emptyCacheIndex == -1); + DCHECK(!page->num_unprovisioned_slots); + DCHECK(page->empty_cache_index == -1); } return ret; } static void partitionIncreaseCommittedPages(PartitionRootBase* root, size_t len) { - root->totalSizeOfCommittedPages += len; - DCHECK(root->totalSizeOfCommittedPages <= - root->totalSizeOfSuperPages + root->totalSizeOfDirectMappedPages); + root->total_size_of_committed_pages += len; + DCHECK(root->total_size_of_committed_pages <= + root->total_size_of_super_pages + + root->total_size_of_direct_mapped_pages); } static void partitionDecreaseCommittedPages(PartitionRootBase* root, size_t len) { - root->totalSizeOfCommittedPages -= len; - DCHECK(root->totalSizeOfCommittedPages <= - root->totalSizeOfSuperPages + root->totalSizeOfDirectMappedPages); + root->total_size_of_committed_pages -= len; + DCHECK(root->total_size_of_committed_pages <= + root->total_size_of_super_pages + + root->total_size_of_direct_mapped_pages); } static ALWAYS_INLINE void partitionDecommitSystemPages(PartitionRootBase* root, - void* addr, - size_t len) { - decommitSystemPages(addr, len); - partitionDecreaseCommittedPages(root, len); + void* address, + size_t length) { + DecommitSystemPages(address, length); + partitionDecreaseCommittedPages(root, length); } static ALWAYS_INLINE void partitionRecommitSystemPages(PartitionRootBase* root, - void* addr, - size_t len) { - recommitSystemPages(addr, len); - partitionIncreaseCommittedPages(root, len); + void* address, + size_t length) { + RecommitSystemPages(address, length); + partitionIncreaseCommittedPages(root, length); } -static ALWAYS_INLINE void* 
partitionAllocPartitionPages( +static ALWAYS_INLINE void* PartitionAllocPartitionPages( PartitionRootBase* root, int flags, - uint16_t numPartitionPages) { - DCHECK(!(reinterpret_cast<uintptr_t>(root->nextPartitionPage) % + uint16_t num_partition_pages) { + DCHECK(!(reinterpret_cast<uintptr_t>(root->next_partition_page) % kPartitionPageSize)); - DCHECK(!(reinterpret_cast<uintptr_t>(root->nextPartitionPageEnd) % + DCHECK(!(reinterpret_cast<uintptr_t>(root->next_partition_page_end) % kPartitionPageSize)); - DCHECK(numPartitionPages <= kNumPartitionPagesPerSuperPage); - size_t totalSize = kPartitionPageSize * numPartitionPages; - size_t numPartitionPagesLeft = - (root->nextPartitionPageEnd - root->nextPartitionPage) >> + DCHECK(num_partition_pages <= kNumPartitionPagesPerSuperPage); + size_t total_size = kPartitionPageSize * num_partition_pages; + size_t num_partition_pages_left = + (root->next_partition_page_end - root->next_partition_page) >> kPartitionPageShift; - if (LIKELY(numPartitionPagesLeft >= numPartitionPages)) { + if (LIKELY(num_partition_pages_left >= num_partition_pages)) { // In this case, we can still hand out pages from the current super page // allocation. - char* ret = root->nextPartitionPage; - root->nextPartitionPage += totalSize; - partitionIncreaseCommittedPages(root, totalSize); + char* ret = root->next_partition_page; + root->next_partition_page += total_size; + partitionIncreaseCommittedPages(root, total_size); return ret; } @@ -421,28 +426,28 @@ // address region as much as possible. This is important for not causing // page table bloat and not fragmenting address spaces in 32 bit // architectures. 
- char* requestedAddress = root->nextSuperPage; - char* superPage = reinterpret_cast<char*>(allocPages( + char* requestedAddress = root->next_super_page; + char* super_page = reinterpret_cast<char*>(AllocPages( requestedAddress, kSuperPageSize, kSuperPageSize, PageAccessible)); - if (UNLIKELY(!superPage)) + if (UNLIKELY(!super_page)) return 0; - root->totalSizeOfSuperPages += kSuperPageSize; - partitionIncreaseCommittedPages(root, totalSize); + root->total_size_of_super_pages += kSuperPageSize; + partitionIncreaseCommittedPages(root, total_size); - root->nextSuperPage = superPage + kSuperPageSize; - char* ret = superPage + kPartitionPageSize; - root->nextPartitionPage = ret + totalSize; - root->nextPartitionPageEnd = root->nextSuperPage - kPartitionPageSize; + root->next_super_page = super_page + kSuperPageSize; + char* ret = super_page + kPartitionPageSize; + root->next_partition_page = ret + total_size; + root->next_partition_page_end = root->next_super_page - kPartitionPageSize; // Make the first partition page in the super page a guard page, but leave a // hole in the middle. // This is where we put page metadata and also a tiny amount of extent // metadata. - setSystemPagesInaccessible(superPage, kSystemPageSize); - setSystemPagesInaccessible(superPage + (kSystemPageSize * 2), + SetSystemPagesInaccessible(super_page, kSystemPageSize); + SetSystemPagesInaccessible(super_page + (kSystemPageSize * 2), kPartitionPageSize - (kSystemPageSize * 2)); // Also make the last partition page a guard page. - setSystemPagesInaccessible(superPage + (kSuperPageSize - kPartitionPageSize), + SetSystemPagesInaccessible(super_page + (kSuperPageSize - kPartitionPageSize), kPartitionPageSize); // If we were after a specific address, but didn't get it, assume that @@ -451,155 +456,155 @@ // distributions will allocate the mapping directly before the last // successful mapping, which is far from random. So we just get fresh // randomness for the next mapping attempt. 
- if (requestedAddress && requestedAddress != superPage) - root->nextSuperPage = 0; + if (requestedAddress && requestedAddress != super_page) + root->next_super_page = 0; // We allocated a new super page so update super page metadata. // First check if this is a new extent or not. - PartitionSuperPageExtentEntry* latestExtent = + PartitionSuperPageExtentEntry* latest_extent = reinterpret_cast<PartitionSuperPageExtentEntry*>( - partitionSuperPageToMetadataArea(superPage)); + PartitionSuperPageToMetadataArea(super_page)); // By storing the root in every extent metadata object, we have a fast way // to go from a pointer within the partition to the root object. - latestExtent->root = root; + latest_extent->root = root; // Most new extents will be part of a larger extent, and these three fields // are unused, but we initialize them to 0 so that we get a clear signal // in case they are accidentally used. - latestExtent->superPageBase = 0; - latestExtent->superPagesEnd = 0; - latestExtent->next = 0; + latest_extent->super_page_base = 0; + latest_extent->super_pages_end = 0; + latest_extent->next = 0; - PartitionSuperPageExtentEntry* currentExtent = root->currentExtent; - bool isNewExtent = (superPage != requestedAddress); + PartitionSuperPageExtentEntry* current_extent = root->current_extent; + bool isNewExtent = (super_page != requestedAddress); if (UNLIKELY(isNewExtent)) { - if (UNLIKELY(!currentExtent)) { - DCHECK(!root->firstExtent); - root->firstExtent = latestExtent; + if (UNLIKELY(!current_extent)) { + DCHECK(!root->first_extent); + root->first_extent = latest_extent; } else { - DCHECK(currentExtent->superPageBase); - currentExtent->next = latestExtent; + DCHECK(current_extent->super_page_base); + current_extent->next = latest_extent; } - root->currentExtent = latestExtent; - latestExtent->superPageBase = superPage; - latestExtent->superPagesEnd = superPage + kSuperPageSize; + root->current_extent = latest_extent; + latest_extent->super_page_base = super_page; + 
latest_extent->super_pages_end = super_page + kSuperPageSize; } else { // We allocated next to an existing extent so just nudge the size up a // little. - DCHECK(currentExtent->superPagesEnd); - currentExtent->superPagesEnd += kSuperPageSize; - DCHECK(ret >= currentExtent->superPageBase && - ret < currentExtent->superPagesEnd); + DCHECK(current_extent->super_pages_end); + current_extent->super_pages_end += kSuperPageSize; + DCHECK(ret >= current_extent->super_page_base && + ret < current_extent->super_pages_end); } return ret; } static ALWAYS_INLINE uint16_t partitionBucketPartitionPages(const PartitionBucket* bucket) { - return (bucket->numSystemPagesPerSlotSpan + + return (bucket->num_system_pages_per_slot_span + (kNumSystemPagesPerPartitionPage - 1)) / kNumSystemPagesPerPartitionPage; } static ALWAYS_INLINE void partitionPageReset(PartitionPage* page) { - DCHECK(partitionPageStateIsDecommitted(page)); + DCHECK(PartitionPageStateIsDecommitted(page)); - page->numUnprovisionedSlots = partitionBucketSlots(page->bucket); - DCHECK(page->numUnprovisionedSlots); + page->num_unprovisioned_slots = PartitionBucketSlots(page->bucket); + DCHECK(page->num_unprovisioned_slots); - page->nextPage = nullptr; + page->next_page = nullptr; } static ALWAYS_INLINE void partitionPageSetup(PartitionPage* page, PartitionBucket* bucket) { // The bucket never changes. We set it up once. page->bucket = bucket; - page->emptyCacheIndex = -1; + page->empty_cache_index = -1; partitionPageReset(page); // If this page has just a single slot, do not set up page offsets for any // page metadata other than the first one. This ensures that attempts to // touch invalid page metadata fail. 
- if (page->numUnprovisionedSlots == 1) + if (page->num_unprovisioned_slots == 1) return; - uint16_t numPartitionPages = partitionBucketPartitionPages(bucket); + uint16_t num_partition_pages = partitionBucketPartitionPages(bucket); char* pageCharPtr = reinterpret_cast<char*>(page); - for (uint16_t i = 1; i < numPartitionPages; ++i) { + for (uint16_t i = 1; i < num_partition_pages; ++i) { pageCharPtr += kPageMetadataSize; PartitionPage* secondaryPage = reinterpret_cast<PartitionPage*>(pageCharPtr); - secondaryPage->pageOffset = i; + secondaryPage->page_offset = i; } } static ALWAYS_INLINE char* partitionPageAllocAndFillFreelist( PartitionPage* page) { DCHECK(page != &PartitionRootGeneric::gSeedPage); - uint16_t numSlots = page->numUnprovisionedSlots; - DCHECK(numSlots); + uint16_t num_slots = page->num_unprovisioned_slots; + DCHECK(num_slots); PartitionBucket* bucket = page->bucket; // We should only get here when _every_ slot is either used or unprovisioned. // (The third state is "on the freelist". If we have a non-empty freelist, we // should not get here.) - DCHECK(numSlots + page->numAllocatedSlots == partitionBucketSlots(bucket)); + DCHECK(num_slots + page->num_allocated_slots == PartitionBucketSlots(bucket)); // Similarly, make explicitly sure that the freelist is empty. 
- DCHECK(!page->freelistHead); - DCHECK(page->numAllocatedSlots >= 0); + DCHECK(!page->freelist_head); + DCHECK(page->num_allocated_slots >= 0); - size_t size = bucket->slotSize; - char* base = reinterpret_cast<char*>(partitionPageToPointer(page)); - char* returnObject = base + (size * page->numAllocatedSlots); - char* firstFreelistPointer = returnObject + size; + size_t size = bucket->slot_size; + char* base = reinterpret_cast<char*>(PartitionPageToPointer(page)); + char* return_object = base + (size * page->num_allocated_slots); + char* firstFreelistPointer = return_object + size; char* firstFreelistPointerExtent = firstFreelistPointer + sizeof(PartitionFreelistEntry*); // Our goal is to fault as few system pages as possible. We calculate the // page containing the "end" of the returned slot, and then allow freelist // pointers to be written up to the end of that page. - char* subPageLimit = reinterpret_cast<char*>( - roundUpToSystemPage(reinterpret_cast<size_t>(firstFreelistPointer))); - char* slotsLimit = returnObject + (size * numSlots); - char* freelistLimit = subPageLimit; - if (UNLIKELY(slotsLimit < freelistLimit)) - freelistLimit = slotsLimit; + char* sub_page_limit = reinterpret_cast<char*>( + RoundUpToSystemPage(reinterpret_cast<size_t>(firstFreelistPointer))); + char* slots_limit = return_object + (size * num_slots); + char* freelist_limit = sub_page_limit; + if (UNLIKELY(slots_limit < freelist_limit)) + freelist_limit = slots_limit; - uint16_t numNewFreelistEntries = 0; - if (LIKELY(firstFreelistPointerExtent <= freelistLimit)) { + uint16_t num_new_freelist_entries = 0; + if (LIKELY(firstFreelistPointerExtent <= freelist_limit)) { // Only consider used space in the slot span. If we consider wasted // space, we may get an off-by-one when a freelist pointer fits in the // wasted space, but a slot does not. // We know we can fit at least one freelist pointer. 
- numNewFreelistEntries = 1; + num_new_freelist_entries = 1; // Any further entries require space for the whole slot span. - numNewFreelistEntries += static_cast<uint16_t>( - (freelistLimit - firstFreelistPointerExtent) / size); + num_new_freelist_entries += static_cast<uint16_t>( + (freelist_limit - firstFreelistPointerExtent) / size); } // We always return an object slot -- that's the +1 below. // We do not neccessarily create any new freelist entries, because we cross // sub page boundaries frequently for large bucket sizes. - DCHECK(numNewFreelistEntries + 1 <= numSlots); - numSlots -= (numNewFreelistEntries + 1); - page->numUnprovisionedSlots = numSlots; - page->numAllocatedSlots++; + DCHECK(num_new_freelist_entries + 1 <= num_slots); + num_slots -= (num_new_freelist_entries + 1); + page->num_unprovisioned_slots = num_slots; + page->num_allocated_slots++; - if (LIKELY(numNewFreelistEntries)) { - char* freelistPointer = firstFreelistPointer; + if (LIKELY(num_new_freelist_entries)) { + char* freelist_pointer = firstFreelistPointer; PartitionFreelistEntry* entry = - reinterpret_cast<PartitionFreelistEntry*>(freelistPointer); - page->freelistHead = entry; - while (--numNewFreelistEntries) { - freelistPointer += size; + reinterpret_cast<PartitionFreelistEntry*>(freelist_pointer); + page->freelist_head = entry; + while (--num_new_freelist_entries) { + freelist_pointer += size; PartitionFreelistEntry* nextEntry = - reinterpret_cast<PartitionFreelistEntry*>(freelistPointer); - entry->next = partitionFreelistMask(nextEntry); + reinterpret_cast<PartitionFreelistEntry*>(freelist_pointer); + entry->next = PartitionFreelistMask(nextEntry); entry = nextEntry; } - entry->next = partitionFreelistMask(0); + entry->next = PartitionFreelistMask(0); } else { - page->freelistHead = 0; + page->freelist_head = 0; } - return returnObject; + return return_object; } // This helper function scans a bucket's active page list for a suitable new @@ -611,69 +616,69 @@ // Empty pages are 
swept on to the empty page list, decommitted pages on to the // decommitted page list and full pages are unlinked from any list. static bool partitionSetNewActivePage(PartitionBucket* bucket) { - PartitionPage* page = bucket->activePagesHead; + PartitionPage* page = bucket->active_pages_head; if (page == &PartitionRootBase::gSeedPage) return false; - PartitionPage* nextPage; + PartitionPage* next_page; - for (; page; page = nextPage) { - nextPage = page->nextPage; + for (; page; page = next_page) { + next_page = page->next_page; DCHECK(page->bucket == bucket); - DCHECK(page != bucket->emptyPagesHead); - DCHECK(page != bucket->decommittedPagesHead); + DCHECK(page != bucket->empty_pages_head); + DCHECK(page != bucket->decommitted_pages_head); // Deal with empty and decommitted pages. - if (LIKELY(partitionPageStateIsActive(page))) { + if (LIKELY(PartitionPageStateIsActive(page))) { // This page is usable because it has freelist entries, or has // unprovisioned slots we can create freelist entries from. - bucket->activePagesHead = page; + bucket->active_pages_head = page; return true; } - if (LIKELY(partitionPageStateIsEmpty(page))) { - page->nextPage = bucket->emptyPagesHead; - bucket->emptyPagesHead = page; - } else if (LIKELY(partitionPageStateIsDecommitted(page))) { - page->nextPage = bucket->decommittedPagesHead; - bucket->decommittedPagesHead = page; + if (LIKELY(PartitionPageStateIsEmpty(page))) { + page->next_page = bucket->empty_pages_head; + bucket->empty_pages_head = page; + } else if (LIKELY(PartitionPageStateIsDecommitted(page))) { + page->next_page = bucket->decommitted_pages_head; + bucket->decommitted_pages_head = page; } else { - DCHECK(partitionPageStateIsFull(page)); + DCHECK(PartitionPageStateIsFull(page)); // If we get here, we found a full page. Skip over it too, and also // tag it as full (via a negative value). We need it tagged so that // free'ing can tell, and move it back into the active page list. 
- page->numAllocatedSlots = -page->numAllocatedSlots; - ++bucket->numFullPages; - // numFullPages is a uint16_t for efficient packing so guard against + page->num_allocated_slots = -page->num_allocated_slots; + ++bucket->num_full_pages; + // num_full_pages is a uint16_t for efficient packing so guard against // overflow to be safe. - if (UNLIKELY(!bucket->numFullPages)) + if (UNLIKELY(!bucket->num_full_pages)) partitionBucketFull(); // Not necessary but might help stop accidents. - page->nextPage = 0; + page->next_page = 0; } } - bucket->activePagesHead = &PartitionRootGeneric::gSeedPage; + bucket->active_pages_head = &PartitionRootGeneric::gSeedPage; return false; } static ALWAYS_INLINE PartitionDirectMapExtent* partitionPageToDirectMapExtent( PartitionPage* page) { - DCHECK(partitionBucketIsDirectMapped(page->bucket)); + DCHECK(PartitionBucketIsDirectMapped(page->bucket)); return reinterpret_cast<PartitionDirectMapExtent*>( reinterpret_cast<char*>(page) + 3 * kPageMetadataSize); } static ALWAYS_INLINE void partitionPageSetRawSize(PartitionPage* page, size_t size) { - size_t* rawSizePtr = partitionPageGetRawSizePtr(page); - if (UNLIKELY(rawSizePtr != nullptr)) - *rawSizePtr = size; + size_t* raw_sizePtr = PartitionPageGetRawSizePtr(page); + if (UNLIKELY(raw_sizePtr != nullptr)) + *raw_sizePtr = size; } static ALWAYS_INLINE PartitionPage* partitionDirectMap(PartitionRootBase* root, int flags, - size_t rawSize) { - size_t size = partitionDirectMapSize(rawSize); + size_t raw_size) { + size_t size = PartitionDirectMapSize(raw_size); // Because we need to fake looking like a super page, we need to allocate // a bunch of system pages more than "size": @@ -682,131 +687,131 @@ // page sized clump. // - We add a trailing guard page on 32-bit (on 64-bit we rely on the // massive address space plus randomization instead). 
- size_t mapSize = size + kPartitionPageSize; + size_t map_size = size + kPartitionPageSize; #if !defined(ARCH_CPU_64_BITS) - mapSize += kSystemPageSize; + map_size += kSystemPageSize; #endif // Round up to the allocation granularity. - mapSize += kPageAllocationGranularityOffsetMask; - mapSize &= kPageAllocationGranularityBaseMask; + map_size += kPageAllocationGranularityOffsetMask; + map_size &= kPageAllocationGranularityBaseMask; // TODO: these pages will be zero-filled. Consider internalizing an // allocZeroed() API so we can avoid a memset() entirely in this case. char* ptr = reinterpret_cast<char*>( - allocPages(0, mapSize, kSuperPageSize, PageAccessible)); + AllocPages(0, map_size, kSuperPageSize, PageAccessible)); if (UNLIKELY(!ptr)) return nullptr; size_t committedPageSize = size + kSystemPageSize; - root->totalSizeOfDirectMappedPages += committedPageSize; + root->total_size_of_direct_mapped_pages += committedPageSize; partitionIncreaseCommittedPages(root, committedPageSize); char* slot = ptr + kPartitionPageSize; - setSystemPagesInaccessible(ptr + (kSystemPageSize * 2), + SetSystemPagesInaccessible(ptr + (kSystemPageSize * 2), kPartitionPageSize - (kSystemPageSize * 2)); #if !defined(ARCH_CPU_64_BITS) - setSystemPagesInaccessible(ptr, kSystemPageSize); - setSystemPagesInaccessible(slot + size, kSystemPageSize); + SetSystemPagesInaccessible(ptr, kSystemPageSize); + SetSystemPagesInaccessible(slot + size, kSystemPageSize); #endif PartitionSuperPageExtentEntry* extent = reinterpret_cast<PartitionSuperPageExtentEntry*>( - partitionSuperPageToMetadataArea(ptr)); + PartitionSuperPageToMetadataArea(ptr)); extent->root = root; // The new structures are all located inside a fresh system page so they // will all be zeroed out. These DCHECKs are for documentation. 
- DCHECK(!extent->superPageBase); - DCHECK(!extent->superPagesEnd); + DCHECK(!extent->super_page_base); + DCHECK(!extent->super_pages_end); DCHECK(!extent->next); - PartitionPage* page = partitionPointerToPageNoAlignmentCheck(slot); + PartitionPage* page = PartitionPointerToPageNoAlignmentCheck(slot); PartitionBucket* bucket = reinterpret_cast<PartitionBucket*>( reinterpret_cast<char*>(page) + (kPageMetadataSize * 2)); - DCHECK(!page->nextPage); - DCHECK(!page->numAllocatedSlots); - DCHECK(!page->numUnprovisionedSlots); - DCHECK(!page->pageOffset); - DCHECK(!page->emptyCacheIndex); + DCHECK(!page->next_page); + DCHECK(!page->num_allocated_slots); + DCHECK(!page->num_unprovisioned_slots); + DCHECK(!page->page_offset); + DCHECK(!page->empty_cache_index); page->bucket = bucket; - page->freelistHead = reinterpret_cast<PartitionFreelistEntry*>(slot); + page->freelist_head = reinterpret_cast<PartitionFreelistEntry*>(slot); PartitionFreelistEntry* nextEntry = reinterpret_cast<PartitionFreelistEntry*>(slot); - nextEntry->next = partitionFreelistMask(0); + nextEntry->next = PartitionFreelistMask(0); - DCHECK(!bucket->activePagesHead); - DCHECK(!bucket->emptyPagesHead); - DCHECK(!bucket->decommittedPagesHead); - DCHECK(!bucket->numSystemPagesPerSlotSpan); - DCHECK(!bucket->numFullPages); - bucket->slotSize = size; + DCHECK(!bucket->active_pages_head); + DCHECK(!bucket->empty_pages_head); + DCHECK(!bucket->decommitted_pages_head); + DCHECK(!bucket->num_system_pages_per_slot_span); + DCHECK(!bucket->num_full_pages); + bucket->slot_size = size; PartitionDirectMapExtent* mapExtent = partitionPageToDirectMapExtent(page); - mapExtent->mapSize = mapSize - kPartitionPageSize - kSystemPageSize; + mapExtent->map_size = map_size - kPartitionPageSize - kSystemPageSize; mapExtent->bucket = bucket; // Maintain the doubly-linked list of all direct mappings. 
- mapExtent->nextExtent = root->directMapList; - if (mapExtent->nextExtent) - mapExtent->nextExtent->prevExtent = mapExtent; - mapExtent->prevExtent = nullptr; - root->directMapList = mapExtent; + mapExtent->next_extent = root->direct_map_list; + if (mapExtent->next_extent) + mapExtent->next_extent->prev_extent = mapExtent; + mapExtent->prev_extent = nullptr; + root->direct_map_list = mapExtent; return page; } static ALWAYS_INLINE void partitionDirectUnmap(PartitionPage* page) { - PartitionRootBase* root = partitionPageToRoot(page); + PartitionRootBase* root = PartitionPageToRoot(page); const PartitionDirectMapExtent* extent = partitionPageToDirectMapExtent(page); - size_t unmapSize = extent->mapSize; + size_t unmap_size = extent->map_size; // Maintain the doubly-linked list of all direct mappings. - if (extent->prevExtent) { - DCHECK(extent->prevExtent->nextExtent == extent); - extent->prevExtent->nextExtent = extent->nextExtent; + if (extent->prev_extent) { + DCHECK(extent->prev_extent->next_extent == extent); + extent->prev_extent->next_extent = extent->next_extent; } else { - root->directMapList = extent->nextExtent; + root->direct_map_list = extent->next_extent; } - if (extent->nextExtent) { - DCHECK(extent->nextExtent->prevExtent == extent); - extent->nextExtent->prevExtent = extent->prevExtent; + if (extent->next_extent) { + DCHECK(extent->next_extent->prev_extent == extent); + extent->next_extent->prev_extent = extent->prev_extent; } // Add on the size of the trailing guard page and preceeding partition // page. 
- unmapSize += kPartitionPageSize + kSystemPageSize; + unmap_size += kPartitionPageSize + kSystemPageSize; - size_t uncommittedPageSize = page->bucket->slotSize + kSystemPageSize; + size_t uncommittedPageSize = page->bucket->slot_size + kSystemPageSize; partitionDecreaseCommittedPages(root, uncommittedPageSize); - DCHECK(root->totalSizeOfDirectMappedPages >= uncommittedPageSize); - root->totalSizeOfDirectMappedPages -= uncommittedPageSize; + DCHECK(root->total_size_of_direct_mapped_pages >= uncommittedPageSize); + root->total_size_of_direct_mapped_pages -= uncommittedPageSize; - DCHECK(!(unmapSize & kPageAllocationGranularityOffsetMask)); + DCHECK(!(unmap_size & kPageAllocationGranularityOffsetMask)); - char* ptr = reinterpret_cast<char*>(partitionPageToPointer(page)); + char* ptr = reinterpret_cast<char*>(PartitionPageToPointer(page)); // Account for the mapping starting a partition page before the actual // allocation address. ptr -= kPartitionPageSize; - freePages(ptr, unmapSize); + FreePages(ptr, unmap_size); } -void* partitionAllocSlowPath(PartitionRootBase* root, +void* PartitionAllocSlowPath(PartitionRootBase* root, int flags, size_t size, PartitionBucket* bucket) { // The slow path is called when the freelist is empty. - DCHECK(!bucket->activePagesHead->freelistHead); + DCHECK(!bucket->active_pages_head->freelist_head); PartitionPage* newPage = nullptr; - // For the partitionAllocGeneric API, we have a bunch of buckets marked + // For the PartitionAllocGeneric API, we have a bunch of buckets marked // as special cases. We bounce them through to the slow path so that we // can still have a blazing fast hot path due to lack of corner-case // branches. 
bool returnNull = flags & PartitionAllocReturnNull; - if (UNLIKELY(partitionBucketIsDirectMapped(bucket))) { + if (UNLIKELY(PartitionBucketIsDirectMapped(bucket))) { DCHECK(size > kGenericMaxBucketed); DCHECK(bucket == &PartitionRootBase::gPagedBucket); - DCHECK(bucket->activePagesHead == &PartitionRootGeneric::gSeedPage); + DCHECK(bucket->active_pages_head == &PartitionRootGeneric::gSeedPage); if (size > kGenericMaxDirectMapped) { if (returnNull) return nullptr; @@ -815,52 +820,53 @@ newPage = partitionDirectMap(root, flags, size); } else if (LIKELY(partitionSetNewActivePage(bucket))) { // First, did we find an active page in the active pages list? - newPage = bucket->activePagesHead; - DCHECK(partitionPageStateIsActive(newPage)); - } else if (LIKELY(bucket->emptyPagesHead != nullptr) || - LIKELY(bucket->decommittedPagesHead != nullptr)) { + newPage = bucket->active_pages_head; + DCHECK(PartitionPageStateIsActive(newPage)); + } else if (LIKELY(bucket->empty_pages_head != nullptr) || + LIKELY(bucket->decommitted_pages_head != nullptr)) { // Second, look in our lists of empty and decommitted pages. // Check empty pages first, which are preferred, but beware that an // empty page might have been decommitted. - while (LIKELY((newPage = bucket->emptyPagesHead) != nullptr)) { + while (LIKELY((newPage = bucket->empty_pages_head) != nullptr)) { DCHECK(newPage->bucket == bucket); - DCHECK(partitionPageStateIsEmpty(newPage) || - partitionPageStateIsDecommitted(newPage)); - bucket->emptyPagesHead = newPage->nextPage; + DCHECK(PartitionPageStateIsEmpty(newPage) || + PartitionPageStateIsDecommitted(newPage)); + bucket->empty_pages_head = newPage->next_page; // Accept the empty page unless it got decommitted. 
- if (newPage->freelistHead) { - newPage->nextPage = nullptr; + if (newPage->freelist_head) { + newPage->next_page = nullptr; break; } - DCHECK(partitionPageStateIsDecommitted(newPage)); - newPage->nextPage = bucket->decommittedPagesHead; - bucket->decommittedPagesHead = newPage; + DCHECK(PartitionPageStateIsDecommitted(newPage)); + newPage->next_page = bucket->decommitted_pages_head; + bucket->decommitted_pages_head = newPage; } - if (UNLIKELY(!newPage) && LIKELY(bucket->decommittedPagesHead != nullptr)) { - newPage = bucket->decommittedPagesHead; + if (UNLIKELY(!newPage) && + LIKELY(bucket->decommitted_pages_head != nullptr)) { + newPage = bucket->decommitted_pages_head; DCHECK(newPage->bucket == bucket); - DCHECK(partitionPageStateIsDecommitted(newPage)); - bucket->decommittedPagesHead = newPage->nextPage; - void* addr = partitionPageToPointer(newPage); + DCHECK(PartitionPageStateIsDecommitted(newPage)); + bucket->decommitted_pages_head = newPage->next_page; + void* addr = PartitionPageToPointer(newPage); partitionRecommitSystemPages(root, addr, - partitionBucketBytes(newPage->bucket)); + PartitionBucketBytes(newPage->bucket)); partitionPageReset(newPage); } DCHECK(newPage); } else { // Third. If we get here, we need a brand new page. - uint16_t numPartitionPages = partitionBucketPartitionPages(bucket); + uint16_t num_partition_pages = partitionBucketPartitionPages(bucket); void* rawPages = - partitionAllocPartitionPages(root, flags, numPartitionPages); + PartitionAllocPartitionPages(root, flags, num_partition_pages); if (LIKELY(rawPages != nullptr)) { - newPage = partitionPointerToPageNoAlignmentCheck(rawPages); + newPage = PartitionPointerToPageNoAlignmentCheck(rawPages); partitionPageSetup(newPage, bucket); } } // Bail if we had a memory allocation failure. 
if (UNLIKELY(!newPage)) { - DCHECK(bucket->activePagesHead == &PartitionRootGeneric::gSeedPage); + DCHECK(bucket->active_pages_head == &PartitionRootGeneric::gSeedPage); if (returnNull) return nullptr; partitionOutOfMemory(root); @@ -868,29 +874,29 @@ bucket = newPage->bucket; DCHECK(bucket != &PartitionRootBase::gPagedBucket); - bucket->activePagesHead = newPage; + bucket->active_pages_head = newPage; partitionPageSetRawSize(newPage, size); // If we found an active page with free slots, or an empty page, we have a // usable freelist head. - if (LIKELY(newPage->freelistHead != nullptr)) { - PartitionFreelistEntry* entry = newPage->freelistHead; - PartitionFreelistEntry* newHead = partitionFreelistMask(entry->next); - newPage->freelistHead = newHead; - newPage->numAllocatedSlots++; + if (LIKELY(newPage->freelist_head != nullptr)) { + PartitionFreelistEntry* entry = newPage->freelist_head; + PartitionFreelistEntry* newHead = PartitionFreelistMask(entry->next); + newPage->freelist_head = newHead; + newPage->num_allocated_slots++; return entry; } // Otherwise, we need to build the freelist. - DCHECK(newPage->numUnprovisionedSlots); + DCHECK(newPage->num_unprovisioned_slots); return partitionPageAllocAndFillFreelist(newPage); } static ALWAYS_INLINE void partitionDecommitPage(PartitionRootBase* root, PartitionPage* page) { - DCHECK(partitionPageStateIsEmpty(page)); - DCHECK(!partitionBucketIsDirectMapped(page->bucket)); - void* addr = partitionPageToPointer(page); - partitionDecommitSystemPages(root, addr, partitionBucketBytes(page->bucket)); + DCHECK(PartitionPageStateIsEmpty(page)); + DCHECK(!PartitionBucketIsDirectMapped(page->bucket)); + void* addr = PartitionPageToPointer(page); + partitionDecommitSystemPages(root, addr, PartitionBucketBytes(page->bucket)); // We actually leave the decommitted page in the active list. 
We'll sweep // it on to the decommitted page list when we next walk the active page @@ -898,35 +904,35 @@ // Pulling this trick enables us to use a singly-linked page list for all // cases, which is critical in keeping the page metadata structure down to // 32 bytes in size. - page->freelistHead = 0; - page->numUnprovisionedSlots = 0; - DCHECK(partitionPageStateIsDecommitted(page)); + page->freelist_head = 0; + page->num_unprovisioned_slots = 0; + DCHECK(PartitionPageStateIsDecommitted(page)); } static void partitionDecommitPageIfPossible(PartitionRootBase* root, PartitionPage* page) { - DCHECK(page->emptyCacheIndex >= 0); - DCHECK(static_cast<unsigned>(page->emptyCacheIndex) < kMaxFreeableSpans); - DCHECK(page == root->globalEmptyPageRing[page->emptyCacheIndex]); - page->emptyCacheIndex = -1; - if (partitionPageStateIsEmpty(page)) + DCHECK(page->empty_cache_index >= 0); + DCHECK(static_cast<unsigned>(page->empty_cache_index) < kMaxFreeableSpans); + DCHECK(page == root->global_empty_page_ring[page->empty_cache_index]); + page->empty_cache_index = -1; + if (PartitionPageStateIsEmpty(page)) partitionDecommitPage(root, page); } static ALWAYS_INLINE void partitionRegisterEmptyPage(PartitionPage* page) { - DCHECK(partitionPageStateIsEmpty(page)); - PartitionRootBase* root = partitionPageToRoot(page); + DCHECK(PartitionPageStateIsEmpty(page)); + PartitionRootBase* root = PartitionPageToRoot(page); // If the page is already registered as empty, give it another life. 
- if (page->emptyCacheIndex != -1) { - DCHECK(page->emptyCacheIndex >= 0); - DCHECK(static_cast<unsigned>(page->emptyCacheIndex) < kMaxFreeableSpans); - DCHECK(root->globalEmptyPageRing[page->emptyCacheIndex] == page); - root->globalEmptyPageRing[page->emptyCacheIndex] = 0; + if (page->empty_cache_index != -1) { + DCHECK(page->empty_cache_index >= 0); + DCHECK(static_cast<unsigned>(page->empty_cache_index) < kMaxFreeableSpans); + DCHECK(root->global_empty_page_ring[page->empty_cache_index] == page); + root->global_empty_page_ring[page->empty_cache_index] = 0; } - int16_t currentIndex = root->globalEmptyPageRingIndex; - PartitionPage* pageToDecommit = root->globalEmptyPageRing[currentIndex]; + int16_t currentIndex = root->global_empty_page_ring_index; + PartitionPage* pageToDecommit = root->global_empty_page_ring[currentIndex]; // The page might well have been re-activated, filled up, etc. before we get // around to looking at it here. if (pageToDecommit) @@ -936,110 +942,110 @@ // empty". thus providing it a bit of breathing room to get re-used before // we really free it. This improves performance, particularly on Mac OS X // which has subpar memory management performance. 
- root->globalEmptyPageRing[currentIndex] = page; - page->emptyCacheIndex = currentIndex; + root->global_empty_page_ring[currentIndex] = page; + page->empty_cache_index = currentIndex; ++currentIndex; if (currentIndex == kMaxFreeableSpans) currentIndex = 0; - root->globalEmptyPageRingIndex = currentIndex; + root->global_empty_page_ring_index = currentIndex; } static void partitionDecommitEmptyPages(PartitionRootBase* root) { for (size_t i = 0; i < kMaxFreeableSpans; ++i) { - PartitionPage* page = root->globalEmptyPageRing[i]; + PartitionPage* page = root->global_empty_page_ring[i]; if (page) partitionDecommitPageIfPossible(root, page); - root->globalEmptyPageRing[i] = nullptr; + root->global_empty_page_ring[i] = nullptr; } } -void partitionFreeSlowPath(PartitionPage* page) { +void PartitionFreeSlowPath(PartitionPage* page) { PartitionBucket* bucket = page->bucket; DCHECK(page != &PartitionRootGeneric::gSeedPage); - if (LIKELY(page->numAllocatedSlots == 0)) { + if (LIKELY(page->num_allocated_slots == 0)) { // Page became fully unused. - if (UNLIKELY(partitionBucketIsDirectMapped(bucket))) { + if (UNLIKELY(PartitionBucketIsDirectMapped(bucket))) { partitionDirectUnmap(page); return; } // If it's the current active page, change it. We bounce the page to // the empty list as a force towards defragmentation. - if (LIKELY(page == bucket->activePagesHead)) + if (LIKELY(page == bucket->active_pages_head)) (void)partitionSetNewActivePage(bucket); - DCHECK(bucket->activePagesHead != page); + DCHECK(bucket->active_pages_head != page); partitionPageSetRawSize(page, 0); - DCHECK(!partitionPageGetRawSize(page)); + DCHECK(!PartitionPageGetRawSize(page)); partitionRegisterEmptyPage(page); } else { - DCHECK(!partitionBucketIsDirectMapped(bucket)); + DCHECK(!PartitionBucketIsDirectMapped(bucket)); // Ensure that the page is full. That's the only valid case if we // arrive here. 
- DCHECK(page->numAllocatedSlots < 0); - // A transition of numAllocatedSlots from 0 to -1 is not legal, and + DCHECK(page->num_allocated_slots < 0); + // A transition of num_allocated_slots from 0 to -1 is not legal, and // likely indicates a double-free. - CHECK(page->numAllocatedSlots != -1); - page->numAllocatedSlots = -page->numAllocatedSlots - 2; - DCHECK(page->numAllocatedSlots == partitionBucketSlots(bucket) - 1); + CHECK(page->num_allocated_slots != -1); + page->num_allocated_slots = -page->num_allocated_slots - 2; + DCHECK(page->num_allocated_slots == PartitionBucketSlots(bucket) - 1); // Fully used page became partially used. It must be put back on the // non-full page list. Also make it the current page to increase the // chances of it being filled up again. The old current page will be // the next page. - DCHECK(!page->nextPage); - if (LIKELY(bucket->activePagesHead != &PartitionRootGeneric::gSeedPage)) - page->nextPage = bucket->activePagesHead; - bucket->activePagesHead = page; - --bucket->numFullPages; + DCHECK(!page->next_page); + if (LIKELY(bucket->active_pages_head != &PartitionRootGeneric::gSeedPage)) + page->next_page = bucket->active_pages_head; + bucket->active_pages_head = page; + --bucket->num_full_pages; // Special case: for a partition page with just a single slot, it may // now be empty and we want to run it through the empty logic. 
- if (UNLIKELY(page->numAllocatedSlots == 0)) - partitionFreeSlowPath(page); + if (UNLIKELY(page->num_allocated_slots == 0)) + PartitionFreeSlowPath(page); } } bool partitionReallocDirectMappedInPlace(PartitionRootGeneric* root, PartitionPage* page, - size_t rawSize) { - DCHECK(partitionBucketIsDirectMapped(page->bucket)); + size_t raw_size) { + DCHECK(PartitionBucketIsDirectMapped(page->bucket)); - rawSize = partitionCookieSizeAdjustAdd(rawSize); + raw_size = PartitionCookieSizeAdjustAdd(raw_size); // Note that the new size might be a bucketed size; this function is called // whenever we're reallocating a direct mapped allocation. - size_t newSize = partitionDirectMapSize(rawSize); - if (newSize < kGenericMinDirectMappedDownsize) + size_t new_size = PartitionDirectMapSize(raw_size); + if (new_size < kGenericMinDirectMappedDownsize) return false; - // bucket->slotSize is the current size of the allocation. - size_t currentSize = page->bucket->slotSize; - if (newSize == currentSize) + // bucket->slot_size is the current size of the allocation. + size_t current_size = page->bucket->slot_size; + if (new_size == current_size) return true; - char* charPtr = static_cast<char*>(partitionPageToPointer(page)); + char* char_ptr = static_cast<char*>(PartitionPageToPointer(page)); - if (newSize < currentSize) { - size_t mapSize = partitionPageToDirectMapExtent(page)->mapSize; + if (new_size < current_size) { + size_t map_size = partitionPageToDirectMapExtent(page)->map_size; // Don't reallocate in-place if new size is less than 80 % of the full // map size, to avoid holding on to too much unused address space. - if ((newSize / kSystemPageSize) * 5 < (mapSize / kSystemPageSize) * 4) + if ((new_size / kSystemPageSize) * 5 < (map_size / kSystemPageSize) * 4) return false; // Shrink by decommitting unneeded pages and making them inaccessible. 
- size_t decommitSize = currentSize - newSize; - partitionDecommitSystemPages(root, charPtr + newSize, decommitSize); - setSystemPagesInaccessible(charPtr + newSize, decommitSize); - } else if (newSize <= partitionPageToDirectMapExtent(page)->mapSize) { + size_t decommitSize = current_size - new_size; + partitionDecommitSystemPages(root, char_ptr + new_size, decommitSize); + SetSystemPagesInaccessible(char_ptr + new_size, decommitSize); + } else if (new_size <= partitionPageToDirectMapExtent(page)->map_size) { // Grow within the actually allocated memory. Just need to make the // pages accessible again. - size_t recommitSize = newSize - currentSize; - bool ret = setSystemPagesAccessible(charPtr + currentSize, recommitSize); + size_t recommit_size = new_size - current_size; + bool ret = SetSystemPagesAccessible(char_ptr + current_size, recommit_size); CHECK(ret); - partitionRecommitSystemPages(root, charPtr + currentSize, recommitSize); + partitionRecommitSystemPages(root, char_ptr + current_size, recommit_size); #if DCHECK_IS_ON() - memset(charPtr + currentSize, kUninitializedByte, recommitSize); + memset(char_ptr + current_size, kUninitializedByte, recommit_size); #endif } else { // We can't perform the realloc in-place. @@ -1049,127 +1055,127 @@ #if DCHECK_IS_ON() // Write a new trailing cookie. 
- partitionCookieWriteValue(charPtr + rawSize - kCookieSize); + PartitionCookieWriteValue(char_ptr + raw_size - kCookieSize); #endif - partitionPageSetRawSize(page, rawSize); - DCHECK(partitionPageGetRawSize(page) == rawSize); + partitionPageSetRawSize(page, raw_size); + DCHECK(PartitionPageGetRawSize(page) == raw_size); - page->bucket->slotSize = newSize; + page->bucket->slot_size = new_size; return true; } -void* partitionReallocGeneric(PartitionRootGeneric* root, +void* PartitionReallocGeneric(PartitionRootGeneric* root, void* ptr, - size_t newSize, - const char* typeName) { + size_t new_size, + const char* type_name) { #if defined(MEMORY_TOOL_REPLACES_ALLOCATOR) - return realloc(ptr, newSize); + return realloc(ptr, new_size); #else if (UNLIKELY(!ptr)) - return partitionAllocGeneric(root, newSize, typeName); - if (UNLIKELY(!newSize)) { - partitionFreeGeneric(root, ptr); + return PartitionAllocGeneric(root, new_size, type_name); + if (UNLIKELY(!new_size)) { + PartitionFreeGeneric(root, ptr); return 0; } - if (newSize > kGenericMaxDirectMapped) + if (new_size > kGenericMaxDirectMapped) partitionExcessiveAllocationSize(); - DCHECK(partitionPointerIsValid(partitionCookieFreePointerAdjust(ptr))); + DCHECK(PartitionPointerIsValid(PartitionCookieFreePointerAdjust(ptr))); PartitionPage* page = - partitionPointerToPage(partitionCookieFreePointerAdjust(ptr)); + PartitionPointerToPage(PartitionCookieFreePointerAdjust(ptr)); - if (UNLIKELY(partitionBucketIsDirectMapped(page->bucket))) { + if (UNLIKELY(PartitionBucketIsDirectMapped(page->bucket))) { // We may be able to perform the realloc in place by changing the // accessibility of memory pages and, if reducing the size, decommitting // them. 
- if (partitionReallocDirectMappedInPlace(root, page, newSize)) { - PartitionAllocHooks::reallocHookIfEnabled(ptr, ptr, newSize, typeName); + if (partitionReallocDirectMappedInPlace(root, page, new_size)) { + PartitionAllocHooks::ReallocHookIfEnabled(ptr, ptr, new_size, type_name); return ptr; } } - size_t actualNewSize = partitionAllocActualSize(root, newSize); - size_t actualOldSize = partitionAllocGetSize(ptr); + size_t actualNewSize = PartitionAllocActualSize(root, new_size); + size_t actualOldSize = PartitionAllocGetSize(ptr); // TODO: note that tcmalloc will "ignore" a downsizing realloc() unless the // new size is a significant percentage smaller. We could do the same if we // determine it is a win. if (actualNewSize == actualOldSize) { - // Trying to allocate a block of size newSize would give us a block of + // Trying to allocate a block of size new_size would give us a block of // the same size as the one we've already got, so no point in doing // anything here. return ptr; } // This realloc cannot be resized in-place. Sadness. 
- void* ret = partitionAllocGeneric(root, newSize, typeName); - size_t copySize = actualOldSize; - if (newSize < copySize) - copySize = newSize; + void* ret = PartitionAllocGeneric(root, new_size, type_name); + size_t copy_size = actualOldSize; + if (new_size < copy_size) + copy_size = new_size; - memcpy(ret, ptr, copySize); - partitionFreeGeneric(root, ptr); + memcpy(ret, ptr, copy_size); + PartitionFreeGeneric(root, ptr); return ret; #endif } -static size_t partitionPurgePage(PartitionPage* page, bool discard) { +static size_t PartitionPurgePage(PartitionPage* page, bool discard) { const PartitionBucket* bucket = page->bucket; - size_t slotSize = bucket->slotSize; - if (slotSize < kSystemPageSize || !page->numAllocatedSlots) + size_t slot_size = bucket->slot_size; + if (slot_size < kSystemPageSize || !page->num_allocated_slots) return 0; - size_t bucketNumSlots = partitionBucketSlots(bucket); - size_t discardableBytes = 0; + size_t bucket_num_slots = PartitionBucketSlots(bucket); + size_t discardable_bytes = 0; - size_t rawSize = partitionPageGetRawSize(const_cast<PartitionPage*>(page)); - if (rawSize) { - uint32_t usedBytes = static_cast<uint32_t>(roundUpToSystemPage(rawSize)); - discardableBytes = bucket->slotSize - usedBytes; - if (discardableBytes && discard) { - char* ptr = reinterpret_cast<char*>(partitionPageToPointer(page)); + size_t raw_size = PartitionPageGetRawSize(const_cast<PartitionPage*>(page)); + if (raw_size) { + uint32_t usedBytes = static_cast<uint32_t>(RoundUpToSystemPage(raw_size)); + discardable_bytes = bucket->slot_size - usedBytes; + if (discardable_bytes && discard) { + char* ptr = reinterpret_cast<char*>(PartitionPageToPointer(page)); ptr += usedBytes; - discardSystemPages(ptr, discardableBytes); + DiscardSystemPages(ptr, discardable_bytes); } - return discardableBytes; + return discardable_bytes; } const size_t maxSlotCount = (kPartitionPageSize * kMaxPartitionPagesPerSlotSpan) / kSystemPageSize; - DCHECK(bucketNumSlots <= 
maxSlotCount); - DCHECK(page->numUnprovisionedSlots < bucketNumSlots); - size_t numSlots = bucketNumSlots - page->numUnprovisionedSlots; + DCHECK(bucket_num_slots <= maxSlotCount); + DCHECK(page->num_unprovisioned_slots < bucket_num_slots); + size_t num_slots = bucket_num_slots - page->num_unprovisioned_slots; char slotUsage[maxSlotCount]; size_t lastSlot = static_cast<size_t>(-1); - memset(slotUsage, 1, numSlots); - char* ptr = reinterpret_cast<char*>(partitionPageToPointer(page)); - PartitionFreelistEntry* entry = page->freelistHead; + memset(slotUsage, 1, num_slots); + char* ptr = reinterpret_cast<char*>(PartitionPageToPointer(page)); + PartitionFreelistEntry* entry = page->freelist_head; // First, walk the freelist for this page and make a bitmap of which slots // are not in use. while (entry) { - size_t slotIndex = (reinterpret_cast<char*>(entry) - ptr) / slotSize; - DCHECK(slotIndex < numSlots); + size_t slotIndex = (reinterpret_cast<char*>(entry) - ptr) / slot_size; + DCHECK(slotIndex < num_slots); slotUsage[slotIndex] = 0; - entry = partitionFreelistMask(entry->next); + entry = PartitionFreelistMask(entry->next); // If we have a slot where the masked freelist entry is 0, we can // actually discard that freelist entry because touching a discarded // page is guaranteed to return original content or 0. // (Note that this optimization won't fire on big endian machines // because the masking function is negation.) - if (!partitionFreelistMask(entry)) + if (!PartitionFreelistMask(entry)) lastSlot = slotIndex; } // If the slot(s) at the end of the slot span are not in used, we can // truncate them entirely and rewrite the freelist. size_t truncatedSlots = 0; - while (!slotUsage[numSlots - 1]) { + while (!slotUsage[num_slots - 1]) { truncatedSlots++; - numSlots--; - DCHECK(numSlots); + num_slots--; + DCHECK(num_slots); } // First, do the work of calculating the discardable bytes. Don't actually // discard anything unless the discard flag was passed in. 
@@ -1177,83 +1183,83 @@ char* endPtr = nullptr; size_t unprovisionedBytes = 0; if (truncatedSlots) { - beginPtr = ptr + (numSlots * slotSize); - endPtr = beginPtr + (slotSize * truncatedSlots); + beginPtr = ptr + (num_slots * slot_size); + endPtr = beginPtr + (slot_size * truncatedSlots); beginPtr = reinterpret_cast<char*>( - roundUpToSystemPage(reinterpret_cast<size_t>(beginPtr))); + RoundUpToSystemPage(reinterpret_cast<size_t>(beginPtr))); // We round the end pointer here up and not down because we're at the // end of a slot span, so we "own" all the way up the page boundary. endPtr = reinterpret_cast<char*>( - roundUpToSystemPage(reinterpret_cast<size_t>(endPtr))); - DCHECK(endPtr <= ptr + partitionBucketBytes(bucket)); + RoundUpToSystemPage(reinterpret_cast<size_t>(endPtr))); + DCHECK(endPtr <= ptr + PartitionBucketBytes(bucket)); if (beginPtr < endPtr) { unprovisionedBytes = endPtr - beginPtr; - discardableBytes += unprovisionedBytes; + discardable_bytes += unprovisionedBytes; } } if (unprovisionedBytes && discard) { DCHECK(truncatedSlots > 0); size_t numNewEntries = 0; - page->numUnprovisionedSlots += static_cast<uint16_t>(truncatedSlots); + page->num_unprovisioned_slots += static_cast<uint16_t>(truncatedSlots); // Rewrite the freelist. - PartitionFreelistEntry** entryPtr = &page->freelistHead; - for (size_t slotIndex = 0; slotIndex < numSlots; ++slotIndex) { + PartitionFreelistEntry** entryPtr = &page->freelist_head; + for (size_t slotIndex = 0; slotIndex < num_slots; ++slotIndex) { if (slotUsage[slotIndex]) continue; PartitionFreelistEntry* entry = reinterpret_cast<PartitionFreelistEntry*>( - ptr + (slotSize * slotIndex)); - *entryPtr = partitionFreelistMask(entry); + ptr + (slot_size * slotIndex)); + *entryPtr = PartitionFreelistMask(entry); entryPtr = reinterpret_cast<PartitionFreelistEntry**>(entry); numNewEntries++; } // Terminate the freelist chain. *entryPtr = nullptr; // The freelist head is stored unmasked. 
- page->freelistHead = partitionFreelistMask(page->freelistHead); - DCHECK(numNewEntries == numSlots - page->numAllocatedSlots); + page->freelist_head = PartitionFreelistMask(page->freelist_head); + DCHECK(numNewEntries == num_slots - page->num_allocated_slots); // Discard the memory. - discardSystemPages(beginPtr, unprovisionedBytes); + DiscardSystemPages(beginPtr, unprovisionedBytes); } // Next, walk the slots and for any not in use, consider where the system // page boundaries occur. We can release any system pages back to the // system as long as we don't interfere with a freelist pointer or an // adjacent slot. - for (size_t i = 0; i < numSlots; ++i) { + for (size_t i = 0; i < num_slots; ++i) { if (slotUsage[i]) continue; // The first address we can safely discard is just after the freelist // pointer. There's one quirk: if the freelist pointer is actually a // null, we can discard that pointer value too. - char* beginPtr = ptr + (i * slotSize); - char* endPtr = beginPtr + slotSize; + char* beginPtr = ptr + (i * slot_size); + char* endPtr = beginPtr + slot_size; if (i != lastSlot) beginPtr += sizeof(PartitionFreelistEntry); beginPtr = reinterpret_cast<char*>( - roundUpToSystemPage(reinterpret_cast<size_t>(beginPtr))); + RoundUpToSystemPage(reinterpret_cast<size_t>(beginPtr))); endPtr = reinterpret_cast<char*>( - roundDownToSystemPage(reinterpret_cast<size_t>(endPtr))); + RoundDownToSystemPage(reinterpret_cast<size_t>(endPtr))); if (beginPtr < endPtr) { size_t partialSlotBytes = endPtr - beginPtr; - discardableBytes += partialSlotBytes; + discardable_bytes += partialSlotBytes; if (discard) - discardSystemPages(beginPtr, partialSlotBytes); + DiscardSystemPages(beginPtr, partialSlotBytes); } } - return discardableBytes; + return discardable_bytes; } static void partitionPurgeBucket(PartitionBucket* bucket) { - if (bucket->activePagesHead != &PartitionRootGeneric::gSeedPage) { - for (PartitionPage* page = bucket->activePagesHead; page; - page = page->nextPage) { + 
if (bucket->active_pages_head != &PartitionRootGeneric::gSeedPage) { + for (PartitionPage* page = bucket->active_pages_head; page; + page = page->next_page) { DCHECK(page != &PartitionRootGeneric::gSeedPage); - (void)partitionPurgePage(page, true); + (void)PartitionPurgePage(page, true); } } } -void partitionPurgeMemory(PartitionRoot* root, int flags) { +void PartitionPurgeMemory(PartitionRoot* root, int flags) { if (flags & PartitionPurgeDecommitEmptyPages) partitionDecommitEmptyPages(root); // We don't currently do anything for PartitionPurgeDiscardUnusedSystemPages @@ -1262,105 +1268,106 @@ // at the moment. } -void partitionPurgeMemoryGeneric(PartitionRootGeneric* root, int flags) { +void PartitionPurgeMemoryGeneric(PartitionRootGeneric* root, int flags) { subtle::SpinLock::Guard guard(root->lock); if (flags & PartitionPurgeDecommitEmptyPages) partitionDecommitEmptyPages(root); if (flags & PartitionPurgeDiscardUnusedSystemPages) { for (size_t i = 0; i < kGenericNumBuckets; ++i) { PartitionBucket* bucket = &root->buckets[i]; - if (bucket->slotSize >= kSystemPageSize) + if (bucket->slot_size >= kSystemPageSize) partitionPurgeBucket(bucket); } } } -static void partitionDumpPageStats(PartitionBucketMemoryStats* statsOut, +static void PartitionDumpPageStats(PartitionBucketMemoryStats* stats_out, const PartitionPage* page) { - uint16_t bucketNumSlots = partitionBucketSlots(page->bucket); + uint16_t bucket_num_slots = PartitionBucketSlots(page->bucket); - if (partitionPageStateIsDecommitted(page)) { - ++statsOut->numDecommittedPages; + if (PartitionPageStateIsDecommitted(page)) { + ++stats_out->num_decommitted_pages; return; } - statsOut->discardableBytes += - partitionPurgePage(const_cast<PartitionPage*>(page), false); + stats_out->discardable_bytes += + PartitionPurgePage(const_cast<PartitionPage*>(page), false); - size_t rawSize = partitionPageGetRawSize(const_cast<PartitionPage*>(page)); - if (rawSize) - statsOut->activeBytes += static_cast<uint32_t>(rawSize); + 
size_t raw_size = PartitionPageGetRawSize(const_cast<PartitionPage*>(page)); + if (raw_size) + stats_out->active_bytes += static_cast<uint32_t>(raw_size); else - statsOut->activeBytes += - (page->numAllocatedSlots * statsOut->bucketSlotSize); + stats_out->active_bytes += + (page->num_allocated_slots * stats_out->bucket_slot_size); - size_t pageBytesResident = - roundUpToSystemPage((bucketNumSlots - page->numUnprovisionedSlots) * - statsOut->bucketSlotSize); - statsOut->residentBytes += pageBytesResident; - if (partitionPageStateIsEmpty(page)) { - statsOut->decommittableBytes += pageBytesResident; - ++statsOut->numEmptyPages; - } else if (partitionPageStateIsFull(page)) { - ++statsOut->numFullPages; + size_t page_bytes_resident = + RoundUpToSystemPage((bucket_num_slots - page->num_unprovisioned_slots) * + stats_out->bucket_slot_size); + stats_out->resident_bytes += page_bytes_resident; + if (PartitionPageStateIsEmpty(page)) { + stats_out->decommittable_bytes += page_bytes_resident; + ++stats_out->num_empty_pages; + } else if (PartitionPageStateIsFull(page)) { + ++stats_out->num_full_pages; } else { - DCHECK(partitionPageStateIsActive(page)); - ++statsOut->numActivePages; + DCHECK(PartitionPageStateIsActive(page)); + ++stats_out->num_active_pages; } } -static void partitionDumpBucketStats(PartitionBucketMemoryStats* statsOut, +static void PartitionDumpBucketStats(PartitionBucketMemoryStats* stats_out, const PartitionBucket* bucket) { - DCHECK(!partitionBucketIsDirectMapped(bucket)); - statsOut->isValid = false; + DCHECK(!PartitionBucketIsDirectMapped(bucket)); + stats_out->is_valid = false; // If the active page list is empty (== &PartitionRootGeneric::gSeedPage), // the bucket might still need to be reported if it has a list of empty, // decommitted or full pages. 
- if (bucket->activePagesHead == &PartitionRootGeneric::gSeedPage && - !bucket->emptyPagesHead && !bucket->decommittedPagesHead && - !bucket->numFullPages) + if (bucket->active_pages_head == &PartitionRootGeneric::gSeedPage && + !bucket->empty_pages_head && !bucket->decommitted_pages_head && + !bucket->num_full_pages) return; - memset(statsOut, '\0', sizeof(*statsOut)); - statsOut->isValid = true; - statsOut->isDirectMap = false; - statsOut->numFullPages = static_cast<size_t>(bucket->numFullPages); - statsOut->bucketSlotSize = bucket->slotSize; - uint16_t bucketNumSlots = partitionBucketSlots(bucket); - size_t bucketUsefulStorage = statsOut->bucketSlotSize * bucketNumSlots; - statsOut->allocatedPageSize = partitionBucketBytes(bucket); - statsOut->activeBytes = bucket->numFullPages * bucketUsefulStorage; - statsOut->residentBytes = bucket->numFullPages * statsOut->allocatedPageSize; + memset(stats_out, '\0', sizeof(*stats_out)); + stats_out->is_valid = true; + stats_out->is_direct_map = false; + stats_out->num_full_pages = static_cast<size_t>(bucket->num_full_pages); + stats_out->bucket_slot_size = bucket->slot_size; + uint16_t bucket_num_slots = PartitionBucketSlots(bucket); + size_t bucketUsefulStorage = stats_out->bucket_slot_size * bucket_num_slots; + stats_out->allocated_page_size = PartitionBucketBytes(bucket); + stats_out->active_bytes = bucket->num_full_pages * bucketUsefulStorage; + stats_out->resident_bytes = + bucket->num_full_pages * stats_out->allocated_page_size; - for (const PartitionPage* page = bucket->emptyPagesHead; page; - page = page->nextPage) { - DCHECK(partitionPageStateIsEmpty(page) || - partitionPageStateIsDecommitted(page)); - partitionDumpPageStats(statsOut, page); + for (const PartitionPage* page = bucket->empty_pages_head; page; + page = page->next_page) { + DCHECK(PartitionPageStateIsEmpty(page) || + PartitionPageStateIsDecommitted(page)); + PartitionDumpPageStats(stats_out, page); } - for (const PartitionPage* page = 
bucket->decommittedPagesHead; page; - page = page->nextPage) { - DCHECK(partitionPageStateIsDecommitted(page)); - partitionDumpPageStats(statsOut, page); + for (const PartitionPage* page = bucket->decommitted_pages_head; page; + page = page->next_page) { + DCHECK(PartitionPageStateIsDecommitted(page)); + PartitionDumpPageStats(stats_out, page); } - if (bucket->activePagesHead != &PartitionRootGeneric::gSeedPage) { - for (const PartitionPage* page = bucket->activePagesHead; page; - page = page->nextPage) { + if (bucket->active_pages_head != &PartitionRootGeneric::gSeedPage) { + for (const PartitionPage* page = bucket->active_pages_head; page; + page = page->next_page) { DCHECK(page != &PartitionRootGeneric::gSeedPage); - partitionDumpPageStats(statsOut, page); + PartitionDumpPageStats(stats_out, page); } } } -void partitionDumpStatsGeneric(PartitionRootGeneric* partition, - const char* partitionName, - bool isLightDump, - PartitionStatsDumper* partitionStatsDumper) { - PartitionBucketMemoryStats bucketStats[kGenericNumBuckets]; +void PartitionDumpStatsGeneric(PartitionRootGeneric* partition, + const char* partition_name, + bool is_light_dump, + PartitionStatsDumper* dumper) { + PartitionBucketMemoryStats bucket_stats[kGenericNumBuckets]; static const size_t kMaxReportableDirectMaps = 4096; - uint32_t directMapLengths[kMaxReportableDirectMaps]; - size_t numDirectMappedAllocations = 0; + uint32_t direct_map_lengths[kMaxReportableDirectMaps]; + size_t num_direct_mapped_allocations = 0; { subtle::SpinLock::Guard guard(partition->lock); @@ -1369,97 +1376,94 @@ const PartitionBucket* bucket = &partition->buckets[i]; // Don't report the pseudo buckets that the generic allocator sets up in // order to preserve a fast size->bucket map (see - // partitionAllocGenericInit for details). - if (!bucket->activePagesHead) - bucketStats[i].isValid = false; + // PartitionAllocGenericInit for details). 
+ if (!bucket->active_pages_head) + bucket_stats[i].is_valid = false; else - partitionDumpBucketStats(&bucketStats[i], bucket); + PartitionDumpBucketStats(&bucket_stats[i], bucket); } - for (PartitionDirectMapExtent* extent = partition->directMapList; extent; - extent = extent->nextExtent) { - DCHECK(!extent->nextExtent || extent->nextExtent->prevExtent == extent); - directMapLengths[numDirectMappedAllocations] = extent->bucket->slotSize; - ++numDirectMappedAllocations; - if (numDirectMappedAllocations == kMaxReportableDirectMaps) + for (PartitionDirectMapExtent* extent = partition->direct_map_list; extent; + extent = extent->next_extent) { + DCHECK(!extent->next_extent || + extent->next_extent->prev_extent == extent); + direct_map_lengths[num_direct_mapped_allocations] = + extent->bucket->slot_size; + ++num_direct_mapped_allocations; + if (num_direct_mapped_allocations == kMaxReportableDirectMaps) break; } } - // partitionsDumpBucketStats is called after collecting stats because it - // can try to allocate using PartitionAllocGeneric and it can't obtain the - // lock. - PartitionMemoryStats partitionStats = {0}; - partitionStats.totalMmappedBytes = partition->totalSizeOfSuperPages + - partition->totalSizeOfDirectMappedPages; - partitionStats.totalCommittedBytes = partition->totalSizeOfCommittedPages; + // Call |PartitionsDumpBucketStats| after collecting stats because it can try + // to allocate using |PartitionAllocGeneric| and it can't obtain the lock. 
+ PartitionMemoryStats stats = {0}; + stats.total_mmapped_bytes = partition->total_size_of_super_pages + + partition->total_size_of_direct_mapped_pages; + stats.total_committed_bytes = partition->total_size_of_committed_pages; for (size_t i = 0; i < kGenericNumBuckets; ++i) { - if (bucketStats[i].isValid) { - partitionStats.totalResidentBytes += bucketStats[i].residentBytes; - partitionStats.totalActiveBytes += bucketStats[i].activeBytes; - partitionStats.totalDecommittableBytes += - bucketStats[i].decommittableBytes; - partitionStats.totalDiscardableBytes += bucketStats[i].discardableBytes; - if (!isLightDump) - partitionStatsDumper->partitionsDumpBucketStats(partitionName, - &bucketStats[i]); + if (bucket_stats[i].is_valid) { + stats.total_resident_bytes += bucket_stats[i].resident_bytes; + stats.total_active_bytes += bucket_stats[i].active_bytes; + stats.total_decommittable_bytes += bucket_stats[i].decommittable_bytes; + stats.total_discardable_bytes += bucket_stats[i].discardable_bytes; + if (!is_light_dump) + dumper->PartitionsDumpBucketStats(partition_name, &bucket_stats[i]); } } - size_t directMappedAllocationsTotalSize = 0; - for (size_t i = 0; i < numDirectMappedAllocations; ++i) { - uint32_t size = directMapLengths[i]; - directMappedAllocationsTotalSize += size; - if (isLightDump) + size_t direct_mapped_allocations_total_size = 0; + for (size_t i = 0; i < num_direct_mapped_allocations; ++i) { + uint32_t size = direct_map_lengths[i]; + direct_mapped_allocations_total_size += size; + if (is_light_dump) continue; PartitionBucketMemoryStats stats; memset(&stats, '\0', sizeof(stats)); - stats.isValid = true; - stats.isDirectMap = true; - stats.numFullPages = 1; - stats.allocatedPageSize = size; - stats.bucketSlotSize = size; - stats.activeBytes = size; - stats.residentBytes = size; - partitionStatsDumper->partitionsDumpBucketStats(partitionName, &stats); + stats.is_valid = true; + stats.is_direct_map = true; + stats.num_full_pages = 1; + 
stats.allocated_page_size = size; + stats.bucket_slot_size = size; + stats.active_bytes = size; + stats.resident_bytes = size; + dumper->PartitionsDumpBucketStats(partition_name, &stats); } - partitionStats.totalResidentBytes += directMappedAllocationsTotalSize; - partitionStats.totalActiveBytes += directMappedAllocationsTotalSize; - partitionStatsDumper->partitionDumpTotals(partitionName, &partitionStats); + stats.total_resident_bytes += direct_mapped_allocations_total_size; + stats.total_active_bytes += direct_mapped_allocations_total_size; + dumper->PartitionDumpTotals(partition_name, &stats); } -void partitionDumpStats(PartitionRoot* partition, - const char* partitionName, - bool isLightDump, - PartitionStatsDumper* partitionStatsDumper) { +void PartitionDumpStats(PartitionRoot* partition, + const char* partition_name, + bool is_light_dump, + PartitionStatsDumper* dumper) { static const size_t kMaxReportableBuckets = 4096 / sizeof(void*); - PartitionBucketMemoryStats memoryStats[kMaxReportableBuckets]; - const size_t partitionNumBuckets = partition->numBuckets; + PartitionBucketMemoryStats memory_stats[kMaxReportableBuckets]; + const size_t partitionNumBuckets = partition->num_buckets; DCHECK(partitionNumBuckets <= kMaxReportableBuckets); for (size_t i = 0; i < partitionNumBuckets; ++i) - partitionDumpBucketStats(&memoryStats[i], &partition->buckets()[i]); + PartitionDumpBucketStats(&memory_stats[i], &partition->buckets()[i]); - // partitionsDumpBucketStats is called after collecting stats because it + // PartitionsDumpBucketStats is called after collecting stats because it // can use PartitionAlloc to allocate and this can affect the statistics. 
- PartitionMemoryStats partitionStats = {0}; - partitionStats.totalMmappedBytes = partition->totalSizeOfSuperPages; - partitionStats.totalCommittedBytes = partition->totalSizeOfCommittedPages; - DCHECK(!partition->totalSizeOfDirectMappedPages); + PartitionMemoryStats stats = {0}; + stats.total_mmapped_bytes = partition->total_size_of_super_pages; + stats.total_committed_bytes = partition->total_size_of_committed_pages; + DCHECK(!partition->total_size_of_direct_mapped_pages); for (size_t i = 0; i < partitionNumBuckets; ++i) { - if (memoryStats[i].isValid) { - partitionStats.totalResidentBytes += memoryStats[i].residentBytes; - partitionStats.totalActiveBytes += memoryStats[i].activeBytes; - partitionStats.totalDecommittableBytes += - memoryStats[i].decommittableBytes; - partitionStats.totalDiscardableBytes += memoryStats[i].discardableBytes; - if (!isLightDump) - partitionStatsDumper->partitionsDumpBucketStats(partitionName, - &memoryStats[i]); + if (memory_stats[i].is_valid) { + stats.total_resident_bytes += memory_stats[i].resident_bytes; + stats.total_active_bytes += memory_stats[i].active_bytes; + stats.total_decommittable_bytes += memory_stats[i].decommittable_bytes; + stats.total_discardable_bytes += memory_stats[i].discardable_bytes; + if (!is_light_dump) + dumper->PartitionsDumpBucketStats(partition_name, &memory_stats[i]); } } - partitionStatsDumper->partitionDumpTotals(partitionName, &partitionStats); + dumper->PartitionDumpTotals(partition_name, &stats); } } // namespace base
diff --git a/base/allocator/partition_allocator/partition_alloc.h b/base/allocator/partition_allocator/partition_alloc.h index 4c36435..0fafdf6 100644 --- a/base/allocator/partition_allocator/partition_alloc.h +++ b/base/allocator/partition_allocator/partition_alloc.h
@@ -6,8 +6,8 @@ #define BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_H // DESCRIPTION -// partitionAlloc() / partitionAllocGeneric() and partitionFree() / -// partitionFreeGeneric() are approximately analagous to malloc() and free(). +// partitionAlloc() / PartitionAllocGeneric() and PartitionFree() / +// PartitionFreeGeneric() are approximately analagous to malloc() and free(). // // The main difference is that a PartitionRoot / PartitionRootGeneric object // must be supplied to these functions, representing a specific "heap partition" @@ -30,7 +30,7 @@ // - Allocation sizes must be aligned to the system pointer size. // - Allocations are bucketed exactly according to size. // -// And for partitionAllocGeneric(): +// And for PartitionAllocGeneric(): // - Multi-threaded use against a single partition is ok; locking is handled. // - Allocations of any arbitrary size can be handled (subject to a limit of // INT_MAX bytes for security reasons). @@ -91,7 +91,7 @@ // Slot span sizes are adjusted depending on the allocation size, to make sure // the packing does not lead to unused (wasted) space at the end of the last // system page of the span. For our current max slot span size of 64k and other -// constant values, we pack _all_ partitionAllocGeneric() sizes perfectly up +// constant values, we pack _all_ PartitionAllocGeneric() sizes perfectly up // against the end of a system page. static const size_t kPartitionPageShift = 14; // 16KB static const size_t kPartitionPageSize = 1 << kPartitionPageShift; @@ -199,7 +199,7 @@ kGenericMaxBucketed + 1; // Limit when downsizing a direct mapping using realloc(). static const size_t kGenericMaxDirectMapped = INT_MAX - kSystemPageSize; -static const size_t kBitsPerSizet = sizeof(void*) * CHAR_BIT; +static const size_t kBitsPerSizeT = sizeof(void*) * CHAR_BIT; // Constants for the memory reclaim logic. 
static const size_t kMaxFreeableSpans = 16; @@ -255,23 +255,23 @@ // an empty or decommitted page (if one exists) will be pulled from the empty // list on to the active list. struct PartitionPage { - PartitionFreelistEntry* freelistHead; - PartitionPage* nextPage; + PartitionFreelistEntry* freelist_head; + PartitionPage* next_page; PartitionBucket* bucket; // Deliberately signed, 0 for empty or decommitted page, -n for full pages: - int16_t numAllocatedSlots; - uint16_t numUnprovisionedSlots; - uint16_t pageOffset; - int16_t emptyCacheIndex; // -1 if not in the empty cache. + int16_t num_allocated_slots; + uint16_t num_unprovisioned_slots; + uint16_t page_offset; + int16_t empty_cache_index; // -1 if not in the empty cache. }; struct PartitionBucket { - PartitionPage* activePagesHead; // Accessed most in hot path => goes first. - PartitionPage* emptyPagesHead; - PartitionPage* decommittedPagesHead; - uint32_t slotSize; - unsigned numSystemPagesPerSlotSpan : 8; - unsigned numFullPages : 24; + PartitionPage* active_pages_head; // Accessed most in hot path => goes first. + PartitionPage* empty_pages_head; + PartitionPage* decommitted_pages_head; + uint32_t slot_size; + unsigned num_system_pages_per_slot_span : 8; + unsigned num_full_pages : 24; }; // An "extent" is a span of consecutive superpages. We link to the partition's @@ -279,36 +279,37 @@ // area. struct PartitionSuperPageExtentEntry { PartitionRootBase* root; - char* superPageBase; - char* superPagesEnd; + char* super_page_base; + char* super_pages_end; PartitionSuperPageExtentEntry* next; }; struct PartitionDirectMapExtent { - PartitionDirectMapExtent* nextExtent; - PartitionDirectMapExtent* prevExtent; + PartitionDirectMapExtent* next_extent; + PartitionDirectMapExtent* prev_extent; PartitionBucket* bucket; - size_t mapSize; // Mapped size, not including guard pages and meta-data. + size_t map_size; // Mapped size, not including guard pages and meta-data. 
}; struct BASE_EXPORT PartitionRootBase { - size_t totalSizeOfCommittedPages; - size_t totalSizeOfSuperPages; - size_t totalSizeOfDirectMappedPages; - // Invariant: totalSizeOfCommittedPages <= - // totalSizeOfSuperPages + totalSizeOfDirectMappedPages. - unsigned numBuckets; - unsigned maxAllocation; + size_t total_size_of_committed_pages; + size_t total_size_of_super_pages; + size_t total_size_of_direct_mapped_pages; + // Invariant: total_size_of_committed_pages <= + // total_size_of_super_pages + + // total_size_of_direct_mapped_pages. + unsigned num_buckets; + unsigned max_allocation; bool initialized; - char* nextSuperPage; - char* nextPartitionPage; - char* nextPartitionPageEnd; - PartitionSuperPageExtentEntry* currentExtent; - PartitionSuperPageExtentEntry* firstExtent; - PartitionDirectMapExtent* directMapList; - PartitionPage* globalEmptyPageRing[kMaxFreeableSpans]; - int16_t globalEmptyPageRingIndex; - uintptr_t invertedSelf; + char* next_super_page; + char* next_partition_page; + char* next_partition_page_end; + PartitionSuperPageExtentEntry* current_extent; + PartitionSuperPageExtentEntry* first_extent; + PartitionDirectMapExtent* direct_map_list; + PartitionPage* global_empty_page_ring[kMaxFreeableSpans]; + int16_t global_empty_page_ring_index; + uintptr_t inverted_self; static subtle::SpinLock gInitializedLock; static bool gInitialized; @@ -338,19 +339,19 @@ struct PartitionRootGeneric : public PartitionRootBase { subtle::SpinLock lock; // Some pre-computed constants. - size_t orderIndexShifts[kBitsPerSizet + 1]; - size_t orderSubIndexMasks[kBitsPerSizet + 1]; + size_t order_index_shifts[kBitsPerSizeT + 1]; + size_t order_sub_index_masks[kBitsPerSizeT + 1]; // The bucket lookup table lets us map a size_t to a bucket quickly. // The trailing +1 caters for the overflow case for very large allocation // sizes. It is one flat array instead of a 2D array because in the 2D // world, we'd need to index array[blah][max+1] which risks undefined // behavior. 
PartitionBucket* - bucketLookups[((kBitsPerSizet + 1) * kGenericNumBucketsPerOrder) + 1]; + bucket_lookups[((kBitsPerSizeT + 1) * kGenericNumBucketsPerOrder) + 1]; PartitionBucket buckets[kGenericNumBuckets]; }; -// Flags for partitionAllocGenericFlags. +// Flags for PartitionAllocGenericFlags. enum PartitionAllocFlags { PartitionAllocReturnNull = 1 << 0, }; @@ -358,56 +359,56 @@ // Struct used to retrieve total memory usage of a partition. Used by // PartitionStatsDumper implementation. struct PartitionMemoryStats { - size_t totalMmappedBytes; // Total bytes mmaped from the system. - size_t totalCommittedBytes; // Total size of commmitted pages. - size_t totalResidentBytes; // Total bytes provisioned by the partition. - size_t totalActiveBytes; // Total active bytes in the partition. - size_t totalDecommittableBytes; // Total bytes that could be decommitted. - size_t totalDiscardableBytes; // Total bytes that could be discarded. + size_t total_mmapped_bytes; // Total bytes mmaped from the system. + size_t total_committed_bytes; // Total size of commmitted pages. + size_t total_resident_bytes; // Total bytes provisioned by the partition. + size_t total_active_bytes; // Total active bytes in the partition. + size_t total_decommittable_bytes; // Total bytes that could be decommitted. + size_t total_discardable_bytes; // Total bytes that could be discarded. }; // Struct used to retrieve memory statistics about a partition bucket. Used by // PartitionStatsDumper implementation. struct PartitionBucketMemoryStats { - bool isValid; // Used to check if the stats is valid. - bool isDirectMap; // True if this is a direct mapping; size will not be - // unique. - uint32_t bucketSlotSize; // The size of the slot in bytes. - uint32_t allocatedPageSize; // Total size the partition page allocated from + bool is_valid; // Used to check if the stats is valid. + bool is_direct_map; // True if this is a direct mapping; size will not be + // unique. 
+ uint32_t bucket_slot_size; // The size of the slot in bytes. + uint32_t allocated_page_size; // Total size the partition page allocated from // the system. - uint32_t activeBytes; // Total active bytes used in the bucket. - uint32_t residentBytes; // Total bytes provisioned in the bucket. - uint32_t decommittableBytes; // Total bytes that could be decommitted. - uint32_t discardableBytes; // Total bytes that could be discarded. - uint32_t numFullPages; // Number of pages with all slots allocated. - uint32_t numActivePages; // Number of pages that have at least one + uint32_t active_bytes; // Total active bytes used in the bucket. + uint32_t resident_bytes; // Total bytes provisioned in the bucket. + uint32_t decommittable_bytes; // Total bytes that could be decommitted. + uint32_t discardable_bytes; // Total bytes that could be discarded. + uint32_t num_full_pages; // Number of pages with all slots allocated. + uint32_t num_active_pages; // Number of pages that have at least one // provisioned slot. - uint32_t numEmptyPages; // Number of pages that are empty + uint32_t num_empty_pages; // Number of pages that are empty // but not decommitted. - uint32_t numDecommittedPages; // Number of pages that are empty - // and decommitted. + uint32_t num_decommitted_pages; // Number of pages that are empty + // and decommitted. }; -// Interface that is passed to partitionDumpStats and -// partitionDumpStatsGeneric for using the memory statistics. +// Interface that is passed to PartitionDumpStats and +// PartitionDumpStatsGeneric for using the memory statistics. class BASE_EXPORT PartitionStatsDumper { public: // Called to dump total memory used by partition, once per partition. - virtual void partitionDumpTotals(const char* partitionName, + virtual void PartitionDumpTotals(const char* partition_name, const PartitionMemoryStats*) = 0; // Called to dump stats about buckets, for each bucket. 
- virtual void partitionsDumpBucketStats(const char* partitionName, + virtual void PartitionsDumpBucketStats(const char* partition_name, const PartitionBucketMemoryStats*) = 0; }; -BASE_EXPORT void partitionAllocGlobalInit(void (*oomHandlingFunction)()); -BASE_EXPORT void partitionAllocInit(PartitionRoot*, - size_t numBuckets, - size_t maxAllocation); -BASE_EXPORT bool partitionAllocShutdown(PartitionRoot*); -BASE_EXPORT void partitionAllocGenericInit(PartitionRootGeneric*); -BASE_EXPORT bool partitionAllocGenericShutdown(PartitionRootGeneric*); +BASE_EXPORT void PartitionAllocGlobalInit(void (*oom_handling_function)()); +BASE_EXPORT void PartitionAllocInit(PartitionRoot*, + size_t num_buckets, + size_t max_allocation); +BASE_EXPORT bool PartitionAllocShutdown(PartitionRoot*); +BASE_EXPORT void PartitionAllocGenericInit(PartitionRootGeneric*); +BASE_EXPORT bool PartitionAllocGenericShutdown(PartitionRootGeneric*); enum PartitionPurgeFlags { // Decommitting the ring list of empty pages is reasonably fast. 
@@ -419,73 +420,73 @@ PartitionPurgeDiscardUnusedSystemPages = 1 << 1, }; -BASE_EXPORT void partitionPurgeMemory(PartitionRoot*, int); -BASE_EXPORT void partitionPurgeMemoryGeneric(PartitionRootGeneric*, int); +BASE_EXPORT void PartitionPurgeMemory(PartitionRoot*, int); +BASE_EXPORT void PartitionPurgeMemoryGeneric(PartitionRootGeneric*, int); -BASE_EXPORT NOINLINE void* partitionAllocSlowPath(PartitionRootBase*, +BASE_EXPORT NOINLINE void* PartitionAllocSlowPath(PartitionRootBase*, int, size_t, PartitionBucket*); -BASE_EXPORT NOINLINE void partitionFreeSlowPath(PartitionPage*); -BASE_EXPORT NOINLINE void* partitionReallocGeneric(PartitionRootGeneric*, +BASE_EXPORT NOINLINE void PartitionFreeSlowPath(PartitionPage*); +BASE_EXPORT NOINLINE void* PartitionReallocGeneric(PartitionRootGeneric*, void*, size_t, - const char* typeName); + const char* type_name); -BASE_EXPORT void partitionDumpStats(PartitionRoot*, - const char* partitionName, - bool isLightDump, +BASE_EXPORT void PartitionDumpStats(PartitionRoot*, + const char* partition_name, + bool is_light_dump, PartitionStatsDumper*); -BASE_EXPORT void partitionDumpStatsGeneric(PartitionRootGeneric*, - const char* partitionName, - bool isLightDump, +BASE_EXPORT void PartitionDumpStatsGeneric(PartitionRootGeneric*, + const char* partition_name, + bool is_light_dump, PartitionStatsDumper*); class BASE_EXPORT PartitionAllocHooks { public: - typedef void AllocationHook(void* address, size_t, const char* typeName); + typedef void AllocationHook(void* address, size_t, const char* type_name); typedef void FreeHook(void* address); - static void setAllocationHook(AllocationHook* hook) { - m_allocationHook = hook; + static void SetAllocationHook(AllocationHook* hook) { + allocation_hook_ = hook; } - static void setFreeHook(FreeHook* hook) { m_freeHook = hook; } + static void SetFreeHook(FreeHook* hook) { free_hook_ = hook; } - static void allocationHookIfEnabled(void* address, + static void AllocationHookIfEnabled(void* 
address, size_t size, - const char* typeName) { - AllocationHook* allocationHook = m_allocationHook; - if (UNLIKELY(allocationHook != nullptr)) - allocationHook(address, size, typeName); + const char* type_name) { + AllocationHook* hook = allocation_hook_; + if (UNLIKELY(hook != nullptr)) + hook(address, size, type_name); } - static void freeHookIfEnabled(void* address) { - FreeHook* freeHook = m_freeHook; - if (UNLIKELY(freeHook != nullptr)) - freeHook(address); + static void FreeHookIfEnabled(void* address) { + FreeHook* hook = free_hook_; + if (UNLIKELY(hook != nullptr)) + hook(address); } - static void reallocHookIfEnabled(void* oldAddress, - void* newAddress, + static void ReallocHookIfEnabled(void* old_address, + void* new_address, size_t size, - const char* typeName) { + const char* type_name) { // Report a reallocation as a free followed by an allocation. - AllocationHook* allocationHook = m_allocationHook; - FreeHook* freeHook = m_freeHook; - if (UNLIKELY(allocationHook && freeHook)) { - freeHook(oldAddress); - allocationHook(newAddress, size, typeName); + AllocationHook* allocation_hook = allocation_hook_; + FreeHook* free_hook = free_hook_; + if (UNLIKELY(allocation_hook && free_hook)) { + free_hook(old_address); + allocation_hook(new_address, size, type_name); } } private: // Pointers to hook functions that PartitionAlloc will call on allocation and // free if the pointers are non-null. 
- static AllocationHook* m_allocationHook; - static FreeHook* m_freeHook; + static AllocationHook* allocation_hook_; + static FreeHook* free_hook_; }; -ALWAYS_INLINE PartitionFreelistEntry* partitionFreelistMask( +ALWAYS_INLINE PartitionFreelistEntry* PartitionFreelistMask( PartitionFreelistEntry* ptr) { // We use bswap on little endian as a fast mask for two reasons: // 1) If an object is freed and its vtable used where the attacker doesn't @@ -503,16 +504,18 @@ return reinterpret_cast<PartitionFreelistEntry*>(masked); } -ALWAYS_INLINE size_t partitionCookieSizeAdjustAdd(size_t size) { +ALWAYS_INLINE size_t PartitionCookieSizeAdjustAdd(size_t size) { #if DCHECK_IS_ON() - // Add space for cookies, checking for integer overflow. + // Add space for cookies, checking for integer overflow. TODO(palmer): + // Investigate the performance and code size implications of using + // CheckedNumeric throughout PA. DCHECK(size + (2 * kCookieSize) > size); size += 2 * kCookieSize; #endif return size; } -ALWAYS_INLINE size_t partitionCookieSizeAdjustSubtract(size_t size) { +ALWAYS_INLINE size_t PartitionCookieSizeAdjustSubtract(size_t size) { #if DCHECK_IS_ON() // Remove space for cookies. DCHECK(size >= 2 * kCookieSize); @@ -521,7 +524,7 @@ return size; } -ALWAYS_INLINE void* partitionCookieFreePointerAdjust(void* ptr) { +ALWAYS_INLINE void* PartitionCookieFreePointerAdjust(void* ptr) { #if DCHECK_IS_ON() // The value given to the application is actually just after the cookie. 
ptr = static_cast<char*>(ptr) - kCookieSize; @@ -529,281 +532,283 @@ return ptr; } -ALWAYS_INLINE void partitionCookieWriteValue(void* ptr) { +ALWAYS_INLINE void PartitionCookieWriteValue(void* ptr) { #if DCHECK_IS_ON() - unsigned char* cookiePtr = reinterpret_cast<unsigned char*>(ptr); - for (size_t i = 0; i < kCookieSize; ++i, ++cookiePtr) - *cookiePtr = kCookieValue[i]; + unsigned char* cookie_ptr = reinterpret_cast<unsigned char*>(ptr); + for (size_t i = 0; i < kCookieSize; ++i, ++cookie_ptr) + *cookie_ptr = kCookieValue[i]; #endif } -ALWAYS_INLINE void partitionCookieCheckValue(void* ptr) { +ALWAYS_INLINE void PartitionCookieCheckValue(void* ptr) { #if DCHECK_IS_ON() - unsigned char* cookiePtr = reinterpret_cast<unsigned char*>(ptr); - for (size_t i = 0; i < kCookieSize; ++i, ++cookiePtr) - DCHECK(*cookiePtr == kCookieValue[i]); + unsigned char* cookie_ptr = reinterpret_cast<unsigned char*>(ptr); + for (size_t i = 0; i < kCookieSize; ++i, ++cookie_ptr) + DCHECK(*cookie_ptr == kCookieValue[i]); #endif } -ALWAYS_INLINE char* partitionSuperPageToMetadataArea(char* ptr) { - uintptr_t pointerAsUint = reinterpret_cast<uintptr_t>(ptr); - DCHECK(!(pointerAsUint & kSuperPageOffsetMask)); +ALWAYS_INLINE char* PartitionSuperPageToMetadataArea(char* ptr) { + uintptr_t pointer_as_uint = reinterpret_cast<uintptr_t>(ptr); + DCHECK(!(pointer_as_uint & kSuperPageOffsetMask)); // The metadata area is exactly one system page (the guard page) into the // super page. 
- return reinterpret_cast<char*>(pointerAsUint + kSystemPageSize); + return reinterpret_cast<char*>(pointer_as_uint + kSystemPageSize); } -ALWAYS_INLINE PartitionPage* partitionPointerToPageNoAlignmentCheck(void* ptr) { - uintptr_t pointerAsUint = reinterpret_cast<uintptr_t>(ptr); - char* superPagePtr = - reinterpret_cast<char*>(pointerAsUint & kSuperPageBaseMask); - uintptr_t partitionPageIndex = - (pointerAsUint & kSuperPageOffsetMask) >> kPartitionPageShift; +ALWAYS_INLINE PartitionPage* PartitionPointerToPageNoAlignmentCheck(void* ptr) { + uintptr_t pointer_as_uint = reinterpret_cast<uintptr_t>(ptr); + char* super_page_ptr = + reinterpret_cast<char*>(pointer_as_uint & kSuperPageBaseMask); + uintptr_t partition_page_index = + (pointer_as_uint & kSuperPageOffsetMask) >> kPartitionPageShift; // Index 0 is invalid because it is the metadata and guard area and // the last index is invalid because it is a guard page. - DCHECK(partitionPageIndex); - DCHECK(partitionPageIndex < kNumPartitionPagesPerSuperPage - 1); + DCHECK(partition_page_index); + DCHECK(partition_page_index < kNumPartitionPagesPerSuperPage - 1); PartitionPage* page = reinterpret_cast<PartitionPage*>( - partitionSuperPageToMetadataArea(superPagePtr) + - (partitionPageIndex << kPageMetadataShift)); + PartitionSuperPageToMetadataArea(super_page_ptr) + + (partition_page_index << kPageMetadataShift)); // Partition pages in the same slot span can share the same page object. // Adjust for that. 
- size_t delta = page->pageOffset << kPageMetadataShift; + size_t delta = page->page_offset << kPageMetadataShift; page = reinterpret_cast<PartitionPage*>(reinterpret_cast<char*>(page) - delta); return page; } -ALWAYS_INLINE void* partitionPageToPointer(const PartitionPage* page) { - uintptr_t pointerAsUint = reinterpret_cast<uintptr_t>(page); - uintptr_t superPageOffset = (pointerAsUint & kSuperPageOffsetMask); - DCHECK(superPageOffset > kSystemPageSize); - DCHECK(superPageOffset < kSystemPageSize + (kNumPartitionPagesPerSuperPage * - kPageMetadataSize)); - uintptr_t partitionPageIndex = - (superPageOffset - kSystemPageSize) >> kPageMetadataShift; +ALWAYS_INLINE void* PartitionPageToPointer(const PartitionPage* page) { + uintptr_t pointer_as_uint = reinterpret_cast<uintptr_t>(page); + uintptr_t super_page_offset = (pointer_as_uint & kSuperPageOffsetMask); + DCHECK(super_page_offset > kSystemPageSize); + DCHECK(super_page_offset < kSystemPageSize + (kNumPartitionPagesPerSuperPage * + kPageMetadataSize)); + uintptr_t partition_page_index = + (super_page_offset - kSystemPageSize) >> kPageMetadataShift; // Index 0 is invalid because it is the metadata area and the last index is // invalid because it is a guard page. 
- DCHECK(partitionPageIndex); - DCHECK(partitionPageIndex < kNumPartitionPagesPerSuperPage - 1); - uintptr_t superPageBase = (pointerAsUint & kSuperPageBaseMask); + DCHECK(partition_page_index); + DCHECK(partition_page_index < kNumPartitionPagesPerSuperPage - 1); + uintptr_t super_page_base = (pointer_as_uint & kSuperPageBaseMask); void* ret = reinterpret_cast<void*>( - superPageBase + (partitionPageIndex << kPartitionPageShift)); + super_page_base + (partition_page_index << kPartitionPageShift)); return ret; } -ALWAYS_INLINE PartitionPage* partitionPointerToPage(void* ptr) { - PartitionPage* page = partitionPointerToPageNoAlignmentCheck(ptr); +ALWAYS_INLINE PartitionPage* PartitionPointerToPage(void* ptr) { + PartitionPage* page = PartitionPointerToPageNoAlignmentCheck(ptr); // Checks that the pointer is a multiple of bucket size. DCHECK(!((reinterpret_cast<uintptr_t>(ptr) - - reinterpret_cast<uintptr_t>(partitionPageToPointer(page))) % - page->bucket->slotSize)); + reinterpret_cast<uintptr_t>(PartitionPageToPointer(page))) % + page->bucket->slot_size)); return page; } -ALWAYS_INLINE bool partitionBucketIsDirectMapped( +ALWAYS_INLINE bool PartitionBucketIsDirectMapped( const PartitionBucket* bucket) { - return !bucket->numSystemPagesPerSlotSpan; + return !bucket->num_system_pages_per_slot_span; } -ALWAYS_INLINE size_t partitionBucketBytes(const PartitionBucket* bucket) { - return bucket->numSystemPagesPerSlotSpan * kSystemPageSize; +ALWAYS_INLINE size_t PartitionBucketBytes(const PartitionBucket* bucket) { + return bucket->num_system_pages_per_slot_span * kSystemPageSize; } -ALWAYS_INLINE uint16_t partitionBucketSlots(const PartitionBucket* bucket) { - return static_cast<uint16_t>(partitionBucketBytes(bucket) / bucket->slotSize); +ALWAYS_INLINE uint16_t PartitionBucketSlots(const PartitionBucket* bucket) { + return static_cast<uint16_t>(PartitionBucketBytes(bucket) / + bucket->slot_size); } -ALWAYS_INLINE size_t* partitionPageGetRawSizePtr(PartitionPage* page) { 
+ALWAYS_INLINE size_t* PartitionPageGetRawSizePtr(PartitionPage* page) { // For single-slot buckets which span more than one partition page, we // have some spare metadata space to store the raw allocation size. We // can use this to report better statistics. PartitionBucket* bucket = page->bucket; - if (bucket->slotSize <= kMaxSystemPagesPerSlotSpan * kSystemPageSize) + if (bucket->slot_size <= kMaxSystemPagesPerSlotSpan * kSystemPageSize) return nullptr; - DCHECK((bucket->slotSize % kSystemPageSize) == 0); - DCHECK(partitionBucketIsDirectMapped(bucket) || - partitionBucketSlots(bucket) == 1); + DCHECK((bucket->slot_size % kSystemPageSize) == 0); + DCHECK(PartitionBucketIsDirectMapped(bucket) || + PartitionBucketSlots(bucket) == 1); page++; - return reinterpret_cast<size_t*>(&page->freelistHead); + return reinterpret_cast<size_t*>(&page->freelist_head); } -ALWAYS_INLINE size_t partitionPageGetRawSize(PartitionPage* page) { - size_t* rawSizePtr = partitionPageGetRawSizePtr(page); - if (UNLIKELY(rawSizePtr != nullptr)) - return *rawSizePtr; +ALWAYS_INLINE size_t PartitionPageGetRawSize(PartitionPage* page) { + size_t* raw_size_ptr = PartitionPageGetRawSizePtr(page); + if (UNLIKELY(raw_size_ptr != nullptr)) + return *raw_size_ptr; return 0; } -ALWAYS_INLINE PartitionRootBase* partitionPageToRoot(PartitionPage* page) { - PartitionSuperPageExtentEntry* extentEntry = +ALWAYS_INLINE PartitionRootBase* PartitionPageToRoot(PartitionPage* page) { + PartitionSuperPageExtentEntry* extent_entry = reinterpret_cast<PartitionSuperPageExtentEntry*>( reinterpret_cast<uintptr_t>(page) & kSystemPageBaseMask); - return extentEntry->root; + return extent_entry->root; } -ALWAYS_INLINE bool partitionPointerIsValid(void* ptr) { - PartitionPage* page = partitionPointerToPage(ptr); - PartitionRootBase* root = partitionPageToRoot(page); - return root->invertedSelf == ~reinterpret_cast<uintptr_t>(root); +ALWAYS_INLINE bool PartitionPointerIsValid(void* ptr) { + PartitionPage* page = 
PartitionPointerToPage(ptr); + PartitionRootBase* root = PartitionPageToRoot(page); + return root->inverted_self == ~reinterpret_cast<uintptr_t>(root); } -ALWAYS_INLINE void* partitionBucketAlloc(PartitionRootBase* root, +ALWAYS_INLINE void* PartitionBucketAlloc(PartitionRootBase* root, int flags, size_t size, PartitionBucket* bucket) { - PartitionPage* page = bucket->activePagesHead; + PartitionPage* page = bucket->active_pages_head; // Check that this page is neither full nor freed. - DCHECK(page->numAllocatedSlots >= 0); - void* ret = page->freelistHead; + DCHECK(page->num_allocated_slots >= 0); + void* ret = page->freelist_head; if (LIKELY(ret != 0)) { // If these asserts fire, you probably corrupted memory. - DCHECK(partitionPointerIsValid(ret)); + DCHECK(PartitionPointerIsValid(ret)); // All large allocations must go through the slow path to correctly // update the size metadata. - DCHECK(partitionPageGetRawSize(page) == 0); - PartitionFreelistEntry* newHead = - partitionFreelistMask(static_cast<PartitionFreelistEntry*>(ret)->next); - page->freelistHead = newHead; - page->numAllocatedSlots++; + DCHECK(PartitionPageGetRawSize(page) == 0); + PartitionFreelistEntry* new_head = + PartitionFreelistMask(static_cast<PartitionFreelistEntry*>(ret)->next); + page->freelist_head = new_head; + page->num_allocated_slots++; } else { - ret = partitionAllocSlowPath(root, flags, size, bucket); - DCHECK(!ret || partitionPointerIsValid(ret)); + ret = PartitionAllocSlowPath(root, flags, size, bucket); + DCHECK(!ret || PartitionPointerIsValid(ret)); } #if DCHECK_IS_ON() if (!ret) return 0; // Fill the uninitialized pattern, and write the cookies. 
- page = partitionPointerToPage(ret); - size_t slotSize = page->bucket->slotSize; - size_t rawSize = partitionPageGetRawSize(page); - if (rawSize) { - DCHECK(rawSize == size); - slotSize = rawSize; + page = PartitionPointerToPage(ret); + size_t slot_size = page->bucket->slot_size; + size_t raw_size = PartitionPageGetRawSize(page); + if (raw_size) { + DCHECK(raw_size == size); + slot_size = raw_size; } - size_t noCookieSize = partitionCookieSizeAdjustSubtract(slotSize); - char* charRet = static_cast<char*>(ret); + size_t no_cookie_size = PartitionCookieSizeAdjustSubtract(slot_size); + char* char_ret = static_cast<char*>(ret); // The value given to the application is actually just after the cookie. - ret = charRet + kCookieSize; - memset(ret, kUninitializedByte, noCookieSize); - partitionCookieWriteValue(charRet); - partitionCookieWriteValue(charRet + kCookieSize + noCookieSize); + ret = char_ret + kCookieSize; + memset(ret, kUninitializedByte, no_cookie_size); + PartitionCookieWriteValue(char_ret); + PartitionCookieWriteValue(char_ret + kCookieSize + no_cookie_size); #endif return ret; } -ALWAYS_INLINE void* partitionAlloc(PartitionRoot* root, +ALWAYS_INLINE void* PartitionAlloc(PartitionRoot* root, size_t size, - const char* typeName) { + const char* type_name) { #if defined(MEMORY_TOOL_REPLACES_ALLOCATOR) void* result = malloc(size); CHECK(result); return result; #else - size_t requestedSize = size; - size = partitionCookieSizeAdjustAdd(size); + size_t requested_size = size; + size = PartitionCookieSizeAdjustAdd(size); DCHECK(root->initialized); size_t index = size >> kBucketShift; - DCHECK(index < root->numBuckets); + DCHECK(index < root->num_buckets); DCHECK(size == index << kBucketShift); PartitionBucket* bucket = &root->buckets()[index]; - void* result = partitionBucketAlloc(root, 0, size, bucket); - PartitionAllocHooks::allocationHookIfEnabled(result, requestedSize, typeName); + void* result = PartitionBucketAlloc(root, 0, size, bucket); + 
PartitionAllocHooks::AllocationHookIfEnabled(result, requested_size, + type_name); return result; #endif // defined(MEMORY_TOOL_REPLACES_ALLOCATOR) } -ALWAYS_INLINE void partitionFreeWithPage(void* ptr, PartitionPage* page) { +ALWAYS_INLINE void PartitionFreeWithPage(void* ptr, PartitionPage* page) { // If these asserts fire, you probably corrupted memory. #if DCHECK_IS_ON() - size_t slotSize = page->bucket->slotSize; - size_t rawSize = partitionPageGetRawSize(page); - if (rawSize) - slotSize = rawSize; - partitionCookieCheckValue(ptr); - partitionCookieCheckValue(reinterpret_cast<char*>(ptr) + slotSize - + size_t slot_size = page->bucket->slot_size; + size_t raw_size = PartitionPageGetRawSize(page); + if (raw_size) + slot_size = raw_size; + PartitionCookieCheckValue(ptr); + PartitionCookieCheckValue(reinterpret_cast<char*>(ptr) + slot_size - kCookieSize); - memset(ptr, kFreedByte, slotSize); + memset(ptr, kFreedByte, slot_size); #endif - DCHECK(page->numAllocatedSlots); - PartitionFreelistEntry* freelistHead = page->freelistHead; - DCHECK(!freelistHead || partitionPointerIsValid(freelistHead)); - CHECK(ptr != freelistHead); // Catches an immediate double free. + DCHECK(page->num_allocated_slots); + PartitionFreelistEntry* freelist_head = page->freelist_head; + DCHECK(!freelist_head || PartitionPointerIsValid(freelist_head)); + CHECK(ptr != freelist_head); // Catches an immediate double free. // Look for double free one level deeper in debug. 
- DCHECK(!freelistHead || ptr != partitionFreelistMask(freelistHead->next)); + DCHECK(!freelist_head || ptr != PartitionFreelistMask(freelist_head->next)); PartitionFreelistEntry* entry = static_cast<PartitionFreelistEntry*>(ptr); - entry->next = partitionFreelistMask(freelistHead); - page->freelistHead = entry; - --page->numAllocatedSlots; - if (UNLIKELY(page->numAllocatedSlots <= 0)) { - partitionFreeSlowPath(page); + entry->next = PartitionFreelistMask(freelist_head); + page->freelist_head = entry; + --page->num_allocated_slots; + if (UNLIKELY(page->num_allocated_slots <= 0)) { + PartitionFreeSlowPath(page); } else { // All single-slot allocations must go through the slow path to // correctly update the size metadata. - DCHECK(partitionPageGetRawSize(page) == 0); + DCHECK(PartitionPageGetRawSize(page) == 0); } } -ALWAYS_INLINE void partitionFree(void* ptr) { +ALWAYS_INLINE void PartitionFree(void* ptr) { #if defined(MEMORY_TOOL_REPLACES_ALLOCATOR) free(ptr); #else - PartitionAllocHooks::freeHookIfEnabled(ptr); - ptr = partitionCookieFreePointerAdjust(ptr); - DCHECK(partitionPointerIsValid(ptr)); - PartitionPage* page = partitionPointerToPage(ptr); - partitionFreeWithPage(ptr, page); + PartitionAllocHooks::FreeHookIfEnabled(ptr); + ptr = PartitionCookieFreePointerAdjust(ptr); + DCHECK(PartitionPointerIsValid(ptr)); + PartitionPage* page = PartitionPointerToPage(ptr); + PartitionFreeWithPage(ptr, page); #endif } -ALWAYS_INLINE PartitionBucket* partitionGenericSizeToBucket( +ALWAYS_INLINE PartitionBucket* PartitionGenericSizeToBucket( PartitionRootGeneric* root, size_t size) { - size_t order = kBitsPerSizet - bits::CountLeadingZeroBitsSizeT(size); + size_t order = kBitsPerSizeT - bits::CountLeadingZeroBitsSizeT(size); // The order index is simply the next few bits after the most significant bit. 
- size_t orderIndex = (size >> root->orderIndexShifts[order]) & - (kGenericNumBucketsPerOrder - 1); + size_t order_index = (size >> root->order_index_shifts[order]) & + (kGenericNumBucketsPerOrder - 1); // And if the remaining bits are non-zero we must bump the bucket up. - size_t subOrderIndex = size & root->orderSubIndexMasks[order]; + size_t sub_order_index = size & root->order_sub_index_masks[order]; PartitionBucket* bucket = - root->bucketLookups[(order << kGenericNumBucketsPerOrderBits) + - orderIndex + !!subOrderIndex]; - DCHECK(!bucket->slotSize || bucket->slotSize >= size); - DCHECK(!(bucket->slotSize % kGenericSmallestBucket)); + root->bucket_lookups[(order << kGenericNumBucketsPerOrderBits) + + order_index + !!sub_order_index]; + DCHECK(!bucket->slot_size || bucket->slot_size >= size); + DCHECK(!(bucket->slot_size % kGenericSmallestBucket)); return bucket; } -ALWAYS_INLINE void* partitionAllocGenericFlags(PartitionRootGeneric* root, +ALWAYS_INLINE void* PartitionAllocGenericFlags(PartitionRootGeneric* root, int flags, size_t size, - const char* typeName) { + const char* type_name) { #if defined(MEMORY_TOOL_REPLACES_ALLOCATOR) void* result = malloc(size); CHECK(result || flags & PartitionAllocReturnNull); return result; #else DCHECK(root->initialized); - size_t requestedSize = size; - size = partitionCookieSizeAdjustAdd(size); - PartitionBucket* bucket = partitionGenericSizeToBucket(root, size); + size_t requested_size = size; + size = PartitionCookieSizeAdjustAdd(size); + PartitionBucket* bucket = PartitionGenericSizeToBucket(root, size); void* ret = nullptr; { subtle::SpinLock::Guard guard(root->lock); - ret = partitionBucketAlloc(root, flags, size, bucket); + ret = PartitionBucketAlloc(root, flags, size, bucket); } - PartitionAllocHooks::allocationHookIfEnabled(ret, requestedSize, typeName); + PartitionAllocHooks::AllocationHookIfEnabled(ret, requested_size, type_name); return ret; #endif } -ALWAYS_INLINE void* 
partitionAllocGeneric(PartitionRootGeneric* root, +ALWAYS_INLINE void* PartitionAllocGeneric(PartitionRootGeneric* root, size_t size, - const char* typeName) { - return partitionAllocGenericFlags(root, 0, size, typeName); + const char* type_name) { + return PartitionAllocGenericFlags(root, 0, size, type_name); } -ALWAYS_INLINE void partitionFreeGeneric(PartitionRootGeneric* root, void* ptr) { +ALWAYS_INLINE void PartitionFreeGeneric(PartitionRootGeneric* root, void* ptr) { #if defined(MEMORY_TOOL_REPLACES_ALLOCATOR) free(ptr); #else @@ -812,18 +817,18 @@ if (UNLIKELY(!ptr)) return; - PartitionAllocHooks::freeHookIfEnabled(ptr); - ptr = partitionCookieFreePointerAdjust(ptr); - DCHECK(partitionPointerIsValid(ptr)); - PartitionPage* page = partitionPointerToPage(ptr); + PartitionAllocHooks::FreeHookIfEnabled(ptr); + ptr = PartitionCookieFreePointerAdjust(ptr); + DCHECK(PartitionPointerIsValid(ptr)); + PartitionPage* page = PartitionPointerToPage(ptr); { subtle::SpinLock::Guard guard(root->lock); - partitionFreeWithPage(ptr, page); + PartitionFreeWithPage(ptr, page); } #endif } -ALWAYS_INLINE size_t partitionDirectMapSize(size_t size) { +ALWAYS_INLINE size_t PartitionDirectMapSize(size_t size) { // Caller must check that the size is not above the kGenericMaxDirectMapped // limit before calling. This also guards against integer overflow in the // calculation here. 
@@ -831,27 +836,27 @@ return (size + kSystemPageOffsetMask) & kSystemPageBaseMask; } -ALWAYS_INLINE size_t partitionAllocActualSize(PartitionRootGeneric* root, +ALWAYS_INLINE size_t PartitionAllocActualSize(PartitionRootGeneric* root, size_t size) { #if defined(MEMORY_TOOL_REPLACES_ALLOCATOR) return size; #else DCHECK(root->initialized); - size = partitionCookieSizeAdjustAdd(size); - PartitionBucket* bucket = partitionGenericSizeToBucket(root, size); - if (LIKELY(!partitionBucketIsDirectMapped(bucket))) { - size = bucket->slotSize; + size = PartitionCookieSizeAdjustAdd(size); + PartitionBucket* bucket = PartitionGenericSizeToBucket(root, size); + if (LIKELY(!PartitionBucketIsDirectMapped(bucket))) { + size = bucket->slot_size; } else if (size > kGenericMaxDirectMapped) { // Too large to allocate => return the size unchanged. } else { DCHECK(bucket == &PartitionRootBase::gPagedBucket); - size = partitionDirectMapSize(size); + size = PartitionDirectMapSize(size); } - return partitionCookieSizeAdjustSubtract(size); + return PartitionCookieSizeAdjustSubtract(size); #endif } -ALWAYS_INLINE bool partitionAllocSupportsGetSize() { +ALWAYS_INLINE bool PartitionAllocSupportsGetSize() { #if defined(MEMORY_TOOL_REPLACES_ALLOCATOR) return false; #else @@ -859,15 +864,15 @@ #endif } -ALWAYS_INLINE size_t partitionAllocGetSize(void* ptr) { - // No need to lock here. Only 'ptr' being freed by another thread could +ALWAYS_INLINE size_t PartitionAllocGetSize(void* ptr) { + // No need to lock here. Only |ptr| being freed by another thread could // cause trouble, and the caller is responsible for that not happening. 
- DCHECK(partitionAllocSupportsGetSize()); - ptr = partitionCookieFreePointerAdjust(ptr); - DCHECK(partitionPointerIsValid(ptr)); - PartitionPage* page = partitionPointerToPage(ptr); - size_t size = page->bucket->slotSize; - return partitionCookieSizeAdjustSubtract(size); + DCHECK(PartitionAllocSupportsGetSize()); + ptr = PartitionCookieFreePointerAdjust(ptr); + DCHECK(PartitionPointerIsValid(ptr)); + PartitionPage* page = PartitionPointerToPage(ptr); + size_t size = page->bucket->slot_size; + return PartitionCookieSizeAdjustSubtract(size); } // N (or more accurately, N - sizeof(void*)) represents the largest size in @@ -879,24 +884,24 @@ static const size_t kMaxAllocation = N - kAllocationGranularity; static const size_t kNumBuckets = N / kAllocationGranularity; void init() { - partitionAllocInit(&m_partitionRoot, kNumBuckets, kMaxAllocation); + PartitionAllocInit(&partition_root_, kNumBuckets, kMaxAllocation); } - bool shutdown() { return partitionAllocShutdown(&m_partitionRoot); } - ALWAYS_INLINE PartitionRoot* root() { return &m_partitionRoot; } + bool shutdown() { return PartitionAllocShutdown(&partition_root_); } + ALWAYS_INLINE PartitionRoot* root() { return &partition_root_; } private: - PartitionRoot m_partitionRoot; - PartitionBucket m_actualBuckets[kNumBuckets]; + PartitionRoot partition_root_; + PartitionBucket actual_buckets_[kNumBuckets]; }; class PartitionAllocatorGeneric { public: - void init() { partitionAllocGenericInit(&m_partitionRoot); } - bool shutdown() { return partitionAllocGenericShutdown(&m_partitionRoot); } - ALWAYS_INLINE PartitionRootGeneric* root() { return &m_partitionRoot; } + void init() { PartitionAllocGenericInit(&partition_root_); } + bool shutdown() { return PartitionAllocGenericShutdown(&partition_root_); } + ALWAYS_INLINE PartitionRootGeneric* root() { return &partition_root_; } private: - PartitionRootGeneric m_partitionRoot; + PartitionRootGeneric partition_root_; }; } // namespace base
diff --git a/base/allocator/partition_allocator/partition_alloc_unittest.cc b/base/allocator/partition_allocator/partition_alloc_unittest.cc index b404ab752..dd1136f 100644 --- a/base/allocator/partition_allocator/partition_alloc_unittest.cc +++ b/base/allocator/partition_allocator/partition_alloc_unittest.cc
@@ -40,7 +40,7 @@ const size_t kTestMaxAllocation = 4096; SizeSpecificPartitionAllocator<kTestMaxAllocation> allocator; -PartitionAllocatorGeneric genericAllocator; +PartitionAllocatorGeneric generic_allocator; const size_t kTestAllocSize = 16; #if !DCHECK_IS_ON() @@ -53,18 +53,18 @@ const size_t kRealAllocSize = kTestAllocSize + kExtraAllocSize; const size_t kTestBucketIndex = kRealAllocSize >> kBucketShift; -const char* typeName = nullptr; +const char* type_name = nullptr; void TestSetup() { allocator.init(); - genericAllocator.init(); + generic_allocator.init(); } void TestShutdown() { // We expect no leaks in the general case. We have a test for leak // detection. EXPECT_TRUE(allocator.shutdown()); - EXPECT_TRUE(genericAllocator.shutdown()); + EXPECT_TRUE(generic_allocator.shutdown()); } #if !defined(ARCH_CPU_64_BITS) || defined(OS_POSIX) @@ -111,72 +111,73 @@ #endif PartitionPage* GetFullPage(size_t size) { - size_t realSize = size + kExtraAllocSize; - size_t bucketIdx = realSize >> kBucketShift; - PartitionBucket* bucket = &allocator.root()->buckets()[bucketIdx]; - size_t numSlots = - (bucket->numSystemPagesPerSlotSpan * kSystemPageSize) / realSize; + size_t real_size = size + kExtraAllocSize; + size_t bucket_index = real_size >> kBucketShift; + PartitionBucket* bucket = &allocator.root()->buckets()[bucket_index]; + size_t num_slots = + (bucket->num_system_pages_per_slot_span * kSystemPageSize) / real_size; void* first = 0; void* last = 0; size_t i; - for (i = 0; i < numSlots; ++i) { - void* ptr = partitionAlloc(allocator.root(), size, typeName); + for (i = 0; i < num_slots; ++i) { + void* ptr = PartitionAlloc(allocator.root(), size, type_name); EXPECT_TRUE(ptr); if (!i) - first = partitionCookieFreePointerAdjust(ptr); - else if (i == numSlots - 1) - last = partitionCookieFreePointerAdjust(ptr); + first = PartitionCookieFreePointerAdjust(ptr); + else if (i == num_slots - 1) + last = PartitionCookieFreePointerAdjust(ptr); } - 
EXPECT_EQ(partitionPointerToPage(first), partitionPointerToPage(last)); - if (bucket->numSystemPagesPerSlotSpan == kNumSystemPagesPerPartitionPage) + EXPECT_EQ(PartitionPointerToPage(first), PartitionPointerToPage(last)); + if (bucket->num_system_pages_per_slot_span == kNumSystemPagesPerPartitionPage) EXPECT_EQ(reinterpret_cast<size_t>(first) & kPartitionPageBaseMask, reinterpret_cast<size_t>(last) & kPartitionPageBaseMask); - EXPECT_EQ(numSlots, - static_cast<size_t>(bucket->activePagesHead->numAllocatedSlots)); - EXPECT_EQ(0, bucket->activePagesHead->freelistHead); - EXPECT_TRUE(bucket->activePagesHead); - EXPECT_TRUE(bucket->activePagesHead != &PartitionRootGeneric::gSeedPage); - return bucket->activePagesHead; + EXPECT_EQ(num_slots, static_cast<size_t>( + bucket->active_pages_head->num_allocated_slots)); + EXPECT_EQ(0, bucket->active_pages_head->freelist_head); + EXPECT_TRUE(bucket->active_pages_head); + EXPECT_TRUE(bucket->active_pages_head != &PartitionRootGeneric::gSeedPage); + return bucket->active_pages_head; } void FreeFullPage(PartitionPage* page) { - size_t size = page->bucket->slotSize; - size_t numSlots = - (page->bucket->numSystemPagesPerSlotSpan * kSystemPageSize) / size; - EXPECT_EQ(numSlots, static_cast<size_t>(abs(page->numAllocatedSlots))); - char* ptr = reinterpret_cast<char*>(partitionPageToPointer(page)); + size_t size = page->bucket->slot_size; + size_t num_slots = + (page->bucket->num_system_pages_per_slot_span * kSystemPageSize) / size; + EXPECT_EQ(num_slots, static_cast<size_t>(abs(page->num_allocated_slots))); + char* ptr = reinterpret_cast<char*>(PartitionPageToPointer(page)); size_t i; - for (i = 0; i < numSlots; ++i) { - partitionFree(ptr + kPointerOffset); + for (i = 0; i < num_slots; ++i) { + PartitionFree(ptr + kPointerOffset); ptr += size; } } void CycleFreeCache(size_t size) { - size_t realSize = size + kExtraAllocSize; - size_t bucketIdx = realSize >> kBucketShift; - PartitionBucket* bucket = 
&allocator.root()->buckets()[bucketIdx]; - DCHECK(!bucket->activePagesHead->numAllocatedSlots); + size_t real_size = size + kExtraAllocSize; + size_t bucket_index = real_size >> kBucketShift; + PartitionBucket* bucket = &allocator.root()->buckets()[bucket_index]; + DCHECK(!bucket->active_pages_head->num_allocated_slots); for (size_t i = 0; i < kMaxFreeableSpans; ++i) { - void* ptr = partitionAlloc(allocator.root(), size, typeName); - EXPECT_EQ(1, bucket->activePagesHead->numAllocatedSlots); - partitionFree(ptr); - EXPECT_EQ(0, bucket->activePagesHead->numAllocatedSlots); - EXPECT_NE(-1, bucket->activePagesHead->emptyCacheIndex); + void* ptr = PartitionAlloc(allocator.root(), size, type_name); + EXPECT_EQ(1, bucket->active_pages_head->num_allocated_slots); + PartitionFree(ptr); + EXPECT_EQ(0, bucket->active_pages_head->num_allocated_slots); + EXPECT_NE(-1, bucket->active_pages_head->empty_cache_index); } } void CycleGenericFreeCache(size_t size) { for (size_t i = 0; i < kMaxFreeableSpans; ++i) { - void* ptr = partitionAllocGeneric(genericAllocator.root(), size, typeName); + void* ptr = + PartitionAllocGeneric(generic_allocator.root(), size, type_name); PartitionPage* page = - partitionPointerToPage(partitionCookieFreePointerAdjust(ptr)); + PartitionPointerToPage(PartitionCookieFreePointerAdjust(ptr)); PartitionBucket* bucket = page->bucket; - EXPECT_EQ(1, bucket->activePagesHead->numAllocatedSlots); - partitionFreeGeneric(genericAllocator.root(), ptr); - EXPECT_EQ(0, bucket->activePagesHead->numAllocatedSlots); - EXPECT_NE(-1, bucket->activePagesHead->emptyCacheIndex); + EXPECT_EQ(1, bucket->active_pages_head->num_allocated_slots); + PartitionFreeGeneric(generic_allocator.root(), ptr); + EXPECT_EQ(0, bucket->active_pages_head->num_allocated_slots); + EXPECT_NE(-1, bucket->active_pages_head->empty_cache_index); } } @@ -195,52 +196,52 @@ class MockPartitionStatsDumper : public PartitionStatsDumper { public: MockPartitionStatsDumper() - : m_totalResidentBytes(0), - 
m_totalActiveBytes(0), - m_totalDecommittableBytes(0), - m_totalDiscardableBytes(0) {} + : total_resident_bytes(0), + total_active_bytes(0), + total_decommittable_bytes(0), + total_discardable_bytes(0) {} - void partitionDumpTotals(const char* partitionName, - const PartitionMemoryStats* memoryStats) override { - EXPECT_GE(memoryStats->totalMmappedBytes, memoryStats->totalResidentBytes); - EXPECT_EQ(m_totalResidentBytes, memoryStats->totalResidentBytes); - EXPECT_EQ(m_totalActiveBytes, memoryStats->totalActiveBytes); - EXPECT_EQ(m_totalDecommittableBytes, memoryStats->totalDecommittableBytes); - EXPECT_EQ(m_totalDiscardableBytes, memoryStats->totalDiscardableBytes); + void PartitionDumpTotals(const char* partition_name, + const PartitionMemoryStats* stats) override { + EXPECT_GE(stats->total_mmapped_bytes, stats->total_resident_bytes); + EXPECT_EQ(total_resident_bytes, stats->total_resident_bytes); + EXPECT_EQ(total_active_bytes, stats->total_active_bytes); + EXPECT_EQ(total_decommittable_bytes, stats->total_decommittable_bytes); + EXPECT_EQ(total_discardable_bytes, stats->total_discardable_bytes); } - void partitionsDumpBucketStats( - const char* partitionName, - const PartitionBucketMemoryStats* memoryStats) override { - (void)partitionName; - EXPECT_TRUE(memoryStats->isValid); - EXPECT_EQ(0u, memoryStats->bucketSlotSize & kAllocationGranularityMask); - m_bucketStats.push_back(*memoryStats); - m_totalResidentBytes += memoryStats->residentBytes; - m_totalActiveBytes += memoryStats->activeBytes; - m_totalDecommittableBytes += memoryStats->decommittableBytes; - m_totalDiscardableBytes += memoryStats->discardableBytes; + void PartitionsDumpBucketStats( + const char* partition_name, + const PartitionBucketMemoryStats* stats) override { + (void)partition_name; + EXPECT_TRUE(stats->is_valid); + EXPECT_EQ(0u, stats->bucket_slot_size & kAllocationGranularityMask); + bucket_stats.push_back(*stats); + total_resident_bytes += stats->resident_bytes; + total_active_bytes += 
stats->active_bytes; + total_decommittable_bytes += stats->decommittable_bytes; + total_discardable_bytes += stats->discardable_bytes; } bool IsMemoryAllocationRecorded() { - return m_totalResidentBytes != 0 && m_totalActiveBytes != 0; + return total_resident_bytes != 0 && total_active_bytes != 0; } - const PartitionBucketMemoryStats* GetBucketStats(size_t bucketSize) { - for (size_t i = 0; i < m_bucketStats.size(); ++i) { - if (m_bucketStats[i].bucketSlotSize == bucketSize) - return &m_bucketStats[i]; + const PartitionBucketMemoryStats* GetBucketStats(size_t bucket_size) { + for (size_t i = 0; i < bucket_stats.size(); ++i) { + if (bucket_stats[i].bucket_slot_size == bucket_size) + return &bucket_stats[i]; } return 0; } private: - size_t m_totalResidentBytes; - size_t m_totalActiveBytes; - size_t m_totalDecommittableBytes; - size_t m_totalDiscardableBytes; + size_t total_resident_bytes; + size_t total_active_bytes; + size_t total_decommittable_bytes; + size_t total_discardable_bytes; - std::vector<PartitionBucketMemoryStats> m_bucketStats; + std::vector<PartitionBucketMemoryStats> bucket_stats; }; } // anonymous namespace @@ -251,12 +252,12 @@ PartitionBucket* bucket = &allocator.root()->buckets()[kTestBucketIndex]; PartitionPage* seedPage = &PartitionRootGeneric::gSeedPage; - EXPECT_FALSE(bucket->emptyPagesHead); - EXPECT_FALSE(bucket->decommittedPagesHead); - EXPECT_EQ(seedPage, bucket->activePagesHead); - EXPECT_EQ(0, bucket->activePagesHead->nextPage); + EXPECT_FALSE(bucket->empty_pages_head); + EXPECT_FALSE(bucket->decommitted_pages_head); + EXPECT_EQ(seedPage, bucket->active_pages_head); + EXPECT_EQ(0, bucket->active_pages_head->next_page); - void* ptr = partitionAlloc(allocator.root(), kTestAllocSize, typeName); + void* ptr = PartitionAlloc(allocator.root(), kTestAllocSize, type_name); EXPECT_TRUE(ptr); EXPECT_EQ(kPointerOffset, reinterpret_cast<size_t>(ptr) & kPartitionPageOffsetMask); @@ -264,11 +265,11 @@ EXPECT_EQ(kPartitionPageSize + kPointerOffset, 
reinterpret_cast<size_t>(ptr) & kSuperPageOffsetMask); - partitionFree(ptr); + PartitionFree(ptr); // Expect that the last active page gets noticed as empty but doesn't get // decommitted. - EXPECT_TRUE(bucket->emptyPagesHead); - EXPECT_FALSE(bucket->decommittedPagesHead); + EXPECT_TRUE(bucket->empty_pages_head); + EXPECT_FALSE(bucket->decommitted_pages_head); TestShutdown(); } @@ -276,13 +277,13 @@ // Check that we can detect a memory leak. TEST(PartitionAllocTest, SimpleLeak) { TestSetup(); - void* leakedPtr = partitionAlloc(allocator.root(), kTestAllocSize, typeName); + void* leakedPtr = PartitionAlloc(allocator.root(), kTestAllocSize, type_name); (void)leakedPtr; - void* leakedPtr2 = - partitionAllocGeneric(genericAllocator.root(), kTestAllocSize, typeName); + void* leakedPtr2 = PartitionAllocGeneric(generic_allocator.root(), + kTestAllocSize, type_name); (void)leakedPtr2; EXPECT_FALSE(allocator.shutdown()); - EXPECT_FALSE(genericAllocator.shutdown()); + EXPECT_FALSE(generic_allocator.shutdown()); } // Test multiple allocations, and freelist handling. @@ -290,37 +291,37 @@ TestSetup(); char* ptr1 = reinterpret_cast<char*>( - partitionAlloc(allocator.root(), kTestAllocSize, typeName)); + PartitionAlloc(allocator.root(), kTestAllocSize, type_name)); char* ptr2 = reinterpret_cast<char*>( - partitionAlloc(allocator.root(), kTestAllocSize, typeName)); + PartitionAlloc(allocator.root(), kTestAllocSize, type_name)); EXPECT_TRUE(ptr1); EXPECT_TRUE(ptr2); ptrdiff_t diff = ptr2 - ptr1; EXPECT_EQ(static_cast<ptrdiff_t>(kRealAllocSize), diff); // Check that we re-use the just-freed slot. 
- partitionFree(ptr2); + PartitionFree(ptr2); ptr2 = reinterpret_cast<char*>( - partitionAlloc(allocator.root(), kTestAllocSize, typeName)); + PartitionAlloc(allocator.root(), kTestAllocSize, type_name)); EXPECT_TRUE(ptr2); diff = ptr2 - ptr1; EXPECT_EQ(static_cast<ptrdiff_t>(kRealAllocSize), diff); - partitionFree(ptr1); + PartitionFree(ptr1); ptr1 = reinterpret_cast<char*>( - partitionAlloc(allocator.root(), kTestAllocSize, typeName)); + PartitionAlloc(allocator.root(), kTestAllocSize, type_name)); EXPECT_TRUE(ptr1); diff = ptr2 - ptr1; EXPECT_EQ(static_cast<ptrdiff_t>(kRealAllocSize), diff); char* ptr3 = reinterpret_cast<char*>( - partitionAlloc(allocator.root(), kTestAllocSize, typeName)); + PartitionAlloc(allocator.root(), kTestAllocSize, type_name)); EXPECT_TRUE(ptr3); diff = ptr3 - ptr1; EXPECT_EQ(static_cast<ptrdiff_t>(kRealAllocSize * 2), diff); - partitionFree(ptr1); - partitionFree(ptr2); - partitionFree(ptr3); + PartitionFree(ptr1); + PartitionFree(ptr2); + PartitionFree(ptr3); TestShutdown(); } @@ -332,39 +333,39 @@ PartitionPage* page = GetFullPage(kTestAllocSize); FreeFullPage(page); - EXPECT_TRUE(bucket->emptyPagesHead); - EXPECT_EQ(&PartitionRootGeneric::gSeedPage, bucket->activePagesHead); - EXPECT_EQ(0, page->nextPage); - EXPECT_EQ(0, page->numAllocatedSlots); + EXPECT_TRUE(bucket->empty_pages_head); + EXPECT_EQ(&PartitionRootGeneric::gSeedPage, bucket->active_pages_head); + EXPECT_EQ(0, page->next_page); + EXPECT_EQ(0, page->num_allocated_slots); page = GetFullPage(kTestAllocSize); PartitionPage* page2 = GetFullPage(kTestAllocSize); - EXPECT_EQ(page2, bucket->activePagesHead); - EXPECT_EQ(0, page2->nextPage); - EXPECT_EQ(reinterpret_cast<uintptr_t>(partitionPageToPointer(page)) & + EXPECT_EQ(page2, bucket->active_pages_head); + EXPECT_EQ(0, page2->next_page); + EXPECT_EQ(reinterpret_cast<uintptr_t>(PartitionPageToPointer(page)) & kSuperPageBaseMask, - reinterpret_cast<uintptr_t>(partitionPageToPointer(page2)) & + 
reinterpret_cast<uintptr_t>(PartitionPageToPointer(page2)) & kSuperPageBaseMask); // Fully free the non-current page. This will leave us with no current // active page because one is empty and the other is full. FreeFullPage(page); - EXPECT_EQ(0, page->numAllocatedSlots); - EXPECT_TRUE(bucket->emptyPagesHead); - EXPECT_EQ(&PartitionRootGeneric::gSeedPage, bucket->activePagesHead); + EXPECT_EQ(0, page->num_allocated_slots); + EXPECT_TRUE(bucket->empty_pages_head); + EXPECT_EQ(&PartitionRootGeneric::gSeedPage, bucket->active_pages_head); // Allocate a new page, it should pull from the freelist. page = GetFullPage(kTestAllocSize); - EXPECT_FALSE(bucket->emptyPagesHead); - EXPECT_EQ(page, bucket->activePagesHead); + EXPECT_FALSE(bucket->empty_pages_head); + EXPECT_EQ(page, bucket->active_pages_head); FreeFullPage(page); FreeFullPage(page2); - EXPECT_EQ(0, page->numAllocatedSlots); - EXPECT_EQ(0, page2->numAllocatedSlots); - EXPECT_EQ(0, page2->numUnprovisionedSlots); - EXPECT_NE(-1, page2->emptyCacheIndex); + EXPECT_EQ(0, page->num_allocated_slots); + EXPECT_EQ(0, page2->num_allocated_slots); + EXPECT_EQ(0, page2->num_unprovisioned_slots); + EXPECT_NE(-1, page2->empty_cache_index); TestShutdown(); } @@ -375,49 +376,49 @@ PartitionBucket* bucket = &allocator.root()->buckets()[kTestBucketIndex]; PartitionPage* page1 = GetFullPage(kTestAllocSize); - EXPECT_EQ(page1, bucket->activePagesHead); - EXPECT_EQ(0, page1->nextPage); + EXPECT_EQ(page1, bucket->active_pages_head); + EXPECT_EQ(0, page1->next_page); PartitionPage* page2 = GetFullPage(kTestAllocSize); - EXPECT_EQ(page2, bucket->activePagesHead); - EXPECT_EQ(0, page2->nextPage); + EXPECT_EQ(page2, bucket->active_pages_head); + EXPECT_EQ(0, page2->next_page); // Bounce page1 back into the non-full list then fill it up again. 
char* ptr = - reinterpret_cast<char*>(partitionPageToPointer(page1)) + kPointerOffset; - partitionFree(ptr); - EXPECT_EQ(page1, bucket->activePagesHead); - (void)partitionAlloc(allocator.root(), kTestAllocSize, typeName); - EXPECT_EQ(page1, bucket->activePagesHead); - EXPECT_EQ(page2, bucket->activePagesHead->nextPage); + reinterpret_cast<char*>(PartitionPageToPointer(page1)) + kPointerOffset; + PartitionFree(ptr); + EXPECT_EQ(page1, bucket->active_pages_head); + (void)PartitionAlloc(allocator.root(), kTestAllocSize, type_name); + EXPECT_EQ(page1, bucket->active_pages_head); + EXPECT_EQ(page2, bucket->active_pages_head->next_page); // Allocating another page at this point should cause us to scan over page1 // (which is both full and NOT our current page), and evict it from the // freelist. Older code had a O(n^2) condition due to failure to do this. PartitionPage* page3 = GetFullPage(kTestAllocSize); - EXPECT_EQ(page3, bucket->activePagesHead); - EXPECT_EQ(0, page3->nextPage); + EXPECT_EQ(page3, bucket->active_pages_head); + EXPECT_EQ(0, page3->next_page); // Work out a pointer into page2 and free it. - ptr = reinterpret_cast<char*>(partitionPageToPointer(page2)) + kPointerOffset; - partitionFree(ptr); + ptr = reinterpret_cast<char*>(PartitionPageToPointer(page2)) + kPointerOffset; + PartitionFree(ptr); // Trying to allocate at this time should cause us to cycle around to page2 // and find the recently freed slot. char* newPtr = reinterpret_cast<char*>( - partitionAlloc(allocator.root(), kTestAllocSize, typeName)); + PartitionAlloc(allocator.root(), kTestAllocSize, type_name)); EXPECT_EQ(ptr, newPtr); - EXPECT_EQ(page2, bucket->activePagesHead); - EXPECT_EQ(page3, page2->nextPage); + EXPECT_EQ(page2, bucket->active_pages_head); + EXPECT_EQ(page3, page2->next_page); // Work out a pointer into page1 and free it. This should pull the page // back into the list of available pages. 
- ptr = reinterpret_cast<char*>(partitionPageToPointer(page1)) + kPointerOffset; - partitionFree(ptr); + ptr = reinterpret_cast<char*>(PartitionPageToPointer(page1)) + kPointerOffset; + PartitionFree(ptr); // This allocation should be satisfied by page1. newPtr = reinterpret_cast<char*>( - partitionAlloc(allocator.root(), kTestAllocSize, typeName)); + PartitionAlloc(allocator.root(), kTestAllocSize, type_name)); EXPECT_EQ(ptr, newPtr); - EXPECT_EQ(page1, bucket->activePagesHead); - EXPECT_EQ(page2, page1->nextPage); + EXPECT_EQ(page1, bucket->active_pages_head); + EXPECT_EQ(page2, page1->next_page); FreeFullPage(page3); FreeFullPage(page2); @@ -425,8 +426,8 @@ // Allocating whilst in this state exposed a bug, so keep the test. ptr = reinterpret_cast<char*>( - partitionAlloc(allocator.root(), kTestAllocSize, typeName)); - partitionFree(ptr); + PartitionAlloc(allocator.root(), kTestAllocSize, type_name)); + PartitionFree(ptr); TestShutdown(); } @@ -449,11 +450,11 @@ for (i = 0; i < numToFillFreeListPage; ++i) { pages[i] = GetFullPage(kTestAllocSize); } - EXPECT_EQ(pages[numToFillFreeListPage - 1], bucket->activePagesHead); + EXPECT_EQ(pages[numToFillFreeListPage - 1], bucket->active_pages_head); for (i = 0; i < numToFillFreeListPage; ++i) FreeFullPage(pages[i]); - EXPECT_EQ(&PartitionRootGeneric::gSeedPage, bucket->activePagesHead); - EXPECT_TRUE(bucket->emptyPagesHead); + EXPECT_EQ(&PartitionRootGeneric::gSeedPage, bucket->active_pages_head); + EXPECT_TRUE(bucket->empty_pages_head); // Allocate / free in a different bucket size so we get control of a // different free page list. 
We need two pages because one will be the last @@ -466,12 +467,12 @@ for (i = 0; i < numToFillFreeListPage; ++i) { pages[i] = GetFullPage(kTestAllocSize); } - EXPECT_EQ(pages[numToFillFreeListPage - 1], bucket->activePagesHead); + EXPECT_EQ(pages[numToFillFreeListPage - 1], bucket->active_pages_head); for (i = 0; i < numToFillFreeListPage; ++i) FreeFullPage(pages[i]); - EXPECT_EQ(&PartitionRootGeneric::gSeedPage, bucket->activePagesHead); - EXPECT_TRUE(bucket->emptyPagesHead); + EXPECT_EQ(&PartitionRootGeneric::gSeedPage, bucket->active_pages_head); + EXPECT_TRUE(bucket->empty_pages_head); TestShutdown(); } @@ -494,7 +495,7 @@ size_t i; for (i = 0; i < numPagesNeeded; ++i) { pages[i] = GetFullPage(kTestAllocSize); - void* storagePtr = partitionPageToPointer(pages[i]); + void* storagePtr = PartitionPageToPointer(pages[i]); if (!i) firstSuperPageBase = reinterpret_cast<uintptr_t>(storagePtr) & kSuperPageBaseMask; @@ -519,15 +520,15 @@ TEST(PartitionAllocTest, GenericAlloc) { TestSetup(); - void* ptr = partitionAllocGeneric(genericAllocator.root(), 1, typeName); + void* ptr = PartitionAllocGeneric(generic_allocator.root(), 1, type_name); EXPECT_TRUE(ptr); - partitionFreeGeneric(genericAllocator.root(), ptr); - ptr = partitionAllocGeneric(genericAllocator.root(), kGenericMaxBucketed + 1, - typeName); + PartitionFreeGeneric(generic_allocator.root(), ptr); + ptr = PartitionAllocGeneric(generic_allocator.root(), kGenericMaxBucketed + 1, + type_name); EXPECT_TRUE(ptr); - partitionFreeGeneric(genericAllocator.root(), ptr); + PartitionFreeGeneric(generic_allocator.root(), ptr); - ptr = partitionAllocGeneric(genericAllocator.root(), 1, typeName); + ptr = PartitionAllocGeneric(generic_allocator.root(), 1, type_name); EXPECT_TRUE(ptr); void* origPtr = ptr; char* charPtr = static_cast<char*>(ptr); @@ -535,17 +536,17 @@ // Change the size of the realloc, remaining inside the same bucket. 
void* newPtr = - partitionReallocGeneric(genericAllocator.root(), ptr, 2, typeName); + PartitionReallocGeneric(generic_allocator.root(), ptr, 2, type_name); EXPECT_EQ(ptr, newPtr); - newPtr = partitionReallocGeneric(genericAllocator.root(), ptr, 1, typeName); + newPtr = PartitionReallocGeneric(generic_allocator.root(), ptr, 1, type_name); EXPECT_EQ(ptr, newPtr); - newPtr = partitionReallocGeneric(genericAllocator.root(), ptr, - kGenericSmallestBucket, typeName); + newPtr = PartitionReallocGeneric(generic_allocator.root(), ptr, + kGenericSmallestBucket, type_name); EXPECT_EQ(ptr, newPtr); // Change the size of the realloc, switching buckets. - newPtr = partitionReallocGeneric(genericAllocator.root(), ptr, - kGenericSmallestBucket + 1, typeName); + newPtr = PartitionReallocGeneric(generic_allocator.root(), ptr, + kGenericSmallestBucket + 1, type_name); EXPECT_NE(newPtr, ptr); // Check that the realloc copied correctly. char* newCharPtr = static_cast<char*>(newPtr); @@ -561,13 +562,14 @@ // The realloc moved. To check that the old allocation was freed, we can // do an alloc of the old allocation size and check that the old allocation // address is at the head of the freelist and reused. - void* reusedPtr = partitionAllocGeneric(genericAllocator.root(), 1, typeName); + void* reusedPtr = + PartitionAllocGeneric(generic_allocator.root(), 1, type_name); EXPECT_EQ(reusedPtr, origPtr); - partitionFreeGeneric(genericAllocator.root(), reusedPtr); + PartitionFreeGeneric(generic_allocator.root(), reusedPtr); // Downsize the realloc. ptr = newPtr; - newPtr = partitionReallocGeneric(genericAllocator.root(), ptr, 1, typeName); + newPtr = PartitionReallocGeneric(generic_allocator.root(), ptr, 1, type_name); EXPECT_EQ(newPtr, origPtr); newCharPtr = static_cast<char*>(newPtr); EXPECT_EQ(*newCharPtr, 'B'); @@ -575,8 +577,8 @@ // Upsize the realloc to outside the partition. 
ptr = newPtr; - newPtr = partitionReallocGeneric(genericAllocator.root(), ptr, - kGenericMaxBucketed + 1, typeName); + newPtr = PartitionReallocGeneric(generic_allocator.root(), ptr, + kGenericMaxBucketed + 1, type_name); EXPECT_NE(newPtr, ptr); newCharPtr = static_cast<char*>(newPtr); EXPECT_EQ(*newCharPtr, 'C'); @@ -584,27 +586,27 @@ // Upsize and downsize the realloc, remaining outside the partition. ptr = newPtr; - newPtr = partitionReallocGeneric(genericAllocator.root(), ptr, - kGenericMaxBucketed * 10, typeName); + newPtr = PartitionReallocGeneric(generic_allocator.root(), ptr, + kGenericMaxBucketed * 10, type_name); newCharPtr = static_cast<char*>(newPtr); EXPECT_EQ(*newCharPtr, 'D'); *newCharPtr = 'E'; ptr = newPtr; - newPtr = partitionReallocGeneric(genericAllocator.root(), ptr, - kGenericMaxBucketed * 2, typeName); + newPtr = PartitionReallocGeneric(generic_allocator.root(), ptr, + kGenericMaxBucketed * 2, type_name); newCharPtr = static_cast<char*>(newPtr); EXPECT_EQ(*newCharPtr, 'E'); *newCharPtr = 'F'; // Downsize the realloc to inside the partition. ptr = newPtr; - newPtr = partitionReallocGeneric(genericAllocator.root(), ptr, 1, typeName); + newPtr = PartitionReallocGeneric(generic_allocator.root(), ptr, 1, type_name); EXPECT_NE(newPtr, ptr); EXPECT_EQ(newPtr, origPtr); newCharPtr = static_cast<char*>(newPtr); EXPECT_EQ(*newCharPtr, 'F'); - partitionFreeGeneric(genericAllocator.root(), newPtr); + PartitionFreeGeneric(generic_allocator.root(), newPtr); TestShutdown(); } @@ -613,53 +615,54 @@ TEST(PartitionAllocTest, GenericAllocSizes) { TestSetup(); - void* ptr = partitionAllocGeneric(genericAllocator.root(), 0, typeName); + void* ptr = PartitionAllocGeneric(generic_allocator.root(), 0, type_name); EXPECT_TRUE(ptr); - partitionFreeGeneric(genericAllocator.root(), ptr); + PartitionFreeGeneric(generic_allocator.root(), ptr); // kPartitionPageSize is interesting because it results in just one // allocation per page, which tripped up some corner cases. 
size_t size = kPartitionPageSize - kExtraAllocSize; - ptr = partitionAllocGeneric(genericAllocator.root(), size, typeName); + ptr = PartitionAllocGeneric(generic_allocator.root(), size, type_name); EXPECT_TRUE(ptr); - void* ptr2 = partitionAllocGeneric(genericAllocator.root(), size, typeName); + void* ptr2 = PartitionAllocGeneric(generic_allocator.root(), size, type_name); EXPECT_TRUE(ptr2); - partitionFreeGeneric(genericAllocator.root(), ptr); + PartitionFreeGeneric(generic_allocator.root(), ptr); // Should be freeable at this point. PartitionPage* page = - partitionPointerToPage(partitionCookieFreePointerAdjust(ptr)); - EXPECT_NE(-1, page->emptyCacheIndex); - partitionFreeGeneric(genericAllocator.root(), ptr2); + PartitionPointerToPage(PartitionCookieFreePointerAdjust(ptr)); + EXPECT_NE(-1, page->empty_cache_index); + PartitionFreeGeneric(generic_allocator.root(), ptr2); size = (((kPartitionPageSize * kMaxPartitionPagesPerSlotSpan) - kSystemPageSize) / 2) - kExtraAllocSize; - ptr = partitionAllocGeneric(genericAllocator.root(), size, typeName); + ptr = PartitionAllocGeneric(generic_allocator.root(), size, type_name); EXPECT_TRUE(ptr); memset(ptr, 'A', size); - ptr2 = partitionAllocGeneric(genericAllocator.root(), size, typeName); + ptr2 = PartitionAllocGeneric(generic_allocator.root(), size, type_name); EXPECT_TRUE(ptr2); - void* ptr3 = partitionAllocGeneric(genericAllocator.root(), size, typeName); + void* ptr3 = PartitionAllocGeneric(generic_allocator.root(), size, type_name); EXPECT_TRUE(ptr3); - void* ptr4 = partitionAllocGeneric(genericAllocator.root(), size, typeName); + void* ptr4 = PartitionAllocGeneric(generic_allocator.root(), size, type_name); EXPECT_TRUE(ptr4); - page = partitionPointerToPage(partitionCookieFreePointerAdjust(ptr)); + page = PartitionPointerToPage(PartitionCookieFreePointerAdjust(ptr)); PartitionPage* page2 = - partitionPointerToPage(partitionCookieFreePointerAdjust(ptr3)); + 
PartitionPointerToPage(PartitionCookieFreePointerAdjust(ptr3)); EXPECT_NE(page, page2); - partitionFreeGeneric(genericAllocator.root(), ptr); - partitionFreeGeneric(genericAllocator.root(), ptr3); - partitionFreeGeneric(genericAllocator.root(), ptr2); + PartitionFreeGeneric(generic_allocator.root(), ptr); + PartitionFreeGeneric(generic_allocator.root(), ptr3); + PartitionFreeGeneric(generic_allocator.root(), ptr2); // Should be freeable at this point. - EXPECT_NE(-1, page->emptyCacheIndex); - EXPECT_EQ(0, page->numAllocatedSlots); - EXPECT_EQ(0, page->numUnprovisionedSlots); - void* newPtr = partitionAllocGeneric(genericAllocator.root(), size, typeName); + EXPECT_NE(-1, page->empty_cache_index); + EXPECT_EQ(0, page->num_allocated_slots); + EXPECT_EQ(0, page->num_unprovisioned_slots); + void* newPtr = + PartitionAllocGeneric(generic_allocator.root(), size, type_name); EXPECT_EQ(ptr3, newPtr); - newPtr = partitionAllocGeneric(genericAllocator.root(), size, typeName); + newPtr = PartitionAllocGeneric(generic_allocator.root(), size, type_name); EXPECT_EQ(ptr2, newPtr); #if defined(OS_LINUX) && !DCHECK_IS_ON() // On Linux, we have a guarantee that freelisting a page should cause its @@ -671,18 +674,18 @@ // byte pattern. EXPECT_EQ(0, *(reinterpret_cast<char*>(newPtr) + (size - 1))); #endif - partitionFreeGeneric(genericAllocator.root(), newPtr); - partitionFreeGeneric(genericAllocator.root(), ptr3); - partitionFreeGeneric(genericAllocator.root(), ptr4); + PartitionFreeGeneric(generic_allocator.root(), newPtr); + PartitionFreeGeneric(generic_allocator.root(), ptr3); + PartitionFreeGeneric(generic_allocator.root(), ptr4); // Can we allocate a massive (512MB) size? // Allocate 512MB, but +1, to test for cookie writing alignment issues. // Test this only if the device has enough memory or it might fail due // to OOM. 
if (IsLargeMemoryDevice()) { - ptr = partitionAllocGeneric(genericAllocator.root(), 512 * 1024 * 1024 + 1, - typeName); - partitionFreeGeneric(genericAllocator.root(), ptr); + ptr = PartitionAllocGeneric(generic_allocator.root(), 512 * 1024 * 1024 + 1, + type_name); + PartitionFreeGeneric(generic_allocator.root(), ptr); } // Check a more reasonable, but still direct mapped, size. @@ -690,18 +693,18 @@ size = 20 * 1024 * 1024; size -= kSystemPageSize; size -= 1; - ptr = partitionAllocGeneric(genericAllocator.root(), size, typeName); + ptr = PartitionAllocGeneric(generic_allocator.root(), size, type_name); char* charPtr = reinterpret_cast<char*>(ptr); *(charPtr + (size - 1)) = 'A'; - partitionFreeGeneric(genericAllocator.root(), ptr); + PartitionFreeGeneric(generic_allocator.root(), ptr); // Can we free null? - partitionFreeGeneric(genericAllocator.root(), 0); + PartitionFreeGeneric(generic_allocator.root(), 0); // Do we correctly get a null for a failed allocation? - EXPECT_EQ(0, partitionAllocGenericFlags(genericAllocator.root(), + EXPECT_EQ(0, PartitionAllocGenericFlags(generic_allocator.root(), PartitionAllocReturnNull, - 3u * 1024 * 1024 * 1024, typeName)); + 3u * 1024 * 1024 * 1024, type_name)); TestShutdown(); } @@ -713,64 +716,67 @@ void* ptr; size_t requestedSize, actualSize, predictedSize; - EXPECT_TRUE(partitionAllocSupportsGetSize()); + EXPECT_TRUE(PartitionAllocSupportsGetSize()); // Allocate something small. 
requestedSize = 511 - kExtraAllocSize; predictedSize = - partitionAllocActualSize(genericAllocator.root(), requestedSize); - ptr = partitionAllocGeneric(genericAllocator.root(), requestedSize, typeName); + PartitionAllocActualSize(generic_allocator.root(), requestedSize); + ptr = + PartitionAllocGeneric(generic_allocator.root(), requestedSize, type_name); EXPECT_TRUE(ptr); - actualSize = partitionAllocGetSize(ptr); + actualSize = PartitionAllocGetSize(ptr); EXPECT_EQ(predictedSize, actualSize); EXPECT_LT(requestedSize, actualSize); - partitionFreeGeneric(genericAllocator.root(), ptr); + PartitionFreeGeneric(generic_allocator.root(), ptr); // Allocate a size that should be a perfect match for a bucket, because it // is an exact power of 2. requestedSize = (256 * 1024) - kExtraAllocSize; predictedSize = - partitionAllocActualSize(genericAllocator.root(), requestedSize); - ptr = partitionAllocGeneric(genericAllocator.root(), requestedSize, typeName); + PartitionAllocActualSize(generic_allocator.root(), requestedSize); + ptr = + PartitionAllocGeneric(generic_allocator.root(), requestedSize, type_name); EXPECT_TRUE(ptr); - actualSize = partitionAllocGetSize(ptr); + actualSize = PartitionAllocGetSize(ptr); EXPECT_EQ(predictedSize, actualSize); EXPECT_EQ(requestedSize, actualSize); - partitionFreeGeneric(genericAllocator.root(), ptr); + PartitionFreeGeneric(generic_allocator.root(), ptr); // Allocate a size that is a system page smaller than a bucket. GetSize() // should return a larger size than we asked for now. 
requestedSize = (256 * 1024) - kSystemPageSize - kExtraAllocSize; predictedSize = - partitionAllocActualSize(genericAllocator.root(), requestedSize); - ptr = partitionAllocGeneric(genericAllocator.root(), requestedSize, typeName); + PartitionAllocActualSize(generic_allocator.root(), requestedSize); + ptr = + PartitionAllocGeneric(generic_allocator.root(), requestedSize, type_name); EXPECT_TRUE(ptr); - actualSize = partitionAllocGetSize(ptr); + actualSize = PartitionAllocGetSize(ptr); EXPECT_EQ(predictedSize, actualSize); EXPECT_EQ(requestedSize + kSystemPageSize, actualSize); // Check that we can write at the end of the reported size too. char* charPtr = reinterpret_cast<char*>(ptr); *(charPtr + (actualSize - 1)) = 'A'; - partitionFreeGeneric(genericAllocator.root(), ptr); + PartitionFreeGeneric(generic_allocator.root(), ptr); // Allocate something very large, and uneven. if (IsLargeMemoryDevice()) { requestedSize = 512 * 1024 * 1024 - 1; predictedSize = - partitionAllocActualSize(genericAllocator.root(), requestedSize); - ptr = - partitionAllocGeneric(genericAllocator.root(), requestedSize, typeName); + PartitionAllocActualSize(generic_allocator.root(), requestedSize); + ptr = PartitionAllocGeneric(generic_allocator.root(), requestedSize, + type_name); EXPECT_TRUE(ptr); - actualSize = partitionAllocGetSize(ptr); + actualSize = PartitionAllocGetSize(ptr); EXPECT_EQ(predictedSize, actualSize); EXPECT_LT(requestedSize, actualSize); - partitionFreeGeneric(genericAllocator.root(), ptr); + PartitionFreeGeneric(generic_allocator.root(), ptr); } // Too large allocation. requestedSize = INT_MAX; predictedSize = - partitionAllocActualSize(genericAllocator.root(), requestedSize); + PartitionAllocActualSize(generic_allocator.root(), requestedSize); EXPECT_EQ(requestedSize, predictedSize); TestShutdown(); @@ -781,25 +787,25 @@ TestSetup(); // realloc(0, size) should be equivalent to malloc(). 
- void* ptr = partitionReallocGeneric(genericAllocator.root(), 0, - kTestAllocSize, typeName); + void* ptr = PartitionReallocGeneric(generic_allocator.root(), 0, + kTestAllocSize, type_name); memset(ptr, 'A', kTestAllocSize); PartitionPage* page = - partitionPointerToPage(partitionCookieFreePointerAdjust(ptr)); + PartitionPointerToPage(PartitionCookieFreePointerAdjust(ptr)); // realloc(ptr, 0) should be equivalent to free(). void* ptr2 = - partitionReallocGeneric(genericAllocator.root(), ptr, 0, typeName); + PartitionReallocGeneric(generic_allocator.root(), ptr, 0, type_name); EXPECT_EQ(0, ptr2); - EXPECT_EQ(partitionCookieFreePointerAdjust(ptr), page->freelistHead); + EXPECT_EQ(PartitionCookieFreePointerAdjust(ptr), page->freelist_head); // Test that growing an allocation with realloc() copies everything from the // old allocation. size_t size = kSystemPageSize - kExtraAllocSize; - EXPECT_EQ(size, partitionAllocActualSize(genericAllocator.root(), size)); - ptr = partitionAllocGeneric(genericAllocator.root(), size, typeName); + EXPECT_EQ(size, PartitionAllocActualSize(generic_allocator.root(), size)); + ptr = PartitionAllocGeneric(generic_allocator.root(), size, type_name); memset(ptr, 'A', size); - ptr2 = - partitionReallocGeneric(genericAllocator.root(), ptr, size + 1, typeName); + ptr2 = PartitionReallocGeneric(generic_allocator.root(), ptr, size + 1, + type_name); EXPECT_NE(ptr, ptr2); char* charPtr2 = static_cast<char*>(ptr2); EXPECT_EQ('A', charPtr2[0]); @@ -810,8 +816,8 @@ // Test that shrinking an allocation with realloc() also copies everything // from the old allocation. 
- ptr = partitionReallocGeneric(genericAllocator.root(), ptr2, size - 1, - typeName); + ptr = PartitionReallocGeneric(generic_allocator.root(), ptr2, size - 1, + type_name); EXPECT_NE(ptr2, ptr); char* charPtr = static_cast<char*>(ptr); EXPECT_EQ('A', charPtr[0]); @@ -820,32 +826,32 @@ EXPECT_EQ(kUninitializedByte, static_cast<unsigned char>(charPtr[size - 1])); #endif - partitionFreeGeneric(genericAllocator.root(), ptr); + PartitionFreeGeneric(generic_allocator.root(), ptr); // Test that shrinking a direct mapped allocation happens in-place. size = kGenericMaxBucketed + 16 * kSystemPageSize; - ptr = partitionAllocGeneric(genericAllocator.root(), size, typeName); - size_t actualSize = partitionAllocGetSize(ptr); - ptr2 = partitionReallocGeneric(genericAllocator.root(), ptr, + ptr = PartitionAllocGeneric(generic_allocator.root(), size, type_name); + size_t actualSize = PartitionAllocGetSize(ptr); + ptr2 = PartitionReallocGeneric(generic_allocator.root(), ptr, kGenericMaxBucketed + 8 * kSystemPageSize, - typeName); + type_name); EXPECT_EQ(ptr, ptr2); - EXPECT_EQ(actualSize - 8 * kSystemPageSize, partitionAllocGetSize(ptr2)); + EXPECT_EQ(actualSize - 8 * kSystemPageSize, PartitionAllocGetSize(ptr2)); // Test that a previously in-place shrunk direct mapped allocation can be // expanded up again within its original size. - ptr = partitionReallocGeneric(genericAllocator.root(), ptr2, - size - kSystemPageSize, typeName); + ptr = PartitionReallocGeneric(generic_allocator.root(), ptr2, + size - kSystemPageSize, type_name); EXPECT_EQ(ptr2, ptr); - EXPECT_EQ(actualSize - kSystemPageSize, partitionAllocGetSize(ptr)); + EXPECT_EQ(actualSize - kSystemPageSize, PartitionAllocGetSize(ptr)); // Test that a direct mapped allocation is performed not in-place when the // new size is small enough. 
- ptr2 = partitionReallocGeneric(genericAllocator.root(), ptr, kSystemPageSize, - typeName); + ptr2 = PartitionReallocGeneric(generic_allocator.root(), ptr, kSystemPageSize, + type_name); EXPECT_NE(ptr, ptr2); - partitionFreeGeneric(genericAllocator.root(), ptr2); + PartitionFreeGeneric(generic_allocator.root(), ptr2); TestShutdown(); } @@ -854,151 +860,156 @@ TEST(PartitionAllocTest, PartialPageFreelists) { TestSetup(); - size_t bigSize = allocator.root()->maxAllocation - kExtraAllocSize; + size_t big_size = allocator.root()->max_allocation - kExtraAllocSize; EXPECT_EQ(kSystemPageSize - kAllocationGranularity, - bigSize + kExtraAllocSize); - size_t bucketIdx = (bigSize + kExtraAllocSize) >> kBucketShift; - PartitionBucket* bucket = &allocator.root()->buckets()[bucketIdx]; - EXPECT_EQ(0, bucket->emptyPagesHead); + big_size + kExtraAllocSize); + size_t bucket_index = (big_size + kExtraAllocSize) >> kBucketShift; + PartitionBucket* bucket = &allocator.root()->buckets()[bucket_index]; + EXPECT_EQ(0, bucket->empty_pages_head); - void* ptr = partitionAlloc(allocator.root(), bigSize, typeName); + void* ptr = PartitionAlloc(allocator.root(), big_size, type_name); EXPECT_TRUE(ptr); PartitionPage* page = - partitionPointerToPage(partitionCookieFreePointerAdjust(ptr)); + PartitionPointerToPage(PartitionCookieFreePointerAdjust(ptr)); size_t totalSlots = - (page->bucket->numSystemPagesPerSlotSpan * kSystemPageSize) / - (bigSize + kExtraAllocSize); + (page->bucket->num_system_pages_per_slot_span * kSystemPageSize) / + (big_size + kExtraAllocSize); EXPECT_EQ(4u, totalSlots); // The freelist should have one entry, because we were able to exactly fit // one object slot and one freelist pointer (the null that the head points // to) into a system page. 
- EXPECT_TRUE(page->freelistHead); - EXPECT_EQ(1, page->numAllocatedSlots); - EXPECT_EQ(2, page->numUnprovisionedSlots); + EXPECT_TRUE(page->freelist_head); + EXPECT_EQ(1, page->num_allocated_slots); + EXPECT_EQ(2, page->num_unprovisioned_slots); - void* ptr2 = partitionAlloc(allocator.root(), bigSize, typeName); + void* ptr2 = PartitionAlloc(allocator.root(), big_size, type_name); EXPECT_TRUE(ptr2); - EXPECT_FALSE(page->freelistHead); - EXPECT_EQ(2, page->numAllocatedSlots); - EXPECT_EQ(2, page->numUnprovisionedSlots); + EXPECT_FALSE(page->freelist_head); + EXPECT_EQ(2, page->num_allocated_slots); + EXPECT_EQ(2, page->num_unprovisioned_slots); - void* ptr3 = partitionAlloc(allocator.root(), bigSize, typeName); + void* ptr3 = PartitionAlloc(allocator.root(), big_size, type_name); EXPECT_TRUE(ptr3); - EXPECT_TRUE(page->freelistHead); - EXPECT_EQ(3, page->numAllocatedSlots); - EXPECT_EQ(0, page->numUnprovisionedSlots); + EXPECT_TRUE(page->freelist_head); + EXPECT_EQ(3, page->num_allocated_slots); + EXPECT_EQ(0, page->num_unprovisioned_slots); - void* ptr4 = partitionAlloc(allocator.root(), bigSize, typeName); + void* ptr4 = PartitionAlloc(allocator.root(), big_size, type_name); EXPECT_TRUE(ptr4); - EXPECT_FALSE(page->freelistHead); - EXPECT_EQ(4, page->numAllocatedSlots); - EXPECT_EQ(0, page->numUnprovisionedSlots); + EXPECT_FALSE(page->freelist_head); + EXPECT_EQ(4, page->num_allocated_slots); + EXPECT_EQ(0, page->num_unprovisioned_slots); - void* ptr5 = partitionAlloc(allocator.root(), bigSize, typeName); + void* ptr5 = PartitionAlloc(allocator.root(), big_size, type_name); EXPECT_TRUE(ptr5); PartitionPage* page2 = - partitionPointerToPage(partitionCookieFreePointerAdjust(ptr5)); - EXPECT_EQ(1, page2->numAllocatedSlots); + PartitionPointerToPage(PartitionCookieFreePointerAdjust(ptr5)); + EXPECT_EQ(1, page2->num_allocated_slots); // Churn things a little whilst there's a partial page freelist. 
- partitionFree(ptr); - ptr = partitionAlloc(allocator.root(), bigSize, typeName); - void* ptr6 = partitionAlloc(allocator.root(), bigSize, typeName); + PartitionFree(ptr); + ptr = PartitionAlloc(allocator.root(), big_size, type_name); + void* ptr6 = PartitionAlloc(allocator.root(), big_size, type_name); - partitionFree(ptr); - partitionFree(ptr2); - partitionFree(ptr3); - partitionFree(ptr4); - partitionFree(ptr5); - partitionFree(ptr6); - EXPECT_NE(-1, page->emptyCacheIndex); - EXPECT_NE(-1, page2->emptyCacheIndex); - EXPECT_TRUE(page2->freelistHead); - EXPECT_EQ(0, page2->numAllocatedSlots); + PartitionFree(ptr); + PartitionFree(ptr2); + PartitionFree(ptr3); + PartitionFree(ptr4); + PartitionFree(ptr5); + PartitionFree(ptr6); + EXPECT_NE(-1, page->empty_cache_index); + EXPECT_NE(-1, page2->empty_cache_index); + EXPECT_TRUE(page2->freelist_head); + EXPECT_EQ(0, page2->num_allocated_slots); // And test a couple of sizes that do not cross kSystemPageSize with a single // allocation. size_t mediumSize = (kSystemPageSize / 2) - kExtraAllocSize; - bucketIdx = (mediumSize + kExtraAllocSize) >> kBucketShift; - bucket = &allocator.root()->buckets()[bucketIdx]; - EXPECT_EQ(0, bucket->emptyPagesHead); + bucket_index = (mediumSize + kExtraAllocSize) >> kBucketShift; + bucket = &allocator.root()->buckets()[bucket_index]; + EXPECT_EQ(0, bucket->empty_pages_head); - ptr = partitionAlloc(allocator.root(), mediumSize, typeName); + ptr = PartitionAlloc(allocator.root(), mediumSize, type_name); EXPECT_TRUE(ptr); - page = partitionPointerToPage(partitionCookieFreePointerAdjust(ptr)); - EXPECT_EQ(1, page->numAllocatedSlots); - totalSlots = (page->bucket->numSystemPagesPerSlotSpan * kSystemPageSize) / - (mediumSize + kExtraAllocSize); + page = PartitionPointerToPage(PartitionCookieFreePointerAdjust(ptr)); + EXPECT_EQ(1, page->num_allocated_slots); + totalSlots = + (page->bucket->num_system_pages_per_slot_span * kSystemPageSize) / + (mediumSize + kExtraAllocSize); size_t 
firstPageSlots = kSystemPageSize / (mediumSize + kExtraAllocSize); EXPECT_EQ(2u, firstPageSlots); - EXPECT_EQ(totalSlots - firstPageSlots, page->numUnprovisionedSlots); + EXPECT_EQ(totalSlots - firstPageSlots, page->num_unprovisioned_slots); - partitionFree(ptr); + PartitionFree(ptr); size_t smallSize = (kSystemPageSize / 4) - kExtraAllocSize; - bucketIdx = (smallSize + kExtraAllocSize) >> kBucketShift; - bucket = &allocator.root()->buckets()[bucketIdx]; - EXPECT_EQ(0, bucket->emptyPagesHead); + bucket_index = (smallSize + kExtraAllocSize) >> kBucketShift; + bucket = &allocator.root()->buckets()[bucket_index]; + EXPECT_EQ(0, bucket->empty_pages_head); - ptr = partitionAlloc(allocator.root(), smallSize, typeName); + ptr = PartitionAlloc(allocator.root(), smallSize, type_name); EXPECT_TRUE(ptr); - page = partitionPointerToPage(partitionCookieFreePointerAdjust(ptr)); - EXPECT_EQ(1, page->numAllocatedSlots); - totalSlots = (page->bucket->numSystemPagesPerSlotSpan * kSystemPageSize) / - (smallSize + kExtraAllocSize); + page = PartitionPointerToPage(PartitionCookieFreePointerAdjust(ptr)); + EXPECT_EQ(1, page->num_allocated_slots); + totalSlots = + (page->bucket->num_system_pages_per_slot_span * kSystemPageSize) / + (smallSize + kExtraAllocSize); firstPageSlots = kSystemPageSize / (smallSize + kExtraAllocSize); - EXPECT_EQ(totalSlots - firstPageSlots, page->numUnprovisionedSlots); + EXPECT_EQ(totalSlots - firstPageSlots, page->num_unprovisioned_slots); - partitionFree(ptr); - EXPECT_TRUE(page->freelistHead); - EXPECT_EQ(0, page->numAllocatedSlots); + PartitionFree(ptr); + EXPECT_TRUE(page->freelist_head); + EXPECT_EQ(0, page->num_allocated_slots); size_t verySmallSize = 32 - kExtraAllocSize; - bucketIdx = (verySmallSize + kExtraAllocSize) >> kBucketShift; - bucket = &allocator.root()->buckets()[bucketIdx]; - EXPECT_EQ(0, bucket->emptyPagesHead); + bucket_index = (verySmallSize + kExtraAllocSize) >> kBucketShift; + bucket = &allocator.root()->buckets()[bucket_index]; + 
EXPECT_EQ(0, bucket->empty_pages_head); - ptr = partitionAlloc(allocator.root(), verySmallSize, typeName); + ptr = PartitionAlloc(allocator.root(), verySmallSize, type_name); EXPECT_TRUE(ptr); - page = partitionPointerToPage(partitionCookieFreePointerAdjust(ptr)); - EXPECT_EQ(1, page->numAllocatedSlots); - totalSlots = (page->bucket->numSystemPagesPerSlotSpan * kSystemPageSize) / - (verySmallSize + kExtraAllocSize); + page = PartitionPointerToPage(PartitionCookieFreePointerAdjust(ptr)); + EXPECT_EQ(1, page->num_allocated_slots); + totalSlots = + (page->bucket->num_system_pages_per_slot_span * kSystemPageSize) / + (verySmallSize + kExtraAllocSize); firstPageSlots = kSystemPageSize / (verySmallSize + kExtraAllocSize); - EXPECT_EQ(totalSlots - firstPageSlots, page->numUnprovisionedSlots); + EXPECT_EQ(totalSlots - firstPageSlots, page->num_unprovisioned_slots); - partitionFree(ptr); - EXPECT_TRUE(page->freelistHead); - EXPECT_EQ(0, page->numAllocatedSlots); + PartitionFree(ptr); + EXPECT_TRUE(page->freelist_head); + EXPECT_EQ(0, page->num_allocated_slots); // And try an allocation size (against the generic allocator) that is // larger than a system page. 
size_t pageAndAHalfSize = (kSystemPageSize + (kSystemPageSize / 2)) - kExtraAllocSize; - ptr = partitionAllocGeneric(genericAllocator.root(), pageAndAHalfSize, - typeName); + ptr = PartitionAllocGeneric(generic_allocator.root(), pageAndAHalfSize, + type_name); EXPECT_TRUE(ptr); - page = partitionPointerToPage(partitionCookieFreePointerAdjust(ptr)); - EXPECT_EQ(1, page->numAllocatedSlots); - EXPECT_TRUE(page->freelistHead); - totalSlots = (page->bucket->numSystemPagesPerSlotSpan * kSystemPageSize) / - (pageAndAHalfSize + kExtraAllocSize); - EXPECT_EQ(totalSlots - 2, page->numUnprovisionedSlots); - partitionFreeGeneric(genericAllocator.root(), ptr); + page = PartitionPointerToPage(PartitionCookieFreePointerAdjust(ptr)); + EXPECT_EQ(1, page->num_allocated_slots); + EXPECT_TRUE(page->freelist_head); + totalSlots = + (page->bucket->num_system_pages_per_slot_span * kSystemPageSize) / + (pageAndAHalfSize + kExtraAllocSize); + EXPECT_EQ(totalSlots - 2, page->num_unprovisioned_slots); + PartitionFreeGeneric(generic_allocator.root(), ptr); // And then make sure than exactly the page size only faults one page. 
size_t pageSize = kSystemPageSize - kExtraAllocSize; - ptr = partitionAllocGeneric(genericAllocator.root(), pageSize, typeName); + ptr = PartitionAllocGeneric(generic_allocator.root(), pageSize, type_name); EXPECT_TRUE(ptr); - page = partitionPointerToPage(partitionCookieFreePointerAdjust(ptr)); - EXPECT_EQ(1, page->numAllocatedSlots); - EXPECT_FALSE(page->freelistHead); - totalSlots = (page->bucket->numSystemPagesPerSlotSpan * kSystemPageSize) / - (pageSize + kExtraAllocSize); - EXPECT_EQ(totalSlots - 1, page->numUnprovisionedSlots); - partitionFreeGeneric(genericAllocator.root(), ptr); + page = PartitionPointerToPage(PartitionCookieFreePointerAdjust(ptr)); + EXPECT_EQ(1, page->num_allocated_slots); + EXPECT_FALSE(page->freelist_head); + totalSlots = + (page->bucket->num_system_pages_per_slot_span * kSystemPageSize) / + (pageSize + kExtraAllocSize); + EXPECT_EQ(totalSlots - 1, page->num_unprovisioned_slots); + PartitionFreeGeneric(generic_allocator.root(), ptr); TestShutdown(); } @@ -1011,31 +1022,31 @@ // Grab two full pages and a non-full page. PartitionPage* page1 = GetFullPage(kTestAllocSize); PartitionPage* page2 = GetFullPage(kTestAllocSize); - void* ptr = partitionAlloc(allocator.root(), kTestAllocSize, typeName); + void* ptr = PartitionAlloc(allocator.root(), kTestAllocSize, type_name); EXPECT_TRUE(ptr); - EXPECT_NE(page1, bucket->activePagesHead); - EXPECT_NE(page2, bucket->activePagesHead); + EXPECT_NE(page1, bucket->active_pages_head); + EXPECT_NE(page2, bucket->active_pages_head); PartitionPage* page = - partitionPointerToPage(partitionCookieFreePointerAdjust(ptr)); - EXPECT_EQ(1, page->numAllocatedSlots); + PartitionPointerToPage(PartitionCookieFreePointerAdjust(ptr)); + EXPECT_EQ(1, page->num_allocated_slots); // Work out a pointer into page2 and free it; and then page1 and free it. 
char* ptr2 = - reinterpret_cast<char*>(partitionPageToPointer(page1)) + kPointerOffset; - partitionFree(ptr2); + reinterpret_cast<char*>(PartitionPageToPointer(page1)) + kPointerOffset; + PartitionFree(ptr2); ptr2 = - reinterpret_cast<char*>(partitionPageToPointer(page2)) + kPointerOffset; - partitionFree(ptr2); + reinterpret_cast<char*>(PartitionPageToPointer(page2)) + kPointerOffset; + PartitionFree(ptr2); // If we perform two allocations from the same bucket now, we expect to // refill both the nearly full pages. - (void)partitionAlloc(allocator.root(), kTestAllocSize, typeName); - (void)partitionAlloc(allocator.root(), kTestAllocSize, typeName); - EXPECT_EQ(1, page->numAllocatedSlots); + (void)PartitionAlloc(allocator.root(), kTestAllocSize, type_name); + (void)PartitionAlloc(allocator.root(), kTestAllocSize, type_name); + EXPECT_EQ(1, page->num_allocated_slots); FreeFullPage(page2); FreeFullPage(page1); - partitionFree(ptr); + PartitionFree(ptr); TestShutdown(); } @@ -1049,7 +1060,8 @@ PartitionBucket* bucket = 0; while (size < kTestMaxAllocation) { bucket = &allocator.root()->buckets()[size >> kBucketShift]; - if (bucket->numSystemPagesPerSlotSpan % kNumSystemPagesPerPartitionPage) + if (bucket->num_system_pages_per_slot_span % + kNumSystemPagesPerPartitionPage) break; size += sizeof(void*); } @@ -1079,63 +1091,63 @@ firstSuperPagePages[i] = GetFullPage(kTestAllocSize); char* pageBase = - reinterpret_cast<char*>(partitionPageToPointer(firstSuperPagePages[0])); + reinterpret_cast<char*>(PartitionPageToPointer(firstSuperPagePages[0])); EXPECT_EQ(kPartitionPageSize, reinterpret_cast<uintptr_t>(pageBase) & kSuperPageOffsetMask); pageBase -= kPartitionPageSize; // Map a single system page either side of the mapping for our allocations, // with the goal of tripping up alignment of the next mapping. 
- void* map1 = allocPages(pageBase - kPageAllocationGranularity, + void* map1 = AllocPages(pageBase - kPageAllocationGranularity, kPageAllocationGranularity, kPageAllocationGranularity, PageInaccessible); EXPECT_TRUE(map1); - void* map2 = allocPages(pageBase + kSuperPageSize, kPageAllocationGranularity, + void* map2 = AllocPages(pageBase + kSuperPageSize, kPageAllocationGranularity, kPageAllocationGranularity, PageInaccessible); EXPECT_TRUE(map2); for (i = 0; i < numPartitionPagesNeeded; ++i) secondSuperPagePages[i] = GetFullPage(kTestAllocSize); - freePages(map1, kPageAllocationGranularity); - freePages(map2, kPageAllocationGranularity); + FreePages(map1, kPageAllocationGranularity); + FreePages(map2, kPageAllocationGranularity); pageBase = - reinterpret_cast<char*>(partitionPageToPointer(secondSuperPagePages[0])); + reinterpret_cast<char*>(PartitionPageToPointer(secondSuperPagePages[0])); EXPECT_EQ(kPartitionPageSize, reinterpret_cast<uintptr_t>(pageBase) & kSuperPageOffsetMask); pageBase -= kPartitionPageSize; // Map a single system page either side of the mapping for our allocations, // with the goal of tripping up alignment of the next mapping. 
- map1 = allocPages(pageBase - kPageAllocationGranularity, + map1 = AllocPages(pageBase - kPageAllocationGranularity, kPageAllocationGranularity, kPageAllocationGranularity, PageAccessible); EXPECT_TRUE(map1); - map2 = allocPages(pageBase + kSuperPageSize, kPageAllocationGranularity, + map2 = AllocPages(pageBase + kSuperPageSize, kPageAllocationGranularity, kPageAllocationGranularity, PageAccessible); EXPECT_TRUE(map2); - setSystemPagesInaccessible(map1, kPageAllocationGranularity); - setSystemPagesInaccessible(map2, kPageAllocationGranularity); + SetSystemPagesInaccessible(map1, kPageAllocationGranularity); + SetSystemPagesInaccessible(map2, kPageAllocationGranularity); PartitionPage* pageInThirdSuperPage = GetFullPage(kTestAllocSize); - freePages(map1, kPageAllocationGranularity); - freePages(map2, kPageAllocationGranularity); + FreePages(map1, kPageAllocationGranularity); + FreePages(map2, kPageAllocationGranularity); EXPECT_EQ(0u, reinterpret_cast<uintptr_t>( - partitionPageToPointer(pageInThirdSuperPage)) & + PartitionPageToPointer(pageInThirdSuperPage)) & kPartitionPageOffsetMask); // And make sure we really did get a page in a new superpage. 
EXPECT_NE(reinterpret_cast<uintptr_t>( - partitionPageToPointer(firstSuperPagePages[0])) & + PartitionPageToPointer(firstSuperPagePages[0])) & kSuperPageBaseMask, reinterpret_cast<uintptr_t>( - partitionPageToPointer(pageInThirdSuperPage)) & + PartitionPageToPointer(pageInThirdSuperPage)) & kSuperPageBaseMask); EXPECT_NE(reinterpret_cast<uintptr_t>( - partitionPageToPointer(secondSuperPagePages[0])) & + PartitionPageToPointer(secondSuperPagePages[0])) & kSuperPageBaseMask, reinterpret_cast<uintptr_t>( - partitionPageToPointer(pageInThirdSuperPage)) & + PartitionPageToPointer(pageInThirdSuperPage)) & kSuperPageBaseMask); FreeFullPage(pageInThirdSuperPage); @@ -1151,50 +1163,53 @@ TEST(PartitionAllocTest, FreeCache) { TestSetup(); - EXPECT_EQ(0U, allocator.root()->totalSizeOfCommittedPages); + EXPECT_EQ(0U, allocator.root()->total_size_of_committed_pages); - size_t bigSize = allocator.root()->maxAllocation - kExtraAllocSize; - size_t bucketIdx = (bigSize + kExtraAllocSize) >> kBucketShift; - PartitionBucket* bucket = &allocator.root()->buckets()[bucketIdx]; + size_t big_size = allocator.root()->max_allocation - kExtraAllocSize; + size_t bucket_index = (big_size + kExtraAllocSize) >> kBucketShift; + PartitionBucket* bucket = &allocator.root()->buckets()[bucket_index]; - void* ptr = partitionAlloc(allocator.root(), bigSize, typeName); + void* ptr = PartitionAlloc(allocator.root(), big_size, type_name); EXPECT_TRUE(ptr); PartitionPage* page = - partitionPointerToPage(partitionCookieFreePointerAdjust(ptr)); - EXPECT_EQ(0, bucket->emptyPagesHead); - EXPECT_EQ(1, page->numAllocatedSlots); - EXPECT_EQ(kPartitionPageSize, allocator.root()->totalSizeOfCommittedPages); - partitionFree(ptr); - EXPECT_EQ(0, page->numAllocatedSlots); - EXPECT_NE(-1, page->emptyCacheIndex); - EXPECT_TRUE(page->freelistHead); + PartitionPointerToPage(PartitionCookieFreePointerAdjust(ptr)); + EXPECT_EQ(0, bucket->empty_pages_head); + EXPECT_EQ(1, page->num_allocated_slots); + 
EXPECT_EQ(kPartitionPageSize, + allocator.root()->total_size_of_committed_pages); + PartitionFree(ptr); + EXPECT_EQ(0, page->num_allocated_slots); + EXPECT_NE(-1, page->empty_cache_index); + EXPECT_TRUE(page->freelist_head); CycleFreeCache(kTestAllocSize); // Flushing the cache should have really freed the unused page. - EXPECT_FALSE(page->freelistHead); - EXPECT_EQ(-1, page->emptyCacheIndex); - EXPECT_EQ(0, page->numAllocatedSlots); - PartitionBucket* cycleFreeCacheBucket = + EXPECT_FALSE(page->freelist_head); + EXPECT_EQ(-1, page->empty_cache_index); + EXPECT_EQ(0, page->num_allocated_slots); + PartitionBucket* cycle_free_cache_bucket = &allocator.root()->buckets()[kTestBucketIndex]; - EXPECT_EQ(cycleFreeCacheBucket->numSystemPagesPerSlotSpan * kSystemPageSize, - allocator.root()->totalSizeOfCommittedPages); + EXPECT_EQ( + cycle_free_cache_bucket->num_system_pages_per_slot_span * kSystemPageSize, + allocator.root()->total_size_of_committed_pages); // Check that an allocation works ok whilst in this state (a free'd page // as the active pages head). - ptr = partitionAlloc(allocator.root(), bigSize, typeName); - EXPECT_FALSE(bucket->emptyPagesHead); - partitionFree(ptr); + ptr = PartitionAlloc(allocator.root(), big_size, type_name); + EXPECT_FALSE(bucket->empty_pages_head); + PartitionFree(ptr); // Also check that a page that is bouncing immediately between empty and // used does not get freed. 
for (size_t i = 0; i < kMaxFreeableSpans * 2; ++i) { - ptr = partitionAlloc(allocator.root(), bigSize, typeName); - EXPECT_TRUE(page->freelistHead); - partitionFree(ptr); - EXPECT_TRUE(page->freelistHead); + ptr = PartitionAlloc(allocator.root(), big_size, type_name); + EXPECT_TRUE(page->freelist_head); + PartitionFree(ptr); + EXPECT_TRUE(page->freelist_head); } - EXPECT_EQ(kPartitionPageSize, allocator.root()->totalSizeOfCommittedPages); + EXPECT_EQ(kPartitionPageSize, + allocator.root()->total_size_of_committed_pages); TestShutdown(); } @@ -1204,61 +1219,61 @@ size_t size = kPartitionPageSize - kExtraAllocSize; - void* ptr = partitionAllocGeneric(genericAllocator.root(), size, typeName); + void* ptr = PartitionAllocGeneric(generic_allocator.root(), size, type_name); EXPECT_TRUE(ptr); - void* ptr2 = partitionAllocGeneric(genericAllocator.root(), size, typeName); + void* ptr2 = PartitionAllocGeneric(generic_allocator.root(), size, type_name); EXPECT_TRUE(ptr2); PartitionPage* page = - partitionPointerToPage(partitionCookieFreePointerAdjust(ptr)); + PartitionPointerToPage(PartitionCookieFreePointerAdjust(ptr)); PartitionPage* page2 = - partitionPointerToPage(partitionCookieFreePointerAdjust(ptr2)); + PartitionPointerToPage(PartitionCookieFreePointerAdjust(ptr2)); PartitionBucket* bucket = page->bucket; - EXPECT_EQ(0, bucket->emptyPagesHead); - EXPECT_EQ(-1, page->numAllocatedSlots); - EXPECT_EQ(1, page2->numAllocatedSlots); + EXPECT_EQ(0, bucket->empty_pages_head); + EXPECT_EQ(-1, page->num_allocated_slots); + EXPECT_EQ(1, page2->num_allocated_slots); - partitionFreeGeneric(genericAllocator.root(), ptr); - partitionFreeGeneric(genericAllocator.root(), ptr2); + PartitionFreeGeneric(generic_allocator.root(), ptr); + PartitionFreeGeneric(generic_allocator.root(), ptr2); - EXPECT_TRUE(bucket->emptyPagesHead); - EXPECT_TRUE(bucket->emptyPagesHead->nextPage); - EXPECT_EQ(0, page->numAllocatedSlots); - EXPECT_EQ(0, page2->numAllocatedSlots); - 
EXPECT_TRUE(page->freelistHead); - EXPECT_TRUE(page2->freelistHead); + EXPECT_TRUE(bucket->empty_pages_head); + EXPECT_TRUE(bucket->empty_pages_head->next_page); + EXPECT_EQ(0, page->num_allocated_slots); + EXPECT_EQ(0, page2->num_allocated_slots); + EXPECT_TRUE(page->freelist_head); + EXPECT_TRUE(page2->freelist_head); CycleGenericFreeCache(kTestAllocSize); - EXPECT_FALSE(page->freelistHead); - EXPECT_FALSE(page2->freelistHead); + EXPECT_FALSE(page->freelist_head); + EXPECT_FALSE(page2->freelist_head); - EXPECT_TRUE(bucket->emptyPagesHead); - EXPECT_TRUE(bucket->emptyPagesHead->nextPage); - EXPECT_EQ(&PartitionRootGeneric::gSeedPage, bucket->activePagesHead); + EXPECT_TRUE(bucket->empty_pages_head); + EXPECT_TRUE(bucket->empty_pages_head->next_page); + EXPECT_EQ(&PartitionRootGeneric::gSeedPage, bucket->active_pages_head); // At this moment, we have two decommitted pages, on the empty list. - ptr = partitionAllocGeneric(genericAllocator.root(), size, typeName); + ptr = PartitionAllocGeneric(generic_allocator.root(), size, type_name); EXPECT_TRUE(ptr); - partitionFreeGeneric(genericAllocator.root(), ptr); + PartitionFreeGeneric(generic_allocator.root(), ptr); - EXPECT_EQ(&PartitionRootGeneric::gSeedPage, bucket->activePagesHead); - EXPECT_TRUE(bucket->emptyPagesHead); - EXPECT_TRUE(bucket->decommittedPagesHead); + EXPECT_EQ(&PartitionRootGeneric::gSeedPage, bucket->active_pages_head); + EXPECT_TRUE(bucket->empty_pages_head); + EXPECT_TRUE(bucket->decommitted_pages_head); CycleGenericFreeCache(kTestAllocSize); // We're now set up to trigger a historical bug by scanning over the active // pages list. The current code gets into a different state, but we'll keep // the test as being an interesting corner case. 
- ptr = partitionAllocGeneric(genericAllocator.root(), size, typeName); + ptr = PartitionAllocGeneric(generic_allocator.root(), size, type_name); EXPECT_TRUE(ptr); - partitionFreeGeneric(genericAllocator.root(), ptr); + PartitionFreeGeneric(generic_allocator.root(), ptr); - EXPECT_TRUE(bucket->activePagesHead); - EXPECT_TRUE(bucket->emptyPagesHead); - EXPECT_TRUE(bucket->decommittedPagesHead); + EXPECT_TRUE(bucket->active_pages_head); + EXPECT_TRUE(bucket->empty_pages_head); + EXPECT_TRUE(bucket->decommitted_pages_head); TestShutdown(); } @@ -1281,19 +1296,20 @@ // Work out the number of allocations for 6 GB of memory. const int numAllocations = (6 * 1024 * 1024) / (allocSize / 1024); - void** ptrs = reinterpret_cast<void**>(partitionAllocGeneric( - genericAllocator.root(), numAllocations * sizeof(void*), typeName)); + void** ptrs = reinterpret_cast<void**>(PartitionAllocGeneric( + generic_allocator.root(), numAllocations * sizeof(void*), type_name)); int i; for (i = 0; i < numAllocations; ++i) { - ptrs[i] = partitionAllocGenericFlags( - genericAllocator.root(), PartitionAllocReturnNull, allocSize, typeName); + ptrs[i] = PartitionAllocGenericFlags(generic_allocator.root(), + PartitionAllocReturnNull, allocSize, + type_name); if (!i) EXPECT_TRUE(ptrs[0]); if (!ptrs[i]) { - ptrs[i] = partitionAllocGenericFlags(genericAllocator.root(), + ptrs[i] = PartitionAllocGenericFlags(generic_allocator.root(), PartitionAllocReturnNull, allocSize, - typeName); + type_name); EXPECT_FALSE(ptrs[i]); break; } @@ -1306,14 +1322,15 @@ // Free, reallocate and free again each block we allocated. We do this to // check that freeing memory also works correctly after a failed allocation. 
for (--i; i >= 0; --i) { - partitionFreeGeneric(genericAllocator.root(), ptrs[i]); - ptrs[i] = partitionAllocGenericFlags( - genericAllocator.root(), PartitionAllocReturnNull, allocSize, typeName); + PartitionFreeGeneric(generic_allocator.root(), ptrs[i]); + ptrs[i] = PartitionAllocGenericFlags(generic_allocator.root(), + PartitionAllocReturnNull, allocSize, + type_name); EXPECT_TRUE(ptrs[i]); - partitionFreeGeneric(genericAllocator.root(), ptrs[i]); + PartitionFreeGeneric(generic_allocator.root(), ptrs[i]); } - partitionFreeGeneric(genericAllocator.root(), ptrs); + PartitionFreeGeneric(generic_allocator.root(), ptrs); EXPECT_TRUE(ClearAddressSpaceLimit()); @@ -1362,13 +1379,13 @@ TEST(PartitionAllocDeathTest, LargeAllocs) { TestSetup(); // Largest alloc. - EXPECT_DEATH(partitionAllocGeneric(genericAllocator.root(), - static_cast<size_t>(-1), typeName), + EXPECT_DEATH(PartitionAllocGeneric(generic_allocator.root(), + static_cast<size_t>(-1), type_name), ""); // And the smallest allocation we expect to die. 
EXPECT_DEATH( - partitionAllocGeneric(genericAllocator.root(), - static_cast<size_t>(INT_MAX) + 1, typeName), + PartitionAllocGeneric(generic_allocator.root(), + static_cast<size_t>(INT_MAX) + 1, type_name), ""); TestShutdown(); @@ -1378,12 +1395,12 @@ TEST(PartitionAllocDeathTest, ImmediateDoubleFree) { TestSetup(); - void* ptr = - partitionAllocGeneric(genericAllocator.root(), kTestAllocSize, typeName); + void* ptr = PartitionAllocGeneric(generic_allocator.root(), kTestAllocSize, + type_name); EXPECT_TRUE(ptr); - partitionFreeGeneric(genericAllocator.root(), ptr); + PartitionFreeGeneric(generic_allocator.root(), ptr); - EXPECT_DEATH(partitionFreeGeneric(genericAllocator.root(), ptr), ""); + EXPECT_DEATH(PartitionFreeGeneric(generic_allocator.root(), ptr), ""); TestShutdown(); } @@ -1392,18 +1409,18 @@ TEST(PartitionAllocDeathTest, RefcountDoubleFree) { TestSetup(); - void* ptr = - partitionAllocGeneric(genericAllocator.root(), kTestAllocSize, typeName); + void* ptr = PartitionAllocGeneric(generic_allocator.root(), kTestAllocSize, + type_name); EXPECT_TRUE(ptr); - void* ptr2 = - partitionAllocGeneric(genericAllocator.root(), kTestAllocSize, typeName); + void* ptr2 = PartitionAllocGeneric(generic_allocator.root(), kTestAllocSize, + type_name); EXPECT_TRUE(ptr2); - partitionFreeGeneric(genericAllocator.root(), ptr); - partitionFreeGeneric(genericAllocator.root(), ptr2); + PartitionFreeGeneric(generic_allocator.root(), ptr); + PartitionFreeGeneric(generic_allocator.root(), ptr2); // This is not an immediate double-free so our immediate detection won't // fire. However, it does take the "refcount" of the partition page to -1, // which is illegal and should be trapped. 
- EXPECT_DEATH(partitionFreeGeneric(genericAllocator.root(), ptr), ""); + EXPECT_DEATH(PartitionFreeGeneric(generic_allocator.root(), ptr), ""); TestShutdown(); } @@ -1412,17 +1429,17 @@ TEST(PartitionAllocDeathTest, GuardPages) { TestSetup(); -// partitionAlloc adds kPartitionPageSize to the requested size +// PartitionAlloc adds kPartitionPageSize to the requested size // (for metadata), and then rounds that size to kPageAllocationGranularity. // To be able to reliably write one past a direct allocation, choose a size // that's // a) larger than kGenericMaxBucketed (to make the allocation direct) // b) aligned at kPageAllocationGranularity boundaries after // kPartitionPageSize has been added to it. -// (On 32-bit, partitionAlloc adds another kSystemPageSize to the +// (On 32-bit, PartitionAlloc adds another kSystemPageSize to the // allocation size before rounding, but there it marks the memory right // after size as inaccessible, so it's fine to write 1 past the size we -// hand to partitionAlloc and we don't need to worry about allocation +// hand to PartitionAlloc and we don't need to worry about allocation // granularities.) 
#define ALIGN(N, A) (((N) + (A)-1) / (A) * (A)) const int kSize = ALIGN(kGenericMaxBucketed + 1 + kPartitionPageSize, @@ -1432,7 +1449,7 @@ static_assert(kSize > kGenericMaxBucketed, "allocation not large enough for direct allocation"); size_t size = kSize - kExtraAllocSize; - void* ptr = partitionAllocGeneric(genericAllocator.root(), size, typeName); + void* ptr = PartitionAllocGeneric(generic_allocator.root(), size, type_name); EXPECT_TRUE(ptr); char* charPtr = reinterpret_cast<char*>(ptr) - kPointerOffset; @@ -1440,7 +1457,7 @@ EXPECT_DEATH(*(charPtr - 1) = 'A', ""); EXPECT_DEATH(*(charPtr + size + kExtraAllocSize) = 'A', ""); - partitionFreeGeneric(genericAllocator.root(), ptr); + PartitionFreeGeneric(generic_allocator.root(), ptr); TestShutdown(); } @@ -1452,290 +1469,291 @@ // This large size will result in a direct mapped allocation with guard // pages at either end. - void* ptr = partitionAllocGeneric(genericAllocator.root(), - kPartitionPageSize * 2, typeName); + void* ptr = PartitionAllocGeneric(generic_allocator.root(), + kPartitionPageSize * 2, type_name); EXPECT_TRUE(ptr); char* badPtr = reinterpret_cast<char*>(ptr) + kPartitionPageSize; - EXPECT_DEATH(partitionFreeGeneric(genericAllocator.root(), badPtr), ""); + EXPECT_DEATH(PartitionFreeGeneric(generic_allocator.root(), badPtr), ""); - partitionFreeGeneric(genericAllocator.root(), ptr); + PartitionFreeGeneric(generic_allocator.root(), ptr); TestShutdown(); } #endif // !defined(OS_ANDROID) && !defined(OS_IOS) -// Tests that partitionDumpStatsGeneric and partitionDumpStats runs without +// Tests that PartitionDumpStatsGeneric and PartitionDumpStats runs without // crashing and returns non zero values when memory is allocated. 
TEST(PartitionAllocTest, DumpMemoryStats) { TestSetup(); { - void* ptr = partitionAlloc(allocator.root(), kTestAllocSize, typeName); + void* ptr = PartitionAlloc(allocator.root(), kTestAllocSize, type_name); MockPartitionStatsDumper mockStatsDumper; - partitionDumpStats(allocator.root(), "mock_allocator", + PartitionDumpStats(allocator.root(), "mock_allocator", false /* detailed dump */, &mockStatsDumper); EXPECT_TRUE(mockStatsDumper.IsMemoryAllocationRecorded()); - partitionFree(ptr); + PartitionFree(ptr); } // This series of tests checks the active -> empty -> decommitted states. { - void* genericPtr = partitionAllocGeneric(genericAllocator.root(), - 2048 - kExtraAllocSize, typeName); + void* genericPtr = PartitionAllocGeneric(generic_allocator.root(), + 2048 - kExtraAllocSize, type_name); { - MockPartitionStatsDumper mockStatsDumperGeneric; - partitionDumpStatsGeneric( - genericAllocator.root(), "mock_generic_allocator", - false /* detailed dump */, &mockStatsDumperGeneric); - EXPECT_TRUE(mockStatsDumperGeneric.IsMemoryAllocationRecorded()); + MockPartitionStatsDumper dumper; + PartitionDumpStatsGeneric(generic_allocator.root(), + "mock_generic_allocator", + false /* detailed dump */, &dumper); + EXPECT_TRUE(dumper.IsMemoryAllocationRecorded()); - const PartitionBucketMemoryStats* stats = - mockStatsDumperGeneric.GetBucketStats(2048); + const PartitionBucketMemoryStats* stats = dumper.GetBucketStats(2048); EXPECT_TRUE(stats); - EXPECT_TRUE(stats->isValid); - EXPECT_EQ(2048u, stats->bucketSlotSize); - EXPECT_EQ(2048u, stats->activeBytes); - EXPECT_EQ(kSystemPageSize, stats->residentBytes); - EXPECT_EQ(0u, stats->decommittableBytes); - EXPECT_EQ(0u, stats->discardableBytes); - EXPECT_EQ(0u, stats->numFullPages); - EXPECT_EQ(1u, stats->numActivePages); - EXPECT_EQ(0u, stats->numEmptyPages); - EXPECT_EQ(0u, stats->numDecommittedPages); + EXPECT_TRUE(stats->is_valid); + EXPECT_EQ(2048u, stats->bucket_slot_size); + EXPECT_EQ(2048u, stats->active_bytes); + 
EXPECT_EQ(kSystemPageSize, stats->resident_bytes); + EXPECT_EQ(0u, stats->decommittable_bytes); + EXPECT_EQ(0u, stats->discardable_bytes); + EXPECT_EQ(0u, stats->num_full_pages); + EXPECT_EQ(1u, stats->num_active_pages); + EXPECT_EQ(0u, stats->num_empty_pages); + EXPECT_EQ(0u, stats->num_decommitted_pages); } - partitionFreeGeneric(genericAllocator.root(), genericPtr); + PartitionFreeGeneric(generic_allocator.root(), genericPtr); { - MockPartitionStatsDumper mockStatsDumperGeneric; - partitionDumpStatsGeneric( - genericAllocator.root(), "mock_generic_allocator", - false /* detailed dump */, &mockStatsDumperGeneric); - EXPECT_FALSE(mockStatsDumperGeneric.IsMemoryAllocationRecorded()); + MockPartitionStatsDumper dumper; + PartitionDumpStatsGeneric(generic_allocator.root(), + "mock_generic_allocator", + false /* detailed dump */, &dumper); + EXPECT_FALSE(dumper.IsMemoryAllocationRecorded()); - const PartitionBucketMemoryStats* stats = - mockStatsDumperGeneric.GetBucketStats(2048); + const PartitionBucketMemoryStats* stats = dumper.GetBucketStats(2048); EXPECT_TRUE(stats); - EXPECT_TRUE(stats->isValid); - EXPECT_EQ(2048u, stats->bucketSlotSize); - EXPECT_EQ(0u, stats->activeBytes); - EXPECT_EQ(kSystemPageSize, stats->residentBytes); - EXPECT_EQ(kSystemPageSize, stats->decommittableBytes); - EXPECT_EQ(0u, stats->discardableBytes); - EXPECT_EQ(0u, stats->numFullPages); - EXPECT_EQ(0u, stats->numActivePages); - EXPECT_EQ(1u, stats->numEmptyPages); - EXPECT_EQ(0u, stats->numDecommittedPages); + EXPECT_TRUE(stats->is_valid); + EXPECT_EQ(2048u, stats->bucket_slot_size); + EXPECT_EQ(0u, stats->active_bytes); + EXPECT_EQ(kSystemPageSize, stats->resident_bytes); + EXPECT_EQ(kSystemPageSize, stats->decommittable_bytes); + EXPECT_EQ(0u, stats->discardable_bytes); + EXPECT_EQ(0u, stats->num_full_pages); + EXPECT_EQ(0u, stats->num_active_pages); + EXPECT_EQ(1u, stats->num_empty_pages); + EXPECT_EQ(0u, stats->num_decommitted_pages); } CycleGenericFreeCache(kTestAllocSize); { - 
MockPartitionStatsDumper mockStatsDumperGeneric; - partitionDumpStatsGeneric( - genericAllocator.root(), "mock_generic_allocator", - false /* detailed dump */, &mockStatsDumperGeneric); - EXPECT_FALSE(mockStatsDumperGeneric.IsMemoryAllocationRecorded()); + MockPartitionStatsDumper dumper; + PartitionDumpStatsGeneric(generic_allocator.root(), + "mock_generic_allocator", + false /* detailed dump */, &dumper); + EXPECT_FALSE(dumper.IsMemoryAllocationRecorded()); - const PartitionBucketMemoryStats* stats = - mockStatsDumperGeneric.GetBucketStats(2048); + const PartitionBucketMemoryStats* stats = dumper.GetBucketStats(2048); EXPECT_TRUE(stats); - EXPECT_TRUE(stats->isValid); - EXPECT_EQ(2048u, stats->bucketSlotSize); - EXPECT_EQ(0u, stats->activeBytes); - EXPECT_EQ(0u, stats->residentBytes); - EXPECT_EQ(0u, stats->decommittableBytes); - EXPECT_EQ(0u, stats->discardableBytes); - EXPECT_EQ(0u, stats->numFullPages); - EXPECT_EQ(0u, stats->numActivePages); - EXPECT_EQ(0u, stats->numEmptyPages); - EXPECT_EQ(1u, stats->numDecommittedPages); + EXPECT_TRUE(stats->is_valid); + EXPECT_EQ(2048u, stats->bucket_slot_size); + EXPECT_EQ(0u, stats->active_bytes); + EXPECT_EQ(0u, stats->resident_bytes); + EXPECT_EQ(0u, stats->decommittable_bytes); + EXPECT_EQ(0u, stats->discardable_bytes); + EXPECT_EQ(0u, stats->num_full_pages); + EXPECT_EQ(0u, stats->num_active_pages); + EXPECT_EQ(0u, stats->num_empty_pages); + EXPECT_EQ(1u, stats->num_decommitted_pages); } } // This test checks for correct empty page list accounting. 
{ size_t size = kPartitionPageSize - kExtraAllocSize; - void* ptr1 = partitionAllocGeneric(genericAllocator.root(), size, typeName); - void* ptr2 = partitionAllocGeneric(genericAllocator.root(), size, typeName); - partitionFreeGeneric(genericAllocator.root(), ptr1); - partitionFreeGeneric(genericAllocator.root(), ptr2); + void* ptr1 = + PartitionAllocGeneric(generic_allocator.root(), size, type_name); + void* ptr2 = + PartitionAllocGeneric(generic_allocator.root(), size, type_name); + PartitionFreeGeneric(generic_allocator.root(), ptr1); + PartitionFreeGeneric(generic_allocator.root(), ptr2); CycleGenericFreeCache(kTestAllocSize); - ptr1 = partitionAllocGeneric(genericAllocator.root(), size, typeName); + ptr1 = PartitionAllocGeneric(generic_allocator.root(), size, type_name); { - MockPartitionStatsDumper mockStatsDumperGeneric; - partitionDumpStatsGeneric( - genericAllocator.root(), "mock_generic_allocator", - false /* detailed dump */, &mockStatsDumperGeneric); - EXPECT_TRUE(mockStatsDumperGeneric.IsMemoryAllocationRecorded()); + MockPartitionStatsDumper dumper; + PartitionDumpStatsGeneric(generic_allocator.root(), + "mock_generic_allocator", + false /* detailed dump */, &dumper); + EXPECT_TRUE(dumper.IsMemoryAllocationRecorded()); const PartitionBucketMemoryStats* stats = - mockStatsDumperGeneric.GetBucketStats(kPartitionPageSize); + dumper.GetBucketStats(kPartitionPageSize); EXPECT_TRUE(stats); - EXPECT_TRUE(stats->isValid); - EXPECT_EQ(kPartitionPageSize, stats->bucketSlotSize); - EXPECT_EQ(kPartitionPageSize, stats->activeBytes); - EXPECT_EQ(kPartitionPageSize, stats->residentBytes); - EXPECT_EQ(0u, stats->decommittableBytes); - EXPECT_EQ(0u, stats->discardableBytes); - EXPECT_EQ(1u, stats->numFullPages); - EXPECT_EQ(0u, stats->numActivePages); - EXPECT_EQ(0u, stats->numEmptyPages); - EXPECT_EQ(1u, stats->numDecommittedPages); + EXPECT_TRUE(stats->is_valid); + EXPECT_EQ(kPartitionPageSize, stats->bucket_slot_size); + EXPECT_EQ(kPartitionPageSize, 
stats->active_bytes); + EXPECT_EQ(kPartitionPageSize, stats->resident_bytes); + EXPECT_EQ(0u, stats->decommittable_bytes); + EXPECT_EQ(0u, stats->discardable_bytes); + EXPECT_EQ(1u, stats->num_full_pages); + EXPECT_EQ(0u, stats->num_active_pages); + EXPECT_EQ(0u, stats->num_empty_pages); + EXPECT_EQ(1u, stats->num_decommitted_pages); } - partitionFreeGeneric(genericAllocator.root(), ptr1); + PartitionFreeGeneric(generic_allocator.root(), ptr1); } // This test checks for correct direct mapped accounting. { - size_t sizeSmaller = kGenericMaxBucketed + 1; - size_t sizeBigger = (kGenericMaxBucketed * 2) + 1; - size_t realSizeSmaller = - (sizeSmaller + kSystemPageOffsetMask) & kSystemPageBaseMask; - size_t realSizeBigger = - (sizeBigger + kSystemPageOffsetMask) & kSystemPageBaseMask; - void* ptr = - partitionAllocGeneric(genericAllocator.root(), sizeSmaller, typeName); + size_t size_smaller = kGenericMaxBucketed + 1; + size_t size_bigger = (kGenericMaxBucketed * 2) + 1; + size_t real_size_smaller = + (size_smaller + kSystemPageOffsetMask) & kSystemPageBaseMask; + size_t real_size_bigger = + (size_bigger + kSystemPageOffsetMask) & kSystemPageBaseMask; + void* ptr = PartitionAllocGeneric(generic_allocator.root(), size_smaller, + type_name); void* ptr2 = - partitionAllocGeneric(genericAllocator.root(), sizeBigger, typeName); + PartitionAllocGeneric(generic_allocator.root(), size_bigger, type_name); { - MockPartitionStatsDumper mockStatsDumperGeneric; - partitionDumpStatsGeneric( - genericAllocator.root(), "mock_generic_allocator", - false /* detailed dump */, &mockStatsDumperGeneric); - EXPECT_TRUE(mockStatsDumperGeneric.IsMemoryAllocationRecorded()); + MockPartitionStatsDumper dumper; + PartitionDumpStatsGeneric(generic_allocator.root(), + "mock_generic_allocator", + false /* detailed dump */, &dumper); + EXPECT_TRUE(dumper.IsMemoryAllocationRecorded()); const PartitionBucketMemoryStats* stats = - mockStatsDumperGeneric.GetBucketStats(realSizeSmaller); + 
dumper.GetBucketStats(real_size_smaller); EXPECT_TRUE(stats); - EXPECT_TRUE(stats->isValid); - EXPECT_TRUE(stats->isDirectMap); - EXPECT_EQ(realSizeSmaller, stats->bucketSlotSize); - EXPECT_EQ(realSizeSmaller, stats->activeBytes); - EXPECT_EQ(realSizeSmaller, stats->residentBytes); - EXPECT_EQ(0u, stats->decommittableBytes); - EXPECT_EQ(0u, stats->discardableBytes); - EXPECT_EQ(1u, stats->numFullPages); - EXPECT_EQ(0u, stats->numActivePages); - EXPECT_EQ(0u, stats->numEmptyPages); - EXPECT_EQ(0u, stats->numDecommittedPages); + EXPECT_TRUE(stats->is_valid); + EXPECT_TRUE(stats->is_direct_map); + EXPECT_EQ(real_size_smaller, stats->bucket_slot_size); + EXPECT_EQ(real_size_smaller, stats->active_bytes); + EXPECT_EQ(real_size_smaller, stats->resident_bytes); + EXPECT_EQ(0u, stats->decommittable_bytes); + EXPECT_EQ(0u, stats->discardable_bytes); + EXPECT_EQ(1u, stats->num_full_pages); + EXPECT_EQ(0u, stats->num_active_pages); + EXPECT_EQ(0u, stats->num_empty_pages); + EXPECT_EQ(0u, stats->num_decommitted_pages); - stats = mockStatsDumperGeneric.GetBucketStats(realSizeBigger); + stats = dumper.GetBucketStats(real_size_bigger); EXPECT_TRUE(stats); - EXPECT_TRUE(stats->isValid); - EXPECT_TRUE(stats->isDirectMap); - EXPECT_EQ(realSizeBigger, stats->bucketSlotSize); - EXPECT_EQ(realSizeBigger, stats->activeBytes); - EXPECT_EQ(realSizeBigger, stats->residentBytes); - EXPECT_EQ(0u, stats->decommittableBytes); - EXPECT_EQ(0u, stats->discardableBytes); - EXPECT_EQ(1u, stats->numFullPages); - EXPECT_EQ(0u, stats->numActivePages); - EXPECT_EQ(0u, stats->numEmptyPages); - EXPECT_EQ(0u, stats->numDecommittedPages); + EXPECT_TRUE(stats->is_valid); + EXPECT_TRUE(stats->is_direct_map); + EXPECT_EQ(real_size_bigger, stats->bucket_slot_size); + EXPECT_EQ(real_size_bigger, stats->active_bytes); + EXPECT_EQ(real_size_bigger, stats->resident_bytes); + EXPECT_EQ(0u, stats->decommittable_bytes); + EXPECT_EQ(0u, stats->discardable_bytes); + EXPECT_EQ(1u, stats->num_full_pages); + EXPECT_EQ(0u, 
stats->num_active_pages); + EXPECT_EQ(0u, stats->num_empty_pages); + EXPECT_EQ(0u, stats->num_decommitted_pages); } - partitionFreeGeneric(genericAllocator.root(), ptr2); - partitionFreeGeneric(genericAllocator.root(), ptr); + PartitionFreeGeneric(generic_allocator.root(), ptr2); + PartitionFreeGeneric(generic_allocator.root(), ptr); // Whilst we're here, allocate again and free with different ordering // to give a workout to our linked list code. - ptr = partitionAllocGeneric(genericAllocator.root(), sizeSmaller, typeName); - ptr2 = partitionAllocGeneric(genericAllocator.root(), sizeBigger, typeName); - partitionFreeGeneric(genericAllocator.root(), ptr); - partitionFreeGeneric(genericAllocator.root(), ptr2); + ptr = PartitionAllocGeneric(generic_allocator.root(), size_smaller, + type_name); + ptr2 = + PartitionAllocGeneric(generic_allocator.root(), size_bigger, type_name); + PartitionFreeGeneric(generic_allocator.root(), ptr); + PartitionFreeGeneric(generic_allocator.root(), ptr2); } // This test checks large-but-not-quite-direct allocations. 
{ void* ptr = - partitionAllocGeneric(genericAllocator.root(), 65536 + 1, typeName); + PartitionAllocGeneric(generic_allocator.root(), 65536 + 1, type_name); { - MockPartitionStatsDumper mockStatsDumperGeneric; - partitionDumpStatsGeneric( - genericAllocator.root(), "mock_generic_allocator", - false /* detailed dump */, &mockStatsDumperGeneric); - EXPECT_TRUE(mockStatsDumperGeneric.IsMemoryAllocationRecorded()); + MockPartitionStatsDumper dumper; + PartitionDumpStatsGeneric(generic_allocator.root(), + "mock_generic_allocator", + false /* detailed dump */, &dumper); + EXPECT_TRUE(dumper.IsMemoryAllocationRecorded()); - size_t slotSize = 65536 + (65536 / kGenericNumBucketsPerOrder); + size_t slot_size = 65536 + (65536 / kGenericNumBucketsPerOrder); const PartitionBucketMemoryStats* stats = - mockStatsDumperGeneric.GetBucketStats(slotSize); + dumper.GetBucketStats(slot_size); EXPECT_TRUE(stats); - EXPECT_TRUE(stats->isValid); - EXPECT_FALSE(stats->isDirectMap); - EXPECT_EQ(slotSize, stats->bucketSlotSize); - EXPECT_EQ(65536u + 1 + kExtraAllocSize, stats->activeBytes); - EXPECT_EQ(slotSize, stats->residentBytes); - EXPECT_EQ(0u, stats->decommittableBytes); - EXPECT_EQ(kSystemPageSize, stats->discardableBytes); - EXPECT_EQ(1u, stats->numFullPages); - EXPECT_EQ(0u, stats->numActivePages); - EXPECT_EQ(0u, stats->numEmptyPages); - EXPECT_EQ(0u, stats->numDecommittedPages); + EXPECT_TRUE(stats->is_valid); + EXPECT_FALSE(stats->is_direct_map); + EXPECT_EQ(slot_size, stats->bucket_slot_size); + EXPECT_EQ(65536u + 1 + kExtraAllocSize, stats->active_bytes); + EXPECT_EQ(slot_size, stats->resident_bytes); + EXPECT_EQ(0u, stats->decommittable_bytes); + EXPECT_EQ(kSystemPageSize, stats->discardable_bytes); + EXPECT_EQ(1u, stats->num_full_pages); + EXPECT_EQ(0u, stats->num_active_pages); + EXPECT_EQ(0u, stats->num_empty_pages); + EXPECT_EQ(0u, stats->num_decommitted_pages); } - partitionFreeGeneric(genericAllocator.root(), ptr); + PartitionFreeGeneric(generic_allocator.root(), ptr); 
{ - MockPartitionStatsDumper mockStatsDumperGeneric; - partitionDumpStatsGeneric( - genericAllocator.root(), "mock_generic_allocator", - false /* detailed dump */, &mockStatsDumperGeneric); - EXPECT_FALSE(mockStatsDumperGeneric.IsMemoryAllocationRecorded()); + MockPartitionStatsDumper dumper; + PartitionDumpStatsGeneric(generic_allocator.root(), + "mock_generic_allocator", + false /* detailed dump */, &dumper); + EXPECT_FALSE(dumper.IsMemoryAllocationRecorded()); - size_t slotSize = 65536 + (65536 / kGenericNumBucketsPerOrder); + size_t slot_size = 65536 + (65536 / kGenericNumBucketsPerOrder); const PartitionBucketMemoryStats* stats = - mockStatsDumperGeneric.GetBucketStats(slotSize); + dumper.GetBucketStats(slot_size); EXPECT_TRUE(stats); - EXPECT_TRUE(stats->isValid); - EXPECT_FALSE(stats->isDirectMap); - EXPECT_EQ(slotSize, stats->bucketSlotSize); - EXPECT_EQ(0u, stats->activeBytes); - EXPECT_EQ(slotSize, stats->residentBytes); - EXPECT_EQ(slotSize, stats->decommittableBytes); - EXPECT_EQ(0u, stats->numFullPages); - EXPECT_EQ(0u, stats->numActivePages); - EXPECT_EQ(1u, stats->numEmptyPages); - EXPECT_EQ(0u, stats->numDecommittedPages); + EXPECT_TRUE(stats->is_valid); + EXPECT_FALSE(stats->is_direct_map); + EXPECT_EQ(slot_size, stats->bucket_slot_size); + EXPECT_EQ(0u, stats->active_bytes); + EXPECT_EQ(slot_size, stats->resident_bytes); + EXPECT_EQ(slot_size, stats->decommittable_bytes); + EXPECT_EQ(0u, stats->num_full_pages); + EXPECT_EQ(0u, stats->num_active_pages); + EXPECT_EQ(1u, stats->num_empty_pages); + EXPECT_EQ(0u, stats->num_decommitted_pages); } - void* ptr2 = partitionAllocGeneric(genericAllocator.root(), - 65536 + kSystemPageSize + 1, typeName); + void* ptr2 = PartitionAllocGeneric(generic_allocator.root(), + 65536 + kSystemPageSize + 1, type_name); EXPECT_EQ(ptr, ptr2); { - MockPartitionStatsDumper mockStatsDumperGeneric; - partitionDumpStatsGeneric( - genericAllocator.root(), "mock_generic_allocator", - false /* detailed dump */, 
&mockStatsDumperGeneric); - EXPECT_TRUE(mockStatsDumperGeneric.IsMemoryAllocationRecorded()); + MockPartitionStatsDumper dumper; + PartitionDumpStatsGeneric(generic_allocator.root(), + "mock_generic_allocator", + false /* detailed dump */, &dumper); + EXPECT_TRUE(dumper.IsMemoryAllocationRecorded()); - size_t slotSize = 65536 + (65536 / kGenericNumBucketsPerOrder); + size_t slot_size = 65536 + (65536 / kGenericNumBucketsPerOrder); const PartitionBucketMemoryStats* stats = - mockStatsDumperGeneric.GetBucketStats(slotSize); + dumper.GetBucketStats(slot_size); EXPECT_TRUE(stats); - EXPECT_TRUE(stats->isValid); - EXPECT_FALSE(stats->isDirectMap); - EXPECT_EQ(slotSize, stats->bucketSlotSize); + EXPECT_TRUE(stats->is_valid); + EXPECT_FALSE(stats->is_direct_map); + EXPECT_EQ(slot_size, stats->bucket_slot_size); EXPECT_EQ(65536u + kSystemPageSize + 1 + kExtraAllocSize, - stats->activeBytes); - EXPECT_EQ(slotSize, stats->residentBytes); - EXPECT_EQ(0u, stats->decommittableBytes); - EXPECT_EQ(0u, stats->discardableBytes); - EXPECT_EQ(1u, stats->numFullPages); - EXPECT_EQ(0u, stats->numActivePages); - EXPECT_EQ(0u, stats->numEmptyPages); - EXPECT_EQ(0u, stats->numDecommittedPages); + stats->active_bytes); + EXPECT_EQ(slot_size, stats->resident_bytes); + EXPECT_EQ(0u, stats->decommittable_bytes); + EXPECT_EQ(0u, stats->discardable_bytes); + EXPECT_EQ(1u, stats->num_full_pages); + EXPECT_EQ(0u, stats->num_active_pages); + EXPECT_EQ(0u, stats->num_empty_pages); + EXPECT_EQ(0u, stats->num_decommitted_pages); } - partitionFreeGeneric(genericAllocator.root(), ptr2); + PartitionFreeGeneric(generic_allocator.root(), ptr2); } TestShutdown(); @@ -1745,48 +1763,46 @@ TEST(PartitionAllocTest, Purge) { TestSetup(); - char* ptr = reinterpret_cast<char*>(partitionAllocGeneric( - genericAllocator.root(), 2048 - kExtraAllocSize, typeName)); - partitionFreeGeneric(genericAllocator.root(), ptr); + char* ptr = reinterpret_cast<char*>(PartitionAllocGeneric( + generic_allocator.root(), 2048 - 
kExtraAllocSize, type_name)); + PartitionFreeGeneric(generic_allocator.root(), ptr); { - MockPartitionStatsDumper mockStatsDumperGeneric; - partitionDumpStatsGeneric(genericAllocator.root(), "mock_generic_allocator", - false /* detailed dump */, - &mockStatsDumperGeneric); - EXPECT_FALSE(mockStatsDumperGeneric.IsMemoryAllocationRecorded()); + MockPartitionStatsDumper dumper; + PartitionDumpStatsGeneric(generic_allocator.root(), + "mock_generic_allocator", + false /* detailed dump */, &dumper); + EXPECT_FALSE(dumper.IsMemoryAllocationRecorded()); - const PartitionBucketMemoryStats* stats = - mockStatsDumperGeneric.GetBucketStats(2048); + const PartitionBucketMemoryStats* stats = dumper.GetBucketStats(2048); EXPECT_TRUE(stats); - EXPECT_TRUE(stats->isValid); - EXPECT_EQ(kSystemPageSize, stats->decommittableBytes); - EXPECT_EQ(kSystemPageSize, stats->residentBytes); + EXPECT_TRUE(stats->is_valid); + EXPECT_EQ(kSystemPageSize, stats->decommittable_bytes); + EXPECT_EQ(kSystemPageSize, stats->resident_bytes); } - partitionPurgeMemoryGeneric(genericAllocator.root(), + PartitionPurgeMemoryGeneric(generic_allocator.root(), PartitionPurgeDecommitEmptyPages); { - MockPartitionStatsDumper mockStatsDumperGeneric; - partitionDumpStatsGeneric(genericAllocator.root(), "mock_generic_allocator", - false /* detailed dump */, - &mockStatsDumperGeneric); - EXPECT_FALSE(mockStatsDumperGeneric.IsMemoryAllocationRecorded()); + MockPartitionStatsDumper dumper; + PartitionDumpStatsGeneric(generic_allocator.root(), + "mock_generic_allocator", + false /* detailed dump */, &dumper); + EXPECT_FALSE(dumper.IsMemoryAllocationRecorded()); - const PartitionBucketMemoryStats* stats = - mockStatsDumperGeneric.GetBucketStats(2048); + const PartitionBucketMemoryStats* stats = dumper.GetBucketStats(2048); EXPECT_TRUE(stats); - EXPECT_TRUE(stats->isValid); - EXPECT_EQ(0u, stats->decommittableBytes); - EXPECT_EQ(0u, stats->residentBytes); + EXPECT_TRUE(stats->is_valid); + EXPECT_EQ(0u, 
stats->decommittable_bytes); + EXPECT_EQ(0u, stats->resident_bytes); } // Calling purge again here is a good way of testing we didn't mess up the // state of the free cache ring. - partitionPurgeMemoryGeneric(genericAllocator.root(), + PartitionPurgeMemoryGeneric(generic_allocator.root(), PartitionPurgeDecommitEmptyPages); char* bigPtr = reinterpret_cast<char*>( - partitionAllocGeneric(genericAllocator.root(), 256 * 1024, typeName)); - partitionFreeGeneric(genericAllocator.root(), bigPtr); - partitionPurgeMemoryGeneric(genericAllocator.root(), + PartitionAllocGeneric(generic_allocator.root(), 256 * 1024, type_name)); + PartitionFreeGeneric(generic_allocator.root(), bigPtr); + PartitionPurgeMemoryGeneric(generic_allocator.root(), PartitionPurgeDecommitEmptyPages); CheckPageInCore(ptr - kPointerOffset, false); @@ -1805,47 +1821,47 @@ // Allocate 3 full slot spans worth of 8192-byte allocations. // Each slot span for this size is 16384 bytes, or 1 partition page and 2 // slots. - void* ptr1 = partitionAllocGeneric(genericAllocator.root(), size, typeName); - void* ptr2 = partitionAllocGeneric(genericAllocator.root(), size, typeName); - void* ptr3 = partitionAllocGeneric(genericAllocator.root(), size, typeName); - void* ptr4 = partitionAllocGeneric(genericAllocator.root(), size, typeName); - void* ptr5 = partitionAllocGeneric(genericAllocator.root(), size, typeName); - void* ptr6 = partitionAllocGeneric(genericAllocator.root(), size, typeName); + void* ptr1 = PartitionAllocGeneric(generic_allocator.root(), size, type_name); + void* ptr2 = PartitionAllocGeneric(generic_allocator.root(), size, type_name); + void* ptr3 = PartitionAllocGeneric(generic_allocator.root(), size, type_name); + void* ptr4 = PartitionAllocGeneric(generic_allocator.root(), size, type_name); + void* ptr5 = PartitionAllocGeneric(generic_allocator.root(), size, type_name); + void* ptr6 = PartitionAllocGeneric(generic_allocator.root(), size, type_name); PartitionPage* page1 = - 
partitionPointerToPage(partitionCookieFreePointerAdjust(ptr1)); + PartitionPointerToPage(PartitionCookieFreePointerAdjust(ptr1)); PartitionPage* page2 = - partitionPointerToPage(partitionCookieFreePointerAdjust(ptr3)); + PartitionPointerToPage(PartitionCookieFreePointerAdjust(ptr3)); PartitionPage* page3 = - partitionPointerToPage(partitionCookieFreePointerAdjust(ptr6)); + PartitionPointerToPage(PartitionCookieFreePointerAdjust(ptr6)); EXPECT_NE(page1, page2); EXPECT_NE(page2, page3); PartitionBucket* bucket = page1->bucket; - EXPECT_EQ(page3, bucket->activePagesHead); + EXPECT_EQ(page3, bucket->active_pages_head); // Free up the 2nd slot in each slot span. // This leaves the active list containing 3 pages, each with 1 used and 1 // free slot. The active page will be the one containing ptr1. - partitionFreeGeneric(genericAllocator.root(), ptr6); - partitionFreeGeneric(genericAllocator.root(), ptr4); - partitionFreeGeneric(genericAllocator.root(), ptr2); - EXPECT_EQ(page1, bucket->activePagesHead); + PartitionFreeGeneric(generic_allocator.root(), ptr6); + PartitionFreeGeneric(generic_allocator.root(), ptr4); + PartitionFreeGeneric(generic_allocator.root(), ptr2); + EXPECT_EQ(page1, bucket->active_pages_head); // Empty the middle page in the active list. - partitionFreeGeneric(genericAllocator.root(), ptr3); - EXPECT_EQ(page1, bucket->activePagesHead); + PartitionFreeGeneric(generic_allocator.root(), ptr3); + EXPECT_EQ(page1, bucket->active_pages_head); // Empty the the first page in the active list -- also the current page. - partitionFreeGeneric(genericAllocator.root(), ptr1); + PartitionFreeGeneric(generic_allocator.root(), ptr1); // A good choice here is to re-fill the third page since the first two are // empty. We used to fail that. 
- void* ptr7 = partitionAllocGeneric(genericAllocator.root(), size, typeName); + void* ptr7 = PartitionAllocGeneric(generic_allocator.root(), size, type_name); EXPECT_EQ(ptr6, ptr7); - EXPECT_EQ(page3, bucket->activePagesHead); + EXPECT_EQ(page3, bucket->active_pages_head); - partitionFreeGeneric(genericAllocator.root(), ptr5); - partitionFreeGeneric(genericAllocator.root(), ptr7); + PartitionFreeGeneric(generic_allocator.root(), ptr5); + PartitionFreeGeneric(generic_allocator.root(), ptr7); TestShutdown(); } @@ -1856,103 +1872,104 @@ // Free the second of two 4096 byte allocations and then purge. { - void* ptr1 = partitionAllocGeneric( - genericAllocator.root(), kSystemPageSize - kExtraAllocSize, typeName); - char* ptr2 = reinterpret_cast<char*>(partitionAllocGeneric( - genericAllocator.root(), kSystemPageSize - kExtraAllocSize, typeName)); - partitionFreeGeneric(genericAllocator.root(), ptr2); + void* ptr1 = PartitionAllocGeneric( + generic_allocator.root(), kSystemPageSize - kExtraAllocSize, type_name); + char* ptr2 = reinterpret_cast<char*>( + PartitionAllocGeneric(generic_allocator.root(), + kSystemPageSize - kExtraAllocSize, type_name)); + PartitionFreeGeneric(generic_allocator.root(), ptr2); PartitionPage* page = - partitionPointerToPage(partitionCookieFreePointerAdjust(ptr1)); - EXPECT_EQ(2u, page->numUnprovisionedSlots); + PartitionPointerToPage(PartitionCookieFreePointerAdjust(ptr1)); + EXPECT_EQ(2u, page->num_unprovisioned_slots); { - MockPartitionStatsDumper mockStatsDumperGeneric; - partitionDumpStatsGeneric( - genericAllocator.root(), "mock_generic_allocator", - false /* detailed dump */, &mockStatsDumperGeneric); - EXPECT_TRUE(mockStatsDumperGeneric.IsMemoryAllocationRecorded()); + MockPartitionStatsDumper dumper; + PartitionDumpStatsGeneric(generic_allocator.root(), + "mock_generic_allocator", + false /* detailed dump */, &dumper); + EXPECT_TRUE(dumper.IsMemoryAllocationRecorded()); const PartitionBucketMemoryStats* stats = - 
mockStatsDumperGeneric.GetBucketStats(kSystemPageSize); + dumper.GetBucketStats(kSystemPageSize); EXPECT_TRUE(stats); - EXPECT_TRUE(stats->isValid); - EXPECT_EQ(0u, stats->decommittableBytes); - EXPECT_EQ(kSystemPageSize, stats->discardableBytes); - EXPECT_EQ(kSystemPageSize, stats->activeBytes); - EXPECT_EQ(2 * kSystemPageSize, stats->residentBytes); + EXPECT_TRUE(stats->is_valid); + EXPECT_EQ(0u, stats->decommittable_bytes); + EXPECT_EQ(kSystemPageSize, stats->discardable_bytes); + EXPECT_EQ(kSystemPageSize, stats->active_bytes); + EXPECT_EQ(2 * kSystemPageSize, stats->resident_bytes); } CheckPageInCore(ptr2 - kPointerOffset, true); - partitionPurgeMemoryGeneric(genericAllocator.root(), + PartitionPurgeMemoryGeneric(generic_allocator.root(), PartitionPurgeDiscardUnusedSystemPages); CheckPageInCore(ptr2 - kPointerOffset, false); - EXPECT_EQ(3u, page->numUnprovisionedSlots); + EXPECT_EQ(3u, page->num_unprovisioned_slots); - partitionFreeGeneric(genericAllocator.root(), ptr1); + PartitionFreeGeneric(generic_allocator.root(), ptr1); } // Free the first of two 4096 byte allocations and then purge. 
{ - char* ptr1 = reinterpret_cast<char*>(partitionAllocGeneric( - genericAllocator.root(), kSystemPageSize - kExtraAllocSize, typeName)); - void* ptr2 = partitionAllocGeneric( - genericAllocator.root(), kSystemPageSize - kExtraAllocSize, typeName); - partitionFreeGeneric(genericAllocator.root(), ptr1); + char* ptr1 = reinterpret_cast<char*>( + PartitionAllocGeneric(generic_allocator.root(), + kSystemPageSize - kExtraAllocSize, type_name)); + void* ptr2 = PartitionAllocGeneric( + generic_allocator.root(), kSystemPageSize - kExtraAllocSize, type_name); + PartitionFreeGeneric(generic_allocator.root(), ptr1); { - MockPartitionStatsDumper mockStatsDumperGeneric; - partitionDumpStatsGeneric( - genericAllocator.root(), "mock_generic_allocator", - false /* detailed dump */, &mockStatsDumperGeneric); - EXPECT_TRUE(mockStatsDumperGeneric.IsMemoryAllocationRecorded()); + MockPartitionStatsDumper dumper; + PartitionDumpStatsGeneric(generic_allocator.root(), + "mock_generic_allocator", + false /* detailed dump */, &dumper); + EXPECT_TRUE(dumper.IsMemoryAllocationRecorded()); const PartitionBucketMemoryStats* stats = - mockStatsDumperGeneric.GetBucketStats(kSystemPageSize); + dumper.GetBucketStats(kSystemPageSize); EXPECT_TRUE(stats); - EXPECT_TRUE(stats->isValid); - EXPECT_EQ(0u, stats->decommittableBytes); - EXPECT_EQ(kSystemPageSize, stats->discardableBytes); - EXPECT_EQ(kSystemPageSize, stats->activeBytes); - EXPECT_EQ(2 * kSystemPageSize, stats->residentBytes); + EXPECT_TRUE(stats->is_valid); + EXPECT_EQ(0u, stats->decommittable_bytes); + EXPECT_EQ(kSystemPageSize, stats->discardable_bytes); + EXPECT_EQ(kSystemPageSize, stats->active_bytes); + EXPECT_EQ(2 * kSystemPageSize, stats->resident_bytes); } CheckPageInCore(ptr1 - kPointerOffset, true); - partitionPurgeMemoryGeneric(genericAllocator.root(), + PartitionPurgeMemoryGeneric(generic_allocator.root(), PartitionPurgeDiscardUnusedSystemPages); CheckPageInCore(ptr1 - kPointerOffset, false); - 
partitionFreeGeneric(genericAllocator.root(), ptr2); + PartitionFreeGeneric(generic_allocator.root(), ptr2); } { - char* ptr1 = reinterpret_cast<char*>(partitionAllocGeneric( - genericAllocator.root(), 9216 - kExtraAllocSize, typeName)); - void* ptr2 = partitionAllocGeneric(genericAllocator.root(), - 9216 - kExtraAllocSize, typeName); - void* ptr3 = partitionAllocGeneric(genericAllocator.root(), - 9216 - kExtraAllocSize, typeName); - void* ptr4 = partitionAllocGeneric(genericAllocator.root(), - 9216 - kExtraAllocSize, typeName); + char* ptr1 = reinterpret_cast<char*>(PartitionAllocGeneric( + generic_allocator.root(), 9216 - kExtraAllocSize, type_name)); + void* ptr2 = PartitionAllocGeneric(generic_allocator.root(), + 9216 - kExtraAllocSize, type_name); + void* ptr3 = PartitionAllocGeneric(generic_allocator.root(), + 9216 - kExtraAllocSize, type_name); + void* ptr4 = PartitionAllocGeneric(generic_allocator.root(), + 9216 - kExtraAllocSize, type_name); memset(ptr1, 'A', 9216 - kExtraAllocSize); memset(ptr2, 'A', 9216 - kExtraAllocSize); - partitionFreeGeneric(genericAllocator.root(), ptr2); - partitionFreeGeneric(genericAllocator.root(), ptr1); + PartitionFreeGeneric(generic_allocator.root(), ptr2); + PartitionFreeGeneric(generic_allocator.root(), ptr1); { - MockPartitionStatsDumper mockStatsDumperGeneric; - partitionDumpStatsGeneric( - genericAllocator.root(), "mock_generic_allocator", - false /* detailed dump */, &mockStatsDumperGeneric); - EXPECT_TRUE(mockStatsDumperGeneric.IsMemoryAllocationRecorded()); + MockPartitionStatsDumper dumper; + PartitionDumpStatsGeneric(generic_allocator.root(), + "mock_generic_allocator", + false /* detailed dump */, &dumper); + EXPECT_TRUE(dumper.IsMemoryAllocationRecorded()); - const PartitionBucketMemoryStats* stats = - mockStatsDumperGeneric.GetBucketStats(9216); + const PartitionBucketMemoryStats* stats = dumper.GetBucketStats(9216); EXPECT_TRUE(stats); - EXPECT_TRUE(stats->isValid); - EXPECT_EQ(0u, stats->decommittableBytes); - 
EXPECT_EQ(2 * kSystemPageSize, stats->discardableBytes); - EXPECT_EQ(9216u * 2, stats->activeBytes); - EXPECT_EQ(9 * kSystemPageSize, stats->residentBytes); + EXPECT_TRUE(stats->is_valid); + EXPECT_EQ(0u, stats->decommittable_bytes); + EXPECT_EQ(2 * kSystemPageSize, stats->discardable_bytes); + EXPECT_EQ(9216u * 2, stats->active_bytes); + EXPECT_EQ(9 * kSystemPageSize, stats->resident_bytes); } CheckPageInCore(ptr1 - kPointerOffset, true); CheckPageInCore(ptr1 - kPointerOffset + kSystemPageSize, true); CheckPageInCore(ptr1 - kPointerOffset + (kSystemPageSize * 2), true); CheckPageInCore(ptr1 - kPointerOffset + (kSystemPageSize * 3), true); CheckPageInCore(ptr1 - kPointerOffset + (kSystemPageSize * 4), true); - partitionPurgeMemoryGeneric(genericAllocator.root(), + PartitionPurgeMemoryGeneric(generic_allocator.root(), PartitionPurgeDiscardUnusedSystemPages); CheckPageInCore(ptr1 - kPointerOffset, true); CheckPageInCore(ptr1 - kPointerOffset + kSystemPageSize, false); @@ -1960,166 +1977,168 @@ CheckPageInCore(ptr1 - kPointerOffset + (kSystemPageSize * 3), false); CheckPageInCore(ptr1 - kPointerOffset + (kSystemPageSize * 4), true); - partitionFreeGeneric(genericAllocator.root(), ptr3); - partitionFreeGeneric(genericAllocator.root(), ptr4); + PartitionFreeGeneric(generic_allocator.root(), ptr3); + PartitionFreeGeneric(generic_allocator.root(), ptr4); } { - char* ptr1 = reinterpret_cast<char*>(partitionAllocGeneric( - genericAllocator.root(), (64 * kSystemPageSize) - kExtraAllocSize, - typeName)); + char* ptr1 = reinterpret_cast<char*>(PartitionAllocGeneric( + generic_allocator.root(), (64 * kSystemPageSize) - kExtraAllocSize, + type_name)); memset(ptr1, 'A', (64 * kSystemPageSize) - kExtraAllocSize); - partitionFreeGeneric(genericAllocator.root(), ptr1); - ptr1 = reinterpret_cast<char*>(partitionAllocGeneric( - genericAllocator.root(), (61 * kSystemPageSize) - kExtraAllocSize, - typeName)); + PartitionFreeGeneric(generic_allocator.root(), ptr1); + ptr1 = 
reinterpret_cast<char*>(PartitionAllocGeneric( + generic_allocator.root(), (61 * kSystemPageSize) - kExtraAllocSize, + type_name)); { - MockPartitionStatsDumper mockStatsDumperGeneric; - partitionDumpStatsGeneric( - genericAllocator.root(), "mock_generic_allocator", - false /* detailed dump */, &mockStatsDumperGeneric); - EXPECT_TRUE(mockStatsDumperGeneric.IsMemoryAllocationRecorded()); + MockPartitionStatsDumper dumper; + PartitionDumpStatsGeneric(generic_allocator.root(), + "mock_generic_allocator", + false /* detailed dump */, &dumper); + EXPECT_TRUE(dumper.IsMemoryAllocationRecorded()); const PartitionBucketMemoryStats* stats = - mockStatsDumperGeneric.GetBucketStats(64 * kSystemPageSize); + dumper.GetBucketStats(64 * kSystemPageSize); EXPECT_TRUE(stats); - EXPECT_TRUE(stats->isValid); - EXPECT_EQ(0u, stats->decommittableBytes); - EXPECT_EQ(3 * kSystemPageSize, stats->discardableBytes); - EXPECT_EQ(61 * kSystemPageSize, stats->activeBytes); - EXPECT_EQ(64 * kSystemPageSize, stats->residentBytes); + EXPECT_TRUE(stats->is_valid); + EXPECT_EQ(0u, stats->decommittable_bytes); + EXPECT_EQ(3 * kSystemPageSize, stats->discardable_bytes); + EXPECT_EQ(61 * kSystemPageSize, stats->active_bytes); + EXPECT_EQ(64 * kSystemPageSize, stats->resident_bytes); } CheckPageInCore(ptr1 - kPointerOffset + (kSystemPageSize * 60), true); CheckPageInCore(ptr1 - kPointerOffset + (kSystemPageSize * 61), true); CheckPageInCore(ptr1 - kPointerOffset + (kSystemPageSize * 62), true); CheckPageInCore(ptr1 - kPointerOffset + (kSystemPageSize * 63), true); - partitionPurgeMemoryGeneric(genericAllocator.root(), + PartitionPurgeMemoryGeneric(generic_allocator.root(), PartitionPurgeDiscardUnusedSystemPages); CheckPageInCore(ptr1 - kPointerOffset + (kSystemPageSize * 60), true); CheckPageInCore(ptr1 - kPointerOffset + (kSystemPageSize * 61), false); CheckPageInCore(ptr1 - kPointerOffset + (kSystemPageSize * 62), false); CheckPageInCore(ptr1 - kPointerOffset + (kSystemPageSize * 63), false); - 
partitionFreeGeneric(genericAllocator.root(), ptr1); + PartitionFreeGeneric(generic_allocator.root(), ptr1); } // This sub-test tests truncation of the provisioned slots in a trickier // case where the freelist is rewritten. - partitionPurgeMemoryGeneric(genericAllocator.root(), + PartitionPurgeMemoryGeneric(generic_allocator.root(), PartitionPurgeDecommitEmptyPages); { - char* ptr1 = reinterpret_cast<char*>(partitionAllocGeneric( - genericAllocator.root(), kSystemPageSize - kExtraAllocSize, typeName)); - void* ptr2 = partitionAllocGeneric( - genericAllocator.root(), kSystemPageSize - kExtraAllocSize, typeName); - void* ptr3 = partitionAllocGeneric( - genericAllocator.root(), kSystemPageSize - kExtraAllocSize, typeName); - void* ptr4 = partitionAllocGeneric( - genericAllocator.root(), kSystemPageSize - kExtraAllocSize, typeName); + char* ptr1 = reinterpret_cast<char*>( + PartitionAllocGeneric(generic_allocator.root(), + kSystemPageSize - kExtraAllocSize, type_name)); + void* ptr2 = PartitionAllocGeneric( + generic_allocator.root(), kSystemPageSize - kExtraAllocSize, type_name); + void* ptr3 = PartitionAllocGeneric( + generic_allocator.root(), kSystemPageSize - kExtraAllocSize, type_name); + void* ptr4 = PartitionAllocGeneric( + generic_allocator.root(), kSystemPageSize - kExtraAllocSize, type_name); ptr1[0] = 'A'; ptr1[kSystemPageSize] = 'A'; ptr1[kSystemPageSize * 2] = 'A'; ptr1[kSystemPageSize * 3] = 'A'; PartitionPage* page = - partitionPointerToPage(partitionCookieFreePointerAdjust(ptr1)); - partitionFreeGeneric(genericAllocator.root(), ptr2); - partitionFreeGeneric(genericAllocator.root(), ptr4); - partitionFreeGeneric(genericAllocator.root(), ptr1); - EXPECT_EQ(0u, page->numUnprovisionedSlots); + PartitionPointerToPage(PartitionCookieFreePointerAdjust(ptr1)); + PartitionFreeGeneric(generic_allocator.root(), ptr2); + PartitionFreeGeneric(generic_allocator.root(), ptr4); + PartitionFreeGeneric(generic_allocator.root(), ptr1); + EXPECT_EQ(0u, 
page->num_unprovisioned_slots); { - MockPartitionStatsDumper mockStatsDumperGeneric; - partitionDumpStatsGeneric( - genericAllocator.root(), "mock_generic_allocator", - false /* detailed dump */, &mockStatsDumperGeneric); - EXPECT_TRUE(mockStatsDumperGeneric.IsMemoryAllocationRecorded()); + MockPartitionStatsDumper dumper; + PartitionDumpStatsGeneric(generic_allocator.root(), + "mock_generic_allocator", + false /* detailed dump */, &dumper); + EXPECT_TRUE(dumper.IsMemoryAllocationRecorded()); const PartitionBucketMemoryStats* stats = - mockStatsDumperGeneric.GetBucketStats(kSystemPageSize); + dumper.GetBucketStats(kSystemPageSize); EXPECT_TRUE(stats); - EXPECT_TRUE(stats->isValid); - EXPECT_EQ(0u, stats->decommittableBytes); - EXPECT_EQ(2 * kSystemPageSize, stats->discardableBytes); - EXPECT_EQ(kSystemPageSize, stats->activeBytes); - EXPECT_EQ(4 * kSystemPageSize, stats->residentBytes); + EXPECT_TRUE(stats->is_valid); + EXPECT_EQ(0u, stats->decommittable_bytes); + EXPECT_EQ(2 * kSystemPageSize, stats->discardable_bytes); + EXPECT_EQ(kSystemPageSize, stats->active_bytes); + EXPECT_EQ(4 * kSystemPageSize, stats->resident_bytes); } CheckPageInCore(ptr1 - kPointerOffset, true); CheckPageInCore(ptr1 - kPointerOffset + kSystemPageSize, true); CheckPageInCore(ptr1 - kPointerOffset + (kSystemPageSize * 2), true); CheckPageInCore(ptr1 - kPointerOffset + (kSystemPageSize * 3), true); - partitionPurgeMemoryGeneric(genericAllocator.root(), + PartitionPurgeMemoryGeneric(generic_allocator.root(), PartitionPurgeDiscardUnusedSystemPages); - EXPECT_EQ(1u, page->numUnprovisionedSlots); + EXPECT_EQ(1u, page->num_unprovisioned_slots); CheckPageInCore(ptr1 - kPointerOffset, true); CheckPageInCore(ptr1 - kPointerOffset + kSystemPageSize, false); CheckPageInCore(ptr1 - kPointerOffset + (kSystemPageSize * 2), true); CheckPageInCore(ptr1 - kPointerOffset + (kSystemPageSize * 3), false); // Let's check we didn't brick the freelist. 
- void* ptr1b = partitionAllocGeneric( - genericAllocator.root(), kSystemPageSize - kExtraAllocSize, typeName); + void* ptr1b = PartitionAllocGeneric( + generic_allocator.root(), kSystemPageSize - kExtraAllocSize, type_name); EXPECT_EQ(ptr1, ptr1b); - void* ptr2b = partitionAllocGeneric( - genericAllocator.root(), kSystemPageSize - kExtraAllocSize, typeName); + void* ptr2b = PartitionAllocGeneric( + generic_allocator.root(), kSystemPageSize - kExtraAllocSize, type_name); EXPECT_EQ(ptr2, ptr2b); - EXPECT_FALSE(page->freelistHead); + EXPECT_FALSE(page->freelist_head); - partitionFreeGeneric(genericAllocator.root(), ptr1); - partitionFreeGeneric(genericAllocator.root(), ptr2); - partitionFreeGeneric(genericAllocator.root(), ptr3); + PartitionFreeGeneric(generic_allocator.root(), ptr1); + PartitionFreeGeneric(generic_allocator.root(), ptr2); + PartitionFreeGeneric(generic_allocator.root(), ptr3); } // This sub-test is similar, but tests a double-truncation. - partitionPurgeMemoryGeneric(genericAllocator.root(), + PartitionPurgeMemoryGeneric(generic_allocator.root(), PartitionPurgeDecommitEmptyPages); { - char* ptr1 = reinterpret_cast<char*>(partitionAllocGeneric( - genericAllocator.root(), kSystemPageSize - kExtraAllocSize, typeName)); - void* ptr2 = partitionAllocGeneric( - genericAllocator.root(), kSystemPageSize - kExtraAllocSize, typeName); - void* ptr3 = partitionAllocGeneric( - genericAllocator.root(), kSystemPageSize - kExtraAllocSize, typeName); - void* ptr4 = partitionAllocGeneric( - genericAllocator.root(), kSystemPageSize - kExtraAllocSize, typeName); + char* ptr1 = reinterpret_cast<char*>( + PartitionAllocGeneric(generic_allocator.root(), + kSystemPageSize - kExtraAllocSize, type_name)); + void* ptr2 = PartitionAllocGeneric( + generic_allocator.root(), kSystemPageSize - kExtraAllocSize, type_name); + void* ptr3 = PartitionAllocGeneric( + generic_allocator.root(), kSystemPageSize - kExtraAllocSize, type_name); + void* ptr4 = PartitionAllocGeneric( + 
generic_allocator.root(), kSystemPageSize - kExtraAllocSize, type_name); ptr1[0] = 'A'; ptr1[kSystemPageSize] = 'A'; ptr1[kSystemPageSize * 2] = 'A'; ptr1[kSystemPageSize * 3] = 'A'; PartitionPage* page = - partitionPointerToPage(partitionCookieFreePointerAdjust(ptr1)); - partitionFreeGeneric(genericAllocator.root(), ptr4); - partitionFreeGeneric(genericAllocator.root(), ptr3); - EXPECT_EQ(0u, page->numUnprovisionedSlots); + PartitionPointerToPage(PartitionCookieFreePointerAdjust(ptr1)); + PartitionFreeGeneric(generic_allocator.root(), ptr4); + PartitionFreeGeneric(generic_allocator.root(), ptr3); + EXPECT_EQ(0u, page->num_unprovisioned_slots); { - MockPartitionStatsDumper mockStatsDumperGeneric; - partitionDumpStatsGeneric( - genericAllocator.root(), "mock_generic_allocator", - false /* detailed dump */, &mockStatsDumperGeneric); - EXPECT_TRUE(mockStatsDumperGeneric.IsMemoryAllocationRecorded()); + MockPartitionStatsDumper dumper; + PartitionDumpStatsGeneric(generic_allocator.root(), + "mock_generic_allocator", + false /* detailed dump */, &dumper); + EXPECT_TRUE(dumper.IsMemoryAllocationRecorded()); const PartitionBucketMemoryStats* stats = - mockStatsDumperGeneric.GetBucketStats(kSystemPageSize); + dumper.GetBucketStats(kSystemPageSize); EXPECT_TRUE(stats); - EXPECT_TRUE(stats->isValid); - EXPECT_EQ(0u, stats->decommittableBytes); - EXPECT_EQ(2 * kSystemPageSize, stats->discardableBytes); - EXPECT_EQ(2 * kSystemPageSize, stats->activeBytes); - EXPECT_EQ(4 * kSystemPageSize, stats->residentBytes); + EXPECT_TRUE(stats->is_valid); + EXPECT_EQ(0u, stats->decommittable_bytes); + EXPECT_EQ(2 * kSystemPageSize, stats->discardable_bytes); + EXPECT_EQ(2 * kSystemPageSize, stats->active_bytes); + EXPECT_EQ(4 * kSystemPageSize, stats->resident_bytes); } CheckPageInCore(ptr1 - kPointerOffset, true); CheckPageInCore(ptr1 - kPointerOffset + kSystemPageSize, true); CheckPageInCore(ptr1 - kPointerOffset + (kSystemPageSize * 2), true); CheckPageInCore(ptr1 - kPointerOffset + 
(kSystemPageSize * 3), true); - partitionPurgeMemoryGeneric(genericAllocator.root(), + PartitionPurgeMemoryGeneric(generic_allocator.root(), PartitionPurgeDiscardUnusedSystemPages); - EXPECT_EQ(2u, page->numUnprovisionedSlots); + EXPECT_EQ(2u, page->num_unprovisioned_slots); CheckPageInCore(ptr1 - kPointerOffset, true); CheckPageInCore(ptr1 - kPointerOffset + kSystemPageSize, true); CheckPageInCore(ptr1 - kPointerOffset + (kSystemPageSize * 2), false); CheckPageInCore(ptr1 - kPointerOffset + (kSystemPageSize * 3), false); - EXPECT_FALSE(page->freelistHead); + EXPECT_FALSE(page->freelist_head); - partitionFreeGeneric(genericAllocator.root(), ptr1); - partitionFreeGeneric(genericAllocator.root(), ptr2); + PartitionFreeGeneric(generic_allocator.root(), ptr1); + PartitionFreeGeneric(generic_allocator.root(), ptr2); } TestShutdown();
diff --git a/base/files/file_path_watcher.h b/base/files/file_path_watcher.h index b9ddb0d5..9e29d0a9 100644 --- a/base/files/file_path_watcher.h +++ b/base/files/file_path_watcher.h
@@ -7,6 +7,8 @@ #ifndef BASE_FILES_FILE_PATH_WATCHER_H_ #define BASE_FILES_FILE_PATH_WATCHER_H_ +#include <memory> + #include "base/base_export.h" #include "base/callback.h" #include "base/files/file_path.h" @@ -36,9 +38,10 @@ typedef base::Callback<void(const FilePath& path, bool error)> Callback; // Used internally to encapsulate different members on different platforms. - class PlatformDelegate : public base::RefCountedThreadSafe<PlatformDelegate> { + class PlatformDelegate { public: PlatformDelegate(); + virtual ~PlatformDelegate(); // Start watching for the given |path| and notify |delegate| about changes. virtual bool Watch(const FilePath& path, @@ -50,11 +53,8 @@ virtual void Cancel() = 0; protected: - friend class base::RefCountedThreadSafe<PlatformDelegate>; friend class FilePathWatcher; - virtual ~PlatformDelegate(); - scoped_refptr<SequencedTaskRunner> task_runner() const { return task_runner_; } @@ -75,16 +75,13 @@ private: scoped_refptr<SequencedTaskRunner> task_runner_; bool cancelled_; + + DISALLOW_COPY_AND_ASSIGN(PlatformDelegate); }; FilePathWatcher(); ~FilePathWatcher(); - // A callback that always cleans up the PlatformDelegate, either when executed - // or when deleted without having been executed at all, as can happen during - // shutdown. - static void CancelWatch(const scoped_refptr<PlatformDelegate>& delegate); - // Returns true if the platform and OS version support recursive watches. static bool RecursiveWatchAvailable(); @@ -101,7 +98,7 @@ bool Watch(const FilePath& path, bool recursive, const Callback& callback); private: - scoped_refptr<PlatformDelegate> impl_; + std::unique_ptr<PlatformDelegate> impl_; SequenceChecker sequence_checker_;
diff --git a/base/files/file_path_watcher_fsevents.cc b/base/files/file_path_watcher_fsevents.cc index 8e73b55..e9a87b0 100644 --- a/base/files/file_path_watcher_fsevents.cc +++ b/base/files/file_path_watcher_fsevents.cc
@@ -13,7 +13,6 @@ #include "base/lazy_instance.h" #include "base/logging.h" #include "base/mac/scoped_cftyperef.h" -#include "base/macros.h" #include "base/strings/stringprintf.h" #include "base/threading/sequenced_task_runner_handle.h" @@ -69,10 +68,16 @@ FilePathWatcherFSEvents::FilePathWatcherFSEvents() : queue_(dispatch_queue_create( - base::StringPrintf( - "org.chromium.base.FilePathWatcher.%p", this).c_str(), + base::StringPrintf("org.chromium.base.FilePathWatcher.%p", this) + .c_str(), DISPATCH_QUEUE_SERIAL)), - fsevent_stream_(nullptr) { + fsevent_stream_(nullptr), + weak_factory_(this) {} + +FilePathWatcherFSEvents::~FilePathWatcherFSEvents() { + DCHECK(!task_runner() || task_runner()->RunsTasksOnCurrentThread()); + DCHECK(callback_.is_null()) + << "Cancel() must be called before FilePathWatcher is destroyed."; } bool FilePathWatcherFSEvents::Watch(const FilePath& path, @@ -105,11 +110,15 @@ set_cancelled(); callback_.Reset(); - // Switch to the dispatch queue to tear down the event stream. As the queue - // is owned by this object, and this method is called from the destructor, - // execute the block synchronously. + // Switch to the dispatch queue to tear down the event stream. As the queue is + // owned by |this|, and this method is called from the destructor, execute the + // block synchronously. dispatch_sync(queue_, ^{ - CancelOnMessageLoopThread(); + if (fsevent_stream_) { + DestroyEventStream(); + target_.clear(); + resolved_target_.clear(); + } }); } @@ -140,31 +149,40 @@ // the directory to be watched gets created. if (root_changed) { // Resetting the event stream from within the callback fails (FSEvents spews - // bad file descriptor errors), so post a task to do the reset. - dispatch_async(watcher->queue_, ^{ - watcher->UpdateEventStream(root_change_at); - }); + // bad file descriptor errors), so do the reset asynchronously. 
+ // + // We can't dispatch_async a call to UpdateEventStream() directly because + // there would be no guarantee that |watcher| still exists when it runs. + // + // Instead, bounce on task_runner() and use a WeakPtr to verify that + // |watcher| still exists. If it does, dispatch_async a call to + // UpdateEventStream(). Because the destructor of |watcher| runs on + // task_runner() and calls dispatch_sync, it is guaranteed that |watcher| + // still exists when UpdateEventStream() runs. + watcher->task_runner()->PostTask( + FROM_HERE, Bind( + [](WeakPtr<FilePathWatcherFSEvents> weak_watcher, + FSEventStreamEventId root_change_at) { + if (!weak_watcher) + return; + FilePathWatcherFSEvents* watcher = weak_watcher.get(); + dispatch_async(watcher->queue_, ^{ + watcher->UpdateEventStream(root_change_at); + }); + }, + watcher->weak_factory_.GetWeakPtr(), root_change_at)); } watcher->OnFilePathsChanged(paths); } -FilePathWatcherFSEvents::~FilePathWatcherFSEvents() { - // This method may be called on either the libdispatch or task_runner() - // thread. Checking callback_ on the libdispatch thread here is safe because - // it is executing in a task posted by Cancel() which first reset callback_. - // PostTask forms a sufficient memory barrier to ensure that the value is - // consistent on the target thread. 
- DCHECK(callback_.is_null()) - << "Cancel() must be called before FilePathWatcher is destroyed."; -} - void FilePathWatcherFSEvents::OnFilePathsChanged( const std::vector<FilePath>& paths) { DCHECK(!resolved_target_.empty()); task_runner()->PostTask( - FROM_HERE, Bind(&FilePathWatcherFSEvents::DispatchEvents, this, paths, - target_, resolved_target_)); + FROM_HERE, + Bind(&FilePathWatcherFSEvents::DispatchEvents, weak_factory_.GetWeakPtr(), + paths, target_, resolved_target_)); } void FilePathWatcherFSEvents::DispatchEvents(const std::vector<FilePath>& paths, @@ -185,18 +203,6 @@ } } -void FilePathWatcherFSEvents::CancelOnMessageLoopThread() { - // For all other implementations, the "message loop thread" is the IO thread, - // as returned by task_runner(). This implementation, however, needs to - // cancel pending work on the Dispatch Queue thread. - - if (fsevent_stream_) { - DestroyEventStream(); - target_.clear(); - resolved_target_.clear(); - } -} - void FilePathWatcherFSEvents::UpdateEventStream( FSEventStreamEventId start_event) { // It can happen that the watcher gets canceled while tasks that call this @@ -232,8 +238,9 @@ FSEventStreamSetDispatchQueue(fsevent_stream_, queue_); if (!FSEventStreamStart(fsevent_stream_)) { - task_runner()->PostTask( - FROM_HERE, Bind(&FilePathWatcherFSEvents::ReportError, this, target_)); + task_runner()->PostTask(FROM_HERE, + Bind(&FilePathWatcherFSEvents::ReportError, + weak_factory_.GetWeakPtr(), target_)); } } @@ -242,8 +249,9 @@ bool changed = resolved != resolved_target_; resolved_target_ = resolved; if (resolved_target_.empty()) { - task_runner()->PostTask( - FROM_HERE, Bind(&FilePathWatcherFSEvents::ReportError, this, target_)); + task_runner()->PostTask(FROM_HERE, + Bind(&FilePathWatcherFSEvents::ReportError, + weak_factory_.GetWeakPtr(), target_)); } return changed; }
diff --git a/base/files/file_path_watcher_fsevents.h b/base/files/file_path_watcher_fsevents.h index fbcca1f8..dcdf2fb 100644 --- a/base/files/file_path_watcher_fsevents.h +++ b/base/files/file_path_watcher_fsevents.h
@@ -14,6 +14,7 @@ #include "base/files/file_path_watcher.h" #include "base/mac/scoped_dispatch_object.h" #include "base/macros.h" +#include "base/memory/weak_ptr.h" namespace base { @@ -26,6 +27,7 @@ class FilePathWatcherFSEvents : public FilePathWatcher::PlatformDelegate { public: FilePathWatcherFSEvents(); + ~FilePathWatcherFSEvents() override; // FilePathWatcher::PlatformDelegate overrides. bool Watch(const FilePath& path, @@ -41,8 +43,6 @@ const FSEventStreamEventFlags flags[], const FSEventStreamEventId event_ids[]); - ~FilePathWatcherFSEvents() override; - // Called from FSEventsCallback whenever there is a change to the paths. void OnFilePathsChanged(const std::vector<FilePath>& paths); @@ -53,9 +53,6 @@ const FilePath& target, const FilePath& resolved_target); - // Cleans up and stops the event stream. - void CancelOnMessageLoopThread(); - // (Re-)Initialize the event stream to start reporting events from // |start_event|. void UpdateEventStream(FSEventStreamEventId start_event); @@ -92,6 +89,8 @@ // (Only accessed from the libdispatch queue.) FSEventStreamRef fsevent_stream_; + WeakPtrFactory<FilePathWatcherFSEvents> weak_factory_; + DISALLOW_COPY_AND_ASSIGN(FilePathWatcherFSEvents); };
diff --git a/base/files/file_path_watcher_kqueue.cc b/base/files/file_path_watcher_kqueue.cc index 7f4ef033..a28726a 100644 --- a/base/files/file_path_watcher_kqueue.cc +++ b/base/files/file_path_watcher_kqueue.cc
@@ -26,7 +26,9 @@ FilePathWatcherKQueue::FilePathWatcherKQueue() : kqueue_(-1) {} -FilePathWatcherKQueue::~FilePathWatcherKQueue() {} +FilePathWatcherKQueue::~FilePathWatcherKQueue() { + DCHECK(!task_runner() || task_runner()->RunsTasksOnCurrentThread()); +} void FilePathWatcherKQueue::ReleaseEvent(struct kevent& event) { CloseFileDescriptor(&event.ident); @@ -265,11 +267,13 @@ return false; } - // This creates an ownership cycle (|this| owns |kqueue_watch_controller_| - // which owns a callback which owns |this|). The cycle is broken when - // |kqueue_watch_controller_| is reset in Cancel(). + // It's safe to use Unretained() because the watch is cancelled and the + // callback cannot be invoked after |kqueue_watch_controller_| (which is a + // member of |this|) has been deleted. kqueue_watch_controller_ = FileDescriptorWatcher::WatchReadable( - kqueue_, Bind(&FilePathWatcherKQueue::OnKQueueReadable, this)); + kqueue_, + Bind(&FilePathWatcherKQueue::OnKQueueReadable, Unretained(this))); + return true; }
diff --git a/base/files/file_path_watcher_kqueue.h b/base/files/file_path_watcher_kqueue.h index 05e0941..ef79be55 100644 --- a/base/files/file_path_watcher_kqueue.h +++ b/base/files/file_path_watcher_kqueue.h
@@ -31,6 +31,7 @@ class FilePathWatcherKQueue : public FilePathWatcher::PlatformDelegate { public: FilePathWatcherKQueue(); + ~FilePathWatcherKQueue() override; // FilePathWatcher::PlatformDelegate overrides. bool Watch(const FilePath& path, @@ -38,9 +39,6 @@ const FilePathWatcher::Callback& callback) override; void Cancel() override; - protected: - ~FilePathWatcherKQueue() override; - private: class EventData { public:
diff --git a/base/files/file_path_watcher_linux.cc b/base/files/file_path_watcher_linux.cc index d060e49..9589e9b 100644 --- a/base/files/file_path_watcher_linux.cc +++ b/base/files/file_path_watcher_linux.cc
@@ -28,6 +28,8 @@ #include "base/location.h" #include "base/logging.h" #include "base/macros.h" +#include "base/memory/ptr_util.h" +#include "base/memory/weak_ptr.h" #include "base/posix/eintr_wrapper.h" #include "base/single_thread_task_runner.h" #include "base/stl_util.h" @@ -91,6 +93,7 @@ class FilePathWatcherImpl : public FilePathWatcher::PlatformDelegate { public: FilePathWatcherImpl(); + ~FilePathWatcherImpl() override; // Called for each event coming from the watch. |fired_watch| identifies the // watch that fired, |child| indicates what has changed, and is relative to @@ -105,13 +108,13 @@ bool deleted, bool is_dir); - protected: - ~FilePathWatcherImpl() override { - in_destructor_ = true; - CancelOnMessageLoopThreadOrInDestructor(); - } - private: + void OnFilePathChangedOnOriginSequence(InotifyReader::Watch fired_watch, + const FilePath::StringType& child, + bool created, + bool deleted, + bool is_dir); + // Start watching |path| for changes and notify |delegate| on each change. // Returns true if watch for |path| has been added successfully. bool Watch(const FilePath& path, @@ -120,7 +123,6 @@ // Cancel the watch. This unregisters the instance with InotifyReader. void Cancel() override; - void CancelOnMessageLoopThreadOrInDestructor(); // Inotify watches are installed for all directory components of |target_|. 
// A WatchEntry instance holds: @@ -185,7 +187,7 @@ hash_map<InotifyReader::Watch, FilePath> recursive_paths_by_watch_; std::map<FilePath, InotifyReader::Watch> recursive_watches_by_path_; - bool in_destructor_ = false; + WeakPtrFactory<FilePathWatcherImpl> weak_factory_; DISALLOW_COPY_AND_ASSIGN(FilePathWatcherImpl); }; @@ -312,7 +314,10 @@ } FilePathWatcherImpl::FilePathWatcherImpl() - : recursive_(false) { + : recursive_(false), weak_factory_(this) {} + +FilePathWatcherImpl::~FilePathWatcherImpl() { + DCHECK(!task_runner() || task_runner()->RunsTasksOnCurrentThread()); } void FilePathWatcherImpl::OnFilePathChanged(InotifyReader::Watch fired_watch, @@ -320,22 +325,25 @@ bool created, bool deleted, bool is_dir) { - if (!task_runner()->RunsTasksOnCurrentThread()) { - // Switch to task_runner() to access |watches_| safely. - task_runner()->PostTask(FROM_HERE, - Bind(&FilePathWatcherImpl::OnFilePathChanged, this, - fired_watch, child, created, deleted, is_dir)); - return; - } + DCHECK(!task_runner()->RunsTasksOnCurrentThread()); - // Check to see if CancelOnMessageLoopThreadOrInDestructor() has already been - // called. May happen when code flow reaches here from the PostTask() above. - if (watches_.empty()) { - DCHECK(target_.empty()); - return; - } + // This method is invoked on the Inotify thread. Switch to task_runner() to + // access |watches_| safely. Use a WeakPtr to prevent the callback from + // running after |this| is destroyed (i.e. after the watch is cancelled). 
+ task_runner()->PostTask( + FROM_HERE, Bind(&FilePathWatcherImpl::OnFilePathChangedOnOriginSequence, + weak_factory_.GetWeakPtr(), fired_watch, child, created, + deleted, is_dir)); +} +void FilePathWatcherImpl::OnFilePathChangedOnOriginSequence( + InotifyReader::Watch fired_watch, + const FilePath::StringType& child, + bool created, + bool deleted, + bool is_dir) { DCHECK(task_runner()->RunsTasksOnCurrentThread()); + DCHECK(!watches_.empty()); DCHECK(HasValidWatchVector()); // Used below to avoid multiple recursive updates. @@ -437,41 +445,23 @@ } void FilePathWatcherImpl::Cancel() { - if (callback_.is_null()) { - // Watch was never called, or the message_loop() thread is already gone. + if (!callback_) { + // Watch() was never called. set_cancelled(); return; } - // Switch to the task_runner() if necessary so we can access |watches_|. - if (!task_runner()->RunsTasksOnCurrentThread()) { - task_runner()->PostTask( - FROM_HERE, - Bind(&FilePathWatcherImpl::CancelOnMessageLoopThreadOrInDestructor, - this)); - } else { - CancelOnMessageLoopThreadOrInDestructor(); - } -} - -void FilePathWatcherImpl::CancelOnMessageLoopThreadOrInDestructor() { - DCHECK(in_destructor_ || task_runner()->RunsTasksOnCurrentThread()); - - if (is_cancelled()) - return; + DCHECK(task_runner()->RunsTasksOnCurrentThread()); + DCHECK(!is_cancelled()); set_cancelled(); - - if (!callback_.is_null()) - callback_.Reset(); + callback_.Reset(); for (size_t i = 0; i < watches_.size(); ++i) g_inotify_reader.Get().RemoveWatch(watches_[i].watch, this); watches_.clear(); target_.clear(); - - if (recursive_) - RemoveRecursiveWatches(); + RemoveRecursiveWatches(); } void FilePathWatcherImpl::UpdateWatches() { @@ -657,7 +647,7 @@ FilePathWatcher::FilePathWatcher() { sequence_checker_.DetachFromSequence(); - impl_ = new FilePathWatcherImpl(); + impl_ = MakeUnique<FilePathWatcherImpl>(); } } // namespace base
diff --git a/base/files/file_path_watcher_mac.cc b/base/files/file_path_watcher_mac.cc index d59ca215..2520b92 100644 --- a/base/files/file_path_watcher_mac.cc +++ b/base/files/file_path_watcher_mac.cc
@@ -2,8 +2,12 @@ // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. +#include <memory> + #include "base/files/file_path_watcher.h" #include "base/files/file_path_watcher_kqueue.h" +#include "base/macros.h" +#include "base/memory/ptr_util.h" #include "build/build_config.h" #if !defined(OS_IOS) @@ -16,6 +20,9 @@ class FilePathWatcherImpl : public FilePathWatcher::PlatformDelegate { public: + FilePathWatcherImpl() = default; + ~FilePathWatcherImpl() override = default; + bool Watch(const FilePath& path, bool recursive, const FilePathWatcher::Callback& callback) override { @@ -25,10 +32,10 @@ if (!FilePathWatcher::RecursiveWatchAvailable()) return false; #if !defined(OS_IOS) - impl_ = new FilePathWatcherFSEvents(); + impl_ = MakeUnique<FilePathWatcherFSEvents>(); #endif // OS_IOS } else { - impl_ = new FilePathWatcherKQueue(); + impl_ = MakeUnique<FilePathWatcherKQueue>(); } DCHECK(impl_.get()); return impl_->Watch(path, recursive, callback); @@ -40,17 +47,17 @@ set_cancelled(); } - protected: - ~FilePathWatcherImpl() override {} + private: + std::unique_ptr<PlatformDelegate> impl_; - scoped_refptr<PlatformDelegate> impl_; + DISALLOW_COPY_AND_ASSIGN(FilePathWatcherImpl); }; } // namespace FilePathWatcher::FilePathWatcher() { sequence_checker_.DetachFromSequence(); - impl_ = new FilePathWatcherImpl(); + impl_ = MakeUnique<FilePathWatcherImpl>(); } } // namespace base
diff --git a/base/files/file_path_watcher_stub.cc b/base/files/file_path_watcher_stub.cc index c224e37..ae22c1ff 100644 --- a/base/files/file_path_watcher_stub.cc +++ b/base/files/file_path_watcher_stub.cc
@@ -7,12 +7,18 @@ #include "base/files/file_path_watcher.h" +#include "base/macros.h" +#include "base/memory/ptr_util.h" + namespace base { namespace { class FilePathWatcherImpl : public FilePathWatcher::PlatformDelegate { public: + FilePathWatcherImpl() = default; + ~FilePathWatcherImpl() override = default; + bool Watch(const FilePath& path, bool recursive, const FilePathWatcher::Callback& callback) override { @@ -21,15 +27,15 @@ void Cancel() override {} - protected: - ~FilePathWatcherImpl() override {} + private: + DISALLOW_COPY_AND_ASSIGN(FilePathWatcherImpl); }; } // namespace FilePathWatcher::FilePathWatcher() { sequence_checker_.DetachFromSequence(); - impl_ = new FilePathWatcherImpl(); + impl_ = MakeUnique<FilePathWatcherImpl>(); } } // namespace base
diff --git a/base/files/file_path_watcher_win.cc b/base/files/file_path_watcher_win.cc index 65c13dae..fba6625 100644 --- a/base/files/file_path_watcher_win.cc +++ b/base/files/file_path_watcher_win.cc
@@ -10,7 +10,7 @@ #include "base/files/file_util.h" #include "base/logging.h" #include "base/macros.h" -#include "base/memory/ref_counted.h" +#include "base/memory/ptr_util.h" #include "base/threading/sequenced_task_runner_handle.h" #include "base/time/time.h" #include "base/win/object_watcher.h" @@ -25,6 +25,7 @@ FilePathWatcherImpl() : handle_(INVALID_HANDLE_VALUE), recursive_watch_(false) {} + ~FilePathWatcherImpl() override; // FilePathWatcher::PlatformDelegate: bool Watch(const FilePath& path, @@ -36,8 +37,6 @@ void OnObjectSignaled(HANDLE object) override; private: - ~FilePathWatcherImpl() override {} - // Setup a watch handle for directory |dir|. Set |recursive| to true to watch // the directory sub trees. Returns true if no fatal error occurs. |handle| // will receive the handle value if |dir| is watchable, otherwise @@ -58,6 +57,9 @@ // Path we're supposed to watch (passed to callback). FilePath target_; + // Set to true in the destructor. + bool* was_deleted_ptr_ = nullptr; + // Handle for FindFirstChangeNotification. HANDLE handle_; @@ -78,6 +80,12 @@ DISALLOW_COPY_AND_ASSIGN(FilePathWatcherImpl); }; +FilePathWatcherImpl::~FilePathWatcherImpl() { + DCHECK(!task_runner() || task_runner()->RunsTasksOnCurrentThread()); + if (was_deleted_ptr_) + *was_deleted_ptr_ = true; +} + bool FilePathWatcherImpl::Watch(const FilePath& path, bool recursive, const FilePathWatcher::Callback& callback) { @@ -119,9 +127,12 @@ } void FilePathWatcherImpl::OnObjectSignaled(HANDLE object) { - DCHECK(object == handle_); - // Make sure we stay alive through the body of this function. - scoped_refptr<FilePathWatcherImpl> keep_alive(this); + DCHECK(task_runner()->RunsTasksOnCurrentThread()); + DCHECK_EQ(object, handle_); + DCHECK(!was_deleted_ptr_); + + bool was_deleted = false; + was_deleted_ptr_ = &was_deleted; if (!UpdateWatch()) { callback_.Run(target_, true /* error */); @@ -171,8 +182,10 @@ } // The watch may have been cancelled by the callback. 
- if (handle_ != INVALID_HANDLE_VALUE) + if (!was_deleted) { watcher_.StartWatchingOnce(handle_, this); + was_deleted_ptr_ = nullptr; + } } // static @@ -269,7 +282,7 @@ FilePathWatcher::FilePathWatcher() { sequence_checker_.DetachFromSequence(); - impl_ = new FilePathWatcherImpl(); + impl_ = MakeUnique<FilePathWatcherImpl>(); } } // namespace base
diff --git a/blimp/client/core/compositor/blimp_compositor.cc b/blimp/client/core/compositor/blimp_compositor.cc index 185742c..2e225d4 100644 --- a/blimp/client/core/compositor/blimp_compositor.cc +++ b/blimp/client/core/compositor/blimp_compositor.cc
@@ -312,8 +312,7 @@ cc::SurfaceId surface_id(surface_factory_->frame_sink_id(), local_frame_id_); content_layer->SetSurfaceInfo( - cc::SurfaceInfo(surface_id, 1.f, surface_size), - false /* stretch_content_to_fill_bounds */); + cc::SurfaceInfo(surface_id, 1.f, surface_size)); content_layer->SetBounds(current_surface_size_); content_layer->SetIsDrawable(true); content_layer->SetContentsOpaque(true);
diff --git a/build/android/render_tests/process_render_test_results.py b/build/android/render_tests/process_render_test_results.py new file mode 100755 index 0000000..9ab0d1b --- /dev/null +++ b/build/android/render_tests/process_render_test_results.py
@@ -0,0 +1,214 @@ +#!/usr/bin/env python +# +# Copyright 2016 The Chromium Authors. All rights reserved. +# Use of this source code is governed by a BSD-style license that can be +# found in the LICENSE file. + +import argparse +import collections +import logging +import os +import posixpath +import re +import shutil +import sys +import tempfile +import zipfile + +sys.path.append(os.path.join(os.path.dirname(__file__), os.pardir)) +import devil_chromium +from devil.android import device_utils +from devil.utils import cmd_helper +from pylib.constants import host_paths + +sys.path.append(os.path.join(host_paths.DIR_SOURCE_ROOT, 'build')) +import find_depot_tools # pylint: disable=import-error + +sys.path.append(os.path.join(host_paths.DIR_SOURCE_ROOT, 'third_party')) +import jinja2 # pylint: disable=import-error + +try: + from PIL import Image # pylint: disable=import-error + from PIL import ImageChops # pylint: disable=import-error + can_compute_diffs = True +except ImportError: + can_compute_diffs = False + logging.exception('Error importing PIL library. Image diffs will not be ' + 'displayed properly unless PIL module is installed.') + +_RE_IMAGE_NAME = re.compile( + r'(?P<test_class>\w+)\.' + r'(?P<description>\w+)\.' + r'(?P<device_model>\w+)\.' 
+ r'(?P<orientation>port|land)\.png') + +_RENDER_TEST_BASE_URL = 'https://storage.googleapis.com/chromium-render-tests/' +_RENDER_TEST_BUCKET = 'gs://chromium-render-tests/' + +_JINJA_TEMPLATE_DIR = os.path.dirname(os.path.abspath(__file__)) +_JINJA_TEMPLATE_FILENAME = 'render_webpage.html.jinja2' + + +def _UploadFiles(upload_dir, files): + """Upload files to the render tests GS bucket.""" + if files: + google_storage_upload_dir = os.path.join(_RENDER_TEST_BUCKET, upload_dir) + cmd = [os.path.join(find_depot_tools.DEPOT_TOOLS_PATH, 'gsutil.py'), + '-m', 'cp'] + cmd.extend(files) + cmd.append(google_storage_upload_dir) + cmd_helper.RunCmd(cmd) + + +def _GoogleStorageUrl(upload_dir, filename): + return os.path.join( + _RENDER_TEST_BASE_URL, upload_dir, os.path.basename(filename)) + + +def _ComputeImageDiff(failure_image, golden_image): + """Compute mask showing which pixels are different between two images.""" + return (ImageChops.difference(failure_image, golden_image) + .convert('L') + .point(lambda i: 255 if i else 0)) + + +def ProcessRenderTestResults(devices, render_results_dir, + upload_dir, html_file): + """Grabs render results from device and generates webpage displaying results. + + Args: + devices: List of DeviceUtils objects to grab results from. + render_results_path: Path where render test results are storage. + Will look for failures render test results on the device in + /sdcard/chromium_tests_root/<render_results_path>/failures/ + and will look for golden images at Chromium src/<render_results_path>/. + upload_dir: Directory to upload the render test results to. + html_file: File to write the test results to. 
+ """ + results_dict = collections.defaultdict(lambda: collections.defaultdict(list)) + + diff_upload_dir = os.path.join(upload_dir, 'diffs') + failure_upload_dir = os.path.join(upload_dir, 'failures') + golden_upload_dir = os.path.join(upload_dir, 'goldens') + + diff_images = [] + failure_images = [] + golden_images = [] + + temp_dir = None + try: + temp_dir = tempfile.mkdtemp() + + for device in devices: + failures_device_dir = posixpath.join( + device.GetExternalStoragePath(), + 'chromium_tests_root', render_results_dir, 'failures') + device.PullFile(failures_device_dir, temp_dir) + + for failure_filename in os.listdir(os.path.join(temp_dir, 'failures')): + m = _RE_IMAGE_NAME.match(failure_filename) + if not m: + logging.warning( + 'Unexpected file in render test failures, %s', failure_filename) + continue + failure_file = os.path.join(temp_dir, 'failures', failure_filename) + + # Check to make sure we have golden image for this failure. + golden_file = os.path.join( + host_paths.DIR_SOURCE_ROOT, render_results_dir, failure_filename) + if not os.path.exists(golden_file): + logging.error('Cannot find golden image for %s', failure_filename) + continue + + # Compute image diff between failure and golden. 
+ if can_compute_diffs: + diff_image = _ComputeImageDiff( + Image.open(failure_file), Image.open(golden_file)) + diff_filename = '_diff'.join( + os.path.splitext(os.path.basename(failure_file))) + diff_file = os.path.join(temp_dir, diff_filename) + diff_image.save(diff_file) + diff_images.append(diff_file) + + failure_images.append(failure_file) + golden_images.append(golden_file) + + test_class = m.group('test_class') + device_model = m.group('device_model') + + results_entry = { + 'description': m.group('description'), + 'orientation': m.group('orientation'), + 'failure_image': _GoogleStorageUrl(failure_upload_dir, failure_file), + 'golden_image': _GoogleStorageUrl(golden_upload_dir, golden_file), + } + if can_compute_diffs: + results_entry.update( + {'diff_image': _GoogleStorageUrl(diff_upload_dir, diff_file)}) + results_dict[test_class][device_model].append(results_entry) + + if can_compute_diffs: + _UploadFiles(diff_upload_dir, diff_images) + _UploadFiles(failure_upload_dir, failure_images) + _UploadFiles(golden_upload_dir, golden_images) + + if failure_images: + failures_zipfile = os.path.join(temp_dir, 'failures.zip') + with zipfile.ZipFile(failures_zipfile, mode='w') as zf: + for failure_file in failure_images: + zf.write(failure_file, os.path.join( + render_results_dir, os.path.basename(failure_file))) + failure_zip_url = _GoogleStorageUrl(upload_dir, failures_zipfile) + _UploadFiles(upload_dir, [failures_zipfile]) + else: + failure_zip_url = None + + jinja2_env = jinja2.Environment( + loader=jinja2.FileSystemLoader(_JINJA_TEMPLATE_DIR), + trim_blocks=True) + template = jinja2_env.get_template(_JINJA_TEMPLATE_FILENAME) + # pylint: disable=no-member + processed_template_output = template.render( + full_results=dict(results_dict), + failure_zip_url=failure_zip_url, show_diffs=can_compute_diffs) + # pylint: enable=no-member + with open(html_file, 'wb') as f: + f.write(processed_template_output) + finally: + if temp_dir: + shutil.rmtree(temp_dir) + + +def 
main(): + parser = argparse.ArgumentParser() + + parser.add_argument('--render-results-dir', + required=True, + help='Path on device to look for render test images') + parser.add_argument('--output-html-file', + required=True, + help='File to output the results webpage.') + parser.add_argument('-d', '--device', dest='devices', action='append', + default=[], + help='Device to look for render test results on. ' + 'Default is to look on all connected devices.') + parser.add_argument('--adb-path', type=os.path.abspath, + help='Absolute path to the adb binary to use.') + parser.add_argument('--buildername', type=str, required=True, + help='Bot buildername. Used to generate path to upload ' + 'render test results') + parser.add_argument('--build-number', type=str, required=True, + help='Bot build number. Used to generate path to upload ' + 'render test results') + + args = parser.parse_args() + devil_chromium.Initialize(adb_path=args.adb_path) + devices = device_utils.DeviceUtils.HealthyDevices(device_arg=args.devices) + + upload_dir = os.path.join(args.buildername, args.build_number) + ProcessRenderTestResults( + devices, args.render_results_dir, upload_dir, args.output_html_file) + + +if __name__ == '__main__': + sys.exit(main())
diff --git a/build/android/render_tests/render_webpage.html.jinja2 b/build/android/render_tests/render_webpage.html.jinja2 new file mode 100644 index 0000000..b5ea6039 --- /dev/null +++ b/build/android/render_tests/render_webpage.html.jinja2
@@ -0,0 +1,84 @@ +<!-- + * Copyright 2016 The Chromium Authors. All rights reserved. + * Use of this source code is governed by a BSD-style license that can be + * found in the LICENSE file. +--> +<!DOCTYPE html> +<html> + <head> + <link rel="stylesheet" href="https://fonts.googleapis.com/icon?family=Material+Icons"> + <link rel="stylesheet" href="https://code.getmdl.io/1.2.1/material.blue-indigo.min.css"> + <script defer src="https://code.getmdl.io/1.2.1/material.min.js"></script> + + <style> + div.text-element { + text-align: center; + } + body { + background-color: #efefef; + } + </style> + </head> + + <body> + {% if failure_zip_url is not none %} + <a href="{{ failure_zip_url }}"> + <div class="mdl-color--primary" width="100%"> + <h3>Download Image Zip</h3> + </div> + </a> + {% endif %} + + {% for test_class, device_results in full_results.iteritems() %} + <div class="mdl-color--primary" width="100%"> + <h3>{{ test_class }}</h3> + </div> + + <div class="mdl-tabs mdl-js-tabs mdl-js-ripple-effect"> + <div class="mdl-tabs__tab-bar"> + {% for device_model, _ in device_results.iteritems() %} + <a href="#{{ device_model }}-panel" class="mdl-tabs__tab">{{ device_model }}</a> + {% endfor %} + </div> + + {% for device_model, test_results in device_results.iteritems() %} + <div class="mdl-tabs__panel" id="{{ device_model }}-panel"> + + <div class="mdl-grid"> + <div class="mdl-cell mdl-cell--3-col text-element"><b>Description</b></div> + <div class="mdl-cell mdl-cell--3-col text-element"><b>Golden</b></div> + <div class="mdl-cell mdl-cell--3-col text-element"><b>Failure</b></div> + {% if show_diffs %} + <div class="mdl-cell mdl-cell--3-col text-element"><b>Diff</b></div> + {% endif %} + </div> + {% for result in test_results %} + <div class="mdl-grid"> + <div class="mdl-cell mdl-cell--3-col text-element"> + {{ result['description'] }} + </div> + <div class="mdl-cell mdl-cell--3-col"> + <a href="{{ result['golden_image'] }}"> + <img class="mdl-shadow--2dp" src="{{ 
result['golden_image'] }}" width="100%"> + </a> + </div> + <div class="mdl-cell mdl-cell--3-col mdl-shadow--2dp"> + <a href="{{ result['failure_image'] }}"> + <img src="{{ result['failure_image'] }}" width="100%"> + </a> + </div> + {% if show_diffs %} + <div class="mdl-cell mdl-cell--3-col mdl-shadow--2dp"> + <a href="{{ result['diff_image'] }}"> + <img src="{{ result['diff_image'] }}" width="100%"> + </a> + </div> + {% endif %} + </div> + {% endfor %} + </div> + {% endfor %} + </div> + {% endfor %} + </body> +</html>
diff --git a/cc/layers/surface_layer.cc b/cc/layers/surface_layer.cc index a849589..8f90689 100644 --- a/cc/layers/surface_layer.cc +++ b/cc/layers/surface_layer.cc
@@ -62,19 +62,23 @@ DCHECK(!layer_tree_host()); } -void SurfaceLayer::SetSurfaceInfo(const SurfaceInfo& surface_info, - bool stretch_content_to_fill_bounds) { +void SurfaceLayer::SetSurfaceInfo(const SurfaceInfo& surface_info) { RemoveCurrentReference(); surface_info_ = surface_info; if (layer_tree_host()) { current_ref_ = ref_factory_->CreateReference(layer_tree_host(), surface_info_.id()); } - stretch_content_to_fill_bounds_ = stretch_content_to_fill_bounds; UpdateDrawsContent(HasDrawableContent()); SetNeedsPushProperties(); } +void SurfaceLayer::SetStretchContentToFillBounds( + bool stretch_content_to_fill_bounds) { + stretch_content_to_fill_bounds_ = stretch_content_to_fill_bounds; + SetNeedsPushProperties(); +} + std::unique_ptr<LayerImpl> SurfaceLayer::CreateLayerImpl( LayerTreeImpl* tree_impl) { return SurfaceLayerImpl::Create(tree_impl, id());
diff --git a/cc/layers/surface_layer.h b/cc/layers/surface_layer.h index a7b16abd..ac0da3d 100644 --- a/cc/layers/surface_layer.h +++ b/cc/layers/surface_layer.h
@@ -24,9 +24,11 @@ static scoped_refptr<SurfaceLayer> Create( scoped_refptr<SurfaceReferenceFactory> ref_factory); - // When stretch_content_to_fill_bounds is true, scale is unused. - void SetSurfaceInfo(const SurfaceInfo& surface_info, - bool stretch_content_to_fill_bounds); + void SetSurfaceInfo(const SurfaceInfo& surface_info); + + // When stretch_content_to_fill_bounds is true, the scale of the embedded + // surface is ignored and the content will be stretched to fill the bounds. + void SetStretchContentToFillBounds(bool stretch_content_to_fill_bounds); // Layer overrides. std::unique_ptr<LayerImpl> CreateLayerImpl(LayerTreeImpl* tree_impl) override;
diff --git a/cc/layers/surface_layer_unittest.cc b/cc/layers/surface_layer_unittest.cc index 56a8b8c..fa6465b 100644 --- a/cc/layers/surface_layer_unittest.cc +++ b/cc/layers/surface_layer_unittest.cc
@@ -102,7 +102,7 @@ SurfaceInfo info( SurfaceId(kArbitraryFrameSinkId, LocalFrameId(1, kArbitraryToken)), 1.f, gfx::Size(1, 1)); - layer->SetSurfaceInfo(info, false); + layer->SetSurfaceInfo(info); layer_tree_host_->GetSurfaceSequenceGenerator()->set_frame_sink_id( FrameSinkId(1, 1)); layer_tree_->SetRootLayer(layer); @@ -112,7 +112,7 @@ FakeLayerTreeHost::Create(&fake_client_, &task_graph_runner_, animation_host2.get()); auto layer2 = SurfaceLayer::Create(std::move(ref_factory)); - layer2->SetSurfaceInfo(info, false); + layer2->SetSurfaceInfo(info); layer_tree_host2->GetSurfaceSequenceGenerator()->set_frame_sink_id( FrameSinkId(2, 2)); layer_tree_host2->SetRootLayer(layer2); @@ -167,7 +167,7 @@ SurfaceInfo info( SurfaceId(kArbitraryFrameSinkId, LocalFrameId(1, kArbitraryToken)), 1.f, gfx::Size(1, 1)); - layer_->SetSurfaceInfo(info, false); + layer_->SetSurfaceInfo(info); // Layer hasn't been added to tree so no SurfaceSequence generated yet. EXPECT_EQ(0u, required_set_.size());
diff --git a/chrome/android/java/src/org/chromium/chrome/browser/omnibox/geo/GeolocationHeader.java b/chrome/android/java/src/org/chromium/chrome/browser/omnibox/geo/GeolocationHeader.java index b12d0ecc..f8d083d 100644 --- a/chrome/android/java/src/org/chromium/chrome/browser/omnibox/geo/GeolocationHeader.java +++ b/chrome/android/java/src/org/chromium/chrome/browser/omnibox/geo/GeolocationHeader.java
@@ -618,11 +618,11 @@ private static void recordLocationAgeHistogram(int locationSource, long durationMillis) { String name = ""; if (locationSource == LOCATION_SOURCE_HIGH_ACCURACY) { - name = "Geolocation.Header.LocationAgeHighAccuracy"; + name = "Geolocation.Header.LocationAge.HighAccuracy"; } else if (locationSource == LOCATION_SOURCE_GPS_ONLY) { - name = "Geolocation.Header.LocationAgeGpsOnly"; + name = "Geolocation.Header.LocationAge.GpsOnly"; } else if (locationSource == LOCATION_SOURCE_BATTERY_SAVING) { - name = "Geolocation.Header.LocationAgeBatterySaving"; + name = "Geolocation.Header.LocationAge.BatterySaving"; } else { Log.e(TAG, "Unexpected locationSource: " + locationSource); assert false : "Unexpected locationSource: " + locationSource;
diff --git a/chrome/android/java/src/org/chromium/chrome/browser/preferences/password/SavePasswordsPreferences.java b/chrome/android/java/src/org/chromium/chrome/browser/preferences/password/SavePasswordsPreferences.java index efd937c..dd1d2c0 100644 --- a/chrome/android/java/src/org/chromium/chrome/browser/preferences/password/SavePasswordsPreferences.java +++ b/chrome/android/java/src/org/chromium/chrome/browser/preferences/password/SavePasswordsPreferences.java
@@ -14,10 +14,6 @@ import android.preference.PreferenceScreen; import android.text.SpannableString; import android.text.style.ForegroundColorSpan; -import android.view.Gravity; -import android.view.View; -import android.view.ViewGroup; -import android.widget.TextView; import org.chromium.base.ApiCompatibilityUtils; import org.chromium.base.VisibleForTesting; @@ -32,6 +28,7 @@ import org.chromium.chrome.browser.preferences.PrefServiceBridge; import org.chromium.chrome.browser.preferences.Preferences; import org.chromium.chrome.browser.preferences.PreferencesLauncher; +import org.chromium.chrome.browser.preferences.TextMessagePreference; import org.chromium.ui.text.SpanApplier; /** @@ -58,20 +55,22 @@ private static final String PREF_CATEGORY_SAVED_PASSWORDS = "saved_passwords"; private static final String PREF_CATEGORY_EXCEPTIONS = "exceptions"; private static final String PREF_MANAGE_ACCOUNT_LINK = "manage_account_link"; + private static final String PREF_CATEGORY_SAVED_PASSWORDS_NO_TEXT = "saved_passwords_no_text"; private static final int ORDER_SWITCH = 0; private static final int ORDER_AUTO_SIGNIN_CHECKBOX = 1; private static final int ORDER_MANAGE_ACCOUNT_LINK = 2; private static final int ORDER_SAVED_PASSWORDS = 3; private static final int ORDER_EXCEPTIONS = 4; + private static final int ORDER_SAVED_PASSWORDS_NO_TEXT = 5; private final PasswordUIView mPasswordManagerHandler = new PasswordUIView(); - private TextView mEmptyView; private boolean mNoPasswords; private boolean mNoPasswordExceptions; private Preference mLinkPref; private ChromeSwitchPreference mSavePasswordsSwitch; private ChromeBaseCheckBoxPreference mAutoSignInSwitch; + private TextMessagePreference mEmptyView; @Override public void onCreate(Bundle savedInstanceState) { @@ -79,26 +78,17 @@ getActivity().setTitle(R.string.prefs_saved_passwords); setPreferenceScreen(getPreferenceManager().createPreferenceScreen(getActivity())); mPasswordManagerHandler.addObserver(this); - - mEmptyView = new 
TextView(getActivity(), null); - mEmptyView.setText(R.string.saved_passwords_none_text); - mEmptyView.setGravity(Gravity.CENTER); - mEmptyView.setVisibility(View.GONE); - } - - @Override - public void onActivityCreated(Bundle savedInstanceState) { - super.onActivityCreated(savedInstanceState); - ((ViewGroup) getActivity().findViewById(android.R.id.content)).addView(mEmptyView); } /** * Empty screen message when no passwords or exceptions are stored. */ private void displayEmptyScreenMessage() { - if (mEmptyView != null) { - mEmptyView.setVisibility(View.VISIBLE); - } + mEmptyView = new TextMessagePreference(getActivity(), null); + mEmptyView.setSummary(R.string.saved_passwords_none_text); + mEmptyView.setKey(PREF_CATEGORY_SAVED_PASSWORDS_NO_TEXT); + mEmptyView.setOrder(ORDER_SAVED_PASSWORDS_NO_TEXT); + getPreferenceScreen().addPreference(mEmptyView); } @Override @@ -111,7 +101,6 @@ mNoPasswords = false; mNoPasswordExceptions = false; getPreferenceScreen().removeAll(); - mEmptyView.setVisibility(View.GONE); createSavePasswordsSwitch(); createAutoSignInCheckbox(); mPasswordManagerHandler.updatePasswordLists(); @@ -124,13 +113,13 @@ profileCategory.removeAll(); getPreferenceScreen().removePreference(profileCategory); } - - mEmptyView.setVisibility(View.GONE); } @Override public void passwordListAvailable(int count) { resetList(PREF_CATEGORY_SAVED_PASSWORDS); + resetList(PREF_CATEGORY_SAVED_PASSWORDS_NO_TEXT); + mNoPasswords = count == 0; if (mNoPasswords) { if (mNoPasswordExceptions) displayEmptyScreenMessage(); @@ -164,6 +153,8 @@ @Override public void passwordExceptionListAvailable(int count) { resetList(PREF_CATEGORY_EXCEPTIONS); + resetList(PREF_CATEGORY_SAVED_PASSWORDS_NO_TEXT); + mNoPasswordExceptions = count == 0; if (mNoPasswordExceptions) { if (mNoPasswords) displayEmptyScreenMessage();
diff --git a/chrome/app/chromeos_strings.grdp b/chrome/app/chromeos_strings.grdp index 441a82a..cac754d 100644 --- a/chrome/app/chromeos_strings.grdp +++ b/chrome/app/chromeos_strings.grdp
@@ -6277,6 +6277,9 @@ <message name="IDS_NETWORK_UI_FORMAT_SHILL" desc="Label in network property format dropdown for Shill properties"> Shill </message> + <message name="IDS_NETWORK_UI_GLOBAL_POLICY" desc="Label for global policy properties"> + Global Policy: + </message> <message name="IDS_NETWORK_UI_VISIBLE_NETWORKS" desc="Label for list of visible networks"> Visible Networks: </message>
diff --git a/chrome/browser/android/offline_pages/offline_page_request_job.cc b/chrome/browser/android/offline_pages/offline_page_request_job.cc index fd252af..d327a5ae4 100644 --- a/chrome/browser/android/offline_pages/offline_page_request_job.cc +++ b/chrome/browser/android/offline_pages/offline_page_request_job.cc
@@ -641,6 +641,25 @@ return net::URLRequestRedirectJob::REDIRECT_302_FOUND; } +void OfflinePageRequestJob::OnOpenComplete(int result) { + UMA_HISTOGRAM_SPARSE_SLOWLY("OfflinePages.RequestJob.OpenFileErrorCode", + -result); +} + +void OfflinePageRequestJob::OnSeekComplete(int64_t result) { + if (result < 0) { + UMA_HISTOGRAM_SPARSE_SLOWLY("OfflinePages.RequestJob.SeekFileErrorCode", + static_cast<int>(-result)); + } +} + +void OfflinePageRequestJob::OnReadComplete(net::IOBuffer* buf, int result) { + if (result < 0) { + UMA_HISTOGRAM_SPARSE_SLOWLY("OfflinePages.RequestJob.ReadFileErrorCode", + -result); + } +} + void OfflinePageRequestJob::FallbackToDefault() { OfflinePageRequestInfo* info = OfflinePageRequestInfo::GetFromRequest(request());
diff --git a/chrome/browser/android/offline_pages/offline_page_request_job.h b/chrome/browser/android/offline_pages/offline_page_request_job.h index f06ebf1..15aa31c4 100644 --- a/chrome/browser/android/offline_pages/offline_page_request_job.h +++ b/chrome/browser/android/offline_pages/offline_page_request_job.h
@@ -93,6 +93,11 @@ bool CopyFragmentOnRedirect(const GURL& location) const override; int GetResponseCode() const override; + // net::URLRequestFileJob overrides: + void OnOpenComplete(int result) override; + void OnSeekComplete(int64_t result) override; + void OnReadComplete(net::IOBuffer* buf, int result) override; + void OnOfflineFilePathAvailable(const base::FilePath& offline_file_path); void OnOfflineRedirectAvailabe(const GURL& redirected_url);
diff --git a/chrome/browser/android/offline_pages/offline_page_request_job_unittest.cc b/chrome/browser/android/offline_pages/offline_page_request_job_unittest.cc index 952f9cbe..59853d3 100644 --- a/chrome/browser/android/offline_pages/offline_page_request_job_unittest.cc +++ b/chrome/browser/android/offline_pages/offline_page_request_job_unittest.cc
@@ -47,17 +47,22 @@ const GURL kTestUrl2("http://test.org/page2"); const GURL kTestUrl3("http://test.org/page3"); const GURL kTestUrl3WithFragment("http://test.org/page3#ref1"); +const GURL kTestUrl4("http://test.org/page4"); const GURL kTestOriginalUrl("http://test.org/first"); const ClientId kTestClientId = ClientId(kBookmarkNamespace, "1234"); const ClientId kTestClientId2 = ClientId(kDownloadNamespace, "1a2b3c4d"); const ClientId kTestClientId3 = ClientId(kDownloadNamespace, "3456abcd"); +const ClientId kTestClientId4 = ClientId(kDownloadNamespace, "5678"); const int kTestFileSize = 444; const int kTestFileSize2 = 450; const int kTestFileSize3 = 450; +const int kTestFileSize4 = 111; const int kTabId = 1; const int kBufSize = 1024; const char kAggregatedRequestResultHistogram[] = "OfflinePages.AggregatedRequestResult2"; +const char kOpenFileErrorCodeHistogram[] = + "OfflinePages.RequestJob.OpenFileErrorCode"; class OfflinePageRequestJobTestDelegate : public OfflinePageRequestJob::Delegate { @@ -262,6 +267,8 @@ void ExpectOneNonuniqueSampleForAggregatedRequestResult( OfflinePageRequestJob::AggregatedRequestResult result); + void ExpectOpenFileErrorCode(int result); + net::TestURLRequestContext* url_request_context() { return test_url_request_context_.get(); } @@ -272,6 +279,7 @@ int64_t offline_id() const { return offline_id_; } int64_t offline_id2() const { return offline_id2_; } int64_t offline_id3() const { return offline_id3_; } + int64_t offline_id4() const { return offline_id4_; } int bytes_read() const { return bytes_read_; } TestPreviewsDecider* test_previews_decider() { @@ -313,6 +321,7 @@ int64_t offline_id_; int64_t offline_id2_; int64_t offline_id3_; + int64_t offline_id4_; int bytes_read_; DISALLOW_COPY_AND_ASSIGN(OfflinePageRequestJobTest); @@ -325,8 +334,8 @@ offline_id_(-1), offline_id2_(-1), offline_id3_(-1), - bytes_read_(0) { -} + offline_id4_(-1), + bytes_read_(0) {} void OfflinePageRequestJobTest::SetUp() { // Create a test profile. 
@@ -392,6 +401,16 @@ SavePage(kTestUrl3WithFragment, kTestClientId3, kTestOriginalUrl, std::move(archiver3)); + // Save an offline page pointing to non-existent archive file. + base::FilePath archive_file_path4 = + test_data_dir_path.AppendASCII("offline_pages") + .AppendASCII("nonexistent.mhtml"); + std::unique_ptr<TestOfflinePageArchiver> archiver4( + new TestOfflinePageArchiver(kTestUrl4, archive_file_path4, + kTestFileSize4)); + + SavePage(kTestUrl4, kTestClientId4, GURL(), std::move(archiver4)); + // Create a context with delayed initialization. test_url_request_context_.reset(new net::TestURLRequestContext(true)); @@ -476,6 +495,10 @@ kAggregatedRequestResultHistogram, static_cast<int>(result), 1); } +void OfflinePageRequestJobTest::ExpectOpenFileErrorCode(int result) { + histogram_tester_.ExpectUniqueSample(kOpenFileErrorCodeHistogram, -result, 1); +} + void OfflinePageRequestJobTest::SavePage( const GURL& url, const ClientId& client_id, @@ -502,6 +525,8 @@ offline_id2_ = offline_id; else if (offline_id3_ == -1) offline_id3_ = offline_id; + else if (offline_id4_ == -1) + offline_id4_ = offline_id; } void OfflinePageRequestJobTest::InterceptRequestOnIO( @@ -865,4 +890,20 @@ SHOW_OFFLINE_ON_DISCONNECTED_NETWORK); } +TEST_F(OfflinePageRequestJobTest, LoadOfflinePageFromNonExistentFile) { + SimulateHasNetworkConnectivity(false); + + InterceptRequest(kTestUrl4, "GET", "", "", content::RESOURCE_TYPE_MAIN_FRAME); + base::RunLoop().Run(); + + EXPECT_EQ(0, bytes_read()); + ASSERT_TRUE(offline_page_tab_helper()->GetOfflinePageForTest()); + EXPECT_EQ(offline_id4(), + offline_page_tab_helper()->GetOfflinePageForTest()->offline_id); + ExpectOneUniqueSampleForAggregatedRequestResult( + OfflinePageRequestJob::AggregatedRequestResult:: + SHOW_OFFLINE_ON_DISCONNECTED_NETWORK); + ExpectOpenFileErrorCode(net::ERR_FILE_NOT_FOUND); +} + } // namespace offline_pages
diff --git a/chrome/browser/browsing_data/browsing_data_remover_unittest.cc b/chrome/browser/browsing_data/browsing_data_remover_unittest.cc index 5f88f21d..dc36730 100644 --- a/chrome/browser/browsing_data/browsing_data_remover_unittest.cc +++ b/chrome/browser/browsing_data/browsing_data_remover_unittest.cc
@@ -11,6 +11,7 @@ #include <memory> #include <set> #include <string> +#include <utility> #include <vector> #include "base/bind.h" @@ -25,6 +26,7 @@ #include "base/message_loop/message_loop.h" #include "base/run_loop.h" #include "base/single_thread_task_runner.h" +#include "base/strings/string_number_conversions.h" #include "base/strings/utf_string_conversions.h" #include "base/task/cancelable_task_tracker.h" #include "base/threading/thread_task_runner_handle.h" @@ -133,6 +135,7 @@ using domain_reliability::DomainReliabilityServiceFactory; using testing::_; using testing::ByRef; +using testing::Eq; using testing::Invoke; using testing::IsEmpty; using testing::Matcher; @@ -141,6 +144,7 @@ using testing::MatchResultListener; using testing::Not; using testing::Return; +using testing::SizeIs; using testing::WithArgs; namespace { @@ -3083,6 +3087,9 @@ BookmarkModelFactory::GetForBrowserContext(&profile); bookmarks::test::WaitForBookmarkModelToLoad(bookmark_model); + const base::Time delete_begin = + base::Time::Now() - base::TimeDelta::FromDays(1); + // Create a couple of bookmarks. bookmark_model->AddURL(bookmark_model->bookmark_bar_node(), 0, base::string16(), @@ -3091,7 +3098,7 @@ base::string16(), GURL("http://foo.org/mobile")); - // Simulate their visits. + // Simulate their visits (this is using Time::Now() as timestamps). ntp_snippets::UpdateBookmarkOnURLVisitedInMainFrame( bookmark_model, GURL("http://foo.org/desktop"), /*is_mobile_platform=*/false); @@ -3099,6 +3106,16 @@ bookmark_model, GURL("http://foo.org/mobile"), /*is_mobile_platform=*/true); + // Add a bookmark with a visited timestamp before the deletion interval. 
+ bookmarks::BookmarkNode::MetaInfoMap meta_info = { + {"last_visited", + base::Int64ToString((delete_begin - base::TimeDelta::FromSeconds(1)) + .ToInternalValue())}}; + bookmark_model->AddURLWithCreationTimeAndMetaInfo( + bookmark_model->mobile_node(), 0, base::ASCIIToUTF16("my title"), + GURL("http://foo-2.org/"), delete_begin - base::TimeDelta::FromDays(1), + &meta_info); + // There should be some recently visited bookmarks. EXPECT_THAT(ntp_snippets::GetRecentlyVisitedBookmarks( bookmark_model, 2, base::Time::UnixEpoch(), @@ -3110,18 +3127,16 @@ BrowsingDataRemoverFactory::GetForBrowserContext(&profile); BrowsingDataRemoverCompletionObserver completion_observer(remover); - remover->RemoveAndReply(base::Time(), base::Time::Max(), + remover->RemoveAndReply(delete_begin, base::Time::Max(), BrowsingDataRemover::REMOVE_HISTORY, BrowsingDataHelper::ALL, &completion_observer); completion_observer.BlockUntilCompletion(); - // There should be no recently visited bookmarks. - EXPECT_THAT(ntp_snippets::GetRecentlyVisitedBookmarks( - bookmark_model, 2, base::Time::UnixEpoch(), - /*consider_visits_from_desktop=*/false), - IsEmpty()); - EXPECT_THAT(ntp_snippets::GetRecentlyVisitedBookmarks( - bookmark_model, 2, base::Time::UnixEpoch(), - /*consider_visits_from_desktop=*/true), - IsEmpty()); + // There should be only 1 recently visited bookmarks. + std::vector<const bookmarks::BookmarkNode*> remaining_nodes = + ntp_snippets::GetRecentlyVisitedBookmarks( + bookmark_model, 3, base::Time::UnixEpoch(), + /*consider_visits_from_desktop=*/true); + EXPECT_THAT(remaining_nodes, SizeIs(1)); + EXPECT_THAT(remaining_nodes[0]->url().spec(), Eq("http://foo-2.org/")); }
diff --git a/chrome/browser/browsing_data/chrome_browsing_data_remover_delegate.cc b/chrome/browser/browsing_data/chrome_browsing_data_remover_delegate.cc index 2818aa4..2c8f794a 100644 --- a/chrome/browser/browsing_data/chrome_browsing_data_remover_delegate.cc +++ b/chrome/browser/browsing_data/chrome_browsing_data_remover_delegate.cc
@@ -4,6 +4,10 @@ #include "chrome/browser/browsing_data/chrome_browsing_data_remover_delegate.h" +#include <set> +#include <string> +#include <utility> + #include "chrome/browser/autofill/personal_data_manager_factory.h" #include "chrome/browser/bookmarks/bookmark_model_factory.h" #include "chrome/browser/browser_process.h" @@ -294,12 +298,12 @@ } // Remove the last visit dates meta-data from the bookmark model. - // TODO(vitaliii): Do not remove all dates, but only the ones matched by the - // time range and the filter. bookmarks::BookmarkModel* bookmark_model = BookmarkModelFactory::GetForBrowserContext(profile_); - if (bookmark_model) - ntp_snippets::RemoveAllLastVisitDates(bookmark_model); + if (bookmark_model) { + ntp_snippets::RemoveLastVisitedDatesBetween(delete_begin_, delete_end_, + filter, bookmark_model); + } #if BUILDFLAG(ENABLE_EXTENSIONS) // The extension activity log contains details of which websites extensions
diff --git a/chrome/browser/browsing_data/chrome_browsing_data_remover_delegate.h b/chrome/browser/browsing_data/chrome_browsing_data_remover_delegate.h index 39f212be..1f4eb0f 100644 --- a/chrome/browser/browsing_data/chrome_browsing_data_remover_delegate.h +++ b/chrome/browser/browsing_data/chrome_browsing_data_remover_delegate.h
@@ -5,6 +5,8 @@ #ifndef CHROME_BROWSER_BROWSING_DATA_CHROME_BROWSING_DATA_REMOVER_DELEGATE_H_ #define CHROME_BROWSER_BROWSING_DATA_CHROME_BROWSING_DATA_REMOVER_DELEGATE_H_ +#include <memory> + #include "base/memory/ref_counted.h" #include "base/memory/weak_ptr.h" #include "base/synchronization/waitable_event_watcher.h"
diff --git a/chrome/browser/chromeos/note_taking_helper.cc b/chrome/browser/chromeos/note_taking_helper.cc index 34cc9b8..bd14ab9 100644 --- a/chrome/browser/chromeos/note_taking_helper.cc +++ b/chrome/browser/chromeos/note_taking_helper.cc
@@ -15,6 +15,7 @@ #include "base/logging.h" #include "base/memory/ptr_util.h" #include "base/memory/ref_counted.h" +#include "base/metrics/histogram_macros.h" #include "base/strings/string_split.h" #include "chrome/browser/browser_process.h" #include "chrome/browser/chrome_notification_types.h" @@ -76,6 +77,10 @@ "ogfjaccbdfhecploibfbhighmebiffla"; const char NoteTakingHelper::kProdKeepExtensionId[] = "hmjkmjkepdijhoojdojkdfohbdgmmhki"; +const char NoteTakingHelper::kPreferredLaunchResultHistogramName[] = + "Apps.NoteTakingApp.PreferredLaunchResult"; +const char NoteTakingHelper::kDefaultLaunchResultHistogramName[] = + "Apps.NoteTakingApp.DefaultLaunchResult"; // static void NoteTakingHelper::Initialize() { @@ -150,17 +155,30 @@ const base::FilePath& path) { DCHECK_CURRENTLY_ON(content::BrowserThread::UI); DCHECK(profile); + + LaunchResult result = LaunchResult::NO_APP_SPECIFIED; std::string app_id = profile->GetPrefs()->GetString(prefs::kNoteTakingAppId); - if (!app_id.empty() && LaunchAppInternal(profile, app_id, path)) + if (!app_id.empty()) + result = LaunchAppInternal(profile, app_id, path); + UMA_HISTOGRAM_ENUMERATION(kPreferredLaunchResultHistogramName, + static_cast<int>(result), + static_cast<int>(LaunchResult::MAX)); + if (result == LaunchResult::CHROME_SUCCESS || + result == LaunchResult::ANDROID_SUCCESS) { return; + } // If the user hasn't chosen an app or we were unable to launch the one that // they've chosen, just launch the first one we see. 
+ result = LaunchResult::NO_APPS_AVAILABLE; NoteTakingAppInfos infos = GetAvailableApps(profile); if (infos.empty()) LOG(WARNING) << "Unable to launch note-taking app; none available"; else - LaunchAppInternal(profile, infos[0].app_id, path); + result = LaunchAppInternal(profile, infos[0].app_id, path); + UMA_HISTOGRAM_ENUMERATION(kDefaultLaunchResultHistogramName, + static_cast<int>(result), + static_cast<int>(LaunchResult::MAX)); } void NoteTakingHelper::OnArcShutdown() {} @@ -296,29 +314,30 @@ observer.OnAvailableNoteTakingAppsUpdated(); } -bool NoteTakingHelper::LaunchAppInternal(Profile* profile, - const std::string& app_id, - const base::FilePath& path) { +NoteTakingHelper::LaunchResult NoteTakingHelper::LaunchAppInternal( + Profile* profile, + const std::string& app_id, + const base::FilePath& path) { DCHECK(profile); if (LooksLikeAndroidPackageName(app_id)) { // Android app. if (!arc::ArcSessionManager::Get()->IsAllowedForProfile(profile)) { LOG(WARNING) << "Can't launch Android app " << app_id << " for profile"; - return false; + return LaunchResult::ANDROID_NOT_SUPPORTED_BY_PROFILE; } auto* helper = ARC_GET_INSTANCE_FOR_METHOD( arc::ArcServiceManager::Get()->arc_bridge_service()->intent_helper(), HandleIntent); if (!helper) - return false; + return LaunchResult::ANDROID_NOT_RUNNING; GURL clip_data_uri; if (!path.empty()) { if (!file_manager::util::ConvertPathToArcUrl(path, &clip_data_uri) || !clip_data_uri.is_valid()) { LOG(WARNING) << "Failed to convert " << path.value() << " to ARC URI"; - return false; + return LaunchResult::ANDROID_FAILED_TO_CONVERT_PATH; } } @@ -327,7 +346,10 @@ arc::mojom::ActivityNamePtr activity = arc::mojom::ActivityName::New(); activity->package_name = app_id; + // TODO(derat): Is there some way to detect whether this fails due to the + // package no longer being available? helper->HandleIntent(CreateIntentInfo(clip_data_uri), std::move(activity)); + return LaunchResult::ANDROID_SUCCESS; } else { // Chrome app. 
const extensions::ExtensionRegistry* extension_registry = @@ -336,14 +358,14 @@ app_id, extensions::ExtensionRegistry::ENABLED); if (!app) { LOG(WARNING) << "Failed to find Chrome note-taking app " << app_id; - return false; + return LaunchResult::CHROME_APP_MISSING; } auto action_data = base::MakeUnique<app_runtime::ActionData>(); action_data->action_type = app_runtime::ActionType::ACTION_TYPE_NEW_NOTE; launch_chrome_app_callback_.Run(profile, app, std::move(action_data), path); + return LaunchResult::CHROME_SUCCESS; } - - return true; + NOTREACHED(); } void NoteTakingHelper::Observe(int type,
diff --git a/chrome/browser/chromeos/note_taking_helper.h b/chrome/browser/chromeos/note_taking_helper.h index 441d8c2..3ec68792 100644 --- a/chrome/browser/chromeos/note_taking_helper.h +++ b/chrome/browser/chromeos/note_taking_helper.h
@@ -73,6 +73,32 @@ virtual void OnAvailableNoteTakingAppsUpdated() = 0; }; + // Describes the result of an attempt to launch a note-taking app. Values must + // not be renumbered, as this is used by histogram metrics. + enum class LaunchResult { + // A Chrome app was launched successfully. + CHROME_SUCCESS = 0, + // The requested Chrome app was unavailable. + CHROME_APP_MISSING = 1, + // An Android app was launched successfully. + ANDROID_SUCCESS = 2, + // An Android app couldn't be launched due to the profile not being allowed + // to use ARC. + ANDROID_NOT_SUPPORTED_BY_PROFILE = 3, + // An Android app couldn't be launched due to ARC not running. + ANDROID_NOT_RUNNING = 4, + // An Android app couldn't be launched due to a failure to convert the + // supplied path to an ARC URL. + ANDROID_FAILED_TO_CONVERT_PATH = 5, + // No attempt was made due to a preferred app not being specified. + NO_APP_SPECIFIED = 6, + // No Android or Chrome apps were available. + NO_APPS_AVAILABLE = 7, + // This value must remain last and should be incremented when a new reason + // is inserted. + MAX = 8, + }; + // Callback used to launch a Chrome app. using LaunchChromeAppCallback = base::Callback<void( Profile*, @@ -88,6 +114,10 @@ static const char kDevKeepExtensionId[]; static const char kProdKeepExtensionId[]; + // Names of histograms. + static const char kPreferredLaunchResultHistogramName[]; + static const char kDefaultLaunchResultHistogramName[]; + static void Initialize(); static void Shutdown(); static NoteTakingHelper* Get(); @@ -148,10 +178,10 @@ // Helper method that launches |app_id| (either an Android package name or a // Chrome extension ID) to create a new note with an optional attached file at - // |path|. Returns false if the app couldn't be launched. - bool LaunchAppInternal(Profile* profile, - const std::string& app_id, - const base::FilePath& path); + // |path|. Returns the attempt's result. 
+ LaunchResult LaunchAppInternal(Profile* profile, + const std::string& app_id, + const base::FilePath& path); // content::NotificationObserver: void Observe(int type,
diff --git a/chrome/browser/chromeos/note_taking_helper_unittest.cc b/chrome/browser/chromeos/note_taking_helper_unittest.cc index 24e36c63..2958e7e 100644 --- a/chrome/browser/chromeos/note_taking_helper_unittest.cc +++ b/chrome/browser/chromeos/note_taking_helper_unittest.cc
@@ -13,6 +13,7 @@ #include "base/macros.h" #include "base/run_loop.h" #include "base/strings/stringprintf.h" +#include "base/test/histogram_tester.h" #include "chrome/browser/chrome_notification_types.h" #include "chrome/browser/chromeos/file_manager/path_util.h" #include "chrome/browser/extensions/extension_service.h" @@ -40,9 +41,13 @@ using arc::mojom::IntentHandlerInfo; using arc::mojom::IntentHandlerInfoPtr; +using base::HistogramTester; using HandledIntent = arc::FakeIntentHelperInstance::HandledIntent; namespace chromeos { + +using LaunchResult = NoteTakingHelper::LaunchResult; + namespace { // Name of default profile. @@ -348,12 +353,20 @@ InstallExtension(extension.get(), profile()); // Check the Chrome app is launched with the correct parameters. + HistogramTester histogram_tester; const base::FilePath kPath("/foo/bar/photo.jpg"); helper()->LaunchAppForNewNote(profile(), kPath); ASSERT_EQ(1u, launched_chrome_apps_.size()); EXPECT_EQ(NoteTakingHelper::kProdKeepExtensionId, launched_chrome_apps_[0].id); EXPECT_EQ(kPath, launched_chrome_apps_[0].path); + + histogram_tester.ExpectUniqueSample( + NoteTakingHelper::kPreferredLaunchResultHistogramName, + static_cast<int>(LaunchResult::NO_APP_SPECIFIED), 1); + histogram_tester.ExpectUniqueSample( + NoteTakingHelper::kDefaultLaunchResultHistogramName, + static_cast<int>(LaunchResult::CHROME_SUCCESS), 1); } TEST_F(NoteTakingHelperTest, FallBackIfPreferredAppUnavailable) { @@ -365,19 +378,34 @@ CreateExtension(NoteTakingHelper::kDevKeepExtensionId, "dev"); InstallExtension(dev_extension.get(), profile()); - // Set the prod app as the default and check that it's launched. + // Set the prod app as preferred and check that it's launched. 
+ std::unique_ptr<HistogramTester> histogram_tester(new HistogramTester()); helper()->SetPreferredApp(profile(), NoteTakingHelper::kProdKeepExtensionId); helper()->LaunchAppForNewNote(profile(), base::FilePath()); ASSERT_EQ(1u, launched_chrome_apps_.size()); ASSERT_EQ(NoteTakingHelper::kProdKeepExtensionId, launched_chrome_apps_[0].id); + histogram_tester->ExpectUniqueSample( + NoteTakingHelper::kPreferredLaunchResultHistogramName, + static_cast<int>(LaunchResult::CHROME_SUCCESS), 1); + histogram_tester->ExpectTotalCount( + NoteTakingHelper::kDefaultLaunchResultHistogramName, 0); + // Now uninstall the prod app and check that we fall back to the dev app. UninstallExtension(prod_extension.get(), profile()); launched_chrome_apps_.clear(); + histogram_tester.reset(new HistogramTester()); helper()->LaunchAppForNewNote(profile(), base::FilePath()); ASSERT_EQ(1u, launched_chrome_apps_.size()); EXPECT_EQ(NoteTakingHelper::kDevKeepExtensionId, launched_chrome_apps_[0].id); + + histogram_tester->ExpectUniqueSample( + NoteTakingHelper::kPreferredLaunchResultHistogramName, + static_cast<int>(LaunchResult::CHROME_APP_MISSING), 1); + histogram_tester->ExpectUniqueSample( + NoteTakingHelper::kDefaultLaunchResultHistogramName, + static_cast<int>(LaunchResult::CHROME_SUCCESS), 1); } TEST_F(NoteTakingHelperTest, ArcInitiallyDisabled) { @@ -447,11 +475,19 @@ ASSERT_TRUE(helper()->IsAppAvailable(profile())); // The installed app should be launched. 
+ std::unique_ptr<HistogramTester> histogram_tester(new HistogramTester()); helper()->LaunchAppForNewNote(profile(), base::FilePath()); ASSERT_EQ(1u, intent_helper_.handled_intents().size()); EXPECT_EQ(GetIntentString(kPackage1, ""), GetIntentString(intent_helper_.handled_intents()[0])); + histogram_tester->ExpectUniqueSample( + NoteTakingHelper::kPreferredLaunchResultHistogramName, + static_cast<int>(LaunchResult::NO_APP_SPECIFIED), 1); + histogram_tester->ExpectUniqueSample( + NoteTakingHelper::kDefaultLaunchResultHistogramName, + static_cast<int>(LaunchResult::ANDROID_SUCCESS), 1); + // Install a second app and set it as the preferred app. const std::string kPackage2 = "org.chromium.package2"; handlers.emplace_back(CreateIntentHandlerInfo("App 1", kPackage1)); @@ -464,10 +500,17 @@ // The second app should be launched now. intent_helper_.clear_handled_intents(); + histogram_tester.reset(new HistogramTester()); helper()->LaunchAppForNewNote(profile(), base::FilePath()); ASSERT_EQ(1u, intent_helper_.handled_intents().size()); EXPECT_EQ(GetIntentString(kPackage2, ""), GetIntentString(intent_helper_.handled_intents()[0])); + + histogram_tester->ExpectUniqueSample( + NoteTakingHelper::kPreferredLaunchResultHistogramName, + static_cast<int>(LaunchResult::ANDROID_SUCCESS), 1); + histogram_tester->ExpectTotalCount( + NoteTakingHelper::kDefaultLaunchResultHistogramName, 0); } TEST_F(NoteTakingHelperTest, LaunchAndroidAppWithPath) { @@ -500,9 +543,32 @@ // When a path that isn't accessible to ARC is passed, the request should be // dropped. 
+ HistogramTester histogram_tester; intent_helper_.clear_handled_intents(); helper()->LaunchAppForNewNote(profile(), base::FilePath("/bad/path.jpg")); EXPECT_TRUE(intent_helper_.handled_intents().empty()); + + histogram_tester.ExpectUniqueSample( + NoteTakingHelper::kPreferredLaunchResultHistogramName, + static_cast<int>(LaunchResult::NO_APP_SPECIFIED), 1); + histogram_tester.ExpectUniqueSample( + NoteTakingHelper::kDefaultLaunchResultHistogramName, + static_cast<int>(LaunchResult::ANDROID_FAILED_TO_CONVERT_PATH), 1); +} + +TEST_F(NoteTakingHelperTest, NoAppsAvailable) { + Init(ENABLE_PALETTE | ENABLE_ARC); + + // When no note-taking apps are installed, the histograms should just be + // updated. + HistogramTester histogram_tester; + helper()->LaunchAppForNewNote(profile(), base::FilePath()); + histogram_tester.ExpectUniqueSample( + NoteTakingHelper::kPreferredLaunchResultHistogramName, + static_cast<int>(LaunchResult::NO_APP_SPECIFIED), 1); + histogram_tester.ExpectUniqueSample( + NoteTakingHelper::kDefaultLaunchResultHistogramName, + static_cast<int>(LaunchResult::NO_APPS_AVAILABLE), 1); } TEST_F(NoteTakingHelperTest, NotifyObserverAboutAndroidApps) {
diff --git a/chrome/browser/chromeos/profiles/multiprofiles_session_aborted_dialog.cc b/chrome/browser/chromeos/profiles/multiprofiles_session_aborted_dialog.cc index 6cdcce64..c9a5ddf 100644 --- a/chrome/browser/chromeos/profiles/multiprofiles_session_aborted_dialog.cc +++ b/chrome/browser/chromeos/profiles/multiprofiles_session_aborted_dialog.cc
@@ -4,7 +4,7 @@ #include "chrome/browser/chromeos/profiles/multiprofiles_session_aborted_dialog.h" -#include "ash/aura/wm_shelf_aura.h" +#include "ash/common/shelf/wm_shelf.h" #include "ash/root_window_controller.h" #include "ash/shell.h" #include "base/macros.h" @@ -83,7 +83,7 @@ std::vector<ash::RootWindowController*> controllers = ash::Shell::GetAllRootWindowControllers(); for (ash::RootWindowController* controller : controllers) { - controller->wm_shelf_aura()->SetAutoHideBehavior( + controller->wm_shelf()->SetAutoHideBehavior( ash::SHELF_AUTO_HIDE_ALWAYS_HIDDEN); } }
diff --git a/chrome/browser/extensions/api/networking_private/networking_private_apitest.cc b/chrome/browser/extensions/api/networking_private/networking_private_apitest.cc index 02d6d1a..d8d0b782 100644 --- a/chrome/browser/extensions/api/networking_private/networking_private_apitest.cc +++ b/chrome/browser/extensions/api/networking_private/networking_private_apitest.cc
@@ -183,6 +183,10 @@ return result; } + std::unique_ptr<base::DictionaryValue> GetGlobalPolicy() override { + return base::MakeUnique<base::DictionaryValue>(); + } + bool EnableNetworkType(const std::string& type) override { enabled_[type] = true; return !fail_; @@ -462,6 +466,10 @@ EXPECT_TRUE(RunNetworkingSubtest("setCellularSimState")) << message_; } +IN_PROC_BROWSER_TEST_F(NetworkingPrivateApiTest, GetGlobalPolicy) { + EXPECT_TRUE(RunNetworkingSubtest("getGlobalPolicy")) << message_; +} + // Test failure case class NetworkingPrivateApiTestFail : public NetworkingPrivateApiTest { @@ -516,6 +524,7 @@ // * disableNetworkType // * enableNetworkType // * requestNetworkScan +// * getGlobalPolicy IN_PROC_BROWSER_TEST_F(NetworkingPrivateApiTestFail, StartConnect) { EXPECT_FALSE(RunNetworkingSubtest("startConnect")) << message_;
diff --git a/chrome/browser/extensions/api/networking_private/networking_private_chromeos_apitest.cc b/chrome/browser/extensions/api/networking_private/networking_private_chromeos_apitest.cc index 220a962d..f1918f2 100644 --- a/chrome/browser/extensions/api/networking_private/networking_private_chromeos_apitest.cc +++ b/chrome/browser/extensions/api/networking_private/networking_private_chromeos_apitest.cc
@@ -26,6 +26,7 @@ #include "chromeos/dbus/shill_manager_client.h" #include "chromeos/dbus/shill_profile_client.h" #include "chromeos/dbus/shill_service_client.h" +#include "chromeos/network/managed_network_configuration_handler.h" #include "chromeos/network/network_handler.h" #include "chromeos/network/network_state_handler.h" #include "chromeos/network/onc/onc_utils.h" @@ -716,6 +717,24 @@ EXPECT_TRUE(RunNetworkingSubtest("cellularSimPuk")) << message_; } +IN_PROC_BROWSER_TEST_F(NetworkingPrivateChromeOSApiTest, GetGlobalPolicy) { + base::DictionaryValue global_config; + global_config.SetBooleanWithoutPathExpansion( + ::onc::global_network_config::kAllowOnlyPolicyNetworksToAutoconnect, + true); + global_config.SetBooleanWithoutPathExpansion( + ::onc::global_network_config::kAllowOnlyPolicyNetworksToConnect, false); + global_config.SetBooleanWithoutPathExpansion("SomeNewGlobalPolicy", false); + chromeos::NetworkHandler::Get() + ->managed_network_configuration_handler() + ->SetPolicy(::onc::ONC_SOURCE_DEVICE_POLICY, + std::string() /* no username hash */, base::ListValue(), + global_config); + base::RunLoop().RunUntilIdle(); + + EXPECT_TRUE(RunNetworkingSubtest("getGlobalPolicy")) << message_; +} + // Tests subset of networking API for the networking API alias - to verify that // using API methods and event does not cause access exceptions (due to // missing permissions).
diff --git a/chrome/browser/extensions/api/virtual_keyboard_private/chrome_virtual_keyboard_delegate.cc b/chrome/browser/extensions/api/virtual_keyboard_private/chrome_virtual_keyboard_delegate.cc index 5db3743..aeda9a0a 100644 --- a/chrome/browser/extensions/api/virtual_keyboard_private/chrome_virtual_keyboard_delegate.cc +++ b/chrome/browser/extensions/api/virtual_keyboard_private/chrome_virtual_keyboard_delegate.cc
@@ -151,12 +151,8 @@ int modifiers) { DCHECK_CURRENTLY_ON(content::BrowserThread::UI); aura::Window* window = GetKeyboardContainer(); - return window && keyboard::SendKeyEvent(type, - char_value, - key_code, - key_name, - modifiers | ui::EF_IS_SYNTHESIZED, - window->GetHost()); + return window && keyboard::SendKeyEvent(type, char_value, key_code, key_name, + modifiers, window->GetHost()); } bool ChromeVirtualKeyboardDelegate::ShowLanguageSettings() {
diff --git a/chrome/browser/resources/chromeos/network_ui/network_ui.html b/chrome/browser/resources/chromeos/network_ui/network_ui.html index 9e52942..3025115 100644 --- a/chrome/browser/resources/chromeos/network_ui/network_ui.html +++ b/chrome/browser/resources/chromeos/network_ui/network_ui.html
@@ -32,7 +32,7 @@ </select> </div> - <cr-network-select expandable show-active show-separators max-height="100" + <cr-network-select expandable show-active max-height="100" handle-network-item-selected> </cr-network-select> @@ -43,6 +43,9 @@ </div> </div> + <h3 i18n-content="globalPolicyLabel"></h3> + <div id="global-policy"></div> + <h3 i18n-content="visibleNetworksLabel"></h3> <table id="network-state-table" class="state-table"> <tr class="state-table-header">
diff --git a/chrome/browser/resources/chromeos/network_ui/network_ui.js b/chrome/browser/resources/chromeos/network_ui/network_ui.js index 7b02ed8d..7b31697 100644 --- a/chrome/browser/resources/chromeos/network_ui/network_ui.js +++ b/chrome/browser/resources/chromeos/network_ui/network_ui.js
@@ -314,6 +314,16 @@ }; /** + * Requests the global policy dictionary and updates the page. + */ + var requestGlobalPolicy = function() { + chrome.networkingPrivate.getGlobalPolicy(function(policy) { + document.querySelector('#global-policy').textContent = + JSON.stringify(policy); + }); + }; + + /** * Sets refresh rate if the interval is found in the url. */ var setRefresh = function() { @@ -334,6 +344,7 @@ $('refresh').onclick = requestNetworks; setRefresh(); requestNetworks(); + requestGlobalPolicy(); }); document.addEventListener('custom-item-selected', function(event) {
diff --git a/chrome/browser/resources/settings/internet_page/network_summary_item.html b/chrome/browser/resources/settings/internet_page/network_summary_item.html index f5be1048..5616c42c 100644 --- a/chrome/browser/resources/settings/internet_page/network_summary_item.html +++ b/chrome/browser/resources/settings/internet_page/network_summary_item.html
@@ -55,7 +55,7 @@ <div class="settings-box two-line" actionable on-tap="onDetailsTap_"> <div id="details" no-flex$="[[showSimInfo_(deviceState)]]"> - <cr-network-list-item item="[[activeNetworkState]]" class="flex"> + <cr-network-list-item item="[[activeNetworkState]]"> </cr-network-list-item> <paper-spinner active="[[scanningIsActive_(deviceState, expanded_)]]" hidden$="[[!scanningIsVisible_(deviceState)]]">
diff --git a/chrome/browser/resources/settings/passwords_and_forms_page/passwords_section.html b/chrome/browser/resources/settings/passwords_and_forms_page/passwords_section.html index 9b509bf5..b7ee06c 100644 --- a/chrome/browser/resources/settings/passwords_and_forms_page/passwords_section.html +++ b/chrome/browser/resources/settings/passwords_and_forms_page/passwords_section.html
@@ -37,8 +37,8 @@ } .username-column { - -webkit-margin-end: 16px; - -webkit-margin-start: 16px; + -webkit-margin-end: 8px; + -webkit-margin-start: 8px; flex: 2; } @@ -109,8 +109,7 @@ </div> <dialog is="cr-action-menu" id="menu"> <button id="menuEditPassword" class="dropdown-item" - on-tap="onMenuEditPasswordTap_" - hidden$="[[!showPasswords]]">$i18n{passwordViewDetails}</button> + on-tap="onMenuEditPasswordTap_">$i18n{passwordViewDetails}</button> <button id="menuRemovePassword" class="dropdown-item" on-tap="onMenuRemovePasswordTap_">$i18n{removePassword}</button> </dialog>
diff --git a/chrome/browser/resources/settings/passwords_and_forms_page/passwords_section.js b/chrome/browser/resources/settings/passwords_and_forms_page/passwords_section.js index 32fea6e..42fe312f 100644 --- a/chrome/browser/resources/settings/passwords_and_forms_page/passwords_section.js +++ b/chrome/browser/resources/settings/passwords_and_forms_page/passwords_section.js
@@ -39,14 +39,6 @@ }, /** - * Whether passwords can be shown or not. - * @type {boolean} - */ - showPasswords: { - type: Boolean, - }, - - /** * An array of sites to display. * @type {!Array<!chrome.passwordsPrivate.ExceptionPair>} */
diff --git a/chrome/browser/resources/settings/passwords_and_forms_page/passwords_shared_css.html b/chrome/browser/resources/settings/passwords_and_forms_page/passwords_shared_css.html index d7afeb6..4d709a9 100644 --- a/chrome/browser/resources/settings/passwords_and_forms_page/passwords_shared_css.html +++ b/chrome/browser/resources/settings/passwords_and_forms_page/passwords_shared_css.html
@@ -13,12 +13,8 @@ } paper-icon-button { - -webkit-margin-end: 0; - -webkit-margin-start: 20px; - -webkit-padding-end: 0; - -webkit-padding-start: 0; + -webkit-margin-end: -8px; color: var(--paper-grey-600); - width: 20px; } .list-with-header > div:first-of-type {
diff --git a/chrome/browser/resources/welcome/win10/inline.css b/chrome/browser/resources/welcome/win10/inline.css index ecc2605..215ded8b 100644 --- a/chrome/browser/resources/welcome/win10/inline.css +++ b/chrome/browser/resources/welcome/win10/inline.css
@@ -33,8 +33,8 @@ } .header-logo { - content: -webkit-image-set(url(/logo-large.png) 1x, - url(/logo-large2x.png) 2x); + content: url(chrome://welcome-win10/logo-large.png); + height: 4em; } .heading { @@ -45,7 +45,7 @@ } .sections { - margin-bottom: 3em; + margin-bottom: 2em; } .section.expandable { @@ -121,15 +121,14 @@ .button { -webkit-font-smoothing: antialiased; - background: var(--paper-blue-a200); + background: var(--google-blue-500); border-radius: 2px; box-shadow: inset 0 0 0 1px rgba(0, 0, 0, .1); color: #fff; display: inline-block; - font-size: .75em; + font-size: .8125em; font-weight: 500; - line-height: 1.75em; - min-width: 2em; + line-height: 2.25rem; padding: 0 1em; text-align: center; transition: 300ms cubic-bezier(.4, .2, 0, 1); @@ -142,8 +141,7 @@ } .logo-small { - content: -webkit-image-set(url(/logo-small.png) 1x, - url(/logo-small2x.png) 2x); + content: url(chrome://welcome-win10/logo-small.png); display: inline; height: 1.25em; vertical-align: top;
diff --git a/chrome/browser/resources/welcome/win10/inline.html b/chrome/browser/resources/welcome/win10/inline.html index 45fd189..8a5bfa4b 100644 --- a/chrome/browser/resources/welcome/win10/inline.html +++ b/chrome/browser/resources/welcome/win10/inline.html
@@ -8,6 +8,7 @@ <link rel="import" href="chrome://resources/html/polymer.html"> <link rel="import" href="chrome://resources/html/util.html"> <link rel="import" href="chrome://resources/polymer/v1_0/iron-icon/iron-icon.html"> + <link rel="import" href="chrome://resources/polymer/v1_0/paper-button/paper-button.html"> <link rel="import" href="chrome://resources/polymer/v1_0/paper-styles/color.html"> <link rel="stylesheet" href="chrome://resources/css/text_defaults_md.css"> @@ -76,7 +77,9 @@ </div> </template> </div> - <a href="#" class="button" on-tap="onContinue">$i18n{continueText}</a> + <paper-button class="button" on-tap="onContinue"> + $i18n{continueText} + </paper-button> </div> </template> </body>
diff --git a/chrome/browser/resources/welcome/win10/sectioned.css b/chrome/browser/resources/welcome/win10/sectioned.css index cd0ec14..77ec6744 100644 --- a/chrome/browser/resources/welcome/win10/sectioned.css +++ b/chrome/browser/resources/welcome/win10/sectioned.css
@@ -32,8 +32,8 @@ } .header-logo { - content: -webkit-image-set(url(/logo-large.png) 1x, - url(/logo-large2x.png) 2x); + content: url(chrome://welcome-win10/logo-large.png); + height: 4em; } .text { @@ -56,7 +56,7 @@ } .sections { - margin-bottom: 3em; + margin-bottom: 2em; } .section.expandable { @@ -132,15 +132,14 @@ .button { -webkit-font-smoothing: antialiased; - background: var(--paper-blue-a200); + background: var(--google-blue-500); border-radius: 2px; box-shadow: inset 0 0 0 1px rgba(0, 0, 0, .1); color: #fff; display: inline-block; - font-size: .74em; + font-size: .8125em; font-weight: 500; - line-height: 1.75em; - min-width: 2em; + line-height: 2.25rem; padding: 0 1em; text-align: center; transition: 300ms cubic-bezier(.4, .2, 0, 1); @@ -160,8 +159,7 @@ } .logo-small { - content: -webkit-image-set(url(/logo-small.png) 1x, - url(/logo-small2x.png) 2x); + content: url(chrome://welcome-win10/logo-small.png); display: inline; height: 1.25em; vertical-align: top;
diff --git a/chrome/browser/resources/welcome/win10/sectioned.html b/chrome/browser/resources/welcome/win10/sectioned.html index 378a957..d148da2 100644 --- a/chrome/browser/resources/welcome/win10/sectioned.html +++ b/chrome/browser/resources/welcome/win10/sectioned.html
@@ -8,6 +8,7 @@ <link rel="import" href="chrome://resources/html/polymer.html"> <link rel="import" href="chrome://resources/html/util.html"> <link rel="import" href="chrome://resources/polymer/v1_0/iron-icon/iron-icon.html"> + <link rel="import" href="chrome://resources/polymer/v1_0/paper-button/paper-button.html"> <link rel="import" href="chrome://resources/polymer/v1_0/paper-styles/color.html"> <link rel="stylesheet" href="chrome://resources/css/text_defaults_md.css"> @@ -54,7 +55,9 @@ </div> </template> </div> - <a href="#" class="button" on-tap="onContinue">$i18n{continueText}</a> + <paper-button class="button" on-tap="onContinue"> + $i18n{continueText} + </paper-button> </div> </div> <div class="bg">
diff --git a/chrome/browser/search_engines/template_url_service_android.h b/chrome/browser/search_engines/template_url_service_android.h index 48f978b..50882916 100644 --- a/chrome/browser/search_engines/template_url_service_android.h +++ b/chrome/browser/search_engines/template_url_service_android.h
@@ -76,11 +76,17 @@ JNIEnv* env, const base::android::JavaParamRef<jobject>& obj, const base::android::JavaParamRef<jstring>& jkeyword); + + // Adds a custom search engine, sets |jkeyword| as its short_name and keyword, + // and sets its date_created as |age_in_days| days before the current time. base::android::ScopedJavaLocalRef<jstring> AddSearchEngineForTesting( JNIEnv* env, const base::android::JavaParamRef<jobject>& obj, const base::android::JavaParamRef<jstring>& jkeyword, jint age_in_days); + + // Finds the search engine whose keyword matches |jkeyword| and sets its + // last_visited time as the current time. base::android::ScopedJavaLocalRef<jstring> UpdateLastVisitedForTesting( JNIEnv* env, const base::android::JavaParamRef<jobject>& obj,
diff --git a/chrome/browser/sync/test/integration/two_client_uss_sync_test.cc b/chrome/browser/sync/test/integration/two_client_uss_sync_test.cc index 8a6f76c0..f9e97a1 100644 --- a/chrome/browser/sync/test/integration/two_client_uss_sync_test.cc +++ b/chrome/browser/sync/test/integration/two_client_uss_sync_test.cc
@@ -16,6 +16,7 @@ #include "components/browser_sync/profile_sync_service.h" #include "components/sync/model/fake_model_type_sync_bridge.h" #include "components/sync/model/metadata_change_list.h" +#include "components/sync/model/model_error.h" #include "components/sync/model/model_type_change_processor.h" using browser_sync::ChromeSyncClient; @@ -63,14 +64,13 @@ TestModelTypeSyncBridge() : FakeModelTypeSyncBridge(base::Bind(&ModelTypeChangeProcessor::Create)) { - change_processor()->OnMetadataLoaded(syncer::SyncError(), - db().CreateMetadataBatch()); + change_processor()->OnMetadataLoaded(db().CreateMetadataBatch()); } - syncer::SyncError ApplySyncChanges( + syncer::ModelError ApplySyncChanges( std::unique_ptr<syncer::MetadataChangeList> metadata_changes, syncer::EntityChangeList entity_changes) override { - syncer::SyncError error = FakeModelTypeSyncBridge::ApplySyncChanges( + syncer::ModelError error = FakeModelTypeSyncBridge::ApplySyncChanges( std::move(metadata_changes), entity_changes); NotifyObservers(); return error; @@ -324,7 +324,7 @@ ASSERT_TRUE(DataChecker(model1, kKey1, kValue1).Wait()); // Set an error in model 1 to trigger in the next GetUpdates. - model1->ErrorOnNextCall(syncer::SyncError::DATATYPE_ERROR); + model1->ErrorOnNextCall(); // Write an item on model 0 to trigger a GetUpdates in model 1. model0->WriteItem(kKey1, kValue2);
diff --git a/chrome/browser/ui/ash/launcher/chrome_launcher_controller_impl_browsertest.cc b/chrome/browser/ui/ash/launcher/chrome_launcher_controller_impl_browsertest.cc index 5e1d861..5a94a9c1 100644 --- a/chrome/browser/ui/ash/launcher/chrome_launcher_controller_impl_browsertest.cc +++ b/chrome/browser/ui/ash/launcher/chrome_launcher_controller_impl_browsertest.cc
@@ -76,6 +76,7 @@ #include "ui/aura/client/aura_constants.h" #include "ui/aura/window.h" #include "ui/base/window_open_disposition.h" +#include "ui/display/test/display_manager_test_api.h" #include "ui/events/event.h" #include "ui/events/event_constants.h" #include "ui/events/test/event_generator.h" @@ -1890,30 +1891,14 @@ generator.ReleaseLeftButton(); } -// Used to test drag & drop an item between app list and shelf with multi -// display environment. -class ShelfAppBrowserTestWithMultiMonitor - : public ShelfAppBrowserTestNoDefaultBrowser { - protected: - ShelfAppBrowserTestWithMultiMonitor() {} - ~ShelfAppBrowserTestWithMultiMonitor() override {} - - void SetUpCommandLine(base::CommandLine* command_line) override { - ShelfAppBrowserTestNoDefaultBrowser::SetUpCommandLine(command_line); - command_line->AppendSwitchASCII("ash-host-window-bounds", - "800x800,801+0-800x800"); - } - - private: - - DISALLOW_COPY_AND_ASSIGN(ShelfAppBrowserTestWithMultiMonitor); -}; - // Do basic drag and drop interaction tests between the application list and // the launcher in the secondary monitor. -// TODO(msw): fix, http://crbug.com/678622. -IN_PROC_BROWSER_TEST_F(ShelfAppBrowserTestWithMultiMonitor, - DISABLED_BasicDragAndDrop) { +IN_PROC_BROWSER_TEST_F(ShelfAppBrowserTest, MultiDisplayBasicDragAndDrop) { + // Update the display configuration to add a secondary display. + display::test::DisplayManagerTestApi( + ash::Shell::GetInstance()->display_manager()) + .UpdateDisplay("800x800,801+0-800x800"); + // Get a number of interfaces we need. DCHECK_EQ(ash::Shell::GetAllRootWindows().size(), 2U); aura::Window* secondary_root_window = ash::Shell::GetAllRootWindows()[1];
diff --git a/chrome/browser/ui/autofill/autofill_popup_controller.h b/chrome/browser/ui/autofill/autofill_popup_controller.h index 5bede9c..c9aec43 100644 --- a/chrome/browser/ui/autofill/autofill_popup_controller.h +++ b/chrome/browser/ui/autofill/autofill_popup_controller.h
@@ -14,6 +14,7 @@ #include "build/build_config.h" #include "chrome/browser/ui/autofill/autofill_popup_view_delegate.h" #include "third_party/skia/include/core/SkColor.h" +#include "ui/native_theme/native_theme.h" namespace autofill { @@ -46,9 +47,10 @@ // Removes the suggestion at the given index. virtual bool RemoveSuggestion(int index) = 0; - // Returns the background color of the row item according to its |index|, or - // transparent if the default popup background should be used. - virtual SkColor GetBackgroundColorForRow(int index) const = 0; + // Returns the background color ID of the row item according to its |index|, + // or default popup background otherwise. + virtual ui::NativeTheme::ColorId GetBackgroundColorIDForRow( + int index) const = 0; // Returns the index of the selected line. A line is "selected" when it is // hovered or has keyboard focus.
diff --git a/chrome/browser/ui/autofill/autofill_popup_controller_impl.cc b/chrome/browser/ui/autofill/autofill_popup_controller_impl.cc index a7a3ebf..0b4ac88 100644 --- a/chrome/browser/ui/autofill/autofill_popup_controller_impl.cc +++ b/chrome/browser/ui/autofill/autofill_popup_controller_impl.cc
@@ -13,7 +13,6 @@ #include "base/strings/utf_string_conversions.h" #include "build/build_config.h" #include "chrome/browser/ui/autofill/autofill_popup_view.h" -#include "chrome/browser/ui/autofill/popup_constants.h" #include "components/autofill/core/browser/autofill_popup_delegate.h" #include "components/autofill/core/browser/popup_item_ids.h" #include "components/autofill/core/browser/suggestion.h" @@ -375,11 +374,11 @@ return true; } -SkColor AutofillPopupControllerImpl::GetBackgroundColorForRow(int index) const { - if (index == selected_line_) - return kHoveredBackgroundColor; - - return SK_ColorTRANSPARENT; +ui::NativeTheme::ColorId +AutofillPopupControllerImpl::GetBackgroundColorIDForRow(int index) const { + return index == selected_line_ ? + ui::NativeTheme::kColorId_ResultsTableHoveredBackground : + ui::NativeTheme::kColorId_ResultsTableNormalBackground; } int AutofillPopupControllerImpl::selected_line() const {
diff --git a/chrome/browser/ui/autofill/autofill_popup_controller_impl.h b/chrome/browser/ui/autofill/autofill_popup_controller_impl.h index 5e7f869..e3332b5 100644 --- a/chrome/browser/ui/autofill/autofill_popup_controller_impl.h +++ b/chrome/browser/ui/autofill/autofill_popup_controller_impl.h
@@ -17,6 +17,7 @@ #include "chrome/browser/ui/autofill/popup_controller_common.h" #include "ui/gfx/geometry/rect.h" #include "ui/gfx/geometry/rect_f.h" +#include "ui/native_theme/native_theme.h" namespace autofill { @@ -95,7 +96,7 @@ base::string16* title, base::string16* body) override; bool RemoveSuggestion(int list_index) override; - SkColor GetBackgroundColorForRow(int index) const override; + ui::NativeTheme::ColorId GetBackgroundColorIDForRow(int index) const override; int selected_line() const override; const AutofillPopupLayoutModel& layout_model() const override;
diff --git a/chrome/browser/ui/autofill/autofill_popup_layout_model.cc b/chrome/browser/ui/autofill/autofill_popup_layout_model.cc index 6746c16..a481c95 100644 --- a/chrome/browser/ui/autofill/autofill_popup_layout_model.cc +++ b/chrome/browser/ui/autofill/autofill_popup_layout_model.cc
@@ -191,15 +191,16 @@ return smaller_font_list_; } -SkColor AutofillPopupLayoutModel::GetValueFontColorForRow(size_t index) const { +ui::NativeTheme::ColorId AutofillPopupLayoutModel::GetValueFontColorIDForRow( + size_t index) const { std::vector<autofill::Suggestion> suggestions = delegate_->GetSuggestions(); switch (suggestions[index].frontend_id) { case POPUP_ITEM_ID_HTTP_NOT_SECURE_WARNING_MESSAGE: - return gfx::kGoogleRed700; + return ui::NativeTheme::kColorId_AlertSeverityHigh; case POPUP_ITEM_ID_INSECURE_CONTEXT_PAYMENT_DISABLED_MESSAGE: - return kLabelTextColor; + return ui::NativeTheme::kColorId_ResultsTableNormalDimmedText; default: - return kValueTextColor; + return ui::NativeTheme::kColorId_ResultsTableNormalText; } }
diff --git a/chrome/browser/ui/autofill/autofill_popup_layout_model.h b/chrome/browser/ui/autofill/autofill_popup_layout_model.h index 564b50c..8c6223ff 100644 --- a/chrome/browser/ui/autofill/autofill_popup_layout_model.h +++ b/chrome/browser/ui/autofill/autofill_popup_layout_model.h
@@ -14,6 +14,7 @@ #include "ui/gfx/font_list.h" #include "ui/gfx/geometry/rect.h" #include "ui/gfx/native_widget_types.h" +#include "ui/native_theme/native_theme.h" namespace gfx { class ImageSkia; @@ -74,8 +75,8 @@ const gfx::FontList& GetValueFontListForRow(size_t index) const; const gfx::FontList& GetLabelFontListForRow(size_t index) const; - // Returns the value font color of the row item according to its |index|. - SkColor GetValueFontColorForRow(size_t index) const; + // Returns the value font color ID of the row item according to its |index|. + ui::NativeTheme::ColorId GetValueFontColorIDForRow(size_t index) const; // Returns the icon image of the item at |index| in the popup. gfx::ImageSkia GetIconImage(size_t index) const;
diff --git a/chrome/browser/ui/autofill/popup_constants.h b/chrome/browser/ui/autofill/popup_constants.h index 30c1dfc7..ca3a74a24 100644 --- a/chrome/browser/ui/autofill/popup_constants.h +++ b/chrome/browser/ui/autofill/popup_constants.h
@@ -5,22 +5,11 @@ #ifndef CHROME_BROWSER_UI_AUTOFILL_POPUP_CONSTANTS_H_ #define CHROME_BROWSER_UI_AUTOFILL_POPUP_CONSTANTS_H_ -#include "third_party/skia/include/core/SkColor.h" - namespace autofill { -// The size of the border around the entire results popup, in pixels. +// TODO(crbug.com/676221): Change this to pixels const int kPopupBorderThickness = 1; -// Various colors used in the Autofill popup. -// TODO(crbug.com/666523): These colors ought to be replaced by getting colors -// from ui::NativeTheme for consistency and accessibility. -const SkColor kBorderColor = SkColorSetRGB(0xC7, 0xCA, 0xCE); -const SkColor kHoveredBackgroundColor = SkColorSetRGB(0xCD, 0xCD, 0xCD); -const SkColor kLabelTextColor = SkColorSetRGB(0x64, 0x64, 0x64); -constexpr SkColor kPopupBackground = SK_ColorWHITE; -constexpr SkColor kValueTextColor = SK_ColorBLACK; - } // namespace autofill #endif // CHROME_BROWSER_UI_AUTOFILL_POPUP_CONSTANTS_H_
diff --git a/chrome/browser/ui/cocoa/autofill/autofill_popup_view_cocoa.mm b/chrome/browser/ui/cocoa/autofill/autofill_popup_view_cocoa.mm index 30745de..f06cf3e 100644 --- a/chrome/browser/ui/cocoa/autofill/autofill_popup_view_cocoa.mm +++ b/chrome/browser/ui/cocoa/autofill/autofill_popup_view_cocoa.mm
@@ -10,7 +10,6 @@ #include "base/strings/utf_string_conversions.h" #include "chrome/browser/ui/autofill/autofill_popup_controller.h" #include "chrome/browser/ui/autofill/autofill_popup_layout_model.h" -#include "chrome/browser/ui/autofill/popup_constants.h" #include "chrome/browser/ui/cocoa/autofill/autofill_popup_view_bridge.h" #include "components/autofill/core/browser/popup_item_ids.h" #include "components/autofill/core/browser/suggestion.h" @@ -26,6 +25,8 @@ #include "ui/gfx/image/image_skia_util_mac.h" #include "ui/gfx/paint_vector_icon.h" #include "ui/gfx/vector_icons_public.h" +#include "ui/native_theme/native_theme.h" +#include "ui/native_theme/native_theme_mac.h" using autofill::AutofillPopupView; using autofill::AutofillPopupLayoutModel; @@ -170,7 +171,9 @@ [[self highlightColor] set]; [NSBezierPath fillRect:bounds]; } else { - SkColor backgroundColor = controller_->GetBackgroundColorForRow(index); + SkColor backgroundColor = + ui::NativeTheme::GetInstanceForNativeUi()->GetSystemColor( + controller_->GetBackgroundColorIDForRow(index)); [skia::SkColorToSRGBNSColor(backgroundColor) set]; [NSBezierPath fillRect:bounds]; } @@ -213,7 +216,8 @@ bounds:(NSRect)bounds textYOffset:(CGFloat)textYOffset { NSColor* nameColor = skia::SkColorToSRGBNSColor( - controller_->layout_model().GetValueFontColorForRow(index)); + ui::NativeTheme::GetInstanceForNativeUi()->GetSystemColor( + controller_->layout_model().GetValueFontColorIDForRow(index))); NSDictionary* nameAttributes = [NSDictionary dictionaryWithObjectsAndKeys:controller_->layout_model() .GetValueFontListForRow(index)
diff --git a/chrome/browser/ui/views/autofill/autofill_popup_base_view.cc b/chrome/browser/ui/views/autofill/autofill_popup_base_view.cc index dd09b9a..2f0d3864 100644 --- a/chrome/browser/ui/views/autofill/autofill_popup_base_view.cc +++ b/chrome/browser/ui/views/autofill/autofill_popup_base_view.cc
@@ -10,6 +10,7 @@ #include "base/threading/thread_task_runner_handle.h" #include "build/build_config.h" #include "chrome/browser/ui/autofill/popup_constants.h" +#include "ui/native_theme/native_theme.h" #include "ui/views/border.h" #include "ui/views/focus/focus_manager.h" #include "ui/views/widget/widget.h" @@ -60,7 +61,11 @@ show_time_ = base::Time::Now(); } - SetBorder(views::CreateSolidBorder(kPopupBorderThickness, kBorderColor)); + // TODO(crbug.com/676164): Show different border color when focused/unfocused + SetBorder(views::CreateSolidBorder( + kPopupBorderThickness, + GetNativeTheme()->GetSystemColor( + ui::NativeTheme::kColorId_UnfocusedBorderColor))); DoUpdateBoundsAndRedrawPopup(); GetWidget()->Show();
diff --git a/chrome/browser/ui/views/autofill/autofill_popup_view_views.cc b/chrome/browser/ui/views/autofill/autofill_popup_view_views.cc index 0f05966c..89925ce 100644 --- a/chrome/browser/ui/views/autofill/autofill_popup_view_views.cc +++ b/chrome/browser/ui/views/autofill/autofill_popup_view_views.cc
@@ -6,7 +6,6 @@ #include "chrome/browser/ui/autofill/autofill_popup_controller.h" #include "chrome/browser/ui/autofill/autofill_popup_layout_model.h" -#include "chrome/browser/ui/autofill/popup_constants.h" #include "components/autofill/core/browser/popup_item_ids.h" #include "components/autofill/core/browser/suggestion.h" #include "ui/events/keycodes/keyboard_codes.h" @@ -48,7 +47,8 @@ if (!controller_) return; - canvas->DrawColor(kPopupBackground); + canvas->DrawColor(GetNativeTheme()->GetSystemColor( + ui::NativeTheme::kColorId_ResultsTableNormalBackground)); OnPaintBorder(canvas); for (size_t i = 0; i < controller_->GetLineCount(); ++i) { @@ -56,7 +56,10 @@ if (controller_->GetSuggestionAt(i).frontend_id == POPUP_ITEM_ID_SEPARATOR) { - canvas->FillRect(line_rect, kLabelTextColor); + canvas->FillRect( + line_rect, + GetNativeTheme()->GetSystemColor( + ui::NativeTheme::kColorId_ResultsTableNormalDimmedText)); } else { DrawAutofillEntry(canvas, i, line_rect); } @@ -92,7 +95,10 @@ void AutofillPopupViewViews::DrawAutofillEntry(gfx::Canvas* canvas, int index, const gfx::Rect& entry_rect) { - canvas->FillRect(entry_rect, controller_->GetBackgroundColorForRow(index)); + canvas->FillRect( + entry_rect, + GetNativeTheme()->GetSystemColor( + controller_->GetBackgroundColorIDForRow(index))); const bool is_http_warning = (controller_->GetSuggestionAt(index).frontend_id == @@ -150,7 +156,8 @@ canvas->DrawStringRectWithFlags( controller_->GetElidedValueAt(index), controller_->layout_model().GetValueFontListForRow(index), - controller_->layout_model().GetValueFontColorForRow(index), + GetNativeTheme()->GetSystemColor( + controller_->layout_model().GetValueFontColorIDForRow(index)), gfx::Rect(value_x_align_left, value_rect.y(), value_width, value_rect.height()), text_align); @@ -169,11 +176,15 @@ label_x_align_left += is_rtl ? 
0 : -label_width; } + // TODO(crbug.com/678033):Add a GetLabelFontColorForRow function similar to + // GetValueFontColorForRow so that the cocoa impl could use it too canvas->DrawStringRectWithFlags( controller_->GetElidedLabelAt(index), controller_->layout_model().GetLabelFontListForRow(index), - kLabelTextColor, gfx::Rect(label_x_align_left, entry_rect.y(), - label_width, entry_rect.height()), + GetNativeTheme()->GetSystemColor( + ui::NativeTheme::kColorId_ResultsTableNormalDimmedText), + gfx::Rect(label_x_align_left, entry_rect.y(), label_width, + entry_rect.height()), text_align); } }
diff --git a/chrome/browser/ui/views/autofill/password_generation_popup_view_views.cc b/chrome/browser/ui/views/autofill/password_generation_popup_view_views.cc index f705224..f7f9a962c 100644 --- a/chrome/browser/ui/views/autofill/password_generation_popup_view_views.cc +++ b/chrome/browser/ui/views/autofill/password_generation_popup_view_views.cc
@@ -14,6 +14,7 @@ #include "ui/gfx/color_palette.h" #include "ui/gfx/paint_vector_icon.h" #include "ui/gfx/vector_icons_public.h" +#include "ui/native_theme/native_theme.h" #include "ui/views/background.h" #include "ui/views/border.h" #include "ui/views/controls/image_view.h" @@ -153,7 +154,9 @@ PasswordGenerationPopupController::kHorizontalPadding)); AddChildView(help_label_); - set_background(views::Background::CreateSolidBackground(kPopupBackground)); + set_background(views::Background::CreateSolidBackground( + GetNativeTheme()->GetSystemColor( + ui::NativeTheme::kColorId_ResultsTableNormalBackground))); } PasswordGenerationPopupViewViews::~PasswordGenerationPopupViewViews() {} @@ -209,9 +212,10 @@ password_view_->set_background( views::Background::CreateSolidBackground( - controller_->password_selected() ? - kHoveredBackgroundColor : - kPopupBackground)); + GetNativeTheme()->GetSystemColor( + controller_->password_selected() ? + ui::NativeTheme::kColorId_ResultsTableHoveredBackground : + ui::NativeTheme::kColorId_ResultsTableNormalBackground))); } void PasswordGenerationPopupViewViews::Layout() {
diff --git a/chrome/browser/ui/webui/chromeos/network_ui.cc b/chrome/browser/ui/webui/chromeos/network_ui.cc index 8dfc7c2b4..af01934 100644 --- a/chrome/browser/ui/webui/chromeos/network_ui.cc +++ b/chrome/browser/ui/webui/chromeos/network_ui.cc
@@ -189,6 +189,9 @@ l10n_util::GetStringUTF16(IDS_NETWORK_UI_FORMAT_SHILL)); localized_strings->SetString( + "globalPolicyLabel", + l10n_util::GetStringUTF16(IDS_NETWORK_UI_GLOBAL_POLICY)); + localized_strings->SetString( "visibleNetworksLabel", l10n_util::GetStringUTF16(IDS_NETWORK_UI_VISIBLE_NETWORKS)); localized_strings->SetString(
diff --git a/chrome/browser/ui/webui/welcome_win10_ui.cc b/chrome/browser/ui/webui/welcome_win10_ui.cc index 9d382f6..714c44b 100644 --- a/chrome/browser/ui/webui/welcome_win10_ui.cc +++ b/chrome/browser/ui/webui/welcome_win10_ui.cc
@@ -117,11 +117,8 @@ html_source->SetDefaultResource(IDR_WELCOME_WIN10_SECTIONED_HTML); } - // Logo images of all scales. - html_source->AddResourcePath("logo-small.png", IDR_PRODUCT_LOGO_32); - html_source->AddResourcePath("logo-small2x.png", IDR_PRODUCT_LOGO_64); - html_source->AddResourcePath("logo-large.png", IDR_PRODUCT_LOGO_64); - html_source->AddResourcePath("logo-large2x.png", IDR_PRODUCT_LOGO_128); + html_source->AddResourcePath("logo-small.png", IDR_PRODUCT_LOGO_64); + html_source->AddResourcePath("logo-large.png", IDR_PRODUCT_LOGO_128); content::WebUIDataSource::Add(profile, html_source); }
diff --git a/chrome/common/extensions/docs/OWNERS b/chrome/common/extensions/docs/OWNERS index 34a01db..3c933bc 100644 --- a/chrome/common/extensions/docs/OWNERS +++ b/chrome/common/extensions/docs/OWNERS
@@ -1,6 +1,5 @@ # For documentation. mkearney@chromium.org -mkwst@chromium.org # For webview documentation. paulmeyer@chromium.org
diff --git a/chrome/common/extensions/docs/server2/OWNERS b/chrome/common/extensions/docs/server2/OWNERS new file mode 100644 index 0000000..8548009 --- /dev/null +++ b/chrome/common/extensions/docs/server2/OWNERS
@@ -0,0 +1,3 @@ +lazyboy@chromium.org +rdevlin.cronin@chromium.org +rockot@chromium.org
diff --git a/chrome/renderer/media/cast_receiver_audio_valve.cc b/chrome/renderer/media/cast_receiver_audio_valve.cc index d4a48a2f..e13ab140 100644 --- a/chrome/renderer/media/cast_receiver_audio_valve.cc +++ b/chrome/renderer/media/cast_receiver_audio_valve.cc
@@ -56,12 +56,6 @@ } } -void CastReceiverAudioValve::OnStarted() { - base::AutoLock lock(lock_); - if (cb_) - cb_->OnCaptureStarted(); -} - void CastReceiverAudioValve::Stop() { base::AutoLock lock(lock_); cb_ = nullptr;
diff --git a/chrome/renderer/media/cast_receiver_audio_valve.h b/chrome/renderer/media/cast_receiver_audio_valve.h index 1bf0203..f6c09a7 100644 --- a/chrome/renderer/media/cast_receiver_audio_valve.h +++ b/chrome/renderer/media/cast_receiver_audio_valve.h
@@ -31,10 +31,6 @@ void DeliverDecodedAudio(const media::AudioBus* audio_bus, base::TimeTicks playout_time); - // Called to indicate that audio has started streaming and our capture - // callback should be notified. - void OnStarted(); - // When this returns, no more calls will be forwarded to |cb|. void Stop();
diff --git a/chrome/renderer/media/cast_receiver_session_delegate.cc b/chrome/renderer/media/cast_receiver_session_delegate.cc index 2cfd6f8..790ed174 100644 --- a/chrome/renderer/media/cast_receiver_session_delegate.cc +++ b/chrome/renderer/media/cast_receiver_session_delegate.cc
@@ -48,8 +48,7 @@ void CastReceiverSessionDelegate::StartAudio( scoped_refptr<CastReceiverAudioValve> audio_valve) { DCHECK(io_task_runner_->BelongsToCurrentThread()); - audio_valve_ = std::move(audio_valve); - audio_valve_->OnStarted(); + audio_valve_ = audio_valve; cast_receiver_->RequestDecodedAudioFrame(on_audio_decoded_cb_); }
diff --git a/chrome/test/data/extensions/api_test/networking_private/chromeos/test.js b/chrome/test/data/extensions/api_test/networking_private/chromeos/test.js index 51c3de1..28bca7f 100644 --- a/chrome/test/data/extensions/api_test/networking_private/chromeos/test.js +++ b/chrome/test/data/extensions/api_test/networking_private/chromeos/test.js
@@ -873,7 +873,15 @@ })); })); }))); - } + }, + function getGlobalPolicy() { + chrome.networkingPrivate.getGlobalPolicy(callbackPass(function(result) { + assertEq({ + AllowOnlyPolicyNetworksToAutoconnect: true, + AllowOnlyPolicyNetworksToConnect: false, + }, result); + })); + }, ]; chrome.test.getConfig(function(config) {
diff --git a/chrome/test/data/extensions/api_test/networking_private/test.js b/chrome/test/data/extensions/api_test/networking_private/test.js index aa2380b..3ca9ca05 100644 --- a/chrome/test/data/extensions/api_test/networking_private/test.js +++ b/chrome/test/data/extensions/api_test/networking_private/test.js
@@ -127,6 +127,9 @@ chrome.networkingPrivate.setCellularSimState( kGuid, simState, callbackPass(callbackResult)); }, + function getGlobalPolicy() { + chrome.networkingPrivate.getGlobalPolicy(callbackPass(callbackResult)); + } ]; var testToRun = window.location.search.substring(1);
diff --git a/chromeos/BUILD.gn b/chromeos/BUILD.gn index ba62962..2413b9e4 100644 --- a/chromeos/BUILD.gn +++ b/chromeos/BUILD.gn
@@ -39,6 +39,7 @@ "//components/prefs", "//components/proxy_config", "//components/signin/core/account_id", + "//components/url_formatter", "//components/user_manager", "//crypto", "//crypto:platform",
diff --git a/chromeos/network/DEPS b/chromeos/network/DEPS index 392d14e..49abef0 100644 --- a/chromeos/network/DEPS +++ b/chromeos/network/DEPS
@@ -3,6 +3,7 @@ "+components/onc", "+components/pref_registry", "+components/proxy_config", + "+components/url_formatter", "+components/user_manager", "+net", "+url"
diff --git a/chromeos/network/onc/onc_utils.cc b/chromeos/network/onc/onc_utils.cc index a696ca5..4f956c4 100644 --- a/chromeos/network/onc/onc_utils.cc +++ b/chromeos/network/onc/onc_utils.cc
@@ -36,6 +36,7 @@ #include "components/prefs/pref_service.h" #include "components/proxy_config/proxy_config_dictionary.h" #include "components/signin/core/account_id/account_id.h" +#include "components/url_formatter/url_fixer.h" #include "components/user_manager/user.h" #include "components/user_manager/user_manager.h" #include "crypto/encryptor.h" @@ -930,9 +931,9 @@ std::string pac_url; onc_proxy_settings.GetStringWithoutPathExpansion(::onc::proxy::kPAC, &pac_url); - GURL url(pac_url); - DCHECK(url.is_valid()) << "Invalid URL in ProxySettings.PAC"; - proxy_dict.reset(ProxyConfigDictionary::CreatePacScript(url.spec(), false)); + GURL url(url_formatter::FixupURL(pac_url, std::string())); + proxy_dict.reset(ProxyConfigDictionary::CreatePacScript( + url.is_valid() ? url.spec() : std::string(), false)); } else if (type == ::onc::proxy::kManual) { const base::DictionaryValue* manual_dict = nullptr; onc_proxy_settings.GetDictionaryWithoutPathExpansion(::onc::proxy::kManual,
diff --git a/chromeos/network/onc/onc_utils_unittest.cc b/chromeos/network/onc/onc_utils_unittest.cc index a8577b1..540c219 100644 --- a/chromeos/network/onc/onc_utils_unittest.cc +++ b/chromeos/network/onc/onc_utils_unittest.cc
@@ -171,6 +171,13 @@ test_data->GetAsList(&list_of_tests); ASSERT_TRUE(list_of_tests); + // Additional ONC -> ProxyConfig test cases to test fixup. + test_data = ReadTestJson("proxy_config_from_onc.json"); + base::ListValue* list_of_tests2; + test_data->GetAsList(&list_of_tests2); + ASSERT_TRUE(list_of_tests2); + list_of_tests->Append(list_of_tests2->CreateDeepCopy()); + int index = 0; for (base::ListValue::iterator it = list_of_tests->begin(); it != list_of_tests->end(); ++it, ++index) {
diff --git a/chromeos/test/data/network/proxy_config_from_onc.json b/chromeos/test/data/network/proxy_config_from_onc.json new file mode 100644 index 0000000..a798702 --- /dev/null +++ b/chromeos/test/data/network/proxy_config_from_onc.json
@@ -0,0 +1,22 @@ +[ + { "ONC_ProxySettings": { + "Type": "PAC", + "PAC": "proxycfg.my.domain.com" + }, + "ProxyConfig": { + "mode":"pac_script", + "pac_mandatory":false, + "pac_url":"http://proxycfg.my.domain.com/" + } + }, + { "ONC_ProxySettings": { + "Type": "PAC", + "PAC": "proxycfg.my.domain.com/proxy.dat" + }, + "ProxyConfig": { + "mode":"pac_script", + "pac_mandatory":false, + "pac_url":"http://proxycfg.my.domain.com/proxy.dat" + } + }, +]
diff --git a/components/autofill/core/browser/autofill_experiments.cc b/components/autofill/core/browser/autofill_experiments.cc index 4c2070c..92dfff2 100644 --- a/components/autofill/core/browser/autofill_experiments.cc +++ b/components/autofill/core/browser/autofill_experiments.cc
@@ -28,8 +28,6 @@ "AutofillCreditCardAssist", base::FEATURE_DISABLED_BY_DEFAULT}; const base::Feature kAutofillCreditCardSigninPromo{ "AutofillCreditCardSigninPromo", base::FEATURE_DISABLED_BY_DEFAULT}; -const base::Feature kAutofillProfileCleanup{"AutofillProfileCleanup", - base::FEATURE_DISABLED_BY_DEFAULT}; const base::Feature kAutofillScanCardholderName{ "AutofillScanCardholderName", base::FEATURE_DISABLED_BY_DEFAULT}; const base::Feature kAutofillCreditCardPopupLayout{ @@ -71,10 +69,6 @@ return group_name == "Disabled"; } -bool IsAutofillProfileCleanupEnabled() { - return base::FeatureList::IsEnabled(kAutofillProfileCleanup); -} - bool IsAutofillCreditCardSigninPromoEnabled() { return base::FeatureList::IsEnabled(kAutofillCreditCardSigninPromo); }
diff --git a/components/autofill/core/browser/autofill_experiments.h b/components/autofill/core/browser/autofill_experiments.h index 02e891b..828c1890 100644 --- a/components/autofill/core/browser/autofill_experiments.h +++ b/components/autofill/core/browser/autofill_experiments.h
@@ -26,7 +26,6 @@ extern const base::Feature kAutofillCreditCardAssist; extern const base::Feature kAutofillCreditCardSigninPromo; -extern const base::Feature kAutofillProfileCleanup; extern const base::Feature kAutofillScanCardholderName; extern const base::Feature kAutofillCreditCardPopupLayout; extern const char kCreditCardSigninPromoImpressionLimitParamKey[]; @@ -42,9 +41,6 @@ // disables providing suggestions. bool IsInAutofillSuggestionsDisabledExperiment(); -// Returns whether the Autofill profile cleanup feature is enabled. -bool IsAutofillProfileCleanupEnabled(); - // Returns whether the Autofill credit card signin promo should be shown. bool IsAutofillCreditCardSigninPromoEnabled();
diff --git a/components/autofill/core/browser/autofill_merge_unittest.cc b/components/autofill/core/browser/autofill_merge_unittest.cc index f38ffaf..a4aceaa 100644 --- a/components/autofill/core/browser/autofill_merge_unittest.cc +++ b/components/autofill/core/browser/autofill_merge_unittest.cc
@@ -8,7 +8,6 @@ #include <memory> #include <vector> -#include "base/feature_list.h" #include "base/files/file_enumerator.h" #include "base/files/file_path.h" #include "base/macros.h" @@ -17,7 +16,6 @@ #include "base/strings/string_split.h" #include "base/strings/string_util.h" #include "base/strings/utf_string_conversions.h" -#include "base/test/scoped_feature_list.h" #include "components/autofill/core/browser/autofill_experiments.h" #include "components/autofill/core/browser/autofill_test_utils.h" #include "components/autofill/core/browser/autofill_type.h" @@ -186,7 +184,6 @@ PersonalDataManagerMock personal_data_; private: - base::test::ScopedFeatureList scoped_feature_list_; std::map<std::string, ServerFieldType> string_to_field_type_map_; DISALLOW_COPY_AND_ASSIGN(AutofillMergeTest); @@ -205,7 +202,6 @@ void AutofillMergeTest::SetUp() { test::DisableSystemServices(nullptr); - scoped_feature_list_.InitAndEnableFeature(kAutofillProfileCleanup); } void AutofillMergeTest::TearDown() {
diff --git a/components/autofill/core/browser/personal_data_manager.cc b/components/autofill/core/browser/personal_data_manager.cc index f8f6c587..61182fe 100644 --- a/components/autofill/core/browser/personal_data_manager.cc +++ b/components/autofill/core/browser/personal_data_manager.cc
@@ -292,7 +292,15 @@ LoadCreditCards(); database_->AddObserver(this); - is_autofill_profile_dedupe_pending_ = IsAutofillProfileCleanupEnabled(); + + // Check if profile cleanup has already been performed this major version. + is_autofill_profile_cleanup_pending_ = + pref_service_->GetInteger(prefs::kAutofillLastVersionDeduped) >= + atoi(version_info::GetVersionNumber().c_str()); + DVLOG(1) << "Autofill profile cleanup " + << (is_autofill_profile_cleanup_pending_ ? "needs to be" + : "has already been") + << " performed for this version"; } PersonalDataManager::~PersonalDataManager() { @@ -309,7 +317,7 @@ syncer::SyncService* sync_service) { // We want to know when, if at all, we need to run autofill profile de- // duplication: now or after waiting until sync has started. - if (!is_autofill_profile_dedupe_pending_) { + if (!is_autofill_profile_cleanup_pending_) { // De-duplication isn't enabled. return; } @@ -419,7 +427,8 @@ } void PersonalDataManager::SyncStarted(syncer::ModelType model_type) { - if (model_type == syncer::AUTOFILL_PROFILE) { + if (model_type == syncer::AUTOFILL_PROFILE && + is_autofill_profile_cleanup_pending_) { // This runs as a one-time fix, tracked in syncable prefs. If it has already // run, it is a NOP (other than checking the pref). ApplyProfileUseDatesFix(); @@ -1681,11 +1690,10 @@ } bool PersonalDataManager::ApplyDedupingRoutine() { - if (!is_autofill_profile_dedupe_pending_) + if (!is_autofill_profile_cleanup_pending_) return false; - DCHECK(IsAutofillProfileCleanupEnabled()); - is_autofill_profile_dedupe_pending_ = false; + is_autofill_profile_cleanup_pending_ = false; // No need to de-duplicate if there are less than two profiles. if (web_profiles_.size() < 2) {
diff --git a/components/autofill/core/browser/personal_data_manager.h b/components/autofill/core/browser/personal_data_manager.h index 948146d..239c88d6 100644 --- a/components/autofill/core/browser/personal_data_manager.h +++ b/components/autofill/core/browser/personal_data_manager.h
@@ -538,9 +538,8 @@ // An observer to listen for changes to prefs::kAutofillWalletImportEnabled. std::unique_ptr<BooleanPrefMember> wallet_enabled_pref_; - // Set to true if autofill profile deduplication is enabled and needs to be - // performed on the next data refresh. - bool is_autofill_profile_dedupe_pending_ = false; + // True if autofill profile cleanup needs to be performed. + bool is_autofill_profile_cleanup_pending_ = false; #if defined(OS_ANDROID) // The context for the request to be used to fetch libaddressinput's address
diff --git a/components/autofill/core/browser/personal_data_manager_unittest.cc b/components/autofill/core/browser/personal_data_manager_unittest.cc index da28a16..18bb0aeb 100644 --- a/components/autofill/core/browser/personal_data_manager_unittest.cc +++ b/components/autofill/core/browser/personal_data_manager_unittest.cc
@@ -15,7 +15,6 @@ #include <vector> #include "base/command_line.h" -#include "base/feature_list.h" #include "base/files/scoped_temp_dir.h" #include "base/guid.h" #include "base/memory/ptr_util.h" @@ -24,7 +23,6 @@ #include "base/strings/utf_string_conversions.h" #include "base/synchronization/waitable_event.h" #include "base/test/histogram_tester.h" -#include "base/test/scoped_feature_list.h" #include "base/threading/thread_task_runner_handle.h" #include "base/time/time.h" #include "build/build_config.h" @@ -150,7 +148,6 @@ // There are no field trials enabled by default. field_trial_list_.reset(); - scoped_feature_list_.reset(); // Reset the deduping pref to its default value. personal_data_->pref_service_->SetInteger( @@ -204,9 +201,7 @@ } void EnableAutofillProfileCleanup() { - scoped_feature_list_.reset(new base::test::ScopedFeatureList); - scoped_feature_list_->InitAndEnableFeature(kAutofillProfileCleanup); - personal_data_->is_autofill_profile_dedupe_pending_ = true; + personal_data_->is_autofill_profile_cleanup_pending_ = true; } void SetupReferenceProfile() { @@ -371,7 +366,6 @@ std::unique_ptr<base::FieldTrialList> field_trial_list_; scoped_refptr<base::FieldTrial> field_trial_; - std::unique_ptr<base::test::ScopedFeatureList> scoped_feature_list_; }; TEST_F(PersonalDataManagerTest, AddProfile) { @@ -5633,9 +5627,6 @@ "homer.simpson@abc.com", "Fox", "742 Evergreen Terrace.", "", "Springfield", "IL", "91601", "", ""); - // Disable the profile cleanup before adding |profile3|. - scoped_feature_list_.reset(); - personal_data_->AddProfile(profile3); EXPECT_CALL(personal_data_observer_, OnPersonalDataChanged()) .WillOnce(QuitMainMessageLoop());
diff --git a/components/autofill/core/browser/webdata/autocomplete_sync_bridge.cc b/components/autofill/core/browser/webdata/autocomplete_sync_bridge.cc index 2edafc505..304cfe1d 100644 --- a/components/autofill/core/browser/webdata/autocomplete_sync_bridge.cc +++ b/components/autofill/core/browser/webdata/autocomplete_sync_bridge.cc
@@ -19,7 +19,6 @@ #include "components/sync/model/entity_data.h" #include "components/sync/model/model_type_change_processor.h" #include "components/sync/model/mutable_data_batch.h" -#include "components/sync/model/sync_error.h" #include "net/base/escape.h" namespace autofill { @@ -105,52 +104,63 @@ syncer::AUTOFILL); } -syncer::SyncError AutocompleteSyncBridge::MergeSyncData( +syncer::ModelError AutocompleteSyncBridge::MergeSyncData( std::unique_ptr<syncer::MetadataChangeList> metadata_change_list, syncer::EntityDataMap entity_data_map) { DCHECK(thread_checker_.CalledOnValidThread()); NOTIMPLEMENTED(); - return syncer::SyncError(); + return syncer::ModelError(); } -syncer::SyncError AutocompleteSyncBridge::ApplySyncChanges( +syncer::ModelError AutocompleteSyncBridge::ApplySyncChanges( std::unique_ptr<syncer::MetadataChangeList> metadata_change_list, syncer::EntityChangeList entity_changes) { DCHECK(thread_checker_.CalledOnValidThread()); NOTIMPLEMENTED(); - return syncer::SyncError(); + return syncer::ModelError(); } void AutocompleteSyncBridge::AutocompleteSyncBridge::GetData( StorageKeyList storage_keys, DataCallback callback) { DCHECK(thread_checker_.CalledOnValidThread()); + std::vector<AutofillEntry> entries; + if (!GetAutofillTable()->GetAllAutofillEntries(&entries)) { + change_processor()->ReportError(FROM_HERE, + "Failed to load entries from table."); + return; + } + std::unordered_set<std::string> keys_set; for (const auto& key : storage_keys) { keys_set.insert(key); } auto batch = base::MakeUnique<syncer::MutableDataBatch>(); - std::vector<AutofillEntry> entries; - GetAutofillTable()->GetAllAutofillEntries(&entries); for (const AutofillEntry& entry : entries) { std::string key = GetStorageKeyFromModel(entry.key()); if (keys_set.find(key) != keys_set.end()) { batch->Put(key, CreateEntityData(entry)); } } - callback.Run(syncer::SyncError(), std::move(batch)); + callback.Run(std::move(batch)); } void AutocompleteSyncBridge::GetAllData(DataCallback callback) 
{ DCHECK(thread_checker_.CalledOnValidThread()); - auto batch = base::MakeUnique<syncer::MutableDataBatch>(); + std::vector<AutofillEntry> entries; - GetAutofillTable()->GetAllAutofillEntries(&entries); + if (!GetAutofillTable()->GetAllAutofillEntries(&entries)) { + change_processor()->ReportError(FROM_HERE, + "Failed to load entries from table."); + return; + } + + auto batch = base::MakeUnique<syncer::MutableDataBatch>(); for (const AutofillEntry& entry : entries) { batch->Put(GetStorageKeyFromModel(entry.key()), CreateEntityData(entry)); } - callback.Run(syncer::SyncError(), std::move(batch)); + callback.Run(std::move(batch)); } std::string AutocompleteSyncBridge::GetClientTag(
diff --git a/components/autofill/core/browser/webdata/autocomplete_sync_bridge.h b/components/autofill/core/browser/webdata/autocomplete_sync_bridge.h index 8bf3460..951ba905 100644 --- a/components/autofill/core/browser/webdata/autocomplete_sync_bridge.h +++ b/components/autofill/core/browser/webdata/autocomplete_sync_bridge.h
@@ -14,12 +14,9 @@ #include "components/autofill/core/browser/webdata/autofill_change.h" #include "components/autofill/core/browser/webdata/autofill_webdata_service_observer.h" #include "components/sync/model/metadata_change_list.h" +#include "components/sync/model/model_error.h" #include "components/sync/model/model_type_sync_bridge.h" -namespace syncer { -class SyncError; -} - namespace autofill { class AutofillTable; @@ -46,10 +43,10 @@ // syncer::ModelTypeService implementation. std::unique_ptr<syncer::MetadataChangeList> CreateMetadataChangeList() override; - syncer::SyncError MergeSyncData( + syncer::ModelError MergeSyncData( std::unique_ptr<syncer::MetadataChangeList> metadata_change_list, syncer::EntityDataMap entity_data_map) override; - syncer::SyncError ApplySyncChanges( + syncer::ModelError ApplySyncChanges( std::unique_ptr<syncer::MetadataChangeList> metadata_change_list, syncer::EntityChangeList entity_changes) override; void GetData(StorageKeyList storage_keys, DataCallback callback) override;
diff --git a/components/autofill/core/browser/webdata/autocomplete_sync_bridge_unittest.cc b/components/autofill/core/browser/webdata/autocomplete_sync_bridge_unittest.cc index 596d67d1..4e7fd42 100644 --- a/components/autofill/core/browser/webdata/autocomplete_sync_bridge_unittest.cc +++ b/components/autofill/core/browser/webdata/autocomplete_sync_bridge_unittest.cc
@@ -28,7 +28,6 @@ using sync_pb::EntitySpecifics; using syncer::EntityDataPtr; using syncer::EntityData; -using syncer::SyncError; namespace autofill { @@ -42,9 +41,7 @@ } void VerifyDataBatch(std::map<std::string, AutofillSpecifics> expected, - SyncError error, std::unique_ptr<syncer::DataBatch> batch) { - EXPECT_FALSE(error.IsSet()); while (batch->HasNext()) { const syncer::KeyAndData& pair = batch->Next(); auto iter = expected.find(pair.first);
diff --git a/components/browser_sync/abstract_profile_sync_service_test.cc b/components/browser_sync/abstract_profile_sync_service_test.cc index d5d88e60..d034600 100644 --- a/components/browser_sync/abstract_profile_sync_service_test.cc +++ b/components/browser_sync/abstract_profile_sync_service_test.cc
@@ -49,13 +49,7 @@ void Initialize(InitParams params) override; - void RequestConfigureSyncer( - syncer::ConfigureReason reason, - syncer::ModelTypeSet to_download, - const syncer::ModelSafeRoutingInfo& routing_info, - const base::Callback<void(syncer::ModelTypeSet, syncer::ModelTypeSet)>& - ready_task, - const base::Closure& retry_callback) override; + void ConfigureDataTypes(ConfigureParams params) override; private: // Invoked at the start of HandleSyncManagerInitializationOnFrontendLoop. @@ -104,15 +98,7 @@ SyncBackendHostImpl::Initialize(std::move(params)); } -void SyncEngineForProfileSyncTest::RequestConfigureSyncer( - syncer::ConfigureReason reason, - syncer::ModelTypeSet to_download, - const syncer::ModelSafeRoutingInfo& routing_info, - const base::Callback<void(syncer::ModelTypeSet, syncer::ModelTypeSet)>& - ready_task, - const base::Closure& retry_callback) { - syncer::ModelTypeSet failed_configuration_types; - +void SyncEngineForProfileSyncTest::ConfigureDataTypes(ConfigureParams params) { // The first parameter there should be the set of enabled types. That's not // something we have access to from this strange test harness. We'll just // send back the list of newly configured types instead and hope it doesn't @@ -122,10 +108,8 @@ FROM_HERE, base::Bind( &SyncEngineForProfileSyncTest::FinishConfigureDataTypesOnFrontendLoop, - base::Unretained(this), - syncer::Difference(to_download, failed_configuration_types), - syncer::Difference(to_download, failed_configuration_types), - failed_configuration_types, ready_task)); + base::Unretained(this), params.to_download, params.to_download, + syncer::ModelTypeSet(), params.ready_task)); } // Helper function for return-type-upcasting of the callback.
diff --git a/components/exo/compositor_frame_sink_holder.cc b/components/exo/compositor_frame_sink_holder.cc index e6cb2b8d..7c2d2c4f 100644 --- a/components/exo/compositor_frame_sink_holder.cc +++ b/components/exo/compositor_frame_sink_holder.cc
@@ -36,20 +36,6 @@ release_callbacks_[id] = callback; } -void CompositorFrameSinkHolder::ActivateFrameCallbacks( - std::list<FrameCallback>* frame_callbacks) { - active_frame_callbacks_.splice(active_frame_callbacks_.end(), - *frame_callbacks); - UpdateNeedsBeginFrame(); -} - -void CompositorFrameSinkHolder::CancelFrameCallbacks() { - // Call pending frame callbacks with a null frame time to indicate that they - // have been cancelled. - for (const auto& frame_callback : active_frame_callbacks_) - frame_callback.Run(base::TimeTicks()); -} - void CompositorFrameSinkHolder::SetNeedsBeginFrame(bool needs_begin_frame) { needs_begin_frame_ = needs_begin_frame; OnNeedsBeginFrames(needs_begin_frame); @@ -72,10 +58,9 @@ } void CompositorFrameSinkHolder::OnBeginFrame(const cc::BeginFrameArgs& args) { - while (!active_frame_callbacks_.empty()) { - active_frame_callbacks_.front().Run(args.frame_time); - active_frame_callbacks_.pop_front(); - } + if (surface_) + surface_->BeginFrame(args.frame_time); + begin_frame_source_->OnBeginFrame(args); } @@ -135,7 +120,7 @@ if (!begin_frame_source_) return; - bool needs_begin_frame = !active_frame_callbacks_.empty(); + bool needs_begin_frame = surface_ && surface_->NeedsBeginFrame(); if (needs_begin_frame == needs_begin_frame_) return;
diff --git a/components/exo/compositor_frame_sink_holder.h b/components/exo/compositor_frame_sink_holder.h index d2f8ab8f..0f03d01a 100644 --- a/components/exo/compositor_frame_sink_holder.h +++ b/components/exo/compositor_frame_sink_holder.h
@@ -46,10 +46,6 @@ return weak_factory_.GetWeakPtr(); } - using FrameCallback = base::Callback<void(base::TimeTicks frame_time)>; - void ActivateFrameCallbacks(std::list<FrameCallback>* frame_callbacks); - void CancelFrameCallbacks(); - void SetNeedsBeginFrame(bool needs_begin_frame); void Satisfy(const cc::SurfaceSequence& sequence); @@ -85,7 +81,6 @@ Surface* surface_; std::unique_ptr<CompositorFrameSink> frame_sink_; - std::list<FrameCallback> active_frame_callbacks_; std::unique_ptr<cc::ExternalBeginFrameSource> begin_frame_source_; bool needs_begin_frame_ = false; cc::BeginFrameArgs last_begin_frame_args_;
diff --git a/components/exo/surface.cc b/components/exo/surface.cc index e2f8d4a..cbd8cf3 100644 --- a/components/exo/surface.cc +++ b/components/exo/surface.cc
@@ -208,8 +208,23 @@ window_->layer()->SetShowSolidColorContent(); frame_callbacks_.splice(frame_callbacks_.end(), pending_frame_callbacks_); - compositor_frame_sink_holder_->ActivateFrameCallbacks(&frame_callbacks_); - compositor_frame_sink_holder_->CancelFrameCallbacks(); + active_frame_callbacks_.splice(active_frame_callbacks_.end(), + frame_callbacks_); + // Call all frame callbacks with a null frame time to indicate that they + // have been cancelled. + for (const auto& frame_callback : active_frame_callbacks_) + frame_callback.Run(base::TimeTicks()); + + presentation_callbacks_.splice(presentation_callbacks_.end(), + pending_presentation_callbacks_); + swapping_presentation_callbacks_.splice( + swapping_presentation_callbacks_.end(), presentation_callbacks_); + swapped_presentation_callbacks_.splice(swapped_presentation_callbacks_.end(), + swapping_presentation_callbacks_); + // Call all presentation callbacks with a null presentation time to indicate + // that they have been cancelled. + for (const auto& presentation_callback : swapped_presentation_callbacks_) + presentation_callback.Run(base::TimeTicks(), base::TimeDelta()); compositor_frame_sink_holder_->GetCompositorFrameSink()->EvictFrame(); } @@ -243,6 +258,13 @@ pending_frame_callbacks_.push_back(callback); } +void Surface::RequestPresentationCallback( + const PresentationCallback& callback) { + TRACE_EVENT0("exo", "Surface::RequestPresentationCallback"); + + pending_presentation_callbacks_.push_back(callback); +} + void Surface::SetOpaqueRegion(const SkRegion& region) { TRACE_EVENT1("exo", "Surface::SetOpaqueRegion", "region", gfx::SkIRectToRect(region.getBounds()).ToString()); @@ -470,6 +492,10 @@ // Move pending frame callbacks to the end of frame_callbacks_. frame_callbacks_.splice(frame_callbacks_.end(), pending_frame_callbacks_); + // Move pending presentation callbacks to the end of presentation_callbacks_. 
+ presentation_callbacks_.splice(presentation_callbacks_.end(), + pending_presentation_callbacks_); + // Synchronize window hierarchy. This will position and update the stacking // order of all sub-surfaces after committing all pending state of sub-surface // descendants. @@ -570,7 +596,21 @@ } void Surface::WillDraw() { - compositor_frame_sink_holder_->ActivateFrameCallbacks(&frame_callbacks_); + active_frame_callbacks_.splice(active_frame_callbacks_.end(), + frame_callbacks_); + swapping_presentation_callbacks_.splice( + swapping_presentation_callbacks_.end(), presentation_callbacks_); +} + +bool Surface::NeedsBeginFrame() const { + return !active_frame_callbacks_.empty(); +} + +void Surface::BeginFrame(base::TimeTicks frame_time) { + while (!active_frame_callbacks_.empty()) { + active_frame_callbacks_.front().Run(frame_time); + active_frame_callbacks_.pop_front(); + } } void Surface::CheckIfSurfaceHierarchyNeedsCommitToNewSurfaces() { @@ -578,6 +618,9 @@ SetSurfaceHierarchyNeedsCommitToNewSurfaces(); } +//////////////////////////////////////////////////////////////////////////////// +// ui::ContextFactoryObserver overrides: + void Surface::OnLostResources() { if (!local_frame_id_.is_valid()) return; @@ -586,15 +629,45 @@ UpdateSurface(true); } +//////////////////////////////////////////////////////////////////////////////// +// aura::WindowObserver overrides: + void Surface::OnWindowAddedToRootWindow(aura::Window* window) { window->layer()->GetCompositor()->AddFrameSink(frame_sink_id_); + window->layer()->GetCompositor()->vsync_manager()->AddObserver(this); } void Surface::OnWindowRemovingFromRootWindow(aura::Window* window, aura::Window* new_root) { window->layer()->GetCompositor()->RemoveFrameSink(frame_sink_id_); + window->layer()->GetCompositor()->vsync_manager()->RemoveObserver(this); } +//////////////////////////////////////////////////////////////////////////////// +// ui::CompositorVSyncManager::Observer overrides: + +void 
Surface::OnUpdateVSyncParameters(base::TimeTicks timebase, + base::TimeDelta interval) { + // Use current time if platform doesn't provide an accurate timebase. + if (timebase.is_null()) + timebase = base::TimeTicks::Now(); + + while (!swapped_presentation_callbacks_.empty()) { + swapped_presentation_callbacks_.front().Run(timebase, interval); + swapped_presentation_callbacks_.pop_front(); + } + + // VSync parameters updates are generated at the start of a new swap. Move + // the swapping presentation callbacks to swapped callbacks so they fire + // at the next VSync parameters update as that will contain the presentation + // time for the previous frame. + swapped_presentation_callbacks_.splice(swapped_presentation_callbacks_.end(), + swapping_presentation_callbacks_); +} + +//////////////////////////////////////////////////////////////////////////////// +// Buffer, private: + Surface::State::State() : input_region(SkIRect::MakeLargest()) {} Surface::State::~State() = default;
diff --git a/components/exo/surface.h b/components/exo/surface.h index fcc0a71..0aeaa0e 100644 --- a/components/exo/surface.h +++ b/components/exo/surface.h
@@ -24,6 +24,7 @@ #include "third_party/skia/include/core/SkRegion.h" #include "ui/aura/window.h" #include "ui/aura/window_observer.h" +#include "ui/compositor/compositor_vsync_manager.h" #include "ui/gfx/geometry/rect.h" namespace base { @@ -60,7 +61,9 @@ // This class represents a rectangular area that is displayed on the screen. // It has a location, size and pixel contents. -class Surface : public ui::ContextFactoryObserver, public aura::WindowObserver { +class Surface : public ui::ContextFactoryObserver, + public aura::WindowObserver, + public ui::CompositorVSyncManager::Observer { public: using PropertyDeallocator = void (*)(int64_t value); @@ -87,11 +90,18 @@ // repainted. void Damage(const gfx::Rect& rect); - // Request notification when the next frame is displayed. Useful for - // throttling redrawing operations, and driving animations. + // Request notification when it's a good time to produce a new frame. Useful + // for throttling redrawing operations, and driving animations. using FrameCallback = base::Callback<void(base::TimeTicks frame_time)>; void RequestFrameCallback(const FrameCallback& callback); + // Request notification when the next frame is displayed. Useful for + // throttling redrawing operations, and driving animations. + using PresentationCallback = + base::Callback<void(base::TimeTicks presentation_time, + base::TimeDelta refresh)>; + void RequestPresentationCallback(const PresentationCallback& callback); + // This sets the region of the surface that contains opaque content. void SetOpaqueRegion(const SkRegion& region); @@ -179,11 +189,15 @@ // Returns a trace value representing the state of the surface. std::unique_ptr<base::trace_event::TracedValue> AsTracedValue() const; - // Called when surface is being scheduled for a draw. + // Call this to indicate that surface is being scheduled for a draw. void WillDraw(); - // Called when the begin frame source has changed. 
- void SetBeginFrameSource(cc::BeginFrameSource* begin_frame_source); + // Returns true when there's an active frame callback that requires a + // BeginFrame() call. + bool NeedsBeginFrame() const; + + // Call this to indicate that it's a good time to start producing a new frame. + void BeginFrame(base::TimeTicks frame_time); // Check whether this Surface and its children need to create new cc::Surface // IDs for their contents next time they get new buffer contents. @@ -200,6 +214,10 @@ void OnWindowRemovingFromRootWindow(aura::Window* window, aura::Window* new_root) override; + // Overridden from ui::CompositorVSyncManager::Observer: + void OnUpdateVSyncParameters(base::TimeTicks timebase, + base::TimeDelta interval) override; + // Sets the |value| of the given surface |property|. Setting to the default // value (e.g., NULL) removes the property. The caller is responsible for the // lifetime of any object set as a property on the Surface. @@ -333,6 +351,19 @@ // be drawn. They fire at the first begin frame notification after this. std::list<FrameCallback> pending_frame_callbacks_; std::list<FrameCallback> frame_callbacks_; + std::list<FrameCallback> active_frame_callbacks_; + + // These lists contains the callbacks to notify the client when surface + // contents have been presented. These callbacks move to + // |presentation_callbacks_| when Commit() is called. Later they are moved to + // |swapping_presentation_callbacks_| when the effect of the Commit() is + // scheduled to be drawn and then moved to |swapped_presentation_callbacks_| + // after receiving VSync parameters update for the previous frame. They fire + // at the next VSync parameters update after that. 
+ std::list<PresentationCallback> pending_presentation_callbacks_; + std::list<PresentationCallback> presentation_callbacks_; + std::list<PresentationCallback> swapping_presentation_callbacks_; + std::list<PresentationCallback> swapped_presentation_callbacks_; // This is the state that has yet to be committed. State pending_state_;
diff --git a/components/exo/wayland/BUILD.gn b/components/exo/wayland/BUILD.gn index cdd54c21..231b7bc 100644 --- a/components/exo/wayland/BUILD.gn +++ b/components/exo/wayland/BUILD.gn
@@ -40,6 +40,7 @@ "//third_party/wayland-protocols:alpha_compositing_protocol", "//third_party/wayland-protocols:gaming_input_protocol", "//third_party/wayland-protocols:keyboard_configuration_protocol", + "//third_party/wayland-protocols:presentation_time_protocol", "//third_party/wayland-protocols:remote_shell_protocol", "//third_party/wayland-protocols:secure_output_protocol", "//third_party/wayland-protocols:stylus_protocol", @@ -95,6 +96,7 @@ "//skia", "//third_party/wayland:wayland_client", "//third_party/wayland-protocols:linux_dmabuf_protocol", + "//third_party/wayland-protocols:presentation_time_protocol", "//ui/gfx/geometry", "//ui/gl", "//ui/gl/init",
diff --git a/components/exo/wayland/clients/motion_events.cc b/components/exo/wayland/clients/motion_events.cc index 92235d9..806e02a 100644 --- a/components/exo/wayland/clients/motion_events.cc +++ b/components/exo/wayland/clients/motion_events.cc
@@ -8,6 +8,7 @@ #include <fcntl.h> #include <linux-dmabuf-unstable-v1-client-protocol.h> +#include <presentation-time-client-protocol.h> #include <wayland-client-core.h> #include <wayland-client-protocol.h> @@ -21,6 +22,7 @@ #include "base/command_line.h" #include "base/logging.h" #include "base/macros.h" +#include "base/memory/ptr_util.h" #include "base/memory/shared_memory.h" #include "base/scoped_generic.h" #include "base/strings/string_number_conversions.h" @@ -69,6 +71,9 @@ DEFAULT_DELETER(wl_pointer, wl_pointer_destroy) DEFAULT_DELETER(wl_touch, wl_touch_destroy) DEFAULT_DELETER(wl_callback, wl_callback_destroy) +DEFAULT_DELETER(wp_presentation, wp_presentation_destroy) +DEFAULT_DELETER(struct wp_presentation_feedback, + wp_presentation_feedback_destroy) DEFAULT_DELETER(zwp_linux_buffer_params_v1, zwp_linux_buffer_params_v1_destroy) DEFAULT_DELETER(zwp_linux_dmabuf_v1, zwp_linux_dmabuf_v1_destroy) @@ -111,6 +116,7 @@ struct Globals { std::unique_ptr<wl_compositor> compositor; std::unique_ptr<wl_shm> shm; + std::unique_ptr<wp_presentation> presentation; std::unique_ptr<zwp_linux_dmabuf_v1> linux_dmabuf; std::unique_ptr<wl_shell> shell; std::unique_ptr<wl_seat> seat; @@ -135,6 +141,9 @@ } else if (strcmp(interface, "wl_seat") == 0) { globals->seat.reset(static_cast<wl_seat*>( wl_registry_bind(registry, id, &wl_seat_interface, 5))); + } else if (strcmp(interface, "wp_presentation") == 0) { + globals->presentation.reset(static_cast<wp_presentation*>( + wl_registry_bind(registry, id, &wp_presentation_interface, 1))); } else if (strcmp(interface, "zwp_linux_dmabuf_v1") == 0) { globals->linux_dmabuf.reset(static_cast<zwp_linux_dmabuf_v1*>( wl_registry_bind(registry, id, &zwp_linux_dmabuf_v1_interface, 1))); @@ -271,17 +280,78 @@ void TouchCancel(void* data, wl_touch* touch) {} -struct Frame { +struct Schedule { uint32_t time = 0; bool callback_pending = false; }; void FrameCallback(void* data, wl_callback* callback, uint32_t time) { - Frame* frame = 
static_cast<Frame*>(data); + Schedule* schedule = static_cast<Schedule*>(data); static uint32_t initial_time = time; - frame->time = time - initial_time; - frame->callback_pending = false; + schedule->time = time - initial_time; + schedule->callback_pending = false; +} + +struct Frame { + Buffer* buffer = nullptr; + base::TimeDelta wall_time; + base::TimeDelta cpu_time; + std::vector<base::TimeTicks> event_times; + std::unique_ptr<struct wp_presentation_feedback> feedback; +}; + +struct Presentation { + std::deque<std::unique_ptr<Frame>> scheduled_frames; + base::TimeDelta wall_time; + base::TimeDelta cpu_time; + base::TimeDelta latency_time; + uint32_t num_frames_presented = 0; + uint32_t num_events_presented = 0; +}; + +void FeedbackSyncOutput(void* data, + struct wp_presentation_feedback* presentation_feedback, + wl_output* output) {} + +void FeedbackPresented(void* data, + struct wp_presentation_feedback* presentation_feedback, + uint32_t tv_sec_hi, + uint32_t tv_sec_lo, + uint32_t tv_nsec, + uint32_t refresh, + uint32_t seq_hi, + uint32_t seq_lo, + uint32_t flags) { + Presentation* presentation = static_cast<Presentation*>(data); + DCHECK_GT(presentation->scheduled_frames.size(), 0u); + std::unique_ptr<Frame> frame = + std::move(presentation->scheduled_frames.front()); + presentation->scheduled_frames.pop_front(); + + presentation->wall_time += frame->wall_time; + presentation->cpu_time += frame->cpu_time; + ++presentation->num_frames_presented; + + int64_t seconds = (static_cast<int64_t>(tv_sec_hi) << 32) + tv_sec_lo; + int64_t microseconds = seconds * base::Time::kMicrosecondsPerSecond + + tv_nsec / base::Time::kNanosecondsPerMicrosecond; + base::TimeTicks presentation_time = + base::TimeTicks::FromInternalValue(microseconds); + for (const auto& event_time : frame->event_times) { + presentation->latency_time += presentation_time - event_time; + ++presentation->num_events_presented; + } +} + +void FeedbackDiscarded(void* data, + struct 
wp_presentation_feedback* presentation_feedback) { + Presentation* presentation = static_cast<Presentation*>(data); + DCHECK_GT(presentation->scheduled_frames.size(), 0u); + std::unique_ptr<Frame> frame = + std::move(presentation->scheduled_frames.front()); + presentation->scheduled_frames.pop_front(); + LOG(WARNING) << "Frame discarded"; } #if defined(OZONE_PLATFORM_GBM) @@ -385,6 +455,10 @@ LOG(ERROR) << "Can't find shm interface"; return 1; } + if (!globals_.presentation) { + LOG(ERROR) << "Can't find presentation interface"; + return 1; + } if (use_drm_ && !globals_.linux_dmabuf) { LOG(ERROR) << "Can't find linux_dmabuf interface"; return 1; @@ -400,6 +474,7 @@ #if defined(OZONE_PLATFORM_GBM) EGLenum egl_sync_type = 0; + sk_sp<const GrGLInterface> native_interface; if (use_drm_) { // Number of files to look for when discovering DRM devices. const uint32_t kDrmMaxMinor = 15; @@ -453,14 +528,14 @@ if (gl::GLSurfaceEGL::HasEGLExtension("EGL_ANDROID_native_fence_sync")) { egl_sync_type = EGL_SYNC_NATIVE_FENCE_ANDROID; } - } - sk_sp<const GrGLInterface> native_interface(GrGLCreateNativeInterface()); - DCHECK(native_interface); - gr_context_ = sk_sp<GrContext>(GrContext::Create( - kOpenGL_GrBackend, - reinterpret_cast<GrBackendContext>(native_interface.get()))); - DCHECK(gr_context_); + native_interface = sk_sp<const GrGLInterface>(GrGLCreateNativeInterface()); + DCHECK(native_interface); + gr_context_ = sk_sp<GrContext>(GrContext::Create( + kOpenGL_GrBackend, + reinterpret_cast<GrBackendContext>(native_interface.get()))); + DCHECK(gr_context_); + } #endif wl_buffer_listener buffer_listener = {BufferRelease}; @@ -539,18 +614,20 @@ TouchFrame, TouchCancel}; wl_touch_add_listener(touch.get(), &touch_listener, &event_times); - Frame frame; + Schedule schedule; std::unique_ptr<wl_callback> frame_callback; wl_callback_listener frame_listener = {FrameCallback}; - std::deque<Buffer*> pending_frames; - uint32_t frames = 0; + Presentation presentation; + 
std::deque<std::unique_ptr<Frame>> pending_frames; + size_t num_benchmark_runs_left = num_benchmark_runs_; - base::TimeTicks benchmark_time; - base::TimeDelta benchmark_wall_time; - base::TimeDelta benchmark_cpu_time; + base::TimeTicks benchmark_start_time; std::string fps_counter_text("??"); + wp_presentation_feedback_listener feedback_listener = { + FeedbackSyncOutput, FeedbackPresented, FeedbackDiscarded}; + SkPaint text_paint; text_paint.setTextSize(32.0f); text_paint.setColor(SK_ColorWHITE); @@ -558,7 +635,7 @@ int dispatch_status = 0; do { - bool enqueue_frame = frame.callback_pending + bool enqueue_frame = schedule.callback_pending ? pending_frames.size() < max_frames_pending_ : pending_frames.empty(); if (enqueue_frame) { @@ -570,22 +647,28 @@ return 1; } + auto frame = base::MakeUnique<Frame>(); + frame->buffer = buffer; + base::TimeTicks wall_time_start; base::ThreadTicks cpu_time_start; if (num_benchmark_runs_ || show_fps_counter_) { wall_time_start = base::TimeTicks::Now(); - if (frames <= kBenchmarkWarmupFrames) - benchmark_time = wall_time_start; + if (presentation.num_frames_presented <= kBenchmarkWarmupFrames) + benchmark_start_time = wall_time_start; - if ((wall_time_start - benchmark_time) > benchmark_interval_) { - uint32_t benchmark_frames = frames - kBenchmarkWarmupFrames; + base::TimeDelta benchmark_time = wall_time_start - benchmark_start_time; + if (benchmark_time > benchmark_interval_) { + uint32_t benchmark_frames = + presentation.num_frames_presented - kBenchmarkWarmupFrames; if (num_benchmark_runs_left) { - // Print benchmark statistics for the frames produced and exit. - // Note: frames produced is not necessarily the same as frames - // displayed. + // Print benchmark statistics for the frames presented and exit. 
std::cout << benchmark_frames << '\t' - << benchmark_wall_time.InMilliseconds() << '\t' - << benchmark_cpu_time.InMilliseconds() << '\t' + << benchmark_time.InMilliseconds() << '\t' + << presentation.wall_time.InMilliseconds() << '\t' + << presentation.cpu_time.InMilliseconds() << '\t' + << presentation.num_events_presented << '\t' + << presentation.latency_time.InMilliseconds() << '\t' << std::endl; if (!--num_benchmark_runs_left) return 0; @@ -595,10 +678,12 @@ fps_counter_text = base::UintToString( std::round(benchmark_frames / benchmark_interval_.InSecondsF())); - frames = kBenchmarkWarmupFrames; - benchmark_time = wall_time_start; - benchmark_wall_time = base::TimeDelta(); - benchmark_cpu_time = base::TimeDelta(); + benchmark_start_time = wall_time_start; + presentation.wall_time = base::TimeDelta(); + presentation.cpu_time = base::TimeDelta(); + presentation.latency_time = base::TimeDelta(); + presentation.num_frames_presented = kBenchmarkWarmupFrames; + presentation.num_events_presented = 0; } cpu_time_start = base::ThreadTicks::Now(); @@ -622,6 +707,8 @@ canvas->drawIRect(rect, paint); std::string text = base::UintToString(event_times.back()); canvas->drawText(text.c_str(), text.length(), 8, y + 32, text_paint); + frame->event_times.push_back(base::TimeTicks::FromInternalValue( + event_times.back() * base::Time::kMicrosecondsPerMillisecond)); event_times.pop_back(); y += h; } @@ -633,7 +720,7 @@ SkIRect rect = SkIRect::MakeXYWH(-SkScalarHalf(half_width), -SkScalarHalf(half_height), half_width, half_height); - SkScalar rotation = SkScalarMulDiv(frame.time, kRotationSpeed, 1000); + SkScalar rotation = SkScalarMulDiv(schedule.time, kRotationSpeed, 1000); canvas->save(); canvas->translate(half_width, half_height); for (size_t i = 0; i < num_rects_; ++i) { @@ -668,35 +755,43 @@ } buffer->busy = true; - pending_frames.push_back(buffer); - if (num_benchmark_runs_ || show_fps_counter_) { - ++frames; - benchmark_wall_time += base::TimeTicks::Now() - wall_time_start; 
- benchmark_cpu_time += base::ThreadTicks::Now() - cpu_time_start; + if (num_benchmark_runs_) { + frame->wall_time = base::TimeTicks::Now() - wall_time_start; + frame->cpu_time = base::ThreadTicks::Now() - cpu_time_start; } + pending_frames.push_back(std::move(frame)); continue; } - if (!frame.callback_pending) { + if (!schedule.callback_pending) { DCHECK_GT(pending_frames.size(), 0u); - Buffer* buffer = pending_frames.front(); + std::unique_ptr<Frame> frame = std::move(pending_frames.front()); pending_frames.pop_front(); wl_surface_set_buffer_scale(surface.get(), scale_); wl_surface_damage(surface.get(), 0, 0, width_ / scale_, height_ / scale_); - wl_surface_attach(surface.get(), buffer->buffer.get(), 0, 0); + wl_surface_attach(surface.get(), frame->buffer->buffer.get(), 0, 0); #if defined(OZONE_PLATFORM_GBM) - if (buffer->egl_sync) { - eglClientWaitSyncKHR(eglGetCurrentDisplay(), buffer->egl_sync->get(), + if (frame->buffer->egl_sync) { + eglClientWaitSyncKHR(eglGetCurrentDisplay(), + frame->buffer->egl_sync->get(), EGL_SYNC_FLUSH_COMMANDS_BIT_KHR, EGL_FOREVER_KHR); } #endif frame_callback.reset(wl_surface_frame(surface.get())); - wl_callback_add_listener(frame_callback.get(), &frame_listener, &frame); - frame.callback_pending = true; + wl_callback_add_listener(frame_callback.get(), &frame_listener, + &schedule); + schedule.callback_pending = true; + + frame->feedback.reset( + wp_presentation_feedback(globals_.presentation.get(), surface.get())); + wp_presentation_feedback_add_listener(frame->feedback.get(), + &feedback_listener, &presentation); + presentation.scheduled_frames.push_back(std::move(frame)); + wl_surface_commit(surface.get()); wl_display_flush(display_.get()); continue;
diff --git a/components/exo/wayland/server.cc b/components/exo/wayland/server.cc index 9ac009a5..ae0d0126 100644 --- a/components/exo/wayland/server.cc +++ b/components/exo/wayland/server.cc
@@ -4,24 +4,23 @@ #include "components/exo/wayland/server.h" +#include <alpha-compositing-unstable-v1-server-protocol.h> +#include <gaming-input-unstable-v1-server-protocol.h> #include <grp.h> +#include <keyboard-configuration-unstable-v1-server-protocol.h> #include <linux/input.h> +#include <presentation-time-server-protocol.h> +#include <remote-shell-unstable-v1-server-protocol.h> +#include <secure-output-unstable-v1-server-protocol.h> #include <stddef.h> #include <stdint.h> +#include <stylus-unstable-v1-server-protocol.h> +#include <stylus-unstable-v2-server-protocol.h> #include <viewporter-server-protocol.h> +#include <vsync-feedback-unstable-v1-server-protocol.h> #include <wayland-server-core.h> #include <wayland-server-protocol-core.h> - -// Note: core wayland headers need to be included before protocol headers. -#include <alpha-compositing-unstable-v1-server-protocol.h> // NOLINT -#include <keyboard-configuration-unstable-v1-server-protocol.h> // NOLINT -#include <gaming-input-unstable-v1-server-protocol.h> // NOLINT -#include <remote-shell-unstable-v1-server-protocol.h> // NOLINT -#include <secure-output-unstable-v1-server-protocol.h> // NOLINT -#include <stylus-unstable-v1-server-protocol.h> // NOLINT -#include <stylus-unstable-v2-server-protocol.h> // NOLINT -#include <vsync-feedback-unstable-v1-server-protocol.h> // NOLINT -#include <xdg-shell-unstable-v5-server-protocol.h> // NOLINT +#include <xdg-shell-unstable-v5-server-protocol.h> #include <algorithm> #include <cstdlib> @@ -223,10 +222,10 @@ wl_resource_create(client, &wl_callback_interface, 1, callback); // base::Unretained is safe as the resource owns the callback. 
- std::unique_ptr<base::CancelableCallback<void(base::TimeTicks)>> - cancelable_callback( - new base::CancelableCallback<void(base::TimeTicks)>(base::Bind( - &HandleSurfaceFrameCallback, base::Unretained(callback_resource)))); + auto cancelable_callback = + base::MakeUnique<base::CancelableCallback<void(base::TimeTicks)>>( + base::Bind(&HandleSurfaceFrameCallback, + base::Unretained(callback_resource))); GetUserDataAs<Surface>(resource) ->RequestFrameCallback(cancelable_callback->callback()); @@ -2649,6 +2648,71 @@ } //////////////////////////////////////////////////////////////////////////////// +// presentation_interface: + +void HandleSurfacePresentationCallback(wl_resource* resource, + base::TimeTicks presentation_time, + base::TimeDelta refresh) { + if (presentation_time.is_null()) { + wp_presentation_feedback_send_discarded(resource); + } else { + int64_t presentation_time_us = presentation_time.ToInternalValue(); + int64_t seconds = presentation_time_us / base::Time::kMicrosecondsPerSecond; + int64_t microseconds = + presentation_time_us % base::Time::kMicrosecondsPerSecond; + wp_presentation_feedback_send_presented( + resource, seconds >> 32, seconds & 0xffffffff, + microseconds * base::Time::kNanosecondsPerMicrosecond, + refresh.InMicroseconds() * base::Time::kNanosecondsPerMicrosecond, 0, 0, + WP_PRESENTATION_FEEDBACK_KIND_VSYNC | + WP_PRESENTATION_FEEDBACK_KIND_HW_CLOCK | + WP_PRESENTATION_FEEDBACK_KIND_HW_COMPLETION); + } + wl_client_flush(wl_resource_get_client(resource)); +} + +void presentation_destroy(wl_client* client, wl_resource* resource) { + wl_resource_destroy(resource); +} + +void presentation_feedback(wl_client* client, + wl_resource* resource, + wl_resource* surface_resource, + uint32_t id) { + wl_resource* presentation_feedback_resource = + wl_resource_create(client, &wp_presentation_feedback_interface, + wl_resource_get_version(resource), id); + + // base::Unretained is safe as the resource owns the callback. 
+ auto cancelable_callback = base::MakeUnique< + base::CancelableCallback<void(base::TimeTicks, base::TimeDelta)>>( + base::Bind(&HandleSurfacePresentationCallback, + base::Unretained(presentation_feedback_resource))); + + GetUserDataAs<Surface>(surface_resource) + ->RequestPresentationCallback(cancelable_callback->callback()); + + SetImplementation(presentation_feedback_resource, nullptr, + std::move(cancelable_callback)); +} + +const struct wp_presentation_interface presentation_implementation = { + presentation_destroy, presentation_feedback}; + +void bind_presentation(wl_client* client, + void* data, + uint32_t version, + uint32_t id) { + wl_resource* resource = + wl_resource_create(client, &wp_presentation_interface, 1, id); + + wl_resource_set_implementation(resource, &presentation_implementation, data, + nullptr); + + wp_presentation_send_clock_id(resource, CLOCK_MONOTONIC); +} + +//////////////////////////////////////////////////////////////////////////////// // security_interface: // Implements the security interface to a Surface. The "only visible on secure @@ -3189,6 +3253,8 @@ display_, bind_seat); wl_global_create(wl_display_.get(), &wp_viewporter_interface, 1, display_, bind_viewporter); + wl_global_create(wl_display_.get(), &wp_presentation_interface, 1, display_, + bind_presentation); wl_global_create(wl_display_.get(), &zcr_secure_output_v1_interface, 1, display_, bind_secure_output); wl_global_create(wl_display_.get(), &zcr_alpha_compositing_v1_interface, 1,
diff --git a/components/ntp_snippets/bookmarks/bookmark_last_visit_utils.cc b/components/ntp_snippets/bookmarks/bookmark_last_visit_utils.cc index 63be8ad..21ecd811 100644 --- a/components/ntp_snippets/bookmarks/bookmark_last_visit_utils.cc +++ b/components/ntp_snippets/bookmarks/bookmark_last_visit_utils.cc
@@ -42,8 +42,8 @@ } bool ExtractLastVisitDate(const BookmarkNode& node, - const std::string& meta_info_key, - base::Time* out) { + const std::string& meta_info_key, + base::Time* out) { std::string last_visit_date_string; if (!node.GetMetaInfo(meta_info_key, &last_visit_date_string)) { return false; @@ -269,21 +269,44 @@ return result; } -void RemoveAllLastVisitDates(bookmarks::BookmarkModel* bookmark_model) { +namespace { + +void ClearLastVisitedMetadataIfBetween(bookmarks::BookmarkModel* model, + const BookmarkNode& node, + const base::Time& begin, + const base::Time& end, + const std::string& meta_key) { + base::Time last_visit_time; + if (ExtractLastVisitDate(node, meta_key, &last_visit_time) && + begin <= last_visit_time && last_visit_time <= end) { + model->DeleteNodeMetaInfo(&node, meta_key); + } +} + +} // namespace + +void RemoveLastVisitedDatesBetween(const base::Time& begin, + const base::Time& end, + base::Callback<bool(const GURL& url)> filter, + bookmarks::BookmarkModel* bookmark_model) { // Get all the bookmark URLs. std::vector<BookmarkModel::URLAndTitle> bookmark_urls; bookmark_model->GetBookmarks(&bookmark_urls); for (const BookmarkModel::URLAndTitle& url_and_title : bookmark_urls) { + if (!filter.Run(url_and_title.url)) { + continue; + } // Get all bookmarks for the given URL. std::vector<const BookmarkNode*> bookmarks_for_url; bookmark_model->GetNodesByURL(url_and_title.url, &bookmarks_for_url); for (const BookmarkNode* bookmark : bookmarks_for_url) { - bookmark_model->DeleteNodeMetaInfo(bookmark, - kBookmarkLastVisitDateOnMobileKey); - bookmark_model->DeleteNodeMetaInfo(bookmark, - kBookmarkLastVisitDateOnDesktopKey); + // The dismissal metadata is managed by the BookmarkSuggestionsProvider. + ClearLastVisitedMetadataIfBetween(bookmark_model, *bookmark, begin, end, + kBookmarkLastVisitDateOnMobileKey); + ClearLastVisitedMetadataIfBetween(bookmark_model, *bookmark, begin, end, + kBookmarkLastVisitDateOnDesktopKey); } } }
diff --git a/components/ntp_snippets/bookmarks/bookmark_last_visit_utils.h b/components/ntp_snippets/bookmarks/bookmark_last_visit_utils.h index b022e8e3..128a0012 100644 --- a/components/ntp_snippets/bookmarks/bookmark_last_visit_utils.h +++ b/components/ntp_snippets/bookmarks/bookmark_last_visit_utils.h
@@ -7,6 +7,8 @@ #include <vector> +#include "base/callback.h" + class GURL; namespace base { @@ -67,8 +69,13 @@ std::vector<const bookmarks::BookmarkNode*> GetDismissedBookmarksForDebugging( bookmarks::BookmarkModel* bookmark_model); -// Removes last visited date metadata for all bookmarks. -void RemoveAllLastVisitDates(bookmarks::BookmarkModel* bookmark_model); +// Removes last-visited data (incl. any other metadata managed by content +// suggestions) for bookmarks within the provided time range. +// TODO(tschumann): Implement URL filtering. +void RemoveLastVisitedDatesBetween(const base::Time& begin, + const base::Time& end, + base::Callback<bool(const GURL& url)> filter, + bookmarks::BookmarkModel* bookmark_model); } // namespace ntp_snippets
diff --git a/components/ntp_snippets/bookmarks/bookmark_last_visit_utils_unittest.cc b/components/ntp_snippets/bookmarks/bookmark_last_visit_utils_unittest.cc index c99c8695..6f06527 100644 --- a/components/ntp_snippets/bookmarks/bookmark_last_visit_utils_unittest.cc +++ b/components/ntp_snippets/bookmarks/bookmark_last_visit_utils_unittest.cc
@@ -4,8 +4,10 @@ #include "components/ntp_snippets/bookmarks/bookmark_last_visit_utils.h" +#include <memory> #include <string> +#include "base/callback.h" #include "base/strings/string_number_conversions.h" #include "base/strings/stringprintf.h" #include "base/strings/utf_string_conversions.h" @@ -19,6 +21,7 @@ using bookmarks::BookmarkModel; using bookmarks::BookmarkNode; +using testing::Eq; using testing::IsEmpty; using testing::SizeIs; @@ -48,32 +51,35 @@ void AddBookmarksRecentOnMobile(BookmarkModel* model, int num, - const base::Time& threshold_time) { - base::TimeDelta week = base::TimeDelta::FromDays(7); - base::Time recent_time = threshold_time + week; - std::string recent_time_string = - base::Int64ToString(recent_time.ToInternalValue()); - + const base::Time& visit_time) { AddBookmarks(model, num, kBookmarkLastVisitDateOnMobileKey, - recent_time_string); + base::Int64ToString(visit_time.ToInternalValue())); } void AddBookmarksRecentOnDesktop(BookmarkModel* model, int num, - const base::Time& threshold_time) { - base::TimeDelta week = base::TimeDelta::FromDays(7); - base::Time recent_time = threshold_time + week; - std::string recent_time_string = - base::Int64ToString(recent_time.ToInternalValue()); - + const base::Time& visit_time) { AddBookmarks(model, num, kBookmarkLastVisitDateOnDesktopKey, - recent_time_string); + base::Int64ToString(visit_time.ToInternalValue())); } void AddBookmarksNonVisited(BookmarkModel* model, int num) { AddBookmarks(model, num, std::string(), std::string()); } +const BookmarkNode* AddSingleBookmark(BookmarkModel* model, + const std::string& url, + const std::string& last_visit_key, + const base::Time& visit_time) { + base::string16 title = + base::ASCIIToUTF16(base::StringPrintf("title-%s", url.c_str())); + const BookmarkNode* node = + model->AddURL(model->bookmark_bar_node(), 0, title, GURL(url)); + model->SetNodeMetaInfo(node, last_visit_key, + base::Int64ToString(visit_time.ToInternalValue())); + return node; +} + } // 
namespace class GetRecentlyVisitedBookmarksTest : public testing::Test { @@ -85,6 +91,10 @@ const base::Time& threshold_time() const { return threshold_time_; } + base::Time GetRecentTime() const { + return threshold_time_ + base::TimeDelta::FromDays(7); + } + private: base::Time threshold_time_; @@ -109,7 +119,7 @@ std::unique_ptr<BookmarkModel> model = bookmarks::TestBookmarkClient::CreateModel(); AddBookmarksRecentOnDesktop(model.get(), number_of_bookmarks, - threshold_time()); + GetRecentTime()); std::vector<const bookmarks::BookmarkNode*> result = GetRecentlyVisitedBookmarks(model.get(), number_of_bookmarks, @@ -124,7 +134,7 @@ std::unique_ptr<BookmarkModel> model = bookmarks::TestBookmarkClient::CreateModel(); AddBookmarksRecentOnDesktop(model.get(), number_of_recent_desktop, - threshold_time()); + GetRecentTime()); AddBookmarksNonVisited(model.get(), number_of_bookmarks - number_of_recent_desktop); @@ -139,8 +149,7 @@ const int number_of_bookmarks = 3; std::unique_ptr<BookmarkModel> model = bookmarks::TestBookmarkClient::CreateModel(); - AddBookmarksRecentOnMobile(model.get(), number_of_bookmarks, - threshold_time()); + AddBookmarksRecentOnMobile(model.get(), number_of_bookmarks, GetRecentTime()); const int max_count = number_of_bookmarks - 1; std::vector<const bookmarks::BookmarkNode*> result = @@ -149,4 +158,142 @@ EXPECT_THAT(result, SizeIs(max_count)); } +namespace { + +base::Callback<bool(const GURL& url)> DeleteAllFilter() { + return base::Bind([] (const GURL& url) { return true; }); +} + +base::Callback<bool(const GURL& url)> DeleteOneURLFilter( + const GURL& to_delete) { + return base::Bind( + [](const GURL& to_delete, const GURL& url) { return url == to_delete; }, + to_delete); +} + +} // namespace + +TEST(RemoveLastVisitedDatesBetween, ShouldRemoveTimestampsWithinTimeRange) { + const base::Time delete_begin = + base::Time::Now() - base::TimeDelta::FromDays(2); + const base::Time delete_end = base::Time::Max(); + + std::unique_ptr<BookmarkModel> 
model = + bookmarks::TestBookmarkClient::CreateModel(); + AddSingleBookmark(model.get(), "http://url-1.com", + kBookmarkLastVisitDateOnMobileKey, + delete_begin + base::TimeDelta::FromSeconds(1)); + AddSingleBookmark(model.get(), "http://url-1.com", + kBookmarkLastVisitDateOnDesktopKey, + delete_begin + base::TimeDelta::FromSeconds(1)); + ASSERT_THAT( + GetRecentlyVisitedBookmarks(model.get(), 20, base::Time(), + /*consider_visits_from_desktop=*/true), + SizeIs(1)); + + RemoveLastVisitedDatesBetween(delete_begin, delete_end, DeleteAllFilter(), + model.get()); + + EXPECT_THAT( + GetRecentlyVisitedBookmarks(model.get(), 20, base::Time(), + /*consider_visits_from_desktop=*/true), + IsEmpty()); + // Verify that the bookmark model nodes themselves still exist. + std::vector<const BookmarkNode*> remaining_nodes; + model->GetNodesByURL(GURL("http://url-1.com"), &remaining_nodes); + EXPECT_THAT(remaining_nodes, SizeIs(2)); +} + +TEST(RemoveLastVisitedDatesBetween, + ShouldHandleMetadataFromOtherDeviceTypesSeparately) { + const base::Time delete_begin = + base::Time::Now() - base::TimeDelta::FromDays(2); + const base::Time delete_end = base::Time::Max(); + + std::unique_ptr<BookmarkModel> model = + bookmarks::TestBookmarkClient::CreateModel(); + // Create a bookmark with last visited times from both mobile and desktop. + // The mobile one is within the deletion interval, the desktop one outside. + // Only the mobile one should get deleted. 
+ const BookmarkNode* node = AddSingleBookmark( + model.get(), "http://url-1.com", kBookmarkLastVisitDateOnMobileKey, + delete_begin + base::TimeDelta::FromSeconds(1)); + model->SetNodeMetaInfo( + node, kBookmarkLastVisitDateOnDesktopKey, + base::Int64ToString( + (delete_begin - base::TimeDelta::FromSeconds(1)).ToInternalValue())); + ASSERT_THAT( + GetRecentlyVisitedBookmarks(model.get(), 20, base::Time(), + /*consider_visits_from_desktop=*/true), + SizeIs(1)); + + RemoveLastVisitedDatesBetween(delete_begin, delete_end, DeleteAllFilter(), + model.get()); + + EXPECT_THAT( + GetRecentlyVisitedBookmarks(model.get(), 20, base::Time(), + /*consider_visits_from_desktop=*/false), + IsEmpty()); + EXPECT_THAT( + GetRecentlyVisitedBookmarks(model.get(), 20, base::Time(), + /*consider_visits_from_desktop=*/true), + SizeIs(1)); +} + +TEST(RemoveLastVisitedDatesBetween, ShouldNotRemoveTimestampsOutsideTimeRange) { + const base::Time delete_begin = + base::Time::Now() - base::TimeDelta::FromDays(2); + const base::Time delete_end = delete_begin + base::TimeDelta::FromDays(5); + + std::unique_ptr<BookmarkModel> model = + bookmarks::TestBookmarkClient::CreateModel(); + AddSingleBookmark(model.get(), "http://url-1.com", + kBookmarkLastVisitDateOnMobileKey, + delete_begin - base::TimeDelta::FromSeconds(1)); + AddSingleBookmark(model.get(), "http://url-2.com", + kBookmarkLastVisitDateOnDesktopKey, + delete_end + base::TimeDelta::FromSeconds(1)); + ASSERT_THAT( + GetRecentlyVisitedBookmarks(model.get(), 20, base::Time(), + /*consider_visits_from_desktop=*/true), + SizeIs(2)); + + RemoveLastVisitedDatesBetween(delete_begin, delete_end, DeleteAllFilter(), + model.get()); + + EXPECT_THAT( + GetRecentlyVisitedBookmarks(model.get(), 20, base::Time(), + /*consider_visits_from_desktop=*/true), + SizeIs(2)); +} + +TEST(RemoveLastVisitedDatesBetween, ShouldOnlyRemoveURLsWithinFilter) { + const base::Time delete_begin = + base::Time::Now() - base::TimeDelta::FromDays(2); + const base::Time 
delete_end = base::Time::Max(); + + std::unique_ptr<BookmarkModel> model = + bookmarks::TestBookmarkClient::CreateModel(); + AddSingleBookmark(model.get(), "http://url-1.com", + kBookmarkLastVisitDateOnMobileKey, + delete_begin + base::TimeDelta::FromSeconds(1)); + AddSingleBookmark(model.get(), "http://url-2.com", + kBookmarkLastVisitDateOnMobileKey, + delete_begin + base::TimeDelta::FromSeconds(1)); + ASSERT_THAT( + GetRecentlyVisitedBookmarks(model.get(), 20, base::Time(), + /*consider_visits_from_desktop=*/false), + SizeIs(2)); + + RemoveLastVisitedDatesBetween(delete_begin, delete_end, + DeleteOneURLFilter(GURL("http://url-2.com")), + model.get()); + + std::vector<const bookmarks::BookmarkNode*> remaining_nodes = + GetRecentlyVisitedBookmarks(model.get(), 20, base::Time(), + /*consider_visits_from_desktop=*/false); + EXPECT_THAT(remaining_nodes, SizeIs(1)); + EXPECT_THAT(remaining_nodes[0]->url(), Eq(GURL("http://url-1.com"))); +} + } // namespace ntp_snippets
diff --git a/components/ntp_snippets/bookmarks/bookmark_suggestions_provider.cc b/components/ntp_snippets/bookmarks/bookmark_suggestions_provider.cc index 0c946f2..1f8fdf1 100644 --- a/components/ntp_snippets/bookmarks/bookmark_suggestions_provider.cc +++ b/components/ntp_snippets/bookmarks/bookmark_suggestions_provider.cc
@@ -149,13 +149,14 @@ base::Time begin, base::Time end, const base::Callback<bool(const GURL& url)>& filter) { - // The last visit dates are not "owned" by the bookmark suggestion provider so - // it is cleared directly from browsing_data_remover.cc. + // To avoid race conditions with the history-removal of the last-visited + // timestamps we also trigger a deletion here. The problem is that we need to + // update the bookmarks data here and otherwise (depending on the order in + // which the code runs) could pick up to-be-deleted data again. + if (bookmark_model_->loaded()) { + RemoveLastVisitedDatesBetween(begin, end, filter, bookmark_model_); + } ClearDismissedSuggestionsForDebugging(provided_category_); - // TODO(tschumann): Before re-fetching bookmarks we need to trigger a clean-up - // of the last-visit dates -- otherwise we depend on the order in which the - // ClearHistory events are done and might just pick-up to-be-deleted data - // again. FetchBookmarks(); }
diff --git a/components/payments/payment_request.cc b/components/payments/payment_request.cc index 538bea1..98182c1 100644 --- a/components/payments/payment_request.cc +++ b/components/payments/payment_request.cc
@@ -37,6 +37,7 @@ if (!payments::validatePaymentDetails(details, &error)) { LOG(ERROR) << error; OnError(); + client_.reset(); return; } client_ = std::move(client); @@ -44,6 +45,10 @@ } void PaymentRequest::Show() { + if (!client_.is_bound() || !binding_.is_bound()) { + OnError(); + return; + } delegate_->ShowPaymentRequestDialog(this); }
diff --git a/components/printing/renderer/print_web_view_helper.cc b/components/printing/renderer/print_web_view_helper.cc index 6e1213be..3144c40 100644 --- a/components/printing/renderer/print_web_view_helper.cc +++ b/components/printing/renderer/print_web_view_helper.cc
@@ -347,6 +347,7 @@ } #endif // BUILDFLAG(ENABLE_PRINT_PREVIEW) +#if BUILDFLAG(ENABLE_PRINTING) // Disable scaling when either: // - The PDF specifies disabling scaling. // - All the pages in the PDF are the same size, @@ -389,6 +390,7 @@ return PDFShouldDisableScalingBasedOnPreset(preset_options, params, ignore_page_size); } +#endif #if BUILDFLAG(ENABLE_BASIC_PRINTING) MarginType GetMarginsForPdf(blink::WebLocalFrame* frame,
diff --git a/components/reading_list/ios/reading_list_model_unittest.mm b/components/reading_list/ios/reading_list_model_unittest.mm index 768b346..f5ee9c99 100644 --- a/components/reading_list/ios/reading_list_model_unittest.mm +++ b/components/reading_list/ios/reading_list_model_unittest.mm
@@ -11,6 +11,7 @@ #include "components/reading_list/ios/reading_list_model_storage.h" #include "components/reading_list/ios/reading_list_store_delegate.h" #include "components/sync/model/metadata_change_list.h" +#include "components/sync/model/model_error.h" #include "testing/gtest/include/gtest/gtest.h" namespace { @@ -109,18 +110,18 @@ return std::unique_ptr<syncer::MetadataChangeList>(); } - syncer::SyncError MergeSyncData( + syncer::ModelError MergeSyncData( std::unique_ptr<syncer::MetadataChangeList> metadata_change_list, syncer::EntityDataMap entity_data_map) override { NOTREACHED(); - return syncer::SyncError(); + return syncer::ModelError(); } - syncer::SyncError ApplySyncChanges( + syncer::ModelError ApplySyncChanges( std::unique_ptr<syncer::MetadataChangeList> metadata_change_list, syncer::EntityChangeList entity_changes) override { NOTREACHED(); - return syncer::SyncError(); + return syncer::ModelError(); } void GetData(StorageKeyList storage_keys, DataCallback callback) override {
diff --git a/components/reading_list/ios/reading_list_store.cc b/components/reading_list/ios/reading_list_store.cc index fd02b5cc..4be2da2 100644 --- a/components/reading_list/ios/reading_list_store.cc +++ b/components/reading_list/ios/reading_list_store.cc
@@ -118,10 +118,8 @@ std::unique_ptr<syncer::ModelTypeStore::RecordList> entries) { DCHECK(CalledOnValidThread()); if (result != syncer::ModelTypeStore::Result::SUCCESS) { - change_processor()->OnMetadataLoaded( - change_processor()->CreateAndUploadError( - FROM_HERE, "Cannot load Reading List Database."), - nullptr); + change_processor()->ReportError(FROM_HERE, + "Cannot load Reading List Database."); return; } auto loaded_entries = @@ -152,10 +150,14 @@ } void ReadingListStore::OnReadAllMetadata( - syncer::SyncError sync_error, + syncer::ModelError error, std::unique_ptr<syncer::MetadataBatch> metadata_batch) { DCHECK(CalledOnValidThread()); - change_processor()->OnMetadataLoaded(sync_error, std::move(metadata_batch)); + if (error.IsSet()) { + change_processor()->ReportError(FROM_HERE, "Failed to read metadata."); + } else { + change_processor()->OnMetadataLoaded(std::move(metadata_batch)); + } } void ReadingListStore::OnDatabaseSave(syncer::ModelTypeStore::Result result) { @@ -196,7 +198,7 @@ // combine all change atomically, should save the metadata after the data // changes, so that this merge will be re-driven by sync if is not completely // saved during the current run. -syncer::SyncError ReadingListStore::MergeSyncData( +syncer::ModelError ReadingListStore::MergeSyncData( std::unique_ptr<syncer::MetadataChangeList> metadata_change_list, syncer::EntityDataMap entity_data_map) { DCHECK(CalledOnValidThread()); @@ -275,7 +277,7 @@ } batch_->TransferMetadataChanges(std::move(metadata_change_list)); - return syncer::SyncError(); + return syncer::ModelError(); } // Apply changes from the sync server locally. @@ -283,7 +285,7 @@ // |metadata_change_list| in case when some of the data changes are filtered // out, or even be empty in case when a commit confirmation is processed and // only the metadata needs to persisted. 
-syncer::SyncError ReadingListStore::ApplySyncChanges( +syncer::ModelError ReadingListStore::ApplySyncChanges( std::unique_ptr<syncer::MetadataChangeList> metadata_change_list, syncer::EntityChangeList entity_changes) { DCHECK(CalledOnValidThread()); @@ -345,7 +347,7 @@ } batch_->TransferMetadataChanges(std::move(metadata_change_list)); - return syncer::SyncError(); + return syncer::ModelError(); } void ReadingListStore::GetData(StorageKeyList storage_keys, @@ -359,7 +361,7 @@ } } - callback.Run(syncer::SyncError(), std::move(batch)); + callback.Run(std::move(batch)); } void ReadingListStore::GetAllData(DataCallback callback) { @@ -371,7 +373,7 @@ AddEntryToBatch(batch.get(), *entry); } - callback.Run(syncer::SyncError(), std::move(batch)); + callback.Run(std::move(batch)); } void ReadingListStore::AddEntryToBatch(syncer::MutableDataBatch* batch,
diff --git a/components/reading_list/ios/reading_list_store.h b/components/reading_list/ios/reading_list_store.h index a32e39d..4eef459 100644 --- a/components/reading_list/ios/reading_list_store.h +++ b/components/reading_list/ios/reading_list_store.h
@@ -8,6 +8,7 @@ #include "base/threading/non_thread_safe.h" #include "components/reading_list/ios/reading_list_model_storage.h" #include "components/reading_list/ios/reading_list_store_delegate.h" +#include "components/sync/model/model_error.h" #include "components/sync/model/model_type_store.h" namespace syncer { @@ -54,7 +55,7 @@ // combine all change atomically, should save the metadata after the data // changes, so that this merge will be re-driven by sync if is not completely // saved during the current run. - syncer::SyncError MergeSyncData( + syncer::ModelError MergeSyncData( std::unique_ptr<syncer::MetadataChangeList> metadata_change_list, syncer::EntityDataMap entity_data_map) override; @@ -63,7 +64,7 @@ // |metadata_change_list| in case when some of the data changes are filtered // out, or even be empty in case when a commit confirmation is processed and // only the metadata needs to persisted. - syncer::SyncError ApplySyncChanges( + syncer::ModelError ApplySyncChanges( std::unique_ptr<syncer::MetadataChangeList> metadata_change_list, syncer::EntityChangeList entity_changes) override; @@ -147,7 +148,7 @@ syncer::ModelTypeStore::Result result, std::unique_ptr<syncer::ModelTypeStore::RecordList> entries); void OnDatabaseSave(syncer::ModelTypeStore::Result result); - void OnReadAllMetadata(syncer::SyncError sync_error, + void OnReadAllMetadata(syncer::ModelError error, std::unique_ptr<syncer::MetadataBatch> metadata_batch); void AddEntryToBatch(syncer::MutableDataBatch* batch,
diff --git a/components/reading_list/ios/reading_list_store_unittest.mm b/components/reading_list/ios/reading_list_store_unittest.mm index 7bc3f71..d25ce02 100644 --- a/components/reading_list/ios/reading_list_store_unittest.mm +++ b/components/reading_list/ios/reading_list_store_unittest.mm
@@ -220,7 +220,7 @@ std::unique_ptr<syncer::MetadataChangeList> metadata_changes( reading_list_store_->CreateMetadataChangeList()); - const syncer::SyncError error = reading_list_store_->MergeSyncData( + const syncer::ModelError error = reading_list_store_->MergeSyncData( std::move(metadata_changes), remote_input); AssertCounts(0, 0, 1, 0, 0); EXPECT_EQ(sync_added_.size(), 1u); @@ -242,7 +242,7 @@ add_changes.push_back(syncer::EntityChange::CreateAdd( "http://read.example.com/", data.PassToPtr())); - syncer::SyncError error = reading_list_store_->ApplySyncChanges( + syncer::ModelError error = reading_list_store_->ApplySyncChanges( reading_list_store_->CreateMetadataChangeList(), add_changes); AssertCounts(0, 0, 1, 0, 0); EXPECT_EQ(sync_added_.size(), 1u); @@ -268,7 +268,7 @@ syncer::EntityChangeList add_changes; add_changes.push_back(syncer::EntityChange::CreateAdd( "http://unread.example.com/", data.PassToPtr())); - syncer::SyncError error = reading_list_store_->ApplySyncChanges( + syncer::ModelError error = reading_list_store_->ApplySyncChanges( reading_list_store_->CreateMetadataChangeList(), add_changes); AssertCounts(1, 0, 0, 0, 1); EXPECT_EQ(sync_merged_.size(), 1u); @@ -297,7 +297,7 @@ syncer::EntityChangeList add_changes; add_changes.push_back(syncer::EntityChange::CreateAdd( "http://unread.example.com/", data.PassToPtr())); - syncer::SyncError error = reading_list_store_->ApplySyncChanges( + syncer::ModelError error = reading_list_store_->ApplySyncChanges( reading_list_store_->CreateMetadataChangeList(), add_changes); AssertCounts(1, 0, 0, 0, 1); EXPECT_EQ(sync_merged_.size(), 1u); @@ -307,7 +307,7 @@ syncer::EntityChangeList delete_changes; delete_changes.push_back( syncer::EntityChange::CreateDelete("http://read.example.com/")); - syncer::SyncError error = reading_list_store_->ApplySyncChanges( + syncer::ModelError error = reading_list_store_->ApplySyncChanges( reading_list_store_->CreateMetadataChangeList(), delete_changes); AssertCounts(0, 0, 0, 1, 0); 
EXPECT_EQ(sync_removed_.size(), 1u);
diff --git a/components/safe_browsing_db/v4_local_database_manager_unittest.cc b/components/safe_browsing_db/v4_local_database_manager_unittest.cc index 3b1a68619..acf25b7 100644 --- a/components/safe_browsing_db/v4_local_database_manager_unittest.cc +++ b/components/safe_browsing_db/v4_local_database_manager_unittest.cc
@@ -6,8 +6,8 @@ #include "base/memory/ptr_util.h" #include "base/memory/ref_counted.h" #include "base/run_loop.h" -#include "base/strings/stringprintf.h" #include "base/test/test_simple_task_runner.h" +#include "base/threading/thread_task_runner_handle.h" #include "components/safe_browsing_db/v4_database.h" #include "components/safe_browsing_db/v4_local_database_manager.h" #include "components/safe_browsing_db/v4_test_util.h" @@ -28,15 +28,50 @@ return full_hashes[0]; } -// A fullhash response containing no matches. -std::string GetEmptyV4HashResponse() { - FindFullHashesResponse res; - res.mutable_negative_cache_duration()->set_seconds(600); +// Always returns misses from GetFullHashes(). +class FakeGetHashProtocolManager : public V4GetHashProtocolManager { + public: + FakeGetHashProtocolManager( + net::URLRequestContextGetter* request_context_getter, + const StoresToCheck& stores_to_check, + const V4ProtocolConfig& config) + : V4GetHashProtocolManager(request_context_getter, + stores_to_check, + config) {} - std::string res_data; - res.SerializeToString(&res_data); - return res_data; -} + void GetFullHashes(const FullHashToStoreAndHashPrefixesMap&, + FullHashCallback callback) override { + std::vector<FullHashInfo> full_hash_infos; + + // Async, since the real manager might use a fetcher. + base::ThreadTaskRunnerHandle::Get()->PostTask( + FROM_HERE, base::Bind(callback, full_hash_infos)); + } +}; + +class FakeGetHashProtocolManagerFactory + : public V4GetHashProtocolManagerFactory { + public: + std::unique_ptr<V4GetHashProtocolManager> CreateProtocolManager( + net::URLRequestContextGetter* request_context_getter, + const StoresToCheck& stores_to_check, + const V4ProtocolConfig& config) override { + return base::MakeUnique<FakeGetHashProtocolManager>( + request_context_getter, stores_to_check, config); + } +}; + +// Use FakeGetHashProtocolManagerFactory in scope, then reset. 
+class ScopedFakeGetHashProtocolManagerFactory { + public: + ScopedFakeGetHashProtocolManagerFactory() { + V4GetHashProtocolManager::RegisterFactory( + base::MakeUnique<FakeGetHashProtocolManagerFactory>()); + } + ~ScopedFakeGetHashProtocolManagerFactory() { + V4GetHashProtocolManager::RegisterFactory(nullptr); + } +}; } // namespace @@ -214,6 +249,14 @@ WaitForTasksOnTaskRunner(); } + void ResetLocalDatabaseManager() { + StopLocalDatabaseManager(); + v4_local_database_manager_ = + make_scoped_refptr(new V4LocalDatabaseManager(base_dir_.GetPath())); + SetTaskRunnerForTest(); + StartLocalDatabaseManager(); + } + void ResetV4Database() { V4Database::Destroy(std::move(v4_local_database_manager_->v4_database_)); } @@ -372,36 +415,18 @@ // Verify that a window where checks cannot be cancelled is closed. TEST_F(V4LocalDatabaseManagerTest, CancelPending) { + // Set up to receive full-hash misses. + ScopedFakeGetHashProtocolManagerFactory pin; + + // Reset the database manager so it picks up the replacement protocol manager. + ResetLocalDatabaseManager(); WaitForTasksOnTaskRunner(); - net::FakeURLFetcherFactory factory(NULL); - // TODO(shess): Modify this to use a mock protocol manager instead - // of faking the requests. - const char* kReqs[] = { - // OSX - "Cg8KCHVuaXR0ZXN0EgMxLjAaJwgBCAIIAwgGCAcICAgJCAoQBBAIGgcKBWVXGg-" - "pIAEgAyAEIAUgBg==", - // Linux - "Cg8KCHVuaXR0ZXN0EgMxLjAaJwgBCAIIAwgGCAcICAgJCAoQAhAIGgcKBWVXGg-" - "pIAEgAyAEIAUgBg==", - - // Windows - "Cg8KCHVuaXR0ZXN0EgMxLjAaJwgBCAIIAwgGCAcICAgJCAoQARAIGgcKBWVXGg-" - "pIAEgAyAEIAUgBg==", - }; - for (const char* req : kReqs) { - const GURL url( - base::StringPrintf("https://safebrowsing.googleapis.com/v4/" - "fullHashes:find?$req=%s" - "&$ct=application/x-protobuf&key=test_key_param", - req)); - factory.SetFakeResponse(url, GetEmptyV4HashResponse(), net::HTTP_OK, - net::URLRequestStatus::SUCCESS); - } - + // A URL and matching prefix. 
const GURL url("http://example.com/a/"); const HashPrefix hash_prefix("eW\x1A\xF\xA9"); + // Put a match in the db that will cause a protocol-manager request. StoreAndHashPrefixes store_and_hash_prefixes; store_and_hash_prefixes.emplace_back(GetUrlMalwareId(), hash_prefix); ReplaceV4Database(store_and_hash_prefixes);
diff --git a/components/search_engines/template_url_service.cc b/components/search_engines/template_url_service.cc index 29687f2..246072ab 100644 --- a/components/search_engines/template_url_service.cc +++ b/components/search_engines/template_url_service.cc
@@ -603,13 +603,6 @@ NotifyObservers(); } -void TemplateURLService::UpdateTemplateURLVisitTime(TemplateURL* url) { - TemplateURLData data(url->data()); - data.last_visited = clock_->Now(); - Update(url, TemplateURL(data)); -} - - bool TemplateURLService::CanMakeDefault(const TemplateURL* url) { return ((default_search_provider_source_ == DefaultSearchManager::FROM_USER) || @@ -1808,6 +1801,12 @@ UpdateTemplateURLVisitTime(visited_url); } +void TemplateURLService::UpdateTemplateURLVisitTime(TemplateURL* url) { + TemplateURLData data(url->data()); + data.last_visited = clock_->Now(); + Update(url, TemplateURL(data)); +} + void TemplateURLService::AddTabToSearchVisit(const TemplateURL& t_url) { // Only add visits for entries the user hasn't modified. If the user modified // the entry the keyword may no longer correspond to the host name. It may be
diff --git a/components/search_engines/template_url_service.h b/components/search_engines/template_url_service.h index d7e4e87..86045e0 100644 --- a/components/search_engines/template_url_service.h +++ b/components/search_engines/template_url_service.h
@@ -244,9 +244,6 @@ const base::string16& keyword, const std::string& search_url); - // Updates the last_visited time of |url| to the current time. - void UpdateTemplateURLVisitTime(TemplateURL* url); - // Return true if the given |url| can be made the default. This returns false // regardless of |url| if the default search provider is managed by policy or // controlled by an extension. @@ -429,6 +426,7 @@ friend class InstantUnitTestBase; friend class TemplateURLServiceTestUtil; + friend class TemplateUrlServiceAndroid; using GUIDToTURL = std::map<std::string, TemplateURL*>; @@ -573,6 +571,9 @@ // SetKeywordSearchTermsForURL is invoked. void UpdateKeywordSearchTermsForURL(const URLVisitedDetails& details); + // Updates the last_visited time of |url| to the current time. + void UpdateTemplateURLVisitTime(TemplateURL* url); + // If necessary, generates a visit for the site http:// + t_url.keyword(). void AddTabToSearchVisit(const TemplateURL& t_url);
diff --git a/components/security_interstitials/core/browser/resources/interstitial_v2.css b/components/security_interstitials/core/browser/resources/interstitial_v2.css index d67982a..c1696f92b 100644 --- a/components/security_interstitials/core/browser/resources/interstitial_v2.css +++ b/components/security_interstitials/core/browser/resources/interstitial_v2.css
@@ -402,6 +402,7 @@ font-weight: 600; margin: 6px 0; text-transform: uppercase; + transform: translateZ(0); } .nav-wrapper {
diff --git a/components/sync/BUILD.gn b/components/sync/BUILD.gn index 3c047ea..f05878b0 100644 --- a/components/sync/BUILD.gn +++ b/components/sync/BUILD.gn
@@ -436,6 +436,8 @@ "model/metadata_batch.cc", "model/metadata_batch.h", "model/metadata_change_list.h", + "model/model_error.cc", + "model/model_error.h", "model/model_type_change_processor.cc", "model/model_type_change_processor.h", "model/model_type_debug_info.cc", @@ -680,6 +682,8 @@ "engine/fake_model_type_processor.h", "engine/fake_sync_manager.cc", "engine/fake_sync_manager.h", + "engine/sync_engine_host_stub.cc", + "engine/sync_engine_host_stub.h", "engine/sync_manager_factory_for_profile_sync_test.cc", "engine/sync_manager_factory_for_profile_sync_test.h", "engine/test_engine_components_factory.cc",
diff --git a/components/sync/device_info/device_info_sync_bridge.cc b/components/sync/device_info/device_info_sync_bridge.cc index e1387abe..95def977 100644 --- a/components/sync/device_info/device_info_sync_bridge.cc +++ b/components/sync/device_info/device_info_sync_bridge.cc
@@ -18,8 +18,8 @@ #include "components/sync/device_info/device_info_util.h" #include "components/sync/model/entity_change.h" #include "components/sync/model/metadata_batch.h" +#include "components/sync/model/model_error.h" #include "components/sync/model/mutable_data_batch.h" -#include "components/sync/model/sync_error.h" #include "components/sync/protocol/model_type_state.pb.h" #include "components/sync/protocol/sync.pb.h" @@ -113,7 +113,7 @@ return WriteBatch::CreateMetadataChangeList(); } -SyncError DeviceInfoSyncBridge::MergeSyncData( +ModelError DeviceInfoSyncBridge::MergeSyncData( std::unique_ptr<MetadataChangeList> metadata_change_list, EntityDataMap entity_data_map) { DCHECK(has_provider_initialized_); @@ -123,7 +123,7 @@ // If our dependency was yanked out from beneath us, we cannot correctly // handle this request, and all our data will be deleted soon. if (local_info == nullptr) { - return SyncError(); + return ModelError(); } // Local data should typically be near empty, with the only possible value @@ -164,10 +164,10 @@ batch->TransferMetadataChanges(std::move(metadata_change_list)); CommitAndNotify(std::move(batch), has_changes); - return SyncError(); + return ModelError(); } -SyncError DeviceInfoSyncBridge::ApplySyncChanges( +ModelError DeviceInfoSyncBridge::ApplySyncChanges( std::unique_ptr<MetadataChangeList> metadata_change_list, EntityChangeList entity_changes) { DCHECK(has_provider_initialized_); @@ -176,7 +176,7 @@ // If our dependency was yanked out from beneath us, we cannot correctly // handle this request, and all our data will be deleted soon. 
if (local_info == nullptr) { - return SyncError(); + return ModelError(); } std::unique_ptr<WriteBatch> batch = store_->CreateWriteBatch(); @@ -203,7 +203,7 @@ batch->TransferMetadataChanges(std::move(metadata_change_list)); CommitAndNotify(std::move(batch), has_changes); - return SyncError(); + return ModelError(); } void DeviceInfoSyncBridge::GetData(StorageKeyList storage_keys, @@ -216,7 +216,7 @@ batch->Put(key, CopyToEntityData(*iter->second)); } } - callback.Run(SyncError(), std::move(batch)); + callback.Run(std::move(batch)); } void DeviceInfoSyncBridge::GetAllData(DataCallback callback) { @@ -224,7 +224,7 @@ for (const auto& kv : all_data_) { batch->Put(kv.first, CopyToEntityData(*kv.second)); } - callback.Run(SyncError(), std::move(batch)); + callback.Run(std::move(batch)); } std::string DeviceInfoSyncBridge::GetClientTag(const EntityData& entity_data) { @@ -343,7 +343,8 @@ store_->ReadAllData(base::Bind(&DeviceInfoSyncBridge::OnReadAllData, base::AsWeakPtr(this))); } else { - ReportStartupErrorToSync("ModelTypeStore creation failed."); + change_processor()->ReportError(FROM_HERE, + "ModelTypeStore creation failed."); } } @@ -351,7 +352,7 @@ Result result, std::unique_ptr<RecordList> record_list) { if (result != Result::SUCCESS) { - ReportStartupErrorToSync("Initial load of data failed."); + change_processor()->ReportError(FROM_HERE, "Initial load of data failed."); return; } @@ -361,7 +362,9 @@ if (specifics->ParseFromString(r.value)) { all_data_[specifics->cache_guid()] = std::move(specifics); } else { - ReportStartupErrorToSync("Failed to deserialize specifics."); + change_processor()->ReportError(FROM_HERE, + "Failed to deserialize specifics."); + return; } } @@ -377,16 +380,20 @@ } void DeviceInfoSyncBridge::OnReadAllMetadata( - SyncError error, + ModelError error, std::unique_ptr<MetadataBatch> metadata_batch) { - change_processor()->OnMetadataLoaded(error, std::move(metadata_batch)); + if (error.IsSet()) { + change_processor()->ReportError(error); + 
return; + } + + change_processor()->OnMetadataLoaded(std::move(metadata_batch)); ReconcileLocalAndStored(); } void DeviceInfoSyncBridge::OnCommit(Result result) { if (result != Result::SUCCESS) { - change_processor()->CreateAndUploadError(FROM_HERE, - "Failed a write to store."); + change_processor()->ReportError(FROM_HERE, "Failed a write to store."); } } @@ -472,11 +479,4 @@ }); } -void DeviceInfoSyncBridge::ReportStartupErrorToSync(const std::string& msg) { - // TODO(skym): Shouldn't need to log this here, reporting should always log. - LOG(WARNING) << msg; - change_processor()->OnMetadataLoaded( - change_processor()->CreateAndUploadError(FROM_HERE, msg), nullptr); -} - } // namespace syncer
diff --git a/components/sync/device_info/device_info_sync_bridge.h b/components/sync/device_info/device_info_sync_bridge.h index 5af1995..433aca3 100644 --- a/components/sync/device_info/device_info_sync_bridge.h +++ b/components/sync/device_info/device_info_sync_bridge.h
@@ -17,6 +17,7 @@ #include "base/timer/timer.h" #include "components/sync/device_info/device_info_tracker.h" #include "components/sync/device_info/local_device_info_provider.h" +#include "components/sync/model/model_error.h" #include "components/sync/model/model_type_store.h" #include "components/sync/model/model_type_sync_bridge.h" @@ -26,8 +27,6 @@ namespace syncer { -class SyncError; - // Sync bridge implementation for DEVICE_INFO model type. Handles storage of // device info and associated sync metadata, applying/merging foreign changes, // and allows public read access. @@ -44,10 +43,10 @@ // ModelTypeSyncBridge implementation. std::unique_ptr<MetadataChangeList> CreateMetadataChangeList() override; - SyncError MergeSyncData( + ModelError MergeSyncData( std::unique_ptr<MetadataChangeList> metadata_change_list, EntityDataMap entity_data_map) override; - SyncError ApplySyncChanges( + ModelError ApplySyncChanges( std::unique_ptr<MetadataChangeList> metadata_change_list, EntityChangeList entity_changes) override; void GetData(StorageKeyList storage_keys, DataCallback callback) override; @@ -91,7 +90,7 @@ std::unique_ptr<ModelTypeStore> store); void OnReadAllData(ModelTypeStore::Result result, std::unique_ptr<ModelTypeStore::RecordList> record_list); - void OnReadAllMetadata(SyncError error, + void OnReadAllMetadata(ModelError error, std::unique_ptr<MetadataBatch> metadata_batch); void OnCommit(ModelTypeStore::Result result); @@ -119,10 +118,6 @@ // allow unit tests to control expected results. int CountActiveDevices(const base::Time now) const; - // Report an error starting up to sync if it tries to connect to this - // datatype, since these errors prevent us from knowing if sync is enabled. - void ReportStartupErrorToSync(const std::string& msg); - // |local_device_info_provider_| isn't owned. const LocalDeviceInfoProvider* const local_device_info_provider_;
diff --git a/components/sync/device_info/device_info_sync_bridge_unittest.cc b/components/sync/device_info/device_info_sync_bridge_unittest.cc index 3ed624e..75f94aa 100644 --- a/components/sync/device_info/device_info_sync_bridge_unittest.cc +++ b/components/sync/device_info/device_info_sync_bridge_unittest.cc
@@ -115,9 +115,7 @@ } void VerifyDataBatch(std::map<std::string, DeviceInfoSpecifics> expected, - SyncError error, std::unique_ptr<DataBatch> batch) { - EXPECT_FALSE(error.IsSet()); while (batch->HasNext()) { const KeyAndData& pair = batch->Next(); auto iter = expected.find(pair.first); @@ -192,8 +190,7 @@ delete_set_.insert(storage_key); } - void OnMetadataLoaded(SyncError error, - std::unique_ptr<MetadataBatch> batch) override { + void OnMetadataLoaded(std::unique_ptr<MetadataBatch> batch) override { std::swap(metadata_, batch); } @@ -501,7 +498,7 @@ TEST_F(DeviceInfoSyncBridgeTest, ApplySyncChangesEmpty) { InitializeAndPump(); EXPECT_EQ(1, change_count()); - const SyncError error = bridge()->ApplySyncChanges( + const ModelError error = bridge()->ApplySyncChanges( bridge()->CreateMetadataChangeList(), EntityChangeList()); EXPECT_FALSE(error.IsSet()); EXPECT_EQ(1, change_count()); @@ -512,7 +509,7 @@ EXPECT_EQ(1, change_count()); const DeviceInfoSpecifics specifics = CreateSpecifics(1); - const SyncError error_on_add = bridge()->ApplySyncChanges( + const ModelError error_on_add = bridge()->ApplySyncChanges( bridge()->CreateMetadataChangeList(), EntityAddList({specifics})); EXPECT_FALSE(error_on_add.IsSet()); @@ -522,7 +519,7 @@ VerifyEqual(specifics, *info.get()); EXPECT_EQ(2, change_count()); - const SyncError error_on_delete = bridge()->ApplySyncChanges( + const ModelError error_on_delete = bridge()->ApplySyncChanges( bridge()->CreateMetadataChangeList(), {EntityChange::CreateDelete(specifics.cache_guid())}); @@ -541,7 +538,7 @@ bridge()->CreateMetadataChangeList(); metadata_changes->UpdateModelTypeState(state); - const SyncError error = bridge()->ApplySyncChanges( + const ModelError error = bridge()->ApplySyncChanges( std::move(metadata_changes), EntityAddList({specifics})); EXPECT_FALSE(error.IsSet()); EXPECT_EQ(2, change_count()); @@ -578,12 +575,12 @@ EXPECT_LT(Time::Now() - TimeDelta::FromMinutes(1), last_updated); EXPECT_GT(Time::Now() + 
TimeDelta::FromMinutes(1), last_updated); - const SyncError error_on_add = bridge()->ApplySyncChanges( + const ModelError error_on_add = bridge()->ApplySyncChanges( bridge()->CreateMetadataChangeList(), EntityAddList({specifics})); EXPECT_FALSE(error_on_add.IsSet()); EXPECT_EQ(1, change_count()); - const SyncError error_on_delete = bridge()->ApplySyncChanges( + const ModelError error_on_delete = bridge()->ApplySyncChanges( bridge()->CreateMetadataChangeList(), {EntityChange::CreateDelete(specifics.cache_guid())}); EXPECT_FALSE(error_on_delete.IsSet()); @@ -593,7 +590,7 @@ TEST_F(DeviceInfoSyncBridgeTest, ApplyDeleteNonexistent) { InitializeAndPump(); EXPECT_EQ(1, change_count()); - const SyncError error = + const ModelError error = bridge()->ApplySyncChanges(bridge()->CreateMetadataChangeList(), {EntityChange::CreateDelete("guid")}); EXPECT_FALSE(error.IsSet()); @@ -608,7 +605,7 @@ const DeviceInfoSpecifics specifics = CreateSpecifics(1, Time::Now()); local_device()->Clear(); - SyncError error = bridge()->ApplySyncChanges( + ModelError error = bridge()->ApplySyncChanges( bridge()->CreateMetadataChangeList(), EntityAddList({specifics})); EXPECT_FALSE(error.IsSet()); EXPECT_EQ(1u, bridge()->GetAllDeviceInfo().size()); @@ -622,7 +619,7 @@ TEST_F(DeviceInfoSyncBridgeTest, MergeEmpty) { InitializeAndPump(); EXPECT_EQ(1, change_count()); - const SyncError error = bridge()->MergeSyncData( + const ModelError error = bridge()->MergeSyncData( bridge()->CreateMetadataChangeList(), EntityDataMap()); EXPECT_FALSE(error.IsSet()); EXPECT_EQ(1, change_count()); @@ -654,7 +651,7 @@ bridge()->CreateMetadataChangeList(); metadata_changes->UpdateModelTypeState(state); - const SyncError error = bridge()->MergeSyncData( + const ModelError error = bridge()->MergeSyncData( std::move(metadata_changes), InlineEntityDataMap({conflict_remote, unique_remote})); EXPECT_FALSE(error.IsSet()); @@ -689,7 +686,7 @@ WriteToStore({specifics}); InitializeAndPump(); - const SyncError error = 
bridge()->MergeSyncData( + const ModelError error = bridge()->MergeSyncData( bridge()->CreateMetadataChangeList(), InlineEntityDataMap({specifics})); EXPECT_FALSE(error.IsSet()); EXPECT_EQ(0, change_count()); @@ -704,7 +701,7 @@ // The message loop is never pumped, which means local data/metadata is never // loaded, and thus reconcile is never called. The bridge should ignore this // EntityData because its cache guid is the same the local device's. - const SyncError error = bridge()->MergeSyncData( + const ModelError error = bridge()->MergeSyncData( bridge()->CreateMetadataChangeList(), InlineEntityDataMap({CreateSpecifics(kDefaultLocalSuffix)})); EXPECT_FALSE(error.IsSet()); @@ -720,7 +717,7 @@ const DeviceInfoSpecifics specifics = CreateSpecifics(1, Time::Now()); local_device()->Clear(); - SyncError error = bridge()->MergeSyncData( + ModelError error = bridge()->MergeSyncData( bridge()->CreateMetadataChangeList(), InlineEntityDataMap({specifics})); EXPECT_FALSE(error.IsSet()); EXPECT_EQ(1u, bridge()->GetAllDeviceInfo().size()); @@ -798,7 +795,7 @@ EXPECT_EQ(1, change_count()); const DeviceInfoSpecifics specifics = CreateSpecifics(1); - const SyncError error = bridge()->ApplySyncChanges( + const ModelError error = bridge()->ApplySyncChanges( bridge()->CreateMetadataChangeList(), EntityAddList({specifics})); EXPECT_FALSE(error.IsSet());
diff --git a/components/sync/driver/data_type_controller.h b/components/sync/driver/data_type_controller.h index 76d0c63b..1973b84 100644 --- a/components/sync/driver/data_type_controller.h +++ b/components/sync/driver/data_type_controller.h
@@ -100,9 +100,10 @@ // Registers with sync backend if needed. This function is called by // DataTypeManager before downloading initial data. Non-blocking types need to - // pass activation context containing progress marker to sync backend before - // initial download starts. - virtual void RegisterWithBackend(ModelTypeConfigurer* configurer) = 0; + // pass activation context containing progress marker to sync backend and use + // |set_downloaded| to inform the manager whether their initial sync is done. + virtual void RegisterWithBackend(base::Callback<void(bool)> set_downloaded, + ModelTypeConfigurer* configurer) = 0; // Will start a potentially asynchronous operation to perform the // model association. Once the model association is done the callback will
diff --git a/components/sync/driver/data_type_manager_impl.cc b/components/sync/driver/data_type_manager_impl.cc index 15c30f1..071d4c42 100644 --- a/components/sync/driver/data_type_manager_impl.cc +++ b/components/sync/driver/data_type_manager_impl.cc
@@ -6,6 +6,7 @@ #include <algorithm> #include <functional> +#include <utility> #include "base/bind.h" #include "base/bind_helpers.h" @@ -52,7 +53,8 @@ const DataTypeEncryptionHandler* encryption_handler, ModelTypeConfigurer* configurer, DataTypeManagerObserver* observer) - : sync_client_(sync_client), + : downloaded_types_(initial_types), + sync_client_(sync_client), configurer_(configurer), controllers_(controllers), state_(DataTypeManager::STOPPED), @@ -165,12 +167,36 @@ // successfully. Such types shouldn't be in an error state at the same // time. DCHECK(!data_type_status_table_.GetFailedTypes().Has(dtc->type())); - dtc->RegisterWithBackend(configurer_); + dtc->RegisterWithBackend( + base::Bind(&DataTypeManagerImpl::SetTypeDownloaded, + base::Unretained(this), dtc->type()), + configurer_); } } } -ModelTypeConfigurer::DataTypeConfigStateMap +// static +ModelTypeSet DataTypeManagerImpl::GetDataTypesInState( + DataTypeConfigState state, + const DataTypeConfigStateMap& state_map) { + ModelTypeSet types; + for (const auto& kv : state_map) { + if (kv.second == state) + types.Put(kv.first); + } + return types; +} + +// static +void DataTypeManagerImpl::SetDataTypesState(DataTypeConfigState state, + ModelTypeSet types, + DataTypeConfigStateMap* state_map) { + for (ModelTypeSet::Iterator it = types.First(); it.Good(); it.Inc()) { + (*state_map)[it.Get()] = state; + } +} + +DataTypeManagerImpl::DataTypeConfigStateMap DataTypeManagerImpl::BuildDataTypeConfigStateMap( const ModelTypeSet& types_being_configured) const { // 1. Get the failed types (due to fatal, crypto, and unready errors). 
@@ -207,22 +233,14 @@ DVLOG(1) << "Configuring: " << ModelTypeSetToString(to_configure); DVLOG(1) << "Disabling: " << ModelTypeSetToString(disabled_types); - ModelTypeConfigurer::DataTypeConfigStateMap config_state_map; - ModelTypeConfigurer::SetDataTypesState( - ModelTypeConfigurer::CONFIGURE_INACTIVE, enabled_types, - &config_state_map); - ModelTypeConfigurer::SetDataTypesState(ModelTypeConfigurer::CONFIGURE_ACTIVE, - to_configure, &config_state_map); - ModelTypeConfigurer::SetDataTypesState(ModelTypeConfigurer::CONFIGURE_CLEAN, - clean_types, &config_state_map); - ModelTypeConfigurer::SetDataTypesState(ModelTypeConfigurer::DISABLED, - disabled_types, &config_state_map); - ModelTypeConfigurer::SetDataTypesState(ModelTypeConfigurer::FATAL, - fatal_types, &config_state_map); - ModelTypeConfigurer::SetDataTypesState(ModelTypeConfigurer::CRYPTO, - crypto_types, &config_state_map); - ModelTypeConfigurer::SetDataTypesState(ModelTypeConfigurer::UNREADY, - unready_types, &config_state_map); + DataTypeConfigStateMap config_state_map; + SetDataTypesState(CONFIGURE_INACTIVE, enabled_types, &config_state_map); + SetDataTypesState(CONFIGURE_ACTIVE, to_configure, &config_state_map); + SetDataTypesState(CONFIGURE_CLEAN, clean_types, &config_state_map); + SetDataTypesState(DISABLED, disabled_types, &config_state_map); + SetDataTypesState(FATAL, fatal_types, &config_state_map); + SetDataTypesState(CRYPTO, crypto_types, &config_state_map); + SetDataTypesState(UNREADY, unready_types, &config_state_map); return config_state_map; } @@ -447,15 +465,21 @@ if (download_types_queue_.empty()) return; - // Tell the backend about the new set of data types we wish to sync. - // The task will be invoked when updates are downloaded. 
- ModelTypeSet ready_types = configurer_->ConfigureDataTypes( - last_configure_reason_, - BuildDataTypeConfigStateMap(download_types_queue_.front()), - base::Bind(&DataTypeManagerImpl::DownloadReady, - weak_ptr_factory_.GetWeakPtr(), download_types_queue_.front()), - base::Bind(&DataTypeManagerImpl::OnDownloadRetry, - weak_ptr_factory_.GetWeakPtr())); + ModelTypeConfigurer::ConfigureParams config_params; + ModelTypeSet ready_types = PrepareConfigureParams(&config_params); + + // The engine's state was initially derived from the types detected to have + // been downloaded in the database. Afterwards it is modified only by this + // function. We expect |downloaded_types_| to remain consistent because + // configuration requests are never aborted; they are retried until they + // succeed or the engine is shut down. + // + // Only one configure is allowed at a time. This is guaranteed by our callers. + // The sync engine requests one configure as it is initializing and waits for + // it to complete. After engine initialization, all configurations pass + // through the DataTypeManager, and we are careful to never send a new + // configure request until the current request succeeds. + configurer_->ConfigureDataTypes(std::move(config_params)); AssociationTypesInfo association_info; association_info.types = download_types_queue_.front(); @@ -469,6 +493,121 @@ StartNextAssociation(READY_AT_CONFIG); } +ModelTypeSet DataTypeManagerImpl::PrepareConfigureParams( + ModelTypeConfigurer::ConfigureParams* params) { + // Divide up the types into their corresponding actions: + // - Types which are newly enabled are downloaded. + // - Types which have encountered a fatal error (fatal_types) are deleted + // from the directory and journaled in the delete journal. + // - Types which have encountered a cryptographer error (crypto_types) are + // unapplied (local state is purged but sync state is not). 
+ // - All other types not in the routing info (types just disabled) are deleted + // from the directory. + // - Everything else (enabled types and already disabled types) is not + // touched. + const DataTypeConfigStateMap config_state_map = + BuildDataTypeConfigStateMap(download_types_queue_.front()); + const ModelTypeSet fatal_types = GetDataTypesInState(FATAL, config_state_map); + const ModelTypeSet crypto_types = + GetDataTypesInState(CRYPTO, config_state_map); + const ModelTypeSet unready_types = + GetDataTypesInState(UNREADY, config_state_map); + const ModelTypeSet active_types = + GetDataTypesInState(CONFIGURE_ACTIVE, config_state_map); + const ModelTypeSet clean_types = + GetDataTypesInState(CONFIGURE_CLEAN, config_state_map); + const ModelTypeSet inactive_types = + GetDataTypesInState(CONFIGURE_INACTIVE, config_state_map); + + ModelTypeSet enabled_types = Union(active_types, clean_types); + ModelTypeSet disabled_types = GetDataTypesInState(DISABLED, config_state_map); + disabled_types.PutAll(fatal_types); + disabled_types.PutAll(crypto_types); + disabled_types.PutAll(unready_types); + + DCHECK(Intersection(enabled_types, disabled_types).Empty()); + + // The sync engine's enabled types will be updated by adding |enabled_types| + // to the list then removing |disabled_types|. Any types which are not in + // either of those sets will remain untouched. Types which were not in + // |downloaded_types_| previously are not fully downloaded, so we must ask the + // engine to download them. Any newly supported datatypes won't have been in + // |downloaded_types_|, so they will also be downloaded if they are enabled. 
+ ModelTypeSet types_to_download = Difference(enabled_types, downloaded_types_); + downloaded_types_.PutAll(enabled_types); + downloaded_types_.RemoveAll(disabled_types); + + types_to_download.PutAll(clean_types); + types_to_download.RemoveAll(ProxyTypes()); + if (!types_to_download.Empty()) + types_to_download.Put(NIGORI); + + // TODO(sync): crbug.com/137550. + // It's dangerous to configure types that have progress markers. Types with + // progress markers can trigger a MIGRATION_DONE response. We are not + // prepared to handle a migration during a configure, so we must ensure that + // all our types_to_download actually contain no data before we sync them. + // + // One common way to end up in this situation used to be types which + // downloaded some or all of their data but have not applied it yet. We avoid + // problems with those types by purging the data of any such partially synced + // types soon after we load the directory. + // + // Another possible scenario is that we have newly supported or newly enabled + // data types being downloaded here but the nigori type, which is always + // included in any GetUpdates request, requires migration. The server has + // code to detect this scenario based on the configure reason, the fact that + // the nigori type is the only requested type which requires migration, and + // that the requested types list includes at least one non-nigori type. It + // will not send a MIGRATION_DONE response in that case. We still need to be + // careful to not send progress markers for non-nigori types, though. If a + // non-nigori type in the request requires migration, a MIGRATION_DONE + // response will be sent. + + ModelTypeSet types_to_purge = + Difference(ModelTypeSet::All(), downloaded_types_); + // Include clean_types in types_to_purge, they are part of + // |downloaded_types_|, but still need to be cleared. 
+ DCHECK(downloaded_types_.HasAll(clean_types)); + types_to_purge.PutAll(clean_types); + types_to_purge.RemoveAll(inactive_types); + types_to_purge.RemoveAll(unready_types); + + // If a type has already been disabled and unapplied or journaled, it will + // not be part of the |types_to_purge| set, and therefore does not need + // to be acted on again. + ModelTypeSet types_to_journal = Intersection(fatal_types, types_to_purge); + ModelTypeSet unapply_types = Union(crypto_types, clean_types); + unapply_types.RetainAll(types_to_purge); + + DCHECK(Intersection(downloaded_types_, types_to_journal).Empty()); + DCHECK(Intersection(downloaded_types_, crypto_types).Empty()); + // |downloaded_types_| was already updated to include all enabled types. + DCHECK(downloaded_types_.HasAll(types_to_download)); + + DVLOG(1) << "Types " << ModelTypeSetToString(types_to_download) + << " added; calling ConfigureDataTypes"; + + params->reason = last_configure_reason_; + params->enabled_types = enabled_types; + params->disabled_types = disabled_types; + params->to_download = types_to_download; + params->to_purge = types_to_purge; + params->to_journal = types_to_journal; + params->to_unapply = unapply_types; + params->ready_task = + base::Bind(&DataTypeManagerImpl::DownloadReady, + weak_ptr_factory_.GetWeakPtr(), download_types_queue_.front()); + params->retry_callback = base::Bind(&DataTypeManagerImpl::OnDownloadRetry, + weak_ptr_factory_.GetWeakPtr()); + + DCHECK(Intersection(active_types, types_to_purge).Empty()); + DCHECK(Intersection(active_types, fatal_types).Empty()); + DCHECK(Intersection(active_types, unapply_types).Empty()); + DCHECK(Intersection(active_types, inactive_types).Empty()); + return Difference(active_types, types_to_download); +} + void DataTypeManagerImpl::StartNextAssociation(AssociationGroup group) { CHECK(!association_types_queue_.empty()); @@ -701,4 +840,12 @@ data_type_status_table_.GetFailedTypes()); } +void DataTypeManagerImpl::SetTypeDownloaded(ModelType 
type, bool downloaded) { + if (downloaded) { + downloaded_types_.Put(type); + } else { + downloaded_types_.Remove(type); + } +} + } // namespace syncer
diff --git a/components/sync/driver/data_type_manager_impl.h b/components/sync/driver/data_type_manager_impl.h index b20f75b..2f64a457 100644 --- a/components/sync/driver/data_type_manager_impl.h +++ b/components/sync/driver/data_type_manager_impl.h
@@ -74,7 +74,30 @@ return &model_association_manager_; } + protected: + // Returns the priority types (control + priority user types). + // Virtual for overriding during tests. + virtual ModelTypeSet GetPriorityTypes() const; + + // The set of types whose initial download of sync data has completed. + ModelTypeSet downloaded_types_; + private: + enum DataTypeConfigState { + CONFIGURE_ACTIVE, // Actively being configured. Data of such types + // will be downloaded if not present locally. + CONFIGURE_INACTIVE, // Already configured or to be configured in future. + // Data of such types is left as it is, no + // downloading or purging. + CONFIGURE_CLEAN, // Actively being configured but requiring unapply + // and GetUpdates first (e.g. for persistence errors). + DISABLED, // Not syncing. Disabled by user. + FATAL, // Not syncing due to unrecoverable error. + CRYPTO, // Not syncing due to a cryptographer error. + UNREADY, // Not syncing due to transient error. + }; + using DataTypeConfigStateMap = std::map<ModelType, DataTypeConfigState>; + // Helper enum for identifying which types within a priority group to // associate. enum AssociationGroup { @@ -89,15 +112,24 @@ UNREADY_AT_CONFIG, }; - friend class TestDataTypeManager; + // Return model types in |state_map| that match |state|. + static ModelTypeSet GetDataTypesInState( + DataTypeConfigState state, + const DataTypeConfigStateMap& state_map); + + // Set state of |types| in |state_map| to |state|. + static void SetDataTypesState(DataTypeConfigState state, + ModelTypeSet types, + DataTypeConfigStateMap* state_map); + + // Prepare the parameters for the configurer's configuration. Returns the set + // of types that are already ready for association. + ModelTypeSet PrepareConfigureParams( + ModelTypeConfigurer::ConfigureParams* params); // Abort configuration and stop all data types due to configuration errors. void Abort(ConfigureStatus status); - // Returns the priority types (control + priority user types). 
- // Virtual for overriding during tests. - virtual ModelTypeSet GetPriorityTypes() const; - // Divide |types| into sets by their priorities and return the sets from // high priority to low priority. TypeSetPriorityList PrioritizeTypes(const ModelTypeSet& types); @@ -125,7 +157,7 @@ // Calls data type controllers of requested types to register with backend. void RegisterTypesWithBackend(); - ModelTypeConfigurer::DataTypeConfigStateMap BuildDataTypeConfigStateMap( + DataTypeConfigStateMap BuildDataTypeConfigStateMap( const ModelTypeSet& types_being_configured) const; // Start download of next set of types in |download_types_queue_| (if @@ -144,6 +176,9 @@ // Returns the currently enabled types. ModelTypeSet GetEnabledTypes() const; + // Adds or removes |type| from |downloaded_types_| based on |downloaded|. + void SetTypeDownloaded(ModelType type, bool downloaded); + SyncClient* sync_client_; ModelTypeConfigurer* configurer_;
diff --git a/components/sync/driver/data_type_manager_impl_unittest.cc b/components/sync/driver/data_type_manager_impl_unittest.cc index b1d450ae..af1bf7e 100644 --- a/components/sync/driver/data_type_manager_impl_unittest.cc +++ b/components/sync/driver/data_type_manager_impl_unittest.cc
@@ -23,11 +23,13 @@ namespace { -// Helper for unioning with control types. +// Helpers for unioning with control types. +ModelTypeSet AddControlTypesTo(ModelType type) { + return Union(ControlTypes(), ModelTypeSet(type)); +} + ModelTypeSet AddControlTypesTo(ModelTypeSet types) { - ModelTypeSet result = ControlTypes(); - result.PutAll(types); - return result; + return Union(ControlTypes(), types); } DataTypeStatusTable BuildStatusTable(ModelTypeSet crypto_errors, @@ -70,29 +72,12 @@ // callback passed into ConfigureDataTypes. class FakeModelTypeConfigurer : public ModelTypeConfigurer { public: - FakeModelTypeConfigurer() : configure_call_count_(0) {} + FakeModelTypeConfigurer() {} ~FakeModelTypeConfigurer() override {} - ModelTypeSet ConfigureDataTypes( - ConfigureReason reason, - const DataTypeConfigStateMap& config_state_map, - const base::Callback<void(ModelTypeSet, ModelTypeSet)>& ready_task, - const base::Callback<void()>& retry_callback) override { + void ConfigureDataTypes(ConfigureParams params) override { configure_call_count_++; - last_ready_task_ = ready_task; - - for (auto iter = expected_configure_types_.begin(); - iter != expected_configure_types_.end(); ++iter) { - if (!iter->second.Empty()) { - EXPECT_EQ(iter->second, - GetDataTypesInState(iter->first, config_state_map)) - << "State " << iter->first << " : " - << ModelTypeSetToString(iter->second) << " v.s. " - << ModelTypeSetToString( - GetDataTypesInState(iter->first, config_state_map)); - } - } - return ready_types_; + last_params_ = std::move(params); } void ActivateDirectoryDataType(ModelType type, @@ -100,6 +85,7 @@ ChangeProcessor* change_processor) override { activated_types_.Put(type); } + void DeactivateDirectoryDataType(ModelType type) override { activated_types_.Remove(type); } @@ -114,27 +100,16 @@ // TODO(stanisc): crbug.com/515962: Add test coverage. 
} - base::Callback<void(ModelTypeSet, ModelTypeSet)> last_ready_task() const { - return last_ready_task_; - } - - void set_expected_configure_types(DataTypeConfigState config_state, - ModelTypeSet types) { - expected_configure_types_[config_state] = types; - } - - void set_ready_types(ModelTypeSet types) { ready_types_ = types; } - const ModelTypeSet activated_types() { return activated_types_; } int configure_call_count() const { return configure_call_count_; } + const ConfigureParams& last_params() const { return last_params_; } + private: - base::Callback<void(ModelTypeSet, ModelTypeSet)> last_ready_task_; - std::map<DataTypeConfigState, ModelTypeSet> expected_configure_types_; ModelTypeSet activated_types_; - ModelTypeSet ready_types_; - int configure_call_count_; + int configure_call_count_ = 0; + ConfigureParams last_params_; }; // DataTypeManagerObserver implementation. @@ -238,11 +213,16 @@ DataTypeManagerImpl::OnModelAssociationDone(result); } - private: + void set_downloaded_types(ModelTypeSet downloaded_types) { + downloaded_types_ = downloaded_types; + } + + protected: ModelTypeSet GetPriorityTypes() const override { return custom_priority_types_; } + private: ModelTypeSet custom_priority_types_ = ControlTypes(); DataTypeManager::ConfigureResult configure_result_; }; @@ -273,18 +253,17 @@ } // Configure the given DTM with the given desired types. - void Configure(DataTypeManagerImpl* dtm, const ModelTypeSet& desired_types) { - dtm->Configure(desired_types, CONFIGURE_REASON_RECONFIGURATION); + void Configure(ModelTypeSet desired_types) { + dtm_->Configure(desired_types, CONFIGURE_REASON_RECONFIGURATION); } // Finish downloading for the given DTM. Should be done only after // a call to Configure(). 
- void FinishDownload(const DataTypeManager& dtm, - ModelTypeSet types_to_configure, + void FinishDownload(ModelTypeSet types_to_configure, ModelTypeSet failed_download_types) { - EXPECT_EQ(DataTypeManager::CONFIGURING, dtm.state()); - ASSERT_FALSE(configurer_.last_ready_task().is_null()); - configurer_.last_ready_task().Run( + EXPECT_EQ(DataTypeManager::CONFIGURING, dtm_->state()); + ASSERT_FALSE(last_configure_params().ready_task.is_null()); + last_configure_params().ready_task.Run( Difference(types_to_configure, failed_download_types), failed_download_types); } @@ -324,6 +303,10 @@ encryption_handler_.set_encrypted_types(encrypted_types); } + const ModelTypeConfigurer::ConfigureParams& last_configure_params() const { + return configurer_.last_params(); + } + base::MessageLoopForUI ui_loop_; DataTypeController::TypeMap controllers_; TestSyncClient sync_client_; @@ -339,10 +322,10 @@ SetConfigureStartExpectation(); SetConfigureDoneExpectation(DataTypeManager::OK, DataTypeStatusTable()); - Configure(dtm_.get(), ModelTypeSet()); + Configure(ModelTypeSet()); EXPECT_EQ(DataTypeManager::CONFIGURING, dtm_->state()); - FinishDownload(*dtm_, ModelTypeSet(), ModelTypeSet()); + FinishDownload(ModelTypeSet(), ModelTypeSet()); EXPECT_EQ(DataTypeManager::CONFIGURED, dtm_->state()); dtm_->Stop(); @@ -357,11 +340,11 @@ SetConfigureStartExpectation(); SetConfigureDoneExpectation(DataTypeManager::OK, DataTypeStatusTable()); - Configure(dtm_.get(), ModelTypeSet(BOOKMARKS)); + Configure(ModelTypeSet(BOOKMARKS)); EXPECT_EQ(DataTypeManager::CONFIGURING, dtm_->state()); - FinishDownload(*dtm_, ModelTypeSet(), ModelTypeSet()); - FinishDownload(*dtm_, ModelTypeSet(BOOKMARKS), ModelTypeSet()); + FinishDownload(ModelTypeSet(), ModelTypeSet()); + FinishDownload(ModelTypeSet(BOOKMARKS), ModelTypeSet()); EXPECT_EQ(DataTypeManager::CONFIGURING, dtm_->state()); GetController(BOOKMARKS)->FinishStart(DataTypeController::OK); @@ -384,14 +367,15 @@ 
SetConfigureDoneExpectation(DataTypeManager::ABORTED, DataTypeStatusTable()); - Configure(dtm_.get(), ModelTypeSet(BOOKMARKS)); + Configure(ModelTypeSet(BOOKMARKS)); EXPECT_EQ(DataTypeManager::CONFIGURING, dtm_->state()); dtm_->Stop(); EXPECT_EQ(DataTypeManager::STOPPED, dtm_->state()); } - configurer_.last_ready_task().Run(ModelTypeSet(BOOKMARKS), ModelTypeSet()); + last_configure_params().ready_task.Run(ModelTypeSet(BOOKMARKS), + ModelTypeSet()); EXPECT_TRUE(configurer_.activated_types().Empty()); } @@ -407,11 +391,11 @@ SetConfigureDoneExpectation(DataTypeManager::ABORTED, DataTypeStatusTable()); - Configure(dtm_.get(), ModelTypeSet(BOOKMARKS)); + Configure(ModelTypeSet(BOOKMARKS)); EXPECT_EQ(DataTypeManager::CONFIGURING, dtm_->state()); - FinishDownload(*dtm_, ModelTypeSet(), ModelTypeSet()); - FinishDownload(*dtm_, ModelTypeSet(BOOKMARKS), ModelTypeSet()); + FinishDownload(ModelTypeSet(), ModelTypeSet()); + FinishDownload(ModelTypeSet(BOOKMARKS), ModelTypeSet()); EXPECT_EQ(DataTypeManager::CONFIGURING, dtm_->state()); dtm_->Stop(); @@ -436,11 +420,11 @@ SetConfigureDoneExpectation(DataTypeManager::ABORTED, DataTypeStatusTable()); - Configure(dtm_.get(), ModelTypeSet(BOOKMARKS)); + Configure(ModelTypeSet(BOOKMARKS)); EXPECT_EQ(DataTypeManager::CONFIGURING, dtm_->state()); - FinishDownload(*dtm_, ModelTypeSet(), ModelTypeSet()); - FinishDownload(*dtm_, ModelTypeSet(BOOKMARKS), ModelTypeSet()); + FinishDownload(ModelTypeSet(), ModelTypeSet()); + FinishDownload(ModelTypeSet(BOOKMARKS), ModelTypeSet()); EXPECT_EQ(DataTypeManager::CONFIGURING, dtm_->state()); EXPECT_TRUE(configurer_.activated_types().Empty()); @@ -473,11 +457,11 @@ dtm_->set_priority_types(AddControlTypesTo(types)); // Step 1. - Configure(dtm_.get(), types); + Configure(types); EXPECT_EQ(DataTypeManager::CONFIGURING, dtm_->state()); // Step 2. 
- FinishDownload(*dtm_, ModelTypeSet(), ModelTypeSet()); + FinishDownload(ModelTypeSet(), ModelTypeSet()); EXPECT_EQ(DataTypeManager::CONFIGURING, dtm_->state()); // Step 3. @@ -486,7 +470,7 @@ EXPECT_EQ(DataTypeManager::CONFIGURING, dtm_->state()); // Step 4. - FinishDownload(*dtm_, ModelTypeSet(), ModelTypeSet()); + FinishDownload(ModelTypeSet(), ModelTypeSet()); EXPECT_EQ(DataTypeManager::CONFIGURED, dtm_->state()); // Step 5. @@ -511,12 +495,12 @@ SetConfigureDoneExpectation(DataTypeManager::OK, DataTypeStatusTable()); // Step 1. - Configure(dtm_.get(), ModelTypeSet(BOOKMARKS)); + Configure(ModelTypeSet(BOOKMARKS)); EXPECT_EQ(DataTypeManager::CONFIGURING, dtm_->state()); // Step 2. - FinishDownload(*dtm_, ModelTypeSet(), ModelTypeSet()); - FinishDownload(*dtm_, ModelTypeSet(BOOKMARKS), ModelTypeSet()); + FinishDownload(ModelTypeSet(), ModelTypeSet()); + FinishDownload(ModelTypeSet(BOOKMARKS), ModelTypeSet()); EXPECT_EQ(DataTypeManager::CONFIGURING, dtm_->state()); // Step 3. @@ -528,12 +512,12 @@ SetConfigureDoneExpectation(DataTypeManager::OK, DataTypeStatusTable()); // Step 4. - Configure(dtm_.get(), ModelTypeSet(BOOKMARKS, PREFERENCES)); + Configure(ModelTypeSet(BOOKMARKS, PREFERENCES)); EXPECT_EQ(DataTypeManager::CONFIGURING, dtm_->state()); // Step 5. - FinishDownload(*dtm_, ModelTypeSet(), ModelTypeSet()); - FinishDownload(*dtm_, ModelTypeSet(BOOKMARKS, PREFERENCES), ModelTypeSet()); + FinishDownload(ModelTypeSet(), ModelTypeSet()); + FinishDownload(ModelTypeSet(BOOKMARKS, PREFERENCES), ModelTypeSet()); EXPECT_EQ(DataTypeManager::CONFIGURING, dtm_->state()); // Step 6. @@ -564,12 +548,12 @@ SetConfigureDoneExpectation(DataTypeManager::OK, DataTypeStatusTable()); // Step 1. - Configure(dtm_.get(), ModelTypeSet(BOOKMARKS)); + Configure(ModelTypeSet(BOOKMARKS)); EXPECT_EQ(DataTypeManager::CONFIGURING, dtm_->state()); // Step 2. 
- FinishDownload(*dtm_, ModelTypeSet(), ModelTypeSet()); - FinishDownload(*dtm_, ModelTypeSet(BOOKMARKS), ModelTypeSet()); + FinishDownload(ModelTypeSet(), ModelTypeSet()); + FinishDownload(ModelTypeSet(BOOKMARKS), ModelTypeSet()); EXPECT_EQ(DataTypeManager::CONFIGURING, dtm_->state()); // Step 3. @@ -581,12 +565,12 @@ SetConfigureDoneExpectation(DataTypeManager::OK, DataTypeStatusTable()); // Step 4. - Configure(dtm_.get(), ModelTypeSet(PREFERENCES)); + Configure(ModelTypeSet(PREFERENCES)); EXPECT_EQ(DataTypeManager::CONFIGURING, dtm_->state()); // Step 5. - FinishDownload(*dtm_, ModelTypeSet(), ModelTypeSet()); - FinishDownload(*dtm_, ModelTypeSet(PREFERENCES), ModelTypeSet()); + FinishDownload(ModelTypeSet(), ModelTypeSet()); + FinishDownload(ModelTypeSet(PREFERENCES), ModelTypeSet()); EXPECT_EQ(DataTypeManager::CONFIGURING, dtm_->state()); // Step 6. @@ -617,16 +601,16 @@ SetConfigureDoneExpectation(DataTypeManager::OK, DataTypeStatusTable()); // Step 1. - Configure(dtm_.get(), ModelTypeSet(BOOKMARKS)); + Configure(ModelTypeSet(BOOKMARKS)); EXPECT_EQ(DataTypeManager::CONFIGURING, dtm_->state()); // Step 2. - FinishDownload(*dtm_, ModelTypeSet(), ModelTypeSet()); - FinishDownload(*dtm_, ModelTypeSet(BOOKMARKS), ModelTypeSet()); + FinishDownload(ModelTypeSet(), ModelTypeSet()); + FinishDownload(ModelTypeSet(BOOKMARKS), ModelTypeSet()); EXPECT_EQ(DataTypeManager::CONFIGURING, dtm_->state()); // Step 3. - Configure(dtm_.get(), ModelTypeSet(BOOKMARKS, PREFERENCES)); + Configure(ModelTypeSet(BOOKMARKS, PREFERENCES)); EXPECT_EQ(DataTypeManager::CONFIGURING, dtm_->state()); // Step 4. @@ -634,8 +618,8 @@ EXPECT_EQ(DataTypeManager::CONFIGURING, dtm_->state()); // Step 5. 
- FinishDownload(*dtm_, ModelTypeSet(), ModelTypeSet()); - FinishDownload(*dtm_, ModelTypeSet(BOOKMARKS, PREFERENCES), ModelTypeSet()); + FinishDownload(ModelTypeSet(), ModelTypeSet()); + FinishDownload(ModelTypeSet(BOOKMARKS, PREFERENCES), ModelTypeSet()); EXPECT_EQ(DataTypeManager::CONFIGURING, dtm_->state()); // Step 6. @@ -661,11 +645,11 @@ BuildStatusTable(ModelTypeSet(), ModelTypeSet(), ModelTypeSet(), ModelTypeSet(BOOKMARKS))); - Configure(dtm_.get(), ModelTypeSet(BOOKMARKS)); + Configure(ModelTypeSet(BOOKMARKS)); EXPECT_EQ(DataTypeManager::CONFIGURING, dtm_->state()); - FinishDownload(*dtm_, ModelTypeSet(), ModelTypeSet()); - FinishDownload(*dtm_, ModelTypeSet(BOOKMARKS), ModelTypeSet()); + FinishDownload(ModelTypeSet(), ModelTypeSet()); + FinishDownload(ModelTypeSet(BOOKMARKS), ModelTypeSet()); EXPECT_EQ(DataTypeManager::CONFIGURING, dtm_->state()); EXPECT_TRUE(configurer_.activated_types().Empty()); @@ -694,12 +678,12 @@ ModelTypeSet(PREFERENCES))); // Step 1. - Configure(dtm_.get(), ModelTypeSet(BOOKMARKS, PREFERENCES)); + Configure(ModelTypeSet(BOOKMARKS, PREFERENCES)); EXPECT_EQ(DataTypeManager::CONFIGURING, dtm_->state()); // Step 2. - FinishDownload(*dtm_, ModelTypeSet(), ModelTypeSet()); - FinishDownload(*dtm_, ModelTypeSet(BOOKMARKS, PREFERENCES), ModelTypeSet()); + FinishDownload(ModelTypeSet(), ModelTypeSet()); + FinishDownload(ModelTypeSet(BOOKMARKS, PREFERENCES), ModelTypeSet()); EXPECT_EQ(DataTypeManager::CONFIGURING, dtm_->state()); // Step 3. @@ -736,12 +720,12 @@ ModelTypeSet(), ModelTypeSet())); // Step 1. - Configure(dtm_.get(), ModelTypeSet(BOOKMARKS, PREFERENCES)); + Configure(ModelTypeSet(BOOKMARKS, PREFERENCES)); EXPECT_EQ(DataTypeManager::CONFIGURING, dtm_->state()); // Step 2. 
- FinishDownload(*dtm_, ModelTypeSet(), ModelTypeSet()); - FinishDownload(*dtm_, ModelTypeSet(BOOKMARKS, PREFERENCES), ModelTypeSet()); + FinishDownload(ModelTypeSet(), ModelTypeSet()); + FinishDownload(ModelTypeSet(BOOKMARKS, PREFERENCES), ModelTypeSet()); EXPECT_EQ(DataTypeManager::CONFIGURING, dtm_->state()); // Step 3. @@ -754,8 +738,8 @@ EXPECT_EQ(DataTypeManager::CONFIGURING, dtm_->state()); // Step 5. - FinishDownload(*dtm_, ModelTypeSet(), ModelTypeSet()); - FinishDownload(*dtm_, ModelTypeSet(BOOKMARKS), ModelTypeSet()); + FinishDownload(ModelTypeSet(), ModelTypeSet()); + FinishDownload(ModelTypeSet(BOOKMARKS), ModelTypeSet()); EXPECT_EQ(DataTypeManager::CONFIGURED, dtm_->state()); EXPECT_EQ(1U, configurer_.activated_types().Size()); @@ -781,20 +765,20 @@ SetConfigureDoneExpectation(DataTypeManager::OK, DataTypeStatusTable()); // Step 1. - Configure(dtm_.get(), ModelTypeSet(BOOKMARKS)); + Configure(ModelTypeSet(BOOKMARKS)); EXPECT_EQ(DataTypeManager::CONFIGURING, dtm_->state()); // Step 2. - Configure(dtm_.get(), ModelTypeSet(BOOKMARKS, PREFERENCES)); + Configure(ModelTypeSet(BOOKMARKS, PREFERENCES)); EXPECT_EQ(DataTypeManager::CONFIGURING, dtm_->state()); // Step 3. - FinishDownload(*dtm_, ModelTypeSet(), ModelTypeSet()); + FinishDownload(ModelTypeSet(), ModelTypeSet()); EXPECT_EQ(DataTypeManager::CONFIGURING, dtm_->state()); // Step 4. - FinishDownload(*dtm_, ModelTypeSet(), ModelTypeSet()); - FinishDownload(*dtm_, ModelTypeSet(BOOKMARKS, PREFERENCES), ModelTypeSet()); + FinishDownload(ModelTypeSet(), ModelTypeSet()); + FinishDownload(ModelTypeSet(BOOKMARKS, PREFERENCES), ModelTypeSet()); EXPECT_EQ(DataTypeManager::CONFIGURING, dtm_->state()); // Step 5. @@ -827,21 +811,21 @@ SetConfigureDoneExpectation(DataTypeManager::OK, DataTypeStatusTable()); // Step 1. 
- Configure(dtm_.get(), ModelTypeSet(BOOKMARKS)); - FinishDownload(*dtm_, ModelTypeSet(), ModelTypeSet()); + Configure(ModelTypeSet(BOOKMARKS)); + FinishDownload(ModelTypeSet(), ModelTypeSet()); EXPECT_EQ(DataTypeManager::CONFIGURING, dtm_->state()); // Step 2. - Configure(dtm_.get(), ModelTypeSet(BOOKMARKS, PREFERENCES)); + Configure(ModelTypeSet(BOOKMARKS, PREFERENCES)); EXPECT_EQ(DataTypeManager::CONFIGURING, dtm_->state()); // Step 3. - FinishDownload(*dtm_, ModelTypeSet(), ModelTypeSet()); - FinishDownload(*dtm_, ModelTypeSet(BOOKMARKS), ModelTypeSet()); + FinishDownload(ModelTypeSet(), ModelTypeSet()); + FinishDownload(ModelTypeSet(BOOKMARKS), ModelTypeSet()); EXPECT_EQ(DataTypeManager::CONFIGURING, dtm_->state()); // Step 4. - FinishDownload(*dtm_, ModelTypeSet(BOOKMARKS, PREFERENCES), ModelTypeSet()); + FinishDownload(ModelTypeSet(BOOKMARKS, PREFERENCES), ModelTypeSet()); EXPECT_EQ(DataTypeManager::CONFIGURING, dtm_->state()); // Step 5. @@ -859,14 +843,14 @@ // operations that would be invoked by the BackendMigrator. TEST_F(SyncDataTypeManagerImplTest, MigrateAll) { AddController(BOOKMARKS); - dtm_->set_priority_types(AddControlTypesTo(ModelTypeSet(BOOKMARKS))); + dtm_->set_priority_types(AddControlTypesTo(BOOKMARKS)); SetConfigureStartExpectation(); SetConfigureDoneExpectation(DataTypeManager::OK, DataTypeStatusTable()); // Initial setup. - Configure(dtm_.get(), ModelTypeSet(BOOKMARKS)); - FinishDownload(*dtm_, ModelTypeSet(BOOKMARKS), ModelTypeSet()); + Configure(ModelTypeSet(BOOKMARKS)); + FinishDownload(ModelTypeSet(BOOKMARKS), ModelTypeSet()); GetController(BOOKMARKS)->FinishStart(DataTypeController::OK); // We've now configured bookmarks and (implicitly) the control types. @@ -884,15 +868,15 @@ EXPECT_EQ(DataTypeManager::CONFIGURING, dtm_->state()); // The DTM will call ConfigureDataTypes(), even though it is unnecessary. 
- FinishDownload(*dtm_, ModelTypeSet(), ModelTypeSet()); + FinishDownload(ModelTypeSet(), ModelTypeSet()); EXPECT_EQ(DataTypeManager::CONFIGURED, dtm_->state()); observer_.ResetExpectations(); // Re-enable the migrated types. SetConfigureStartExpectation(); SetConfigureDoneExpectation(DataTypeManager::OK, DataTypeStatusTable()); - Configure(dtm_.get(), to_migrate); - FinishDownload(*dtm_, to_migrate, ModelTypeSet()); + Configure(to_migrate); + FinishDownload(to_migrate, ModelTypeSet()); GetController(BOOKMARKS)->FinishStart(DataTypeController::OK); EXPECT_EQ(DataTypeManager::CONFIGURED, dtm_->state()); } @@ -905,9 +889,9 @@ // Initial configure. SetConfigureStartExpectation(); SetConfigureDoneExpectation(DataTypeManager::OK, DataTypeStatusTable()); - Configure(dtm_.get(), ModelTypeSet(BOOKMARKS)); - FinishDownload(*dtm_, ModelTypeSet(), ModelTypeSet()); - FinishDownload(*dtm_, ModelTypeSet(BOOKMARKS), ModelTypeSet()); + Configure(ModelTypeSet(BOOKMARKS)); + FinishDownload(ModelTypeSet(), ModelTypeSet()); + FinishDownload(ModelTypeSet(BOOKMARKS), ModelTypeSet()); GetController(BOOKMARKS)->FinishStart(DataTypeController::OK); EXPECT_EQ(DataTypeManager::CONFIGURED, dtm_->state()); observer_.ResetExpectations(); @@ -924,19 +908,19 @@ // - PREFERENCES: which is new and will need to be downloaded, and // - NIGORI: (added implicitly because it is a control type) which // the DTM is part-way through purging. - Configure(dtm_.get(), ModelTypeSet(BOOKMARKS, PREFERENCES)); + Configure(ModelTypeSet(BOOKMARKS, PREFERENCES)); EXPECT_EQ(DataTypeManager::CONFIGURING, dtm_->state()); // Invoke the callback we've been waiting for since we asked to purge NIGORI. - FinishDownload(*dtm_, ModelTypeSet(), ModelTypeSet()); + FinishDownload(ModelTypeSet(), ModelTypeSet()); observer_.ResetExpectations(); SetConfigureDoneExpectation(DataTypeManager::OK, DataTypeStatusTable()); EXPECT_EQ(DataTypeManager::CONFIGURING, dtm_->state()); // Now invoke the callback for the second configure request. 
- FinishDownload(*dtm_, ModelTypeSet(), ModelTypeSet()); - FinishDownload(*dtm_, ModelTypeSet(BOOKMARKS, PREFERENCES), ModelTypeSet()); + FinishDownload(ModelTypeSet(), ModelTypeSet()); + FinishDownload(ModelTypeSet(BOOKMARKS, PREFERENCES), ModelTypeSet()); EXPECT_EQ(DataTypeManager::CONFIGURING, dtm_->state()); // Start the preferences controller. We don't need to start controller for @@ -950,26 +934,25 @@ AddController(BOOKMARKS); AddController(PREFERENCES); - dtm_->set_priority_types(AddControlTypesTo(ModelTypeSet(PREFERENCES))); + dtm_->set_priority_types(AddControlTypesTo(PREFERENCES)); // Initial configure. SetConfigureStartExpectation(); SetConfigureDoneExpectation(DataTypeManager::OK, DataTypeStatusTable()); - // Initially only PREFERENCES is configured. - configurer_.set_expected_configure_types( - ModelTypeConfigurer::CONFIGURE_ACTIVE, - AddControlTypesTo(ModelTypeSet(PREFERENCES))); - Configure(dtm_.get(), ModelTypeSet(BOOKMARKS, PREFERENCES)); + // Initially only PREFERENCES is downloaded. + Configure(ModelTypeSet(BOOKMARKS, PREFERENCES)); EXPECT_EQ(DataTypeManager::CONFIGURING, dtm_->state()); + EXPECT_EQ(AddControlTypesTo(PREFERENCES), + last_configure_params().to_download); - // BOOKMARKS is configured after download of PREFERENCES finishes. - configurer_.set_expected_configure_types( - ModelTypeConfigurer::CONFIGURE_ACTIVE, ModelTypeSet(BOOKMARKS)); - FinishDownload(*dtm_, ModelTypeSet(PREFERENCES), ModelTypeSet()); + // BOOKMARKS is downloaded after PREFERENCES finishes. 
+ FinishDownload(ModelTypeSet(PREFERENCES), ModelTypeSet()); EXPECT_EQ(DataTypeManager::CONFIGURING, dtm_->state()); + EXPECT_EQ(ModelTypeSet(BOOKMARKS, NIGORI), + last_configure_params().to_download); - FinishDownload(*dtm_, ModelTypeSet(BOOKMARKS), ModelTypeSet()); + FinishDownload(ModelTypeSet(BOOKMARKS), ModelTypeSet()); GetController(PREFERENCES)->FinishStart(DataTypeController::OK); EXPECT_EQ(DataTypeManager::CONFIGURING, dtm_->state()); @@ -982,43 +965,39 @@ AddController(PREFERENCES); AddController(APPS); - dtm_->set_priority_types(AddControlTypesTo(ModelTypeSet(PREFERENCES))); + dtm_->set_priority_types(AddControlTypesTo(PREFERENCES)); // Initial configure. SetConfigureStartExpectation(); SetConfigureDoneExpectation(DataTypeManager::OK, DataTypeStatusTable()); // Reconfigure while associating PREFERENCES and downloading BOOKMARKS. - configurer_.set_expected_configure_types( - ModelTypeConfigurer::CONFIGURE_ACTIVE, - AddControlTypesTo(ModelTypeSet(PREFERENCES))); - Configure(dtm_.get(), ModelTypeSet(BOOKMARKS, PREFERENCES)); + Configure(ModelTypeSet(BOOKMARKS, PREFERENCES)); EXPECT_EQ(DataTypeManager::CONFIGURING, dtm_->state()); + EXPECT_EQ(AddControlTypesTo(PREFERENCES), + last_configure_params().to_download); - configurer_.set_expected_configure_types( - ModelTypeConfigurer::CONFIGURE_ACTIVE, ModelTypeSet(BOOKMARKS)); - FinishDownload(*dtm_, ModelTypeSet(PREFERENCES), ModelTypeSet()); + FinishDownload(ModelTypeSet(PREFERENCES), ModelTypeSet()); EXPECT_EQ(DataTypeManager::CONFIGURING, dtm_->state()); + EXPECT_EQ(ModelTypeSet(BOOKMARKS, NIGORI), + last_configure_params().to_download); // Enable syncing for APPS. - Configure(dtm_.get(), ModelTypeSet(BOOKMARKS, PREFERENCES, APPS)); + Configure(ModelTypeSet(BOOKMARKS, PREFERENCES, APPS)); EXPECT_EQ(DataTypeManager::CONFIGURING, dtm_->state()); // Reconfiguration starts after downloading and association of previous // types finish. 
- configurer_.set_expected_configure_types( - ModelTypeConfigurer::CONFIGURE_ACTIVE, - AddControlTypesTo(ModelTypeSet(PREFERENCES))); - FinishDownload(*dtm_, ModelTypeSet(BOOKMARKS), ModelTypeSet()); + FinishDownload(ModelTypeSet(BOOKMARKS), ModelTypeSet()); GetController(PREFERENCES)->FinishStart(DataTypeController::OK); EXPECT_EQ(DataTypeManager::CONFIGURING, dtm_->state()); + EXPECT_EQ(ModelTypeSet(), last_configure_params().to_download); - configurer_.set_expected_configure_types( - ModelTypeConfigurer::CONFIGURE_ACTIVE, ModelTypeSet(BOOKMARKS, APPS)); - FinishDownload(*dtm_, ModelTypeSet(PREFERENCES), ModelTypeSet()); + FinishDownload(ModelTypeSet(PREFERENCES), ModelTypeSet()); EXPECT_EQ(DataTypeManager::CONFIGURING, dtm_->state()); + EXPECT_EQ(ModelTypeSet(APPS, NIGORI), last_configure_params().to_download); - FinishDownload(*dtm_, ModelTypeSet(BOOKMARKS, APPS), ModelTypeSet()); + FinishDownload(ModelTypeSet(BOOKMARKS, APPS), ModelTypeSet()); EXPECT_EQ(DataTypeManager::CONFIGURING, dtm_->state()); // Skip calling FinishStart() for PREFENCES because it's already started in @@ -1032,24 +1011,23 @@ AddController(BOOKMARKS); AddController(PREFERENCES); - dtm_->set_priority_types(AddControlTypesTo(ModelTypeSet(PREFERENCES))); + dtm_->set_priority_types(AddControlTypesTo(PREFERENCES)); // Initial configure. SetConfigureStartExpectation(); SetConfigureDoneExpectation(DataTypeManager::ABORTED, DataTypeStatusTable()); // Initially only PREFERENCES is configured. - configurer_.set_expected_configure_types( - ModelTypeConfigurer::CONFIGURE_ACTIVE, - AddControlTypesTo(ModelTypeSet(PREFERENCES))); - Configure(dtm_.get(), ModelTypeSet(BOOKMARKS, PREFERENCES)); + Configure(ModelTypeSet(BOOKMARKS, PREFERENCES)); EXPECT_EQ(DataTypeManager::CONFIGURING, dtm_->state()); + EXPECT_EQ(AddControlTypesTo(PREFERENCES), + last_configure_params().to_download); // BOOKMARKS is configured after download of PREFERENCES finishes. 
- configurer_.set_expected_configure_types( - ModelTypeConfigurer::CONFIGURE_ACTIVE, ModelTypeSet(BOOKMARKS)); - FinishDownload(*dtm_, ModelTypeSet(PREFERENCES), ModelTypeSet()); + FinishDownload(ModelTypeSet(PREFERENCES), ModelTypeSet()); EXPECT_EQ(DataTypeManager::CONFIGURING, dtm_->state()); + EXPECT_EQ(ModelTypeSet(BOOKMARKS, NIGORI), + last_configure_params().to_download); // PREFERENCES controller is associating while BOOKMARKS is downloading. EXPECT_EQ(DataTypeController::ASSOCIATING, @@ -1068,7 +1046,7 @@ AddController(BOOKMARKS); AddController(PREFERENCES); - dtm_->set_priority_types(AddControlTypesTo(ModelTypeSet(PREFERENCES))); + dtm_->set_priority_types(AddControlTypesTo(PREFERENCES)); // Initial configure. Bookmarks will fail to associate due to the download // failure. @@ -1079,17 +1057,16 @@ ModelTypeSet())); // Initially only PREFERENCES is configured. - configurer_.set_expected_configure_types( - ModelTypeConfigurer::CONFIGURE_ACTIVE, - AddControlTypesTo(ModelTypeSet(PREFERENCES))); - Configure(dtm_.get(), ModelTypeSet(BOOKMARKS, PREFERENCES)); + Configure(ModelTypeSet(BOOKMARKS, PREFERENCES)); EXPECT_EQ(DataTypeManager::CONFIGURING, dtm_->state()); + EXPECT_EQ(AddControlTypesTo(PREFERENCES), + last_configure_params().to_download); // BOOKMARKS is configured after download of PREFERENCES finishes. - configurer_.set_expected_configure_types( - ModelTypeConfigurer::CONFIGURE_ACTIVE, ModelTypeSet(BOOKMARKS)); - FinishDownload(*dtm_, ModelTypeSet(PREFERENCES), ModelTypeSet()); + FinishDownload(ModelTypeSet(PREFERENCES), ModelTypeSet()); EXPECT_EQ(DataTypeManager::CONFIGURING, dtm_->state()); + EXPECT_EQ(ModelTypeSet(BOOKMARKS, NIGORI), + last_configure_params().to_download); // PREFERENCES controller is associating while BOOKMARKS is downloading. EXPECT_EQ(DataTypeController::ASSOCIATING, @@ -1098,19 +1075,17 @@ GetController(BOOKMARKS)->state()); // Make BOOKMARKS download fail. Preferences is still associating. 
- FinishDownload(*dtm_, ModelTypeSet(), ModelTypeSet(BOOKMARKS)); + FinishDownload(ModelTypeSet(), ModelTypeSet(BOOKMARKS)); EXPECT_EQ(DataTypeManager::CONFIGURING, dtm_->state()); EXPECT_EQ(DataTypeController::ASSOCIATING, GetController(PREFERENCES)->state()); // Finish association of PREFERENCES. This will trigger a reconfiguration to // disable bookmarks. - configurer_.set_expected_configure_types( - ModelTypeConfigurer::CONFIGURE_ACTIVE, - AddControlTypesTo(ModelTypeSet(PREFERENCES))); GetController(PREFERENCES)->FinishStart(DataTypeController::OK); - FinishDownload(*dtm_, ModelTypeSet(PREFERENCES), ModelTypeSet()); + FinishDownload(ModelTypeSet(PREFERENCES), ModelTypeSet()); EXPECT_EQ(DataTypeManager::CONFIGURED, dtm_->state()); + EXPECT_EQ(ModelTypeSet(), last_configure_params().to_download); EXPECT_EQ(DataTypeController::RUNNING, GetController(PREFERENCES)->state()); EXPECT_EQ(DataTypeController::NOT_RUNNING, GetController(BOOKMARKS)->state()); } @@ -1119,7 +1094,7 @@ AddController(PREFERENCES); // Will fail. AddController(BOOKMARKS); // Will succeed. - dtm_->set_priority_types(AddControlTypesTo(ModelTypeSet(PREFERENCES))); + dtm_->set_priority_types(AddControlTypesTo(PREFERENCES)); // Initial configure. SetConfigureStartExpectation(); @@ -1129,17 +1104,16 @@ ModelTypeSet(), ModelTypeSet())); // Initially only PREFERENCES is configured. - configurer_.set_expected_configure_types( - ModelTypeConfigurer::CONFIGURE_ACTIVE, - AddControlTypesTo(ModelTypeSet(PREFERENCES))); - Configure(dtm_.get(), ModelTypeSet(BOOKMARKS, PREFERENCES)); + Configure(ModelTypeSet(BOOKMARKS, PREFERENCES)); EXPECT_EQ(DataTypeManager::CONFIGURING, dtm_->state()); + EXPECT_EQ(AddControlTypesTo(PREFERENCES), + last_configure_params().to_download); // BOOKMARKS is configured after download of PREFERENCES finishes. 
- configurer_.set_expected_configure_types( - ModelTypeConfigurer::CONFIGURE_ACTIVE, ModelTypeSet(BOOKMARKS)); - FinishDownload(*dtm_, ModelTypeSet(PREFERENCES), ModelTypeSet()); + FinishDownload(ModelTypeSet(PREFERENCES), ModelTypeSet()); EXPECT_EQ(DataTypeManager::CONFIGURING, dtm_->state()); + EXPECT_EQ(ModelTypeSet(BOOKMARKS, NIGORI), + last_configure_params().to_download); // PREFERENCES controller is associating while BOOKMARKS is downloading. EXPECT_EQ(DataTypeController::ASSOCIATING, @@ -1154,15 +1128,11 @@ // Reconfigure without PREFERENCES after the BOOKMARKS download completes, // then reconfigure with BOOKMARKS. - configurer_.set_expected_configure_types( - ModelTypeConfigurer::CONFIGURE_ACTIVE, ControlTypes()); - FinishDownload(*dtm_, ModelTypeSet(BOOKMARKS), ModelTypeSet()); - configurer_.set_expected_configure_types( - ModelTypeConfigurer::CONFIGURE_ACTIVE, ModelTypeSet(BOOKMARKS)); - FinishDownload(*dtm_, ModelTypeSet(), ModelTypeSet()); + FinishDownload(ModelTypeSet(BOOKMARKS), ModelTypeSet()); + FinishDownload(ModelTypeSet(), ModelTypeSet()); // Reconfigure with BOOKMARKS. - FinishDownload(*dtm_, ModelTypeSet(BOOKMARKS), ModelTypeSet()); + FinishDownload(ModelTypeSet(BOOKMARKS), ModelTypeSet()); EXPECT_EQ(DataTypeController::ASSOCIATING, GetController(BOOKMARKS)->state()); GetController(BOOKMARKS)->FinishStart(DataTypeController::OK); @@ -1176,7 +1146,7 @@ AddController(PREFERENCES); // Will succeed. AddController(BOOKMARKS); // Will fail. - dtm_->set_priority_types(AddControlTypesTo(ModelTypeSet(PREFERENCES))); + dtm_->set_priority_types(AddControlTypesTo(PREFERENCES)); // Initial configure. SetConfigureStartExpectation(); @@ -1186,17 +1156,16 @@ ModelTypeSet())); // Initially only PREFERENCES is configured. 
- configurer_.set_expected_configure_types( - ModelTypeConfigurer::CONFIGURE_ACTIVE, - AddControlTypesTo(ModelTypeSet(PREFERENCES))); - Configure(dtm_.get(), ModelTypeSet(BOOKMARKS, PREFERENCES)); + Configure(ModelTypeSet(BOOKMARKS, PREFERENCES)); EXPECT_EQ(DataTypeManager::CONFIGURING, dtm_->state()); + EXPECT_EQ(AddControlTypesTo(PREFERENCES), + last_configure_params().to_download); // BOOKMARKS is configured after download of PREFERENCES finishes. - configurer_.set_expected_configure_types( - ModelTypeConfigurer::CONFIGURE_ACTIVE, ModelTypeSet(BOOKMARKS)); - FinishDownload(*dtm_, ModelTypeSet(PREFERENCES), ModelTypeSet()); + FinishDownload(ModelTypeSet(PREFERENCES), ModelTypeSet()); EXPECT_EQ(DataTypeManager::CONFIGURING, dtm_->state()); + EXPECT_EQ(ModelTypeSet(BOOKMARKS, NIGORI), + last_configure_params().to_download); // PREFERENCES controller is associating while BOOKMARKS is downloading. EXPECT_EQ(DataTypeController::ASSOCIATING, @@ -1205,24 +1174,20 @@ GetController(BOOKMARKS)->state()); // BOOKMARKS finishes downloading and PREFERENCES finishes associating. - FinishDownload(*dtm_, ModelTypeSet(BOOKMARKS), ModelTypeSet()); + FinishDownload(ModelTypeSet(BOOKMARKS), ModelTypeSet()); GetController(PREFERENCES)->FinishStart(DataTypeController::OK); EXPECT_EQ(DataTypeController::RUNNING, GetController(PREFERENCES)->state()); // Make BOOKMARKS association fail, which triggers reconfigure with only // PREFERENCES. - configurer_.set_expected_configure_types( - ModelTypeConfigurer::CONFIGURE_ACTIVE, - AddControlTypesTo(ModelTypeSet(PREFERENCES))); GetController(BOOKMARKS)->FinishStart(DataTypeController::ASSOCIATION_FAILED); EXPECT_EQ(DataTypeController::NOT_RUNNING, GetController(BOOKMARKS)->state()); EXPECT_EQ(DataTypeManager::CONFIGURING, dtm_->state()); // Finish configuration with only PREFERENCES. 
- configurer_.set_expected_configure_types( - ModelTypeConfigurer::CONFIGURE_ACTIVE, ModelTypeSet()); - FinishDownload(*dtm_, ModelTypeSet(PREFERENCES), ModelTypeSet()); + FinishDownload(ModelTypeSet(PREFERENCES), ModelTypeSet()); EXPECT_EQ(DataTypeManager::CONFIGURED, dtm_->state()); + EXPECT_EQ(ModelTypeSet(), last_configure_params().to_download); EXPECT_EQ(DataTypeController::RUNNING, GetController(PREFERENCES)->state()); EXPECT_EQ(DataTypeController::NOT_RUNNING, GetController(BOOKMARKS)->state()); } @@ -1236,13 +1201,9 @@ SetConfigureStartExpectation(); SetConfigureDoneExpectation(DataTypeManager::OK, DataTypeStatusTable()); - ModelTypeSet expected_types = ControlTypes(); - expected_types.Put(BOOKMARKS); - // APPS is filtered out because there's no controller for it. - configurer_.set_expected_configure_types( - ModelTypeConfigurer::CONFIGURE_ACTIVE, expected_types); - Configure(dtm_.get(), types); - FinishDownload(*dtm_, ModelTypeSet(BOOKMARKS), ModelTypeSet()); + Configure(types); + EXPECT_EQ(AddControlTypesTo(BOOKMARKS), last_configure_params().to_download); + FinishDownload(ModelTypeSet(BOOKMARKS), ModelTypeSet()); GetController(BOOKMARKS)->FinishStart(DataTypeController::OK); dtm_->Stop(); @@ -1259,13 +1220,13 @@ BuildStatusTable(ModelTypeSet(), ModelTypeSet(BOOKMARKS), ModelTypeSet(), ModelTypeSet())); - Configure(dtm_.get(), ModelTypeSet(BOOKMARKS, PREFERENCES)); - FinishDownload(*dtm_, ModelTypeSet(), ModelTypeSet()); - FinishDownload(*dtm_, ModelTypeSet(PREFERENCES, BOOKMARKS), ModelTypeSet()); + Configure(ModelTypeSet(BOOKMARKS, PREFERENCES)); + FinishDownload(ModelTypeSet(), ModelTypeSet()); + FinishDownload(ModelTypeSet(PREFERENCES, BOOKMARKS), ModelTypeSet()); GetController(PREFERENCES)->FinishStart(DataTypeController::OK); GetController(BOOKMARKS)->FinishStart(DataTypeController::ASSOCIATION_FAILED); - FinishDownload(*dtm_, ModelTypeSet(), ModelTypeSet()); // Reconfig for error. 
- FinishDownload(*dtm_, ModelTypeSet(), ModelTypeSet()); // Reconfig for error. + FinishDownload(ModelTypeSet(), ModelTypeSet()); // Reconfig for error. + FinishDownload(ModelTypeSet(), ModelTypeSet()); // Reconfig for error. EXPECT_EQ(DataTypeManager::CONFIGURED, dtm_->state()); EXPECT_EQ(DataTypeController::RUNNING, GetController(PREFERENCES)->state()); EXPECT_EQ(DataTypeController::NOT_RUNNING, GetController(BOOKMARKS)->state()); @@ -1277,8 +1238,8 @@ dtm_->ReenableType(BOOKMARKS); EXPECT_EQ(DataTypeManager::CONFIGURING, dtm_->state()); - FinishDownload(*dtm_, ModelTypeSet(), ModelTypeSet()); - FinishDownload(*dtm_, ModelTypeSet(BOOKMARKS), ModelTypeSet()); + FinishDownload(ModelTypeSet(), ModelTypeSet()); + FinishDownload(ModelTypeSet(BOOKMARKS), ModelTypeSet()); EXPECT_EQ(DataTypeManager::CONFIGURING, dtm_->state()); GetController(BOOKMARKS)->FinishStart(DataTypeController::OK); EXPECT_EQ(DataTypeManager::CONFIGURED, dtm_->state()); @@ -1299,8 +1260,8 @@ DataTypeManager::OK, BuildStatusTable(ModelTypeSet(), ModelTypeSet(), ModelTypeSet(BOOKMARKS), ModelTypeSet())); - Configure(dtm_.get(), ModelTypeSet(BOOKMARKS)); - FinishDownload(*dtm_, ModelTypeSet(), ModelTypeSet()); + Configure(ModelTypeSet(BOOKMARKS)); + FinishDownload(ModelTypeSet(), ModelTypeSet()); EXPECT_EQ(DataTypeController::NOT_RUNNING, GetController(BOOKMARKS)->state()); EXPECT_EQ(DataTypeManager::CONFIGURED, dtm_->state()); EXPECT_EQ(0U, configurer_.activated_types().Size()); @@ -1312,8 +1273,8 @@ dtm_->ReenableType(BOOKMARKS); EXPECT_EQ(DataTypeManager::CONFIGURING, dtm_->state()); - FinishDownload(*dtm_, ModelTypeSet(), ModelTypeSet()); - FinishDownload(*dtm_, ModelTypeSet(BOOKMARKS), ModelTypeSet()); + FinishDownload(ModelTypeSet(), ModelTypeSet()); + FinishDownload(ModelTypeSet(BOOKMARKS), ModelTypeSet()); EXPECT_EQ(DataTypeManager::CONFIGURING, dtm_->state()); GetController(BOOKMARKS)->FinishStart(DataTypeController::OK); @@ -1341,17 +1302,17 @@ DataTypeManager::OK, 
BuildStatusTable(ModelTypeSet(), ModelTypeSet(), ModelTypeSet(BOOKMARKS), ModelTypeSet())); - Configure(dtm_.get(), ModelTypeSet(BOOKMARKS)); + Configure(ModelTypeSet(BOOKMARKS)); // Second Configure sets a flag to perform reconfiguration after the first one // is done. - Configure(dtm_.get(), ModelTypeSet(BOOKMARKS)); + Configure(ModelTypeSet(BOOKMARKS)); // Reset errors before triggering reconfiguration. dtm_->ResetDataTypeErrors(); // Reconfiguration should update unready errors. Bookmarks shouldn't start. - FinishDownload(*dtm_, ModelTypeSet(), ModelTypeSet()); - FinishDownload(*dtm_, ModelTypeSet(), ModelTypeSet()); + FinishDownload(ModelTypeSet(), ModelTypeSet()); + FinishDownload(ModelTypeSet(), ModelTypeSet()); EXPECT_EQ(DataTypeController::NOT_RUNNING, GetController(BOOKMARKS)->state()); EXPECT_EQ(DataTypeManager::CONFIGURED, dtm_->state()); EXPECT_EQ(0U, configurer_.activated_types().Size()); @@ -1368,9 +1329,9 @@ DataTypeManager::OK, BuildStatusTable(ModelTypeSet(), ModelTypeSet(BOOKMARKS), ModelTypeSet(), ModelTypeSet())); - Configure(dtm_.get(), ModelTypeSet(BOOKMARKS)); - FinishDownload(*dtm_, ModelTypeSet(), ModelTypeSet()); - FinishDownload(*dtm_, ModelTypeSet(BOOKMARKS), ModelTypeSet()); + Configure(ModelTypeSet(BOOKMARKS)); + FinishDownload(ModelTypeSet(), ModelTypeSet()); + FinishDownload(ModelTypeSet(BOOKMARKS), ModelTypeSet()); EXPECT_EQ(DataTypeManager::CONFIGURED, dtm_->state()); EXPECT_EQ(DataTypeController::NOT_RUNNING, GetController(BOOKMARKS)->state()); @@ -1387,14 +1348,14 @@ DataTypeManager::OK, BuildStatusTable(ModelTypeSet(), ModelTypeSet(BOOKMARKS), ModelTypeSet(), ModelTypeSet())); - Configure(dtm_.get(), ModelTypeSet(BOOKMARKS)); - FinishDownload(*dtm_, ModelTypeSet(), ModelTypeSet()); + Configure(ModelTypeSet(BOOKMARKS)); + FinishDownload(ModelTypeSet(), ModelTypeSet()); GetController(BOOKMARKS)->CreateErrorHandler()->OnUnrecoverableError( SyncError(FROM_HERE, SyncError::DATATYPE_ERROR, "bookmarks error", BOOKMARKS)); 
base::RunLoop().RunUntilIdle(); - FinishDownload(*dtm_, ModelTypeSet(BOOKMARKS), ModelTypeSet()); - FinishDownload(*dtm_, ModelTypeSet(), ModelTypeSet()); // Reconfig for error. + FinishDownload(ModelTypeSet(BOOKMARKS), ModelTypeSet()); + FinishDownload(ModelTypeSet(), ModelTypeSet()); // Reconfig for error. EXPECT_EQ(DataTypeManager::CONFIGURED, dtm_->state()); EXPECT_EQ(DataTypeController::NOT_RUNNING, GetController(BOOKMARKS)->state()); @@ -1410,11 +1371,11 @@ DataTypeManager::OK, BuildStatusTable(ModelTypeSet(), ModelTypeSet(BOOKMARKS), ModelTypeSet(), ModelTypeSet())); - Configure(dtm_.get(), ModelTypeSet(BOOKMARKS)); + Configure(ModelTypeSet(BOOKMARKS)); GetController(BOOKMARKS)->SetDelayModelLoad(); - FinishDownload(*dtm_, ModelTypeSet(), ModelTypeSet()); - FinishDownload(*dtm_, ModelTypeSet(BOOKMARKS), ModelTypeSet()); + FinishDownload(ModelTypeSet(), ModelTypeSet()); + FinishDownload(ModelTypeSet(BOOKMARKS), ModelTypeSet()); EXPECT_EQ(DataTypeManager::CONFIGURING, dtm_->state()); @@ -1426,7 +1387,7 @@ EXPECT_EQ(DataTypeManager::CONFIGURING, dtm_->state()); EXPECT_EQ(DataTypeController::NOT_RUNNING, GetController(BOOKMARKS)->state()); - FinishDownload(*dtm_, ModelTypeSet(), ModelTypeSet()); + FinishDownload(ModelTypeSet(), ModelTypeSet()); EXPECT_EQ(DataTypeManager::CONFIGURED, dtm_->state()); EXPECT_EQ(0U, configurer_.activated_types().Size()); @@ -1437,16 +1398,16 @@ AddController(PREFERENCES); AddController(BOOKMARKS); - dtm_->set_priority_types(AddControlTypesTo(ModelTypeSet(PREFERENCES))); + dtm_->set_priority_types(AddControlTypesTo(PREFERENCES)); SetConfigureStartExpectation(); SetConfigureDoneExpectation(DataTypeManager::OK, DataTypeStatusTable()); - Configure(dtm_.get(), ModelTypeSet(PREFERENCES, BOOKMARKS)); + Configure(ModelTypeSet(PREFERENCES, BOOKMARKS)); EXPECT_EQ(DataTypeManager::CONFIGURING, dtm_->state()); - configurer_.set_ready_types(ModelTypeSet(BOOKMARKS)); - FinishDownload(*dtm_, ModelTypeSet(PREFERENCES), ModelTypeSet()); + 
dtm_->set_downloaded_types(ModelTypeSet(BOOKMARKS)); + FinishDownload(ModelTypeSet(PREFERENCES), ModelTypeSet()); // Association of Bookmarks can't happen until higher priority types are // finished. @@ -1468,7 +1429,7 @@ EXPECT_EQ(DataTypeManager::CONFIGURING, dtm_->state()); // Finishing the download should complete the configuration. - FinishDownload(*dtm_, ModelTypeSet(BOOKMARKS), ModelTypeSet()); + FinishDownload(ModelTypeSet(BOOKMARKS), ModelTypeSet()); EXPECT_EQ(DataTypeController::RUNNING, GetController(BOOKMARKS)->state()); EXPECT_EQ(DataTypeManager::CONFIGURED, dtm_->state()); EXPECT_EQ(2U, configurer_.activated_types().Size()); @@ -1483,13 +1444,13 @@ AddController(PREFERENCES); AddController(BOOKMARKS); - dtm_->set_priority_types(AddControlTypesTo(ModelTypeSet(PREFERENCES))); + dtm_->set_priority_types(AddControlTypesTo(PREFERENCES)); SetConfigureStartExpectation(); SetConfigureDoneExpectation(DataTypeManager::OK, DataTypeStatusTable()); - configurer_.set_ready_types(ModelTypeSet(PREFERENCES)); - Configure(dtm_.get(), ModelTypeSet(PREFERENCES, BOOKMARKS)); + dtm_->set_downloaded_types(ModelTypeSet(PREFERENCES)); + Configure(ModelTypeSet(PREFERENCES, BOOKMARKS)); EXPECT_EQ(DataTypeManager::CONFIGURING, dtm_->state()); // Association of Bookmarks can't happen until higher priority types are @@ -1507,11 +1468,11 @@ // Because Bookmarks aren't a ready type, they'll need to wait until the // low priority download also finishes. 
- FinishDownload(*dtm_, ModelTypeSet(PREFERENCES), ModelTypeSet()); + FinishDownload(ModelTypeSet(PREFERENCES), ModelTypeSet()); EXPECT_EQ(DataTypeController::MODEL_LOADED, GetController(BOOKMARKS)->state()); - FinishDownload(*dtm_, ModelTypeSet(BOOKMARKS), ModelTypeSet()); + FinishDownload(ModelTypeSet(BOOKMARKS), ModelTypeSet()); EXPECT_EQ(DataTypeController::ASSOCIATING, GetController(BOOKMARKS)->state()); EXPECT_EQ(DataTypeManager::CONFIGURING, dtm_->state()); @@ -1532,13 +1493,13 @@ AddController(PREFERENCES); AddController(BOOKMARKS); - dtm_->set_priority_types(AddControlTypesTo(ModelTypeSet(PREFERENCES))); + dtm_->set_priority_types(AddControlTypesTo(PREFERENCES)); SetConfigureStartExpectation(); SetConfigureDoneExpectation(DataTypeManager::OK, DataTypeStatusTable()); - configurer_.set_ready_types(ModelTypeSet(PREFERENCES)); - Configure(dtm_.get(), ModelTypeSet(PREFERENCES, BOOKMARKS)); + dtm_->set_downloaded_types(ModelTypeSet(PREFERENCES)); + Configure(ModelTypeSet(PREFERENCES, BOOKMARKS)); EXPECT_EQ(DataTypeManager::CONFIGURING, dtm_->state()); // Association of Bookmarks can't happen until higher priority types are @@ -1556,8 +1517,8 @@ // Because Bookmarks are a ready type, it can start associating immediately // after the high priority types finish downloading. - configurer_.set_ready_types(ModelTypeSet(BOOKMARKS)); - FinishDownload(*dtm_, ModelTypeSet(PREFERENCES), ModelTypeSet()); + dtm_->set_downloaded_types(ModelTypeSet(BOOKMARKS)); + FinishDownload(ModelTypeSet(PREFERENCES), ModelTypeSet()); EXPECT_EQ(DataTypeController::ASSOCIATING, GetController(BOOKMARKS)->state()); // Finishing the Bookmarks association leaves the DTM waiting for the low @@ -1567,7 +1528,7 @@ EXPECT_EQ(DataTypeManager::CONFIGURING, dtm_->state()); // Finishing the low priority download ends the configuration. 
- FinishDownload(*dtm_, ModelTypeSet(BOOKMARKS), ModelTypeSet()); + FinishDownload(ModelTypeSet(BOOKMARKS), ModelTypeSet()); EXPECT_EQ(DataTypeManager::CONFIGURED, dtm_->state()); EXPECT_EQ(2U, configurer_.activated_types().Size()); @@ -1582,18 +1543,18 @@ AddController(BOOKMARKS); AddController(PASSWORDS); + ModelTypeSet clean_types(BOOKMARKS, PASSWORDS); + SetConfigureStartExpectation(); SetConfigureDoneExpectation(DataTypeManager::OK, DataTypeStatusTable()); - configurer_.set_expected_configure_types( - ModelTypeConfigurer::CONFIGURE_CLEAN, - AddControlTypesTo(ModelTypeSet(BOOKMARKS, PASSWORDS))); - dtm_->Configure(ModelTypeSet(BOOKMARKS, PASSWORDS), - CONFIGURE_REASON_CATCH_UP); + dtm_->Configure(clean_types, CONFIGURE_REASON_CATCH_UP); EXPECT_EQ(DataTypeManager::CONFIGURING, dtm_->state()); + EXPECT_EQ(AddControlTypesTo(clean_types), last_configure_params().to_unapply); + EXPECT_TRUE(last_configure_params().to_purge.HasAll(clean_types)); - FinishDownload(*dtm_, ModelTypeSet(), ModelTypeSet()); - FinishDownload(*dtm_, ModelTypeSet(BOOKMARKS, PASSWORDS), ModelTypeSet()); + FinishDownload(ModelTypeSet(), ModelTypeSet()); + FinishDownload(clean_types, ModelTypeSet()); EXPECT_EQ(DataTypeManager::CONFIGURING, dtm_->state()); GetController(BOOKMARKS)->FinishStart(DataTypeController::OK); @@ -1619,25 +1580,22 @@ SetConfigureDoneExpectation(DataTypeManager::OK, DataTypeStatusTable()); // Configure (catch up) with one type. - configurer_.set_expected_configure_types( - ModelTypeConfigurer::CONFIGURE_CLEAN, - AddControlTypesTo(ModelTypeSet(BOOKMARKS))); dtm_->Configure(ModelTypeSet(BOOKMARKS), CONFIGURE_REASON_CATCH_UP); EXPECT_EQ(DataTypeManager::CONFIGURING, dtm_->state()); + EXPECT_EQ(AddControlTypesTo(BOOKMARKS), last_configure_params().to_unapply); // Configure with both types before the first one completes. Both types should // end up in CONFIGURE_CLEAN. 
- configurer_.set_expected_configure_types( - ModelTypeConfigurer::CONFIGURE_CLEAN, - AddControlTypesTo(ModelTypeSet(BOOKMARKS, PASSWORDS))); dtm_->Configure(ModelTypeSet(BOOKMARKS, PASSWORDS), CONFIGURE_REASON_RECONFIGURATION); EXPECT_EQ(DataTypeManager::CONFIGURING, dtm_->state()); + EXPECT_EQ(AddControlTypesTo(ModelTypeSet(BOOKMARKS)), + last_configure_params().to_unapply); - FinishDownload(*dtm_, ModelTypeSet(), ModelTypeSet()); + FinishDownload(ModelTypeSet(), ModelTypeSet()); EXPECT_EQ(DataTypeManager::CONFIGURING, dtm_->state()); - FinishDownload(*dtm_, ModelTypeSet(), ModelTypeSet()); - FinishDownload(*dtm_, ModelTypeSet(BOOKMARKS, PASSWORDS), ModelTypeSet()); + FinishDownload(ModelTypeSet(), ModelTypeSet()); + FinishDownload(ModelTypeSet(BOOKMARKS, PASSWORDS), ModelTypeSet()); EXPECT_EQ(DataTypeManager::CONFIGURING, dtm_->state()); GetController(BOOKMARKS)->FinishStart(DataTypeController::OK); @@ -1657,7 +1615,7 @@ SetConfigureStartExpectation(); SetConfigureDoneExpectation(DataTypeManager::OK, DataTypeStatusTable()); - Configure(dtm_.get(), ModelTypeSet(BOOKMARKS)); + Configure(ModelTypeSet(BOOKMARKS)); EXPECT_EQ(DataTypeManager::CONFIGURING, dtm_->state()); // Bookmarks model isn't loaded yet and it is required to complete before // call to configure. Ensure that configure wasn't called. 
@@ -1669,8 +1627,8 @@ EXPECT_EQ(1, configurer_.configure_call_count()); EXPECT_EQ(1, GetController(BOOKMARKS)->register_with_backend_call_count()); - FinishDownload(*dtm_, ModelTypeSet(), ModelTypeSet()); - FinishDownload(*dtm_, ModelTypeSet(BOOKMARKS), ModelTypeSet()); + FinishDownload(ModelTypeSet(), ModelTypeSet()); + FinishDownload(ModelTypeSet(BOOKMARKS), ModelTypeSet()); EXPECT_EQ(DataTypeManager::CONFIGURING, dtm_->state()); GetController(BOOKMARKS)->FinishStart(DataTypeController::OK); @@ -1686,7 +1644,7 @@ SetConfigureStartExpectation(); FailEncryptionFor(ModelTypeSet(BOOKMARKS)); - Configure(dtm_.get(), ModelTypeSet(BOOKMARKS, PASSWORDS)); + Configure(ModelTypeSet(BOOKMARKS, PASSWORDS)); EXPECT_EQ(DataTypeManager::CONFIGURING, dtm_->state()); EXPECT_EQ(DataTypeController::NOT_RUNNING, GetController(BOOKMARKS)->state()); EXPECT_EQ(DataTypeController::MODEL_STARTING, @@ -1706,7 +1664,7 @@ AddController(BOOKMARKS, true, true); AddController(PASSWORDS, true, true); SetConfigureStartExpectation(); - Configure(dtm_.get(), ModelTypeSet(BOOKMARKS, PASSWORDS)); + Configure(ModelTypeSet(BOOKMARKS, PASSWORDS)); EXPECT_EQ(DataTypeController::MODEL_STARTING, GetController(BOOKMARKS)->state()); EXPECT_EQ(DataTypeController::MODEL_STARTING,
diff --git a/components/sync/driver/directory_data_type_controller.cc b/components/sync/driver/directory_data_type_controller.cc index 08fd79d5..86fdd73 100644 --- a/components/sync/driver/directory_data_type_controller.cc +++ b/components/sync/driver/directory_data_type_controller.cc
@@ -58,6 +58,7 @@ } void DirectoryDataTypeController::RegisterWithBackend( + base::Callback<void(bool)> set_downloaded, ModelTypeConfigurer* configurer) {} void DirectoryDataTypeController::ActivateDataType(
diff --git a/components/sync/driver/directory_data_type_controller.h b/components/sync/driver/directory_data_type_controller.h index 48571e6..350b055 100644 --- a/components/sync/driver/directory_data_type_controller.h +++ b/components/sync/driver/directory_data_type_controller.h
@@ -29,7 +29,8 @@ // Directory based data types don't need to register with backend. // ModelTypeRegistry will create all necessary objects in // SetEnabledDirectoryTypes based on routing info. - void RegisterWithBackend(ModelTypeConfigurer* configurer) override; + void RegisterWithBackend(base::Callback<void(bool)> set_downloaded, + ModelTypeConfigurer* configurer) override; // Directory specific implementation of ActivateDataType with the // type specific ChangeProcessor and ModelSafeGroup.
diff --git a/components/sync/driver/fake_data_type_controller.cc b/components/sync/driver/fake_data_type_controller.cc index 2587431b..de640d19 100644 --- a/components/sync/driver/fake_data_type_controller.cc +++ b/components/sync/driver/fake_data_type_controller.cc
@@ -53,6 +53,7 @@ } void FakeDataTypeController::RegisterWithBackend( + base::Callback<void(bool)> set_downloaded, ModelTypeConfigurer* configurer) { ++register_with_backend_call_count_; }
diff --git a/components/sync/driver/fake_data_type_controller.h b/components/sync/driver/fake_data_type_controller.h index 509e6b3..5add35c6 100644 --- a/components/sync/driver/fake_data_type_controller.h +++ b/components/sync/driver/fake_data_type_controller.h
@@ -30,7 +30,8 @@ // DirectoryDataTypeController implementation. bool ShouldLoadModelBeforeConfigure() const override; void LoadModels(const ModelLoadCallback& model_load_callback) override; - void RegisterWithBackend(ModelTypeConfigurer* configurer) override; + void RegisterWithBackend(base::Callback<void(bool)> set_downloaded, + ModelTypeConfigurer* configurer) override; void StartAssociating(const StartCallback& start_callback) override; void Stop() override; std::string name() const override;
diff --git a/components/sync/driver/glue/sync_backend_host_core.cc b/components/sync/driver/glue/sync_backend_host_core.cc index c448500..b934544 100644 --- a/components/sync/driver/glue/sync_backend_host_core.cc +++ b/components/sync/driver/glue/sync_backend_host_core.cc
@@ -508,22 +508,26 @@ } void SyncBackendHostCore::DoConfigureSyncer( - ConfigureReason reason, - const ModelTypeSet& to_download, - const ModelSafeRoutingInfo routing_info, - const base::Callback<void(ModelTypeSet, ModelTypeSet)>& ready_task, - const base::Closure& retry_callback) { + ModelTypeConfigurer::ConfigureParams params) { DCHECK(thread_checker_.CalledOnValidThread()); - DCHECK(!ready_task.is_null()); - DCHECK(!retry_callback.is_null()); - base::Closure chained_ready_task( - base::Bind(&SyncBackendHostCore::DoFinishConfigureDataTypes, - weak_ptr_factory_.GetWeakPtr(), to_download, ready_task)); + DCHECK(!params.ready_task.is_null()); + DCHECK(!params.retry_callback.is_null()); + + registrar_->ConfigureDataTypes(params.enabled_types, params.disabled_types); + + ModelSafeRoutingInfo routing_info; + registrar_->GetModelSafeRoutingInfo(&routing_info); + + base::Closure chained_ready_task(base::Bind( + &SyncBackendHostCore::DoFinishConfigureDataTypes, + weak_ptr_factory_.GetWeakPtr(), params.to_download, params.ready_task)); base::Closure chained_retry_task( base::Bind(&SyncBackendHostCore::DoRetryConfiguration, - weak_ptr_factory_.GetWeakPtr(), retry_callback)); - sync_manager_->ConfigureSyncer(reason, to_download, routing_info, - chained_ready_task, chained_retry_task); + weak_ptr_factory_.GetWeakPtr(), params.retry_callback)); + + sync_manager_->ConfigureSyncer(params.reason, params.to_download, + routing_info, chained_ready_task, + chained_retry_task); } void SyncBackendHostCore::DoFinishConfigureDataTypes(
diff --git a/components/sync/driver/glue/sync_backend_host_core.h b/components/sync/driver/glue/sync_backend_host_core.h index 27e0d57e..787a1eb 100644 --- a/components/sync/driver/glue/sync_backend_host_core.h +++ b/components/sync/driver/glue/sync_backend_host_core.h
@@ -23,6 +23,7 @@ #include "components/sync/base/system_encryptor.h" #include "components/sync/driver/glue/sync_backend_host_impl.h" #include "components/sync/engine/cycle/type_debug_info_observer.h" +#include "components/sync/engine/model_type_configurer.h" #include "components/sync/engine/shutdown_reason.h" #include "components/sync/engine/sync_encryption_handler.h" #include "url/gurl.h" @@ -145,12 +146,7 @@ void DoPurgeDisabledTypes(const ModelTypeSet& to_purge, const ModelTypeSet& to_journal, const ModelTypeSet& to_unapply); - void DoConfigureSyncer( - ConfigureReason reason, - const ModelTypeSet& to_download, - const ModelSafeRoutingInfo routing_info, - const base::Callback<void(ModelTypeSet, ModelTypeSet)>& ready_task, - const base::Closure& retry_callback); + void DoConfigureSyncer(ModelTypeConfigurer::ConfigureParams params); void DoFinishConfigureDataTypes( ModelTypeSet types_to_config, const base::Callback<void(ModelTypeSet, ModelTypeSet)>& ready_task);
diff --git a/components/sync/driver/glue/sync_backend_host_impl.cc b/components/sync/driver/glue/sync_backend_host_impl.cc index 5a6ed1c..6e825e66 100644 --- a/components/sync/driver/glue/sync_backend_host_impl.cc +++ b/components/sync/driver/glue/sync_backend_host_impl.cc
@@ -201,124 +201,14 @@ registrar_ = nullptr; } -ModelTypeSet SyncBackendHostImpl::ConfigureDataTypes( - ConfigureReason reason, - const DataTypeConfigStateMap& config_state_map, - const base::Callback<void(ModelTypeSet, ModelTypeSet)>& ready_task, - const base::Callback<void()>& retry_callback) { - // Only one configure is allowed at a time. This is guaranteed by our - // callers. The SyncBackendHostImpl requests one configure as the backend is - // initializing and waits for it to complete. After initialization, all - // configurations will pass through the DataTypeManager, which is careful to - // never send a new configure request until the current request succeeds. - - // The SyncBackendRegistrar's routing info will be updated by adding the - // types_to_add to the list then removing types_to_remove. Any types which - // are not in either of those sets will remain untouched. - // - // Types which were not in the list previously are not fully downloaded, so we - // must ask the syncer to download them. Any newly supported datatypes will - // not have been in that routing info list, so they will be among the types - // downloaded if they are enabled. - // - // The SyncBackendRegistrar's state was initially derived from the types - // detected to have been downloaded in the database. Afterwards it is - // modified only by this function. We expect it to remain in sync with the - // backend because configuration requests are never aborted; they are retried - // until they succeed or the backend is shut down. 
- - ModelTypeSet disabled_types = GetDataTypesInState(DISABLED, config_state_map); - ModelTypeSet fatal_types = GetDataTypesInState(FATAL, config_state_map); - ModelTypeSet crypto_types = GetDataTypesInState(CRYPTO, config_state_map); - ModelTypeSet unready_types = GetDataTypesInState(UNREADY, config_state_map); - - disabled_types.PutAll(fatal_types); - disabled_types.PutAll(crypto_types); - disabled_types.PutAll(unready_types); - - ModelTypeSet active_types = - GetDataTypesInState(CONFIGURE_ACTIVE, config_state_map); - ModelTypeSet clean_first_types = - GetDataTypesInState(CONFIGURE_CLEAN, config_state_map); - ModelTypeSet types_to_download = registrar_->ConfigureDataTypes( - Union(active_types, clean_first_types), disabled_types); - types_to_download.PutAll(clean_first_types); - types_to_download.RemoveAll(ProxyTypes()); - if (!types_to_download.Empty()) - types_to_download.Put(NIGORI); - - // TODO(sync): crbug.com/137550. - // It's dangerous to configure types that have progress markers. Types with - // progress markers can trigger a MIGRATION_DONE response. We are not - // prepared to handle a migration during a configure, so we must ensure that - // all our types_to_download actually contain no data before we sync them. - // - // One common way to end up in this situation used to be types which - // downloaded some or all of their data but have not applied it yet. We avoid - // problems with those types by purging the data of any such partially synced - // types soon after we load the directory. - // - // Another possible scenario is that we have newly supported or newly enabled - // data types being downloaded here but the nigori type, which is always - // included in any GetUpdates request, requires migration. The server has - // code to detect this scenario based on the configure reason, the fact that - // the nigori type is the only requested type which requires migration, and - // that the requested types list includes at least one non-nigori type. 
It - // will not send a MIGRATION_DONE response in that case. We still need to be - // careful to not send progress markers for non-nigori types, though. If a - // non-nigori type in the request requires migration, a MIGRATION_DONE - // response will be sent. - - ModelSafeRoutingInfo routing_info; - registrar_->GetModelSafeRoutingInfo(&routing_info); - - ModelTypeSet current_types = registrar_->GetLastConfiguredTypes(); - ModelTypeSet types_to_purge = Difference(ModelTypeSet::All(), current_types); - ModelTypeSet inactive_types = - GetDataTypesInState(CONFIGURE_INACTIVE, config_state_map); - // Include clean_first_types in types_to_purge, they are part of - // current_types, but still need to be cleared. - DCHECK(current_types.HasAll(clean_first_types)); - types_to_purge.PutAll(clean_first_types); - types_to_purge.RemoveAll(inactive_types); - types_to_purge.RemoveAll(unready_types); - - // If a type has already been disabled and unapplied or journaled, it will - // not be part of the |types_to_purge| set, and therefore does not need - // to be acted on again. - fatal_types.RetainAll(types_to_purge); - ModelTypeSet unapply_types = Union(crypto_types, clean_first_types); - unapply_types.RetainAll(types_to_purge); - - DCHECK(Intersection(current_types, fatal_types).Empty()); - DCHECK(Intersection(current_types, crypto_types).Empty()); - DCHECK(current_types.HasAll(types_to_download)); - - SDVLOG(1) << "Types " << ModelTypeSetToString(types_to_download) - << " added; calling DoConfigureSyncer"; - // Divide up the types into their corresponding actions (each is mutually - // exclusive): - // - Types which have just been added to the routing info (types_to_download): - // are downloaded. - // - Types which have encountered a fatal error (fatal_types) are deleted - // from the directory and journaled in the delete journal. - // - Types which have encountered a cryptographer error (crypto_types) are - // unapplied (local state is purged but sync state is not). 
- // - All other types not in the routing info (types just disabled) are deleted - // from the directory. - // - Everything else (enabled types and already disabled types) is not - // touched. +void SyncBackendHostImpl::ConfigureDataTypes(ConfigureParams params) { sync_task_runner_->PostTask( - FROM_HERE, base::Bind(&SyncBackendHostCore::DoPurgeDisabledTypes, core_, - types_to_purge, fatal_types, unapply_types)); - RequestConfigureSyncer(reason, types_to_download, routing_info, ready_task, - retry_callback); - - DCHECK(Intersection(active_types, types_to_purge).Empty()); - DCHECK(Intersection(active_types, fatal_types).Empty()); - DCHECK(Intersection(active_types, unapply_types).Empty()); - DCHECK(Intersection(active_types, inactive_types).Empty()); - return Difference(active_types, types_to_download); + FROM_HERE, + base::Bind(&SyncBackendHostCore::DoPurgeDisabledTypes, core_, + params.to_purge, params.to_journal, params.to_unapply)); + sync_task_runner_->PostTask( + FROM_HERE, base::Bind(&SyncBackendHostCore::DoConfigureSyncer, core_, + base::Passed(¶ms))); } void SyncBackendHostImpl::EnableEncryptEverything() { @@ -433,18 +323,6 @@ core_)); } -void SyncBackendHostImpl::RequestConfigureSyncer( - ConfigureReason reason, - ModelTypeSet to_download, - const ModelSafeRoutingInfo& routing_info, - const base::Callback<void(ModelTypeSet, ModelTypeSet)>& ready_task, - const base::Closure& retry_callback) { - sync_task_runner_->PostTask( - FROM_HERE, - base::Bind(&SyncBackendHostCore::DoConfigureSyncer, core_, reason, - to_download, routing_info, ready_task, retry_callback)); -} - void SyncBackendHostImpl::FinishConfigureDataTypesOnFrontendLoop( const ModelTypeSet enabled_types, const ModelTypeSet succeeded_configuration_types,
diff --git a/components/sync/driver/glue/sync_backend_host_impl.h b/components/sync/driver/glue/sync_backend_host_impl.h index 5836f50..0f33a9d 100644 --- a/components/sync/driver/glue/sync_backend_host_impl.h +++ b/components/sync/driver/glue/sync_backend_host_impl.h
@@ -67,11 +67,7 @@ WARN_UNUSED_RESULT; void StopSyncingForShutdown() override; void Shutdown(ShutdownReason reason) override; - ModelTypeSet ConfigureDataTypes( - ConfigureReason reason, - const DataTypeConfigStateMap& config_state_map, - const base::Callback<void(ModelTypeSet, ModelTypeSet)>& ready_task, - const base::Callback<void()>& retry_callback) override; + void ConfigureDataTypes(ConfigureParams params) override; void ActivateDirectoryDataType(ModelType type, ModelSafeGroup group, ChangeProcessor* change_processor) override; @@ -109,15 +105,6 @@ // The types and functions below are protected so that test // subclasses can use them. - // Request the syncer to reconfigure with the specfied params. - // Virtual for testing. - virtual void RequestConfigureSyncer( - ConfigureReason reason, - ModelTypeSet to_download, - const ModelSafeRoutingInfo& routing_info, - const base::Callback<void(ModelTypeSet, ModelTypeSet)>& ready_task, - const base::Closure& retry_callback); - // Called when the syncer has finished performing a configuration. void FinishConfigureDataTypesOnFrontendLoop( const ModelTypeSet enabled_types,
diff --git a/components/sync/driver/glue/sync_backend_host_impl_unittest.cc b/components/sync/driver/glue/sync_backend_host_impl_unittest.cc index 8769048..21d8d42 100644 --- a/components/sync/driver/glue/sync_backend_host_impl_unittest.cc +++ b/components/sync/driver/glue/sync_backend_host_impl_unittest.cc
@@ -35,7 +35,7 @@ #include "components/sync/engine/net/http_bridge_network_resources.h" #include "components/sync/engine/net/network_resources.h" #include "components/sync/engine/passive_model_worker.h" -#include "components/sync/engine/sync_engine_host.h" +#include "components/sync/engine/sync_engine_host_stub.h" #include "components/sync/engine/sync_manager_factory.h" #include "components/sync/test/callback_counter.h" #include "components/sync_preferences/pref_service_syncable.h" @@ -67,42 +67,29 @@ const base::TimeDelta&, const base::TimeDelta&) {} -void QuitMessageLoop() { - base::MessageLoop::current()->QuitWhenIdle(); -} - -class MockSyncEngineHost : public SyncEngineHost { +class TestSyncEngineHost : public SyncEngineHostStub { public: - virtual ~MockSyncEngineHost() {} + explicit TestSyncEngineHost( + base::Callback<void(ModelTypeSet)> set_engine_types) + : set_engine_types_(set_engine_types) {} - MOCK_METHOD5(OnEngineInitialized, - void(ModelTypeSet initial_types, - const WeakHandle<JsBackend>&, - const WeakHandle<DataTypeDebugInfoListener>&, - const std::string&, - bool)); - MOCK_METHOD0(OnSyncCycleCompleted, void()); - MOCK_METHOD1(OnConnectionStatusChange, void(ConnectionStatus status)); - MOCK_METHOD0(OnClearServerDataSucceeded, void()); - MOCK_METHOD0(OnClearServerDataFailed, void()); - MOCK_METHOD2(OnPassphraseRequired, - void(PassphraseRequiredReason, const sync_pb::EncryptedData&)); - MOCK_METHOD0(OnPassphraseAccepted, void()); - MOCK_METHOD2(OnEncryptedTypesChanged, void(ModelTypeSet, bool)); - MOCK_METHOD0(OnEncryptionComplete, void()); - MOCK_METHOD1(OnMigrationNeededForTypes, void(ModelTypeSet)); - MOCK_METHOD1(OnProtocolEvent, void(const ProtocolEvent&)); - MOCK_METHOD2(OnDirectoryTypeCommitCounterUpdated, - void(ModelType, const CommitCounters&)); - MOCK_METHOD2(OnDirectoryTypeUpdateCounterUpdated, - void(ModelType, const UpdateCounters&)); - MOCK_METHOD2(OnDatatypeStatusCounterUpdated, - void(ModelType, const StatusCounters&)); - 
MOCK_METHOD1(OnExperimentsChanged, void(const Experiments&)); - MOCK_METHOD1(OnActionableError, void(const SyncProtocolError& sync_error)); - MOCK_METHOD0(OnSyncConfigureRetry, void()); - MOCK_METHOD1(OnLocalSetPassphraseEncryption, - void(const SyncEncryptionHandler::NigoriState& nigori_state)); + void OnEngineInitialized(ModelTypeSet initial_types, + const WeakHandle<JsBackend>&, + const WeakHandle<DataTypeDebugInfoListener>&, + const std::string&, + bool success) override { + EXPECT_EQ(expect_success_, success); + set_engine_types_.Run(initial_types); + base::MessageLoop::current()->QuitWhenIdle(); + } + + void SetExpectSuccess(bool expect_success) { + expect_success_ = expect_success; + } + + private: + base::Callback<void(ModelTypeSet)> set_engine_types_; + bool expect_success_ = false; }; class FakeSyncManagerFactory : public SyncManagerFactory { @@ -157,7 +144,10 @@ class SyncEngineTest : public testing::Test { protected: SyncEngineTest() - : sync_thread_("SyncThreadForTest"), fake_manager_(nullptr) {} + : sync_thread_("SyncThreadForTest"), + host_(base::Bind(&SyncEngineTest::SetEngineTypes, + base::Unretained(this))), + fake_manager_(nullptr) {} ~SyncEngineTest() override {} @@ -206,8 +196,7 @@ // Synchronously initializes the backend. 
void InitializeBackend(bool expect_success) { - EXPECT_CALL(mock_host_, OnEngineInitialized(_, _, _, _, expect_success)) - .WillOnce(InvokeWithoutArgs(QuitMessageLoop)); + host_.SetExpectSuccess(expect_success); SyncEngine::HttpPostProviderFactoryGetter http_post_provider_factory_getter = base::Bind(&NetworkResources::GetHttpPostProviderFactory, @@ -216,7 +205,7 @@ SyncEngine::InitParams params; params.sync_task_runner = sync_thread_.task_runner(); - params.host = &mock_host_; + params.host = &host_; params.registrar = base::MakeUnique<SyncBackendRegistrar>( std::string(), base::Bind(&SyncClient::CreateModelWorkerForGroup, base::Unretained(&sync_client_))); @@ -231,55 +220,69 @@ backend_->Initialize(std::move(params)); - base::RunLoop run_loop; - base::ThreadTaskRunnerHandle::Get()->PostDelayedTask( - FROM_HERE, run_loop.QuitClosure(), TestTimeouts::action_timeout()); - run_loop.Run(); + PumpSyncThread(); // |fake_manager_factory_|'s fake_manager() is set on the sync // thread, but we can rely on the message loop barriers to // guarantee that we see the updated value. DCHECK(fake_manager_); } - // Returns DataTypeConfigStateMap with all |enabled_types_| in - // CONFIGURE_ACTIVE state and all remaining types DISABLED. - ModelTypeConfigurer::DataTypeConfigStateMap ConfigStateMapForEnabledTypes() { - ModelTypeConfigurer::DataTypeConfigStateMap config_state_map; - ModelTypeConfigurer::SetDataTypesState( - ModelTypeConfigurer::CONFIGURE_ACTIVE, enabled_types_, - &config_state_map); - ModelTypeConfigurer::SetDataTypesState( - ModelTypeConfigurer::DISABLED, - Difference(ModelTypeSet::All(), enabled_types_), &config_state_map); - return config_state_map; + // Synchronously configures the backend's datatypes. + ModelTypeSet ConfigureDataTypes() { + return ConfigureDataTypesWithUnready(ModelTypeSet()); } - // Synchronously configures the backend's datatypes. 
- ModelTypeSet ConfigureDataTypes( - const ModelTypeConfigurer::DataTypeConfigStateMap& config_state_map) { - ModelTypeSet ready_types = backend_->ConfigureDataTypes( - CONFIGURE_REASON_RECONFIGURATION, config_state_map, - base::Bind(&SyncEngineTest::DownloadReady, base::Unretained(this)), - base::Bind(&SyncEngineTest::OnDownloadRetry, base::Unretained(this))); - base::RunLoop run_loop; - base::ThreadTaskRunnerHandle::Get()->PostDelayedTask( - FROM_HERE, run_loop.QuitClosure(), TestTimeouts::action_timeout()); - run_loop.Run(); + ModelTypeSet ConfigureDataTypesWithUnready(ModelTypeSet unready_types) { + ModelTypeSet disabled_types = + Difference(ModelTypeSet::All(), enabled_types_); + + ModelTypeConfigurer::ConfigureParams params; + params.reason = CONFIGURE_REASON_RECONFIGURATION; + params.enabled_types = Difference(enabled_types_, unready_types); + params.disabled_types = Union(disabled_types, unready_types); + params.to_download = Difference(params.enabled_types, engine_types_); + if (!params.to_download.Empty()) { + params.to_download.Put(NIGORI); + } + params.to_purge = Intersection(engine_types_, disabled_types); + params.ready_task = + base::Bind(&SyncEngineTest::DownloadReady, base::Unretained(this)); + params.retry_callback = + base::Bind(&SyncEngineTest::OnDownloadRetry, base::Unretained(this)); + + ModelTypeSet ready_types = + Difference(params.enabled_types, params.to_download); + backend_->ConfigureDataTypes(std::move(params)); + PumpSyncThread(); + return ready_types; } protected: void DownloadReady(ModelTypeSet succeeded_types, ModelTypeSet failed_types) { + engine_types_.PutAll(succeeded_types); base::MessageLoop::current()->QuitWhenIdle(); } void OnDownloadRetry() { NOTIMPLEMENTED(); } + void SetEngineTypes(ModelTypeSet engine_types) { + EXPECT_TRUE(engine_types_.Empty()); + engine_types_ = engine_types; + } + + void PumpSyncThread() { + base::RunLoop run_loop; + base::ThreadTaskRunnerHandle::Get()->PostDelayedTask( + FROM_HERE, 
run_loop.QuitClosure(), TestTimeouts::action_timeout()); + run_loop.Run(); + } + base::MessageLoop message_loop_; base::ScopedTempDir temp_dir_; sync_preferences::TestingPrefServiceSyncable pref_service_; base::Thread sync_thread_; - StrictMock<MockSyncEngineHost> mock_host_; + TestSyncEngineHost host_; SyncCredentials credentials_; BackendSyncClient sync_client_; TestUnrecoverableErrorHandler test_unrecoverable_error_handler_; @@ -287,6 +290,7 @@ std::unique_ptr<SyncBackendHostImpl> backend_; std::unique_ptr<FakeSyncManagerFactory> fake_manager_factory_; FakeSyncManager* fake_manager_; + ModelTypeSet engine_types_; ModelTypeSet enabled_types_; std::unique_ptr<NetworkResources> network_resources_; std::unique_ptr<SyncEncryptionHandler::NigoriState> saved_nigori_state_; @@ -313,8 +317,7 @@ fake_manager_->GetTypesWithEmptyProgressMarkerToken(ControlTypes()) .Empty()); - ModelTypeSet ready_types = - ConfigureDataTypes(ConfigStateMapForEnabledTypes()); + ModelTypeSet ready_types = ConfigureDataTypes(); // Nigori is always downloaded so won't be ready. EXPECT_EQ(Difference(ControlTypes(), ModelTypeSet(NIGORI)), ready_types); EXPECT_TRUE(fake_manager_->GetAndResetDownloadedTypes().HasAll( @@ -343,8 +346,7 @@ fake_manager_->GetTypesWithEmptyProgressMarkerToken(enabled_types_) .Empty()); - ModelTypeSet ready_types = - ConfigureDataTypes(ConfigStateMapForEnabledTypes()); + ModelTypeSet ready_types = ConfigureDataTypes(); EXPECT_EQ(enabled_types_, ready_types); EXPECT_TRUE(fake_manager_->GetAndResetDownloadedTypes().Empty()); EXPECT_TRUE( @@ -380,8 +382,7 @@ fake_manager_->GetTypesWithEmptyProgressMarkerToken(enabled_types_)); // Now do the actual configuration, which should download and apply bookmarks. 
- ModelTypeSet ready_types = - ConfigureDataTypes(ConfigStateMapForEnabledTypes()); + ModelTypeSet ready_types = ConfigureDataTypes(); EXPECT_EQ(full_types, ready_types); EXPECT_TRUE( Intersection(fake_manager_->GetAndResetPurgedTypes(), enabled_types_) @@ -414,8 +415,7 @@ fake_manager_->GetAndResetPurgedTypes(); // The actual configuration should redownload and apply all the enabled types. - ModelTypeSet ready_types = - ConfigureDataTypes(ConfigStateMapForEnabledTypes()); + ModelTypeSet ready_types = ConfigureDataTypes(); // Nigori is always downloaded so won't be ready. EXPECT_EQ(Difference(ControlTypes(), ModelTypeSet(NIGORI)), ready_types); EXPECT_TRUE(fake_manager_->GetAndResetDownloadedTypes().HasAll( @@ -434,8 +434,7 @@ // Simulate first time sync. InitializeBackend(true); fake_manager_->GetAndResetPurgedTypes(); - ModelTypeSet ready_types = - ConfigureDataTypes(ConfigStateMapForEnabledTypes()); + ModelTypeSet ready_types = ConfigureDataTypes(); // Nigori is always downloaded so won't be ready. EXPECT_EQ(Difference(ControlTypes(), ModelTypeSet(NIGORI)), ready_types); EXPECT_EQ(enabled_types_, fake_manager_->GetAndResetDownloadedTypes()); @@ -451,7 +450,7 @@ ModelTypeSet disabled_types(BOOKMARKS, SEARCH_ENGINES); ModelTypeSet old_types = enabled_types_; enabled_types_.RemoveAll(disabled_types); - ready_types = ConfigureDataTypes(ConfigStateMapForEnabledTypes()); + ready_types = ConfigureDataTypes(); // Only those datatypes disabled should be cleaned. Nothing should be // downloaded. @@ -470,8 +469,7 @@ // Simulate first time sync. InitializeBackend(true); fake_manager_->GetAndResetPurgedTypes(); - ModelTypeSet ready_types = - ConfigureDataTypes(ConfigStateMapForEnabledTypes()); + ModelTypeSet ready_types = ConfigureDataTypes(); // Nigori is always downloaded so won't be ready. 
EXPECT_EQ(Difference(ControlTypes(), ModelTypeSet(NIGORI)), ready_types); EXPECT_EQ(enabled_types_, fake_manager_->GetAndResetDownloadedTypes()); @@ -486,7 +484,7 @@ // Then add two datatypes. ModelTypeSet new_types(EXTENSIONS, APPS); enabled_types_.PutAll(new_types); - ready_types = ConfigureDataTypes(ConfigStateMapForEnabledTypes()); + ready_types = ConfigureDataTypes(); // Only those datatypes added should be downloaded (plus nigori). Nothing // should be cleaned aside from the disabled types. @@ -508,8 +506,7 @@ // Simulate first time sync. InitializeBackend(true); fake_manager_->GetAndResetPurgedTypes(); - ModelTypeSet ready_types = - ConfigureDataTypes(ConfigStateMapForEnabledTypes()); + ModelTypeSet ready_types = ConfigureDataTypes(); // Nigori is always downloaded so won't be ready. EXPECT_EQ(Difference(ControlTypes(), ModelTypeSet(NIGORI)), ready_types); EXPECT_EQ(enabled_types_, fake_manager_->GetAndResetDownloadedTypes()); @@ -527,7 +524,7 @@ ModelTypeSet new_types(EXTENSIONS, APPS); enabled_types_.PutAll(new_types); enabled_types_.RemoveAll(disabled_types); - ready_types = ConfigureDataTypes(ConfigStateMapForEnabledTypes()); + ready_types = ConfigureDataTypes(); // Only those datatypes added should be downloaded (plus nigori). Nothing // should be cleaned aside from the disabled types. @@ -564,8 +561,7 @@ enabled_types_)); // Downloads and applies the new types (plus nigori). - ModelTypeSet ready_types = - ConfigureDataTypes(ConfigStateMapForEnabledTypes()); + ModelTypeSet ready_types = ConfigureDataTypes(); new_types.Put(NIGORI); EXPECT_EQ(Difference(old_types, ModelTypeSet(NIGORI)), ready_types); @@ -608,8 +604,7 @@ // Downloads and applies the new types and partial types (which includes // nigori anyways). 
- ModelTypeSet ready_types = - ConfigureDataTypes(ConfigStateMapForEnabledTypes()); + ModelTypeSet ready_types = ConfigureDataTypes(); EXPECT_EQ(full_types, ready_types); EXPECT_EQ(Union(new_types, partial_types), fake_manager_->GetAndResetDownloadedTypes()); @@ -716,25 +711,20 @@ InitializeBackend(true); // First enable the types. - ModelTypeSet ready_types = - ConfigureDataTypes(ConfigStateMapForEnabledTypes()); + ModelTypeSet ready_types = ConfigureDataTypes(); // Nigori is always downloaded so won't be ready. EXPECT_EQ(Difference(ControlTypes(), ModelTypeSet(NIGORI)), ready_types); // Then mark the error types as unready (disables without purging). - ModelTypeConfigurer::DataTypeConfigStateMap config_state_map = - ConfigStateMapForEnabledTypes(); - ModelTypeConfigurer::SetDataTypesState(ModelTypeConfigurer::UNREADY, - error_types, &config_state_map); - ready_types = ConfigureDataTypes(config_state_map); + ready_types = ConfigureDataTypesWithUnready(error_types); EXPECT_EQ(Difference(enabled_types_, error_types), ready_types); EXPECT_TRUE( fake_manager_->GetTypesWithEmptyProgressMarkerToken(error_types).Empty()); // Lastly explicitly disable the error types, which should result in a purge. enabled_types_.RemoveAll(error_types); - ready_types = ConfigureDataTypes(ConfigStateMapForEnabledTypes()); + ready_types = ConfigureDataTypes(); EXPECT_EQ(Difference(enabled_types_, error_types), ready_types); EXPECT_FALSE( fake_manager_->GetTypesWithEmptyProgressMarkerToken(error_types).Empty()); @@ -823,36 +813,6 @@ backend_.reset(); } -// Ensure that types in CONFIGURE_CLEAN state are unapplied. -TEST_F(SyncEngineTest, ConfigureCelanTypesAreUnapplied) { - ModelTypeSet clean_types(AUTOFILL); - - InitializeBackend(true); - - // First enable the types. - ModelTypeSet ready_types = - ConfigureDataTypes(ConfigStateMapForEnabledTypes()); - EXPECT_TRUE( - fake_manager_->GetTypesWithEmptyProgressMarkerToken(clean_types).Empty()); - - // Then unapply AUTOFILL. 
- ModelTypeConfigurer::DataTypeConfigStateMap config_state_map = - ConfigStateMapForEnabledTypes(); - ModelTypeConfigurer::SetDataTypesState(ModelTypeConfigurer::CONFIGURE_CLEAN, - clean_types, &config_state_map); - - ready_types = ConfigureDataTypes(config_state_map); - - // Autofill should be unapplied as part of PurgeDisabledTypes, but should - // retain progress markers. - ModelTypeSet purged_types = fake_manager_->GetAndResetPurgedTypes(); - ModelTypeSet unapplied_types = fake_manager_->GetAndResetUnappliedTypes(); - EXPECT_EQ(unapplied_types, clean_types); - EXPECT_TRUE(purged_types.HasAll(clean_types)); - EXPECT_TRUE( - fake_manager_->GetTypesWithEmptyProgressMarkerToken(clean_types).Empty()); -} - } // namespace } // namespace syncer
diff --git a/components/sync/driver/model_type_controller.cc b/components/sync/driver/model_type_controller.cc index 9dbe973e..bb93d049 100644 --- a/components/sync/driver/model_type_controller.cc +++ b/components/sync/driver/model_type_controller.cc
@@ -159,13 +159,19 @@ LoadModelsDone(result, error); } -void ModelTypeController::RegisterWithBackend(ModelTypeConfigurer* configurer) { +void ModelTypeController::RegisterWithBackend( + base::Callback<void(bool)> set_downloaded, + ModelTypeConfigurer* configurer) { DCHECK(CalledOnValidThread()); if (activated_) return; DCHECK(configurer); DCHECK(activation_context_); DCHECK_EQ(MODEL_LOADED, state_); + // Inform the DataTypeManager whether our initial download is complete. + set_downloaded.Run(activation_context_->model_type_state.initial_sync_done()); + // Pass activation context to ModelTypeRegistry, where ModelTypeWorker gets + // created and connected with ModelTypeProcessor. configurer->ActivateNonBlockingDataType(type(), std::move(activation_context_)); activated_ = true;
diff --git a/components/sync/driver/model_type_controller.h b/components/sync/driver/model_type_controller.h index 7efa58f..caf3021 100644 --- a/components/sync/driver/model_type_controller.h +++ b/components/sync/driver/model_type_controller.h
@@ -36,11 +36,8 @@ void LoadModels(const ModelLoadCallback& model_load_callback) override; void GetAllNodes(const AllNodesCallback& callback) override; void GetStatusCounters(const StatusCountersCallback& callback) override; - - // Registers non-blocking data type with sync backend. In the process the - // activation context is passed to ModelTypeRegistry, where ModelTypeWorker - // gets created and connected with ModelTypeProcessor. - void RegisterWithBackend(ModelTypeConfigurer* configurer) override; + void RegisterWithBackend(base::Callback<void(bool)> set_downloaded, + ModelTypeConfigurer* configurer) override; void StartAssociating(const StartCallback& start_callback) override; void ActivateDataType(ModelTypeConfigurer* configurer) override; void DeactivateDataType(ModelTypeConfigurer* configurer) override;
diff --git a/components/sync/driver/model_type_controller_unittest.cc b/components/sync/driver/model_type_controller_unittest.cc index c675192..3f491b3a 100644 --- a/components/sync/driver/model_type_controller_unittest.cc +++ b/components/sync/driver/model_type_controller_unittest.cc
@@ -34,6 +34,11 @@ const ModelType kTestModelType = AUTOFILL; +void SetBool(bool* called, bool* out, bool in) { + *called = true; + *out = in; +} + // A change processor for testing that connects using a thread-jumping proxy, // tracks connected state, and counts DisableSync calls. class TestModelTypeProcessor : public FakeModelTypeChangeProcessor, @@ -48,6 +53,8 @@ const StartCallback& callback) override { std::unique_ptr<ActivationContext> activation_context = base::MakeUnique<ActivationContext>(); + activation_context->model_type_state.set_initial_sync_done( + initial_sync_done_); activation_context->type_processor = base::MakeUnique<ModelTypeProcessorProxy>( weak_factory_.GetWeakPtr(), base::ThreadTaskRunnerHandle::Get()); @@ -61,9 +68,14 @@ } void DisconnectSync() override { is_connected_ = false; } + void set_initial_sync_done(bool initial_sync_done) { + initial_sync_done_ = initial_sync_done; + } + bool is_connected() { return is_connected_; } private: + bool initial_sync_done_ = false; bool is_connected_ = false; int* disable_sync_call_count_; base::WeakPtrFactory<TestModelTypeProcessor> weak_factory_; @@ -76,13 +88,8 @@ TestModelTypeConfigurer() {} ~TestModelTypeConfigurer() override {} - ModelTypeSet ConfigureDataTypes( - ConfigureReason reason, - const DataTypeConfigStateMap& config_state_map, - const base::Callback<void(ModelTypeSet, ModelTypeSet)>& ready_task, - const base::Callback<void()>& retry_callback) override { + void ConfigureDataTypes(ConfigureParams params) override { NOTREACHED() << "Not implemented."; - return ModelTypeSet(); } void ActivateDirectoryDataType(ModelType type, @@ -144,7 +151,14 @@ base::Unretained(this))); } - void RegisterWithBackend() { controller_->RegisterWithBackend(&configurer_); } + void RegisterWithBackend(bool expect_downloaded) { + bool called = false; + bool downloaded; + controller_->RegisterWithBackend(base::Bind(&SetBool, &called, &downloaded), + &configurer_); + EXPECT_TRUE(called); + 
EXPECT_EQ(expect_downloaded, downloaded); + } void StartAssociating() { controller_->StartAssociating(base::Bind( @@ -186,6 +200,10 @@ } } + void SetInitialSyncDone(bool initial_sync_done) { + processor_->set_initial_sync_done(initial_sync_done); + } + SyncPrefs* sync_prefs() { return &sync_prefs_; } DataTypeController* controller() { return controller_.get(); } int load_models_done_count() { return load_models_done_count_; } @@ -275,27 +293,37 @@ TEST_F(ModelTypeControllerTest, LoadModelsTwice) { LoadModels(); RunAllTasks(); - LoadModels(); EXPECT_EQ(DataTypeController::MODEL_LOADED, controller()->state()); - // The second LoadModels call should set the error. + EXPECT_FALSE(load_models_last_error().IsSet()); + // A second LoadModels call should set the error. + LoadModels(); EXPECT_TRUE(load_models_last_error().IsSet()); } -TEST_F(ModelTypeControllerTest, ActivateDataTypeOnBackendThread) { +TEST_F(ModelTypeControllerTest, Activate) { LoadModels(); RunAllTasks(); EXPECT_EQ(DataTypeController::MODEL_LOADED, controller()->state()); - RegisterWithBackend(); + RegisterWithBackend(false); ExpectProcessorConnected(true); StartAssociating(); EXPECT_EQ(DataTypeController::RUNNING, controller()->state()); } +TEST_F(ModelTypeControllerTest, ActivateWithInitialSyncDone) { + SetInitialSyncDone(true); + LoadModels(); + RunAllTasks(); + EXPECT_EQ(DataTypeController::MODEL_LOADED, controller()->state()); + RegisterWithBackend(true); + ExpectProcessorConnected(true); +} + TEST_F(ModelTypeControllerTest, Stop) { LoadModels(); RunAllTasks(); - RegisterWithBackend(); + RegisterWithBackend(false); ExpectProcessorConnected(true); StartAssociating();
diff --git a/components/sync/driver/proxy_data_type_controller.cc b/components/sync/driver/proxy_data_type_controller.cc index d2ff0d7..f3edbba 100644 --- a/components/sync/driver/proxy_data_type_controller.cc +++ b/components/sync/driver/proxy_data_type_controller.cc
@@ -29,6 +29,7 @@ } void ProxyDataTypeController::RegisterWithBackend( + base::Callback<void(bool)> set_downloaded, ModelTypeConfigurer* configurer) {} void ProxyDataTypeController::StartAssociating(
diff --git a/components/sync/driver/proxy_data_type_controller.h b/components/sync/driver/proxy_data_type_controller.h index 670a33f..ba8645c 100644 --- a/components/sync/driver/proxy_data_type_controller.h +++ b/components/sync/driver/proxy_data_type_controller.h
@@ -24,7 +24,8 @@ // DataTypeController interface. bool ShouldLoadModelBeforeConfigure() const override; void LoadModels(const ModelLoadCallback& model_load_callback) override; - void RegisterWithBackend(ModelTypeConfigurer* configurer) override; + void RegisterWithBackend(base::Callback<void(bool)> set_downloaded, + ModelTypeConfigurer* configurer) override; void StartAssociating(const StartCallback& start_callback) override; void Stop() override; std::string name() const override;
diff --git a/components/sync/engine/fake_sync_engine.cc b/components/sync/engine/fake_sync_engine.cc index 3dab254..284b3b1 100644 --- a/components/sync/engine/fake_sync_engine.cc +++ b/components/sync/engine/fake_sync_engine.cc
@@ -37,13 +37,7 @@ void FakeSyncEngine::Shutdown(ShutdownReason reason) {} -ModelTypeSet FakeSyncEngine::ConfigureDataTypes( - ConfigureReason reason, - const DataTypeConfigStateMap& config_state_map, - const base::Callback<void(ModelTypeSet, ModelTypeSet)>& ready_task, - const base::Callback<void()>& retry_callback) { - return ModelTypeSet(); -} +void FakeSyncEngine::ConfigureDataTypes(ConfigureParams params) {} void FakeSyncEngine::EnableEncryptEverything() {}
diff --git a/components/sync/engine/fake_sync_engine.h b/components/sync/engine/fake_sync_engine.h index 618fcdec..242d4010 100644 --- a/components/sync/engine/fake_sync_engine.h +++ b/components/sync/engine/fake_sync_engine.h
@@ -43,11 +43,7 @@ void Shutdown(ShutdownReason reason) override; - ModelTypeSet ConfigureDataTypes( - ConfigureReason reason, - const DataTypeConfigStateMap& config_state_map, - const base::Callback<void(ModelTypeSet, ModelTypeSet)>& ready_task, - const base::Callback<void()>& retry_callback) override; + void ConfigureDataTypes(ConfigureParams params) override; void EnableEncryptEverything() override;
diff --git a/components/sync/engine/model_type_configurer.cc b/components/sync/engine/model_type_configurer.cc index 914acc4..f44117b8 100644 --- a/components/sync/engine/model_type_configurer.cc +++ b/components/sync/engine/model_type_configurer.cc
@@ -6,26 +6,14 @@ namespace syncer { -// static -ModelTypeSet ModelTypeConfigurer::GetDataTypesInState( - DataTypeConfigState state, - const DataTypeConfigStateMap& state_map) { - ModelTypeSet types; - for (DataTypeConfigStateMap::const_iterator type_it = state_map.begin(); - type_it != state_map.end(); ++type_it) { - if (type_it->second == state) - types.Put(type_it->first); - } - return types; -} +ModelTypeConfigurer::ConfigureParams::ConfigureParams() = default; +ModelTypeConfigurer::ConfigureParams::ConfigureParams(ConfigureParams&& other) = + default; +ModelTypeConfigurer::ConfigureParams::~ConfigureParams() = default; +ModelTypeConfigurer::ConfigureParams& ModelTypeConfigurer::ConfigureParams:: +operator=(ConfigureParams&& other) = default; -// static -void ModelTypeConfigurer::SetDataTypesState(DataTypeConfigState state, - ModelTypeSet types, - DataTypeConfigStateMap* state_map) { - for (ModelTypeSet::Iterator it = types.First(); it.Good(); it.Inc()) { - (*state_map)[it.Get()] = state; - } -} +ModelTypeConfigurer::ModelTypeConfigurer() = default; +ModelTypeConfigurer::~ModelTypeConfigurer() = default; } // namespace syncer
diff --git a/components/sync/engine/model_type_configurer.h b/components/sync/engine/model_type_configurer.h index d1f9433..a913a35 100644 --- a/components/sync/engine/model_type_configurer.h +++ b/components/sync/engine/model_type_configurer.h
@@ -23,43 +23,38 @@ // removed data types. class ModelTypeConfigurer { public: - enum DataTypeConfigState { - CONFIGURE_ACTIVE, // Actively being configured. Data of such types - // will be downloaded if not present locally. - CONFIGURE_INACTIVE, // Already configured or to be configured in future. - // Data of such types is left as it is, no - // downloading or purging. - CONFIGURE_CLEAN, // Actively being configured but requiring unapply - // and GetUpdates first (e.g. for persistence errors). - DISABLED, // Not syncing. Disabled by user. - FATAL, // Not syncing due to unrecoverable error. - CRYPTO, // Not syncing due to a cryptographer error. - UNREADY, // Not syncing due to transient error. + // Utility struct for holding ConfigureDataTypes options. + struct ConfigureParams { + ConfigureParams(); + ConfigureParams(ConfigureParams&& other); + ~ConfigureParams(); + ConfigureParams& operator=(ConfigureParams&& other); + + ConfigureReason reason; + ModelTypeSet enabled_types; + ModelTypeSet disabled_types; + ModelTypeSet to_download; + ModelTypeSet to_purge; + ModelTypeSet to_journal; + ModelTypeSet to_unapply; + // Run when configuration is done with the set of all types that failed + // configuration (if its argument isn't empty, an error was encountered). + // TODO(akalin): Use a Delegate class with OnConfigureSuccess, + // OnConfigureFailure, and OnConfigureRetry instead of a pair of callbacks. + // The awkward part is handling when SyncEngine calls ConfigureDataTypes on + // itself to configure Nigori. + base::Callback<void(ModelTypeSet, ModelTypeSet)> ready_task; + base::Closure retry_callback; + + private: + DISALLOW_COPY_AND_ASSIGN(ConfigureParams); }; - typedef std::map<ModelType, DataTypeConfigState> DataTypeConfigStateMap; - // Configures sync for data types in config_state_map according to the states. 
- // |ready_task| is called on the same thread as ConfigureDataTypes - // is called when configuration is done with the set of data types - // that succeeded/failed configuration (i.e., configuration succeeded iff - // the failed set is empty). - // Returns: the set of types that are already configured and are ready to - // start. - // - // TODO(akalin): Use a Delegate class with OnConfigureSuccess, - // OnConfigureFailure, and OnConfigureRetry instead of a pair of callbacks. - // The awkward part is handling when SyncEngine calls ConfigureDataTypes on - // itself to configure Nigori. - virtual ModelTypeSet ConfigureDataTypes( - ConfigureReason reason, - const DataTypeConfigStateMap& config_state_map, - const base::Callback<void(ModelTypeSet, ModelTypeSet)>& ready_task, - const base::Callback<void()>& retry_callback) = 0; + ModelTypeConfigurer(); + virtual ~ModelTypeConfigurer(); - // Return model types in |state_map| that match |state|. - static ModelTypeSet GetDataTypesInState( - DataTypeConfigState state, - const DataTypeConfigStateMap& state_map); + // Changes the set of data types that are currently being synced. + virtual void ConfigureDataTypes(ConfigureParams params) = 0; // Activates change processing for the given directory data type. This must // be called synchronously with the data type's model association so @@ -80,14 +75,6 @@ // Deactivates change processing for the given non-blocking data type. virtual void DeactivateNonBlockingDataType(ModelType type) = 0; - - // Set state of |types| in |state_map| to |state|. - static void SetDataTypesState(DataTypeConfigState state, - ModelTypeSet types, - DataTypeConfigStateMap* state_map); - - protected: - virtual ~ModelTypeConfigurer() {} }; } // namespace syncer
diff --git a/components/sync/engine/sync_engine.h b/components/sync/engine/sync_engine.h index cdfff52..6c1d9dc 100644 --- a/components/sync/engine/sync_engine.h +++ b/components/sync/engine/sync_engine.h
@@ -131,19 +131,6 @@ // Must be called *after* StopSyncingForShutdown. virtual void Shutdown(ShutdownReason reason) = 0; - // Changes the set of data types that are currently being synced. - // The ready_task will be run when configuration is done with the - // set of all types that failed configuration (i.e., if its argument - // is non-empty, then an error was encountered). - // Returns the set of types that are ready to start without needing any - // further sync activity. - // ModelTypeConfigurer implementation. - ModelTypeSet ConfigureDataTypes( - ConfigureReason reason, - const DataTypeConfigStateMap& config_state_map, - const base::Callback<void(ModelTypeSet, ModelTypeSet)>& ready_task, - const base::Callback<void()>& retry_callback) override = 0; - // Turns on encryption of all present and future sync data. virtual void EnableEncryptEverything() = 0;
diff --git a/components/sync/engine/sync_engine_host_stub.cc b/components/sync/engine/sync_engine_host_stub.cc new file mode 100644 index 0000000..027025e --- /dev/null +++ b/components/sync/engine/sync_engine_host_stub.cc
@@ -0,0 +1,57 @@ +// Copyright 2016 The Chromium Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#include "components/sync/engine/sync_engine_host_stub.h" + +namespace syncer { + +SyncEngineHostStub::SyncEngineHostStub() = default; +SyncEngineHostStub::~SyncEngineHostStub() = default; + +void SyncEngineHostStub::OnEngineInitialized( + ModelTypeSet initial_types, + const WeakHandle<JsBackend>& js_backend, + const WeakHandle<DataTypeDebugInfoListener>& debug_info_listener, + const std::string& cache_guid, + bool success) {} + +void SyncEngineHostStub::OnSyncCycleCompleted() {} + +void SyncEngineHostStub::OnProtocolEvent(const ProtocolEvent& event) {} + +void SyncEngineHostStub::OnDirectoryTypeCommitCounterUpdated( + ModelType type, + const CommitCounters& counters) {} + +void SyncEngineHostStub::OnDirectoryTypeUpdateCounterUpdated( + ModelType type, + const UpdateCounters& counters) {} + +void SyncEngineHostStub::OnDatatypeStatusCounterUpdated( + ModelType type, + const StatusCounters& counters) {} + +void SyncEngineHostStub::OnConnectionStatusChange(ConnectionStatus status) {} + +void SyncEngineHostStub::OnPassphraseRequired( + PassphraseRequiredReason reason, + const sync_pb::EncryptedData& pending_keys) {} + +void SyncEngineHostStub::OnPassphraseAccepted() {} + +void SyncEngineHostStub::OnEncryptedTypesChanged(ModelTypeSet encrypted_types, + bool encrypt_everything) {} + +void SyncEngineHostStub::OnEncryptionComplete() {} + +void SyncEngineHostStub::OnMigrationNeededForTypes(ModelTypeSet types) {} + +void SyncEngineHostStub::OnExperimentsChanged(const Experiments& experiments) {} + +void SyncEngineHostStub::OnActionableError(const SyncProtocolError& error) {} + +void SyncEngineHostStub::OnLocalSetPassphraseEncryption( + const SyncEncryptionHandler::NigoriState& nigori_state) {} + +} // namespace syncer
diff --git a/components/sync/engine/sync_engine_host_stub.h b/components/sync/engine/sync_engine_host_stub.h new file mode 100644 index 0000000..feef517 --- /dev/null +++ b/components/sync/engine/sync_engine_host_stub.h
@@ -0,0 +1,53 @@ +// Copyright 2016 The Chromium Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#ifndef COMPONENTS_SYNC_ENGINE_SYNC_ENGINE_HOST_STUB_H_ +#define COMPONENTS_SYNC_ENGINE_SYNC_ENGINE_HOST_STUB_H_ + +#include <string> + +#include "components/sync/engine/sync_engine_host.h" + +namespace syncer { + +class SyncEngineHostStub : public SyncEngineHost { + public: + SyncEngineHostStub(); + ~SyncEngineHostStub() override; + + // SyncEngineHost implementation. + void OnEngineInitialized( + ModelTypeSet initial_types, + const WeakHandle<JsBackend>& js_backend, + const WeakHandle<DataTypeDebugInfoListener>& debug_info_listener, + const std::string& cache_guid, + bool success) override; + void OnSyncCycleCompleted() override; + void OnProtocolEvent(const ProtocolEvent& event) override; + void OnDirectoryTypeCommitCounterUpdated( + ModelType type, + const CommitCounters& counters) override; + void OnDirectoryTypeUpdateCounterUpdated( + ModelType type, + const UpdateCounters& counters) override; + void OnDatatypeStatusCounterUpdated(ModelType type, + const StatusCounters& counters) override; + void OnConnectionStatusChange(ConnectionStatus status) override; + void OnPassphraseRequired( + PassphraseRequiredReason reason, + const sync_pb::EncryptedData& pending_keys) override; + void OnPassphraseAccepted() override; + void OnEncryptedTypesChanged(ModelTypeSet encrypted_types, + bool encrypt_everything) override; + void OnEncryptionComplete() override; + void OnMigrationNeededForTypes(ModelTypeSet types) override; + void OnExperimentsChanged(const Experiments& experiments) override; + void OnActionableError(const SyncProtocolError& error) override; + void OnLocalSetPassphraseEncryption( + const SyncEncryptionHandler::NigoriState& nigori_state) override; +}; + +} // namespace syncer + +#endif // COMPONENTS_SYNC_ENGINE_SYNC_ENGINE_HOST_STUB_H_
diff --git a/components/sync/model/fake_model_type_change_processor.cc b/components/sync/model/fake_model_type_change_processor.cc index e0df9d8..d415437 100644 --- a/components/sync/model/fake_model_type_change_processor.cc +++ b/components/sync/model/fake_model_type_change_processor.cc
@@ -9,6 +9,7 @@ #include "components/sync/model/metadata_batch.h" #include "components/sync/model/model_type_sync_bridge.h" #include "components/sync/model/sync_error.h" +#include "testing/gtest/include/gtest/gtest.h" namespace syncer { @@ -19,8 +20,12 @@ return base::WrapUnique(new FakeModelTypeChangeProcessor()); } -FakeModelTypeChangeProcessor::FakeModelTypeChangeProcessor() {} -FakeModelTypeChangeProcessor::~FakeModelTypeChangeProcessor() {} +FakeModelTypeChangeProcessor::FakeModelTypeChangeProcessor() = default; + +FakeModelTypeChangeProcessor::~FakeModelTypeChangeProcessor() { + // If this fails we were expecting an error but never got one. + EXPECT_FALSE(expect_error_); +} void FakeModelTypeChangeProcessor::Put( const std::string& client_tag, @@ -32,7 +37,6 @@ MetadataChangeList* metadata_change_list) {} void FakeModelTypeChangeProcessor::OnMetadataLoaded( - SyncError error, std::unique_ptr<MetadataBatch> batch) {} void FakeModelTypeChangeProcessor::OnSyncStarting( @@ -49,10 +53,19 @@ return true; } -SyncError FakeModelTypeChangeProcessor::CreateAndUploadError( +void FakeModelTypeChangeProcessor::ReportError(const ModelError& error) { + EXPECT_TRUE(expect_error_); + expect_error_ = false; +} + +void FakeModelTypeChangeProcessor::ReportError( const tracked_objects::Location& location, const std::string& message) { - return SyncError(); + ReportError(ModelError(location, message)); +} + +void FakeModelTypeChangeProcessor::ExpectError() { + expect_error_ = true; } } // namespace syncer
diff --git a/components/sync/model/fake_model_type_change_processor.h b/components/sync/model/fake_model_type_change_processor.h index bee92ef..3463ec07 100644 --- a/components/sync/model/fake_model_type_change_processor.h +++ b/components/sync/model/fake_model_type_change_processor.h
@@ -10,6 +10,7 @@ #include "components/sync/base/model_type.h" #include "components/sync/model/metadata_change_list.h" +#include "components/sync/model/model_error.h" #include "components/sync/model/model_type_change_processor.h" namespace syncer { @@ -32,14 +33,21 @@ MetadataChangeList* metadata_change_list) override; void Delete(const std::string& client_tag, MetadataChangeList* metadata_change_list) override; - void OnMetadataLoaded(SyncError error, - std::unique_ptr<MetadataBatch> batch) override; + void OnMetadataLoaded(std::unique_ptr<MetadataBatch> batch) override; void OnSyncStarting(std::unique_ptr<DataTypeErrorHandler> error_handler, const StartCallback& callback) override; void DisableSync() override; bool IsTrackingMetadata() override; - SyncError CreateAndUploadError(const tracked_objects::Location& location, - const std::string& message) override; + void ReportError(const ModelError& error) override; + void ReportError(const tracked_objects::Location& location, + const std::string& message) override; + + // Indicates that ReportError should be called in the future. + void ExpectError(); + + private: + // Whether we expect ReportError to be called. + bool expect_error_ = false; }; } // namespace syncer
diff --git a/components/sync/model/fake_model_type_sync_bridge.cc b/components/sync/model/fake_model_type_sync_bridge.cc index a3c01918..957dfd486 100644 --- a/components/sync/model/fake_model_type_sync_bridge.cc +++ b/components/sync/model/fake_model_type_sync_bridge.cc
@@ -9,6 +9,7 @@ #include "base/bind.h" #include "base/memory/ptr_util.h" #include "components/sync/base/hash_util.h" +#include "components/sync/model/model_error.h" #include "components/sync/model/mutable_data_batch.h" #include "components/sync/model_impl/in_memory_metadata_change_list.h" #include "testing/gtest/include/gtest/gtest.h" @@ -198,14 +199,14 @@ return base::MakeUnique<TestMetadataChangeList>(); } -SyncError FakeModelTypeSyncBridge::MergeSyncData( +ModelError FakeModelTypeSyncBridge::MergeSyncData( std::unique_ptr<MetadataChangeList> metadata_changes, EntityDataMap data_map) { - if (bridge_error_.IsSet()) { - SyncError error = bridge_error_; - bridge_error_ = SyncError(); - return error; + if (error_next_) { + error_next_ = false; + return ModelError(FROM_HERE, "boom"); } + // Commit any local entities that aren't being overwritten by the server. for (const auto& kv : db_->all_data()) { if (data_map.find(kv.first) == data_map.end()) { @@ -218,17 +219,17 @@ db_->PutData(kv.first, kv.second.value()); } ApplyMetadataChangeList(std::move(metadata_changes)); - return SyncError(); + return ModelError(); } -SyncError FakeModelTypeSyncBridge::ApplySyncChanges( +ModelError FakeModelTypeSyncBridge::ApplySyncChanges( std::unique_ptr<MetadataChangeList> metadata_changes, EntityChangeList entity_changes) { - if (bridge_error_.IsSet()) { - SyncError error = bridge_error_; - bridge_error_ = SyncError(); - return error; + if (error_next_) { + error_next_ = false; + return ModelError(FROM_HERE, "boom"); } + for (const EntityChange& change : entity_changes) { switch (change.type()) { case EntityChange::ACTION_ADD: @@ -246,7 +247,7 @@ } } ApplyMetadataChangeList(std::move(metadata_changes)); - return SyncError(); + return ModelError(); } void FakeModelTypeSyncBridge::ApplyMetadataChangeList( @@ -281,9 +282,9 @@ void FakeModelTypeSyncBridge::GetData(StorageKeyList keys, DataCallback callback) { - if (bridge_error_.IsSet()) { - callback.Run(bridge_error_, nullptr); - 
bridge_error_ = SyncError(); + if (error_next_) { + error_next_ = false; + change_processor()->ReportError(FROM_HERE, "boom"); return; } @@ -292,13 +293,13 @@ DCHECK(db_->HasData(key)) << "No data for " << key; batch->Put(key, CopyEntityData(db_->GetData(key))); } - callback.Run(SyncError(), std::move(batch)); + callback.Run(std::move(batch)); } void FakeModelTypeSyncBridge::GetAllData(DataCallback callback) { - if (bridge_error_.IsSet()) { - callback.Run(bridge_error_, nullptr); - bridge_error_ = SyncError(); + if (error_next_) { + error_next_ = false; + change_processor()->ReportError(FROM_HERE, "boom"); return; } @@ -306,7 +307,7 @@ for (const auto& kv : db_->all_data()) { batch->Put(kv.first, CopyEntityData(*kv.second)); } - callback.Run(SyncError(), std::move(batch)); + callback.Run(std::move(batch)); } std::string FakeModelTypeSyncBridge::GetClientTag( @@ -332,13 +333,13 @@ base::MakeUnique<ConflictResolution>(std::move(resolution)); } -void FakeModelTypeSyncBridge::ErrorOnNextCall(SyncError::ErrorType error_type) { - DCHECK(!bridge_error_.IsSet()); - bridge_error_ = SyncError(FROM_HERE, error_type, "TestError", PREFERENCES); +void FakeModelTypeSyncBridge::ErrorOnNextCall() { + EXPECT_FALSE(error_next_); + error_next_ = true; } void FakeModelTypeSyncBridge::CheckPostConditions() { - DCHECK(!bridge_error_.IsSet()); + EXPECT_FALSE(error_next_); } } // namespace syncer
diff --git a/components/sync/model/fake_model_type_sync_bridge.h b/components/sync/model/fake_model_type_sync_bridge.h index d238c04..31d95ae 100644 --- a/components/sync/model/fake_model_type_sync_bridge.h +++ b/components/sync/model/fake_model_type_sync_bridge.h
@@ -12,6 +12,7 @@ #include "components/sync/engine/non_blocking_sync_common.h" #include "components/sync/model/entity_data.h" #include "components/sync/model/metadata_batch.h" +#include "components/sync/model/model_error.h" #include "components/sync/model/model_type_sync_bridge.h" #include "components/sync/protocol/entity_metadata.pb.h" #include "components/sync/protocol/model_type_state.pb.h" @@ -102,10 +103,10 @@ // ModelTypeSyncBridge implementation std::unique_ptr<MetadataChangeList> CreateMetadataChangeList() override; - SyncError MergeSyncData( + ModelError MergeSyncData( std::unique_ptr<MetadataChangeList> metadata_change_list, EntityDataMap entity_data_map) override; - SyncError ApplySyncChanges( + ModelError ApplySyncChanges( std::unique_ptr<MetadataChangeList> metadata_change_list, EntityChangeList entity_changes) override; void GetData(StorageKeyList storage_keys, DataCallback callback) override; @@ -120,8 +121,8 @@ // is a USE_NEW resolution, the data will only exist for one resolve call. void SetConflictResolution(ConflictResolution resolution); - // Sets the error that the next fallible call to the bridge will generate. - void ErrorOnNextCall(SyncError::ErrorType error_type); + // Sets an error that the next fallible call to the bridge will generate. + void ErrorOnNextCall(); const Store& db() { return *db_; } @@ -139,8 +140,8 @@ // The conflict resolution to use for calls to ResolveConflict. std::unique_ptr<ConflictResolution> conflict_resolution_; - // The error to produce on the next bridge call. - SyncError bridge_error_; + // Whether an error should be produced on the next bridge call. + bool error_next_ = false; }; } // namespace syncer
diff --git a/components/sync/model/mock_model_type_store.cc b/components/sync/model/mock_model_type_store.cc index 73b512c..4c5a08fb 100644 --- a/components/sync/model/mock_model_type_store.cc +++ b/components/sync/model/mock_model_type_store.cc
@@ -12,12 +12,13 @@ #include "base/memory/ptr_util.h" #include "base/single_thread_task_runner.h" #include "base/threading/thread_task_runner_handle.h" +#include "components/sync/model/metadata_batch.h" +#include "components/sync/model/model_error.h" namespace syncer { -MockModelTypeStore::MockModelTypeStore() {} - -MockModelTypeStore::~MockModelTypeStore() {} +MockModelTypeStore::MockModelTypeStore() = default; +MockModelTypeStore::~MockModelTypeStore() = default; void MockModelTypeStore::ReadData(const IdList& id_list, const ReadDataCallback& callback) { @@ -46,7 +47,7 @@ read_all_metadata_handler_.Run(callback); } else { base::ThreadTaskRunnerHandle::Get()->PostTask( - FROM_HERE, base::Bind(callback, SyncError(), + FROM_HERE, base::Bind(callback, ModelError(), base::Passed(std::unique_ptr<MetadataBatch>()))); } }
diff --git a/components/sync/model/model_error.cc b/components/sync/model/model_error.cc new file mode 100644 index 0000000..6ae6610 --- /dev/null +++ b/components/sync/model/model_error.cc
@@ -0,0 +1,31 @@ +// Copyright 2017 The Chromium Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#include "components/sync/model/model_error.h" + +namespace syncer { + +ModelError::ModelError() : is_set_(false) {} + +ModelError::ModelError(const tracked_objects::Location& location, + const std::string& message) + : is_set_(true), location_(location), message_(message) {} + +ModelError::~ModelError() = default; + +bool ModelError::IsSet() const { + return is_set_; +} + +const tracked_objects::Location& ModelError::location() const { + DCHECK(IsSet()); + return location_; +} + +const std::string& ModelError::message() const { + DCHECK(IsSet()); + return message_; +} + +} // namespace syncer
diff --git a/components/sync/model/model_error.h b/components/sync/model/model_error.h new file mode 100644 index 0000000..e61816da --- /dev/null +++ b/components/sync/model/model_error.h
@@ -0,0 +1,45 @@ +// Copyright 2017 The Chromium Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#ifndef COMPONENTS_SYNC_MODEL_MODEL_ERROR_H_ +#define COMPONENTS_SYNC_MODEL_MODEL_ERROR_H_ + +#include <string> + +#include "base/location.h" + +namespace syncer { + +// A minimal error object for use by USS model type code. +class ModelError { + public: + // Creates an un-set error object (indicating an operation was successful). + ModelError(); + + // Creates a set error object with the given location and message. + ModelError(const tracked_objects::Location& location, + const std::string& message); + + ~ModelError(); + + // Whether this object represents an actual error. + bool IsSet() const; + + // The location of the error this object represents. Can only be called if the + // error is set. + const tracked_objects::Location& location() const; + + // The message explaining the error this object represents. Can only be called + // if the error is set. + const std::string& message() const; + + private: + bool is_set_; + tracked_objects::Location location_; + std::string message_; +}; + +} // namespace syncer + +#endif // COMPONENTS_SYNC_MODEL_MODEL_ERROR_H_
diff --git a/components/sync/model/model_type_change_processor.h b/components/sync/model/model_type_change_processor.h index b0018328..83844981 100644 --- a/components/sync/model/model_type_change_processor.h +++ b/components/sync/model/model_type_change_processor.h
@@ -8,11 +8,12 @@ #include <memory> #include <string> +#include "base/location.h" #include "components/sync/base/model_type.h" #include "components/sync/engine/activation_context.h" #include "components/sync/model/data_type_error_handler.h" #include "components/sync/model/entity_data.h" -#include "components/sync/model/sync_error_factory.h" +#include "components/sync/model/model_error.h" namespace syncer { @@ -23,7 +24,7 @@ // Interface used by the ModelTypeSyncBridge to inform sync of local // changes. -class ModelTypeChangeProcessor : public SyncErrorFactory { +class ModelTypeChangeProcessor { public: typedef base::Callback<void(SyncError, std::unique_ptr<ActivationContext>)> StartCallback; @@ -34,7 +35,7 @@ ModelTypeSyncBridge* bridge); ModelTypeChangeProcessor(); - ~ModelTypeChangeProcessor() override; + virtual ~ModelTypeChangeProcessor(); // Inform the processor of a new or updated entity. The |entity_data| param // does not need to be fully set, but it should at least have specifics and @@ -48,10 +49,10 @@ virtual void Delete(const std::string& storage_key, MetadataChangeList* metadata_change_list) = 0; - // Accept the initial sync metadata loaded by the bridge. This should be - // called as soon as the metadata is available to the bridge. - virtual void OnMetadataLoaded(SyncError error, - std::unique_ptr<MetadataBatch> batch) = 0; + // Accept the initial sync metadata loaded by the bridge. This must be called + // by the bridge for syncing to begin for this model type. If an error occurs, + // call ReportError instead of this. + virtual void OnMetadataLoaded(std::unique_ptr<MetadataBatch> batch) = 0; // Indicates that sync wants to connect a sync worker to this processor. Once // the processor has metadata from the bridge, it will pass the info needed @@ -71,6 +72,17 @@ // currently up to date and accurately tracking the model type's data. If // false, calls to Put and Delete will no-op and can be omitted by bridge. 
virtual bool IsTrackingMetadata() = 0; + + // Report an error in the model to sync. Should be called for any persistence + // or consistency error the bridge encounters outside of a method that allows + // returning a ModelError directly. Outstanding callbacks are not expected to + // be called after an error. This will result in sync being temporarily + // disabled for the model type (generally until the next restart). + virtual void ReportError(const ModelError& error) = 0; + + // A convenience form of the above. + virtual void ReportError(const tracked_objects::Location& location, + const std::string& message) = 0; }; } // namespace syncer
diff --git a/components/sync/model/model_type_debug_info.cc b/components/sync/model/model_type_debug_info.cc index fa104ce7..749842b 100644 --- a/components/sync/model/model_type_debug_info.cc +++ b/components/sync/model/model_type_debug_info.cc
@@ -71,7 +71,6 @@ SharedModelTypeProcessor* processor, const base::Callback<void(const ModelType, std::unique_ptr<base::ListValue>)>& callback, - SyncError error, std::unique_ptr<DataBatch> batch) { std::unique_ptr<base::ListValue> all_nodes = base::MakeUnique<base::ListValue>();
diff --git a/components/sync/model/model_type_debug_info.h b/components/sync/model/model_type_debug_info.h index a5d3779..276f6ab 100644 --- a/components/sync/model/model_type_debug_info.h +++ b/components/sync/model/model_type_debug_info.h
@@ -46,7 +46,6 @@ SharedModelTypeProcessor* processor, const base::Callback<void(const ModelType, std::unique_ptr<base::ListValue>)>& callback, - SyncError error, std::unique_ptr<DataBatch> batch); };
diff --git a/components/sync/model/model_type_store.h b/components/sync/model/model_type_store.h index 02448ea0..dd361f390 100644 --- a/components/sync/model/model_type_store.h +++ b/components/sync/model/model_type_store.h
@@ -14,7 +14,7 @@ #include "components/sync/base/model_type.h" #include "components/sync/model/metadata_batch.h" #include "components/sync/model/metadata_change_list.h" -#include "components/sync/model/sync_error.h" +#include "components/sync/model/model_error.h" namespace base { class SequencedTaskRunner; @@ -119,7 +119,7 @@ typedef base::Callback<void(Result result, std::unique_ptr<RecordList> data_records)> ReadAllDataCallback; - typedef base::Callback<void(SyncError sync_error, + typedef base::Callback<void(ModelError error, std::unique_ptr<MetadataBatch> metadata_batch)> ReadMetadataCallback;
diff --git a/components/sync/model/model_type_sync_bridge.cc b/components/sync/model/model_type_sync_bridge.cc index b3b793e..ca6d733 100644 --- a/components/sync/model/model_type_sync_bridge.cc +++ b/components/sync/model/model_type_sync_bridge.cc
@@ -44,8 +44,7 @@ // processor that there is no metadata. DisableSync() should never be called // while the models are loading, aka before the service has finished loading // the initial metadata. - change_processor_->OnMetadataLoaded(SyncError(), - base::MakeUnique<MetadataBatch>()); + change_processor_->OnMetadataLoaded(base::MakeUnique<MetadataBatch>()); } ModelTypeChangeProcessor* ModelTypeSyncBridge::change_processor() const {
diff --git a/components/sync/model/model_type_sync_bridge.h b/components/sync/model/model_type_sync_bridge.h index 8c236cc..fb597e6 100644 --- a/components/sync/model/model_type_sync_bridge.h +++ b/components/sync/model/model_type_sync_bridge.h
@@ -16,25 +16,25 @@ #include "components/sync/model/data_type_error_handler.h" #include "components/sync/model/entity_change.h" #include "components/sync/model/entity_data.h" +#include "components/sync/model/model_error.h" #include "components/sync/model/model_type_change_processor.h" -#include "components/sync/model/sync_error.h" namespace syncer { class DataBatch; class MetadataChangeList; -// Interface implemented by model types to receive updates from sync via the -// SharedModelTypeProcessor. Provides a way for sync to update the data and +// Interface implemented by model types to receive updates from sync via a +// ModelTypeChangeProcessor. Provides a way for sync to update the data and // metadata for entities, as well as the model type state. Sync bridge -// implementations have the responsibility of providing thier change_processor() -// with metadata through ModelTypeChangeProcessor::OnMetadataLoaded() as soon as -// possible. Sync will wait for this method to be called, and afterwards it will -// start calling into the bridge. +// implementations must provide their change_processor() with metadata through +// OnMetadataLoaded() as soon as possible. Once this is called, sync will +// immediately begin locally tracking changes and can start syncing with the +// server soon afterward. If an error occurs during startup, the processor's +// ReportError() method should be called instead of OnMetadataLoaded(). 
class ModelTypeSyncBridge : public base::SupportsWeakPtr<ModelTypeSyncBridge> { public: - typedef base::Callback<void(SyncError, std::unique_ptr<DataBatch>)> - DataCallback; + typedef base::Callback<void(std::unique_ptr<DataBatch>)> DataCallback; typedef std::vector<std::string> StorageKeyList; typedef base::Callback<std::unique_ptr<ModelTypeChangeProcessor>( ModelType type, @@ -63,7 +63,7 @@ // combine all change atomically, should save the metadata after the data // changes, so that this merge will be re-driven by sync if is not completely // saved during the current run. - virtual SyncError MergeSyncData( + virtual ModelError MergeSyncData( std::unique_ptr<MetadataChangeList> metadata_change_list, EntityDataMap entity_data_map) = 0; @@ -72,14 +72,18 @@ // |metadata_change_list| in case when some of the data changes are filtered // out, or even be empty in case when a commit confirmation is processed and // only the metadata needs to persisted. - virtual SyncError ApplySyncChanges( + virtual ModelError ApplySyncChanges( std::unique_ptr<MetadataChangeList> metadata_change_list, EntityChangeList entity_changes) = 0; // Asynchronously retrieve the corresponding sync data for |storage_keys|. + // |callback| should be invoked if the operation is successful, otherwise + // the processor's ReportError method should be called. virtual void GetData(StorageKeyList storage_keys, DataCallback callback) = 0; - // Asynchronously retrieve all of the local sync data. + // Asynchronously retrieve all of the local sync data. |callback| should be + // invoked if the operation is successful, otherwise the processor's + // ReportError method should be called. virtual void GetAllData(DataCallback callback) = 0; // Get or generate a client tag for |entity_data|. This must be the same tag
diff --git a/components/sync/model/model_type_sync_bridge_unittest.cc b/components/sync/model/model_type_sync_bridge_unittest.cc index c4558ac..030bee8 100644 --- a/components/sync/model/model_type_sync_bridge_unittest.cc +++ b/components/sync/model/model_type_sync_bridge_unittest.cc
@@ -27,15 +27,10 @@ void DisableSync() override { disabled_callback_.Run(); } - void OnMetadataLoaded(SyncError error, - std::unique_ptr<MetadataBatch> batch) override { - on_metadata_loaded_error_ = error; + void OnMetadataLoaded(std::unique_ptr<MetadataBatch> batch) override { on_metadata_loaded_batch_ = std::move(batch); } - const SyncError& on_metadata_loaded_error() const { - return on_metadata_loaded_error_; - } MetadataBatch* on_metadata_loaded_batch() { return on_metadata_loaded_batch_.get(); } @@ -48,7 +43,6 @@ // allows this information to reach somewhere safe instead. base::Closure disabled_callback_; - SyncError on_metadata_loaded_error_; std::unique_ptr<MetadataBatch> on_metadata_loaded_batch_; }; @@ -128,8 +122,6 @@ // processor about this. EXPECT_TRUE(bridge()->processor_disable_sync_called()); - EXPECT_FALSE( - bridge()->change_processor()->on_metadata_loaded_error().IsSet()); MetadataBatch* batch = bridge()->change_processor()->on_metadata_loaded_batch(); EXPECT_NE(nullptr, batch);
diff --git a/components/sync/model/stub_model_type_sync_bridge.cc b/components/sync/model/stub_model_type_sync_bridge.cc index b07eff4..747303c9 100644 --- a/components/sync/model/stub_model_type_sync_bridge.cc +++ b/components/sync/model/stub_model_type_sync_bridge.cc
@@ -25,16 +25,16 @@ return std::unique_ptr<MetadataChangeList>(); } -SyncError StubModelTypeSyncBridge::MergeSyncData( +ModelError StubModelTypeSyncBridge::MergeSyncData( std::unique_ptr<MetadataChangeList> metadata_change_list, EntityDataMap entity_data_map) { - return SyncError(); + return ModelError(); } -SyncError StubModelTypeSyncBridge::ApplySyncChanges( +ModelError StubModelTypeSyncBridge::ApplySyncChanges( std::unique_ptr<MetadataChangeList> metadata_change_list, EntityChangeList entity_changes) { - return SyncError(); + return ModelError(); } void StubModelTypeSyncBridge::GetData(StorageKeyList storage_keys,
diff --git a/components/sync/model/stub_model_type_sync_bridge.h b/components/sync/model/stub_model_type_sync_bridge.h index 73fadb98..38d6f30 100644 --- a/components/sync/model/stub_model_type_sync_bridge.h +++ b/components/sync/model/stub_model_type_sync_bridge.h
@@ -22,10 +22,10 @@ ~StubModelTypeSyncBridge() override; std::unique_ptr<MetadataChangeList> CreateMetadataChangeList() override; - SyncError MergeSyncData( + ModelError MergeSyncData( std::unique_ptr<MetadataChangeList> metadata_change_list, EntityDataMap entity_data_map) override; - SyncError ApplySyncChanges( + ModelError ApplySyncChanges( std::unique_ptr<MetadataChangeList> metadata_change_list, EntityChangeList entity_changes) override; void GetData(StorageKeyList storage_keys, DataCallback callback) override;
diff --git a/components/sync/model_impl/model_type_store_impl.cc b/components/sync/model_impl/model_type_store_impl.cc index 542dfb9..671cdc1 100644 --- a/components/sync/model_impl/model_type_store_impl.cc +++ b/components/sync/model_impl/model_type_store_impl.cc
@@ -13,6 +13,7 @@ #include "base/memory/ptr_util.h" #include "base/task_runner_util.h" #include "base/threading/thread_task_runner_handle.h" +#include "components/sync/model/model_error.h" #include "components/sync/model_impl/model_type_store_backend.h" #include "components/sync/protocol/entity_metadata.pb.h" #include "components/sync/protocol/model_type_state.pb.h" @@ -69,8 +70,7 @@ ModelType type, scoped_refptr<ModelTypeStoreBackend> backend, scoped_refptr<base::SequencedTaskRunner> backend_task_runner) - : type_(type), - backend_(backend), + : backend_(backend), backend_task_runner_(backend_task_runner), data_prefix_(FormatDataPrefix(type)), metadata_prefix_(FormatMetaPrefix(type)), @@ -231,7 +231,7 @@ Result result) { DCHECK(CalledOnValidThread()); if (result != Result::SUCCESS) { - callback.Run(MakeSyncError("Reading metadata failed."), + callback.Run(ModelError(FROM_HERE, "Reading metadata failed."), base::MakeUnique<MetadataBatch>()); return; } @@ -262,7 +262,7 @@ DCHECK(CalledOnValidThread()); if (result != Result::SUCCESS) { - callback.Run(MakeSyncError("Reading metadata failed."), + callback.Run(ModelError(FROM_HERE, "Reading metadata failed."), base::MakeUnique<MetadataBatch>()); return; } @@ -290,8 +290,9 @@ sync_pb::ModelTypeState state; if (!state.ParseFromString(global_metadata)) { - callback.Run(MakeSyncError("Failed to deserialize model type state."), - base::MakeUnique<MetadataBatch>()); + callback.Run( + ModelError(FROM_HERE, "Failed to deserialize model type state."), + base::MakeUnique<MetadataBatch>()); return; } metadata_batch->SetModelTypeState(state); @@ -299,18 +300,15 @@ for (const Record& r : *metadata_records.get()) { sync_pb::EntityMetadata entity_metadata; if (!entity_metadata.ParseFromString(r.value)) { - callback.Run(MakeSyncError("Failed to deserialize entity metadata."), - base::MakeUnique<MetadataBatch>()); + callback.Run( + ModelError(FROM_HERE, "Failed to deserialize entity metadata."), + base::MakeUnique<MetadataBatch>()); 
return; } metadata_batch->AddMetadata(r.id, entity_metadata); } - callback.Run(SyncError(), std::move(metadata_batch)); -} - -SyncError ModelTypeStoreImpl::MakeSyncError(const std::string& msg) { - return SyncError(FROM_HERE, SyncError::DATATYPE_ERROR, msg, type_); + callback.Run(ModelError(), std::move(metadata_batch)); } std::unique_ptr<ModelTypeStore::WriteBatch>
diff --git a/components/sync/model_impl/model_type_store_impl.h b/components/sync/model_impl/model_type_store_impl.h index f9b6733..343e4966 100644 --- a/components/sync/model_impl/model_type_store_impl.h +++ b/components/sync/model_impl/model_type_store_impl.h
@@ -111,12 +111,6 @@ const std::string& global_metadata, std::unique_ptr<RecordList> metadata_records); - // Helper function to create a SyncError with message |msg|. - SyncError MakeSyncError(const std::string& msg); - - // The model type using this store. - const ModelType type_; - // Backend should be deleted on backend thread. // To accomplish this store's dtor posts task to backend thread passing // backend ownership to task parameter.
diff --git a/components/sync/model_impl/model_type_store_impl_unittest.cc b/components/sync/model_impl/model_type_store_impl_unittest.cc index ab709dd..e8d6b766 100644 --- a/components/sync/model_impl/model_type_store_impl_unittest.cc +++ b/components/sync/model_impl/model_type_store_impl_unittest.cc
@@ -9,6 +9,7 @@ #include "base/bind.h" #include "base/message_loop/message_loop.h" #include "base/run_loop.h" +#include "components/sync/model/model_error.h" #include "components/sync/protocol/entity_metadata.pb.h" #include "components/sync/protocol/model_type_state.pb.h" #include "testing/gmock/include/gmock/gmock.h" @@ -143,11 +144,11 @@ base::Bind(&CaptureResultAndRecords, &result, data_records)); PumpLoop(); ASSERT_EQ(ModelTypeStore::Result::SUCCESS, result); - SyncError sync_error; - store->ReadAllMetadata(base::Bind(&CaptureSyncErrorAndMetadataBatch, - &sync_error, metadata_batch)); + ModelError error; + store->ReadAllMetadata( + base::Bind(&CaptureErrorAndMetadataBatch, &error, metadata_batch)); PumpLoop(); - ASSERT_FALSE(sync_error.IsSet()); + ASSERT_FALSE(error.IsSet()); } // Following functions capture parameters passed to callbacks into variables @@ -167,12 +168,12 @@ *dst_records = std::move(records); } - static void CaptureSyncErrorAndMetadataBatch( - SyncError* dst_sync_error, + static void CaptureErrorAndMetadataBatch( + ModelError* dst_error, std::unique_ptr<MetadataBatch>* dst_batch, - SyncError sync_error, + ModelError error, std::unique_ptr<MetadataBatch> batch) { - *dst_sync_error = sync_error; + *dst_error = error; *dst_batch = std::move(batch); } @@ -267,10 +268,10 @@ PumpLoop(); ASSERT_EQ(ModelTypeStore::Result::SUCCESS, result); - SyncError error; + ModelError error; std::unique_ptr<MetadataBatch> metadata_batch; store()->ReadAllMetadata( - base::Bind(&CaptureSyncErrorAndMetadataBatch, &error, &metadata_batch)); + base::Bind(&CaptureErrorAndMetadataBatch, &error, &metadata_batch)); PumpLoop(); ASSERT_FALSE(error.IsSet()); VerifyMetadata(std::move(metadata_batch), sync_pb::ModelTypeState(), @@ -285,10 +286,10 @@ // Write a ModelTypeState that can't be parsed. 
WriteRawModelTypeState(store(), "unparseable"); - SyncError error; + ModelError error; std::unique_ptr<MetadataBatch> metadata_batch; store()->ReadAllMetadata( - base::Bind(&CaptureSyncErrorAndMetadataBatch, &error, &metadata_batch)); + base::Bind(&CaptureErrorAndMetadataBatch, &error, &metadata_batch)); PumpLoop(); ASSERT_TRUE(error.IsSet()); VerifyMetadata(std::move(metadata_batch), sync_pb::ModelTypeState(), @@ -303,10 +304,10 @@ // Write an EntityMetadata that can't be parsed. WriteRawMetadata(store(), "id", "unparseable"); - SyncError error; + ModelError error; std::unique_ptr<MetadataBatch> metadata_batch; store()->ReadAllMetadata( - base::Bind(&CaptureSyncErrorAndMetadataBatch, &error, &metadata_batch)); + base::Bind(&CaptureErrorAndMetadataBatch, &error, &metadata_batch)); PumpLoop(); ASSERT_TRUE(error.IsSet()); VerifyMetadata(std::move(metadata_batch), sync_pb::ModelTypeState(),
diff --git a/components/sync/model_impl/shared_model_type_processor.cc b/components/sync/model_impl/shared_model_type_processor.cc index 6b5816f..bffedd4 100644 --- a/components/sync/model_impl/shared_model_type_processor.cc +++ b/components/sync/model_impl/shared_model_type_processor.cc
@@ -23,10 +23,7 @@ SharedModelTypeProcessor::SharedModelTypeProcessor(ModelType type, ModelTypeSyncBridge* bridge) : type_(type), - is_metadata_loaded_(false), - is_initial_pending_data_loaded_(false), bridge_(bridge), - error_handler_(nullptr), weak_ptr_factory_(this) { DCHECK(bridge); } @@ -48,7 +45,6 @@ } void SharedModelTypeProcessor::OnMetadataLoaded( - SyncError error, std::unique_ptr<MetadataBatch> batch) { DCHECK(CalledOnValidThread()); DCHECK(entities_.empty()); @@ -59,12 +55,6 @@ // Flip this flag here to cover all cases where we don't need to load data. is_initial_pending_data_loaded_ = true; - if (error.IsSet()) { - start_error_ = error; - ConnectIfReady(); - return; - } - if (batch->GetModelTypeState().initial_sync_done()) { EntityMetadataMap metadata_map(batch->TakeAllMetadata()); std::vector<std::string> entities_to_commit; @@ -96,10 +86,14 @@ ConnectIfReady(); } +bool SharedModelTypeProcessor::ConnectPreconditionsMet() const { + return is_metadata_loaded_ && is_initial_pending_data_loaded_ && + error_handler_; +} + void SharedModelTypeProcessor::ConnectIfReady() { DCHECK(CalledOnValidThread()); - if (!is_metadata_loaded_ || !is_initial_pending_data_loaded_ || - start_callback_.is_null()) { + if (!ConnectPreconditionsMet()) { return; } @@ -114,7 +108,8 @@ base::ThreadTaskRunnerHandle::Get()); } - start_callback_.Run(start_error_, std::move(activation_context)); + start_callback_.Run(ModelToSyncError(start_error_), + std::move(activation_context)); start_callback_.Reset(); } @@ -144,14 +139,28 @@ return model_type_state_.initial_sync_done(); } -SyncError SharedModelTypeProcessor::CreateAndUploadError( +void SharedModelTypeProcessor::ReportError(const ModelError& error) { + DCHECK(error.IsSet()); + + if (ConnectPreconditionsMet()) { + // If both model and sync are ready, then |start_callback_| was already + // called and this can't be treated as a start error. 
+ DCHECK(error_handler_); + error_handler_->OnUnrecoverableError(ModelToSyncError(error)); + } else if (!start_error_.IsSet()) { + start_error_ = error; + // An early model error means we're no longer expecting OnMetadataLoaded to + // be called. + is_metadata_loaded_ = true; + is_initial_pending_data_loaded_ = true; + ConnectIfReady(); + } +} + +void SharedModelTypeProcessor::ReportError( const tracked_objects::Location& location, const std::string& message) { - if (error_handler_) { - return error_handler_->CreateAndUploadError(location, message, type_); - } else { - return SyncError(location, SyncError::DATATYPE_ERROR, message, type_); - } + ReportError(ModelError(location, message)); } void SharedModelTypeProcessor::ConnectSync( @@ -295,10 +304,10 @@ } } - SyncError error = + ModelError error = bridge_->ApplySyncChanges(std::move(change_list), EntityChangeList()); if (error.IsSet()) { - error_handler_->OnUnrecoverableError(error); + ReportError(error); } } @@ -352,11 +361,11 @@ } // Inform the bridge of the new or updated data. - SyncError error = + ModelError error = bridge_->ApplySyncChanges(std::move(metadata_changes), entity_changes); if (error.IsSet()) { - error_handler_->OnUnrecoverableError(error); + ReportError(error); } else { // There may be new reasons to commit by the time this function is done. FlushPendingCommitRequests(); @@ -545,11 +554,11 @@ } // Let the bridge handle associating and merging the data. - SyncError error = + ModelError error = bridge_->MergeSyncData(std::move(metadata_changes), data_map); if (error.IsSet()) { - error_handler_->OnUnrecoverableError(error); + ReportError(error); } else { // We may have new reasons to commit by the time this function is done. 
FlushPendingCommitRequests(); @@ -557,30 +566,19 @@ } void SharedModelTypeProcessor::OnInitialPendingDataLoaded( - SyncError error, std::unique_ptr<DataBatch> data_batch) { DCHECK(!is_initial_pending_data_loaded_); - if (error.IsSet()) { - start_error_ = error; - } else { - ConsumeDataBatch(std::move(data_batch)); - } - + ConsumeDataBatch(std::move(data_batch)); is_initial_pending_data_loaded_ = true; + ConnectIfReady(); } void SharedModelTypeProcessor::OnDataLoadedForReEncryption( - SyncError error, std::unique_ptr<DataBatch> data_batch) { DCHECK(is_initial_pending_data_loaded_); - if (error.IsSet()) { - error_handler_->OnUnrecoverableError(error); - return; - } - ConsumeDataBatch(std::move(data_batch)); FlushPendingCommitRequests(); } @@ -646,4 +644,14 @@ return CreateEntity(bridge_->GetStorageKey(data), data); } +SyncError SharedModelTypeProcessor::ModelToSyncError( + const ModelError& error) const { + if (error.IsSet()) { + return SyncError(error.location(), SyncError::DATATYPE_ERROR, + error.message(), type_); + } else { + return SyncError(); + } +} + } // namespace syncer
diff --git a/components/sync/model_impl/shared_model_type_processor.h b/components/sync/model_impl/shared_model_type_processor.h index 65530d4..66c65356 100644 --- a/components/sync/model_impl/shared_model_type_processor.h +++ b/components/sync/model_impl/shared_model_type_processor.h
@@ -20,6 +20,7 @@ #include "components/sync/model/data_type_error_handler.h" #include "components/sync/model/metadata_batch.h" #include "components/sync/model/metadata_change_list.h" +#include "components/sync/model/model_error.h" #include "components/sync/model/model_type_change_processor.h" #include "components/sync/model/model_type_sync_bridge.h" #include "components/sync/model/sync_error.h" @@ -52,14 +53,14 @@ MetadataChangeList* metadata_change_list) override; void Delete(const std::string& storage_key, MetadataChangeList* metadata_change_list) override; - void OnMetadataLoaded(SyncError error, - std::unique_ptr<MetadataBatch> batch) override; + void OnMetadataLoaded(std::unique_ptr<MetadataBatch> batch) override; void OnSyncStarting(std::unique_ptr<DataTypeErrorHandler> error_handler, const StartCallback& callback) override; void DisableSync() override; bool IsTrackingMetadata() override; - SyncError CreateAndUploadError(const tracked_objects::Location& location, - const std::string& message) override; + void ReportError(const ModelError& error) override; + void ReportError(const tracked_objects::Location& location, + const std::string& message) override; // ModelTypeProcessor implementation. void ConnectSync(std::unique_ptr<CommitQueue> worker) override; @@ -77,7 +78,11 @@ std::map<std::string, std::unique_ptr<ProcessorEntityTracker>>; using UpdateMap = std::map<std::string, std::unique_ptr<UpdateResponseData>>; - // Check conditions, and if met inform sync that we are ready to connect. + // Whether the preconditions to connect are met. Note: returns true if we have + // already connected. + bool ConnectPreconditionsMet() const; + + // If preconditions are met, inform sync that we are ready to connect. void ConnectIfReady(); // Helper function to process the update for a single entity. If a local data @@ -100,12 +105,10 @@ const UpdateResponseDataList& updates); // ModelTypeSyncBridge::GetData() callback for initial pending commit data. 
- void OnInitialPendingDataLoaded(SyncError error, - std::unique_ptr<DataBatch> data_batch); + void OnInitialPendingDataLoaded(std::unique_ptr<DataBatch> data_batch); // ModelTypeSyncBridge::GetData() callback for re-encryption commit data. - void OnDataLoadedForReEncryption(SyncError error, - std::unique_ptr<DataBatch> data_batch); + void OnDataLoadedForReEncryption(std::unique_ptr<DataBatch> data_batch); // Caches EntityData from the |data_batch| in the entity trackers. void ConsumeDataBatch(std::unique_ptr<DataBatch> data_batch); @@ -138,6 +141,9 @@ // Version of the above that generates a tag for |data|. ProcessorEntityTracker* CreateEntity(const EntityData& data); + // Helper function to turn a ModelError into a SyncError. + SyncError ModelToSyncError(const ModelError& error) const; + const ModelType type_; sync_pb::ModelTypeState model_type_state_; @@ -146,13 +152,13 @@ // A cache for any error that may occur during startup and should be passed // into the |start_callback_|. - SyncError start_error_; + ModelError start_error_; // Indicates whether the metadata has finished loading. - bool is_metadata_loaded_; + bool is_metadata_loaded_ = false; // Indicates whether data for any initial pending commits has been loaded. - bool is_initial_pending_data_loaded_; + bool is_initial_pending_data_loaded_ = false; // Reference to the CommitQueue. //
diff --git a/components/sync/model_impl/shared_model_type_processor_unittest.cc b/components/sync/model_impl/shared_model_type_processor_unittest.cc index 7551170..97f7e86a 100644 --- a/components/sync/model_impl/shared_model_type_processor_unittest.cc +++ b/components/sync/model_impl/shared_model_type_processor_unittest.cc
@@ -49,10 +49,6 @@ // worker/processor will not have been initialized and thus empty. const EntitySpecifics kEmptySpecifics; -SyncError CreateSyncError(SyncError::ErrorType error_type) { - return SyncError(FROM_HERE, error_type, "TestError", PREFERENCES); -} - EntitySpecifics GenerateSpecifics(const std::string& key, const std::string& value) { return FakeModelTypeSyncBridge::GenerateSpecifics(key, value); @@ -97,8 +93,8 @@ // FakeModelTypeSyncBridge overrides. - SyncError MergeSyncData(std::unique_ptr<MetadataChangeList> mcl, - EntityDataMap entity_data_map) override { + ModelError MergeSyncData(std::unique_ptr<MetadataChangeList> mcl, + EntityDataMap entity_data_map) override { merge_call_count_++; return FakeModelTypeSyncBridge::MergeSyncData(std::move(mcl), entity_data_map); @@ -117,9 +113,8 @@ private: void CaptureDataCallback(DataCallback callback, - SyncError error, std::unique_ptr<DataBatch> data) { - data_callback_ = base::Bind(callback, error, base::Passed(std::move(data))); + data_callback_ = base::Bind(callback, base::Passed(std::move(data))); } // The number of times MergeSyncData has been called. @@ -166,7 +161,7 @@ } void OnMetadataLoaded() { - type_processor()->OnMetadataLoaded(SyncError(), db().CreateMetadataBatch()); + type_processor()->OnMetadataLoaded(db().CreateMetadataBatch()); } void OnPendingCommitDataLoaded() { bridge()->OnPendingCommitDataLoaded(); } @@ -237,9 +232,9 @@ // Sets the error type that OnReadyToConnect (our StartCallback) expects to // receive. 
- void ExpectStartError(SyncError::ErrorType error_type) { - DCHECK(expected_start_error_ == SyncError::UNSET); - expected_start_error_ = error_type; + void ExpectStartError() { + EXPECT_FALSE(expect_start_error_); + expect_start_error_ = true; } TestModelTypeSyncBridge* bridge() const { return bridge_.get(); } @@ -258,17 +253,14 @@ } private: - void CheckPostConditions() { - DCHECK_EQ(SyncError::UNSET, expected_start_error_); - } + void CheckPostConditions() { EXPECT_FALSE(expect_start_error_); } void OnReadyToConnect(SyncError error, std::unique_ptr<ActivationContext> context) { - if (expected_start_error_ != SyncError::UNSET) { + if (expect_start_error_) { EXPECT_TRUE(error.IsSet()); - EXPECT_EQ(expected_start_error_, error.error_type()); EXPECT_EQ(nullptr, context); - expected_start_error_ = SyncError::UNSET; + expect_start_error_ = false; return; } @@ -294,8 +286,8 @@ // The processor's error handler. DataTypeErrorHandlerMock* error_handler_; - // The error to expect in OnReadyToConnect(). - SyncError::ErrorType expected_start_error_ = SyncError::UNSET; + // Whether to expect an error in OnReadyToConnect(). + bool expect_start_error_ = false; }; // Test that an initial sync handles local and remote items properly. @@ -350,31 +342,28 @@ OnMetadataLoaded(); OnSyncStarting(); - bridge()->ErrorOnNextCall(SyncError::DATATYPE_ERROR); + bridge()->ErrorOnNextCall(); error_handler()->ExpectError(SyncError::DATATYPE_ERROR); worker()->UpdateFromServer(); } // Test that errors before it's called are passed to |start_callback| correctly. TEST_F(SharedModelTypeProcessorTest, StartErrors) { - type_processor()->OnMetadataLoaded(CreateSyncError(SyncError::DATATYPE_ERROR), - nullptr); - ExpectStartError(SyncError::DATATYPE_ERROR); + type_processor()->ReportError(FROM_HERE, "boom"); + ExpectStartError(); OnSyncStarting(); // Test OnSyncStarting happening first. 
ResetState(false); OnSyncStarting(); - ExpectStartError(SyncError::DATATYPE_ERROR); - type_processor()->OnMetadataLoaded(CreateSyncError(SyncError::DATATYPE_ERROR), - nullptr); + ExpectStartError(); + type_processor()->ReportError(FROM_HERE, "boom"); // Test an error loading pending data. ResetStateWriteItem(kKey1, kValue1); - bridge()->ErrorOnNextCall(SyncError::DATATYPE_ERROR); + bridge()->ErrorOnNextCall(); InitializeToMetadataLoaded(); - OnPendingCommitDataLoaded(); - ExpectStartError(SyncError::DATATYPE_ERROR); + ExpectStartError(); OnSyncStarting(); } @@ -639,7 +628,7 @@ TEST_F(SharedModelTypeProcessorTest, ErrorApplyingAck) { InitializeToReadyState(); bridge()->WriteItem(kKey1, kValue1); - bridge()->ErrorOnNextCall(SyncError::DATATYPE_ERROR); + bridge()->ErrorOnNextCall(); error_handler()->ExpectError(SyncError::DATATYPE_ERROR); worker()->AckOnePendingCommit(); } @@ -804,7 +793,7 @@ // propagated to the error handler. TEST_F(SharedModelTypeProcessorTest, ErrorApplyingUpdate) { InitializeToReadyState(); - bridge()->ErrorOnNextCall(SyncError::DATATYPE_ERROR); + bridge()->ErrorOnNextCall(); error_handler()->ExpectError(SyncError::DATATYPE_ERROR); worker()->UpdateFromServer(kHash1, GenerateSpecifics(kKey1, kValue1)); } @@ -1161,10 +1150,9 @@ TEST_F(SharedModelTypeProcessorTest, ReEncryptErrorLoadingData) { InitializeToReadyState(); WriteItemAndAck(kKey1, kValue1); - bridge()->ErrorOnNextCall(SyncError::DATATYPE_ERROR); - worker()->UpdateWithEncryptionKey("k1"); + bridge()->ErrorOnNextCall(); error_handler()->ExpectError(SyncError::DATATYPE_ERROR); - OnPendingCommitDataLoaded(); + worker()->UpdateWithEncryptionKey("k1"); } // Test receipt of updates with new and old keys.
diff --git a/content/browser/bluetooth/web_bluetooth_service_impl.cc b/content/browser/bluetooth/web_bluetooth_service_impl.cc index 14246ca..320fad47 100644 --- a/content/browser/bluetooth/web_bluetooth_service_impl.cc +++ b/content/browser/bluetooth/web_bluetooth_service_impl.cc
@@ -130,58 +130,6 @@ return blink::mojom::WebBluetoothResult::GATT_UNTRANSLATED_ERROR_CODE; } -// TODO(ortuno): This should really be a BluetoothDevice method. -// Replace when implemented. http://crbug.com/552022 -std::vector<device::BluetoothRemoteGattCharacteristic*> -GetCharacteristicsByUUID(device::BluetoothRemoteGattService* service, - const BluetoothUUID& characteristic_uuid) { - std::vector<device::BluetoothRemoteGattCharacteristic*> characteristics; - VLOG(1) << "Looking for characteristic: " - << characteristic_uuid.canonical_value(); - for (device::BluetoothRemoteGattCharacteristic* characteristic : - service->GetCharacteristics()) { - VLOG(1) << "Characteristic in cache: " - << characteristic->GetUUID().canonical_value(); - if (characteristic->GetUUID() == characteristic_uuid) { - characteristics.push_back(characteristic); - } - } - return characteristics; -} - -// TODO(ortuno): This should really be a BluetoothDevice method. -// Replace when implemented. http://crbug.com/552022 -std::vector<device::BluetoothRemoteGattService*> GetPrimaryServicesByUUID( - device::BluetoothDevice* device, - const BluetoothUUID& service_uuid) { - std::vector<device::BluetoothRemoteGattService*> services; - VLOG(1) << "Looking for service: " << service_uuid.canonical_value(); - for (device::BluetoothRemoteGattService* service : - device->GetGattServices()) { - VLOG(1) << "Service in cache: " << service->GetUUID().canonical_value(); - if (service->GetUUID() == service_uuid && service->IsPrimary()) { - services.push_back(service); - } - } - return services; -} - -// TODO(ortuno): This should really be a BluetoothDevice method. -// Replace when implemented. 
http://crbug.com/552022 -std::vector<device::BluetoothRemoteGattService*> GetPrimaryServices( - device::BluetoothDevice* device) { - std::vector<device::BluetoothRemoteGattService*> services; - VLOG(1) << "Looking for services."; - for (device::BluetoothRemoteGattService* service : - device->GetGattServices()) { - VLOG(1) << "Service in cache: " << service->GetUUID().canonical_value(); - if (service->IsPrimary()) { - services.push_back(service); - } - } - return services; -} - } // namespace // Struct that holds the result of a cache query. @@ -523,8 +471,8 @@ std::vector<device::BluetoothRemoteGattCharacteristic*> characteristics = characteristics_uuid - ? GetCharacteristicsByUUID(query_result.service, - characteristics_uuid.value()) + ? query_result.device->GetCharacteristicsByUUID( + service_instance_id, characteristics_uuid.value()) : query_result.service->GetCharacteristics(); std::vector<blink::mojom::WebBluetoothRemoteGATTCharacteristicPtr> @@ -755,8 +703,8 @@ DCHECK(device->IsGattServicesDiscoveryComplete()); std::vector<device::BluetoothRemoteGattService*> services = - services_uuid ? GetPrimaryServicesByUUID(device, services_uuid.value()) - : GetPrimaryServices(device); + services_uuid ? device->GetPrimaryServicesByUUID(services_uuid.value()) + : device->GetPrimaryServices(); std::vector<blink::mojom::WebBluetoothRemoteGATTServicePtr> response_services; for (device::BluetoothRemoteGattService* service : services) {
diff --git a/content/browser/renderer_host/pepper/pepper_gamepad_host_unittest.cc b/content/browser/renderer_host/pepper/pepper_gamepad_host_unittest.cc index cb42e44c..242c6ca 100644 --- a/content/browser/renderer_host/pepper/pepper_gamepad_host_unittest.cc +++ b/content/browser/renderer_host/pepper/pepper_gamepad_host_unittest.cc
@@ -126,13 +126,7 @@ AddressDiff(&ppapi_gamepad.buttons, &ppapi_gamepad)); } -// crbug.com/147549 -#if defined(OS_ANDROID) -#define MAYBE_WaitForReply DISABLED_WaitForReply -#else -#define MAYBE_WaitForReply WaitForReply -#endif -TEST_F(PepperGamepadHostTest, MAYBE_WaitForReply) { +TEST_F(PepperGamepadHostTest, WaitForReply) { blink::WebGamepads default_data; memset(&default_data, 0, sizeof(blink::WebGamepads)); default_data.items[0].connected = true;
diff --git a/content/browser/webrtc/OWNERS b/content/browser/webrtc/OWNERS index 7894a8c..4f58c911 100644 --- a/content/browser/webrtc/OWNERS +++ b/content/browser/webrtc/OWNERS
@@ -1,3 +1,5 @@ +emircan@chromium.org +mcasas@chromium.org perkj@chromium.org tommi@chromium.org
diff --git a/content/renderer/child_frame_compositing_helper.cc b/content/renderer/child_frame_compositing_helper.cc index dd1459c..716819c6 100644 --- a/content/renderer/child_frame_compositing_helper.cc +++ b/content/renderer/child_frame_compositing_helper.cc
@@ -222,8 +222,7 @@ scale_factor = 1.0f; surface_layer->SetSurfaceInfo(cc::SurfaceInfo(surface_info.id(), scale_factor, - surface_info.size_in_pixels()), - false /* stretch_content_to_fill_bounds */); + surface_info.size_in_pixels())); surface_layer->SetMasksToBounds(true); std::unique_ptr<cc_blink::WebLayerImpl> layer( new cc_blink::WebLayerImpl(surface_layer));
diff --git a/content/renderer/media/external_media_stream_audio_source.cc b/content/renderer/media/external_media_stream_audio_source.cc index 01b32ad..7c2977c 100644 --- a/content/renderer/media/external_media_stream_audio_source.cc +++ b/content/renderer/media/external_media_stream_audio_source.cc
@@ -40,10 +40,7 @@ << GetAudioParameters().AsHumanReadableString() << "}."; source_->Initialize(GetAudioParameters(), this, -1); source_->Start(); - // OnCaptureStarted() is expected to be called synchronously by this - // implementation. If this needs to be changed, the source needs to be started - // outside of EnsureSourceIsStarted since its design is synchronous. - CHECK(was_started_); + was_started_ = true; return true; } @@ -60,10 +57,6 @@ << GetAudioParameters().AsHumanReadableString() << "}."; } -void ExternalMediaStreamAudioSource::OnCaptureStarted() { - was_started_ = true; -} - void ExternalMediaStreamAudioSource::Capture(const media::AudioBus* audio_bus, int audio_delay_milliseconds, double volume,
diff --git a/content/renderer/media/external_media_stream_audio_source.h b/content/renderer/media/external_media_stream_audio_source.h index 3d85fdb..f520ae09 100644 --- a/content/renderer/media/external_media_stream_audio_source.h +++ b/content/renderer/media/external_media_stream_audio_source.h
@@ -35,7 +35,6 @@ void EnsureSourceIsStopped() final; // media::AudioCapturerSource::CaptureCallback implementation. - void OnCaptureStarted() final; void Capture(const media::AudioBus* audio_bus, int audio_delay_milliseconds, double volume,
diff --git a/content/renderer/media/local_media_stream_audio_source.cc b/content/renderer/media/local_media_stream_audio_source.cc index 5e56aa3..03353f1 100644 --- a/content/renderer/media/local_media_stream_audio_source.cc +++ b/content/renderer/media/local_media_stream_audio_source.cc
@@ -12,11 +12,9 @@ LocalMediaStreamAudioSource::LocalMediaStreamAudioSource( int consumer_render_frame_id, - const StreamDeviceInfo& device_info, - const ConstraintsCallback& started_callback) + const StreamDeviceInfo& device_info) : MediaStreamAudioSource(true /* is_local_source */), - consumer_render_frame_id_(consumer_render_frame_id), - started_callback_(started_callback) { + consumer_render_frame_id_(consumer_render_frame_id) { DVLOG(1) << "LocalMediaStreamAudioSource::LocalMediaStreamAudioSource()"; MediaStreamSource::SetDeviceInfo(device_info); @@ -86,10 +84,6 @@ << GetAudioParameters().AsHumanReadableString() << "}."; } -void LocalMediaStreamAudioSource::OnCaptureStarted() { - started_callback_.Run(this, MEDIA_DEVICE_OK, ""); -} - void LocalMediaStreamAudioSource::Capture(const media::AudioBus* audio_bus, int audio_delay_milliseconds, double volume,
diff --git a/content/renderer/media/local_media_stream_audio_source.h b/content/renderer/media/local_media_stream_audio_source.h index 0b92751..1d9d63a0e 100644 --- a/content/renderer/media/local_media_stream_audio_source.h +++ b/content/renderer/media/local_media_stream_audio_source.h
@@ -23,8 +23,7 @@ // audio data. Audio parameters and (optionally) a pre-existing audio session // ID are read from |device_info|. LocalMediaStreamAudioSource(int consumer_render_frame_id, - const StreamDeviceInfo& device_info, - const ConstraintsCallback& started_callback); + const StreamDeviceInfo& device_info); ~LocalMediaStreamAudioSource() final; @@ -34,7 +33,6 @@ void EnsureSourceIsStopped() final; // media::AudioCapturerSource::CaptureCallback implementation. - void OnCaptureStarted() final; void Capture(const media::AudioBus* audio_bus, int audio_delay_milliseconds, double volume, @@ -48,9 +46,6 @@ // The device created by the AudioDeviceFactory in EnsureSourceIsStarted(). scoped_refptr<media::AudioCapturerSource> source_; - // Callback that's called when the audio source has been initialized. - ConstraintsCallback started_callback_; - // In debug builds, check that all methods that could cause object graph // or data flow changes are being called on the main thread. base::ThreadChecker thread_checker_;
diff --git a/content/renderer/media/rtc_peer_connection_handler_unittest.cc b/content/renderer/media/rtc_peer_connection_handler_unittest.cc index 680d16d1..7745960 100644 --- a/content/renderer/media/rtc_peer_connection_handler_unittest.cc +++ b/content/renderer/media/rtc_peer_connection_handler_unittest.cc
@@ -296,10 +296,10 @@ media::AudioParameters::kAudioCDSampleRate, media::CHANNEL_LAYOUT_STEREO, media::AudioParameters::kAudioCDSampleRate / 100), - MockConstraintFactory().CreateWebMediaConstraints(), - base::Bind(&RTCPeerConnectionHandlerTest::OnAudioSourceStarted), mock_dependency_factory_.get()); audio_source->SetAllowInvalidRenderFrameIdForTesting(true); + audio_source->SetSourceConstraints( + MockConstraintFactory().CreateWebMediaConstraints()); blink_audio_source.setExtraData(audio_source); // Takes ownership. blink::WebMediaStreamSource video_source; @@ -366,10 +366,6 @@ MediaStreamVideoTrack::GetVideoTrack(track)->Stop(); } - static void OnAudioSourceStarted(MediaStreamSource* source, - MediaStreamRequestResult result, - const blink::WebString& result_name) {} - base::MessageLoop message_loop_; std::unique_ptr<ChildProcess> child_process_; std::unique_ptr<MockWebRTCPeerConnectionHandlerClient> mock_client_;
diff --git a/content/renderer/media/user_media_client_impl.cc b/content/renderer/media/user_media_client_impl.cc index 393d3faf..d22826f 100644 --- a/content/renderer/media/user_media_client_impl.cc +++ b/content/renderer/media/user_media_client_impl.cc
@@ -432,14 +432,6 @@ } request_info->generated = true; - for (const auto* array : {&audio_array, &video_array}) { - for (const auto& info : *array) { - WebRtcLogMessage(base::StringPrintf("Request %d for device \"%s\"", - request_id, - info.device.name.c_str())); - } - } - DCHECK(!request_info->request.isNull()); blink::WebVector<blink::WebMediaStreamTrack> audio_track_vector( audio_array.size()); @@ -454,7 +446,8 @@ blink::WebString webkit_id = blink::WebString::fromUTF8(label); blink::WebMediaStream* web_stream = &(request_info->web_stream); - web_stream->initialize(webkit_id, audio_track_vector, video_track_vector); + web_stream->initialize(webkit_id, audio_track_vector, + video_track_vector); web_stream->setExtraData(new MediaStream()); // Wait for the tracks to be started successfully or to fail. @@ -480,40 +473,6 @@ } } -// static -void UserMediaClientImpl::OnAudioSourceStartedOnAudioThread( - scoped_refptr<base::SingleThreadTaskRunner> task_runner, - base::WeakPtr<UserMediaClientImpl> weak_ptr, - MediaStreamSource* source, - MediaStreamRequestResult result, - const blink::WebString& result_name) { - task_runner->PostTask(FROM_HERE, - base::Bind(&UserMediaClientImpl::OnAudioSourceStarted, - weak_ptr, source, result, result_name)); -} - -void UserMediaClientImpl::OnAudioSourceStarted( - MediaStreamSource* source, - MediaStreamRequestResult result, - const blink::WebString& result_name) { - DCHECK(CalledOnValidThread()); - - for (auto it = pending_local_sources_.begin(); - it != pending_local_sources_.end(); ++it) { - MediaStreamSource* const source_extra_data = - static_cast<MediaStreamSource*>((*it).getExtraData()); - if (source_extra_data == source) { - if (result == MEDIA_DEVICE_OK) - local_sources_.push_back((*it)); - pending_local_sources_.erase(it); - for (const auto& request : user_media_requests_) - request->OnAudioSourceStarted(source, result, result_name); - return; - } - } - NOTREACHED(); -} - void UserMediaClientImpl::FinalizeEnumerateDevices( 
blink::WebMediaDevicesRequest request, const EnumerationResult& result) { @@ -581,65 +540,49 @@ RemoveLocalSource(source); } -void UserMediaClientImpl::InitializeVideoSourceObject( +void UserMediaClientImpl::InitializeSourceObject( const StreamDeviceInfo& device, + blink::WebMediaStreamSource::Type type, const blink::WebMediaConstraints& constraints, blink::WebMediaStreamSource* webkit_source) { - DCHECK(CalledOnValidThread()); - - *webkit_source = FindOrInitializeSourceObject(device); - if (webkit_source->getExtraData()) + const blink::WebMediaStreamSource* existing_source = + FindLocalSource(device); + if (existing_source) { + *webkit_source = *existing_source; + DVLOG(1) << "Source already exist. Reusing source with id " + << webkit_source->id().utf8(); return; + } - webkit_source->setExtraData(CreateVideoSource( - device, base::Bind(&UserMediaClientImpl::OnLocalSourceStopped, - weak_factory_.GetWeakPtr()))); + webkit_source->initialize(blink::WebString::fromUTF8(device.device.id), type, + blink::WebString::fromUTF8(device.device.name), + false /* remote */); + + DVLOG(1) << "Initialize source object :" + << "id = " << webkit_source->id().utf8() + << ", name = " << webkit_source->name().utf8(); + + if (type == blink::WebMediaStreamSource::TypeVideo) { + webkit_source->setExtraData( + CreateVideoSource( + device, + base::Bind(&UserMediaClientImpl::OnLocalSourceStopped, + weak_factory_.GetWeakPtr()))); + } else { + DCHECK_EQ(blink::WebMediaStreamSource::TypeAudio, type); + MediaStreamAudioSource* const audio_source = + CreateAudioSource(device, constraints); + audio_source->SetStopCallback( + base::Bind(&UserMediaClientImpl::OnLocalSourceStopped, + weak_factory_.GetWeakPtr())); + webkit_source->setExtraData(audio_source); // Takes ownership. 
+ } local_sources_.push_back(*webkit_source); } -void UserMediaClientImpl::InitializeAudioSourceObject( - const StreamDeviceInfo& device, - const blink::WebMediaConstraints& constraints, - blink::WebMediaStreamSource* webkit_source, - bool* source_initialized) { - DCHECK(CalledOnValidThread()); - - *webkit_source = FindOrInitializeSourceObject(device); - if (webkit_source->getExtraData()) { - *source_initialized = true; - return; - } - - *source_initialized = false; - - // See if the source is already being initialized. - auto* pending = FindPendingLocalSource(device); - if (pending) { - *webkit_source = *pending; - return; - } - - // While sources are being initialized, keep them in a separate array. - // Once they've finished initialized, they'll be moved over to local_sources_. - // See OnAudioSourceStarted for more details. - pending_local_sources_.push_back(*webkit_source); - - MediaStreamSource::ConstraintsCallback source_ready = base::Bind( - &UserMediaClientImpl::OnAudioSourceStartedOnAudioThread, - base::ThreadTaskRunnerHandle::Get(), weak_factory_.GetWeakPtr()); - - MediaStreamAudioSource* const audio_source = - CreateAudioSource(device, constraints, source_ready); - audio_source->SetStopCallback(base::Bind( - &UserMediaClientImpl::OnLocalSourceStopped, weak_factory_.GetWeakPtr())); - webkit_source->setExtraData(audio_source); // Takes ownership. -} - MediaStreamAudioSource* UserMediaClientImpl::CreateAudioSource( const StreamDeviceInfo& device, - const blink::WebMediaConstraints& constraints, - const MediaStreamSource::ConstraintsCallback& source_ready) { - DCHECK(CalledOnValidThread()); + const blink::WebMediaConstraints& constraints) { // If the audio device is a loopback device (for screen capture), or if the // constraints/effects parameters indicate no audio processing is needed, // create an efficient, direct-path MediaStreamAudioSource instance. 
@@ -647,21 +590,20 @@ !MediaStreamAudioProcessor::WouldModifyAudio( constraints, device.device.input.effects)) { return new LocalMediaStreamAudioSource(RenderFrameObserver::routing_id(), - device, source_ready); + device); } // The audio device is not associated with screen capture and also requires // processing. ProcessedLocalAudioSource* source = new ProcessedLocalAudioSource( - RenderFrameObserver::routing_id(), device, constraints, source_ready, - dependency_factory_); + RenderFrameObserver::routing_id(), device, dependency_factory_); + source->SetSourceConstraints(constraints); return source; } MediaStreamVideoSource* UserMediaClientImpl::CreateVideoSource( const StreamDeviceInfo& device, const MediaStreamSource::SourceStoppedCallback& stop_callback) { - DCHECK(CalledOnValidThread()); content::MediaStreamVideoCapturerSource* ret = new content::MediaStreamVideoCapturerSource(stop_callback, device, render_frame()); @@ -673,12 +615,14 @@ const blink::WebMediaConstraints& constraints, blink::WebVector<blink::WebMediaStreamTrack>* webkit_tracks, UserMediaRequestInfo* request) { - DCHECK(CalledOnValidThread()); DCHECK_EQ(devices.size(), webkit_tracks->size()); for (size_t i = 0; i < devices.size(); ++i) { blink::WebMediaStreamSource webkit_source; - InitializeVideoSourceObject(devices[i], constraints, &webkit_source); + InitializeSourceObject(devices[i], + blink::WebMediaStreamSource::TypeVideo, + constraints, + &webkit_source); (*webkit_tracks)[i] = request->CreateAndStartVideoTrack(webkit_source, constraints); } @@ -689,28 +633,38 @@ const blink::WebMediaConstraints& constraints, blink::WebVector<blink::WebMediaStreamTrack>* webkit_tracks, UserMediaRequestInfo* request) { - DCHECK(CalledOnValidThread()); DCHECK_EQ(devices.size(), webkit_tracks->size()); + // Log the device names for this request. 
+ for (StreamDeviceInfoArray::const_iterator it = devices.begin(); + it != devices.end(); ++it) { + WebRtcLogMessage(base::StringPrintf( + "Generated media stream for request id %d contains audio device name" + " \"%s\"", + request->request_id, + it->device.name.c_str())); + } + StreamDeviceInfoArray overridden_audio_array = devices; if (!request->enable_automatic_output_device_selection) { // If the GetUserMedia request did not explicitly set the constraint // kMediaStreamRenderToAssociatedSink, the output device parameters must // be removed. - for (auto& device_info : overridden_audio_array) { - device_info.device.matched_output_device_id = ""; - device_info.device.matched_output = - MediaStreamDevice::AudioDeviceParameters(); + for (StreamDeviceInfoArray::iterator it = overridden_audio_array.begin(); + it != overridden_audio_array.end(); ++it) { + it->device.matched_output_device_id = ""; + it->device.matched_output = MediaStreamDevice::AudioDeviceParameters(); } } for (size_t i = 0; i < overridden_audio_array.size(); ++i) { blink::WebMediaStreamSource webkit_source; - bool source_initialized = true; - InitializeAudioSourceObject(overridden_audio_array[i], constraints, - &webkit_source, &source_initialized); + InitializeSourceObject(overridden_audio_array[i], + blink::WebMediaStreamSource::TypeAudio, + constraints, + &webkit_source); (*webkit_tracks)[i].initialize(webkit_source); - request->StartAudioTrack((*webkit_tracks)[i], source_initialized); + request->StartAudioTrack((*webkit_tracks)[i]); } } @@ -864,70 +818,31 @@ } const blink::WebMediaStreamSource* UserMediaClientImpl::FindLocalSource( - const LocalStreamSources& sources, const StreamDeviceInfo& device) const { - for (const auto& local_source : sources) { + for (LocalStreamSources::const_iterator it = local_sources_.begin(); + it != local_sources_.end(); ++it) { MediaStreamSource* const source = - static_cast<MediaStreamSource*>(local_source.getExtraData()); + 
static_cast<MediaStreamSource*>(it->getExtraData()); const StreamDeviceInfo& active_device = source->device_info(); - if (IsSameDevice(active_device, device)) - return &local_source; + if (IsSameDevice(active_device, device)) { + return &(*it); + } } - return nullptr; -} - -blink::WebMediaStreamSource UserMediaClientImpl::FindOrInitializeSourceObject( - const StreamDeviceInfo& device) { - const blink::WebMediaStreamSource* existing_source = FindLocalSource(device); - if (existing_source) { - DVLOG(1) << "Source already exists. Reusing source with id " - << existing_source->id().utf8(); - return *existing_source; - } - - blink::WebMediaStreamSource::Type type = - IsAudioInputMediaType(device.device.type) - ? blink::WebMediaStreamSource::TypeAudio - : blink::WebMediaStreamSource::TypeVideo; - - blink::WebMediaStreamSource source; - source.initialize(blink::WebString::fromUTF8(device.device.id), type, - blink::WebString::fromUTF8(device.device.name), - false /* remote */); - - DVLOG(1) << "Initialize source object :" - << "id = " << source.id().utf8() - << ", name = " << source.name().utf8(); - return source; + return NULL; } bool UserMediaClientImpl::RemoveLocalSource( const blink::WebMediaStreamSource& source) { + bool device_found = false; for (LocalStreamSources::iterator device_it = local_sources_.begin(); device_it != local_sources_.end(); ++device_it) { if (IsSameSource(*device_it, source)) { + device_found = true; local_sources_.erase(device_it); - return true; + break; } } - - // Check if the source was pending. 
- for (LocalStreamSources::iterator device_it = pending_local_sources_.begin(); - device_it != pending_local_sources_.end(); ++device_it) { - if (IsSameSource(*device_it, source)) { - MediaStreamSource* const source_extra_data = - static_cast<MediaStreamSource*>(source.getExtraData()); - for (const auto& request : user_media_requests_) { - request->OnAudioSourceStarted(source_extra_data, - MEDIA_DEVICE_TRACK_START_FAILURE, - "Failed to access audio capture device"); - } - pending_local_sources_.erase(device_it); - return true; - } - } - - return false; + return device_found; } UserMediaClientImpl::UserMediaRequestInfo* @@ -1060,29 +975,18 @@ } void UserMediaClientImpl::UserMediaRequestInfo::StartAudioTrack( - const blink::WebMediaStreamTrack& track, - bool source_initialized) { + const blink::WebMediaStreamTrack& track) { DCHECK(track.source().getType() == blink::WebMediaStreamSource::TypeAudio); MediaStreamAudioSource* native_source = MediaStreamAudioSource::From(track.source()); - // Add the source as pending since OnTrackStarted will expect it to be there. - sources_waiting_for_callback_.push_back(native_source); + DCHECK(native_source); sources_.push_back(track.source()); - bool connected = native_source->ConnectToTrack(track); - if (source_initialized) { - OnTrackStarted( - native_source, - connected ? MEDIA_DEVICE_OK : MEDIA_DEVICE_TRACK_START_FAILURE, ""); -#if defined(OS_ANDROID) - } else if (connected) { - CHECK(native_source->is_local_source()); - // On Android, we won't get the callback indicating the device readyness. - // TODO(tommi): Update the android implementation to support the - // OnAudioSourceStarted notification. 
http://crbug.com/679302 + sources_waiting_for_callback_.push_back(native_source); + if (native_source->ConnectToTrack(track)) OnTrackStarted(native_source, MEDIA_DEVICE_OK, ""); -#endif - } + else + OnTrackStarted(native_source, MEDIA_DEVICE_TRACK_START_FAILURE, ""); } blink::WebMediaStreamTrack @@ -1136,20 +1040,31 @@ } } -bool UserMediaClientImpl::UserMediaRequestInfo::HasPendingSources() const { - return !sources_waiting_for_callback_.empty(); +bool UserMediaClientImpl::UserMediaRequestInfo::IsSourceUsed( + const blink::WebMediaStreamSource& source) const { + for (std::vector<blink::WebMediaStreamSource>::const_iterator source_it = + sources_.begin(); + source_it != sources_.end(); ++source_it) { + if (source_it->id() == source.id()) + return true; + } + return false; } -void UserMediaClientImpl::UserMediaRequestInfo::OnAudioSourceStarted( - MediaStreamSource* source, - MediaStreamRequestResult result, - const blink::WebString& result_name) { - // Check if we're waiting to be notified of this source. If not, then we'll - // ignore the notification. - auto found = std::find(sources_waiting_for_callback_.begin(), - sources_waiting_for_callback_.end(), source); - if (found != sources_waiting_for_callback_.end()) - OnTrackStarted(source, result, result_name); +void UserMediaClientImpl::UserMediaRequestInfo::RemoveSource( + const blink::WebMediaStreamSource& source) { + for (std::vector<blink::WebMediaStreamSource>::iterator it = + sources_.begin(); + it != sources_.end(); ++it) { + if (source.id() == it->id()) { + sources_.erase(it); + return; + } + } +} + +bool UserMediaClientImpl::UserMediaRequestInfo::HasPendingSources() const { + return !sources_waiting_for_callback_.empty(); } void UserMediaClientImpl::OnDestruct() {
diff --git a/content/renderer/media/user_media_client_impl.h b/content/renderer/media/user_media_client_impl.h index 9e3fc477..2492ccc 100644 --- a/content/renderer/media/user_media_client_impl.h +++ b/content/renderer/media/user_media_client_impl.h
@@ -119,8 +119,7 @@ // These are virtual for test purposes. virtual MediaStreamAudioSource* CreateAudioSource( const StreamDeviceInfo& device, - const blink::WebMediaConstraints& constraints, - const MediaStreamSource::ConstraintsCallback& source_ready); + const blink::WebMediaConstraints& constraints); virtual MediaStreamVideoSource* CreateVideoSource( const StreamDeviceInfo& device, const MediaStreamSource::SourceStoppedCallback& stop_callback); @@ -147,8 +146,7 @@ blink::WebMediaStream web_stream; blink::WebUserMediaRequest request; - void StartAudioTrack(const blink::WebMediaStreamTrack& track, - bool source_initialized); + void StartAudioTrack(const blink::WebMediaStreamTrack& track); blink::WebMediaStreamTrack CreateAndStartVideoTrack( const blink::WebMediaStreamSource& source, @@ -158,19 +156,16 @@ // successfully started, or a source has failed to start. void CallbackOnTracksStarted(const ResourcesReady& callback); - bool HasPendingSources() const; + bool IsSourceUsed(const blink::WebMediaStreamSource& source) const; + void RemoveSource(const blink::WebMediaStreamSource& source); - // Called when a local audio source has finished (or failed) initializing. - void OnAudioSourceStarted(MediaStreamSource* source, - MediaStreamRequestResult result, - const blink::WebString& result_name); + bool HasPendingSources() const; private: void OnTrackStarted( MediaStreamSource* source, MediaStreamRequestResult result, const blink::WebString& result_name); - void CheckAllTracksStarted(); ResourcesReady ready_callback_; @@ -198,17 +193,12 @@ // Creates a WebKit representation of stream sources based on // |devices| from the MediaStreamDispatcher. 
- void InitializeVideoSourceObject( + void InitializeSourceObject( const StreamDeviceInfo& device, + blink::WebMediaStreamSource::Type type, const blink::WebMediaConstraints& constraints, blink::WebMediaStreamSource* webkit_source); - void InitializeAudioSourceObject( - const StreamDeviceInfo& device, - const blink::WebMediaConstraints& constraints, - blink::WebMediaStreamSource* webkit_source, - bool* source_initialized); - void CreateVideoTracks( const StreamDeviceInfoArray& devices, const blink::WebMediaConstraints& constraints, @@ -232,17 +222,6 @@ const StreamDeviceInfoArray& audio_array, const StreamDeviceInfoArray& video_array); - static void OnAudioSourceStartedOnAudioThread( - scoped_refptr<base::SingleThreadTaskRunner> task_runner, - base::WeakPtr<UserMediaClientImpl> weak_ptr, - MediaStreamSource* source, - MediaStreamRequestResult result, - const blink::WebString& result_name); - - void OnAudioSourceStarted(MediaStreamSource* source, - MediaStreamRequestResult result, - const blink::WebString& result_name); - using EnumerationResult = std::vector<MediaDeviceInfoArray>; void FinalizeEnumerateDevices(blink::WebMediaDevicesRequest request, const EnumerationResult& result); @@ -252,22 +231,8 @@ // Returns the source that use a device with |device.session_id| // and |device.device.id|. NULL if such source doesn't exist. const blink::WebMediaStreamSource* FindLocalSource( - const StreamDeviceInfo& device) const { - return FindLocalSource(local_sources_, device); - } - const blink::WebMediaStreamSource* FindPendingLocalSource( - const StreamDeviceInfo& device) const { - return FindLocalSource(pending_local_sources_, device); - } - const blink::WebMediaStreamSource* FindLocalSource( - const LocalStreamSources& sources, const StreamDeviceInfo& device) const; - // Looks up a local source and returns it if found. If not found, prepares - // a new WebMediaStreamSource with a NULL extraData pointer. 
- blink::WebMediaStreamSource FindOrInitializeSourceObject( - const StreamDeviceInfo& device); - // Returns true if we do find and remove the |source|. // Otherwise returns false. bool RemoveLocalSource(const blink::WebMediaStreamSource& source); @@ -309,7 +274,6 @@ ::mojom::MediaDevicesDispatcherHostPtr media_devices_dispatcher_; LocalStreamSources local_sources_; - LocalStreamSources pending_local_sources_; UserMediaRequests user_media_requests_; MediaDevicesEventDispatcher::SubscriptionIdList
diff --git a/content/renderer/media/user_media_client_impl_unittest.cc b/content/renderer/media/user_media_client_impl_unittest.cc index 77214b3..41f634f5 100644 --- a/content/renderer/media/user_media_client_impl_unittest.cc +++ b/content/renderer/media/user_media_client_impl_unittest.cc
@@ -206,16 +206,9 @@ create_source_that_fails_ = should_fail; } - static void SignalSourceReady( - const MediaStreamSource::ConstraintsCallback& source_ready, - MediaStreamSource* source) { - source_ready.Run(source, MEDIA_DEVICE_OK, ""); - } - MediaStreamAudioSource* CreateAudioSource( const StreamDeviceInfo& device, - const blink::WebMediaConstraints& constraints, - const MediaStreamSource::ConstraintsCallback& source_ready) override { + const blink::WebMediaConstraints& constraints) override { MediaStreamAudioSource* source; if (create_source_that_fails_) { class FailedAtLifeAudioSource : public MediaStreamAudioSource { @@ -232,15 +225,6 @@ source = new MediaStreamAudioSource(true); } source->SetDeviceInfo(device); - - if (!create_source_that_fails_) { - // RunUntilIdle is required for this task to complete. - base::ThreadTaskRunnerHandle::Get()->PostTask( - FROM_HERE, - base::Bind(&UserMediaClientImplUnderTest::SignalSourceReady, - source_ready, source)); - } - return source; } @@ -362,7 +346,6 @@ ms_dispatcher_->audio_input_request_id(), ms_dispatcher_->stream_label(), ms_dispatcher_->audio_input_array(), ms_dispatcher_->video_array()); - base::RunLoop().RunUntilIdle(); } void StartMockedVideoSource() {
diff --git a/content/renderer/media/webrtc/processed_local_audio_source.cc b/content/renderer/media/webrtc/processed_local_audio_source.cc index bac1300..fa89a21 100644 --- a/content/renderer/media/webrtc/processed_local_audio_source.cc +++ b/content/renderer/media/webrtc/processed_local_audio_source.cc
@@ -29,14 +29,10 @@ ProcessedLocalAudioSource::ProcessedLocalAudioSource( int consumer_render_frame_id, const StreamDeviceInfo& device_info, - const blink::WebMediaConstraints& constraints, - const ConstraintsCallback& started_callback, PeerConnectionDependencyFactory* factory) : MediaStreamAudioSource(true /* is_local_source */), consumer_render_frame_id_(consumer_render_frame_id), pc_factory_(factory), - constraints_(constraints), - started_callback_(started_callback), volume_(0), allow_invalid_render_frame_id_for_testing_(false) { DCHECK(pc_factory_); @@ -57,6 +53,14 @@ return nullptr; } +void ProcessedLocalAudioSource::SetSourceConstraints( + const blink::WebMediaConstraints& constraints) { + DCHECK(thread_checker_.CalledOnValidThread()); + DCHECK(!constraints.isNull()); + DCHECK(!source_); + constraints_ = constraints; +} + void* ProcessedLocalAudioSource::GetClassIdentifier() const { return kClassIdentifier; } @@ -259,10 +263,6 @@ return WebRtcAudioDeviceImpl::kMaxVolumeLevel; } -void ProcessedLocalAudioSource::OnCaptureStarted() { - started_callback_.Run(this, MEDIA_DEVICE_OK, ""); -} - void ProcessedLocalAudioSource::Capture(const media::AudioBus* audio_bus, int audio_delay_milliseconds, double volume,
diff --git a/content/renderer/media/webrtc/processed_local_audio_source.h b/content/renderer/media/webrtc/processed_local_audio_source.h index dead7af..20cad5d 100644 --- a/content/renderer/media/webrtc/processed_local_audio_source.h +++ b/content/renderer/media/webrtc/processed_local_audio_source.h
@@ -38,8 +38,6 @@ // ID are derived from |device_info|. |factory| must outlive this instance. ProcessedLocalAudioSource(int consumer_render_frame_id, const StreamDeviceInfo& device_info, - const blink::WebMediaConstraints& constraints, - const ConstraintsCallback& started_callback, PeerConnectionDependencyFactory* factory); ~ProcessedLocalAudioSource() final; @@ -60,6 +58,7 @@ const blink::WebMediaConstraints& source_constraints() const { return constraints_; } + void SetSourceConstraints(const blink::WebMediaConstraints& constraints); // The following accessors are not valid until after the source is started // (when the first track is connected). @@ -89,7 +88,6 @@ // AudioCapturerSource::CaptureCallback implementation. // Called on the AudioCapturerSource audio thread. - void OnCaptureStarted() override; void Capture(const media::AudioBus* audio_source, int audio_delay_milliseconds, double volume, @@ -112,10 +110,7 @@ base::ThreadChecker thread_checker_; // Cached audio constraints for the capturer. - const blink::WebMediaConstraints constraints_; - - // Callback that's called when the audio source has been initialized. - ConstraintsCallback started_callback_; + blink::WebMediaConstraints constraints_; // Audio processor doing processing like FIFO, AGC, AEC and NS. Its output // data is in a unit of 10 ms data chunk.
diff --git a/content/renderer/media/webrtc/processed_local_audio_source_unittest.cc b/content/renderer/media/webrtc/processed_local_audio_source_unittest.cc index 390aeafa..11094245 100644 --- a/content/renderer/media/webrtc/processed_local_audio_source_unittest.cc +++ b/content/renderer/media/webrtc/processed_local_audio_source_unittest.cc
@@ -102,11 +102,9 @@ StreamDeviceInfo(MEDIA_DEVICE_AUDIO_CAPTURE, "Mock audio device", "mock_audio_device_id", kSampleRate, kChannelLayout, kRequestedBufferSize), - constraints, - base::Bind(&ProcessedLocalAudioSourceTest::OnAudioSourceStarted, - base::Unretained(this)), &mock_dependency_factory_); source->SetAllowInvalidRenderFrameIdForTesting(true); + source->SetSourceConstraints(constraints); blink_audio_source_.setExtraData(source); // Takes ownership. } @@ -139,10 +137,6 @@ return blink_audio_track_; } - void OnAudioSourceStarted(MediaStreamSource* source, - MediaStreamRequestResult result, - const blink::WebString& result_name) {} - private: base::MessageLoop main_thread_message_loop_; // Needed for MSAudioProcessor. MockAudioDeviceFactory mock_audio_device_factory_; @@ -174,10 +168,7 @@ .WillOnce(WithArg<0>(Invoke(this, &ThisTest::CheckSourceFormatMatches))); EXPECT_CALL(*mock_audio_device_factory()->mock_capturer_source(), SetAutomaticGainControl(true)); - EXPECT_CALL(*mock_audio_device_factory()->mock_capturer_source(), Start()) - .WillOnce(Invoke( - capture_source_callback(), - &media::AudioCapturerSource::CaptureCallback::OnCaptureStarted)); + EXPECT_CALL(*mock_audio_device_factory()->mock_capturer_source(), Start()); ASSERT_TRUE(audio_source()->ConnectToTrack(blink_audio_track())); CheckOutputFormatMatches(audio_source()->GetAudioParameters());
diff --git a/content/renderer/media/webrtc/webrtc_media_stream_adapter_unittest.cc b/content/renderer/media/webrtc/webrtc_media_stream_adapter_unittest.cc index d1d4b356..01b27bc1 100644 --- a/content/renderer/media/webrtc/webrtc_media_stream_adapter_unittest.cc +++ b/content/renderer/media/webrtc/webrtc_media_stream_adapter_unittest.cc
@@ -58,10 +58,10 @@ media::AudioParameters::kAudioCDSampleRate, media::CHANNEL_LAYOUT_STEREO, media::AudioParameters::kAudioCDSampleRate / 50), - MockConstraintFactory().CreateWebMediaConstraints(), - base::Bind(&WebRtcMediaStreamAdapterTest::OnAudioSourceStarted), dependency_factory_.get()); source->SetAllowInvalidRenderFrameIdForTesting(true); + source->SetSourceConstraints( + MockConstraintFactory().CreateWebMediaConstraints()); audio_source.setExtraData(source); // Takes ownership. audio_track_vector[0].initialize(audio_source); EXPECT_CALL(*mock_audio_device_factory_.mock_capturer_source(), @@ -119,10 +119,6 @@ } private: - static void OnAudioSourceStarted(MediaStreamSource* source, - MediaStreamRequestResult result, - const blink::WebString& result_name) {} - base::MessageLoop message_loop_; std::unique_ptr<ChildProcess> child_process_; std::unique_ptr<MockPeerConnectionDependencyFactory> dependency_factory_;
diff --git a/content/shell/renderer/layout_test/blink_test_runner.cc b/content/shell/renderer/layout_test/blink_test_runner.cc index cf81c8e8..155cff0 100644 --- a/content/shell/renderer/layout_test/blink_test_runner.cc +++ b/content/shell/renderer/layout_test/blink_test_runner.cc
@@ -208,24 +208,14 @@ void Initialize(const media::AudioParameters& params, CaptureCallback* callback, - int session_id) override { - callback_ = callback; - } - void Start() override { - if (callback_) - callback_->OnCaptureStarted(); - } + int session_id) override {} + void Start() override {} void Stop() override {} void SetVolume(double volume) override {} void SetAutomaticGainControl(bool enable) override {} protected: ~MockAudioCapturerSource() override {} - - private: - CaptureCallback* callback_ = nullptr; - - DISALLOW_COPY_AND_ASSIGN(MockAudioCapturerSource); }; // Tests in web-platform-tests use absolute path links such as
diff --git a/device/bluetooth/bluetooth_device.cc b/device/bluetooth/bluetooth_device.cc index 7c56c12..81f7d779 100644 --- a/device/bluetooth/bluetooth_device.cc +++ b/device/bluetooth/bluetooth_device.cc
@@ -15,6 +15,7 @@
 #include "base/values.h"
 #include "device/bluetooth/bluetooth_adapter.h"
 #include "device/bluetooth/bluetooth_gatt_connection.h"
+#include "device/bluetooth/bluetooth_remote_gatt_characteristic.h"
 #include "device/bluetooth/bluetooth_remote_gatt_service.h"
 #include "device/bluetooth/string_util_icu.h"
 #include "grit/bluetooth_strings.h"
@@ -449,6 +450,52 @@
   GetAdapter()->NotifyDeviceChanged(this);
 }
 
+std::vector<BluetoothRemoteGattService*> BluetoothDevice::GetPrimaryServices() {
+  std::vector<BluetoothRemoteGattService*> services;
+  VLOG(2) << "Looking for services.";
+  for (BluetoothRemoteGattService* service : GetGattServices()) {
+    VLOG(2) << "Service in cache: " << service->GetUUID().canonical_value();
+    if (service->IsPrimary()) {
+      services.push_back(service);
+    }
+  }
+  return services;
+}
+
+std::vector<BluetoothRemoteGattService*>
+BluetoothDevice::GetPrimaryServicesByUUID(const BluetoothUUID& service_uuid) {
+  std::vector<BluetoothRemoteGattService*> services;
+  VLOG(2) << "Looking for service: " << service_uuid.canonical_value();
+  for (BluetoothRemoteGattService* service : GetGattServices()) {
+    VLOG(2) << "Service in cache: " << service->GetUUID().canonical_value();
+    if (service->GetUUID() == service_uuid && service->IsPrimary()) {
+      services.push_back(service);
+    }
+  }
+  return services;
+}
+
+std::vector<BluetoothRemoteGattCharacteristic*>
+BluetoothDevice::GetCharacteristicsByUUID(
+    const std::string& service_instance_id,
+    const BluetoothUUID& characteristic_uuid) {
+  std::vector<BluetoothRemoteGattCharacteristic*> characteristics;
+  VLOG(2) << "Looking for characteristic: "
+          << characteristic_uuid.canonical_value();
+  BluetoothRemoteGattService* service = GetGattService(service_instance_id);
+  if (service) {
+    for (BluetoothRemoteGattCharacteristic* characteristic :
+         service->GetCharacteristics()) {
+      VLOG(2) << "Characteristic in cache: "
+              << characteristic->GetUUID().canonical_value();
+      if (characteristic->GetUUID() == characteristic_uuid) {
+        characteristics.push_back(characteristic);
+      }
+    }
+  }
+  return characteristics;
+}
+
 void BluetoothDevice::DidConnectGatt() {
   for (const auto& callback : create_gatt_connection_success_callbacks_) {
     callback.Run(
diff --git a/device/bluetooth/bluetooth_device.h b/device/bluetooth/bluetooth_device.h index 2964496..135aad6 100644 --- a/device/bluetooth/bluetooth_device.h +++ b/device/bluetooth/bluetooth_device.h
@@ -31,6 +31,7 @@ class BluetoothAdapter; class BluetoothGattConnection; +class BluetoothRemoteGattCharacteristic; class BluetoothSocket; class BluetoothUUID; @@ -555,6 +556,15 @@ // Return associated BluetoothAdapter. BluetoothAdapter* GetAdapter() { return adapter_; } + std::vector<BluetoothRemoteGattService*> GetPrimaryServices(); + + std::vector<BluetoothRemoteGattService*> GetPrimaryServicesByUUID( + const BluetoothUUID& service_uuid); + + std::vector<BluetoothRemoteGattCharacteristic*> GetCharacteristicsByUUID( + const std::string& service_instance_id, + const BluetoothUUID& characteristic_uuid); + protected: // BluetoothGattConnection is a friend to call Add/RemoveGattConnection. friend BluetoothGattConnection;
diff --git a/device/gamepad/gamepad_provider_unittest.cc b/device/gamepad/gamepad_provider_unittest.cc index e534d04..a1deb15 100644 --- a/device/gamepad/gamepad_provider_unittest.cc +++ b/device/gamepad/gamepad_provider_unittest.cc
@@ -70,14 +70,7 @@ DISALLOW_COPY_AND_ASSIGN(GamepadProviderTest); }; -// Crashes. http://crbug.com/106163 -// crbug.com/147549 -#if defined(OS_ANDROID) -#define MAYBE_PollingAccess DISABLED_PollingAccess -#else -#define MAYBE_PollingAccess PollingAccess -#endif -TEST_F(GamepadProviderTest, MAYBE_PollingAccess) { +TEST_F(GamepadProviderTest, PollingAccess) { WebGamepads test_data; memset(&test_data, 0, sizeof(WebGamepads)); test_data.items[0].connected = true; @@ -117,13 +110,7 @@ EXPECT_EQ(0.5f, output.items[0].axes[1]); } -// http://crbug.com/106163, crbug.com/147549 -#if defined(OS_ANDROID) -#define MAYBE_ConnectDisconnectMultiple DISABLED_ConnectDisconnectMultiple -#else -#define MAYBE_ConnectDisconnectMultiple ConnectDisconnectMultiple -#endif -TEST_F(GamepadProviderTest, MAYBE_ConnectDisconnectMultiple) { +TEST_F(GamepadProviderTest, ConnectDisconnectMultiple) { WebGamepads test_data; test_data.items[0].connected = true; test_data.items[0].timestamp = 0; @@ -218,12 +205,8 @@ EXPECT_TRUE(listener.has_user_gesture()); } -// Crashes. http://crbug.com/106163 -// crbug.com/147549 // Flaky on MSAN: http://crbug.com/640086 -#if defined(OS_ANDROID) -#define MAYBE_Sanitization DISABLED_Sanitization -#elif defined(MEMORY_SANITIZER) +#if defined(MEMORY_SANITIZER) #define MAYBE_Sanitization DISABLED_Sanitization #else #define MAYBE_Sanitization Sanitization
diff --git a/extensions/browser/api/networking_private/networking_private_api.cc b/extensions/browser/api/networking_private/networking_private_api.cc index d8f3069..5bfc42b 100644 --- a/extensions/browser/api/networking_private/networking_private_api.cc +++ b/extensions/browser/api/networking_private/networking_private_api.cc
@@ -779,4 +779,24 @@ SendResponse(false); } +//////////////////////////////////////////////////////////////////////////////// +// NetworkingPrivateGetGlobalPolicyFunction + +NetworkingPrivateGetGlobalPolicyFunction:: + ~NetworkingPrivateGetGlobalPolicyFunction() {} + +ExtensionFunction::ResponseAction +NetworkingPrivateGetGlobalPolicyFunction::Run() { + std::unique_ptr<base::DictionaryValue> policy_dict( + GetDelegate(browser_context())->GetGlobalPolicy()); + DCHECK(policy_dict); + // private_api::GlobalPolicy is a subset of the global policy dictionary + // (by definition), so use the api setter/getter to generate the subset. + std::unique_ptr<private_api::GlobalPolicy> policy( + private_api::GlobalPolicy::FromValue(*policy_dict)); + DCHECK(policy); + return RespondNow( + ArgumentList(private_api::GetGlobalPolicy::Results::Create(*policy))); +} + } // namespace extensions
diff --git a/extensions/browser/api/networking_private/networking_private_api.h b/extensions/browser/api/networking_private/networking_private_api.h index 9bfbf60..7eef833 100644 --- a/extensions/browser/api/networking_private/networking_private_api.h +++ b/extensions/browser/api/networking_private/networking_private_api.h
@@ -504,6 +504,23 @@ DISALLOW_COPY_AND_ASSIGN(NetworkingPrivateSetCellularSimStateFunction); }; +class NetworkingPrivateGetGlobalPolicyFunction + : public UIThreadExtensionFunction { + public: + NetworkingPrivateGetGlobalPolicyFunction() {} + DECLARE_EXTENSION_FUNCTION("networkingPrivate.getGlobalPolicy", + NETWORKINGPRIVATE_GETGLOBALPOLICY); + + protected: + ~NetworkingPrivateGetGlobalPolicyFunction() override; + + // ExtensionFunction: + ResponseAction Run() override; + + private: + DISALLOW_COPY_AND_ASSIGN(NetworkingPrivateGetGlobalPolicyFunction); +}; + } // namespace extensions #endif // EXTENSIONS_BROWSER_API_NETWORKING_PRIVATE_NETWORKING_PRIVATE_API_H_
diff --git a/extensions/browser/api/networking_private/networking_private_chromeos.cc b/extensions/browser/api/networking_private/networking_private_chromeos.cc index 0e2e6d7..9e5f0c3 100644 --- a/extensions/browser/api/networking_private/networking_private_chromeos.cc +++ b/extensions/browser/api/networking_private/networking_private_chromeos.cc
@@ -673,6 +673,17 @@ return device_state_list; } +std::unique_ptr<base::DictionaryValue> +NetworkingPrivateChromeOS::GetGlobalPolicy() { + auto result = base::MakeUnique<base::DictionaryValue>(); + const base::DictionaryValue* global_network_config = + GetManagedConfigurationHandler()->GetGlobalConfigFromPolicy( + std::string() /* no username hash, device policy */); + if (global_network_config) + result->MergeDictionary(global_network_config); + return result; +} + bool NetworkingPrivateChromeOS::EnableNetworkType(const std::string& type) { NetworkTypePattern pattern = chromeos::onc::NetworkTypePatternFromOncType(type);
diff --git a/extensions/browser/api/networking_private/networking_private_chromeos.h b/extensions/browser/api/networking_private/networking_private_chromeos.h index 6e517f3..fe3f4c7 100644 --- a/extensions/browser/api/networking_private/networking_private_chromeos.h +++ b/extensions/browser/api/networking_private/networking_private_chromeos.h
@@ -92,6 +92,7 @@ const FailureCallback& failure_callback) override; std::unique_ptr<base::ListValue> GetEnabledNetworkTypes() override; std::unique_ptr<DeviceStateList> GetDeviceStateList() override; + std::unique_ptr<base::DictionaryValue> GetGlobalPolicy() override; bool EnableNetworkType(const std::string& type) override; bool DisableNetworkType(const std::string& type) override; bool RequestScan() override;
diff --git a/extensions/browser/api/networking_private/networking_private_delegate.h b/extensions/browser/api/networking_private/networking_private_delegate.h index 78ebc32..1e048b1 100644 --- a/extensions/browser/api/networking_private/networking_private_delegate.h +++ b/extensions/browser/api/networking_private/networking_private_delegate.h
@@ -164,7 +164,6 @@ const std::string& puk, const VoidCallback& success_callback, const FailureCallback& failure_callback) = 0; - virtual void SetCellularSimState(const std::string& guid, bool require_pin, const std::string& current_pin, @@ -180,6 +179,12 @@ // Returns a list of DeviceStateProperties. virtual std::unique_ptr<DeviceStateList> GetDeviceStateList() = 0; + // Returns a dictionary of global policy values (may be empty). Note: the + // dictionary is expected to be a superset of the networkingPrivate + // GlobalPolicy dictionary. Any properties not in GlobalPolicy will be + // ignored. + virtual std::unique_ptr<base::DictionaryValue> GetGlobalPolicy() = 0; + // Returns true if the ONC network type |type| is enabled. virtual bool EnableNetworkType(const std::string& type) = 0;
diff --git a/extensions/browser/api/networking_private/networking_private_linux.cc b/extensions/browser/api/networking_private/networking_private_linux.cc index c6a467f..38f03532 100644 --- a/extensions/browser/api/networking_private/networking_private_linux.cc +++ b/extensions/browser/api/networking_private/networking_private_linux.cc
@@ -9,6 +9,7 @@ #include "base/bind.h" #include "base/bind_helpers.h" #include "base/callback.h" +#include "base/memory/ptr_util.h" #include "base/message_loop/message_loop.h" #include "base/strings/string16.h" #include "base/strings/string_split.h" @@ -589,6 +590,11 @@ return device_state_list; } +std::unique_ptr<base::DictionaryValue> +NetworkingPrivateLinux::GetGlobalPolicy() { + return base::MakeUnique<base::DictionaryValue>(); +} + bool NetworkingPrivateLinux::EnableNetworkType(const std::string& type) { return false; }
diff --git a/extensions/browser/api/networking_private/networking_private_linux.h b/extensions/browser/api/networking_private/networking_private_linux.h index 6d69d97..1f398f6d 100644 --- a/extensions/browser/api/networking_private/networking_private_linux.h +++ b/extensions/browser/api/networking_private/networking_private_linux.h
@@ -91,9 +91,9 @@ const std::string& new_pin, const VoidCallback& success_callback, const FailureCallback& failure_callback) override; - std::unique_ptr<base::ListValue> GetEnabledNetworkTypes() override; std::unique_ptr<DeviceStateList> GetDeviceStateList() override; + std::unique_ptr<base::DictionaryValue> GetGlobalPolicy() override; bool EnableNetworkType(const std::string& type) override; bool DisableNetworkType(const std::string& type) override; bool RequestScan() override;
diff --git a/extensions/browser/api/networking_private/networking_private_service_client.cc b/extensions/browser/api/networking_private/networking_private_service_client.cc index 5873218..b85bb76 100644 --- a/extensions/browser/api/networking_private/networking_private_service_client.cc +++ b/extensions/browser/api/networking_private/networking_private_service_client.cc
@@ -353,6 +353,11 @@ return device_state_list; } +std::unique_ptr<base::DictionaryValue> +NetworkingPrivateServiceClient::GetGlobalPolicy() { + return base::MakeUnique<base::DictionaryValue>(); +} + bool NetworkingPrivateServiceClient::EnableNetworkType( const std::string& type) { return false;
diff --git a/extensions/browser/api/networking_private/networking_private_service_client.h b/extensions/browser/api/networking_private/networking_private_service_client.h index 2ea160a..7206bcee 100644 --- a/extensions/browser/api/networking_private/networking_private_service_client.h +++ b/extensions/browser/api/networking_private/networking_private_service_client.h
@@ -107,6 +107,7 @@ const FailureCallback& failure_callback) override; std::unique_ptr<base::ListValue> GetEnabledNetworkTypes() override; std::unique_ptr<DeviceStateList> GetDeviceStateList() override; + std::unique_ptr<base::DictionaryValue> GetGlobalPolicy() override; bool EnableNetworkType(const std::string& type) override; bool DisableNetworkType(const std::string& type) override; bool RequestScan() override;
diff --git a/extensions/browser/extension_function_histogram_value.h b/extensions/browser/extension_function_histogram_value.h index 8b0079ad..662e64b 100644 --- a/extensions/browser/extension_function_histogram_value.h +++ b/extensions/browser/extension_function_histogram_value.h
@@ -1215,6 +1215,7 @@ QUICKUNLOCKPRIVATE_CHECKCREDENTIAL, QUICKUNLOCKPRIVATE_GETCREDENTIALREQUIREMENTS, CLIPBOARD_SETIMAGEDATA, + NETWORKINGPRIVATE_GETGLOBALPOLICY, // Last entry: Add new entries above, then run: // python tools/metrics/histograms/update_extension_histograms.py ENUM_BOUNDARY
diff --git a/extensions/common/api/networking_private.idl b/extensions/common/api/networking_private.idl index b1855c4..6bbe086 100644 --- a/extensions/common/api/networking_private.idl +++ b/extensions/common/api/networking_private.idl
@@ -806,6 +806,15 @@ long? limit; }; + dictionary GlobalPolicy { + // If true, only policy networks may auto connect. Defaults to false. + boolean? AllowOnlyPolicyNetworksToAutoconnect; + + // If true, only policy networks may be connected to and no new networks may + // be added or configured. Defaults to false. + boolean? AllowOnlyPolicyNetworksToConnect; + }; + callback VoidCallback = void(); callback BooleanCallback = void(boolean result); callback StringCallback = void(DOMString result); @@ -818,6 +827,7 @@ callback GetDeviceStatesCallback = void(DeviceStateProperties[] result); callback GetEnabledNetworkTypesCallback = void(NetworkType[] result); callback CaptivePortalStatusCallback = void(CaptivePortalStatus result); + callback GetGlobalPolicyCallback = void(GlobalPolicy result); // These functions all report failures via chrome.runtime.lastError. interface Functions { @@ -1034,6 +1044,10 @@ static void setCellularSimState(DOMString networkGuid, CellularSimState simState, optional VoidCallback callback); + + // Gets the global policy properties. These properties are not expected to + // change during a session. + static void getGlobalPolicy(GetGlobalPolicyCallback callback); }; interface Events {
diff --git a/gpu/command_buffer/service/gles2_cmd_decoder.cc b/gpu/command_buffer/service/gles2_cmd_decoder.cc index 954d5fd..ae0934b 100644 --- a/gpu/command_buffer/service/gles2_cmd_decoder.cc +++ b/gpu/command_buffer/service/gles2_cmd_decoder.cc
@@ -12444,9 +12444,6 @@ } else { tile_height = height; } - // TODO(zmo): This is temporary, only for Canary. Change it to DCHECK before - // M57 branch point. - CHECK_LE(size, kMaxZeroSize); { // Add extra scope to destroy zero and the object it owns right
diff --git a/gpu/ipc/service/BUILD.gn b/gpu/ipc/service/BUILD.gn index f6c126d1..365d6d6 100644 --- a/gpu/ipc/service/BUILD.gn +++ b/gpu/ipc/service/BUILD.gn
@@ -44,6 +44,9 @@ "gpu_memory_manager.h", "gpu_memory_tracking.cc", "gpu_memory_tracking.h", + "gpu_vsync_provider.h", + "gpu_vsync_provider_posix.cc", + "gpu_vsync_provider_win.cc", "gpu_watchdog_thread.cc", "gpu_watchdog_thread.h", "image_transport_surface.h", @@ -140,6 +143,7 @@ "gpu_channel_test_common.cc", "gpu_channel_test_common.h", "gpu_channel_unittest.cc", + "gpu_vsync_provider_unittest_win.cc", ] deps = [ ":service",
diff --git a/gpu/ipc/service/gpu_vsync_provider.h b/gpu/ipc/service/gpu_vsync_provider.h new file mode 100644 index 0000000..13d0ac21 --- /dev/null +++ b/gpu/ipc/service/gpu_vsync_provider.h
@@ -0,0 +1,48 @@ +// Copyright (c) 2016 The Chromium Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#ifndef GPU_IPC_SERVICE_GPU_VSYNC_PROVIDER_H_ +#define GPU_IPC_SERVICE_GPU_VSYNC_PROVIDER_H_ + +#include <memory> + +#include "base/callback.h" +#include "base/time/time.h" +#include "gpu/gpu_export.h" +#include "gpu/ipc/common/surface_handle.h" + +namespace gpu { + +class GpuVSyncWorker; + +// Implements waiting for VSync signal on background thread. +class GPU_EXPORT GpuVSyncProvider { + public: + // Once VSync is enabled, this callback is repeatedly invoked on every VSync. + // The call is made on background thread to avoid increased latency due to + // serializing callback invocation with other GPU tasks. The code that + // implements the callback function is expected to handle that. + using VSyncCallback = base::Callback<void(base::TimeTicks timestamp)>; + + ~GpuVSyncProvider(); + + static std::unique_ptr<GpuVSyncProvider> Create(const VSyncCallback& callback, + SurfaceHandle surface_handle); + + // Enable or disable VSync production. + void EnableVSync(bool enabled); + + private: +#if defined(OS_WIN) + GpuVSyncProvider(const VSyncCallback& callback, SurfaceHandle surface_handle); + + std::unique_ptr<GpuVSyncWorker> vsync_worker_; +#endif // defined(OS_WIN) + + DISALLOW_COPY_AND_ASSIGN(GpuVSyncProvider); +}; + +} // namespace gpu + +#endif // GPU_IPC_SERVICE_GPU_VSYNC_PROVIDER_H_
diff --git a/gpu/ipc/service/gpu_vsync_provider_posix.cc b/gpu/ipc/service/gpu_vsync_provider_posix.cc new file mode 100644 index 0000000..00039f65 --- /dev/null +++ b/gpu/ipc/service/gpu_vsync_provider_posix.cc
@@ -0,0 +1,22 @@ +// Copyright (c) 2016 The Chromium Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#include "gpu/ipc/service/gpu_vsync_provider.h" + +namespace gpu { + +/* static */ +std::unique_ptr<GpuVSyncProvider> GpuVSyncProvider::Create( + const VSyncCallback& callback, + SurfaceHandle surface_handle) { + return std::unique_ptr<GpuVSyncProvider>(); +} + +GpuVSyncProvider::~GpuVSyncProvider() = default; + +void GpuVSyncProvider::EnableVSync(bool enabled) { + NOTREACHED(); +} + +} // namespace gpu
diff --git a/gpu/ipc/service/gpu_vsync_provider_unittest_win.cc b/gpu/ipc/service/gpu_vsync_provider_unittest_win.cc new file mode 100644 index 0000000..2b96b4a --- /dev/null +++ b/gpu/ipc/service/gpu_vsync_provider_unittest_win.cc
@@ -0,0 +1,84 @@ +// Copyright (c) 2016 The Chromium Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#include "gpu/ipc/service/gpu_vsync_provider.h" + +#include <memory> + +#include "base/bind.h" +#include "base/synchronization/lock.h" +#include "base/synchronization/waitable_event.h" +#include "testing/gtest/include/gtest/gtest.h" +#include "ui/base/win/hidden_window.h" + +namespace gpu { + +class GpuVSyncProviderTest : public testing::Test { + public: + GpuVSyncProviderTest() + : vsync_event_(base::WaitableEvent::ResetPolicy::AUTOMATIC, + base::WaitableEvent::InitialState::NOT_SIGNALED) {} + ~GpuVSyncProviderTest() override {} + + void SetUp() override {} + + void TearDown() override {} + + void OnVSync(base::TimeTicks timestamp) { + // This is called on VSync worker thread. + base::AutoLock lock(lock_); + if (++vsync_count_ == 3) + vsync_event_.Signal(); + } + + int vsync_count() { + base::AutoLock lock(lock_); + return vsync_count_; + } + + void reset_vsync_count() { + base::AutoLock lock(lock_); + vsync_count_ = 0; + } + + protected: + base::WaitableEvent vsync_event_; + + private: + base::Lock lock_; + int vsync_count_ = 0; +}; + +TEST_F(GpuVSyncProviderTest, VSyncSignalTest) { + SurfaceHandle window = ui::GetHiddenWindow(); + + std::unique_ptr<GpuVSyncProvider> provider = GpuVSyncProvider::Create( + base::Bind(&GpuVSyncProviderTest::OnVSync, base::Unretained(this)), + window); + + constexpr base::TimeDelta wait_timeout = + base::TimeDelta::FromMilliseconds(300); + + // Verify that there are no VSync signals before provider is enabled + bool wait_result = vsync_event_.TimedWait(wait_timeout); + EXPECT_FALSE(wait_result); + EXPECT_EQ(0, vsync_count()); + + provider->EnableVSync(true); + + vsync_event_.Wait(); + + provider->EnableVSync(false); + + // Verify that VSync callbacks stop coming after disabling. 
+ // Please note that it might still be possible for one + // callback to be in flight on VSync worker thread, so |vsync_count_| + // could still be incremented once, but not enough times to trigger + // |vsync_event_|. + reset_vsync_count(); + wait_result = vsync_event_.TimedWait(wait_timeout); + EXPECT_FALSE(wait_result); +} + +} // namespace gpu
diff --git a/gpu/ipc/service/gpu_vsync_provider_win.cc b/gpu/ipc/service/gpu_vsync_provider_win.cc new file mode 100644 index 0000000..a996e6b6 --- /dev/null +++ b/gpu/ipc/service/gpu_vsync_provider_win.cc
@@ -0,0 +1,264 @@ +// Copyright (c) 2016 The Chromium Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#include "gpu/ipc/service/gpu_vsync_provider.h" + +#include <string> + +#include "base/atomicops.h" +#include "base/strings/stringprintf.h" +#include "base/threading/thread.h" +#include "base/trace_event/trace_event.h" + +#include <windows.h> + +namespace gpu { + +namespace { +// from <D3dkmthk.h> +typedef LONG NTSTATUS; +typedef UINT D3DKMT_HANDLE; +typedef UINT D3DDDI_VIDEO_PRESENT_SOURCE_ID; + +#define STATUS_SUCCESS ((NTSTATUS)0x00000000L) + +typedef struct _D3DKMT_OPENADAPTERFROMHDC { + HDC hDc; + D3DKMT_HANDLE hAdapter; + LUID AdapterLuid; + D3DDDI_VIDEO_PRESENT_SOURCE_ID VidPnSourceId; +} D3DKMT_OPENADAPTERFROMHDC; + +typedef struct _D3DKMT_CLOSEADAPTER { + D3DKMT_HANDLE hAdapter; +} D3DKMT_CLOSEADAPTER; + +typedef struct _D3DKMT_WAITFORVERTICALBLANKEVENT { + D3DKMT_HANDLE hAdapter; + D3DKMT_HANDLE hDevice; + D3DDDI_VIDEO_PRESENT_SOURCE_ID VidPnSourceId; +} D3DKMT_WAITFORVERTICALBLANKEVENT; + +typedef NTSTATUS(APIENTRY* PFND3DKMTOPENADAPTERFROMHDC)( + D3DKMT_OPENADAPTERFROMHDC*); +typedef NTSTATUS(APIENTRY* PFND3DKMTCLOSEADAPTER)(D3DKMT_CLOSEADAPTER*); +typedef NTSTATUS(APIENTRY* PFND3DKMTWAITFORVERTICALBLANKEVENT)( + D3DKMT_WAITFORVERTICALBLANKEVENT*); +} // namespace + +// The actual implementation of background tasks plus any state that might be +// needed on the worker thread. 
+class GpuVSyncWorker : public base::Thread {
+ public:
+  GpuVSyncWorker(const GpuVSyncProvider::VSyncCallback& callback,
+                 SurfaceHandle surface_handle);
+  ~GpuVSyncWorker() override;
+
+  void Enable(bool enabled);
+  void StartRunningVSyncOnThread();
+  void WaitForVSyncOnThread();
+  void SendVSyncUpdate(base::TimeTicks timestamp);
+
+ private:
+  void Reschedule();
+  void OpenAdapter(const wchar_t* device_name);
+  void CloseAdapter();
+  bool WaitForVBlankEvent();
+
+  // Specifies whether background tasks are running.
+  // This can be set on background thread only.
+  bool running_ = false;
+
+  // Specified whether the worker is enabled. This is accessed from both
+  // threads but can be changed on the main thread only.
+  base::subtle::AtomicWord enabled_ = false;
+
+  const GpuVSyncProvider::VSyncCallback callback_;
+  const SurfaceHandle surface_handle_;
+
+  PFND3DKMTOPENADAPTERFROMHDC open_adapter_from_hdc_ptr_;
+  PFND3DKMTCLOSEADAPTER close_adapter_ptr_;
+  PFND3DKMTWAITFORVERTICALBLANKEVENT wait_for_vertical_blank_event_ptr_;
+
+  std::wstring current_device_name_;
+  D3DKMT_HANDLE current_adapter_handle_ = 0;
+  D3DDDI_VIDEO_PRESENT_SOURCE_ID current_source_id_ = 0;
+};
+
+GpuVSyncWorker::GpuVSyncWorker(const GpuVSyncProvider::VSyncCallback& callback,
+                               SurfaceHandle surface_handle)
+    : base::Thread(base::StringPrintf("VSync-%d", surface_handle)),
+      callback_(callback),
+      surface_handle_(surface_handle) {
+  HMODULE gdi32 = GetModuleHandle(L"gdi32");
+  if (!gdi32) {
+    NOTREACHED() << "Can't open gdi32.dll";
+    return;
+  }
+
+  open_adapter_from_hdc_ptr_ = reinterpret_cast<PFND3DKMTOPENADAPTERFROMHDC>(
+      ::GetProcAddress(gdi32, "D3DKMTOpenAdapterFromHdc"));
+  if (!open_adapter_from_hdc_ptr_) {
+    NOTREACHED() << "Can't find D3DKMTOpenAdapterFromHdc in gdi32.dll";
+    return;
+  }
+
+  close_adapter_ptr_ = reinterpret_cast<PFND3DKMTCLOSEADAPTER>(
+      ::GetProcAddress(gdi32, "D3DKMTCloseAdapter"));
+  if (!close_adapter_ptr_) {
+    NOTREACHED() << "Can't find D3DKMTCloseAdapter in gdi32.dll";
+    return;
+  }
+
+  wait_for_vertical_blank_event_ptr_ =
+      reinterpret_cast<PFND3DKMTWAITFORVERTICALBLANKEVENT>(
+          ::GetProcAddress(gdi32, "D3DKMTWaitForVerticalBlankEvent"));
+  if (!wait_for_vertical_blank_event_ptr_) {
+    NOTREACHED() << "Can't find D3DKMTWaitForVerticalBlankEvent in gdi32.dll";
+    return;
+  }
+}
+
+GpuVSyncWorker::~GpuVSyncWorker() {
+  // Thread::Close() call below will block until this task has finished running
+  // so it is safe to post it here and pass unretained pointer.
+  task_runner()->PostTask(FROM_HERE, base::Bind(&GpuVSyncWorker::CloseAdapter,
+                                                base::Unretained(this)));
+  Stop();
+
+  DCHECK_EQ(0u, current_adapter_handle_);
+  DCHECK(current_device_name_.empty());
+}
+
+void GpuVSyncWorker::Enable(bool enabled) {
+  auto was_enabled = base::subtle::NoBarrier_AtomicExchange(&enabled_, enabled);
+
+  if (enabled && !was_enabled)
+    task_runner()->PostTask(
+        FROM_HERE, base::Bind(&GpuVSyncWorker::StartRunningVSyncOnThread,
+                              base::Unretained(this)));
+}
+
+void GpuVSyncWorker::StartRunningVSyncOnThread() {
+  DCHECK(base::PlatformThread::CurrentId() == GetThreadId());
+
+  if (!running_) {
+    running_ = true;
+    WaitForVSyncOnThread();
+  }
+}
+
+void GpuVSyncWorker::WaitForVSyncOnThread() {
+  DCHECK(base::PlatformThread::CurrentId() == GetThreadId());
+
+  TRACE_EVENT0("gpu", "GpuVSyncWorker::WaitForVSyncOnThread");
+
+  HMONITOR monitor =
+      MonitorFromWindow(surface_handle_, MONITOR_DEFAULTTONEAREST);
+  MONITORINFOEX monitor_info;
+  monitor_info.cbSize = sizeof(MONITORINFOEX);
+  BOOL success = GetMonitorInfo(monitor, &monitor_info);
+  CHECK(success);
+
+  if (current_device_name_.compare(monitor_info.szDevice) != 0) {
+    // Monitor changed. Close the current adapter handle and open a new one.
+    CloseAdapter();
+    OpenAdapter(monitor_info.szDevice);
+  }
+
+  if (WaitForVBlankEvent()) {
+    // Note: this sends update on background thread which the callback is
+    // expected to handle.
+    SendVSyncUpdate(base::TimeTicks::Now());
+  }
+
+  Reschedule();
+}
+
+void GpuVSyncWorker::SendVSyncUpdate(base::TimeTicks timestamp) {
+  if (base::subtle::NoBarrier_Load(&enabled_)) {
+    TRACE_EVENT0("gpu", "GpuVSyncWorker::SendVSyncUpdate");
+    callback_.Run(timestamp);
+  }
+}
+
+void GpuVSyncWorker::Reschedule() {
+  // Restart the task if still enabled.
+  if (base::subtle::NoBarrier_Load(&enabled_)) {
+    task_runner()->PostTask(FROM_HERE,
+                            base::Bind(&GpuVSyncWorker::WaitForVSyncOnThread,
+                                       base::Unretained(this)));
+  } else {
+    running_ = false;
+  }
+}
+
+void GpuVSyncWorker::OpenAdapter(const wchar_t* device_name) {
+  DCHECK_EQ(0u, current_adapter_handle_);
+
+  HDC hdc = CreateDC(NULL, device_name, NULL, NULL);
+
+  D3DKMT_OPENADAPTERFROMHDC open_adapter_data;
+  open_adapter_data.hDc = hdc;
+
+  NTSTATUS result = open_adapter_from_hdc_ptr_(&open_adapter_data);
+  DeleteDC(hdc);
+
+  CHECK(result == STATUS_SUCCESS);
+
+  current_device_name_ = device_name;
+  current_adapter_handle_ = open_adapter_data.hAdapter;
+  current_source_id_ = open_adapter_data.VidPnSourceId;
+}
+
+void GpuVSyncWorker::CloseAdapter() {
+  if (current_adapter_handle_ != 0) {
+    D3DKMT_CLOSEADAPTER close_adapter_data;
+    close_adapter_data.hAdapter = current_adapter_handle_;
+
+    NTSTATUS result = close_adapter_ptr_(&close_adapter_data);
+    CHECK(result == STATUS_SUCCESS);
+
+    current_adapter_handle_ = 0;
+    current_device_name_.clear();
+  }
+}
+
+bool GpuVSyncWorker::WaitForVBlankEvent() {
+  D3DKMT_WAITFORVERTICALBLANKEVENT wait_for_vertical_blank_event_data;
+  wait_for_vertical_blank_event_data.hAdapter = current_adapter_handle_;
+  wait_for_vertical_blank_event_data.hDevice = 0;
+  wait_for_vertical_blank_event_data.VidPnSourceId = current_source_id_;
+
+  NTSTATUS result =
+      wait_for_vertical_blank_event_ptr_(&wait_for_vertical_blank_event_data);
+
+  return result == STATUS_SUCCESS;
+}
+
+/* static */
+std::unique_ptr<GpuVSyncProvider> GpuVSyncProvider::Create(
+    const VSyncCallback& callback,
+    SurfaceHandle surface_handle) {
+  return std::unique_ptr<GpuVSyncProvider>(
+      new GpuVSyncProvider(callback, surface_handle));
+}
+
+GpuVSyncProvider::GpuVSyncProvider(const VSyncCallback& callback,
+                                   SurfaceHandle surface_handle)
+    : vsync_worker_(new GpuVSyncWorker(callback, surface_handle)) {
+  // Start the thread.
+  base::Thread::Options options;
+  // TODO(stanisc): might consider even higher priority - REALTIME_AUDIO.
+  options.priority = base::ThreadPriority::DISPLAY;
+  vsync_worker_->StartWithOptions(options);
+}
+
+GpuVSyncProvider::~GpuVSyncProvider() = default;
+
+void GpuVSyncProvider::EnableVSync(bool enabled) {
+  vsync_worker_->Enable(enabled);
+}
+
+}  // namespace gpu
diff --git a/ios/chrome/app/strings/ios_chromium_strings.grd b/ios/chrome/app/strings/ios_chromium_strings.grd index e58bffd..04792e1a 100644 --- a/ios/chrome/app/strings/ios_chromium_strings.grd +++ b/ios/chrome/app/strings/ios_chromium_strings.grd
@@ -214,7 +214,7 @@ Chromium may use web services to improve your browsing experience. You may optionally disable these services. <ph name="BEGIN_LINK">BEGIN_LINK</ph>Learn more<ph name="END_LINK">END_LINK</ph> </message> <message name="IDS_IOS_PAGE_INFO_SECURITY_TAB_SECURE_IDENTITY" desc="The text of the identity section for iOS."> - Chromiums verified that <ph name="ISSUER">$1<ex>VeriSign</ex></ph> issued this website's certificate. + Chromium verified that <ph name="ISSUER">$1<ex>VeriSign</ex></ph> issued this website's certificate. </message> <message name="IDS_IOS_PASSWORD_MANAGER_SAVE_PASSWORD_PROMPT" desc="Info bar message to save a password. [Length: 60em]"> Do you want Chromium to save your password for this site?
diff --git a/ios/chrome/browser/tabs/BUILD.gn b/ios/chrome/browser/tabs/BUILD.gn index 04646b8d..15f57eb 100644 --- a/ios/chrome/browser/tabs/BUILD.gn +++ b/ios/chrome/browser/tabs/BUILD.gn
@@ -88,12 +88,14 @@ "//ios/chrome/browser/ui", "//ios/chrome/browser/ui:browser_list", "//ios/chrome/browser/ui:ui_internal", + "//ios/chrome/browser/ui/alert_coordinator", "//ios/chrome/browser/ui/commands", "//ios/chrome/browser/ui/downloads", "//ios/chrome/browser/ui/overscroll_actions", "//ios/chrome/browser/ui/reader_mode", "//ios/chrome/browser/ui/sad_tab", "//ios/chrome/browser/ui/toolbar", + "//ios/chrome/browser/ui/util", "//ios/chrome/browser/web", "//ios/chrome/browser/web:web_internal", "//ios/net",
diff --git a/ios/chrome/browser/tabs/tab.mm b/ios/chrome/browser/tabs/tab.mm index 72d4d06..a9557a4 100644 --- a/ios/chrome/browser/tabs/tab.mm +++ b/ios/chrome/browser/tabs/tab.mm
@@ -98,6 +98,7 @@ #import "ios/chrome/browser/tabs/tab_snapshotting_delegate.h" #include "ios/chrome/browser/translate/chrome_ios_translate_client.h" #import "ios/chrome/browser/u2f/u2f_controller.h" +#import "ios/chrome/browser/ui/alert_coordinator/form_resubmission_coordinator.h" #import "ios/chrome/browser/ui/commands/UIKit+ChromeExecuteCommand.h" #import "ios/chrome/browser/ui/commands/generic_chrome_command.h" #include "ios/chrome/browser/ui/commands/ios_command_ids.h" @@ -112,13 +113,13 @@ #import "ios/chrome/browser/ui/reader_mode/reader_mode_controller.h" #import "ios/chrome/browser/ui/sad_tab/sad_tab_view.h" #include "ios/chrome/browser/ui/ui_util.h" +#import "ios/chrome/browser/ui/util/top_view_controller.h" #import "ios/chrome/browser/web/auto_reload_bridge.h" #import "ios/chrome/browser/web/blocked_popup_handler.h" #import "ios/chrome/browser/web/external_app_launcher.h" #include "ios/chrome/browser/web/network_activity_indicator_tab_helper.h" #import "ios/chrome/browser/web/passkit_dialog_provider.h" #include "ios/chrome/browser/web/print_observer.h" -#import "ios/chrome/browser/web/resubmit_data_controller.h" #import "ios/chrome/browser/xcallback_parameters.h" #include "ios/chrome/grit/ios_strings.h" #include "ios/public/provider/chrome/browser/chrome_browser_provider.h" @@ -292,12 +293,9 @@ base::scoped_nsobject<WebControllerSnapshotHelper> webControllerSnapshotHelper_; - // The controller that displays an action sheet to confirm form data - // resubmission. - base::scoped_nsobject<ResubmitDataController> resubmitDataController_; - - // Number of attempts to show the resubmit data action sheet. - NSUInteger showResubmitDataActionSheetAttempt_; + // Coordinates Form Resubmission dialog presentation. + base::scoped_nsobject<FormResubmissionCoordinator> + formResubmissionCoordinator_; // Handles support for window.print JavaScript calls. std::unique_ptr<PrintObserver> printObserver_; @@ -375,13 +373,6 @@ // Calls the model and ask to close this tab. 
- (void)closeThisTab; -// Shows the ResubmitDataActionSheet to the user to allow the user to make a -// choice. -- (void)showResubmitDataActionSheet; - -// Clears the ResubmitDataActionSheet from the UI. -- (void)clearResubmitDataActionSheet; - // Initialize the Native App Launcher controller. - (void)initNativeAppNavigationController; @@ -1258,7 +1249,7 @@ [readerModeController_ detachFromWebState]; readerModeController_.reset(); - [self clearResubmitDataActionSheet]; + formResubmissionCoordinator_.reset(); // Invalidate any snapshot stored for this session. NSString* sessionID = [self currentSessionID]; @@ -1717,62 +1708,28 @@ onFormResubmissionForRequest:(NSURLRequest*)request continueBlock:(ProceduralBlock)continueBlock cancelBlock:(ProceduralBlock)cancelBlock { - // Clear the resubmit data action sheet before loading a new request. - [self clearResubmitDataActionSheet]; - resubmitDataController_.reset([[ResubmitDataController alloc] - initWithContinueBlock:continueBlock - cancelBlock:cancelBlock]); - [self showResubmitDataActionSheet]; -} + UIViewController* topController = + top_view_controller::TopPresentedViewControllerFrom( + [UIApplication sharedApplication].keyWindow.rootViewController); -- (void)showResubmitDataActionSheet { - // Return early if the CRWWebController has been closed or web - // usage is disabled on it. - if (![self.webController webUsageEnabled]) - return; - // Check to see if an action sheet can be shown. - if (self.webState && [self.webState->GetView() window]) { - // Display the action sheet with the arrow pointing at the top center of the - // web contents. 
- CGFloat xOrigin = CGRectGetMidX(self.webState->GetView().frame); - CGFloat yOrigin = CGRectGetMinY(self.webState->GetView().frame) + - [[self fullScreenControllerDelegate] headerHeight]; - [resubmitDataController_ - presentActionSheetFromRect:CGRectMake(xOrigin, yOrigin, 1, 1) - inView:self.webState->GetView()]; - showResubmitDataActionSheetAttempt_ = 0; - return; - } + // Display the action sheet with the arrow pointing at the top center of the + // web contents. + CGPoint dialogLocation = + CGPointMake(CGRectGetMidX(webController.view.frame), + CGRectGetMinY(webController.view.frame) + + [[self fullScreenControllerDelegate] headerHeight]); - // The resubmit data action cannot be presented as the |contentView_| was not - // yet added to the window. Retry after |kDelayBetweenAttemptsNanoSecs|. - // TODO(crbug.com/227868): The strategy to poll until the resubmit data action - // sheet can be presented is a temporary workaround. This needs to be - // refactored to match the Chromium implementation: - // * web_controller should notify/ the BVC once an action sheet should be - // shown. 
- // * BVC should present the action sheet and then trigger the reload - const NSUInteger kMaximumNumberAttempts = 10; - // 400 milliseconds - const int64_t kDelayBetweenAttemptsNanoSecs = 0.4 * NSEC_PER_SEC; - if (showResubmitDataActionSheetAttempt_ >= kMaximumNumberAttempts) { - NOTREACHED(); - [self clearResubmitDataActionSheet]; - return; - } - base::WeakNSObject<Tab> weakTab(self); - dispatch_after( - dispatch_time(DISPATCH_TIME_NOW, kDelayBetweenAttemptsNanoSecs), - dispatch_get_main_queue(), ^{ - [weakTab showResubmitDataActionSheet]; - }); - showResubmitDataActionSheetAttempt_++; -} - -- (void)clearResubmitDataActionSheet { - [resubmitDataController_ dismissActionSheet]; - resubmitDataController_.reset(); - showResubmitDataActionSheetAttempt_ = 0; + formResubmissionCoordinator_.reset([[FormResubmissionCoordinator alloc] + initWithBaseViewController:topController + dialogLocation:dialogLocation + webState:webController.webState + completionHandler:^(BOOL shouldContinue) { + if (shouldContinue) + continueBlock(); + else + cancelBlock(); + }]); + [formResubmissionCoordinator_ start]; } // The web page wants to close its own window. @@ -1795,7 +1752,7 @@ transition:(ui::PageTransition)transition { DCHECK(self.webController.loadPhase == web::LOAD_REQUESTED); DCHECK([self navigationManager]); - [self clearResubmitDataActionSheet]; + formResubmissionCoordinator_.reset(); // Move the toolbar to visible during page load. [fullScreenController_ disableFullScreen];
diff --git a/ios/chrome/browser/ui/alert_coordinator/BUILD.gn b/ios/chrome/browser/ui/alert_coordinator/BUILD.gn index ed6238b2..db803b74 100644 --- a/ios/chrome/browser/ui/alert_coordinator/BUILD.gn +++ b/ios/chrome/browser/ui/alert_coordinator/BUILD.gn
@@ -9,12 +9,16 @@ "action_sheet_coordinator.mm", "alert_coordinator.h", "alert_coordinator.mm", + "form_resubmission_coordinator.h", + "form_resubmission_coordinator.mm", "input_alert_coordinator.h", "input_alert_coordinator.mm", ] deps = [ "//base", + "//components/strings", "//ios/chrome/browser", + "//ios/web", "//ui/base", "//ui/strings", ] @@ -26,11 +30,17 @@ sources = [ "action_sheet_coordinator_unittest.mm", "alert_coordinator_unittest.mm", + "form_resubmission_coordinator_unittest.mm", "input_alert_coordinator_unittest.mm", ] deps = [ ":alert_coordinator", "//base", + "//base/test:test_support", + "//components/strings", + "//ios/chrome/browser/ui", + "//ios/chrome/test:test_support", + "//ios/web:test_support", "//testing/gtest", "//third_party/ocmock", "//ui/base",
diff --git a/ios/chrome/browser/ui/alert_coordinator/action_sheet_coordinator_unittest.mm b/ios/chrome/browser/ui/alert_coordinator/action_sheet_coordinator_unittest.mm index 83eb94e..9437ff3d 100644 --- a/ios/chrome/browser/ui/alert_coordinator/action_sheet_coordinator_unittest.mm +++ b/ios/chrome/browser/ui/alert_coordinator/action_sheet_coordinator_unittest.mm
@@ -7,6 +7,7 @@ #import <UIKit/UIKit.h> #import "base/mac/foundation_util.h" +#import "ios/chrome/test/scoped_key_window.h" #include "testing/platform_test.h" #if !defined(__has_feature) || !__has_feature(objc_arc) @@ -15,12 +16,9 @@ // Tests that if there is a popover, it uses the CGRect passed in init. TEST(ActionSheetCoordinatorTest, CGRectUsage) { - // Setup. - UIWindow* window = - [[UIWindow alloc] initWithFrame:[[UIScreen mainScreen] bounds]]; - [window makeKeyAndVisible]; + ScopedKeyWindow scoped_key_window; UIViewController* viewController = [[UIViewController alloc] init]; - [window setRootViewController:viewController]; + [scoped_key_window.Get() setRootViewController:viewController]; UIView* view = [[UIView alloc] initWithFrame:viewController.view.bounds];
diff --git a/ios/chrome/browser/ui/alert_coordinator/alert_coordinator_unittest.mm b/ios/chrome/browser/ui/alert_coordinator/alert_coordinator_unittest.mm index 9a984b9..28fc00ad 100644 --- a/ios/chrome/browser/ui/alert_coordinator/alert_coordinator_unittest.mm +++ b/ios/chrome/browser/ui/alert_coordinator/alert_coordinator_unittest.mm
@@ -7,6 +7,7 @@ #import <UIKit/UIKit.h> #import "base/mac/foundation_util.h" +#import "ios/chrome/test/scoped_key_window.h" #include "testing/platform_test.h" #import "third_party/ocmock/OCMock/OCMock.h" #include "third_party/ocmock/gtest_support.h" @@ -23,16 +24,10 @@ class AlertCoordinatorTest : public PlatformTest { protected: AlertCoordinatorTest() { - // Save the current key window and restore it after the test. - previous_key_window_ = [[UIApplication sharedApplication] keyWindow]; - window_ = [[UIWindow alloc] initWithFrame:[[UIScreen mainScreen] bounds]]; - [window_ makeKeyAndVisible]; view_controller_ = [[UIViewController alloc] init]; - [window_ setRootViewController:view_controller_]; + [scoped_key_window_.Get() setRootViewController:view_controller_]; } - ~AlertCoordinatorTest() override { [previous_key_window_ makeKeyAndVisible]; } - void startAlertCoordinator() { [alert_coordinator_ start]; } UIViewController* getViewController() { return view_controller_; } @@ -52,9 +47,8 @@ } private: - UIWindow* previous_key_window_; AlertCoordinator* alert_coordinator_; - UIWindow* window_; + ScopedKeyWindow scoped_key_window_; UIViewController* view_controller_; };
diff --git a/ios/chrome/browser/ui/alert_coordinator/form_resubmission_coordinator.h b/ios/chrome/browser/ui/alert_coordinator/form_resubmission_coordinator.h new file mode 100644 index 0000000..a0ec8d9 --- /dev/null +++ b/ios/chrome/browser/ui/alert_coordinator/form_resubmission_coordinator.h
@@ -0,0 +1,35 @@ +// Copyright 2017 The Chromium Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#ifndef IOS_CHROME_BROWSER_UI_ALERT_COORDINATOR_FORM_RESUBMISSION_COORDINATOR_H_ +#define IOS_CHROME_BROWSER_UI_ALERT_COORDINATOR_FORM_RESUBMISSION_COORDINATOR_H_ + +#import <UIKit/UIKit.h> + +#import "base/ios/block_types.h" +#import "ios/chrome/browser/chrome_coordinator.h" +#import "ios/web/public/web_state/web_state.h" + +// Creates and manages Form Resubmission dialog that has Continue and Cancel +// buttons. +@interface FormResubmissionCoordinator : ChromeCoordinator + +// Initializes a coordinator for displaying an alert on this |viewController|. +// |dialogLocation| is a point where resubmission dialog should be presented on +// iPad. |webState| must not be null and must be owned by the caller. +// |completionHandler| will be called with YES when Continue button is tapped +// and with NO when Cancel button is tapped. |completionHandler| can not be +// null. +- (instancetype)initWithBaseViewController:(UIViewController*)viewController + dialogLocation:(CGPoint)dialogLocation + webState:(web::WebState*)webState + completionHandler:(void (^)(BOOL))completionHandler + NS_DESIGNATED_INITIALIZER; + +- (instancetype)initWithBaseViewController:(UIViewController*)viewController + NS_UNAVAILABLE; + +@end + +#endif // IOS_CHROME_BROWSER_UI_ALERT_COORDINATOR_FORM_RESUBMISSION_COORDINATOR_H_
diff --git a/ios/chrome/browser/ui/alert_coordinator/form_resubmission_coordinator.mm b/ios/chrome/browser/ui/alert_coordinator/form_resubmission_coordinator.mm new file mode 100644 index 0000000..61486fc1 --- /dev/null +++ b/ios/chrome/browser/ui/alert_coordinator/form_resubmission_coordinator.mm
@@ -0,0 +1,139 @@ +// Copyright 2017 The Chromium Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#import "ios/chrome/browser/ui/alert_coordinator/form_resubmission_coordinator.h" + +#include "base/logging.h" +#include "components/strings/grit/components_strings.h" +#include "ui/base/l10n/l10n_util.h" + +#if !defined(__has_feature) || !__has_feature(objc_arc) +#error "This file requires ARC support." +#endif + +@interface FormResubmissionCoordinator () { + // WebState which requested this dialog. + web::WebState* _webState; + // View Controller representing the dialog. + UIAlertController* _dialogController; + // Number of attempts to show the resubmit data action sheet. + NSUInteger _resubmissionAttemptCount; +} + +// Creates a new UIAlertController to use for the dialog. ++ (UIAlertController*)newDialogControllerForSourceView:(UIView*)sourceView + sourceRect:(CGRect)sourceRect + completionHandler: + (void (^)(BOOL))completionHandler; + +@end + +@implementation FormResubmissionCoordinator + +- (instancetype)initWithBaseViewController:(UIViewController*)viewController + dialogLocation:(CGPoint)dialogLocation + webState:(web::WebState*)webState + completionHandler:(void (^)(BOOL))completionHandler { + DCHECK(webState); + DCHECK(completionHandler); + self = [super initWithBaseViewController:viewController]; + if (self) { + _webState = webState; + CGRect sourceRect = CGRectMake(dialogLocation.x, dialogLocation.y, 1, 1); + _dialogController = + [[self class] newDialogControllerForSourceView:webState->GetView() + sourceRect:sourceRect + completionHandler:completionHandler]; + } + return self; +} + +- (void)start { + if (!_webState->IsWebUsageEnabled()) + return; + + // Check to see if an action sheet can be shown. 
+ if ([_webState->GetView() window]) { + [self.baseViewController presentViewController:_dialogController + animated:YES + completion:nil]; + _resubmissionAttemptCount = 0; + return; + } + + // The resubmit data action cannot be presented as the view was not + // yet added to the window. Retry after |kDelayBetweenAttemptsNanoSecs|. + // TODO(crbug.com/227868): The strategy to poll until the resubmit data action + // sheet can be presented is a temporary workaround. This needs to be + // refactored to match the Chromium implementation: + // * web_controller should notify/ the BVC once an action sheet should be + // shown. + // * BVC should present the action sheet and then trigger the reload + const NSUInteger kMaximumNumberAttempts = 10; + // 400 milliseconds + const int64_t kDelayBetweenAttemptsNanoSecs = 0.4 * NSEC_PER_SEC; + if (_resubmissionAttemptCount >= kMaximumNumberAttempts) { + NOTREACHED(); + [self stop]; + return; + } + __weak FormResubmissionCoordinator* weakSelf = self; + dispatch_after( + dispatch_time(DISPATCH_TIME_NOW, kDelayBetweenAttemptsNanoSecs), + dispatch_get_main_queue(), ^{ + [weakSelf start]; + }); + _resubmissionAttemptCount++; +} + +- (void)stop { + [_dialogController.presentingViewController + dismissViewControllerAnimated:YES + completion:nil]; + _resubmissionAttemptCount = 0; +} + +#pragma mark - Private + ++ (UIAlertController*)newDialogControllerForSourceView:(UIView*)sourceView + sourceRect:(CGRect)sourceRect + completionHandler: + (void (^)(BOOL))completionHandler { + NSString* message = [NSString + stringWithFormat:@"%@\n\n%@", + l10n_util::GetNSString(IDS_HTTP_POST_WARNING_TITLE), + l10n_util::GetNSString(IDS_HTTP_POST_WARNING)]; + NSString* buttonTitle = l10n_util::GetNSString(IDS_HTTP_POST_WARNING_RESEND); + NSString* cancelTitle = l10n_util::GetNSString(IDS_CANCEL); + + UIAlertController* result = [UIAlertController + alertControllerWithTitle:nil + message:message + preferredStyle:UIAlertControllerStyleActionSheet]; + // Make 
sure the block is located on the heap. + completionHandler = [completionHandler copy]; + + UIAlertAction* cancelAction = + [UIAlertAction actionWithTitle:cancelTitle + style:UIAlertActionStyleCancel + handler:^(UIAlertAction* _Nonnull action) { + completionHandler(NO); + }]; + [result addAction:cancelAction]; + UIAlertAction* continueAction = + [UIAlertAction actionWithTitle:buttonTitle + style:UIAlertActionStyleDefault + handler:^(UIAlertAction* _Nonnull action) { + completionHandler(YES); + }]; + [result addAction:continueAction]; + + result.modalPresentationStyle = UIModalPresentationPopover; + result.popoverPresentationController.sourceView = sourceView; + result.popoverPresentationController.sourceRect = sourceRect; + + return result; +} + +@end
diff --git a/ios/chrome/browser/ui/alert_coordinator/form_resubmission_coordinator_unittest.mm b/ios/chrome/browser/ui/alert_coordinator/form_resubmission_coordinator_unittest.mm new file mode 100644 index 0000000..04a92d3e --- /dev/null +++ b/ios/chrome/browser/ui/alert_coordinator/form_resubmission_coordinator_unittest.mm
@@ -0,0 +1,127 @@ +// Copyright 2017 The Chromium Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#import "ios/chrome/browser/ui/alert_coordinator/form_resubmission_coordinator.h" + +#import <UIKit/UIKit.h> + +#import "base/mac/foundation_util.h" +#include "base/test/ios/wait_util.h" +#include "components/strings/grit/components_strings.h" +#include "ios/chrome/browser/ui/ui_util.h" +#import "ios/chrome/test/scoped_key_window.h" +#import "ios/web/public/test/fakes/test_web_state.h" +#import "testing/gtest_mac.h" +#include "testing/platform_test.h" +#include "ui/base/l10n/l10n_util.h" + +#if !defined(__has_feature) || !__has_feature(objc_arc) +#error "This file requires ARC support." +#endif + +namespace { +// Test location passed to FormResubmissionCoordinator. +const CGFloat kDialogHorizontalLocation = 10; +const CGFloat kDialogVerticalLocation = 20; +} + +// Test fixture to test FormResubmissionCoordinator class. +class FormResubmissionCoordinatorTest : public PlatformTest { + protected: + FormResubmissionCoordinatorTest() { + view_controller_ = [[UIViewController alloc] init]; + [scoped_key_window_.Get() setRootViewController:view_controller_]; + UIView* view = [[UIView alloc] initWithFrame:view_controller_.view.bounds]; + web_state_.SetView(view); + web_state_.SetWebUsageEnabled(true); + + CGPoint dialogLocation = + CGPointMake(kDialogHorizontalLocation, kDialogVerticalLocation); + coordinator_ = [[FormResubmissionCoordinator alloc] + initWithBaseViewController:view_controller_ + dialogLocation:dialogLocation + webState:&web_state_ + completionHandler:^(BOOL){ + }]; + } + + UIAlertController* GetAlertController() const { + return base::mac::ObjCCastStrict<UIAlertController>( + view_controller_.presentedViewController); + } + + // Coordinator will not present the dialog until view is added to the window. 
+ void AddViewToWindow() { + [view_controller_.view addSubview:web_state_.GetView()]; + } + + FormResubmissionCoordinator* coordinator_; + + private: + ScopedKeyWindow scoped_key_window_; + web::TestWebState web_state_; + UIViewController* view_controller_; +}; + +// Tests that if there is a popover, it uses location passed in init. +TEST_F(FormResubmissionCoordinatorTest, CGRectUsage) { + AddViewToWindow(); + [coordinator_ start]; + UIPopoverPresentationController* popover_presentation_controller = + GetAlertController().popoverPresentationController; + if (IsIPadIdiom()) { + CGRect source_rect = popover_presentation_controller.sourceRect; + EXPECT_EQ(kDialogHorizontalLocation, CGRectGetMinX(source_rect)); + EXPECT_EQ(kDialogVerticalLocation, CGRectGetMinY(source_rect)); + } +} + +// Tests the form resubmission dialog has nil title. +TEST_F(FormResubmissionCoordinatorTest, Title) { + AddViewToWindow(); + [coordinator_ start]; + EXPECT_FALSE(GetAlertController().title); + [coordinator_ stop]; +} + +// Tests the form resubmission dialog has correct message. +TEST_F(FormResubmissionCoordinatorTest, Message) { + AddViewToWindow(); + [coordinator_ start]; + EXPECT_TRUE([GetAlertController().message + containsString:l10n_util::GetNSString(IDS_HTTP_POST_WARNING_TITLE)]); + EXPECT_TRUE([GetAlertController().message + containsString:l10n_util::GetNSString(IDS_HTTP_POST_WARNING)]); + [coordinator_ stop]; +} + +// Tests the form resubmission dialog actions have correct titles. +TEST_F(FormResubmissionCoordinatorTest, ActionTitles) { + AddViewToWindow(); + [coordinator_ start]; + EXPECT_EQ(2U, GetAlertController().actions.count); + + EXPECT_NSEQ(l10n_util::GetNSString(IDS_CANCEL), + [GetAlertController().actions.firstObject title]); + EXPECT_NSEQ(l10n_util::GetNSString(IDS_HTTP_POST_WARNING_RESEND), + [GetAlertController().actions.lastObject title]); + [coordinator_ stop]; +} + +// Tests that form resubmission dialog is presented once view is added to the +// window. 
+TEST_F(FormResubmissionCoordinatorTest, Retrying) { + [coordinator_ start]; + EXPECT_FALSE(GetAlertController()); + + AddViewToWindow(); + + base::test::ios::WaitUntilCondition(^bool { + return GetAlertController(); + }); + + EXPECT_EQ(2U, GetAlertController().actions.count); + + [coordinator_ stop]; +}
diff --git a/ios/chrome/browser/web/BUILD.gn b/ios/chrome/browser/web/BUILD.gn index 07e03361..f6ff8757 100644 --- a/ios/chrome/browser/web/BUILD.gn +++ b/ios/chrome/browser/web/BUILD.gn
@@ -11,8 +11,6 @@ "dom_altering_lock.mm", "network_activity_indicator_tab_helper.h", "network_activity_indicator_tab_helper.mm", - "resubmit_data_controller.h", - "resubmit_data_controller.mm", ] deps = [ "//base",
diff --git a/ios/chrome/browser/web/resubmit_data_controller.h b/ios/chrome/browser/web/resubmit_data_controller.h deleted file mode 100644 index 0f847d7..0000000 --- a/ios/chrome/browser/web/resubmit_data_controller.h +++ /dev/null
@@ -1,30 +0,0 @@ -// Copyright 2016 The Chromium Authors. All rights reserved. -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -#ifndef IOS_CHROME_BROWSER_WEB_RESUBMIT_DATA_CONTROLLER_H_ -#define IOS_CHROME_BROWSER_WEB_RESUBMIT_DATA_CONTROLLER_H_ - -#import <UIKit/UIKit.h> - -#import "base/ios/block_types.h" - -// Handles the action sheet that is presenting to confirm POST data -// resubmission. -@interface ResubmitDataController : NSObject - -- (instancetype)initWithContinueBlock:(ProceduralBlock)continueBlock - cancelBlock:(ProceduralBlock)cancelBlock - NS_DESIGNATED_INITIALIZER; -- (instancetype)init NS_UNAVAILABLE; - -// Presents the action sheet. On regular horizontal size class, it is presented -// in a popover from |rect| in |view|. -- (void)presentActionSheetFromRect:(CGRect)rect inView:(UIView*)view; - -// Dismisses the action sheet. -- (void)dismissActionSheet; - -@end - -#endif // IOS_CHROME_BROWSER_WEB_RESUBMIT_DATA_CONTROLLER_H_
diff --git a/ios/chrome/browser/web/resubmit_data_controller.mm b/ios/chrome/browser/web/resubmit_data_controller.mm deleted file mode 100644 index fa700276..0000000 --- a/ios/chrome/browser/web/resubmit_data_controller.mm +++ /dev/null
@@ -1,87 +0,0 @@ -// Copyright 2016 The Chromium Authors. All rights reserved. -// Use of this source code is governed by a BSD-style license that can be -// found in the LICENSE file. - -#import "ios/chrome/browser/web/resubmit_data_controller.h" - -#import "base/logging.h" -#include "components/strings/grit/components_strings.h" -#include "ui/base/l10n/l10n_util.h" - -#if !defined(__has_feature) || !__has_feature(objc_arc) -#error "This file requires ARC support." -#endif - -@interface ResubmitDataController () { - UIAlertController* _alertController; -} -@end - -@implementation ResubmitDataController - -- (instancetype)init { - NOTREACHED(); - return nil; -} - -- (instancetype)initWithContinueBlock:(ProceduralBlock)continueBlock - cancelBlock:(ProceduralBlock)cancelBlock { - DCHECK(continueBlock); - DCHECK(cancelBlock); - self = [super init]; - if (self) { - NSString* message = [NSString - stringWithFormat:@"%@\n\n%@", - l10n_util::GetNSString(IDS_HTTP_POST_WARNING_TITLE), - l10n_util::GetNSString(IDS_HTTP_POST_WARNING)]; - NSString* buttonTitle = - l10n_util::GetNSString(IDS_HTTP_POST_WARNING_RESEND); - NSString* cancelTitle = l10n_util::GetNSString(IDS_CANCEL); - - _alertController = [UIAlertController - alertControllerWithTitle:nil - message:message - preferredStyle:UIAlertControllerStyleActionSheet]; - // Make sure the blocks are located on the heap. 
- continueBlock = [continueBlock copy]; - cancelBlock = [cancelBlock copy]; - - UIAlertAction* cancelAction = - [UIAlertAction actionWithTitle:cancelTitle - style:UIAlertActionStyleCancel - handler:^(UIAlertAction* _Nonnull action) { - cancelBlock(); - }]; - [_alertController addAction:cancelAction]; - UIAlertAction* continueAction = - [UIAlertAction actionWithTitle:buttonTitle - style:UIAlertActionStyleDefault - handler:^(UIAlertAction* _Nonnull action) { - continueBlock(); - }]; - [_alertController addAction:continueAction]; - } - return self; -} - -- (void)presentActionSheetFromRect:(CGRect)rect inView:(UIView*)view { - _alertController.modalPresentationStyle = UIModalPresentationPopover; - UIPopoverPresentationController* popPresenter = - _alertController.popoverPresentationController; - popPresenter.sourceView = view; - popPresenter.sourceRect = rect; - - UIViewController* topController = view.window.rootViewController; - while (topController.presentedViewController) - topController = topController.presentedViewController; - [topController presentViewController:_alertController - animated:YES - completion:nil]; -} - -- (void)dismissActionSheet { - [_alertController.presentingViewController dismissViewControllerAnimated:YES - completion:nil]; -} - -@end
diff --git a/ios/chrome/test/BUILD.gn b/ios/chrome/test/BUILD.gn index c1d0cc51..80c9356 100644 --- a/ios/chrome/test/BUILD.gn +++ b/ios/chrome/test/BUILD.gn
@@ -27,6 +27,7 @@ "ios_chrome_scoped_testing_local_state.h", "ios_chrome_unit_test_suite.h", "ios_chrome_unit_test_suite.mm", + "scoped_key_window.h", "testing_application_context.h", "testing_application_context.mm", ]
diff --git a/ios/chrome/test/scoped_key_window.h b/ios/chrome/test/scoped_key_window.h new file mode 100644 index 0000000..a6ed60ce --- /dev/null +++ b/ios/chrome/test/scoped_key_window.h
@@ -0,0 +1,28 @@ +// Copyright 2017 The Chromium Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#ifndef IOS_CHROME_TEST_SCOPED_KEY_WINDOW_H_ +#define IOS_CHROME_TEST_SCOPED_KEY_WINDOW_H_ + +#import <UIKit/UIKit.h> + +// Sets temporary key window and returns it via Get method. Saves the current +// key window and restores it on destruction. +class ScopedKeyWindow { + public: + explicit ScopedKeyWindow() + : current_key_window_( + [[UIWindow alloc] initWithFrame:[UIScreen mainScreen].bounds]), + original_key_window_([UIApplication sharedApplication].keyWindow) { + [current_key_window_ makeKeyAndVisible]; + } + ~ScopedKeyWindow() { [original_key_window_ makeKeyAndVisible]; } + UIWindow* Get() { return current_key_window_; } + + private: + UIWindow* current_key_window_; + UIWindow* original_key_window_; +}; + +#endif // IOS_CHROME_TEST_SCOPED_KEY_WINDOW_H_
diff --git a/ios/web/public/test/fakes/test_web_state.h b/ios/web/public/test/fakes/test_web_state.h index c6554fef..ba140e01 100644 --- a/ios/web/public/test/fakes/test_web_state.h +++ b/ios/web/public/test/fakes/test_web_state.h
@@ -83,6 +83,7 @@ void SetTrustLevel(URLVerificationTrustLevel trust_level); void SetNavigationManager( std::unique_ptr<NavigationManager> navigation_manager); + void SetView(UIView* view); // Notifier for tests. void OnPageLoaded(PageLoadCompletionStatus load_completion_status); @@ -97,6 +98,7 @@ std::string mime_type_; std::string content_language_; std::unique_ptr<NavigationManager> navigation_manager_; + base::scoped_nsobject<UIView> view_; // A list of observers notified when page state changes. Weak references. base::ObserverList<WebStateObserver, true> observers_;
diff --git a/ios/web/public/test/fakes/test_web_state.mm b/ios/web/public/test/fakes/test_web_state.mm index 3171d2f0..8757e14 100644 --- a/ios/web/public/test/fakes/test_web_state.mm +++ b/ios/web/public/test/fakes/test_web_state.mm
@@ -57,7 +57,7 @@ void TestWebState::SetShouldSuppressDialogs(bool should_suppress) {} UIView* TestWebState::GetView() { - return nullptr; + return view_.get(); } const NavigationManager* TestWebState::GetNavigationManager() const { @@ -73,6 +73,10 @@ navigation_manager_ = std::move(navigation_manager); } +void TestWebState::SetView(UIView* view) { + view_.reset([view retain]); +} + CRWJSInjectionReceiver* TestWebState::GetJSInjectionReceiver() const { return nullptr; }
diff --git a/media/audio/audio_input_device.cc b/media/audio/audio_input_device.cc index aa44d9d..ed5e79d 100644 --- a/media/audio/audio_input_device.cc +++ b/media/audio/audio_input_device.cc
@@ -292,12 +292,6 @@ audio_buses_.push_back(std::move(audio_bus)); ptr += segment_length_; } - - // Indicate that browser side capture initialization has succeeded and IPC - // channel initialized. This effectively completes the - // AudioCapturerSource::Start()' phase as far as the caller of that function - // is concerned. - capture_callback_->OnCaptureStarted(); } void AudioInputDevice::AudioThreadCallback::Process(uint32_t pending_data) {
diff --git a/media/audio/audio_input_device_unittest.cc b/media/audio/audio_input_device_unittest.cc index eb144c6..94976a21 100644 --- a/media/audio/audio_input_device_unittest.cc +++ b/media/audio/audio_input_device_unittest.cc
@@ -3,23 +3,16 @@ // found in the LICENSE file. #include "base/memory/ptr_util.h" -#include "base/memory/shared_memory.h" #include "base/message_loop/message_loop.h" -#include "base/process/process_handle.h" #include "base/run_loop.h" #include "base/single_thread_task_runner.h" -#include "base/sync_socket.h" #include "media/audio/audio_input_device.h" #include "testing/gmock/include/gmock/gmock.h" #include "testing/gmock_mutant.h" #include "testing/gtest/include/gtest/gtest.h" -using base::CancelableSyncSocket; -using base::SharedMemory; -using base::SyncSocket; using testing::_; using testing::DoAll; -using testing::Invoke; namespace media { @@ -46,7 +39,6 @@ MockCaptureCallback() {} ~MockCaptureCallback() override {} - MOCK_METHOD0(OnCaptureStarted, void()); MOCK_METHOD4(Capture, void(const AudioBus* audio_source, int audio_delay_milliseconds, @@ -95,49 +87,4 @@ base::RunLoop().Run(); } -ACTION_P5(ReportOnStreamCreated, device, handle, socket, length, segments) { - static_cast<AudioInputIPCDelegate*>(device)->OnStreamCreated( - handle, socket, length, segments); -} - -TEST(AudioInputDeviceTest, CreateStream) { - AudioParameters params(AudioParameters::AUDIO_PCM_LOW_LATENCY, - CHANNEL_LAYOUT_STEREO, 48000, 16, 480); - SharedMemory shared_memory; - CancelableSyncSocket browser_socket; - CancelableSyncSocket renderer_socket; - - const int memory_size = sizeof(AudioInputBufferParameters) + - AudioBus::CalculateMemorySize(params); - - ASSERT_TRUE(shared_memory.CreateAndMapAnonymous(memory_size)); - memset(shared_memory.memory(), 0xff, memory_size); - - ASSERT_TRUE( - CancelableSyncSocket::CreatePair(&browser_socket, &renderer_socket)); - SyncSocket::TransitDescriptor audio_device_socket_descriptor; - ASSERT_TRUE(renderer_socket.PrepareTransitDescriptor( - base::GetCurrentProcessHandle(), &audio_device_socket_descriptor)); - base::SharedMemoryHandle duplicated_memory_handle; - ASSERT_TRUE(shared_memory.ShareToProcess(base::GetCurrentProcessHandle(), - 
&duplicated_memory_handle)); - - base::MessageLoopForIO io_loop; - MockCaptureCallback callback; - MockAudioInputIPC* input_ipc = new MockAudioInputIPC(); - scoped_refptr<AudioInputDevice> device( - new AudioInputDevice(base::WrapUnique(input_ipc), io_loop.task_runner())); - device->Initialize(params, &callback, 1); - device->Start(); - - EXPECT_CALL(*input_ipc, CreateStream(_, _, _, _, _)) - .WillOnce(ReportOnStreamCreated( - device.get(), duplicated_memory_handle, - SyncSocket::UnwrapHandle(audio_device_socket_descriptor), memory_size, - 1)); - EXPECT_CALL(*input_ipc, RecordStream()); - EXPECT_CALL(callback, OnCaptureStarted()) - .WillOnce(QuitLoop(io_loop.task_runner())); - base::RunLoop().Run(); -} } // namespace media.
diff --git a/media/base/audio_capturer_source.h b/media/base/audio_capturer_source.h index 9b6f8e8..d4f9cab 100644 --- a/media/base/audio_capturer_source.h +++ b/media/base/audio_capturer_source.h
@@ -22,11 +22,6 @@ public: class CaptureCallback { public: - // Signals that audio recording has been started. Called asynchronously - // after Start() has completed. If Start() encounters problems before this - // callback can be made, OnCaptureError will be called instead. - virtual void OnCaptureStarted() = 0; - // Callback to deliver the captured data from the OS. // TODO(chcunningham): Update delay argument to use frames instead of // milliseconds to prevent loss of precision. See http://crbug.com/587291.
diff --git a/net/net.gypi b/net/net.gypi index 61b4c175..bf64a2d 100644 --- a/net/net.gypi +++ b/net/net.gypi
@@ -15,6 +15,8 @@ 'base/address_family.h', 'base/address_list.cc', 'base/address_list.h', + 'base/arena.cc', + 'base/arena.h', 'base/auth.cc', 'base/auth.h', 'base/completion_callback.h', @@ -30,6 +32,7 @@ 'base/ip_address.h', 'base/ip_endpoint.cc', 'base/ip_endpoint.h', + 'base/linked_hash_map.h', 'base/load_timing_info.cc', 'base/load_timing_info.h', 'base/lookup_string_in_fixed_set.cc', @@ -219,6 +222,8 @@ 'socket/ssl_client_socket_impl.cc', 'socket/ssl_client_socket_impl.h', 'socket/ssl_socket.h', + 'spdy/spdy_header_block.cc', + 'spdy/spdy_header_block.h', 'ssl/channel_id_service.cc', 'ssl/channel_id_service.h', 'ssl/channel_id_store.cc', @@ -279,8 +284,6 @@ 'android/traffic_stats.h', 'base/address_tracker_linux.cc', 'base/address_tracker_linux.h', - 'base/arena.cc', - 'base/arena.h', 'base/backoff_entry.cc', 'base/backoff_entry.h', 'base/backoff_entry_serializer.cc', @@ -316,7 +319,6 @@ # TODO(tc): gnome-vfs? xdgmime? /etc/mime.types? 'base/layered_network_delegate.cc', 'base/layered_network_delegate.h', - 'base/linked_hash_map.h', 'base/load_flags.h', 'base/load_flags_list.h', 'base/load_states.h', @@ -1380,8 +1382,6 @@ 'spdy/spdy_framer.h', 'spdy/spdy_framer_decoder_adapter.cc', 'spdy/spdy_framer_decoder_adapter.h', - 'spdy/spdy_header_block.cc', - 'spdy/spdy_header_block.h', 'spdy/spdy_header_indexing.cc', 'spdy/spdy_header_indexing.h', 'spdy/spdy_headers_block_parser.cc', @@ -1986,6 +1986,8 @@ 'quic/test_tools/crypto_test_utils_test.cc', 'quic/test_tools/delayed_verify_strike_register_client.cc', 'quic/test_tools/delayed_verify_strike_register_client.h', + 'quic/test_tools/failing_proof_source.cc', + 'quic/test_tools/failing_proof_source.h', 'quic/test_tools/fake_proof_source.cc', 'quic/test_tools/fake_proof_source.h', 'quic/test_tools/mock_clock.cc',
diff --git a/net/quic/core/crypto/crypto_server_test.cc b/net/quic/core/crypto/crypto_server_test.cc index f078524..941e2fe 100644 --- a/net/quic/core/crypto/crypto_server_test.cc +++ b/net/quic/core/crypto/crypto_server_test.cc
@@ -23,6 +23,7 @@ #include "net/quic/platform/api/quic_text_utils.h" #include "net/quic/test_tools/crypto_test_utils.h" #include "net/quic/test_tools/delayed_verify_strike_register_client.h" +#include "net/quic/test_tools/failing_proof_source.h" #include "net/quic/test_tools/mock_clock.h" #include "net/quic/test_tools/mock_random.h" #include "net/quic/test_tools/quic_crypto_server_config_peer.h" @@ -107,6 +108,7 @@ config_(QuicCryptoServerConfig::TESTING, rand_, CryptoTestUtils::ProofSourceForTesting()), + peer_(&config_), compressed_certs_cache_( QuicCompressedCertsCache::kQuicCompressedCertsCacheSize), params_(new QuicCryptoNegotiatedParameters), @@ -399,6 +401,7 @@ QuicVersion client_version_; string client_version_string_; QuicCryptoServerConfig config_; + QuicCryptoServerConfigPeer peer_; QuicCompressedCertsCache compressed_certs_cache_; QuicCryptoServerConfig::ConfigOptions config_options_; QuicReferenceCountedPointer<QuicCryptoNegotiatedParameters> params_; @@ -996,6 +999,28 @@ EXPECT_TRUE(out_.GetStringPiece(kServerNonceTag, &nonce)); } +TEST_P(CryptoServerTest, ProofSourceFailure) { + // Install a ProofSource which will unconditionally fail + peer_.ResetProofSource(std::unique_ptr<ProofSource>(new FailingProofSource)); + + // clang-format off + CryptoHandshakeMessage msg = CryptoTestUtils::Message( + "CHLO", + "AEAD", "AESG", + "KEXS", "C255", + "SCID", scid_hex_.c_str(), + "PUBS", pub_hex_.c_str(), + "NONC", nonce_hex_.c_str(), + "PDMD", "X509", + "VER\0", client_version_string_.c_str(), + "$padding", static_cast<int>(kClientHelloMinimumSize), + nullptr); + // clang-format on + + // Just ensure that we don't crash as occurred in b/33916924. + ShouldFailMentioning("", msg); +} + TEST(CryptoServerConfigGenerationTest, Determinism) { // Test that using a deterministic PRNG causes the server-config to be // deterministic.
diff --git a/net/quic/core/crypto/quic_crypto_server_config.cc b/net/quic/core/crypto/quic_crypto_server_config.cc index fa8df9c..e012f271 100644 --- a/net/quic/core/crypto/quic_crypto_server_config.cc +++ b/net/quic/core/crypto/quic_crypto_server_config.cc
@@ -774,8 +774,7 @@ use_stateless_rejects, server_designated_connection_id, rand, compressed_certs_cache, params, *signed_config, total_framing_overhead, chlo_packet_size, out.get()); - if (FLAGS_quic_reloadable_flag_quic_export_rej_for_all_rejects && - rejection_observer_ != nullptr) { + if (rejection_observer_ != nullptr) { rejection_observer_->OnRejectionBuilt(info.reject_reasons, out.get()); } helper.Succeed(std::move(out), std::move(out_diversification_nonce), @@ -1327,7 +1326,9 @@ info->reject_reasons.push_back(SERVER_CONFIG_UNKNOWN_CONFIG_FAILURE); } - if (!ValidateExpectedLeafCertificate(client_hello, *signed_config)) { + if (signed_config->chain != nullptr && + !ValidateExpectedLeafCertificate(client_hello, + signed_config->chain->certs)) { info->reject_reasons.push_back(INVALID_EXPECTED_LEAF_CERTIFICATE); } @@ -1986,8 +1987,8 @@ bool QuicCryptoServerConfig::ValidateExpectedLeafCertificate( const CryptoHandshakeMessage& client_hello, - const QuicSignedServerConfig& signed_config) const { - if (signed_config.chain->certs.empty()) { + const std::vector<string>& certs) const { + if (certs.empty()) { return false; } @@ -1995,8 +1996,7 @@ if (client_hello.GetUint64(kXLCT, &hash_from_client) != QUIC_NO_ERROR) { return false; } - return CryptoUtils::ComputeLeafCertHash(signed_config.chain->certs.at(0)) == - hash_from_client; + return CryptoUtils::ComputeLeafCertHash(certs.at(0)) == hash_from_client; } bool QuicCryptoServerConfig::ClientDemandsX509Proof(
diff --git a/net/quic/core/crypto/quic_crypto_server_config.h b/net/quic/core/crypto/quic_crypto_server_config.h index d8e0e409..42e9e2e 100644 --- a/net/quic/core/crypto/quic_crypto_server_config.h +++ b/net/quic/core/crypto/quic_crypto_server_config.h
@@ -656,13 +656,12 @@ // ValidateExpectedLeafCertificate checks the |client_hello| to see if it has // an XLCT tag, and if so, verifies that its value matches the hash of the - // server's leaf certificate. The certs field of |crypto_proof| is used to - // compare against the XLCT value. This method returns true if the XLCT tag - // is not present, or if the XLCT tag is present and valid. It returns false - // otherwise. + // server's leaf certificate. |certs| is used to compare against the XLCT + // value. This method returns true if the XLCT tag is not present, or if the + // XLCT tag is present and valid. It returns false otherwise. bool ValidateExpectedLeafCertificate( const CryptoHandshakeMessage& client_hello, - const QuicSignedServerConfig& crypto_proof) const; + const std::vector<std::string>& certs) const; // Returns true if the PDMD field from the client hello demands an X509 // certificate.
diff --git a/net/quic/core/quic_crypto_server_stream_test.cc b/net/quic/core/quic_crypto_server_stream_test.cc index e9450454..1060da9 100644 --- a/net/quic/core/quic_crypto_server_stream_test.cc +++ b/net/quic/core/quic_crypto_server_stream_test.cc
@@ -24,6 +24,7 @@ #include "net/quic/core/quic_session.h" #include "net/quic/platform/api/quic_socket_address.h" #include "net/quic/test_tools/crypto_test_utils.h" +#include "net/quic/test_tools/failing_proof_source.h" #include "net/quic/test_tools/quic_crypto_server_config_peer.h" #include "net/quic/test_tools/quic_test_utils.h" #include "testing/gmock/include/gmock/gmock.h" @@ -483,30 +484,6 @@ EXPECT_TRUE(server_stream()->handshake_confirmed()); } -class FailingProofSource : public ProofSource { - public: - bool GetProof(const QuicSocketAddress& server_address, - const string& hostname, - const string& server_config, - QuicVersion quic_version, - StringPiece chlo_hash, - const QuicTagVector& connection_options, - QuicReferenceCountedPointer<ProofSource::Chain>* out_chain, - QuicCryptoProof* out_proof) override { - return false; - } - - void GetProof(const QuicSocketAddress& server_address, - const string& hostname, - const string& server_config, - QuicVersion quic_version, - StringPiece chlo_hash, - const QuicTagVector& connection_options, - std::unique_ptr<Callback> callback) override { - callback->Run(false, nullptr, QuicCryptoProof(), nullptr); - } -}; - class QuicCryptoServerStreamTestWithFailingProofSource : public QuicCryptoServerStreamTest { public:
diff --git a/net/quic/core/quic_flags_list.h b/net/quic/core/quic_flags_list.h index c73676f..3941adc 100644 --- a/net/quic/core/quic_flags_list.h +++ b/net/quic/core/quic_flags_list.h
@@ -86,12 +86,6 @@ FLAGS_quic_reloadable_flag_quic_enable_server_push_by_default, true) -// If true, export reject reasons for all rejects, i.e., rejects, -// stateless rejects and cheap stateless rejects. -QUIC_FLAG(bool, - FLAGS_quic_reloadable_flag_quic_export_rej_for_all_rejects, - true) - // Allow large send deltas to be used as RTT samples. QUIC_FLAG(bool, FLAGS_quic_reloadable_flag_quic_allow_large_send_deltas, true)
diff --git a/net/quic/test_tools/failing_proof_source.cc b/net/quic/test_tools/failing_proof_source.cc new file mode 100644 index 0000000..53b69e1 --- /dev/null +++ b/net/quic/test_tools/failing_proof_source.cc
@@ -0,0 +1,33 @@ +// Copyright (c) 2017 The Chromium Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#include "net/quic/test_tools/failing_proof_source.h" + +namespace net { +namespace test { + +bool FailingProofSource::GetProof( + const QuicSocketAddress& server_address, + const std::string& hostname, + const std::string& server_config, + QuicVersion quic_version, + base::StringPiece chlo_hash, + const QuicTagVector& connection_options, + QuicReferenceCountedPointer<ProofSource::Chain>* out_chain, + QuicCryptoProof* out_proof) { + return false; +} + +void FailingProofSource::GetProof(const QuicSocketAddress& server_address, + const std::string& hostname, + const std::string& server_config, + QuicVersion quic_version, + base::StringPiece chlo_hash, + const QuicTagVector& connection_options, + std::unique_ptr<Callback> callback) { + callback->Run(false, nullptr, QuicCryptoProof(), nullptr); +} + +} // namespace test +} // namespace net
diff --git a/net/quic/test_tools/failing_proof_source.h b/net/quic/test_tools/failing_proof_source.h new file mode 100644 index 0000000..67aa44e --- /dev/null +++ b/net/quic/test_tools/failing_proof_source.h
@@ -0,0 +1,36 @@ +// Copyright (c) 2017 The Chromium Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. + +#ifndef NET_QUIC_TEST_TOOLS_FAILING_PROOF_SOURCE_H_ +#define NET_QUIC_TEST_TOOLS_FAILING_PROOF_SOURCE_H_ + +#include "net/quic/core/crypto/proof_source.h" + +namespace net { +namespace test { + +class FailingProofSource : public ProofSource { + public: + bool GetProof(const QuicSocketAddress& server_address, + const std::string& hostname, + const std::string& server_config, + QuicVersion quic_version, + base::StringPiece chlo_hash, + const QuicTagVector& connection_options, + QuicReferenceCountedPointer<ProofSource::Chain>* out_chain, + QuicCryptoProof* out_proof) override; + + void GetProof(const QuicSocketAddress& server_address, + const std::string& hostname, + const std::string& server_config, + QuicVersion quic_version, + base::StringPiece chlo_hash, + const QuicTagVector& connection_options, + std::unique_ptr<Callback> callback) override; +}; + +} // namespace test +} // namespace net + +#endif // NET_QUIC_TEST_TOOLS_FAILING_PROOF_SOURCE_H_
diff --git a/net/quic/test_tools/quic_crypto_server_config_peer.cc b/net/quic/test_tools/quic_crypto_server_config_peer.cc index 3856b9f8e..d9a47f7 100644 --- a/net/quic/test_tools/quic_crypto_server_config_peer.cc +++ b/net/quic/test_tools/quic_crypto_server_config_peer.cc
@@ -42,6 +42,11 @@ return server_config_->proof_source_.get(); } +void QuicCryptoServerConfigPeer::ResetProofSource( + std::unique_ptr<ProofSource> proof_source) { + server_config_->proof_source_ = std::move(proof_source); +} + string QuicCryptoServerConfigPeer::NewSourceAddressToken( string config_id, SourceAddressTokens previous_tokens,
diff --git a/net/quic/test_tools/quic_crypto_server_config_peer.h b/net/quic/test_tools/quic_crypto_server_config_peer.h index bd26a95..310be87 100644 --- a/net/quic/test_tools/quic_crypto_server_config_peer.h +++ b/net/quic/test_tools/quic_crypto_server_config_peer.h
@@ -13,8 +13,7 @@ // Peer for accessing otherwise private members of a QuicCryptoServerConfig. class QuicCryptoServerConfigPeer { public: - explicit QuicCryptoServerConfigPeer( - const QuicCryptoServerConfig* server_config) + explicit QuicCryptoServerConfigPeer(QuicCryptoServerConfig* server_config) : server_config_(server_config) {} // Returns the proof source. @@ -31,6 +30,9 @@ // Returns a pointer to the ProofSource object. ProofSource* GetProofSource() const; + // Reset the proof_source_ member. + void ResetProofSource(std::unique_ptr<ProofSource> proof_source); + // Generates a new valid source address token. std::string NewSourceAddressToken( std::string config_id, @@ -91,7 +93,7 @@ uint32_t source_address_token_lifetime_secs(); private: - const QuicCryptoServerConfig* server_config_; + QuicCryptoServerConfig* server_config_; }; } // namespace test
diff --git a/net/quic/test_tools/simple_quic_framer.cc b/net/quic/test_tools/simple_quic_framer.cc index c4d36b2..48a72d2f 100644 --- a/net/quic/test_tools/simple_quic_framer.cc +++ b/net/quic/test_tools/simple_quic_framer.cc
@@ -52,8 +52,7 @@ bool OnStreamFrame(const QuicStreamFrame& frame) override { // Save a copy of the data so it is valid after the packet is processed. - string* string_data = new string(); - string_data->append(frame.data_buffer, frame.data_length); + string* string_data = new string(frame.data_buffer, frame.data_length); stream_data_.push_back(base::WrapUnique(string_data)); // TODO(ianswett): A pointer isn't necessary with emplace_back. stream_frames_.push_back(base::MakeUnique<QuicStreamFrame>(
diff --git a/net/tools/quic/quic_time_wait_list_manager.h b/net/tools/quic/quic_time_wait_list_manager.h index 17eda88e..3c4dc02 100644 --- a/net/tools/quic/quic_time_wait_list_manager.h +++ b/net/tools/quic/quic_time_wait_list_manager.h
@@ -120,6 +120,12 @@ virtual std::unique_ptr<QuicEncryptedPacket> BuildPublicReset( const QuicPublicResetPacket& packet); + // Creates a public reset packet and sends it or queues it to be sent later. + virtual void SendPublicReset(const QuicSocketAddress& server_address, + const QuicSocketAddress& client_address, + QuicConnectionId connection_id, + QuicPacketNumber rejected_packet_number); + private: friend class test::QuicDispatcherPeer; friend class test::QuicTimeWaitListManagerPeer; @@ -131,12 +137,6 @@ // number of received packets. bool ShouldSendResponse(int received_packet_count); - // Creates a public reset packet and sends it or queues it to be sent later. - void SendPublicReset(const QuicSocketAddress& server_address, - const QuicSocketAddress& client_address, - QuicConnectionId connection_id, - QuicPacketNumber rejected_packet_number); - // Either sends the packet and deletes it or makes pending_packets_queue_ the // owner of the packet. void SendOrQueuePacket(std::unique_ptr<QueuedPacket> packet);
diff --git a/net/url_request/url_request_file_job.cc b/net/url_request/url_request_file_job.cc index 8f96f40..4924e02 100644 --- a/net/url_request/url_request_file_job.cc +++ b/net/url_request/url_request_file_job.cc
@@ -172,12 +172,14 @@ // because we need to do multipart encoding here. // TODO(hclam): decide whether we want to support multiple range // requests. - range_parse_result_ = net::ERR_REQUEST_RANGE_NOT_SATISFIABLE; + range_parse_result_ = ERR_REQUEST_RANGE_NOT_SATISFIABLE; } } } } +void URLRequestFileJob::OnOpenComplete(int result) {} + void URLRequestFileJob::OnSeekComplete(int64_t result) {} void URLRequestFileJob::OnReadComplete(IOBuffer* buf, int result) { @@ -239,20 +241,15 @@ } void URLRequestFileJob::DidOpen(int result) { + OnOpenComplete(result); if (result != OK) { NotifyStartError(URLRequestStatus(URLRequestStatus::FAILED, result)); return; } - if (range_parse_result_ != net::OK) { - NotifyStartError( - URLRequestStatus(URLRequestStatus::FAILED, range_parse_result_)); - return; - } - - if (!byte_range_.ComputeBounds(meta_info_.file_size)) { - NotifyStartError(URLRequestStatus(URLRequestStatus::FAILED, - net::ERR_REQUEST_RANGE_NOT_SATISFIABLE)); + if (range_parse_result_ != OK || + !byte_range_.ComputeBounds(meta_info_.file_size)) { + DidSeek(ERR_REQUEST_RANGE_NOT_SATISFIABLE); return; } @@ -264,11 +261,8 @@ int rv = stream_->Seek(byte_range_.first_byte_position(), base::Bind(&URLRequestFileJob::DidSeek, weak_ptr_factory_.GetWeakPtr())); - if (rv != ERR_IO_PENDING) { - // stream_->Seek() failed, so pass an intentionally erroneous value - // into DidSeek(). - DidSeek(-1); - } + if (rv != ERR_IO_PENDING) + DidSeek(ERR_REQUEST_RANGE_NOT_SATISFIABLE); } else { // We didn't need to call stream_->Seek() at all, so we pass to DidSeek() // the value that would mean seek success. This way we skip the code @@ -278,8 +272,10 @@ } void URLRequestFileJob::DidSeek(int64_t result) { + DCHECK(result < 0 || result == byte_range_.first_byte_position()); + OnSeekComplete(result); - if (result != byte_range_.first_byte_position()) { + if (result < 0) { NotifyStartError(URLRequestStatus(URLRequestStatus::FAILED, ERR_REQUEST_RANGE_NOT_SATISFIABLE)); return;
diff --git a/net/url_request/url_request_file_job.h b/net/url_request/url_request_file_job.h index d288244..ec90767 100644 --- a/net/url_request/url_request_file_job.h +++ b/net/url_request/url_request_file_job.h
@@ -46,7 +46,18 @@ void SetExtraRequestHeaders(const HttpRequestHeaders& headers) override; // An interface for subclasses who wish to monitor read operations. + // + // |result| is the net::Error code resulting from attempting to open the file. + // Called before OnSeekComplete, only called if the request advanced to the + // point the file was opened, without being canceled. + virtual void OnOpenComplete(int result); + // Called at most once. On success, |result| is the non-negative offset into + // the file that the request will read from. On seek failure, it's a negative + // net:Error code. virtual void OnSeekComplete(int64_t result); + // Called once per read attempt. |buf| contains the read data, if any. + // |result| is the number of read bytes. 0 (net::OK) indicates EOF, negative + // numbers indicate it's a net::Error code. virtual void OnReadComplete(IOBuffer* buf, int result); protected:
diff --git a/net/url_request/url_request_file_job_unittest.cc b/net/url_request/url_request_file_job_unittest.cc index 613353d..2e5aaff 100644 --- a/net/url_request/url_request_file_job_unittest.cc +++ b/net/url_request/url_request_file_job_unittest.cc
@@ -15,6 +15,7 @@ #include "base/threading/sequenced_worker_pool.h" #include "base/threading/thread_task_runner_handle.h" #include "net/base/filename_util.h" +#include "net/base/net_errors.h" #include "net/url_request/url_request.h" #include "net/url_request/url_request_test_util.h" #include "testing/gtest/include/gtest/gtest.h" @@ -34,30 +35,45 @@ NetworkDelegate* network_delegate, const base::FilePath& file_path, const scoped_refptr<base::TaskRunner>& file_task_runner, + int* open_result, int64_t* seek_position, std::string* observed_content) : URLRequestFileJob(request, network_delegate, file_path, file_task_runner), + open_result_(open_result), seek_position_(seek_position), observed_content_(observed_content) { - *seek_position_ = 0; + *open_result_ = ERR_IO_PENDING; + *seek_position_ = ERR_IO_PENDING; observed_content_->clear(); } ~TestURLRequestFileJob() override {} protected: + void OnOpenComplete(int result) override { + // Should only be called once. + ASSERT_EQ(ERR_IO_PENDING, *open_result_); + *open_result_ = result; + } + void OnSeekComplete(int64_t result) override { - ASSERT_EQ(*seek_position_, 0); + // Should only call this if open succeeded. + EXPECT_EQ(OK, *open_result_); + // Should only be called once. + ASSERT_EQ(ERR_IO_PENDING, *seek_position_); *seek_position_ = result; } void OnReadComplete(IOBuffer* buf, int result) override { + // Should only call this if seek succeeded. 
+ EXPECT_GE(*seek_position_, 0); observed_content_->append(std::string(buf->data(), result)); } + int* const open_result_; int64_t* const seek_position_; std::string* const observed_content_; }; @@ -67,11 +83,14 @@ class TestJobFactory : public URLRequestJobFactory { public: TestJobFactory(const base::FilePath& path, + int* open_result, int64_t* seek_position, std::string* observed_content) : path_(path), + open_result_(open_result), seek_position_(seek_position), observed_content_(observed_content) { + CHECK(open_result_); CHECK(seek_position_); CHECK(observed_content_); } @@ -82,11 +101,13 @@ const std::string& scheme, URLRequest* request, NetworkDelegate* network_delegate) const override { + CHECK(open_result_); CHECK(seek_position_); CHECK(observed_content_); URLRequestJob* job = new TestURLRequestFileJob( request, network_delegate, path_, base::ThreadTaskRunnerHandle::Get(), - seek_position_, observed_content_); + open_result_, seek_position_, observed_content_); + open_result_ = nullptr; seek_position_ = nullptr; observed_content_ = nullptr; return job; @@ -120,6 +141,7 @@ const base::FilePath path_; // These are mutable because MaybeCreateJobWithProtocolHandler is const. + mutable int* open_result_; mutable int64_t* seek_position_; mutable std::string* observed_content_; }; @@ -147,28 +169,41 @@ } }; -// A superclass for tests of the OnSeekComplete / OnReadComplete functions of -// URLRequestFileJob. +// A superclass for tests of the OnOpenComplete / OnSeekComplete / +// OnReadComplete functions of URLRequestFileJob. class URLRequestFileJobEventsTest : public testing::Test { public: URLRequestFileJobEventsTest(); protected: + void TearDown() override; + // This creates a file with |content| as the contents, and then creates and - // runs a URLRequestFileJobWithCallbacks job to get the contents out of it, + // runs a TestURLRequestFileJob job to get the contents out of it, and makes sure that the callbacks observed the correct bytes.
If a Range // is provided, this function will add the appropriate Range http header to // the request and verify that only the bytes in that range (inclusive) were // observed. - void RunRequest(const std::string& content, const Range* range); + void RunSuccessfulRequestWithString(const std::string& content, + const Range* range); // This is the same as the method above it, except that it will make sure // the content matches |expected_content| and allow caller to specify the // extension of the filename in |file_extension|. - void RunRequest(const std::string& content, - const std::string& expected_content, - const base::FilePath::StringPieceType& file_extension, - const Range* range); + void RunSuccessfulRequestWithString( + const std::string& content, + const std::string& expected_content, + const base::FilePath::StringPieceType& file_extension, + const Range* range); + + // Creates and runs a TestURLRequestFileJob job to read from file provided by + // |path|. If |range| value is provided, it will be passed in the range + // header. + void RunRequestWithPath(const base::FilePath& path, + const std::string& range, + int* open_result, + int64_t* seek_position, + std::string* observed_content); TestURLRequestContext context_; TestDelegate delegate_; @@ -176,12 +211,19 @@ URLRequestFileJobEventsTest::URLRequestFileJobEventsTest() {} -void URLRequestFileJobEventsTest::RunRequest(const std::string& content, - const Range* range) { - RunRequest(content, content, FILE_PATH_LITERAL(""), range); +void URLRequestFileJobEventsTest::TearDown() { + // Gives a chance to close the opened file.
+ base::RunLoop().RunUntilIdle(); } -void URLRequestFileJobEventsTest::RunRequest( +void URLRequestFileJobEventsTest::RunSuccessfulRequestWithString( + const std::string& content, + const Range* range) { + RunSuccessfulRequestWithString(content, content, FILE_PATH_LITERAL(""), + range); +} + +void URLRequestFileJobEventsTest::RunSuccessfulRequestWithString( const std::string& raw_content, const std::string& expected_content, const base::FilePath::StringPieceType& file_extension, @@ -193,29 +235,23 @@ path = path.AddExtension(file_extension); ASSERT_TRUE(CreateFileWithContent(raw_content, path)); + std::string range_value; + if (range) { + ASSERT_GE(range->start, 0); + ASSERT_GE(range->end, 0); + ASSERT_LE(range->start, range->end); + ASSERT_LT(static_cast<unsigned int>(range->end), expected_content.length()); + range_value = base::StringPrintf("bytes=%d-%d", range->start, range->end); + } + { + int open_result; int64_t seek_position; std::string observed_content; - TestJobFactory factory(path, &seek_position, &observed_content); - context_.set_job_factory(&factory); + RunRequestWithPath(path, range_value, &open_result, &seek_position, + &observed_content); - std::unique_ptr<URLRequest> request(context_.CreateRequest( - FilePathToFileURL(path), DEFAULT_PRIORITY, &delegate_)); - if (range) { - ASSERT_GE(range->start, 0); - ASSERT_GE(range->end, 0); - ASSERT_LE(range->start, range->end); - ASSERT_LT(static_cast<unsigned int>(range->end), - expected_content.length()); - std::string range_value = - base::StringPrintf("bytes=%d-%d", range->start, range->end); - request->SetExtraRequestHeaderByName(HttpRequestHeaders::kRange, - range_value, true /*overwrite*/); - } - request->Start(); - - base::RunLoop().Run(); - + EXPECT_EQ(OK, open_result); EXPECT_FALSE(delegate_.request_failed()); int expected_length = range ? 
(range->end - range->start + 1) : expected_content.length(); @@ -234,8 +270,26 @@ EXPECT_EQ(expected_data_received, delegate_.data_received()); EXPECT_EQ(seek_position, range ? range->start : 0); } +} - base::RunLoop().RunUntilIdle(); +void URLRequestFileJobEventsTest::RunRequestWithPath( + const base::FilePath& path, + const std::string& range, + int* open_result, + int64_t* seek_position, + std::string* observed_content) { + TestJobFactory factory(path, open_result, seek_position, observed_content); + context_.set_job_factory(&factory); + + std::unique_ptr<URLRequest> request(context_.CreateRequest( + FilePathToFileURL(path), DEFAULT_PRIORITY, &delegate_)); + if (!range.empty()) { + request->SetExtraRequestHeaderByName(HttpRequestHeaders::kRange, range, + true /*overwrite*/); + } + request->Start(); + + base::RunLoop().Run(); } // Helper function to make a character array filled with |size| bytes of @@ -251,15 +305,15 @@ } TEST_F(URLRequestFileJobEventsTest, TinyFile) { - RunRequest(std::string("hello world"), NULL); + RunSuccessfulRequestWithString(std::string("hello world"), NULL); } TEST_F(URLRequestFileJobEventsTest, SmallFile) { - RunRequest(MakeContentOfSize(17 * 1024), NULL); + RunSuccessfulRequestWithString(MakeContentOfSize(17 * 1024), NULL); } TEST_F(URLRequestFileJobEventsTest, BigFile) { - RunRequest(MakeContentOfSize(3 * 1024 * 1024), NULL); + RunSuccessfulRequestWithString(MakeContentOfSize(3 * 1024 * 1024), NULL); } TEST_F(URLRequestFileJobEventsTest, Range) { @@ -267,7 +321,7 @@ // not aligned on any likely page boundaries. 
int size = 15 * 1024; Range range(1701, (6 * 1024) + 3); - RunRequest(MakeContentOfSize(size), &range); + RunSuccessfulRequestWithString(MakeContentOfSize(size), &range); } TEST_F(URLRequestFileJobEventsTest, DecodeSvgzFile) { @@ -278,8 +332,77 @@ 0x1f, 0x8b, 0x08, 0x00, 0x2b, 0x02, 0x84, 0x55, 0x00, 0x03, 0xf3, 0x48, 0xcd, 0xc9, 0xc9, 0xd7, 0x51, 0x08, 0xcf, 0x2f, 0xca, 0x49, 0x51, 0x04, 0x00, 0xd0, 0xc3, 0x4a, 0xec, 0x0d, 0x00, 0x00, 0x00}; - RunRequest(std::string(reinterpret_cast<char*>(gzip_data), sizeof(gzip_data)), - expected_content, FILE_PATH_LITERAL("svgz"), nullptr); + RunSuccessfulRequestWithString( + std::string(reinterpret_cast<char*>(gzip_data), sizeof(gzip_data)), + expected_content, FILE_PATH_LITERAL("svgz"), nullptr); +} + +TEST_F(URLRequestFileJobEventsTest, OpenNonExistentFile) { + base::FilePath path; + PathService::Get(base::DIR_SOURCE_ROOT, &path); + path = path.Append( + FILE_PATH_LITERAL("net/data/url_request_unittest/non-existent.txt")); + + int open_result; + int64_t seek_position; + std::string observed_content; + RunRequestWithPath(path, std::string(), &open_result, &seek_position, + &observed_content); + + EXPECT_EQ(ERR_FILE_NOT_FOUND, open_result); + EXPECT_TRUE(delegate_.request_failed()); +} + +TEST_F(URLRequestFileJobEventsTest, MultiRangeRequestNotSupported) { + base::FilePath path; + PathService::Get(base::DIR_SOURCE_ROOT, &path); + path = path.Append( + FILE_PATH_LITERAL("net/data/url_request_unittest/BullRunSpeech.txt")); + + int open_result; + int64_t seek_position; + std::string observed_content; + RunRequestWithPath(path, "bytes=1-5,20-30", &open_result, &seek_position, + &observed_content); + + EXPECT_EQ(OK, open_result); + EXPECT_EQ(ERR_REQUEST_RANGE_NOT_SATISFIABLE, seek_position); + EXPECT_TRUE(delegate_.request_failed()); +} + +TEST_F(URLRequestFileJobEventsTest, RangeExceedingFileSize) { + base::FilePath path; + PathService::Get(base::DIR_SOURCE_ROOT, &path); + path = path.Append( + 
FILE_PATH_LITERAL("net/data/url_request_unittest/BullRunSpeech.txt")); + + int open_result; + int64_t seek_position; + std::string observed_content; + RunRequestWithPath(path, "bytes=50000-", &open_result, &seek_position, + &observed_content); + + EXPECT_EQ(OK, open_result); + EXPECT_EQ(ERR_REQUEST_RANGE_NOT_SATISFIABLE, seek_position); + EXPECT_TRUE(delegate_.request_failed()); +} + +TEST_F(URLRequestFileJobEventsTest, IgnoreRangeParsingError) { + base::FilePath path; + PathService::Get(base::DIR_SOURCE_ROOT, &path); + path = path.Append( + FILE_PATH_LITERAL("net/data/url_request_unittest/simple.html")); + + int open_result; + int64_t seek_position; + std::string observed_content; + RunRequestWithPath(path, "bytes=3-z", &open_result, &seek_position, + &observed_content); + + EXPECT_EQ(OK, open_result); + EXPECT_EQ(0, seek_position); + EXPECT_EQ("hello\n", observed_content); + EXPECT_FALSE(delegate_.request_failed()); } } // namespace
diff --git a/ppapi/shared_impl/ppb_gamepad_shared.h b/ppapi/shared_impl/ppb_gamepad_shared.h index 577a13d..df95fab 100644 --- a/ppapi/shared_impl/ppb_gamepad_shared.h +++ b/ppapi/shared_impl/ppb_gamepad_shared.h
@@ -17,7 +17,7 @@ // TODO(brettw) when we remove the non-IPC-based gamepad implementation, this // code should all move into the GamepadResource. -#pragma pack(push, 1) +#pragma pack(push, 4) struct WebKitGamepadButton { bool pressed;
diff --git a/remoting/codec/webrtc_video_encoder.h b/remoting/codec/webrtc_video_encoder.h index be0f988..bf097c2 100644 --- a/remoting/codec/webrtc_video_encoder.h +++ b/remoting/codec/webrtc_video_encoder.h
@@ -53,12 +53,13 @@ virtual ~WebrtcVideoEncoder() {} - // Encode an image stored in |frame|. If |frame.updated_region()| is empty - // then the encoder may return a packet (e.g. to top-off previously-encoded + // Encode an image stored in |frame|. If frame.updated_region() is empty + // then the encoder may return a frame (e.g. to top-off previously-encoded // portions of the frame to higher quality) or return nullptr to indicate that - // there is no work to do. + // there is no work to do. |frame| may be nullptr. This case must be handled + // the same as if frame.updated_region() is empty. virtual std::unique_ptr<EncodedFrame> Encode( - const webrtc::DesktopFrame& frame, + const webrtc::DesktopFrame* frame, const FrameParams& param) = 0; };
diff --git a/remoting/codec/webrtc_video_encoder_vpx.cc b/remoting/codec/webrtc_video_encoder_vpx.cc index dfa6050d..da198feb 100644 --- a/remoting/codec/webrtc_video_encoder_vpx.cc +++ b/remoting/codec/webrtc_video_encoder_vpx.cc
@@ -274,16 +274,25 @@ } std::unique_ptr<WebrtcVideoEncoder::EncodedFrame> WebrtcVideoEncoderVpx::Encode( - const webrtc::DesktopFrame& frame, + const webrtc::DesktopFrame* frame, const FrameParams& params) { - DCHECK_LE(32, frame.size().width()); - DCHECK_LE(32, frame.size().height()); + webrtc::DesktopSize previous_frame_size = + image_ ? webrtc::DesktopSize(image_->w, image_->h) + : webrtc::DesktopSize(); + + webrtc::DesktopSize frame_size = frame ? frame->size() : previous_frame_size; + + // Don't need to send anything until we get the first non-null frame. + if (frame_size.is_empty()) { + return nullptr; + } + + DCHECK_GE(frame_size.width(), 32); + DCHECK_GE(frame_size.height(), 32); // Create or reconfigure the codec to match the size of |frame|. - if (!codec_ || - (image_ && - !frame.size().equals(webrtc::DesktopSize(image_->w, image_->h)))) { - Configure(frame.size()); + if (!codec_ || !frame_size.equals(previous_frame_size)) { + Configure(frame_size); } UpdateConfig(params); @@ -302,7 +311,7 @@ ClearActiveMap(); if (params.key_frame) - updated_region.SetRect(webrtc::DesktopRect::MakeSize(frame.size())); + updated_region.SetRect(webrtc::DesktopRect::MakeSize(frame_size)); SetActiveMapFromRegion(updated_region); @@ -337,7 +346,7 @@ bool got_data = false; std::unique_ptr<EncodedFrame> encoded_frame(new EncodedFrame()); - encoded_frame->size = frame.size(); + encoded_frame->size = frame_size; while (!got_data) { const vpx_codec_cx_pkt_t* vpx_packet = @@ -472,9 +481,9 @@ } void WebrtcVideoEncoderVpx::PrepareImage( - const webrtc::DesktopFrame& frame, + const webrtc::DesktopFrame* frame, webrtc::DesktopRegion* updated_region) { - if (frame.updated_region().is_empty()) { + if (!frame || frame->updated_region().is_empty()) { updated_region->Clear(); return; } @@ -490,7 +499,7 @@ // is required by ConvertRGBToYUVWithRect(). // TODO(wez): Do we still need 16x16 align, or is even alignment sufficient? int padding = use_vp9_ ? 
8 : 3; - for (webrtc::DesktopRegion::Iterator r(frame.updated_region()); + for (webrtc::DesktopRegion::Iterator r(frame->updated_region()); !r.IsAtEnd(); r.Advance()) { const webrtc::DesktopRect& rect = r.rect(); updated_region->AddRect(AlignRect(webrtc::DesktopRect::MakeLTRB( @@ -505,13 +514,13 @@ updated_region->IntersectWith( webrtc::DesktopRect::MakeWH(image_->w, image_->h)); } else { - CreateImage(lossless_color_, frame.size(), &image_, &image_buffer_); + CreateImage(lossless_color_, frame->size(), &image_, &image_buffer_); updated_region->AddRect(webrtc::DesktopRect::MakeWH(image_->w, image_->h)); } // Convert the updated region to YUV ready for encoding. - const uint8_t* rgb_data = frame.data(); - const int rgb_stride = frame.stride(); + const uint8_t* rgb_data = frame->data(); + const int rgb_stride = frame->stride(); const int y_stride = image_->stride[0]; DCHECK_EQ(image_->stride[1], image_->stride[2]); const int uv_stride = image_->stride[1];
diff --git a/remoting/codec/webrtc_video_encoder_vpx.h b/remoting/codec/webrtc_video_encoder_vpx.h index 34357189..5c5cfd4 100644 --- a/remoting/codec/webrtc_video_encoder_vpx.h +++ b/remoting/codec/webrtc_video_encoder_vpx.h
@@ -41,7 +41,7 @@ void SetLosslessColor(bool want_lossless); // WebrtcVideoEncoder interface. - std::unique_ptr<EncodedFrame> Encode(const webrtc::DesktopFrame& frame, + std::unique_ptr<EncodedFrame> Encode(const webrtc::DesktopFrame* frame, const FrameParams& params) override; private: @@ -56,7 +56,7 @@ // Prepares |image_| for encoding. Writes updated rectangles into // |updated_region|. - void PrepareImage(const webrtc::DesktopFrame& frame, + void PrepareImage(const webrtc::DesktopFrame* frame, webrtc::DesktopRegion* updated_region); // Clears active map.
diff --git a/remoting/host/desktop_session_proxy.cc b/remoting/host/desktop_session_proxy.cc index 84b6a06..4e9e88b 100644 --- a/remoting/host/desktop_session_proxy.cc +++ b/remoting/host/desktop_session_proxy.cc
@@ -248,7 +248,7 @@ while (pending_capture_frame_requests_) { --pending_capture_frame_requests_; video_capturer_->OnCaptureResult( - webrtc::DesktopCapturer::Result::ERROR_PERMANENT, nullptr); + webrtc::DesktopCapturer::Result::ERROR_TEMPORARY, nullptr); } } @@ -267,7 +267,7 @@ SendToDesktop(new ChromotingNetworkDesktopMsg_CaptureFrame()); } else { video_capturer_->OnCaptureResult( - webrtc::DesktopCapturer::Result::ERROR_PERMANENT, nullptr); + webrtc::DesktopCapturer::Result::ERROR_TEMPORARY, nullptr); } }
diff --git a/remoting/protocol/connection_unittest.cc b/remoting/protocol/connection_unittest.cc index 1217aea..b1e40f6 100644 --- a/remoting/protocol/connection_unittest.cc +++ b/remoting/protocol/connection_unittest.cc
@@ -76,19 +76,25 @@ void Start(Callback* callback) override { callback_ = callback; } + void CaptureFrame() override { + if (capture_request_index_to_fail_ >= 0) { + capture_request_index_to_fail_--; + if (capture_request_index_to_fail_ < 0) { + callback_->OnCaptureResult( + webrtc::DesktopCapturer::Result::ERROR_TEMPORARY, nullptr); + return; + } + } + // Return black 100x100 frame. std::unique_ptr<webrtc::DesktopFrame> frame( new webrtc::BasicDesktopFrame(webrtc::DesktopSize(100, 100))); - memset(frame->data(), 0, frame->stride() * frame->size().height()); - - // Set updated_region only for the first frame, as the frame content - // doesn't change. - if (!first_frame_sent_) { - first_frame_sent_ = true; - frame->mutable_updated_region()->SetRect( - webrtc::DesktopRect::MakeSize(frame->size())); - } + memset(frame->data(), frame_index_, + frame->stride() * frame->size().height()); + frame_index_++; + frame->mutable_updated_region()->SetRect( + webrtc::DesktopRect::MakeSize(frame->size())); callback_->OnCaptureResult(webrtc::DesktopCapturer::Result::SUCCESS, std::move(frame)); @@ -102,9 +108,13 @@ return true; } + void FailNthFrame(int n) { capture_request_index_to_fail_ = n; } + private: Callback* callback_ = nullptr; - bool first_frame_sent_ = false; + int frame_index_ = 0; + + int capture_request_index_to_fail_ = -1; }; static const int kAudioSampleRate = AudioPacket::SAMPLING_RATE_48000; @@ -346,7 +356,14 @@ run_loop_->Quit(); } - void WaitFirstVideoFrame() { + void WaitNextVideoFrame() { + size_t received_frames = + is_using_webrtc() + ? 
client_video_renderer_.GetFrameConsumer() + ->received_frames() + .size() + : client_video_renderer_.GetVideoStub()->received_packets().size(); + base::RunLoop run_loop; // Expect frames to be passed to FrameConsumer when WebRTC is used, or to @@ -364,7 +381,7 @@ if (is_using_webrtc()) { EXPECT_EQ( client_video_renderer_.GetFrameConsumer()->received_frames().size(), - 1U); + received_frames + 1); EXPECT_EQ( client_video_renderer_.GetVideoStub()->received_packets().size(), 0U); client_video_renderer_.GetFrameConsumer()->set_on_frame_callback( @@ -374,7 +391,8 @@ client_video_renderer_.GetFrameConsumer()->received_frames().size(), 0U); EXPECT_EQ( - client_video_renderer_.GetVideoStub()->received_packets().size(), 1U); + client_video_renderer_.GetVideoStub()->received_packets().size(), + received_frames + 1); client_video_renderer_.GetVideoStub()->set_on_frame_callback( base::Closure()); } @@ -494,7 +512,7 @@ host_connection_->StartVideoStream( base::MakeUnique<TestScreenCapturer>()); - WaitFirstVideoFrame(); + WaitNextVideoFrame(); } // Verifies that the VideoStream doesn't loose any video frames while the @@ -510,7 +528,7 @@ host_connection_->StartVideoStream( base::WrapUnique(new TestScreenCapturer())); - WaitFirstVideoFrame(); + WaitNextVideoFrame(); } TEST_P(ConnectionTest, DestroyOnIncomingMessage) { @@ -554,7 +572,7 @@ base::MakeUnique<TestScreenCapturer>()); video_stream->SetEventTimestampsSource(input_event_timestamps_source); - WaitFirstVideoFrame(); + WaitNextVideoFrame(); base::TimeTicks finish_time = base::TimeTicks::Now(); @@ -597,5 +615,49 @@ client_audio_player_.Verify(); } +TEST_P(ConnectionTest, FirstCaptureFailed) { + Connect(); + + base::TimeTicks event_timestamp = base::TimeTicks::FromInternalValue(42); + + scoped_refptr<InputEventTimestampsSourceImpl> input_event_timestamps_source = + new InputEventTimestampsSourceImpl(); + input_event_timestamps_source->OnEventReceived( + InputEventTimestamps{event_timestamp, base::TimeTicks::Now()}); + + auto 
capturer = base::MakeUnique<TestScreenCapturer>(); + capturer->FailNthFrame(0); + auto video_stream = host_connection_->StartVideoStream(std::move(capturer)); + video_stream->SetEventTimestampsSource(input_event_timestamps_source); + + WaitNextVideoFrame(); + + // Currently stats work in this test only for WebRTC because for ICE + // connections stats are reported by SoftwareVideoRenderer which is not used + // in this test. + // TODO(sergeyu): Fix this. + if (is_using_webrtc()) { + WaitFirstFrameStats(); + + // Verify that the event timestamp received before the first frame gets used + // for the second frame. + const FrameStats& stats = client_video_renderer_.GetFrameStatsConsumer() + ->received_stats() + .front(); + EXPECT_EQ(event_timestamp, stats.host_stats.latest_event_timestamp); + } +} + +TEST_P(ConnectionTest, SecondCaptureFailed) { + Connect(); + + auto capturer = base::MakeUnique<TestScreenCapturer>(); + capturer->FailNthFrame(1); + auto video_stream = host_connection_->StartVideoStream(std::move(capturer)); + + WaitNextVideoFrame(); + WaitNextVideoFrame(); +} + } // namespace protocol } // namespace remoting
diff --git a/remoting/protocol/webrtc_frame_scheduler.h b/remoting/protocol/webrtc_frame_scheduler.h index c69b113..da8aabe 100644 --- a/remoting/protocol/webrtc_frame_scheduler.h +++ b/remoting/protocol/webrtc_frame_scheduler.h
@@ -8,7 +8,6 @@ #include "base/callback_forward.h" #include "base/memory/weak_ptr.h" #include "remoting/codec/webrtc_video_encoder.h" -#include "third_party/webrtc/video_encoder.h" namespace remoting { namespace protocol { @@ -33,18 +32,17 @@ virtual void Pause(bool pause) = 0; // Called after |frame| has been captured to get encoding parameters for the - // frame. Returns false if the frame should be dropped (e.g. when there are - // no changed), true otherwise. - virtual bool GetEncoderFrameParams( - const webrtc::DesktopFrame& frame, - WebrtcVideoEncoder::FrameParams* params_out) = 0; + // frame. Returns false if the frame should be dropped (e.g. when there are no + // changes), true otherwise. |frame| may be set to nullptr if the capture + // request failed. + virtual bool OnFrameCaptured(const webrtc::DesktopFrame* frame, + WebrtcVideoEncoder::FrameParams* params_out) = 0; - // Called after a frame has been encoded and passed to the sender. If - // |frame_stats| is not null then sets send_pending_delay, rtt_estimate and - // bandwidth_estimate_kbps fields. + // Called after a frame has been encoded and passed to the sender. + // |encoded_frame| may be nullptr. If |frame_stats| is not null then sets + // send_pending_delay, rtt_estimate and bandwidth_estimate_kbps fields. virtual void OnFrameEncoded( - const WebrtcVideoEncoder::EncodedFrame& encoded_frame, - const webrtc::EncodedImageCallback::Result& send_result, + const WebrtcVideoEncoder::EncodedFrame* encoded_frame, HostFrameStats* frame_stats) = 0; };
diff --git a/remoting/protocol/webrtc_frame_scheduler_simple.cc b/remoting/protocol/webrtc_frame_scheduler_simple.cc index 5b90725..ed03767 100644 --- a/remoting/protocol/webrtc_frame_scheduler_simple.cc +++ b/remoting/protocol/webrtc_frame_scheduler_simple.cc
@@ -84,20 +84,33 @@ bandwidth_samples_.push(std::make_pair(now, bandwidth_kbps)); bandwidth_samples_sum_ += bandwidth_kbps; + + UpdateTargetBitrate(); } -int WebrtcFrameSchedulerSimple::EncoderBitrateFilter::GetTargetBitrateKbps( - webrtc::DesktopSize size, - base::TimeTicks now) { - DCHECK(!bandwidth_samples_.empty()); - - // TODO(sergeyu): This logic is applicable only to VP8. Reconsider it for - // VP9. - int bandwidth_estimate = bandwidth_samples_sum_ / bandwidth_samples_.size(); - int minimum_bitrate = +void WebrtcFrameSchedulerSimple::EncoderBitrateFilter::SetFrameSize( + webrtc::DesktopSize size) { + // TODO(sergeyu): This logic is applicable only to VP8. Reconsider it for VP9. + minimum_bitrate_ = static_cast<int64_t>(kVp8MinimumTargetBitrateKbpsPerMegapixel) * size.width() * size.height() / 1000000LL; - int target_bitrate = std::max(minimum_bitrate, bandwidth_estimate); + + UpdateTargetBitrate(); +} + +int WebrtcFrameSchedulerSimple::EncoderBitrateFilter::GetTargetBitrateKbps() + const { + DCHECK_GT(current_target_bitrate_, 0); + return current_target_bitrate_; +} + +void WebrtcFrameSchedulerSimple::EncoderBitrateFilter::UpdateTargetBitrate() { + if (bandwidth_samples_.empty()) { + return; + } + + int bandwidth_estimate = bandwidth_samples_sum_ / bandwidth_samples_.size(); + int target_bitrate = std::max(minimum_bitrate_, bandwidth_estimate); // Update encoder bitrate only when it changes by more than 30%. 
This is // necessary because the encoder resets internal state when it's reconfigured @@ -107,7 +120,6 @@ current_target_bitrate_ * kEncoderBitrateChangePercentage / 100) { current_target_bitrate_ = target_bitrate; } - return current_target_bitrate_; } WebrtcFrameSchedulerSimple::WebrtcFrameSchedulerSimple() @@ -159,45 +171,51 @@ } } -bool WebrtcFrameSchedulerSimple::GetEncoderFrameParams( - const webrtc::DesktopFrame& frame, +bool WebrtcFrameSchedulerSimple::OnFrameCaptured( + const webrtc::DesktopFrame* frame, WebrtcVideoEncoder::FrameParams* params_out) { DCHECK(thread_checker_.CalledOnValidThread()); base::TimeTicks now = base::TimeTicks::Now(); - if (frame.updated_region().is_empty() && !top_off_is_active_ && + if ((!frame || frame->updated_region().is_empty()) && !top_off_is_active_ && !key_frame_request_) { frame_pending_ = false; ScheduleNextFrame(now); return false; } - params_out->bitrate_kbps = - encoder_bitrate_.GetTargetBitrateKbps(frame.size(), now); + if (frame) { + encoder_bitrate_.SetFrameSize(frame->size()); + } + params_out->bitrate_kbps = encoder_bitrate_.GetTargetBitrateKbps(); params_out->duration = kTargetFrameInterval; params_out->key_frame = key_frame_request_; key_frame_request_ = false; params_out->vpx_min_quantizer = 10; - int64_t updated_area = params_out->key_frame - ? frame.size().width() * frame.size().height() - : GetRegionArea(frame.updated_region()); + int64_t updated_area = 0; + if (frame) { + updated_area = params_out->key_frame + ? frame->size().width() * frame->size().height() + : GetRegionArea(frame->updated_region()); + } // If bandwidth is being underutilized then libvpx is likely to choose the - // minimum allowed quantizer value, which means that encoded frame size may be - // significantly bigger than the bandwidth allows. Detect this case and set + // minimum allowed quantizer value, which means that encoded frame size may + // be significantly bigger than the bandwidth allows. 
Detect this case and set // vpx_min_quantizer to 60. The quality will be topped off later. if (updated_area - updated_region_area_.Max() > kBigFrameThresholdPixels) { - int expected_frame_size = updated_area * - kEstimatedBytesPerMegapixel / kPixelsPerMegapixel; + int expected_frame_size = + updated_area * kEstimatedBytesPerMegapixel / kPixelsPerMegapixel; base::TimeDelta expected_send_delay = base::TimeDelta::FromMicroseconds( base::Time::kMicrosecondsPerSecond * expected_frame_size / pacing_bucket_.rate()); - if (expected_send_delay > kTargetFrameInterval) + if (expected_send_delay > kTargetFrameInterval) { params_out->vpx_min_quantizer = 60; + } } updated_region_area_.Record(updated_area); @@ -210,8 +228,7 @@ } void WebrtcFrameSchedulerSimple::OnFrameEncoded( - const WebrtcVideoEncoder::EncodedFrame& encoded_frame, - const webrtc::EncodedImageCallback::Result& send_result, + const WebrtcVideoEncoder::EncodedFrame* encoded_frame, HostFrameStats* frame_stats) { DCHECK(thread_checker_.CalledOnValidThread()); DCHECK(frame_pending_); @@ -225,16 +242,17 @@ std::max(base::TimeDelta(), pacing_bucket_.GetEmptyTime() - now); } - pacing_bucket_.RefillOrSpill(encoded_frame.data.size(), now); - - if (encoded_frame.data.empty()) { + if (!encoded_frame || encoded_frame->data.empty()) { top_off_is_active_ = false; } else { + pacing_bucket_.RefillOrSpill(encoded_frame->data.size(), now); + frame_processing_delay_us_.Record( (now - last_capture_started_time_).InMicroseconds()); // Top-off until the target quantizer value is reached. 
- top_off_is_active_ = encoded_frame.quantizer > kTargetQuantizerForVp8TopOff; + top_off_is_active_ = + encoded_frame->quantizer > kTargetQuantizerForVp8TopOff; } ScheduleNextFrame(now); @@ -266,8 +284,9 @@ target_capture_time, last_capture_started_time_ + kTargetFrameInterval); } - if (target_capture_time < now) + if (target_capture_time < now) { target_capture_time = now; + } capture_timer_.Start(FROM_HERE, target_capture_time - now, base::Bind(&WebrtcFrameSchedulerSimple::CaptureNextFrame,
diff --git a/remoting/protocol/webrtc_frame_scheduler_simple.h b/remoting/protocol/webrtc_frame_scheduler_simple.h index 413dabd8..cd5f4dd5 100644 --- a/remoting/protocol/webrtc_frame_scheduler_simple.h +++ b/remoting/protocol/webrtc_frame_scheduler_simple.h
@@ -37,11 +37,9 @@ void Start(WebrtcDummyVideoEncoderFactory* video_encoder_factory, const base::Closure& capture_callback) override; void Pause(bool pause) override; - bool GetEncoderFrameParams( - const webrtc::DesktopFrame& frame, - WebrtcVideoEncoder::FrameParams* params_out) override; - void OnFrameEncoded(const WebrtcVideoEncoder::EncodedFrame& encoded_frame, - const webrtc::EncodedImageCallback::Result& send_result, + bool OnFrameCaptured(const webrtc::DesktopFrame* frame, + WebrtcVideoEncoder::FrameParams* params_out) override; + void OnFrameEncoded(const WebrtcVideoEncoder::EncodedFrame* encoded_frame, HostFrameStats* frame_stats) override; private: @@ -52,13 +50,17 @@ ~EncoderBitrateFilter(); void SetBandwidthEstimate(int bandwidth_kbps, base::TimeTicks now); - int GetTargetBitrateKbps(webrtc::DesktopSize size, base::TimeTicks now); + void SetFrameSize(webrtc::DesktopSize size); + int GetTargetBitrateKbps() const; private: + void UpdateTargetBitrate(); + std::queue<std::pair<base::TimeTicks, int>> bandwidth_samples_; int bandwidth_samples_sum_ = 0; - int current_target_bitrate_; + int minimum_bitrate_ = 0; + int current_target_bitrate_ = 0; }; void ScheduleNextFrame(base::TimeTicks now);
diff --git a/remoting/protocol/webrtc_video_stream.cc b/remoting/protocol/webrtc_video_stream.cc index 7e2434b..7450fb5 100644 --- a/remoting/protocol/webrtc_video_stream.cc +++ b/remoting/protocol/webrtc_video_stream.cc
@@ -139,26 +139,33 @@ std::unique_ptr<webrtc::DesktopFrame> frame) { DCHECK(thread_checker_.CalledOnValidThread()); - // TODO(sergeyu): Handle ERROR_PERMANENT result here. - - webrtc::DesktopVector dpi = - frame->dpi().is_zero() ? webrtc::DesktopVector(kDefaultDpi, kDefaultDpi) - : frame->dpi(); - - if (!frame_size_.equals(frame->size()) || !frame_dpi_.equals(dpi)) { - frame_size_ = frame->size(); - frame_dpi_ = dpi; - if (observer_) - observer_->OnVideoSizeChanged(this, frame_size_, frame_dpi_); - } - captured_frame_timestamps_->capture_ended_time = base::TimeTicks::Now(); captured_frame_timestamps_->capture_delay = - base::TimeDelta::FromMilliseconds(frame->capture_time_ms()); + base::TimeDelta::FromMilliseconds(frame ? frame->capture_time_ms() : 0); WebrtcVideoEncoder::FrameParams frame_params; - if (!scheduler_->GetEncoderFrameParams(*frame, &frame_params)) + if (!scheduler_->OnFrameCaptured(frame.get(), &frame_params)) { return; + } + + // TODO(sergeyu): Handle ERROR_PERMANENT result here. + if (frame) { + webrtc::DesktopVector dpi = + frame->dpi().is_zero() ? webrtc::DesktopVector(kDefaultDpi, kDefaultDpi) + : frame->dpi(); + + if (!frame_size_.equals(frame->size()) || !frame_dpi_.equals(dpi)) { + frame_size_ = frame->size(); + frame_dpi_ = dpi; + if (observer_) + observer_->OnVideoSizeChanged(this, frame_size_, frame_dpi_); + } + } else { + // Save event timestamps to be used for the next frame. 
+ next_frame_input_event_timestamps_ = + captured_frame_timestamps_->input_event_timestamps; + captured_frame_timestamps_->input_event_timestamps = InputEventTimestamps(); + } base::PostTaskAndReplyWithResult( encode_task_runner_.get(), FROM_HERE, @@ -185,7 +192,11 @@ captured_frame_timestamps_.reset(new FrameTimestamps()); captured_frame_timestamps_->capture_started_time = base::TimeTicks::Now(); - if (event_timestamps_source_) { + if (!next_frame_input_event_timestamps_.is_null()) { + captured_frame_timestamps_->input_event_timestamps = + next_frame_input_event_timestamps_; + next_frame_input_event_timestamps_ = InputEventTimestamps(); + } else if (event_timestamps_source_) { captured_frame_timestamps_->input_event_timestamps = event_timestamps_source_->TakeLastEventTimestamps(); } @@ -202,7 +213,7 @@ EncodedFrameWithTimestamps result; result.timestamps = std::move(timestamps); result.timestamps->encode_started_time = base::TimeTicks::Now(); - result.frame = encoder->Encode(*frame, params); + result.frame = encoder->Encode(frame.get(), params); result.timestamps->encode_ended_time = base::TimeTicks::Now(); return result; } @@ -210,6 +221,13 @@ void WebrtcVideoStream::OnFrameEncoded(EncodedFrameWithTimestamps frame) { DCHECK(thread_checker_.CalledOnValidThread()); + HostFrameStats stats; + scheduler_->OnFrameEncoded(frame.frame.get(), &stats); + + if (!frame.frame) { + return; + } + webrtc::EncodedImageCallback::Result result = webrtc_transport_->video_encoder_factory()->SendEncodedFrame( *frame.frame, frame.timestamps->capture_started_time); @@ -219,12 +237,9 @@ return; } - HostFrameStats stats; - scheduler_->OnFrameEncoded(*frame.frame, result, &stats); - // Send FrameStats message. if (video_stats_dispatcher_.is_connected()) { - stats.frame_size = frame.frame->data.size(); + stats.frame_size = frame.frame ? frame.frame->data.size() : 0; if (!frame.timestamps->input_event_timestamps.is_null()) { stats.capture_pending_delay =
diff --git a/remoting/protocol/webrtc_video_stream.h b/remoting/protocol/webrtc_video_stream.h index 6d1d1c67..2e704368 100644 --- a/remoting/protocol/webrtc_video_stream.h +++ b/remoting/protocol/webrtc_video_stream.h
@@ -90,6 +90,11 @@ HostVideoStatsDispatcher video_stats_dispatcher_; + // In case when the capturer failed to capture a frame the corresponding event + // timestamps are saved in |next_frame_input_event_timestamps_| to be used for + // the following frame. + InputEventTimestamps next_frame_input_event_timestamps_; + // Timestamps for the frame that's being captured. std::unique_ptr<FrameTimestamps> captured_frame_timestamps_;
diff --git a/testing/buildbot/chromium.perf.fyi.json b/testing/buildbot/chromium.perf.fyi.json index a262111..73274e7 100644 --- a/testing/buildbot/chromium.perf.fyi.json +++ b/testing/buildbot/chromium.perf.fyi.json
@@ -21,7 +21,7 @@ "dimension_sets": [ { "android_devices": "1", - "id": "build245-m4--device2", + "id": "build248-m4--device3", "os": "Android", "pool": "Chrome-perf" } @@ -50,7 +50,7 @@ "dimension_sets": [ { "android_devices": "1", - "id": "build245-m4--device2", + "id": "build248-m4--device3", "os": "Android", "pool": "Chrome-perf" } @@ -78,7 +78,7 @@ "dimension_sets": [ { "android_devices": "1", - "id": "build245-m4--device4", + "id": "build248-m4--device2", "os": "Android", "pool": "Chrome-perf" } @@ -107,7 +107,7 @@ "dimension_sets": [ { "android_devices": "1", - "id": "build245-m4--device4", + "id": "build248-m4--device2", "os": "Android", "pool": "Chrome-perf" } @@ -135,7 +135,7 @@ "dimension_sets": [ { "android_devices": "1", - "id": "build245-m4--device7", + "id": "build248-m4--device7", "os": "Android", "pool": "Chrome-perf" } @@ -164,7 +164,7 @@ "dimension_sets": [ { "android_devices": "1", - "id": "build245-m4--device7", + "id": "build248-m4--device7", "os": "Android", "pool": "Chrome-perf" } @@ -192,7 +192,7 @@ "dimension_sets": [ { "android_devices": "1", - "id": "build245-m4--device4", + "id": "build248-m4--device4", "os": "Android", "pool": "Chrome-perf" } @@ -221,7 +221,7 @@ "dimension_sets": [ { "android_devices": "1", - "id": "build245-m4--device4", + "id": "build248-m4--device4", "os": "Android", "pool": "Chrome-perf" } @@ -306,7 +306,7 @@ "dimension_sets": [ { "android_devices": "1", - "id": "build245-m4--device1", + "id": "build248-m4--device7", "os": "Android", "pool": "Chrome-perf" } @@ -335,7 +335,7 @@ "dimension_sets": [ { "android_devices": "1", - "id": "build245-m4--device1", + "id": "build248-m4--device7", "os": "Android", "pool": "Chrome-perf" } @@ -363,7 +363,7 @@ "dimension_sets": [ { "android_devices": "1", - "id": "build245-m4--device6", + "id": "build245-m4--device4", "os": "Android", "pool": "Chrome-perf" } @@ -392,7 +392,7 @@ "dimension_sets": [ { "android_devices": "1", - "id": "build245-m4--device6", + "id": 
"build245-m4--device4", "os": "Android", "pool": "Chrome-perf" } @@ -420,7 +420,7 @@ "dimension_sets": [ { "android_devices": "1", - "id": "build245-m4--device4", + "id": "build249-m4--device7", "os": "Android", "pool": "Chrome-perf" } @@ -449,7 +449,7 @@ "dimension_sets": [ { "android_devices": "1", - "id": "build245-m4--device4", + "id": "build249-m4--device7", "os": "Android", "pool": "Chrome-perf" } @@ -477,7 +477,7 @@ "dimension_sets": [ { "android_devices": "1", - "id": "build245-m4--device7", + "id": "build248-m4--device6", "os": "Android", "pool": "Chrome-perf" } @@ -506,7 +506,7 @@ "dimension_sets": [ { "android_devices": "1", - "id": "build245-m4--device7", + "id": "build248-m4--device6", "os": "Android", "pool": "Chrome-perf" } @@ -534,7 +534,7 @@ "dimension_sets": [ { "android_devices": "1", - "id": "build245-m4--device1", + "id": "build249-m4--device2", "os": "Android", "pool": "Chrome-perf" } @@ -563,7 +563,7 @@ "dimension_sets": [ { "android_devices": "1", - "id": "build245-m4--device1", + "id": "build249-m4--device2", "os": "Android", "pool": "Chrome-perf" } @@ -591,7 +591,7 @@ "dimension_sets": [ { "android_devices": "1", - "id": "build245-m4--device1", + "id": "build245-m4--device5", "os": "Android", "pool": "Chrome-perf" } @@ -620,7 +620,7 @@ "dimension_sets": [ { "android_devices": "1", - "id": "build245-m4--device1", + "id": "build245-m4--device5", "os": "Android", "pool": "Chrome-perf" } @@ -648,7 +648,7 @@ "dimension_sets": [ { "android_devices": "1", - "id": "build245-m4--device2", + "id": "build249-m4--device5", "os": "Android", "pool": "Chrome-perf" } @@ -677,7 +677,7 @@ "dimension_sets": [ { "android_devices": "1", - "id": "build245-m4--device2", + "id": "build249-m4--device5", "os": "Android", "pool": "Chrome-perf" } @@ -705,7 +705,7 @@ "dimension_sets": [ { "android_devices": "1", - "id": "build245-m4--device3", + "id": "build245-m4--device6", "os": "Android", "pool": "Chrome-perf" } @@ -734,7 +734,7 @@ "dimension_sets": [ { 
"android_devices": "1", - "id": "build245-m4--device3", + "id": "build245-m4--device6", "os": "Android", "pool": "Chrome-perf" } @@ -762,7 +762,7 @@ "dimension_sets": [ { "android_devices": "1", - "id": "build245-m4--device2", + "id": "build249-m4--device4", "os": "Android", "pool": "Chrome-perf" } @@ -791,7 +791,7 @@ "dimension_sets": [ { "android_devices": "1", - "id": "build245-m4--device2", + "id": "build249-m4--device4", "os": "Android", "pool": "Chrome-perf" } @@ -819,7 +819,7 @@ "dimension_sets": [ { "android_devices": "1", - "id": "build245-m4--device5", + "id": "build249-m4--device2", "os": "Android", "pool": "Chrome-perf" } @@ -848,7 +848,7 @@ "dimension_sets": [ { "android_devices": "1", - "id": "build245-m4--device5", + "id": "build249-m4--device2", "os": "Android", "pool": "Chrome-perf" } @@ -876,7 +876,7 @@ "dimension_sets": [ { "android_devices": "1", - "id": "build245-m4--device2", + "id": "build249-m4--device6", "os": "Android", "pool": "Chrome-perf" } @@ -905,7 +905,7 @@ "dimension_sets": [ { "android_devices": "1", - "id": "build245-m4--device2", + "id": "build249-m4--device6", "os": "Android", "pool": "Chrome-perf" } @@ -933,7 +933,7 @@ "dimension_sets": [ { "android_devices": "1", - "id": "build245-m4--device4", + "id": "build245-m4--device7", "os": "Android", "pool": "Chrome-perf" } @@ -962,7 +962,7 @@ "dimension_sets": [ { "android_devices": "1", - "id": "build245-m4--device4", + "id": "build245-m4--device7", "os": "Android", "pool": "Chrome-perf" } @@ -990,7 +990,7 @@ "dimension_sets": [ { "android_devices": "1", - "id": "build245-m4--device3", + "id": "build248-m4--device5", "os": "Android", "pool": "Chrome-perf" } @@ -1019,7 +1019,7 @@ "dimension_sets": [ { "android_devices": "1", - "id": "build245-m4--device3", + "id": "build248-m4--device5", "os": "Android", "pool": "Chrome-perf" } @@ -1047,7 +1047,7 @@ "dimension_sets": [ { "android_devices": "1", - "id": "build245-m4--device3", + "id": "build248-m4--device4", "os": "Android", "pool": 
"Chrome-perf" } @@ -1076,7 +1076,7 @@ "dimension_sets": [ { "android_devices": "1", - "id": "build245-m4--device3", + "id": "build248-m4--device4", "os": "Android", "pool": "Chrome-perf" } @@ -1104,7 +1104,7 @@ "dimension_sets": [ { "android_devices": "1", - "id": "build245-m4--device1", + "id": "build248-m4--device3", "os": "Android", "pool": "Chrome-perf" } @@ -1133,7 +1133,7 @@ "dimension_sets": [ { "android_devices": "1", - "id": "build245-m4--device1", + "id": "build248-m4--device3", "os": "Android", "pool": "Chrome-perf" } @@ -1161,7 +1161,7 @@ "dimension_sets": [ { "android_devices": "1", - "id": "build245-m4--device7", + "id": "build245-m4--device5", "os": "Android", "pool": "Chrome-perf" } @@ -1190,7 +1190,7 @@ "dimension_sets": [ { "android_devices": "1", - "id": "build245-m4--device7", + "id": "build245-m4--device5", "os": "Android", "pool": "Chrome-perf" } @@ -1218,7 +1218,7 @@ "dimension_sets": [ { "android_devices": "1", - "id": "build245-m4--device5", + "id": "build245-m4--device6", "os": "Android", "pool": "Chrome-perf" } @@ -1247,7 +1247,7 @@ "dimension_sets": [ { "android_devices": "1", - "id": "build245-m4--device5", + "id": "build245-m4--device6", "os": "Android", "pool": "Chrome-perf" } @@ -1275,7 +1275,7 @@ "dimension_sets": [ { "android_devices": "1", - "id": "build245-m4--device7", + "id": "build249-m4--device3", "os": "Android", "pool": "Chrome-perf" } @@ -1304,7 +1304,7 @@ "dimension_sets": [ { "android_devices": "1", - "id": "build245-m4--device7", + "id": "build249-m4--device3", "os": "Android", "pool": "Chrome-perf" } @@ -1332,7 +1332,7 @@ "dimension_sets": [ { "android_devices": "1", - "id": "build245-m4--device3", + "id": "build248-m4--device1", "os": "Android", "pool": "Chrome-perf" } @@ -1361,7 +1361,7 @@ "dimension_sets": [ { "android_devices": "1", - "id": "build245-m4--device3", + "id": "build248-m4--device1", "os": "Android", "pool": "Chrome-perf" } @@ -1389,7 +1389,7 @@ "dimension_sets": [ { "android_devices": "1", - "id": 
"build245-m4--device6", + "id": "build248-m4--device1", "os": "Android", "pool": "Chrome-perf" } @@ -1418,7 +1418,7 @@ "dimension_sets": [ { "android_devices": "1", - "id": "build245-m4--device6", + "id": "build248-m4--device1", "os": "Android", "pool": "Chrome-perf" } @@ -1503,7 +1503,7 @@ "dimension_sets": [ { "android_devices": "1", - "id": "build245-m4--device1", + "id": "build249-m4--device5", "os": "Android", "pool": "Chrome-perf" } @@ -1532,7 +1532,7 @@ "dimension_sets": [ { "android_devices": "1", - "id": "build245-m4--device1", + "id": "build249-m4--device5", "os": "Android", "pool": "Chrome-perf" } @@ -1560,7 +1560,7 @@ "dimension_sets": [ { "android_devices": "1", - "id": "build245-m4--device7", + "id": "build249-m4--device6", "os": "Android", "pool": "Chrome-perf" } @@ -1589,7 +1589,7 @@ "dimension_sets": [ { "android_devices": "1", - "id": "build245-m4--device7", + "id": "build249-m4--device6", "os": "Android", "pool": "Chrome-perf" } @@ -1617,7 +1617,7 @@ "dimension_sets": [ { "android_devices": "1", - "id": "build245-m4--device2", + "id": "build249-m4--device1", "os": "Android", "pool": "Chrome-perf" } @@ -1646,7 +1646,7 @@ "dimension_sets": [ { "android_devices": "1", - "id": "build245-m4--device2", + "id": "build249-m4--device1", "os": "Android", "pool": "Chrome-perf" } @@ -1674,7 +1674,7 @@ "dimension_sets": [ { "android_devices": "1", - "id": "build245-m4--device5", + "id": "build249-m4--device1", "os": "Android", "pool": "Chrome-perf" } @@ -1703,7 +1703,7 @@ "dimension_sets": [ { "android_devices": "1", - "id": "build245-m4--device5", + "id": "build249-m4--device1", "os": "Android", "pool": "Chrome-perf" } @@ -1731,7 +1731,7 @@ "dimension_sets": [ { "android_devices": "1", - "id": "build245-m4--device7", + "id": "build245-m4--device4", "os": "Android", "pool": "Chrome-perf" } @@ -1760,7 +1760,7 @@ "dimension_sets": [ { "android_devices": "1", - "id": "build245-m4--device7", + "id": "build245-m4--device4", "os": "Android", "pool": "Chrome-perf" } 
@@ -1788,7 +1788,7 @@ "dimension_sets": [ { "android_devices": "1", - "id": "build245-m4--device2", + "id": "build249-m4--device2", "os": "Android", "pool": "Chrome-perf" } @@ -1817,7 +1817,7 @@ "dimension_sets": [ { "android_devices": "1", - "id": "build245-m4--device2", + "id": "build249-m4--device2", "os": "Android", "pool": "Chrome-perf" } @@ -1845,7 +1845,7 @@ "dimension_sets": [ { "android_devices": "1", - "id": "build245-m4--device3", + "id": "build245-m4--device6", "os": "Android", "pool": "Chrome-perf" } @@ -1874,7 +1874,7 @@ "dimension_sets": [ { "android_devices": "1", - "id": "build245-m4--device3", + "id": "build245-m4--device6", "os": "Android", "pool": "Chrome-perf" } @@ -1902,7 +1902,7 @@ "dimension_sets": [ { "android_devices": "1", - "id": "build245-m4--device5", + "id": "build245-m4--device4", "os": "Android", "pool": "Chrome-perf" } @@ -1931,7 +1931,7 @@ "dimension_sets": [ { "android_devices": "1", - "id": "build245-m4--device5", + "id": "build245-m4--device4", "os": "Android", "pool": "Chrome-perf" } @@ -1959,7 +1959,7 @@ "dimension_sets": [ { "android_devices": "1", - "id": "build245-m4--device4", + "id": "build248-m4--device1", "os": "Android", "pool": "Chrome-perf" } @@ -1988,7 +1988,7 @@ "dimension_sets": [ { "android_devices": "1", - "id": "build245-m4--device4", + "id": "build248-m4--device1", "os": "Android", "pool": "Chrome-perf" } @@ -2016,7 +2016,7 @@ "dimension_sets": [ { "android_devices": "1", - "id": "build245-m4--device7", + "id": "build248-m4--device2", "os": "Android", "pool": "Chrome-perf" } @@ -2045,7 +2045,7 @@ "dimension_sets": [ { "android_devices": "1", - "id": "build245-m4--device7", + "id": "build248-m4--device2", "os": "Android", "pool": "Chrome-perf" } @@ -2073,7 +2073,7 @@ "dimension_sets": [ { "android_devices": "1", - "id": "build245-m4--device1", + "id": "build248-m4--device7", "os": "Android", "pool": "Chrome-perf" } @@ -2102,7 +2102,7 @@ "dimension_sets": [ { "android_devices": "1", - "id": 
"build245-m4--device1", + "id": "build248-m4--device7", "os": "Android", "pool": "Chrome-perf" } @@ -2130,7 +2130,7 @@ "dimension_sets": [ { "android_devices": "1", - "id": "build245-m4--device2", + "id": "build248-m4--device4", "os": "Android", "pool": "Chrome-perf" } @@ -2159,7 +2159,7 @@ "dimension_sets": [ { "android_devices": "1", - "id": "build245-m4--device2", + "id": "build248-m4--device4", "os": "Android", "pool": "Chrome-perf" } @@ -2187,7 +2187,7 @@ "dimension_sets": [ { "android_devices": "1", - "id": "build245-m4--device6", + "id": "build248-m4--device3", "os": "Android", "pool": "Chrome-perf" } @@ -2216,7 +2216,7 @@ "dimension_sets": [ { "android_devices": "1", - "id": "build245-m4--device6", + "id": "build248-m4--device3", "os": "Android", "pool": "Chrome-perf" } @@ -2244,7 +2244,7 @@ "dimension_sets": [ { "android_devices": "1", - "id": "build245-m4--device1", + "id": "build245-m4--device3", "os": "Android", "pool": "Chrome-perf" } @@ -2273,7 +2273,7 @@ "dimension_sets": [ { "android_devices": "1", - "id": "build245-m4--device1", + "id": "build245-m4--device3", "os": "Android", "pool": "Chrome-perf" } @@ -2301,7 +2301,7 @@ "dimension_sets": [ { "android_devices": "1", - "id": "build245-m4--device7", + "id": "build248-m4--device7", "os": "Android", "pool": "Chrome-perf" } @@ -2330,7 +2330,7 @@ "dimension_sets": [ { "android_devices": "1", - "id": "build245-m4--device7", + "id": "build248-m4--device7", "os": "Android", "pool": "Chrome-perf" } @@ -2358,7 +2358,7 @@ "dimension_sets": [ { "android_devices": "1", - "id": "build245-m4--device4", + "id": "build245-m4--device5", "os": "Android", "pool": "Chrome-perf" } @@ -2387,7 +2387,7 @@ "dimension_sets": [ { "android_devices": "1", - "id": "build245-m4--device4", + "id": "build245-m4--device5", "os": "Android", "pool": "Chrome-perf" } @@ -2415,7 +2415,7 @@ "dimension_sets": [ { "android_devices": "1", - "id": "build245-m4--device7", + "id": "build245-m4--device5", "os": "Android", "pool": "Chrome-perf" } 
@@ -2444,7 +2444,7 @@ "dimension_sets": [ { "android_devices": "1", - "id": "build245-m4--device7", + "id": "build245-m4--device5", "os": "Android", "pool": "Chrome-perf" } @@ -2472,7 +2472,7 @@ "dimension_sets": [ { "android_devices": "1", - "id": "build245-m4--device2", + "id": "build248-m4--device4", "os": "Android", "pool": "Chrome-perf" } @@ -2501,7 +2501,7 @@ "dimension_sets": [ { "android_devices": "1", - "id": "build245-m4--device2", + "id": "build248-m4--device4", "os": "Android", "pool": "Chrome-perf" } @@ -2529,7 +2529,7 @@ "dimension_sets": [ { "android_devices": "1", - "id": "build245-m4--device7", + "id": "build248-m4--device7", "os": "Android", "pool": "Chrome-perf" } @@ -2558,7 +2558,7 @@ "dimension_sets": [ { "android_devices": "1", - "id": "build245-m4--device7", + "id": "build248-m4--device7", "os": "Android", "pool": "Chrome-perf" } @@ -2586,7 +2586,7 @@ "dimension_sets": [ { "android_devices": "1", - "id": "build245-m4--device5", + "id": "build249-m4--device1", "os": "Android", "pool": "Chrome-perf" } @@ -2615,7 +2615,7 @@ "dimension_sets": [ { "android_devices": "1", - "id": "build245-m4--device5", + "id": "build249-m4--device1", "os": "Android", "pool": "Chrome-perf" } @@ -2643,7 +2643,7 @@ "dimension_sets": [ { "android_devices": "1", - "id": "build245-m4--device3", + "id": "build249-m4--device6", "os": "Android", "pool": "Chrome-perf" } @@ -2672,7 +2672,7 @@ "dimension_sets": [ { "android_devices": "1", - "id": "build245-m4--device3", + "id": "build249-m4--device6", "os": "Android", "pool": "Chrome-perf" } @@ -2700,7 +2700,7 @@ "dimension_sets": [ { "android_devices": "1", - "id": "build245-m4--device4", + "id": "build249-m4--device7", "os": "Android", "pool": "Chrome-perf" } @@ -2729,7 +2729,7 @@ "dimension_sets": [ { "android_devices": "1", - "id": "build245-m4--device4", + "id": "build249-m4--device7", "os": "Android", "pool": "Chrome-perf" } @@ -2757,7 +2757,7 @@ "dimension_sets": [ { "android_devices": "1", - "id": 
"build245-m4--device2", + "id": "build249-m4--device5", "os": "Android", "pool": "Chrome-perf" } @@ -2786,7 +2786,7 @@ "dimension_sets": [ { "android_devices": "1", - "id": "build245-m4--device2", + "id": "build249-m4--device5", "os": "Android", "pool": "Chrome-perf" } @@ -2814,7 +2814,7 @@ "dimension_sets": [ { "android_devices": "1", - "id": "build245-m4--device5", + "id": "build248-m4--device6", "os": "Android", "pool": "Chrome-perf" } @@ -2843,7 +2843,7 @@ "dimension_sets": [ { "android_devices": "1", - "id": "build245-m4--device5", + "id": "build248-m4--device6", "os": "Android", "pool": "Chrome-perf" } @@ -2871,7 +2871,7 @@ "dimension_sets": [ { "android_devices": "1", - "id": "build245-m4--device1", + "id": "build248-m4--device6", "os": "Android", "pool": "Chrome-perf" } @@ -2900,7 +2900,7 @@ "dimension_sets": [ { "android_devices": "1", - "id": "build245-m4--device1", + "id": "build248-m4--device6", "os": "Android", "pool": "Chrome-perf" } @@ -2928,7 +2928,7 @@ "dimension_sets": [ { "android_devices": "1", - "id": "build245-m4--device4", + "id": "build249-m4--device1", "os": "Android", "pool": "Chrome-perf" } @@ -2957,7 +2957,7 @@ "dimension_sets": [ { "android_devices": "1", - "id": "build245-m4--device4", + "id": "build249-m4--device1", "os": "Android", "pool": "Chrome-perf" } @@ -2985,7 +2985,7 @@ "dimension_sets": [ { "android_devices": "1", - "id": "build245-m4--device5", + "id": "build248-m4--device6", "os": "Android", "pool": "Chrome-perf" } @@ -3014,7 +3014,7 @@ "dimension_sets": [ { "android_devices": "1", - "id": "build245-m4--device5", + "id": "build248-m4--device6", "os": "Android", "pool": "Chrome-perf" } @@ -3042,7 +3042,7 @@ "dimension_sets": [ { "android_devices": "1", - "id": "build245-m4--device4", + "id": "build249-m4--device6", "os": "Android", "pool": "Chrome-perf" } @@ -3071,7 +3071,7 @@ "dimension_sets": [ { "android_devices": "1", - "id": "build245-m4--device4", + "id": "build249-m4--device6", "os": "Android", "pool": "Chrome-perf" } 
@@ -3099,7 +3099,7 @@ "dimension_sets": [ { "android_devices": "1", - "id": "build245-m4--device7", + "id": "build245-m4--device5", "os": "Android", "pool": "Chrome-perf" } @@ -3128,7 +3128,7 @@ "dimension_sets": [ { "android_devices": "1", - "id": "build245-m4--device7", + "id": "build245-m4--device5", "os": "Android", "pool": "Chrome-perf" } @@ -3156,7 +3156,7 @@ "dimension_sets": [ { "android_devices": "1", - "id": "build245-m4--device1", + "id": "build245-m4--device7", "os": "Android", "pool": "Chrome-perf" } @@ -3185,7 +3185,7 @@ "dimension_sets": [ { "android_devices": "1", - "id": "build245-m4--device1", + "id": "build245-m4--device7", "os": "Android", "pool": "Chrome-perf" } @@ -3213,7 +3213,7 @@ "dimension_sets": [ { "android_devices": "1", - "id": "build245-m4--device3", + "id": "build245-m4--device5", "os": "Android", "pool": "Chrome-perf" } @@ -3242,7 +3242,7 @@ "dimension_sets": [ { "android_devices": "1", - "id": "build245-m4--device3", + "id": "build245-m4--device5", "os": "Android", "pool": "Chrome-perf" } @@ -3270,7 +3270,7 @@ "dimension_sets": [ { "android_devices": "1", - "id": "build245-m4--device5", + "id": "build248-m4--device3", "os": "Android", "pool": "Chrome-perf" } @@ -3299,7 +3299,7 @@ "dimension_sets": [ { "android_devices": "1", - "id": "build245-m4--device5", + "id": "build248-m4--device3", "os": "Android", "pool": "Chrome-perf" } @@ -3327,7 +3327,7 @@ "dimension_sets": [ { "android_devices": "1", - "id": "build245-m4--device2", + "id": "build249-m4--device1", "os": "Android", "pool": "Chrome-perf" } @@ -3356,7 +3356,7 @@ "dimension_sets": [ { "android_devices": "1", - "id": "build245-m4--device2", + "id": "build249-m4--device1", "os": "Android", "pool": "Chrome-perf" } @@ -3441,7 +3441,7 @@ "dimension_sets": [ { "android_devices": "1", - "id": "build245-m4--device4", + "id": "build248-m4--device4", "os": "Android", "pool": "Chrome-perf" } @@ -3470,7 +3470,7 @@ "dimension_sets": [ { "android_devices": "1", - "id": 
"build245-m4--device4", + "id": "build248-m4--device4", "os": "Android", "pool": "Chrome-perf" } @@ -3498,7 +3498,7 @@ "dimension_sets": [ { "android_devices": "1", - "id": "build245-m4--device4", + "id": "build249-m4--device3", "os": "Android", "pool": "Chrome-perf" } @@ -3527,7 +3527,7 @@ "dimension_sets": [ { "android_devices": "1", - "id": "build245-m4--device4", + "id": "build249-m4--device3", "os": "Android", "pool": "Chrome-perf" } @@ -3555,7 +3555,7 @@ "dimension_sets": [ { "android_devices": "1", - "id": "build245-m4--device5", + "id": "build248-m4--device7", "os": "Android", "pool": "Chrome-perf" } @@ -3584,7 +3584,7 @@ "dimension_sets": [ { "android_devices": "1", - "id": "build245-m4--device5", + "id": "build248-m4--device7", "os": "Android", "pool": "Chrome-perf" } @@ -3612,7 +3612,7 @@ "dimension_sets": [ { "android_devices": "1", - "id": "build245-m4--device3", + "id": "build249-m4--device3", "os": "Android", "pool": "Chrome-perf" } @@ -3641,7 +3641,7 @@ "dimension_sets": [ { "android_devices": "1", - "id": "build245-m4--device3", + "id": "build249-m4--device3", "os": "Android", "pool": "Chrome-perf" } @@ -3669,7 +3669,7 @@ "dimension_sets": [ { "android_devices": "1", - "id": "build245-m4--device7", + "id": "build248-m4--device3", "os": "Android", "pool": "Chrome-perf" } @@ -3698,7 +3698,7 @@ "dimension_sets": [ { "android_devices": "1", - "id": "build245-m4--device7", + "id": "build248-m4--device3", "os": "Android", "pool": "Chrome-perf" } @@ -3726,7 +3726,7 @@ "dimension_sets": [ { "android_devices": "1", - "id": "build245-m4--device6", + "id": "build249-m4--device3", "os": "Android", "pool": "Chrome-perf" } @@ -3755,7 +3755,7 @@ "dimension_sets": [ { "android_devices": "1", - "id": "build245-m4--device6", + "id": "build249-m4--device3", "os": "Android", "pool": "Chrome-perf" } @@ -3783,7 +3783,7 @@ "dimension_sets": [ { "android_devices": "1", - "id": "build245-m4--device6", + "id": "build249-m4--device2", "os": "Android", "pool": "Chrome-perf" } 
@@ -3812,7 +3812,7 @@ "dimension_sets": [ { "android_devices": "1", - "id": "build245-m4--device6", + "id": "build249-m4--device2", "os": "Android", "pool": "Chrome-perf" } @@ -3840,7 +3840,7 @@ "dimension_sets": [ { "android_devices": "1", - "id": "build245-m4--device1", + "id": "build245-m4--device4", "os": "Android", "pool": "Chrome-perf" } @@ -3869,7 +3869,7 @@ "dimension_sets": [ { "android_devices": "1", - "id": "build245-m4--device1", + "id": "build245-m4--device4", "os": "Android", "pool": "Chrome-perf" } @@ -3897,7 +3897,7 @@ "dimension_sets": [ { "android_devices": "1", - "id": "build245-m4--device6", + "id": "build248-m4--device4", "os": "Android", "pool": "Chrome-perf" } @@ -3926,7 +3926,7 @@ "dimension_sets": [ { "android_devices": "1", - "id": "build245-m4--device6", + "id": "build248-m4--device4", "os": "Android", "pool": "Chrome-perf" } @@ -3954,7 +3954,7 @@ "dimension_sets": [ { "android_devices": "1", - "id": "build245-m4--device3", + "id": "build248-m4--device5", "os": "Android", "pool": "Chrome-perf" } @@ -3983,7 +3983,7 @@ "dimension_sets": [ { "android_devices": "1", - "id": "build245-m4--device3", + "id": "build248-m4--device5", "os": "Android", "pool": "Chrome-perf" } @@ -4011,7 +4011,7 @@ "dimension_sets": [ { "android_devices": "1", - "id": "build245-m4--device6", + "id": "build249-m4--device1", "os": "Android", "pool": "Chrome-perf" } @@ -4040,7 +4040,7 @@ "dimension_sets": [ { "android_devices": "1", - "id": "build245-m4--device6", + "id": "build249-m4--device1", "os": "Android", "pool": "Chrome-perf" } @@ -4068,7 +4068,7 @@ "dimension_sets": [ { "android_devices": "1", - "id": "build245-m4--device6", + "id": "build245-m4--device5", "os": "Android", "pool": "Chrome-perf" } @@ -4097,7 +4097,7 @@ "dimension_sets": [ { "android_devices": "1", - "id": "build245-m4--device6", + "id": "build245-m4--device5", "os": "Android", "pool": "Chrome-perf" } @@ -4125,7 +4125,7 @@ "dimension_sets": [ { "android_devices": "1", - "id": 
"build245-m4--device1", + "id": "build249-m4--device3", "os": "Android", "pool": "Chrome-perf" } @@ -4154,7 +4154,7 @@ "dimension_sets": [ { "android_devices": "1", - "id": "build245-m4--device1", + "id": "build249-m4--device3", "os": "Android", "pool": "Chrome-perf" } @@ -4182,7 +4182,7 @@ "dimension_sets": [ { "android_devices": "1", - "id": "build245-m4--device3", + "id": "build248-m4--device6", "os": "Android", "pool": "Chrome-perf" } @@ -4211,7 +4211,7 @@ "dimension_sets": [ { "android_devices": "1", - "id": "build245-m4--device3", + "id": "build248-m4--device6", "os": "Android", "pool": "Chrome-perf" } @@ -4239,7 +4239,7 @@ "dimension_sets": [ { "android_devices": "1", - "id": "build245-m4--device3", + "id": "build248-m4--device5", "os": "Android", "pool": "Chrome-perf" } @@ -4268,7 +4268,7 @@ "dimension_sets": [ { "android_devices": "1", - "id": "build245-m4--device3", + "id": "build248-m4--device5", "os": "Android", "pool": "Chrome-perf" } @@ -4296,7 +4296,7 @@ "dimension_sets": [ { "android_devices": "1", - "id": "build245-m4--device5", + "id": "build248-m4--device3", "os": "Android", "pool": "Chrome-perf" } @@ -4325,7 +4325,7 @@ "dimension_sets": [ { "android_devices": "1", - "id": "build245-m4--device5", + "id": "build248-m4--device3", "os": "Android", "pool": "Chrome-perf" } @@ -4353,7 +4353,7 @@ "dimension_sets": [ { "android_devices": "1", - "id": "build245-m4--device7", + "id": "build248-m4--device1", "os": "Android", "pool": "Chrome-perf" } @@ -4382,7 +4382,7 @@ "dimension_sets": [ { "android_devices": "1", - "id": "build245-m4--device7", + "id": "build248-m4--device1", "os": "Android", "pool": "Chrome-perf" } @@ -4524,7 +4524,7 @@ "dimension_sets": [ { "android_devices": "1", - "id": "build245-m4--device4", + "id": "build249-m4--device6", "os": "Android", "pool": "Chrome-perf" } @@ -4553,7 +4553,7 @@ "dimension_sets": [ { "android_devices": "1", - "id": "build245-m4--device4", + "id": "build249-m4--device6", "os": "Android", "pool": "Chrome-perf" } 
@@ -4581,7 +4581,7 @@ "dimension_sets": [ { "android_devices": "1", - "id": "build245-m4--device2", + "id": "build248-m4--device6", "os": "Android", "pool": "Chrome-perf" } @@ -4610,7 +4610,7 @@ "dimension_sets": [ { "android_devices": "1", - "id": "build245-m4--device2", + "id": "build248-m4--device6", "os": "Android", "pool": "Chrome-perf" } @@ -4695,7 +4695,7 @@ "dimension_sets": [ { "android_devices": "1", - "id": "build245-m4--device2", + "id": "build248-m4--device5", "os": "Android", "pool": "Chrome-perf" } @@ -4724,7 +4724,7 @@ "dimension_sets": [ { "android_devices": "1", - "id": "build245-m4--device2", + "id": "build248-m4--device5", "os": "Android", "pool": "Chrome-perf" } @@ -4752,7 +4752,7 @@ "dimension_sets": [ { "android_devices": "1", - "id": "build245-m4--device1", + "id": "build248-m4--device5", "os": "Android", "pool": "Chrome-perf" } @@ -4781,7 +4781,7 @@ "dimension_sets": [ { "android_devices": "1", - "id": "build245-m4--device1", + "id": "build248-m4--device5", "os": "Android", "pool": "Chrome-perf" } @@ -4809,7 +4809,7 @@ "dimension_sets": [ { "android_devices": "1", - "id": "build245-m4--device2", + "id": "build248-m4--device6", "os": "Android", "pool": "Chrome-perf" } @@ -4838,7 +4838,7 @@ "dimension_sets": [ { "android_devices": "1", - "id": "build245-m4--device2", + "id": "build248-m4--device6", "os": "Android", "pool": "Chrome-perf" } @@ -4866,7 +4866,7 @@ "dimension_sets": [ { "android_devices": "1", - "id": "build245-m4--device5", + "id": "build249-m4--device2", "os": "Android", "pool": "Chrome-perf" } @@ -4895,7 +4895,7 @@ "dimension_sets": [ { "android_devices": "1", - "id": "build245-m4--device5", + "id": "build249-m4--device2", "os": "Android", "pool": "Chrome-perf" } @@ -4923,7 +4923,7 @@ "dimension_sets": [ { "android_devices": "1", - "id": "build245-m4--device2", + "id": "build248-m4--device6", "os": "Android", "pool": "Chrome-perf" } @@ -4952,7 +4952,7 @@ "dimension_sets": [ { "android_devices": "1", - "id": 
"build245-m4--device2", + "id": "build248-m4--device6", "os": "Android", "pool": "Chrome-perf" } @@ -4980,7 +4980,7 @@ "dimension_sets": [ { "android_devices": "1", - "id": "build245-m4--device5", + "id": "build249-m4--device1", "os": "Android", "pool": "Chrome-perf" } @@ -5009,7 +5009,7 @@ "dimension_sets": [ { "android_devices": "1", - "id": "build245-m4--device5", + "id": "build249-m4--device1", "os": "Android", "pool": "Chrome-perf" } @@ -5037,7 +5037,7 @@ "dimension_sets": [ { "android_devices": "1", - "id": "build245-m4--device4", + "id": "build249-m4--device1", "os": "Android", "pool": "Chrome-perf" } @@ -5066,7 +5066,7 @@ "dimension_sets": [ { "android_devices": "1", - "id": "build245-m4--device4", + "id": "build249-m4--device1", "os": "Android", "pool": "Chrome-perf" } @@ -5094,7 +5094,7 @@ "dimension_sets": [ { "android_devices": "1", - "id": "build245-m4--device4", + "id": "build249-m4--device3", "os": "Android", "pool": "Chrome-perf" } @@ -5123,7 +5123,7 @@ "dimension_sets": [ { "android_devices": "1", - "id": "build245-m4--device4", + "id": "build249-m4--device3", "os": "Android", "pool": "Chrome-perf" } @@ -5151,7 +5151,7 @@ "dimension_sets": [ { "android_devices": "1", - "id": "build245-m4--device6", + "id": "build248-m4--device2", "os": "Android", "pool": "Chrome-perf" } @@ -5180,7 +5180,7 @@ "dimension_sets": [ { "android_devices": "1", - "id": "build245-m4--device6", + "id": "build248-m4--device2", "os": "Android", "pool": "Chrome-perf" } @@ -5208,7 +5208,7 @@ "dimension_sets": [ { "android_devices": "1", - "id": "build245-m4--device7", + "id": "build249-m4--device6", "os": "Android", "pool": "Chrome-perf" } @@ -5237,7 +5237,7 @@ "dimension_sets": [ { "android_devices": "1", - "id": "build245-m4--device7", + "id": "build249-m4--device6", "os": "Android", "pool": "Chrome-perf" } @@ -5265,7 +5265,7 @@ "dimension_sets": [ { "android_devices": "1", - "id": "build245-m4--device4", + "id": "build245-m4--device6", "os": "Android", "pool": "Chrome-perf" } 
@@ -5294,7 +5294,7 @@ "dimension_sets": [ { "android_devices": "1", - "id": "build245-m4--device4", + "id": "build245-m4--device6", "os": "Android", "pool": "Chrome-perf" } @@ -5322,7 +5322,7 @@ "dimension_sets": [ { "android_devices": "1", - "id": "build245-m4--device5", + "id": "build245-m4--device3", "os": "Android", "pool": "Chrome-perf" } @@ -5351,7 +5351,7 @@ "dimension_sets": [ { "android_devices": "1", - "id": "build245-m4--device5", + "id": "build245-m4--device3", "os": "Android", "pool": "Chrome-perf" } @@ -5379,7 +5379,7 @@ "dimension_sets": [ { "android_devices": "1", - "id": "build245-m4--device7", + "id": "build249-m4--device3", "os": "Android", "pool": "Chrome-perf" } @@ -5408,7 +5408,7 @@ "dimension_sets": [ { "android_devices": "1", - "id": "build245-m4--device7", + "id": "build249-m4--device3", "os": "Android", "pool": "Chrome-perf" } @@ -5436,7 +5436,7 @@ "dimension_sets": [ { "android_devices": "1", - "id": "build245-m4--device4", + "id": "build249-m4--device5", "os": "Android", "pool": "Chrome-perf" } @@ -5465,7 +5465,7 @@ "dimension_sets": [ { "android_devices": "1", - "id": "build245-m4--device4", + "id": "build249-m4--device5", "os": "Android", "pool": "Chrome-perf" } @@ -5493,7 +5493,7 @@ "dimension_sets": [ { "android_devices": "1", - "id": "build245-m4--device4", + "id": "build248-m4--device4", "os": "Android", "pool": "Chrome-perf" } @@ -5522,7 +5522,7 @@ "dimension_sets": [ { "android_devices": "1", - "id": "build245-m4--device4", + "id": "build248-m4--device4", "os": "Android", "pool": "Chrome-perf" } @@ -5550,7 +5550,7 @@ "dimension_sets": [ { "android_devices": "1", - "id": "build245-m4--device2", + "id": "build248-m4--device6", "os": "Android", "pool": "Chrome-perf" } @@ -5579,7 +5579,7 @@ "dimension_sets": [ { "android_devices": "1", - "id": "build245-m4--device2", + "id": "build248-m4--device6", "os": "Android", "pool": "Chrome-perf" } @@ -5607,7 +5607,7 @@ "dimension_sets": [ { "android_devices": "1", - "id": 
"build245-m4--device6", + "id": "build249-m4--device6", "os": "Android", "pool": "Chrome-perf" } @@ -5636,7 +5636,7 @@ "dimension_sets": [ { "android_devices": "1", - "id": "build245-m4--device6", + "id": "build249-m4--device6", "os": "Android", "pool": "Chrome-perf" } @@ -5664,7 +5664,7 @@ "dimension_sets": [ { "android_devices": "1", - "id": "build245-m4--device7", + "id": "build248-m4--device6", "os": "Android", "pool": "Chrome-perf" } @@ -5693,7 +5693,7 @@ "dimension_sets": [ { "android_devices": "1", - "id": "build245-m4--device7", + "id": "build248-m4--device6", "os": "Android", "pool": "Chrome-perf" } @@ -5721,7 +5721,7 @@ "dimension_sets": [ { "android_devices": "1", - "id": "build245-m4--device3", + "id": "build248-m4--device7", "os": "Android", "pool": "Chrome-perf" } @@ -5750,7 +5750,7 @@ "dimension_sets": [ { "android_devices": "1", - "id": "build245-m4--device3", + "id": "build248-m4--device7", "os": "Android", "pool": "Chrome-perf" } @@ -5778,7 +5778,7 @@ "dimension_sets": [ { "android_devices": "1", - "id": "build245-m4--device2", + "id": "build245-m4--device5", "os": "Android", "pool": "Chrome-perf" } @@ -5807,7 +5807,7 @@ "dimension_sets": [ { "android_devices": "1", - "id": "build245-m4--device2", + "id": "build245-m4--device5", "os": "Android", "pool": "Chrome-perf" } @@ -5835,7 +5835,7 @@ "dimension_sets": [ { "android_devices": "1", - "id": "build245-m4--device4", + "id": "build248-m4--device7", "os": "Android", "pool": "Chrome-perf" } @@ -5864,7 +5864,7 @@ "dimension_sets": [ { "android_devices": "1", - "id": "build245-m4--device4", + "id": "build248-m4--device7", "os": "Android", "pool": "Chrome-perf" } @@ -5892,7 +5892,7 @@ "dimension_sets": [ { "android_devices": "1", - "id": "build245-m4--device2", + "id": "build248-m4--device4", "os": "Android", "pool": "Chrome-perf" } @@ -5921,7 +5921,7 @@ "dimension_sets": [ { "android_devices": "1", - "id": "build245-m4--device2", + "id": "build248-m4--device4", "os": "Android", "pool": "Chrome-perf" } 
@@ -5949,7 +5949,7 @@ "dimension_sets": [ { "android_devices": "1", - "id": "build245-m4--device2", + "id": "build248-m4--device4", "os": "Android", "pool": "Chrome-perf" } @@ -5978,7 +5978,7 @@ "dimension_sets": [ { "android_devices": "1", - "id": "build245-m4--device2", + "id": "build248-m4--device4", "os": "Android", "pool": "Chrome-perf" } @@ -6006,7 +6006,7 @@ "dimension_sets": [ { "android_devices": "1", - "id": "build245-m4--device3", + "id": "build249-m4--device7", "os": "Android", "pool": "Chrome-perf" } @@ -6035,7 +6035,7 @@ "dimension_sets": [ { "android_devices": "1", - "id": "build245-m4--device3", + "id": "build249-m4--device7", "os": "Android", "pool": "Chrome-perf" } @@ -6063,7 +6063,7 @@ "dimension_sets": [ { "android_devices": "1", - "id": "build245-m4--device7", + "id": "build248-m4--device4", "os": "Android", "pool": "Chrome-perf" } @@ -6092,7 +6092,7 @@ "dimension_sets": [ { "android_devices": "1", - "id": "build245-m4--device7", + "id": "build248-m4--device4", "os": "Android", "pool": "Chrome-perf" } @@ -6120,7 +6120,7 @@ "dimension_sets": [ { "android_devices": "1", - "id": "build245-m4--device2", + "id": "build248-m4--device6", "os": "Android", "pool": "Chrome-perf" } @@ -6149,7 +6149,7 @@ "dimension_sets": [ { "android_devices": "1", - "id": "build245-m4--device2", + "id": "build248-m4--device6", "os": "Android", "pool": "Chrome-perf" } @@ -6177,7 +6177,7 @@ "dimension_sets": [ { "android_devices": "1", - "id": "build245-m4--device6", + "id": "build248-m4--device1", "os": "Android", "pool": "Chrome-perf" } @@ -6206,7 +6206,7 @@ "dimension_sets": [ { "android_devices": "1", - "id": "build245-m4--device6", + "id": "build248-m4--device1", "os": "Android", "pool": "Chrome-perf" } @@ -6291,7 +6291,7 @@ "dimension_sets": [ { "android_devices": "1", - "id": "build245-m4--device7", + "id": "build249-m4--device5", "os": "Android", "pool": "Chrome-perf" } @@ -6320,7 +6320,7 @@ "dimension_sets": [ { "android_devices": "1", - "id": 
"build245-m4--device7", + "id": "build249-m4--device5", "os": "Android", "pool": "Chrome-perf" } @@ -6348,7 +6348,7 @@ "dimension_sets": [ { "android_devices": "1", - "id": "build245-m4--device6", + "id": "build248-m4--device1", "os": "Android", "pool": "Chrome-perf" } @@ -6377,7 +6377,7 @@ "dimension_sets": [ { "android_devices": "1", - "id": "build245-m4--device6", + "id": "build248-m4--device1", "os": "Android", "pool": "Chrome-perf" } @@ -6405,7 +6405,7 @@ "dimension_sets": [ { "android_devices": "1", - "id": "build245-m4--device4", + "id": "build248-m4--device1", "os": "Android", "pool": "Chrome-perf" } @@ -6434,7 +6434,7 @@ "dimension_sets": [ { "android_devices": "1", - "id": "build245-m4--device4", + "id": "build248-m4--device1", "os": "Android", "pool": "Chrome-perf" } @@ -6462,7 +6462,7 @@ "dimension_sets": [ { "android_devices": "1", - "id": "build245-m4--device6", + "id": "build248-m4--device2", "os": "Android", "pool": "Chrome-perf" } @@ -6491,7 +6491,7 @@ "dimension_sets": [ { "android_devices": "1", - "id": "build245-m4--device6", + "id": "build248-m4--device2", "os": "Android", "pool": "Chrome-perf" } @@ -6519,7 +6519,7 @@ "dimension_sets": [ { "android_devices": "1", - "id": "build245-m4--device5", + "id": "build249-m4--device3", "os": "Android", "pool": "Chrome-perf" } @@ -6548,7 +6548,7 @@ "dimension_sets": [ { "android_devices": "1", - "id": "build245-m4--device5", + "id": "build249-m4--device3", "os": "Android", "pool": "Chrome-perf" } @@ -6576,7 +6576,7 @@ "dimension_sets": [ { "android_devices": "1", - "id": "build245-m4--device2", + "id": "build249-m4--device6", "os": "Android", "pool": "Chrome-perf" } @@ -6605,7 +6605,7 @@ "dimension_sets": [ { "android_devices": "1", - "id": "build245-m4--device2", + "id": "build249-m4--device6", "os": "Android", "pool": "Chrome-perf" } @@ -6633,7 +6633,7 @@ "dimension_sets": [ { "android_devices": "1", - "id": "build245-m4--device5", + "id": "build245-m4--device1", "os": "Android", "pool": "Chrome-perf" } 
@@ -6662,7 +6662,7 @@ "dimension_sets": [ { "android_devices": "1", - "id": "build245-m4--device5", + "id": "build245-m4--device1", "os": "Android", "pool": "Chrome-perf" } @@ -6690,7 +6690,7 @@ "dimension_sets": [ { "android_devices": "1", - "id": "build245-m4--device7", + "id": "build248-m4--device5", "os": "Android", "pool": "Chrome-perf" } @@ -6719,7 +6719,7 @@ "dimension_sets": [ { "android_devices": "1", - "id": "build245-m4--device7", + "id": "build248-m4--device5", "os": "Android", "pool": "Chrome-perf" } @@ -6747,7 +6747,7 @@ "dimension_sets": [ { "android_devices": "1", - "id": "build245-m4--device7", + "id": "build248-m4--device3", "os": "Android", "pool": "Chrome-perf" } @@ -6776,7 +6776,7 @@ "dimension_sets": [ { "android_devices": "1", - "id": "build245-m4--device7", + "id": "build248-m4--device3", "os": "Android", "pool": "Chrome-perf" } @@ -6804,7 +6804,7 @@ "dimension_sets": [ { "android_devices": "1", - "id": "build245-m4--device7", + "id": "build249-m4--device2", "os": "Android", "pool": "Chrome-perf" } @@ -6833,7 +6833,7 @@ "dimension_sets": [ { "android_devices": "1", - "id": "build245-m4--device7", + "id": "build249-m4--device2", "os": "Android", "pool": "Chrome-perf" } @@ -6861,7 +6861,7 @@ "dimension_sets": [ { "android_devices": "1", - "id": "build245-m4--device3", + "id": "build245-m4--device2", "os": "Android", "pool": "Chrome-perf" } @@ -6890,7 +6890,7 @@ "dimension_sets": [ { "android_devices": "1", - "id": "build245-m4--device3", + "id": "build245-m4--device2", "os": "Android", "pool": "Chrome-perf" } @@ -6918,7 +6918,7 @@ "dimension_sets": [ { "android_devices": "1", - "id": "build245-m4--device3", + "id": "build249-m4--device3", "os": "Android", "pool": "Chrome-perf" } @@ -6947,7 +6947,7 @@ "dimension_sets": [ { "android_devices": "1", - "id": "build245-m4--device3", + "id": "build249-m4--device3", "os": "Android", "pool": "Chrome-perf" } @@ -6975,7 +6975,7 @@ "dimension_sets": [ { "android_devices": "1", - "id": 
"build245-m4--device7", + "id": "build248-m4--device4", "os": "Android", "pool": "Chrome-perf" } @@ -7004,7 +7004,7 @@ "dimension_sets": [ { "android_devices": "1", - "id": "build245-m4--device7", + "id": "build248-m4--device4", "os": "Android", "pool": "Chrome-perf" } @@ -7032,7 +7032,7 @@ "dimension_sets": [ { "android_devices": "1", - "id": "build245-m4--device7", + "id": "build248-m4--device4", "os": "Android", "pool": "Chrome-perf" } @@ -7061,7 +7061,7 @@ "dimension_sets": [ { "android_devices": "1", - "id": "build245-m4--device7", + "id": "build248-m4--device4", "os": "Android", "pool": "Chrome-perf" } @@ -7146,7 +7146,7 @@ "dimension_sets": [ { "android_devices": "1", - "id": "build245-m4--device5", + "id": "build249-m4--device7", "os": "Android", "pool": "Chrome-perf" } @@ -7175,7 +7175,7 @@ "dimension_sets": [ { "android_devices": "1", - "id": "build245-m4--device5", + "id": "build249-m4--device7", "os": "Android", "pool": "Chrome-perf" } @@ -7203,7 +7203,7 @@ "dimension_sets": [ { "android_devices": "1", - "id": "build245-m4--device3", + "id": "build248-m4--device2", "os": "Android", "pool": "Chrome-perf" } @@ -7232,7 +7232,7 @@ "dimension_sets": [ { "android_devices": "1", - "id": "build245-m4--device3", + "id": "build248-m4--device2", "os": "Android", "pool": "Chrome-perf" } @@ -7260,7 +7260,7 @@ "dimension_sets": [ { "android_devices": "1", - "id": "build245-m4--device5", + "id": "build249-m4--device4", "os": "Android", "pool": "Chrome-perf" } @@ -7289,7 +7289,7 @@ "dimension_sets": [ { "android_devices": "1", - "id": "build245-m4--device5", + "id": "build249-m4--device4", "os": "Android", "pool": "Chrome-perf" } @@ -7317,7 +7317,7 @@ "dimension_sets": [ { "android_devices": "1", - "id": "build245-m4--device3", + "id": "build248-m4--device6", "os": "Android", "pool": "Chrome-perf" } @@ -7346,7 +7346,7 @@ "dimension_sets": [ { "android_devices": "1", - "id": "build245-m4--device3", + "id": "build248-m4--device6", "os": "Android", "pool": "Chrome-perf" } 
@@ -7374,7 +7374,7 @@ "dimension_sets": [ { "android_devices": "1", - "id": "build245-m4--device2", + "id": "build248-m4--device3", "os": "Android", "pool": "Chrome-perf" } @@ -7403,7 +7403,7 @@ "dimension_sets": [ { "android_devices": "1", - "id": "build245-m4--device2", + "id": "build248-m4--device3", "os": "Android", "pool": "Chrome-perf" } @@ -7431,7 +7431,7 @@ "dimension_sets": [ { "android_devices": "1", - "id": "build245-m4--device6", + "id": "build249-m4--device2", "os": "Android", "pool": "Chrome-perf" } @@ -7460,7 +7460,7 @@ "dimension_sets": [ { "android_devices": "1", - "id": "build245-m4--device6", + "id": "build249-m4--device2", "os": "Android", "pool": "Chrome-perf" } @@ -7488,7 +7488,7 @@ "dimension_sets": [ { "android_devices": "1", - "id": "build245-m4--device7", + "id": "build248-m4--device5", "os": "Android", "pool": "Chrome-perf" } @@ -7517,7 +7517,7 @@ "dimension_sets": [ { "android_devices": "1", - "id": "build245-m4--device7", + "id": "build248-m4--device5", "os": "Android", "pool": "Chrome-perf" } @@ -7545,7 +7545,7 @@ "dimension_sets": [ { "android_devices": "1", - "id": "build245-m4--device3", + "id": "build245-m4--device5", "os": "Android", "pool": "Chrome-perf" } @@ -7574,7 +7574,7 @@ "dimension_sets": [ { "android_devices": "1", - "id": "build245-m4--device3", + "id": "build245-m4--device5", "os": "Android", "pool": "Chrome-perf" } @@ -7602,7 +7602,7 @@ "dimension_sets": [ { "android_devices": "1", - "id": "build245-m4--device2", + "id": "build249-m4--device5", "os": "Android", "pool": "Chrome-perf" } @@ -7631,7 +7631,7 @@ "dimension_sets": [ { "android_devices": "1", - "id": "build245-m4--device2", + "id": "build249-m4--device5", "os": "Android", "pool": "Chrome-perf" } @@ -7659,7 +7659,7 @@ "dimension_sets": [ { "android_devices": "1", - "id": "build245-m4--device5", + "id": "build249-m4--device5", "os": "Android", "pool": "Chrome-perf" } @@ -7688,7 +7688,7 @@ "dimension_sets": [ { "android_devices": "1", - "id": 
"build245-m4--device5", + "id": "build249-m4--device5", "os": "Android", "pool": "Chrome-perf" } @@ -7716,7 +7716,7 @@ "dimension_sets": [ { "android_devices": "1", - "id": "build245-m4--device6", + "id": "build249-m4--device3", "os": "Android", "pool": "Chrome-perf" } @@ -7744,7 +7744,7 @@ "dimension_sets": [ { "android_devices": "1", - "id": "build245-m4--device1", + "id": "build245-m4--device6", "os": "Android", "pool": "Chrome-perf" } @@ -7773,7 +7773,7 @@ "dimension_sets": [ { "android_devices": "1", - "id": "build245-m4--device1", + "id": "build245-m4--device6", "os": "Android", "pool": "Chrome-perf" } @@ -7801,7 +7801,7 @@ "dimension_sets": [ { "android_devices": "1", - "id": "build245-m4--device7", + "id": "build249-m4--device4", "os": "Android", "pool": "Chrome-perf" } @@ -7830,7 +7830,7 @@ "dimension_sets": [ { "android_devices": "1", - "id": "build245-m4--device7", + "id": "build249-m4--device4", "os": "Android", "pool": "Chrome-perf" } @@ -7859,7 +7859,7 @@ "dimension_sets": [ { "android_devices": "1", - "id": "build245-m4--device6", + "id": "build249-m4--device3", "os": "Android", "pool": "Chrome-perf" } @@ -7887,7 +7887,7 @@ "dimension_sets": [ { "android_devices": "1", - "id": "build245-m4--device7", + "id": "build249-m4--device4", "os": "Android", "pool": "Chrome-perf" } @@ -7916,7 +7916,7 @@ "dimension_sets": [ { "android_devices": "1", - "id": "build245-m4--device7", + "id": "build249-m4--device4", "os": "Android", "pool": "Chrome-perf" } @@ -7944,7 +7944,7 @@ "dimension_sets": [ { "android_devices": "1", - "id": "build245-m4--device6", + "id": "build245-m4--device7", "os": "Android", "pool": "Chrome-perf" } @@ -7973,7 +7973,7 @@ "dimension_sets": [ { "android_devices": "1", - "id": "build245-m4--device6", + "id": "build245-m4--device7", "os": "Android", "pool": "Chrome-perf" } @@ -8001,7 +8001,7 @@ "dimension_sets": [ { "android_devices": "1", - "id": "build245-m4--device3", + "id": "build248-m4--device4", "os": "Android", "pool": "Chrome-perf" } 
@@ -8030,7 +8030,7 @@ "dimension_sets": [ { "android_devices": "1", - "id": "build245-m4--device3", + "id": "build248-m4--device4", "os": "Android", "pool": "Chrome-perf" } @@ -8058,7 +8058,7 @@ "dimension_sets": [ { "android_devices": "1", - "id": "build245-m4--device3", + "id": "build249-m4--device5", "os": "Android", "pool": "Chrome-perf" } @@ -8087,7 +8087,7 @@ "dimension_sets": [ { "android_devices": "1", - "id": "build245-m4--device3", + "id": "build249-m4--device5", "os": "Android", "pool": "Chrome-perf" } @@ -8115,7 +8115,7 @@ "dimension_sets": [ { "android_devices": "1", - "id": "build245-m4--device3", + "id": "build249-m4--device5", "os": "Android", "pool": "Chrome-perf" } @@ -8144,7 +8144,7 @@ "dimension_sets": [ { "android_devices": "1", - "id": "build245-m4--device3", + "id": "build249-m4--device5", "os": "Android", "pool": "Chrome-perf" } @@ -8172,7 +8172,7 @@ "dimension_sets": [ { "android_devices": "1", - "id": "build245-m4--device6", + "id": "build248-m4--device2", "os": "Android", "pool": "Chrome-perf" } @@ -8201,7 +8201,7 @@ "dimension_sets": [ { "android_devices": "1", - "id": "build245-m4--device6", + "id": "build248-m4--device2", "os": "Android", "pool": "Chrome-perf" } @@ -8229,7 +8229,7 @@ "dimension_sets": [ { "android_devices": "1", - "id": "build245-m4--device4", + "id": "build249-m4--device4", "os": "Android", "pool": "Chrome-perf" } @@ -8258,7 +8258,7 @@ "dimension_sets": [ { "android_devices": "1", - "id": "build245-m4--device4", + "id": "build249-m4--device4", "os": "Android", "pool": "Chrome-perf" } @@ -8286,7 +8286,7 @@ "dimension_sets": [ { "android_devices": "1", - "id": "build245-m4--device4", + "id": "build249-m4--device7", "os": "Android", "pool": "Chrome-perf" } @@ -8315,7 +8315,7 @@ "dimension_sets": [ { "android_devices": "1", - "id": "build245-m4--device4", + "id": "build249-m4--device7", "os": "Android", "pool": "Chrome-perf" } @@ -8343,7 +8343,7 @@ "dimension_sets": [ { "android_devices": "1", - "id": 
"build245-m4--device2", + "id": "build248-m4--device4", "os": "Android", "pool": "Chrome-perf" } @@ -8372,7 +8372,7 @@ "dimension_sets": [ { "android_devices": "1", - "id": "build245-m4--device2", + "id": "build248-m4--device4", "os": "Android", "pool": "Chrome-perf" } @@ -8400,7 +8400,7 @@ "dimension_sets": [ { "android_devices": "1", - "id": "build245-m4--device3", + "id": "build248-m4--device5", "os": "Android", "pool": "Chrome-perf" } @@ -8429,7 +8429,7 @@ "dimension_sets": [ { "android_devices": "1", - "id": "build245-m4--device3", + "id": "build248-m4--device5", "os": "Android", "pool": "Chrome-perf" } @@ -8457,7 +8457,7 @@ "dimension_sets": [ { "android_devices": "1", - "id": "build245-m4--device5", + "id": "build248-m4--device5", "os": "Android", "pool": "Chrome-perf" } @@ -8486,7 +8486,7 @@ "dimension_sets": [ { "android_devices": "1", - "id": "build245-m4--device5", + "id": "build248-m4--device5", "os": "Android", "pool": "Chrome-perf" } @@ -8514,7 +8514,7 @@ "dimension_sets": [ { "android_devices": "1", - "id": "build245-m4--device6", + "id": "build248-m4--device3", "os": "Android", "pool": "Chrome-perf" } @@ -8543,7 +8543,7 @@ "dimension_sets": [ { "android_devices": "1", - "id": "build245-m4--device6", + "id": "build248-m4--device3", "os": "Android", "pool": "Chrome-perf" } @@ -8628,7 +8628,7 @@ "dimension_sets": [ { "android_devices": "1", - "id": "build245-m4--device4", + "id": "build248-m4--device7", "os": "Android", "pool": "Chrome-perf" } @@ -8657,7 +8657,7 @@ "dimension_sets": [ { "android_devices": "1", - "id": "build245-m4--device4", + "id": "build248-m4--device7", "os": "Android", "pool": "Chrome-perf" } @@ -8742,7 +8742,7 @@ "dimension_sets": [ { "android_devices": "1", - "id": "build245-m4--device6", + "id": "build245-m4--device1", "os": "Android", "pool": "Chrome-perf" } @@ -8771,7 +8771,7 @@ "dimension_sets": [ { "android_devices": "1", - "id": "build245-m4--device6", + "id": "build245-m4--device1", "os": "Android", "pool": "Chrome-perf" } 
@@ -8799,7 +8799,7 @@ "dimension_sets": [ { "android_devices": "1", - "id": "build245-m4--device7", + "id": "build248-m4--device4", "os": "Android", "pool": "Chrome-perf" } @@ -8828,7 +8828,7 @@ "dimension_sets": [ { "android_devices": "1", - "id": "build245-m4--device7", + "id": "build248-m4--device4", "os": "Android", "pool": "Chrome-perf" } @@ -8856,7 +8856,7 @@ "dimension_sets": [ { "android_devices": "1", - "id": "build245-m4--device2", + "id": "build249-m4--device4", "os": "Android", "pool": "Chrome-perf" } @@ -8885,7 +8885,7 @@ "dimension_sets": [ { "android_devices": "1", - "id": "build245-m4--device2", + "id": "build249-m4--device4", "os": "Android", "pool": "Chrome-perf" } @@ -8913,7 +8913,7 @@ "dimension_sets": [ { "android_devices": "1", - "id": "build245-m4--device4", + "id": "build249-m4--device4", "os": "Android", "pool": "Chrome-perf" } @@ -8942,7 +8942,7 @@ "dimension_sets": [ { "android_devices": "1", - "id": "build245-m4--device4", + "id": "build249-m4--device4", "os": "Android", "pool": "Chrome-perf" } @@ -8970,7 +8970,7 @@ "dimension_sets": [ { "android_devices": "1", - "id": "build245-m4--device7", + "id": "build249-m4--device4", "os": "Android", "pool": "Chrome-perf" } @@ -8999,7 +8999,7 @@ "dimension_sets": [ { "android_devices": "1", - "id": "build245-m4--device7", + "id": "build249-m4--device4", "os": "Android", "pool": "Chrome-perf" } @@ -9027,7 +9027,7 @@ "dimension_sets": [ { "android_devices": "1", - "id": "build245-m4--device2", + "id": "build249-m4--device4", "os": "Android", "pool": "Chrome-perf" } @@ -9056,7 +9056,7 @@ "dimension_sets": [ { "android_devices": "1", - "id": "build245-m4--device2", + "id": "build249-m4--device4", "os": "Android", "pool": "Chrome-perf" } @@ -9084,7 +9084,7 @@ "dimension_sets": [ { "android_devices": "1", - "id": "build245-m4--device4", + "id": "build249-m4--device5", "os": "Android", "pool": "Chrome-perf" } @@ -9113,7 +9113,7 @@ "dimension_sets": [ { "android_devices": "1", - "id": 
"build245-m4--device4", + "id": "build249-m4--device5", "os": "Android", "pool": "Chrome-perf" } @@ -9141,7 +9141,7 @@ "dimension_sets": [ { "android_devices": "1", - "id": "build245-m4--device5", + "id": "build248-m4--device6", "os": "Android", "pool": "Chrome-perf" } @@ -9170,7 +9170,7 @@ "dimension_sets": [ { "android_devices": "1", - "id": "build245-m4--device5", + "id": "build248-m4--device6", "os": "Android", "pool": "Chrome-perf" } @@ -9198,7 +9198,7 @@ "dimension_sets": [ { "android_devices": "1", - "id": "build245-m4--device1", + "id": "build245-m4--device3", "os": "Android", "pool": "Chrome-perf" } @@ -9227,7 +9227,7 @@ "dimension_sets": [ { "android_devices": "1", - "id": "build245-m4--device1", + "id": "build245-m4--device3", "os": "Android", "pool": "Chrome-perf" } @@ -9255,7 +9255,7 @@ "dimension_sets": [ { "android_devices": "1", - "id": "build245-m4--device2", + "id": "build249-m4--device3", "os": "Android", "pool": "Chrome-perf" } @@ -9284,7 +9284,7 @@ "dimension_sets": [ { "android_devices": "1", - "id": "build245-m4--device2", + "id": "build249-m4--device3", "os": "Android", "pool": "Chrome-perf" } @@ -9312,7 +9312,7 @@ "dimension_sets": [ { "android_devices": "1", - "id": "build245-m4--device1", + "id": "build249-m4--device7", "os": "Android", "pool": "Chrome-perf" } @@ -9341,7 +9341,7 @@ "dimension_sets": [ { "android_devices": "1", - "id": "build245-m4--device1", + "id": "build249-m4--device7", "os": "Android", "pool": "Chrome-perf" } @@ -9369,7 +9369,7 @@ "dimension_sets": [ { "android_devices": "1", - "id": "build245-m4--device7", + "id": "build248-m4--device1", "os": "Android", "pool": "Chrome-perf" } @@ -9398,7 +9398,7 @@ "dimension_sets": [ { "android_devices": "1", - "id": "build245-m4--device7", + "id": "build248-m4--device1", "os": "Android", "pool": "Chrome-perf" } @@ -9426,7 +9426,7 @@ "dimension_sets": [ { "android_devices": "1", - "id": "build245-m4--device2", + "id": "build245-m4--device6", "os": "Android", "pool": "Chrome-perf" } 
@@ -9455,7 +9455,7 @@ "dimension_sets": [ { "android_devices": "1", - "id": "build245-m4--device2", + "id": "build245-m4--device6", "os": "Android", "pool": "Chrome-perf" } @@ -9483,7 +9483,7 @@ "dimension_sets": [ { "android_devices": "1", - "id": "build245-m4--device3", + "id": "build248-m4--device6", "os": "Android", "pool": "Chrome-perf" } @@ -9512,7 +9512,7 @@ "dimension_sets": [ { "android_devices": "1", - "id": "build245-m4--device3", + "id": "build248-m4--device6", "os": "Android", "pool": "Chrome-perf" } @@ -9540,7 +9540,7 @@ "dimension_sets": [ { "android_devices": "1", - "id": "build245-m4--device2", + "id": "build249-m4--device4", "os": "Android", "pool": "Chrome-perf" } @@ -9569,7 +9569,7 @@ "dimension_sets": [ { "android_devices": "1", - "id": "build245-m4--device2", + "id": "build249-m4--device4", "os": "Android", "pool": "Chrome-perf" } @@ -9597,7 +9597,7 @@ "dimension_sets": [ { "android_devices": "1", - "id": "build245-m4--device1", + "id": "build248-m4--device6", "os": "Android", "pool": "Chrome-perf" } @@ -9626,7 +9626,7 @@ "dimension_sets": [ { "android_devices": "1", - "id": "build245-m4--device1", + "id": "build248-m4--device6", "os": "Android", "pool": "Chrome-perf" } @@ -9654,7 +9654,7 @@ "dimension_sets": [ { "android_devices": "1", - "id": "build245-m4--device5", + "id": "build248-m4--device5", "os": "Android", "pool": "Chrome-perf" } @@ -9683,7 +9683,7 @@ "dimension_sets": [ { "android_devices": "1", - "id": "build245-m4--device5", + "id": "build248-m4--device5", "os": "Android", "pool": "Chrome-perf" } @@ -9711,7 +9711,7 @@ "dimension_sets": [ { "android_devices": "1", - "id": "build245-m4--device4", + "id": "build248-m4--device3", "os": "Android", "pool": "Chrome-perf" } @@ -9740,7 +9740,7 @@ "dimension_sets": [ { "android_devices": "1", - "id": "build245-m4--device4", + "id": "build248-m4--device3", "os": "Android", "pool": "Chrome-perf" } @@ -9768,7 +9768,7 @@ "dimension_sets": [ { "android_devices": "1", - "id": 
"build245-m4--device6", + "id": "build249-m4--device7", "os": "Android", "pool": "Chrome-perf" } @@ -9797,7 +9797,7 @@ "dimension_sets": [ { "android_devices": "1", - "id": "build245-m4--device6", + "id": "build249-m4--device7", "os": "Android", "pool": "Chrome-perf" } @@ -9825,7 +9825,7 @@ "dimension_sets": [ { "android_devices": "1", - "id": "build245-m4--device6", + "id": "build248-m4--device4", "os": "Android", "pool": "Chrome-perf" } @@ -9854,7 +9854,7 @@ "dimension_sets": [ { "android_devices": "1", - "id": "build245-m4--device6", + "id": "build248-m4--device4", "os": "Android", "pool": "Chrome-perf" } @@ -9882,7 +9882,7 @@ "dimension_sets": [ { "android_devices": "1", - "id": "build245-m4--device1", + "id": "build249-m4--device6", "os": "Android", "pool": "Chrome-perf" } @@ -9911,7 +9911,7 @@ "dimension_sets": [ { "android_devices": "1", - "id": "build245-m4--device1", + "id": "build249-m4--device6", "os": "Android", "pool": "Chrome-perf" } @@ -9939,7 +9939,7 @@ "dimension_sets": [ { "android_devices": "1", - "id": "build245-m4--device3", + "id": "build249-m4--device4", "os": "Android", "pool": "Chrome-perf" } @@ -9968,7 +9968,7 @@ "dimension_sets": [ { "android_devices": "1", - "id": "build245-m4--device3", + "id": "build249-m4--device4", "os": "Android", "pool": "Chrome-perf" } @@ -9996,7 +9996,7 @@ "dimension_sets": [ { "android_devices": "1", - "id": "build245-m4--device2", + "id": "build249-m4--device5", "os": "Android", "pool": "Chrome-perf" } @@ -10025,7 +10025,7 @@ "dimension_sets": [ { "android_devices": "1", - "id": "build245-m4--device2", + "id": "build249-m4--device5", "os": "Android", "pool": "Chrome-perf" } @@ -10053,7 +10053,7 @@ "dimension_sets": [ { "android_devices": "1", - "id": "build245-m4--device2", + "id": "build245-m4--device5", "os": "Android", "pool": "Chrome-perf" } @@ -10082,7 +10082,7 @@ "dimension_sets": [ { "android_devices": "1", - "id": "build245-m4--device2", + "id": "build245-m4--device5", "os": "Android", "pool": 
"Chrome-perf" } @@ -10110,7 +10110,7 @@ "dimension_sets": [ { "android_devices": "1", - "id": "build245-m4--device6", + "id": "build245-m4--device5", "os": "Android", "pool": "Chrome-perf" } @@ -10139,7 +10139,7 @@ "dimension_sets": [ { "android_devices": "1", - "id": "build245-m4--device6", + "id": "build245-m4--device5", "os": "Android", "pool": "Chrome-perf" } @@ -10167,7 +10167,7 @@ "dimension_sets": [ { "android_devices": "1", - "id": "build245-m4--device4", + "id": "build249-m4--device4", "os": "Android", "pool": "Chrome-perf" } @@ -10196,7 +10196,7 @@ "dimension_sets": [ { "android_devices": "1", - "id": "build245-m4--device4", + "id": "build249-m4--device4", "os": "Android", "pool": "Chrome-perf" } @@ -10224,7 +10224,7 @@ "dimension_sets": [ { "android_devices": "1", - "id": "build245-m4--device1", + "id": "build248-m4--device2", "os": "Android", "pool": "Chrome-perf" } @@ -10253,7 +10253,7 @@ "dimension_sets": [ { "android_devices": "1", - "id": "build245-m4--device1", + "id": "build248-m4--device2", "os": "Android", "pool": "Chrome-perf" } @@ -10281,7 +10281,7 @@ "dimension_sets": [ { "android_devices": "1", - "id": "build245-m4--device1", + "id": "build245-m4--device7", "os": "Android", "pool": "Chrome-perf" } @@ -10310,7 +10310,7 @@ "dimension_sets": [ { "android_devices": "1", - "id": "build245-m4--device1", + "id": "build245-m4--device7", "os": "Android", "pool": "Chrome-perf" } @@ -10338,7 +10338,7 @@ "dimension_sets": [ { "android_devices": "1", - "id": "build245-m4--device6", + "id": "build248-m4--device2", "os": "Android", "pool": "Chrome-perf" } @@ -10367,7 +10367,7 @@ "dimension_sets": [ { "android_devices": "1", - "id": "build245-m4--device6", + "id": "build248-m4--device2", "os": "Android", "pool": "Chrome-perf" } @@ -10395,7 +10395,7 @@ "dimension_sets": [ { "android_devices": "1", - "id": "build245-m4--device5", + "id": "build249-m4--device5", "os": "Android", "pool": "Chrome-perf" } @@ -10424,7 +10424,7 @@ "dimension_sets": [ { 
"android_devices": "1", - "id": "build245-m4--device5", + "id": "build249-m4--device5", "os": "Android", "pool": "Chrome-perf" } @@ -10452,7 +10452,7 @@ "dimension_sets": [ { "android_devices": "1", - "id": "build245-m4--device1", + "id": "build249-m4--device5", "os": "Android", "pool": "Chrome-perf" } @@ -10481,7 +10481,7 @@ "dimension_sets": [ { "android_devices": "1", - "id": "build245-m4--device1", + "id": "build249-m4--device5", "os": "Android", "pool": "Chrome-perf" } @@ -10509,7 +10509,7 @@ "dimension_sets": [ { "android_devices": "1", - "id": "build245-m4--device1", + "id": "build249-m4--device7", "os": "Android", "pool": "Chrome-perf" } @@ -10538,7 +10538,7 @@ "dimension_sets": [ { "android_devices": "1", - "id": "build245-m4--device1", + "id": "build249-m4--device7", "os": "Android", "pool": "Chrome-perf" } @@ -10566,7 +10566,7 @@ "dimension_sets": [ { "android_devices": "1", - "id": "build245-m4--device2", + "id": "build248-m4--device7", "os": "Android", "pool": "Chrome-perf" } @@ -10594,7 +10594,7 @@ "dimension_sets": [ { "android_devices": "1", - "id": "build245-m4--device3", + "id": "build245-m4--device6", "os": "Android", "pool": "Chrome-perf" } @@ -10623,7 +10623,7 @@ "dimension_sets": [ { "android_devices": "1", - "id": "build245-m4--device3", + "id": "build245-m4--device6", "os": "Android", "pool": "Chrome-perf" } @@ -10652,7 +10652,7 @@ "dimension_sets": [ { "android_devices": "1", - "id": "build245-m4--device2", + "id": "build248-m4--device7", "os": "Android", "pool": "Chrome-perf" } @@ -10737,7 +10737,7 @@ "dimension_sets": [ { "android_devices": "1", - "id": "build245-m4--device2", + "id": "build248-m4--device5", "os": "Android", "pool": "Chrome-perf" } @@ -10766,7 +10766,7 @@ "dimension_sets": [ { "android_devices": "1", - "id": "build245-m4--device2", + "id": "build248-m4--device5", "os": "Android", "pool": "Chrome-perf" } @@ -10794,7 +10794,7 @@ "dimension_sets": [ { "android_devices": "1", - "id": "build245-m4--device5", + "id": 
"build248-m4--device7", "os": "Android", "pool": "Chrome-perf" } @@ -10823,7 +10823,7 @@ "dimension_sets": [ { "android_devices": "1", - "id": "build245-m4--device5", + "id": "build248-m4--device7", "os": "Android", "pool": "Chrome-perf" } @@ -10851,7 +10851,7 @@ "dimension_sets": [ { "android_devices": "1", - "id": "build245-m4--device6", + "id": "build245-m4--device4", "os": "Android", "pool": "Chrome-perf" } @@ -10880,7 +10880,7 @@ "dimension_sets": [ { "android_devices": "1", - "id": "build245-m4--device6", + "id": "build245-m4--device4", "os": "Android", "pool": "Chrome-perf" } @@ -10908,7 +10908,7 @@ "dimension_sets": [ { "android_devices": "1", - "id": "build245-m4--device1", + "id": "build249-m4--device6", "os": "Android", "pool": "Chrome-perf" } @@ -10937,7 +10937,7 @@ "dimension_sets": [ { "android_devices": "1", - "id": "build245-m4--device1", + "id": "build249-m4--device6", "os": "Android", "pool": "Chrome-perf" } @@ -10965,7 +10965,7 @@ "dimension_sets": [ { "android_devices": "1", - "id": "build245-m4--device5", + "id": "build249-m4--device6", "os": "Android", "pool": "Chrome-perf" } @@ -10994,7 +10994,7 @@ "dimension_sets": [ { "android_devices": "1", - "id": "build245-m4--device5", + "id": "build249-m4--device6", "os": "Android", "pool": "Chrome-perf" }
diff --git a/third_party/WebKit/LayoutTests/LeakExpectations b/third_party/WebKit/LayoutTests/LeakExpectations index 24b6c47..61f8a3f6 100644 --- a/third_party/WebKit/LayoutTests/LeakExpectations +++ b/third_party/WebKit/LayoutTests/LeakExpectations
@@ -52,7 +52,7 @@ # ----------------------------------------------------------------- crbug.com/364417 paint/invalidation/japanese-rl-selection-clear.html [ Leak ] -crbug.com/364417 virtual/spinvalidation/paint/invalidation/japanese-rl-selection-clear.html [ Leak ] +crbug.com/364417 virtual/stable/paint/invalidation/japanese-rl-selection-clear.html [ Leak ] crbug.com/455369 fast/html/marquee-destroyed-without-removed-from-crash.html [ Leak Pass ]
diff --git a/third_party/WebKit/LayoutTests/TestExpectations b/third_party/WebKit/LayoutTests/TestExpectations index d8a254c..438e095 100644 --- a/third_party/WebKit/LayoutTests/TestExpectations +++ b/third_party/WebKit/LayoutTests/TestExpectations
@@ -1635,7 +1635,8 @@ crbug.com/600248 imported/wpt/web-animations/interfaces/Animation/playbackRate.html [ Pass Failure ] # This test may either fail a DCHECK or time out: # https://storage.googleapis.com/chromium-layout-test-archives/mac_chromium_rel_ng/364861/layout-test-results/results.html -crbug.com/600248 imported/wpt/web-animations/interfaces/Animation/cancel.html [ Crash Timeout ] +# However, this test seems to just fail now. Allowing this, with a TODO(https://crbug.com/678077) to fix. +crbug.com/600248 imported/wpt/web-animations/interfaces/Animation/cancel.html [ Pass Failure Crash Timeout ] crbug.com/600248 imported/wpt/web-animations/timing-model/animations/updating-the-finished-state.html [ Pass Failure Timeout ] crbug.com/611658 [ Win7 ] fast/forms/text/text-font-height-mismatch.html [ Failure ]
diff --git a/third_party/WebKit/LayoutTests/http/tests/inspector/resource-tree/resource-tree-events-expected.txt b/third_party/WebKit/LayoutTests/http/tests/inspector/resource-tree/resource-tree-events-expected.txt index 997d3c1f..4bdd2f9 100644 --- a/third_party/WebKit/LayoutTests/http/tests/inspector/resource-tree/resource-tree-events-expected.txt +++ b/third_party/WebKit/LayoutTests/http/tests/inspector/resource-tree/resource-tree-events-expected.txt
@@ -3,6 +3,7 @@ Navigating root frame FrameAdded : root1 + SecurityOriginRemoved : http://127.0.0.1:8000 FrameNavigated : root1 : loader-root1 MainFrameNavigated : root1 : loader-root1 SecurityOriginAdded : http://frame/root1.html @@ -11,21 +12,21 @@ FrameNavigated : child1 : loader-child1 SecurityOriginAdded : http://frame/child1.html Navigating child frame 1 to a different URL - SecurityOriginRemoved : http://frame/child1.html FrameNavigated : child1 : loader-child1 + SecurityOriginRemoved : http://frame/child1.html SecurityOriginAdded : http://frame/child1-new.html Navigating child frame 2 FrameAdded : child2 FrameNavigated : child2 : loader-child2 SecurityOriginAdded : http://frame/child2.html Detaching child frame 1 - SecurityOriginRemoved : http://frame/child1-new.html FrameDetached : child1 + SecurityOriginRemoved : http://frame/child1-new.html Navigating root frame - SecurityOriginRemoved : http://frame/child2.html - SecurityOriginRemoved : http://frame/root1.html FrameDetached : child2 FrameDetached : root1 + SecurityOriginRemoved : http://frame/root1.html + SecurityOriginRemoved : http://frame/child2.html FrameAdded : root2 FrameNavigated : root2 : loader-root2 MainFrameNavigated : root2 : loader-root2
diff --git a/third_party/WebKit/LayoutTests/inspector/tracing/timeline-misc/timeline-aggregated-details-expected.txt b/third_party/WebKit/LayoutTests/inspector/tracing/timeline-misc/timeline-aggregated-details-expected.txt index 7b3784d..d74f19f 100644 --- a/third_party/WebKit/LayoutTests/inspector/tracing/timeline-misc/timeline-aggregated-details-expected.txt +++ b/third_party/WebKit/LayoutTests/inspector/tracing/timeline-misc/timeline-aggregated-details-expected.txt
@@ -204,7 +204,7 @@ f: 0.100 0.100 CallTree Group by: Domain - unattributed: 0.000 22.125 + unattributed: 0.000 12.250 a: 0.000 11.125 b: 0.000 11.125 c: 8.250 8.250 @@ -214,15 +214,16 @@ l: 0.000 0.125 a: 0.025 0.125 Layout: 0.100 0.100 - sin: 0.000 9.875 - f: 0.000 9.875 - l: 0.000 9.875 - a: 9.875 9.875 x: 0.000 1.000 y: 0.600 1.000 z: 0.000 0.200 w: 0.200 0.200 w: 0.200 0.200 + [V8 Runtime]: 0.000 9.875 + sin: 0.000 9.875 + f: 0.000 9.875 + l: 0.000 9.875 + a: 9.875 9.875 abc.com: 0.101 0.215 recursive_a: 0.101 0.215 recursive_b: 0.102 0.114 @@ -256,7 +257,6 @@ a: 0.100 0.100 l: 0.100 0.100 f: 0.100 0.100 - sin: 0.000 9.875 x: 0.000 1.000 y: 0.600 1.000 x: 0.600 1.000 @@ -279,7 +279,7 @@ recursive_a: 0.008 0.008 CallTree Group by: Subdomain - unattributed: 0.000 22.125 + unattributed: 0.000 12.250 a: 0.000 11.125 b: 0.000 11.125 c: 8.250 8.250 @@ -289,15 +289,16 @@ l: 0.000 0.125 a: 0.025 0.125 Layout: 0.100 0.100 - sin: 0.000 9.875 - f: 0.000 9.875 - l: 0.000 9.875 - a: 9.875 9.875 x: 0.000 1.000 y: 0.600 1.000 z: 0.000 0.200 w: 0.200 0.200 w: 0.200 0.200 + [V8 Runtime]: 0.000 9.875 + sin: 0.000 9.875 + f: 0.000 9.875 + l: 0.000 9.875 + a: 9.875 9.875 xyz.abc.com: 0.101 0.215 recursive_a: 0.101 0.215 recursive_b: 0.102 0.114 @@ -331,7 +332,6 @@ a: 0.100 0.100 l: 0.100 0.100 f: 0.100 0.100 - sin: 0.000 9.875 x: 0.000 1.000 y: 0.600 1.000 x: 0.600 1.000
diff --git a/third_party/WebKit/LayoutTests/inspector/tracing/timeline-network/timeline-network-resource-details-expected.txt b/third_party/WebKit/LayoutTests/inspector/tracing/timeline-network/timeline-network-resource-details-expected.txt index 2051ff30..30ad017 100644 --- a/third_party/WebKit/LayoutTests/inspector/tracing/timeline-network/timeline-network-resource-details-expected.txt +++ b/third_party/WebKit/LayoutTests/inspector/tracing/timeline-network/timeline-network-resource-details-expected.txt
@@ -6,11 +6,13 @@ Request Method: GET Priority: Low Mime Type: string +Encoded Length: 0 B Initiator: timeline-network-resource-details.html:15 URL: anImage.png Duration: string Request Method: GET Priority: Low Mime Type: string +Encoded Length: 0 B Preview:
diff --git a/third_party/WebKit/LayoutTests/inspector/tracing/timeline-network/timeline-network-resource-expected.txt b/third_party/WebKit/LayoutTests/inspector/tracing/timeline-network/timeline-network-resource-expected.txt index 45a33712..46b7b48 100644 --- a/third_party/WebKit/LayoutTests/inspector/tracing/timeline-network/timeline-network-resource-expected.txt +++ b/third_party/WebKit/LayoutTests/inspector/tracing/timeline-network/timeline-network-resource-expected.txt
@@ -23,7 +23,10 @@ ResourceReceiveResponse Properties: { data : { + encodedDataLength : <number> frame : <string> + fromCache : false + fromServiceWorker : false mimeType : <string> requestId : <string> statusCode : 0 @@ -39,6 +42,7 @@ { data : { didFail : false + encodedDataLength : <number> finishTime : <number> requestId : <string> }
diff --git a/third_party/WebKit/LayoutTests/storage/indexeddb/idbindex-getAll-enforcerange.html b/third_party/WebKit/LayoutTests/storage/indexeddb/idbindex-getAll-enforcerange.html new file mode 100644 index 0000000..358c716 --- /dev/null +++ b/third_party/WebKit/LayoutTests/storage/indexeddb/idbindex-getAll-enforcerange.html
@@ -0,0 +1,26 @@ +<!DOCTYPE html> +<title>IndexedDB: IDBIndex getAll() uses [EnforceRange]</title> +<meta charset=utf-8> +<script src="../../resources/testharness.js"></script> +<script src="../../resources/testharnessreport.js"></script> +<script src="resources/testharness-helpers.js"></script> +<script> + +indexeddb_test( + (t, db) => { + const store = db.createObjectStore('store'); + const index = store.createIndex('index', 'keyPath'); + }, + (t, db) => { + const tx = db.transaction('store'); + const store = tx.objectStore('store'); + const index = store.index('index'); + [NaN, Infinity, -Infinity, -1, -Number.MAX_SAFE_INTEGER].forEach(count => { + assert_throws(TypeError(), () => { index.getAll(null, count); }, + `getAll with count ${count} count should throw TypeError`); + }); + t.done(); + }, + `IDBIndex.getAll() uses [EnforceRange]` +); +</script>
diff --git a/third_party/WebKit/LayoutTests/storage/indexeddb/idbindex-getAllKeys-enforcerange.html b/third_party/WebKit/LayoutTests/storage/indexeddb/idbindex-getAllKeys-enforcerange.html new file mode 100644 index 0000000..5360dc8 --- /dev/null +++ b/third_party/WebKit/LayoutTests/storage/indexeddb/idbindex-getAllKeys-enforcerange.html
@@ -0,0 +1,26 @@ +<!DOCTYPE html> +<title>IndexedDB: IDBIndex getAllKeys() uses [EnforceRange]</title> +<meta charset=utf-8> +<script src="../../resources/testharness.js"></script> +<script src="../../resources/testharnessreport.js"></script> +<script src="resources/testharness-helpers.js"></script> +<script> + +indexeddb_test( + (t, db) => { + const store = db.createObjectStore('store'); + const index = store.createIndex('index', 'keyPath'); + }, + (t, db) => { + const tx = db.transaction('store'); + const store = tx.objectStore('store'); + const index = store.index('index'); + [NaN, Infinity, -Infinity, -1, -Number.MAX_SAFE_INTEGER].forEach(count => { + assert_throws(TypeError(), () => { index.getAllKeys(null, count); }, + `getAllKeys with count ${count} count should throw TypeError`); + }); + t.done(); + }, + `IDBIndex.getAllKeys() uses [EnforceRange]` +); +</script>
diff --git a/third_party/WebKit/LayoutTests/storage/indexeddb/idbobjectstore-getAll-enforcerange.html b/third_party/WebKit/LayoutTests/storage/indexeddb/idbobjectstore-getAll-enforcerange.html new file mode 100644 index 0000000..c62f1736 --- /dev/null +++ b/third_party/WebKit/LayoutTests/storage/indexeddb/idbobjectstore-getAll-enforcerange.html
@@ -0,0 +1,24 @@ +<!DOCTYPE html> +<title>IndexedDB: IDBObjectStore getAll() uses [EnforceRange]</title> +<meta charset=utf-8> +<script src="../../resources/testharness.js"></script> +<script src="../../resources/testharnessreport.js"></script> +<script src="resources/testharness-helpers.js"></script> +<script> + +indexeddb_test( + (t, db) => { + const store = db.createObjectStore('store'); + }, + (t, db) => { + const tx = db.transaction('store'); + const store = tx.objectStore('store'); + [NaN, Infinity, -Infinity, -1, -Number.MAX_SAFE_INTEGER].forEach(count => { + assert_throws(TypeError(), () => { store.getAll(null, count); }, + `getAll with count ${count} count should throw TypeError`); + }); + t.done(); + }, + `IDBObjectStore.getAll() uses [EnforceRange]` +); +</script>
diff --git a/third_party/WebKit/LayoutTests/storage/indexeddb/idbobjectstore-getAllKeys-enforcerange.html b/third_party/WebKit/LayoutTests/storage/indexeddb/idbobjectstore-getAllKeys-enforcerange.html new file mode 100644 index 0000000..45d4843 --- /dev/null +++ b/third_party/WebKit/LayoutTests/storage/indexeddb/idbobjectstore-getAllKeys-enforcerange.html
@@ -0,0 +1,24 @@ +<!DOCTYPE html> +<title>IndexedDB: IDBObjectStore getAllKeys() uses [EnforceRange]</title> +<meta charset=utf-8> +<script src="../../resources/testharness.js"></script> +<script src="../../resources/testharnessreport.js"></script> +<script src="resources/testharness-helpers.js"></script> +<script> + +indexeddb_test( + (t, db) => { + const store = db.createObjectStore('store'); + }, + (t, db) => { + const tx = db.transaction('store'); + const store = tx.objectStore('store'); + [NaN, Infinity, -Infinity, -1, -Number.MAX_SAFE_INTEGER].forEach(count => { + assert_throws(TypeError(), () => { store.getAllKeys(null, count); }, + `getAllKeys with count ${count} should throw TypeError`); + }); + t.done(); + }, + `IDBObjectStore.getAllKeys() uses [EnforceRange]` +);
diff --git a/third_party/WebKit/Source/BUILD.gn b/third_party/WebKit/Source/BUILD.gn index f862546..ca5d37ce 100644 --- a/third_party/WebKit/Source/BUILD.gn +++ b/third_party/WebKit/Source/BUILD.gn
@@ -69,17 +69,6 @@ # Force include the header. cflags = [ "/FI$precompiled_header" ] - - # Disable warning for "this file was empty after preprocessing". This - # error is generated only in C mode for ANSI compatibility. It conflicts - # with precompiled headers since the source file that's "compiled" for - # making the precompiled header is empty. - # - # This error doesn't happen every time. In VS2013, it seems if the .pch - # file doesn't exist, no error will be generated (probably MS tested this - # case but forgot the other one?). To reproduce this error, do a build, - # then delete the precompile.c.obj file, then build again. - cflags_c = [ "/wd4206" ] } } }
diff --git a/third_party/WebKit/Source/core/BUILD.gn b/third_party/WebKit/Source/core/BUILD.gn index 8bb5ecc..b6e498519 100644 --- a/third_party/WebKit/Source/core/BUILD.gn +++ b/third_party/WebKit/Source/core/BUILD.gn
@@ -66,17 +66,6 @@ # Force include the header. cflags = [ "/FI$precompiled_header" ] - - # Disable warning for "this file was empty after preprocessing". This - # error is generated only in C mode for ANSI compatibility. It conflicts - # with precompiled headers since the source file that's "compiled" for - # making the precompiled header is empty. - # - # This error doesn't happen every time. In VS2013, it seems if the .pch - # file doesn't exist, no error will be generated (probably MS tested this - # case but forgot the other one?). To reproduce this error, do a build, - # then delete the precompile.c.obj file, then build again. - cflags_c = [ "/wd4206" ] } } }
diff --git a/third_party/WebKit/Source/core/css/parser/CSSParserMode.h b/third_party/WebKit/Source/core/css/parser/CSSParserMode.h index 4b8aa17..05853f6 100644 --- a/third_party/WebKit/Source/core/css/parser/CSSParserMode.h +++ b/third_party/WebKit/Source/core/css/parser/CSSParserMode.h
@@ -37,9 +37,6 @@ enum CSSParserMode { HTMLStandardMode, HTMLQuirksMode, - // HTML attributes are parsed in quirks mode but also allows internal - // properties and values. - HTMLAttributeMode, // SVG attributes are parsed in quirks mode but rules differ slightly. SVGAttributeMode, // @viewport/@font-face rules are specially tagged in StylePropertySet so @@ -52,17 +49,13 @@ }; inline bool isQuirksModeBehavior(CSSParserMode mode) { - return mode == HTMLQuirksMode; // || mode == HTMLAttributeMode; + return mode == HTMLQuirksMode; } inline bool isUASheetBehavior(CSSParserMode mode) { return mode == UASheetMode; } -inline bool isUnitLessLengthParsingEnabledForMode(CSSParserMode mode) { - return mode == HTMLAttributeMode || mode == SVGAttributeMode; -} - inline bool isCSSViewportParsingEnabledForMode(CSSParserMode mode) { return mode == CSSViewportRuleMode; }
diff --git a/third_party/WebKit/Source/core/css/parser/CSSPropertyParserHelpers.cpp b/third_party/WebKit/Source/core/css/parser/CSSPropertyParserHelpers.cpp index a537649..b63c13b 100644 --- a/third_party/WebKit/Source/core/css/parser/CSSPropertyParserHelpers.cpp +++ b/third_party/WebKit/Source/core/css/parser/CSSPropertyParserHelpers.cpp
@@ -154,9 +154,7 @@ inline bool shouldAcceptUnitlessLength(double value, CSSParserMode cssParserMode, UnitlessQuirk unitless) { - // TODO(timloh): Presentational HTML attributes shouldn't use the CSS parser - // for lengths - return value == 0 || isUnitLessLengthParsingEnabledForMode(cssParserMode) || + return value == 0 || cssParserMode == SVGAttributeMode || (cssParserMode == HTMLQuirksMode && unitless == UnitlessQuirk::Allow); }
diff --git a/third_party/WebKit/Source/core/dom/PresentationAttributeStyle.cpp b/third_party/WebKit/Source/core/dom/PresentationAttributeStyle.cpp index a9009bb..ab70f65 100644 --- a/third_party/WebKit/Source/core/dom/PresentationAttributeStyle.cpp +++ b/third_party/WebKit/Source/core/dom/PresentationAttributeStyle.cpp
@@ -192,7 +192,7 @@ cacheCleaner.didHitPresentationAttributeCache(); } else { style = MutableStylePropertySet::create( - element.isSVGElement() ? SVGAttributeMode : HTMLAttributeMode); + element.isSVGElement() ? SVGAttributeMode : HTMLStandardMode); AttributeCollection attributes = element.attributesWithoutUpdate(); for (const Attribute& attr : attributes) element.collectStyleForPresentationAttribute(
diff --git a/third_party/WebKit/Source/core/inspector/InspectorTraceEvents.cpp b/third_party/WebKit/Source/core/inspector/InspectorTraceEvents.cpp index 5dc9862..8280c6a3 100644 --- a/third_party/WebKit/Source/core/inspector/InspectorTraceEvents.cpp +++ b/third_party/WebKit/Source/core/inspector/InspectorTraceEvents.cpp
@@ -555,6 +555,9 @@ value->setString("frame", toHexString(frame)); value->setInteger("statusCode", response.httpStatusCode()); value->setString("mimeType", response.mimeType().getString().isolatedCopy()); + value->setDouble("encodedDataLength", response.encodedDataLength()); + value->setBoolean("fromCache", response.wasCached()); + value->setBoolean("fromServiceWorker", response.wasFetchedViaServiceWorker()); if (response.resourceLoadTiming()) { value->beginDictionary("timing"); recordTiming(*response.resourceLoadTiming(), value.get()); @@ -581,12 +584,14 @@ std::unique_ptr<TracedValue> InspectorResourceFinishEvent::data( unsigned long identifier, double finishTime, - bool didFail) { + bool didFail, + int64_t encodedDataLength) { String requestId = IdentifiersFactory::requestId(identifier); std::unique_ptr<TracedValue> value = TracedValue::create(); value->setString("requestId", requestId); value->setBoolean("didFail", didFail); + value->setDouble("encodedDataLength", encodedDataLength); if (finishTime) value->setDouble("finishTime", finishTime); return value;
diff --git a/third_party/WebKit/Source/core/inspector/InspectorTraceEvents.h b/third_party/WebKit/Source/core/inspector/InspectorTraceEvents.h index 87b8f26..b5e314c 100644 --- a/third_party/WebKit/Source/core/inspector/InspectorTraceEvents.h +++ b/third_party/WebKit/Source/core/inspector/InspectorTraceEvents.h
@@ -214,7 +214,8 @@ namespace InspectorResourceFinishEvent { std::unique_ptr<TracedValue> data(unsigned long identifier, double finishTime, - bool didFail); + bool didFail, + int64_t encodedDataLength); } namespace InspectorTimerInstallEvent {
diff --git a/third_party/WebKit/Source/core/layout/LayoutObject.cpp b/third_party/WebKit/Source/core/layout/LayoutObject.cpp index cb6243a..ca896f73 100644 --- a/third_party/WebKit/Source/core/layout/LayoutObject.cpp +++ b/third_party/WebKit/Source/core/layout/LayoutObject.cpp
@@ -139,13 +139,13 @@ void* LayoutObject::operator new(size_t sz) { ASSERT(isMainThread()); - return partitionAlloc(WTF::Partitions::layoutPartition(), sz, + return PartitionAlloc(WTF::Partitions::layoutPartition(), sz, WTF_HEAP_PROFILER_TYPE_NAME(LayoutObject)); } void LayoutObject::operator delete(void* ptr) { ASSERT(isMainThread()); - WTF::partitionFree(ptr); + WTF::PartitionFree(ptr); } LayoutObject* LayoutObject::createObject(Element* element,
diff --git a/third_party/WebKit/Source/core/layout/line/InlineBox.cpp b/third_party/WebKit/Source/core/layout/line/InlineBox.cpp index 738cc4bb..ba2aa25 100644 --- a/third_party/WebKit/Source/core/layout/line/InlineBox.cpp +++ b/third_party/WebKit/Source/core/layout/line/InlineBox.cpp
@@ -80,12 +80,12 @@ } void* InlineBox::operator new(size_t sz) { - return partitionAlloc(WTF::Partitions::layoutPartition(), sz, + return PartitionAlloc(WTF::Partitions::layoutPartition(), sz, WTF_HEAP_PROFILER_TYPE_NAME(InlineBox)); } void InlineBox::operator delete(void* ptr) { - WTF::partitionFree(ptr); + WTF::PartitionFree(ptr); } const char* InlineBox::boxName() const {
diff --git a/third_party/WebKit/Source/core/loader/FrameFetchContext.cpp b/third_party/WebKit/Source/core/loader/FrameFetchContext.cpp index c0ccedf..355011b 100644 --- a/third_party/WebKit/Source/core/loader/FrameFetchContext.cpp +++ b/third_party/WebKit/Source/core/loader/FrameFetchContext.cpp
@@ -460,9 +460,9 @@ void FrameFetchContext::dispatchDidFinishLoading(unsigned long identifier, double finishTime, int64_t encodedDataLength) { - TRACE_EVENT1( - "devtools.timeline", "ResourceFinish", "data", - InspectorResourceFinishEvent::data(identifier, finishTime, false)); + TRACE_EVENT1("devtools.timeline", "ResourceFinish", "data", + InspectorResourceFinishEvent::data(identifier, finishTime, false, + encodedDataLength)); frame()->loader().progress().completeProgress(identifier); InspectorInstrumentation::didFinishLoading(frame(), identifier, finishTime, encodedDataLength); @@ -475,7 +475,8 @@ int64_t encodedDataLength, bool isInternalRequest) { TRACE_EVENT1("devtools.timeline", "ResourceFinish", "data", - InspectorResourceFinishEvent::data(identifier, 0, true)); + InspectorResourceFinishEvent::data(identifier, 0, true, + encodedDataLength)); frame()->loader().progress().completeProgress(identifier); InspectorInstrumentation::didFailLoading(frame(), identifier, error); // Notification to FrameConsole should come AFTER InspectorInstrumentation
diff --git a/third_party/WebKit/Source/core/loader/PingLoader.cpp b/third_party/WebKit/Source/core/loader/PingLoader.cpp index aa216f5..5e597c1 100644 --- a/third_party/WebKit/Source/core/loader/PingLoader.cpp +++ b/third_party/WebKit/Source/core/loader/PingLoader.cpp
@@ -210,8 +210,8 @@ bool willFollowRedirect(WebURLRequest&, const WebURLResponse&) override; void didReceiveResponse(const WebURLResponse&) final; void didReceiveData(const char*, int) final; - void didFinishLoading(double, int64_t, int64_t) final; - void didFail(const WebURLError&, int64_t, int64_t) final; + void didFinishLoading(double, int64_t, int64_t encodedDataLength) final; + void didFail(const WebURLError&, int64_t, int64_t encodedDataLength) final; void timeout(TimerBase*); @@ -335,7 +335,7 @@ void PingLoaderImpl::didReceiveResponse(const WebURLResponse& response) { if (frame()) { TRACE_EVENT1("devtools.timeline", "ResourceFinish", "data", - InspectorResourceFinishEvent::data(m_identifier, 0, true)); + InspectorResourceFinishEvent::data(m_identifier, 0, true, 0)); const ResourceResponse& resourceResponse = response.toResourceResponse(); InspectorInstrumentation::didReceiveResourceResponse( frame(), m_identifier, 0, resourceResponse, 0); @@ -344,19 +344,23 @@ dispose(); } -void PingLoaderImpl::didReceiveData(const char*, int) { +void PingLoaderImpl::didReceiveData(const char*, int dataLength) { if (frame()) { - TRACE_EVENT1("devtools.timeline", "ResourceFinish", "data", - InspectorResourceFinishEvent::data(m_identifier, 0, true)); + TRACE_EVENT1( + "devtools.timeline", "ResourceFinish", "data", + InspectorResourceFinishEvent::data(m_identifier, 0, true, dataLength)); didFailLoading(frame()); } dispose(); } -void PingLoaderImpl::didFinishLoading(double, int64_t, int64_t) { +void PingLoaderImpl::didFinishLoading(double, + int64_t, + int64_t encodedDataLength) { if (frame()) { TRACE_EVENT1("devtools.timeline", "ResourceFinish", "data", - InspectorResourceFinishEvent::data(m_identifier, 0, true)); + InspectorResourceFinishEvent::data(m_identifier, 0, true, + encodedDataLength)); didFailLoading(frame()); } dispose(); @@ -364,10 +368,11 @@ void PingLoaderImpl::didFail(const WebURLError& resourceError, int64_t, - int64_t) { + int64_t encodedDataLength) { if 
(frame()) { TRACE_EVENT1("devtools.timeline", "ResourceFinish", "data", - InspectorResourceFinishEvent::data(m_identifier, 0, true)); + InspectorResourceFinishEvent::data(m_identifier, 0, true, + encodedDataLength)); didFailLoading(frame()); } dispose(); @@ -376,7 +381,7 @@ void PingLoaderImpl::timeout(TimerBase*) { if (frame()) { TRACE_EVENT1("devtools.timeline", "ResourceFinish", "data", - InspectorResourceFinishEvent::data(m_identifier, 0, true)); + InspectorResourceFinishEvent::data(m_identifier, 0, true, 0)); didFailLoading(frame()); } dispose();
diff --git a/third_party/WebKit/Source/core/paint/PaintInvalidator.cpp b/third_party/WebKit/Source/core/paint/PaintInvalidator.cpp index f3f7f8b..cd44b82 100644 --- a/third_party/WebKit/Source/core/paint/PaintInvalidator.cpp +++ b/third_party/WebKit/Source/core/paint/PaintInvalidator.cpp
@@ -333,12 +333,9 @@ PaintInvalidatorContext::ForcedSubtreeInvalidationChecking; // TODO(crbug.com/637313): This is temporary before we support filters in - // paint property tree. + // GeometryMapper. // TODO(crbug.com/648274): This is a workaround for multi-column contents. - // TODO(crbug.com/672989): This is a workaround for out-of-flow positioned - // objects in multi-column spanner. - if (object.hasFilterInducingProperty() || object.isLayoutFlowThread() || - object.isColumnSpanAll()) { + if (object.hasFilterInducingProperty() || object.isLayoutFlowThread()) { context.forcedSubtreeInvalidationFlags |= PaintInvalidatorContext::ForcedSubtreeSlowPathRect; }
diff --git a/third_party/WebKit/Source/core/paint/PaintLayer.cpp b/third_party/WebKit/Source/core/paint/PaintLayer.cpp index 02e2e26..1d47911 100644 --- a/third_party/WebKit/Source/core/paint/PaintLayer.cpp +++ b/third_party/WebKit/Source/core/paint/PaintLayer.cpp
@@ -1194,12 +1194,12 @@ } void* PaintLayer::operator new(size_t sz) { - return partitionAlloc(WTF::Partitions::layoutPartition(), sz, + return PartitionAlloc(WTF::Partitions::layoutPartition(), sz, WTF_HEAP_PROFILER_TYPE_NAME(PaintLayer)); } void PaintLayer::operator delete(void* ptr) { - WTF::partitionFree(ptr); + WTF::PartitionFree(ptr); } void PaintLayer::addChild(PaintLayer* child, PaintLayer* beforeChild) {
diff --git a/third_party/WebKit/Source/core/paint/PaintPropertyTreeBuilder.cpp b/third_party/WebKit/Source/core/paint/PaintPropertyTreeBuilder.cpp index c74a5c2..c9ecc25 100644 --- a/third_party/WebKit/Source/core/paint/PaintPropertyTreeBuilder.cpp +++ b/third_party/WebKit/Source/core/paint/PaintPropertyTreeBuilder.cpp
@@ -85,7 +85,8 @@ return true; } -// True if a new property was created, false if an existing one was updated. +// True if a new property was created or a main thread scrolling reason changed +// (which can affect descendants), false if an existing one was updated. bool updateScroll(FrameView& frameView, PassRefPtr<const ScrollPaintPropertyNode> parent, PassRefPtr<const TransformPaintPropertyNode> scrollOffset, @@ -96,10 +97,11 @@ MainThreadScrollingReasons mainThreadScrollingReasons) { DCHECK(!RuntimeEnabledFeatures::rootLayerScrollingEnabled()); if (auto* existingScroll = frameView.scroll()) { + auto existingReasons = existingScroll->mainThreadScrollingReasons(); existingScroll->update(std::move(parent), std::move(scrollOffset), clip, bounds, userScrollableHorizontal, userScrollableVertical, mainThreadScrollingReasons); - return false; + return existingReasons != mainThreadScrollingReasons; } frameView.setScroll(ScrollPaintPropertyNode::create( std::move(parent), std::move(scrollOffset), clip, bounds, @@ -108,6 +110,17 @@ return true; } +MainThreadScrollingReasons mainThreadScrollingReasons( + const FrameView& frameView, + MainThreadScrollingReasons ancestorReasons) { + auto reasons = ancestorReasons; + if (!frameView.frame().settings()->getThreadedScrollingEnabled()) + reasons |= MainThreadScrollingReason::kThreadedScrollingDisabled; + if (frameView.hasBackgroundAttachmentFixedObjects()) + reasons |= MainThreadScrollingReason::kHasBackgroundAttachmentFixedObjects; + return reasons; +} + void PaintPropertyTreeBuilder::updateProperties( FrameView& frameView, PaintPropertyTreeBuilderContext& context) { @@ -156,13 +169,10 @@ bool userScrollableVertical = frameView.userInputScrollable(VerticalScrollbar); - MainThreadScrollingReasons reasons = 0; - if (!frameView.frame().settings()->getThreadedScrollingEnabled()) - reasons |= MainThreadScrollingReason::kThreadedScrollingDisabled; - if (frameView.hasBackgroundAttachmentFixedObjects()) { - reasons |= - 
MainThreadScrollingReason::kHasBackgroundAttachmentFixedObjects; - } + auto ancestorReasons = + context.current.scroll->mainThreadScrollingReasons(); + auto reasons = mainThreadScrollingReasons(frameView, ancestorReasons); + context.forceSubtreeUpdate |= updateScroll( frameView, context.current.scroll, frameView.scrollTranslation(), scrollClip, scrollBounds, userScrollableHorizontal, @@ -734,19 +744,16 @@ context.current.paintOffset = LayoutPoint(); } -MainThreadScrollingReasons mainScrollingReasons(const LayoutObject& object) { - MainThreadScrollingReasons reasons = 0; - if (!object.document().settings()->getThreadedScrollingEnabled()) - reasons |= MainThreadScrollingReason::kThreadedScrollingDisabled; - // Checking for descendants in the layout tree has two downsides: - // 1) There can be more descendants in layout order than in paint order (e.g., - // fixed position objects). - // 2) Iterating overall all background attachment fixed objects for every - // scroll node can be slow, though there will be none in the common case. - const FrameView& frameView = *object.frameView(); - if (frameView.hasBackgroundAttachmentFixedDescendants(object)) - reasons |= MainThreadScrollingReason::kHasBackgroundAttachmentFixedObjects; - return reasons; +MainThreadScrollingReasons mainThreadScrollingReasons( + const LayoutObject& object, + MainThreadScrollingReasons ancestorReasons) { + // The current main thread scrolling reasons implementation only changes + // reasons at frame boundaries, so we can early-out when not at a LayoutView. + // TODO(pdr): Need to find a solution to the style-related main thread + // scrolling reasons such as opacity and transform which violate this. 
+ if (!object.isLayoutView()) + return ancestorReasons; + return mainThreadScrollingReasons(*object.frameView(), ancestorReasons); } void PaintPropertyTreeBuilder::updateScrollAndScrollTranslation( @@ -755,11 +762,15 @@ if (object.needsPaintPropertyUpdate() || context.forceSubtreeUpdate) { bool needsScrollProperties = false; if (object.hasOverflowClip()) { - auto mainThreadScrollingReasons = mainScrollingReasons(object); + auto ancestorReasons = + context.current.scroll->mainThreadScrollingReasons(); + auto reasons = mainThreadScrollingReasons(object, ancestorReasons); + bool scrollNodeNeededForMainThreadReasons = ancestorReasons != reasons; + const LayoutBox& box = toLayoutBox(object); const auto* scrollableArea = box.getScrollableArea(); IntSize scrollOffset = box.scrolledContentOffset(); - if (mainThreadScrollingReasons || !scrollOffset.isZero() || + if (scrollNodeNeededForMainThreadReasons || !scrollOffset.isZero() || scrollableArea->scrollsOverflow()) { needsScrollProperties = true; auto& properties = @@ -777,10 +788,18 @@ scrollableArea->userInputScrollable(HorizontalScrollbar); bool userScrollableVertical = scrollableArea->userInputScrollable(VerticalScrollbar); + + // Main thread scrolling reasons depend on their ancestor's reasons + // so ensure the entire subtree is updated when reasons change. + if (auto* existingScrollNode = properties.scroll()) { + if (existingScrollNode->mainThreadScrollingReasons() != reasons) + context.forceSubtreeUpdate = true; + } + context.forceSubtreeUpdate |= properties.updateScroll( context.current.scroll, properties.scrollTranslation(), scrollClip, scrollBounds, userScrollableHorizontal, userScrollableVertical, - mainThreadScrollingReasons); + reasons); } }
diff --git a/third_party/WebKit/Source/core/paint/PaintPropertyTreeBuilderTest.cpp b/third_party/WebKit/Source/core/paint/PaintPropertyTreeBuilderTest.cpp index b1c63fa..76339c18e9 100644 --- a/third_party/WebKit/Source/core/paint/PaintPropertyTreeBuilderTest.cpp +++ b/third_party/WebKit/Source/core/paint/PaintPropertyTreeBuilderTest.cpp
@@ -2873,10 +2873,8 @@ "<div class='forceScroll'></div>"); Element* overflow = document().getElementById("overflow"); EXPECT_TRUE(frameScroll()->hasBackgroundAttachmentFixedDescendants()); - EXPECT_TRUE(overflow->layoutObject() - ->paintProperties() - ->scroll() - ->hasBackgroundAttachmentFixedDescendants()); + // No scroll node is needed. + EXPECT_EQ(overflow->layoutObject()->paintProperties()->scroll(), nullptr); } TEST_P(PaintPropertyTreeBuilderTest, PaintOffsetsUnderMultiColumn) {
diff --git a/third_party/WebKit/Source/core/paint/PaintPropertyTreeUpdateTests.cpp b/third_party/WebKit/Source/core/paint/PaintPropertyTreeUpdateTests.cpp index a2a0a83..5aaeb80 100644 --- a/third_party/WebKit/Source/core/paint/PaintPropertyTreeUpdateTests.cpp +++ b/third_party/WebKit/Source/core/paint/PaintPropertyTreeUpdateTests.cpp
@@ -90,10 +90,10 @@ ->paintProperties() ->scroll() ->hasBackgroundAttachmentFixedDescendants()); - EXPECT_FALSE(overflowB->layoutObject() - ->paintProperties() - ->scroll() - ->hasBackgroundAttachmentFixedDescendants()); + EXPECT_TRUE(overflowB->layoutObject() + ->paintProperties() + ->scroll() + ->hasBackgroundAttachmentFixedDescendants()); // Removing a main thread scrolling reason should update the entire tree. overflowB->removeAttribute("class"); @@ -116,10 +116,82 @@ ->paintProperties() ->scroll() ->hasBackgroundAttachmentFixedDescendants()); - EXPECT_FALSE(overflowB->layoutObject() - ->paintProperties() - ->scroll() - ->hasBackgroundAttachmentFixedDescendants()); + EXPECT_TRUE(overflowB->layoutObject() + ->paintProperties() + ->scroll() + ->hasBackgroundAttachmentFixedDescendants()); +} + +TEST_P(PaintPropertyTreeUpdateTest, ParentFrameMainThreadScrollReasons) { + setBodyInnerHTML( + "<style>" + " body { margin: 0; }" + " .fixedBackground {" + " background-image: url('foo');" + " background-attachment: fixed;" + " }" + "</style>" + "<iframe></iframe>" + "<div id='fixedBackground' class='fixedBackground'></div>" + "<div id='forceScroll' style='height: 8888px;'></div>"); + setChildFrameHTML( + "<style>body { margin: 0; }</style>" + "<div id='forceScroll' style='height: 8888px;'></div>"); + document().view()->updateAllLifecyclePhases(); + + FrameView* parent = document().view(); + EXPECT_TRUE(frameScroll(parent)->hasBackgroundAttachmentFixedDescendants()); + FrameView* child = childDocument().view(); + EXPECT_TRUE(frameScroll(child)->hasBackgroundAttachmentFixedDescendants()); + + // Removing a main thread scrolling reason should update the entire tree. 
+ auto* fixedBackground = document().getElementById("fixedBackground"); + fixedBackground->removeAttribute(HTMLNames::classAttr); + document().view()->updateAllLifecyclePhases(); + EXPECT_FALSE(frameScroll(parent)->hasBackgroundAttachmentFixedDescendants()); + EXPECT_FALSE(frameScroll(child)->hasBackgroundAttachmentFixedDescendants()); + + // Adding a main thread scrolling reason should update the entire tree. + fixedBackground->setAttribute(HTMLNames::classAttr, "fixedBackground"); + document().view()->updateAllLifecyclePhases(); + EXPECT_TRUE(frameScroll(parent)->hasBackgroundAttachmentFixedDescendants()); + EXPECT_TRUE(frameScroll(child)->hasBackgroundAttachmentFixedDescendants()); +} + +TEST_P(PaintPropertyTreeUpdateTest, ChildFrameMainThreadScrollReasons) { + setBodyInnerHTML( + "<style>body { margin: 0; }</style>" + "<iframe></iframe>" + "<div id='forceScroll' style='height: 8888px;'></div>"); + setChildFrameHTML( + "<style>" + " body { margin: 0; }" + " .fixedBackground {" + " background-image: url('foo');" + " background-attachment: fixed;" + " }" + "</style>" + "<div id='fixedBackground' class='fixedBackground'></div>" + "<div id='forceScroll' style='height: 8888px;'></div>"); + document().view()->updateAllLifecyclePhases(); + + FrameView* parent = document().view(); + EXPECT_FALSE(frameScroll(parent)->hasBackgroundAttachmentFixedDescendants()); + FrameView* child = childDocument().view(); + EXPECT_TRUE(frameScroll(child)->hasBackgroundAttachmentFixedDescendants()); + + // Removing a main thread scrolling reason should update the entire tree. + auto* fixedBackground = childDocument().getElementById("fixedBackground"); + fixedBackground->removeAttribute(HTMLNames::classAttr); + document().view()->updateAllLifecyclePhases(); + EXPECT_FALSE(frameScroll(parent)->hasBackgroundAttachmentFixedDescendants()); + EXPECT_FALSE(frameScroll(child)->hasBackgroundAttachmentFixedDescendants()); + + // Adding a main thread scrolling reason should update the entire tree. 
+ fixedBackground->setAttribute(HTMLNames::classAttr, "fixedBackground"); + document().view()->updateAllLifecyclePhases(); + EXPECT_FALSE(frameScroll(parent)->hasBackgroundAttachmentFixedDescendants()); + EXPECT_TRUE(frameScroll(child)->hasBackgroundAttachmentFixedDescendants()); } TEST_P(PaintPropertyTreeUpdateTest,
diff --git a/third_party/WebKit/Source/devtools/front_end/components/JavaScriptAutocomplete.js b/third_party/WebKit/Source/devtools/front_end/components/JavaScriptAutocomplete.js index aa0168a..0314ec0 100644 --- a/third_party/WebKit/Source/devtools/front_end/components/JavaScriptAutocomplete.js +++ b/third_party/WebKit/Source/devtools/front_end/components/JavaScriptAutocomplete.js
@@ -290,7 +290,7 @@ var result = []; var lastGroupTitle; for (var group of propertyGroups) { - group.items.sort(); + group.items.sort(itemComparator); var caseSensitivePrefix = []; var caseInsensitivePrefix = []; var caseSensitiveAnywhere = []; @@ -332,4 +332,19 @@ result = result.concat(structuredGroup); } return result; + + /** + * @param {string} a + * @param {string} b + * @return {number} + */ + function itemComparator(a, b) { + var aStartsWithUnderscore = a.startsWith('_'); + var bStartsWithUnderscore = b.startsWith('_'); + if (aStartsWithUnderscore && !bStartsWithUnderscore) + return 1; + if (bStartsWithUnderscore && !aStartsWithUnderscore) + return -1; + return String.naturalOrderComparator(a, b); + } };
diff --git a/third_party/WebKit/Source/devtools/front_end/sdk/ResourceTreeModel.js b/third_party/WebKit/Source/devtools/front_end/sdk/ResourceTreeModel.js index eae9251..a3b46a7 100644 --- a/third_party/WebKit/Source/devtools/front_end/sdk/ResourceTreeModel.js +++ b/third_party/WebKit/Source/devtools/front_end/sdk/ResourceTreeModel.js
@@ -129,30 +129,10 @@ */ _addFrame(frame, aboutToNavigate) { this._frames.set(frame.id, frame); - if (frame.isMainFrame()) { + if (frame.isMainFrame()) this.mainFrame = frame; - this._securityOriginManager.setMainSecurityOrigin(frame.url); - } this.dispatchEventToListeners(SDK.ResourceTreeModel.Events.FrameAdded, frame); - if (!aboutToNavigate) - this._securityOriginManager.addSecurityOrigin(frame.securityOrigin); - } - - /** - * @param {!SDK.ResourceTreeFrame} mainFrame - */ - _handleMainFrameDetached(mainFrame) { - /** - * @param {!SDK.ResourceTreeFrame} frame - * @this {SDK.ResourceTreeModel} - */ - function removeOriginForFrame(frame) { - for (var i = 0; i < frame.childFrames.length; ++i) - removeOriginForFrame.call(this, frame.childFrames[i]); - if (!frame.isMainFrame()) - this._securityOriginManager.removeSecurityOrigin(frame.securityOrigin); - } - removeOriginForFrame.call(this, mainFrame); + this._updateSecurityOrigins(); } /** @@ -170,7 +150,6 @@ var parentFrame = parentFrameId ? (this._frames.get(parentFrameId) || null) : null; var frame = new SDK.ResourceTreeFrame(this, parentFrame, frameId); if (frame.isMainFrame() && this.mainFrame) { - this._handleMainFrameDetached(this.mainFrame); // Navigation to the new backend process. 
this._frameDetached(this.mainFrame.id); } @@ -194,19 +173,14 @@ } this.dispatchEventToListeners(SDK.ResourceTreeModel.Events.FrameWillNavigate, frame); - - this._securityOriginManager.removeSecurityOrigin(frame.securityOrigin); frame._navigate(framePayload); - var addedOrigin = frame.securityOrigin; - this.dispatchEventToListeners(SDK.ResourceTreeModel.Events.FrameNavigated, frame); + if (frame.isMainFrame()) { this.dispatchEventToListeners(SDK.ResourceTreeModel.Events.MainFrameNavigated, frame); if (Common.moduleSetting('preserveConsoleLog').get()) Common.console.log(Common.UIString('Navigated to %s', frame.url)); } - if (addedOrigin) - this._securityOriginManager.addSecurityOrigin(addedOrigin); // Fill frame with retained resources (the ones loaded using new loader). var resources = frame.resources(); @@ -215,6 +189,7 @@ if (frame.isMainFrame()) this.target().setInspectedURL(frame.url); + this._updateSecurityOrigins(); } /** @@ -229,11 +204,11 @@ if (!frame) return; - this._securityOriginManager.removeSecurityOrigin(frame.securityOrigin); if (frame.parentFrame) frame.parentFrame._removeChildFrame(frame); else frame._remove(); + this._updateSecurityOrigins(); } /** @@ -451,6 +426,21 @@ return SDK.ExecutionContext.comparator(a, b); } + + _updateSecurityOrigins() { + var securityOrigins = new Set(); + var mainSecurityOrigin = null; + for (var frame of this._frames.values()) { + var origin = frame.securityOrigin; + if (!origin) + continue; + securityOrigins.add(origin); + if (frame.isMainFrame()) + mainSecurityOrigin = origin; + } + this._securityOriginManager.updateSecurityOrigins(securityOrigins); + this._securityOriginManager.setMainSecurityOrigin(mainSecurityOrigin || ''); + } }; /** @enum {symbol} */
diff --git a/third_party/WebKit/Source/devtools/front_end/sdk/SecurityOriginManager.js b/third_party/WebKit/Source/devtools/front_end/sdk/SecurityOriginManager.js index 2b0b903f..0c579c05 100644 --- a/third_party/WebKit/Source/devtools/front_end/sdk/SecurityOriginManager.js +++ b/third_party/WebKit/Source/devtools/front_end/sdk/SecurityOriginManager.js
@@ -11,7 +11,8 @@ constructor(target) { super(SDK.SecurityOriginManager, target); - this._securityOriginCounter = new Map(); + /** @type {!Set<string>} */ + this._securityOrigins = new Set(); this._mainSecurityOrigin = ''; } @@ -27,36 +28,28 @@ } /** - * @param {string} securityOrigin + * @param {!Set<string>} securityOrigins */ - addSecurityOrigin(securityOrigin) { - var currentCount = this._securityOriginCounter.get(securityOrigin); - if (!currentCount) { - this._securityOriginCounter.set(securityOrigin, 1); - this.dispatchEventToListeners(SDK.SecurityOriginManager.Events.SecurityOriginAdded, securityOrigin); - return; - } - this._securityOriginCounter.set(securityOrigin, currentCount + 1); - } + updateSecurityOrigins(securityOrigins) { + var oldOrigins = this._securityOrigins; + this._securityOrigins = securityOrigins; - /** - * @param {string} securityOrigin - */ - removeSecurityOrigin(securityOrigin) { - var currentCount = this._securityOriginCounter.get(securityOrigin); - if (currentCount === 1) { - this._securityOriginCounter.delete(securityOrigin); - this.dispatchEventToListeners(SDK.SecurityOriginManager.Events.SecurityOriginRemoved, securityOrigin); - return; + for (var origin of oldOrigins) { + if (!this._securityOrigins.has(origin)) + this.dispatchEventToListeners(SDK.SecurityOriginManager.Events.SecurityOriginRemoved, origin); } - this._securityOriginCounter.set(securityOrigin, currentCount - 1); + + for (var origin of this._securityOrigins) { + if (!oldOrigins.has(origin)) + this.dispatchEventToListeners(SDK.SecurityOriginManager.Events.SecurityOriginAdded, origin); + } } /** * @return {!Array<string>} */ securityOrigins() { - return this._securityOriginCounter.keysArray(); + return this._securityOrigins.valuesArray(); } /**
diff --git a/third_party/WebKit/Source/devtools/front_end/timeline/TimelineTreeView.js b/third_party/WebKit/Source/devtools/front_end/timeline/TimelineTreeView.js index cc640277..bf16e03 100644 --- a/third_party/WebKit/Source/devtools/front_end/timeline/TimelineTreeView.js +++ b/third_party/WebKit/Source/devtools/front_end/timeline/TimelineTreeView.js
@@ -506,6 +506,8 @@ var name = node.id; if (Timeline.AggregatedTimelineTreeView._isExtensionInternalURL(name)) name = Common.UIString('[Chrome extensions overhead]'); + else if (Timeline.AggregatedTimelineTreeView._isV8NativeURL(name)) + name = Common.UIString('[V8 Runtime]'); else if (name.startsWith('chrome-extension')) name = this._executionContextNamesByOrigin.get(name) || name; return {name: name || Common.UIString('unattributed'), color: color}; @@ -635,6 +637,8 @@ var url = TimelineModel.TimelineProfileTree.eventURL(event) || ''; if (Timeline.AggregatedTimelineTreeView._isExtensionInternalURL(url)) return Timeline.AggregatedTimelineTreeView._extensionInternalPrefix; + if (Timeline.AggregatedTimelineTreeView._isV8NativeURL(url)) + return Timeline.AggregatedTimelineTreeView._v8NativePrefix; var parsedURL = url.asParsedURL(); if (!parsedURL) return ''; @@ -691,9 +695,18 @@ static _isExtensionInternalURL(url) { return url.startsWith(Timeline.AggregatedTimelineTreeView._extensionInternalPrefix); } + + /** + * @param {string} url + * @return {boolean} + */ + static _isV8NativeURL(url) { + return url.startsWith(Timeline.AggregatedTimelineTreeView._v8NativePrefix); + } }; Timeline.AggregatedTimelineTreeView._extensionInternalPrefix = 'extensions::'; +Timeline.AggregatedTimelineTreeView._v8NativePrefix = 'native '; /** * @enum {string}
diff --git a/third_party/WebKit/Source/devtools/front_end/timeline/TimelineUIUtils.js b/third_party/WebKit/Source/devtools/front_end/timeline/TimelineUIUtils.js index 75956ee..84de7bb 100644 --- a/third_party/WebKit/Source/devtools/front_end/timeline/TimelineUIUtils.js +++ b/third_party/WebKit/Source/devtools/front_end/timeline/TimelineUIUtils.js
@@ -1031,26 +1031,33 @@ if (request.requestMethod) contentHelper.appendTextRow(Common.UIString('Request Method'), request.requestMethod); if (typeof request.priority === 'string') { - var priority = + const priority = Components.uiLabelForPriority(/** @type {!Protocol.Network.ResourcePriority} */ (request.priority)); contentHelper.appendTextRow(Common.UIString('Priority'), priority); } if (request.mimeType) contentHelper.appendTextRow(Common.UIString('Mime Type'), request.mimeType); - - var title = Common.UIString('Initiator'); - var sendRequest = request.children[0]; - var topFrame = TimelineModel.TimelineData.forEvent(sendRequest).topFrame(); + var lengthText = ''; + if (request.fromCache) + lengthText += Common.UIString('(from cache) '); + if (request.fromServiceWorker) + lengthText += Common.UIString('(from service worker)'); + if (request.encodedDataLength || !lengthText) + lengthText = `${Number.bytesToString(request.encodedDataLength)} ${lengthText}`; + contentHelper.appendTextRow(Common.UIString('Encoded Length'), lengthText); + const title = Common.UIString('Initiator'); + const sendRequest = request.children[0]; + const topFrame = TimelineModel.TimelineData.forEvent(sendRequest).topFrame(); if (topFrame) { - var link = linkifier.maybeLinkifyConsoleCallFrame(target, topFrame); + const link = linkifier.maybeLinkifyConsoleCallFrame(target, topFrame); if (link) contentHelper.appendElementRow(title, link); } else { - var initiator = TimelineModel.TimelineData.forEvent(sendRequest).initiator(); + const initiator = TimelineModel.TimelineData.forEvent(sendRequest).initiator(); if (initiator) { - var initiatorURL = TimelineModel.TimelineData.forEvent(initiator).url; + const initiatorURL = TimelineModel.TimelineData.forEvent(initiator).url; if (initiatorURL) { - var link = linkifier.maybeLinkifyScriptLocation(target, null, initiatorURL, 0); + const link = linkifier.maybeLinkifyScriptLocation(target, null, initiatorURL, 0); if (link) 
contentHelper.appendElementRow(title, link); }
diff --git a/third_party/WebKit/Source/devtools/front_end/timeline/timelinePanel.css b/third_party/WebKit/Source/devtools/front_end/timeline/timelinePanel.css index 021b1b1..4f5292a 100644 --- a/third_party/WebKit/Source/devtools/front_end/timeline/timelinePanel.css +++ b/third_party/WebKit/Source/devtools/front_end/timeline/timelinePanel.css
@@ -275,6 +275,7 @@ background: transparent; text-align: left; border-spacing: 0; + margin: 4px; } .image-preview-container img {
diff --git a/third_party/WebKit/Source/devtools/front_end/timeline_model/TimelineModel.js b/third_party/WebKit/Source/devtools/front_end/timeline_model/TimelineModel.js index 9452b44..fa3e9ee 100644 --- a/third_party/WebKit/Source/devtools/front_end/timeline_model/TimelineModel.js +++ b/third_party/WebKit/Source/devtools/front_end/timeline_model/TimelineModel.js
@@ -1486,6 +1486,7 @@ constructor(event) { this.startTime = event.name === TimelineModel.TimelineModel.RecordType.ResourceSendRequest ? event.startTime : 0; this.endTime = Infinity; + this.encodedDataLength = 0; /** @type {!Array<!SDK.TracingModel.Event>} */ this.children = []; /** @type {?Object} */ @@ -1518,6 +1519,18 @@ if (!this.responseTime && (event.name === recordType.ResourceReceiveResponse || event.name === recordType.ResourceReceivedData)) this.responseTime = event.startTime; + const encodedDataLength = eventData['encodedDataLength'] || 0; + if (event.name === recordType.ResourceReceiveResponse) { + if (eventData['fromCache']) + this.fromCache = true; + if (eventData['fromServiceWorker']) + this.fromServiceWorker = true; + this.encodedDataLength = encodedDataLength; + } + if (event.name === recordType.ResourceReceivedData) + this.encodedDataLength += encodedDataLength; + if (event.name === recordType.ResourceFinish && encodedDataLength) + this.encodedDataLength = encodedDataLength; if (!this.url) this.url = eventData['url']; if (!this.requestMethod)
diff --git a/third_party/WebKit/Source/modules/bluetooth/Bluetooth.cpp b/third_party/WebKit/Source/modules/bluetooth/Bluetooth.cpp index ac700a4..08e9cd9 100644 --- a/third_party/WebKit/Source/modules/bluetooth/Bluetooth.cpp +++ b/third_party/WebKit/Source/modules/bluetooth/Bluetooth.cpp
@@ -160,8 +160,8 @@ return; if (result == mojom::blink::WebBluetoothResult::SUCCESS) { - BluetoothDevice* bluetoothDevice = getBluetoothDeviceRepresentingDevice( - device->id->device_id, device->name, resolver); + BluetoothDevice* bluetoothDevice = + getBluetoothDeviceRepresentingDevice(std::move(device), resolver); resolver->resolve(bluetoothDevice); } else { resolver->reject(BluetoothError::take(resolver, result)); @@ -275,25 +275,24 @@ characteristic->dispatchCharacteristicValueChanged(value); } -void Bluetooth::GattServerDisconnected( - mojom::blink::WebBluetoothDeviceIdPtr deviceId) { - BluetoothDevice* device = m_connectedDevices.get(deviceId->device_id); +void Bluetooth::GattServerDisconnected(const WTF::String& deviceId) { + BluetoothDevice* device = m_connectedDevices.get(deviceId); if (device) { // Remove device from the map before calling dispatchGattServerDisconnected // to avoid removing a device the gattserverdisconnected event handler might // have re-connected. - m_connectedDevices.remove(deviceId->device_id); + m_connectedDevices.remove(deviceId); device->dispatchGattServerDisconnected(); } } BluetoothDevice* Bluetooth::getBluetoothDeviceRepresentingDevice( - const String& id, - const String& name, + mojom::blink::WebBluetoothDevicePtr devicePtr, ScriptPromiseResolver* resolver) { + WTF::String id = devicePtr->id; BluetoothDevice* device = m_deviceInstanceMap.get(id); if (!device) { - device = BluetoothDevice::take(resolver, id, name, this); + device = BluetoothDevice::take(resolver, std::move(devicePtr), this); auto result = m_deviceInstanceMap.add(id, device); DCHECK(result.isNewEntry); }
diff --git a/third_party/WebKit/Source/modules/bluetooth/Bluetooth.h b/third_party/WebKit/Source/modules/bluetooth/Bluetooth.h index 9b0588957..2adaa45 100644 --- a/third_party/WebKit/Source/modules/bluetooth/Bluetooth.h +++ b/third_party/WebKit/Source/modules/bluetooth/Bluetooth.h
@@ -56,11 +56,11 @@ void RemoteCharacteristicValueChanged( const WTF::String& characteristicInstanceId, const WTF::Vector<uint8_t>& value) override; - void GattServerDisconnected(mojom::blink::WebBluetoothDeviceIdPtr) override; + void GattServerDisconnected(const WTF::String& deviceId) override; - BluetoothDevice* getBluetoothDeviceRepresentingDevice(const String& id, - const String& name, - ScriptPromiseResolver*); + BluetoothDevice* getBluetoothDeviceRepresentingDevice( + mojom::blink::WebBluetoothDevicePtr, + ScriptPromiseResolver*); void RequestDeviceCallback(ScriptPromiseResolver*, mojom::blink::WebBluetoothResult,
diff --git a/third_party/WebKit/Source/modules/bluetooth/BluetoothAttributeInstanceMap.cpp b/third_party/WebKit/Source/modules/bluetooth/BluetoothAttributeInstanceMap.cpp index 14a9734..5cfa298e 100644 --- a/third_party/WebKit/Source/modules/bluetooth/BluetoothAttributeInstanceMap.cpp +++ b/third_party/WebKit/Source/modules/bluetooth/BluetoothAttributeInstanceMap.cpp
@@ -17,16 +17,16 @@ BluetoothRemoteGATTService* BluetoothAttributeInstanceMap::getOrCreateRemoteGATTService( - const String& serviceInstanceId, - const String& uuid, + mojom::blink::WebBluetoothRemoteGATTServicePtr remoteGATTService, bool isPrimary, const String& deviceInstanceId) { + String serviceInstanceId = remoteGATTService->instance_id; BluetoothRemoteGATTService* service = m_serviceIdToObject.get(serviceInstanceId); if (!service) { - service = new BluetoothRemoteGATTService(serviceInstanceId, uuid, isPrimary, - deviceInstanceId, m_device); + service = new BluetoothRemoteGATTService( + std::move(remoteGATTService), isPrimary, deviceInstanceId, m_device); m_serviceIdToObject.add(serviceInstanceId, service); } @@ -41,19 +41,19 @@ BluetoothRemoteGATTCharacteristic* BluetoothAttributeInstanceMap::getOrCreateRemoteGATTCharacteristic( ExecutionContext* context, - const String& characteristicInstanceId, const String& serviceInstanceId, - const String& uuid, - uint32_t characteristicProperties, + mojom::blink::WebBluetoothRemoteGATTCharacteristicPtr + remoteGATTCharacteristic, BluetoothRemoteGATTService* service) { + String instanceId = remoteGATTCharacteristic->instance_id; BluetoothRemoteGATTCharacteristic* characteristic = - m_characteristicIdToObject.get(characteristicInstanceId); + m_characteristicIdToObject.get(instanceId); if (!characteristic) { characteristic = BluetoothRemoteGATTCharacteristic::create( - context, characteristicInstanceId, serviceInstanceId, uuid, - characteristicProperties, service, m_device); - m_characteristicIdToObject.add(characteristicInstanceId, characteristic); + context, serviceInstanceId, std::move(remoteGATTCharacteristic), + service, m_device); + m_characteristicIdToObject.add(instanceId, characteristic); } return characteristic;
diff --git a/third_party/WebKit/Source/modules/bluetooth/BluetoothAttributeInstanceMap.h b/third_party/WebKit/Source/modules/bluetooth/BluetoothAttributeInstanceMap.h index 0fb529c..65e55a25b 100644 --- a/third_party/WebKit/Source/modules/bluetooth/BluetoothAttributeInstanceMap.h +++ b/third_party/WebKit/Source/modules/bluetooth/BluetoothAttributeInstanceMap.h
@@ -30,8 +30,7 @@ // Otherwise returns the BluetoothRemoteGATTService object already // in the map. BluetoothRemoteGATTService* getOrCreateRemoteGATTService( - const String& serviceInstanceId, - const String& uuid, + mojom::blink::WebBluetoothRemoteGATTServicePtr, bool isPrimary, const String& deviceInstanceId); @@ -45,10 +44,8 @@ // the map. BluetoothRemoteGATTCharacteristic* getOrCreateRemoteGATTCharacteristic( ExecutionContext*, - const String& characteristicInstanceId, const String& serviceInstanceId, - const String& uuid, - uint32_t characteristicProperties, + mojom::blink::WebBluetoothRemoteGATTCharacteristicPtr, BluetoothRemoteGATTService*); // Returns true if a BluetoothRemoteGATTCharacteristic with
diff --git a/third_party/WebKit/Source/modules/bluetooth/BluetoothDevice.cpp b/third_party/WebKit/Source/modules/bluetooth/BluetoothDevice.cpp index dd0a366a..ee156e5 100644 --- a/third_party/WebKit/Source/modules/bluetooth/BluetoothDevice.cpp +++ b/third_party/WebKit/Source/modules/bluetooth/BluetoothDevice.cpp
@@ -19,40 +19,29 @@ namespace blink { BluetoothDevice::BluetoothDevice(ExecutionContext* context, - const String& id, - const String& name, + mojom::blink::WebBluetoothDevicePtr device, Bluetooth* bluetooth) : ContextLifecycleObserver(context), m_attributeInstanceMap(new BluetoothAttributeInstanceMap(this)), - m_id(id), - m_name(name), + m_device(std::move(device)), m_gatt(BluetoothRemoteGATTServer::create(this)), m_bluetooth(bluetooth) {} // static -BluetoothDevice* BluetoothDevice::take(ScriptPromiseResolver* resolver, - const String& id, - const String& name, - Bluetooth* bluetooth) { - return new BluetoothDevice(resolver->getExecutionContext(), id, name, +BluetoothDevice* BluetoothDevice::take( + ScriptPromiseResolver* resolver, + mojom::blink::WebBluetoothDevicePtr device, + Bluetooth* bluetooth) { + return new BluetoothDevice(resolver->getExecutionContext(), std::move(device), bluetooth); } -// static -mojom::blink::WebBluetoothDeviceIdPtr BluetoothDevice::createMojoDeviceId( - const String& deviceId) { - auto result = mojom::blink::WebBluetoothDeviceId::New(); - result->device_id = deviceId; - return result; -} - BluetoothRemoteGATTService* BluetoothDevice::getOrCreateRemoteGATTService( - const String& serviceInstanceId, - const String& uuid, + mojom::blink::WebBluetoothRemoteGATTServicePtr service, bool isPrimary, const String& deviceInstanceId) { return m_attributeInstanceMap->getOrCreateRemoteGATTService( - serviceInstanceId, uuid, isPrimary, deviceInstanceId); + std::move(service), isPrimary, deviceInstanceId); } bool BluetoothDevice::isValidService(const String& serviceInstanceId) { @@ -62,14 +51,11 @@ BluetoothRemoteGATTCharacteristic* BluetoothDevice::getOrCreateRemoteGATTCharacteristic( ExecutionContext* context, - const String& characteristicInstanceId, const String& serviceInstanceId, - const String& uuid, - uint32_t characteristicProperties, + mojom::blink::WebBluetoothRemoteGATTCharacteristicPtr characteristic, BluetoothRemoteGATTService* 
service) { return m_attributeInstanceMap->getOrCreateRemoteGATTCharacteristic( - context, characteristicInstanceId, serviceInstanceId, uuid, - characteristicProperties, service); + context, serviceInstanceId, std::move(characteristic), service); } bool BluetoothDevice::isValidCharacteristic( @@ -92,9 +78,7 @@ m_gatt->ClearActiveAlgorithms(); m_bluetooth->removeDevice(id()); mojom::blink::WebBluetoothService* service = m_bluetooth->service(); - auto deviceId = mojom::blink::WebBluetoothDeviceId::New(); - deviceId->device_id = id(); - service->RemoteServerDisconnect(std::move(deviceId)); + service->RemoteServerDisconnect(id()); } }
diff --git a/third_party/WebKit/Source/modules/bluetooth/BluetoothDevice.h b/third_party/WebKit/Source/modules/bluetooth/BluetoothDevice.h index 61f97528..317c43e0 100644 --- a/third_party/WebKit/Source/modules/bluetooth/BluetoothDevice.h +++ b/third_party/WebKit/Source/modules/bluetooth/BluetoothDevice.h
@@ -37,32 +37,24 @@ public: BluetoothDevice(ExecutionContext*, - const String& id, - const String& name, + mojom::blink::WebBluetoothDevicePtr, Bluetooth*); // Interface required by CallbackPromiseAdapter: static BluetoothDevice* take(ScriptPromiseResolver*, - const String& id, - const String& name, + mojom::blink::WebBluetoothDevicePtr, Bluetooth*); - static mojom::blink::WebBluetoothDeviceIdPtr createMojoDeviceId( - const String& deviceId); - BluetoothRemoteGATTService* getOrCreateRemoteGATTService( - const String& serviceInstanceId, - const String& uuid, + mojom::blink::WebBluetoothRemoteGATTServicePtr, bool isPrimary, const String& deviceInstanceId); bool isValidService(const String& serviceInstanceId); BluetoothRemoteGATTCharacteristic* getOrCreateRemoteGATTCharacteristic( ExecutionContext*, - const String& characteristicInstanceId, const String& serviceInstanceId, - const String& uuid, - uint32_t characteristicProperties, + mojom::blink::WebBluetoothRemoteGATTCharacteristicPtr, BluetoothRemoteGATTService*); bool isValidCharacteristic(const String& characteristicInstanceId); @@ -104,8 +96,8 @@ DECLARE_VIRTUAL_TRACE(); // IDL exposed interface: - String id() { return m_id; } - String name() { return m_name; } + String id() { return m_device->id; } + String name() { return m_device->name; } BluetoothRemoteGATTServer* gatt() { return m_gatt; } DEFINE_ATTRIBUTE_EVENT_LISTENER(gattserverdisconnected); @@ -114,8 +106,7 @@ // Holds all GATT Attributes associated with this BluetoothDevice. Member<BluetoothAttributeInstanceMap> m_attributeInstanceMap; - const String m_id; - const String m_name; + mojom::blink::WebBluetoothDevicePtr m_device; Member<BluetoothRemoteGATTServer> m_gatt; Member<Bluetooth> m_bluetooth; };
diff --git a/third_party/WebKit/Source/modules/bluetooth/BluetoothRemoteGATTCharacteristic.cpp b/third_party/WebKit/Source/modules/bluetooth/BluetoothRemoteGATTCharacteristic.cpp index a1614922..9a55773 100644 --- a/third_party/WebKit/Source/modules/bluetooth/BluetoothRemoteGATTCharacteristic.cpp +++ b/third_party/WebKit/Source/modules/bluetooth/BluetoothRemoteGATTCharacteristic.cpp
@@ -16,7 +16,7 @@ #include "modules/bluetooth/BluetoothDevice.h" #include "modules/bluetooth/BluetoothError.h" #include "modules/bluetooth/BluetoothRemoteGATTService.h" -#include <memory> +#include <utility> namespace blink { @@ -42,35 +42,28 @@ BluetoothRemoteGATTCharacteristic::BluetoothRemoteGATTCharacteristic( ExecutionContext* context, - const String& characteristicInstanceId, const String& serviceInstanceId, - const String& uuid, - uint32_t characteristicProperties, + mojom::blink::WebBluetoothRemoteGATTCharacteristicPtr characteristic, BluetoothRemoteGATTService* service, BluetoothDevice* device) : ContextLifecycleObserver(context), - m_characteristicInstanceId(characteristicInstanceId), m_serviceInstanceId(serviceInstanceId), - m_uuid(uuid), - m_characteristicProperties(characteristicProperties), + m_characteristic(std::move(characteristic)), m_service(service), m_stopped(false), m_device(device) { m_properties = - BluetoothCharacteristicProperties::create(m_characteristicProperties); + BluetoothCharacteristicProperties::create(m_characteristic->properties); } BluetoothRemoteGATTCharacteristic* BluetoothRemoteGATTCharacteristic::create( ExecutionContext* context, - const String& characteristicInstanceId, const String& serviceInstanceId, - const String& uuid, - uint32_t characteristicProperties, + mojom::blink::WebBluetoothRemoteGATTCharacteristicPtr characteristic, BluetoothRemoteGATTService* service, BluetoothDevice* device) { return new BluetoothRemoteGATTCharacteristic( - context, characteristicInstanceId, serviceInstanceId, uuid, - characteristicProperties, service, device); + context, serviceInstanceId, std::move(characteristic), service, device); } void BluetoothRemoteGATTCharacteristic::setValue(DOMDataView* domDataView) { @@ -95,7 +88,7 @@ if (!m_stopped) { m_stopped = true; m_device->bluetooth()->characteristicObjectRemoved( - m_characteristicInstanceId); + m_characteristic->instance_id); } } @@ -117,7 +110,7 @@ // listeners have been removed. 
See http://crbug.com/541390 if (eventType == EventTypeNames::characteristicvaluechanged) { m_device->bluetooth()->registerCharacteristicObject( - m_characteristicInstanceId, this); + m_characteristic->instance_id, this); } } @@ -156,7 +149,7 @@ DOMException::create(NetworkError, kGATTServerNotConnected)); } - if (!gatt()->device()->isValidCharacteristic(m_characteristicInstanceId)) { + if (!gatt()->device()->isValidCharacteristic(m_characteristic->instance_id)) { return ScriptPromise::rejectWithDOMException( scriptState, DOMException::create(InvalidStateError, kInvalidCharacteristic)); @@ -168,7 +161,7 @@ mojom::blink::WebBluetoothService* service = m_device->bluetooth()->service(); service->RemoteCharacteristicReadValue( - m_characteristicInstanceId, + m_characteristic->instance_id, convertToBaseCallback( WTF::bind(&BluetoothRemoteGATTCharacteristic::ReadValueCallback, wrapPersistent(this), wrapPersistent(resolver)))); @@ -210,7 +203,7 @@ DOMException::create(NetworkError, kGATTServerNotConnected)); } - if (!gatt()->device()->isValidCharacteristic(m_characteristicInstanceId)) { + if (!gatt()->device()->isValidCharacteristic(m_characteristic->instance_id)) { return ScriptPromise::rejectWithDOMException( scriptState, DOMException::create(InvalidStateError, kInvalidCharacteristic)); @@ -237,7 +230,7 @@ mojom::blink::WebBluetoothService* service = m_device->bluetooth()->service(); service->RemoteCharacteristicWriteValue( - m_characteristicInstanceId, valueVector, + m_characteristic->instance_id, valueVector, convertToBaseCallback(WTF::bind( &BluetoothRemoteGATTCharacteristic::WriteValueCallback, wrapPersistent(this), wrapPersistent(resolver), valueVector))); @@ -276,7 +269,7 @@ DOMException::create(NetworkError, kGATTServerNotConnected)); } - if (!gatt()->device()->isValidCharacteristic(m_characteristicInstanceId)) { + if (!gatt()->device()->isValidCharacteristic(m_characteristic->instance_id)) { return ScriptPromise::rejectWithDOMException( scriptState, 
DOMException::create(InvalidStateError, kInvalidCharacteristic)); @@ -288,7 +281,7 @@ mojom::blink::WebBluetoothService* service = m_device->bluetooth()->service(); service->RemoteCharacteristicStartNotifications( - m_characteristicInstanceId, + m_characteristic->instance_id, convertToBaseCallback( WTF::bind(&BluetoothRemoteGATTCharacteristic::NotificationsCallback, wrapPersistent(this), wrapPersistent(resolver)))); @@ -305,7 +298,7 @@ DOMException::create(NetworkError, kGATTServerNotConnected)); } - if (!gatt()->device()->isValidCharacteristic(m_characteristicInstanceId)) { + if (!gatt()->device()->isValidCharacteristic(m_characteristic->instance_id)) { return ScriptPromise::rejectWithDOMException( scriptState, DOMException::create(InvalidStateError, kInvalidCharacteristic)); @@ -317,7 +310,7 @@ mojom::blink::WebBluetoothService* service = m_device->bluetooth()->service(); service->RemoteCharacteristicStopNotifications( - m_characteristicInstanceId, + m_characteristic->instance_id, convertToBaseCallback( WTF::bind(&BluetoothRemoteGATTCharacteristic::NotificationsCallback, wrapPersistent(this), wrapPersistent(resolver),
diff --git a/third_party/WebKit/Source/modules/bluetooth/BluetoothRemoteGATTCharacteristic.h b/third_party/WebKit/Source/modules/bluetooth/BluetoothRemoteGATTCharacteristic.h index 4f0135d..01166db 100644 --- a/third_party/WebKit/Source/modules/bluetooth/BluetoothRemoteGATTCharacteristic.h +++ b/third_party/WebKit/Source/modules/bluetooth/BluetoothRemoteGATTCharacteristic.h
@@ -42,19 +42,15 @@ public: explicit BluetoothRemoteGATTCharacteristic( ExecutionContext*, - const String& characteristicInstanceId, const String& serviceInstanceId, - const String& uuid, - uint32_t characteristicProperties, + mojom::blink::WebBluetoothRemoteGATTCharacteristicPtr, BluetoothRemoteGATTService*, BluetoothDevice*); static BluetoothRemoteGATTCharacteristic* create( ExecutionContext*, - const String& characteristicInstanceId, const String& serviceInstanceId, - const String& uuid, - uint32_t characteristicProperties, + mojom::blink::WebBluetoothRemoteGATTCharacteristicPtr, BluetoothRemoteGATTService*, BluetoothDevice*); @@ -83,7 +79,7 @@ // IDL exposed interface: BluetoothRemoteGATTService* service() { return m_service; } - String uuid() { return m_uuid; } + String uuid() { return m_characteristic->uuid; } BluetoothCharacteristicProperties* properties() { return m_properties; } DOMDataView* value() const { return m_value; } ScriptPromise readValue(ScriptState*); @@ -110,10 +106,8 @@ void NotificationsCallback(ScriptPromiseResolver*, mojom::blink::WebBluetoothResult); - const String m_characteristicInstanceId; const String m_serviceInstanceId; - const String m_uuid; - const uint32_t m_characteristicProperties; + mojom::blink::WebBluetoothRemoteGATTCharacteristicPtr m_characteristic; Member<BluetoothRemoteGATTService> m_service; bool m_stopped; Member<BluetoothCharacteristicProperties> m_properties;
diff --git a/third_party/WebKit/Source/modules/bluetooth/BluetoothRemoteGATTServer.cpp b/third_party/WebKit/Source/modules/bluetooth/BluetoothRemoteGATTServer.cpp index 2583213..85341816 100644 --- a/third_party/WebKit/Source/modules/bluetooth/BluetoothRemoteGATTServer.cpp +++ b/third_party/WebKit/Source/modules/bluetooth/BluetoothRemoteGATTServer.cpp
@@ -15,6 +15,7 @@ #include "modules/bluetooth/BluetoothError.h" #include "modules/bluetooth/BluetoothRemoteGATTService.h" #include "modules/bluetooth/BluetoothUUID.h" +#include <utility> namespace blink { @@ -77,13 +78,10 @@ ScriptPromise promise = resolver->promise(); mojom::blink::WebBluetoothService* service = m_device->bluetooth()->service(); - mojom::blink::WebBluetoothDeviceIdPtr deviceId = - BluetoothDevice::createMojoDeviceId(device()->id()); service->RemoteServerConnect( - std::move(deviceId), - convertToBaseCallback( - WTF::bind(&BluetoothRemoteGATTServer::ConnectCallback, - wrapPersistent(this), wrapPersistent(resolver)))); + device()->id(), convertToBaseCallback(WTF::bind( + &BluetoothRemoteGATTServer::ConnectCallback, + wrapPersistent(this), wrapPersistent(resolver)))); return promise; } @@ -94,9 +92,7 @@ device()->cleanupDisconnectedDeviceAndFireEvent(); m_device->bluetooth()->removeDevice(device()->id()); mojom::blink::WebBluetoothService* service = m_device->bluetooth()->service(); - mojom::blink::WebBluetoothDeviceIdPtr deviceId = - BluetoothDevice::createMojoDeviceId(device()->id()); - service->RemoteServerDisconnect(std::move(deviceId)); + service->RemoteServerDisconnect(device()->id()); } // Callback that allows us to resolve the promise with a single service or @@ -124,18 +120,17 @@ if (quantity == mojom::blink::WebBluetoothGATTQueryQuantity::SINGLE) { DCHECK_EQ(1u, services->size()); resolver->resolve(m_device->getOrCreateRemoteGATTService( - services.value()[0]->instance_id, services.value()[0]->uuid, - true /* isPrimary */, device()->id())); + std::move(services.value()[0]), true /* isPrimary */, + device()->id())); return; } HeapVector<Member<BluetoothRemoteGATTService>> gattServices; gattServices.reserveInitialCapacity(services->size()); - for (const auto& service : services.value()) { + for (auto& service : services.value()) { gattServices.push_back(m_device->getOrCreateRemoteGATTService( - service->instance_id, service->uuid, true /* 
isPrimary */, - device()->id())); + std::move(service), true /* isPrimary */, device()->id())); } resolver->resolve(gattServices); } else { @@ -192,14 +187,12 @@ AddToActiveAlgorithms(resolver); mojom::blink::WebBluetoothService* service = m_device->bluetooth()->service(); - mojom::blink::WebBluetoothDeviceIdPtr deviceId = - BluetoothDevice::createMojoDeviceId(device()->id()); WTF::Optional<String> uuid = WTF::nullopt; if (!servicesUUID.isEmpty()) uuid = servicesUUID; service->RemoteServerGetPrimaryServices( - std::move(deviceId), quantity, uuid, + device()->id(), quantity, uuid, convertToBaseCallback( WTF::bind(&BluetoothRemoteGATTServer::GetPrimaryServicesCallback, wrapPersistent(this), quantity, wrapPersistent(resolver))));
diff --git a/third_party/WebKit/Source/modules/bluetooth/BluetoothRemoteGATTService.cpp b/third_party/WebKit/Source/modules/bluetooth/BluetoothRemoteGATTService.cpp index 9407314..a338fc0 100644 --- a/third_party/WebKit/Source/modules/bluetooth/BluetoothRemoteGATTService.cpp +++ b/third_party/WebKit/Source/modules/bluetooth/BluetoothRemoteGATTService.cpp
@@ -14,7 +14,6 @@ #include "modules/bluetooth/BluetoothRemoteGATTCharacteristic.h" #include "modules/bluetooth/BluetoothUUID.h" #include "wtf/PtrUtil.h" -#include <memory> #include <utility> namespace blink { @@ -32,13 +31,11 @@ } // namespace BluetoothRemoteGATTService::BluetoothRemoteGATTService( - const String& serviceInstanceId, - const String& uuid, + mojom::blink::WebBluetoothRemoteGATTServicePtr service, bool isPrimary, const String& deviceInstanceId, BluetoothDevice* device) - : m_serviceInstanceId(serviceInstanceId), - m_uuid(uuid), + : m_service(std::move(service)), m_isPrimary(isPrimary), m_deviceInstanceId(deviceInstanceId), m_device(device) {} @@ -74,21 +71,18 @@ if (quantity == mojom::blink::WebBluetoothGATTQueryQuantity::SINGLE) { DCHECK_EQ(1u, characteristics->size()); resolver->resolve(device()->getOrCreateRemoteGATTCharacteristic( - resolver->getExecutionContext(), - characteristics.value()[0]->instance_id, serviceInstanceId, - characteristics.value()[0]->uuid, - characteristics.value()[0]->properties, this)); + resolver->getExecutionContext(), serviceInstanceId, + std::move(characteristics.value()[0]), this)); return; } HeapVector<Member<BluetoothRemoteGATTCharacteristic>> gattCharacteristics; gattCharacteristics.reserveInitialCapacity(characteristics->size()); - for (const auto& characteristic : characteristics.value()) { + for (auto& characteristic : characteristics.value()) { gattCharacteristics.push_back( device()->getOrCreateRemoteGATTCharacteristic( - resolver->getExecutionContext(), characteristic->instance_id, - serviceInstanceId, characteristic->uuid, - characteristic->properties, this)); + resolver->getExecutionContext(), serviceInstanceId, + std::move(characteristic), this)); } resolver->resolve(gattCharacteristics); } else { @@ -142,7 +136,7 @@ DOMException::create(NetworkError, kGATTServerNotConnected)); } - if (!device()->isValidService(m_serviceInstanceId)) { + if (!device()->isValidService(m_service->instance_id)) { return 
ScriptPromise::rejectWithDOMException( scriptState, DOMException::create(InvalidStateError, kInvalidService)); } @@ -157,10 +151,10 @@ if (!characteristicsUUID.isEmpty()) uuid = characteristicsUUID; service->RemoteServiceGetCharacteristics( - m_serviceInstanceId, quantity, uuid, + m_service->instance_id, quantity, uuid, convertToBaseCallback( WTF::bind(&BluetoothRemoteGATTService::GetCharacteristicsCallback, - wrapPersistent(this), m_serviceInstanceId, quantity, + wrapPersistent(this), m_service->instance_id, quantity, wrapPersistent(resolver)))); return promise;
diff --git a/third_party/WebKit/Source/modules/bluetooth/BluetoothRemoteGATTService.h b/third_party/WebKit/Source/modules/bluetooth/BluetoothRemoteGATTService.h index 1c59fc8..2884d1f 100644 --- a/third_party/WebKit/Source/modules/bluetooth/BluetoothRemoteGATTService.h +++ b/third_party/WebKit/Source/modules/bluetooth/BluetoothRemoteGATTService.h
@@ -33,8 +33,7 @@ DEFINE_WRAPPERTYPEINFO(); public: - BluetoothRemoteGATTService(const String& serviceInstanceId, - const String& uuid, + BluetoothRemoteGATTService(mojom::blink::WebBluetoothRemoteGATTServicePtr, bool isPrimary, const String& deviceInstanceId, BluetoothDevice*); @@ -43,7 +42,7 @@ DECLARE_VIRTUAL_TRACE(); // IDL exposed interface: - String uuid() { return m_uuid; } + String uuid() { return m_service->uuid; } bool isPrimary() { return m_isPrimary; } BluetoothDevice* device() { return m_device; } ScriptPromise getCharacteristic(ScriptState*, @@ -61,15 +60,14 @@ ScriptPromiseResolver*, mojom::blink::WebBluetoothResult, Optional<Vector<mojom::blink::WebBluetoothRemoteGATTCharacteristicPtr>> - services); + characteristics); ScriptPromise getCharacteristicsImpl( ScriptState*, mojom::blink::WebBluetoothGATTQueryQuantity, const String& characteristicUUID = String()); - const String m_serviceInstanceId; - const String m_uuid; + mojom::blink::WebBluetoothRemoteGATTServicePtr m_service; const bool m_isPrimary; const String m_deviceInstanceId; Member<BluetoothDevice> m_device;
diff --git a/third_party/WebKit/Source/modules/indexeddb/IDBIndex.idl b/third_party/WebKit/Source/modules/indexeddb/IDBIndex.idl index e1c4f1d..1d33c93 100644 --- a/third_party/WebKit/Source/modules/indexeddb/IDBIndex.idl +++ b/third_party/WebKit/Source/modules/indexeddb/IDBIndex.idl
@@ -37,7 +37,7 @@ [CallWith=ScriptState, RaisesException] IDBRequest get(any key); [CallWith=ScriptState, RaisesException] IDBRequest getKey(any key); // TODO(cmumford): 0xFFFFFFFF is not necessary. Remove once crbug.com/335871 is fixed. - [CallWith=ScriptState, RaisesException] IDBRequest getAll([Default=Undefined] optional any range, optional unsigned long maxCount = 0xFFFFFFFF); + [CallWith=ScriptState, RaisesException] IDBRequest getAll([Default=Undefined] optional any range, [EnforceRange] optional unsigned long maxCount = 0xFFFFFFFF); // TODO(cmumford): 0xFFFFFFFF is not necessary. Remove once crbug.com/335871 is fixed. [CallWith=ScriptState, RaisesException] IDBRequest getAllKeys([Default=Undefined] optional any range, [EnforceRange] optional unsigned long maxCount = 0xFFFFFFFF); [CallWith=ScriptState, RaisesException] IDBRequest count([Default=Undefined] optional any key);
diff --git a/third_party/WebKit/Source/modules/indexeddb/IDBObjectStore.idl b/third_party/WebKit/Source/modules/indexeddb/IDBObjectStore.idl index eb57f91..abafe06 100644 --- a/third_party/WebKit/Source/modules/indexeddb/IDBObjectStore.idl +++ b/third_party/WebKit/Source/modules/indexeddb/IDBObjectStore.idl
@@ -41,9 +41,9 @@ [CallWith=ScriptState, RaisesException] IDBRequest get(any key); [CallWith=ScriptState, RaisesException, RuntimeEnabled=IndexedDBExperimental] IDBRequest getKey(any key); // TODO(cmumford): 0xFFFFFFFF is not necessary. Remove once crbug.com/335871 is fixed. - [CallWith=ScriptState, RaisesException] IDBRequest getAll([Default=Undefined] optional any range, optional unsigned long maxCount = 0xFFFFFFFF); + [CallWith=ScriptState, RaisesException] IDBRequest getAll([Default=Undefined] optional any range, [EnforceRange] optional unsigned long maxCount = 0xFFFFFFFF); // TODO(cmumford): 0xFFFFFFFF is not necessary. Remove once crbug.com/335871 is fixed. - [CallWith=ScriptState, RaisesException] IDBRequest getAllKeys([Default=Undefined] optional any range, optional unsigned long maxCount = 0xFFFFFFFF); + [CallWith=ScriptState, RaisesException] IDBRequest getAllKeys([Default=Undefined] optional any range, [EnforceRange] optional unsigned long maxCount = 0xFFFFFFFF); [CallWith=ScriptState, RaisesException] IDBRequest count([Default=Undefined] optional any key); [CallWith=ScriptState, RaisesException] IDBRequest openCursor([Default=Undefined] optional any range, optional IDBCursorDirection direction = "next"); [CallWith=ScriptState, RaisesException] IDBRequest openKeyCursor([Default=Undefined] optional any range, optional IDBCursorDirection direction = "next");
diff --git a/third_party/WebKit/Source/modules/webaudio/AudioParamTimeline.cpp b/third_party/WebKit/Source/modules/webaudio/AudioParamTimeline.cpp index 182465b..5a5448d9 100644 --- a/third_party/WebKit/Source/modules/webaudio/AudioParamTimeline.cpp +++ b/third_party/WebKit/Source/modules/webaudio/AudioParamTimeline.cpp
@@ -471,6 +471,7 @@ int numberOfEvents = m_events.size(); if (numberOfEvents > 0) { + bool clampedSomeEventTime = false; double currentTime = startFrame / sampleRate; // Look at all the events in the timeline and check to see if any needs @@ -482,8 +483,10 @@ // in the past so clamp the event time to the current time (start of // the rendering quantum). if (event.needsTimeClampCheck()) { - if (event.time() < currentTime) + if (event.time() < currentTime) { event.setTime(currentTime); + clampedSomeEventTime = true; + } // In all cases, we can clear the flag because the event is either // in the future, or we've already checked it (just now). @@ -491,6 +494,14 @@ } } + if (clampedSomeEventTime) { + // If we clamped some event time to current time, we need to + // sort the event list in time order again, but it must be + // stable! + std::stable_sort(m_events.begin(), m_events.end(), + ParamEvent::eventPreceeds); + } + // Optimize the case where the last event is in the past. ParamEvent& lastEvent = m_events[m_events.size() - 1]; ParamEvent::Type lastEventType = lastEvent.getType(); @@ -646,6 +657,7 @@ float value2 = nextEvent ? nextEvent->value() : value1; double time2 = nextEvent ? nextEvent->time() : endFrame / sampleRate + 1; + DCHECK_GE(time2, time1); double deltaTime = time2 - time1; float k = deltaTime > 0 ? 1 / deltaTime : 0;
diff --git a/third_party/WebKit/Source/modules/webaudio/AudioParamTimeline.h b/third_party/WebKit/Source/modules/webaudio/AudioParamTimeline.h index af85e1c..eeb09099 100644 --- a/third_party/WebKit/Source/modules/webaudio/AudioParamTimeline.h +++ b/third_party/WebKit/Source/modules/webaudio/AudioParamTimeline.h
@@ -124,6 +124,10 @@ double time, double duration); + static bool eventPreceeds(const ParamEvent& a, const ParamEvent& b) { + return a.time() < b.time(); + } + Type getType() const { return m_type; } float value() const { return m_value; } double time() const { return m_time; }
diff --git a/third_party/WebKit/Source/modules/webgl/WebGL2RenderingContextBase.cpp b/third_party/WebKit/Source/modules/webgl/WebGL2RenderingContextBase.cpp index 84e495c..3f07acb6 100644 --- a/third_party/WebKit/Source/modules/webgl/WebGL2RenderingContextBase.cpp +++ b/third_party/WebKit/Source/modules/webgl/WebGL2RenderingContextBase.cpp
@@ -4550,10 +4550,6 @@ } DEFINE_TRACE_WRAPPERS(WebGL2RenderingContextBase) { - if (isContextLost()) { - return; - } - visitor->traceWrappers(m_transformFeedbackBinding); visitor->traceWrappers(m_readFramebufferBinding); visitor->traceWrappers(m_boundCopyReadBuffer);
diff --git a/third_party/WebKit/Source/platform/BUILD.gn b/third_party/WebKit/Source/platform/BUILD.gn index ee8a5ef..4098586 100644 --- a/third_party/WebKit/Source/platform/BUILD.gn +++ b/third_party/WebKit/Source/platform/BUILD.gn
@@ -220,17 +220,6 @@ # Force include the header. cflags = [ "/FI$precompiled_header" ] - - # Disable warning for "this file was empty after preprocessing". This - # error is generated only in C mode for ANSI compatibility. It conflicts - # with precompiled headers since the source file that's "compiled" for - # making the precompiled header is empty. - # - # This error doesn't happen every time. In VS2013, it seems if the .pch - # file doesn't exist, no error will be generated (probably MS tested this - # case but forgot the other one?). To reproduce this error, do a build, - # then delete the precompile.c.obj file, then build again. - cflags_c = [ "/wd4206" ] } } }
diff --git a/third_party/WebKit/Source/platform/PartitionAllocMemoryDumpProvider.cpp b/third_party/WebKit/Source/platform/PartitionAllocMemoryDumpProvider.cpp index 384b529..5b66c0b 100644 --- a/third_party/WebKit/Source/platform/PartitionAllocMemoryDumpProvider.cpp +++ b/third_party/WebKit/Source/platform/PartitionAllocMemoryDumpProvider.cpp
@@ -49,13 +49,13 @@ : m_memoryDump(memoryDump), m_uid(0), m_totalActiveBytes(0) {} // PartitionStatsDumper implementation. - void partitionDumpTotals(const char* partitionName, + void PartitionDumpTotals(const char* partitionName, const WTF::PartitionMemoryStats*) override; - void partitionsDumpBucketStats( + void PartitionsDumpBucketStats( const char* partitionName, const WTF::PartitionBucketMemoryStats*) override; - size_t totalActiveBytes() const { return m_totalActiveBytes; } + size_t TotalActiveBytes() const { return m_totalActiveBytes; } private: base::trace_event::ProcessMemoryDump* m_memoryDump; @@ -63,56 +63,58 @@ size_t m_totalActiveBytes; }; -void PartitionStatsDumperImpl::partitionDumpTotals( +void PartitionStatsDumperImpl::PartitionDumpTotals( const char* partitionName, const WTF::PartitionMemoryStats* memoryStats) { - m_totalActiveBytes += memoryStats->totalActiveBytes; + m_totalActiveBytes += memoryStats->total_active_bytes; std::string dumpName = getPartitionDumpName(partitionName); base::trace_event::MemoryAllocatorDump* allocatorDump = m_memoryDump->CreateAllocatorDump(dumpName); - allocatorDump->AddScalar("size", "bytes", memoryStats->totalResidentBytes); + allocatorDump->AddScalar("size", "bytes", memoryStats->total_resident_bytes); allocatorDump->AddScalar("allocated_objects_size", "bytes", - memoryStats->totalActiveBytes); + memoryStats->total_active_bytes); allocatorDump->AddScalar("virtual_size", "bytes", - memoryStats->totalMmappedBytes); + memoryStats->total_mmapped_bytes); allocatorDump->AddScalar("virtual_committed_size", "bytes", - memoryStats->totalCommittedBytes); + memoryStats->total_committed_bytes); allocatorDump->AddScalar("decommittable_size", "bytes", - memoryStats->totalDecommittableBytes); + memoryStats->total_decommittable_bytes); allocatorDump->AddScalar("discardable_size", "bytes", - memoryStats->totalDiscardableBytes); + memoryStats->total_discardable_bytes); } -void PartitionStatsDumperImpl::partitionsDumpBucketStats( 
+void PartitionStatsDumperImpl::PartitionsDumpBucketStats( const char* partitionName, const WTF::PartitionBucketMemoryStats* memoryStats) { - ASSERT(memoryStats->isValid); + DCHECK(memoryStats->is_valid); std::string dumpName = getPartitionDumpName(partitionName); - if (memoryStats->isDirectMap) + if (memoryStats->is_direct_map) { dumpName.append(base::StringPrintf("/directMap_%lu", ++m_uid)); - else + } else { dumpName.append(base::StringPrintf( - "/bucket_%u", static_cast<unsigned>(memoryStats->bucketSlotSize))); + "/bucket_%u", static_cast<unsigned>(memoryStats->bucket_slot_size))); + } base::trace_event::MemoryAllocatorDump* allocatorDump = m_memoryDump->CreateAllocatorDump(dumpName); - allocatorDump->AddScalar("size", "bytes", memoryStats->residentBytes); + allocatorDump->AddScalar("size", "bytes", memoryStats->resident_bytes); allocatorDump->AddScalar("allocated_objects_size", "bytes", - memoryStats->activeBytes); - allocatorDump->AddScalar("slot_size", "bytes", memoryStats->bucketSlotSize); + memoryStats->active_bytes); + allocatorDump->AddScalar("slot_size", "bytes", memoryStats->bucket_slot_size); allocatorDump->AddScalar("decommittable_size", "bytes", - memoryStats->decommittableBytes); + memoryStats->decommittable_bytes); allocatorDump->AddScalar("discardable_size", "bytes", - memoryStats->discardableBytes); + memoryStats->discardable_bytes); allocatorDump->AddScalar("total_pages_size", "bytes", - memoryStats->allocatedPageSize); + memoryStats->allocated_page_size); allocatorDump->AddScalar("active_pages", "objects", - memoryStats->numActivePages); - allocatorDump->AddScalar("full_pages", "objects", memoryStats->numFullPages); + memoryStats->num_active_pages); + allocatorDump->AddScalar("full_pages", "objects", + memoryStats->num_full_pages); allocatorDump->AddScalar("empty_pages", "objects", - memoryStats->numEmptyPages); + memoryStats->num_empty_pages); allocatorDump->AddScalar("decommitted_pages", "objects", - memoryStats->numDecommittedPages); + 
memoryStats->num_decommitted_pages); } } // namespace @@ -165,7 +167,7 @@ base::trace_event::MemoryAllocatorDump* allocatedObjectsDump = memoryDump->CreateAllocatorDump(Partitions::kAllocatedObjectPoolName); allocatedObjectsDump->AddScalar("size", "bytes", - partitionStatsDumper.totalActiveBytes()); + partitionStatsDumper.TotalActiveBytes()); memoryDump->AddOwnershipEdge(allocatedObjectsDump->guid(), partitionsDump->guid()); @@ -186,11 +188,11 @@ if (!m_allocationRegister) m_allocationRegister.reset(new base::trace_event::AllocationRegister()); } - WTF::PartitionAllocHooks::setAllocationHook(reportAllocation); - WTF::PartitionAllocHooks::setFreeHook(reportFree); + WTF::PartitionAllocHooks::SetAllocationHook(reportAllocation); + WTF::PartitionAllocHooks::SetFreeHook(reportFree); } else { - WTF::PartitionAllocHooks::setAllocationHook(nullptr); - WTF::PartitionAllocHooks::setFreeHook(nullptr); + WTF::PartitionAllocHooks::SetAllocationHook(nullptr); + WTF::PartitionAllocHooks::SetFreeHook(nullptr); } m_isHeapProfilingEnabled = enabled; }
diff --git a/third_party/WebKit/Source/platform/graphics/CanvasSurfaceLayerBridge.cpp b/third_party/WebKit/Source/platform/graphics/CanvasSurfaceLayerBridge.cpp index 3351fcd..67b4817 100644 --- a/third_party/WebKit/Source/platform/graphics/CanvasSurfaceLayerBridge.cpp +++ b/third_party/WebKit/Source/platform/graphics/CanvasSurfaceLayerBridge.cpp
@@ -94,8 +94,8 @@ scoped_refptr<cc::SurfaceLayer> surfaceLayer = cc::SurfaceLayer::Create(m_refFactory); - surfaceLayer->SetSurfaceInfo( - surfaceInfo, true /* scale layer bounds with surface size */); + surfaceLayer->SetSurfaceInfo(surfaceInfo); + surfaceLayer->SetStretchContentToFillBounds(true); m_CCLayer = surfaceLayer; m_webLayer = @@ -108,8 +108,7 @@ m_currentSurfaceId = surfaceInfo.id(); cc::SurfaceLayer* surfaceLayer = static_cast<cc::SurfaceLayer*>(m_CCLayer.get()); - surfaceLayer->SetSurfaceInfo( - surfaceInfo, true /* scale layer bounds with surface size */); + surfaceLayer->SetSurfaceInfo(surfaceInfo); } m_observer->OnWebLayerReplaced();
diff --git a/third_party/WebKit/Source/platform/heap/CallbackStack.cpp b/third_party/WebKit/Source/platform/heap/CallbackStack.cpp index b9742b22..93a1232 100644 --- a/third_party/WebKit/Source/platform/heap/CallbackStack.cpp +++ b/third_party/WebKit/Source/platform/heap/CallbackStack.cpp
@@ -20,13 +20,13 @@ } m_freeListNext[kPooledBlockCount - 1] = -1; m_pooledMemory = static_cast<CallbackStack::Item*>( - WTF::allocPages(nullptr, kBlockBytes * kPooledBlockCount, + WTF::AllocPages(nullptr, kBlockBytes * kPooledBlockCount, WTF::kPageAllocationGranularity, WTF::PageAccessible)); CHECK(m_pooledMemory); } void CallbackStackMemoryPool::shutdown() { - WTF::freePages(m_pooledMemory, kBlockBytes * kPooledBlockCount); + WTF::FreePages(m_pooledMemory, kBlockBytes * kPooledBlockCount); m_pooledMemory = nullptr; m_freeListFirst = 0; }
diff --git a/third_party/WebKit/Source/platform/heap/HeapPage.cpp b/third_party/WebKit/Source/platform/heap/HeapPage.cpp index 6863654..ad097d4 100644 --- a/third_party/WebKit/Source/platform/heap/HeapPage.cpp +++ b/third_party/WebKit/Source/platform/heap/HeapPage.cpp
@@ -1342,11 +1342,11 @@ #if !ENABLE(ASSERT) && !defined(LEAK_SANITIZER) && !defined(ADDRESS_SANITIZER) static void discardPages(Address begin, Address end) { uintptr_t beginAddress = - WTF::roundUpToSystemPage(reinterpret_cast<uintptr_t>(begin)); + WTF::RoundUpToSystemPage(reinterpret_cast<uintptr_t>(begin)); uintptr_t endAddress = - WTF::roundDownToSystemPage(reinterpret_cast<uintptr_t>(end)); + WTF::RoundDownToSystemPage(reinterpret_cast<uintptr_t>(end)); if (beginAddress < endAddress) - WTF::discardSystemPages(reinterpret_cast<void*>(beginAddress), + WTF::DiscardSystemPages(reinterpret_cast<void*>(beginAddress), endAddress - beginAddress); } #endif
diff --git a/third_party/WebKit/Source/platform/heap/PageMemory.cpp b/third_party/WebKit/Source/platform/heap/PageMemory.cpp index d99f9bb..967c764c 100644 --- a/third_party/WebKit/Source/platform/heap/PageMemory.cpp +++ b/third_party/WebKit/Source/platform/heap/PageMemory.cpp
@@ -12,18 +12,18 @@ namespace blink { void MemoryRegion::release() { - WTF::freePages(m_base, m_size); + WTF::FreePages(m_base, m_size); } bool MemoryRegion::commit() { - WTF::recommitSystemPages(m_base, m_size); - return WTF::setSystemPagesAccessible(m_base, m_size); + WTF::RecommitSystemPages(m_base, m_size); + return WTF::SetSystemPagesAccessible(m_base, m_size); } void MemoryRegion::decommit() { ASAN_UNPOISON_MEMORY_REGION(m_base, m_size); - WTF::decommitSystemPages(m_base, m_size); - WTF::setSystemPagesInaccessible(m_base, m_size); + WTF::DecommitSystemPages(m_base, m_size); + WTF::SetSystemPagesInaccessible(m_base, m_size); } PageMemoryRegion::PageMemoryRegion(Address base, @@ -65,7 +65,7 @@ size = (size + WTF::kPageAllocationGranularityOffsetMask) & WTF::kPageAllocationGranularityBaseMask; Address base = static_cast<Address>( - WTF::allocPages(nullptr, size, blinkPageSize, WTF::PageInaccessible)); + WTF::AllocPages(nullptr, size, blinkPageSize, WTF::PageInaccessible)); if (!base) blinkGCOutOfMemory(); return new PageMemoryRegion(base, size, numPages, regionTree);
diff --git a/third_party/WebKit/Source/platform/mojo/BluetoothStructTraits.cpp b/third_party/WebKit/Source/platform/mojo/BluetoothStructTraits.cpp index 636be5a..4f660e0 100644 --- a/third_party/WebKit/Source/platform/mojo/BluetoothStructTraits.cpp +++ b/third_party/WebKit/Source/platform/mojo/BluetoothStructTraits.cpp
@@ -9,6 +9,13 @@ namespace mojo { // static +bool StructTraits<::blink::mojom::WebBluetoothDeviceIdDataView, WTF::String>:: + Read(::blink::mojom::WebBluetoothDeviceIdDataView data, + WTF::String* output) { + return data.ReadDeviceId(output); +} + +// static bool StructTraits<bluetooth::mojom::UUIDDataView, WTF::String>::Read( bluetooth::mojom::UUIDDataView data, WTF::String* output) {
diff --git a/third_party/WebKit/Source/platform/mojo/BluetoothStructTraits.h b/third_party/WebKit/Source/platform/mojo/BluetoothStructTraits.h index 905fcd5..0234b3d4 100644 --- a/third_party/WebKit/Source/platform/mojo/BluetoothStructTraits.h +++ b/third_party/WebKit/Source/platform/mojo/BluetoothStructTraits.h
@@ -6,11 +6,22 @@ #define BluetoothStructTraits_h #include "device/bluetooth/public/interfaces/uuid.mojom-blink.h" +#include "public/platform/modules/bluetooth/web_bluetooth.mojom-blink.h" #include "wtf/text/WTFString.h" namespace mojo { template <> +struct StructTraits<::blink::mojom::WebBluetoothDeviceIdDataView, WTF::String> { + static const WTF::String& device_id(const WTF::String& input) { + return input; + } + + static bool Read(::blink::mojom::WebBluetoothDeviceIdDataView, + WTF::String* output); +}; + +template <> struct StructTraits<bluetooth::mojom::UUIDDataView, WTF::String> { static const WTF::String& uuid(const WTF::String& input) { return input; }
diff --git a/third_party/WebKit/Source/platform/mojo/blink_typemaps.gni b/third_party/WebKit/Source/platform/mojo/blink_typemaps.gni index bbad913d..f25ae1c 100644 --- a/third_party/WebKit/Source/platform/mojo/blink_typemaps.gni +++ b/third_party/WebKit/Source/platform/mojo/blink_typemaps.gni
@@ -3,10 +3,10 @@ # found in the LICENSE file. typemaps = [ - "//third_party/WebKit/Source/platform/mojo/Bluetooth.typemap", "//third_party/WebKit/Source/platform/mojo/File.typemap", "//third_party/WebKit/Source/platform/mojo/Geometry.typemap", "//third_party/WebKit/Source/platform/mojo/KURL.typemap", "//third_party/WebKit/Source/platform/mojo/SecurityOrigin.typemap", "//third_party/WebKit/Source/platform/mojo/String.typemap", + "//third_party/WebKit/public/platform/modules/bluetooth/Bluetooth.typemap", ]
diff --git a/third_party/WebKit/Source/platform/text/BidiCharacterRun.cpp b/third_party/WebKit/Source/platform/text/BidiCharacterRun.cpp index 18c1ed5..9d7fb18 100644 --- a/third_party/WebKit/Source/platform/text/BidiCharacterRun.cpp +++ b/third_party/WebKit/Source/platform/text/BidiCharacterRun.cpp
@@ -29,12 +29,12 @@ namespace blink { void* BidiCharacterRun::operator new(size_t sz) { - return partitionAlloc(Partitions::layoutPartition(), sz, + return PartitionAlloc(Partitions::layoutPartition(), sz, WTF_HEAP_PROFILER_TYPE_NAME(BidiCharacterRun)); } void BidiCharacterRun::operator delete(void* ptr) { - partitionFree(ptr); + PartitionFree(ptr); } } // namespace blink
diff --git a/third_party/WebKit/Source/web/WebMemoryStatistics.cpp b/third_party/WebKit/Source/web/WebMemoryStatistics.cpp index a12b65d..f722a18c 100644 --- a/third_party/WebKit/Source/web/WebMemoryStatistics.cpp +++ b/third_party/WebKit/Source/web/WebMemoryStatistics.cpp
@@ -15,13 +15,13 @@ public: LightPartitionStatsDumperImpl() : m_totalActiveBytes(0) {} - void partitionDumpTotals( + void PartitionDumpTotals( const char* partitionName, const WTF::PartitionMemoryStats* memoryStats) override { - m_totalActiveBytes += memoryStats->totalActiveBytes; + m_totalActiveBytes += memoryStats->total_active_bytes; } - void partitionsDumpBucketStats( + void PartitionsDumpBucketStats( const char* partitionName, const WTF::PartitionBucketMemoryStats*) override {}
diff --git a/third_party/WebKit/Source/wtf/allocator/PartitionAllocator.h b/third_party/WebKit/Source/wtf/allocator/PartitionAllocator.h index 266cbab..1408aa1 100644 --- a/third_party/WebKit/Source/wtf/allocator/PartitionAllocator.h +++ b/third_party/WebKit/Source/wtf/allocator/PartitionAllocator.h
@@ -30,7 +30,7 @@ template <typename T> static size_t quantizedSize(size_t count) { RELEASE_ASSERT(count <= base::kGenericMaxDirectMapped / sizeof(T)); - return partitionAllocActualSize(WTF::Partitions::bufferPartition(), + return PartitionAllocActualSize(WTF::Partitions::bufferPartition(), count * sizeof(T)); } template <typename T>
diff --git a/third_party/WebKit/Source/wtf/allocator/Partitions.cpp b/third_party/WebKit/Source/wtf/allocator/Partitions.cpp index 7b233829..743b900 100644 --- a/third_party/WebKit/Source/wtf/allocator/Partitions.cpp +++ b/third_party/WebKit/Source/wtf/allocator/Partitions.cpp
@@ -53,7 +53,7 @@ base::subtle::SpinLock::Guard guard(s_initializationLock); if (!s_initialized) { - base::partitionAllocGlobalInit(&Partitions::handleOutOfMemory); + base::PartitionAllocGlobalInit(&Partitions::handleOutOfMemory); m_fastMallocAllocator.init(); m_bufferAllocator.init(); m_layoutAllocator.init(); @@ -80,11 +80,11 @@ if (!s_initialized) return; - partitionPurgeMemoryGeneric(bufferPartition(), + PartitionPurgeMemoryGeneric(bufferPartition(), base::PartitionPurgeDecommitEmptyPages); - partitionPurgeMemoryGeneric(fastMallocPartition(), + PartitionPurgeMemoryGeneric(fastMallocPartition(), base::PartitionPurgeDecommitEmptyPages); - partitionPurgeMemory(layoutPartition(), + PartitionPurgeMemory(layoutPartition(), base::PartitionPurgeDecommitEmptyPages); } @@ -112,11 +112,11 @@ DCHECK(isMainThread()); decommitFreeableMemory(); - partitionDumpStatsGeneric(fastMallocPartition(), "fast_malloc", isLightDump, + PartitionDumpStatsGeneric(fastMallocPartition(), "fast_malloc", isLightDump, partitionStatsDumper); - partitionDumpStatsGeneric(bufferPartition(), "buffer", isLightDump, + PartitionDumpStatsGeneric(bufferPartition(), "buffer", isLightDump, partitionStatsDumper); - partitionDumpStats(layoutPartition(), "layout", isLightDump, + PartitionDumpStats(layoutPartition(), "layout", isLightDump, partitionStatsDumper); } @@ -172,12 +172,12 @@ size_t signature = 16 * 1024 * 1024 - 1; base::debug::Alias(&signature); DLOG(FATAL) << "ParitionAlloc: out of memory with < 16M usage (error:" - << getAllocPageErrorCode() << ")"; + << GetAllocPageErrorCode() << ")"; } void Partitions::handleOutOfMemory() { volatile size_t totalUsage = totalSizeOfCommittedPages(); - uint32_t allocPageErrorCode = getAllocPageErrorCode(); + uint32_t allocPageErrorCode = GetAllocPageErrorCode(); base::debug::Alias(&allocPageErrorCode); if (totalUsage >= 2UL * 1024 * 1024 * 1024)
diff --git a/third_party/WebKit/Source/wtf/allocator/Partitions.h b/third_party/WebKit/Source/wtf/allocator/Partitions.h index 6bb530d..3f246be 100644 --- a/third_party/WebKit/Source/wtf/allocator/Partitions.h +++ b/third_party/WebKit/Source/wtf/allocator/Partitions.h
@@ -76,9 +76,9 @@ static size_t totalSizeOfCommittedPages() { size_t totalSize = 0; - totalSize += m_fastMallocAllocator.root()->totalSizeOfCommittedPages; - totalSize += m_bufferAllocator.root()->totalSizeOfCommittedPages; - totalSize += m_layoutAllocator.root()->totalSizeOfCommittedPages; + totalSize += m_fastMallocAllocator.root()->total_size_of_committed_pages; + totalSize += m_bufferAllocator.root()->total_size_of_committed_pages; + totalSize += m_layoutAllocator.root()->total_size_of_committed_pages; return totalSize; } @@ -89,21 +89,21 @@ static void dumpMemoryStats(bool isLightDump, base::PartitionStatsDumper*); ALWAYS_INLINE static void* bufferMalloc(size_t n, const char* typeName) { - return partitionAllocGeneric(bufferPartition(), n, typeName); + return PartitionAllocGeneric(bufferPartition(), n, typeName); } ALWAYS_INLINE static void* bufferRealloc(void* p, size_t n, const char* typeName) { - return partitionReallocGeneric(bufferPartition(), p, n, typeName); + return PartitionReallocGeneric(bufferPartition(), p, n, typeName); } ALWAYS_INLINE static void bufferFree(void* p) { - partitionFreeGeneric(bufferPartition(), p); + PartitionFreeGeneric(bufferPartition(), p); } ALWAYS_INLINE static size_t bufferActualSize(size_t n) { - return partitionAllocActualSize(bufferPartition(), n); + return PartitionAllocActualSize(bufferPartition(), n); } static void* fastMalloc(size_t n, const char* typeName) { - return partitionAllocGeneric(Partitions::fastMallocPartition(), n, + return PartitionAllocGeneric(Partitions::fastMallocPartition(), n, typeName); } static void* fastZeroedMalloc(size_t n, const char* typeName) { @@ -112,11 +112,11 @@ return result; } static void* fastRealloc(void* p, size_t n, const char* typeName) { - return partitionReallocGeneric(Partitions::fastMallocPartition(), p, n, + return PartitionReallocGeneric(Partitions::fastMallocPartition(), p, n, typeName); } static void fastFree(void* p) { - 
partitionFreeGeneric(Partitions::fastMallocPartition(), p); + PartitionFreeGeneric(Partitions::fastMallocPartition(), p); } static void handleOutOfMemory(); @@ -149,17 +149,17 @@ using base::kPageAllocationGranularityOffsetMask; using base::kSystemPageSize; -using base::allocPages; -using base::decommitSystemPages; -using base::discardSystemPages; -using base::partitionFree; -using base::freePages; -using base::getAllocPageErrorCode; -using base::recommitSystemPages; -using base::roundDownToSystemPage; -using base::roundUpToSystemPage; -using base::setSystemPagesAccessible; -using base::setSystemPagesInaccessible; +using base::AllocPages; +using base::DecommitSystemPages; +using base::DiscardSystemPages; +using base::PartitionFree; +using base::FreePages; +using base::GetAllocPageErrorCode; +using base::RecommitSystemPages; +using base::RoundDownToSystemPage; +using base::RoundUpToSystemPage; +using base::SetSystemPagesAccessible; +using base::SetSystemPagesInaccessible; using base::PageAccessible; using base::PageInaccessible;
diff --git a/third_party/WebKit/Source/wtf/typed_arrays/ArrayBufferContents.cpp b/third_party/WebKit/Source/wtf/typed_arrays/ArrayBufferContents.cpp index cc8adf7..6b92d6f 100644 --- a/third_party/WebKit/Source/wtf/typed_arrays/ArrayBufferContents.cpp +++ b/third_party/WebKit/Source/wtf/typed_arrays/ArrayBufferContents.cpp
@@ -111,7 +111,7 @@ InitializationPolicy policy, int flags, void*& data) { - data = partitionAllocGenericFlags( + data = PartitionAllocGenericFlags( WTF::Partitions::bufferPartition(), flags, size, WTF_HEAP_PROFILER_TYPE_NAME(ArrayBufferContents)); if (policy == ZeroInitialize && data)
diff --git a/third_party/WebKit/public/platform/WebGamepad.h b/third_party/WebKit/public/platform/WebGamepad.h index 0e14669..0f85cd69 100644 --- a/third_party/WebKit/public/platform/WebGamepad.h +++ b/third_party/WebKit/public/platform/WebGamepad.h
@@ -28,7 +28,7 @@ namespace blink { -#pragma pack(push, 1) +#pragma pack(push, 4) class WebGamepadButton { public:
diff --git a/third_party/WebKit/public/platform/WebGamepads.h b/third_party/WebKit/public/platform/WebGamepads.h index 4b76542..e425d035 100644 --- a/third_party/WebKit/public/platform/WebGamepads.h +++ b/third_party/WebKit/public/platform/WebGamepads.h
@@ -26,7 +26,7 @@ #include "WebGamepad.h" -#pragma pack(push, 1) +#pragma pack(push, 4) namespace blink {
diff --git a/third_party/WebKit/Source/platform/mojo/Bluetooth.typemap b/third_party/WebKit/public/platform/modules/bluetooth/Bluetooth.typemap similarity index 64% rename from third_party/WebKit/Source/platform/mojo/Bluetooth.typemap rename to third_party/WebKit/public/platform/modules/bluetooth/Bluetooth.typemap index c657a6b..c11dd123 100644 --- a/third_party/WebKit/Source/platform/mojo/Bluetooth.typemap +++ b/third_party/WebKit/public/platform/modules/bluetooth/Bluetooth.typemap
@@ -2,11 +2,15 @@ # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. -mojom = "//device/bluetooth/public/interfaces/uuid.mojom" +mojom = + "//third_party/WebKit/public/platform/modules/bluetooth/web_bluetooth.mojom" public_headers = [ "//third_party/WebKit/Source/wtf/text/WTFString.h" ] traits_headers = [ "//third_party/WebKit/Source/platform/mojo/BluetoothStructTraits.h" ] deps = [ "//device/bluetooth", ] -type_mappings = [ "bluetooth.mojom.UUID=WTF::String" ] +type_mappings = [ + "blink.mojom.WebBluetoothDeviceId=WTF::String", + "bluetooth.mojom.UUID=WTF::String", +]
diff --git a/third_party/closure_compiler/externs/networking_private.js b/third_party/closure_compiler/externs/networking_private.js index 5c7c329..a1ec4ea 100644 --- a/third_party/closure_compiler/externs/networking_private.js +++ b/third_party/closure_compiler/externs/networking_private.js
@@ -1012,6 +1012,15 @@ chrome.networkingPrivate.NetworkFilter; /** + * @typedef {{ + * AllowOnlyPolicyNetworksToAutoconnect: (boolean|undefined), + * AllowOnlyPolicyNetworksToConnect: (boolean|undefined) + * }} + * @see https://developer.chrome.com/extensions/networkingPrivate#type-GlobalPolicy + */ +chrome.networkingPrivate.GlobalPolicy; + +/** * Gets all the properties of the network with id networkGuid. Includes all * properties of the network (read-only and read/write values). * @param {string} networkGuid The GUID of the network to get properties for. @@ -1290,6 +1299,14 @@ chrome.networkingPrivate.setCellularSimState = function(networkGuid, simState, callback) {}; /** + * Gets the global policy properties. These properties are not expected to + * change during a session. + * @param {function(!chrome.networkingPrivate.GlobalPolicy):void} callback + * @see https://developer.chrome.com/extensions/networkingPrivate#method-getGlobalPolicy + */ +chrome.networkingPrivate.getGlobalPolicy = function(callback) {}; + +/** * Fired when the properties change on any of the networks. Sends a list of * GUIDs for networks whose properties have changed. * @type {!ChromeEvent}
diff --git a/third_party/closure_compiler/interfaces/networking_private_interface.js b/third_party/closure_compiler/interfaces/networking_private_interface.js index 97d8718..9e0ac67 100644 --- a/third_party/closure_compiler/interfaces/networking_private_interface.js +++ b/third_party/closure_compiler/interfaces/networking_private_interface.js
@@ -1,4 +1,4 @@ -// Copyright 2016 The Chromium Authors. All rights reserved. +// Copyright 2017 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. @@ -251,6 +251,7 @@ * operation succeeds (|puk| is valid), the PIN will be set to |pin|. (If * |pin| is empty or invalid the operation will fail). * @param {string} networkGuid The GUID of the cellular network to unlock. + * If empty, the default cellular device will be used. * @param {string} pin The current SIM PIN, or the new PIN if PUK is provided. * @param {string=} puk The operator provided PUK for unblocking a blocked * SIM. @@ -268,13 +269,21 @@ * unlockCellularSim() before this can be called (otherwise it will fail and * chrome.runtime.lastError will be set to Error.SimLocked). * @param {string} networkGuid The GUID of the cellular network to set the SIM - * state of. + * state of. If empty, the default cellular device will be used. * @param {!chrome.networkingPrivate.CellularSimState} simState The SIM state * to set. * @param {function():void=} callback Called when the operation has completed. * @see https://developer.chrome.com/extensions/networkingPrivate#method-setCellularSimState */ setCellularSimState: assertNotReached, + + /** + * Gets the global policy properties. These properties are not expected to + * change during a session. + * @param {function(!chrome.networkingPrivate.GlobalPolicy):void} callback + * @see https://developer.chrome.com/extensions/networkingPrivate#method-getGlobalPolicy + */ + getGlobalPolicy: assertNotReached, }; /**
diff --git a/tools/gn/setup.cc b/tools/gn/setup.cc index 8a433bc..06062e9 100644 --- a/tools/gn/setup.cc +++ b/tools/gn/setup.cc
@@ -137,15 +137,31 @@ return FindDotFile(up_one_dir); } +void ForwardItemDefinedToBuilderInMainThread( + Builder* builder_call_on_main_thread_only, + std::unique_ptr<Item> item) { + builder_call_on_main_thread_only->ItemDefined(std::move(item)); + + // Pairs with the IncrementWorkCount in ItemDefinedCallback. + g_scheduler->DecrementWorkCount(); +} + // Called on any thread. Post the item to the builder on the main thread. void ItemDefinedCallback( scoped_refptr<base::SingleThreadTaskRunner> task_runner, Builder* builder_call_on_main_thread_only, std::unique_ptr<Item> item) { DCHECK(item); + + // Increment the work count for the duration of defining the item with the + // builder. Otherwise finishing this callback will race finishing loading + // files. If there is no other pending work at any point in the middle of + // this call completing on the main thread, the 'Complete' function will + // be signaled and we'll stop running with an incomplete build. + g_scheduler->IncrementWorkCount(); task_runner->PostTask( FROM_HERE, - base::Bind(&Builder::ItemDefined, + base::Bind(&ForwardItemDefinedToBuilderInMainThread, base::Unretained(builder_call_on_main_thread_only), base::Passed(&item))); } @@ -321,11 +337,11 @@ } void Setup::RunPreMessageLoop() { - // Load the root build file. - loader_->Load(root_build_file_, LocationRange(), Label()); - // Will be decremented with the loader is drained. g_scheduler->IncrementWorkCount(); + + // Load the root build file. + loader_->Load(root_build_file_, LocationRange(), Label()); } bool Setup::RunPostMessageLoop() {
diff --git a/tools/mb/mb_config.pyl b/tools/mb/mb_config.pyl index 1cf29a3..f6a72bb 100644 --- a/tools/mb/mb_config.pyl +++ b/tools/mb/mb_config.pyl
@@ -1786,7 +1786,7 @@ 'ozone_linux': { 'gn_args': ('ozone_auto_platforms=false ozone_platform_wayland=true ' 'ozone_platform_x11=true ozone_platform_gbm=true ' - 'use_ozone=true use_ash=false'), + 'enable_package_mash_services=true use_ash=false'), }, 'pdf_xfa': {
diff --git a/tools/metrics/histograms/histograms.xml b/tools/metrics/histograms/histograms.xml index a8155d8..c0b7cf8 100644 --- a/tools/metrics/histograms/histograms.xml +++ b/tools/metrics/histograms/histograms.xml
@@ -1302,6 +1302,26 @@ </summary> </histogram> +<histogram name="Apps.NoteTakingApp.DefaultLaunchResult" + enum="NoteTakingAppLaunchResult"> + <owner>derat@chromium.org</owner> + <owner>jdufault@chromium.org</owner> + <summary> + The result of attempting to launch a default note-taking app on Chrome OS. + Only reported if the preferred app was unspecified or failed to launch. + </summary> +</histogram> + +<histogram name="Apps.NoteTakingApp.PreferredLaunchResult" + enum="NoteTakingAppLaunchResult"> + <owner>derat@chromium.org</owner> + <owner>jdufault@chromium.org</owner> + <summary> + The result of attempting to launch the user-specified preferred note-taking + app, if any, on Chrome OS. + </summary> +</histogram> + <histogram name="Arc.AndroidBootTime" units="ms"> <owner>elijahtaylor@google.com</owner> <owner>mitsuji@google.com</owner> @@ -41390,6 +41410,33 @@ </summary> </histogram> +<histogram name="OfflinePages.RequestJob.OpenFileErrorCode" + enum="NetErrorCodes"> + <owner>jianli@chromium.org</owner> + <summary> + Positive net error code for opening the underlying file to serve the offline + page, including net::OK. + </summary> +</histogram> + +<histogram name="OfflinePages.RequestJob.ReadFileErrorCode" + enum="NetErrorCodes"> + <owner>jianli@chromium.org</owner> + <summary> + Positive net error code for the failure to read the underlying file to serve + the offline page. net::OK is not included. + </summary> +</histogram> + +<histogram name="OfflinePages.RequestJob.SeekFileErrorCode" + enum="NetErrorCodes"> + <owner>jianli@chromium.org</owner> + <summary> + Positive net error code for the failure to seek the underlying file to serve + the offline page. net::OK is not included. 
+ </summary> +</histogram> + <histogram name="OfflinePages.SavedPageCount" units="pages"> <owner>jianli@chromium.org</owner> <summary> @@ -86150,6 +86197,7 @@ <int value="1154" label="QUICKUNLOCKPRIVATE_CHECKCREDENTIAL"/> <int value="1155" label="QUICKUNLOCKPRIVATE_GETCREDENTIALREQUIREMENTS"/> <int value="1156" label="CLIPBOARD_SETIMAGEDATA"/> + <int value="1157" label="NETWORKINGPRIVATE_GETGLOBALPOLICY"/> </enum> <enum name="ExtensionIconState" type="int"> @@ -97530,6 +97578,17 @@ <int value="7" label="Main resource redirect, no-store"/> </enum> +<enum name="NoteTakingAppLaunchResult" type="int"> + <int value="0" label="Chrome app launched successfully"/> + <int value="1" label="Chrome app missing"/> + <int value="2" label="Android app launched successfully"/> + <int value="3" label="Android not supported by profile"/> + <int value="4" label="Android not running"/> + <int value="5" label="Couldn't convert path to Android URL"/> + <int value="6" label="No app specified"/> + <int value="7" label="No apps available"/> +</enum> + <enum name="NotificationActionType" type="int"> <int value="0" label="Unknown"/> <int value="1" label="Notification added"/>
diff --git a/tools/perf/generate_perf_json.py b/tools/perf/generate_perf_json.py index 20c9a52..08bb666 100755 --- a/tools/perf/generate_perf_json.py +++ b/tools/perf/generate_perf_json.py
@@ -179,7 +179,14 @@ 'build245-m4--device1', 'build245-m4--device2', 'build245-m4--device3', 'build245-m4--device4', 'build245-m4--device5', 'build245-m4--device6', - 'build245-m4--device7' + 'build245-m4--device7', 'build248-m4--device1', + 'build248-m4--device2', 'build248-m4--device3', + 'build248-m4--device4', 'build248-m4--device5', + 'build248-m4--device6', 'build248-m4--device7', + 'build249-m4--device1', 'build249-m4--device2', + 'build249-m4--device3', 'build249-m4--device4', + 'build249-m4--device5', 'build249-m4--device6', + 'build249-m4--device7' ] } ]) @@ -683,7 +690,7 @@ benchmark_sharding_map['22'] = shard_benchmarks(22, all_benchmarks) benchmark_sharding_map['5'] = shard_benchmarks(5, all_benchmarks) benchmark_sharding_map['1'] = shard_benchmarks(1, all_benchmarks) - benchmark_sharding_map['7'] = shard_benchmarks(7, all_benchmarks) + benchmark_sharding_map['21'] = shard_benchmarks(21, all_benchmarks) for name, config in waterfall['testers'].iteritems(): use_whitelist = config['use_whitelist']
diff --git a/ui/android/delegated_frame_host_android.cc b/ui/android/delegated_frame_host_android.cc index 936bdd859d..feb9e24 100644 --- a/ui/android/delegated_frame_host_android.cc +++ b/ui/android/delegated_frame_host_android.cc
@@ -33,8 +33,7 @@ bool surface_opaque) { // manager must outlive compositors using it. auto layer = cc::SurfaceLayer::Create(surface_manager->reference_factory()); - layer->SetSurfaceInfo(cc::SurfaceInfo(surface_id, 1.f, surface_size), - false /* stretch_content_to_fill_bounds */); + layer->SetSurfaceInfo(cc::SurfaceInfo(surface_id, 1.f, surface_size)); layer->SetBounds(surface_size); layer->SetIsDrawable(true); layer->SetContentsOpaque(surface_opaque);
diff --git a/ui/compositor/layer.cc b/ui/compositor/layer.cc index eb80962..7d9c07b 100644 --- a/ui/compositor/layer.cc +++ b/ui/compositor/layer.cc
@@ -660,8 +660,7 @@ scoped_refptr<cc::SurfaceLayer> new_layer = cc::SurfaceLayer::Create(ref_factory); - new_layer->SetSurfaceInfo(surface_info, - false /* stretch_content_to_fill_bounds */); + new_layer->SetSurfaceInfo(surface_info); SwitchToLayer(new_layer); surface_layer_ = new_layer;
diff --git a/ui/webui/resources/cr_elements/network/cr_network_list.html b/ui/webui/resources/cr_elements/network/cr_network_list.html index 38221b6..96010336 100644 --- a/ui/webui/resources/cr_elements/network/cr_network_list.html +++ b/ui/webui/resources/cr_elements/network/cr_network_list.html
@@ -13,9 +13,6 @@ min-height: 50px; overflow-y: auto; } - cr-network-list-item[show-separator]:not(:first-of-type) { - border-top: var(--cr-separator-line); - } </style> <div id="container" class="layout vertical" scrollable> <iron-list items="[[getItems_(networks, customItems)]]" @@ -23,8 +20,7 @@ selected-item="{{selectedItem}}"> <template> <cr-network-list-item item="[[item]]" is-list-item - show-buttons="[[showButtons]]" tabindex$="[[tabIndex]]" - show-separator$="[[showSeparators]]"> + show-buttons="[[showButtons]]" tabindex$="[[tabIndex]]"> </cr-network-list-item> </template> </iron-list>
diff --git a/ui/webui/resources/cr_elements/network/cr_network_list.js b/ui/webui/resources/cr_elements/network/cr_network_list.js index e1ded9a..e6b273f 100644 --- a/ui/webui/resources/cr_elements/network/cr_network_list.js +++ b/ui/webui/resources/cr_elements/network/cr_network_list.js
@@ -51,13 +51,6 @@ reflectToAttribute: true, }, - /** Whether to show separators between all items. */ - showSeparators: { - type: Boolean, - value: false, - reflectToAttribute: true, - }, - /** * Reflects the iron-list selecteditem property. * @type {!CrNetworkList.CrNetworkListItemType} @@ -90,7 +83,7 @@ getItems_: function() { let customItems = this.customItems.slice(); // Flag the first custom item with isFirstCustomItem = true. - if (!this.showSeparators && customItems.length > 0) + if (customItems.length > 0) customItems[0].isFirstCustomItem = true; return this.networks.concat(customItems); },
diff --git a/ui/webui/resources/cr_elements/network/cr_network_list_item.html b/ui/webui/resources/cr_elements/network/cr_network_list_item.html index eed8280..44c563db9 100644 --- a/ui/webui/resources/cr_elements/network/cr_network_list_item.html +++ b/ui/webui/resources/cr_elements/network/cr_network_list_item.html
@@ -40,6 +40,7 @@ } #divText { + -webkit-padding-end: 8px; display: flex; flex: 1 0 auto; flex-direction: column;
diff --git a/ui/webui/resources/cr_elements/network/cr_network_select.html b/ui/webui/resources/cr_elements/network/cr_network_select.html index cf8a41c1..0960f7b 100644 --- a/ui/webui/resources/cr_elements/network/cr_network_select.html +++ b/ui/webui/resources/cr_elements/network/cr_network_select.html
@@ -26,8 +26,7 @@ <iron-collapse opened="[[networkListOpened]]"> <cr-network-list on-selected="onNetworkListItemSelected_" max-height="[[maxHeight]]" networks="[[networkStateList]]" - custom-items="[[customItems]]" show-buttons="[[showButtons]]" - show-separators="[[showSeparators]]"> + custom-items="[[customItems]]" show-buttons="[[showButtons]]"> </cr-network-list> </iron-collapse> </div>
diff --git a/ui/webui/resources/cr_elements/network/cr_network_select.js b/ui/webui/resources/cr_elements/network/cr_network_select.js index de4cdef..b4192ab 100644 --- a/ui/webui/resources/cr_elements/network/cr_network_select.js +++ b/ui/webui/resources/cr_elements/network/cr_network_select.js
@@ -64,15 +64,6 @@ }, /** - * Show separators between all items. - */ - showSeparators: { - type: Boolean, - value: false, - reflectToAttribute: true, - }, - - /** * List of all network state data for all visible networks. * @type {!Array<!CrOnc.NetworkStateProperties>} */
diff --git a/ui/webui/resources/cr_elements/policy/cr_policy_network_indicator.html b/ui/webui/resources/cr_elements/policy/cr_policy_network_indicator.html index 3acba1f..e7b0346 100644 --- a/ui/webui/resources/cr_elements/policy/cr_policy_network_indicator.html +++ b/ui/webui/resources/cr_elements/policy/cr_policy_network_indicator.html
@@ -1,18 +1,25 @@ <link rel="import" href="chrome://resources/cr_elements/icons.html"> <link rel="import" href="chrome://resources/html/polymer.html"> +<link rel="import" href="chrome://resources/polymer/v1_0/iron-icon/iron-icon.html"> +<link rel="import" href="chrome://resources/polymer/v1_0/paper-tooltip/paper-tooltip.html"> <link rel="import" href="cr_policy_indicator_behavior.html"> <link rel="import" href="cr_policy_network_behavior.html"> <dom-module id="cr-policy-network-indicator"> <link rel="import" type="css" href="cr_policy_indicator.css"> + <style> + paper-tooltip { + --paper-tooltip: var(--cr-policy-tooltip); + } + </style> <template> - <div class="layout horizontal"> - <iron-icon id="indicator" - hidden$="[[!isIndicatorVisible(indicatorType)]]" - icon="[[getPolicyIndicatorIcon(indicatorType)]]" - title$="[[getTooltip_(indicatorType, property, recommended)]]"> - </iron-icon> - </div> + <iron-icon id="indicator" tabindex="0" + hidden$="[[!isIndicatorVisible(indicatorType)]]" + icon="[[getPolicyIndicatorIcon(indicatorType)]]"> + </iron-icon> + <paper-tooltip for="indicator" position="top" fit-to-visible-bounds> + [[getTooltip_(indicatorType)]] + </paper-tooltip> </template> <script src="cr_policy_network_indicator.js"></script> </dom-module>
diff --git a/ui/webui/resources/cr_elements/policy/cr_policy_network_indicator.js b/ui/webui/resources/cr_elements/policy/cr_policy_network_indicator.js index 494d6f4..38e0d0f 100644 --- a/ui/webui/resources/cr_elements/policy/cr_policy_network_indicator.js +++ b/ui/webui/resources/cr_elements/policy/cr_policy_network_indicator.js
@@ -27,9 +27,9 @@ /** * Recommended value for non enforced properties. - * @type {?CrOnc.NetworkPropertyType} + * @private {!CrOnc.NetworkPropertyType|undefined} */ - recommended: {type: Object, value: null}, + recommended_: Object, }, /** @@ -49,16 +49,16 @@ if (property.UserEditable === true && property.hasOwnProperty('UserPolicy')) { // We ignore UserEditable unless there is a UserPolicy. + this.recommended_ = + /** @type {!CrOnc.NetworkPropertyType} */ (property.UserPolicy); this.indicatorType = CrPolicyIndicatorType.RECOMMENDED; - this.recommended = - /** @type {CrOnc.NetworkPropertyType} */ (property.UserPolicy); } else if ( property.DeviceEditable === true && property.hasOwnProperty('DevicePolicy')) { // We ignore DeviceEditable unless there is a DevicePolicy. + this.recommended_ = + /** @type {!CrOnc.NetworkPropertyType} */ (property.DevicePolicy); this.indicatorType = CrPolicyIndicatorType.RECOMMENDED; - this.recommended = - /** @type {CrOnc.NetworkPropertyType} */ (property.DevicePolicy); } else if (effective == 'UserPolicy') { this.indicatorType = CrPolicyIndicatorType.USER_POLICY; } else if (effective == 'DevicePolicy') { @@ -71,23 +71,22 @@ }, /** - * @param {CrPolicyIndicatorType} type - * @param {!CrOnc.ManagedProperty} property - * @param {!CrOnc.NetworkPropertyType} recommended * @return {string} The tooltip text for |type|. 
* @private */ - getTooltip_: function(type, property, recommended) { - if (type == CrPolicyIndicatorType.NONE) + getTooltip_: function() { + if (this.indicatorType == CrPolicyIndicatorType.NONE) return ''; - if (type == CrPolicyIndicatorType.RECOMMENDED) { - var value = property.Active; - if (value == undefined && property.Effective) - value = property[property.Effective]; - if (value == recommended) + if (this.indicatorType == CrPolicyIndicatorType.RECOMMENDED) { + if (!this.property) + return ''; + var value = this.property.Active; + if (value == undefined && this.property.Effective) + value = this.property[this.property.Effective]; + if (value == this.recommended_) return this.i18n_('controlledSettingRecommendedMatches'); return this.i18n_('controlledSettingRecommendedDiffers'); } - return this.getPolicyIndicatorTooltip(type, ''); + return this.getPolicyIndicatorTooltip(this.indicatorType, ''); } });
diff --git a/url/scheme_host_port.cc b/url/scheme_host_port.cc index 6c03101e..f0f56850 100644 --- a/url/scheme_host_port.cc +++ b/url/scheme_host_port.cc
@@ -59,12 +59,6 @@ if (!is_standard) return false; - // These schemes do not follow the generic URL syntax, so we treat them as - // invalid (scheme, host, port) tuples (even though such URLs' _Origin_ might - // have a (scheme, host, port) tuple, they themselves do not). - if (scheme == kFileSystemScheme || scheme == kBlobScheme) - return false; - switch (scheme_type) { case SCHEME_WITH_PORT: // A URL with |scheme| is required to have the host and port (may be
diff --git a/url/scheme_host_port_unittest.cc b/url/scheme_host_port_unittest.cc index 81d4371..ba97a6a 100644 --- a/url/scheme_host_port_unittest.cc +++ b/url/scheme_host_port_unittest.cc
@@ -42,11 +42,19 @@ EXPECT_TRUE(invalid.IsInvalid()); EXPECT_TRUE(invalid.Equals(invalid)); - const char* urls[] = {"data:text/html,Hello!", - "javascript:alert(1)", - "file://example.com:443/etc/passwd", - "blob:https://example.com/uuid-goes-here", - "filesystem:https://example.com/temporary/yay.png"}; + const char* urls[] = { + "data:text/html,Hello!", "javascript:alert(1)", + "file://example.com:443/etc/passwd", + + // These schemes do not follow the generic URL syntax, so make sure we + // treat them as invalid (scheme, host, port) tuples (even though such + // URLs' _Origin_ might have a (scheme, host, port) tuple, they themselves + // do not). This is only *implicitly* checked in the code, by means of + // blob schemes not being standard, and filesystem schemes having type + // SCHEME_WITHOUT_AUTHORITY. If conditions change such that the implicit + // checks no longer hold, this policy should be made explicit. + "blob:https://example.com/uuid-goes-here", + "filesystem:https://example.com/temporary/yay.png"}; for (auto* test : urls) { SCOPED_TRACE(test);